/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains C code routines that are called by the parser
** to handle SELECT statements in SQLite.
*/
#include "sqliteInt.h"
/*
** Trace output macros
*/
#if SELECTTRACE_ENABLED
/***/ int sqlite3SelectTrace = 0;
# define SELECTTRACE(K,P,S,X) \
if(sqlite3SelectTrace&(K)) \
sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\
sqlite3DebugPrintf X
#else
# define SELECTTRACE(K,P,S,X)
#endif
/*
** An instance of the following object is used to record information about
** how to process the DISTINCT keyword, to simplify passing that information
** into the selectInnerLoop() routine.
*/
typedef struct DistinctCtx DistinctCtx;
struct DistinctCtx {
u8 isTnct; /* True if the DISTINCT keyword is present */
u8 eTnctType; /* One of the WHERE_DISTINCT_* operators */
int tabTnct; /* Ephemeral table used for DISTINCT processing */
int addrTnct; /* Address of OP_OpenEphemeral opcode for tabTnct */
};
/*
** An instance of the following object is used to record information about
** the ORDER BY (or GROUP BY) clause of the query being coded.
**
** The aDefer[] array is used by the sorter-references optimization. For
** example, assuming there is no index that can be used for the ORDER BY,
** for the query:
**
** SELECT a, bigblob FROM t1 ORDER BY a LIMIT 10;
**
** it may be more efficient to add just the "a" values to the sorter, and
** retrieve the associated "bigblob" values directly from table t1 as the
** 10 smallest "a" values are extracted from the sorter.
**
** When the sorter-reference optimization is used, there is one entry in the
** aDefer[] array for each database table that may be read as values are
** extracted from the sorter.
*/
typedef struct SortCtx SortCtx;
struct SortCtx {
ExprList *pOrderBy; /* The ORDER BY (or GROUP BY) clause */
int nOBSat; /* Number of ORDER BY terms satisfied by indices */
int iECursor; /* Cursor number for the sorter */
int regReturn; /* Register holding block-output return address */
int labelBkOut; /* Start label for the block-output subroutine */
int addrSortIndex; /* Address of the OP_SorterOpen or OP_OpenEphemeral */
int labelDone; /* Jump here when done, ex: LIMIT reached */
int labelOBLopt; /* Jump here when sorter is full */
u8 sortFlags; /* Zero or more SORTFLAG_* bits */
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
u8 nDefer; /* Number of valid entries in aDefer[] */
struct DeferredCsr {
Table *pTab; /* Table definition */
int iCsr; /* Cursor number for table */
int nKey; /* Number of PK columns for table pTab (>=1) */
} aDefer[4];
#endif
struct RowLoadInfo *pDeferredRowLoad; /* Deferred row loading info or NULL */
};
#define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */
/*
** Delete all the content of a Select structure. Deallocate the structure
** itself only if bFree is true.
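**
** A compound SELECT is a linked list connected by Select.pPrior, and
** the loop below walks and frees that entire list. Only the first
** element honors the bFree flag; prior elements are always
** heap-allocated, so bFree is forced to 1 after the first iteration.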
*/
static void clearSelect(sqlite3 *db, Select *p, int bFree){
while( p ){
Select *pPrior = p->pPrior;
sqlite3ExprListDelete(db, p->pEList);
sqlite3SrcListDelete(db, p->pSrc);
sqlite3ExprDelete(db, p->pWhere);
sqlite3ExprListDelete(db, p->pGroupBy);
sqlite3ExprDelete(db, p->pHaving);
sqlite3ExprListDelete(db, p->pOrderBy);
sqlite3ExprDelete(db, p->pLimit);
#ifndef SQLITE_OMIT_WINDOWFUNC
if( OK_IF_ALWAYS_TRUE(p->pWinDefn) ){
sqlite3WindowListDelete(db, p->pWinDefn);
}
assert( p->pWin==0 );
#endif
if( OK_IF_ALWAYS_TRUE(p->pWith) ) sqlite3WithDelete(db, p->pWith);
if( bFree ) sqlite3DbFreeNN(db, p);
p = pPrior;
bFree = 1;
}
}
/*
** Initialize a SelectDest structure.
*/
void sqlite3SelectDestInit(SelectDest *pDest, int eDest, int iParm){
pDest->eDest = (u8)eDest;
pDest->iSDParm = iParm;
pDest->zAffSdst = 0;
pDest->iSdst = 0;
pDest->nSdst = 0;
}
/*
** Allocate a new Select structure and return a pointer to that
** structure.
*/
Select *sqlite3SelectNew(
Parse *pParse, /* Parsing context */
ExprList *pEList, /* which columns to include in the result */
SrcList *pSrc, /* the FROM clause -- which tables to scan */
Expr *pWhere, /* the WHERE clause */
ExprList *pGroupBy, /* the GROUP BY clause */
Expr *pHaving, /* the HAVING clause */
ExprList *pOrderBy, /* the ORDER BY clause */
u32 selFlags, /* Flag parameters, such as SF_Distinct */
Expr *pLimit /* LIMIT value. NULL means not used */
){
Select *pNew;
Select standin;
pNew = sqlite3DbMallocRawNN(pParse->db, sizeof(*pNew) );
if( pNew==0 ){
assert( pParse->db->mallocFailed );
pNew = &standin;
}
if( pEList==0 ){
pEList = sqlite3ExprListAppend(pParse, 0,
sqlite3Expr(pParse->db,TK_ASTERISK,0));
}
pNew->pEList = pEList;
pNew->op = TK_SELECT;
pNew->selFlags = selFlags;
pNew->iLimit = 0;
pNew->iOffset = 0;
pNew->selId = ++pParse->nSelect;
pNew->addrOpenEphm[0] = -1;
pNew->addrOpenEphm[1] = -1;
pNew->nSelectRow = 0;
if( pSrc==0 ) pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*pSrc));
pNew->pSrc = pSrc;
pNew->pWhere = pWhere;
pNew->pGroupBy = pGroupBy;
pNew->pHaving = pHaving;
pNew->pOrderBy = pOrderBy;
pNew->pPrior = 0;
pNew->pNext = 0;
pNew->pLimit = pLimit;
pNew->pWith = 0;
#ifndef SQLITE_OMIT_WINDOWFUNC
pNew->pWin = 0;
pNew->pWinDefn = 0;
#endif
if( pParse->db->mallocFailed ) {
clearSelect(pParse->db, pNew, pNew!=&standin);
pNew = 0;
}else{
assert( pNew->pSrc!=0 || pParse->nErr>0 );
}
assert( pNew!=&standin );
return pNew;
}
/*
** Delete the given Select structure and all of its substructures.
*/
void sqlite3SelectDelete(sqlite3 *db, Select *p){
if( OK_IF_ALWAYS_TRUE(p) ) clearSelect(db, p, 1);
}
/*
** Return a pointer to the right-most SELECT statement in a compound.
*/
static Select *findRightmost(Select *p){
while( p->pNext ) p = p->pNext;
return p;
}
/*
** Given 1 to 3 identifiers preceding the JOIN keyword, determine the
** type of join. Return an integer constant that expresses that type
** in terms of the following bit values:
**
** JT_INNER
** JT_CROSS
** JT_OUTER
** JT_NATURAL
** JT_LEFT
** JT_RIGHT
**
** A full outer join is the combination of JT_LEFT and JT_RIGHT.
**
** If an illegal or unsupported join type is seen, then still return
** a join type, but put an error in the pParse structure.
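**
** For example (an illustrative mapping derived from the aKeyword[]
** table below): "LEFT" and "LEFT OUTER" both yield JT_LEFT|JT_OUTER,
** "NATURAL LEFT" yields JT_NATURAL|JT_LEFT|JT_OUTER, and "CROSS"
** yields JT_INNER|JT_CROSS.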
*/
int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *pC){
int jointype = 0;
Token *apAll[3];
Token *p;
/* 0123456789 123456789 123456789 123 */
static const char zKeyText[] = "naturaleftouterightfullinnercross";
static const struct {
u8 i; /* Beginning of keyword text in zKeyText[] */
u8 nChar; /* Length of the keyword in characters */
u8 code; /* Join type mask */
} aKeyword[] = {
/* natural */ { 0, 7, JT_NATURAL },
/* left */ { 6, 4, JT_LEFT|JT_OUTER },
/* outer */ { 10, 5, JT_OUTER },
/* right */ { 14, 5, JT_RIGHT|JT_OUTER },
/* full */ { 19, 4, JT_LEFT|JT_RIGHT|JT_OUTER },
/* inner */ { 23, 5, JT_INNER },
/* cross */ { 28, 5, JT_INNER|JT_CROSS },
};
int i, j;
apAll[0] = pA;
apAll[1] = pB;
apAll[2] = pC;
for(i=0; i<3 && apAll[i]; i++){
p = apAll[i];
for(j=0; j<ArraySize(aKeyword); j++){
if( p->n==aKeyword[j].nChar
&& sqlite3StrNICmp((char*)p->z, &zKeyText[aKeyword[j].i], p->n)==0 ){
jointype |= aKeyword[j].code;
break;
}
}
testcase( j==0 || j==1 || j==2 || j==3 || j==4 || j==5 || j==6 );
if( j>=ArraySize(aKeyword) ){
jointype |= JT_ERROR;
break;
}
}
if(
(jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) ||
(jointype & JT_ERROR)!=0
){
const char *zSp = " ";
assert( pB!=0 );
if( pC==0 ){ zSp++; }
sqlite3ErrorMsg(pParse, "unknown or unsupported join type: "
"%T %T%s%T", pA, pB, zSp, pC);
jointype = JT_INNER;
}else if( (jointype & JT_OUTER)!=0
&& (jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ){
sqlite3ErrorMsg(pParse,
"RIGHT and FULL OUTER JOINs are not currently supported");
jointype = JT_INNER;
}
return jointype;
}
/*
** Return the index of a column in a table. Return -1 if the column
** is not contained in the table.
*/
static int columnIndex(Table *pTab, const char *zCol){
int i;
for(i=0; i<pTab->nCol; i++){
if( sqlite3StrICmp(pTab->aCol[i].zName, zCol)==0 ) return i;
}
return -1;
}
/*
** Search the first N tables in pSrc, from left to right, looking for a
** table that has a column named zCol.
**
** When found, set *piTab and *piCol to the table index and column index
** of the matching column and return TRUE.
**
** If not found, return FALSE.
*/
static int tableAndColumnIndex(
SrcList *pSrc, /* Array of tables to search */
int N, /* Number of tables in pSrc->a[] to search */
const char *zCol, /* Name of the column we are looking for */
int *piTab, /* Write index of pSrc->a[] here */
int *piCol /* Write index of pSrc->a[*piTab].pTab->aCol[] here */
){
int i; /* For looping over tables in pSrc */
int iCol; /* Index of column matching zCol */
assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */
for(i=0; i<N; i++){
iCol = columnIndex(pSrc->a[i].pTab, zCol);
if( iCol>=0 ){
if( piTab ){
*piTab = i;
*piCol = iCol;
}
return 1;
}
}
return 0;
}
/*
** This function is used to add terms implied by JOIN syntax to the
** WHERE clause expression of a SELECT statement. The new term, which
** is ANDed with the existing WHERE clause, is of the form:
**
** (tab1.col1 = tab2.col2)
**
** where tab1 is the iSrc'th table in SrcList pSrc and tab2 is the
** (iSrc+1)'th. Column col1 is column iColLeft of tab1, and col2 is
** column iColRight of tab2.
*/
static void addWhereTerm(
Parse *pParse, /* Parsing context */
SrcList *pSrc, /* List of tables in FROM clause */
int iLeft, /* Index of first table to join in pSrc */
int iColLeft, /* Index of column in first table */
int iRight, /* Index of second table in pSrc */
int iColRight, /* Index of column in second table */
int isOuterJoin, /* True if this is an OUTER join */
Expr **ppWhere /* IN/OUT: The WHERE clause to add to */
){
sqlite3 *db = pParse->db;
Expr *pE1;
Expr *pE2;
Expr *pEq;
assert( iLeft<iRight );
assert( pSrc->nSrc>iRight );
assert( pSrc->a[iLeft].pTab );
assert( pSrc->a[iRight].pTab );
pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iColLeft);
pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight);
pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2);
if( pEq && isOuterJoin ){
ExprSetProperty(pEq, EP_FromJoin);
assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) );
ExprSetVVAProperty(pEq, EP_NoReduce);
pEq->iRightJoinTable = (i16)pE2->iTable;
}
*ppWhere = sqlite3ExprAnd(pParse, *ppWhere, pEq);
}
/*
** Set the EP_FromJoin property on all terms of the given expression.
** And set the Expr.iRightJoinTable to iTable for every term in the
** expression.
**
** The EP_FromJoin property is used on terms of an expression to tell
** the LEFT OUTER JOIN processing logic that this term is part of the
** join restriction specified in the ON or USING clause and not a part
** of the more general WHERE clause. These terms are moved over to the
** WHERE clause during join processing but we need to remember that they
** originated in the ON or USING clause.
**
** The Expr.iRightJoinTable tells the WHERE clause processing that the
** expression depends on table iRightJoinTable even if that table is not
** explicitly mentioned in the expression. That information is needed
** for cases like this:
**
** SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b AND t1.x=5
**
** The where clause needs to defer the handling of the t1.x=5
** term until after the t2 loop of the join. In that way, a
** NULL t2 row will be inserted whenever t1.x!=5. If we do not
** defer the handling of t1.x=5, it will be processed immediately
** after the t1 loop and rows with t1.x!=5 will never appear in
** the output, which is incorrect.
*/
void sqlite3SetJoinExpr(Expr *p, int iTable){
while( p ){
ExprSetProperty(p, EP_FromJoin);
assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
ExprSetVVAProperty(p, EP_NoReduce);
p->iRightJoinTable = (i16)iTable;
if( p->op==TK_FUNCTION && p->x.pList ){
int i;
for(i=0; i<p->x.pList->nExpr; i++){
sqlite3SetJoinExpr(p->x.pList->a[i].pExpr, iTable);
}
}
sqlite3SetJoinExpr(p->pLeft, iTable);
p = p->pRight;
}
}
/* Undo the work of sqlite3SetJoinExpr(). In the expression p, convert every
** term that is marked with EP_FromJoin and iRightJoinTable==iTable into
** an ordinary term that omits the EP_FromJoin mark.
**
** This happens when a LEFT JOIN is simplified into an ordinary JOIN.
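**
** For example (illustrative): if the query
**
** SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b WHERE t2.b>0
**
** is downgraded to an ordinary JOIN (the WHERE clause filters out the
** all-NULL t2 rows anyway), the EP_FromJoin marks that
** sqlite3SetJoinExpr() set on "t1.a=t2.b" must be removed again.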
*/
static void unsetJoinExpr(Expr *p, int iTable){
while( p ){
if( ExprHasProperty(p, EP_FromJoin)
&& (iTable<0 || p->iRightJoinTable==iTable) ){
ExprClearProperty(p, EP_FromJoin);
}
if( p->op==TK_FUNCTION && p->x.pList ){
int i;
for(i=0; i<p->x.pList->nExpr; i++){
unsetJoinExpr(p->x.pList->a[i].pExpr, iTable);
}
}
unsetJoinExpr(p->pLeft, iTable);
p = p->pRight;
}
}
/*
** This routine processes the join information for a SELECT statement.
** ON and USING clauses are converted into extra terms of the WHERE clause.
** NATURAL joins also create extra WHERE clause terms.
**
** The terms of a FROM clause are contained in the Select.pSrc structure.
** The left-most table is the first entry in Select.pSrc. The right-most
** table is the last entry. The join operator is held in the entry to
** the left. Thus entry 0 contains the join operator for the join between
** entries 0 and 1. Any ON or USING clauses associated with the join are
** also attached to the left entry.
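**
** For example (illustrative): if t1 and t2 have only the column "x"
** in common, then
**
** SELECT * FROM t1 NATURAL JOIN t2
**
** is processed as if it had been written as
**
** SELECT * FROM t1 JOIN t2 WHERE t1.x=t2.x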
**
** This routine returns the number of errors encountered.
*/
static int sqliteProcessJoin(Parse *pParse, Select *p){
SrcList *pSrc; /* All tables in the FROM clause */
int i, j; /* Loop counters */
struct SrcList_item *pLeft; /* Left table being joined */
struct SrcList_item *pRight; /* Right table being joined */
pSrc = p->pSrc;
pLeft = &pSrc->a[0];
pRight = &pLeft[1];
for(i=0; i<pSrc->nSrc-1; i++, pRight++, pLeft++){
Table *pRightTab = pRight->pTab;
int isOuter;
if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue;
isOuter = (pRight->fg.jointype & JT_OUTER)!=0;
/* When the NATURAL keyword is present, add WHERE clause terms for
** every column that the two tables have in common.
*/
if( pRight->fg.jointype & JT_NATURAL ){
if( pRight->pOn || pRight->pUsing ){
sqlite3ErrorMsg(pParse, "a NATURAL join may not have "
"an ON or USING clause", 0);
return 1;
}
for(j=0; j<pRightTab->nCol; j++){
char *zName; /* Name of column in the right table */
int iLeft; /* Matching left table */
int iLeftCol; /* Matching column in the left table */
zName = pRightTab->aCol[j].zName;
if( tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol) ){
addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, j,
isOuter, &p->pWhere);
}
}
}
/* Disallow both ON and USING clauses in the same join
*/
if( pRight->pOn && pRight->pUsing ){
sqlite3ErrorMsg(pParse, "cannot have both ON and USING "
"clauses in the same join");
return 1;
}
/* Add the ON clause to the end of the WHERE clause, connected by
** an AND operator.
*/
if( pRight->pOn ){
if( isOuter ) sqlite3SetJoinExpr(pRight->pOn, pRight->iCursor);
p->pWhere = sqlite3ExprAnd(pParse, p->pWhere, pRight->pOn);
pRight->pOn = 0;
}
/* Create extra terms on the WHERE clause for each column named
** in the USING clause. Example: If the two tables to be joined are
** A and B and the USING clause names X, Y, and Z, then add this
** to the WHERE clause: A.X=B.X AND A.Y=B.Y AND A.Z=B.Z
** Report an error if any column mentioned in the USING clause is
** not contained in both tables to be joined.
*/
if( pRight->pUsing ){
IdList *pList = pRight->pUsing;
for(j=0; j<pList->nId; j++){
char *zName; /* Name of the term in the USING clause */
int iLeft; /* Table on the left with matching column name */
int iLeftCol; /* Column number of matching column on the left */
int iRightCol; /* Column number of matching column on the right */
zName = pList->a[j].zName;
iRightCol = columnIndex(pRightTab, zName);
if( iRightCol<0
|| !tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol)
){
sqlite3ErrorMsg(pParse, "cannot join using column %s - column "
"not present in both tables", zName);
return 1;
}
addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, iRightCol,
isOuter, &p->pWhere);
}
}
}
return 0;
}
/*
** An instance of this object holds information (beyond pParse and pSelect)
** needed to load the next result row that is to be added to the sorter.
*/
typedef struct RowLoadInfo RowLoadInfo;
struct RowLoadInfo {
int regResult; /* Store results in array of registers here */
u8 ecelFlags; /* Flag argument to ExprCodeExprList() */
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
ExprList *pExtra; /* Extra columns needed by sorter refs */
int regExtraResult; /* Where to load the extra columns */
#endif
};
/*
** This routine does the work of loading query data into an array of
** registers so that it can be added to the sorter.
*/
static void innerLoopLoadRow(
Parse *pParse, /* Statement under construction */
Select *pSelect, /* The query being coded */
RowLoadInfo *pInfo /* Info needed to complete the row load */
){
sqlite3ExprCodeExprList(pParse, pSelect->pEList, pInfo->regResult,
0, pInfo->ecelFlags);
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
if( pInfo->pExtra ){
sqlite3ExprCodeExprList(pParse, pInfo->pExtra, pInfo->regExtraResult, 0, 0);
sqlite3ExprListDelete(pParse->db, pInfo->pExtra);
}
#endif
}
/*
** Code the OP_MakeRecord instruction that generates the entry to be
** added into the sorter.
**
** Return the register in which the result is stored.
*/
static int makeSorterRecord(
Parse *pParse,
SortCtx *pSort,
Select *pSelect,
int regBase,
int nBase
){
int nOBSat = pSort->nOBSat;
Vdbe *v = pParse->pVdbe;
int regOut = ++pParse->nMem;
if( pSort->pDeferredRowLoad ){
innerLoopLoadRow(pParse, pSelect, pSort->pDeferredRowLoad);
}
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nBase-nOBSat, regOut);
return regOut;
}
/*
** Generate code that will push the record in registers regData
** through regData+nData-1 onto the sorter.
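**
** A sorter entry consists of the ORDER BY key values, then an
** OP_Sequence value when an ephemeral index is used in place of a real
** sorter (the bSeq flag below, which keeps equal keys in insertion
** order), then the nData data values: nExpr+bSeq+nData fields total.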
*/
static void pushOntoSorter(
Parse *pParse, /* Parser context */
SortCtx *pSort, /* Information about the ORDER BY clause */
Select *pSelect, /* The whole SELECT statement */
int regData, /* First register holding data to be sorted */
int regOrigData, /* First register holding data before packing */
int nData, /* Number of elements in the regData data array */
int nPrefixReg /* No. of reg prior to regData available for use */
){
Vdbe *v = pParse->pVdbe; /* Stmt under construction */
int bSeq = ((pSort->sortFlags & SORTFLAG_UseSorter)==0);
int nExpr = pSort->pOrderBy->nExpr; /* No. of ORDER BY terms */
int nBase = nExpr + bSeq + nData; /* Fields in sorter record */
int regBase; /* Regs for sorter record */
int regRecord = 0; /* Assembled sorter record */
int nOBSat = pSort->nOBSat; /* ORDER BY terms to skip */
int op; /* Opcode to add sorter record to sorter */
int iLimit; /* LIMIT counter */
int iSkip = 0; /* End of the sorter insert loop */
assert( bSeq==0 || bSeq==1 );
/* Three cases:
** (1) The data to be sorted has already been packed into a Record
** by a prior OP_MakeRecord. In this case nData==1 and regData
** will be completely unrelated to regOrigData.
** (2) All output columns are included in the sort record. In that
** case regData==regOrigData.
** (3) Some output columns are omitted from the sort record due to
** the SQLITE_ENABLE_SORTER_REFERENCE optimization, or due to the
** SQLITE_ECEL_OMITREF optimization, or due to the
** SortCtx.pDeferredRowLoad optimization. In any of these cases
** regOrigData is 0 to prevent this routine from trying to copy
** values that might not yet exist.
*/
assert( nData==1 || regData==regOrigData || regOrigData==0 );
if( nPrefixReg ){
assert( nPrefixReg==nExpr+bSeq );
regBase = regData - nPrefixReg;
}else{
regBase = pParse->nMem + 1;
pParse->nMem += nBase;
}
assert( pSelect->iOffset==0 || pSelect->iLimit!=0 );
iLimit = pSelect->iOffset ? pSelect->iOffset+1 : pSelect->iLimit;
pSort->labelDone = sqlite3VdbeMakeLabel(pParse);
sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, regOrigData,
SQLITE_ECEL_DUP | (regOrigData? SQLITE_ECEL_REF : 0));
if( bSeq ){
sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr);
}
if( nPrefixReg==0 && nData>0 ){
sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+bSeq, nData);
}
if( nOBSat>0 ){
int regPrevKey; /* The first nOBSat columns of the previous row */
int addrFirst; /* Address of the OP_IfNot opcode */
int addrJmp; /* Address of the OP_Jump opcode */
VdbeOp *pOp; /* Opcode that opens the sorter */
int nKey; /* Number of sorting key columns, including OP_Sequence */
KeyInfo *pKI; /* Original KeyInfo on the sorter table */
regRecord = makeSorterRecord(pParse, pSort, pSelect, regBase, nBase);
regPrevKey = pParse->nMem+1;
pParse->nMem += pSort->nOBSat;
nKey = nExpr - pSort->nOBSat + bSeq;
if( bSeq ){
addrFirst = sqlite3VdbeAddOp1(v, OP_IfNot, regBase+nExpr);
}else{
addrFirst = sqlite3VdbeAddOp1(v, OP_SequenceTest, pSort->iECursor);
}
VdbeCoverage(v);
sqlite3VdbeAddOp3(v, OP_Compare, regPrevKey, regBase, pSort->nOBSat);
pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex);
if( pParse->db->mallocFailed ) return;
pOp->p2 = nKey + nData;
pKI = pOp->p4.pKeyInfo;
memset(pKI->aSortFlags, 0, pKI->nKeyField); /* Makes OP_Jump testable */
sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO);
testcase( pKI->nAllField > pKI->nKeyField+2 );
pOp->p4.pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pSort->pOrderBy,nOBSat,
pKI->nAllField-pKI->nKeyField-1);
pOp = 0; /* Ensure pOp is not used after sqlite3VdbeAddOp3() */
addrJmp = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v);
pSort->labelBkOut = sqlite3VdbeMakeLabel(pParse);
pSort->regReturn = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut);
sqlite3VdbeAddOp1(v, OP_ResetSorter, pSort->iECursor);
if( iLimit ){
sqlite3VdbeAddOp2(v, OP_IfNot, iLimit, pSort->labelDone);
VdbeCoverage(v);
}
sqlite3VdbeJumpHere(v, addrFirst);
sqlite3ExprCodeMove(pParse, regBase, regPrevKey, pSort->nOBSat);
sqlite3VdbeJumpHere(v, addrJmp);
}
if( iLimit ){
/* At this point the values for the new sorter entry are stored
** in an array of registers. They need to be composed into a record
** and inserted into the sorter if either (a) there are currently
** less than LIMIT+OFFSET items or (b) the new record is smaller than
** the largest record currently in the sorter. If (b) is true and there
** are already LIMIT+OFFSET items in the sorter, delete the largest
** entry before inserting the new one. This way there are never more
** than LIMIT+OFFSET items in the sorter.
**
** If the new record does not need to be inserted into the sorter,
** jump to the next iteration of the loop. If the pSort->labelOBLopt
** value is not zero, then it is a label of where to jump. Otherwise,
** just bypass the row insert logic. See the header comment on the
** sqlite3WhereOrderByLimitOptLabel() function for additional info.
*/
int iCsr = pSort->iECursor;
sqlite3VdbeAddOp2(v, OP_IfNotZero, iLimit, sqlite3VdbeCurrentAddr(v)+4);
VdbeCoverage(v);
sqlite3VdbeAddOp2(v, OP_Last, iCsr, 0);
iSkip = sqlite3VdbeAddOp4Int(v, OP_IdxLE,
iCsr, 0, regBase+nOBSat, nExpr-nOBSat);
VdbeCoverage(v);
sqlite3VdbeAddOp1(v, OP_Delete, iCsr);
}
if( regRecord==0 ){
regRecord = makeSorterRecord(pParse, pSort, pSelect, regBase, nBase);
}
if( pSort->sortFlags & SORTFLAG_UseSorter ){
op = OP_SorterInsert;
}else{
op = OP_IdxInsert;
}
sqlite3VdbeAddOp4Int(v, op, pSort->iECursor, regRecord,
regBase+nOBSat, nBase-nOBSat);
if( iSkip ){
sqlite3VdbeChangeP2(v, iSkip,
pSort->labelOBLopt ? pSort->labelOBLopt : sqlite3VdbeCurrentAddr(v));
}
}
/*
** Add code to implement the OFFSET
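**
** The OP_IfPos generated below decrements the offset counter and jumps
** to iContinue for as long as the counter remains positive, thereby
** discarding the first iOffset rows of output.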
*/
static void codeOffset(
Vdbe *v, /* Generate code into this VM */
int iOffset, /* Register holding the offset counter */
int iContinue /* Jump here to skip the current record */
){
if( iOffset>0 ){
sqlite3VdbeAddOp3(v, OP_IfPos, iOffset, iContinue, 1); VdbeCoverage(v);
VdbeComment((v, "OFFSET"));
}
}
/*
** Add code that will check to make sure the N registers starting at iMem
** form a distinct entry. iTab is a sorting index that holds previously
** seen combinations of the N values. A new entry is made in iTab
** if the current N values are new.
**
** A jump to addrRepeat is made if the N values match a combination
** that has been seen before (i.e. the values are not distinct).
*/
static void codeDistinct(
Parse *pParse, /* Parsing and code generating context */
int iTab, /* A sorting index used to test for distinctness */
int addrRepeat, /* Jump to here if not distinct */
int N, /* Number of elements */
int iMem /* First element */
){
Vdbe *v;
int r1;
v = pParse->pVdbe;
r1 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v);
sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iTab, r1, iMem, N);
sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
sqlite3ReleaseTempReg(pParse, r1);
}
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
/*
** This function is called as part of inner-loop generation for a SELECT
** statement with an ORDER BY that is not optimized by an index. It
** determines the expressions, if any, that the sorter-reference
** optimization should be used for. The sorter-reference optimization
** is used for SELECT queries like:
**
** SELECT a, bigblob FROM t1 ORDER BY a LIMIT 10
**
** If the optimization is used for expression "bigblob", then instead of
** storing values read from that column in the sorter records, the PK of
** the row from table t1 is stored instead. Then, as records are extracted from
** the sorter to return to the user, the required value of bigblob is
** retrieved directly from table t1. If the values are very large, this
** can be more efficient than storing them directly in the sorter records.
**
** The ExprList_item.bSorterRef flag is set for each expression in pEList
** for which the sorter-reference optimization should be enabled.
** Additionally, the pSort->aDefer[] array is populated with entries
** for all cursors required to evaluate all selected expressions. Finally, the
** output variable (*ppExtra) is set to an expression list containing
** expressions for all extra PK values that should be stored in the
** sorter records.
*/
static void selectExprDefer(
Parse *pParse, /* Leave any error here */
SortCtx *pSort, /* Sorter context */
ExprList *pEList, /* Expressions destined for sorter */
ExprList **ppExtra /* Expressions to append to sorter record */
){
int i;
int nDefer = 0;
ExprList *pExtra = 0;
for(i=0; i<pEList->nExpr; i++){
struct ExprList_item *pItem = &pEList->a[i];
if( pItem->u.x.iOrderByCol==0 ){
Expr *pExpr = pItem->pExpr;
Table *pTab = pExpr->y.pTab;
if( pExpr->op==TK_COLUMN && pExpr->iColumn>=0 && pTab && !IsVirtual(pTab)
&& (pTab->aCol[pExpr->iColumn].colFlags & COLFLAG_SORTERREF)
){
int j;
for(j=0; j<nDefer; j++){
if( pSort->aDefer[j].iCsr==pExpr->iTable ) break;
}
if( j==nDefer ){
if( nDefer==ArraySize(pSort->aDefer) ){
continue;
}else{
int nKey = 1;
int k;
Index *pPk = 0;
if( !HasRowid(pTab) ){
pPk = sqlite3PrimaryKeyIndex(pTab);
nKey = pPk->nKeyCol;
}
for(k=0; k<nKey; k++){
Expr *pNew = sqlite3PExpr(pParse, TK_COLUMN, 0, 0);
if( pNew ){
pNew->iTable = pExpr->iTable;
pNew->y.pTab = pExpr->y.pTab;
pNew->iColumn = pPk ? pPk->aiColumn[k] : -1;
pExtra = sqlite3ExprListAppend(pParse, pExtra, pNew);
}
}
pSort->aDefer[nDefer].pTab = pExpr->y.pTab;
pSort->aDefer[nDefer].iCsr = pExpr->iTable;
pSort->aDefer[nDefer].nKey = nKey;
nDefer++;
}
}
pItem->bSorterRef = 1;
}
}
}
pSort->nDefer = (u8)nDefer;
*ppExtra = pExtra;
}
#endif
/*
** This routine generates the code for the inside of the inner loop
** of a SELECT.
**
** If srcTab is negative, then the p->pEList expressions
** are evaluated in order to get the data for this row. If srcTab is
** zero or more, then data is pulled from srcTab and p->pEList is used only
** to get the number of columns and the collation sequence for each column.
*/
static void selectInnerLoop(
Parse *pParse, /* The parser context */
Select *p, /* The complete select statement being coded */
int srcTab, /* Pull data from this table if non-negative */
SortCtx *pSort, /* If not NULL, info on how to process ORDER BY */
DistinctCtx *pDistinct, /* If not NULL, info on how to process DISTINCT */
SelectDest *pDest, /* How to dispose of the results */
int iContinue, /* Jump here to continue with next row */
int iBreak /* Jump here to break out of the inner loop */
){
Vdbe *v = pParse->pVdbe;
int i;
int hasDistinct; /* True if the DISTINCT keyword is present */
int eDest = pDest->eDest; /* How to dispose of results */
int iParm = pDest->iSDParm; /* First argument to disposal method */
int nResultCol; /* Number of result columns */
int nPrefixReg = 0; /* Number of extra registers before regResult */
RowLoadInfo sRowLoadInfo; /* Info for deferred row loading */
/* Usually, regResult is the first cell in an array of memory cells
** containing the current result row. In this case regOrig is set to the
** same value. However, if the results are being sent to the sorter, the
** values for any expressions that are also part of the sort-key are omitted
** from this array. In this case regOrig is set to zero. */
int regResult; /* Start of memory holding current results */
int regOrig; /* Start of memory holding full result (or 0) */
assert( v );
assert( p->pEList!=0 );
hasDistinct = pDistinct ? pDistinct->eTnctType : WHERE_DISTINCT_NOOP;
if( pSort && pSort->pOrderBy==0 ) pSort = 0;
if( pSort==0 && !hasDistinct ){
assert( iContinue!=0 );
codeOffset(v, p->iOffset, iContinue);
}
/* Pull the requested columns.
*/
nResultCol = p->pEList->nExpr;
if( pDest->iSdst==0 ){
if( pSort ){
nPrefixReg = pSort->pOrderBy->nExpr;
if( !(pSort->sortFlags & SORTFLAG_UseSorter) ) nPrefixReg++;
pParse->nMem += nPrefixReg;
}
pDest->iSdst = pParse->nMem+1;
pParse->nMem += nResultCol;
}else if( pDest->iSdst+nResultCol > pParse->nMem ){
/* This is an error condition that can result, for example, when a SELECT
** on the right-hand side of an INSERT contains more result columns than
** there are columns in the table on the left. The error will be caught
** and reported later. But we need to make sure enough memory is allocated
** to avoid other spurious errors in the meantime. */
pParse->nMem += nResultCol;
}
pDest->nSdst = nResultCol;
regOrig = regResult = pDest->iSdst;
if( srcTab>=0 ){
for(i=0; i<nResultCol; i++){
sqlite3VdbeAddOp3(v, OP_Column, srcTab, i, regResult+i);
VdbeComment((v, "%s", p->pEList->a[i].zName));
}
}else if( eDest!=SRT_Exists ){
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
ExprList *pExtra = 0;
#endif
/* If the destination is an EXISTS(...) expression, the actual
** values returned by the SELECT are not required.
*/
u8 ecelFlags; /* "ecel" is an abbreviation of "ExprCodeExprList" */
ExprList *pEList;
if( eDest==SRT_Mem || eDest==SRT_Output || eDest==SRT_Coroutine ){
ecelFlags = SQLITE_ECEL_DUP;
}else{
ecelFlags = 0;
}
if( pSort && hasDistinct==0 && eDest!=SRT_EphemTab && eDest!=SRT_Table ){
/* For each expression in p->pEList that is a copy of an expression in
** the ORDER BY clause (pSort->pOrderBy), set the associated
** iOrderByCol value to one more than the index of the ORDER BY
** expression within the sort-key that pushOntoSorter() will generate.
** This allows the p->pEList field to be omitted from the sorted record,
** saving space and CPU cycles. */
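/* For example (illustrative): in "SELECT a, b FROM t1 ORDER BY b",
** result column "b" also appears in the sort key, so only "a" needs
** to be duplicated in the sorter payload; "b" is read back out of
** the key itself as rows are extracted from the sorter. */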
ecelFlags |= (SQLITE_ECEL_OMITREF|SQLITE_ECEL_REF);
for(i=pSort->nOBSat; i<pSort->pOrderBy->nExpr; i++){
int j;
if( (j = pSort->pOrderBy->a[i].u.x.iOrderByCol)>0 ){
p->pEList->a[j-1].u.x.iOrderByCol = i+1-pSort->nOBSat;
}
}
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
selectExprDefer(pParse, pSort, p->pEList, &pExtra);
if( pExtra && pParse->db->mallocFailed==0 ){
/* If there are any extra PK columns to add to the sorter records,
** allocate extra memory cells and adjust the OpenEphemeral
** instruction to account for the larger records. This is only
** required if there are one or more WITHOUT ROWID tables with
** composite primary keys in the SortCtx.aDefer[] array. */
VdbeOp *pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex);
pOp->p2 += (pExtra->nExpr - pSort->nDefer);
pOp->p4.pKeyInfo->nAllField += (pExtra->nExpr - pSort->nDefer);
pParse->nMem += pExtra->nExpr;
}
#endif
/* Adjust nResultCol to account for columns that are omitted
** from the sorter by the optimizations in this branch */
pEList = p->pEList;
for(i=0; i<pEList->nExpr; i++){
if( pEList->a[i].u.x.iOrderByCol>0
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
|| pEList->a[i].bSorterRef
#endif
){
nResultCol--;
regOrig = 0;
}
}
testcase( regOrig );
testcase( eDest==SRT_Set );
testcase( eDest==SRT_Mem );
testcase( eDest==SRT_Coroutine );
testcase( eDest==SRT_Output );
assert( eDest==SRT_Set || eDest==SRT_Mem
|| eDest==SRT_Coroutine || eDest==SRT_Output );
}
sRowLoadInfo.regResult = regResult;
sRowLoadInfo.ecelFlags = ecelFlags;
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
sRowLoadInfo.pExtra = pExtra;
sRowLoadInfo.regExtraResult = regResult + nResultCol;
if( pExtra ) nResultCol += pExtra->nExpr;
#endif
if( p->iLimit
&& (ecelFlags & SQLITE_ECEL_OMITREF)!=0
&& nPrefixReg>0
){
assert( pSort!=0 );
assert( hasDistinct==0 );
pSort->pDeferredRowLoad = &sRowLoadInfo;
regOrig = 0;
}else{
innerLoopLoadRow(pParse, p, &sRowLoadInfo);
}
}
/* If the DISTINCT keyword was present on the SELECT statement
** and this row has been seen before, then do not make this row
** part of the result.
*/
if( hasDistinct ){
switch( pDistinct->eTnctType ){
case WHERE_DISTINCT_ORDERED: {
VdbeOp *pOp; /* No longer required OpenEphemeral instr. */
int iJump; /* Jump destination */
int regPrev; /* Previous row content */
/* Allocate space for the previous row */
regPrev = pParse->nMem+1;
pParse->nMem += nResultCol;
/* Change the OP_OpenEphemeral coded earlier to an OP_Null that
** sets the MEM_Cleared bit on the first register of the
** previous value. This will cause the OP_Ne below to always
** fail on the first iteration of the loop even if the first
** row is all NULLs.
*/
sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct);
pOp = sqlite3VdbeGetOp(v, pDistinct->addrTnct);
pOp->opcode = OP_Null;
pOp->p1 = 1;
pOp->p2 = regPrev;
pOp = 0; /* Ensure pOp is not used after sqlite3VdbeAddOp() */
iJump = sqlite3VdbeCurrentAddr(v) + nResultCol;
for(i=0; i<nResultCol; i++){
CollSeq *pColl = sqlite3ExprCollSeq(pParse, p->pEList->a[i].pExpr);
if( i<nResultCol-1 ){
sqlite3VdbeAddOp3(v, OP_Ne, regResult+i, iJump, regPrev+i);
VdbeCoverage(v);
}else{
sqlite3VdbeAddOp3(v, OP_Eq, regResult+i, iContinue, regPrev+i);
VdbeCoverage(v);
}
sqlite3VdbeChangeP4(v, -1, (const char *)pColl, P4_COLLSEQ);
sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
}
assert( sqlite3VdbeCurrentAddr(v)==iJump || pParse->db->mallocFailed );
sqlite3VdbeAddOp3(v, OP_Copy, regResult, regPrev, nResultCol-1);
break;
}
case WHERE_DISTINCT_UNIQUE: {
sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct);
break;
}
default: {
assert( pDistinct->eTnctType==WHERE_DISTINCT_UNORDERED );
codeDistinct(pParse, pDistinct->tabTnct, iContinue, nResultCol,
regResult);
break;
}
}
if( pSort==0 ){
codeOffset(v, p->iOffset, iContinue);
}
}
switch( eDest ){
/* In this mode, write each query result to the key of the temporary
** table iParm.
*/
#ifndef SQLITE_OMIT_COMPOUND_SELECT
case SRT_Union: {
int r1;
r1 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol);
sqlite3ReleaseTempReg(pParse, r1);
break;
}
/* Construct a record from the query result, but instead of
** saving that record, use it as a key to delete elements from
** the temporary table iParm.
*/
case SRT_Except: {
sqlite3VdbeAddOp3(v, OP_IdxDelete, iParm, regResult, nResultCol);
break;
}
#endif /* SQLITE_OMIT_COMPOUND_SELECT */
/* Store the result as data using a unique key.
*/
case SRT_Fifo:
case SRT_DistFifo:
case SRT_Table:
case SRT_EphemTab: {
int r1 = sqlite3GetTempRange(pParse, nPrefixReg+1);
testcase( eDest==SRT_Table );
testcase( eDest==SRT_EphemTab );
testcase( eDest==SRT_Fifo );
testcase( eDest==SRT_DistFifo );
sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg);
#ifndef SQLITE_OMIT_CTE
if( eDest==SRT_DistFifo ){
/* If the destination is DistFifo, then cursor (iParm+1) is open
** on an ephemeral index. If the current row is already present
** in the index, do not write it to the output. If not, add the
** current row to the index and proceed with writing it to the
** output table as well. */
int addr = sqlite3VdbeCurrentAddr(v) + 4;
sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, addr, r1, 0);
VdbeCoverage(v);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm+1, r1,regResult,nResultCol);
assert( pSort==0 );
}
#endif
if( pSort ){
assert( regResult==regOrig );
pushOntoSorter(pParse, pSort, p, r1+nPrefixReg, regOrig, 1, nPrefixReg);
}else{
int r2 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, r2);
sqlite3VdbeAddOp3(v, OP_Insert, iParm, r1, r2);
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
sqlite3ReleaseTempReg(pParse, r2);
}
sqlite3ReleaseTempRange(pParse, r1, nPrefixReg+1);
break;
}
#ifndef SQLITE_OMIT_SUBQUERY
/* If we are creating a set for an "expr IN (SELECT ...)" construct,
** then assemble each result row into an index key and write it into
** the ephemeral table that implements the set.
*/
case SRT_Set: {
if( pSort ){
/* At first glance you would think we could optimize out the
** ORDER BY in this case since the order of entries in the set
** does not matter. But there might be a LIMIT clause, in which
** case the order does matter */
pushOntoSorter(
pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg);
}else{
int r1 = sqlite3GetTempReg(pParse);
assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol );
sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol,
r1, pDest->zAffSdst, nResultCol);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol);
sqlite3ReleaseTempReg(pParse, r1);
}
break;
}
/* If any rows exist in the result set, record that fact and abort.
*/
case SRT_Exists: {
sqlite3VdbeAddOp2(v, OP_Integer, 1, iParm);
/* The LIMIT clause will terminate the loop for us */
break;
}
/* If this is a scalar select that is part of an expression, then
** store the results in the appropriate memory cell or array of
** memory cells and break out of the scan loop.
*/
case SRT_Mem: {
if( pSort ){
assert( nResultCol<=pDest->nSdst );
pushOntoSorter(
pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg);
}else{
assert( nResultCol==pDest->nSdst );
assert( regResult==iParm );
/* The LIMIT clause will jump out of the loop for us */
}
break;
}
#endif /* #ifndef SQLITE_OMIT_SUBQUERY */
case SRT_Coroutine: /* Send data to a co-routine */
case SRT_Output: { /* Return the results */
testcase( eDest==SRT_Coroutine );
testcase( eDest==SRT_Output );
if( pSort ){
pushOntoSorter(pParse, pSort, p, regResult, regOrig, nResultCol,
nPrefixReg);
}else if( eDest==SRT_Coroutine ){
sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
}else{
sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nResultCol);
}
break;
}
#ifndef SQLITE_OMIT_CTE
/* Write the results into a priority queue that is ordered according to
** pDest->pOrderBy (in pSO). pDest->iSDParm (in iParm) is the cursor for an
** index with pSO->nExpr+2 columns. Build a key using pSO for the first
** pSO->nExpr columns, then make sure all keys are unique by adding a
** final OP_Sequence column. The last column is the record as a blob.
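**
** For example (illustrative), with pSO->nExpr==2 each entry added to
** the queue index has the form:
**
** key0, key1, sequence-number, record-blob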
*/
case SRT_DistQueue:
case SRT_Queue: {
int nKey;
int r1, r2, r3;
int addrTest = 0;
ExprList *pSO;
pSO = pDest->pOrderBy;
assert( pSO );
nKey = pSO->nExpr;
r1 = sqlite3GetTempReg(pParse);
r2 = sqlite3GetTempRange(pParse, nKey+2);
r3 = r2+nKey+1;
if( eDest==SRT_DistQueue ){
/* If the destination is DistQueue, then cursor (iParm+1) is open
** on a second ephemeral index that holds every value previously
** added to the queue. */
addrTest = sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, 0,
regResult, nResultCol);
VdbeCoverage(v);
}
sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r3);
if( eDest==SRT_DistQueue ){
sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r3);
sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
}
for(i=0; i<nKey; i++){
sqlite3VdbeAddOp2(v, OP_SCopy,
regResult + pSO->a[i].u.x.iOrderByCol - 1,
r2+i);
}
sqlite3VdbeAddOp2(v, OP_Sequence, iParm, r2+nKey);
sqlite3VdbeAddOp3(v, OP_MakeRecord, r2, nKey+2, r1);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, r2, nKey+2);
if( addrTest ) sqlite3VdbeJumpHere(v, addrTest);
sqlite3ReleaseTempReg(pParse, r1);
sqlite3ReleaseTempRange(pParse, r2, nKey+2);
break;
}
#endif /* SQLITE_OMIT_CTE */
#if !defined(SQLITE_OMIT_TRIGGER)
/* Discard the results. This is used for SELECT statements inside
** the body of a TRIGGER. The purpose of such selects is to call
** user-defined functions that have side effects. We do not care
** about the actual results of the select.
*/
default: {
assert( eDest==SRT_Discard );
break;
}
#endif
}
/* Jump to the end of the loop if the LIMIT is reached. Except, if
** there is a sorter, in which case the sorter has already limited
** the output for us.
*/
if( pSort==0 && p->iLimit ){
sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v);
}
}
/*
** Allocate a KeyInfo object sufficient for an index of N key columns and
** X extra columns.
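**
** The KeyInfo header, the aColl[] array of N+X collating-sequence
** pointers, and the aSortFlags[] array of N+X one-byte flags are
** carved from a single allocation. The sizeof(CollSeq*) subtraction
** below assumes that sizeof(KeyInfo) already includes space for one
** aColl[] slot, as declared in sqliteInt.h.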
*/
KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){
int nExtra = (N+X)*(sizeof(CollSeq*)+1) - sizeof(CollSeq*);
KeyInfo *p = sqlite3DbMallocRawNN(db, sizeof(KeyInfo) + nExtra);
if( p ){
p->aSortFlags = (u8*)&p->aColl[N+X];
p->nKeyField = (u16)N;
p->nAllField = (u16)(N+X);
p->enc = ENC(db);
p->db = db;
p->nRef = 1;
memset(&p[1], 0, nExtra);
}else{
sqlite3OomFault(db);
}
return p;
}
/*
** Deallocate a KeyInfo object
*/
void sqlite3KeyInfoUnref(KeyInfo *p){
if( p ){
assert( p->nRef>0 );
p->nRef--;
if( p->nRef==0 ) sqlite3DbFreeNN(p->db, p);
}
}
/*
** Make a new pointer to a KeyInfo object
*/
KeyInfo *sqlite3KeyInfoRef(KeyInfo *p){
if( p ){
assert( p->nRef>0 );
p->nRef++;
}
return p;
}
#ifdef SQLITE_DEBUG
/*
** Return TRUE if a KeyInfo object can be changed. The KeyInfo object
** can only be changed if this is just a single reference to the object.
**
** This routine is used only inside of assert() statements.
*/
int sqlite3KeyInfoIsWriteable(KeyInfo *p){ return p->nRef==1; }
#endif /* SQLITE_DEBUG */
/*
** Given an expression list, generate a KeyInfo structure that records
** the collating sequence for each expression in that expression list.
**
** If the ExprList is an ORDER BY or GROUP BY clause then the resulting
** KeyInfo structure is appropriate for initializing a virtual index to
** implement that clause. If the ExprList is the result set of a SELECT
** then the KeyInfo structure is appropriate for initializing a virtual
** index to implement a DISTINCT test.
**
** Space to hold the KeyInfo structure is obtained from malloc. The calling
** function is responsible for seeing that this structure is eventually
** freed.
*/
KeyInfo *sqlite3KeyInfoFromExprList(
Parse *pParse, /* Parsing context */
ExprList *pList, /* Form the KeyInfo object from this ExprList */
int iStart, /* Begin with this column of pList */
int nExtra /* Add this many extra columns to the end */
){
int nExpr;
KeyInfo *pInfo;
struct ExprList_item *pItem;
sqlite3 *db = pParse->db;
int i;
nExpr = pList->nExpr;
pInfo = sqlite3KeyInfoAlloc(db, nExpr-iStart, nExtra+1);
if( pInfo ){
assert( sqlite3KeyInfoIsWriteable(pInfo) );
for(i=iStart, pItem=pList->a+iStart; i<nExpr; i++, pItem++){
pInfo->aColl[i-iStart] = sqlite3ExprNNCollSeq(pParse, pItem->pExpr);
pInfo->aSortFlags[i-iStart] = pItem->sortFlags;
}
}
return pInfo;
}
/*
** Name of the connection operator, used for error messages.
*/
static const char *selectOpName(int id){
char *z;
switch( id ){
case TK_ALL: z = "UNION ALL"; break;
case TK_INTERSECT: z = "INTERSECT"; break;
case TK_EXCEPT: z = "EXCEPT"; break;
default: z = "UNION"; break;
}
return z;
}
#ifndef SQLITE_OMIT_EXPLAIN
/*
** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function
** is a no-op. Otherwise, it adds a single row of output to the EQP result,
** where the caption is of the form:
**
** "USE TEMP B-TREE FOR xxx"
**
** where xxx is one of "DISTINCT", "ORDER BY" or "GROUP BY". Exactly which
** is determined by the zUsage argument.
*/
static void explainTempTable(Parse *pParse, const char *zUsage){
ExplainQueryPlan((pParse, 0, "USE TEMP B-TREE FOR %s", zUsage));
}
/*
** Assign expression b to lvalue a. A second, no-op, version of this macro
** is provided when SQLITE_OMIT_EXPLAIN is defined. This allows the code
** in sqlite3Select() to assign values to structure member variables that
** only exist if SQLITE_OMIT_EXPLAIN is not defined without polluting the
** code with #ifndef directives.
*/
# define explainSetInteger(a, b) a = b
#else
/* No-op versions of the explainXXX() functions and macros. */
# define explainTempTable(y,z)
# define explainSetInteger(y,z)
#endif
/*
** If the inner loop was generated using a non-null pOrderBy argument,
** then the results were placed in a sorter. After the loop is terminated
** we need to run the sorter and output the results. The following
** routine generates the code needed to do that.
*/
static void generateSortTail(
Parse *pParse, /* Parsing context */
Select *p, /* The SELECT statement */
SortCtx *pSort, /* Information on the ORDER BY clause */
int nColumn, /* Number of columns of data */
SelectDest *pDest /* Write the sorted results here */
){
Vdbe *v = pParse->pVdbe; /* The prepared statement */
int addrBreak = pSort->labelDone; /* Jump here to exit loop */
int addrContinue = sqlite3VdbeMakeLabel(pParse);/* Jump here for next cycle */
int addr; /* Top of output loop. Jump for Next. */
int addrOnce = 0;
int iTab;
ExprList *pOrderBy = pSort->pOrderBy;
int eDest = pDest->eDest;
int iParm = pDest->iSDParm;
int regRow;
int regRowid;
int iCol;
int nKey; /* Number of key columns in sorter record */
int iSortTab; /* Sorter cursor to read from */
int i;
int bSeq; /* True if sorter record includes seq. no. */
int nRefKey = 0;
struct ExprList_item *aOutEx = p->pEList->a;
assert( addrBreak<0 );
if( pSort->labelBkOut ){
sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut);
sqlite3VdbeGoto(v, addrBreak);
sqlite3VdbeResolveLabel(v, pSort->labelBkOut);
}
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
/* Open any cursors needed for sorter-reference expressions */
for(i=0; i<pSort->nDefer; i++){
Table *pTab = pSort->aDefer[i].pTab;
int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
sqlite3OpenTable(pParse, pSort->aDefer[i].iCsr, iDb, pTab, OP_OpenRead);
nRefKey = MAX(nRefKey, pSort->aDefer[i].nKey);
}
#endif
iTab = pSort->iECursor;
if( eDest==SRT_Output || eDest==SRT_Coroutine || eDest==SRT_Mem ){
regRowid = 0;
regRow = pDest->iSdst;
}else{
regRowid = sqlite3GetTempReg(pParse);
if( eDest==SRT_EphemTab || eDest==SRT_Table ){
regRow = sqlite3GetTempReg(pParse);
nColumn = 0;
}else{
regRow = sqlite3GetTempRange(pParse, nColumn);
}
}
nKey = pOrderBy->nExpr - pSort->nOBSat;
if( pSort->sortFlags & SORTFLAG_UseSorter ){
int regSortOut = ++pParse->nMem;
iSortTab = pParse->nTab++;
if( pSort->labelBkOut ){
addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
}
sqlite3VdbeAddOp3(v, OP_OpenPseudo, iSortTab, regSortOut,
nKey+1+nColumn+nRefKey);
if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak);
VdbeCoverage(v);
codeOffset(v, p->iOffset, addrContinue);
sqlite3VdbeAddOp3(v, OP_SorterData, iTab, regSortOut, iSortTab);
bSeq = 0;
}else{
addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak); VdbeCoverage(v);
codeOffset(v, p->iOffset, addrContinue);
iSortTab = iTab;
bSeq = 1;
}
for(i=0, iCol=nKey+bSeq-1; i<nColumn; i++){
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
if( aOutEx[i].bSorterRef ) continue;
#endif
if( aOutEx[i].u.x.iOrderByCol==0 ) iCol++;
}
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
if( pSort->nDefer ){
int iKey = iCol+1;
int regKey = sqlite3GetTempRange(pParse, nRefKey);
for(i=0; i<pSort->nDefer; i++){
int iCsr = pSort->aDefer[i].iCsr;
Table *pTab = pSort->aDefer[i].pTab;
int nKey = pSort->aDefer[i].nKey;
sqlite3VdbeAddOp1(v, OP_NullRow, iCsr);
if( HasRowid(pTab) ){
sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iKey++, regKey);
sqlite3VdbeAddOp3(v, OP_SeekRowid, iCsr,
sqlite3VdbeCurrentAddr(v)+1, regKey);
}else{
int k;
int iJmp;
assert( sqlite3PrimaryKeyIndex(pTab)->nKeyCol==nKey );
for(k=0; k<nKey; k++){
sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iKey++, regKey+k);
}
iJmp = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp4Int(v, OP_SeekGE, iCsr, iJmp+2, regKey, nKey);
sqlite3VdbeAddOp4Int(v, OP_IdxLE, iCsr, iJmp+3, regKey, nKey);
sqlite3VdbeAddOp1(v, OP_NullRow, iCsr);
}
}
sqlite3ReleaseTempRange(pParse, regKey, nRefKey);
}
#endif
for(i=nColumn-1; i>=0; i--){
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
if( aOutEx[i].bSorterRef ){
sqlite3ExprCode(pParse, aOutEx[i].pExpr, regRow+i);
}else
#endif
{
int iRead;
if( aOutEx[i].u.x.iOrderByCol ){
iRead = aOutEx[i].u.x.iOrderByCol-1;
}else{
iRead = iCol--;
}
sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iRead, regRow+i);
VdbeComment((v, "%s", aOutEx[i].zName?aOutEx[i].zName : aOutEx[i].zSpan));
}
}
switch( eDest ){
case SRT_Table:
case SRT_EphemTab: {
sqlite3VdbeAddOp3(v, OP_Column, iSortTab, nKey+bSeq, regRow);
sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, regRowid);
sqlite3VdbeAddOp3(v, OP_Insert, iParm, regRow, regRowid);
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
break;
}
#ifndef SQLITE_OMIT_SUBQUERY
case SRT_Set: {
assert( nColumn==sqlite3Strlen30(pDest->zAffSdst) );
sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, nColumn, regRowid,
pDest->zAffSdst, nColumn);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, regRowid, regRow, nColumn);
break;
}
case SRT_Mem: {
/* The LIMIT clause will terminate the loop for us */
break;
}
#endif
default: {
assert( eDest==SRT_Output || eDest==SRT_Coroutine );
testcase( eDest==SRT_Output );
testcase( eDest==SRT_Coroutine );
if( eDest==SRT_Output ){
sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iSdst, nColumn);
}else{
sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
}
break;
}
}
if( regRowid ){
if( eDest==SRT_Set ){
sqlite3ReleaseTempRange(pParse, regRow, nColumn);
}else{
sqlite3ReleaseTempReg(pParse, regRow);
}
sqlite3ReleaseTempReg(pParse, regRowid);
}
/* The bottom of the loop
*/
sqlite3VdbeResolveLabel(v, addrContinue);
if( pSort->sortFlags & SORTFLAG_UseSorter ){
sqlite3VdbeAddOp2(v, OP_SorterNext, iTab, addr); VdbeCoverage(v);
}else{
sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v);
}
if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn);
sqlite3VdbeResolveLabel(v, addrBreak);
}
/*
** Return a pointer to a string containing the 'declaration type' of the
** expression pExpr. The string may be treated as static by the caller.
**
** The declaration type is the exact datatype definition extracted from the
** original CREATE TABLE statement if the expression is a column. The
** declaration type for a ROWID field is INTEGER. Exactly when an expression
** is considered a column can be complex in the presence of subqueries. The
** result-set expression in all of the following SELECT statements is
** considered a column by this function.
**
** SELECT col FROM tbl;
** SELECT (SELECT col FROM tbl);
** SELECT abc FROM (SELECT col AS abc FROM tbl);
**
** The declaration type for any expression other than a column is NULL.
**
** This routine has either 2 or 5 parameters, depending on whether or not
** the SQLITE_ENABLE_COLUMN_METADATA compile-time option is used.
*/
#ifdef SQLITE_ENABLE_COLUMN_METADATA
# define columnType(A,B,C,D,E) columnTypeImpl(A,B,C,D,E)
#else /* if !defined(SQLITE_ENABLE_COLUMN_METADATA) */
# define columnType(A,B,C,D,E) columnTypeImpl(A,B)
#endif
static const char *columnTypeImpl(
NameContext *pNC,
#ifndef SQLITE_ENABLE_COLUMN_METADATA
Expr *pExpr
#else
Expr *pExpr,
const char **pzOrigDb,
const char **pzOrigTab,
const char **pzOrigCol
#endif
){
char const *zType = 0;
int j;
#ifdef SQLITE_ENABLE_COLUMN_METADATA
char const *zOrigDb = 0;
char const *zOrigTab = 0;
char const *zOrigCol = 0;
#endif
assert( pExpr!=0 );
assert( pNC->pSrcList!=0 );
switch( pExpr->op ){
case TK_COLUMN: {
/* The expression is a column. Locate the table the column is being
** extracted from in NameContext.pSrcList. This table may be a real
** database table or a subquery.
*/
Table *pTab = 0; /* Table structure column is extracted from */
Select *pS = 0; /* Select the column is extracted from */
int iCol = pExpr->iColumn; /* Index of column in pTab */
while( pNC && !pTab ){
SrcList *pTabList = pNC->pSrcList;
for(j=0;j<pTabList->nSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++);
if( j<pTabList->nSrc ){
pTab = pTabList->a[j].pTab;
pS = pTabList->a[j].pSelect;
}else{
pNC = pNC->pNext;
}
}
if( pTab==0 ){
/* At one time, code such as "SELECT new.x" within a trigger would
** cause this condition to run. Since then, we have restructured how
** trigger code is generated and so this condition is no longer
** possible. However, it can still be true for statements like
** the following:
**
** CREATE TABLE t1(col INTEGER);
** SELECT (SELECT t1.col) FROM t1;
**
** when columnType() is called on the expression "t1.col" in the
** sub-select. In this case, set the column type to NULL, even
** though it should really be "INTEGER".
**
** This is not a problem, as the column type of "t1.col" is never
** used. When columnType() is called on the expression
** "(SELECT t1.col)", the correct type is returned (see the TK_SELECT
** branch below). */
break;
}
assert( pTab && pExpr->y.pTab==pTab );
if( pS ){
/* The "table" is actually a sub-select or a view in the FROM clause
** of the SELECT statement. Return the declaration type and origin
** data for the result-set column of the sub-select.
*/
if( iCol>=0 && iCol<pS->pEList->nExpr ){
/* If iCol is less than zero, then the expression requests the
** rowid of the sub-select or view. This expression is legal (see
** test case misc2.2.2) - it always evaluates to NULL.
*/
NameContext sNC;
Expr *p = pS->pEList->a[iCol].pExpr;
sNC.pSrcList = pS->pSrc;
sNC.pNext = pNC;
sNC.pParse = pNC->pParse;
zType = columnType(&sNC, p,&zOrigDb,&zOrigTab,&zOrigCol);
}
}else{
/* A real table or a CTE table */
assert( !pS );
#ifdef SQLITE_ENABLE_COLUMN_METADATA
if( iCol<0 ) iCol = pTab->iPKey;
assert( iCol==XN_ROWID || (iCol>=0 && iCol<pTab->nCol) );
if( iCol<0 ){
zType = "INTEGER";
zOrigCol = "rowid";
}else{
zOrigCol = pTab->aCol[iCol].zName;
zType = sqlite3ColumnType(&pTab->aCol[iCol],0);
}
zOrigTab = pTab->zName;
if( pNC->pParse && pTab->pSchema ){
int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema);
zOrigDb = pNC->pParse->db->aDb[iDb].zDbSName;
}
#else
assert( iCol==XN_ROWID || (iCol>=0 && iCol<pTab->nCol) );
if( iCol<0 ){
zType = "INTEGER";
}else{
zType = sqlite3ColumnType(&pTab->aCol[iCol],0);
}
#endif
}
break;
}
#ifndef SQLITE_OMIT_SUBQUERY
case TK_SELECT: {
/* The expression is a sub-select. Return the declaration type and
** origin info for the single column in the result set of the SELECT
** statement.
*/
NameContext sNC;
Select *pS = pExpr->x.pSelect;
Expr *p = pS->pEList->a[0].pExpr;
assert( ExprHasProperty(pExpr, EP_xIsSelect) );
sNC.pSrcList = pS->pSrc;
sNC.pNext = pNC;
sNC.pParse = pNC->pParse;
zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol);
break;
}
#endif
}
#ifdef SQLITE_ENABLE_COLUMN_METADATA
if( pzOrigDb ){
assert( pzOrigTab && pzOrigCol );
*pzOrigDb = zOrigDb;
*pzOrigTab = zOrigTab;
*pzOrigCol = zOrigCol;
}
#endif
return zType;
}
/*
** Generate code that will tell the VDBE the declaration types of columns
** in the result set.
*/
static void generateColumnTypes(
Parse *pParse, /* Parser context */
SrcList *pTabList, /* List of tables */
ExprList *pEList /* Expressions defining the result set */
){
#ifndef SQLITE_OMIT_DECLTYPE
Vdbe *v = pParse->pVdbe;
int i;
NameContext sNC;
sNC.pSrcList = pTabList;
sNC.pParse = pParse;
sNC.pNext = 0;
for(i=0; i<pEList->nExpr; i++){
Expr *p = pEList->a[i].pExpr;
const char *zType;
#ifdef SQLITE_ENABLE_COLUMN_METADATA
const char *zOrigDb = 0;
const char *zOrigTab = 0;
const char *zOrigCol = 0;
zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol);
/* The vdbe must make its own copy of the column-type and other
** column specific strings, in case the schema is reset before this
** virtual machine is deleted.
*/
sqlite3VdbeSetColName(v, i, COLNAME_DATABASE, zOrigDb, SQLITE_TRANSIENT);
sqlite3VdbeSetColName(v, i, COLNAME_TABLE, zOrigTab, SQLITE_TRANSIENT);
sqlite3VdbeSetColName(v, i, COLNAME_COLUMN, zOrigCol, SQLITE_TRANSIENT);
#else
zType = columnType(&sNC, p, 0, 0, 0);
#endif
sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, SQLITE_TRANSIENT);
}
#endif /* !defined(SQLITE_OMIT_DECLTYPE) */
}
/*
** Compute the column names for a SELECT statement.
**
** The only guarantee that SQLite makes about column names is that if the
** column has an AS clause assigning it a name, that will be the name used.
** That is the only documented guarantee. However, countless applications
** developed over the years have made baseless assumptions about column names
** and will break if those assumptions change. Hence, use extreme caution
** when modifying this routine to avoid breaking legacy.
**
** See Also: sqlite3ColumnsFromExprList()
**
** The PRAGMA short_column_names and PRAGMA full_column_names settings are
** deprecated. The default setting is short=ON, full=OFF. 99.9% of all
** applications should operate this way. Nevertheless, we need to support the
** other modes for legacy:
**
** short=OFF, full=OFF: Column name is the text of the expression as it
** originally appears in the SELECT statement. In
** other words, the zSpan of the result expression.
**
** short=ON, full=OFF: (This is the default setting). If the result
** refers directly to a table column, then the
** result column name is just the table column
** name: COLUMN. Otherwise use zSpan.
**
** full=ON, short=ANY: If the result refers directly to a table column,
** then the result column name is the table column name
** with the table name prefix, ex: TABLE.COLUMN. Otherwise use zSpan.
*/
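/*
** A hypothetical illustration of the three modes (the schema is not
** taken from this file): for "CREATE TABLE t1(abc INTEGER);" and
**
**     SELECT abc, abc+1 FROM t1;
**
** short=ON, full=OFF: column names are "abc" and "abc+1"
** short=OFF, full=OFF: column names are "abc" and "abc+1" (the zSpan)
** full=ON: column names are "t1.abc" and "abc+1"
**
** An AS clause would override all three modes.
*/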
static void generateColumnNames(
Parse *pParse, /* Parser context */
Select *pSelect /* Generate column names for this SELECT statement */
){
Vdbe *v = pParse->pVdbe;
int i;
Table *pTab;
SrcList *pTabList;
ExprList *pEList;
sqlite3 *db = pParse->db;
int fullName; /* TABLE.COLUMN if no AS clause and is a direct table ref */
int srcName; /* COLUMN or TABLE.COLUMN if no AS clause and is direct */
#ifndef SQLITE_OMIT_EXPLAIN
/* If this is an EXPLAIN, skip this step */
if( pParse->explain ){
return;
}
#endif
if( pParse->colNamesSet ) return;
/* Column names are determined by the left-most term of a compound select */
while( pSelect->pPrior ) pSelect = pSelect->pPrior;
SELECTTRACE(1,pParse,pSelect,("generating column names\n"));
pTabList = pSelect->pSrc;
pEList = pSelect->pEList;
assert( v!=0 );
assert( pTabList!=0 );
pParse->colNamesSet = 1;
fullName = (db->flags & SQLITE_FullColNames)!=0;
srcName = (db->flags & SQLITE_ShortColNames)!=0 || fullName;
sqlite3VdbeSetNumCols(v, pEList->nExpr);
for(i=0; i<pEList->nExpr; i++){
Expr *p = pEList->a[i].pExpr;
assert( p!=0 );
assert( p->op!=TK_AGG_COLUMN ); /* Agg processing has not run yet */
assert( p->op!=TK_COLUMN || p->y.pTab!=0 ); /* Covering idx not yet coded */
if( pEList->a[i].zName ){
/* An AS clause always takes first priority */
char *zName = pEList->a[i].zName;
sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_TRANSIENT);
}else if( srcName && p->op==TK_COLUMN ){
char *zCol;
int iCol = p->iColumn;
pTab = p->y.pTab;
assert( pTab!=0 );
if( iCol<0 ) iCol = pTab->iPKey;
assert( iCol==-1 || (iCol>=0 && iCol<pTab->nCol) );
if( iCol<0 ){
zCol = "rowid";
}else{
zCol = pTab->aCol[iCol].zName;
}
if( fullName ){
char *zName = 0;
zName = sqlite3MPrintf(db, "%s.%s", pTab->zName, zCol);
sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_DYNAMIC);
}else{
sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, SQLITE_TRANSIENT);
}
}else{
const char *z = pEList->a[i].zSpan;
z = z==0 ? sqlite3MPrintf(db, "column%d", i+1) : sqlite3DbStrDup(db, z);
sqlite3VdbeSetColName(v, i, COLNAME_NAME, z, SQLITE_DYNAMIC);
}
}
generateColumnTypes(pParse, pTabList, pEList);
}
/*
** Given an expression list (which is really the list of expressions
** that form the result set of a SELECT statement) compute appropriate
** column names for a table that would hold the expression list.
**
** All column names will be unique.
**
** Only the column names are computed. Column.zType, Column.zColl,
** and other fields of Column are zeroed.
**
** Return SQLITE_OK on success. If a memory allocation error occurs,
** store NULL in *paCol and 0 in *pnCol and return SQLITE_NOMEM.
**
** The only guarantee that SQLite makes about column names is that if the
** column has an AS clause assigning it a name, that will be the name used.
** That is the only documented guarantee. However, countless applications
** developed over the years have made baseless assumptions about column names
** and will break if those assumptions change. Hence, use extreme caution
** when modifying this routine to avoid breaking legacy.
**
** See Also: generateColumnNames()
*/
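/*
** A hypothetical illustration of the uniquifier logic below (not a case
** drawn from the test suite): for the expression list of
**
**     SELECT x, x, x+1 FROM t1;
**
** the derived names would be "x", "x:1", and "x+1" - the second "x"
** collides in the name hash and so receives an integer suffix appended
** after a ':' separator.
*/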
int sqlite3ColumnsFromExprList(
Parse *pParse, /* Parsing context */
ExprList *pEList, /* Expr list from which to derive column names */
i16 *pnCol, /* Write the number of columns here */
Column **paCol /* Write the new column list here */
){
sqlite3 *db = pParse->db; /* Database connection */
int i, j; /* Loop counters */
u32 cnt; /* Index added to make the name unique */
Column *aCol, *pCol; /* For looping over result columns */
int nCol; /* Number of columns in the result set */
char *zName; /* Column name */
int nName; /* Size of name in zName[] */
Hash ht; /* Hash table of column names */
sqlite3HashInit(&ht);
if( pEList ){
nCol = pEList->nExpr;
aCol = sqlite3DbMallocZero(db, sizeof(aCol[0])*nCol);
testcase( aCol==0 );
if( nCol>32767 ) nCol = 32767;
}else{
nCol = 0;
aCol = 0;
}
assert( nCol==(i16)nCol );
*pnCol = nCol;
*paCol = aCol;
for(i=0, pCol=aCol; i<nCol && !db->mallocFailed; i++, pCol++){
/* Get an appropriate name for the column
*/
if( (zName = pEList->a[i].zName)!=0 ){
/* If the column contains an "AS <name>" phrase, use <name> as the name */
}else{
Expr *pColExpr = sqlite3ExprSkipCollateAndLikely(pEList->a[i].pExpr);
while( pColExpr->op==TK_DOT ){
pColExpr = pColExpr->pRight;
assert( pColExpr!=0 );
}
if( pColExpr->op==TK_COLUMN ){
/* For columns use the column name */
int iCol = pColExpr->iColumn;
Table *pTab = pColExpr->y.pTab;
assert( pTab!=0 );
if( iCol<0 ) iCol = pTab->iPKey;
zName = iCol>=0 ? pTab->aCol[iCol].zName : "rowid";
}else if( pColExpr->op==TK_ID ){
assert( !ExprHasProperty(pColExpr, EP_IntValue) );
zName = pColExpr->u.zToken;
}else{
/* Use the original text of the column expression as its name */
zName = pEList->a[i].zSpan;
}
}
if( zName ){
zName = sqlite3DbStrDup(db, zName);
}else{
zName = sqlite3MPrintf(db,"column%d",i+1);
}
/* Make sure the column name is unique. If the name is not unique,
** append an integer to the name so that it becomes unique.
*/
cnt = 0;
while( zName && sqlite3HashFind(&ht, zName)!=0 ){
nName = sqlite3Strlen30(zName);
if( nName>0 ){
for(j=nName-1; j>0 && sqlite3Isdigit(zName[j]); j--){}
if( zName[j]==':' ) nName = j;
}
zName = sqlite3MPrintf(db, "%.*z:%u", nName, zName, ++cnt);
if( cnt>3 ) sqlite3_randomness(sizeof(cnt), &cnt);
}
pCol->zName = zName;
sqlite3ColumnPropertiesFromName(0, pCol);
if( zName && sqlite3HashInsert(&ht, zName, pCol)==pCol ){
sqlite3OomFault(db);
}
}
sqlite3HashClear(&ht);
if( db->mallocFailed ){
for(j=0; j<i; j++){
sqlite3DbFree(db, aCol[j].zName);
}
sqlite3DbFree(db, aCol);
*paCol = 0;
*pnCol = 0;
return SQLITE_NOMEM_BKPT;
}
return SQLITE_OK;
}
/*
** Add type and collation information to a column list based on
** a SELECT statement.
**
** The column list presumably came from sqlite3ColumnsFromExprList().
** The column list has only names, not types or collations. This
** routine goes through and adds the types and collations.
**
** This routine requires that all identifiers in the SELECT
** statement be resolved.
*/
void sqlite3SelectAddColumnTypeAndCollation(
Parse *pParse, /* Parsing contexts */
Table *pTab, /* Add column type information to this table */
Select *pSelect, /* SELECT used to determine types and collations */
char aff /* Default affinity for columns */
){
sqlite3 *db = pParse->db;
NameContext sNC;
Column *pCol;
CollSeq *pColl;
int i;
Expr *p;
struct ExprList_item *a;
assert( pSelect!=0 );
assert( (pSelect->selFlags & SF_Resolved)!=0 );
assert( pTab->nCol==pSelect->pEList->nExpr || db->mallocFailed );
if( db->mallocFailed ) return;
memset(&sNC, 0, sizeof(sNC));
sNC.pSrcList = pSelect->pSrc;
a = pSelect->pEList->a;
for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
const char *zType;
int n, m;
p = a[i].pExpr;
zType = columnType(&sNC, p, 0, 0, 0);
/* pCol->szEst = ... // Column size est for SELECT tables never used */
pCol->affinity = sqlite3ExprAffinity(p);
if( zType ){
m = sqlite3Strlen30(zType);
n = sqlite3Strlen30(pCol->zName);
pCol->zName = sqlite3DbReallocOrFree(db, pCol->zName, n+m+2);
if( pCol->zName ){
memcpy(&pCol->zName[n+1], zType, m+1);
pCol->colFlags |= COLFLAG_HASTYPE;
}
}
if( pCol->affinity<=SQLITE_AFF_NONE ) pCol->affinity = aff;
pColl = sqlite3ExprCollSeq(pParse, p);
if( pColl && pCol->zColl==0 ){
pCol->zColl = sqlite3DbStrDup(db, pColl->zName);
}
}
pTab->szTabRow = 1; /* Any non-zero value works */
}
/*
** Given a SELECT statement, generate a Table structure that describes
** the result set of that SELECT.
*/
Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect, char aff){
Table *pTab;
sqlite3 *db = pParse->db;
u64 savedFlags;
savedFlags = db->flags;
db->flags &= ~(u64)SQLITE_FullColNames;
db->flags |= SQLITE_ShortColNames;
sqlite3SelectPrep(pParse, pSelect, 0);
db->flags = savedFlags;
if( pParse->nErr ) return 0;
while( pSelect->pPrior ) pSelect = pSelect->pPrior;
pTab = sqlite3DbMallocZero(db, sizeof(Table) );
if( pTab==0 ){
return 0;
}
pTab->nTabRef = 1;
pTab->zName = 0;
pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol);
sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSelect, aff);
pTab->iPKey = -1;
if( db->mallocFailed ){
sqlite3DeleteTable(db, pTab);
return 0;
}
return pTab;
}
/*
** Get a VDBE for the given parser context. Create a new one if necessary.
** If an error occurs, return NULL and leave a message in pParse.
*/
Vdbe *sqlite3GetVdbe(Parse *pParse){
if( pParse->pVdbe ){
return pParse->pVdbe;
}
if( pParse->pToplevel==0
&& OptimizationEnabled(pParse->db,SQLITE_FactorOutConst)
){
pParse->okConstFactor = 1;
}
return sqlite3VdbeCreate(pParse);
}
/*
** Compute the iLimit and iOffset fields of the SELECT based on the
** pLimit expressions. pLimit->pLeft and pLimit->pRight hold the expressions
** that appear in the original SQL statement after the LIMIT and OFFSET
** keywords, or NULL if those keywords are omitted. iLimit and iOffset
** are the integer memory register numbers for counters used to compute
** the limit and offset. If there is no limit and/or offset, then
** iLimit and iOffset are negative.
**
** This routine changes the values of iLimit and iOffset only if
** a limit or offset is defined by pLimit->pLeft and pLimit->pRight. iLimit
** and iOffset should have been preset to appropriate default values (zero)
** prior to calling this routine.
**
** The iOffset register (if it exists) is initialized to the value
** of the OFFSET. The iLimit register is initialized to LIMIT. Register
** iOffset+1 is initialized to LIMIT+OFFSET.
**
** Only if pLimit->pLeft!=0 do the limit registers get
** redefined. The UNION ALL operator uses this property to force
** the reuse of the same limit and offset registers across multiple
** SELECT statements.
*/
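/*
** A hypothetical illustration: for "... LIMIT 10 OFFSET 5" the code
** below arranges the registers so that
**
**     mem[iLimit]    = 10    -- LIMIT counter
**     mem[iOffset]   = 5     -- OFFSET counter
**     mem[iOffset+1] = 15    -- LIMIT+OFFSET, set by OP_OffsetLimit
*/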
static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){
Vdbe *v = 0;
int iLimit = 0;
int iOffset;
int n;
Expr *pLimit = p->pLimit;
if( p->iLimit ) return;
/*
** "LIMIT -1" always shows all rows. There is some
** controversy about what the correct behavior should be.
** The current implementation interprets "LIMIT 0" to mean
** no rows.
*/
if( pLimit ){
assert( pLimit->op==TK_LIMIT );
assert( pLimit->pLeft!=0 );
p->iLimit = iLimit = ++pParse->nMem;
v = sqlite3GetVdbe(pParse);
assert( v!=0 );
if( sqlite3ExprIsInteger(pLimit->pLeft, &n) ){
sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit);
VdbeComment((v, "LIMIT counter"));
if( n==0 ){
sqlite3VdbeGoto(v, iBreak);
}else if( n>=0 && p->nSelectRow>sqlite3LogEst((u64)n) ){
p->nSelectRow = sqlite3LogEst((u64)n);
p->selFlags |= SF_FixedLimit;
}
}else{
sqlite3ExprCode(pParse, pLimit->pLeft, iLimit);
sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit); VdbeCoverage(v);
VdbeComment((v, "LIMIT counter"));
sqlite3VdbeAddOp2(v, OP_IfNot, iLimit, iBreak); VdbeCoverage(v);
}
if( pLimit->pRight ){
p->iOffset = iOffset = ++pParse->nMem;
pParse->nMem++; /* Allocate an extra register for limit+offset */
sqlite3ExprCode(pParse, pLimit->pRight, iOffset);
sqlite3VdbeAddOp1(v, OP_MustBeInt, iOffset); VdbeCoverage(v);
VdbeComment((v, "OFFSET counter"));
sqlite3VdbeAddOp3(v, OP_OffsetLimit, iLimit, iOffset+1, iOffset);
VdbeComment((v, "LIMIT+OFFSET"));
}
}
}
#ifndef SQLITE_OMIT_COMPOUND_SELECT
/*
** Return the appropriate collating sequence for the iCol-th column of
** the result set for the compound-select statement "p". Return NULL if
** the column has no default collating sequence.
**
** The collating sequence for the compound select is taken from the
** left-most term of the select that has a collating sequence.
*/
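/*
** A hypothetical illustration: in
**
**     SELECT a COLLATE nocase FROM t1 UNION SELECT b FROM t2
**
** the collating sequence for result column 0 is "nocase", because the
** recursion below reaches the left-most term first and the first term
** with a defined collating sequence wins.
*/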
static CollSeq *multiSelectCollSeq(Parse *pParse, Select *p, int iCol){
CollSeq *pRet;
if( p->pPrior ){
pRet = multiSelectCollSeq(pParse, p->pPrior, iCol);
}else{
pRet = 0;
}
assert( iCol>=0 );
/* iCol must be less than p->pEList->nExpr. Otherwise an error would
** have been thrown during name resolution and we would not have gotten
** this far */
if( pRet==0 && ALWAYS(iCol<p->pEList->nExpr) ){
pRet = sqlite3ExprCollSeq(pParse, p->pEList->a[iCol].pExpr);
}
return pRet;
}
/*
** The select statement passed as the second parameter is a compound SELECT
** with an ORDER BY clause. This function allocates and returns a KeyInfo
** structure suitable for implementing the ORDER BY.
**
** Space to hold the KeyInfo structure is obtained from malloc. The calling
** function is responsible for ensuring that this structure is eventually
** freed.
*/
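/*
** A hypothetical illustration: for "... UNION ... ORDER BY x DESC" the
** returned KeyInfo has one key column for the single ORDER BY term plus
** nExtra spare slots; aColl[0] holds the collation chosen for "x" (via
** multiSelectCollSeq() when no explicit COLLATE is given) and
** aSortFlags[0] records the DESC sort order.
*/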
static KeyInfo *multiSelectOrderByKeyInfo(Parse *pParse, Select *p, int nExtra){
ExprList *pOrderBy = p->pOrderBy;
int nOrderBy = p->pOrderBy->nExpr;
sqlite3 *db = pParse->db;
KeyInfo *pRet = sqlite3KeyInfoAlloc(db, nOrderBy+nExtra, 1);
if( pRet ){
int i;
for(i=0; i<nOrderBy; i++){
struct ExprList_item *pItem = &pOrderBy->a[i];
Expr *pTerm = pItem->pExpr;
CollSeq *pColl;
if( pTerm->flags & EP_Collate ){
pColl = sqlite3ExprCollSeq(pParse, pTerm);
}else{
pColl = multiSelectCollSeq(pParse, p, pItem->u.x.iOrderByCol-1);
if( pColl==0 ) pColl = db->pDfltColl;
pOrderBy->a[i].pExpr =
sqlite3ExprAddCollateString(pParse, pTerm, pColl->zName);
}
assert( sqlite3KeyInfoIsWriteable(pRet) );
pRet->aColl[i] = pColl;
pRet->aSortFlags[i] = pOrderBy->a[i].sortFlags;
}
}
return pRet;
}
#ifndef SQLITE_OMIT_CTE
/*
** This routine generates VDBE code to compute the content of a WITH RECURSIVE
** query of the form:
**
** <recursive-table> AS (<setup-query> UNION [ALL] <recursive-query>)
** \___________/ \_______________/
** p->pPrior p
**
**
** There is exactly one reference to the recursive-table in the FROM clause
** of recursive-query, marked with the SrcList->a[].fg.isRecursive flag.
**
** The setup-query runs once to generate an initial set of rows that go
** into a Queue table. Rows are extracted from the Queue table one by
** one. Each row extracted from Queue is output to pDest. Then the single
** extracted row (now in the iCurrent table) becomes the content of the
** recursive-table for a recursive-query run. The output of the recursive-query
** is added back into the Queue table. Then another row is extracted from Queue
** and the iteration continues until the Queue table is empty.
**
** If the compound query operator is UNION then no duplicate rows are ever
** inserted into the Queue table. The iDistinct table keeps a copy of all rows
** that have ever been inserted into Queue and causes duplicates to be
** discarded. If the operator is UNION ALL, then duplicates are allowed.
**
** If the query has an ORDER BY, then entries in the Queue table are kept in
** ORDER BY order and the first entry is extracted for each cycle. Without
** an ORDER BY, the Queue table is just a FIFO.
**
** If a LIMIT clause is provided, then the iteration stops after LIMIT rows
** have been output to pDest. A LIMIT of zero means to output no rows and a
** negative LIMIT means to output all rows. If there is also an OFFSET clause
** with a positive value, then the first OFFSET outputs are discarded rather
** than being sent to pDest. The LIMIT count does not begin until after OFFSET
** rows have been skipped.
*/
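/*
** A minimal hypothetical example of the kind of statement handled here:
**
**     WITH RECURSIVE cnt(x) AS (
**       SELECT 1                               -- setup-query
**       UNION ALL
**       SELECT x+1 FROM cnt WHERE x<1000000    -- recursive-query
**     )
**     SELECT x FROM cnt LIMIT 10;
**
** The setup-query seeds the Queue with the single row (1). Each cycle
** moves one row from the Queue into Current, outputs it, and runs the
** recursive-query against Current to refill the Queue. The LIMIT stops
** the iteration after ten rows have been sent to pDest.
*/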
static void generateWithRecursiveQuery(
Parse *pParse, /* Parsing context */
Select *p, /* The recursive SELECT to be coded */
SelectDest *pDest /* What to do with query results */
){
SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */
int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */
Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */
Select *pSetup = p->pPrior; /* The setup query */
int addrTop; /* Top of the loop */
int addrCont, addrBreak; /* CONTINUE and BREAK addresses */
int iCurrent = 0; /* The Current table */
int regCurrent; /* Register holding Current table */
int iQueue; /* The Queue table */
int iDistinct = 0; /* To ensure unique results if UNION */
int eDest = SRT_Fifo; /* How to write to Queue */
SelectDest destQueue; /* SelectDest targeting the Queue table */
int i; /* Loop counter */
int rc; /* Result code */
ExprList *pOrderBy; /* The ORDER BY clause */
Expr *pLimit; /* Saved LIMIT and OFFSET */
int regLimit, regOffset; /* Registers used by LIMIT and OFFSET */
#ifndef SQLITE_OMIT_WINDOWFUNC
if( p->pWin ){
sqlite3ErrorMsg(pParse, "cannot use window functions in recursive queries");
return;
}
#endif
/* Obtain authorization to do a recursive query */
if( sqlite3AuthCheck(pParse, SQLITE_RECURSIVE, 0, 0, 0) ) return;
/* Process the LIMIT and OFFSET clauses, if they exist */
addrBreak = sqlite3VdbeMakeLabel(pParse);
p->nSelectRow = 320; /* 4 billion rows */
computeLimitRegisters(pParse, p, addrBreak);
pLimit = p->pLimit;
regLimit = p->iLimit;
regOffset = p->iOffset;
p->pLimit = 0;
p->iLimit = p->iOffset = 0;
pOrderBy = p->pOrderBy;
/* Locate the cursor number of the Current table */
for(i=0; ALWAYS(i<pSrc->nSrc); i++){
if( pSrc->a[i].fg.isRecursive ){
iCurrent = pSrc->a[i].iCursor;
break;
}
}
/* Allocate cursor numbers for Queue and Distinct. The cursor number for
** the Distinct table must be exactly one greater than Queue in order
** for the SRT_DistFifo and SRT_DistQueue destinations to work. */
iQueue = pParse->nTab++;
if( p->op==TK_UNION ){
eDest = pOrderBy ? SRT_DistQueue : SRT_DistFifo;
iDistinct = pParse->nTab++;
}else{
eDest = pOrderBy ? SRT_Queue : SRT_Fifo;
}
sqlite3SelectDestInit(&destQueue, eDest, iQueue);
/* Allocate cursors for Current, Queue, and Distinct. */
regCurrent = ++pParse->nMem;
sqlite3VdbeAddOp3(v, OP_OpenPseudo, iCurrent, regCurrent, nCol);
if( pOrderBy ){
KeyInfo *pKeyInfo = multiSelectOrderByKeyInfo(pParse, p, 1);
sqlite3VdbeAddOp4(v, OP_OpenEphemeral, iQueue, pOrderBy->nExpr+2, 0,
(char*)pKeyInfo, P4_KEYINFO);
destQueue.pOrderBy = pOrderBy;
}else{
sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iQueue, nCol);
}
VdbeComment((v, "Queue table"));
if( iDistinct ){
p->addrOpenEphm[0] = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iDistinct, 0);
p->selFlags |= SF_UsesEphemeral;
}
/* Detach the ORDER BY clause from the compound SELECT */
p->pOrderBy = 0;
/* Store the results of the setup-query in Queue. */
pSetup->pNext = 0;
ExplainQueryPlan((pParse, 1, "SETUP"));
rc = sqlite3Select(pParse, pSetup, &destQueue);
pSetup->pNext = p;
if( rc ) goto end_of_recursive_query;
/* Find the next row in the Queue and output that row */
addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, iQueue, addrBreak); VdbeCoverage(v);
/* Transfer the next row in Queue over to Current */
sqlite3VdbeAddOp1(v, OP_NullRow, iCurrent); /* To reset column cache */
if( pOrderBy ){
sqlite3VdbeAddOp3(v, OP_Column, iQueue, pOrderBy->nExpr+1, regCurrent);
}else{
sqlite3VdbeAddOp2(v, OP_RowData, iQueue, regCurrent);
}
sqlite3VdbeAddOp1(v, OP_Delete, iQueue);
/* Output the single row in Current */
addrCont = sqlite3VdbeMakeLabel(pParse);
codeOffset(v, regOffset, addrCont);
selectInnerLoop(pParse, p, iCurrent,
0, 0, pDest, addrCont, addrBreak);
if( regLimit ){
sqlite3VdbeAddOp2(v, OP_DecrJumpZero, regLimit, addrBreak);
VdbeCoverage(v);
}
sqlite3VdbeResolveLabel(v, addrCont);
/* Execute the recursive SELECT taking the single row in Current as
** the value for the recursive-table. Store the results in the Queue.
*/
if( p->selFlags & SF_Aggregate ){
sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported");
}else{
p->pPrior = 0;
ExplainQueryPlan((pParse, 1, "RECURSIVE STEP"));
sqlite3Select(pParse, p, &destQueue);
assert( p->pPrior==0 );
p->pPrior = pSetup;
}
/* Keep running the loop until the Queue is empty */
sqlite3VdbeGoto(v, addrTop);
sqlite3VdbeResolveLabel(v, addrBreak);
end_of_recursive_query:
sqlite3ExprListDelete(pParse->db, p->pOrderBy);
p->pOrderBy = pOrderBy;
p->pLimit = pLimit;
return;
}
#endif /* SQLITE_OMIT_CTE */
/* Forward references */
static int multiSelectOrderBy(
Parse *pParse, /* Parsing context */
Select *p, /* The right-most of SELECTs to be coded */
SelectDest *pDest /* What to do with query results */
);
/*
** Handle the special case of a compound-select that originates from a
** VALUES clause. By handling this as a special case, we avoid deep
** recursion, and thus do not need to enforce the SQLITE_LIMIT_COMPOUND_SELECT
** on a VALUES clause.
**
** Because the Select object originates from a VALUES clause:
** (1) There is no LIMIT or OFFSET or else there is a LIMIT of exactly 1
** (2) All terms are UNION ALL
** (3) There is no ORDER BY clause
**
** The "LIMIT of exactly 1" case of condition (1) comes about when a VALUES
** clause occurs within a scalar expression (ex: "SELECT (VALUES(1),(2),(3))").
** The sqlite3CodeSubselect() routine will have added the LIMIT 1 clause in
** that case. Since the limit is exactly 1, we only need to evaluate the
** left-most VALUES.
*/
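/*
** For example (hypothetical), "VALUES(1,2),(3,4),(5,6)" parses into
** three Select objects linked through pPrior:
**
**     SELECT 5,6                    -- p, the right-most term
**       |
**       `----> SELECT 3,4
**                |
**                `----> SELECT 1,2  -- left-most term, coded first
**
** The do-loop below walks left to count the rows and find the left-most
** term, then the while-loop codes each term from left to right via the
** pNext pointers.
*/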
static int multiSelectValues(
Parse *pParse, /* Parsing context */
Select *p, /* The right-most of SELECTs to be coded */
SelectDest *pDest /* What to do with query results */
){
int nRow = 1;
int rc = 0;
int bShowAll = p->pLimit==0;
assert( p->selFlags & SF_MultiValue );
do{
assert( p->selFlags & SF_Values );
assert( p->op==TK_ALL || (p->op==TK_SELECT && p->pPrior==0) );
assert( p->pNext==0 || p->pEList->nExpr==p->pNext->pEList->nExpr );
if( p->pPrior==0 ) break;
assert( p->pPrior->pNext==p );
p = p->pPrior;
nRow += bShowAll;
}while(1);
ExplainQueryPlan((pParse, 0, "SCAN %d CONSTANT ROW%s", nRow,
nRow==1 ? "" : "S"));
while( p ){
selectInnerLoop(pParse, p, -1, 0, 0, pDest, 1, 1);
if( !bShowAll ) break;
p->nSelectRow = nRow;
p = p->pNext;
}
return rc;
}
/*
** This routine is called to process a compound query formed from
** two or more separate queries using UNION, UNION ALL, EXCEPT, or
** INTERSECT.
**
** "p" points to the right-most of the two queries. The query on the
** left is p->pPrior. The left query could also be a compound query
** in which case this routine will be called recursively.
**
** The results of the total query are to be written into a destination
** of type eDest with parameter iParm.
**
** Example 1: Consider a three-way compound SQL statement.
**
** SELECT a FROM t1 UNION SELECT b FROM t2 UNION SELECT c FROM t3
**
** This statement is parsed up as follows:
**
** SELECT c FROM t3
** |
** `-----> SELECT b FROM t2
** |
** `------> SELECT a FROM t1
**
** The arrows in the diagram above represent the Select.pPrior pointer.
** So if this routine is called with p equal to the t3 query, then
** pPrior will be the t2 query. p->op will be TK_UNION in this case.
**
** Notice that because of the way SQLite parses compound SELECTs, the
** individual selects always group from left to right.
*/
static int multiSelect(
Parse *pParse, /* Parsing context */
Select *p, /* The right-most of SELECTs to be coded */
SelectDest *pDest /* What to do with query results */
){
int rc = SQLITE_OK; /* Success code from a subroutine */
Select *pPrior; /* Another SELECT immediately to our left */
Vdbe *v; /* Generate code to this VDBE */
SelectDest dest; /* Alternative data destination */
Select *pDelete = 0; /* Chain of simple selects to delete */
sqlite3 *db; /* Database connection */
/* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only
** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT.
*/
assert( p && p->pPrior ); /* Calling function guarantees this much */
assert( (p->selFlags & SF_Recursive)==0 || p->op==TK_ALL || p->op==TK_UNION );
assert( p->selFlags & SF_Compound );
db = pParse->db;
pPrior = p->pPrior;
dest = *pDest;
if( pPrior->pOrderBy || pPrior->pLimit ){
sqlite3ErrorMsg(pParse,"%s clause should come after %s not before",
pPrior->pOrderBy!=0 ? "ORDER BY" : "LIMIT", selectOpName(p->op));
rc = 1;
goto multi_select_end;
}
v = sqlite3GetVdbe(pParse);
assert( v!=0 ); /* The VDBE already created by calling function */
/* Create the destination temporary table if necessary
*/
if( dest.eDest==SRT_EphemTab ){
assert( p->pEList );
sqlite3VdbeAddOp2(v, OP_OpenEphemeral, dest.iSDParm, p->pEList->nExpr);
dest.eDest = SRT_Table;
}
/* Special handling for a compound-select that originates as a VALUES clause.
*/
if( p->selFlags & SF_MultiValue ){
rc = multiSelectValues(pParse, p, &dest);
goto multi_select_end;
}
/* Make sure all SELECTs in the statement have the same number of elements
** in their result sets.
*/
assert( p->pEList && pPrior->pEList );
assert( p->pEList->nExpr==pPrior->pEList->nExpr );
#ifndef SQLITE_OMIT_CTE
if( p->selFlags & SF_Recursive ){
generateWithRecursiveQuery(pParse, p, &dest);
}else
#endif
/* Compound SELECTs that have an ORDER BY clause are handled separately.
*/
if( p->pOrderBy ){
return multiSelectOrderBy(pParse, p, pDest);
}else{
#ifndef SQLITE_OMIT_EXPLAIN
if( pPrior->pPrior==0 ){
ExplainQueryPlan((pParse, 1, "COMPOUND QUERY"));
ExplainQueryPlan((pParse, 1, "LEFT-MOST SUBQUERY"));
}
#endif
/* Generate code for the left and right SELECT statements.
*/
switch( p->op ){
case TK_ALL: {
int addr = 0;
int nLimit;
assert( !pPrior->pLimit );
pPrior->iLimit = p->iLimit;
pPrior->iOffset = p->iOffset;
pPrior->pLimit = p->pLimit;
rc = sqlite3Select(pParse, pPrior, &dest);
p->pLimit = 0;
if( rc ){
goto multi_select_end;
}
p->pPrior = 0;
p->iLimit = pPrior->iLimit;
p->iOffset = pPrior->iOffset;
if( p->iLimit ){
addr = sqlite3VdbeAddOp1(v, OP_IfNot, p->iLimit); VdbeCoverage(v);
VdbeComment((v, "Jump ahead if LIMIT reached"));
if( p->iOffset ){
sqlite3VdbeAddOp3(v, OP_OffsetLimit,
p->iLimit, p->iOffset+1, p->iOffset);
}
}
ExplainQueryPlan((pParse, 1, "UNION ALL"));
rc = sqlite3Select(pParse, p, &dest);
testcase( rc!=SQLITE_OK );
pDelete = p->pPrior;
p->pPrior = pPrior;
p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow);
if( pPrior->pLimit
&& sqlite3ExprIsInteger(pPrior->pLimit->pLeft, &nLimit)
&& nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit)
){
p->nSelectRow = sqlite3LogEst((u64)nLimit);
}
if( addr ){
sqlite3VdbeJumpHere(v, addr);
}
break;
}
case TK_EXCEPT:
case TK_UNION: {
int unionTab; /* Cursor number of the temp table holding result */
u8 op = 0; /* One of the SRT_ operations to apply to self */
int priorOp; /* The SRT_ operation to apply to prior selects */
Expr *pLimit; /* Saved value of p->pLimit */
int addr;
SelectDest uniondest;
testcase( p->op==TK_EXCEPT );
testcase( p->op==TK_UNION );
priorOp = SRT_Union;
if( dest.eDest==priorOp ){
/* We can reuse a temporary table generated by a SELECT to our
** right.
*/
assert( p->pLimit==0 ); /* Not allowed on leftward elements */
unionTab = dest.iSDParm;
}else{
/* We will need to create our own temporary table to hold the
** intermediate results.
*/
unionTab = pParse->nTab++;
assert( p->pOrderBy==0 );
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, unionTab, 0);
assert( p->addrOpenEphm[0] == -1 );
p->addrOpenEphm[0] = addr;
findRightmost(p)->selFlags |= SF_UsesEphemeral;
assert( p->pEList );
}
/* Code the SELECT statements to our left
*/
assert( !pPrior->pOrderBy );
sqlite3SelectDestInit(&uniondest, priorOp, unionTab);
rc = sqlite3Select(pParse, pPrior, &uniondest);
if( rc ){
goto multi_select_end;
}
/* Code the current SELECT statement
*/
if( p->op==TK_EXCEPT ){
op = SRT_Except;
}else{
assert( p->op==TK_UNION );
op = SRT_Union;
}
p->pPrior = 0;
pLimit = p->pLimit;
p->pLimit = 0;
uniondest.eDest = op;
ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE",
selectOpName(p->op)));
rc = sqlite3Select(pParse, p, &uniondest);
testcase( rc!=SQLITE_OK );
/* Query flattening in sqlite3Select() might refill p->pOrderBy.
** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */
sqlite3ExprListDelete(db, p->pOrderBy);
pDelete = p->pPrior;
p->pPrior = pPrior;
p->pOrderBy = 0;
if( p->op==TK_UNION ){
p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow);
}
sqlite3ExprDelete(db, p->pLimit);
p->pLimit = pLimit;
p->iLimit = 0;
p->iOffset = 0;
/* Convert the data in the temporary table into whatever form
** it is that we currently need.
*/
assert( unionTab==dest.iSDParm || dest.eDest!=priorOp );
if( dest.eDest!=priorOp ){
int iCont, iBreak, iStart;
assert( p->pEList );
iBreak = sqlite3VdbeMakeLabel(pParse);
iCont = sqlite3VdbeMakeLabel(pParse);
computeLimitRegisters(pParse, p, iBreak);
sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak); VdbeCoverage(v);
iStart = sqlite3VdbeCurrentAddr(v);
selectInnerLoop(pParse, p, unionTab,
0, 0, &dest, iCont, iBreak);
sqlite3VdbeResolveLabel(v, iCont);
sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart); VdbeCoverage(v);
sqlite3VdbeResolveLabel(v, iBreak);
sqlite3VdbeAddOp2(v, OP_Close, unionTab, 0);
}
break;
}
default: assert( p->op==TK_INTERSECT ); {
int tab1, tab2;
int iCont, iBreak, iStart;
Expr *pLimit;
int addr;
SelectDest intersectdest;
int r1;
/* INTERSECT is different from the others since it requires
** two temporary tables. Hence it has its own case. Begin
** by allocating the tables we will need.
*/
tab1 = pParse->nTab++;
tab2 = pParse->nTab++;
assert( p->pOrderBy==0 );
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab1, 0);
assert( p->addrOpenEphm[0] == -1 );
p->addrOpenEphm[0] = addr;
findRightmost(p)->selFlags |= SF_UsesEphemeral;
assert( p->pEList );
/* Code the SELECTs to our left into temporary table "tab1".
*/
sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1);
rc = sqlite3Select(pParse, pPrior, &intersectdest);
if( rc ){
goto multi_select_end;
}
/* Code the current SELECT into temporary table "tab2"
*/
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab2, 0);
assert( p->addrOpenEphm[1] == -1 );
p->addrOpenEphm[1] = addr;
p->pPrior = 0;
pLimit = p->pLimit;
p->pLimit = 0;
intersectdest.iSDParm = tab2;
ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE",
selectOpName(p->op)));
rc = sqlite3Select(pParse, p, &intersectdest);
testcase( rc!=SQLITE_OK );
pDelete = p->pPrior;
p->pPrior = pPrior;
if( p->nSelectRow>pPrior->nSelectRow ){
p->nSelectRow = pPrior->nSelectRow;
}
sqlite3ExprDelete(db, p->pLimit);
p->pLimit = pLimit;
/* Generate code to take the intersection of the two temporary
** tables.
*/
assert( p->pEList );
iBreak = sqlite3VdbeMakeLabel(pParse);
iCont = sqlite3VdbeMakeLabel(pParse);
computeLimitRegisters(pParse, p, iBreak);
sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v);
r1 = sqlite3GetTempReg(pParse);
iStart = sqlite3VdbeAddOp2(v, OP_RowData, tab1, r1);
sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0);
VdbeCoverage(v);
sqlite3ReleaseTempReg(pParse, r1);
selectInnerLoop(pParse, p, tab1,
0, 0, &dest, iCont, iBreak);
sqlite3VdbeResolveLabel(v, iCont);
sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); VdbeCoverage(v);
sqlite3VdbeResolveLabel(v, iBreak);
sqlite3VdbeAddOp2(v, OP_Close, tab2, 0);
sqlite3VdbeAddOp2(v, OP_Close, tab1, 0);
break;
}
}
#ifndef SQLITE_OMIT_EXPLAIN
if( p->pNext==0 ){
ExplainQueryPlanPop(pParse);
}
#endif
}
/* Compute collating sequences used by
** temporary tables needed to implement the compound select.
** Attach the KeyInfo structure to all temporary tables.
**
** This section is run by the right-most SELECT statement only.
** SELECT statements to the left always skip this part. The right-most
** SELECT might also skip this part if it has no ORDER BY clause and
** no temp tables are required.
*/
if( p->selFlags & SF_UsesEphemeral ){
int i; /* Loop counter */
KeyInfo *pKeyInfo; /* Collating sequence for the result set */
Select *pLoop; /* For looping through SELECT statements */
CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */
int nCol; /* Number of columns in result set */
assert( p->pNext==0 );
nCol = p->pEList->nExpr;
pKeyInfo = sqlite3KeyInfoAlloc(db, nCol, 1);
if( !pKeyInfo ){
rc = SQLITE_NOMEM_BKPT;
goto multi_select_end;
}
for(i=0, apColl=pKeyInfo->aColl; i<nCol; i++, apColl++){
*apColl = multiSelectCollSeq(pParse, p, i);
if( 0==*apColl ){
*apColl = db->pDfltColl;
}
}
for(pLoop=p; pLoop; pLoop=pLoop->pPrior){
for(i=0; i<2; i++){
int addr = pLoop->addrOpenEphm[i];
if( addr<0 ){
/* If [0] is unused then [1] is also unused. So we can
** always safely abort as soon as the first unused slot is found */
assert( pLoop->addrOpenEphm[1]<0 );
break;
}
sqlite3VdbeChangeP2(v, addr, nCol);
sqlite3VdbeChangeP4(v, addr, (char*)sqlite3KeyInfoRef(pKeyInfo),
P4_KEYINFO);
pLoop->addrOpenEphm[i] = -1;
}
}
sqlite3KeyInfoUnref(pKeyInfo);
}
multi_select_end:
pDest->iSdst = dest.iSdst;
pDest->nSdst = dest.nSdst;
sqlite3SelectDelete(db, pDelete);
return rc;
}
#endif /* SQLITE_OMIT_COMPOUND_SELECT */
/*
** Error message for when two or more terms of a compound select have different
** size result sets.
*/
void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){
if( p->selFlags & SF_Values ){
sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms");
}else{
sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s"
" do not have the same number of result columns", selectOpName(p->op));
}
}
/*
** Code an output subroutine for a coroutine implementation of a
** SELECT statement.
**
** The data to be output is contained in pIn->iSdst. There are
** pIn->nSdst columns to be output. pDest is where the output should
** be sent.
**
** regReturn is the number of the register holding the subroutine
** return address.
**
** If regPrev>0 then it is the first register in a vector that
** records the previous output. mem[regPrev] is a flag that is false
** if there has been no previous output. When regPrev>0, code is
** generated to suppress duplicates. pKeyInfo is used for comparing
** keys.
**
** If the LIMIT found in p->iLimit is reached, jump immediately to
** iBreak.
*/
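/*
** Sketch of the regPrev vector layout assumed by the duplicate
** suppression logic below:
**
**     mem[regPrev]                        -- flag: true once a row exists
**     mem[regPrev+1..regPrev+pIn->nSdst]  -- copy of the previous row
**
** OP_Compare checks the incoming row against the saved copy and the
** OP_Jump skips the output when the two rows are equal.
*/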
static int generateOutputSubroutine(
Parse *pParse, /* Parsing context */
Select *p, /* The SELECT statement */
SelectDest *pIn, /* Coroutine supplying data */
SelectDest *pDest, /* Where to send the data */
int regReturn, /* The return address register */
int regPrev, /* Previous result register. No uniqueness if 0 */
KeyInfo *pKeyInfo, /* For comparing with previous entry */
int iBreak /* Jump here if we hit the LIMIT */
){
Vdbe *v = pParse->pVdbe;
int iContinue;
int addr;
addr = sqlite3VdbeCurrentAddr(v);
iContinue = sqlite3VdbeMakeLabel(pParse);
/* Suppress duplicates for UNION, EXCEPT, and INTERSECT
*/
if( regPrev ){
int addr1, addr2;
addr1 = sqlite3VdbeAddOp1(v, OP_IfNot, regPrev); VdbeCoverage(v);
addr2 = sqlite3VdbeAddOp4(v, OP_Compare, pIn->iSdst, regPrev+1, pIn->nSdst,
(char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO);
sqlite3VdbeAddOp3(v, OP_Jump, addr2+2, iContinue, addr2+2); VdbeCoverage(v);
sqlite3VdbeJumpHere(v, addr1);
sqlite3VdbeAddOp3(v, OP_Copy, pIn->iSdst, regPrev+1, pIn->nSdst-1);
sqlite3VdbeAddOp2(v, OP_Integer, 1, regPrev);
}
if( pParse->db->mallocFailed ) return 0;
/* Suppress the first OFFSET entries if there is an OFFSET clause
*/
codeOffset(v, p->iOffset, iContinue);
assert( pDest->eDest!=SRT_Exists );
assert( pDest->eDest!=SRT_Table );
switch( pDest->eDest ){
/* Store the result as data using a unique key.
*/
case SRT_EphemTab: {
int r1 = sqlite3GetTempReg(pParse);
int r2 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst, r1);
sqlite3VdbeAddOp2(v, OP_NewRowid, pDest->iSDParm, r2);
sqlite3VdbeAddOp3(v, OP_Insert, pDest->iSDParm, r1, r2);
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
sqlite3ReleaseTempReg(pParse, r2);
sqlite3ReleaseTempReg(pParse, r1);
break;
}
#ifndef SQLITE_OMIT_SUBQUERY
/* If we are creating a set for an "expr IN (SELECT ...)".
*/
case SRT_Set: {
int r1;
testcase( pIn->nSdst>1 );
r1 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst,
r1, pDest->zAffSdst, pIn->nSdst);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1,
pIn->iSdst, pIn->nSdst);
sqlite3ReleaseTempReg(pParse, r1);
break;
}
/* If this is a scalar select that is part of an expression, then
** store the results in the appropriate memory cell and break out
** of the scan loop. Note that the select might return multiple columns
** if it is the RHS of a row-value IN operator.
*/
case SRT_Mem: {
if( pParse->nErr==0 ){
testcase( pIn->nSdst>1 );
sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, pIn->nSdst);
}
/* The LIMIT clause will jump out of the loop for us */
break;
}
#endif /* #ifndef SQLITE_OMIT_SUBQUERY */
/* The results are stored in a sequence of registers
** starting at pDest->iSdst. Then the co-routine yields.
*/
case SRT_Coroutine: {
if( pDest->iSdst==0 ){
pDest->iSdst = sqlite3GetTempRange(pParse, pIn->nSdst);
pDest->nSdst = pIn->nSdst;
}
sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSdst, pIn->nSdst);
sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
break;
}
/* If none of the above, then the result destination must be
** SRT_Output. This routine is never called with any destination
** other than the ones handled above or SRT_Output.
**
** For SRT_Output, results are stored in a sequence of registers.
** Then the OP_ResultRow opcode is used to cause sqlite3_step() to
** return the next row of result.
*/
default: {
assert( pDest->eDest==SRT_Output );
sqlite3VdbeAddOp2(v, OP_ResultRow, pIn->iSdst, pIn->nSdst);
break;
}
}
/* Jump to the end of the loop if the LIMIT is reached.
*/
if( p->iLimit ){
sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v);
}
/* Generate the subroutine return
*/
sqlite3VdbeResolveLabel(v, iContinue);
sqlite3VdbeAddOp1(v, OP_Return, regReturn);
return addr;
}
/*
** Alternative compound select code generator for cases when there
** is an ORDER BY clause.
**
** We assume a query of the following form:
**
** <selectA> <operator> <selectB> ORDER BY <orderbylist>
**
** <operator> is one of UNION ALL, UNION, EXCEPT, or INTERSECT. The idea
** is to code both <selectA> and <selectB> with the ORDER BY clause as
** co-routines. Then run the co-routines in parallel and merge the results
** into the output. In addition to the two coroutines (called selectA and
** selectB) there are 7 subroutines:
**
** outA: Move the output of the selectA coroutine into the output
** of the compound query.
**
** outB: Move the output of the selectB coroutine into the output
** of the compound query. (Only generated for UNION and
** UNION ALL. EXCEPT and INTERSECT never output a row that
** appears only in B.)
**
** AltB: Called when there is data from both coroutines and A<B.
**
** AeqB: Called when there is data from both coroutines and A==B.
**
** AgtB: Called when there is data from both coroutines and A>B.
**
** EofA: Called when data is exhausted from selectA.
**
** EofB: Called when data is exhausted from selectB.
**
** The implementation of the latter five subroutines depends on which
** <operator> is used:
**
**
** UNION ALL UNION EXCEPT INTERSECT
** ------------- ----------------- -------------- -----------------
** AltB: outA, nextA outA, nextA outA, nextA nextA
**
** AeqB: outA, nextA nextA nextA outA, nextA
**
** AgtB: outB, nextB outB, nextB nextB nextB
**
** EofA: outB, nextB outB, nextB halt halt
**
** EofB: outA, nextA outA, nextA outA, nextA halt
**
** In the AltB, AeqB, and AgtB subroutines, an EOF on A following nextA
** causes an immediate jump to EofA and an EOF on B following nextB causes
** an immediate jump to EofB. Within EofA and EofB, an EOF on entry or
** following nextX causes a jump to the end of the select processing.
**
** Duplicate removal in the UNION, EXCEPT, and INTERSECT cases is handled
** within the output subroutine. The regPrev register set holds the previously
** output value. A comparison is made against this value and the output
** is skipped if the next results would be the same as the previous.
**
** The implementation plan is to implement the two coroutines and seven
** subroutines first, then put the control logic at the bottom. Like this:
**
** goto Init
** coA: coroutine for left query (A)
** coB: coroutine for right query (B)
** outA: output one row of A
** outB: output one row of B (UNION and UNION ALL only)
** EofA: ...
** EofB: ...
** AltB: ...
** AeqB: ...
** AgtB: ...
** Init: initialize coroutine registers
** yield coA
** if eof(A) goto EofA
** yield coB
** if eof(B) goto EofB
** Cmpr: Compare A, B
** Jump AltB, AeqB, AgtB
** End: ...
**
** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not
** actually called using Gosub and they do not Return. EofA and EofB loop
** until all data is exhausted and then jump to the "End" label. AltB,
** AeqB, and AgtB jump either to the Cmpr label or to one of EofA or EofB.
*/
#ifndef SQLITE_OMIT_COMPOUND_SELECT
static int multiSelectOrderBy(
Parse *pParse, /* Parsing context */
Select *p, /* The right-most of SELECTs to be coded */
SelectDest *pDest /* What to do with query results */
){
int i, j; /* Loop counters */
Select *pPrior; /* Another SELECT immediately to our left */
Vdbe *v; /* Generate code to this VDBE */
SelectDest destA; /* Destination for coroutine A */
SelectDest destB; /* Destination for coroutine B */
int regAddrA; /* Address register for select-A coroutine */
int regAddrB; /* Address register for select-B coroutine */
int addrSelectA; /* Address of the select-A coroutine */
int addrSelectB; /* Address of the select-B coroutine */
int regOutA; /* Address register for the output-A subroutine */
int regOutB; /* Address register for the output-B subroutine */
int addrOutA; /* Address of the output-A subroutine */
int addrOutB = 0; /* Address of the output-B subroutine */
int addrEofA; /* Address of the select-A-exhausted subroutine */
int addrEofA_noB; /* Alternate addrEofA if B is uninitialized */
int addrEofB; /* Address of the select-B-exhausted subroutine */
int addrAltB; /* Address of the A<B subroutine */
int addrAeqB; /* Address of the A==B subroutine */
int addrAgtB; /* Address of the A>B subroutine */
int regLimitA; /* Limit register for select-A */
int regLimitB; /* Limit register for select-B */
int regPrev; /* A range of registers to hold previous output */
int savedLimit; /* Saved value of p->iLimit */
int savedOffset; /* Saved value of p->iOffset */
int labelCmpr; /* Label for the start of the merge algorithm */
int labelEnd; /* Label for the end of the overall SELECT stmt */
int addr1; /* Jump instructions that get retargeted */
int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */
KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */
KeyInfo *pKeyMerge; /* Comparison information for merging rows */
sqlite3 *db; /* Database connection */
ExprList *pOrderBy; /* The ORDER BY clause */
int nOrderBy; /* Number of terms in the ORDER BY clause */
int *aPermute; /* Mapping from ORDER BY terms to result set columns */
assert( p->pOrderBy!=0 );
assert( pKeyDup==0 ); /* "Managed" code needs this. Ticket #3382. */
db = pParse->db;
v = pParse->pVdbe;
assert( v!=0 ); /* Already thrown the error if VDBE alloc failed */
labelEnd = sqlite3VdbeMakeLabel(pParse);
labelCmpr = sqlite3VdbeMakeLabel(pParse);
/* Patch up the ORDER BY clause
*/
op = p->op;
pPrior = p->pPrior;
assert( pPrior->pOrderBy==0 );
pOrderBy = p->pOrderBy;
assert( pOrderBy );
nOrderBy = pOrderBy->nExpr;
/* For operators other than UNION ALL we have to make sure that
** the ORDER BY clause covers every term of the result set. Add
** terms to the ORDER BY clause as necessary.
*/
if( op!=TK_ALL ){
for(i=1; db->mallocFailed==0 && i<=p->pEList->nExpr; i++){
struct ExprList_item *pItem;
for(j=0, pItem=pOrderBy->a; j<nOrderBy; j++, pItem++){
assert( pItem->u.x.iOrderByCol>0 );
if( pItem->u.x.iOrderByCol==i ) break;
}
if( j==nOrderBy ){
Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0);
if( pNew==0 ) return SQLITE_NOMEM_BKPT;
pNew->flags |= EP_IntValue;
pNew->u.iValue = i;
p->pOrderBy = pOrderBy = sqlite3ExprListAppend(pParse, pOrderBy, pNew);
if( pOrderBy ) pOrderBy->a[nOrderBy++].u.x.iOrderByCol = (u16)i;
}
}
}
/* Compute the comparison permutation and the KeyInfo that are used
** with that permutation to determine whether the next
** row of results comes from selectA or selectB. Also add explicit
** collations to the ORDER BY clause terms so that when the subqueries
** to the right and the left are evaluated, they use the correct
** collation.
*/
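/*
** A hypothetical illustration: for a two-column result set with
** "... ORDER BY 2, 1", nOrderBy==2 and the array built below is
**
**     aPermute[] = { 2, 1, 0 }
**
** where aPermute[0] holds the term count and the remaining entries map
** each ORDER BY term to its zero-based result-set column.
*/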
aPermute = sqlite3DbMallocRawNN(db, sizeof(int)*(nOrderBy + 1));
if( aPermute ){
struct ExprList_item *pItem;
aPermute[0] = nOrderBy;
for(i=1, pItem=pOrderBy->a; i<=nOrderBy; i++, pItem++){
assert( pItem->u.x.iOrderByCol>0 );
assert( pItem->u.x.iOrderByCol<=p->pEList->nExpr );
aPermute[i] = pItem->u.x.iOrderByCol - 1;
}
pKeyMerge = multiSelectOrderByKeyInfo(pParse, p, 1);
}else{
pKeyMerge = 0;
}
/* Reattach the ORDER BY clause to the query.
*/
p->pOrderBy = pOrderBy;
pPrior->pOrderBy = sqlite3ExprListDup(pParse->db, pOrderBy, 0);
/* Allocate a range of temporary registers and the KeyInfo needed
** for the logic that removes duplicate result rows when the
** operator is UNION, EXCEPT, or INTERSECT (but not UNION ALL).
*/
if( op==TK_ALL ){
regPrev = 0;
}else{
int nExpr = p->pEList->nExpr;
assert( nOrderBy>=nExpr || db->mallocFailed );
regPrev = pParse->nMem+1;
pParse->nMem += nExpr+1;
sqlite3VdbeAddOp2(v, OP_Integer, 0, regPrev);
pKeyDup = sqlite3KeyInfoAlloc(db, nExpr, 1);
if( pKeyDup ){
assert( sqlite3KeyInfoIsWriteable(pKeyDup) );
for(i=0; i<nExpr; i++){
pKeyDup->aColl[i] = multiSelectCollSeq(pParse, p, i);
pKeyDup->aSortFlags[i] = 0;
}
}
}
/* Separate the left and the right query from one another
*/
p->pPrior = 0;
pPrior->pNext = 0;
sqlite3ResolveOrderGroupBy(pParse, p, p->pOrderBy, "ORDER");
if( pPrior->pPrior==0 ){
sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER");
}
/* Compute the limit registers */
computeLimitRegisters(pParse, p, labelEnd);
if( p->iLimit && op==TK_ALL ){
regLimitA = ++pParse->nMem;
regLimitB = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Copy, p->iOffset ? p->iOffset+1 : p->iLimit,
regLimitA);
sqlite3VdbeAddOp2(v, OP_Copy, regLimitA, regLimitB);
}else{
regLimitA = regLimitB = 0;
}
sqlite3ExprDelete(db, p->pLimit);
p->pLimit = 0;
regAddrA = ++pParse->nMem;
regAddrB = ++pParse->nMem;
regOutA = ++pParse->nMem;
regOutB = ++pParse->nMem;
sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA);
sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB);
ExplainQueryPlan((pParse, 1, "MERGE (%s)", selectOpName(p->op)));
/* Generate a coroutine to evaluate the SELECT statement to the
** left of the compound operator - the "A" select.
*/
addrSelectA = sqlite3VdbeCurrentAddr(v) + 1;
addr1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrA, 0, addrSelectA);
VdbeComment((v, "left SELECT"));
pPrior->iLimit = regLimitA;
ExplainQueryPlan((pParse, 1, "LEFT"));
sqlite3Select(pParse, pPrior, &destA);
sqlite3VdbeEndCoroutine(v, regAddrA);
sqlite3VdbeJumpHere(v, addr1);
/* Generate a coroutine to evaluate the SELECT statement on
** the right - the "B" select
*/
addrSelectB = sqlite3VdbeCurrentAddr(v) + 1;
addr1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrB, 0, addrSelectB);
VdbeComment((v, "right SELECT"));
savedLimit = p->iLimit;
savedOffset = p->iOffset;
p->iLimit = regLimitB;
p->iOffset = 0;
ExplainQueryPlan((pParse, 1, "RIGHT"));
sqlite3Select(pParse, p, &destB);
p->iLimit = savedLimit;
p->iOffset = savedOffset;
sqlite3VdbeEndCoroutine(v, regAddrB);
/* Generate a subroutine that outputs the current row of the A
** select as the next output row of the compound select.
*/
VdbeNoopComment((v, "Output routine for A"));
addrOutA = generateOutputSubroutine(pParse,
p, &destA, pDest, regOutA,
regPrev, pKeyDup, labelEnd);
/* Generate a subroutine that outputs the current row of the B
** select as the next output row of the compound select.
*/
if( op==TK_ALL || op==TK_UNION ){
VdbeNoopComment((v, "Output routine for B"));
addrOutB = generateOutputSubroutine(pParse,
p, &destB, pDest, regOutB,
regPrev, pKeyDup, labelEnd);
}
sqlite3KeyInfoUnref(pKeyDup);
/* Generate a subroutine to run when the results from select A
** are exhausted and only data in select B remains.
*/
if( op==TK_EXCEPT || op==TK_INTERSECT ){
addrEofA_noB = addrEofA = labelEnd;
}else{
VdbeNoopComment((v, "eof-A subroutine"));
addrEofA = sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
addrEofA_noB = sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, labelEnd);
VdbeCoverage(v);
sqlite3VdbeGoto(v, addrEofA);
p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow);
}
/* Generate a subroutine to run when the results from select B
** are exhausted and only data in select A remains.
*/
if( op==TK_INTERSECT ){
addrEofB = addrEofA;
if( p->nSelectRow > pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow;
}else{
VdbeNoopComment((v, "eof-B subroutine"));
addrEofB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, labelEnd); VdbeCoverage(v);
sqlite3VdbeGoto(v, addrEofB);
}
/* Generate code to handle the case of A<B
*/
VdbeNoopComment((v, "A-lt-B subroutine"));
addrAltB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v);
sqlite3VdbeGoto(v, labelCmpr);
/* Generate code to handle the case of A==B
*/
if( op==TK_ALL ){
addrAeqB = addrAltB;
}else if( op==TK_INTERSECT ){
addrAeqB = addrAltB;
addrAltB++;
}else{
VdbeNoopComment((v, "A-eq-B subroutine"));
addrAeqB =
sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v);
sqlite3VdbeGoto(v, labelCmpr);
}
/* Generate code to handle the case of A>B
*/
VdbeNoopComment((v, "A-gt-B subroutine"));
addrAgtB = sqlite3VdbeCurrentAddr(v);
if( op==TK_ALL || op==TK_UNION ){
sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
}
sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v);
sqlite3VdbeGoto(v, labelCmpr);
/* This code runs once to initialize everything.
*/
sqlite3VdbeJumpHere(v, addr1);
sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA_noB); VdbeCoverage(v);
sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v);
/* Implement the main merge loop
*/
sqlite3VdbeResolveLabel(v, labelCmpr);
sqlite3VdbeAddOp4(v, OP_Permutation, 0, 0, 0, (char*)aPermute, P4_INTARRAY);
sqlite3VdbeAddOp4(v, OP_Compare, destA.iSdst, destB.iSdst, nOrderBy,
(char*)pKeyMerge, P4_KEYINFO);
sqlite3VdbeChangeP5(v, OPFLAG_PERMUTE);
sqlite3VdbeAddOp3(v, OP_Jump, addrAltB, addrAeqB, addrAgtB); VdbeCoverage(v);
/* Jump to this point in order to terminate the query.
*/
sqlite3VdbeResolveLabel(v, labelEnd);
/* Reassemble the compound query so that it will be freed correctly
** by the calling function */
if( p->pPrior ){
sqlite3SelectDelete(db, p->pPrior);
}
p->pPrior = pPrior;
pPrior->pNext = p;
/*** TBD: Insert subroutine calls to close cursors on incomplete
**** subqueries ****/
ExplainQueryPlanPop(pParse);
return pParse->nErr!=0;
}
#endif
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/* An instance of the SubstContext object describes a substitution edit
** to be performed on a parse tree.
**
** All references to columns in table iTable are to be replaced by corresponding
** expressions in pEList.
*/
typedef struct SubstContext {
Parse *pParse; /* The parsing context */
int iTable; /* Replace references to this table */
int iNewTable; /* New table number */
int isLeftJoin; /* Add TK_IF_NULL_ROW opcodes on each replacement */
ExprList *pEList; /* Replacement expressions */
} SubstContext;
/* Forward Declarations */
static void substExprList(SubstContext*, ExprList*);
static void substSelect(SubstContext*, Select*, int);
/*
** Scan through the expression pExpr. Replace every reference to
** a column in table number iTable with a copy of the iColumn-th
** entry in pEList. (But leave references to the ROWID column
** unchanged.)
**
** This routine is part of the flattening procedure. A subquery
** whose result set is defined by pEList appears as an entry in the
** FROM clause of a SELECT such that the VDBE cursor assigned to that
** FROM clause entry is iTable. This routine makes the necessary
** changes to pExpr so that it refers directly to the source table
** of the subquery rather than the result set of the subquery.
*/
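/*
** A hypothetical illustration: when flattening
**
**     SELECT a+10 FROM (SELECT x+y AS a FROM t1);
**
** each reference to column 0 of the subquery cursor is replaced by a
** copy of the expression "x+y", turning the outer expression into
** "x+y+10" evaluated against t1 directly.
*/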
static Expr *substExpr(
SubstContext *pSubst, /* Description of the substitution */
Expr *pExpr /* Expr in which substitution occurs */
){
if( pExpr==0 ) return 0;
if( ExprHasProperty(pExpr, EP_FromJoin)
&& pExpr->iRightJoinTable==pSubst->iTable
){
pExpr->iRightJoinTable = pSubst->iNewTable;
}
if( pExpr->op==TK_COLUMN && pExpr->iTable==pSubst->iTable ){
if( pExpr->iColumn<0 ){
pExpr->op = TK_NULL;
}else{
Expr *pNew;
Expr *pCopy = pSubst->pEList->a[pExpr->iColumn].pExpr;
Expr ifNullRow;
assert( pSubst->pEList!=0 && pExpr->iColumn<pSubst->pEList->nExpr );
assert( pExpr->pRight==0 );
if( sqlite3ExprIsVector(pCopy) ){
sqlite3VectorErrorMsg(pSubst->pParse, pCopy);
}else{
sqlite3 *db = pSubst->pParse->db;
if( pSubst->isLeftJoin && pCopy->op!=TK_COLUMN ){
memset(&ifNullRow, 0, sizeof(ifNullRow));
ifNullRow.op = TK_IF_NULL_ROW;
ifNullRow.pLeft = pCopy;
ifNullRow.iTable = pSubst->iNewTable;
pCopy = &ifNullRow;
}
testcase( ExprHasProperty(pCopy, EP_Subquery) );
pNew = sqlite3ExprDup(db, pCopy, 0);
if( pNew && pSubst->isLeftJoin ){
ExprSetProperty(pNew, EP_CanBeNull);
}
if( pNew && ExprHasProperty(pExpr,EP_FromJoin) ){
pNew->iRightJoinTable = pExpr->iRightJoinTable;
ExprSetProperty(pNew, EP_FromJoin);
}
sqlite3ExprDelete(db, pExpr);
pExpr = pNew;
/* Ensure that the expression now has an implicit collation sequence,
** just as it did when it was a column of a view or sub-query. */
if( pExpr ){
if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE ){
CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pExpr);
pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr,
(pColl ? pColl->zName : "BINARY")
);
}
ExprClearProperty(pExpr, EP_Collate);
}
}
}
}else{
if( pExpr->op==TK_IF_NULL_ROW && pExpr->iTable==pSubst->iTable ){
pExpr->iTable = pSubst->iNewTable;
}
pExpr->pLeft = substExpr(pSubst, pExpr->pLeft);
pExpr->pRight = substExpr(pSubst, pExpr->pRight);
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
substSelect(pSubst, pExpr->x.pSelect, 1);
}else{
substExprList(pSubst, pExpr->x.pList);
}
#ifndef SQLITE_OMIT_WINDOWFUNC
if( ExprHasProperty(pExpr, EP_WinFunc) ){
Window *pWin = pExpr->y.pWin;
pWin->pFilter = substExpr(pSubst, pWin->pFilter);
substExprList(pSubst, pWin->pPartition);
substExprList(pSubst, pWin->pOrderBy);
}
#endif
}
return pExpr;
}
static void substExprList(
SubstContext *pSubst, /* Description of the substitution */
ExprList *pList /* List to scan and in which to make substitutes */
){
int i;
if( pList==0 ) return;
for(i=0; i<pList->nExpr; i++){
pList->a[i].pExpr = substExpr(pSubst, pList->a[i].pExpr);
}
}
static void substSelect(
SubstContext *pSubst, /* Description of the substitution */
Select *p, /* SELECT statement in which to make substitutions */
int doPrior /* Do substitutes on p->pPrior too */
){
SrcList *pSrc;
struct SrcList_item *pItem;
int i;
if( !p ) return;
do{
substExprList(pSubst, p->pEList);
substExprList(pSubst, p->pGroupBy);
substExprList(pSubst, p->pOrderBy);
p->pHaving = substExpr(pSubst, p->pHaving);
p->pWhere = substExpr(pSubst, p->pWhere);
pSrc = p->pSrc;
assert( pSrc!=0 );
for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){
substSelect(pSubst, pItem->pSelect, 1);
if( pItem->fg.isTabFunc ){
substExprList(pSubst, pItem->u1.pFuncArg);
}
}
}while( doPrior && (p = p->pPrior)!=0 );
}
#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/*
** This routine attempts to flatten subqueries as a performance optimization.
** This routine returns 1 if it makes changes and 0 if no flattening occurs.
**
** To understand the concept of flattening, consider the following
** query:
**
** SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5
**
** The default way of implementing this query is to execute the
** subquery first and store the results in a temporary table, then
** run the outer query on that temporary table. This requires two
** passes over the data. Furthermore, because the temporary table
** has no indices, the WHERE clause on the outer query cannot be
** optimized.
**
** This routine attempts to rewrite queries such as the above into
** a single flat select, like this:
**
** SELECT x+y AS a FROM t1 WHERE z<100 AND a>5
**
** The code generated for this simplification gives the same result
** but only has to scan the data once. And because indices might
** exist on the table t1, a complete scan of the data might be
** avoided.
**
** Flattening is subject to the following constraints:
**
** (**) We no longer attempt to flatten aggregate subqueries. Was:
** The subquery and the outer query cannot both be aggregates.
**
** (**) We no longer attempt to flatten aggregate subqueries. Was:
** (2) If the subquery is an aggregate then
** (2a) the outer query must not be a join and
** (2b) the outer query must not use subqueries
** other than the one FROM-clause subquery that is a candidate
** for flattening. (This is due to ticket [2f7170d73bf9abf80]
** from 2015-02-09.)
**
** (3) If the subquery is the right operand of a LEFT JOIN then
** (3a) the subquery may not be a join and
** (3b) the FROM clause of the subquery may not contain a virtual
** table and
** (3c) the outer query may not be an aggregate.
**
** (4) The subquery cannot be DISTINCT.
**
** (**) At one point restrictions (4) and (5) defined a subset of DISTINCT
** sub-queries that were excluded from this optimization. Restriction
** (4) has since been expanded to exclude all DISTINCT subqueries.
**
** (**) We no longer attempt to flatten aggregate subqueries. Was:
** If the subquery is aggregate, the outer query may not be DISTINCT.
**
** (7) The subquery must have a FROM clause. TODO: For subqueries without
** a FROM clause, consider adding a FROM clause with the special
** table sqlite_once that consists of a single row containing a
** single NULL.
**
** (8) If the subquery uses LIMIT then the outer query may not be a join.
**
** (9) If the subquery uses LIMIT then the outer query may not be aggregate.
**
** (**) Restriction (10) was removed from the code on 2005-02-05 but we
** accidentally carried the comment forward until 2014-09-15. Original
** constraint: "If the subquery is aggregate then the outer query
** may not use LIMIT."
**
** (11) The subquery and the outer query may not both have ORDER BY clauses.
**
** (**) Not implemented. Subsumed into restriction (3). Was previously
** a separate restriction deriving from ticket #350.
**
** (13) The subquery and outer query may not both use LIMIT.
**
** (14) The subquery may not use OFFSET.
**
** (15) If the outer query is part of a compound select, then the
** subquery may not use LIMIT.
** (See ticket #2339 and ticket [02a8e81d44]).
**
** (16) If the outer query is aggregate, then the subquery may not
** use ORDER BY. (Ticket #2942) This used to not matter
** until we introduced the group_concat() function.
**
** (17) If the subquery is a compound select, then
** (17a) all compound operators must be a UNION ALL, and
** (17b) no terms within the subquery compound may be aggregate
** or DISTINCT, and
** (17c) every term within the subquery compound must have a FROM clause
** (17d) the outer query may not be
** (17d1) aggregate, or
** (17d2) DISTINCT, or
** (17d3) a join.
**
** The parent and sub-query may contain WHERE clauses. Subject to
** rules (11), (13) and (14), they may also contain ORDER BY,
** LIMIT and OFFSET clauses. The subquery cannot use any compound
** operator other than UNION ALL because all the other compound
** operators have an implied DISTINCT which is disallowed by
** restriction (4).
**
** Also, each component of the sub-query must return the same number
** of result columns. This is actually a requirement for any compound
** SELECT statement, but all the code here does is make sure that no
** such (illegal) sub-query is flattened. The caller will detect the
** syntax error and return a detailed message.
**
** (18) If the sub-query is a compound select, then all terms of the
** ORDER BY clause of the parent must be simple references to
** columns of the sub-query.
**
** (19) If the subquery uses LIMIT then the outer query may not
** have a WHERE clause.
**
** (20) If the sub-query is a compound select, then it must not use
** an ORDER BY clause. Ticket #3773. We could relax this constraint
** somewhat by saying that the terms of the ORDER BY clause must
** appear as unmodified result columns in the outer query. But we
** have other optimizations in mind to deal with that case.
**
** (21) If the subquery uses LIMIT then the outer query may not be
** DISTINCT. (See ticket [752e1646fc]).
**
** (22) The subquery may not be a recursive CTE.
**
** (**) Subsumed into restriction (17d3). Was: If the outer query is
** a recursive CTE, then the sub-query may not be a compound query.
** This restriction is because transforming the
** parent to a compound query confuses the code that handles
** recursive queries in multiSelect().
**
** (**) We no longer attempt to flatten aggregate subqueries. Was:
** The subquery may not be an aggregate that uses the built-in min() or
** max() functions. (Without this restriction, a query like:
** "SELECT x FROM (SELECT max(y), x FROM t1)" would not necessarily
** return the value X for which Y was maximal.)
**
** (25) If either the subquery or the parent query contains a window
** function in the select list or ORDER BY clause, flattening
** is not attempted.
**
** In this routine, the "p" parameter is a pointer to the outer query.
** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query
** uses aggregates.
**
** If flattening is not attempted, this routine is a no-op and returns 0.
** If flattening is attempted, this routine returns 1.
**
** All of the expression analysis must occur on both the outer query and
** the subquery before this routine runs.
*/
static int flattenSubquery(
Parse *pParse, /* Parsing context */
Select *p, /* The parent or outer SELECT statement */
int iFrom, /* Index in p->pSrc->a[] of the inner subquery */
int isAgg /* True if outer SELECT uses aggregate functions */
){
const char *zSavedAuthContext = pParse->zAuthContext;
  Select *pParent;    /* Current UNION ALL term of the outer query */
Select *pSub; /* The inner query or "subquery" */
Select *pSub1; /* Pointer to the rightmost select in sub-query */
SrcList *pSrc; /* The FROM clause of the outer query */
SrcList *pSubSrc; /* The FROM clause of the subquery */
int iParent; /* VDBE cursor number of the pSub result set temp table */
int iNewParent = -1;/* Replacement table for iParent */
int isLeftJoin = 0; /* True if pSub is the right side of a LEFT JOIN */
int i; /* Loop counter */
Expr *pWhere; /* The WHERE clause */
struct SrcList_item *pSubitem; /* The subquery */
sqlite3 *db = pParse->db;
/* Check to see if flattening is permitted. Return 0 if not.
*/
assert( p!=0 );
assert( p->pPrior==0 );
if( OptimizationDisabled(db, SQLITE_QueryFlattener) ) return 0;
pSrc = p->pSrc;
assert( pSrc && iFrom>=0 && iFrom<pSrc->nSrc );
pSubitem = &pSrc->a[iFrom];
iParent = pSubitem->iCursor;
pSub = pSubitem->pSelect;
assert( pSub!=0 );
#ifndef SQLITE_OMIT_WINDOWFUNC
if( p->pWin || pSub->pWin ) return 0; /* Restriction (25) */
#endif
pSubSrc = pSub->pSrc;
assert( pSubSrc );
/* Prior to version 3.1.2, when LIMIT and OFFSET had to be simple constants,
** not arbitrary expressions, we allowed some combining of LIMIT and OFFSET
** because they could be computed at compile-time. But when LIMIT and OFFSET
** became arbitrary expressions, we were forced to add restrictions (13)
** and (14). */
if( pSub->pLimit && p->pLimit ) return 0; /* Restriction (13) */
if( pSub->pLimit && pSub->pLimit->pRight ) return 0; /* Restriction (14) */
if( (p->selFlags & SF_Compound)!=0 && pSub->pLimit ){
return 0; /* Restriction (15) */
}
if( pSubSrc->nSrc==0 ) return 0; /* Restriction (7) */
if( pSub->selFlags & SF_Distinct ) return 0; /* Restriction (4) */
if( pSub->pLimit && (pSrc->nSrc>1 || isAgg) ){
return 0; /* Restrictions (8)(9) */
}
if( p->pOrderBy && pSub->pOrderBy ){
return 0; /* Restriction (11) */
}
if( isAgg && pSub->pOrderBy ) return 0; /* Restriction (16) */
if( pSub->pLimit && p->pWhere ) return 0; /* Restriction (19) */
if( pSub->pLimit && (p->selFlags & SF_Distinct)!=0 ){
return 0; /* Restriction (21) */
}
if( pSub->selFlags & (SF_Recursive) ){
return 0; /* Restrictions (22) */
}
/*
** If the subquery is the right operand of a LEFT JOIN, then the
** subquery may not be a join itself (3a). Example of why this is not
** allowed:
**
** t1 LEFT OUTER JOIN (t2 JOIN t3)
**
** If we flatten the above, we would get
**
** (t1 LEFT OUTER JOIN t2) JOIN t3
**
** which is not at all the same thing.
**
** If the subquery is the right operand of a LEFT JOIN, then the outer
** query cannot be an aggregate. (3c) This is an artifact of the way
** aggregates are processed - there is no mechanism to determine if
** the LEFT JOIN table should be all-NULL.
**
** See also tickets #306, #350, and #3300.
*/
if( (pSubitem->fg.jointype & JT_OUTER)!=0 ){
isLeftJoin = 1;
if( pSubSrc->nSrc>1 || isAgg || IsVirtual(pSubSrc->a[0].pTab) ){
/* (3a) (3c) (3b) */
return 0;
}
}
#ifdef SQLITE_EXTRA_IFNULLROW
else if( iFrom>0 && !isAgg ){
/* Setting isLeftJoin to -1 causes OP_IfNullRow opcodes to be generated for
    ** every reference to any result column from the subquery in a join, even
** though they are not necessary. This will stress-test the OP_IfNullRow
** opcode. */
isLeftJoin = -1;
}
#endif
/* Restriction (17): If the sub-query is a compound SELECT, then it must
** use only the UNION ALL operator. And none of the simple select queries
** that make up the compound SELECT are allowed to be aggregate or distinct
** queries.
*/
if( pSub->pPrior ){
if( pSub->pOrderBy ){
return 0; /* Restriction (20) */
}
if( isAgg || (p->selFlags & SF_Distinct)!=0 || pSrc->nSrc!=1 ){
return 0; /* (17d1), (17d2), or (17d3) */
}
for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){
testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct );
testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate );
assert( pSub->pSrc!=0 );
assert( pSub->pEList->nExpr==pSub1->pEList->nExpr );
if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0 /* (17b) */
|| (pSub1->pPrior && pSub1->op!=TK_ALL) /* (17a) */
|| pSub1->pSrc->nSrc<1 /* (17c) */
){
return 0;
}
testcase( pSub1->pSrc->nSrc>1 );
}
/* Restriction (18). */
if( p->pOrderBy ){
int ii;
for(ii=0; ii<p->pOrderBy->nExpr; ii++){
if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0;
}
}
}
/* Ex-restriction (23):
** The only way that the recursive part of a CTE can contain a compound
** subquery is for the subquery to be one term of a join. But if the
** subquery is a join, then the flattening has already been stopped by
** restriction (17d3)
*/
assert( (p->selFlags & SF_Recursive)==0 || pSub->pPrior==0 );
/***** If we reach this point, flattening is permitted. *****/
SELECTTRACE(1,pParse,p,("flatten %u.%p from term %d\n",
pSub->selId, pSub, iFrom));
/* Authorize the subquery */
pParse->zAuthContext = pSubitem->zName;
TESTONLY(i =) sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0);
testcase( i==SQLITE_DENY );
pParse->zAuthContext = zSavedAuthContext;
/* If the sub-query is a compound SELECT statement, then (by restrictions
** 17 and 18 above) it must be a UNION ALL and the parent query must
** be of the form:
**
** SELECT <expr-list> FROM (<sub-query>) <where-clause>
**
** followed by any ORDER BY, LIMIT and/or OFFSET clauses. This block
** creates N-1 copies of the parent query without any ORDER BY, LIMIT or
** OFFSET clauses and joins them to the left-hand-side of the original
** using UNION ALL operators. In this case N is the number of simple
** select statements in the compound sub-query.
**
** Example:
**
** SELECT a+1 FROM (
** SELECT x FROM tab
** UNION ALL
** SELECT y FROM tab
** UNION ALL
** SELECT abs(z*2) FROM tab2
** ) WHERE a!=5 ORDER BY 1
**
** Transformed into:
**
** SELECT x+1 FROM tab WHERE x+1!=5
** UNION ALL
** SELECT y+1 FROM tab WHERE y+1!=5
** UNION ALL
** SELECT abs(z*2)+1 FROM tab2 WHERE abs(z*2)+1!=5
** ORDER BY 1
**
** We call this the "compound-subquery flattening".
*/
for(pSub=pSub->pPrior; pSub; pSub=pSub->pPrior){
Select *pNew;
ExprList *pOrderBy = p->pOrderBy;
Expr *pLimit = p->pLimit;
Select *pPrior = p->pPrior;
p->pOrderBy = 0;
p->pSrc = 0;
p->pPrior = 0;
p->pLimit = 0;
pNew = sqlite3SelectDup(db, p, 0);
p->pLimit = pLimit;
p->pOrderBy = pOrderBy;
p->pSrc = pSrc;
p->op = TK_ALL;
if( pNew==0 ){
p->pPrior = pPrior;
}else{
pNew->pPrior = pPrior;
if( pPrior ) pPrior->pNext = pNew;
pNew->pNext = p;
p->pPrior = pNew;
SELECTTRACE(2,pParse,p,("compound-subquery flattener"
" creates %u as peer\n",pNew->selId));
}
if( db->mallocFailed ) return 1;
}
/* Begin flattening the iFrom-th entry of the FROM clause
** in the outer query.
*/
pSub = pSub1 = pSubitem->pSelect;
/* Delete the transient table structure associated with the
** subquery
*/
sqlite3DbFree(db, pSubitem->zDatabase);
sqlite3DbFree(db, pSubitem->zName);
sqlite3DbFree(db, pSubitem->zAlias);
pSubitem->zDatabase = 0;
pSubitem->zName = 0;
pSubitem->zAlias = 0;
pSubitem->pSelect = 0;
/* Defer deleting the Table object associated with the
** subquery until code generation is
** complete, since there may still exist Expr.pTab entries that
** refer to the subquery even after flattening. Ticket #3346.
**
  ** pSubitem->pTab is always non-NULL because of the restriction checks above.
*/
if( ALWAYS(pSubitem->pTab!=0) ){
Table *pTabToDel = pSubitem->pTab;
if( pTabToDel->nTabRef==1 ){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
pTabToDel->pNextZombie = pToplevel->pZombieTab;
pToplevel->pZombieTab = pTabToDel;
}else{
pTabToDel->nTabRef--;
}
pSubitem->pTab = 0;
}
/* The following loop runs once for each term in a compound-subquery
** flattening (as described above). If we are doing a different kind
** of flattening - a flattening other than a compound-subquery flattening -
** then this loop only runs once.
**
  ** This loop moves all of the FROM elements of the subquery into the
  ** FROM clause of the outer query. Before doing this, remember
** the cursor number for the original outer query FROM element in
** iParent. The iParent cursor will never be used. Subsequent code
** will scan expressions looking for iParent references and replace
** those references with expressions that resolve to the subquery FROM
** elements we are now copying in.
*/
for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){
int nSubSrc;
u8 jointype = 0;
assert( pSub!=0 );
pSubSrc = pSub->pSrc; /* FROM clause of subquery */
nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */
pSrc = pParent->pSrc; /* FROM clause of the outer query */
if( pSrc ){
assert( pParent==p ); /* First time through the loop */
jointype = pSubitem->fg.jointype;
}else{
assert( pParent!=p ); /* 2nd and subsequent times through the loop */
pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0);
if( pSrc==0 ) break;
pParent->pSrc = pSrc;
}
/* The subquery uses a single slot of the FROM clause of the outer
** query. If the subquery has more than one element in its FROM clause,
** then expand the outer query to make space for it to hold all elements
** of the subquery.
**
** Example:
**
** SELECT * FROM tabA, (SELECT * FROM sub1, sub2), tabB;
**
** The outer query has 3 slots in its FROM clause. One slot of the
** outer query (the middle slot) is used by the subquery. The next
** block of code will expand the outer query FROM clause to 4 slots.
** The middle slot is expanded to two slots in order to make space
** for the two elements in the FROM clause of the subquery.
*/
if( nSubSrc>1 ){
pSrc = sqlite3SrcListEnlarge(pParse, pSrc, nSubSrc-1,iFrom+1);
if( pSrc==0 ) break;
pParent->pSrc = pSrc;
}
/* Transfer the FROM clause terms from the subquery into the
** outer query.
*/
for(i=0; i<nSubSrc; i++){
sqlite3IdListDelete(db, pSrc->a[i+iFrom].pUsing);
assert( pSrc->a[i+iFrom].fg.isTabFunc==0 );
pSrc->a[i+iFrom] = pSubSrc->a[i];
iNewParent = pSubSrc->a[i].iCursor;
memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i]));
}
pSrc->a[iFrom].fg.jointype = jointype;
/* Now begin substituting subquery result set expressions for
** references to the iParent in the outer query.
**
** Example:
**
** SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b;
** \ \_____________ subquery __________/ /
** \_____________________ outer query ______________________________/
**
** We look at every expression in the outer query and every place we see
** "a" we substitute "x*3" and every place we see "b" we substitute "y+10".
*/
if( pSub->pOrderBy ){
/* At this point, any non-zero iOrderByCol values indicate that the
** ORDER BY column expression is identical to the iOrderByCol'th
** expression returned by SELECT statement pSub. Since these values
** do not necessarily correspond to columns in SELECT statement pParent,
      ** zero them before transferring the ORDER BY clause.
**
** Not doing this may cause an error if a subsequent call to this
** function attempts to flatten a compound sub-query into pParent
** (the only way this can happen is if the compound sub-query is
** currently part of pSub->pSrc). See ticket [d11a6e908f]. */
ExprList *pOrderBy = pSub->pOrderBy;
for(i=0; i<pOrderBy->nExpr; i++){
pOrderBy->a[i].u.x.iOrderByCol = 0;
}
assert( pParent->pOrderBy==0 );
pParent->pOrderBy = pOrderBy;
pSub->pOrderBy = 0;
}
pWhere = pSub->pWhere;
pSub->pWhere = 0;
if( isLeftJoin>0 ){
sqlite3SetJoinExpr(pWhere, iNewParent);
}
pParent->pWhere = sqlite3ExprAnd(pParse, pWhere, pParent->pWhere);
if( db->mallocFailed==0 ){
SubstContext x;
x.pParse = pParse;
x.iTable = iParent;
x.iNewTable = iNewParent;
x.isLeftJoin = isLeftJoin;
x.pEList = pSub->pEList;
substSelect(&x, pParent, 0);
}
/* The flattened query is a compound if either the inner or the
** outer query is a compound. */
pParent->selFlags |= pSub->selFlags & SF_Compound;
assert( (pSub->selFlags & SF_Distinct)==0 ); /* restriction (17b) */
/*
** SELECT ... FROM (SELECT ... LIMIT a OFFSET b) LIMIT x OFFSET y;
**
** One is tempted to try to add a and b to combine the limits. But this
** does not work if either limit is negative.
*/
if( pSub->pLimit ){
pParent->pLimit = pSub->pLimit;
pSub->pLimit = 0;
}
}
  /* Finally, delete what is left of the subquery and return
** success.
*/
sqlite3SelectDelete(db, pSub1);
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x100 ){
SELECTTRACE(0x100,pParse,p,("After flattening:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
return 1;
}
#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
/*
** A structure to keep track of all of the column values that are fixed to
** a known value due to WHERE clause constraints of the form COLUMN=VALUE.
*/
typedef struct WhereConst WhereConst;
struct WhereConst {
Parse *pParse; /* Parsing context */
  int nConst;       /* Number of COLUMN=CONSTANT terms */
int nChng; /* Number of times a constant is propagated */
Expr **apExpr; /* [i*2] is COLUMN and [i*2+1] is VALUE */
};
/*
** Add a new entry to the pConst object. Except, do not add duplicate
** pColumn entries.
*/
static void constInsert(
WhereConst *pConst, /* The WhereConst into which we are inserting */
Expr *pColumn, /* The COLUMN part of the constraint */
Expr *pValue /* The VALUE part of the constraint */
){
int i;
assert( pColumn->op==TK_COLUMN );
/* 2018-10-25 ticket [cf5ed20f]
** Make sure the same pColumn is not inserted more than once */
for(i=0; i<pConst->nConst; i++){
const Expr *pExpr = pConst->apExpr[i*2];
assert( pExpr->op==TK_COLUMN );
if( pExpr->iTable==pColumn->iTable
&& pExpr->iColumn==pColumn->iColumn
){
return; /* Already present. Return without doing anything. */
}
}
pConst->nConst++;
pConst->apExpr = sqlite3DbReallocOrFree(pConst->pParse->db, pConst->apExpr,
pConst->nConst*2*sizeof(Expr*));
if( pConst->apExpr==0 ){
pConst->nConst = 0;
}else{
if( ExprHasProperty(pValue, EP_FixedCol) ) pValue = pValue->pLeft;
pConst->apExpr[pConst->nConst*2-2] = pColumn;
pConst->apExpr[pConst->nConst*2-1] = pValue;
}
}
/*
** Find all terms of COLUMN=VALUE or VALUE=COLUMN in pExpr where VALUE
** is a constant expression and where the term must be true because it
** is part of the AND-connected terms of the expression. For each term
** found, add it to the pConst structure.
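**
** For example (illustrative only), given "WHERE x=7 AND y=11" both
** (x,7) and (y,11) are recorded, but given "WHERE x=7 OR y=11"
** nothing is recorded, since neither term is necessarily true.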
*/
static void findConstInWhere(WhereConst *pConst, Expr *pExpr){
Expr *pRight, *pLeft;
if( pExpr==0 ) return;
if( ExprHasProperty(pExpr, EP_FromJoin) ) return;
if( pExpr->op==TK_AND ){
findConstInWhere(pConst, pExpr->pRight);
findConstInWhere(pConst, pExpr->pLeft);
return;
}
if( pExpr->op!=TK_EQ ) return;
pRight = pExpr->pRight;
pLeft = pExpr->pLeft;
assert( pRight!=0 );
assert( pLeft!=0 );
if( pRight->op==TK_COLUMN
&& !ExprHasProperty(pRight, EP_FixedCol)
&& sqlite3ExprIsConstant(pLeft)
&& sqlite3IsBinary(sqlite3ExprCompareCollSeq(pConst->pParse,pExpr))
){
constInsert(pConst, pRight, pLeft);
}else
if( pLeft->op==TK_COLUMN
&& !ExprHasProperty(pLeft, EP_FixedCol)
&& sqlite3ExprIsConstant(pRight)
&& sqlite3IsBinary(sqlite3ExprCompareCollSeq(pConst->pParse,pExpr))
){
constInsert(pConst, pLeft, pRight);
}
}
/*
** This is a Walker expression callback. pExpr is a candidate expression
** to be replaced by a value. If pExpr is equivalent to one of the
** columns named in pWalker->u.pConst, then overwrite it with its
** corresponding value.
*/
static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){
int i;
WhereConst *pConst;
if( pExpr->op!=TK_COLUMN ) return WRC_Continue;
if( ExprHasProperty(pExpr, EP_FixedCol) ) return WRC_Continue;
pConst = pWalker->u.pConst;
for(i=0; i<pConst->nConst; i++){
Expr *pColumn = pConst->apExpr[i*2];
if( pColumn==pExpr ) continue;
if( pColumn->iTable!=pExpr->iTable ) continue;
if( pColumn->iColumn!=pExpr->iColumn ) continue;
/* A match is found. Add the EP_FixedCol property */
pConst->nChng++;
ExprClearProperty(pExpr, EP_Leaf);
ExprSetProperty(pExpr, EP_FixedCol);
assert( pExpr->pLeft==0 );
pExpr->pLeft = sqlite3ExprDup(pConst->pParse->db, pConst->apExpr[i*2+1], 0);
break;
}
return WRC_Prune;
}
/*
** The WHERE-clause constant propagation optimization.
**
** If the WHERE clause contains terms of the form COLUMN=CONSTANT or
** CONSTANT=COLUMN that must be true (in other words, if the terms are
** top-level AND-connected terms that are not part of an ON clause of a
** LEFT JOIN)
** then throughout the query replace all other occurrences of COLUMN
** with CONSTANT within the WHERE clause.
**
** For example, the query:
**
** SELECT * FROM t1, t2, t3 WHERE t1.a=39 AND t2.b=t1.a AND t3.c=t2.b
**
** Is transformed into
**
** SELECT * FROM t1, t2, t3 WHERE t1.a=39 AND t2.b=39 AND t3.c=39
**
** Return true if any transformations were made and false if not.
**
** Implementation note: Constant propagation is tricky due to affinity
** and collating sequence interactions. Consider this example:
**
** CREATE TABLE t1(a INT,b TEXT);
** INSERT INTO t1 VALUES(123,'0123');
** SELECT * FROM t1 WHERE a=123 AND b=a;
** SELECT * FROM t1 WHERE a=123 AND b=123;
**
** The two SELECT statements above should return different answers. b=a
** is always true because the comparison uses numeric affinity, but b=123
** is false because it uses text affinity and '0123' is not the same as '123'.
** To work around this, the expression tree is not actually changed from
** "b=a" to "b=123" but rather the "a" in "b=a" is tagged with EP_FixedCol
** and the "123" value is hung off of the pLeft pointer. Code generator
** routines know to generate the constant "123" instead of looking up the
** column value. Also, to avoid collation problems, this optimization is
** only attempted if the "a=123" term uses the default BINARY collation.
*/
static int propagateConstants(
Parse *pParse, /* The parsing context */
Select *p /* The query in which to propagate constants */
){
WhereConst x;
Walker w;
int nChng = 0;
x.pParse = pParse;
do{
x.nConst = 0;
x.nChng = 0;
x.apExpr = 0;
findConstInWhere(&x, p->pWhere);
if( x.nConst ){
memset(&w, 0, sizeof(w));
w.pParse = pParse;
w.xExprCallback = propagateConstantExprRewrite;
w.xSelectCallback = sqlite3SelectWalkNoop;
w.xSelectCallback2 = 0;
w.walkerDepth = 0;
w.u.pConst = &x;
sqlite3WalkExpr(&w, p->pWhere);
sqlite3DbFree(x.pParse->db, x.apExpr);
nChng += x.nChng;
}
}while( x.nChng );
return nChng;
}
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/*
** Make copies of relevant WHERE clause terms of the outer query into
** the WHERE clause of subquery. Example:
**
** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1) WHERE x=5 AND y=10;
**
** Transformed into:
**
** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1 WHERE a=5 AND c-d=10)
** WHERE x=5 AND y=10;
**
** The hope is that the terms added to the inner query will make it more
** efficient.
**
** Do not attempt this optimization if:
**
** (1) (** This restriction was removed on 2017-09-29. We used to
** disallow this optimization for aggregate subqueries, but now
** it is allowed by putting the extra terms on the HAVING clause.
** The added HAVING clause is pointless if the subquery lacks
** a GROUP BY clause. But such a HAVING clause is also harmless
** so there does not appear to be any reason to add extra logic
** to suppress it. **)
**
** (2) The inner query is the recursive part of a common table expression.
**
** (3) The inner query has a LIMIT clause (since the changes to the WHERE
** clause would change the meaning of the LIMIT).
**
** (4) The inner query is the right operand of a LEFT JOIN and the
** expression to be pushed down does not come from the ON clause
** on that LEFT JOIN.
**
** (5) The WHERE clause expression originates in the ON or USING clause
** of a LEFT JOIN where iCursor is not the right-hand table of that
** left join. An example:
**
** SELECT *
** FROM (SELECT 1 AS a1 UNION ALL SELECT 2) AS aa
** JOIN (SELECT 1 AS b2 UNION ALL SELECT 2) AS bb ON (a1=b2)
** LEFT JOIN (SELECT 8 AS c3 UNION ALL SELECT 9) AS cc ON (b2=2);
**
** The correct answer is three rows: (1,1,NULL),(2,2,8),(2,2,9).
** But if the (b2=2) term were to be pushed down into the bb subquery,
** then the (1,1,NULL) row would be suppressed.
**
** (6) The inner query features one or more window-functions (since
** changes to the WHERE clause of the inner query could change the
** window over which window functions are calculated).
**
** Return 0 if no changes are made and non-zero if one or more WHERE clause
** terms are duplicated into the subquery.
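**
** As an illustrative sketch of the aggregate case mentioned in (1)
** above, for:
**
**     SELECT x FROM (SELECT a AS x, sum(b) AS y FROM t1 GROUP BY a)
**      WHERE x=5;
**
** the x=5 term is duplicated into the sub-query as "HAVING a=5"
** rather than being added to its WHERE clause.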
*/
static int pushDownWhereTerms(
Parse *pParse, /* Parse context (for malloc() and error reporting) */
Select *pSubq, /* The subquery whose WHERE clause is to be augmented */
Expr *pWhere, /* The WHERE clause of the outer query */
int iCursor, /* Cursor number of the subquery */
int isLeftJoin /* True if pSubq is the right term of a LEFT JOIN */
){
Expr *pNew;
int nChng = 0;
if( pWhere==0 ) return 0;
if( pSubq->selFlags & SF_Recursive ) return 0; /* restriction (2) */
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pSubq->pWin ) return 0; /* restriction (6) */
#endif
#ifdef SQLITE_DEBUG
/* Only the first term of a compound can have a WITH clause. But make
** sure no other terms are marked SF_Recursive in case something changes
** in the future.
*/
{
Select *pX;
for(pX=pSubq; pX; pX=pX->pPrior){
assert( (pX->selFlags & (SF_Recursive))==0 );
}
}
#endif
if( pSubq->pLimit!=0 ){
return 0; /* restriction (3) */
}
while( pWhere->op==TK_AND ){
nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight,
iCursor, isLeftJoin);
pWhere = pWhere->pLeft;
}
if( isLeftJoin
&& (ExprHasProperty(pWhere,EP_FromJoin)==0
|| pWhere->iRightJoinTable!=iCursor)
){
return 0; /* restriction (4) */
}
if( ExprHasProperty(pWhere,EP_FromJoin) && pWhere->iRightJoinTable!=iCursor ){
return 0; /* restriction (5) */
}
if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){
nChng++;
while( pSubq ){
SubstContext x;
pNew = sqlite3ExprDup(pParse->db, pWhere, 0);
unsetJoinExpr(pNew, -1);
x.pParse = pParse;
x.iTable = iCursor;
x.iNewTable = iCursor;
x.isLeftJoin = 0;
x.pEList = pSubq->pEList;
pNew = substExpr(&x, pNew);
if( pSubq->selFlags & SF_Aggregate ){
pSubq->pHaving = sqlite3ExprAnd(pParse, pSubq->pHaving, pNew);
}else{
pSubq->pWhere = sqlite3ExprAnd(pParse, pSubq->pWhere, pNew);
}
pSubq = pSubq->pPrior;
}
}
return nChng;
}
#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
/*
** The pFunc is the only aggregate function in the query. Check to see
** if the query is a candidate for the min/max optimization.
**
** If the query is a candidate for the min/max optimization, then set
** *ppMinMax to be an ORDER BY clause to be used for the optimization
** and return either WHERE_ORDERBY_MIN or WHERE_ORDERBY_MAX depending on
** whether pFunc is a min() or max() function.
**
** If the query is not a candidate for the min/max optimization, return
** WHERE_ORDERBY_NORMAL (which must be zero).
**
** This routine must be called after aggregate functions have been
** located but before their arguments have been subjected to aggregate
** analysis.
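**
** For example (an illustrative sketch), the query:
**
**     SELECT min(x) FROM t1;
**
** is a candidate: *ppMinMax is set to an ORDER BY clause equivalent
** to "ORDER BY x" and WHERE_ORDERBY_MIN is returned, so that the
** caller can satisfy the aggregate by visiting only the first row of
** a suitably ordered scan.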
*/
static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){
int eRet = WHERE_ORDERBY_NORMAL; /* Return value */
ExprList *pEList = pFunc->x.pList; /* Arguments to agg function */
const char *zFunc; /* Name of aggregate function pFunc */
ExprList *pOrderBy;
u8 sortFlags;
assert( *ppMinMax==0 );
assert( pFunc->op==TK_AGG_FUNCTION );
assert( !IsWindowFunc(pFunc) );
if( pEList==0 || pEList->nExpr!=1 || ExprHasProperty(pFunc, EP_WinFunc) ){
return eRet;
}
zFunc = pFunc->u.zToken;
if( sqlite3StrICmp(zFunc, "min")==0 ){
eRet = WHERE_ORDERBY_MIN;
sortFlags = KEYINFO_ORDER_BIGNULL;
}else if( sqlite3StrICmp(zFunc, "max")==0 ){
eRet = WHERE_ORDERBY_MAX;
sortFlags = KEYINFO_ORDER_DESC;
}else{
return eRet;
}
*ppMinMax = pOrderBy = sqlite3ExprListDup(db, pEList, 0);
assert( pOrderBy!=0 || db->mallocFailed );
if( pOrderBy ) pOrderBy->a[0].sortFlags = sortFlags;
return eRet;
}
/*
** The select statement passed as the first argument is an aggregate query.
** The second argument is the associated aggregate-info object. This
** function tests if the SELECT is of the form:
**
** SELECT count(*) FROM <tbl>
**
** where <tbl> is a database table, not a sub-select or view. If the query
** does match this pattern, then a pointer to the Table object representing
** <tbl> is returned. Otherwise, 0 is returned.
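**
** For example (illustrative only), "SELECT count(*) FROM t1" matches
** this pattern, but "SELECT count(*) FROM t1 WHERE x>0" does not
** (because of the WHERE clause), and neither does a count() over a
** view or sub-select.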
*/
static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){
Table *pTab;
Expr *pExpr;
assert( !p->pGroupBy );
if( p->pWhere || p->pEList->nExpr!=1
|| p->pSrc->nSrc!=1 || p->pSrc->a[0].pSelect
){
return 0;
}
pTab = p->pSrc->a[0].pTab;
pExpr = p->pEList->a[0].pExpr;
assert( pTab && !pTab->pSelect && pExpr );
if( IsVirtual(pTab) ) return 0;
if( pExpr->op!=TK_AGG_FUNCTION ) return 0;
if( NEVER(pAggInfo->nFunc==0) ) return 0;
if( (pAggInfo->aFunc[0].pFunc->funcFlags&SQLITE_FUNC_COUNT)==0 ) return 0;
if( ExprHasProperty(pExpr, EP_Distinct|EP_WinFunc) ) return 0;
return pTab;
}
/*
** If the source-list item passed as an argument was augmented with an
** INDEXED BY clause, then try to locate the specified index. If there
** was such a clause and the named index cannot be found, return
** SQLITE_ERROR and leave an error in pParse. Otherwise, populate
** pFrom->pIBIndex and return SQLITE_OK.
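**
** For example (illustrative only), in:
**
**     SELECT * FROM t1 INDEXED BY i1 WHERE a=5;
**
** the name "i1" is resolved against the indexes of t1 and the
** resulting Index pointer is stored in pFrom->pIBIndex.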
*/
int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){
if( pFrom->pTab && pFrom->fg.isIndexedBy ){
Table *pTab = pFrom->pTab;
char *zIndexedBy = pFrom->u1.zIndexedBy;
Index *pIdx;
for(pIdx=pTab->pIndex;
pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy);
pIdx=pIdx->pNext
);
if( !pIdx ){
      sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy);
pParse->checkSchema = 1;
return SQLITE_ERROR;
}
pFrom->pIBIndex = pIdx;
}
return SQLITE_OK;
}
/*
** Detect compound SELECT statements that use an ORDER BY clause with
** an alternative collating sequence.
**
** SELECT ... FROM t1 EXCEPT SELECT ... FROM t2 ORDER BY .. COLLATE ...
**
** These are rewritten as a subquery:
**
** SELECT * FROM (SELECT ... FROM t1 EXCEPT SELECT ... FROM t2)
** ORDER BY ... COLLATE ...
**
** This transformation is necessary because the multiSelectOrderBy() routine
** above that generates the code for a compound SELECT with an ORDER BY clause
** uses a merge algorithm that requires the same collating sequence on the
** result columns as on the ORDER BY clause. See ticket
** http://www.sqlite.org/src/info/6709574d2a
**
** This transformation is only needed for EXCEPT, INTERSECT, and UNION.
** The UNION ALL operator works fine with multiSelectOrderBy() even when
** there are COLLATE terms in the ORDER BY.
*/
static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){
int i;
Select *pNew;
Select *pX;
sqlite3 *db;
struct ExprList_item *a;
SrcList *pNewSrc;
Parse *pParse;
Token dummy;
if( p->pPrior==0 ) return WRC_Continue;
if( p->pOrderBy==0 ) return WRC_Continue;
for(pX=p; pX && (pX->op==TK_ALL || pX->op==TK_SELECT); pX=pX->pPrior){}
if( pX==0 ) return WRC_Continue;
a = p->pOrderBy->a;
for(i=p->pOrderBy->nExpr-1; i>=0; i--){
if( a[i].pExpr->flags & EP_Collate ) break;
}
if( i<0 ) return WRC_Continue;
/* If we reach this point, that means the transformation is required. */
pParse = pWalker->pParse;
db = pParse->db;
pNew = sqlite3DbMallocZero(db, sizeof(*pNew) );
if( pNew==0 ) return WRC_Abort;
memset(&dummy, 0, sizeof(dummy));
pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0,0);
if( pNewSrc==0 ) return WRC_Abort;
*pNew = *p;
p->pSrc = pNewSrc;
p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ASTERISK, 0));
p->op = TK_SELECT;
p->pWhere = 0;
pNew->pGroupBy = 0;
pNew->pHaving = 0;
pNew->pOrderBy = 0;
p->pPrior = 0;
p->pNext = 0;
p->pWith = 0;
p->selFlags &= ~SF_Compound;
assert( (p->selFlags & SF_Converted)==0 );
p->selFlags |= SF_Converted;
assert( pNew->pPrior!=0 );
pNew->pPrior->pNext = pNew;
pNew->pLimit = 0;
return WRC_Continue;
}
/*
** Check to see if the FROM clause term pFrom has table-valued function
** arguments. If it does, leave an error message in pParse and return
** non-zero, since pFrom is not allowed to be a table-valued function.
*/
static int cannotBeFunction(Parse *pParse, struct SrcList_item *pFrom){
if( pFrom->fg.isTabFunc ){
sqlite3ErrorMsg(pParse, "'%s' is not a function", pFrom->zName);
return 1;
}
return 0;
}
#ifndef SQLITE_OMIT_CTE
/*
** Argument pWith (which may be NULL) points to a linked list of nested
** WITH contexts, from inner to outermost. If the table identified by
** FROM clause element pItem is really a common-table-expression (CTE)
** then return a pointer to the CTE definition for that table. Otherwise
** return NULL.
**
** If a non-NULL value is returned, set *ppContext to point to the With
** object that the returned CTE belongs to.
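**
** For example (an illustrative sketch), in:
**
**     WITH t AS (SELECT 1)
**     SELECT * FROM (WITH t AS (SELECT 2) SELECT * FROM t);
**
** the inner reference to "t" resolves to the innermost WITH clause,
** since the list is searched from the inside out.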
*/
static struct Cte *searchWith(
With *pWith, /* Current innermost WITH clause */
struct SrcList_item *pItem, /* FROM clause element to resolve */
With **ppContext /* OUT: WITH clause return value belongs to */
){
const char *zName;
if( pItem->zDatabase==0 && (zName = pItem->zName)!=0 ){
With *p;
for(p=pWith; p; p=p->pOuter){
int i;
for(i=0; i<p->nCte; i++){
if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){
*ppContext = p;
return &p->a[i];
}
}
}
}
return 0;
}
/* The code generator maintains a stack of active WITH clauses
** with the inner-most WITH clause being at the top of the stack.
**
** This routine pushes the WITH clause passed as the second argument
** onto the top of the stack. If argument bFree is true, then this
** WITH clause will never be popped from the stack. In this case it
** should be freed along with the Parse object. In other cases, when
** bFree==0, the With object will be freed along with the SELECT
** statement with which it is associated.
*/
void sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){
assert( bFree==0 || (pParse->pWith==0 && pParse->pWithToFree==0) );
if( pWith ){
assert( pParse->pWith!=pWith );
pWith->pOuter = pParse->pWith;
pParse->pWith = pWith;
if( bFree ) pParse->pWithToFree = pWith;
}
}
/*
** This function checks if argument pFrom refers to a CTE declared by
** a WITH clause on the stack currently maintained by the parser. And,
** if the parser is currently processing a CTE expression, whether pFrom
** is a recursive reference to that CTE.
**
** If pFrom falls into either of the two categories above, pFrom->pTab
** and other fields are populated accordingly. The caller should check
** (pFrom->pTab!=0) to determine whether or not a successful match
** was found.
**
** Whether or not a match is found, SQLITE_OK is returned if no error
** occurs. If an error does occur, an error message is stored in the
** parser and some error code other than SQLITE_OK is returned.
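**
** For example (illustrative only), while expanding:
**
**     WITH RECURSIVE cnt(x) AS (
**       SELECT 1 UNION ALL SELECT x+1 FROM cnt WHERE x<10
**     )
**     SELECT x FROM cnt;
**
** the reference to "cnt" within its own definition is detected and
** the SF_Recursive flag is set on the defining SELECT.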
*/
static int withExpand(
Walker *pWalker,
struct SrcList_item *pFrom
){
Parse *pParse = pWalker->pParse;
sqlite3 *db = pParse->db;
struct Cte *pCte; /* Matched CTE (or NULL if no match) */
With *pWith; /* WITH clause that pCte belongs to */
assert( pFrom->pTab==0 );
if( pParse->nErr ){
return SQLITE_ERROR;
}
pCte = searchWith(pParse->pWith, pFrom, &pWith);
if( pCte ){
Table *pTab;
ExprList *pEList;
Select *pSel;
Select *pLeft; /* Left-most SELECT statement */
int bMayRecursive; /* True if compound joined by UNION [ALL] */
With *pSavedWith; /* Initial value of pParse->pWith */
/* If pCte->zCteErr is non-NULL at this point, then this is an illegal
** recursive reference to CTE pCte. Leave an error in pParse and return
** early. If pCte->zCteErr is NULL, then this is not a recursive reference.
** In this case, proceed. */
if( pCte->zCteErr ){
sqlite3ErrorMsg(pParse, pCte->zCteErr, pCte->zName);
return SQLITE_ERROR;
}
if( cannotBeFunction(pParse, pFrom) ) return SQLITE_ERROR;
assert( pFrom->pTab==0 );
pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table));
if( pTab==0 ) return WRC_Abort;
pTab->nTabRef = 1;
pTab->zName = sqlite3DbStrDup(db, pCte->zName);
pTab->iPKey = -1;
pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid;
pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0);
if( db->mallocFailed ) return SQLITE_NOMEM_BKPT;
assert( pFrom->pSelect );
/* Check if this is a recursive CTE. */
pSel = pFrom->pSelect;
bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION );
if( bMayRecursive ){
int i;
SrcList *pSrc = pFrom->pSelect->pSrc;
for(i=0; i<pSrc->nSrc; i++){
struct SrcList_item *pItem = &pSrc->a[i];
if( pItem->zDatabase==0
&& pItem->zName!=0
&& 0==sqlite3StrICmp(pItem->zName, pCte->zName)
){
pItem->pTab = pTab;
pItem->fg.isRecursive = 1;
pTab->nTabRef++;
pSel->selFlags |= SF_Recursive;
}
}
}
/* Only one recursive reference is permitted. */
if( pTab->nTabRef>2 ){
sqlite3ErrorMsg(
pParse, "multiple references to recursive table: %s", pCte->zName
);
return SQLITE_ERROR;
}
assert( pTab->nTabRef==1 ||
((pSel->selFlags&SF_Recursive) && pTab->nTabRef==2 ));
pCte->zCteErr = "circular reference: %s";
pSavedWith = pParse->pWith;
pParse->pWith = pWith;
if( bMayRecursive ){
Select *pPrior = pSel->pPrior;
assert( pPrior->pWith==0 );
pPrior->pWith = pSel->pWith;
sqlite3WalkSelect(pWalker, pPrior);
pPrior->pWith = 0;
}else{
sqlite3WalkSelect(pWalker, pSel);
}
pParse->pWith = pWith;
for(pLeft=pSel; pLeft->pPrior; pLeft=pLeft->pPrior);
pEList = pLeft->pEList;
if( pCte->pCols ){
if( pEList && pEList->nExpr!=pCte->pCols->nExpr ){
sqlite3ErrorMsg(pParse, "table %s has %d values for %d columns",
pCte->zName, pEList->nExpr, pCte->pCols->nExpr
);
pParse->pWith = pSavedWith;
return SQLITE_ERROR;
}
pEList = pCte->pCols;
}
sqlite3ColumnsFromExprList(pParse, pEList, &pTab->nCol, &pTab->aCol);
if( bMayRecursive ){
if( pSel->selFlags & SF_Recursive ){
pCte->zCteErr = "multiple recursive references: %s";
}else{
pCte->zCteErr = "recursive reference in a subquery: %s";
}
sqlite3WalkSelect(pWalker, pSel);
}
pCte->zCteErr = 0;
pParse->pWith = pSavedWith;
}
return SQLITE_OK;
}
#endif
#ifndef SQLITE_OMIT_CTE
/*
** If the SELECT passed as the second argument has an associated WITH
** clause, pop it from the stack stored as part of the Parse object.
**
** This function is used as the xSelectCallback2() callback by
** sqlite3SelectExpand() when walking a SELECT tree to resolve table
** names and other FROM clause elements.
*/
static void selectPopWith(Walker *pWalker, Select *p){
Parse *pParse = pWalker->pParse;
if( OK_IF_ALWAYS_TRUE(pParse->pWith) && p->pPrior==0 ){
With *pWith = findRightmost(p)->pWith;
if( pWith!=0 ){
assert( pParse->pWith==pWith );
pParse->pWith = pWith->pOuter;
}
}
}
#else
#define selectPopWith 0
#endif
/*
** The SrcList_item structure passed as the second argument represents a
** sub-query in the FROM clause of a SELECT statement. This function
** allocates and populates the SrcList_item.pTab object. If successful,
** SQLITE_OK is returned. Otherwise, if an OOM error is encountered,
** SQLITE_NOMEM is returned.
*/
int sqlite3ExpandSubquery(Parse *pParse, struct SrcList_item *pFrom){
Select *pSel = pFrom->pSelect;
Table *pTab;
assert( pSel );
pFrom->pTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table));
if( pTab==0 ) return SQLITE_NOMEM;
pTab->nTabRef = 1;
if( pFrom->zAlias ){
pTab->zName = sqlite3DbStrDup(pParse->db, pFrom->zAlias);
}else{
pTab->zName = sqlite3MPrintf(pParse->db, "subquery_%u", pSel->selId);
}
while( pSel->pPrior ){ pSel = pSel->pPrior; }
sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol);
pTab->iPKey = -1;
pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
pTab->tabFlags |= TF_Ephemeral;
return pParse->nErr ? SQLITE_ERROR : SQLITE_OK;
}
/*
** This routine is a Walker callback for "expanding" a SELECT statement.
** "Expanding" means to do the following:
**
** (1) Make sure VDBE cursor numbers have been assigned to every
** element of the FROM clause.
**
** (2) Fill in the pTabList->a[].pTab fields in the SrcList that
** defines FROM clause. When views appear in the FROM clause,
** fill pTabList->a[].pSelect with a copy of the SELECT statement
** that implements the view. A copy is made of the view's SELECT
** statement so that we can freely modify or delete that statement
** without worrying about messing up the persistent representation
** of the view.
**
** (3) Add terms to the WHERE clause to accommodate the NATURAL keyword
** on joins and the ON and USING clauses of joins.
**
** (4) Scan the list of columns in the result set (pEList) looking
** for instances of the "*" operator or the TABLE.* operator.
** If found, expand each "*" to be every column in every table
** and TABLE.* to be every column in TABLE.
**
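** For example (an illustrative sketch), if t1 has columns (a,b) and
** t2 has columns (x,y), then:
**
**     SELECT * FROM t1, t2;
**
** is expanded into:
**
**     SELECT t1.a, t1.b, t2.x, t2.y FROM t1, t2;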
*/
static int selectExpander(Walker *pWalker, Select *p){
Parse *pParse = pWalker->pParse;
int i, j, k;
SrcList *pTabList;
ExprList *pEList;
struct SrcList_item *pFrom;
sqlite3 *db = pParse->db;
Expr *pE, *pRight, *pExpr;
u16 selFlags = p->selFlags;
u32 elistFlags = 0;
p->selFlags |= SF_Expanded;
if( db->mallocFailed ){
return WRC_Abort;
}
assert( p->pSrc!=0 );
if( (selFlags & SF_Expanded)!=0 ){
return WRC_Prune;
}
if( pWalker->eCode ){
/* Renumber selId because it has been copied from a view */
p->selId = ++pParse->nSelect;
}
pTabList = p->pSrc;
pEList = p->pEList;
sqlite3WithPush(pParse, p->pWith, 0);
/* Make sure cursor numbers have been assigned to all entries in
** the FROM clause of the SELECT statement.
*/
sqlite3SrcListAssignCursors(pParse, pTabList);
/* Look up every table named in the FROM clause of the select. If
** an entry of the FROM clause is a subquery instead of a table or view,
** then create a transient table structure to describe the subquery.
*/
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
Table *pTab;
assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 );
if( pFrom->fg.isRecursive ) continue;
assert( pFrom->pTab==0 );
#ifndef SQLITE_OMIT_CTE
if( withExpand(pWalker, pFrom) ) return WRC_Abort;
if( pFrom->pTab ) {} else
#endif
if( pFrom->zName==0 ){
#ifndef SQLITE_OMIT_SUBQUERY
Select *pSel = pFrom->pSelect;
/* A sub-query in the FROM clause of a SELECT */
assert( pSel!=0 );
assert( pFrom->pTab==0 );
if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort;
if( sqlite3ExpandSubquery(pParse, pFrom) ) return WRC_Abort;
#endif
}else{
/* An ordinary table or view name in the FROM clause */
assert( pFrom->pTab==0 );
pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom);
if( pTab==0 ) return WRC_Abort;
if( pTab->nTabRef>=0xffff ){
sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535",
pTab->zName);
pFrom->pTab = 0;
return WRC_Abort;
}
pTab->nTabRef++;
if( !IsVirtual(pTab) && cannotBeFunction(pParse, pFrom) ){
return WRC_Abort;
}
#if !defined(SQLITE_OMIT_VIEW) || !defined (SQLITE_OMIT_VIRTUALTABLE)
if( IsVirtual(pTab) || pTab->pSelect ){
i16 nCol;
u8 eCodeOrig = pWalker->eCode;
if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort;
assert( pFrom->pSelect==0 );
if( pTab->pSelect && (db->flags & SQLITE_EnableView)==0 ){
sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited",
pTab->zName);
}
pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0);
nCol = pTab->nCol;
pTab->nCol = -1;
pWalker->eCode = 1; /* Turn on Select.selId renumbering */
sqlite3WalkSelect(pWalker, pFrom->pSelect);
pWalker->eCode = eCodeOrig;
pTab->nCol = nCol;
}
#endif
}
/* Locate the index named by the INDEXED BY clause, if any. */
if( sqlite3IndexedByLookup(pParse, pFrom) ){
return WRC_Abort;
}
}
/* Process NATURAL keywords, and ON and USING clauses of joins.
*/
if( db->mallocFailed || sqliteProcessJoin(pParse, p) ){
return WRC_Abort;
}
/* For every "*" that occurs in the column list, insert the names of
** all columns in all tables. And for every TABLE.* insert the names
** of all columns in TABLE. The parser inserted a special expression
** with the TK_ASTERISK operator for each "*" that it found in the column
** list. The following code just has to locate the TK_ASTERISK
** expressions and expand each one to the list of all columns in
** all tables.
**
** The first loop just checks to see if there are any "*" operators
** that need expanding.
*/
for(k=0; k<pEList->nExpr; k++){
pE = pEList->a[k].pExpr;
if( pE->op==TK_ASTERISK ) break;
assert( pE->op!=TK_DOT || pE->pRight!=0 );
assert( pE->op!=TK_DOT || (pE->pLeft!=0 && pE->pLeft->op==TK_ID) );
if( pE->op==TK_DOT && pE->pRight->op==TK_ASTERISK ) break;
elistFlags |= pE->flags;
}
if( k<pEList->nExpr ){
/*
** If we get here it means the result set contains one or more "*"
** operators that need to be expanded. Loop through each expression
** in the result set and expand them one by one.
*/
struct ExprList_item *a = pEList->a;
ExprList *pNew = 0;
int flags = pParse->db->flags;
int longNames = (flags & SQLITE_FullColNames)!=0
&& (flags & SQLITE_ShortColNames)==0;
for(k=0; k<pEList->nExpr; k++){
pE = a[k].pExpr;
elistFlags |= pE->flags;
pRight = pE->pRight;
assert( pE->op!=TK_DOT || pRight!=0 );
if( pE->op!=TK_ASTERISK
&& (pE->op!=TK_DOT || pRight->op!=TK_ASTERISK)
){
/* This particular expression does not need to be expanded.
*/
pNew = sqlite3ExprListAppend(pParse, pNew, a[k].pExpr);
if( pNew ){
pNew->a[pNew->nExpr-1].zName = a[k].zName;
pNew->a[pNew->nExpr-1].zSpan = a[k].zSpan;
a[k].zName = 0;
a[k].zSpan = 0;
}
a[k].pExpr = 0;
}else{
/* This expression is a "*" or a "TABLE.*" and needs to be
** expanded. */
int tableSeen = 0; /* Set to 1 when TABLE matches */
char *zTName = 0; /* text of name of TABLE */
if( pE->op==TK_DOT ){
assert( pE->pLeft!=0 );
assert( !ExprHasProperty(pE->pLeft, EP_IntValue) );
zTName = pE->pLeft->u.zToken;
}
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
Table *pTab = pFrom->pTab;
Select *pSub = pFrom->pSelect;
char *zTabName = pFrom->zAlias;
const char *zSchemaName = 0;
int iDb;
if( zTabName==0 ){
zTabName = pTab->zName;
}
if( db->mallocFailed ) break;
if( pSub==0 || (pSub->selFlags & SF_NestedFrom)==0 ){
pSub = 0;
if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){
continue;
}
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
zSchemaName = iDb>=0 ? db->aDb[iDb].zDbSName : "*";
}
for(j=0; j<pTab->nCol; j++){
char *zName = pTab->aCol[j].zName;
char *zColname; /* The computed column name */
char *zToFree; /* Malloced string that needs to be freed */
Token sColname; /* Computed column name as a token */
assert( zName );
if( zTName && pSub
&& sqlite3MatchSpanName(pSub->pEList->a[j].zSpan, 0, zTName, 0)==0
){
continue;
}
/* If a column is marked as 'hidden', omit it from the expanded
** result-set list unless the SELECT has the SF_IncludeHidden
** bit set.
*/
if( (p->selFlags & SF_IncludeHidden)==0
&& IsHiddenColumn(&pTab->aCol[j])
){
continue;
}
tableSeen = 1;
if( i>0 && zTName==0 ){
if( (pFrom->fg.jointype & JT_NATURAL)!=0
&& tableAndColumnIndex(pTabList, i, zName, 0, 0)
){
/* In a NATURAL join, omit the join columns from the
** table to the right of the join */
continue;
}
if( sqlite3IdListIndex(pFrom->pUsing, zName)>=0 ){
/* In a join with a USING clause, omit columns in the
** using clause from the table on the right. */
continue;
}
}
pRight = sqlite3Expr(db, TK_ID, zName);
zColname = zName;
zToFree = 0;
if( longNames || pTabList->nSrc>1 ){
Expr *pLeft;
pLeft = sqlite3Expr(db, TK_ID, zTabName);
pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
if( zSchemaName ){
pLeft = sqlite3Expr(db, TK_ID, zSchemaName);
pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr);
}
if( longNames ){
zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName);
zToFree = zColname;
}
}else{
pExpr = pRight;
}
pNew = sqlite3ExprListAppend(pParse, pNew, pExpr);
sqlite3TokenInit(&sColname, zColname);
sqlite3ExprListSetName(pParse, pNew, &sColname, 0);
if( pNew && (p->selFlags & SF_NestedFrom)!=0 ){
struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];
if( pSub ){
pX->zSpan = sqlite3DbStrDup(db, pSub->pEList->a[j].zSpan);
testcase( pX->zSpan==0 );
}else{
pX->zSpan = sqlite3MPrintf(db, "%s.%s.%s",
zSchemaName, zTabName, zColname);
testcase( pX->zSpan==0 );
}
pX->bSpanIsTab = 1;
}
sqlite3DbFree(db, zToFree);
}
}
if( !tableSeen ){
if( zTName ){
sqlite3ErrorMsg(pParse, "no such table: %s", zTName);
}else{
sqlite3ErrorMsg(pParse, "no tables specified");
}
}
}
}
sqlite3ExprListDelete(db, pEList);
p->pEList = pNew;
}
if( p->pEList ){
if( p->pEList->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){
sqlite3ErrorMsg(pParse, "too many columns in result set");
return WRC_Abort;
}
if( (elistFlags & (EP_HasFunc|EP_Subquery))!=0 ){
p->selFlags |= SF_ComplexResult;
}
}
return WRC_Continue;
}
/*
** No-op routine for the parse-tree walker.
**
** When this routine is the Walker.xExprCallback then expression trees
** are walked without any actions being taken at each node. Presumably,
** when this routine is used for Walker.xExprCallback then
** Walker.xSelectCallback is set to do something useful for every
** subquery in the parser tree.
*/
int sqlite3ExprWalkNoop(Walker *NotUsed, Expr *NotUsed2){
UNUSED_PARAMETER2(NotUsed, NotUsed2);
return WRC_Continue;
}
/*
** No-op routine for the parse-tree walker for SELECT statements.
*/
int sqlite3SelectWalkNoop(Walker *NotUsed, Select *NotUsed2){
UNUSED_PARAMETER2(NotUsed, NotUsed2);
return WRC_Continue;
}
#if SQLITE_DEBUG
/*
** Always assert. This xSelectCallback2 implementation proves that the
** xSelectCallback2 is never invoked.
*/
void sqlite3SelectWalkAssert2(Walker *NotUsed, Select *NotUsed2){
UNUSED_PARAMETER2(NotUsed, NotUsed2);
assert( 0 );
}
#endif
/*
** This routine "expands" a SELECT statement and all of its subqueries.
** For additional information on what it means to "expand" a SELECT
** statement, see the comment on the selectExpander() worker callback above.
**
** Expanding a SELECT statement is the first step in processing a
** SELECT statement. The SELECT statement must be expanded before
** name resolution is performed.
**
** If anything goes wrong, an error message is written into pParse.
** The calling function can detect the problem by looking at pParse->nErr
** and/or pParse->db->mallocFailed.
*/
static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){
Walker w;
w.xExprCallback = sqlite3ExprWalkNoop;
w.pParse = pParse;
if( OK_IF_ALWAYS_TRUE(pParse->hasCompound) ){
w.xSelectCallback = convertCompoundSelectToSubquery;
w.xSelectCallback2 = 0;
sqlite3WalkSelect(&w, pSelect);
}
w.xSelectCallback = selectExpander;
w.xSelectCallback2 = selectPopWith;
w.eCode = 0;
sqlite3WalkSelect(&w, pSelect);
}
#ifndef SQLITE_OMIT_SUBQUERY
/*
** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo()
** interface.
**
** For each FROM-clause subquery, add Column.zType and Column.zColl
** information to the Table structure that represents the result set
** of that subquery.
**
** The Table structure that represents the result set was constructed
** by selectExpander() but the type and collation information was omitted
** at that point because identifiers had not yet been resolved. This
** routine is called after identifier resolution.
*/
static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
Parse *pParse;
int i;
SrcList *pTabList;
struct SrcList_item *pFrom;
assert( p->selFlags & SF_Resolved );
if( p->selFlags & SF_HasTypeInfo ) return;
p->selFlags |= SF_HasTypeInfo;
pParse = pWalker->pParse;
pTabList = p->pSrc;
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
Table *pTab = pFrom->pTab;
assert( pTab!=0 );
if( (pTab->tabFlags & TF_Ephemeral)!=0 ){
/* A sub-query in the FROM clause of a SELECT */
Select *pSel = pFrom->pSelect;
if( pSel ){
while( pSel->pPrior ) pSel = pSel->pPrior;
sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSel,
SQLITE_AFF_NONE);
}
}
}
}
#endif
/*
** This routine adds datatype and collating sequence information to
** the Table structures of all FROM-clause subqueries in a
** SELECT statement.
**
** Use this routine after name resolution.
*/
static void sqlite3SelectAddTypeInfo(Parse *pParse, Select *pSelect){
#ifndef SQLITE_OMIT_SUBQUERY
Walker w;
w.xSelectCallback = sqlite3SelectWalkNoop;
w.xSelectCallback2 = selectAddSubqueryTypeInfo;
w.xExprCallback = sqlite3ExprWalkNoop;
w.pParse = pParse;
sqlite3WalkSelect(&w, pSelect);
#endif
}
/*
** This routine sets up a SELECT statement for processing. The
** following is accomplished:
**
** * VDBE Cursor numbers are assigned to all FROM-clause terms.
** * Ephemeral Table objects are created for all FROM-clause subqueries.
** * ON and USING clauses are shifted into the WHERE clause.
** * Wildcards "*" and "TABLE.*" in result sets are expanded.
** * Identifiers in expressions are matched to tables.
**
** This routine acts recursively on all subqueries within the SELECT.
*/
void sqlite3SelectPrep(
Parse *pParse, /* The parser context */
Select *p, /* The SELECT statement being coded. */
NameContext *pOuterNC /* Name context for container */
){
assert( p!=0 || pParse->db->mallocFailed );
if( pParse->db->mallocFailed ) return;
if( p->selFlags & SF_HasTypeInfo ) return;
sqlite3SelectExpand(pParse, p);
if( pParse->nErr || pParse->db->mallocFailed ) return;
sqlite3ResolveSelectNames(pParse, p, pOuterNC);
if( pParse->nErr || pParse->db->mallocFailed ) return;
sqlite3SelectAddTypeInfo(pParse, p);
}
/*
** Reset the aggregate accumulator.
**
** The aggregate accumulator is a set of memory cells that hold
** intermediate results while calculating an aggregate. This
** routine generates code that stores NULLs in all of those memory
** cells.
*/
static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
Vdbe *v = pParse->pVdbe;
int i;
struct AggInfo_func *pFunc;
int nReg = pAggInfo->nFunc + pAggInfo->nColumn;
if( nReg==0 ) return;
#ifdef SQLITE_DEBUG
/* Verify that all AggInfo registers are within the range specified by
** AggInfo.mnReg..AggInfo.mxReg */
assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 );
for(i=0; i<pAggInfo->nColumn; i++){
assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg
&& pAggInfo->aCol[i].iMem<=pAggInfo->mxReg );
}
for(i=0; i<pAggInfo->nFunc; i++){
assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg
&& pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg );
}
#endif
sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg);
for(pFunc=pAggInfo->aFunc, i=0; i<pAggInfo->nFunc; i++, pFunc++){
if( pFunc->iDistinct>=0 ){
Expr *pE = pFunc->pExpr;
assert( !ExprHasProperty(pE, EP_xIsSelect) );
if( pE->x.pList==0 || pE->x.pList->nExpr!=1 ){
sqlite3ErrorMsg(pParse, "DISTINCT aggregates must have exactly one "
"argument");
pFunc->iDistinct = -1;
}else{
KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pE->x.pList,0,0);
sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0,
(char*)pKeyInfo, P4_KEYINFO);
}
}
}
}
/*
** Invoke the OP_AggFinalize opcode for every aggregate function
** in the AggInfo structure.
*/
static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
Vdbe *v = pParse->pVdbe;
int i;
struct AggInfo_func *pF;
for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
ExprList *pList = pF->pExpr->x.pList;
assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) );
sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0);
sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
}
}
/*
** Update the accumulator memory cells for an aggregate based on
** the current cursor position.
**
** If regAcc is non-zero and there are no min() or max() aggregates
** in pAggInfo, then only populate the pAggInfo->nAccumulator accumulator
** registers if register regAcc contains 0. The caller will take care
** of setting and clearing regAcc.
*/
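/* Rough sketch of the interlock (illustrative, not a specification):
** register regAcc holds 0 on the first row of a group and 1 thereafter.
** When it is used as the "magnet" register regHit, the OP_If coded near
** the bottom of this routine falls through on the first row, so the
** pAggInfo->nAccumulator registers are loaded at least once, and jumps
** on subsequent rows so that they are not reloaded needlessly. min()
** and max() can set the magnet register through their own machinery so
** that the load is skipped when the current row does not change the
** extreme value.
*/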
static void updateAccumulator(Parse *pParse, int regAcc, AggInfo *pAggInfo){
Vdbe *v = pParse->pVdbe;
int i;
int regHit = 0;
int addrHitTest = 0;
struct AggInfo_func *pF;
struct AggInfo_col *pC;
pAggInfo->directMode = 1;
for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
int nArg;
int addrNext = 0;
int regAgg;
ExprList *pList = pF->pExpr->x.pList;
assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) );
assert( !IsWindowFunc(pF->pExpr) );
if( ExprHasProperty(pF->pExpr, EP_WinFunc) ){
Expr *pFilter = pF->pExpr->y.pWin->pFilter;
if( pAggInfo->nAccumulator
&& (pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
){
if( regHit==0 ) regHit = ++pParse->nMem;
/* If this is the first row of the group (regAcc==0), clear the
** "magnet" register regHit so that the accumulator registers
** are populated if the FILTER clause jumps over the
** invocation of min() or max() altogether. Or, if this is not
** the first row (regAcc==1), set the magnet register so that the
** accumulators are not populated unless the min()/max() is invoked and
** indicates that they should be. */
sqlite3VdbeAddOp2(v, OP_Copy, regAcc, regHit);
}
addrNext = sqlite3VdbeMakeLabel(pParse);
sqlite3ExprIfFalse(pParse, pFilter, addrNext, SQLITE_JUMPIFNULL);
}
if( pList ){
nArg = pList->nExpr;
regAgg = sqlite3GetTempRange(pParse, nArg);
sqlite3ExprCodeExprList(pParse, pList, regAgg, 0, SQLITE_ECEL_DUP);
}else{
nArg = 0;
regAgg = 0;
}
if( pF->iDistinct>=0 ){
if( addrNext==0 ){
addrNext = sqlite3VdbeMakeLabel(pParse);
}
testcase( nArg==0 ); /* Error condition */
testcase( nArg>1 ); /* Also an error */
codeDistinct(pParse, pF->iDistinct, addrNext, 1, regAgg);
}
if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
CollSeq *pColl = 0;
struct ExprList_item *pItem;
int j;
assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */
for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){
pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr);
}
if( !pColl ){
pColl = pParse->db->pDfltColl;
}
if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
}
sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, pF->iMem);
sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, (u8)nArg);
sqlite3ReleaseTempRange(pParse, regAgg, nArg);
if( addrNext ){
sqlite3VdbeResolveLabel(v, addrNext);
}
}
if( regHit==0 && pAggInfo->nAccumulator ){
regHit = regAcc;
}
if( regHit ){
addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v);
}
for(i=0, pC=pAggInfo->aCol; i<pAggInfo->nAccumulator; i++, pC++){
sqlite3ExprCode(pParse, pC->pExpr, pC->iMem);
}
pAggInfo->directMode = 0;
if( addrHitTest ){
sqlite3VdbeJumpHere(v, addrHitTest);
}
}
/*
** Add a single OP_Explain instruction to the VDBE to explain a simple
** count(*) query ("SELECT count(*) FROM pTab").
*/
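/* The generated row reads, for example (names illustrative):
**
**     SCAN TABLE t1 USING COVERING INDEX sqlite_autoindex_t1_1
**
** or simply "SCAN TABLE t1" when no covering index is used.
*/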
#ifndef SQLITE_OMIT_EXPLAIN
static void explainSimpleCount(
Parse *pParse, /* Parse context */
Table *pTab, /* Table being queried */
Index *pIdx /* Index used to optimize scan, or NULL */
){
if( pParse->explain==2 ){
int bCover = (pIdx!=0 && (HasRowid(pTab) || !IsPrimaryKeyIndex(pIdx)));
sqlite3VdbeExplain(pParse, 0, "SCAN TABLE %s%s%s",
pTab->zName,
bCover ? " USING COVERING INDEX " : "",
bCover ? pIdx->zName : ""
);
}
}
#else
# define explainSimpleCount(a,b,c)
#endif
/*
** sqlite3WalkExpr() callback used by havingToWhere().
**
** If the node passed to the callback is a TK_AND node, return
** WRC_Continue to tell sqlite3WalkExpr() to iterate through child nodes.
**
** Otherwise, return WRC_Prune. In this case, also check if the
** sub-expression matches the criteria for being moved to the WHERE
** clause. If so, add it to the WHERE clause and replace the sub-expression
** within the HAVING expression with a constant "1".
*/
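/* For example (illustrative only): given "HAVING b=2 AND sum(x)>5" where
** "b" is a GROUP BY term, the callback leaves the tree as if the query
** had said "WHERE ... AND b=2 ... HAVING 1 AND sum(x)>5".
*/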
static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){
if( pExpr->op!=TK_AND ){
Select *pS = pWalker->u.pSelect;
if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, pS->pGroupBy) ){
sqlite3 *db = pWalker->pParse->db;
Expr *pNew = sqlite3Expr(db, TK_INTEGER, "1");
if( pNew ){
Expr *pWhere = pS->pWhere;
SWAP(Expr, *pNew, *pExpr);
pNew = sqlite3ExprAnd(pWalker->pParse, pWhere, pNew);
pS->pWhere = pNew;
pWalker->eCode = 1;
}
}
return WRC_Prune;
}
return WRC_Continue;
}
/*
** Transfer eligible terms from the HAVING clause of a query, which is
** processed after grouping, to the WHERE clause, which is processed before
** grouping. For example, the query:
**
** SELECT * FROM <tables> WHERE a=? GROUP BY b HAVING b=? AND c=?
**
** can be rewritten as:
**
** SELECT * FROM <tables> WHERE a=? AND b=? GROUP BY b HAVING c=?
**
** A term of the HAVING expression is eligible for transfer if it consists
** entirely of constants and expressions that are also GROUP BY terms that
** use the "BINARY" collation sequence.
*/
static void havingToWhere(Parse *pParse, Select *p){
Walker sWalker;
memset(&sWalker, 0, sizeof(sWalker));
sWalker.pParse = pParse;
sWalker.xExprCallback = havingToWhereExprCb;
sWalker.u.pSelect = p;
sqlite3WalkExpr(&sWalker, p->pHaving);
#if SELECTTRACE_ENABLED
if( sWalker.eCode && (sqlite3SelectTrace & 0x100)!=0 ){
SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
}
/*
** Check to see if the pThis entry of pTabList is a self-join of a prior view.
** If it is, then return the SrcList_item for the prior view. If it is not,
** then return 0.
*/
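/* For example (illustrative only): in "SELECT * FROM v1 AS a, v1 AS b",
** where v1 is a view, the second occurrence of v1 can reuse the
** materialization of the first (via OP_OpenDup in the caller), provided
** no other optimization has rewritten one copy but not the other.
*/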
static struct SrcList_item *isSelfJoinView(
SrcList *pTabList, /* Search for self-joins in this FROM clause */
struct SrcList_item *pThis /* Search for prior reference to this subquery */
){
struct SrcList_item *pItem;
for(pItem = pTabList->a; pItem<pThis; pItem++){
Select *pS1;
if( pItem->pSelect==0 ) continue;
if( pItem->fg.viaCoroutine ) continue;
if( pItem->zName==0 ) continue;
assert( pItem->pTab!=0 );
assert( pThis->pTab!=0 );
if( pItem->pTab->pSchema!=pThis->pTab->pSchema ) continue;
if( sqlite3_stricmp(pItem->zName, pThis->zName)!=0 ) continue;
pS1 = pItem->pSelect;
if( pItem->pTab->pSchema==0 && pThis->pSelect->selId!=pS1->selId ){
/* The query flattener left two different CTE tables with identical
** names in the same FROM clause. */
continue;
}
if( sqlite3ExprCompare(0, pThis->pSelect->pWhere, pS1->pWhere, -1)
|| sqlite3ExprCompare(0, pThis->pSelect->pHaving, pS1->pHaving, -1)
){
/* The view was modified by some other optimization such as
** pushDownWhereTerms() */
continue;
}
return pItem;
}
return 0;
}
#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION
/*
** Attempt to transform a query of the form
**
** SELECT count(*) FROM (SELECT x FROM t1 UNION ALL SELECT y FROM t2)
**
** Into this:
**
** SELECT (SELECT count(*) FROM t1)+(SELECT count(*) FROM t2)
**
** The transformation only works if all of the following are true:
**
** * The subquery is a UNION ALL of two or more terms
** * The subquery does not have a LIMIT clause
** * There is no WHERE or GROUP BY or HAVING clauses on the subqueries
** * The outer query is a simple count(*) with no WHERE clause or other
** extraneous syntax.
**
** Return TRUE if the optimization is undertaken.
*/
static int countOfViewOptimization(Parse *pParse, Select *p){
Select *pSub, *pPrior;
Expr *pExpr;
Expr *pCount;
sqlite3 *db;
if( (p->selFlags & SF_Aggregate)==0 ) return 0; /* This is an aggregate */
if( p->pEList->nExpr!=1 ) return 0; /* Single result column */
if( p->pWhere ) return 0;
if( p->pGroupBy ) return 0;
pExpr = p->pEList->a[0].pExpr;
if( pExpr->op!=TK_AGG_FUNCTION ) return 0; /* Result is an aggregate */
if( sqlite3_stricmp(pExpr->u.zToken,"count") ) return 0; /* Is count() */
if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */
if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */
pSub = p->pSrc->a[0].pSelect;
if( pSub==0 ) return 0; /* The FROM is a subquery */
if( pSub->pPrior==0 ) return 0; /* Must be a compound query */
do{
if( pSub->op!=TK_ALL && pSub->pPrior ) return 0; /* Must be UNION ALL */
if( pSub->pWhere ) return 0; /* No WHERE clause */
if( pSub->pLimit ) return 0; /* No LIMIT clause */
if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */
pSub = pSub->pPrior; /* Repeat over compound */
}while( pSub );
/* If we reach this point then it is OK to perform the transformation */
db = pParse->db;
pCount = pExpr;
pExpr = 0;
pSub = p->pSrc->a[0].pSelect;
p->pSrc->a[0].pSelect = 0;
sqlite3SrcListDelete(db, p->pSrc);
p->pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*p->pSrc));
while( pSub ){
Expr *pTerm;
pPrior = pSub->pPrior;
pSub->pPrior = 0;
pSub->pNext = 0;
pSub->selFlags |= SF_Aggregate;
pSub->selFlags &= ~SF_Compound;
pSub->nSelectRow = 0;
sqlite3ExprListDelete(db, pSub->pEList);
pTerm = pPrior ? sqlite3ExprDup(db, pCount, 0) : pCount;
pSub->pEList = sqlite3ExprListAppend(pParse, 0, pTerm);
pTerm = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
sqlite3PExprAddSelect(pParse, pTerm, pSub);
if( pExpr==0 ){
pExpr = pTerm;
}else{
pExpr = sqlite3PExpr(pParse, TK_PLUS, pTerm, pExpr);
}
pSub = pPrior;
}
p->pEList->a[0].pExpr = pExpr;
p->selFlags &= ~SF_Aggregate;
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x400 ){
SELECTTRACE(0x400,pParse,p,("After count-of-view optimization:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
return 1;
}
#endif /* SQLITE_COUNTOFVIEW_OPTIMIZATION */
/*
** Generate code for the SELECT statement given in the p argument.
**
** The results are returned according to the SelectDest structure.
** See comments in sqliteInt.h for further information.
**
** This routine returns the number of errors. If any errors are
** encountered, then an appropriate error message is left in
** pParse->zErrMsg.
**
** This routine does NOT free the Select structure passed in. The
** calling function needs to do that.
*/
int sqlite3Select(
Parse *pParse, /* The parser context */
Select *p, /* The SELECT statement being coded. */
SelectDest *pDest /* What to do with the query results */
){
int i, j; /* Loop counters */
WhereInfo *pWInfo; /* Return from sqlite3WhereBegin() */
Vdbe *v; /* The virtual machine under construction */
int isAgg; /* True for select lists like "count(*)" */
ExprList *pEList = 0; /* List of columns to extract. */
SrcList *pTabList; /* List of tables to select from */
Expr *pWhere; /* The WHERE clause. May be NULL */
ExprList *pGroupBy; /* The GROUP BY clause. May be NULL */
Expr *pHaving; /* The HAVING clause. May be NULL */
int rc = 1; /* Value to return from this function */
DistinctCtx sDistinct; /* Info on how to code the DISTINCT keyword */
SortCtx sSort; /* Info on how to code the ORDER BY clause */
AggInfo sAggInfo; /* Information used by aggregate queries */
int iEnd; /* Address of the end of the query */
sqlite3 *db; /* The database connection */
ExprList *pMinMaxOrderBy = 0; /* Added ORDER BY for min/max queries */
u8 minMaxFlag; /* Flag for min/max queries */
db = pParse->db;
v = sqlite3GetVdbe(pParse);
if( p==0 || db->mallocFailed || pParse->nErr ){
return 1;
}
if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1;
memset(&sAggInfo, 0, sizeof(sAggInfo));
#if SELECTTRACE_ENABLED
SELECTTRACE(1,pParse,p, ("begin processing:\n"));
if( sqlite3SelectTrace & 0x100 ){
sqlite3TreeViewSelect(0, p, 0);
}
#endif
assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo );
assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo );
assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue );
assert( p->pOrderBy==0 || pDest->eDest!=SRT_Queue );
if( IgnorableOrderby(pDest) ){
assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union ||
pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard ||
pDest->eDest==SRT_Queue || pDest->eDest==SRT_DistFifo ||
pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_Fifo);
/* If ORDER BY makes no difference in the output then neither does
** DISTINCT so it can be removed too. */
sqlite3ExprListDelete(db, p->pOrderBy);
p->pOrderBy = 0;
p->selFlags &= ~SF_Distinct;
}
sqlite3SelectPrep(pParse, p, 0);
if( pParse->nErr || db->mallocFailed ){
goto select_end;
}
assert( p->pEList!=0 );
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x104 ){
SELECTTRACE(0x104,pParse,p, ("after name resolution:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
if( pDest->eDest==SRT_Output ){
generateColumnNames(pParse, p);
}
#ifndef SQLITE_OMIT_WINDOWFUNC
if( sqlite3WindowRewrite(pParse, p) ){
goto select_end;
}
#if SELECTTRACE_ENABLED
if( p->pWin && (sqlite3SelectTrace & 0x108)!=0 ){
SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
#endif /* SQLITE_OMIT_WINDOWFUNC */
pTabList = p->pSrc;
isAgg = (p->selFlags & SF_Aggregate)!=0;
memset(&sSort, 0, sizeof(sSort));
sSort.pOrderBy = p->pOrderBy;
/* Try various optimizations (flattening subqueries, and strength
** reduction of join operators) that fold terms of the FROM clause up
** into the main query.
*/
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
for(i=0; !p->pPrior && i<pTabList->nSrc; i++){
struct SrcList_item *pItem = &pTabList->a[i];
Select *pSub = pItem->pSelect;
Table *pTab = pItem->pTab;
/* Convert LEFT JOIN into JOIN if there are terms of the right table
** of the LEFT JOIN used in the WHERE clause.
*/
if( (pItem->fg.jointype & JT_LEFT)!=0
&& sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor)
&& OptimizationEnabled(db, SQLITE_SimplifyJoin)
){
SELECTTRACE(0x100,pParse,p,
("LEFT-JOIN simplifies to JOIN on term %d\n",i));
pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER);
unsetJoinExpr(p->pWhere, pItem->iCursor);
}
/* No further action if this term of the FROM clause is not a subquery */
if( pSub==0 ) continue;
/* Catch mismatch in the declared columns of a view and the number of
** columns in the SELECT on the RHS */
if( pTab->nCol!=pSub->pEList->nExpr ){
sqlite3ErrorMsg(pParse, "expected %d columns for '%s' but got %d",
pTab->nCol, pTab->zName, pSub->pEList->nExpr);
goto select_end;
}
/* Do not try to flatten an aggregate subquery.
**
** Flattening an aggregate subquery is only possible if the outer query
** is not a join. But if the outer query is not a join, then the subquery
** will be implemented as a co-routine and there is no advantage to
** flattening in that case.
*/
if( (pSub->selFlags & SF_Aggregate)!=0 ) continue;
assert( pSub->pGroupBy==0 );
/* If the outer query contains a "complex" result set (that is,
** if the result set of the outer query uses functions or subqueries)
** and if the subquery contains an ORDER BY clause and if
** it will be implemented as a co-routine, then do not flatten. This
** restriction allows SQL constructs like this:
**
** SELECT expensive_function(x)
** FROM (SELECT x FROM tab ORDER BY y LIMIT 10);
**
** The expensive_function() is only computed on the 10 rows that
** are output, rather than every row of the table.
**
** The requirement that the outer query have a complex result set
** means that flattening does occur on simpler SQL constructs without
** the expensive_function() like:
**
** SELECT x FROM (SELECT x FROM tab ORDER BY y LIMIT 10);
*/
if( pSub->pOrderBy!=0
&& i==0
&& (p->selFlags & SF_ComplexResult)!=0
&& (pTabList->nSrc==1
|| (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0)
){
continue;
}
if( flattenSubquery(pParse, p, i, isAgg) ){
if( pParse->nErr ) goto select_end;
/* This subquery can be absorbed into its parent. */
i = -1;
}
pTabList = p->pSrc;
if( db->mallocFailed ) goto select_end;
if( !IgnorableOrderby(pDest) ){
sSort.pOrderBy = p->pOrderBy;
}
}
#endif
#ifndef SQLITE_OMIT_COMPOUND_SELECT
/* Handle compound SELECT statements using the separate multiSelect()
** procedure.
*/
if( p->pPrior ){
rc = multiSelect(pParse, p, pDest);
#if SELECTTRACE_ENABLED
SELECTTRACE(0x1,pParse,p,("end compound-select processing\n"));
if( (sqlite3SelectTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
sqlite3TreeViewSelect(0, p, 0);
}
#endif
if( p->pNext==0 ) ExplainQueryPlanPop(pParse);
return rc;
}
#endif
/* Do the WHERE-clause constant propagation optimization if this is
** a join. No need to spend time on this operation for non-join queries
** as the equivalent optimization will be handled by the query planner in
** sqlite3WhereBegin().
*/
if( pTabList->nSrc>1
&& OptimizationEnabled(db, SQLITE_PropagateConst)
&& propagateConstants(pParse, p)
){
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x100 ){
SELECTTRACE(0x100,pParse,p,("After constant propagation:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
}else{
SELECTTRACE(0x100,pParse,p,("Constant propagation not helpful\n"));
}
#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION
if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView)
&& countOfViewOptimization(pParse, p)
){
if( db->mallocFailed ) goto select_end;
pEList = p->pEList;
pTabList = p->pSrc;
}
#endif
/* For each term in the FROM clause, do two things:
** (1) Authorize unreferenced tables
** (2) Generate code for all sub-queries
*/
for(i=0; i<pTabList->nSrc; i++){
struct SrcList_item *pItem = &pTabList->a[i];
SelectDest dest;
Select *pSub;
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
const char *zSavedAuthContext;
#endif
/* Issue SQLITE_READ authorizations with a fake column name for any
** tables that are referenced but from which no values are extracted.
** Examples of where these kinds of null SQLITE_READ authorizations
** would occur:
**
** SELECT count(*) FROM t1; -- SQLITE_READ t1.""
** SELECT t1.* FROM t1, t2; -- SQLITE_READ t2.""
**
** The fake column name is an empty string. It is possible for a table to
** have a column named by the empty string, in which case there is no way to
** distinguish between an unreferenced table and an actual reference to the
** "" column. The original design was for the fake column name to be a NULL,
** which would be unambiguous. But legacy authorization callbacks might
** assume the column name is non-NULL and segfault. The use of an empty
** string for the fake column name seems safer.
*/
if( pItem->colUsed==0 && pItem->zName!=0 ){
sqlite3AuthCheck(pParse, SQLITE_READ, pItem->zName, "", pItem->zDatabase);
}
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/* Generate code for all sub-queries in the FROM clause
*/
pSub = pItem->pSelect;
if( pSub==0 ) continue;
/* The code for a subquery should only be generated once, though it is
** technically harmless for it to be generated multiple times. The
** following assert() will detect if something changes to cause
** the same subquery to be coded multiple times, as a signal to the
** developers to try to optimize the situation.
**
** Update 2019-07-24:
** See ticket https://sqlite.org/src/tktview/c52b09c7f38903b1311cec40.
** The dbsqlfuzz fuzzer found a case where the same subquery gets
** coded twice. So this assert() now becomes a testcase(). It should
** be very rare, though.
*/
testcase( pItem->addrFillSub!=0 );
/* Increment Parse.nHeight by the height of the largest expression
** tree referred to by this, the parent select. The child select
** may contain expression trees of at most
** (SQLITE_MAX_EXPR_DEPTH-Parse.nHeight) height. This is a bit
** more conservative than necessary, but much easier than enforcing
** an exact limit.
*/
pParse->nHeight += sqlite3SelectExprHeight(p);
/* Make copies of constant WHERE-clause terms in the outer query down
** inside the subquery. This can help the subquery to run more efficiently.
*/
if( OptimizationEnabled(db, SQLITE_PushDown)
&& pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor,
(pItem->fg.jointype & JT_OUTER)!=0)
){
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x100 ){
SELECTTRACE(0x100,pParse,p,
("After WHERE-clause push-down into subquery %d:\n", pSub->selId));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
}else{
SELECTTRACE(0x100,pParse,p,("Push-down not possible\n"));
}
zSavedAuthContext = pParse->zAuthContext;
pParse->zAuthContext = pItem->zName;
/* Generate code to implement the subquery
**
** The subquery is implemented as a co-routine if the subquery is
** guaranteed to be the outer loop (so that it does not need to be
** computed more than once)
**
** TODO: Are there other reasons besides (1) to use a co-routine
** implementation?
*/
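/* For example (illustrative only): in
**
**     SELECT * FROM (SELECT a, b FROM t1 ORDER BY a LIMIT 10) WHERE b>5
**
** the subquery is the only FROM-clause term, so it is coded as a
** co-routine that yields one row per invocation rather than being
** materialized into an ephemeral table first.
*/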
if( i==0
&& (pTabList->nSrc==1
|| (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0) /* (1) */
){
/* Implement a co-routine that will return a single row of the result
** set on each invocation.
*/
int addrTop = sqlite3VdbeCurrentAddr(v)+1;
pItem->regReturn = ++pParse->nMem;
sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop);
VdbeComment((v, "%s", pItem->pTab->zName));
pItem->addrFillSub = addrTop;
sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn);
ExplainQueryPlan((pParse, 1, "CO-ROUTINE %u", pSub->selId));
sqlite3Select(pParse, pSub, &dest);
pItem->pTab->nRowLogEst = pSub->nSelectRow;
pItem->fg.viaCoroutine = 1;
pItem->regResult = dest.iSdst;
sqlite3VdbeEndCoroutine(v, pItem->regReturn);
sqlite3VdbeJumpHere(v, addrTop-1);
sqlite3ClearTempRegCache(pParse);
}else{
/* Generate a subroutine that will fill an ephemeral table with
** the content of this subquery. pItem->addrFillSub will point
** to the address of the generated subroutine. pItem->regReturn
** is a register allocated to hold the subroutine return address
*/
int topAddr;
int onceAddr = 0;
int retAddr;
struct SrcList_item *pPrior;
testcase( pItem->addrFillSub==0 ); /* Ticket c52b09c7f38903b1311 */
pItem->regReturn = ++pParse->nMem;
topAddr = sqlite3VdbeAddOp2(v, OP_Integer, 0, pItem->regReturn);
pItem->addrFillSub = topAddr+1;
if( pItem->fg.isCorrelated==0 ){
/* If the subquery is not correlated and if we are not inside of
** a trigger, then we only need to compute the value of the subquery
** once. */
onceAddr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
VdbeComment((v, "materialize \"%s\"", pItem->pTab->zName));
}else{
VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName));
}
pPrior = isSelfJoinView(pTabList, pItem);
if( pPrior ){
sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor);
assert( pPrior->pSelect!=0 );
pSub->nSelectRow = pPrior->pSelect->nSelectRow;
}else{
sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor);
ExplainQueryPlan((pParse, 1, "MATERIALIZE %u", pSub->selId));
sqlite3Select(pParse, pSub, &dest);
}
pItem->pTab->nRowLogEst = pSub->nSelectRow;
if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr);
retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn);
VdbeComment((v, "end %s", pItem->pTab->zName));
sqlite3VdbeChangeP1(v, topAddr, retAddr);
sqlite3ClearTempRegCache(pParse);
}
if( db->mallocFailed ) goto select_end;
pParse->nHeight -= sqlite3SelectExprHeight(p);
pParse->zAuthContext = zSavedAuthContext;
#endif
}
/* Various elements of the SELECT copied into local variables for
** convenience */
pEList = p->pEList;
pWhere = p->pWhere;
pGroupBy = p->pGroupBy;
pHaving = p->pHaving;
sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0;
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x400 ){
SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
/* If the query is DISTINCT with an ORDER BY but is not an aggregate, and
** if the select-list is the same as the ORDER BY list, then this query
** can be rewritten as a GROUP BY. In other words, this:
**
** SELECT DISTINCT xyz FROM ... ORDER BY xyz
**
** is transformed to:
**
** SELECT xyz FROM ... GROUP BY xyz ORDER BY xyz
**
** The second form is preferred as a single index (or temp-table) may be
** used for both the ORDER BY and DISTINCT processing. As originally
** written the query must use a temp-table for at least one of the ORDER
** BY and DISTINCT, and an index or separate temp-table for the other.
*/
if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct
&& sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0
){
p->selFlags &= ~SF_Distinct;
pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0);
/* Notice that even though SF_Distinct has been cleared from p->selFlags,
** the sDistinct.isTnct is still set. Hence, isTnct represents the
** original setting of the SF_Distinct flag, not the current setting */
assert( sDistinct.isTnct );
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x400 ){
SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
}
/* If there is an ORDER BY clause, then create an ephemeral index to
** do the sorting. But this sorting ephemeral index might end up
** being unused if the data can be extracted in pre-sorted order.
** If that is the case, then the OP_OpenEphemeral instruction will be
** changed to an OP_Noop once we figure out that the sorting index is
** not needed. The sSort.addrSortIndex variable is used to facilitate
** that change.
*/
if( sSort.pOrderBy ){
KeyInfo *pKeyInfo;
pKeyInfo = sqlite3KeyInfoFromExprList(
pParse, sSort.pOrderBy, 0, pEList->nExpr);
sSort.iECursor = pParse->nTab++;
sSort.addrSortIndex =
sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
sSort.iECursor, sSort.pOrderBy->nExpr+1+pEList->nExpr, 0,
(char*)pKeyInfo, P4_KEYINFO
);
}else{
sSort.addrSortIndex = -1;
}
/* If the output is destined for a temporary table, open that table.
*/
if( pDest->eDest==SRT_EphemTab ){
sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr);
}
/* Set the limiter.
*/
iEnd = sqlite3VdbeMakeLabel(pParse);
if( (p->selFlags & SF_FixedLimit)==0 ){
p->nSelectRow = 320; /* 4 billion rows */
}
computeLimitRegisters(pParse, p, iEnd);
if( p->iLimit==0 && sSort.addrSortIndex>=0 ){
sqlite3VdbeChangeOpcode(v, sSort.addrSortIndex, OP_SorterOpen);
sSort.sortFlags |= SORTFLAG_UseSorter;
}
/* Open an ephemeral index to use for the distinct set.
*/
if( p->selFlags & SF_Distinct ){
sDistinct.tabTnct = pParse->nTab++;
sDistinct.addrTnct = sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
sDistinct.tabTnct, 0, 0,
(char*)sqlite3KeyInfoFromExprList(pParse, p->pEList,0,0),
P4_KEYINFO);
sqlite3VdbeChangeP5(v, BTREE_UNORDERED);
sDistinct.eTnctType = WHERE_DISTINCT_UNORDERED;
}else{
sDistinct.eTnctType = WHERE_DISTINCT_NOOP;
}
if( !isAgg && pGroupBy==0 ){
/* No aggregate functions and no GROUP BY clause */
u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0)
| (p->selFlags & SF_FixedLimit);
#ifndef SQLITE_OMIT_WINDOWFUNC
Window *pWin = p->pWin; /* Master window object (or NULL) */
if( pWin ){
sqlite3WindowCodeInit(pParse, pWin);
}
#endif
assert( WHERE_USE_LIMIT==SF_FixedLimit );
/* Begin the database scan. */
SELECTTRACE(1,pParse,p,("WhereBegin\n"));
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy,
p->pEList, wctrlFlags, p->nSelectRow);
if( pWInfo==0 ) goto select_end;
if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){
p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo);
}
if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){
sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo);
}
if( sSort.pOrderBy ){
sSort.nOBSat = sqlite3WhereIsOrdered(pWInfo);
sSort.labelOBLopt = sqlite3WhereOrderByLimitOptLabel(pWInfo);
if( sSort.nOBSat==sSort.pOrderBy->nExpr ){
sSort.pOrderBy = 0;
}
}
/* If sorting index that was created by a prior OP_OpenEphemeral
** instruction ended up not being needed, then change the OP_OpenEphemeral
** into an OP_Noop.
*/
if( sSort.addrSortIndex>=0 && sSort.pOrderBy==0 ){
sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex);
}
assert( p->pEList==pEList );
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pWin ){
int addrGosub = sqlite3VdbeMakeLabel(pParse);
int iCont = sqlite3VdbeMakeLabel(pParse);
int iBreak = sqlite3VdbeMakeLabel(pParse);
int regGosub = ++pParse->nMem;
sqlite3WindowCodeStep(pParse, p, pWInfo, regGosub, addrGosub);
sqlite3VdbeAddOp2(v, OP_Goto, 0, iBreak);
sqlite3VdbeResolveLabel(v, addrGosub);
VdbeNoopComment((v, "inner-loop subroutine"));
sSort.labelOBLopt = 0;
selectInnerLoop(pParse, p, -1, &sSort, &sDistinct, pDest, iCont, iBreak);
sqlite3VdbeResolveLabel(v, iCont);
sqlite3VdbeAddOp1(v, OP_Return, regGosub);
VdbeComment((v, "end inner-loop subroutine"));
sqlite3VdbeResolveLabel(v, iBreak);
}else
#endif /* SQLITE_OMIT_WINDOWFUNC */
{
/* Use the standard inner loop. */
selectInnerLoop(pParse, p, -1, &sSort, &sDistinct, pDest,
sqlite3WhereContinueLabel(pWInfo),
sqlite3WhereBreakLabel(pWInfo));
/* End the database scan loop.
*/
sqlite3WhereEnd(pWInfo);
}
}else{
/* This is the case when there exist aggregate functions or a GROUP BY
** clause, or both */
NameContext sNC; /* Name context for processing aggregate information */
int iAMem; /* First Mem address for storing current GROUP BY */
int iBMem; /* First Mem address for previous GROUP BY */
int iUseFlag; /* Mem address holding flag indicating that at least
** one row of the input to the aggregator has been
** processed */
int iAbortFlag; /* Mem address which causes query abort if positive */
int groupBySort; /* Rows come from source in GROUP BY order */
int addrEnd; /* End of processing for this SELECT */
int sortPTab = 0; /* Pseudotable used to decode sorting results */
int sortOut = 0; /* Output register from the sorter */
int orderByGrp = 0; /* True if the GROUP BY and ORDER BY are the same */
/* Remove any and all aliases between the result set and the
** GROUP BY clause.
*/
if( pGroupBy ){
int k; /* Loop counter */
struct ExprList_item *pItem; /* For looping over expression in a list */
for(k=p->pEList->nExpr, pItem=p->pEList->a; k>0; k--, pItem++){
pItem->u.x.iAlias = 0;
}
for(k=pGroupBy->nExpr, pItem=pGroupBy->a; k>0; k--, pItem++){
pItem->u.x.iAlias = 0;
}
assert( 66==sqlite3LogEst(100) );
if( p->nSelectRow>66 ) p->nSelectRow = 66;
/* If there is both a GROUP BY and an ORDER BY clause and they are
** identical, then it may be possible to disable the ORDER BY clause
** on the grounds that the GROUP BY will cause elements to come out
** in the correct order. It also may not - the GROUP BY might use a
** database index that causes rows to be grouped together as required
** but not actually sorted. Either way, record the fact that the
** ORDER BY and GROUP BY clauses are the same by setting the orderByGrp
** variable. */
if( sSort.pOrderBy && pGroupBy->nExpr==sSort.pOrderBy->nExpr ){
int ii;
/* The GROUP BY processing doesn't care whether rows are delivered in
** ASC or DESC order - only that each group is returned contiguously.
** So set the ASC/DESC flags in the GROUP BY to match those in the
** ORDER BY to maximize the chances of rows being delivered in an
** order that makes the ORDER BY redundant. */
for(ii=0; ii<pGroupBy->nExpr; ii++){
u8 sortFlags = sSort.pOrderBy->a[ii].sortFlags & KEYINFO_ORDER_DESC;
pGroupBy->a[ii].sortFlags = sortFlags;
}
if( sqlite3ExprListCompare(pGroupBy, sSort.pOrderBy, -1)==0 ){
orderByGrp = 1;
}
}
}else{
assert( 0==sqlite3LogEst(1) );
p->nSelectRow = 0;
}
/* Create a label to jump to when we want to abort the query */
addrEnd = sqlite3VdbeMakeLabel(pParse);
/* Convert TK_COLUMN nodes into TK_AGG_COLUMN and make entries in
** sAggInfo for all TK_AGG_FUNCTION nodes in expressions of the
** SELECT statement.
*/
memset(&sNC, 0, sizeof(sNC));
sNC.pParse = pParse;
sNC.pSrcList = pTabList;
sNC.uNC.pAggInfo = &sAggInfo;
VVA_ONLY( sNC.ncFlags = NC_UAggInfo; )
sAggInfo.mnReg = pParse->nMem+1;
sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr : 0;
sAggInfo.pGroupBy = pGroupBy;
sqlite3ExprAnalyzeAggList(&sNC, pEList);
sqlite3ExprAnalyzeAggList(&sNC, sSort.pOrderBy);
if( pHaving ){
if( pGroupBy ){
assert( pWhere==p->pWhere );
assert( pHaving==p->pHaving );
assert( pGroupBy==p->pGroupBy );
havingToWhere(pParse, p);
pWhere = p->pWhere;
}
sqlite3ExprAnalyzeAggregates(&sNC, pHaving);
}
sAggInfo.nAccumulator = sAggInfo.nColumn;
if( p->pGroupBy==0 && p->pHaving==0 && sAggInfo.nFunc==1 ){
minMaxFlag = minMaxQuery(db, sAggInfo.aFunc[0].pExpr, &pMinMaxOrderBy);
}else{
minMaxFlag = WHERE_ORDERBY_NORMAL;
}
for(i=0; i<sAggInfo.nFunc; i++){
Expr *pExpr = sAggInfo.aFunc[i].pExpr;
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
sNC.ncFlags |= NC_InAggFunc;
sqlite3ExprAnalyzeAggList(&sNC, pExpr->x.pList);
#ifndef SQLITE_OMIT_WINDOWFUNC
assert( !IsWindowFunc(pExpr) );
if( ExprHasProperty(pExpr, EP_WinFunc) ){
sqlite3ExprAnalyzeAggregates(&sNC, pExpr->y.pWin->pFilter);
}
#endif
sNC.ncFlags &= ~NC_InAggFunc;
}
sAggInfo.mxReg = pParse->nMem;
if( db->mallocFailed ) goto select_end;
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x400 ){
int ii;
SELECTTRACE(0x400,pParse,p,("After aggregate analysis:\n"));
sqlite3TreeViewSelect(0, p, 0);
for(ii=0; ii<sAggInfo.nColumn; ii++){
sqlite3DebugPrintf("agg-column[%d] iMem=%d\n",
ii, sAggInfo.aCol[ii].iMem);
sqlite3TreeViewExpr(0, sAggInfo.aCol[ii].pExpr, 0);
}
for(ii=0; ii<sAggInfo.nFunc; ii++){
sqlite3DebugPrintf("agg-func[%d]: iMem=%d\n",
ii, sAggInfo.aFunc[ii].iMem);
sqlite3TreeViewExpr(0, sAggInfo.aFunc[ii].pExpr, 0);
}
}
#endif
/* Processing for aggregates with GROUP BY is very different and
** much more complex than aggregates without a GROUP BY.
*/
if( pGroupBy ){
KeyInfo *pKeyInfo; /* Keying information for the group by clause */
int addr1; /* A-vs-B comparison jump */
int addrOutputRow; /* Start of subroutine that outputs a result row */
int regOutputRow; /* Return address register for output subroutine */
int addrSetAbort; /* Set the abort flag and return */
int addrTopOfLoop; /* Top of the input loop */
int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */
int addrReset; /* Subroutine for resetting the accumulator */
int regReset; /* Return address register for reset subroutine */
/* If there is a GROUP BY clause we might need a sorting index to
** implement it. Allocate that sorting index now. If it turns out
** that we do not need it after all, the OP_SorterOpen instruction
** will be converted into a Noop.
*/
sAggInfo.sortingIdx = pParse->nTab++;
pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pGroupBy,0,sAggInfo.nColumn);
addrSortingIdx = sqlite3VdbeAddOp4(v, OP_SorterOpen,
sAggInfo.sortingIdx, sAggInfo.nSortingColumn,
0, (char*)pKeyInfo, P4_KEYINFO);
/* Initialize memory locations used by GROUP BY aggregate processing
*/
iUseFlag = ++pParse->nMem;
iAbortFlag = ++pParse->nMem;
regOutputRow = ++pParse->nMem;
addrOutputRow = sqlite3VdbeMakeLabel(pParse);
regReset = ++pParse->nMem;
addrReset = sqlite3VdbeMakeLabel(pParse);
iAMem = pParse->nMem + 1;
pParse->nMem += pGroupBy->nExpr;
iBMem = pParse->nMem + 1;
pParse->nMem += pGroupBy->nExpr;
sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag);
VdbeComment((v, "clear abort flag"));
sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1);
/* Begin a loop that will extract all source rows in GROUP BY order.
** This might involve two separate loops with an OP_Sort in between, or
** it might be a single loop that uses an index to extract information
** in the right order to begin with.
*/
sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
SELECTTRACE(1,pParse,p,("WhereBegin\n"));
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0,
WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0
);
if( pWInfo==0 ) goto select_end;
if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){
/* The optimizer is able to deliver rows in group by order so
** we do not have to sort. The OP_SorterOpen instruction coded above
** will be cancelled later because we still need to use its pKeyInfo
*/
groupBySort = 0;
}else{
/* Rows are coming out in undetermined order. We have to push
** each row into a sorting index, terminate the first loop,
** then loop over the sorting index in order to get the output
** in sorted order
*/
int regBase;
int regRecord;
int nCol;
int nGroupBy;
explainTempTable(pParse,
(sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ?
"DISTINCT" : "GROUP BY");
groupBySort = 1;
nGroupBy = pGroupBy->nExpr;
nCol = nGroupBy;
j = nGroupBy;
for(i=0; i<sAggInfo.nColumn; i++){
if( sAggInfo.aCol[i].iSorterColumn>=j ){
nCol++;
j++;
}
}
regBase = sqlite3GetTempRange(pParse, nCol);
sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0, 0);
j = nGroupBy;
for(i=0; i<sAggInfo.nColumn; i++){
struct AggInfo_col *pCol = &sAggInfo.aCol[i];
if( pCol->iSorterColumn>=j ){
int r1 = j + regBase;
sqlite3ExprCodeGetColumnOfTable(v,
pCol->pTab, pCol->iTable, pCol->iColumn, r1);
j++;
}
}
regRecord = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord);
sqlite3VdbeAddOp2(v, OP_SorterInsert, sAggInfo.sortingIdx, regRecord);
sqlite3ReleaseTempReg(pParse, regRecord);
sqlite3ReleaseTempRange(pParse, regBase, nCol);
sqlite3WhereEnd(pWInfo);
sAggInfo.sortingIdxPTab = sortPTab = pParse->nTab++;
sortOut = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol);
sqlite3VdbeAddOp2(v, OP_SorterSort, sAggInfo.sortingIdx, addrEnd);
VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v);
sAggInfo.useSortingIdx = 1;
}
/* If the index or temporary table used by the GROUP BY sort
** will naturally deliver rows in the order required by the ORDER BY
** clause, cancel the ephemeral table open coded earlier.
**
** This is an optimization - the correct answer should result regardless.
** Use the SQLITE_GroupByOrder flag with SQLITE_TESTCTRL_OPTIMIZER to
** disable this optimization for testing purposes. */
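/* For example (illustrative only), a test harness might disable it with
** something like:
**
**     sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, db,
**                          SQLITE_GroupByOrder);
*/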
if( orderByGrp && OptimizationEnabled(db, SQLITE_GroupByOrder)
&& (groupBySort || sqlite3WhereIsSorted(pWInfo))
){
sSort.pOrderBy = 0;
sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex);
}
/* Evaluate the current GROUP BY terms and store in b0, b1, b2...
** (b0 is memory location iBMem+0, b1 is iBMem+1, and so forth)
** Then compare the current GROUP BY terms against the GROUP BY terms
** from the previous row currently stored in a0, a1, a2...
*/
addrTopOfLoop = sqlite3VdbeCurrentAddr(v);
if( groupBySort ){
sqlite3VdbeAddOp3(v, OP_SorterData, sAggInfo.sortingIdx,
sortOut, sortPTab);
}
for(j=0; j<pGroupBy->nExpr; j++){
if( groupBySort ){
sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j);
}else{
sAggInfo.directMode = 1;
sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j);
}
}
sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr,
(char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO);
addr1 = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp3(v, OP_Jump, addr1+1, 0, addr1+1); VdbeCoverage(v);
/* Generate code that runs whenever the GROUP BY changes.
** Changes in the GROUP BY are detected by the previous code
** block. If there were no changes, this block is skipped.
**
** This code copies current group by terms in b0,b1,b2,...
** over to a0,a1,a2. It then calls the output subroutine
** and resets the aggregate accumulator registers in preparation
** for the next GROUP BY batch.
*/
sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr);
sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow);
VdbeComment((v, "output one row"));
sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v);
VdbeComment((v, "check abort flag"));
sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
VdbeComment((v, "reset accumulator"));
/* Update the aggregate accumulators based on the content of
** the current row
*/
sqlite3VdbeJumpHere(v, addr1);
updateAccumulator(pParse, iUseFlag, &sAggInfo);
sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag);
VdbeComment((v, "indicate data in accumulator"));
/* End of the loop
*/
if( groupBySort ){
sqlite3VdbeAddOp2(v, OP_SorterNext, sAggInfo.sortingIdx, addrTopOfLoop);
VdbeCoverage(v);
}else{
sqlite3WhereEnd(pWInfo);
sqlite3VdbeChangeToNoop(v, addrSortingIdx);
}
/* Output the final row of result
*/
sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow);
VdbeComment((v, "output final row"));
/* Jump over the subroutines
*/
sqlite3VdbeGoto(v, addrEnd);
/* Generate a subroutine that outputs a single row of the result
** set. This subroutine first looks at the iUseFlag. If iUseFlag
** is less than or equal to zero, the subroutine is a no-op. If
** the processing calls for the query to abort, this subroutine
** increments the iAbortFlag memory location before returning in
** order to signal the caller to abort.
*/
addrSetAbort = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp2(v, OP_Integer, 1, iAbortFlag);
VdbeComment((v, "set abort flag"));
sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
sqlite3VdbeResolveLabel(v, addrOutputRow);
addrOutputRow = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp2(v, OP_IfPos, iUseFlag, addrOutputRow+2);
VdbeCoverage(v);
VdbeComment((v, "Groupby result generator entry point"));
sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
finalizeAggFunctions(pParse, &sAggInfo);
sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, SQLITE_JUMPIFNULL);
selectInnerLoop(pParse, p, -1, &sSort,
&sDistinct, pDest,
addrOutputRow+1, addrSetAbort);
sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
VdbeComment((v, "end groupby result generator"));
/* Generate a subroutine that will reset the group-by accumulator
*/
sqlite3VdbeResolveLabel(v, addrReset);
resetAccumulator(pParse, &sAggInfo);
sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag);
VdbeComment((v, "indicate accumulator empty"));
sqlite3VdbeAddOp1(v, OP_Return, regReset);
} /* endif pGroupBy. Begin aggregate queries without GROUP BY: */
else {
#ifndef SQLITE_OMIT_BTREECOUNT
Table *pTab;
if( (pTab = isSimpleCount(p, &sAggInfo))!=0 ){
/* If isSimpleCount() returns a pointer to a Table structure, then
** the SQL statement is of the form:
**
** SELECT count(*) FROM <tbl>
**
** where the Table structure returned represents table <tbl>.
**
** This statement is so common that it is optimized specially. The
** OP_Count instruction is executed either on the intkey table that
** contains the data for table <tbl> or on one of its indexes. It
** is better to execute the op on an index, as indexes are almost
** always spread across fewer pages than their corresponding tables.
*/
const int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
const int iCsr = pParse->nTab++; /* Cursor to scan b-tree */
Index *pIdx; /* Iterator variable */
KeyInfo *pKeyInfo = 0; /* Keyinfo for scanned index */
Index *pBest = 0; /* Best index found so far */
int iRoot = pTab->tnum; /* Root page of scanned b-tree */
sqlite3CodeVerifySchema(pParse, iDb);
sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
/* Search for the index that has the lowest scan cost.
**
** (2011-04-15) Do not do a full scan of an unordered index.
**
** (2013-10-03) Do not count the entries in a partial index.
**
** In practice the KeyInfo structure will not be used. It is only
** passed to keep OP_OpenRead happy.
*/
if( !HasRowid(pTab) ) pBest = sqlite3PrimaryKeyIndex(pTab);
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
if( pIdx->bUnordered==0
&& pIdx->szIdxRow<pTab->szTabRow
&& pIdx->pPartIdxWhere==0
&& (!pBest || pIdx->szIdxRow<pBest->szIdxRow)
){
pBest = pIdx;
}
}
if( pBest ){
iRoot = pBest->tnum;
pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pBest);
}
/* Open a read-only cursor, execute the OP_Count, close the cursor. */
sqlite3VdbeAddOp4Int(v, OP_OpenRead, iCsr, iRoot, iDb, 1);
if( pKeyInfo ){
sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO);
}
sqlite3VdbeAddOp2(v, OP_Count, iCsr, sAggInfo.aFunc[0].iMem);
sqlite3VdbeAddOp1(v, OP_Close, iCsr);
explainSimpleCount(pParse, pTab, pBest);
}else
#endif /* SQLITE_OMIT_BTREECOUNT */
{
int regAcc = 0; /* "populate accumulators" flag */
/* If there are accumulator registers but no min() or max() functions
** without FILTER clauses, allocate register regAcc. Register regAcc
** will contain 0 the first time the inner loop runs, and 1 thereafter.
** The code generated by updateAccumulator() uses this to ensure
** that the accumulator registers are (a) updated only once if
** there are no min() or max() functions or (b) always updated for the
** first row visited by the aggregate, so that they are updated at
** least once even if the FILTER clause means the min() or max()
** function visits zero rows. */
if( sAggInfo.nAccumulator ){
for(i=0; i<sAggInfo.nFunc; i++){
if( ExprHasProperty(sAggInfo.aFunc[i].pExpr, EP_WinFunc) ) continue;
if( sAggInfo.aFunc[i].pFunc->funcFlags&SQLITE_FUNC_NEEDCOLL ) break;
}
if( i==sAggInfo.nFunc ){
regAcc = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Integer, 0, regAcc);
}
}
/* This case runs if the aggregate has no GROUP BY clause. The
** processing is much simpler since there is only a single row
** of output.
*/
assert( p->pGroupBy==0 );
resetAccumulator(pParse, &sAggInfo);
/* If this query is a candidate for the min/max optimization, then
** minMaxFlag will have been previously set to either
** WHERE_ORDERBY_MIN or WHERE_ORDERBY_MAX and pMinMaxOrderBy will
** be an appropriate ORDER BY expression for the optimization.
*/
assert( minMaxFlag==WHERE_ORDERBY_NORMAL || pMinMaxOrderBy!=0 );
assert( pMinMaxOrderBy==0 || pMinMaxOrderBy->nExpr==1 );
SELECTTRACE(1,pParse,p,("WhereBegin\n"));
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy,
0, minMaxFlag, 0);
if( pWInfo==0 ){
goto select_end;
}
updateAccumulator(pParse, regAcc, &sAggInfo);
if( regAcc ) sqlite3VdbeAddOp2(v, OP_Integer, 1, regAcc);
if( sqlite3WhereIsOrdered(pWInfo)>0 ){
sqlite3VdbeGoto(v, sqlite3WhereBreakLabel(pWInfo));
VdbeComment((v, "%s() by index",
(minMaxFlag==WHERE_ORDERBY_MIN?"min":"max")));
}
sqlite3WhereEnd(pWInfo);
finalizeAggFunctions(pParse, &sAggInfo);
}
sSort.pOrderBy = 0;
sqlite3ExprIfFalse(pParse, pHaving, addrEnd, SQLITE_JUMPIFNULL);
selectInnerLoop(pParse, p, -1, 0, 0,
pDest, addrEnd, addrEnd);
}
sqlite3VdbeResolveLabel(v, addrEnd);
} /* endif aggregate query */
if( sDistinct.eTnctType==WHERE_DISTINCT_UNORDERED ){
explainTempTable(pParse, "DISTINCT");
}
/* If there is an ORDER BY clause, then we need to sort the results
** and send them to the callback one by one.
*/
if( sSort.pOrderBy ){
explainTempTable(pParse,
sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY");
assert( p->pEList==pEList );
generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest);
}
/* Jump here to skip this query
*/
sqlite3VdbeResolveLabel(v, iEnd);
/* The SELECT has been coded. If there is an error in the Parse structure,
** set the return code to 1. Otherwise 0. */
rc = (pParse->nErr>0);
/* Control jumps to here if an error is encountered above, or upon
** successful coding of the SELECT.
*/
select_end:
sqlite3ExprListDelete(db, pMinMaxOrderBy);
sqlite3DbFree(db, sAggInfo.aCol);
sqlite3DbFree(db, sAggInfo.aFunc);
#if SELECTTRACE_ENABLED
SELECTTRACE(0x1,pParse,p,("end processing\n"));
if( (sqlite3SelectTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
sqlite3TreeViewSelect(0, p, 0);
}
#endif
ExplainQueryPlanPop(pParse);
return rc;
}
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth SCO sockets. */
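/*
 * Illustrative userspace usage (not part of this file; helper names such
 * as remote_bdaddr are assumptions):
 *
 *	int sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
 *	struct sockaddr_sco addr = { .sco_family = AF_BLUETOOTH };
 *
 *	bacpy(&addr.sco_bdaddr, &remote_bdaddr);
 *	connect(sk, (struct sockaddr *) &addr, sizeof(addr));
 */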
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
static bool disable_esco;
static const struct proto_ops sco_sock_ops;
static struct bt_sock_list sco_sk_list = {
.lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock)
};
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
/* ---- SCO timers ---- */
static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
sk->sk_err = ETIMEDOUT;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
sock_put(sk);
}
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
static void sco_sock_clear_timer(struct sock *sk)
{
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
}
/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
if (!conn)
return NULL;
spin_lock_init(&conn->lock);
hcon->sco_data = conn;
conn->hcon = hcon;
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
return conn;
}
static struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
sk = conn->sk;
sco_conn_unlock(conn);
return sk;
}
static int sco_conn_del(struct hci_conn *hcon, int err)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
if (!conn)
return 0;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
sk = sco_chan_get(conn);
if (sk) {
bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
bh_unlock_sock(sk);
sco_sock_kill(sk);
}
hcon->sco_data = NULL;
kfree(conn);
return 0;
}
static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
struct sock *parent)
{
int err = 0;
sco_conn_lock(conn);
if (conn->sk)
err = -EBUSY;
else
__sco_chan_add(conn, sk, parent);
sco_conn_unlock(conn);
return err;
}
static int sco_connect(struct sock *sk)
{
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err, type;
BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src);
if (!hdev)
return -EHOSTUNREACH;
hci_dev_lock(hdev);
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
(!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
err = -EOPNOTSUPP;
goto done;
}
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
sco_pi(sk)->setting);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_drop(hcon);
err = -ENOMEM;
goto done;
}
/* Update source addr of the socket */
bacpy(&sco_pi(sk)->src, &hcon->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
goto done;
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
} else {
sk->sk_state = BT_CONNECT;
sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
done:
hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err;
/* Check outgoing MTU */
if (len > conn->mtu)
return -EINVAL;
BT_DBG("sk %p len %d", sk, len);
skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
return len;
}
static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
if (!sk)
goto drop;
BT_DBG("sk %p len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
return;
drop:
kfree_skb(skb);
}
/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
struct sock *sk;
sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&sco_pi(sk)->src, ba))
return sk;
}
return NULL;
}
/* Find socket listening on source bdaddr.
* Returns closest match.
*/
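/* For example, a socket bound to the specific local adapter address
 * always wins over one bound to BDADDR_ANY; the BDADDR_ANY listener is
 * only returned when no exact match exists.
 */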
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
read_lock(&sco_sk_list.lock);
sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
if (!bacmp(&sco_pi(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY))
sk1 = sk;
}
read_unlock(&sco_sk_list.lock);
return sk ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
static void sco_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
sco_sock_close(sk);
sco_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void sco_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __sco_sock_close(struct sock *sk)
{
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
case BT_CONNECTED:
case BT_CONFIG:
if (sco_pi(sk)->conn->hcon) {
sk->sk_state = BT_DISCONN;
sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
hci_conn_drop(sco_pi(sk)->conn->hcon);
sco_pi(sk)->conn->hcon = NULL;
} else
sco_chan_del(sk, ECONNRESET);
break;
case BT_CONNECT2:
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
break;
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Must be called on unlocked socket. */
static void sco_sock_close(struct sock *sk)
{
sco_sock_clear_timer(sk);
lock_sock(sk);
__sco_sock_close(sk);
release_sock(sk);
sco_sock_kill(sk);
}
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
if (parent) {
sk->sk_type = parent->sk_type;
bt_sk(sk)->flags = bt_sk(parent)->flags;
security_sk_clone(parent, sk);
}
}
static struct proto sco_proto = {
.name = "SCO",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sco_pinfo)
};
static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = sco_sock_destruct;
sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
return sk;
}
static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &sco_sock_ops;
sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
sco_sock_init(sk, NULL);
return 0;
}
static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}
bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
done:
release_sock(sk);
return err;
}
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_sco) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
if (sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
lock_sock(sk);
/* Set destination address */
bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
err = sco_connect(sk);
if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
bdaddr_t *src = &sco_pi(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}
write_lock(&sco_sk_list.lock);
if (__sco_get_sock_listen_by_addr(src)) {
err = -EADDRINUSE;
goto unlock;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
unlock:
write_unlock(&sco_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
lock_sock(sk);
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
ch = bt_accept_dequeue(sk, newsock);
if (ch)
break;
if (!timeo) {
err = -EAGAIN;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", ch);
done:
release_sock(sk);
return err;
}
static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_sco);
if (peer)
bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src);
return 0;
}
static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
release_sock(sk);
return err;
}
static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
{
struct hci_dev *hdev = conn->hdev;
BT_DBG("conn %p", conn);
conn->state = BT_CONFIG;
if (!lmp_esco_capable(hdev)) {
struct hci_cp_accept_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.role = 0x00; /* Ignored */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
} else {
struct hci_cp_accept_sync_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.content_format = cpu_to_le16(setting);
switch (setting & SCO_AIRMODE_MASK) {
case SCO_AIRMODE_TRANSP:
if (conn->pkt_type & ESCO_2EV3)
cp.max_latency = __constant_cpu_to_le16(0x0008);
else
cp.max_latency = __constant_cpu_to_le16(0x000D);
cp.retrans_effort = 0x02;
break;
case SCO_AIRMODE_CVSD:
cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.retrans_effort = 0xff;
break;
}
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
}
}
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sco_pinfo *pi = sco_pi(sk);
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
sco_conn_defer_accept(pi->conn->hcon, pi->setting);
sk->sk_state = BT_CONFIG;
release_sock(sk);
return 0;
}
release_sock(sk);
return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
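/*
 * Usage note (illustrative sketch, not part of this file): with
 * BT_DEFER_SETUP enabled on the listening socket, an incoming connection
 * parks in BT_CONNECT2, and the first recvmsg() on the accepted socket is
 * what drives sco_conn_defer_accept() above. A minimal userspace sketch,
 * assuming BlueZ's <bluetooth/bluetooth.h> definitions; error handling
 * omitted:
 */
#if 0
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

static int accept_deferred_sco(int listen_fd)
{
	int defer = 1;
	char c;
	int fd;

	/* Park incoming connections until we explicitly confirm them. */
	setsockopt(listen_fd, SOL_BLUETOOTH, BT_DEFER_SETUP,
		   &defer, sizeof(defer));

	fd = accept(listen_fd, NULL, NULL);

	/* The first read confirms the connection (see sco_sock_recvmsg()). */
	recv(fd, &c, 0, 0);
	return fd;
}
#endif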
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
struct bt_voice voice;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
case BT_VOICE:
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
sk->sk_state != BT_CONNECT2) {
err = -EINVAL;
break;
}
voice.setting = sco_pi(sk)->setting;
len = min_t(unsigned int, sizeof(voice), optlen);
if (copy_from_user((char *) &voice, optval, len)) {
err = -EFAULT;
break;
}
/* Explicitly check for these values */
if (voice.setting != BT_VOICE_TRANSPARENT &&
voice.setting != BT_VOICE_CVSD_16BIT) {
err = -EINVAL;
break;
}
sco_pi(sk)->setting = voice.setting;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
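/*
 * Usage note (illustrative sketch, not part of this file): BT_VOICE must be
 * set before the connection exists (BT_OPEN/BT_BOUND, or BT_CONNECT2 for
 * deferred setup), and only the two settings checked above are accepted.
 * Assuming BlueZ's <bluetooth/bluetooth.h> definitions:
 */
#if 0
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

static int request_transparent_voice(int fd)
{
	struct bt_voice voice = { .setting = BT_VOICE_TRANSPARENT };

	/* Rejected with EINVAL once the socket is past BT_CONNECT2. */
	return setsockopt(fd, SOL_BLUETOOTH, BT_VOICE, &voice, sizeof(voice));
}
#endif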
static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case SCO_OPTIONS:
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
opts.mtu = sco_pi(sk)->conn->mtu;
BT_DBG("mtu %d", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
break;
case SCO_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
struct bt_voice voice;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;
break;
case BT_VOICE:
voice.setting = sco_pi(sk)->setting;
len = min_t(unsigned int, len, sizeof(voice));
if (copy_to_user(optval, (char *)&voice, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
sco_sock_clear_timer(sk);
__sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
}
release_sock(sk);
return err;
}
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
release_sock(sk);
}
sock_orphan(sk);
sco_sock_kill(sk);
return err;
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
BT_DBG("conn %p", conn);
sco_pi(sk)->conn = conn;
conn->sk = sk;
if (parent)
bt_accept_enqueue(parent, sk);
}
/* Delete channel.
* Must be called on the locked socket. */
static void sco_chan_del(struct sock *sk, int err)
{
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
if (conn->hcon)
hci_conn_drop(conn->hcon);
}
sk->sk_state = BT_CLOSED;
sk->sk_err = err;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_ZAPPED);
}
static void sco_conn_ready(struct sco_conn *conn)
{
struct sock *parent;
struct sock *sk = conn->sk;
BT_DBG("conn %p", conn);
if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
sco_conn_lock(conn);
parent = sco_get_sock_listen(&conn->hcon->src);
if (!parent) {
sco_conn_unlock(conn);
return;
}
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
sco_conn_unlock(conn);
return;
}
sco_sock_init(sk, parent);
bacpy(&sco_pi(sk)->src, &conn->hcon->src);
bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
sk->sk_state = BT_CONNECT2;
else
sk->sk_state = BT_CONNECTED;
/* Wake up parent */
parent->sk_data_ready(parent, 1);
bh_unlock_sock(parent);
sco_conn_unlock(conn);
}
}
/* ----- SCO interface with lower layer (HCI) ----- */
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
{
struct sock *sk;
int lm = 0;
BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) ||
!bacmp(&sco_pi(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
*flags |= HCI_PROTO_DEFER;
break;
}
}
read_unlock(&sco_sk_list.lock);
return lm;
}
void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (!status) {
struct sco_conn *conn;
conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
sco_conn_del(hcon, bt_to_errno(status));
}
void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
sco_conn_del(hcon, bt_to_errno(reason));
}
int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
if (!conn)
goto drop;
BT_DBG("conn %p len %d", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
return 0;
}
drop:
kfree_skb(skb);
return 0;
}
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
read_lock(&sco_sk_list.lock);
sk_for_each(sk, &sco_sk_list.head) {
seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src,
&sco_pi(sk)->dst, sk->sk_state);
}
read_unlock(&sco_sk_list.lock);
return 0;
}
static int sco_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sco_debugfs_show, inode->i_private);
}
static const struct file_operations sco_debugfs_fops = {
.open = sco_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *sco_debugfs;
static const struct proto_ops sco_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = sco_sock_release,
.bind = sco_sock_bind,
.connect = sco_sock_connect,
.listen = sco_sock_listen,
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
.recvmsg = sco_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = sco_sock_shutdown,
.setsockopt = sco_sock_setsockopt,
.getsockopt = sco_sock_getsockopt
};
static const struct net_proto_family sco_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = sco_sock_create,
};
int __init sco_init(void)
{
int err;
err = proto_register(&sco_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops);
if (err < 0) {
BT_ERR("SCO socket registration failed");
goto error;
}
err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL);
if (err < 0) {
BT_ERR("Failed to create SCO proc file");
bt_sock_unregister(BTPROTO_SCO);
goto error;
}
BT_INFO("SCO socket layer initialized");
if (IS_ERR_OR_NULL(bt_debugfs))
return 0;
sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
NULL, &sco_debugfs_fops);
return 0;
error:
proto_unregister(&sco_proto);
return err;
}
void __exit sco_exit(void)
{
bt_procfs_cleanup(&init_net, "sco");
debugfs_remove(sco_debugfs);
bt_sock_unregister(BTPROTO_SCO);
proto_unregister(&sco_proto);
}
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5845_11 |
crossvul-cpp_data_bad_5845_25 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* PACKET - implements raw packet sockets.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
*
* Fixes:
* Alan Cox : verify_area() now used correctly
* Alan Cox : new skbuff lists, look ma no backlogs!
* Alan Cox : tidied skbuff lists.
* Alan Cox : Now uses generic datagram routines I
* added. Also fixed the peek/read crash
* from all old Linux datagram code.
* Alan Cox : Uses the improved datagram code.
* Alan Cox : Added NULL's for socket options.
* Alan Cox : Re-commented the code.
* Alan Cox : Use new kernel side addressing
* Rob Janssen : Correct MTU usage.
* Dave Platt : Counter leaks caused by incorrect
* interrupt locking and some slightly
* dubious gcc output. Can you read
* compiler: it said _VOLATILE_
* Richard Kooijman : Timestamp fixes.
* Alan Cox : New buffers. Use sk->mac.raw.
* Alan Cox : sendmsg/recvmsg support.
* Alan Cox : Protocol setting support
* Alexey Kuznetsov : Untied from IPv4 stack.
* Cyrus Durgin : Fixed kerneld for kmod.
* Michal Ostrowski : Module initialization cleanup.
* Ulises Alonso : Frame number limit removal and
* packet_set_ring memory leak.
* Eric Biederman : Allow for > 8 byte hardware addresses.
* The convention is that longer addresses
* will simply extend the hardware address
* byte arrays at the end of sockaddr_ll
* and packet_mreq.
* Johann Baudy : Added TX RING.
* Chetan Loke : Implemented TPACKET_V3 block abstraction
* layer.
* Copyright (C) 2011, <lokec@ccs.neu.edu>
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/reciprocal_div.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include "internal.h"
/*
Assumptions:
- if the device has no dev->hard_header routine, it adds and removes the ll
header itself. In this case the ll header is invisible outside the device,
but higher levels should still reserve dev->hard_header_len.
Some devices are clever enough to reallocate the skb when the header
will not fit into the reserved space (tunnels); others are not so
clever (PPP).
- a packet socket receives packets with the ll header pulled,
so SOCK_RAW should push it back.
On receive:
-----------
Incoming, dev->hard_header!=NULL
mac_header -> ll header
data -> data
Outgoing, dev->hard_header!=NULL
mac_header -> ll header
data -> ll header
Incoming, dev->hard_header==NULL
mac_header -> UNKNOWN position. It very likely points to the ll
header. PPP does this, which is wrong, because it introduces
asymmetry between the rx and tx paths.
data -> data
Outgoing, dev->hard_header==NULL
mac_header -> data. ll header is still not built!
data -> data
Summary:
If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
On transmit:
------------
dev->hard_header != NULL
mac_header -> ll header
data -> ll header
dev->hard_header == NULL (ll header is added by device, we cannot control it)
mac_header -> data
data -> data
We should set nh.raw on output to the correct position;
the packet classifier depends on it.
*/
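/*
 * Illustrative sketch (not part of this file): the practical upshot of the
 * rules above is that a SOCK_RAW packet socket receives frames starting at
 * the ll header, while SOCK_DGRAM receives them with the ll header already
 * pulled. A minimal userspace receiver; error handling omitted:
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static void dump_first_byte(int type)
{
	/* type: SOCK_RAW => buf[0] is the first byte of the Ethernet
	 * header; SOCK_DGRAM => buf[0] is the first payload byte.
	 */
	int fd = socket(AF_PACKET, type, htons(ETH_P_ALL));
	unsigned char buf[2048];
	ssize_t n = recv(fd, buf, sizeof(buf), 0);

	printf("%zd bytes, first byte 0x%02x\n", n, buf[0]);
}
#endif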
/* Private packet socket structures. */
/* identical to struct packet_mreq except it has
* a longer address field.
*/
struct packet_mreq_max {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;
unsigned char mr_address[MAX_ADDR_LEN];
};
union tpacket_uhdr {
struct tpacket_hdr *h1;
struct tpacket2_hdr *h2;
struct tpacket3_hdr *h3;
void *raw;
};
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring);
#define V3_ALIGNMENT (8)
#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
#define BLK_PLUS_PRIV(sz_of_priv) \
(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
#define PGV_FROM_VMALLOC 1
#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
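/*
 * Illustrative sketch (not part of this file): user space consumes a
 * TPACKET_V3 block by starting at offset_to_first_pkt and following
 * tp_next_offset for num_pkts frames, then returns the whole block by
 * writing TP_STATUS_KERNEL into block_status -- the same fields the
 * macros above access on the kernel side:
 */
#if 0
#include <linux/if_packet.h>

static void walk_v3_block(struct tpacket_block_desc *pbd)
{
	struct tpacket3_hdr *ppd = (struct tpacket3_hdr *)
		((char *)pbd + pbd->hdr.bh1.offset_to_first_pkt);
	unsigned int i;

	for (i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
		/* frame data lives at (char *)ppd + ppd->tp_mac,
		 * ppd->tp_snaplen bytes of it
		 */
		ppd = (struct tpacket3_hdr *)
			((char *)ppd + ppd->tp_next_offset);
	}

	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	/* hand it back */
}
#endif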
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
static void *packet_previous_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
struct tpacket_kbdq_core *,
void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
struct packet_skb_cb {
unsigned int origlen;
union {
struct sockaddr_pkt pkt;
struct sockaddr_ll ll;
} sa;
};
#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid) \
((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
((x)->kactive_blk_num+1) : 0)
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);
/* register_prot_hook must be invoked with the po->bind_lock held,
* or from a context in which asynchronous accesses to the packet
* socket are not possible (packet_create()).
*/
static void register_prot_hook(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
if (!po->running) {
if (po->fanout)
__fanout_link(sk, po);
else
dev_add_pack(&po->prot_hook);
sock_hold(sk);
po->running = 1;
}
}
/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
* held. If the sync parameter is true, we will temporarily drop
* the po->bind_lock and do a synchronize_net to make sure no
* asynchronous packet processing paths still refer to the elements
* of po->prot_hook. If the sync parameter is false, it is the
* caller's responsibility to take care of this.
*/
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
struct packet_sock *po = pkt_sk(sk);
po->running = 0;
if (po->fanout)
__fanout_unlink(sk, po);
else
__dev_remove_pack(&po->prot_hook);
__sock_put(sk);
if (sync) {
spin_unlock(&po->bind_lock);
synchronize_net();
spin_lock(&po->bind_lock);
}
}
static void unregister_prot_hook(struct sock *sk, bool sync)
{
struct packet_sock *po = pkt_sk(sk);
if (po->running)
__unregister_prot_hook(sk, sync);
}
static inline __pure struct page *pgv_to_page(void *addr)
{
if (is_vmalloc_addr(addr))
return vmalloc_to_page(addr);
return virt_to_page(addr);
}
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
union tpacket_uhdr h;
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
h.h1->tp_status = status;
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
break;
case TPACKET_V2:
h.h2->tp_status = status;
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
break;
case TPACKET_V3:
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
}
smp_wmb();
}
static int __packet_get_status(struct packet_sock *po, void *frame)
{
union tpacket_uhdr h;
smp_rmb();
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
return h.h1->tp_status;
case TPACKET_V2:
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
return h.h2->tp_status;
case TPACKET_V3:
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
return 0;
}
}
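/*
 * Illustrative sketch (not part of this file): tp_status is the per-frame
 * kernel/user handshake for TPACKET_V1/V2 rings. The kernel publishes a
 * frame by setting TP_STATUS_USER; user space returns it by writing
 * TP_STATUS_KERNEL. A minimal consumer loop over a mmap()ed RX ring,
 * assuming "ring", "frame_nr" and "frame_size" came from the
 * PACKET_RX_RING setup (a real consumer would poll() instead of spinning):
 */
#if 0
#include <linux/if_packet.h>

static void consume_rx_ring(char *ring, unsigned int frame_nr,
			    unsigned int frame_size)
{
	unsigned int i = 0;

	for (;;) {
		volatile struct tpacket_hdr *hdr =
			(volatile struct tpacket_hdr *)
				(ring + i * frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER))
			continue;	/* still owned by the kernel */

		/* frame data: (char *)hdr + hdr->tp_mac, hdr->tp_len bytes */

		hdr->tp_status = TP_STATUS_KERNEL;	/* give it back */
		i = (i + 1) % frame_nr;
	}
}
#endif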
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
unsigned int flags)
{
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
if (shhwtstamps) {
if ((flags & SOF_TIMESTAMPING_SYS_HARDWARE) &&
ktime_to_timespec_cond(shhwtstamps->syststamp, ts))
return TP_STATUS_TS_SYS_HARDWARE;
if ((flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
return TP_STATUS_TS_RAW_HARDWARE;
}
if (ktime_to_timespec_cond(skb->tstamp, ts))
return TP_STATUS_TS_SOFTWARE;
return 0;
}
static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
struct sk_buff *skb)
{
union tpacket_uhdr h;
struct timespec ts;
__u32 ts_status;
if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
return 0;
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
h.h1->tp_sec = ts.tv_sec;
h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
break;
case TPACKET_V2:
h.h2->tp_sec = ts.tv_sec;
h.h2->tp_nsec = ts.tv_nsec;
break;
case TPACKET_V3:
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
}
/* one flush is safe, as both fields always lie on the same cacheline */
flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
smp_wmb();
return ts_status;
}
static void *packet_lookup_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
unsigned int position,
int status)
{
unsigned int pg_vec_pos, frame_offset;
union tpacket_uhdr h;
pg_vec_pos = position / rb->frames_per_block;
frame_offset = position % rb->frames_per_block;
h.raw = rb->pg_vec[pg_vec_pos].buffer +
(frame_offset * rb->frame_size);
if (status != __packet_get_status(po, h.raw))
return NULL;
return h.raw;
}
static void *packet_current_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
return packet_lookup_frame(po, rb, rb->head, status);
}
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
del_timer_sync(&pkc->retire_blk_timer);
}
static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
int tx_ring,
struct sk_buff_head *rb_queue)
{
struct tpacket_kbdq_core *pkc;
pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
spin_lock(&rb_queue->lock);
pkc->delete_blk_timer = 1;
spin_unlock(&rb_queue->lock);
prb_del_retire_blk_timer(pkc);
}
static void prb_init_blk_timer(struct packet_sock *po,
struct tpacket_kbdq_core *pkc,
void (*func) (unsigned long))
{
init_timer(&pkc->retire_blk_timer);
pkc->retire_blk_timer.data = (long)po;
pkc->retire_blk_timer.function = func;
pkc->retire_blk_timer.expires = jiffies;
}
static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
struct tpacket_kbdq_core *pkc;
if (tx_ring)
BUG();
pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
int blk_size_in_bytes)
{
struct net_device *dev;
unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
struct ethtool_cmd ecmd;
int err;
u32 speed;
rtnl_lock();
dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
if (unlikely(!dev)) {
rtnl_unlock();
return DEFAULT_PRB_RETIRE_TOV;
}
err = __ethtool_get_settings(dev, &ecmd);
speed = ethtool_cmd_speed(&ecmd);
rtnl_unlock();
if (!err) {
/*
* If the link speed is that slow, there is no real
* need to worry about perf anyway
*/
if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
return DEFAULT_PRB_RETIRE_TOV;
} else {
msec = 1;
div = speed / 1000;
}
}
mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
if (div)
mbits /= div;
tmo = mbits * msec;
if (div)
return tmo+1;
return tmo;
}
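/*
 * Worked example of the arithmetic above (illustrative): for a 1 MiB block
 * on a 1 Gbps link, msec = 1 and div = 1000 / 1000 = 1, so
 * mbits = (1048576 * 8) / (1024 * 1024) = 8 and tmo = 8 * 1 = 8; div is
 * non-zero, so the function returns tmo + 1 = 9 ms, roughly the time the
 * line needs to fill one block, plus a little slack.
 */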
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
union tpacket_req_u *req_u)
{
p1->feature_req_word = req_u->req3.tp_feature_req_word;
}
static void init_prb_bdqc(struct packet_sock *po,
struct packet_ring_buffer *rb,
struct pgv *pg_vec,
union tpacket_req_u *req_u, int tx_ring)
{
struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
struct tpacket_block_desc *pbd;
memset(p1, 0x0, sizeof(*p1));
p1->knxt_seq_num = 1;
p1->pkbdq = pg_vec;
pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
p1->pkblk_start = pg_vec[0].buffer;
p1->kblk_size = req_u->req3.tp_block_size;
p1->knum_blocks = req_u->req3.tp_block_nr;
p1->hdrlen = po->tp_hdrlen;
p1->version = po->tp_version;
p1->last_kactive_blk_num = 0;
po->stats.stats3.tp_freeze_q_cnt = 0;
if (req_u->req3.tp_retire_blk_tov)
p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
else
p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
req_u->req3.tp_block_size);
p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
prb_init_ft_ops(p1, req_u);
prb_setup_retire_blk_timer(po, tx_ring);
prb_open_block(p1, pbd);
}
/* Do NOT update the last_blk_num first.
* Assumes sk_buff_head lock is held.
*/
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
mod_timer(&pkc->retire_blk_timer,
jiffies + pkc->tov_in_jiffies);
pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}
/*
* Timer logic:
* 1) We refresh the timer only when we open a block.
* By doing this we don't waste cycles refreshing the timer
* on packet-by-packet basis.
*
* With a 1MB block-size, on a 1Gbps line, it will take
* i) ~8 ms to fill a block + ii) memcpy etc.
* In this cut we are not accounting for the memcpy time.
*
* So, if the user sets the 'tmo' to 10ms then the timer
* will never fire while the block is still getting filled
* (which is what we want). However, the user could choose
* to close a block early and that's fine.
*
* But when the timer does fire, we check whether or not to refresh it.
* Since the tmo granularity is in msecs, it is not too expensive
* to refresh the timer, let's say every '8' msecs.
* Either the user can set the 'tmo' or we can derive it based on
* a) line-speed and b) block-size.
* prb_calc_retire_blk_tmo() calculates the tmo.
*
*/
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
struct packet_sock *po = (struct packet_sock *)data;
struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
unsigned int frozen;
struct tpacket_block_desc *pbd;
spin_lock(&po->sk.sk_receive_queue.lock);
frozen = prb_queue_frozen(pkc);
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
if (unlikely(pkc->delete_blk_timer))
goto out;
/* We only need to plug the race when the block is partially filled.
* tpacket_rcv:
* lock(); increment BLOCK_NUM_PKTS; unlock()
* copy_bits() is in progress ...
* timer fires on other cpu:
* we can't retire the current block because copy_bits
* is in progress.
*
*/
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
cpu_relax();
}
}
if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
if (!frozen) {
prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
if (!prb_dispatch_next_block(pkc, po))
goto refresh_timer;
else
goto out;
} else {
/* Case 1. Queue was frozen because user-space was
* lagging behind.
*/
if (prb_curr_blk_in_use(pkc, pbd)) {
/*
* Ok, user-space is still behind.
* So just refresh the timer.
*/
goto refresh_timer;
} else {
/* Case 2. Queue was frozen, user-space caught up,
* now the link went idle && the timer fired.
* We don't have a block to close, so we open this
* block and restart the timer.
* Opening a block thaws the queue and restarts the timer;
* thawing/timer-refresh is a side effect.
*/
prb_open_block(pkc, pbd);
goto out;
}
}
}
refresh_timer:
_prb_refresh_rx_retire_blk_timer(pkc);
out:
spin_unlock(&po->sk.sk_receive_queue.lock);
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1, __u32 status)
{
/* Flush everything minus the block header */
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
u8 *start, *end;
start = (u8 *)pbd1;
/* Skip the block header (we know the header WILL fit in 4K) */
start += PAGE_SIZE;
end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
for (; start < end; start += PAGE_SIZE)
flush_dcache_page(pgv_to_page(start));
smp_wmb();
#endif
/* Now update the block status. */
BLOCK_STATUS(pbd1) = status;
/* Flush the block header */
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
start = (u8 *)pbd1;
flush_dcache_page(pgv_to_page(start));
smp_wmb();
#endif
}
/*
* Side effect:
*
* 1) flush the block
* 2) Increment active_blk_num
*
* Note: we DON'T refresh the timer on purpose,
* because almost always the next block will be opened.
*/
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1,
struct packet_sock *po, unsigned int stat)
{
__u32 status = TP_STATUS_USER | stat;
struct tpacket3_hdr *last_pkt;
struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
if (po->stats.stats3.tp_drops)
status |= TP_STATUS_LOSING;
last_pkt = (struct tpacket3_hdr *)pkc1->prev;
last_pkt->tp_next_offset = 0;
/* Get the ts of the last pkt */
if (BLOCK_NUM_PKTS(pbd1)) {
h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
} else {
/* Ok, we tmo'd - so get the current time */
struct timespec ts;
getnstimeofday(&ts);
h1->ts_last_pkt.ts_sec = ts.tv_sec;
h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
}
smp_wmb();
/* Flush the block */
prb_flush_block(pkc1, pbd1, status);
pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
pkc->reset_pending_on_curr_blk = 0;
}
/*
* Side effect of opening a block:
*
* 1) prb_queue is thawed.
* 2) retire_blk_timer is refreshed.
*
*/
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1)
{
struct timespec ts;
struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
smp_rmb();
/* We could have just memset this, but we would lose the
* flexibility of making the priv area sticky
*/
BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
BLOCK_NUM_PKTS(pbd1) = 0;
BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
getnstimeofday(&ts);
h1->ts_first_pkt.ts_sec = ts.tv_sec;
h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
pkc1->pkblk_start = (char *)pbd1;
pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
pbd1->version = pkc1->version;
pkc1->prev = pkc1->nxt_offset;
pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
prb_thaw_queue(pkc1);
_prb_refresh_rx_retire_blk_timer(pkc1);
smp_wmb();
}
/*
* Queue freeze logic:
* 1) Assume tp_block_nr = 8 blocks.
* 2) At time 't0', user opens Rx ring.
* 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
* 4) user-space is either sleeping or processing block '0'.
* 5) tpacket_rcv is currently filling block '7', since there is no space left,
* it will close block-7, loop around and try to fill block '0'.
* call-flow:
* __packet_lookup_frame_in_block
* prb_retire_current_block()
* prb_dispatch_next_block()
* |->(BLOCK_STATUS == USER) evaluates to true
* 5.1) Since block-0 is currently in-use, we just freeze the queue.
* 6) Now there are two cases:
* 6.1) Link goes idle right after the queue is frozen.
* But remember, the last open_block() refreshed the timer.
* When this timer expires, it will refresh itself so that we can
* re-open block-0 in the near future.
* 6.2) Link is busy and keeps on receiving packets. This is a simple
* case and __packet_lookup_frame_in_block will check if block-0
* is free and can now be re-used.
*/
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
struct packet_sock *po)
{
pkc->reset_pending_on_curr_blk = 1;
po->stats.stats3.tp_freeze_q_cnt++;
}
#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
/*
* If the next block is free then we will dispatch it
* and return a good offset.
* Else, we will freeze the queue.
* So, caller must check the return value.
*/
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
struct packet_sock *po)
{
struct tpacket_block_desc *pbd;
smp_rmb();
/* 1. Get current block num */
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
/* 2. If this block is currently in_use then freeze the queue */
if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
prb_freeze_queue(pkc, po);
return NULL;
}
/*
* 3.
* open this block and return the offset where the first packet
* needs to get stored.
*/
prb_open_block(pkc, pbd);
return (void *)pkc->nxt_offset;
}
static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
struct packet_sock *po, unsigned int status)
{
struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
/* retire/close the current block */
if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
/*
* Plug the case where copy_bits() is in progress on
* cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
* have space to copy the pkt in the current block and
* called prb_retire_current_block()
*
* We don't need to worry about the TMO case because
* the timer-handler already handled this case.
*/
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
cpu_relax();
}
}
prb_close_block(pkc, pbd, po, status);
return;
}
}
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
struct tpacket_block_desc *pbd)
{
return TP_STATUS_USER & BLOCK_STATUS(pbd);
}
static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
return pkc->reset_pending_on_curr_blk;
}
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
atomic_dec(&pkc->blk_fill_in_prog);
}
static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}
static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
ppd->hv1.tp_rxhash = 0;
}
static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
if (vlan_tx_tag_present(pkc->skb)) {
ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
ppd->tp_status = TP_STATUS_VLAN_VALID;
} else {
ppd->hv1.tp_vlan_tci = 0;
ppd->tp_status = TP_STATUS_AVAILABLE;
}
}
static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
prb_fill_vlan_info(pkc, ppd);
if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
prb_fill_rxhash(pkc, ppd);
else
prb_clear_rxhash(pkc, ppd);
}
static void prb_fill_curr_block(char *curr,
struct tpacket_kbdq_core *pkc,
struct tpacket_block_desc *pbd,
unsigned int len)
{
struct tpacket3_hdr *ppd;
ppd = (struct tpacket3_hdr *)curr;
ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
pkc->prev = curr;
pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
BLOCK_NUM_PKTS(pbd) += 1;
atomic_inc(&pkc->blk_fill_in_prog);
prb_run_all_ft_ops(pkc, ppd);
}
/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
struct sk_buff *skb,
int status,
unsigned int len
)
{
struct tpacket_kbdq_core *pkc;
struct tpacket_block_desc *pbd;
char *curr, *end;
pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
/* Queue is frozen when user space is lagging behind */
if (prb_queue_frozen(pkc)) {
/*
* Check if the last block, which caused the queue to freeze,
* is still in_use by user-space.
*/
if (prb_curr_blk_in_use(pkc, pbd)) {
/* Can't record this packet */
return NULL;
} else {
/*
* Ok, the block was released by user-space.
* Now let's open that block.
* opening a block also thaws the queue.
* Thawing is a side effect.
*/
prb_open_block(pkc, pbd);
}
}
smp_mb();
curr = pkc->nxt_offset;
pkc->skb = skb;
end = (char *)pbd + pkc->kblk_size;
/* first try the current block */
if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
prb_fill_curr_block(curr, pkc, pbd, len);
return (void *)curr;
}
/* Ok, close the current block */
prb_retire_current_block(pkc, po, 0);
/* Now, try to dispatch the next block */
curr = (char *)prb_dispatch_next_block(pkc, po);
if (curr) {
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
prb_fill_curr_block(curr, pkc, pbd, len);
return (void *)curr;
}
/*
* No free blocks are available. User-space hasn't caught up yet.
* Queue was just frozen and now this packet will get dropped.
*/
return NULL;
}
static void *packet_current_rx_frame(struct packet_sock *po,
struct sk_buff *skb,
int status, unsigned int len)
{
char *curr = NULL;
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
curr = packet_lookup_frame(po, &po->rx_ring,
po->rx_ring.head, status);
return curr;
case TPACKET_V3:
return __packet_lookup_frame_in_block(po, skb, status, len);
default:
WARN(1, "TPACKET version not supported\n");
BUG();
return NULL;
}
}
static void *prb_lookup_block(struct packet_sock *po,
struct packet_ring_buffer *rb,
unsigned int idx,
int status)
{
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
if (status != BLOCK_STATUS(pbd))
return NULL;
return pbd;
}
static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
unsigned int prev;
if (rb->prb_bdqc.kactive_blk_num)
prev = rb->prb_bdqc.kactive_blk_num-1;
else
prev = rb->prb_bdqc.knum_blocks-1;
return prev;
}
/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
unsigned int previous = prb_previous_blk_num(rb);
return prb_lookup_block(po, rb, previous, status);
}
static void *packet_previous_rx_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
if (po->tp_version <= TPACKET_V2)
return packet_previous_frame(po, rb, status);
return __prb_previous_block(po, rb, status);
}
static void packet_increment_rx_head(struct packet_sock *po,
struct packet_ring_buffer *rb)
{
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
return packet_increment_head(rb);
case TPACKET_V3:
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
return;
}
}
static void *packet_previous_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
return packet_lookup_frame(po, rb, previous, status);
}
static void packet_increment_head(struct packet_ring_buffer *buff)
{
buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
struct sock *sk = &po->sk;
bool has_room;
if (po->prot_hook.func != tpacket_rcv)
return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
<= sk->sk_rcvbuf;
spin_lock(&sk->sk_receive_queue.lock);
if (po->tp_version == TPACKET_V3)
has_room = prb_lookup_block(po, &po->rx_ring,
po->rx_ring.prb_bdqc.kactive_blk_num,
TP_STATUS_KERNEL);
else
has_room = packet_lookup_frame(po, &po->rx_ring,
po->rx_ring.head,
TP_STATUS_KERNEL);
spin_unlock(&sk->sk_receive_queue.lock);
return has_room;
}
static void packet_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_error_queue);
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive packet socket: %p\n", sk);
return;
}
sk_refcnt_debug_dec(sk);
}
static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
int x = atomic_read(&f->rr_cur) + 1;
if (x >= num)
x = 0;
return x;
}
static unsigned int fanout_demux_hash(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return reciprocal_divide(skb->rxhash, num);
}
static unsigned int fanout_demux_lb(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
int cur, old;
cur = atomic_read(&f->rr_cur);
while ((old = atomic_cmpxchg(&f->rr_cur, cur,
fanout_rr_next(f, num))) != cur)
cur = old;
return cur;
}
static unsigned int fanout_demux_cpu(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return smp_processor_id() % num;
}
static unsigned int fanout_demux_rnd(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return reciprocal_divide(prandom_u32(), num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int idx, unsigned int skip,
unsigned int num)
{
unsigned int i, j;
i = j = min_t(int, f->next[idx], num - 1);
do {
if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
if (i != j)
f->next[idx] = i;
return i;
}
if (++i == num)
i = 0;
} while (i != j);
return idx;
}
static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
return f->flags & (flag >> 8);
}
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct packet_fanout *f = pt->af_packet_priv;
unsigned int num = f->num_members;
struct packet_sock *po;
unsigned int idx;
if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
!num) {
kfree_skb(skb);
return 0;
}
switch (f->type) {
case PACKET_FANOUT_HASH:
default:
if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
if (!skb)
return 0;
}
skb_get_rxhash(skb);
idx = fanout_demux_hash(f, skb, num);
break;
case PACKET_FANOUT_LB:
idx = fanout_demux_lb(f, skb, num);
break;
case PACKET_FANOUT_CPU:
idx = fanout_demux_cpu(f, skb, num);
break;
case PACKET_FANOUT_RND:
idx = fanout_demux_rnd(f, skb, num);
break;
case PACKET_FANOUT_ROLLOVER:
idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
break;
}
po = pkt_sk(f->arr[idx]);
if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
unlikely(!packet_rcv_has_room(po, skb))) {
idx = fanout_demux_rollover(f, skb, idx, idx, num);
po = pkt_sk(f->arr[idx]);
}
return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
struct packet_fanout *f = po->fanout;
spin_lock(&f->lock);
f->arr[f->num_members] = sk;
smp_wmb();
f->num_members++;
spin_unlock(&f->lock);
}
static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
struct packet_fanout *f = po->fanout;
int i;
spin_lock(&f->lock);
for (i = 0; i < f->num_members; i++) {
if (f->arr[i] == sk)
break;
}
BUG_ON(i >= f->num_members);
f->arr[i] = f->arr[f->num_members - 1];
f->num_members--;
spin_unlock(&f->lock);
}
static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
{
if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
return true;
return false;
}
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f, *match;
u8 type = type_flags & 0xff;
u8 flags = type_flags >> 8;
int err;
switch (type) {
case PACKET_FANOUT_ROLLOVER:
if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
return -EINVAL;
case PACKET_FANOUT_HASH:
case PACKET_FANOUT_LB:
case PACKET_FANOUT_CPU:
case PACKET_FANOUT_RND:
break;
default:
return -EINVAL;
}
if (!po->running)
return -EINVAL;
if (po->fanout)
return -EALREADY;
mutex_lock(&fanout_mutex);
match = NULL;
list_for_each_entry(f, &fanout_list, list) {
if (f->id == id &&
read_pnet(&f->net) == sock_net(sk)) {
match = f;
break;
}
}
err = -EINVAL;
if (match && match->flags != flags)
goto out;
if (!match) {
err = -ENOMEM;
match = kzalloc(sizeof(*match), GFP_KERNEL);
if (!match)
goto out;
write_pnet(&match->net, sock_net(sk));
match->id = id;
match->type = type;
match->flags = flags;
atomic_set(&match->rr_cur, 0);
INIT_LIST_HEAD(&match->list);
spin_lock_init(&match->lock);
atomic_set(&match->sk_ref, 0);
match->prot_hook.type = po->prot_hook.type;
match->prot_hook.dev = po->prot_hook.dev;
match->prot_hook.func = packet_rcv_fanout;
match->prot_hook.af_packet_priv = match;
match->prot_hook.id_match = match_fanout_group;
dev_add_pack(&match->prot_hook);
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
if (match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
__dev_remove_pack(&po->prot_hook);
po->fanout = match;
atomic_inc(&match->sk_ref);
__fanout_link(sk, po);
err = 0;
}
}
out:
mutex_unlock(&fanout_mutex);
return err;
}
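/*
 * Usage note (illustrative sketch, not part of this file): user space joins
 * a fanout group with setsockopt(PACKET_FANOUT). The low 16 bits of the
 * integer argument carry the group id and the high 16 bits the type and
 * flags, matching the (u16 id, u16 type_flags) arguments fanout_add()
 * receives:
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>

static int join_fanout_group(int fd, unsigned short group_id)
{
	int arg = group_id | (PACKET_FANOUT_HASH << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
}
#endif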
static void fanout_release(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f;
f = po->fanout;
if (!f)
return;
mutex_lock(&fanout_mutex);
po->fanout = NULL;
if (atomic_dec_and_test(&f->sk_ref)) {
list_del(&f->list);
dev_remove_pack(&f->prot_hook);
kfree(f);
}
mutex_unlock(&fanout_mutex);
}
static const struct proto_ops packet_ops;
static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct sockaddr_pkt *spkt;
/*
* When we registered the protocol we saved the socket in the data
* field for just this event.
*/
sk = pt->af_packet_priv;
/*
* Yank back the headers [hope the device set this
* right or kerboom...]
*
* Incoming packets have ll header pulled,
* push it back.
*
* For outgoing ones skb->data == skb_mac_header(skb)
* so that this procedure is a no-op.
*/
if (skb->pkt_type == PACKET_LOOPBACK)
goto out;
if (!net_eq(dev_net(dev), sock_net(sk)))
goto out;
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
goto oom;
/* drop any routing info */
skb_dst_drop(skb);
/* drop conntrack reference */
nf_reset(skb);
spkt = &PACKET_SKB_CB(skb)->sa.pkt;
skb_push(skb, skb->data - skb_mac_header(skb));
/*
* The SOCK_PACKET socket receives _all_ frames.
*/
spkt->spkt_family = dev->type;
strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
spkt->spkt_protocol = skb->protocol;
/*
* Charge the memory to the socket. This is done specifically
* to prevent sockets from using up all the memory.
*/
if (sock_queue_rcv_skb(sk, skb) == 0)
return 0;
out:
kfree_skb(skb);
oom:
return 0;
}
/*
* Output a raw packet to a device layer. This bypasses all the other
* protocol layers and you must therefore supply it with a complete frame
*/
static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
struct sk_buff *skb = NULL;
struct net_device *dev;
__be16 proto = 0;
int err;
int extra_len = 0;
/*
* Get and verify the address.
*/
if (saddr) {
if (msg->msg_namelen < sizeof(struct sockaddr))
return -EINVAL;
if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
proto = saddr->spkt_protocol;
} else
return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
/*
* Find the device first to size check it
*/
saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
if (dev == NULL)
goto out_unlock;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_unlock;
/*
* You may not queue a frame bigger than the mtu. This is the lowest level
* raw protocol and you must do your own fragmentation at this level.
*/
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
if (!netif_supports_nofcs(dev)) {
err = -EPROTONOSUPPORT;
goto out_unlock;
}
extra_len = 4; /* We're doing our own CRC */
}
err = -EMSGSIZE;
if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
goto out_unlock;
if (!skb) {
size_t reserved = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
rcu_read_unlock();
skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
/* FIXME: Save some space for broken drivers that write a hard
* header at transmission time by themselves. PPP is the notable
* one here. This should really be fixed at the driver level.
*/
skb_reserve(skb, reserved);
skb_reset_network_header(skb);
/* Try to align data part correctly */
if (hhlen) {
skb->data -= hhlen;
skb->tail -= hhlen;
if (len < hhlen)
skb_reset_network_header(skb);
}
err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (err)
goto out_free;
goto retry;
}
if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
/* Earlier code assumed this would be a VLAN pkt,
* double-check this now that we have the actual
* packet in hand.
*/
struct ethhdr *ehdr;
skb_reset_mac_header(skb);
ehdr = eth_hdr(skb);
if (ehdr->h_proto != htons(ETH_P_8021Q)) {
err = -EMSGSIZE;
goto out_unlock;
}
}
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
skb_probe_transport_header(skb, 0);
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
out_unlock:
rcu_read_unlock();
out_free:
kfree_skb(skb);
return err;
}
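/*
 * Usage note (illustrative sketch, not part of this file): this is the
 * legacy SOCK_PACKET transmit path. Frames are addressed by device name
 * via sockaddr_pkt and must already contain the complete link-layer
 * header. The device name "eth0" below is just an example; error handling
 * omitted:
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static ssize_t send_raw_frame(int fd, const void *frame, size_t len)
{
	struct sockaddr_pkt spkt;

	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	spkt.spkt_protocol = htons(ETH_P_IP);
	strncpy((char *)spkt.spkt_device, "eth0",
		sizeof(spkt.spkt_device) - 1);

	/* fd was created with socket(AF_PACKET, SOCK_PACKET, ...) */
	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif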
static unsigned int run_filter(const struct sk_buff *skb,
const struct sock *sk,
unsigned int res)
{
struct sk_filter *filter;
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
if (filter != NULL)
res = SK_RUN_FILTER(filter, skb);
rcu_read_unlock();
return res;
}
/*
* This function does lazy skb cloning, in the hope that most of the
* packets are discarded by BPF.
*
* Note tricky part: we DO mangle shared skb! skb->data, skb->len
* and skb->cb are mangled. It works because (and until) packets
* falling here are owned by current CPU. Output packets are cloned
* by dev_queue_xmit_nit(), input packets are processed by net_bh
* sequentially, so that if we return the skb to its original state on exit,
* we will not harm anyone.
*/
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct sockaddr_ll *sll;
struct packet_sock *po;
u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
sk = pt->af_packet_priv;
po = pkt_sk(sk);
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
skb->dev = dev;
if (dev->header_ops) {
/* The device has an explicit notion of ll header,
* exported to higher levels.
*
* Otherwise, the device hides details of its frame
* structure, so that corresponding packet head is
* never delivered to user.
*/
if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb_mac_header(skb));
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
skb_pull(skb, skb_network_offset(skb));
}
}
snaplen = skb->len;
res = run_filter(skb, sk, snaplen);
if (!res)
goto drop_n_restore;
if (snaplen > res)
snaplen = res;
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto drop_n_acct;
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb == NULL)
goto drop_n_acct;
if (skb_head != skb->data) {
skb->data = skb_head;
skb->len = skb_len;
}
consume_skb(skb);
skb = nskb;
}
BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
sizeof(skb->cb));
sll = &PACKET_SKB_CB(skb)->sa.ll;
sll->sll_family = AF_PACKET;
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(po->origdev))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
PACKET_SKB_CB(skb)->origlen = skb->len;
if (pskb_trim(skb, snaplen))
goto drop_n_acct;
skb_set_owner_r(skb, sk);
skb->dev = NULL;
skb_dst_drop(skb);
/* drop conntrack reference */
nf_reset(skb);
spin_lock(&sk->sk_receive_queue.lock);
po->stats.stats1.tp_packets++;
skb->dropcount = atomic_read(&sk->sk_drops);
__skb_queue_tail(&sk->sk_receive_queue, skb);
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk, skb->len);
return 0;
drop_n_acct:
spin_lock(&sk->sk_receive_queue.lock);
po->stats.stats1.tp_drops++;
atomic_inc(&sk->sk_drops);
spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
skb->data = skb_head;
skb->len = skb_len;
}
drop:
consume_skb(skb);
return 0;
}
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct packet_sock *po;
struct sockaddr_ll *sll;
union tpacket_uhdr h;
u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_USER;
unsigned short macoff, netoff, hdrlen;
struct sk_buff *copy_skb = NULL;
struct timespec ts;
__u32 ts_status;
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
sk = pt->af_packet_priv;
po = pkt_sk(sk);
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
if (dev->header_ops) {
if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb_mac_header(skb));
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
skb_pull(skb, skb_network_offset(skb));
}
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= TP_STATUS_CSUMNOTREADY;
snaplen = skb->len;
res = run_filter(skb, sk, snaplen);
if (!res)
goto drop_n_restore;
if (snaplen > res)
snaplen = res;
if (sk->sk_type == SOCK_DGRAM) {
macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
po->tp_reserve;
} else {
unsigned int maclen = skb_network_offset(skb);
netoff = TPACKET_ALIGN(po->tp_hdrlen +
(maclen < 16 ? 16 : maclen)) +
po->tp_reserve;
macoff = netoff - maclen;
}
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
copy_skb = skb_get(skb);
skb_head = skb->data;
}
if (copy_skb)
skb_set_owner_r(copy_skb, sk);
}
snaplen = po->rx_ring.frame_size - macoff;
if ((int)snaplen < 0)
snaplen = 0;
}
}
spin_lock(&sk->sk_receive_queue.lock);
h.raw = packet_current_rx_frame(po, skb,
TP_STATUS_KERNEL, (macoff+snaplen));
if (!h.raw)
goto ring_is_full;
if (po->tp_version <= TPACKET_V2) {
packet_increment_rx_head(po, &po->rx_ring);
/*
* LOSING will be reported till you read the stats,
* because it's COR - Clear On Read.
* Anyway, moving it for V1/V2 only, as V3 doesn't need this
* at packet level.
*/
if (po->stats.stats1.tp_drops)
status |= TP_STATUS_LOSING;
}
po->stats.stats1.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
}
spin_unlock(&sk->sk_receive_queue.lock);
skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
getnstimeofday(&ts);
status |= ts_status;
switch (po->tp_version) {
case TPACKET_V1:
h.h1->tp_len = skb->len;
h.h1->tp_snaplen = snaplen;
h.h1->tp_mac = macoff;
h.h1->tp_net = netoff;
h.h1->tp_sec = ts.tv_sec;
h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
hdrlen = sizeof(*h.h1);
break;
case TPACKET_V2:
h.h2->tp_len = skb->len;
h.h2->tp_snaplen = snaplen;
h.h2->tp_mac = macoff;
h.h2->tp_net = netoff;
h.h2->tp_sec = ts.tv_sec;
h.h2->tp_nsec = ts.tv_nsec;
if (vlan_tx_tag_present(skb)) {
h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
status |= TP_STATUS_VLAN_VALID;
} else {
h.h2->tp_vlan_tci = 0;
}
h.h2->tp_padding = 0;
hdrlen = sizeof(*h.h2);
break;
case TPACKET_V3:
/* tp_next_offset and vlan are already populated above,
* so DON'T clear those fields here
*/
h.h3->tp_status |= status;
h.h3->tp_len = skb->len;
h.h3->tp_snaplen = snaplen;
h.h3->tp_mac = macoff;
h.h3->tp_net = netoff;
h.h3->tp_sec = ts.tv_sec;
h.h3->tp_nsec = ts.tv_nsec;
hdrlen = sizeof(*h.h3);
break;
default:
BUG();
}
sll = h.raw + TPACKET_ALIGN(hdrlen);
sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
sll->sll_family = AF_PACKET;
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(po->origdev))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
smp_mb();
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
{
u8 *start, *end;
if (po->tp_version <= TPACKET_V2) {
end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
+ macoff + snaplen);
for (start = h.raw; start < end; start += PAGE_SIZE)
flush_dcache_page(pgv_to_page(start));
}
smp_wmb();
}
#endif
if (po->tp_version <= TPACKET_V2)
__packet_set_status(po, h.raw, status);
else
prb_clear_blk_fill_status(&po->rx_ring);
sk->sk_data_ready(sk, 0);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
skb->data = skb_head;
skb->len = skb_len;
}
drop:
kfree_skb(skb);
return 0;
ring_is_full:
po->stats.stats1.tp_drops++;
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk, 0);
kfree_skb(copy_skb);
goto drop_n_restore;
}
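/*
 * A note on how the ring filled above is consumed: frames are written in
 * place, so a reader never calls recvfrom() for ring traffic; it polls the
 * per-frame status word instead. A minimal userspace sketch, assuming
 * TPACKET_V2 and a ring already configured via PACKET_RX_RING and mmap()
 * (pfd, ring, i, frame_size and handle() are assumed context; error
 * handling omitted):
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + i * frame_size);
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel
 *	handle((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *	__sync_synchronize();			// finish reads before release
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the frame back
 */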
static void tpacket_destruct_skb(struct sk_buff *skb)
{
struct packet_sock *po = pkt_sk(skb->sk);
void *ph;
if (likely(po->tx_ring.pg_vec)) {
__u32 ts;
ph = skb_shinfo(skb)->destructor_arg;
BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
atomic_dec(&po->tx_ring.pending);
ts = __packet_set_timestamp(po, ph, skb);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
}
sock_wfree(skb);
}
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
void *frame, struct net_device *dev, int size_max,
__be16 proto, unsigned char *addr, int hlen)
{
union tpacket_uhdr ph;
int to_write, offset, len, tp_len, nr_frags, len_max;
struct socket *sock = po->sk.sk_socket;
struct page *page;
void *data;
int err;
ph.raw = frame;
skb->protocol = proto;
skb->dev = dev;
skb->priority = po->sk.sk_priority;
skb->mark = po->sk.sk_mark;
sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
skb_shinfo(skb)->destructor_arg = ph.raw;
switch (po->tp_version) {
case TPACKET_V2:
tp_len = ph.h2->tp_len;
break;
default:
tp_len = ph.h1->tp_len;
break;
}
if (unlikely(tp_len > size_max)) {
pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
return -EMSGSIZE;
}
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
skb_probe_transport_header(skb, 0);
if (po->tp_tx_has_off) {
int off_min, off_max, off;
off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
off_max = po->tx_ring.frame_size - tp_len;
if (sock->type == SOCK_DGRAM) {
switch (po->tp_version) {
case TPACKET_V2:
off = ph.h2->tp_net;
break;
default:
off = ph.h1->tp_net;
break;
}
} else {
switch (po->tp_version) {
case TPACKET_V2:
off = ph.h2->tp_mac;
break;
default:
off = ph.h1->tp_mac;
break;
}
}
if (unlikely((off < off_min) || (off_max < off)))
return -EINVAL;
data = ph.raw + off;
} else {
data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
}
to_write = tp_len;
if (sock->type == SOCK_DGRAM) {
err = dev_hard_header(skb, dev, ntohs(proto), addr,
NULL, tp_len);
if (unlikely(err < 0))
return -EINVAL;
} else if (dev->hard_header_len) {
/* net device doesn't like empty head */
if (unlikely(tp_len <= dev->hard_header_len)) {
pr_err("packet size is too short (%d < %d)\n",
tp_len, dev->hard_header_len);
return -EINVAL;
}
skb_push(skb, dev->hard_header_len);
err = skb_store_bits(skb, 0, data,
dev->hard_header_len);
if (unlikely(err))
return err;
data += dev->hard_header_len;
to_write -= dev->hard_header_len;
}
offset = offset_in_page(data);
len_max = PAGE_SIZE - offset;
len = ((to_write > len_max) ? len_max : to_write);
skb->data_len = to_write;
skb->len += to_write;
skb->truesize += to_write;
atomic_add(to_write, &po->sk.sk_wmem_alloc);
while (likely(to_write)) {
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
pr_err("Packet exceed the number of skb frags(%lu)\n",
MAX_SKB_FRAGS);
return -EFAULT;
}
page = pgv_to_page(data);
data += len;
flush_dcache_page(page);
get_page(page);
skb_fill_page_desc(skb, nr_frags, page, offset, len);
to_write -= len;
offset = 0;
len_max = PAGE_SIZE;
len = ((to_write > len_max) ? len_max : to_write);
}
return tp_len;
}
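/*
 * Note that tpacket_fill_skb() never copies the payload out of the ring:
 * each PAGE_SIZE chunk of the frame is attached to the skb as a page
 * fragment, with get_page() pinning the page until the skb is freed. The
 * destructor_arg stashed above is what later lets tpacket_destruct_skb()
 * flip the frame status back to TP_STATUS_AVAILABLE.
 */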
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
struct sk_buff *skb;
struct net_device *dev;
__be16 proto;
bool need_rls_dev = false;
int err, reserve = 0;
void *ph;
struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
int tp_len, size_max;
unsigned char *addr;
int len_sum = 0;
int status = TP_STATUS_AVAILABLE;
int hlen, tlen;
mutex_lock(&po->pg_vec_lock);
if (saddr == NULL) {
dev = po->prot_hook.dev;
proto = po->num;
addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
goto out;
if (msg->msg_namelen < (saddr->sll_halen
+ offsetof(struct sockaddr_ll,
sll_addr)))
goto out;
proto = saddr->sll_protocol;
addr = saddr->sll_addr;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
need_rls_dev = true;
}
err = -ENXIO;
if (unlikely(dev == NULL))
goto out;
reserve = dev->hard_header_len;
err = -ENETDOWN;
if (unlikely(!(dev->flags & IFF_UP)))
goto out_put;
size_max = po->tx_ring.frame_size
- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
if (size_max > dev->mtu + reserve)
size_max = dev->mtu + reserve;
do {
ph = packet_current_frame(po, &po->tx_ring,
TP_STATUS_SEND_REQUEST);
if (unlikely(ph == NULL)) {
schedule();
continue;
}
status = TP_STATUS_SEND_REQUEST;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(&po->sk,
hlen + tlen + sizeof(struct sockaddr_ll),
0, &err);
if (unlikely(skb == NULL))
goto out_status;
tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
addr, hlen);
if (unlikely(tp_len < 0)) {
if (po->tp_loss) {
__packet_set_status(po, ph,
TP_STATUS_AVAILABLE);
packet_increment_head(&po->tx_ring);
kfree_skb(skb);
continue;
} else {
status = TP_STATUS_WRONG_FORMAT;
err = tp_len;
goto out_status;
}
}
skb->destructor = tpacket_destruct_skb;
__packet_set_status(po, ph, TP_STATUS_SENDING);
atomic_inc(&po->tx_ring.pending);
status = TP_STATUS_SEND_REQUEST;
err = dev_queue_xmit(skb);
if (unlikely(err > 0)) {
err = net_xmit_errno(err);
if (err && __packet_get_status(po, ph) ==
TP_STATUS_AVAILABLE) {
/* skb was destructed already */
skb = NULL;
goto out_status;
}
/*
* skb was dropped but not destructed yet;
* let's treat it like congestion or err < 0
*/
err = 0;
}
packet_increment_head(&po->tx_ring);
len_sum += tp_len;
} while (likely((ph != NULL) ||
((!(msg->msg_flags & MSG_DONTWAIT)) &&
(atomic_read(&po->tx_ring.pending))))
);
err = len_sum;
goto out_put;
out_status:
__packet_set_status(po, ph, status);
kfree_skb(skb);
out_put:
if (need_rls_dev)
dev_put(dev);
out:
mutex_unlock(&po->pg_vec_lock);
return err;
}
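/*
 * The matching userspace side of the loop above, sketched under the same
 * TPACKET_V2 assumption (frame already mapped via PACKET_TX_RING and
 * mmap(); next_tx_frame() is a hypothetical helper that walks the ring):
 *
 *	struct tpacket2_hdr *hdr = next_tx_frame(ring);
 *	hdr->tp_len = pkt_len;
 *	memcpy((char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
 *	       pkt, pkt_len);		// same offset tpacket_fill_skb() reads
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);		// kicks tpacket_snd()
 */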
static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
size_t reserve, size_t len,
size_t linear, int noblock,
int *err)
{
struct sk_buff *skb;
/* Under a page? Don't bother with paged skb. */
if (prepad + len < PAGE_SIZE || !linear)
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
err, 0);
if (!skb)
return NULL;
skb_reserve(skb, reserve);
skb_put(skb, linear);
skb->data_len = len - linear;
skb->len += len - linear;
return skb;
}
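/*
 * The "linear" argument is only a hint: anything smaller than a page is
 * cheaper to keep fully linear, so the paged layout is used only for the
 * larger sends where the caller (packet_snd() below) passes
 * vnet_hdr.hdr_len to bound how much must stay linear.
 */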
static int packet_snd(struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
struct sk_buff *skb;
struct net_device *dev;
__be16 proto;
bool need_rls_dev = false;
unsigned char *addr;
int err, reserve = 0;
struct virtio_net_hdr vnet_hdr = { 0 };
int offset = 0;
int vnet_hdr_len;
struct packet_sock *po = pkt_sk(sk);
unsigned short gso_type = 0;
int hlen, tlen;
int extra_len = 0;
/*
* Get and verify the address.
*/
if (saddr == NULL) {
dev = po->prot_hook.dev;
proto = po->num;
addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
goto out;
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
addr = saddr->sll_addr;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
need_rls_dev = true;
}
err = -ENXIO;
if (dev == NULL)
goto out_unlock;
if (sock->type == SOCK_RAW)
reserve = dev->hard_header_len;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_unlock;
if (po->has_vnet_hdr) {
vnet_hdr_len = sizeof(vnet_hdr);
err = -EINVAL;
if (len < vnet_hdr_len)
goto out_unlock;
len -= vnet_hdr_len;
err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
vnet_hdr_len);
if (err < 0)
goto out_unlock;
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
(vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
vnet_hdr.hdr_len))
vnet_hdr.hdr_len = vnet_hdr.csum_start +
vnet_hdr.csum_offset + 2;
err = -EINVAL;
if (vnet_hdr.hdr_len > len)
goto out_unlock;
if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
gso_type = SKB_GSO_UDP;
break;
default:
goto out_unlock;
}
if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
gso_type |= SKB_GSO_TCP_ECN;
if (vnet_hdr.gso_size == 0)
goto out_unlock;
}
}
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
if (!netif_supports_nofcs(dev)) {
err = -EPROTONOSUPPORT;
goto out_unlock;
}
extra_len = 4; /* We're doing our own CRC */
}
err = -EMSGSIZE;
if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
goto out_unlock;
err = -ENOBUFS;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
skb_set_network_header(skb, reserve);
err = -EINVAL;
if (sock->type == SOCK_DGRAM &&
(offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
goto out_free;
/* Returns -EFAULT on error */
err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
if (err)
goto out_free;
sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
/* Earlier code assumed this would be a VLAN pkt,
* double-check this now that we have the actual
* packet in hand.
*/
struct ethhdr *ehdr;
skb_reset_mac_header(skb);
ehdr = eth_hdr(skb);
if (ehdr->h_proto != htons(ETH_P_8021Q)) {
err = -EMSGSIZE;
goto out_free;
}
}
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
if (po->has_vnet_hdr) {
if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
vnet_hdr.csum_offset)) {
err = -EINVAL;
goto out_free;
}
}
skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
skb_shinfo(skb)->gso_type = gso_type;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
len += vnet_hdr_len;
}
skb_probe_transport_header(skb, reserve);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
/*
* Now send it
*/
err = dev_queue_xmit(skb);
if (err > 0 && (err = net_xmit_errno(err)) != 0)
goto out_unlock;
if (need_rls_dev)
dev_put(dev);
return len;
out_free:
kfree_skb(skb);
out_unlock:
if (dev && need_rls_dev)
dev_put(dev);
out:
return err;
}
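/*
 * With PACKET_VNET_HDR enabled, the iovec handed to sendmsg() must start
 * with a struct virtio_net_hdr, as consumed at the top of packet_snd().
 * A rough userspace sketch, assuming a raw socket already bound to the
 * device (not a complete program):
 *
 *	struct virtio_net_hdr vh = { .gso_type = VIRTIO_NET_HDR_GSO_NONE };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &vh,   .iov_len = sizeof(vh) },
 *		{ .iov_base = frame, .iov_len = frame_len  },
 *	};
 *	struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };
 *	sendmsg(fd, &mh, 0);
 */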
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
if (po->tx_ring.pg_vec)
return tpacket_snd(po, msg);
else
return packet_snd(sock, msg, len);
}
/*
* Close a PACKET socket. This is fairly simple. We immediately go
* to 'closed' state and remove our protocol entry in the device list.
*/
static int packet_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct packet_sock *po;
struct net *net;
union tpacket_req_u req_u;
if (!sk)
return 0;
net = sock_net(sk);
po = pkt_sk(sk);
mutex_lock(&net->packet.sklist_lock);
sk_del_node_init_rcu(sk);
mutex_unlock(&net->packet.sklist_lock);
preempt_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
preempt_enable();
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, false);
if (po->prot_hook.dev) {
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
packet_flush_mclist(sk);
if (po->rx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
}
if (po->tx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
}
fanout_release(sk);
synchronize_net();
/*
* Now the socket is dead. No more input will appear.
*/
sock_orphan(sk);
sock->sk = NULL;
/* Purge queues */
skb_queue_purge(&sk->sk_receive_queue);
sk_refcnt_debug_release(sk);
sock_put(sk);
return 0;
}
/*
* Attach a packet hook.
*/
static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
struct packet_sock *po = pkt_sk(sk);
if (po->fanout) {
if (dev)
dev_put(dev);
return -EINVAL;
}
lock_sock(sk);
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, true);
po->num = protocol;
po->prot_hook.type = protocol;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
po->prot_hook.dev = dev;
po->ifindex = dev ? dev->ifindex : 0;
if (protocol == 0)
goto out_unlock;
if (!dev || (dev->flags & IFF_UP)) {
register_prot_hook(sk);
} else {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
}
out_unlock:
spin_unlock(&po->bind_lock);
release_sock(sk);
return 0;
}
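/*
 * Note the ordering above: the old hook is always unregistered first, the
 * device reference is swapped under bind_lock, and the hook is only
 * re-registered when a non-zero protocol was requested and the device (if
 * any) is up. Binding with protocol 0 therefore detaches the socket.
 */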
/*
* Bind a packet socket to a device
*/
static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sock *sk = sock->sk;
char name[15];
struct net_device *dev;
int err = -ENODEV;
/*
* Check legality
*/
if (addr_len != sizeof(struct sockaddr))
return -EINVAL;
strlcpy(name, uaddr->sa_data, sizeof(name));
dev = dev_get_by_name(sock_net(sk), name);
if (dev)
err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
return err;
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
struct sock *sk = sock->sk;
struct net_device *dev = NULL;
int err;
/*
* Check legality
*/
if (addr_len < sizeof(struct sockaddr_ll))
return -EINVAL;
if (sll->sll_family != AF_PACKET)
return -EINVAL;
if (sll->sll_ifindex) {
err = -ENODEV;
dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
if (dev == NULL)
goto out;
}
err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
out:
return err;
}
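/*
 * A minimal caller of the above, assuming the usual userspace headers
 * (<sys/socket.h>, <linux/if_packet.h>, <net/if.h>); illustrative only:
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */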
static struct proto packet_proto = {
.name = "PACKET",
.owner = THIS_MODULE,
.obj_size = sizeof(struct packet_sock),
};
/*
* Create a packet of type SOCK_PACKET.
*/
static int packet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct packet_sock *po;
__be16 proto = (__force __be16)protocol; /* weird, but documented */
int err;
if (!ns_capable(net->user_ns, CAP_NET_RAW))
return -EPERM;
if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
sock->type != SOCK_PACKET)
return -ESOCKTNOSUPPORT;
sock->state = SS_UNCONNECTED;
err = -ENOBUFS;
sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
if (sk == NULL)
goto out;
sock->ops = &packet_ops;
if (sock->type == SOCK_PACKET)
sock->ops = &packet_ops_spkt;
sock_init_data(sock, sk);
po = pkt_sk(sk);
sk->sk_family = PF_PACKET;
po->num = proto;
sk->sk_destruct = packet_sock_destruct;
sk_refcnt_debug_inc(sk);
/*
* Attach a protocol block
*/
spin_lock_init(&po->bind_lock);
mutex_init(&po->pg_vec_lock);
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;
po->prot_hook.af_packet_priv = sk;
if (proto) {
po->prot_hook.type = proto;
register_prot_hook(sk);
}
mutex_lock(&net->packet.sklist_lock);
sk_add_node_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);
preempt_disable();
sock_prot_inuse_add(net, &packet_proto, 1);
preempt_enable();
return 0;
out:
return err;
}
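/*
 * Creating any PF_PACKET socket requires CAP_NET_RAW in the owning user
 * namespace, as checked at the top of packet_create(). Sketch:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	if (fd < 0 && errno == EPERM)
 *		;	// caller lacks CAP_NET_RAW
 */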
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
*/
static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
struct sockaddr_ll *sll;
int vnet_hdr_len = 0;
err = -EINVAL;
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
goto out;
#if 0
/* What error should we return now? EUNATTACH? */
if (pkt_sk(sk)->ifindex < 0)
return -ENODEV;
#endif
if (flags & MSG_ERRQUEUE) {
err = sock_recv_errqueue(sk, msg, len,
SOL_PACKET, PACKET_TX_TIMESTAMP);
goto out;
}
/*
* Call the generic datagram receiver. This handles all sorts
* of horrible races and re-entrancy so we can forget about it
* in the protocol layers.
*
* Now it will return ENETDOWN if the device has just gone down,
* but after that it will block.
*/
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
/*
 * If an error occurred, return it. skb_recv_datagram()
 * handles the blocking for us, so we don't need to see or
 * worry about blocking retries.
 */
if (skb == NULL)
goto out;
if (pkt_sk(sk)->has_vnet_hdr) {
struct virtio_net_hdr vnet_hdr = { 0 };
err = -EINVAL;
vnet_hdr_len = sizeof(vnet_hdr);
if (len < vnet_hdr_len)
goto out_free;
len -= vnet_hdr_len;
if (skb_is_gso(skb)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
/* This is a hint as to how much should be linear. */
vnet_hdr.hdr_len = skb_headlen(skb);
vnet_hdr.gso_size = sinfo->gso_size;
if (sinfo->gso_type & SKB_GSO_TCPV4)
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
else if (sinfo->gso_type & SKB_GSO_UDP)
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else if (sinfo->gso_type & SKB_GSO_FCOE)
goto out_free;
else
BUG();
if (sinfo->gso_type & SKB_GSO_TCP_ECN)
vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
} else
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
vnet_hdr.csum_start = skb_checksum_start_offset(skb);
vnet_hdr.csum_offset = skb->csum_offset;
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
vnet_hdr_len);
if (err < 0)
goto out_free;
}
/*
* If the address length field is there to be filled in, we fill
* it in now.
*/
sll = &PACKET_SKB_CB(skb)->sa.ll;
if (sock->type == SOCK_PACKET)
msg->msg_namelen = sizeof(struct sockaddr_pkt);
else
msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
/*
* You lose any data beyond the buffer you gave. If it worries a
* user program they can ask the device for its MTU anyway.
*/
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto out_free;
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name)
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
msg->msg_namelen);
if (pkt_sk(sk)->auxdata) {
struct tpacket_auxdata aux;
aux.tp_status = TP_STATUS_USER;
if (skb->ip_summed == CHECKSUM_PARTIAL)
aux.tp_status |= TP_STATUS_CSUMNOTREADY;
aux.tp_len = PACKET_SKB_CB(skb)->origlen;
aux.tp_snaplen = skb->len;
aux.tp_mac = 0;
aux.tp_net = skb_network_offset(skb);
if (vlan_tx_tag_present(skb)) {
aux.tp_vlan_tci = vlan_tx_tag_get(skb);
aux.tp_status |= TP_STATUS_VLAN_VALID;
} else {
aux.tp_vlan_tci = 0;
}
aux.tp_padding = 0;
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
/*
* Free or return the buffer as appropriate. Again this
* hides all the races and re-entrancy issues from us.
*/
err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
out_free:
skb_free_datagram(sk, skb);
out:
return err;
}
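/*
 * When PACKET_AUXDATA is enabled, callers pick up the metadata filled in
 * above from the control buffer. A sketch, assuming msg_control is large
 * enough and "mh" is the struct msghdr passed to recvmsg():
 *
 *	struct cmsghdr *c;
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c)) {
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(c);
 *			// aux->tp_snaplen, aux->tp_vlan_tci, ...
 *		}
 *	}
 */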
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct net_device *dev;
struct sock *sk = sock->sk;
if (peer)
return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET;
memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev)
strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
rcu_read_unlock();
*uaddr_len = sizeof(*uaddr);
return 0;
}
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct net_device *dev;
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
if (peer)
return -EOPNOTSUPP;
sll->sll_family = AF_PACKET;
sll->sll_ifindex = po->ifindex;
sll->sll_protocol = po->num;
sll->sll_pkttype = 0;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
} else {
sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
sll->sll_halen = 0;
}
rcu_read_unlock();
*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
return 0;
}
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
int what)
{
switch (i->type) {
case PACKET_MR_MULTICAST:
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
return dev_mc_add(dev, i->addr);
else
return dev_mc_del(dev, i->addr);
break;
case PACKET_MR_PROMISC:
return dev_set_promiscuity(dev, what);
break;
case PACKET_MR_ALLMULTI:
return dev_set_allmulti(dev, what);
break;
case PACKET_MR_UNICAST:
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
return dev_uc_add(dev, i->addr);
else
return dev_uc_del(dev, i->addr);
break;
default:
break;
}
return 0;
}
static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
for ( ; i; i = i->next) {
if (i->ifindex == dev->ifindex)
packet_dev_mc(dev, i, what);
}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_mclist *ml, *i;
struct net_device *dev;
int err;
rtnl_lock();
err = -ENODEV;
dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
if (!dev)
goto done;
err = -EINVAL;
if (mreq->mr_alen > dev->addr_len)
goto done;
err = -ENOBUFS;
i = kmalloc(sizeof(*i), GFP_KERNEL);
if (i == NULL)
goto done;
err = 0;
for (ml = po->mclist; ml; ml = ml->next) {
if (ml->ifindex == mreq->mr_ifindex &&
ml->type == mreq->mr_type &&
ml->alen == mreq->mr_alen &&
memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
ml->count++;
/* Free the new element ... */
kfree(i);
goto done;
}
}
i->type = mreq->mr_type;
i->ifindex = mreq->mr_ifindex;
i->alen = mreq->mr_alen;
memcpy(i->addr, mreq->mr_address, i->alen);
i->count = 1;
i->next = po->mclist;
po->mclist = i;
err = packet_dev_mc(dev, i, 1);
if (err) {
po->mclist = i->next;
kfree(i);
}
done:
rtnl_unlock();
return err;
}
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
struct packet_mclist *ml, **mlp;
rtnl_lock();
for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
if (ml->ifindex == mreq->mr_ifindex &&
ml->type == mreq->mr_type &&
ml->alen == mreq->mr_alen &&
memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
if (--ml->count == 0) {
struct net_device *dev;
*mlp = ml->next;
dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
if (dev)
packet_dev_mc(dev, ml, -1);
kfree(ml);
}
rtnl_unlock();
return 0;
}
}
rtnl_unlock();
return -EADDRNOTAVAIL;
}
static void packet_flush_mclist(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_mclist *ml;
if (!po->mclist)
return;
rtnl_lock();
while ((ml = po->mclist) != NULL) {
struct net_device *dev;
po->mclist = ml->next;
dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
if (dev != NULL)
packet_dev_mc(dev, ml, -1);
kfree(ml);
}
rtnl_unlock();
}
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
int ret;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
switch (optname) {
case PACKET_ADD_MEMBERSHIP:
case PACKET_DROP_MEMBERSHIP:
{
struct packet_mreq_max mreq;
int len = optlen;
memset(&mreq, 0, sizeof(mreq));
if (len < sizeof(struct packet_mreq))
return -EINVAL;
if (len > sizeof(mreq))
len = sizeof(mreq);
if (copy_from_user(&mreq, optval, len))
return -EFAULT;
if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
return -EINVAL;
if (optname == PACKET_ADD_MEMBERSHIP)
ret = packet_mc_add(sk, &mreq);
else
ret = packet_mc_drop(sk, &mreq);
return ret;
}
case PACKET_RX_RING:
case PACKET_TX_RING:
{
union tpacket_req_u req_u;
int len;
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
len = sizeof(req_u.req);
break;
case TPACKET_V3:
default:
len = sizeof(req_u.req3);
break;
}
if (optlen < len)
return -EINVAL;
if (pkt_sk(sk)->has_vnet_hdr)
return -EINVAL;
if (copy_from_user(&req_u.req, optval, len))
return -EFAULT;
return packet_set_ring(sk, &req_u, 0,
optname == PACKET_TX_RING);
}
case PACKET_COPY_THRESH:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
pkt_sk(sk)->copy_thresh = val;
return 0;
}
case PACKET_VERSION:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
case TPACKET_V3:
po->tp_version = val;
return 0;
default:
return -EINVAL;
}
}
case PACKET_RESERVE:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_reserve = val;
return 0;
}
case PACKET_LOSS:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_loss = !!val;
return 0;
}
case PACKET_AUXDATA:
{
int val;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->auxdata = !!val;
return 0;
}
case PACKET_ORIGDEV:
{
int val;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->origdev = !!val;
return 0;
}
case PACKET_VNET_HDR:
{
int val;
if (sock->type != SOCK_RAW)
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->has_vnet_hdr = !!val;
return 0;
}
case PACKET_TIMESTAMP:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_tstamp = val;
return 0;
}
case PACKET_FANOUT:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
return fanout_add(sk, val & 0xffff, val >> 16);
}
case PACKET_TX_HAS_OFF:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_tx_has_off = !!val;
return 0;
}
default:
return -ENOPROTOOPT;
}
}
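/*
 * A typical option sequence before mapping a ring (sketch; both options
 * must be set while no ring exists, or they fail with -EBUSY):
 *
 *	int ver = TPACKET_V2;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	unsigned int reserve = 128;
 *	setsockopt(fd, SOL_PACKET, PACKET_RESERVE, &reserve, sizeof(reserve));
 *
 * Observation, not a fix: PACKET_RESERVE accepts any unsigned int, while
 * tpacket_rcv() folds tp_reserve into "unsigned short macoff, netoff", so
 * very large values silently truncate; an explicit upper bound here may
 * be worth considering.
 */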
static int packet_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
int val, lv = sizeof(val);
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
void *data = &val;
union tpacket_stats_u st;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case PACKET_STATISTICS:
spin_lock_bh(&sk->sk_receive_queue.lock);
memcpy(&st, &po->stats, sizeof(st));
memset(&po->stats, 0, sizeof(po->stats));
spin_unlock_bh(&sk->sk_receive_queue.lock);
if (po->tp_version == TPACKET_V3) {
lv = sizeof(struct tpacket_stats_v3);
st.stats3.tp_packets += st.stats3.tp_drops;
data = &st.stats3;
} else {
lv = sizeof(struct tpacket_stats);
st.stats1.tp_packets += st.stats1.tp_drops;
data = &st.stats1;
}
break;
case PACKET_AUXDATA:
val = po->auxdata;
break;
case PACKET_ORIGDEV:
val = po->origdev;
break;
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
break;
case PACKET_VERSION:
val = po->tp_version;
break;
case PACKET_HDRLEN:
if (len > sizeof(int))
len = sizeof(int);
if (copy_from_user(&val, optval, len))
return -EFAULT;
switch (val) {
case TPACKET_V1:
val = sizeof(struct tpacket_hdr);
break;
case TPACKET_V2:
val = sizeof(struct tpacket2_hdr);
break;
case TPACKET_V3:
val = sizeof(struct tpacket3_hdr);
break;
default:
return -EINVAL;
}
break;
case PACKET_RESERVE:
val = po->tp_reserve;
break;
case PACKET_LOSS:
val = po->tp_loss;
break;
case PACKET_TIMESTAMP:
val = po->tp_tstamp;
break;
case PACKET_FANOUT:
val = (po->fanout ?
((u32)po->fanout->id |
((u32)po->fanout->type << 16) |
((u32)po->fanout->flags << 24)) :
0);
break;
case PACKET_TX_HAS_OFF:
val = po->tp_tx_has_off;
break;
default:
return -ENOPROTOOPT;
}
if (len > lv)
len = lv;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, data, len))
return -EFAULT;
return 0;
}
static int packet_notifier(struct notifier_block *this,
unsigned long msg, void *ptr)
{
struct sock *sk;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
rcu_read_lock();
sk_for_each_rcu(sk, &net->packet.sklist) {
struct packet_sock *po = pkt_sk(sk);
switch (msg) {
case NETDEV_UNREGISTER:
if (po->mclist)
packet_dev_mclist(dev, po->mclist, -1);
/* fallthrough */
case NETDEV_DOWN:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
if (po->running) {
__unregister_prot_hook(sk, false);
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
po->ifindex = -1;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
}
break;
case NETDEV_UP:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
if (po->num)
register_prot_hook(sk);
spin_unlock(&po->bind_lock);
}
break;
}
}
rcu_read_unlock();
return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = sock->sk;
switch (cmd) {
case SIOCOUTQ:
{
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ:
{
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
case SIOCGSTAMP:
return sock_get_timestamp(sk, (struct timeval __user *)arg);
case SIOCGSTAMPNS:
return sock_get_timestampns(sk, (struct timespec __user *)arg);
#ifdef CONFIG_INET
case SIOCADDRT:
case SIOCDELRT:
case SIOCDARP:
case SIOCGARP:
case SIOCSARP:
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCSIFFLAGS:
return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif
default:
return -ENOIOCTLCMD;
}
return 0;
}
static unsigned int packet_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
unsigned int mask = datagram_poll(file, sock, wait);
spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->rx_ring.pg_vec) {
if (!packet_previous_rx_frame(po, &po->rx_ring,
TP_STATUS_KERNEL))
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
spin_lock_bh(&sk->sk_write_queue.lock);
if (po->tx_ring.pg_vec) {
if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
mask |= POLLOUT | POLLWRNORM;
}
spin_unlock_bh(&sk->sk_write_queue.lock);
return mask;
}
/* Dirty? Well, I still haven't learned a better way to account
 * for user mmaps.
 */
static void packet_mm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
atomic_inc(&pkt_sk(sk)->mapped);
}
static void packet_mm_close(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
atomic_dec(&pkt_sk(sk)->mapped);
}
static const struct vm_operations_struct packet_mmap_ops = {
.open = packet_mm_open,
.close = packet_mm_close,
};
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
unsigned int len)
{
int i;
for (i = 0; i < len; i++) {
if (likely(pg_vec[i].buffer)) {
if (is_vmalloc_addr(pg_vec[i].buffer))
vfree(pg_vec[i].buffer);
else
free_pages((unsigned long)pg_vec[i].buffer,
order);
pg_vec[i].buffer = NULL;
}
}
kfree(pg_vec);
}
static char *alloc_one_pg_vec_page(unsigned long order)
{
char *buffer = NULL;
gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
__GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
buffer = (char *) __get_free_pages(gfp_flags, order);
if (buffer)
return buffer;
/*
* __get_free_pages failed, fall back to vmalloc
*/
buffer = vzalloc((1 << order) * PAGE_SIZE);
if (buffer)
return buffer;
/*
* vmalloc failed, let's dig into swap here
*/
gfp_flags &= ~__GFP_NORETRY;
buffer = (char *)__get_free_pages(gfp_flags, order);
if (buffer)
return buffer;
/*
* complete and utter failure
*/
return NULL;
}
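/*
 * The three attempts above trade reliability for latency: first a cheap
 * __GFP_NORETRY grab of contiguous pages, then vmalloc for memory that
 * need not be physically contiguous, and finally a blocking
 * __get_free_pages() that is allowed to reclaim and swap.
 */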
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
unsigned int block_nr = req->tp_block_nr;
struct pgv *pg_vec;
int i;
pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
if (unlikely(!pg_vec))
goto out;
for (i = 0; i < block_nr; i++) {
pg_vec[i].buffer = alloc_one_pg_vec_page(order);
if (unlikely(!pg_vec[i].buffer))
goto out_free_pgvec;
}
out:
return pg_vec;
out_free_pgvec:
free_pg_vec(pg_vec, order, block_nr);
pg_vec = NULL;
goto out;
}
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
int err = -EINVAL;
/* Alias added to keep code churn minimal */
struct tpacket_req *req = &req_u->req;
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
WARN(1, "Tx-ring is not supported.\n");
goto out;
}
rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
err = -EBUSY;
if (!closing) {
if (atomic_read(&po->mapped))
goto out;
if (atomic_read(&rb->pending))
goto out;
}
if (req->tp_block_nr) {
/* Sanity tests and some calculations */
err = -EBUSY;
if (unlikely(rb->pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V1:
po->tp_hdrlen = TPACKET_HDRLEN;
break;
case TPACKET_V2:
po->tp_hdrlen = TPACKET2_HDRLEN;
break;
case TPACKET_V3:
po->tp_hdrlen = TPACKET3_HDRLEN;
break;
}
err = -EINVAL;
if (unlikely((int)req->tp_block_size <= 0))
goto out;
if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;
rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
if (unlikely(rb->frames_per_block <= 0))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
err = -ENOMEM;
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V3:
/* The transmit path is not supported. We checked
 * this above, but just being paranoid.
 */
if (!tx_ring)
init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
break;
default:
break;
}
}
/* Done */
else {
err = -EINVAL;
if (unlikely(req->tp_frame_nr))
goto out;
}
lock_sock(sk);
/* Detach socket from network */
spin_lock(&po->bind_lock);
was_running = po->running;
num = po->num;
if (was_running) {
po->num = 0;
__unregister_prot_hook(sk, false);
}
spin_unlock(&po->bind_lock);
synchronize_net();
err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
spin_lock_bh(&rb_queue->lock);
swap(rb->pg_vec, pg_vec);
rb->frame_max = (req->tp_frame_nr - 1);
rb->head = 0;
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);
swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
po->prot_hook.func = (po->rx_ring.pg_vec) ?
tpacket_rcv : packet_rcv;
skb_queue_purge(rb_queue);
if (atomic_read(&po->mapped))
pr_err("packet_mmap: vma is busy: %d\n",
atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
if (was_running) {
po->num = num;
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
if (closing && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
}
release_sock(sk);
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
return err;
}
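/*
 * A concrete example of a request that satisfies the geometry checks in
 * packet_set_ring(), assuming PAGE_SIZE == 4096 (values illustrative):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,	// multiple of PAGE_SIZE
 *		.tp_frame_size = 2048,	// multiple of TPACKET_ALIGNMENT
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,	// (4096 / 2048) * 64
 *	};
 *
 * frames_per_block = 4096 / 2048 = 2 and 2 * 64 == tp_frame_nr, so the
 * request is accepted; any mismatch is rejected with -EINVAL.
 */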
static int packet_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
unsigned long size, expected_size;
struct packet_ring_buffer *rb;
unsigned long start;
int err = -EINVAL;
int i;
if (vma->vm_pgoff)
return -EINVAL;
mutex_lock(&po->pg_vec_lock);
expected_size = 0;
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
if (rb->pg_vec) {
expected_size += rb->pg_vec_len
* rb->pg_vec_pages
* PAGE_SIZE;
}
}
if (expected_size == 0)
goto out;
size = vma->vm_end - vma->vm_start;
if (size != expected_size)
goto out;
start = vma->vm_start;
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
if (rb->pg_vec == NULL)
continue;
for (i = 0; i < rb->pg_vec_len; i++) {
struct page *page;
void *kaddr = rb->pg_vec[i].buffer;
int pg_num;
for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
page = pgv_to_page(kaddr);
err = vm_insert_page(vma, start, page);
if (unlikely(err))
goto out;
start += PAGE_SIZE;
kaddr += PAGE_SIZE;
}
}
}
atomic_inc(&po->mapped);
vma->vm_ops = &packet_mmap_ops;
err = 0;
out:
mutex_unlock(&po->pg_vec_lock);
return err;
}
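/*
 * Both rings map into a single VMA, rx first then tx, which is why the
 * expected size is the sum over both pg_vecs and why vm_pgoff must be 0.
 * Userspace would typically do one mmap(NULL, rx_size + tx_size, ...) and
 * derive the tx base as map + rx_size (a convention, not enforced here).
 */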
static const struct proto_ops packet_ops_spkt = {
.family = PF_PACKET,
.owner = THIS_MODULE,
.release = packet_release,
.bind = packet_bind_spkt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = packet_getname_spkt,
.poll = datagram_poll,
.ioctl = packet_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = packet_sendmsg_spkt,
.recvmsg = packet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static const struct proto_ops packet_ops = {
.family = PF_PACKET,
.owner = THIS_MODULE,
.release = packet_release,
.bind = packet_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = packet_getname,
.poll = packet_poll,
.ioctl = packet_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = packet_setsockopt,
.getsockopt = packet_getsockopt,
.sendmsg = packet_sendmsg,
.recvmsg = packet_recvmsg,
.mmap = packet_mmap,
.sendpage = sock_no_sendpage,
};
static const struct net_proto_family packet_family_ops = {
.family = PF_PACKET,
.create = packet_create,
.owner = THIS_MODULE,
};
static struct notifier_block packet_netdev_notifier = {
.notifier_call = packet_notifier,
};
#ifdef CONFIG_PROC_FS
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct net *net = seq_file_net(seq);
rcu_read_lock();
return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}
static void packet_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int packet_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
else {
struct sock *s = sk_entry(v);
const struct packet_sock *po = pkt_sk(s);
seq_printf(seq,
"%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
s,
atomic_read(&s->sk_refcnt),
s->sk_type,
ntohs(po->num),
po->ifindex,
po->running,
atomic_read(&s->sk_rmem_alloc),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
sock_i_ino(s));
}
return 0;
}
static const struct seq_operations packet_seq_ops = {
.start = packet_seq_start,
.next = packet_seq_next,
.stop = packet_seq_stop,
.show = packet_seq_show,
};
static int packet_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &packet_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations packet_seq_fops = {
.owner = THIS_MODULE,
.open = packet_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
#endif
static int __net_init packet_net_init(struct net *net)
{
mutex_init(&net->packet.sklist_lock);
INIT_HLIST_HEAD(&net->packet.sklist);
if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
return -ENOMEM;
return 0;
}
static void __net_exit packet_net_exit(struct net *net)
{
remove_proc_entry("packet", net->proc_net);
}
static struct pernet_operations packet_net_ops = {
.init = packet_net_init,
.exit = packet_net_exit,
};
static void __exit packet_exit(void)
{
unregister_netdevice_notifier(&packet_netdev_notifier);
unregister_pernet_subsys(&packet_net_ops);
sock_unregister(PF_PACKET);
proto_unregister(&packet_proto);
}
static int __init packet_init(void)
{
int rc = proto_register(&packet_proto, 0);
if (rc != 0)
goto out;
sock_register(&packet_family_ops);
register_pernet_subsys(&packet_net_ops);
register_netdevice_notifier(&packet_netdev_notifier);
out:
return rc;
}
module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_25 |
crossvul-cpp_data_good_5732_0 | #undef DEBUG
/*
* ARM performance counter support.
*
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
* Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
*
* This code is based on the sparc64 perf event code, which is in turn based
* on the x86 code. Callchain code is based on the ARM OProfile backtrace
* code.
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u64 config)
{
unsigned int cache_type, cache_op, cache_result, ret;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
if (ret == CACHE_OP_UNSUPPORTED)
return -ENOENT;
return ret;
}
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
int mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
return (int)(config & raw_event_mask);
}
int
armpmu_map_event(struct perf_event *event,
const unsigned (*event_map)[PERF_COUNT_HW_MAX],
const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u32 raw_event_mask)
{
u64 config = event->attr.config;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
return armpmu_map_hw_event(event_map, config);
case PERF_TYPE_HW_CACHE:
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
}
int armpmu_event_set_period(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
if (unlikely(period != hwc->last_period))
left = period - (hwc->last_period - left);
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > (s64)armpmu->max_period)
left = armpmu->max_period;
local64_set(&hwc->prev_count, (u64)-left);
armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
perf_event_update_userpage(event);
return ret;
}
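/*
 * Programming the counter with (u64)-left makes the hardware overflow
 * (and raise the PMU interrupt) after exactly "left" events. For example,
 * with a 32-bit counter and left == 1000, the counter starts at
 * 0xfffffc18 and wraps after 1000 increments.
 */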
u64 armpmu_event_update(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = armpmu->read_counter(event);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
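/*
 * The cmpxchg loop above makes the read safe against a concurrent update
 * from the overflow handler, and masking the delta with max_period keeps
 * the subtraction correct even when the raw counter wrapped between the
 * two reads.
 */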
static void
armpmu_read(struct perf_event *event)
{
armpmu_event_update(event);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
*/
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(event);
armpmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static void armpmu_start(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
*/
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/*
* Set the period again. Some counters can't be stopped, so when we
* were stopped we simply disabled the IRQ source and the counter
* may have been left counting. If we don't do this step then we may
* get an interrupt too soon or *way* too late if the overflow has
* happened since disabling.
*/
armpmu_event_set_period(event);
armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *hw_events = armpmu->get_hw_events();
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *hw_events = armpmu->get_hw_events();
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(hw_events, event);
if (idx < 0) {
err = idx;
goto out;
}
/*
* If there is an event in the counter we are going to use then make
* sure it is disabled.
*/
event->hw.idx = idx;
armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
armpmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
perf_pmu_enable(event->pmu);
return err;
}
static int
validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
return armpmu->get_event_idx(hw_events, event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct pmu_hw_events fake_pmu;
DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
/*
* Initialise the fake PMU. We only need to populate the
* used_mask for the purposes of validation.
*/
memset(fake_used_mask, 0, sizeof(fake_used_mask));
fake_pmu.used_mask = fake_used_mask;
if (!validate_event(&fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_pmu, sibling))
return -EINVAL;
}
if (!validate_event(&fake_pmu, event))
return -EINVAL;
return 0;
}
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu = (struct arm_pmu *) dev;
struct platform_device *plat_device = armpmu->plat_device;
struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
if (plat && plat->handle_irq)
return plat->handle_irq(irq, dev, armpmu->handle_irq);
else
return armpmu->handle_irq(irq, dev);
}
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
armpmu->free_irq(armpmu);
pm_runtime_put_sync(&armpmu->plat_device->dev);
}
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
int err;
struct platform_device *pmu_device = armpmu->plat_device;
if (!pmu_device)
return -ENODEV;
pm_runtime_get_sync(&pmu_device->dev);
err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
if (err) {
armpmu_release_hardware(armpmu);
return err;
}
return 0;
}
static void
hw_perf_event_destroy(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
atomic_t *active_events = &armpmu->active_events;
struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
armpmu_release_hardware(armpmu);
mutex_unlock(pmu_reserve_mutex);
}
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
return attr->exclude_idle || attr->exclude_user ||
attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int mapping;
mapping = armpmu->map_event(event);
if (mapping < 0) {
pr_debug("event %x:%llx not supported\n", event->attr.type,
event->attr.config);
return mapping;
}
/*
* We don't assign an index until we actually place the event onto
* hardware. Use -1 to signify that we haven't decided where to put it
* yet. For SMP systems, each core has its own PMU so we can't do any
* clever allocation or constraints checking at this point.
*/
hwc->idx = -1;
hwc->config_base = 0;
hwc->config = 0;
hwc->event_base = 0;
/*
* Check whether we need to exclude the counter from certain modes.
*/
if ((!armpmu->set_event_filter ||
armpmu->set_event_filter(hwc, &event->attr)) &&
event_requires_mode_exclusion(&event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
return -EOPNOTSUPP;
}
/*
* Store the event encoding into the config_base field.
*/
hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
/*
* For non-sampling runs, limit the sample_period to half
* of the counter width. That way, the new counter value
* is far less likely to overtake the previous one unless
* you have some serious IRQ latency issues.
*/
hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
if (event->group_leader != event) {
if (validate_group(event) != 0)
return -EINVAL;
}
return 0;
}
static int armpmu_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
int err = 0;
atomic_t *active_events = &armpmu->active_events;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
if (armpmu->map_event(event) == -ENOENT)
return -ENOENT;
event->destroy = hw_perf_event_destroy;
if (!atomic_inc_not_zero(active_events)) {
mutex_lock(&armpmu->reserve_mutex);
if (atomic_read(active_events) == 0)
err = armpmu_reserve_hardware(armpmu);
if (!err)
atomic_inc(active_events);
mutex_unlock(&armpmu->reserve_mutex);
}
if (err)
return err;
err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);
return err;
}
static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
struct pmu_hw_events *hw_events = armpmu->get_hw_events();
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
if (enabled)
armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
armpmu->stop(armpmu);
}
#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
struct arm_pmu_platdata *plat = dev_get_platdata(dev);
if (plat && plat->runtime_resume)
return plat->runtime_resume(dev);
return 0;
}
static int armpmu_runtime_suspend(struct device *dev)
{
struct arm_pmu_platdata *plat = dev_get_platdata(dev);
if (plat && plat->runtime_suspend)
return plat->runtime_suspend(dev);
return 0;
}
#endif
const struct dev_pm_ops armpmu_dev_pm_ops = {
SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};
static void armpmu_init(struct arm_pmu *armpmu)
{
atomic_set(&armpmu->active_events, 0);
mutex_init(&armpmu->reserve_mutex);
armpmu->pmu = (struct pmu) {
.pmu_enable = armpmu_enable,
.pmu_disable = armpmu_disable,
.event_init = armpmu_event_init,
.add = armpmu_add,
.del = armpmu_del,
.start = armpmu_start,
.stop = armpmu_stop,
.read = armpmu_read,
};
}
int armpmu_register(struct arm_pmu *armpmu, int type)
{
armpmu_init(armpmu);
pm_runtime_enable(&armpmu->plat_device->dev);
pr_info("enabled with %s PMU driver, %d counters available\n",
armpmu->name, armpmu->num_events);
return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
/*
* Callchain handling code.
*/
/*
* The registers we're interested in are at the end of the variable
* length saved register structure. The fp points at the end of this
* structure so the address of this struct is:
* (struct frame_tail *)(xxx->fp)-1
*
* This code has been adapted from the ARM OProfile support.
*/
struct frame_tail {
struct frame_tail __user *fp;
unsigned long sp;
unsigned long lr;
} __attribute__((packed));
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct frame_tail buftail;
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
return NULL;
perf_callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
if (tail + 1 >= buftail.fp)
return NULL;
return buftail.fp - 1;
}
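/*
 * The "tail + 1 >= buftail.fp" check above is what bounds the walk: each
 * accepted frame must sit strictly higher on the stack, so a corrupt or
 * cyclic user stack can only terminate the unwind, never loop it.
 */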
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct frame_tail __user *tail;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->ARM_pc);
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, entry);
}
/*
* Gets called by walk_stackframe() for every stackframe. This will be called
* whilst unwinding the stack frame and is like a subroutine return, so we use
* the PC.
*/
static int
callchain_trace(struct stackframe *fr,
void *data)
{
struct perf_callchain_entry *entry = data;
perf_callchain_store(entry, fr->pc);
return 0;
}
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct stackframe fr;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
fr.fp = regs->ARM_fp;
fr.sp = regs->ARM_sp;
fr.lr = regs->ARM_lr;
fr.pc = regs->ARM_pc;
walk_stackframe(&fr, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
return perf_guest_cbs->get_guest_ip();
return instruction_pointer(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
if (perf_guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
}
return misc;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5732_0 |
crossvul-cpp_data_good_5845_22 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/netrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/arp.h>
#include <linux/init.h>
static int nr_ndevs = 4;
int sysctl_netrom_default_path_quality = NR_DEFAULT_QUAL;
int sysctl_netrom_obsolescence_count_initialiser = NR_DEFAULT_OBS;
int sysctl_netrom_network_ttl_initialiser = NR_DEFAULT_TTL;
int sysctl_netrom_transport_timeout = NR_DEFAULT_T1;
int sysctl_netrom_transport_maximum_tries = NR_DEFAULT_N2;
int sysctl_netrom_transport_acknowledge_delay = NR_DEFAULT_T2;
int sysctl_netrom_transport_busy_delay = NR_DEFAULT_T4;
int sysctl_netrom_transport_requested_window_size = NR_DEFAULT_WINDOW;
int sysctl_netrom_transport_no_activity_timeout = NR_DEFAULT_IDLE;
int sysctl_netrom_routing_control = NR_DEFAULT_ROUTING;
int sysctl_netrom_link_fails_count = NR_DEFAULT_FAILS;
int sysctl_netrom_reset_circuit = NR_DEFAULT_RESET;
static unsigned short circuit = 0x101;
static HLIST_HEAD(nr_list);
static DEFINE_SPINLOCK(nr_list_lock);
static const struct proto_ops nr_proto_ops;
/*
* NETROM network devices are virtual network devices encapsulating NETROM
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key nr_netdev_xmit_lock_key;
static struct lock_class_key nr_netdev_addr_lock_key;
static void nr_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
}
static void nr_set_lockdep_key(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
}
/*
* Socket removal during an interrupt is now safe.
*/
static void nr_remove_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
sk_del_node_init(sk);
spin_unlock_bh(&nr_list_lock);
}
/*
* Kill all bound sockets on a dropped device.
*/
static void nr_kill_by_device(struct net_device *dev)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list)
if (nr_sk(s)->device == dev)
nr_disconnect(s, ENETUNREACH);
spin_unlock_bh(&nr_list_lock);
}
/*
* Handle device status changes.
*/
static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
nr_kill_by_device(dev);
nr_rt_device_down(dev);
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
static void nr_insert_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
sk_add_node(sk, &nr_list);
spin_unlock_bh(&nr_list_lock);
}
/*
* Find a socket that wants to accept the Connect Request we just
* received.
*/
static struct sock *nr_find_listener(ax25_address *addr)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
bh_lock_sock(s);
goto found;
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find a connected NET/ROM socket given my circuit IDs.
*/
static struct sock *nr_find_socket(unsigned char index, unsigned char id)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
bh_lock_sock(s);
goto found;
}
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find a connected NET/ROM socket given their circuit IDs.
*/
static struct sock *nr_find_peer(unsigned char index, unsigned char id,
ax25_address *dest)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
bh_lock_sock(s);
goto found;
}
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find next free circuit ID.
*/
static unsigned short nr_find_next_circuit(void)
{
unsigned short id = circuit;
unsigned char i, j;
struct sock *sk;
for (;;) {
i = id / 256;
j = id % 256;
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
bh_unlock_sock(sk);
}
id++;
}
return id;
}
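/*
 * Worked example of the circuit numbering used above (illustration only):
 * the 16-bit circuit number is carried as two bytes, index = circuit / 256
 * and id = circuit % 256, and candidates with a zero byte in either
 * position are skipped because index/id 0/0 is reserved for "no circuit".
 *
 *	circuit = 0x0102  ->  index 0x01, id 0x02  (usable if not in use)
 *	circuit = 0x0100  ->  id == 0, skipped
 *	circuit = 0x00ff  ->  index == 0, skipped
 */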
/*
* Deferred destroy.
*/
void nr_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void nr_destroy_timer(unsigned long data)
{
struct sock *sk=(struct sock *)data;
bh_lock_sock(sk);
sock_hold(sk);
nr_destroy_socket(sk);
bh_unlock_sock(sk);
sock_put(sk);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void nr_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
nr_remove_socket(sk);
nr_stop_heartbeat(sk);
nr_stop_t1timer(sk);
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
nr_start_heartbeat(skb->sk);
nr_sk(skb->sk)->state = NR_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
sk->sk_timer.function = nr_destroy_timer;
sk->sk_timer.expires = jiffies + 2 * HZ;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/*
* Handling for system calls applied via the various interfaces to a
* NET/ROM socket object.
*/
static int nr_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
unsigned long opt;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
if (optlen < sizeof(unsigned int))
return -EINVAL;
if (get_user(opt, (unsigned int __user *)optval))
return -EFAULT;
switch (optname) {
case NETROM_T1:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t1 = opt * HZ;
return 0;
case NETROM_T2:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t2 = opt * HZ;
return 0;
case NETROM_N2:
if (opt < 1 || opt > 31)
return -EINVAL;
nr->n2 = opt;
return 0;
case NETROM_T4:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t4 = opt * HZ;
return 0;
case NETROM_IDLE:
if (opt > ULONG_MAX / (60 * HZ))
return -EINVAL;
nr->idle = opt * 60 * HZ;
return 0;
default:
return -ENOPROTOOPT;
}
}
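/*
 * Minimal userspace sketch of the interface above (illustrative; "fd" is
 * assumed to come from socket(PF_NETROM, SOCK_SEQPACKET, 0)).  The value
 * is given in seconds and scaled by HZ in the kernel:
 *
 *	unsigned int t1 = 120;
 *
 *	if (setsockopt(fd, SOL_NETROM, NETROM_T1, &t1, sizeof(t1)) < 0)
 *		perror("setsockopt(NETROM_T1)");
 */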
static int nr_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
int val = 0;
int len;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case NETROM_T1:
val = nr->t1 / HZ;
break;
case NETROM_T2:
val = nr->t2 / HZ;
break;
case NETROM_N2:
val = nr->n2;
break;
case NETROM_T4:
val = nr->t4 / HZ;
break;
case NETROM_IDLE:
val = nr->idle / (60 * HZ);
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
static int nr_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
lock_sock(sk);
if (sk->sk_state != TCP_LISTEN) {
memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
release_sock(sk);
return 0;
}
release_sock(sk);
return -EOPNOTSUPP;
}
static struct proto nr_proto = {
.name = "NETROM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct nr_sock),
};
static int nr_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct nr_sock *nr;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
if (sk == NULL)
return -ENOMEM;
nr = nr_sk(sk);
sock_init_data(sock, sk);
sock->ops = &nr_proto_ops;
sk->sk_protocol = protocol;
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
skb_queue_head_init(&nr->frag_queue);
nr_init_timers(sk);
nr->t1 =
msecs_to_jiffies(sysctl_netrom_transport_timeout);
nr->t2 =
msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
nr->n2 =
msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
nr->t4 =
msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
nr->idle =
msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
nr->window = sysctl_netrom_transport_requested_window_size;
nr->bpqext = 1;
nr->state = NR_STATE_0;
return 0;
}
static struct sock *nr_make_new(struct sock *osk)
{
struct sock *sk;
struct nr_sock *nr, *onr;
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
if (sk == NULL)
return NULL;
nr = nr_sk(sk);
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
skb_queue_head_init(&nr->frag_queue);
nr_init_timers(sk);
onr = nr_sk(osk);
nr->t1 = onr->t1;
nr->t2 = onr->t2;
nr->n2 = onr->n2;
nr->t4 = onr->t4;
nr->idle = onr->idle;
nr->window = onr->window;
nr->device = onr->device;
nr->bpqext = onr->bpqext;
return sk;
}
static int nr_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct nr_sock *nr;
if (sk == NULL) return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
nr = nr_sk(sk);
switch (nr->state) {
case NR_STATE_0:
case NR_STATE_1:
case NR_STATE_2:
nr_disconnect(sk, 0);
nr_destroy_socket(sk);
break;
case NR_STATE_3:
nr_clear_queues(sk);
nr->n2count = 0;
nr_write_internal(sk, NR_DISCREQ);
nr_start_t1timer(sk);
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr->state = NR_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
struct net_device *dev;
ax25_uid_assoc *user;
ax25_address *source;
lock_sock(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) {
release_sock(sk);
return -EINVAL;
}
if (addr->fsa_ax25.sax25_family != AF_NETROM) {
release_sock(sk);
return -EINVAL;
}
if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
release_sock(sk);
return -EADDRNOTAVAIL;
}
/*
* Only the super user can set an arbitrary user callsign.
*/
if (addr->fsa_ax25.sax25_ndigis == 1) {
if (!capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
release_sock(sk);
return -EPERM;
}
nr->user_addr = addr->fsa_digipeater[0];
nr->source_addr = addr->fsa_ax25.sax25_call;
} else {
source = &addr->fsa_ax25.sax25_call;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
release_sock(sk);
dev_put(dev);
return -EPERM;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
}
nr->device = dev;
nr_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
dev_put(dev);
release_sock(sk);
return 0;
}
static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr;
ax25_address *source = NULL;
ax25_uid_assoc *user;
struct net_device *dev;
int err = 0;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
goto out_release; /* Connect completed during a ERESTARTSYS event */
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
if (sk->sk_state == TCP_ESTABLISHED) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) {
err = -EINVAL;
goto out_release;
}
if (addr->sax25_family != AF_NETROM) {
err = -EINVAL;
goto out_release;
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
sock_reset_flag(sk, SOCK_ZAPPED);
if ((dev = nr_dev_first()) == NULL) {
err = -ENETUNREACH;
goto out_release;
}
source = (ax25_address *)dev->dev_addr;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
dev_put(dev);
err = -EPERM;
goto out_release;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
nr->device = dev;
dev_put(dev);
nr_insert_socket(sk); /* Finish the bind */
}
nr->dest_addr = addr->sax25_call;
release_sock(sk);
circuit = nr_find_next_circuit();
lock_sock(sk);
nr->my_index = circuit / 256;
nr->my_id = circuit % 256;
circuit++;
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
nr_establish_data_link(sk);
nr->state = NR_STATE_1;
nr_start_heartbeat(sk);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
out_release:
release_sock(sk);
return err;
}
static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out_release;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out_release;
}
/*
* The write queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
kfree_skb(skb);
sk_acceptq_removed(sk);
out_release:
release_sock(sk);
return err;
}
static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr;
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
memset(&sax->fsa_ax25, 0, sizeof(struct sockaddr_ax25));
lock_sock(sk);
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 1;
sax->fsa_ax25.sax25_call = nr->user_addr;
memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
sax->fsa_digipeater[0] = nr->dest_addr;
*uaddr_len = sizeof(struct full_sockaddr_ax25);
} else {
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 0;
sax->fsa_ax25.sax25_call = nr->source_addr;
*uaddr_len = sizeof(struct sockaddr_ax25);
}
release_sock(sk);
return 0;
}
int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
{
struct sock *sk;
struct sock *make;
struct nr_sock *nr_make;
ax25_address *src, *dest, *user;
unsigned short circuit_index, circuit_id;
unsigned short peer_circuit_index, peer_circuit_id;
unsigned short frametype, flags, window, timeout;
int ret;
skb->sk = NULL; /* Initially we don't know who it's for */
/*
* skb->data points to the netrom frame start
*/
src = (ax25_address *)(skb->data + 0);
dest = (ax25_address *)(skb->data + 7);
circuit_index = skb->data[15];
circuit_id = skb->data[16];
peer_circuit_index = skb->data[17];
peer_circuit_id = skb->data[18];
frametype = skb->data[19] & 0x0F;
flags = skb->data[19] & 0xF0;
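	/*
	 * Byte layout of the transport header decoded above (offsets into
	 * skb->data): 0..6 source callsign, 7..13 destination callsign,
	 * 15/16 our circuit index/id, 17/18 the peer's circuit index/id,
	 * and byte 19 holds the frame type in the low nibble and the flag
	 * bits in the high nibble.
	 */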
/*
* Check for an incoming IP over NET/ROM frame.
*/
if (frametype == NR_PROTOEXT &&
circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) {
skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
skb_reset_transport_header(skb);
return nr_rx_ip(skb, dev);
}
/*
* Find an existing socket connection, based on circuit ID, if it's
* a Connect Request base it on their circuit ID.
*
* Circuit ID 0/0 is not valid but it could still be a "reset" for a
* circuit that no longer exists at the other end ...
*/
sk = NULL;
if (circuit_index == 0 && circuit_id == 0) {
if (frametype == NR_CONNACK && flags == NR_CHOKE_FLAG)
sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src);
} else {
if (frametype == NR_CONNREQ)
sk = nr_find_peer(circuit_index, circuit_id, src);
else
sk = nr_find_socket(circuit_index, circuit_id);
}
if (sk != NULL) {
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
nr_sk(sk)->bpqext = 1;
else
nr_sk(sk)->bpqext = 0;
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
return ret;
}
/*
* Now it should be a CONNREQ.
*/
if (frametype != NR_CONNREQ) {
/*
* Here it would be nice to be able to send a reset but
* NET/ROM doesn't have one. We've tried to extend the protocol
* by sending NR_CONNACK | NR_CHOKE_FLAGS replies but that
* apparently kills BPQ boxes... :-(
* So now we try to follow the established behaviour of
* G8PZT's Xrouter which is sending packets with command type 7
* as an extension of the protocol.
*/
if (sysctl_netrom_reset_circuit &&
(frametype != NR_RESET || flags != 0))
nr_transmit_reset(skb, 1);
return 0;
}
sk = nr_find_listener(dest);
user = (ax25_address *)(skb->data + 21);
if (sk == NULL || sk_acceptq_is_full(sk) ||
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
bh_unlock_sock(sk);
return 0;
}
window = skb->data[20];
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */
nr_make = nr_sk(make);
nr_make->source_addr = *dest;
nr_make->dest_addr = *src;
nr_make->user_addr = *user;
nr_make->your_index = circuit_index;
nr_make->your_id = circuit_id;
bh_unlock_sock(sk);
circuit = nr_find_next_circuit();
bh_lock_sock(sk);
nr_make->my_index = circuit / 256;
nr_make->my_id = circuit % 256;
circuit++;
/* Window negotiation */
if (window < nr_make->window)
nr_make->window = window;
/* L4 timeout negotiation */
if (skb->len == 37) {
timeout = skb->data[36] * 256 + skb->data[35];
if (timeout * HZ < nr_make->t1)
nr_make->t1 = timeout * HZ;
nr_make->bpqext = 1;
} else {
nr_make->bpqext = 0;
}
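	/*
	 * The two extra bytes tested above carry the peer's T1 value in
	 * seconds, low byte first (data[35] is the LSB, data[36] the MSB);
	 * a 37-byte CONNREQ is therefore treated as coming from a peer
	 * that speaks the BPQ extension.
	 */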
nr_write_internal(make, NR_CONNACK);
nr_make->condition = 0x00;
nr_make->vs = 0;
nr_make->va = 0;
nr_make->vr = 0;
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
sk_acceptq_added(sk);
skb_queue_head(&sk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
bh_unlock_sock(sk);
nr_insert_socket(make);
nr_start_heartbeat(make);
nr_start_idletimer(make);
return 1;
}
static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name;
int err;
struct sockaddr_ax25 sax;
struct sk_buff *skb;
unsigned char *asmptr;
int size;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED)) {
err = -EADDRNOTAVAIL;
goto out;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
}
if (nr->device == NULL) {
err = -ENETUNREACH;
goto out;
}
if (usax) {
if (msg->msg_namelen < sizeof(sax)) {
err = -EINVAL;
goto out;
}
sax = *usax;
if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) {
err = -EISCONN;
goto out;
}
if (sax.sax25_family != AF_NETROM) {
err = -EINVAL;
goto out;
}
} else {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
sax.sax25_family = AF_NETROM;
sax.sax25_call = nr->dest_addr;
}
/* Build a packet - the conventional user limit is 236 bytes. We can
do ludicrously large NetROM frames but must not overflow */
if (len > 65536) {
err = -EMSGSIZE;
goto out;
}
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
goto out;
skb_reserve(skb, size - len);
skb_reset_transport_header(skb);
/*
* Push down the NET/ROM header
*/
asmptr = skb_push(skb, NR_TRANSPORT_LEN);
/* Build a NET/ROM Transport header */
*asmptr++ = nr->your_index;
*asmptr++ = nr->your_id;
*asmptr++ = 0; /* To be filled in later */
*asmptr++ = 0; /* Ditto */
*asmptr++ = NR_INFO;
/*
* Put the data on the end
*/
skb_put(skb, len);
/* User data follows immediately after the NET/ROM transport header */
if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
kfree_skb(skb);
err = -EFAULT;
goto out;
}
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
}
nr_output(sk, skb); /* Shove it onto the queue */
err = len;
out:
release_sock(sk);
return err;
}
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
if (sax != NULL) {
memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
msg->msg_namelen = sizeof(*sax);
}
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
}
static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
int ret;
switch (cmd) {
case TIOCOUTQ: {
long amount;
lock_sock(sk);
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
release_sock(sk);
return put_user(amount, (int __user *)argp);
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
lock_sock(sk);
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
release_sock(sk);
return put_user(amount, (int __user *)argp);
}
case SIOCGSTAMP:
lock_sock(sk);
ret = sock_get_timestamp(sk, argp);
release_sock(sk);
return ret;
case SIOCGSTAMPNS:
lock_sock(sk);
ret = sock_get_timestampns(sk, argp);
release_sock(sk);
return ret;
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
return -EINVAL;
case SIOCADDRT:
case SIOCDELRT:
case SIOCNRDECOBS:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return nr_rt_ioctl(cmd, argp);
default:
return -ENOIOCTLCMD;
}
return 0;
}
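/*
 * Illustrative userspace use of the queue ioctls handled above (a sketch;
 * "fd" is assumed to be a connected NET/ROM socket):
 *
 *	int pending;
 *
 *	if (ioctl(fd, TIOCOUTQ, &pending) == 0)
 *		printf("%d bytes queued but not yet sent\n", pending);
 */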
#ifdef CONFIG_PROC_FS
static void *nr_info_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_bh(&nr_list_lock);
return seq_hlist_start_head(&nr_list, *pos);
}
static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &nr_list, pos);
}
static void nr_info_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&nr_list_lock);
}
static int nr_info_show(struct seq_file *seq, void *v)
{
struct sock *s = sk_entry(v);
struct net_device *dev;
struct nr_sock *nr;
const char *devname;
char buf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n");
else {
bh_lock_sock(s);
nr = nr_sk(s);
if ((dev = nr->device) == NULL)
devname = "???";
else
devname = dev->name;
seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr));
seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr));
seq_printf(seq,
"%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n",
ax2asc(buf, &nr->source_addr),
devname,
nr->my_index,
nr->my_id,
nr->your_index,
nr->your_id,
nr->state,
nr->vs,
nr->vr,
nr->va,
ax25_display_timer(&nr->t1timer) / HZ,
nr->t1 / HZ,
ax25_display_timer(&nr->t2timer) / HZ,
nr->t2 / HZ,
ax25_display_timer(&nr->t4timer) / HZ,
nr->t4 / HZ,
ax25_display_timer(&nr->idletimer) / (60 * HZ),
nr->idle / (60 * HZ),
nr->n2count,
nr->n2,
nr->window,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
bh_unlock_sock(s);
}
return 0;
}
static const struct seq_operations nr_info_seqops = {
.start = nr_info_start,
.next = nr_info_next,
.stop = nr_info_stop,
.show = nr_info_show,
};
static int nr_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &nr_info_seqops);
}
static const struct file_operations nr_info_fops = {
.owner = THIS_MODULE,
.open = nr_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
static const struct net_proto_family nr_family_ops = {
.family = PF_NETROM,
.create = nr_create,
.owner = THIS_MODULE,
};
static const struct proto_ops nr_proto_ops = {
.family = PF_NETROM,
.owner = THIS_MODULE,
.release = nr_release,
.bind = nr_bind,
.connect = nr_connect,
.socketpair = sock_no_socketpair,
.accept = nr_accept,
.getname = nr_getname,
.poll = datagram_poll,
.ioctl = nr_ioctl,
.listen = nr_listen,
.shutdown = sock_no_shutdown,
.setsockopt = nr_setsockopt,
.getsockopt = nr_getsockopt,
.sendmsg = nr_sendmsg,
.recvmsg = nr_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static struct notifier_block nr_dev_notifier = {
.notifier_call = nr_device_event,
};
static struct net_device **dev_nr;
static struct ax25_protocol nr_pid = {
.pid = AX25_P_NETROM,
.func = nr_route_frame
};
static struct ax25_linkfail nr_linkfail_notifier = {
.func = nr_link_failed,
};
static int __init nr_proto_init(void)
{
int i;
int rc = proto_register(&nr_proto, 0);
if (rc != 0)
goto out;
	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
		printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter too large\n");
		proto_unregister(&nr_proto);
		return -1;
	}
	dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
	if (dev_nr == NULL) {
		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
		proto_unregister(&nr_proto);
		return -1;
	}
for (i = 0; i < nr_ndevs; i++) {
char name[IFNAMSIZ];
struct net_device *dev;
sprintf(name, "nr%d", i);
dev = alloc_netdev(0, name, nr_setup);
if (!dev) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
goto fail;
}
dev->base_addr = i;
if (register_netdev(dev)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
free_netdev(dev);
goto fail;
}
nr_set_lockdep_key(dev);
dev_nr[i] = dev;
}
if (sock_register(&nr_family_ops)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
goto fail;
}
register_netdevice_notifier(&nr_dev_notifier);
ax25_register_pid(&nr_pid);
ax25_linkfail_register(&nr_linkfail_notifier);
#ifdef CONFIG_SYSCTL
nr_register_sysctl();
#endif
nr_loopback_init();
proc_create("nr", S_IRUGO, init_net.proc_net, &nr_info_fops);
proc_create("nr_neigh", S_IRUGO, init_net.proc_net, &nr_neigh_fops);
proc_create("nr_nodes", S_IRUGO, init_net.proc_net, &nr_nodes_fops);
out:
return rc;
fail:
while (--i >= 0) {
unregister_netdev(dev_nr[i]);
free_netdev(dev_nr[i]);
}
kfree(dev_nr);
proto_unregister(&nr_proto);
rc = -1;
goto out;
}
module_init(nr_proto_init);
module_param(nr_ndevs, int, 0);
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio NET/ROM network and transport layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_NETROM);
static void __exit nr_exit(void)
{
int i;
remove_proc_entry("nr", init_net.proc_net);
remove_proc_entry("nr_neigh", init_net.proc_net);
remove_proc_entry("nr_nodes", init_net.proc_net);
nr_loopback_clear();
nr_rt_free();
#ifdef CONFIG_SYSCTL
nr_unregister_sysctl();
#endif
ax25_linkfail_release(&nr_linkfail_notifier);
ax25_protocol_release(AX25_P_NETROM);
unregister_netdevice_notifier(&nr_dev_notifier);
sock_unregister(PF_NETROM);
for (i = 0; i < nr_ndevs; i++) {
struct net_device *dev = dev_nr[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
kfree(dev_nr);
proto_unregister(&nr_proto);
}
module_exit(nr_exit);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#include "magick/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
typedef MagickBooleanType
DDSDecoder(Image *, DDSInfo *, ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
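/*
  Worked example for the C565 macros above (illustration only): the 16-bit
  5:6:5 pixel 0xAABB has C565_r(0xAABB) = (0xAABB & 0xF800) >> 11 = 21, and
  C565_red(0xAABB) = (21 << 3) | (21 >> 2) = 168 | 5 = 173.  Replicating the
  high bits into the low bits maps 0 to 0 and 31 to 255 exactly.
*/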
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
do \
{ \
  if (min > max) \
    min = max; \
  if (max - min < steps) \
    max = Min(min + steps, 255); \
  if (max - min < steps) \
    min = Max(min - steps, 0); \
} while(0)
#define Dot(left, right) ((left.x*right.x) + (left.y*right.y) + (left.z*right.z))
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t, const DDSVector4 *, const DDSVector3,
DDSVector4 *, DDSVector4 *, unsigned char *, size_t);
static MagickBooleanType
ReadDDSInfo(Image *, DDSInfo *);
static MagickBooleanType
ReadDXT1(Image *, DDSInfo *, ExceptionInfo *);
static MagickBooleanType
ReadDXT3(Image *, DDSInfo *, ExceptionInfo *);
static MagickBooleanType
ReadDXT5(Image *, DDSInfo *, ExceptionInfo *);
static MagickBooleanType
ReadUncompressedRGB(Image *, DDSInfo *, ExceptionInfo *);
static MagickBooleanType
ReadUncompressedRGBA(Image *, DDSInfo *, ExceptionInfo *);
static void
RemapIndices(const ssize_t *, const unsigned char *, unsigned char *);
static void
SkipDXTMipmaps(Image *, DDSInfo *, int);
static void
SkipRGBMipmaps(Image *, DDSInfo *, int);
static MagickBooleanType
  WriteDDSImage(const ImageInfo *, Image *);
static void
WriteDDSInfo(Image *, const size_t, const size_t, const size_t);
static void
WriteFourCC(Image *, const size_t, const MagickBooleanType,
const MagickBooleanType, ExceptionInfo *);
static void
WriteImageData(Image *, const size_t, const size_t, const MagickBooleanType,
const MagickBooleanType, ExceptionInfo *);
static void
WriteIndices(Image *, const DDSVector3, const DDSVector3, unsigned char *);
static MagickBooleanType
WriteMipmaps(Image *, const size_t, const size_t, const size_t,
const MagickBooleanType, const MagickBooleanType, ExceptionInfo *);
static void
WriteSingleColorFit(Image *, const DDSVector4 *, const ssize_t *);
static void
WriteUncompressed(Image *, ExceptionInfo *);
static inline size_t Max(size_t one, size_t two)
{
if (one > two)
return one;
return two;
}
static inline float MaxF(float one, float two)
{
if (one > two)
return one;
return two;
}
static inline size_t Min(size_t one, size_t two)
{
if (one < two)
return one;
return two;
}
static inline float MinF(float one, float two)
{
if (one < two)
return one;
return two;
}
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
DDSVector4 *destination)
{
destination->x = left.x + right.x;
destination->y = left.y + right.y;
destination->z = left.z + right.z;
destination->w = left.w + right.w;
}
static inline void VectorClamp(DDSVector4 *value)
{
value->x = MinF(1.0f,MaxF(0.0f,value->x));
value->y = MinF(1.0f,MaxF(0.0f,value->y));
value->z = MinF(1.0f,MaxF(0.0f,value->z));
value->w = MinF(1.0f,MaxF(0.0f,value->w));
}
static inline void VectorClamp3(DDSVector3 *value)
{
value->x = MinF(1.0f,MaxF(0.0f,value->x));
value->y = MinF(1.0f,MaxF(0.0f,value->y));
value->z = MinF(1.0f,MaxF(0.0f,value->z));
}
static inline void VectorCopy43(const DDSVector4 source,
DDSVector3 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
}
static inline void VectorCopy44(const DDSVector4 source,
DDSVector4 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
destination->w = source.w;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
destination->x = c.x - (a.x * b.x);
destination->y = c.y - (a.y * b.y);
destination->z = c.z - (a.z * b.z);
destination->w = c.w - (a.w * b.w);
}
static inline void VectorMultiply(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
destination->w = left.w * right.w;
}
static inline void VectorMultiply3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
const DDSVector4 c, DDSVector4 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
destination->w = (a.w * b.w) + c.w;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
const DDSVector3 c, DDSVector3 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
}
static inline void VectorReciprocal(const DDSVector4 value,
DDSVector4 *destination)
{
destination->x = 1.0f / value.x;
destination->y = 1.0f / value.y;
destination->z = 1.0f / value.z;
destination->w = 1.0f / value.w;
}
static inline void VectorSubtract(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
destination->w = left.w - right.w;
}
static inline void VectorSubtract3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
}
static inline void VectorTruncate(DDSVector4 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
}
static void CalculateColors(unsigned short c0, unsigned short c1,
DDSColors *c, MagickBooleanType ignoreAlpha)
{
c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
c->r[0] = (unsigned char) C565_red(c0);
c->g[0] = (unsigned char) C565_green(c0);
c->b[0] = (unsigned char) C565_blue(c0);
c->r[1] = (unsigned char) C565_red(c1);
c->g[1] = (unsigned char) C565_green(c1);
c->b[1] = (unsigned char) C565_blue(c1);
if (ignoreAlpha != MagickFalse || c0 > c1)
{
c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
else
{
c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
c->r[3] = c->g[3] = c->b[3] = 0;
c->a[3] = 255;
}
}
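/*
Worked example: in four-colour mode (c0 > c1) with c0 = 0xf800 (pure red)
and c1 = 0x001f (pure blue), the expanded endpoints are (255,0,0) and
(0,0,255), so the two interpolants come out as (170,0,85) and (85,0,170).
*/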
static size_t CompressAlpha(const size_t min, const size_t max,
const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
unsigned char
codes[8];
register ssize_t
i;
size_t
error,
index,
j,
least,
value;
codes[0] = (unsigned char) min;
codes[1] = (unsigned char) max;
codes[6] = 0;
codes[7] = 255;
for (i=1; i < (ssize_t) steps; i++)
codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
error = 0;
for (i=0; i<16; i++)
{
if (alphas[i] == -1)
{
indices[i] = 0;
continue;
}
value = alphas[i];
least = SIZE_MAX;
index = 0;
for (j=0; j<8; j++)
{
size_t
dist;
dist = value - (size_t)codes[j];
dist *= dist;
if (dist < least)
{
least = dist;
index = j;
}
}
indices[i] = (unsigned char)index;
error += least;
}
return error;
}
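/*
Worked example of the five-step codebook: for min = 32 and max = 160 the
codes[] table becomes { 32, 160, 57, 83, 108, 134, 0, 255 }, i.e. the two
endpoints, four evenly spaced interpolants, and the implicit 0/255 entries
of the DXT5 five-alpha block type.
*/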
static void CompressClusterFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
DDSVector3
axis;
DDSVector4
grid,
gridrcp,
half,
onethird_onethird2,
pointsWeights[16],
two,
twonineths,
twothirds_twothirds2,
xSumwSum;
float
bestError = 1e+37f;
size_t
bestIteration = 0,
besti = 0,
bestj = 0,
bestk = 0,
iterationIndex;
ssize_t
i;
unsigned char
*o,
order[128],
unordered[16];
VectorInit(half,0.5f);
VectorInit(two,2.0f);
VectorInit(onethird_onethird2,1.0f/3.0f);
onethird_onethird2.w = 1.0f/9.0f;
VectorInit(twothirds_twothirds2,2.0f/3.0f);
twothirds_twothirds2.w = 4.0f/9.0f;
VectorInit(twonineths,2.0f/9.0f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
grid.w = 0.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
gridrcp.w = 0.0f;
xSumwSum.x = 0.0f;
xSumwSum.y = 0.0f;
xSumwSum.z = 0.0f;
xSumwSum.w = 0.0f;
ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
for (iterationIndex = 0;;)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
part0,
part1,
part2;
size_t
ii,
j,
k,
kmin;
VectorInit(part0,0.0f);
for(ii=0; ii < (size_t) i; ii++)
VectorAdd(pointsWeights[ii],part0,&part0);
VectorInit(part1,0.0f);
for (j=(size_t) i;;)
{
if (j == 0)
{
VectorCopy44(pointsWeights[0],&part2);
kmin = 1;
}
else
{
VectorInit(part2,0.0f);
kmin = j;
}
for (k=kmin;;)
{
DDSVector4
a,
alpha2_sum,
alphax_sum,
alphabeta_sum,
b,
beta2_sum,
betax_sum,
e1,
e2,
factor,
part3;
float
error;
VectorSubtract(xSumwSum,part2,&part3);
VectorSubtract(part3,part1,&part3);
VectorSubtract(part3,part0,&part3);
VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
VectorInit(alpha2_sum,alphax_sum.w);
VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
VectorInit(beta2_sum,betax_sum.w);
VectorAdd(part1,part2,&alphabeta_sum);
VectorInit(alphabeta_sum,alphabeta_sum.w);
VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
VectorMultiply(alpha2_sum,beta2_sum,&factor);
VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
&factor);
VectorReciprocal(factor,&factor);
VectorMultiply(alphax_sum,beta2_sum,&a);
VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
VectorMultiply(a,factor,&a);
VectorMultiply(betax_sum,alpha2_sum,&b);
VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
VectorMultiply(b,factor,&b);
VectorClamp(&a);
VectorMultiplyAdd(grid,a,half,&a);
VectorTruncate(&a);
VectorMultiply(a,gridrcp,&a);
VectorClamp(&b);
VectorMultiplyAdd(grid,b,half,&b);
VectorTruncate(&b);
VectorMultiply(b,gridrcp,&b);
VectorMultiply(b,b,&e1);
VectorMultiply(e1,beta2_sum,&e1);
VectorMultiply(a,a,&e2);
VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
VectorMultiply(a,b,&e2);
VectorMultiply(e2,alphabeta_sum,&e2);
VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
VectorMultiplyAdd(two,e2,e1,&e2);
VectorMultiply(e2,metric,&e2);
error = e2.x + e2.y + e2.z;
if (error < bestError)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
{
if (error < bestError)
{
VectorCopy43(a,start);
VectorCopy43(b,end);
bestError = error;
besti = i;
bestj = j;
bestk = k;
bestIteration = iterationIndex;
}
}
}
if (k == count)
break;
VectorAdd(pointsWeights[k],part2,&part2);
k++;
}
if (j == count)
break;
VectorAdd(pointsWeights[j],part1,&part1);
j++;
}
}
if (bestIteration != iterationIndex)
break;
iterationIndex++;
if (iterationIndex == 8)
break;
VectorSubtract3(*end,*start,&axis);
if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
iterationIndex) == MagickFalse)
break;
}
o = order + (16*bestIteration);
for (i=0; i < (ssize_t) besti; i++)
unordered[o[i]] = 0;
for (i=besti; i < (ssize_t) bestj; i++)
unordered[o[i]] = 2;
for (i=bestj; i < (ssize_t) bestk; i++)
unordered[o[i]] = 3;
for (i=bestk; i < (ssize_t) count; i++)
unordered[o[i]] = 1;
RemapIndices(map,unordered,indices);
}
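/*
CompressRangeFit below is the cheaper fallback to the cluster fit above:
it projects every colour onto the principal axis, takes the two extremes
as endpoints, snaps them to the 565 grid, and assigns each pixel to the
nearest of the four palette entries, whereas CompressClusterFit searches
partitions of the ordered colours for least-squares optimal endpoints.
*/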
static void CompressRangeFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
float
d,
bestDist,
max,
min,
val;
DDSVector3
codes[4],
grid,
gridrcp,
half,
dist;
register ssize_t
i;
size_t
bestj,
j;
unsigned char
closest[16];
VectorInit3(half,0.5f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
if (count > 0)
{
VectorCopy43(points[0],start);
VectorCopy43(points[0],end);
min = max = Dot(points[0],principle);
for (i=1; i < (ssize_t) count; i++)
{
val = Dot(points[i],principle);
if (val < min)
{
VectorCopy43(points[i],start);
min = val;
}
else if (val > max)
{
VectorCopy43(points[i],end);
max = val;
}
}
}
VectorClamp3(start);
VectorMultiplyAdd3(grid,*start,half,start);
VectorTruncate3(start);
VectorMultiply3(*start,gridrcp,start);
VectorClamp3(end);
VectorMultiplyAdd3(grid,*end,half,end);
VectorTruncate3(end);
VectorMultiply3(*end,gridrcp,end);
codes[0] = *start;
codes[1] = *end;
codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
for (i=0; i < (ssize_t) count; i++)
{
bestDist = 1e+37f;
bestj = 0;
for (j=0; j < 4; j++)
{
dist.x = (points[i].x - codes[j].x) * metric.x;
dist.y = (points[i].y - codes[j].y) * metric.y;
dist.z = (points[i].z - codes[j].z) * metric.z;
d = Dot(dist,dist);
if (d < bestDist)
{
bestDist = d;
bestj = j;
}
}
closest[i] = (unsigned char) bestj;
}
RemapIndices(map, closest, indices);
}
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
unsigned char *index)
{
register ssize_t
i;
size_t
c,
maxError = SIZE_MAX;
for (i=0; i < 2; i++)
{
const DDSSourceBlock*
sources[3];
size_t
error = 0;
for (c=0; c < 3; c++)
{
sources[c] = &lookup[c][color[c]].sources[i];
error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
}
if (error > maxError)
continue;
start->x = (float) sources[0]->start / 31.0f;
start->y = (float) sources[1]->start / 63.0f;
start->z = (float) sources[2]->start / 31.0f;
end->x = (float) sources[0]->end / 31.0f;
end->y = (float) sources[1]->end / 63.0f;
end->z = (float) sources[2]->end / 31.0f;
*index = (unsigned char) (2*i);
maxError = error;
}
}
static void ComputePrincipleComponent(const float *covariance,
DDSVector3 *principle)
{
DDSVector4
row0,
row1,
row2,
v;
register ssize_t
i;
row0.x = covariance[0];
row0.y = covariance[1];
row0.z = covariance[2];
row0.w = 0.0f;
row1.x = covariance[1];
row1.y = covariance[3];
row1.z = covariance[4];
row1.w = 0.0f;
row2.x = covariance[2];
row2.y = covariance[4];
row2.z = covariance[5];
row2.w = 0.0f;
VectorInit(v,1.0f);
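/*
Power iteration: repeatedly multiply the symmetric 3x3 covariance matrix
by v and renormalise by the largest component; after a few rounds v
converges towards the dominant eigenvector, i.e. the principal axis of
the colour cloud.
*/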
for (i=0; i < 8; i++)
{
DDSVector4
w;
float
a;
w.x = row0.x * v.x;
w.y = row0.y * v.x;
w.z = row0.z * v.x;
w.w = row0.w * v.x;
w.x = (row1.x * v.y) + w.x;
w.y = (row1.y * v.y) + w.y;
w.z = (row1.z * v.y) + w.z;
w.w = (row1.w * v.y) + w.w;
w.x = (row2.x * v.z) + w.x;
w.y = (row2.y * v.z) + w.y;
w.z = (row2.z * v.z) + w.z;
w.w = (row2.w * v.z) + w.w;
a = 1.0f / MaxF(w.x,MaxF(w.y,w.z));
v.x = w.x * a;
v.y = w.y * a;
v.z = w.z * a;
v.w = w.w * a;
}
VectorCopy43(v,principle);
}
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
if( total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
}
static MagickBooleanType ConstructOrdering(const size_t count,
const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
float
dps[16],
f;
register ssize_t
i;
size_t
j;
unsigned char
c,
*o,
*p;
o = order + (16*iteration);
for (i=0; i < (ssize_t) count; i++)
{
dps[i] = Dot(points[i],axis);
o[i] = (unsigned char)i;
}
for (i=0; i < (ssize_t) count; i++)
{
for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
{
f = dps[j];
dps[j] = dps[j - 1];
dps[j - 1] = f;
c = o[j];
o[j] = o[j - 1];
o[j - 1] = c;
}
}
for (i=0; i < (ssize_t) iteration; i++)
{
MagickBooleanType
same;
p = order + (16*i);
same = MagickTrue;
for (j=0; j < count; j++)
{
if (o[j] != p[j])
{
same = MagickFalse;
break;
}
}
if (same != MagickFalse)
return MagickFalse;
}
xSumwSum->x = 0;
xSumwSum->y = 0;
xSumwSum->z = 0;
xSumwSum->w = 0;
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
v;
j = (size_t) o[i];
v.x = points[j].w * points[j].x;
v.y = points[j].w * points[j].y;
v.z = points[j].w * points[j].z;
v.w = points[j].w * 1.0f;
VectorCopy44(v,&pointsWeights[i]);
VectorAdd(*xSumwSum,v,xSumwSum);
}
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status,
cubemap = MagickFalse,
volume = MagickFalse,
matte;
CompressionType
compression;
DDSInfo
dds_info;
DDSDecoder
*decoder;
size_t
n,
num_images;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize image structure.
*/
if (ReadDDSInfo(image, &dds_info) != MagickTrue) {
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
cubemap = MagickTrue;
if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
volume = MagickTrue;
(void) SeekBlob(image, 128, SEEK_SET);
/*
Determine pixel format
*/
if (dds_info.pixelformat.flags & DDPF_RGB)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
matte = MagickTrue;
decoder = ReadUncompressedRGBA;
}
else
{
matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
/* Luminance with an alpha channel is not currently supported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
else
{
matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_FOURCC)
{
switch (dds_info.pixelformat.fourcc)
{
case FOURCC_DXT1:
{
matte = MagickFalse;
compression = DXT1Compression;
decoder = ReadDXT1;
break;
}
case FOURCC_DXT3:
{
matte = MagickTrue;
compression = DXT3Compression;
decoder = ReadDXT3;
break;
}
case FOURCC_DXT5:
{
matte = MagickTrue;
compression = DXT5Compression;
decoder = ReadDXT5;
break;
}
default:
{
/* Unknown FOURCC */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
}
}
else
{
/* Neither compressed nor uncompressed... thus unsupported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
num_images = 1;
if (cubemap)
{
/*
Determine number of faces defined in the cubemap
*/
num_images = 0;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
}
if (volume)
num_images = dds_info.depth;
for (n = 0; n < num_images; n++)
{
if (n != 0)
{
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
/* Start a new image */
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
image->matte = matte;
image->compression = compression;
image->columns = dds_info.width;
image->rows = dds_info.height;
image->storage_class = DirectClass;
image->endian = LSBEndian;
image->depth = 8;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if ((decoder)(image, &dds_info, exception) != MagickTrue)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
}
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
size_t
hdr_size,
required;
/* Seek to start of header */
(void) SeekBlob(image, 4, SEEK_SET);
/* Check header field */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 124)
return MagickFalse;
/* Fill in DDS info struct */
dds_info->flags = ReadBlobLSBLong(image);
/* Check required flags */
required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
if ((dds_info->flags & required) != required)
return MagickFalse;
dds_info->height = ReadBlobLSBLong(image);
dds_info->width = ReadBlobLSBLong(image);
dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
dds_info->depth = ReadBlobLSBLong(image);
dds_info->mipmapcount = ReadBlobLSBLong(image);
(void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
/* Read pixel format structure */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 32)
return MagickFalse;
dds_info->pixelformat.flags = ReadBlobLSBLong(image);
dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
dds_info->ddscaps1 = ReadBlobLSBLong(image);
dds_info->ddscaps2 = ReadBlobLSBLong(image);
(void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
return MagickTrue;
}
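/*
For reference, the 128-byte header that ReadDDSInfo() walks (little-endian
DWORD fields, byte offsets): 0 magic "DDS ", 4 dwSize (124), 8 dwFlags,
12 dwHeight, 16 dwWidth, 20 dwPitchOrLinearSize, 24 dwDepth,
28 dwMipMapCount, 32 dwReserved1[11] (44 bytes, skipped),
76 ddspf.dwSize (32), 80 ddspf.dwFlags, 84 dwFourCC, 88 dwRGBBitCount,
92/96/100/104 R/G/B/A bit masks, 108 dwCaps, 112 dwCaps2,
116 dwCaps3+dwCaps4+dwReserved2 (12 bytes, skipped); pixel data starts
at offset 128, which is where ReadDDSImage() seeks.
*/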
static MagickBooleanType ReadDXT1(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
PixelPacket
*q;
register ssize_t
i,
x;
size_t
bits;
ssize_t
j,
y;
unsigned char
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, Min(4, dds_info->width - x),
Min(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickFalse);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code]));
if (colors.a[code] && image->matte == MagickFalse)
/* A transparent texel appeared; enable the matte channel */
image->matte = MagickTrue;
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
SkipDXTMipmaps(image, dds_info, 8);
return MagickTrue;
}
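/*
Each DXT1 block read above is 8 bytes: two little-endian 565 colour
endpoints followed by a 32-bit field holding 16 two-bit palette
selectors, stored row by row from the least significant bits.
*/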
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
alpha;
size_t
a0,
a1,
bits,
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, Min(4, dds_info->width - x),
Min(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = ReadBlobLSBLong(image);
a1 = ReadBlobLSBLong(image);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/*
Extract alpha value: multiply 0..15 by 17 to get range 0..255
*/
if (j < 2)
alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
else
alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
SkipDXTMipmaps(image, dds_info, 16);
return MagickTrue;
}
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
MagickSizeType
alpha_bits;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
a0,
a1;
size_t
alpha,
bits,
code,
alpha_code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, Min(4, dds_info->width - x),
Min(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = (unsigned char) ReadBlobByte(image);
a1 = (unsigned char) ReadBlobByte(image);
alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/* Extract alpha value */
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
if (alpha_code == 0)
alpha = a0;
else if (alpha_code == 1)
alpha = a1;
else if (a0 > a1)
alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
else if (alpha_code == 6)
alpha = 0;
else if (alpha_code == 7)
alpha = 255;
else
alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
SkipDXTMipmaps(image, dds_info, 16);
return MagickTrue;
}
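/*
Worked example of the DXT5 alpha decode above: with a0 = 200 and a1 = 40
(a0 > a1 selects the eight-alpha mode), alpha code 2 yields
(6*200 + 1*40)/7 = 177.
*/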
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
x, y;
unsigned short
color;
if (dds_info->pixelformat.rgb_bitcount == 8)
(void) SetImageType(image,GrayscaleType);
else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image)));
else if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
(((color >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 5) >> 10)/63.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
if (dds_info->pixelformat.rgb_bitcount == 32)
(void) ReadBlobByte(image);
}
SetPixelAlpha(q,QuantumRange);
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
SkipRGBMipmaps(image, dds_info, 3);
return MagickTrue;
}
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
alphaBits,
x,
y;
unsigned short
color;
alphaBits=0;
if (dds_info->pixelformat.rgb_bitcount == 16)
{
if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
alphaBits=1;
else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
{
alphaBits=2;
(void) SetImageType(image,GrayscaleMatteType);
}
else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
alphaBits=4;
else
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
}
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
if (alphaBits == 1)
{
SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 1) >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 6) >> 11)/31.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else if (alphaBits == 2)
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(color >> 8)));
SetPixelGray(q,ScaleCharToQuantum((unsigned char)color));
}
else
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(((color >> 12)/15.0)*255)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 4) >> 12)/15.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 8) >> 12)/15.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 12) >> 12)/15.0)*255)));
}
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
SkipRGBMipmaps(image, dds_info, 4);
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
MagickInfo
*entry;
entry = SetMagickInfo("DDS");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT1");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT5");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
static void RemapIndices(const ssize_t *map, const unsigned char *source,
unsigned char *target)
{
register ssize_t
i;
for (i = 0; i < 16; i++)
{
if (map[i] == -1)
target[i] = 3;
else
target[i] = source[map[i]];
}
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
static void SkipDXTMipmaps(Image *image, DDSInfo *dds_info, int texel_size)
{
register ssize_t
i;
MagickOffsetType
offset;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
(void) SeekBlob(image, offset, SEEK_CUR);
w = DIV2(w);
h = DIV2(h);
}
}
}
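/*
Worked example: a 256x256 DXT1 texture (texel_size 8) with a full mipmap
chain skips 32*32*8 = 8192 bytes for the 128x128 level, then 2048, 512,
128, 32, and 8 bytes apiece for the 4x4, 2x2, and 1x1 levels.
*/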
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static void SkipRGBMipmaps(Image *image, DDSInfo *dds_info, int pixel_size)
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) w * h * pixel_size;
(void) SeekBlob(image, offset, SEEK_CUR);
w = DIV2(w);
h = DIV2(h);
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
size_t max5, size_t min7, size_t max7)
{
register ssize_t
i;
size_t
err5,
err7,
j;
unsigned char
indices5[16],
indices7[16];
FixRange(min5,max5,5);
err5 = CompressAlpha(min5,max5,5,alphas,indices5);
FixRange(min7,max7,7);
err7 = CompressAlpha(min7,max7,7,alphas,indices7);
if (err7 < err5)
{
for (i=0; i < 16; i++)
{
unsigned char
index;
index = indices7[i];
if( index == 0 )
indices5[i] = 1;
else if (index == 1)
indices5[i] = 0;
else
indices5[i] = 9 - index;
}
min5 = max7;
max5 = min7;
}
(void) WriteBlobByte(image,(unsigned char) min5);
(void) WriteBlobByte(image,(unsigned char) max5);
for(i=0; i < 2; i++)
{
size_t
value = 0;
for (j=0; j < 8; j++)
{
size_t index = (size_t) indices5[j + i*8];
value |= ( index << 3*j );
}
for (j=0; j < 3; j++)
{
size_t byte = (value >> 8*j) & 0xff;
(void) WriteBlobByte(image,(unsigned char) byte);
}
}
}
static void WriteCompressed(Image *image, const size_t count,
DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
float
covariance[16];
DDSVector3
end,
principle,
start;
DDSVector4
metric;
unsigned char
indices[16];
VectorInit(metric,1.0f);
VectorInit3(start,0.0f);
VectorInit3(end,0.0f);
ComputeWeightedCovariance(count,points,covariance);
ComputePrincipleComponent(covariance,&principle);
if (clusterFit == MagickFalse || count == 0)
CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
else
CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT1, DXT5,
% or uncompressed RGB(A) format, depending on the image matte channel and
% the dds:compression option.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
Image *image)
{
const char
*option;
size_t
compression,
columns,
maxMipmaps,
mipmaps,
pixelFormat,
rows;
MagickBooleanType
clusterFit,
status,
weightByAlpha;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace);
pixelFormat=DDPF_FOURCC;
compression=FOURCC_DXT5;
if (!image->matte)
compression=FOURCC_DXT1;
if (LocaleCompare(image_info->magick,"dxt1") == 0)
compression=FOURCC_DXT1;
option=GetImageOption(image_info,"dds:compression");
if (option != (char *) NULL)
{
if (LocaleCompare(option,"dxt1") == 0)
compression=FOURCC_DXT1;
if (LocaleCompare(option,"none") == 0)
pixelFormat=DDPF_RGB;
}
clusterFit=MagickFalse;
weightByAlpha=MagickFalse;
if (pixelFormat == DDPF_FOURCC)
{
option=GetImageOption(image_info,"dds:cluster-fit");
if (option != (char *) NULL && LocaleCompare(option,"true") == 0)
{
clusterFit=MagickTrue;
if (compression != FOURCC_DXT1)
{
option=GetImageOption(image_info,"dds:weight-by-alpha");
if (option != (char *) NULL && LocaleCompare(option,"true") == 0)
weightByAlpha=MagickTrue;
}
}
}
maxMipmaps=SIZE_MAX;
mipmaps=0;
if ((image->columns & (image->columns - 1)) == 0 &&
(image->rows & (image->rows - 1)) == 0)
{
option=GetImageOption(image_info,"dds:mipmaps");
if (option != (char *) NULL)
maxMipmaps=StringToUnsignedLong(option);
if (maxMipmaps != 0)
{
columns=image->columns;
rows=image->rows;
while (columns != 1 && rows != 1 && mipmaps != maxMipmaps)
{
columns=DIV2(columns);
rows=DIV2(rows);
mipmaps++;
}
}
}
WriteDDSInfo(image,pixelFormat,compression,mipmaps);
WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
&image->exception);
if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps,
clusterFit,weightByAlpha,&image->exception) == MagickFalse)
return(MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MaxTextExtent];
register ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT | DDSD_LINEARSIZE);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->matte)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124);
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,
(unsigned int) (Max(1,(image->columns+3)/4) * 8));
else
(void) WriteBlobLSBLong(image,
(unsigned int) (Max(1,(image->columns+3)/4) * 16));
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
(void) ResetMagickMemory(software,0,sizeof(software));
(void) strcpy(software,"IMAGEMAGICK");
(void) WriteBlob(image,44,(unsigned char *) software);
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
for(i=0;i < 5;i++) /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00);
if (image->matte)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
i,
y,
bx,
by;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const PixelPacket *) NULL)
break;
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(p));
else
alpha = 255;
alphas[4*by + bx] = (size_t)alpha;
point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f;
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p++;
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
if (compression == FOURCC_DXT5)
{
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
}
}
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
static void WriteImageData(Image *image, const size_t pixelFormat,
const size_t compression, const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
if (pixelFormat == DDPF_FOURCC)
WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
else
WriteUncompressed(image,exception);
}
static inline size_t ClampToLimit(const float value,
const size_t limit)
{
size_t
result;
if (value < 0.0f)
return(0);
result = (size_t) (value + 0.5f);
if (result > limit)
return(limit);
return result;
}
static inline size_t ColorTo565(const DDSVector3 point)
{
size_t r = ClampToLimit(31.0f*point.x,31);
size_t g = ClampToLimit(63.0f*point.y,63);
size_t b = ClampToLimit(31.0f*point.z,31);
return (r << 11) | (g << 5) | b;
}
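/*
Worked example: the point (1.0, 0.5, 0.0) quantises to r = 31, g = 32
(63*0.5 + 0.5 rounds up) and b = 0, giving the 565 word
(31 << 11) | (32 << 5) = 0xfc00.
*/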
static void WriteIndices(Image *image, const DDSVector3 start,
const DDSVector3 end, unsigned char* indices)
{
register ssize_t
i;
size_t
a,
b;
unsigned char
remapped[16];
const unsigned char
*ind;
a = ColorTo565(start);
b = ColorTo565(end);
for (i=0; i<16; i++)
{
if( a < b )
remapped[i] = (indices[i] ^ 0x1) & 0x3;
else if( a == b )
remapped[i] = 0;
else
remapped[i] = indices[i];
}
if( a < b )
Swap(a,b);
(void) WriteBlobByte(image,(unsigned char) (a & 0xff));
(void) WriteBlobByte(image,(unsigned char) (a >> 8));
(void) WriteBlobByte(image,(unsigned char) (b & 0xff));
(void) WriteBlobByte(image,(unsigned char) (b >> 8));
for (i=0; i<4; i++)
{
ind = remapped + 4*i;
(void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
(ind[3] << 6));
}
}
static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
Image
*resize_image;
register ssize_t
i;
size_t
columns,
rows;
columns = image->columns;
rows = image->rows;
for (i=0; i< (ssize_t) mipmaps; i++)
{
resize_image = ResizeImage(image,columns/2,rows/2,TriangleFilter,1.0,
exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
DestroyBlob(resize_image);
resize_image->blob=ReferenceBlob(image->blob);
WriteImageData(resize_image,pixelFormat,compression,clusterFit,
weightByAlpha,exception);
resize_image=DestroyImage(resize_image);
columns = DIV2(columns);
rows = DIV2(rows);
}
return(MagickTrue);
}
static void WriteSingleColorFit(Image *image, const DDSVector4* points,
const ssize_t* map)
{
DDSVector3
start,
end;
register ssize_t
i;
unsigned char
color[3],
index,
indexes[16],
indices[16];
color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
index=0;
ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
for (i=0; i< 16; i++)
indexes[i]=index;
RemapIndices(map,indexes,indices);
WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p)));
if (image->matte)
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p)));
p++;
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_2435_0 |
crossvul-cpp_data_good_5845_0 | /*
* algif_hash: User-space interface for hash algorithms
*
* This file provides the user-space API for hash algorithms.
*
* Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/hash.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
struct hash_ctx {
struct af_alg_sgl sgl;
u8 *result;
struct af_alg_completion completion;
unsigned int len;
bool more;
struct ahash_request req;
};
static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored)
{
int limit = ALG_MAX_PAGES * PAGE_SIZE;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned long iovlen;
struct iovec *iov;
long copied = 0;
int err;
if (limit > sk->sk_sndbuf)
limit = sk->sk_sndbuf;
lock_sock(sk);
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
if (err)
goto unlock;
}
ctx->more = 0;
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
while (seglen) {
int len = min_t(unsigned long, seglen, limit);
int newlen;
newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
if (newlen < 0) {
err = copied ? 0 : newlen;
goto unlock;
}
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
newlen);
err = af_alg_wait_for_completion(
crypto_ahash_update(&ctx->req),
&ctx->completion);
af_alg_free_sg(&ctx->sgl);
if (err)
goto unlock;
seglen -= newlen;
from += newlen;
copied += newlen;
}
}
err = 0;
ctx->more = msg->msg_flags & MSG_MORE;
if (!ctx->more) {
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
}
unlock:
release_sock(sk);
return err ?: copied;
}
static ssize_t hash_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
int err;
lock_sock(sk);
sg_init_table(ctx->sgl.sg, 1);
sg_set_page(ctx->sgl.sg, page, size, offset);
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
if (!(flags & MSG_MORE)) {
if (ctx->more)
err = crypto_ahash_finup(&ctx->req);
else
err = crypto_ahash_digest(&ctx->req);
} else {
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
if (err)
goto unlock;
}
err = crypto_ahash_update(&ctx->req);
}
err = af_alg_wait_for_completion(err, &ctx->completion);
if (err)
goto unlock;
ctx->more = flags & MSG_MORE;
unlock:
release_sock(sk);
return err ?: size;
}
static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
int err;
if (len > ds)
len = ds;
else if (len < ds)
msg->msg_flags |= MSG_TRUNC;
lock_sock(sk);
if (ctx->more) {
ctx->more = 0;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
}
err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
unlock:
release_sock(sk);
return err ?: len;
}
static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
int err;
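/*
* accept() on an operation socket clones the partial hash: export the
* parent's intermediate state, create the child socket, then import the
* state into the child's request so both can continue independently.
*/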
err = crypto_ahash_export(req, state);
if (err)
return err;
err = af_alg_accept(ask->parent, newsock);
if (err)
return err;
sk2 = newsock->sk;
ask2 = alg_sk(sk2);
ctx2 = ask2->private;
ctx2->more = 1;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
sock_orphan(sk2);
sock_put(sk2);
}
return err;
}
static struct proto_ops algif_hash_ops = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.getsockopt = sock_no_getsockopt,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.setsockopt = sock_no_setsockopt,
.poll = sock_no_poll,
.release = af_alg_release,
.sendmsg = hash_sendmsg,
.sendpage = hash_sendpage,
.recvmsg = hash_recvmsg,
.accept = hash_accept,
};
static void *hash_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_ahash(name, type, mask);
}
static void hash_release(void *private)
{
crypto_free_ahash(private);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_ahash_setkey(private, key, keylen);
}
static void hash_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
sock_kfree_s(sk, ctx->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
static int hash_accept_parent(void *private, struct sock *sk)
{
struct hash_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
unsigned ds = crypto_ahash_digestsize(private);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
if (!ctx->result) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
memset(ctx->result, 0, ds);
ctx->len = len;
ctx->more = 0;
af_alg_init_completion(&ctx->completion);
ask->private = ctx;
ahash_request_set_tfm(&ctx->req, private);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
sk->sk_destruct = hash_sock_destruct;
return 0;
}
static const struct af_alg_type algif_type_hash = {
.bind = hash_bind,
.release = hash_release,
.setkey = hash_setkey,
.accept = hash_accept_parent,
.ops = &algif_hash_ops,
.name = "hash",
.owner = THIS_MODULE
};
static int __init algif_hash_init(void)
{
return af_alg_register_type(&algif_type_hash);
}
static void __exit algif_hash_exit(void)
{
int err = af_alg_unregister_type(&algif_type_hash);
BUG_ON(err);
}
module_init(algif_hash_init);
module_exit(algif_hash_exit);
MODULE_LICENSE("GPL");
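/*
* For reference, a minimal user-space sketch of driving this interface.
* It is illustrative only (not part of the module): it assumes the
* "sha1" algorithm with its 20-byte digest and omits error handling.
*/
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>
int main(void)
{
struct sockaddr_alg sa = {
.salg_family = AF_ALG,
.salg_type = "hash", /* matches algif_type_hash.name */
.salg_name = "sha1", /* any hash known to the crypto API */
};
unsigned char digest[20]; /* SHA-1 digest size */
int tfmfd, opfd;
ssize_t i, n;
tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
opfd = accept(tfmfd, NULL, 0); /* hash_accept_parent() path */
send(opfd, "abc", 3, 0); /* handled by hash_sendmsg() */
n = read(opfd, digest, sizeof(digest)); /* hash_recvmsg() */
for (i = 0; i < n; i++)
printf("%02x", digest[i]);
printf("\n");
close(opfd);
close(tfmfd);
return 0;
}
#endif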
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5845_0 |
crossvul-cpp_data_bad_3848_0 | /*
* Fast Userspace Mutexes (which I call "Futexes!").
* (C) Rusty Russell, IBM 2002
*
* Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
* (C) Copyright 2003 Red Hat Inc, All Rights Reserved
*
* Removed page pinning, fix privately mapped COW pages and other cleanups
* (C) Copyright 2003, 2004 Jamie Lokier
*
* Robust futex support started by Ingo Molnar
* (C) Copyright 2006 Red Hat Inc, All Rights Reserved
* Thanks to Thomas Gleixner for suggestions, analysis and fixes.
*
* PI-futex support started by Ingo Molnar and Thomas Gleixner
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* PRIVATE futexes by Eric Dumazet
* Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
*
* Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
* Copyright (C) IBM Corporation, 2009
* Thanks to Thomas Gleixner for conceptual design and careful reviews.
*
* Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
* enough at me, Linus for the original (flawed) idea, Matthew
* Kirkwood for proof-of-concept implementation.
*
* "The futexes are also cursed."
* "But they come in a choice of three flavours!"
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <asm/futex.h>
#include "rtmutex_common.h"
int __read_mostly futex_cmpxchg_enabled;
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
/*
* Futex flags used to encode options to functions and preserve them across
* restarts.
*/
#define FLAGS_SHARED 0x01
#define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04
/*
* Priority Inheritance state:
*/
struct futex_pi_state {
/*
* list of 'owned' pi_state instances - these have to be
* cleaned up in do_exit() if the task exits prematurely:
*/
struct list_head list;
/*
* The PI object:
*/
struct rt_mutex pi_mutex;
struct task_struct *owner;
atomic_t refcount;
union futex_key key;
};
/**
* struct futex_q - The hashed futex queue entry, one per waiting task
* @list: priority-sorted list of tasks waiting on this futex
* @task: the task waiting on the futex
* @lock_ptr: the hash bucket lock
* @key: the key the futex is hashed on
* @pi_state: optional priority inheritance state
* @rt_waiter: rt_waiter storage for use with requeue_pi
* @requeue_pi_key: the requeue_pi target futex key
* @bitset: bitset for the optional bitmasked wakeup
*
* We use this hashed waitqueue, instead of a normal wait_queue_t, so
* we can wake only the relevant ones (hashed queues may be shared).
*
* A futex_q has a woken state, just like tasks have TASK_RUNNING.
* It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
* The order of wakeup is always to make the first condition true, then
* the second.
*
* PI futexes are typically woken before they are removed from the hash list via
* the rt_mutex code. See unqueue_me_pi().
*/
struct futex_q {
struct plist_node list;
struct task_struct *task;
spinlock_t *lock_ptr;
union futex_key key;
struct futex_pi_state *pi_state;
struct rt_mutex_waiter *rt_waiter;
union futex_key *requeue_pi_key;
u32 bitset;
};
static const struct futex_q futex_q_init = {
/* list gets initialized in queue_me() */
.key = FUTEX_KEY_INIT,
.bitset = FUTEX_BITSET_MATCH_ANY
};
/*
* Hash buckets are shared by all the futex_keys that hash to the same
* location. Each key may have multiple futex_q structures, one for each task
* waiting on a futex.
*/
struct futex_hash_bucket {
spinlock_t lock;
struct plist_head chain;
};
static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
/*
* We hash on the keys returned from get_futex_key (see below).
*/
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
u32 hash = jhash2((u32*)&key->both.word,
(sizeof(key->both.word)+sizeof(key->both.ptr))/4,
key->both.offset);
return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
/*
* Return 1 if two futex_keys are equal, 0 otherwise.
*/
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
return (key1 && key2
&& key1->both.word == key2->both.word
&& key1->both.ptr == key2->both.ptr
&& key1->both.offset == key2->both.offset);
}
/*
* Take a reference to the resource addressed by a key.
* Can be called while holding spinlocks.
*
*/
static void get_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr)
return;
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
ihold(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
atomic_inc(&key->private.mm->mm_count);
break;
}
}
/*
* Drop a reference to the resource addressed by a key.
* The hash bucket spinlock must not be held.
*/
static void drop_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr) {
/* If we're here then we tried to put a key we failed to get */
WARN_ON_ONCE(1);
return;
}
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
iput(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
mmdrop(key->private.mm);
break;
}
}
/**
* get_futex_key() - Get parameters which are the keys for a futex
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
* @rw: mapping needs to be read/write (values: VERIFY_READ,
* VERIFY_WRITE)
*
* Returns a negative error code or 0
* The key words are stored in *key on success.
*
* For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
* offset_within_page). For private mappings, it's (uaddr, current->mm).
* We can usually work out the index without swapping in the page.
*
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page, *page_head;
int err, ro = 0;
/*
* The futex address must be "naturally" aligned.
*/
key->both.offset = address % PAGE_SIZE;
if (unlikely((address % sizeof(u32)) != 0))
return -EINVAL;
address -= key->both.offset;
/*
* PROCESS_PRIVATE futexes are fast.
* As the mm cannot disappear under us and the 'key' only needs
* the virtual address, we don't even have to find the underlying vma.
* Note: we do have to check that 'uaddr' is a valid user address,
* but access_ok() should be faster than find_vma().
*/
if (!fshared) {
if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
return -EFAULT;
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key);
return 0;
}
again:
err = get_user_pages_fast(address, 1, 1, &page);
/*
* If write access is not required (e.g. FUTEX_WAIT), try
* to get read-only access.
*/
if (err == -EFAULT && rw == VERIFY_READ) {
err = get_user_pages_fast(address, 1, 0, &page);
ro = 1;
}
if (err < 0)
return err;
else
err = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
page_head = page;
if (unlikely(PageTail(page))) {
put_page(page);
/* serialize against __split_huge_page_splitting() */
local_irq_disable();
if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
page_head = compound_head(page);
/*
* page_head is a valid pointer but we must pin
* it before taking the PG_lock and/or
* PG_compound_lock. The moment we re-enable
* irqs __split_huge_page_splitting() can
* return and the head page can be freed from
* under us. We can't take the PG_lock and/or
* PG_compound_lock on a page that could be
* freed from under us.
*/
if (page != page_head) {
get_page(page_head);
put_page(page);
}
local_irq_enable();
} else {
local_irq_enable();
goto again;
}
}
#else
page_head = compound_head(page);
if (page != page_head) {
get_page(page_head);
put_page(page);
}
#endif
lock_page(page_head);
/*
* If page_head->mapping is NULL, then it cannot be a PageAnon
* page; but it might be the ZERO_PAGE or in the gate area or
* in a special mapping (all cases which we are happy to fail);
* or it may have been a good file page when get_user_pages_fast
* found it, but truncated or holepunched or subjected to
* invalidate_complete_page2 before we got the page lock (also
* cases which we are happy to fail). And we hold a reference,
* so refcount care in invalidate_complete_page's remove_mapping
* prevents drop_caches from setting mapping to NULL beneath us.
*
* The case we do have to guard against is when memory pressure made
* shmem_writepage move it from filecache to swapcache beneath us:
* an unlikely race, but we do need to retry for page_head->mapping.
*/
if (!page_head->mapping) {
int shmem_swizzled = PageSwapCache(page_head);
unlock_page(page_head);
put_page(page_head);
if (shmem_swizzled)
goto again;
return -EFAULT;
}
/*
* Private mappings are handled in a simple way.
*
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
* the object not the particular process.
*/
if (PageAnon(page_head)) {
/*
* A RO anonymous page will never change and thus doesn't make
* sense for futex operations.
*/
if (ro) {
err = -EFAULT;
goto out;
}
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
} else {
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.inode = page_head->mapping->host;
key->shared.pgoff = page_head->index;
}
get_futex_key_refs(key);
out:
unlock_page(page_head);
put_page(page_head);
return err;
}
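/*
 * Illustrative sketch (not kernel code) of the two key flavours
 * composed above, assuming a hypothetical caller:
 *
 *	union futex_key key = FUTEX_KEY_INIT;
 *
 *	get_futex_key(uaddr, 0, &key, VERIFY_READ);
 *	// private: key.private.mm == current->mm,
 *	//          key.private.address == page base of uaddr
 *
 *	get_futex_key(uaddr, 1, &key, VERIFY_WRITE);
 *	// shared: key.shared.inode and key.shared.pgoff name the backing
 *	//         object, so every process mapping the same file page
 *	//         hashes to the same bucket
 */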
static inline void put_futex_key(union futex_key *key)
{
drop_futex_key_refs(key);
}
/**
* fault_in_user_writeable() - Fault in user address and verify RW access
* @uaddr: pointer to faulting user space address
*
* Slow path to fixup the fault we just took in the atomic write
* access to @uaddr.
*
* We have no generic implementation of a non-destructive write to the
* user address. We know that we faulted in the atomic pagefault
* disabled section so we can as well avoid the #PF overhead by
* calling get_user_pages() right away.
*/
static int fault_in_user_writeable(u32 __user *uaddr)
{
struct mm_struct *mm = current->mm;
int ret;
down_read(&mm->mmap_sem);
ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
FAULT_FLAG_WRITE);
up_read(&mm->mmap_sem);
return ret < 0 ? ret : 0;
}
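/*
 * Typical caller pattern (sketch): the atomic futex ops run with
 * pagefaults disabled, so on -EFAULT the slow path drops the hb lock,
 * faults the page in writable via the helper above and retries, e.g.:
 *
 *	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
 *		spin_unlock(&hb->lock);
 *		if (fault_in_user_writeable(uaddr))
 *			return -EFAULT;
 *		goto retry;
 *	}
 */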
/**
* futex_top_waiter() - Return the highest priority waiter on a futex
* @hb: the hash bucket the futex_q's reside in
* @key: the futex key (to distinguish it from other futex futex_q's)
*
* Must be called with the hb lock held.
*/
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
union futex_key *key)
{
struct futex_q *this;
plist_for_each_entry(this, &hb->chain, list) {
if (match_futex(&this->key, key))
return this;
}
return NULL;
}
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
u32 uval, u32 newval)
{
int ret;
pagefault_disable();
ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
pagefault_enable();
return ret;
}
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
int ret;
pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
pagefault_enable();
return ret ? -EFAULT : 0;
}
/*
* PI code:
*/
static int refill_pi_state_cache(void)
{
struct futex_pi_state *pi_state;
if (likely(current->pi_state_cache))
return 0;
pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
if (!pi_state)
return -ENOMEM;
INIT_LIST_HEAD(&pi_state->list);
/* pi_mutex gets initialized later */
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
pi_state->key = FUTEX_KEY_INIT;
current->pi_state_cache = pi_state;
return 0;
}
static struct futex_pi_state * alloc_pi_state(void)
{
struct futex_pi_state *pi_state = current->pi_state_cache;
WARN_ON(!pi_state);
current->pi_state_cache = NULL;
return pi_state;
}
static void free_pi_state(struct futex_pi_state *pi_state)
{
if (!atomic_dec_and_test(&pi_state->refcount))
return;
/*
* If pi_state->owner is NULL, the owner is most probably dying
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
raw_spin_lock_irq(&pi_state->owner->pi_lock);
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
}
if (current->pi_state_cache)
kfree(pi_state);
else {
/*
* pi_state->list is already empty.
* clear pi_state->owner.
* refcount is at 0 - put it back to 1.
*/
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
current->pi_state_cache = pi_state;
}
}
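/*
 * pi_state cache lifecycle (sketch): allocation happens outside the
 * locks, consumption under the hb lock, and release recycles the
 * object into the per-task cache:
 *
 *	if (refill_pi_state_cache())	// may sleep, no locks held
 *		return -ENOMEM;
 *	...
 *	pi_state = alloc_pi_state();	// cannot fail, consumes the cache
 *	...
 *	free_pi_state(pi_state);	// refills the cache if it is empty
 */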
/*
* Look up the task based on what TID userspace gave us.
* We don't trust it.
*/
static struct task_struct * futex_find_get_task(pid_t pid)
{
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p)
get_task_struct(p);
rcu_read_unlock();
return p;
}
/*
* This task is holding PI mutexes at exit time => bad.
* Kernel cleans up PI-state, but userspace is likely hosed.
* (Robust-futex cleanup is separate and might save the day for userspace.)
*/
void exit_pi_state_list(struct task_struct *curr)
{
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
struct futex_hash_bucket *hb;
union futex_key key = FUTEX_KEY_INIT;
if (!futex_cmpxchg_enabled)
return;
/*
* We are a ZOMBIE and nobody can enqueue itself on
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
raw_spin_lock_irq(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
*/
if (head->next != next) {
spin_unlock(&hb->lock);
continue;
}
WARN_ON(pi_state->owner != curr);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
raw_spin_unlock_irq(&curr->pi_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
spin_unlock(&hb->lock);
raw_spin_lock_irq(&curr->pi_lock);
}
raw_spin_unlock_irq(&curr->pi_lock);
}
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
union futex_key *key, struct futex_pi_state **ps)
{
struct futex_pi_state *pi_state = NULL;
struct futex_q *this, *next;
struct plist_head *head;
struct task_struct *p;
pid_t pid = uval & FUTEX_TID_MASK;
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, key)) {
/*
* Another waiter already exists - bump up
* the refcount and return its pi_state:
*/
pi_state = this->pi_state;
/*
* Userspace might have messed up non-PI and PI futexes
*/
if (unlikely(!pi_state))
return -EINVAL;
WARN_ON(!atomic_read(&pi_state->refcount));
/*
* When pi_state->owner is NULL then the owner died
* and another waiter is on the fly. pi_state->owner
* is fixed up by the task which acquires
* pi_state->rt_mutex.
*
* We do not check for pid == 0 which can happen when
* the owner died and exit_robust_list() cleared the
* TID.
*/
if (pid && pi_state->owner) {
/*
* Bail out if user space manipulated the
* futex value.
*/
if (pid != task_pid_vnr(pi_state->owner))
return -EINVAL;
}
atomic_inc(&pi_state->refcount);
*ps = pi_state;
return 0;
}
}
/*
* We are the first waiter - try to look up the real owner and attach
* the new pi_state to it, but bail out when TID = 0
*/
if (!pid)
return -ESRCH;
p = futex_find_get_task(pid);
if (!p)
return -ESRCH;
/*
* We need to look at the task state flags to figure out
* whether the task is exiting. To protect against the do_exit
* change of the task flags, we do this protected by
* p->pi_lock:
*/
raw_spin_lock_irq(&p->pi_lock);
if (unlikely(p->flags & PF_EXITING)) {
/*
* The task is on the way out. When PF_EXITPIDONE is
* set, we know that the task has finished the
* cleanup:
*/
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return ret;
}
pi_state = alloc_pi_state();
/*
* Initialize the pi_mutex in locked state and make 'p'
* the owner of it:
*/
rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
/* Store the key for possible exit cleanups: */
pi_state->key = *key;
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &p->pi_state_list);
pi_state->owner = p;
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
*ps = pi_state;
return 0;
}
/**
* futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
* @uaddr: the pi futex user address
* @hb: the pi futex hash bucket
* @key: the futex key associated with uaddr and hb
* @ps: the pi_state pointer where we store the result of the
* lookup
* @task: the task to perform the atomic lock work for. This will
* be "current" except in the case of requeue pi.
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Returns:
* 0 - ready to wait
* 1 - acquired the lock
* <0 - error
*
* The hb->lock and futex_key refs shall be held by the caller.
*/
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
union futex_key *key,
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
int lock_taken, ret, ownerdied = 0;
u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry:
ret = lock_taken = 0;
/*
* To avoid races, we attempt to take the lock here again
* (by doing a 0 -> TID atomic cmpxchg), while holding all
* the locks. It will most likely not succeed.
*/
newval = vpid;
if (set_waiters)
newval |= FUTEX_WAITERS;
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
return -EFAULT;
/*
* Detect deadlocks.
*/
if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
return -EDEADLK;
/*
* Surprise - we got the lock. Just return to userspace:
*/
if (unlikely(!curval))
return 1;
uval = curval;
/*
* Set the FUTEX_WAITERS flag, so the owner will know it has someone
* to wake at the next unlock.
*/
newval = curval | FUTEX_WAITERS;
/*
* There are two cases in which we take over the futex: the owner
* TID in the futex word is 0 (robust cleanup cleared it after the
* owner died), or we already detected that the owner died
* (ownerdied). The take-over is unconditional in both cases.
*
* This is safe as we are protected by the hash bucket lock!
*/
if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
/* Keep the OWNER_DIED bit */
newval = (curval & ~FUTEX_TID_MASK) | vpid;
ownerdied = 0;
lock_taken = 1;
}
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
if (unlikely(curval != uval))
goto retry;
/*
* We took the lock via the owner-died take-over.
*/
if (unlikely(lock_taken))
return 1;
/*
* We don't have the lock. Look up the PI state (or create it if
* we are the first waiter):
*/
ret = lookup_pi_state(uval, hb, key, ps);
if (unlikely(ret)) {
switch (ret) {
case -ESRCH:
/*
* No owner found for this futex. Check if the
* OWNER_DIED bit is set to figure out whether
* this is a robust futex or not.
*/
if (get_futex_value_locked(&curval, uaddr))
return -EFAULT;
/*
* We simply start over in case of a robust
* futex. The code above will take the futex
* and return happy.
*/
if (curval & FUTEX_OWNER_DIED) {
ownerdied = 1;
goto retry;
}
default:
break;
}
}
return ret;
}
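/*
 * Userspace counterpart (sketch, not glibc's exact code; atomic_cmpxchg
 * and gettid stand for whatever the C library provides): the fast path
 * is a 0 -> TID cmpxchg done entirely in userspace; only on contention
 * does the task enter the kernel, which re-runs the same transition
 * under the hb lock in futex_lock_pi_atomic() above:
 *
 *	if (atomic_cmpxchg(futex, 0, gettid()) != 0)
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */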
/**
* __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be NULL and must be held by the caller.
*/
static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
|| WARN_ON(plist_node_empty(&q->list)))
return;
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
}
/*
* The hash bucket lock must be held when this is called.
* Afterwards, the futex_q must not be accessed.
*/
static void wake_futex(struct futex_q *q)
{
struct task_struct *p = q->task;
/*
* We set q->lock_ptr = NULL _before_ we wake up the task. If
* a non-futex wake up happens on another CPU then the task
* might exit and p would dereference a non-existing task
* struct. Prevent this by holding a reference on p across the
* wake up.
*/
get_task_struct(p);
__unqueue_futex(q);
/*
* The waiting task can free the futex_q as soon as
* q->lock_ptr = NULL is written, without taking any locks. A
* memory barrier is required here to prevent the following
* store to lock_ptr from getting ahead of the plist_del.
*/
smp_wmb();
q->lock_ptr = NULL;
wake_up_state(p, TASK_NORMAL);
put_task_struct(p);
}
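/*
 * Waiter side of the protocol above (sketch): unqueue_me() below first
 * reads lock_ptr locklessly; a NULL read means wake_futex() already ran
 * and, thanks to the smp_wmb(), the plist_del() is visible too:
 *
 *	lock_ptr = q->lock_ptr;
 *	if (!lock_ptr)
 *		return 0;	// already woken and unqueued
 */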
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
if (!pi_state)
return -EINVAL;
/*
* If current does not own the pi_state then the futex is
* inconsistent and user space fiddled with the futex value.
*/
if (pi_state->owner != current)
return -EINVAL;
raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
* It is possible that the next waiter (the one that brought
* this owner to the kernel) timed out and is no longer
* waiting on the lock.
*/
if (!new_owner)
new_owner = this->task;
/*
* We pass it to the next owner. (The WAITERS bit is always
* kept enabled while there is PI state around. We must also
* preserve the owner died bit.)
*/
if (!(uval & FUTEX_OWNER_DIED)) {
int ret = 0;
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
ret = -EFAULT;
else if (curval != uval)
ret = -EINVAL;
if (ret) {
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
}
}
raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
raw_spin_lock_irq(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
raw_spin_unlock_irq(&new_owner->pi_lock);
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
return 0;
}
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
u32 uninitialized_var(oldval);
/*
* There is no waiter, so we unlock the futex. The OWNER_DIED
* bit need not be preserved here. We are the owner:
*/
if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
return -EFAULT;
if (oldval != uval)
return -EAGAIN;
return 0;
}
/*
* Express the locking dependencies for lockdep:
*/
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
if (hb1 <= hb2) {
spin_lock(&hb1->lock);
if (hb1 < hb2)
spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
} else { /* hb1 > hb2 */
spin_lock(&hb2->lock);
spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
}
}
static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
spin_unlock(&hb1->lock);
if (hb1 != hb2)
spin_unlock(&hb2->lock);
}
/*
* Wake up waiters matching bitset queued on this futex (uaddr).
*/
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
int ret;
if (!bitset)
return -EINVAL;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
spin_lock(&hb->lock);
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, &key)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
break;
}
/* Check if one of the bits is set in both bitsets */
if (!(this->bitset & bitset))
continue;
wake_futex(this);
if (++ret >= nr_wake)
break;
}
}
spin_unlock(&hb->lock);
put_futex_key(&key);
out:
return ret;
}
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
int nr_wake, int nr_wake2, int op)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head;
struct futex_q *this, *next;
int ret, op_ret;
retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
double_lock_hb(hb1, hb2);
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
double_unlock_hb(hb1, hb2);
#ifndef CONFIG_MMU
/*
* we don't get EFAULT from MMU faults if we don't have an MMU,
* but we might get them from range checking
*/
ret = op_ret;
goto out_put_keys;
#endif
if (unlikely(op_ret != -EFAULT)) {
ret = op_ret;
goto out_put_keys;
}
ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
head = &hb1->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, &key1)) {
wake_futex(this);
if (++ret >= nr_wake)
break;
}
}
if (op_ret > 0) {
head = &hb2->chain;
op_ret = 0;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, &key2)) {
wake_futex(this);
if (++op_ret >= nr_wake2)
break;
}
}
ret += op_ret;
}
double_unlock_hb(hb1, hb2);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
return ret;
}
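/*
 * Userspace sketch (hypothetical caller; the exact op encoding is up
 * to the C library): FUTEX_WAKE_OP atomically applies the encoded op
 * to *uaddr2, wakes up to nr_wake waiters on uaddr1 and, if the
 * encoded comparison against the old *uaddr2 value succeeds, up to
 * nr_wake2 waiters on uaddr2 - saving one syscall over two separate
 * wakes:
 *
 *	syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1, INT_MAX, uaddr2,
 *		FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1));
 */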
/**
* requeue_futex() - Requeue a futex_q from one hb to another
* @q: the futex_q to requeue
* @hb1: the source hash_bucket
* @hb2: the target hash_bucket
* @key2: the new key for the requeued futex_q
*/
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
struct futex_hash_bucket *hb2, union futex_key *key2)
{
/*
* If key1 and key2 hash to the same bucket, no need to
* requeue.
*/
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
q->key = *key2;
}
/**
* requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
* @q: the futex_q
* @key: the key of the requeue target futex
* @hb: the hash_bucket of the requeue target futex
*
* During futex_requeue, with requeue_pi=1, it is possible to acquire the
* target futex if it is uncontended or via a lock steal. Set the futex_q key
* to the requeue target futex so the waiter can detect the wakeup on the right
* futex, but remove it from the hb and NULL the rt_waiter so it can detect
* atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
* to protect access to the pi_state when fixing up the owner later. Must be called
* with both q->lock_ptr and hb->lock held.
*/
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
struct futex_hash_bucket *hb)
{
get_futex_key_refs(key);
q->key = *key;
__unqueue_futex(q);
WARN_ON(!q->rt_waiter);
q->rt_waiter = NULL;
q->lock_ptr = &hb->lock;
wake_up_state(q->task, TASK_NORMAL);
}
/**
* futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
* @pifutex: the user address of the to futex
* @hb1: the from futex hash bucket, must be locked by the caller
* @hb2: the to futex hash bucket, must be locked by the caller
* @key1: the from futex key
* @key2: the to futex key
* @ps: address to store the pi_state pointer
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Try and get the lock on behalf of the top waiter if we can do it atomically.
* Wake the top waiter if we succeed. If the caller specified set_waiters,
* then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
* hb1 and hb2 must be held by the caller.
*
* Returns:
* 0 - failed to acquire the lock atomically
* 1 - acquired the lock
* <0 - error
*/
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
struct futex_hash_bucket *hb1,
struct futex_hash_bucket *hb2,
union futex_key *key1, union futex_key *key2,
struct futex_pi_state **ps, int set_waiters)
{
struct futex_q *top_waiter = NULL;
u32 curval;
int ret;
if (get_futex_value_locked(&curval, pifutex))
return -EFAULT;
/*
* Find the top_waiter and determine if there are additional waiters.
* If the caller intends to requeue more than 1 waiter to pifutex,
* force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
* as we have means to handle the possible fault. If not, don't set
* the bit unnecessarily as it will force the subsequent unlock to enter
* the kernel.
*/
top_waiter = futex_top_waiter(hb1, key1);
/* There are no waiters, nothing for us to do. */
if (!top_waiter)
return 0;
/* Ensure we requeue to the expected futex. */
if (!match_futex(top_waiter->requeue_pi_key, key2))
return -EINVAL;
/*
* Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
* the contended case or if set_waiters is 1. The pi_state is returned
* in ps in contended cases.
*/
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
set_waiters);
if (ret == 1)
requeue_pi_wake_futex(top_waiter, key2, hb2);
return ret;
}
/**
* futex_requeue() - Requeue waiters from uaddr1 to uaddr2
* @uaddr1: source futex user address
* @flags: futex flags (FLAGS_SHARED, etc.)
* @uaddr2: target futex user address
* @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
* @nr_requeue: number of waiters to requeue (0-INT_MAX)
* @cmpval: @uaddr1 expected value (or %NULL)
* @requeue_pi: if we are attempting to requeue from a non-pi futex to a
* pi futex (pi to pi requeue is not supported)
*
* Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
* uaddr2 atomically on behalf of the top waiter.
*
* Returns:
* >=0 - on success, the number of tasks requeued or woken
* <0 - on error
*/
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
u32 __user *uaddr2, int nr_wake, int nr_requeue,
u32 *cmpval, int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head1;
struct futex_q *this, *next;
u32 curval2;
if (requeue_pi) {
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
if (refill_pi_state_cache())
return -ENOMEM;
/*
* requeue_pi must wake as many tasks as it can, up to nr_wake
* + nr_requeue, since it acquires the rt_mutex prior to
* returning to userspace, so as to not leave the rt_mutex with
* waiters and no owner. However, second and third wake-ups
* cannot be predicted as they involve race conditions with the
* first wake and a fault while looking up the pi_state. Both
* pthread_cond_signal() and pthread_cond_broadcast() should
* use nr_wake=1.
*/
if (nr_wake != 1)
return -EINVAL;
}
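/*
 * Sketch of the intended caller (a hypothetical condvar broadcast;
 * the names are invented): wake exactly one waiter onto the PI mutex
 * and requeue all the others, where the waiters blocked earlier with
 * FUTEX_WAIT_REQUEUE_PI on &cond->futex:
 *
 *	syscall(SYS_futex, &cond->futex, FUTEX_CMP_REQUEUE_PI, 1,
 *		INT_MAX, &mutex->futex, cond->expected_val);
 */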
retry:
if (pi_state != NULL) {
/*
* We will have to lookup the pi_state again, so free this one
* to keep the accounting correct.
*/
free_pi_state(pi_state);
pi_state = NULL;
}
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
double_unlock_hb(hb1, hb2);
ret = get_user(curval, uaddr1);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
* faults rather than in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
* reference to it.
*/
if (ret == 1) {
WARN_ON(pi_state);
drop_count++;
task_count++;
ret = get_futex_value_locked(&curval2, uaddr2);
if (!ret)
ret = lookup_pi_state(curval2, hb2, &key2,
&pi_state);
}
switch (ret) {
case 0:
break;
case -EFAULT:
double_unlock_hb(hb1, hb2);
put_futex_key(&key2);
put_futex_key(&key1);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
case -EAGAIN:
/* The owner was exiting, try again. */
double_unlock_hb(hb1, hb2);
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
default:
goto out_unlock;
}
}
head1 = &hb1->chain;
plist_for_each_entry_safe(this, next, head1, list) {
if (task_count - nr_wake >= nr_requeue)
break;
if (!match_futex(&this->key, &key1))
continue;
/*
* FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*/
if ((requeue_pi && !this->rt_waiter) ||
(!requeue_pi && this->rt_waiter)) {
ret = -EINVAL;
break;
}
/*
* Wake nr_wake waiters. For requeue_pi, if we acquired the
* lock, we already woke the top_waiter. If not, it will be
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
wake_futex(this);
continue;
}
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
ret = -EINVAL;
break;
}
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
*/
if (requeue_pi) {
/* Prepare the waiter to take the rt_mutex. */
atomic_inc(&pi_state->refcount);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task, 1);
if (ret == 1) {
/* We got the lock. */
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
free_pi_state(pi_state);
goto out_unlock;
}
}
requeue_futex(this, hb1, hb2, &key2);
drop_count++;
}
out_unlock:
double_unlock_hb(hb1, hb2);
/*
* drop_futex_key_refs() must be called outside the spinlocks. During
* the requeue we moved futex_q's from the hash bucket at key1 to the
* one at key2 and updated their key pointer. We no longer need to
* hold the references to key1.
*/
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
if (pi_state != NULL)
free_pi_state(pi_state);
return ret ? ret : task_count;
}
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
__acquires(&hb->lock)
{
struct futex_hash_bucket *hb;
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
return hb;
}
static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
spin_unlock(&hb->lock);
}
/**
* queue_me() - Enqueue the futex_q on the futex_hash_bucket
* @q: The futex_q to enqueue
* @hb: The destination hash bucket
*
* The hb->lock must be held by the caller, and is released here. A call to
* queue_me() is typically paired with exactly one call to unqueue_me(). The
* exceptions involve the PI related operations, which may use unqueue_me_pi()
* or nothing if the unqueue is done as part of the wake process and the unqueue
* state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
* an example).
*/
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
int prio;
/*
* The priority used to register this element is
* - either the real thread-priority for the real-time threads
* (i.e. threads with a priority lower than MAX_RT_PRIO)
* - or MAX_RT_PRIO for non-RT threads.
* Thus, all RT-threads are woken first in priority order, and
* the others are woken last, in FIFO order.
*/
prio = min(current->normal_prio, MAX_RT_PRIO);
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
q->task = current;
spin_unlock(&hb->lock);
}
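/*
 * Worked example of the priority rule above: a SCHED_FIFO task with
 * rt_priority 10 has normal_prio MAX_RT_PRIO-1 - 10 = 89, so it sorts
 * ahead of every SCHED_OTHER task; those all queue at MAX_RT_PRIO
 * (their normal_prio exceeds it) and are therefore woken in FIFO
 * order among themselves.
 */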
/**
* unqueue_me() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
* be paired with exactly one earlier call to queue_me().
*
* Returns:
* 1 - if the futex_q was still queued (and we unqueued it)
* 0 - if the futex_q was already removed by the waking thread
*/
static int unqueue_me(struct futex_q *q)
{
spinlock_t *lock_ptr;
int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
retry:
lock_ptr = q->lock_ptr;
barrier();
if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
* spin_lock(), causing us to take the wrong lock. This
* corrects the race condition.
*
* Reasoning goes like this: if we have the wrong lock,
* q->lock_ptr must have changed (maybe several times)
* between reading it and the spin_lock(). It can
* change again after the spin_lock() but only if it was
* already changed before the spin_lock(). It cannot,
* however, change back to the original value. Therefore
* we can detect whether we acquired the correct lock.
*/
if (unlikely(lock_ptr != q->lock_ptr)) {
spin_unlock(lock_ptr);
goto retry;
}
__unqueue_futex(q);
BUG_ON(q->pi_state);
spin_unlock(lock_ptr);
ret = 1;
}
drop_futex_key_refs(&q->key);
return ret;
}
/*
* PI futexes cannot be requeued and must remove themselves from the
* hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
* and dropped here.
*/
static void unqueue_me_pi(struct futex_q *q)
__releases(q->lock_ptr)
{
__unqueue_futex(q);
BUG_ON(!q->pi_state);
free_pi_state(q->pi_state);
q->pi_state = NULL;
spin_unlock(q->lock_ptr);
}
/*
* Fixup the pi_state owner with the new owner.
*
* Must be called with hash bucket lock held and mm->sem held for non
* private futexes.
*/
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
struct task_struct *newowner)
{
u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
struct task_struct *oldowner = pi_state->owner;
u32 uval, uninitialized_var(curval), newval;
int ret;
/* Owner died? */
if (!pi_state->owner)
newtid |= FUTEX_OWNER_DIED;
/*
* We are here either because we stole the rtmutex from the
* previous highest priority waiter or we are the highest priority
* waiter but failed to get the rtmutex the first time.
* We have to replace the newowner TID in the user space variable.
* This must be atomic as we have to preserve the owner died bit here.
*
* Note: We write the user space value _before_ changing the pi_state
* because we can fault here. Imagine swapped out pages or a fork
* that marked all the anonymous memory readonly for cow.
*
* Modifying pi_state _before_ the user space value would
* leave the pi_state in an inconsistent state when we fault
* here, because we need to drop the hash bucket lock to
* handle the fault. This might be observed in the PID check
* in lookup_pi_state.
*/
retry:
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;
while (1) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
goto handle_fault;
if (curval == uval)
break;
uval = curval;
}
/*
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
if (pi_state->owner != NULL) {
raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
}
pi_state->owner = newowner;
raw_spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
raw_spin_unlock_irq(&newowner->pi_lock);
return 0;
/*
* To handle the page fault we need to drop the hash bucket
* lock here. That gives the other task (either the highest priority
* waiter itself or the task which stole the rtmutex) the
* chance to try the fixup of the pi_state. So once we are
* back from handling the fault we need to check the pi_state
* after reacquiring the hash bucket lock and before trying to
* do another fixup. When the fixup has been done already we
* simply return.
*/
handle_fault:
spin_unlock(q->lock_ptr);
ret = fault_in_user_writeable(uaddr);
spin_lock(q->lock_ptr);
/*
* Check if someone else fixed it for us:
*/
if (pi_state->owner != oldowner)
return 0;
if (ret)
return ret;
goto retry;
}
static long futex_wait_restart(struct restart_block *restart);
/**
* fixup_owner() - Post lock pi_state and corner case management
* @uaddr: user address of the futex
* @q: futex_q (contains pi_state and access to the rt_mutex)
* @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
*
* After attempting to lock an rt_mutex, this function is called to cleanup
* the pi_state owner as well as handle race conditions that may allow us to
* acquire the lock. Must be called with the hb lock held.
*
* Returns:
* 1 - success, lock taken
* 0 - success, lock not taken
* <0 - on error (-EFAULT)
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
struct task_struct *owner;
int ret = 0;
if (locked) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case:
*/
if (q->pi_state->owner != current)
ret = fixup_pi_state_owner(uaddr, q, current);
goto out;
}
/*
* Catch the rare case where the lock was released when we were on the
* way back before we locked the hash bucket.
*/
if (q->pi_state->owner == current) {
/*
* Try to get the rt_mutex now. This might fail as some other
* task acquired the rt_mutex after we removed ourself from the
* rt_mutex waiters list.
*/
if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
locked = 1;
goto out;
}
/*
* pi_state is incorrect, some other task did a lock steal and
* we returned due to timeout or signal without taking the
* rt_mutex. Too late.
*/
raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
owner = rt_mutex_owner(&q->pi_state->pi_mutex);
if (!owner)
owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
ret = fixup_pi_state_owner(uaddr, q, owner);
goto out;
}
/*
* Paranoia check. If we did not take the lock, then we should not be
* the owner of the rt_mutex.
*/
if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
"pi-state %p\n", ret,
q->pi_state->pi_mutex.owner,
q->pi_state->owner);
out:
return ret ? ret : locked;
}
/**
* futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
* @hb: the futex hash bucket, must be locked by the caller
* @q: the futex_q to queue up on
* @timeout: the prepared hrtimer_sleeper, or null for no timeout
*/
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
struct hrtimer_sleeper *timeout)
{
/*
* The task state is guaranteed to be set before another task can
* wake it. set_current_state() is implemented using set_mb() and
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
*/
set_current_state(TASK_INTERRUPTIBLE);
queue_me(q, hb);
/* Arm the timer */
if (timeout) {
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
if (!hrtimer_active(&timeout->timer))
timeout->task = NULL;
}
/*
* If we have been removed from the hash list, then another task
* has tried to wake us, and we can skip the call to schedule().
*/
if (likely(!plist_node_empty(&q->list))) {
/*
* If the timer has already expired, current will already be
* flagged for rescheduling. Only call schedule if there
* is no timeout, or if it has yet to expire.
*/
if (!timeout || timeout->task)
schedule();
}
__set_current_state(TASK_RUNNING);
}
/**
* futex_wait_setup() - Prepare to wait on a futex
* @uaddr: the futex userspace address
* @val: the expected value
* @flags: futex flags (FLAGS_SHARED, etc.)
* @q: the associated futex_q
* @hb: storage for hash_bucket pointer to be returned to caller
*
* Setup the futex_q and locate the hash_bucket. Get the futex value and
* compare it with the expected value. Handle atomic faults internally.
* Return with the hb lock held and a q.key reference on success, and unlocked
* with no q.key reference on failure.
*
* Returns:
* 0 - uaddr contains val and hb has been locked
* <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
*/
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
struct futex_q *q, struct futex_hash_bucket **hb)
{
u32 uval;
int ret;
/*
* Access the page AFTER the hash-bucket is locked.
* Order is important:
*
* Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
* Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
*
* The basic logical guarantee of a futex is that it blocks ONLY
* if cond(var) is known to be true at the time of blocking, for
* any cond. If we locked the hash-bucket after testing *uaddr, that
* would open a race condition where we could block indefinitely with
* cond(var) false, which would violate the guarantee.
*
* On the other hand, we insert q and release the hash-bucket only
* after testing *uaddr. This guarantees that futex_wait() will NOT
* absorb a wakeup if *uaddr does not match the desired values
* while the syscall executes.
*/
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
if (unlikely(ret != 0))
return ret;
retry_private:
*hb = queue_lock(q);
ret = get_futex_value_locked(&uval, uaddr);
if (ret) {
queue_unlock(q, *hb);
ret = get_user(uval, uaddr);
if (ret)
goto out;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&q->key);
goto retry;
}
if (uval != val) {
queue_unlock(q, *hb);
ret = -EWOULDBLOCK;
}
out:
if (ret)
put_futex_key(&q->key);
return ret;
}
static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
ktime_t *abs_time, u32 bitset)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct restart_block *restart;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int ret;
if (!bitset)
return -EINVAL;
q.bitset = bitset;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
retry:
/*
* Prepare to wait on uaddr. On success, holds hb lock and increments
* q.key refs.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out;
/* queue_me and wait for wakeup, timeout, or a signal. */
futex_wait_queue_me(hb, &q, to);
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
/* unqueue_me() drops q.key ref */
if (!unqueue_me(&q))
goto out;
ret = -ETIMEDOUT;
if (to && !to->task)
goto out;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
if (!signal_pending(current))
goto retry;
ret = -ERESTARTSYS;
if (!abs_time)
goto out;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
restart->futex.uaddr = uaddr;
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
ret = -ERESTART_RESTARTBLOCK;
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
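/*
 * Userspace pairing (sketch, mirroring the ordering comment in
 * futex_wait_setup(); atomic_read/atomic_store stand for whatever the
 * C library provides):
 *
 *	// waiter
 *	while (atomic_read(&futex) == 0)
 *		syscall(SYS_futex, &futex, FUTEX_WAIT, 0, NULL, NULL, 0);
 *
 *	// waker
 *	atomic_store(&futex, 1);
 *	syscall(SYS_futex, &futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 */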
static long futex_wait_restart(struct restart_block *restart)
{
u32 __user *uaddr = restart->futex.uaddr;
ktime_t t, *tp = NULL;
if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
t.tv64 = restart->futex.time;
tp = &t;
}
restart->fn = do_no_restart_syscall;
return (long)futex_wait(uaddr, restart->futex.flags,
restart->futex.val, tp, restart->futex.bitset);
}
/*
* Userspace tried a 0 -> TID atomic transition of the futex value
* and failed. The kernel side here does the whole locking operation:
* if there are waiters then it will block, it does PI, etc. (Due to
* races the kernel might see a 0 value of the futex too.)
*/
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int res, ret;
if (refill_pi_state_cache())
return -ENOMEM;
if (time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires(&to->timer, *time);
}
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
retry_private:
hb = queue_lock(&q);
ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
if (unlikely(ret)) {
switch (ret) {
case 1:
/* We got the lock. */
ret = 0;
goto out_unlock_put_key;
case -EFAULT:
goto uaddr_faulted;
case -EAGAIN:
/*
* Task is exiting and we just wait for the
* exit to complete.
*/
queue_unlock(&q, hb);
put_futex_key(&q.key);
cond_resched();
goto retry;
default:
goto out_unlock_put_key;
}
}
/*
* Only actually queue now that the atomic ops are done:
*/
queue_me(&q, hb);
WARN_ON(!q.pi_state);
/*
* Block on the PI mutex:
*/
if (!trylock)
ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
else {
ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
/* Fixup the trylock return value: */
ret = ret ? 0 : -EWOULDBLOCK;
}
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr, &q, !ret);
/*
* If fixup_owner() returned an error, propagate that. If it acquired
* the lock, clear our -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/*
* If fixup_owner() faulted and was unable to handle the fault, unlock
* it and return the fault to userspace.
*/
if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
rt_mutex_unlock(&q.pi_state->pi_mutex);
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
goto out_put_key;
out_unlock_put_key:
queue_unlock(&q, hb);
out_put_key:
put_futex_key(&q.key);
out:
if (to)
destroy_hrtimer_on_stack(&to->timer);
return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
queue_unlock(&q, hb);
ret = fault_in_user_writeable(uaddr);
if (ret)
goto out_put_key;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&q.key);
goto retry;
}
/*
* Userspace attempted a TID -> 0 atomic transition, and failed.
* This is the in-kernel slowpath: we look up the PI state (if any),
* and do the rt-mutex unlock.
*/
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
u32 uval, vpid = task_pid_vnr(current);
int ret;
retry:
if (get_user(uval, uaddr))
return -EFAULT;
/*
* We release only a lock we actually own:
*/
if ((uval & FUTEX_TID_MASK) != vpid)
return -EPERM;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
spin_lock(&hb->lock);
/*
* To avoid races, try to do the TID -> 0 atomic transition
* again. If it succeeds then we can return without waking
* anyone else up:
*/
if (!(uval & FUTEX_OWNER_DIED) &&
cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
goto pi_faulted;
/*
* Rare case: we managed to release the lock atomically,
* no need to wake anyone else up:
*/
if (unlikely(uval == vpid))
goto out_unlock;
/*
* Ok, other tasks may need to be woken up - check waiters
* and do the wakeup if necessary:
*/
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (!match_futex(&this->key, &key))
continue;
ret = wake_futex_pi(uaddr, uval, this);
/*
* The atomic access to the futex value
* generated a pagefault, so retry the
* user-access and the wakeup:
*/
if (ret == -EFAULT)
goto pi_faulted;
goto out_unlock;
}
/*
* No waiters - kernel unlocks the futex:
*/
if (!(uval & FUTEX_OWNER_DIED)) {
ret = unlock_futex_pi(uaddr, uval);
if (ret == -EFAULT)
goto pi_faulted;
}
out_unlock:
spin_unlock(&hb->lock);
put_futex_key(&key);
out:
return ret;
pi_faulted:
spin_unlock(&hb->lock);
put_futex_key(&key);
ret = fault_in_user_writeable(uaddr);
if (!ret)
goto retry;
return ret;
}
/**
* handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
* @hb: the hash_bucket futex_q was originally enqueued on
* @q: the futex_q woken while waiting to be requeued
* @key2: the futex_key of the requeue target futex
* @timeout: the timeout associated with the wait (NULL if none)
*
* Detect if the task was woken on the initial futex as opposed to the requeue
* target futex. If so, determine if it was a timeout or a signal that caused
* the wakeup and return the appropriate error code to the caller. Must be
* called with the hb lock held.
*
* Returns:
*  0 - no early wakeup detected
* <0 - -ETIMEDOUT, -ERESTARTNOINTR, or -EWOULDBLOCK (spurious wakeup)
*/
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
struct futex_q *q, union futex_key *key2,
struct hrtimer_sleeper *timeout)
{
int ret = 0;
/*
* With the hb lock held, we avoid races while we process the wakeup.
* We only need to hold hb (and not hb2) to ensure atomicity as the
* wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
* It can't be requeued from uaddr2 to something else since we don't
* support a PI aware source futex for requeue.
*/
if (!match_futex(&q->key, key2)) {
WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
/*
* We were woken prior to requeue by a timeout or a signal.
* Unqueue the futex_q and determine which it was.
*/
plist_del(&q->list, &hb->chain);
/* Handle spurious wakeups gracefully */
ret = -EWOULDBLOCK;
if (timeout && !timeout->task)
ret = -ETIMEDOUT;
else if (signal_pending(current))
ret = -ERESTARTNOINTR;
}
return ret;
}
/**
* futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
* @uaddr: the futex we initially wait on (non-pi)
* @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
* the same type, no requeueing from private to shared, etc.
* @val: the expected value of uaddr
* @abs_time: absolute timeout
* @bitset: 32 bit wakeup bitset set by userspace, defaults to all
* @uaddr2: the pi futex we will take prior to returning to user-space
*
* The caller will wait on uaddr and will be requeued by futex_requeue() to
* uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
* complete the acquisition of the rt_mutex prior to returning to userspace.
* This ensures the rt_mutex maintains an owner when it has waiters; without
* one, the pi logic wouldn't know which task to boost/deboost, if there was a
* need to.
*
* We call schedule in futex_wait_queue_me() when we enqueue and return there
* via the following:
* 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
* 2) wakeup on uaddr2 after a requeue
* 3) signal
* 4) timeout
*
* If 3, cleanup and return -ERESTARTNOINTR.
*
* If 2, we may then block on trying to take the rt_mutex and return via:
* 5) successful lock
* 6) signal
* 7) timeout
* 8) other lock acquisition failure
*
* If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
*
* If 4 or 7, we cleanup and return with -ETIMEDOUT.
*
* Returns:
* 0 - On success
* <0 - On error
*/
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 val, ktime_t *abs_time, u32 bitset,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/*
* Prepare to wait on uaddr. On success, increments q.key (key1) ref
* count.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out_key2;
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code. The
* futex_requeue dropped our key1 reference and incremented our key2
* reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
spin_unlock(q.lock_ptr);
}
} else {
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, &q, !ret);
/*
* If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
/*
* If fixup_pi_state_owner() faulted and was unable to handle the
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(&q.key);
out_key2:
put_futex_key(&key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*
* Implementation: user-space maintains a per-thread list of locks it
* is holding. Upon do_exit(), the kernel carefully walks this list,
* and marks all locks that are owned by this thread with the
* FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
* always manipulated with the lock held, so the list is private and
* per-thread. Userspace also maintains a per-thread 'list_op_pending'
* field, to allow the kernel to clean up if the thread dies after
* acquiring the lock, but just before it could have added itself to
* the list. There can only be one such pending lock.
*/
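/*
 * Userspace sketch (hypothetical lock library; my_lock, tid and the
 * atomics are invented names): register the list head once per thread,
 * and publish list_op_pending around the lock-word update so the
 * kernel can recover no matter where the thread dies:
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next = &head.list;		// empty list
 *	head.futex_offset = offsetof(struct my_lock, futex) -
 *			    offsetof(struct my_lock, list);
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *	...
 *	head.list_op_pending = &lock->list;	// about to acquire
 *	while (atomic_cmpxchg(&lock->futex, 0, tid) != 0)
 *		;				// FUTEX_WAIT on contention
 *	lock->list.next = head.list.next;	// link into the held list
 *	head.list.next = &lock->list;
 *	head.list_op_pending = NULL;
 */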
/**
* sys_set_robust_list() - Set the robust-futex list head of a task
* @head: pointer to the list-head
* @len: length of the list-head, as userspace expects
*/
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
size_t, len)
{
if (!futex_cmpxchg_enabled)
return -ENOSYS;
/*
* The kernel knows only one size for now:
*/
if (unlikely(len != sizeof(*head)))
return -EINVAL;
current->robust_list = head;
return 0;
}
/**
* sys_get_robust_list() - Get the robust-futex list head of a task
* @pid: pid of the process [zero for current task]
* @head_ptr: pointer to a list-head pointer, the kernel fills it in
* @len_ptr: pointer to a length field, the kernel fills in the header size
*/
SYSCALL_DEFINE3(get_robust_list, int, pid,
struct robust_list_head __user * __user *, head_ptr,
size_t __user *, len_ptr)
{
struct robust_list_head __user *head;
unsigned long ret;
struct task_struct *p;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
rcu_read_lock();
ret = -ESRCH;
if (!pid)
p = current;
else {
p = find_task_by_vpid(pid);
if (!p)
goto err_unlock;
}
ret = -EPERM;
if (!ptrace_may_access(p, PTRACE_MODE_READ))
goto err_unlock;
head = p->robust_list;
rcu_read_unlock();
if (put_user(sizeof(*head), len_ptr))
return -EFAULT;
return put_user(head, head_ptr);
err_unlock:
rcu_read_unlock();
return ret;
}
/*
* Process a futex-list entry, check whether it's owned by the
* dying task, and do notification if so:
*/
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
u32 uval, uninitialized_var(nval), mval;
retry:
if (get_user(uval, uaddr))
return -1;
if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
/*
* Ok, this dying thread is truly holding a futex
* of interest. Set the OWNER_DIED bit atomically
* via cmpxchg, and if the value had FUTEX_WAITERS
* set, wake up a waiter (if any). (We have to do a
* futex_wake() even if OWNER_DIED is already set -
* to handle the rare but possible case of recursive
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
/*
* We are not holding a lock here, but we want to have
* the pagefault_disable/enable() protection because
* we want to handle the fault gracefully. If the
* access fails we try to fault in the futex with R/W
* verification via get_user_pages. get_user() above
* does not guarantee R/W access. If that fails we
* give up and leave the futex locked.
*/
if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
if (fault_in_user_writeable(uaddr))
return -1;
goto retry;
}
if (nval != uval)
goto retry;
/*
* Wake robust non-PI futexes here. The wakeup of
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS))
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
}
return 0;
}
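/*
 * Worked example of the mval computation above, using the uapi bit
 * values (FUTEX_WAITERS 0x80000000, FUTEX_OWNER_DIED 0x40000000,
 * FUTEX_TID_MASK 0x3fffffff). The sample value is hypothetical.
 */
#if 0 /* illustration only */
	u32 uval = 0x80001234;	/* TID 0x1234 holds it, waiters queued */
	u32 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
	/* mval == 0xc0000000: TID cleared, WAITERS kept, OWNER_DIED set */
#endif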
/*
* Fetch a robust-list pointer. Bit 0 signals PI futexes:
*/
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
unsigned int *pi)
{
unsigned long uentry;
if (get_user(uentry, (unsigned long __user *)head))
return -EFAULT;
*entry = (void __user *)(uentry & ~1UL);
*pi = uentry & 1;
return 0;
}
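/*
 * Encode-side counterpart (illustrative, userspace): the low bit of each
 * list entry carries the PI flag, which is why the mask above is ~1UL.
 * This relies on list entries being at least 2-byte aligned.
 */
#if 0 /* userspace example, not built with the kernel */
static struct robust_list *tag_robust_entry(struct robust_list *entry,
					    int is_pi)
{
	return (struct robust_list *)
		((unsigned long)entry | (is_pi ? 1UL : 0UL));
}
#endif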
/*
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
* We silently return on any sign of list-walking problem.
*/
void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int uninitialized_var(next_pi);
unsigned long futex_offset;
int rc;
if (!futex_cmpxchg_enabled)
return;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
if (fetch_robust_entry(&entry, &head->list.next, &pi))
return;
/*
* Fetch the relative futex offset:
*/
if (get_user(futex_offset, &head->futex_offset))
return;
/*
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
return;
next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
/*
* Fetch the next entry in the list before calling
* handle_futex_death:
*/
rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
/*
* A pending lock might already be on the list, so
* don't process it twice:
*/
if (entry != pending)
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi))
return;
if (rc)
return;
entry = next_entry;
pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
if (!--limit)
break;
cond_resched();
}
if (pending)
handle_futex_death((void __user *)pending + futex_offset,
curr, pip);
}
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
int cmd = op & FUTEX_CMD_MASK;
unsigned int flags = 0;
if (!(op & FUTEX_PRIVATE_FLAG))
flags |= FLAGS_SHARED;
if (op & FUTEX_CLOCK_REALTIME) {
flags |= FLAGS_CLOCKRT;
if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_LOCK_PI:
case FUTEX_UNLOCK_PI:
case FUTEX_TRYLOCK_PI:
case FUTEX_WAIT_REQUEUE_PI:
case FUTEX_CMP_REQUEUE_PI:
if (!futex_cmpxchg_enabled)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAIT_BITSET:
return futex_wait(uaddr, flags, val, timeout, val3);
case FUTEX_WAKE:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAKE_BITSET:
return futex_wake(uaddr, flags, val, val3);
case FUTEX_REQUEUE:
return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
case FUTEX_CMP_REQUEUE:
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
case FUTEX_WAKE_OP:
return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
case FUTEX_LOCK_PI:
return futex_lock_pi(uaddr, flags, val, timeout, 0);
case FUTEX_UNLOCK_PI:
return futex_unlock_pi(uaddr, flags);
case FUTEX_TRYLOCK_PI:
return futex_lock_pi(uaddr, flags, 0, timeout, 1);
case FUTEX_WAIT_REQUEUE_PI:
val3 = FUTEX_BITSET_MATCH_ANY;
return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
uaddr2);
case FUTEX_CMP_REQUEUE_PI:
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
}
return -ENOSYS;
}
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
struct timespec __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
struct timespec ts;
ktime_t t, *tp = NULL;
u32 val2 = 0;
int cmd = op & FUTEX_CMD_MASK;
if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
cmd == FUTEX_WAIT_BITSET ||
cmd == FUTEX_WAIT_REQUEUE_PI)) {
if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
return -EFAULT;
if (!timespec_valid(&ts))
return -EINVAL;
t = timespec_to_ktime(ts);
if (cmd == FUTEX_WAIT)
t = ktime_add_safe(ktime_get(), t);
tp = &t;
}
/*
* requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
* number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
*/
if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
val2 = (u32) (unsigned long) utime;
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
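/*
 * Illustrative userspace call showing the 'utime' overloading documented
 * above: for FUTEX_CMP_REQUEUE the timeout slot carries nr_requeue as a
 * plain integer cast to a pointer. uaddr, uaddr2 and expected_val are
 * hypothetical variables.
 */
#if 0 /* userspace example, not built with the kernel */
	/* Wake one waiter on uaddr and requeue up to INT_MAX more onto
	 * uaddr2, provided *uaddr still equals expected_val (val3). */
	syscall(SYS_futex, uaddr, FUTEX_CMP_REQUEUE, 1 /* nr_wake */,
		(struct timespec *)(unsigned long)INT_MAX /* nr_requeue */,
		uaddr2, expected_val);
#endif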
static int __init futex_init(void)
{
u32 curval;
int i;
/*
* This will fail and we want it. Some arch implementations do
* runtime detection of the futex_atomic_cmpxchg_inatomic()
* functionality. We want to know that before we call in any
* of the complex code paths. Also we want to prevent
* registration of robust lists in that case. NULL is
* guaranteed to fault and we get -EFAULT on a functional
* implementation; the non-functional ones will return
* -ENOSYS.
*/
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
futex_cmpxchg_enabled = 1;
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
return 0;
}
__initcall(futex_init);
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_3848_0 |
crossvul-cpp_data_good_5844_4 | /*
* RAW sockets for IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Adapted from linux/net/ipv4/raw.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#include <linux/mroute6.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/xfrm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */
static struct raw_hashinfo raw_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
unsigned short num, const struct in6_addr *loc_addr,
const struct in6_addr *rmt_addr, int dif)
{
bool is_multicast = ipv6_addr_is_multicast(loc_addr);
sk_for_each_from(sk)
if (inet_sk(sk)->inet_num == num) {
if (!net_eq(sock_net(sk), net))
continue;
if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
continue;
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
goto found;
if (is_multicast &&
inet6_mc_check(sk, loc_addr, rmt_addr))
goto found;
continue;
}
goto found;
}
sk = NULL;
found:
return sk;
}
/*
* 0 - deliver
* 1 - block
*/
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
struct icmp6hdr _hdr;
const struct icmp6hdr *hdr;
/* We require only the four bytes of the ICMPv6 header, not any
* additional bytes of message body in "struct icmp6hdr".
*/
hdr = skb_header_pointer(skb, skb_transport_offset(skb),
ICMPV6_HDRLEN, &_hdr);
if (hdr) {
const __u32 *data = &raw6_sk(sk)->filter.data[0];
unsigned int type = hdr->icmp6_type;
return (data[type >> 5] & (1U << (type & 31))) != 0;
}
return 1;
}
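/*
 * Userspace counterpart of the bitmap test above (illustrative): the
 * same filter is installed with the ICMPV6_FILTER socket option and a
 * set bit means "block". Sketch using the RFC 3542 macros.
 */
#if 0 /* userspace example, not built with the kernel */
#include <netinet/icmp6.h>
#include <sys/socket.h>

static void accept_only_echo_replies(int sock)
{
	struct icmp6_filter filt;

	ICMP6_FILTER_SETBLOCKALL(&filt);		/* set every bit */
	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);	/* clear bit 129 */
	setsockopt(sock, IPPROTO_ICMPV6, ICMP6_FILTER,
		   &filt, sizeof(filt));
}
#endif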
#if IS_ENABLED(CONFIG_IPV6_MIP6)
typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
static mh_filter_t __rcu *mh_filter __read_mostly;
int rawv6_mh_filter_register(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
int rawv6_mh_filter_unregister(mh_filter_t filter)
{
RCU_INIT_POINTER(mh_filter, NULL);
synchronize_rcu();
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);
#endif
/*
* demultiplex raw sockets.
* (should consider queueing the skb in the sock receive_queue
* without calling rawv6.c)
*
* Caller owns SKB so we must make clones.
*/
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
const struct in6_addr *saddr;
const struct in6_addr *daddr;
struct sock *sk;
bool delivered = false;
__u8 hash;
struct net *net;
saddr = &ipv6_hdr(skb)->saddr;
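/* daddr immediately follows saddr in struct ipv6hdr */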
daddr = saddr + 1;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
if (sk == NULL)
goto out;
net = dev_net(skb->dev);
sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);
while (sk) {
int filtered;
delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
filtered = icmpv6_filter(sk, skb);
break;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_MH:
{
/* XXX: To validate MH only once for each packet,
 * this check is placed here. Ideally it would run
 * after the xfrm policy check, but it cannot: the
 * xfrm policy check lives in rawv6_rcv() because
 * it is required for each socket.
 */
mh_filter_t *filter;
filter = rcu_dereference(mh_filter);
filtered = filter ? (*filter)(sk, skb) : 0;
break;
}
#endif
default:
filtered = 0;
break;
}
if (filtered < 0)
break;
if (filtered == 0) {
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
/* Not releasing hash table! */
if (clone) {
nf_reset(clone);
rawv6_rcv(sk, clone);
}
}
sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
IP6CB(skb)->iif);
}
out:
read_unlock(&raw_v6_hashinfo.lock);
return delivered;
}
bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
struct sock *raw_sk;
raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
raw_sk = NULL;
return raw_sk != NULL;
}
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
__be32 v4addr = 0;
int addr_type;
int err;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
addr_type = ipv6_addr_type(&addr->sin6_addr);
/* Raw sockets are IPv6 only */
if (addr_type == IPV6_ADDR_MAPPED)
return -EADDRNOTAVAIL;
lock_sock(sk);
err = -EINVAL;
if (sk->sk_state != TCP_CLOSE)
goto out;
rcu_read_lock();
/* Check if the address belongs to the host. */
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
if (__ipv6_addr_needs_scope_id(addr_type)) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
addr->sin6_scope_id) {
/* Override any existing binding, if another
* one is supplied by user.
*/
sk->sk_bound_dev_if = addr->sin6_scope_id;
}
/* Binding to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
goto out_unlock;
err = -ENODEV;
dev = dev_get_by_index_rcu(sock_net(sk),
sk->sk_bound_dev_if);
if (!dev)
goto out_unlock;
}
/* ipv4 addr of the socket is invalid. Only the
* unspecified and mapped address have a v4 equivalent.
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
goto out_unlock;
}
}
}
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
sk->sk_v6_rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
np->saddr = addr->sin6_addr;
err = 0;
out_unlock:
rcu_read_unlock();
out:
release_sock(sk);
return err;
}
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
int err;
int harderr;
/* Report error on raw socket, if:
1. User requested recverr.
2. Socket is connected (otherwise the error indication
is useless without recverr and the error is hard).
*/
if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
harderr = icmpv6_err_convert(type, code, &err);
if (type == ICMPV6_PKT_TOOBIG) {
ip6_sk_update_pmtu(skb, sk, info);
harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
}
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
return;
}
if (np->recverr) {
u8 *payload = skb->data;
if (!inet->hdrincl)
payload += offset;
ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
}
if (np->recverr || harderr) {
sk->sk_err = err;
sk->sk_error_report(sk);
}
}
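/*
 * Illustrative userspace sketch: the error queuing in rawv6_err() above
 * only happens with IPV6_RECVERR enabled (or a connected socket). 'sock'
 * is a hypothetical raw IPv6 socket descriptor.
 */
#if 0 /* userspace example, not built with the kernel */
	int on = 1;

	setsockopt(sock, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
	/* Queued errors are then drained with
	 * recvmsg(sock, &msg, MSG_ERRQUEUE) and arrive as
	 * struct sock_extended_err control messages. */
#endif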
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
u8 type, u8 code, int inner_offset, __be32 info)
{
struct sock *sk;
int hash;
const struct in6_addr *saddr, *daddr;
struct net *net;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
if (sk != NULL) {
/* Note: ipv6_hdr(skb) != skb->data */
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
saddr = &ip6h->saddr;
daddr = &ip6h->daddr;
net = dev_net(skb->dev);
while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
IP6CB(skb)->iif))) {
rawv6_err(sk, skb, NULL, type, code,
inner_offset, info);
sk = sk_next(sk);
}
}
read_unlock(&raw_v6_hashinfo.lock);
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
/* Charge it to the socket. */
skb_dst_drop(skb);
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return 0;
}
/*
* This is next to useless...
* if we demultiplexed in the network layer we wouldn't need the
* extra call just to queue the skb...
* maybe the network layer could decide, based on a hint, whether
* it should call raw_rcv for demultiplexing
*/
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
if (!rp->checksum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len, inet->inet_num, skb->csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (!skb_csum_unnecessary(skb))
skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len,
inet->inet_num, 0));
if (inet->hdrincl) {
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
}
rawv6_rcv_skb(sk, skb);
return 0;
}
/*
* This should be easy, if there is something there
* we return it, otherwise we block.
*/
static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
struct sk_buff *skb;
size_t copied;
int err;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
if (skb_csum_unnecessary(skb)) {
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else if (msg->msg_flags&MSG_TRUNC) {
if (__skb_checksum_complete(skb))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else {
err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
if (err)
goto out_free;
/* Copy the address. */
if (sin6) {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
*addr_len = sizeof(*sin6);
}
sock_recv_ts_and_drops(msg, sk, skb);
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
err = copied;
if (flags & MSG_TRUNC)
err = skb->len;
out_free:
skb_free_datagram(sk, skb);
out:
return err;
csum_copy_err:
skb_kill_datagram(sk, skb, flags);
/* Error for blocking case is chosen to masquerade
as some normal condition.
*/
err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
goto out;
}
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct raw6_sock *rp)
{
struct sk_buff *skb;
int err = 0;
int offset;
int len;
int total_len;
__wsum tmp_csum;
__sum16 csum;
if (!rp->checksum)
goto send;
if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
offset = rp->offset;
total_len = inet_sk(sk)->cork.base.length;
if (offset >= total_len - 1) {
err = -EINVAL;
ip6_flush_pending_frames(sk);
goto out;
}
/* should we check the HW csum here? - miyazawa */
if (skb_queue_len(&sk->sk_write_queue) == 1) {
/*
* Only one fragment on the socket.
*/
tmp_csum = skb->csum;
} else {
struct sk_buff *csum_skb = NULL;
tmp_csum = 0;
skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
if (csum_skb)
continue;
len = skb->len - skb_transport_offset(skb);
if (offset >= len) {
offset -= len;
continue;
}
csum_skb = skb;
}
skb = csum_skb;
}
offset += skb_transport_offset(skb);
if (skb_copy_bits(skb, offset, &csum, 2))
BUG();
/* in case cksum was not initialized */
if (unlikely(csum))
tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));
csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
total_len, fl6->flowi6_proto, tmp_csum);
if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
csum = CSUM_MANGLED_0;
if (skb_store_bits(skb, offset, &csum, 2))
BUG();
send:
err = ip6_push_pending_frames(sk);
out:
return err;
}
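/*
 * Note on CSUM_MANGLED_0 above: in ones'-complement arithmetic 0x0000
 * and 0xffff both encode zero, but a zero UDP checksum field means
 * "no checksum" (RFC 768), so a computed zero must go on the wire as
 * 0xffff. Minimal fold-and-fixup sketch (illustrative only).
 */
#if 0 /* illustration only */
static uint16_t fold_udp_csum(uint32_t sum32)
{
	uint16_t csum;

	/* Fold any carries back into the low 16 bits. */
	while (sum32 >> 16)
		sum32 = (sum32 & 0xffff) + (sum32 >> 16);
	csum = (uint16_t)~sum32;
	return csum ? csum : 0xffff;	/* the CSUM_MANGLED_0 fixup */
}
#endif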
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
struct flowi6 *fl6, struct dst_entry **dstp,
unsigned int flags)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *iph;
struct sk_buff *skb;
int err;
struct rt6_info *rt = (struct rt6_info *)*dstp;
int hlen = LL_RESERVED_SPACE(rt->dst.dev);
int tlen = rt->dst.dev->needed_tailroom;
if (length > rt->dst.dev->mtu) {
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk,
length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
skb_reserve(skb, hlen);
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
*dstp = NULL;
skb_put(skb, length);
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
err = memcpy_fromiovecend((void *)iph, from, 0, length);
if (err)
goto error_fault;
IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto error;
out:
return 0;
error_fault:
err = -EFAULT;
kfree_skb(skb);
error:
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
if (err == -ENOBUFS && !np->recverr)
err = 0;
return err;
}
static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg)
{
struct iovec *iov;
u8 __user *type = NULL;
u8 __user *code = NULL;
u8 len = 0;
int probed = 0;
int i;
if (!msg->msg_iov)
return 0;
for (i = 0; i < msg->msg_iovlen; i++) {
iov = &msg->msg_iov[i];
if (!iov)
continue;
switch (fl6->flowi6_proto) {
case IPPROTO_ICMPV6:
/* check if one-byte field is readable or not. */
if (iov->iov_base && iov->iov_len < 1)
break;
if (!type) {
type = iov->iov_base;
/* check if code field is readable or not. */
if (iov->iov_len > 1)
code = type + 1;
} else if (!code)
code = iov->iov_base;
if (type && code) {
if (get_user(fl6->fl6_icmp_type, type) ||
get_user(fl6->fl6_icmp_code, code))
return -EFAULT;
probed = 1;
}
break;
case IPPROTO_MH:
if (iov->iov_base && iov->iov_len < 1)
break;
/* check if type field is readable or not. */
if (iov->iov_len > 2 - len) {
u8 __user *p = iov->iov_base;
if (get_user(fl6->fl6_mh_type, &p[2 - len]))
return -EFAULT;
probed = 1;
} else
len += iov->iov_len;
break;
default:
probed = 1;
break;
}
if (probed)
break;
}
return 0;
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct in6_addr *daddr, *final_p, final;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
u16 proto;
int err;
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX)
return -EMSGSIZE;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/*
* Get and verify the address.
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
/* port is the proto value [0..255] carried in nexthdr */
proto = ntohs(sin6->sin6_port);
if (!proto)
proto = inet->inet_num;
else if (proto != inet->inet_num)
return -EINVAL;
if (proto > 255)
return -EINVAL;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
daddr = &flowlabel->dst;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
proto = inet->inet_num;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
}
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
if (opt == NULL)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
err = rawv6_probe_proto_opt(&fl6, msg);
if (err)
goto out;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
}
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl6.daddr))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
}
if (tclass < 0)
tclass = np->tclass;
if (dontfrag < 0)
dontfrag = np->dontfrag;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl6, &dst, msg->msg_flags);
else {
lock_sock(sk);
err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info*)dst,
msg->msg_flags, dontfrag);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
err = rawv6_push_pending_frames(sk, &fl6, rp);
release_sock(sk);
}
done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
return err<0?err:len;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
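/*
 * Userspace addressing sketch (illustrative): as parsed above, a raw
 * IPv6 socket (ab)uses sin6_port to carry the protocol number, and it
 * is normally left zero so the socket's own protocol applies. buf and
 * len are hypothetical.
 */
#if 0 /* userspace example, not built with the kernel */
	int sock = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	struct sockaddr_in6 dst = {
		.sin6_family = AF_INET6,
		.sin6_port = 0,	/* 0 => use the socket's protocol */
	};

	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
	sendto(sock, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
#endif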
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
{
switch (optname) {
case ICMPV6_FILTER:
if (optlen > sizeof(struct icmp6_filter))
optlen = sizeof(struct icmp6_filter);
if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
}
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
switch (optname) {
case ICMPV6_FILTER:
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
if (len > sizeof(struct icmp6_filter))
len = sizeof(struct icmp6_filter);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
}
static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct raw6_sock *rp = raw6_sk(sk);
int val;
if (get_user(val, (int __user *)optval))
return -EFAULT;
switch (optname) {
case IPV6_CHECKSUM:
if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
level == IPPROTO_IPV6) {
/*
* RFC3542 tells that IPV6_CHECKSUM socket
* option in the IPPROTO_IPV6 level is not
* allowed on ICMPv6 sockets.
* If you want to set it, use IPPROTO_RAW
* level IPV6_CHECKSUM socket option
* (Linux extension).
*/
return -EINVAL;
}
/* You may get a strange result with a positive odd offset;
RFC2292bis agrees with me. */
if (val > 0 && (val&1))
return -EINVAL;
if (val < 0) {
rp->checksum = 0;
} else {
rp->checksum = 1;
rp->offset = val;
}
return 0;
default:
return -ENOPROTOOPT;
}
}
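/*
 * Userspace usage sketch for the option handled above (illustrative):
 * have the kernel maintain a 16-bit checksum at an even payload offset
 * on a non-ICMPv6 raw socket; -1 turns checksumming off again.
 */
#if 0 /* userspace example, not built with the kernel */
	int offset = 16;	/* must be even, per the check above */

	setsockopt(sock, IPPROTO_IPV6, IPV6_CHECKSUM,
		   &offset, sizeof(offset));
#endif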
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return compat_ipv6_setsockopt(sk, level, optname,
optval, optlen);
}
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct raw6_sock *rp = raw6_sk(sk);
int val, len;
if (get_user(len,optlen))
return -EFAULT;
switch (optname) {
case IPV6_CHECKSUM:
/*
* We allow getsockopt() for IPPROTO_IPV6-level
* IPV6_CHECKSUM socket option on ICMPv6 sockets
* since RFC3542 is silent about it.
*/
if (rp->checksum == 0)
val = -1;
else
val = rp->offset;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval,&val,len))
return -EFAULT;
return 0;
}
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return compat_ipv6_getsockopt(sk, level, optname,
optval, optlen);
}
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ: {
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb_tail_pointer(skb) -
skb_transport_header(skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_ioctl(sk, cmd, (void __user *)arg);
#else
return -ENOIOCTLCMD;
#endif
}
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
static void rawv6_close(struct sock *sk, long timeout)
{
if (inet_sk(sk)->inet_num == IPPROTO_RAW)
ip6_ra_control(sk, -1);
ip6mr_sk_done(sk);
sk_common_release(sk);
}
static void raw6_destroy(struct sock *sk)
{
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
inet6_destroy_sock(sk);
}
static int rawv6_init_sk(struct sock *sk)
{
struct raw6_sock *rp = raw6_sk(sk);
switch (inet_sk(sk)->inet_num) {
case IPPROTO_ICMPV6:
rp->checksum = 1;
rp->offset = 2;
break;
case IPPROTO_MH:
rp->checksum = 1;
rp->offset = 4;
break;
default:
break;
}
return 0;
}
struct proto rawv6_prot = {
.name = "RAWv6",
.owner = THIS_MODULE,
.close = rawv6_close,
.destroy = raw6_destroy,
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
.setsockopt = rawv6_setsockopt,
.getsockopt = rawv6_getsockopt,
.sendmsg = rawv6_sendmsg,
.recvmsg = rawv6_recvmsg,
.bind = rawv6_bind,
.backlog_rcv = rawv6_rcv_skb,
.hash = raw_hash_sk,
.unhash = raw_unhash_sk,
.obj_size = sizeof(struct raw6_sock),
.h.raw_hash = &raw_v6_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_rawv6_setsockopt,
.compat_getsockopt = compat_rawv6_getsockopt,
.compat_ioctl = compat_rawv6_ioctl,
#endif
};
#ifdef CONFIG_PROC_FS
static int raw6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
} else {
struct sock *sp = v;
__u16 srcp = inet_sk(sp)->inet_num;
ip6_dgram_sock_seq_show(seq, v, srcp, 0,
raw_seq_private(seq)->bucket);
}
return 0;
}
static const struct seq_operations raw6_seq_ops = {
.start = raw_seq_start,
.next = raw_seq_next,
.stop = raw_seq_stop,
.show = raw6_seq_show,
};
static int raw6_seq_open(struct inode *inode, struct file *file)
{
return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops);
}
static const struct file_operations raw6_seq_fops = {
.owner = THIS_MODULE,
.open = raw6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init raw6_init_net(struct net *net)
{
if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
return -ENOMEM;
return 0;
}
static void __net_exit raw6_exit_net(struct net *net)
{
remove_proc_entry("raw6", net->proc_net);
}
static struct pernet_operations raw6_net_ops = {
.init = raw6_init_net,
.exit = raw6_exit_net,
};
int __init raw6_proc_init(void)
{
return register_pernet_subsys(&raw6_net_ops);
}
void raw6_proc_exit(void)
{
unregister_pernet_subsys(&raw6_net_ops);
}
#endif /* CONFIG_PROC_FS */
/* Same as inet6_dgram_ops, sans udp_poll. */
static const struct proto_ops inet6_sockraw_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_dgram_connect, /* ok */
.socketpair = sock_no_socketpair, /* a do nothing */
.accept = sock_no_accept, /* a do nothing */
.getname = inet6_getname,
.poll = datagram_poll, /* ok */
.ioctl = inet6_ioctl, /* must change */
.listen = sock_no_listen, /* ok */
.shutdown = inet_shutdown, /* ok */
.setsockopt = sock_common_setsockopt, /* ok */
.getsockopt = sock_common_getsockopt, /* ok */
.sendmsg = inet_sendmsg, /* ok */
.recvmsg = sock_common_recvmsg, /* ok */
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw rawv6_protosw = {
.type = SOCK_RAW,
.protocol = IPPROTO_IP, /* wild card */
.prot = &rawv6_prot,
.ops = &inet6_sockraw_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
};
int __init rawv6_init(void)
{
int ret;
ret = inet6_register_protosw(&rawv6_protosw);
if (ret)
goto out;
out:
return ret;
}
void rawv6_exit(void)
{
inet6_unregister_protosw(&rawv6_protosw);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5844_4 |
crossvul-cpp_data_bad_5737_0 | /* -*- linux-c -*-
* sysctl_net.c: sysctl interface to net subsystem.
*
* Begun April 1, 1996, Mike Shaver.
* Added /proc/sys/net directories for each protocol family. [MS]
*
* Revision 1.2 1996/05/08 20:24:40 shaver
* Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and
* NET_IPV4_IP_FORWARD.
*
*
*/
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#ifdef CONFIG_INET
#include <net/ip.h>
#endif
#ifdef CONFIG_NET
#include <linux/if_ether.h>
#endif
static struct ctl_table_set *
net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces)
{
return &namespaces->net_ns->sysctls;
}
static int is_seen(struct ctl_table_set *set)
{
return &current->nsproxy->net_ns->sysctls == set;
}
/* Return standard mode bits for table entry. */
static int net_ctl_permissions(struct ctl_table_header *head,
struct ctl_table *table)
{
struct net *net = container_of(head->set, struct net, sysctls);
kuid_t root_uid = make_kuid(net->user_ns, 0);
kgid_t root_gid = make_kgid(net->user_ns, 0);
/* Allow network administrator to have same access as root. */
if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
uid_eq(root_uid, current_uid())) {
int mode = (table->mode >> 6) & 7;
return (mode << 6) | (mode << 3) | mode;
}
/* Allow netns root group to have the same access as the root group */
if (gid_eq(root_gid, current_gid())) {
int mode = (table->mode >> 3) & 7;
return (mode << 3) | mode;
}
return table->mode;
}
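/*
 * Worked example of the mode widening above: for a table entry with
 * mode 0644, a CAP_NET_ADMIN caller sees the owner bits replicated,
 * (6 << 6) | (6 << 3) | 6 == 0666, while a netns-root-group caller
 * sees the group bits widened, (4 << 3) | 4 == 044.
 */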
static struct ctl_table_root net_sysctl_root = {
.lookup = net_ctl_header_lookup,
.permissions = net_ctl_permissions,
};
static int __net_init sysctl_net_init(struct net *net)
{
setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
return 0;
}
static void __net_exit sysctl_net_exit(struct net *net)
{
retire_sysctl_set(&net->sysctls);
}
static struct pernet_operations sysctl_pernet_ops = {
.init = sysctl_net_init,
.exit = sysctl_net_exit,
};
static struct ctl_table_header *net_header;
__init int net_sysctl_init(void)
{
static struct ctl_table empty[1];
int ret = -ENOMEM;
/* Avoid limitations in the sysctl implementation by
* registering "/proc/sys/net" as an empty directory not in a
* network namespace.
*/
net_header = register_sysctl("net", empty);
if (!net_header)
goto out;
ret = register_pernet_subsys(&sysctl_pernet_ops);
if (ret)
goto out;
register_sysctl_root(&net_sysctl_root);
out:
return ret;
}
struct ctl_table_header *register_net_sysctl(struct net *net,
const char *path, struct ctl_table *table)
{
return __register_sysctl_table(&net->sysctls, path, table);
}
EXPORT_SYMBOL_GPL(register_net_sysctl);
void unregister_net_sysctl_table(struct ctl_table_header *header)
{
unregister_sysctl_table(header);
}
EXPORT_SYMBOL_GPL(unregister_net_sysctl_table);
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5737_0 |
crossvul-cpp_data_bad_906_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% CCC U U TTTTT %
% C U U T %
% C U U T %
% C U U T %
% CCC UUU T %
% %
% %
% Read DR Halo Image Format %
% %
% Software Design %
% Jaroslav Fojtik %
% June 2000 %
% %
% %
% Permission is hereby granted, free of charge, to any person obtaining a %
% copy of this software and associated documentation files ("ImageMagick"), %
% to deal in ImageMagick without restriction, including without limitation %
% the rights to use, copy, modify, merge, publish, distribute, sublicense, %
% and/or sell copies of ImageMagick, and to permit persons to whom the %
% ImageMagick is furnished to do so, subject to the following conditions: %
% %
% The above copyright notice and this permission notice shall be included in %
% all copies or substantial portions of ImageMagick. %
% %
% The software is provided "as is", without warranty of any kind, express or %
% implied, including but not limited to the warranties of merchantability, %
% fitness for a particular purpose and noninfringement. In no event shall %
% ImageMagick Studio be liable for any claim, damages or other liability, %
% whether in an action of contract, tort or otherwise, arising from, out of %
% or in connection with ImageMagick or the use or other dealings in %
% ImageMagick. %
% %
% Except as contained in this notice, the name of the ImageMagick Studio %
% shall not be used in advertising or otherwise to promote the sale, use or %
% other dealings in ImageMagick without prior written authorization from the %
% ImageMagick Studio. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
typedef struct
{
unsigned Width;
unsigned Height;
unsigned Reserved;
} CUTHeader;
typedef struct
{
char FileId[2];
unsigned Version;
unsigned Size;
char FileType;
char SubType;
unsigned BoardID;
unsigned GraphicsMode;
unsigned MaxIndex;
unsigned MaxRed;
unsigned MaxGreen;
unsigned MaxBlue;
char PaletteId[20];
} CUTPalHeader;
static MagickBooleanType InsertRow(Image *image,ssize_t bpp,unsigned char *p,
ssize_t y,ExceptionInfo *exception)
{
int
bit;
Quantum
index;
register Quantum
*q;
ssize_t
x;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
switch (bpp)
{
case 1: /* Convert bitmap scanline. */
{
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
for (bit=0; bit < 8; bit++)
{
index=((*p) & (0x80 >> bit) ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=0; bit < (ssize_t) (image->columns % 8); bit++)
{
index=((*p) & (0x80 >> bit) ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
p++;
}
break;
}
case 2: /* Convert PseudoColor scanline. */
{
for (x=0; x < ((ssize_t) image->columns-3); x+=4)
{
index=ConstrainColormapIndex(image,(*p >> 6) & 0x3,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
index=ConstrainColormapIndex(image,(*p >> 4) & 0x3,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
index=ConstrainColormapIndex(image,(*p >> 2) & 0x3,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
index=ConstrainColormapIndex(image,(*p) & 0x3,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
p++;
}
if ((image->columns % 4) != 0)
{
index=ConstrainColormapIndex(image,(*p >> 6) & 0x3,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
if ((image->columns % 4) > 1)
{
index=ConstrainColormapIndex(image,(*p >> 4) & 0x3,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
if ((image->columns % 4) > 2)
{
index=ConstrainColormapIndex(image,(*p >> 2) & 0x3,
exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
index,q);
q+=GetPixelChannels(image);
}
}
p++;
}
break;
}
case 4: /* Convert PseudoColor scanline. */
{
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
index=ConstrainColormapIndex(image,(*p) & 0x0f,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
p++;
q+=GetPixelChannels(image);
}
if ((image->columns % 2) != 0)
{
index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
p++;
q+=GetPixelChannels(image);
}
break;
}
case 8: /* Convert PseudoColor scanline. */
{
for (x=0; x < (ssize_t) image->columns; x++)
{
index=ConstrainColormapIndex(image,*p,exception);
SetPixelIndex(image,index,q);
if (index < image->colors)
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
p++;
q+=GetPixelChannels(image);
}
}
break;
case 24: /* Convert DirectColor scanline. */
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
break;
}
if (!SyncAuthenticPixels(image,exception))
return(MagickFalse);
return(MagickTrue);
}
/*
Compute the number of colors in a grayed R[i]=G[i]=B[i] image
*/
static int GetCutColors(Image *image,ExceptionInfo *exception)
{
Quantum
intensity,
scale_intensity;
register Quantum
*q;
ssize_t
x,
y;
intensity=0;
scale_intensity=ScaleCharToQuantum(16);
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (intensity < GetPixelRed(image,q))
intensity=GetPixelRed(image,q);
if (intensity >= scale_intensity)
return(255);
q+=GetPixelChannels(image);
}
}
if (intensity < ScaleCharToQuantum(2))
return(2);
if (intensity < ScaleCharToQuantum(16))
return(16);
return((int) intensity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d C U T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadCUTImage() reads a CUT image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadCUTImage method is:
%
% Image *ReadCUTImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadCUTImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define ThrowCUTReaderException(severity,tag) \
{ \
if (palette != NULL) \
palette=DestroyImage(palette); \
if (clone_info != NULL) \
clone_info=DestroyImageInfo(clone_info); \
ThrowReaderException(severity,tag); \
}
Image *image,*palette;
ImageInfo *clone_info;
MagickBooleanType status;
MagickOffsetType
offset;
size_t EncodedByte;
unsigned char RunCount,RunValue,RunCountMasked;
CUTHeader Header;
CUTPalHeader PalHeader;
ssize_t depth;
ssize_t i,j;
ssize_t ldblk;
unsigned char *BImgBuff=NULL,*ptrB;
register Quantum *q;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read CUT image.
*/
palette=NULL;
clone_info=NULL;
Header.Width=ReadBlobLSBShort(image);
Header.Height=ReadBlobLSBShort(image);
Header.Reserved=ReadBlobLSBShort(image);
if (Header.Width==0 || Header.Height==0 || Header.Reserved!=0)
CUT_KO: ThrowCUTReaderException(CorruptImageError,"ImproperImageHeader");
/*---This code checks first line of image---*/
EncodedByte=ReadBlobLSBShort(image);
RunCount=(unsigned char) ReadBlobByte(image);
RunCountMasked=RunCount & 0x7F;
ldblk=0;
while((int) RunCountMasked!=0) /*end of line?*/
{
i=1;
if((int) RunCount<0x80) i=(ssize_t) RunCountMasked;
offset=SeekBlob(image,TellBlob(image)+i,SEEK_SET);
if (offset < 0)
ThrowCUTReaderException(CorruptImageError,"ImproperImageHeader");
if(EOFBlob(image) != MagickFalse) goto CUT_KO; /*wrong data*/
EncodedByte-=i+1;
ldblk+=(ssize_t) RunCountMasked;
RunCount=(unsigned char) ReadBlobByte(image);
if(EOFBlob(image) != MagickFalse) goto CUT_KO; /*wrong data: unexpected eof in line*/
RunCountMasked=RunCount & 0x7F;
}
if(EncodedByte!=1) goto CUT_KO; /*wrong data: size incorrect*/
i=0; /*guess the number of bit planes*/
if(ldblk==(int) Header.Width) i=8;
if(2*ldblk==(int) Header.Width) i=4;
if(8*ldblk==(int) Header.Width) i=1;
if(i==0) goto CUT_KO; /*wrong data: incorrect bit planes*/
depth=i;
image->columns=Header.Width;
image->rows=Header.Height;
image->depth=8;
image->colors=(size_t) (GetQuantumRange(1UL*i)+1);
if (image_info->ping != MagickFalse) goto Finish;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/* ----- Do something with palette ----- */
if ((clone_info=CloneImageInfo(image_info)) == NULL) goto NoPalette;
i=(ssize_t) strlen(clone_info->filename);
j=i;
while(--i>0)
{
if(clone_info->filename[i]=='.')
{
break;
}
if(clone_info->filename[i]=='/' || clone_info->filename[i]=='\\' ||
clone_info->filename[i]==':' )
{
i=j;
break;
}
}
(void) CopyMagickString(clone_info->filename+i,".PAL",(size_t)
(MagickPathExtent-i));
if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL)
{
(void) CopyMagickString(clone_info->filename+i,".pal",(size_t)
(MagickPathExtent-i));
if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL)
{
clone_info->filename[i]='\0';
if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL)
{
clone_info=DestroyImageInfo(clone_info);
clone_info=NULL;
goto NoPalette;
}
}
}
if( (palette=AcquireImage(clone_info,exception))==NULL ) goto NoPalette;
status=OpenBlob(clone_info,palette,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
ErasePalette:
palette=DestroyImage(palette);
palette=NULL;
goto NoPalette;
}
if(palette!=NULL)
{
(void) ReadBlob(palette,2,(unsigned char *) PalHeader.FileId);
if(strncmp(PalHeader.FileId,"AH",2) != 0) goto ErasePalette;
PalHeader.Version=ReadBlobLSBShort(palette);
PalHeader.Size=ReadBlobLSBShort(palette);
PalHeader.FileType=(char) ReadBlobByte(palette);
PalHeader.SubType=(char) ReadBlobByte(palette);
PalHeader.BoardID=ReadBlobLSBShort(palette);
PalHeader.GraphicsMode=ReadBlobLSBShort(palette);
PalHeader.MaxIndex=ReadBlobLSBShort(palette);
PalHeader.MaxRed=ReadBlobLSBShort(palette);
PalHeader.MaxGreen=ReadBlobLSBShort(palette);
PalHeader.MaxBlue=ReadBlobLSBShort(palette);
(void) ReadBlob(palette,20,(unsigned char *) PalHeader.PaletteId);
if (EOFBlob(image))
ThrowCUTReaderException(CorruptImageError,"UnexpectedEndOfFile");
if(PalHeader.MaxIndex<1) goto ErasePalette;
image->colors=PalHeader.MaxIndex+1;
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) goto NoMemory;
if(PalHeader.MaxRed==0) PalHeader.MaxRed=(unsigned int) QuantumRange; /*avoid division by 0*/
if(PalHeader.MaxGreen==0) PalHeader.MaxGreen=(unsigned int) QuantumRange;
if(PalHeader.MaxBlue==0) PalHeader.MaxBlue=(unsigned int) QuantumRange;
for(i=0;i<=(int) PalHeader.MaxIndex;i++)
{ /*this may be wrong - I don't know why the palette is laid out so strangely*/
j=(ssize_t) TellBlob(palette);
if((j % 512)>512-6)
{
j=((j / 512)+1)*512;
offset=SeekBlob(palette,j,SEEK_SET);
if (offset < 0)
ThrowCUTReaderException(CorruptImageError,"ImproperImageHeader");
}
image->colormap[i].red=(Quantum) ReadBlobLSBShort(palette);
if (QuantumRange != (Quantum) PalHeader.MaxRed)
{
image->colormap[i].red=ClampToQuantum(((double)
image->colormap[i].red*QuantumRange+(PalHeader.MaxRed>>1))/
PalHeader.MaxRed);
}
image->colormap[i].green=(Quantum) ReadBlobLSBShort(palette);
if (QuantumRange != (Quantum) PalHeader.MaxGreen)
{
image->colormap[i].green=ClampToQuantum
(((double) image->colormap[i].green*QuantumRange+(PalHeader.MaxGreen>>1))/PalHeader.MaxGreen);
}
image->colormap[i].blue=(Quantum) ReadBlobLSBShort(palette);
if (QuantumRange != (Quantum) PalHeader.MaxBlue)
{
image->colormap[i].blue=ClampToQuantum
(((double)image->colormap[i].blue*QuantumRange+(PalHeader.MaxBlue>>1))/PalHeader.MaxBlue);
}
}
if (EOFBlob(image))
ThrowCUTReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
NoPalette:
if(palette==NULL)
{
image->colors=256;
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
{
NoMemory:
ThrowCUTReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t)image->colors; i++)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char) i);
image->colormap[i].green=ScaleCharToQuantum((unsigned char) i);
image->colormap[i].blue=ScaleCharToQuantum((unsigned char) i);
}
}
/* ----- Load RLE compressed raster ----- */
BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk,
sizeof(*BImgBuff)); /*Ldblk was set in the check phase*/
if(BImgBuff==NULL) goto NoMemory;
offset=SeekBlob(image,6 /*sizeof(Header)*/,SEEK_SET);
if (offset < 0)
{
if (palette != NULL)
palette=DestroyImage(palette);
if (clone_info != NULL)
clone_info=DestroyImageInfo(clone_info);
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
for (i=0; i < (int) Header.Height; i++)
{
EncodedByte=ReadBlobLSBShort(image);
ptrB=BImgBuff;
j=ldblk;
RunCount=(unsigned char) ReadBlobByte(image);
RunCountMasked=RunCount & 0x7F;
while ((int) RunCountMasked != 0)
{
if((ssize_t) RunCountMasked>j)
{ /*Wrong Data*/
RunCountMasked=(unsigned char) j;
if(j==0)
{
break;
}
}
if((int) RunCount>0x80)
{
RunValue=(unsigned char) ReadBlobByte(image);
(void) memset(ptrB,(int) RunValue,(size_t) RunCountMasked);
}
else {
(void) ReadBlob(image,(size_t) RunCountMasked,ptrB);
}
ptrB+=(int) RunCountMasked;
j-=(int) RunCountMasked;
if (EOFBlob(image) != MagickFalse) goto Finish; /* wrong data: unexpected eof in line */
RunCount=(unsigned char) ReadBlobByte(image);
RunCountMasked=RunCount & 0x7F;
}
InsertRow(image,depth,BImgBuff,i,exception);
}
(void) SyncImage(image,exception);
/*detect monochrome image*/
if(palette==NULL)
{ /*attempt to detect binary (black&white) images*/
if ((image->storage_class == PseudoClass) &&
(SetImageGray(image,exception) != MagickFalse))
{
if(GetCutColors(image,exception)==2)
{
for (i=0; i < (ssize_t)image->colors; i++)
{
register Quantum
sample;
sample=ScaleCharToQuantum((unsigned char) i);
if(image->colormap[i].red!=sample) goto Finish;
if(image->colormap[i].green!=sample) goto Finish;
if(image->colormap[i].blue!=sample) goto Finish;
}
image->colormap[1].red=image->colormap[1].green=
image->colormap[1].blue=QuantumRange;
for (i=0; i < (ssize_t)image->rows; i++)
{
q=QueueAuthenticPixels(image,0,i,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (j=0; j < (ssize_t)image->columns; j++)
{
if (GetPixelRed(image,q) == ScaleCharToQuantum(1))
{
SetPixelRed(image,QuantumRange,q);
SetPixelGreen(image,QuantumRange,q);
SetPixelBlue(image,QuantumRange,q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse) goto Finish;
}
}
}
}
Finish:
if (BImgBuff != NULL)
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
if (palette != NULL)
palette=DestroyImage(palette);
if (clone_info != NULL)
clone_info=DestroyImageInfo(clone_info);
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
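/*
 * Standalone sketch of the DR Halo RLE scheme decoded above
 * (illustrative; operates on an in-memory buffer instead of a blob).
 * A count byte with the high bit set repeats the next byte
 * (count & 0x7f) times; with the high bit clear that many literal
 * bytes follow; a masked count of zero ends the scanline.
 */
#if 0 /* illustration only */
#include <stdint.h>
#include <string.h>

static size_t cut_decode_line(const uint8_t *src, uint8_t *dst, size_t max)
{
	size_t out = 0;

	for (;;) {
		uint8_t count = *src++;
		size_t n = count & 0x7f;

		if (n > max - out)
			n = max - out;	/* clamp overlong runs, as above */
		if (n == 0)
			break;		/* end of line (or buffer full) */
		if (count > 0x80) {
			memset(dst + out, *src++, n);	/* repeat packet */
		} else {
			memcpy(dst + out, src, n);	/* literal packet */
			src += n;
		}
		out += n;
	}
	return out;
}
#endif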
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r C U T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterCUTImage() adds attributes for the CUT image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterCUTImage method is:
%
% size_t RegisterCUTImage(void)
%
*/
ModuleExport size_t RegisterCUTImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("CUT","CUT","DR Halo");
entry->decoder=(DecodeImageHandler *) ReadCUTImage;
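/* The decoder seeks within the input blob (SeekBlob() in ReadCUTImage),
   so advertise that this coder requires a seekable stream. */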
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r C U T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterCUTImage() removes format registrations made by the
% CUT module from the list of supported formats.
%
% The format of the UnregisterCUTImage method is:
%
% UnregisterCUTImage(void)
%
*/
ModuleExport void UnregisterCUTImage(void)
{
(void) UnregisterMagickInfo("CUT");
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_906_0 |
crossvul-cpp_data_good_2027_0 | /* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 Intel Corp.
* Copyright (c) 2002 Nokia Corp.
*
* This is part of the SCTP Linux Kernel Implementation.
*
* These are the state functions for the state machine.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Mathew Kotowsky <kotowsky@sctp.org>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Hui Huang <hui.huang@nokia.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/inet_ecn.h>
#include <linux/skbuff.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/structs.h>
static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
const void *payload,
size_t paylen);
static int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands);
static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
static void sctp_send_stale_cookie_err(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk);
static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
sctp_cmd_seq_t *commands,
__be16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport);
static sctp_disposition_t sctp_sf_abort_violation(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen);
static sctp_disposition_t sctp_sf_violation_chunklen(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_paramlen(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, void *ext,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_ctsn(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_chunk(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_ierror_t sctp_sf_authenticate(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
struct sctp_chunk *chunk);
static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
/* Small helper function that checks that the chunk length
* is at least the required length.  The 'required_length' argument
* is set to be the size of the specific chunk type we are testing.
* Return Values: 1 = Valid length
* 0 = Invalid length
*
*/
static inline int
sctp_chunk_length_valid(struct sctp_chunk *chunk,
__u16 required_length)
{
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
if (unlikely(chunk_length < required_length))
return 0;
return 1;
}
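/* Typical usage, as seen throughout the state functions below:
*
*	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
*		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
*						  commands);
*/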
/**********************************************************
* These are the state functions for handling chunk events.
**********************************************************/
/*
* Process the final SHUTDOWN COMPLETE.
*
* Section: 4 (C) (diagram), 9.2
* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify
* that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be
* discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint
* should stop the T2-shutdown timer and remove all knowledge of the
* association (and thus the association enters the CLOSED state).
*
* Verification Tag: 8.5.1(C), sctpimpguide 2.41.
* C) Rules for packet carrying SHUTDOWN COMPLETE:
* ...
* - The receiver of a SHUTDOWN COMPLETE shall accept the packet
* if the Verification Tag field of the packet matches its own tag and
* the T bit is not set
* OR
* it is set to its peer's tag and the T bit is set in the Chunk
* Flags.
* Otherwise, the receiver MUST silently discard the packet
* and take no further action. An endpoint MUST ignore the
* SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_4_C(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* RFC 2960 6.10 Bundling
*
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* RFC 2960 10.2 SCTP-to-ULP
*
* H) SHUTDOWN COMPLETE notification
*
* When SCTP completes the shutdown procedures (section 9.2) this
* notification is passed to the upper layer.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
0, 0, 0, NULL, GFP_ATOMIC);
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
/* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
* will verify that it is in SHUTDOWN-ACK-SENT state, if it is
* not the chunk should be discarded. If the endpoint is in
* the SHUTDOWN-ACK-SENT state the endpoint should stop the
* T2-shutdown timer and remove all knowledge of the
* association (and thus the association enters the CLOSED
* state).
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
/*
* Respond to a normal INIT chunk.
* We are the side that is being asked for an association.
*
* Section: 5.1 Normal Establishment of an Association, B
* B) "Z" shall respond immediately with an INIT ACK chunk. The
* destination IP address of the INIT ACK MUST be set to the source
* IP address of the INIT to which this INIT ACK is responding. In
* the response, besides filling in other parameters, "Z" must set the
* Verification Tag field to Tag_A, and also provide its own
* Verification Tag (Tag_Z) in the Initiate Tag field.
*
* Verification Tag: Must be 0.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *repl;
struct sctp_association *new_asoc;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*
* IG Section 2.11.2
* Furthermore, we require that the receiver of an INIT chunk MUST
* enforce these rules by silently discarding an arriving packet
* with an INIT chunk that is bundled with other chunks.
*/
if (!chunk->singleton)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
}
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* Make sure that the INIT chunk has a valid length.
* Normally, this would cause an ABORT with a Protocol Violation
* error, but since we don't have an association, we'll
* just discard the packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* If the INIT is coming toward a closing socket, we'll send back
* an ABORT. Essentially, this catches the race of an INIT being
* backlogged to the socket at the same time as the user issues close().
* Since the socket and all its associations are going away, we
* can treat this as an OOTB packet.
*/
if (sctp_sstate(ep->base.sk, CLOSING))
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(net, ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_chunk_free(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
}
} else {
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
commands);
}
}
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data;
/* Tag the variable length parameters. */
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
if (!new_asoc)
goto nomem;
if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
sctp_scope(sctp_source(chunk)),
GFP_ATOMIC) < 0)
goto nomem_init;
/* The call, sctp_process_init(), can fail on memory allocation. */
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC))
goto nomem_init;
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
/* If there are errors that need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk)
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem_init;
/* If there are errors that need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameters.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
sctp_chunk_free(err_chunk);
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/*
* Note: After sending out INIT ACK with the State Cookie parameter,
* "Z" MUST NOT allocate any resources, nor keep any states for the
* new association. Otherwise, "Z" will be vulnerable to resource
* attacks.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
nomem_init:
sctp_association_free(new_asoc);
nomem:
if (err_chunk)
sctp_chunk_free(err_chunk);
return SCTP_DISPOSITION_NOMEM;
}
/*
* Respond to a normal INIT ACK chunk.
* We are the side that is initiating the association.
*
* Section: 5.1 Normal Establishment of an Association, C
* C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init
* timer and leave COOKIE-WAIT state. "A" shall then send the State
* Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start
* the T1-cookie timer, and enter the COOKIE-ECHOED state.
*
* Note: The COOKIE ECHO chunk can be bundled with any pending outbound
* DATA chunks, but it MUST be the first chunk in the packet and
* until the COOKIE ACK is returned the sender MUST NOT send any
* other packets to the peer.
*
* Verification Tag: 3.3.3
* If the value of the Initiate Tag in a received INIT ACK chunk is
* found to be 0, the receiver MUST treat it as an error and close the
* association by transmitting an ABORT.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_init_chunk_t *initchunk;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the INIT-ACK chunk has a valid length */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
sctp_error_t error = SCTP_ERROR_NO_RESOURCE;
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes. If there are no causes,
* then there wasn't enough memory. Just terminate
* the association.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(net, ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_chunk_free(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
error = SCTP_ERROR_INV_PARAM;
}
}
/* SCTP-AUTH, Section 6.3:
* It should be noted that if the receiver wants to tear
* down an association in an authenticated way only, the
* handling of malformed packets should not result in
* tearing down the association.
*
* This means that if we only want to abort associations
* in an authenticated way (i.e. AUTH+ABORT), then we
* can't destroy this association just because the packet
* was malformed.
*/
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
asoc, chunk->transport);
}
/* Tag the variable length parameters. Note that we never
* convert the parameters in an INIT chunk.
*/
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
initchunk = (sctp_init_chunk_t *) chunk->chunk_hdr;
sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT,
SCTP_PEER_INIT(initchunk));
/* Reset init error count upon receipt of INIT-ACK. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
/* 5.1 C) "A" shall stop the T1-init timer and leave
* COOKIE-WAIT state. "A" shall then ... start the T1-cookie
* timer, and enter the COOKIE-ECHOED state.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));
/* SCTP-AUTH: generate the association shared keys so that
* we can potentially sign the COOKIE-ECHO.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
/* 5.1 C) "A" shall then send the State Cookie received in the
* INIT ACK chunk in a COOKIE ECHO chunk, ...
*/
/* If there are any errors to report, send the ERROR chunk generated
* for unknown parameters as well.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO,
SCTP_CHUNK(err_chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Respond to a normal COOKIE ECHO chunk.
* We are the side that is being asked for an association.
*
* Section: 5.1 Normal Establishment of an Association, D
* D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply
* with a COOKIE ACK chunk after building a TCB and moving to
* the ESTABLISHED state. A COOKIE ACK chunk may be bundled with
* any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK
* chunk MUST be the first chunk in the packet.
*
* IMPLEMENTATION NOTE: An implementation may choose to send the
* Communication Up notification to the SCTP user upon reception
* of a valid COOKIE ECHO chunk.
*
* Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
* D) Rules for packet carrying a COOKIE ECHO
*
* - When sending a COOKIE ECHO, the endpoint MUST use the value of the
* Initial Tag received in the INIT ACK.
*
* - The receiver of a COOKIE ECHO follows the procedures in Section 5.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_association *new_asoc;
sctp_init_chunk_t *peer_init;
struct sctp_chunk *repl;
struct sctp_ulpevent *ev, *ai_ev = NULL;
int error = 0;
struct sctp_chunk *err_chk_p;
struct sock *sk;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
}
/* Make sure that the COOKIE_ECHO chunk has a valid length.
* In this case, we check that we have enough for at least a
* chunk header. More detailed verification is done
* in sctp_unpack_cookie().
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* If the endpoint is not listening or if the number of associations
* on the TCP-style socket exceeds the max backlog, respond with an
* ABORT.
*/
sk = ep->base.sk;
if (!sctp_sstate(sk, LISTENING) ||
(sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr =
(struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
/* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint
* "Z" will reply with a COOKIE ACK chunk after building a TCB
* and moving to the ESTABLISHED state.
*/
new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
&err_chk_p);
/* FIXME:
* If the re-build failed, what is the proper error path
* from here?
*
* [We should abort the association. --piggy]
*/
if (!new_asoc) {
/* FIXME: Several errors are possible. A bad cookie should
* be silently discarded, but think about logging it too.
*/
switch (error) {
case -SCTP_IERROR_NOMEM:
goto nomem;
case -SCTP_IERROR_STALE_COOKIE:
sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
err_chk_p);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case -SCTP_IERROR_BAD_SIG:
default:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
/* Delay state machine commands until later.
*
* Re-building the bind address for the association was already
* done in sctp_unpack_cookie().
*/
/* This is a brand-new association, so these are not yet side
* effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk,
&chunk->subh.cookie_hdr->c.peer_addr,
peer_init, GFP_ATOMIC))
goto nomem_init;
/* SCTP-AUTH: Now that we've populated the required fields in
* sctp_process_init(), set up the association shared keys as
* necessary so that we can potentially authenticate the ACK
*/
error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
if (error)
goto nomem_init;
/* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
* is supposed to be authenticated and we have to do delayed
* authentication. We've just recreated the association using
* the information in the cookie and now it's much easier to
* do the authentication.
*/
if (chunk->auth_chunk) {
struct sctp_chunk auth;
sctp_ierror_t ret;
/* Make sure that we and the peer are AUTH capable */
if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
kfree_skb(chunk->auth_chunk);
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* set-up our fake chunk so that we can process it */
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
auth.sctp_hdr = chunk->sctp_hdr;
auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
sizeof(sctp_chunkhdr_t));
skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
/* We can now safely free the auth_chunk clone */
kfree_skb(chunk->auth_chunk);
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem_init;
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose to
* send the Communication Up notification to the SCTP user
* upon reception of a valid COOKIE ECHO chunk.
*/
ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
new_asoc->c.sinit_num_ostreams,
new_asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem_ev;
/* Sockets API Draft Section 5.3.1.6
* When a peer sends an Adaptation Layer Indication parameter, SCTP
* delivers this notification to inform the application of the
* peer's requested adaptation layer.
*/
if (new_asoc->peer.adaptation_ind) {
ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc,
GFP_ATOMIC);
if (!ai_ev)
goto nomem_aiev;
}
/* Add all the state machine commands now since we've created
* everything. This way we don't introduce memory corruptions
* during side-effect processing and correctly count established
* associations.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* This will send the COOKIE ACK */
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/* Queue the ASSOC_CHANGE event */
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Send up the Adaptation Layer Indication event */
if (ai_ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ai_ev));
return SCTP_DISPOSITION_CONSUME;
nomem_aiev:
sctp_ulpevent_free(ev);
nomem_ev:
sctp_chunk_free(repl);
nomem_init:
sctp_association_free(new_asoc);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Respond to a normal COOKIE ACK chunk.
* We are the side that is being asked for an association.
*
* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move from the
* COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie
* timer. It may also notify its ULP about the successful
* establishment of the association with a Communication Up
* notification (see Section 10).
*
* Verification Tag:
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Verify that the chunk length for the COOKIE-ACK is OK.
* If we don't do this, any bundled chunks may be junked.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Reset init error count upon receipt of COOKIE-ACK,
* to avoid problems with the management of this
* counter in stale cookie situations when a transition back
* from the COOKIE-ECHOED state to the COOKIE-WAIT
* state is performed.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
/* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move
* from the COOKIE-ECHOED state to the ESTABLISHED state,
* stopping the T1-cookie timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* It may also notify its ULP about the successful
* establishment of the association with a Communication Up
* notification (see Section 10).
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP,
0, asoc->c.sinit_num_ostreams,
asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Sockets API Draft Section 5.3.1.6
* When a peer sends an Adaptation Layer Indication parameter, SCTP
* delivers this notification to inform the application of the
* peer's requested adaptation layer.
*/
if (asoc->peer.adaptation_ind) {
ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
if (!ev)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
}
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Generate and sendout a heartbeat packet. */
static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
struct sctp_chunk *reply;
/* Send a heartbeat to our peer. */
reply = sctp_make_heartbeat(asoc, transport);
if (!reply)
return SCTP_DISPOSITION_NOMEM;
/* Set rto_pending indicating that an RTT measurement
* is started with this heartbeat chunk.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
}
/* Generate a HEARTBEAT packet on the given transport. */
sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
/* Section 3.3.5.
* The Sender-specific Heartbeat Info field should normally include
* information about the sender's current time when this HEARTBEAT
* chunk is sent and the destination transport address to which this
* HEARTBEAT is sent (see Section 8.3).
*/
if (transport->param_flags & SPP_HB_ENABLE) {
if (SCTP_DISPOSITION_NOMEM ==
sctp_sf_heartbeat(ep, asoc, type, arg,
commands))
return SCTP_DISPOSITION_NOMEM;
/* Set transport error counter and association error counter
* when sending heartbeat.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
SCTP_TRANSPORT(transport));
}
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process a heartbeat request.
*
* Section: 8.3 Path Heartbeat
* The receiver of the HEARTBEAT should immediately respond with a
* HEARTBEAT ACK that contains the Heartbeat Information field copied
* from the received HEARTBEAT chunk.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* When receiving an SCTP packet, the endpoint MUST ensure that the
* value in the Verification Tag field of the received SCTP packet
* matches its own Tag. If the received Verification Tag value does not
* match the receiver's own tag value, the receiver shall silently
* discard the packet and shall not process it any further except for
* those cases listed in Section 8.5.1 below.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_paramhdr_t *param_hdr;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *reply;
size_t paylen = 0;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the HEARTBEAT chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* 8.3 The receiver of the HEARTBEAT should immediately
* respond with a HEARTBEAT ACK that contains the Heartbeat
* Information field copied from the received HEARTBEAT chunk.
*/
chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data;
param_hdr = (sctp_paramhdr_t *) chunk->subh.hb_hdr;
paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
if (ntohs(param_hdr->length) > paylen)
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
param_hdr, commands);
if (!pskb_pull(chunk->skb, paylen))
goto nomem;
reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen);
if (!reply)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process the returning HEARTBEAT ACK.
*
* Section: 8.3 Path Heartbeat
* Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT
* should clear the error counter of the destination transport
* address to which the HEARTBEAT was sent, and mark the destination
* transport address as active if it is not so marked. The endpoint may
* optionally report to the upper layer when an inactive destination
* address is marked as active due to the reception of the latest
* HEARTBEAT ACK. The receiver of the HEARTBEAT ACK must also
* clear the association overall error count as well (as defined
* in section 8.1).
*
* The receiver of the HEARTBEAT ACK should also perform an RTT
* measurement for that destination transport address using the time
* value carried in the HEARTBEAT ACK chunk.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
union sctp_addr from_addr;
struct sctp_transport *link;
sctp_sender_hb_info_t *hbinfo;
unsigned long max_interval;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
sizeof(sctp_sender_hb_info_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
/* Make sure that the length of the parameter is what we expect */
if (ntohs(hbinfo->param_hdr.length) !=
sizeof(sctp_sender_hb_info_t)) {
return SCTP_DISPOSITION_DISCARD;
}
from_addr = hbinfo->daddr;
link = sctp_assoc_lookup_paddr(asoc, &from_addr);
/* This should never happen, but let's log it if so. */
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
net_warn_ratelimited("%s association %p could not find address %pI6\n",
__func__,
asoc,
&from_addr.v6.sin6_addr);
} else {
net_warn_ratelimited("%s association %p could not find address %pI4\n",
__func__,
asoc,
&from_addr.v4.sin_addr.s_addr);
}
return SCTP_DISPOSITION_DISCARD;
}
/* Validate the 64-bit random nonce. */
if (hbinfo->hb_nonce != link->hb_nonce)
return SCTP_DISPOSITION_DISCARD;
max_interval = link->hbinterval + link->rto;
/* Check if the timestamp looks valid. */
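/* That is, accept the ACK only while
*	sent_at <= jiffies <= sent_at + max_interval;
* an echoed timestamp from the future, or one older than
* hbinterval + rto, is treated as stale or forged.
*/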
if (time_after(hbinfo->sent_at, jiffies) ||
time_after(jiffies, hbinfo->sent_at + max_interval)) {
pr_debug("%s: HEARTBEAT ACK with invalid timestamp received "
"for transport:%p\n", __func__, link);
return SCTP_DISPOSITION_DISCARD;
}
/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of
* the HEARTBEAT should clear the error counter of the
* destination transport address to which the HEARTBEAT was
* sent and mark the destination transport address as active if
* it is not so marked.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link));
return SCTP_DISPOSITION_CONSUME;
}
/* Helper function to send out an abort for the restart
* condition.
*/
static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
int len;
struct sctp_packet *pkt;
union sctp_addr_param *addrparm;
struct sctp_errhdr *errhdr;
struct sctp_endpoint *ep;
char buffer[sizeof(struct sctp_errhdr)+sizeof(union sctp_addr_param)];
struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family);
/* Build the error on the stack. We are way too malloc-crazy
* throughout the code today.
*/
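/* Layout of the on-stack 'buffer' once filled in below:
*
*	+--------------------+-------------------------------+
*	| struct sctp_errhdr | union sctp_addr_param, as     |
*	| (cause, length)    | built by af->to_addr_param()  |
*	+--------------------+-------------------------------+
*/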
errhdr = (struct sctp_errhdr *)buffer;
addrparm = (union sctp_addr_param *)errhdr->variable;
/* Copy into a parm format. */
len = af->to_addr_param(ssa, addrparm);
len += sizeof(sctp_errhdr_t);
errhdr->cause = SCTP_ERROR_RESTART;
errhdr->length = htons(len);
/* Assign to the control socket. */
ep = sctp_sk(net->sctp.ctl_sock)->ep;
/* Association is NULL since this may be a restart attack and we
* want to send back the attacker's vtag.
*/
pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
if (!pkt)
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
/* Discard the rest of the inbound packet. */
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
out:
/* Even if there is no memory, treat as a failure so
* the packet will get dropped.
*/
return 0;
}
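/* Return true if 'ipaddr' is an exact match for the address of one of
* the transports on 'list'.
*/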
static bool list_has_sctp_addr(const struct list_head *list,
union sctp_addr *ipaddr)
{
struct sctp_transport *addr;
list_for_each_entry(addr, list, transports) {
if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
return true;
}
return false;
}
/* A restart is occurring, check to make sure no new addresses
* are being added as we may be under a takeover attack.
*/
static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
const struct sctp_association *asoc,
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
struct net *net = sock_net(new_asoc->base.sk);
struct sctp_transport *new_addr;
int ret = 1;
/* Implementor's Guide - Section 5.2.2
* ...
* Before responding the endpoint MUST check to see if the
* unexpected INIT adds new addresses to the association. If new
* addresses are added to the association, the endpoint MUST respond
* with an ABORT..
*/
/* Search through all current addresses and make sure
* we aren't adding any new ones.
*/
list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
transports) {
if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
&new_addr->ipaddr)) {
sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
commands);
ret = 0;
break;
}
}
/* Return success if all addresses were found. */
return ret;
}
/* Populate the verification/tie tags based on overlapping INIT
* scenario.
*
* Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state.
*/
static void sctp_tietags_populate(struct sctp_association *new_asoc,
const struct sctp_association *asoc)
{
switch (asoc->state) {
/* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */
case SCTP_STATE_COOKIE_WAIT:
new_asoc->c.my_vtag = asoc->c.my_vtag;
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = 0;
break;
case SCTP_STATE_COOKIE_ECHOED:
new_asoc->c.my_vtag = asoc->c.my_vtag;
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = asoc->c.peer_vtag;
break;
/* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED,
* COOKIE-WAIT and SHUTDOWN-ACK-SENT
*/
default:
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = asoc->c.peer_vtag;
break;
}
/* Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of
* outbound streams) into the INIT ACK and cookie.
*/
new_asoc->rwnd = asoc->rwnd;
new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams;
new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams;
new_asoc->c.initial_tsn = asoc->c.initial_tsn;
}
/*
* Compare vtag/tietag values to determine unexpected COOKIE-ECHO
* handling action.
*
* RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists.
*
* Returns value representing action to be taken. These action values
* correspond to Action/Description values in RFC 2960, Table 2.
*/
static char sctp_tietags_compare(struct sctp_association *new_asoc,
const struct sctp_association *asoc)
{
/* In this case, the peer may have restarted. */
if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag != new_asoc->c.peer_vtag) &&
(asoc->c.my_vtag == new_asoc->c.my_ttag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_ttag))
return 'A';
/* Collision case B. */
if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
((asoc->c.peer_vtag != new_asoc->c.peer_vtag) ||
(0 == asoc->c.peer_vtag))) {
return 'B';
}
/* Collision case D. */
if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_vtag))
return 'D';
/* Collision case C. */
if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_vtag) &&
(0 == new_asoc->c.my_ttag) &&
(0 == new_asoc->c.peer_ttag))
return 'C';
/* No match to any of the special cases; discard this packet. */
return 'E';
}
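/* A compact summary of the comparisons above, derived from the code
* (cf. RFC 2960, Table 2).  "local"/"peer" compare the existing
* association's vtags with those rebuilt from the cookie in new_asoc:
*
*	local tag   peer tag     tie-tags              action
*	---------   ----------   -------------------   -------------------
*	mismatch    mismatch     match the old vtags   'A' (peer restart)
*	match       mismatch/0   -                     'B' (collision)
*	match       match        -                     'D' (dup cookie)
*	mismatch    match        both zero             'C' (late cookie)
*	otherwise                                      'E' (discard)
*/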
/* Common helper routine for both duplicate and simultaneous INIT
* chunk handling.
*/
static sctp_disposition_t sctp_sf_do_unexpected_init(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
sctp_disposition_t retval;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *repl;
struct sctp_association *new_asoc;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*
* IG Section 2.11.2
* Furthermore, we require that the receiver of an INIT chunk MUST
* enforce these rules by silently discarding an arriving packet
* with an INIT chunk that is bundled with other chunks.
*/
if (!chunk->singleton)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* Make sure that the INIT chunk has a valid length.
* In this case, we generate a protocol violation since we have
* an association established.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
/* Tag the variable length parameters. */
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(net, ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
}
goto cleanup;
} else {
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
commands);
}
}
/*
* Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of
* outbound streams) into the INIT ACK and cookie.
* FIXME: We are copying parameters from the endpoint not the
* association.
*/
new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
if (!new_asoc)
goto nomem;
if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
goto nomem;
/* In the outbound INIT ACK the endpoint MUST copy its current
* Verification Tag and Peer's Verification Tag into a reserved
* place (local tie-tag and peer tie-tag) within the state cookie.
*/
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC))
goto nomem;
/* Make sure no new addresses are being added during the
* restart. Do not do this check for COOKIE-WAIT state,
* since there are no peer addresses to check against.
* Upon return an ABORT will have been sent if needed.
*/
if (!sctp_state(asoc, COOKIE_WAIT)) {
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
commands)) {
retval = SCTP_DISPOSITION_CONSUME;
goto nomem_retval;
}
}
sctp_tietags_populate(new_asoc, asoc);
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
/* If there are errors that need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk) {
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
}
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem;
/* If there are errors that need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameters.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/*
* Note: After sending out INIT ACK with the State Cookie parameter,
* "Z" MUST NOT allocate any resources for this new association.
* Otherwise, "Z" will be vulnerable to resource attacks.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
retval = SCTP_DISPOSITION_CONSUME;
return retval;
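/* Error paths: 'nomem' falls through to 'nomem_retval' (which frees
* new_asoc if it was allocated), and both fall through to 'cleanup'
* (which frees any pending err_chunk) before returning retval.
*/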
nomem:
retval = SCTP_DISPOSITION_NOMEM;
nomem_retval:
if (new_asoc)
sctp_association_free(new_asoc);
cleanup:
if (err_chunk)
sctp_chunk_free(err_chunk);
return retval;
}
/*
* Handle simultaneous INIT.
* This means we started an INIT and then we got an INIT request from
* our peer.
*
* Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B)
* This usually indicates an initialization collision, i.e., each
* endpoint is attempting, at about the same time, to establish an
* association with the other endpoint.
*
* Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an
* endpoint MUST respond with an INIT ACK using the same parameters it
* sent in its original INIT chunk (including its Verification Tag,
* unchanged). These original parameters are combined with those from the
* newly received INIT chunk. The endpoint shall also generate a State
* Cookie with the INIT ACK. The endpoint uses the parameters sent in its
* INIT to calculate the State Cookie.
*
* After that, the endpoint MUST NOT change its state, the T1-init
* timer shall be left running and the corresponding TCB MUST NOT be
* destroyed. The normal procedures for handling State Cookies when
* a TCB exists will resolve the duplicate INITs to a single association.
*
* For an endpoint that is in the COOKIE-ECHOED state it MUST populate
* its Tie-Tags with the Tag information of itself and its peer (see
* section 5.2.2 for a description of the Tie-Tags).
*
* Verification Tag: Not explicit, but an INIT can not have a valid
* verification tag, so we skip the check.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Call helper to do the real work for both simultaneous and
* duplicate INIT chunk handling.
*/
return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
}
/*
* Handle duplicated INIT messages. These are usually delayed
* retransmissions.
*
* Section: 5.2.2 Unexpected INIT in States Other than CLOSED,
* COOKIE-ECHOED and COOKIE-WAIT
*
* Unless otherwise stated, upon reception of an unexpected INIT for
* this association, the endpoint shall generate an INIT ACK with a
* State Cookie. In the outbound INIT ACK the endpoint MUST copy its
* current Verification Tag and peer's Verification Tag into a reserved
* place within the state cookie. We shall refer to these locations as
* the Peer's-Tie-Tag and the Local-Tie-Tag. The outbound SCTP packet
* containing this INIT ACK MUST carry a Verification Tag value equal to
* the Initiation Tag found in the unexpected INIT. And the INIT ACK
* MUST contain a new Initiation Tag (randomly generated; see Section
* 5.3.1). Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of outbound
* streams) into the INIT ACK and cookie.
*
* After sending out the INIT ACK, the endpoint shall take no further
* actions, i.e., the existing association, including its current state,
* and the corresponding TCB MUST NOT be changed.
*
* Note: Only when a TCB exists and the association is not in a COOKIE-
* WAIT state are the Tie-Tags populated. For a normal association INIT
* (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be
* set to 0 (indicating that no previous TCB existed). The INIT ACK and
* State Cookie are populated as specified in section 5.2.1.
*
* Verification Tag: Not specified, but an INIT has no way of knowing
* what the verification tag could be, so we ignore it.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Call helper to do the real work for both simultaneous and
* duplicate INIT chunk handling.
*/
return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
}
/*
* Unexpected INIT-ACK handler.
*
* Section 5.2.3
* If an INIT ACK is received by an endpoint in any state other than the
* COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk.
* An unexpected INIT ACK usually indicates the processing of an old or
* duplicated INIT chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
/* Per the above section, we'll discard the chunk if we have an
* endpoint. If this is an OOTB INIT-ACK, treat it as such.
*/
if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
else
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
}
/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
*
* Section 5.2.4
* A) In this case, the peer may have restarted.
*/
static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
sctp_init_chunk_t *peer_init;
struct sctp_ulpevent *ev;
struct sctp_chunk *repl;
struct sctp_chunk *err;
sctp_disposition_t disposition;
/* new_asoc is a brand-new association, so these are not yet
* side effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
GFP_ATOMIC))
goto nomem;
/* Make sure no new addresses are being added during the
* restart. Though this is a pretty complicated attack
* since you'd have to get inside the cookie.
*/
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
return SCTP_DISPOSITION_CONSUME;
}
/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
* the peer has restarted (Action A), it MUST NOT setup a new
* association but instead resend the SHUTDOWN ACK and send an ERROR
* chunk with a "Cookie Received while Shutting Down" error cause to
* its peer.
*/
if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
SCTP_ST_CHUNK(chunk->chunk_hdr->type),
chunk, commands);
if (SCTP_DISPOSITION_NOMEM == disposition)
goto nomem;
err = sctp_make_op_error(asoc, chunk,
SCTP_ERROR_COOKIE_IN_SHUTDOWN,
NULL, 0, 0);
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
return SCTP_DISPOSITION_CONSUME;
}
/* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
* data. Consider the optional choice of resending of this data.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
/* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
* and ASCONF-ACK cache.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
/* Report association restart to upper layer. */
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
new_asoc->c.sinit_num_ostreams,
new_asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem_ev;
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return SCTP_DISPOSITION_CONSUME;
nomem_ev:
sctp_chunk_free(repl);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B')
*
* Section 5.2.4
* B) In this case, both sides may be attempting to start an association
* at about the same time but the peer endpoint started its INIT
* after responding to the local endpoint's INIT
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
sctp_init_chunk_t *peer_init;
struct sctp_chunk *repl;
/* new_asoc is a brand-new association, so these are not yet
* side effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
GFP_ATOMIC))
goto nomem;
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose to
* send the Communication Up notification to the SCTP user
* upon reception of a valid COOKIE ECHO chunk.
*
* Sadly, this needs to be implemented as a side-effect, because
* we are not guaranteed to have set the association id of the real
* association and so these notifications need to be delayed until
* the association id is allocated.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP));
/* Sockets API Draft Section 5.3.1.6
 * When a peer sends an Adaptation Layer Indication parameter, SCTP
 * delivers this notification to inform the application of the
 * peer's requested adaptation layer.
*
* This also needs to be done as a side effect for the same reason as
* above.
*/
if (asoc->peer.adaptation_ind)
sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C')
*
* Section 5.2.4
* C) In this case, the local endpoint's cookie has arrived late.
* Before it arrived, the local endpoint sent an INIT and received an
* INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag
* but a new tag of its own.
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
/* The cookie should be silently discarded.
* The endpoint SHOULD NOT change states and should leave
* any timers running.
*/
return SCTP_DISPOSITION_DISCARD;
}
/* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D')
*
* Section 5.2.4
*
* D) When both local and remote tags match the endpoint should always
* enter the ESTABLISHED state, if it has not already done so.
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
struct sctp_ulpevent *ev = NULL, *ai_ev = NULL;
struct sctp_chunk *repl;
/* Clarification from Implementor's Guide:
* D) When both local and remote tags match the endpoint should
* enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
* It should stop any cookie timer that may be running and send
* a COOKIE ACK.
*/
/* Don't accidentally move back into established state. */
if (asoc->state < SCTP_STATE_ESTABLISHED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
SCTP_NULL());
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose
* to send the Communication Up notification to the
* SCTP user upon reception of a valid COOKIE
* ECHO chunk.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0,
SCTP_COMM_UP, 0,
asoc->c.sinit_num_ostreams,
asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
/* Sockets API Draft Section 5.3.1.6
 * When a peer sends an Adaptation Layer Indication parameter,
 * SCTP delivers this notification to inform the application
 * of the peer's requested adaptation layer.
*/
if (asoc->peer.adaptation_ind) {
ai_ev = sctp_ulpevent_make_adaptation_indication(asoc,
GFP_ATOMIC);
if (!ai_ev)
goto nomem;
}
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
if (ai_ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ai_ev));
return SCTP_DISPOSITION_CONSUME;
nomem:
if (ai_ev)
sctp_ulpevent_free(ai_ev);
if (ev)
sctp_ulpevent_free(ev);
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle a duplicate COOKIE-ECHO. This usually means a cookie-carrying
* chunk was retransmitted and then delayed in the network.
*
* Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists
*
* Verification Tag: None. Do cookie validation.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_disposition_t retval;
struct sctp_chunk *chunk = arg;
struct sctp_association *new_asoc;
int error = 0;
char action;
struct sctp_chunk *err_chk_p;
/* Make sure that the chunk has a valid length from the protocol
* perspective. In this case check to make sure we have at least
* enough for the chunk header. Cookie length verification is
* done later.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
/* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
* of a duplicate COOKIE ECHO match the Verification Tags of the
* current association, consider the State Cookie valid even if
* the lifespan is exceeded.
*/
new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
&err_chk_p);
/* FIXME:
* If the re-build failed, what is the proper error path
* from here?
*
* [We should abort the association. --piggy]
*/
if (!new_asoc) {
/* FIXME: Several errors are possible. A bad cookie should
* be silently discarded, but think about logging it too.
*/
switch (error) {
case -SCTP_IERROR_NOMEM:
goto nomem;
case -SCTP_IERROR_STALE_COOKIE:
sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
err_chk_p);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case -SCTP_IERROR_BAD_SIG:
default:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
/* Compare the tie_tag in the cookie with the verification tag of
 * the current association.
*/
action = sctp_tietags_compare(new_asoc, asoc);
switch (action) {
case 'A': /* Association restart. */
retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'B': /* Collision case B. */
retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'C': /* Collision case C. */
retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'D': /* Collision case D. */
retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
new_asoc);
break;
default: /* Discard packet for all others. */
retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
break;
}
/* Delete the temporary new association. */
sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
/* Restore association pointer to provide SCTP command interpreter
 * with a valid context in case it needs to manipulate
 * the queues.
 */
sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
SCTP_ASOC((struct sctp_association *)asoc));
return retval;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
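/* For reference, the tie-tag comparison above follows RFC 2960
 * Section 5.2.4, Table 2 (paraphrased here; X = tag does not match
 * the existing TCB, M = tag matches, 0 = no tie-tag in the cookie,
 * A = any value):
 *
 * Local Tag  Peer's Tag  Local Tie-Tag  Peer's Tie-Tag  Action
 *     X          X            M              M            'A'
 *     M          X            A              A            'B'
 *     M          0            A              A            'B'
 *     X          M            0              0            'C'
 *     M          M            A              A            'D'
 */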
/*
* Process an ABORT. (SHUTDOWN-PENDING state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_pending_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
 * Because the length is "invalid", we can't really discard just
 * this chunk, as we do not know its true length. So, to be safe,
 * discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
/*
* Process an ABORT. (SHUTDOWN-SENT state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
 * Because the length is "invalid", we can't really discard just
 * this chunk, as we do not know its true length. So, to be safe,
 * discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
/* Stop the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
/*
* Process an ABORT. (SHUTDOWN-ACK-SENT state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* The same T2 timer, so we should be able to use
 * a common function with the SHUTDOWN-SENT state.
*/
return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
}
/*
* Handle an Error received in COOKIE_ECHOED state.
*
* Only handle the error type of stale COOKIE Error, the other errors will
* be ignored.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_errhdr_t *err;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ERROR chunk has a valid length.
* The parameter walking depends on this as well.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Process the error here */
/* FUTURE FIXME: When PR-SCTP related and other optional
* parms are emitted, this will have to change to handle multiple
* errors.
*/
sctp_walk_errors(err, chunk->chunk_hdr) {
if (SCTP_ERROR_STALE_COOKIE == err->cause)
return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
arg, commands);
}
/* It is possible to have malformed error causes, and that
* will cause us to end the walk early. However, since
* we are discarding the packet, there should be no adverse
 * effects.
*/
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/*
* Handle a Stale COOKIE Error
*
* Section: 5.2.6 Handle Stale COOKIE Error
* If the association is in the COOKIE-ECHOED state, the endpoint may elect
* one of the following three alternatives.
* ...
* 3) Send a new INIT chunk to the endpoint, adding a Cookie
* Preservative parameter requesting an extension to the lifetime of
* the State Cookie. When calculating the time extension, an
* implementation SHOULD use the RTT information measured based on the
* previous COOKIE ECHO / ERROR exchange, and should add no more
* than 1 second beyond the measured RTT, due to long State Cookie
* lifetimes making the endpoint more subject to a replay attack.
*
* Verification Tag: Not explicit, but safe to ignore.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
time_t stale;
sctp_cookie_preserve_param_t bht;
sctp_errhdr_t *err;
struct sctp_chunk *reply;
struct sctp_bind_addr *bp;
int attempts = asoc->init_err_counter + 1;
if (attempts > asoc->max_init_attempts) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_STALE_COOKIE));
return SCTP_DISPOSITION_DELETE_TCB;
}
err = (sctp_errhdr_t *)(chunk->skb->data);
/* When calculating the time extension, an implementation
* SHOULD use the RTT information measured based on the
* previous COOKIE ECHO / ERROR exchange, and should add no
* more than 1 second beyond the measured RTT, due to long
* State Cookie lifetimes making the endpoint more subject to
* a replay attack.
* Measure of Staleness's unit is usec. (1/1000000 sec)
* Suggested Cookie Life-span Increment's unit is msec.
* (1/1000 sec)
* In general, if you use the suggested cookie life, the value
* found in the field of measure of staleness should be doubled
* to give ample time to retransmit the new cookie and thus
* yield a higher probability of success on the reattempt.
*/
stale = ntohl(*(__be32 *)((u8 *)err + sizeof(sctp_errhdr_t)));
stale = (stale * 2) / 1000;
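/* Worked example (hypothetical numbers, for exposition only): a
 * reported Measure of Staleness of 1500000 usec (1.5 sec) is doubled
 * and converted to msec for the Suggested Cookie Life-span Increment:
 *
 *	stale = 1500000;               usec, from the ERROR chunk
 *	stale = (1500000 * 2) / 1000;  == 3000 msec
 */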
bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE;
bht.param_hdr.length = htons(sizeof(bht));
bht.lifespan_increment = htonl(stale);
/* Build that new INIT chunk. */
bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht));
if (!reply)
goto nomem;
sctp_addto_chunk(reply, sizeof(bht), &bht);
/* Clear peer's init_tag cached in assoc as we are sending a new INIT */
sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());
/* Stop pending T3-rtx and heartbeat timers */
sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
/* Delete non-primary peer IP addresses since we are transitioning
* back to the COOKIE-WAIT state
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
/* If we've sent any data bundled with COOKIE-ECHO we will need to
 * resend it.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
SCTP_TRANSPORT(asoc->peer.primary_path));
/* Cast away the const modifier, as we want to just
 * rerun it through as a side effect.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_INC, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process an ABORT.
*
* Section: 9.1
* After checking the Verification Tag, the receiving endpoint shall
* remove the association from its record, and shall report the
* termination to its upper layer.
*
* Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
* B) Rules for packet carrying ABORT:
*
* - The endpoint shall always fill in the Verification Tag field of the
* outbound packet with the destination endpoint's tag value if it
* is known.
*
* - If the ABORT is sent in response to an OOTB packet, the endpoint
* MUST follow the procedure described in Section 8.4.
*
* - The receiver MUST accept the packet if the Verification Tag
* matches either its own tag, OR the tag of its peer. Otherwise, the
* receiver MUST silently discard the packet and take no further
* action.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
 * Because the length is "invalid", we can't really discard just
 * this chunk, as we do not know its true length. So, to be safe,
 * discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
unsigned int len;
__be16 error = SCTP_ERROR_NO_ERROR;
/* See if we have an error cause code in the chunk. */
len = ntohs(chunk->chunk_hdr->length);
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
sctp_errhdr_t *err;
sctp_walk_errors(err, chunk->chunk_hdr);
if ((void *)err != (void *)chunk->chunk_end)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
}
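/* Sizing note (illustrative): the chunk header and the error cause
 * header are each 4 bytes on the wire, so any ABORT at least 8 bytes
 * long carries a complete error cause header; its cause code is then
 * passed to SCTP_CMD_ASSOC_FAILED below.
 */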
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
/*
* Process an ABORT. (COOKIE-WAIT state)
*
* See sctp_sf_do_9_1_abort() above.
*/
sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
unsigned int len;
__be16 error = SCTP_ERROR_NO_ERROR;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
 * Because the length is "invalid", we can't really discard just
 * this chunk, as we do not know its true length. So, to be safe,
 * discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* See if we have an error cause code in the chunk. */
len = ntohs(chunk->chunk_hdr->length);
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
chunk->transport);
}
/*
* Process an incoming ICMP as an ABORT. (COOKIE-WAIT state)
*/
sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
ENOPROTOOPT, asoc,
(struct sctp_transport *)arg);
}
/*
* Process an ABORT. (COOKIE-ECHOED state)
*/
sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we should be able to use
 * a common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
}
/*
* Stop T1 timer and abort association with "INIT failed".
*
* This is common code called by several sctp_sf_*_abort() functions above.
*/
static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
sctp_cmd_seq_t *commands,
__be16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport)
{
pr_debug("%s: ABORT received (INIT)\n", __func__);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
/* CMD_INIT_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(error));
return SCTP_DISPOSITION_ABORT;
}
/*
 * sctp_sf_do_9_2_shutdown
*
* Section: 9.2
* Upon the reception of the SHUTDOWN, the peer endpoint shall
* - enter the SHUTDOWN-RECEIVED state,
*
* - stop accepting new data from its SCTP user
*
* - verify, by checking the Cumulative TSN Ack field of the chunk,
* that all its outstanding DATA chunks have been received by the
* SHUTDOWN sender.
*
 * Once an endpoint has reached the SHUTDOWN-RECEIVED state, it MUST NOT
 * send a SHUTDOWN in response to a ULP request, and should discard
 * subsequent SHUTDOWN chunks.
*
* If there are still outstanding DATA chunks left, the SHUTDOWN
* receiver shall continue to follow normal data transmission
* procedures defined in Section 6 until all outstanding DATA chunks
* are acknowledged; however, the SHUTDOWN receiver MUST NOT accept
* new data from its SCTP user.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_shutdownhdr_t *sdh;
sctp_disposition_t disposition;
struct sctp_ulpevent *ev;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk,
sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Convert the elaborate header. */
sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t));
chunk->subh.shutdown_hdr = sdh;
ctsn = ntohl(sdh->cum_tsn_ack);
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
/* If the Cumulative TSN Ack is beyond the max TSN currently
 * sent, terminate the association and respond to the
 * sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
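/* Both checks above use serial (mod 2^32) arithmetic, so the TSN
 * comparisons stay correct across wraparound. For example
 * (hypothetical values), TSN_lt(0xfffffffe, 0x00000001) is true:
 * 0x00000001 is "newer" because it is less than 2^31 ahead of
 * 0xfffffffe.
 */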
/* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
* When a peer sends a SHUTDOWN, SCTP delivers this notification to
* inform the application that it should cease sending data.
*/
ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
if (!ev) {
disposition = SCTP_DISPOSITION_NOMEM;
goto out;
}
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Upon the reception of the SHUTDOWN, the peer endpoint shall
* - enter the SHUTDOWN-RECEIVED state,
* - stop accepting new data from its SCTP user
*
* [This is implicit in the new state.]
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
arg, commands);
}
if (SCTP_DISPOSITION_NOMEM == disposition)
goto out;
/* - verify, by checking the Cumulative TSN Ack field of the
* chunk, that all its outstanding DATA chunks have been
* received by the SHUTDOWN sender.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack));
out:
return disposition;
}
/*
* sctp_sf_do_9_2_shut_ctsn
*
* Once an endpoint has reached the SHUTDOWN-RECEIVED state,
* it MUST NOT send a SHUTDOWN in response to a ULP request.
* The Cumulative TSN Ack of the received SHUTDOWN chunk
* MUST be processed.
*/
sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_shutdownhdr_t *sdh;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk,
sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
ctsn = ntohl(sdh->cum_tsn_ack);
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
/* If the Cumulative TSN Ack is beyond the max TSN currently
 * sent, terminate the association and respond to the
 * sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
/* verify, by checking the Cumulative TSN Ack field of the
* chunk, that all its outstanding DATA chunks have been
* received by the SHUTDOWN sender.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
SCTP_BE32(sdh->cum_tsn_ack));
return SCTP_DISPOSITION_CONSUME;
}
/* RFC 2960 9.2
* If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk
* (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination
* transport addresses (either in the IP addresses or in the INIT chunk)
* that belong to this association, it should discard the INIT chunk and
* retransmit the SHUTDOWN ACK chunk.
*/
sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
struct sctp_chunk *reply;
/* Make sure that the chunk has a valid length */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Since we are not going to really process this INIT, there
 * is no point in verifying chunk boundaries. Just generate
* the SHUTDOWN ACK.
*/
reply = sctp_make_shutdown_ack(asoc, chunk);
if (NULL == reply)
goto nomem;
/* Set the transport for the SHUTDOWN ACK chunk and the timeout for
* the T2-SHUTDOWN timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* and restart the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* sctp_sf_do_ecn_cwr
*
* Section: Appendix A: Explicit Congestion Notification
*
* CWR:
*
* RFC 2481 details a specific bit for a sender to send in the header of
* its next outbound TCP segment to indicate to its peer that it has
* reduced its congestion window. This is termed the CWR bit. For
* SCTP the same indication is made by including the CWR chunk.
* This chunk contains one data element, i.e. the TSN number that
* was sent in the ECNE chunk. This element represents the lowest
* TSN number in the datagram that was originally marked with the
* CE bit.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_cwrhdr_t *cwr;
struct sctp_chunk *chunk = arg;
u32 lowest_tsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
cwr = (sctp_cwrhdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t));
lowest_tsn = ntohl(cwr->lowest_tsn);
/* Does this CWR ack the last sent congestion notification? */
if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) {
/* Stop sending ECNE. */
sctp_add_cmd_sf(commands,
SCTP_CMD_ECN_CWR,
SCTP_U32(lowest_tsn));
}
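/* Illustrative example (hypothetical TSNs): if our last ECNE reported
 * TSN 1000 and this CWR carries lowest_tsn 1003, TSN_lte(1000, 1003)
 * holds, so the CWR covers our notification and the SCTP_CMD_ECN_CWR
 * side effect stops further ECNE transmission.
 */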
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_sf_do_ecne
*
* Section: Appendix A: Explicit Congestion Notification
*
* ECN-Echo
*
* RFC 2481 details a specific bit for a receiver to send back in its
* TCP acknowledgements to notify the sender of the Congestion
* Experienced (CE) bit having arrived from the network. For SCTP this
* same indication is made by including the ECNE chunk. This chunk
* contains one data element, i.e. the lowest TSN associated with the IP
* datagram marked with the CE bit.....
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_ecne(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_ecnehdr_t *ecne;
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
ecne = (sctp_ecnehdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t));
/* If this is a newer ECNE than the last CWR packet we sent out */
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE,
SCTP_U32(ntohl(ecne->lowest_tsn)));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Section: 6.2 Acknowledgement on Reception of DATA Chunks
*
* The SCTP endpoint MUST always acknowledge the reception of each valid
* DATA chunk.
*
* The guidelines on delayed acknowledgement algorithm specified in
* Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
* acknowledgement SHOULD be generated for at least every second packet
* (not every second DATA chunk) received, and SHOULD be generated within
* 200 ms of the arrival of any unacknowledged DATA chunk. In some
* situations it may be beneficial for an SCTP transmitter to be more
* conservative than the algorithms detailed in this document allow.
* However, an SCTP transmitter MUST NOT be more aggressive than the
* following algorithms allow.
*
 * An SCTP receiver MUST NOT generate more than one SACK for every
* incoming packet, other than to update the offered window as the
* receiving application consumes new data.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_arg_t force = SCTP_NOFORCE();
int error;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
error = sctp_eat_data(asoc, chunk, commands);
switch (error) {
case SCTP_IERROR_NO_ERROR:
break;
case SCTP_IERROR_HIGH_TSN:
case SCTP_IERROR_BAD_STREAM:
SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
goto discard_noforce;
case SCTP_IERROR_DUP_TSN:
case SCTP_IERROR_IGNORE_TSN:
SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
goto discard_force;
case SCTP_IERROR_NO_DATA:
goto consume;
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
default:
BUG();
}
if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
force = SCTP_FORCE();
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* If this is the last chunk in a packet, we need to count it
* toward sack generation. Note that we need to SACK every
* OTHER packet containing data chunks, EVEN IF WE DISCARD
 * THEM. We elect to NOT generate SACKs if the chunk fails
* the verification tag test.
*
* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
*
* The SCTP endpoint MUST always acknowledge the reception of
* each valid DATA chunk.
*
* The guidelines on delayed acknowledgement algorithm
* specified in Section 4.2 of [RFC2581] SHOULD be followed.
* Specifically, an acknowledgement SHOULD be generated for at
* least every second packet (not every second DATA chunk)
* received, and SHOULD be generated within 200 ms of the
* arrival of any unacknowledged DATA chunk. In some
* situations it may be beneficial for an SCTP transmitter to
* be more conservative than the algorithms detailed in this
* document allow. However, an SCTP transmitter MUST NOT be
* more aggressive than the following algorithms allow.
*/
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
return SCTP_DISPOSITION_CONSUME;
discard_force:
/* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
*
* When a packet arrives with duplicate DATA chunk(s) and with
* no new DATA chunk(s), the endpoint MUST immediately send a
* SACK with no delay. If a packet arrives with duplicate
* DATA chunk(s) bundled with new DATA chunks, the endpoint
* MAY immediately send a SACK. Normally receipt of duplicate
* DATA chunks will occur when the original SACK chunk was lost
* and the peer's RTO has expired. The duplicate TSN number(s)
* SHOULD be reported in the SACK as duplicate.
*/
/* In our case, we split the MAY SACK advice based on whether or not
 * the last chunk is a duplicate.
*/
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
return SCTP_DISPOSITION_DISCARD;
discard_noforce:
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
return SCTP_DISPOSITION_DISCARD;
consume:
return SCTP_DISPOSITION_CONSUME;
}
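/* Illustrative timeline (hypothetical, assuming the delayed-ack rule
 * quoted above): for packets P1..P4 each ending in new DATA chunks,
 * SACKs are generated at least for P2 and P4 (every second packet),
 * while a packet carrying only duplicate DATA takes the discard_force
 * path and triggers an immediate SCTP_FORCE() SACK.
 */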
/*
* sctp_sf_eat_data_fast_4_4
*
* Section: 4 (4)
* (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received
* DATA chunks without delay.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
int error;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
error = sctp_eat_data(asoc, chunk, commands);
switch (error) {
case SCTP_IERROR_NO_ERROR:
case SCTP_IERROR_HIGH_TSN:
case SCTP_IERROR_DUP_TSN:
case SCTP_IERROR_IGNORE_TSN:
case SCTP_IERROR_BAD_STREAM:
break;
case SCTP_IERROR_NO_DATA:
goto consume;
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
default:
BUG();
}
/* Go ahead and force a SACK, since we are shutting down. */
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
if (chunk->end_of_packet) {
/* We must delay the chunk creation since the cumulative
* TSN has not been updated yet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
}
consume:
return SCTP_DISPOSITION_CONSUME;
}
/*
* Section: 6.2 Processing a Received SACK
* D) Any time a SACK arrives, the endpoint performs the following:
*
* i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point,
* then drop the SACK. Since Cumulative TSN Ack is monotonically
* increasing, a SACK whose Cumulative TSN Ack is less than the
* Cumulative TSN Ack Point indicates an out-of-order SACK.
*
* ii) Set rwnd equal to the newly received a_rwnd minus the number
* of bytes still outstanding after processing the Cumulative TSN Ack
* and the Gap Ack Blocks.
*
* iii) If the SACK is missing a TSN that was previously
* acknowledged via a Gap Ack Block (e.g., the data receiver
* reneged on the data), then mark the corresponding DATA chunk
* as available for retransmit: Mark it as missing for fast
* retransmit as described in Section 7.2.4 and if no retransmit
* timer is running for the destination address to which the DATA
* chunk was originally transmitted, then T3-rtx is started for
* that destination address.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_sackhdr_t *sackh;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Pull the SACK chunk from the data buffer */
sackh = sctp_sm_pull_sack(chunk);
/* Was this a bogus SACK? */
if (!sackh)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
chunk->subh.sack_hdr = sackh;
ctsn = ntohl(sackh->cum_tsn_ack);
/* i) If Cumulative TSN Ack is less than the Cumulative TSN
* Ack Point, then drop the SACK. Since Cumulative TSN
* Ack is monotonically increasing, a SACK whose
* Cumulative TSN Ack is less than the Cumulative TSN Ack
* Point indicates an out-of-order SACK.
*/
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
/* If the Cumulative TSN Ack is beyond the max TSN currently
 * sent, terminate the association and respond to the
 * sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
/* Return this SACK for further processing. */
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk));
/* Note: We do the rest of the work on the PROCESS_SACK
 * side effect.
*/
return SCTP_DISPOSITION_CONSUME;
}
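/* Worked example for rule ii) above (hypothetical numbers): if the
 * SACK carries a_rwnd = 64000 and 12000 bytes are still outstanding
 * after processing the Cumulative TSN Ack and Gap Ack Blocks, the
 * PROCESS_SACK side effect computes rwnd = 64000 - 12000 = 52000.
 */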
/*
* Generate an ABORT in response to a packet.
*
* Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41
*
* 8) The receiver should respond to the sender of the OOTB packet with
* an ABORT. When sending the ABORT, the receiver of the OOTB packet
* MUST fill in the Verification Tag field of the outbound packet
* with the value found in the Verification Tag field of the OOTB
* packet and set the T-bit in the Chunk Flags to indicate that the
* Verification Tag is reflected. After sending this ABORT, the
* receiver of the OOTB packet shall discard the OOTB packet and take
* no further action.
*
* Verification Tag:
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort;
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (packet) {
/* Make an ABORT. The T bit will be set if the asoc
* is NULL.
*/
abort = sctp_make_abort(asoc, chunk, 0);
if (!abort) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
/* Set the skb to the owning sock for accounting. */
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
return SCTP_DISPOSITION_CONSUME;
}
return SCTP_DISPOSITION_NOMEM;
}
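/* Reflection example (hypothetical values): an OOTB packet arriving
 * with Verification Tag 0x1234 and no matching association is answered
 * by an ABORT whose T bit is set and whose outbound packet vtag is the
 * reflected 0x1234, per rule 8) quoted above.
 */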
/*
* Received an ERROR chunk from peer. Generate SCTP_REMOTE_ERROR
* event as ULP notification for each cause included in the chunk.
*
* API 5.3.1.3 - SCTP_REMOTE_ERROR
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_operr_notify(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_errhdr_t *err;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ERROR chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
sctp_walk_errors(err, chunk->chunk_hdr);
if ((void *)err != (void *)chunk->chunk_end)
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)err, commands);
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
SCTP_CHUNK(chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process an inbound SHUTDOWN ACK.
*
* From Section 9.2:
* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
* stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its
* peer, and remove all record of the association.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_final(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *reply;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* 10.2 H) SHUTDOWN COMPLETE notification
*
* When SCTP completes the shutdown procedures (section 9.2) this
* notification is passed to the upper layer.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
0, 0, 0, NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
/* ...send a SHUTDOWN COMPLETE chunk to its peer, */
reply = sctp_make_shutdown_complete(asoc, chunk);
if (!reply)
goto nomem_chunk;
/* Do all the commands now (after allocation), so that we
 * have a consistent state if memory allocation fails.
 */
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
* stop the T2-shutdown timer,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* ...and remove all record of the association. */
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
nomem_chunk:
sctp_ulpevent_free(ev);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41.
*
* 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
* respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
* When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
* packet must fill in the Verification Tag field of the outbound
* packet with the Verification Tag received in the SHUTDOWN ACK and
* set the T-bit in the Chunk Flags to indicate that the Verification
* Tag is reflected.
*
* 8) The receiver should respond to the sender of the OOTB packet with
* an ABORT. When sending the ABORT, the receiver of the OOTB packet
* MUST fill in the Verification Tag field of the outbound packet
* with the value found in the Verification Tag field of the OOTB
* packet and set the T-bit in the Chunk Flags to indicate that the
* Verification Tag is reflected. After sending this ABORT, the
* receiver of the OOTB packet shall discard the OOTB packet and take
* no further action.
*/
sctp_disposition_t sctp_sf_ootb(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sk_buff *skb = chunk->skb;
sctp_chunkhdr_t *ch;
sctp_errhdr_t *err;
__u8 *ch_end;
int ootb_shut_ack = 0;
int ootb_cookie_ack = 0;
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
/* Report violation if the chunk is less than minimal */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
if (SCTP_CID_SHUTDOWN_ACK == ch->type)
ootb_shut_ack = 1;
/* RFC 2960, Section 3.3.7
* Moreover, under any circumstances, an endpoint that
* receives an ABORT MUST NOT respond to that ABORT by
* sending an ABORT of its own.
*/
if (SCTP_CID_ABORT == ch->type)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* RFC 2960 8.4, 7) If the packet contains a "Stale cookie" ERROR
* or a COOKIE ACK the SCTP Packet should be silently
* discarded.
*/
if (SCTP_CID_COOKIE_ACK == ch->type)
ootb_cookie_ack = 1;
if (SCTP_CID_ERROR == ch->type) {
sctp_walk_errors(err, ch) {
if (SCTP_ERROR_STALE_COOKIE == err->cause) {
ootb_cookie_ack = 1;
break;
}
}
}
/* Report violation if chunk len overflows */
ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
if (ootb_shut_ack)
return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
else if (ootb_cookie_ack)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
else
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
}
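/* Note on the chunk walk above (illustrative): chunk lengths are
 * padded to 4-byte boundaries on the wire, so a chunk whose length
 * field reads 6 advances the cursor by WORD_ROUND(6) == 8 bytes; the
 * tail-pointer check rejects lengths that would overflow the skb.
 */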
/*
* Handle an "Out of the blue" SHUTDOWN ACK.
*
* Section: 8.4 5, sctpimpguide 2.41.
*
* 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
* respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
* When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
* packet must fill in the Verification Tag field of the outbound
* packet with the Verification Tag received in the SHUTDOWN ACK and
* set the T-bit in the Chunk Flags to indicate that the Verification
* Tag is reflected.
*
* Inputs
* (endpoint, asoc, type, arg, commands)
*
* Outputs
* (sctp_disposition_t)
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *shut;
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (packet) {
/* Make a SHUTDOWN_COMPLETE.
* The T bit will be set if the asoc is NULL.
*/
shut = sctp_make_shutdown_complete(asoc, chunk);
if (!shut) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(shut))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
/* Set the skb to the owning sock for accounting. */
shut->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, shut);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
/* If the chunk length is invalid, we don't want to process
 * the rest of the packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* We need to discard the rest of the packet to prevent
 * potential bombing attacks from additional bundled chunks.
* This is documented in SCTP Threats ID.
*/
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state.
*
* Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK
* If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the
* procedures in section 8.4 SHOULD be followed, in other words it
* should be treated as an Out Of The Blue packet.
* [This means that we do NOT check the Verification Tag on these
* chunks. --piggy ]
*
*/
sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Although we do have an association in this case, it corresponds
* to a restarted association. So the packet is treated as an OOTB
* packet and the state function that handles OOTB SHUTDOWN_ACK is
* called with a NULL association.
*/
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
}
/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */
sctp_disposition_t sctp_sf_do_asconf(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *asconf_ack = NULL;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
__u32 serial;
int length;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* ADD-IP: Section 4.1.1
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!net->sctp.addip_noauth && !chunk->auth)
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the ASCONF ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
hdr = (sctp_addiphdr_t *)chunk->skb->data;
serial = ntohl(hdr->serial);
addr_param = (union sctp_addr_param *)hdr->params;
length = ntohs(addr_param->p.length);
if (length < sizeof(sctp_paramhdr_t))
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)addr_param, commands);
/* Verify the ASCONF chunk before processing it. */
if (!sctp_verify_asconf(asoc,
(sctp_paramhdr_t *)((void *)addr_param + length),
(void *)chunk->chunk_end,
&err_param))
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)err_param, commands);
/* ADDIP 5.2 E1) Compare the value of the serial number to the value
* the endpoint stored in a new association variable
* 'Peer-Serial-Number'.
*/
if (serial == asoc->peer.addip_serial + 1) {
/* If this is the first instance of ASCONF in the packet,
* we can clean our old ASCONF-ACKs.
*/
if (!chunk->has_asconf)
sctp_assoc_clean_asconf_ack_cache(asoc);
/* ADDIP 5.2 E4) When the Sequence Number matches the next one
* expected, process the ASCONF as described below and after
* processing the ASCONF Chunk, append an ASCONF-ACK Chunk to
* the response packet and cache a copy of it (in the event it
* later needs to be retransmitted).
*
* Essentially, do V1-V5.
*/
asconf_ack = sctp_process_asconf((struct sctp_association *)
asoc, chunk);
if (!asconf_ack)
return SCTP_DISPOSITION_NOMEM;
} else if (serial < asoc->peer.addip_serial + 1) {
/* ADDIP 5.2 E2)
* If the value found in the Sequence Number is less than the
 * ('Peer-Serial-Number' + 1), simply skip to the next
* ASCONF, and include in the outbound response packet
* any previously cached ASCONF-ACK response that was
* sent and saved that matches the Sequence Number of the
* ASCONF. Note: It is possible that no cached ASCONF-ACK
* Chunk exists. This will occur when an older ASCONF
* arrives out of order. In such a case, the receiver
 * should skip the ASCONF Chunk and not include an ASCONF-ACK
* Chunk for that chunk.
*/
asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
if (!asconf_ack)
return SCTP_DISPOSITION_DISCARD;
/* Reset the transport so that we select the correct one
* this time around. This is to make sure that we don't
* accidentally use a stale transport that's been removed.
*/
asconf_ack->transport = NULL;
} else {
/* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
* it must be either a stale packet or from an attacker.
*/
return SCTP_DISPOSITION_DISCARD;
}
/* ADDIP 5.2 E6) The destination address of the SCTP packet
* containing the ASCONF-ACK Chunks MUST be the source address of
* the SCTP packet that held the ASCONF Chunks.
*
 * To do this properly, we'll set the destination address of the chunk
 * and, at transmit time, try to look up the transport to use.
* Since ASCONFs may be bundled, the correct transport may not be
* created until we process the entire packet, thus this workaround.
*/
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
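/* An address added by this ASCONF is unconfirmed until it answers a
 * HEARTBEAT (RFC 4960 5.4 path verification), so probe the new
 * transport right away before handing it traffic.
 */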
if (asoc->new_transport) {
sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands);
((struct sctp_association *)asoc)->new_transport = NULL;
}
return SCTP_DISPOSITION_CONSUME;
}
/*
* ADDIP Section 4.3 General rules for address manipulation
* When building TLV parameters for the ASCONF Chunk that will add or
* delete IP addresses the D0 to D13 rules should be applied:
*/
sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *asconf_ack = arg;
struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
struct sctp_chunk *abort;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *addip_hdr;
__u32 sent_serial, rcvd_serial;
if (!sctp_vtag_verify(asconf_ack, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* ADD-IP, Section 4.1.2:
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!net->sctp.addip_noauth && !asconf_ack->auth)
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
rcvd_serial = ntohl(addip_hdr->serial);
/* Verify the ASCONF-ACK chunk before processing it. */
if (!sctp_verify_asconf(asoc,
(sctp_paramhdr_t *)addip_hdr->params,
(void *)asconf_ack->chunk_end,
&err_param))
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)err_param, commands);
if (last_asconf) {
addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
sent_serial = ntohl(addip_hdr->serial);
} else {
sent_serial = asoc->addip_serial - 1;
}
/* D0) If an endpoint receives an ASCONF-ACK that is greater than or
* equal to the next serial number to be used but no ASCONF chunk is
* outstanding the endpoint MUST ABORT the association. Note that a
* sequence number is greater than if it is no more than 2^^31-1
* larger than the current sequence number (using serial arithmetic).
*/
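/* Worked example: if sent_serial == 0xFFFFFFFF the next expected
 * serial wraps to 0, and an ACK carrying serial 0 with no ASCONF
 * outstanding still trips this check. ADDIP_SERIAL_gte() is assumed
 * to implement RFC 1982-style serial comparison, roughly:
 *
 *	static inline int serial_gte(__u32 a, __u32 b)
 *	{
 *		return (__s32)(a - b) >= 0;
 *	}
 *
 * which is true iff a is at b or no more than 2^31 - 1 steps past it.
 */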
if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) &&
!(asoc->addip_last_asconf)) {
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
asconf_ack)) {
/* Successfully processed ASCONF_ACK. We can
* release the next asconf if we have one.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
}
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
return SCTP_DISPOSITION_DISCARD;
}
/*
* PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP
*
* When a FORWARD TSN chunk arrives, the data receiver MUST first update
* its cumulative TSN point to the value carried in the FORWARD TSN
* chunk, and then MUST further advance its cumulative TSN point locally
* if possible.
* After the above processing, the data receiver MUST stop reporting any
* missing TSNs earlier than or equal to the new cumulative TSN point.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
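/* len now covers the FORWARD TSN header plus any skip entries. Pull
 * the whole region; fwdtsn_hdr still points at the pulled bytes,
 * which remain valid in the skb.
 */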
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
pr_debug("%s: TSN 0x%x\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto discard_noforce;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
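/* Only hand the chunk to FWDTSN processing when it actually carries
 * skip entries; a bare header is fully handled by the REPORT above.
 */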
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
}
sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
pr_debug("%s: TSN 0x%x\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto gen_shutdown;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Go ahead and force a SACK, since we are shutting down. */
gen_shutdown:
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
}
/*
 * SCTP-AUTH Section 6.3 Receiving authenticated chunks
*
* The receiver MUST use the HMAC algorithm indicated in the HMAC
* Identifier field. If this algorithm was not specified by the
* receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk
* during association setup, the AUTH chunk and all chunks after it MUST
* be discarded and an ERROR chunk SHOULD be sent with the error cause
* defined in Section 4.1.
*
* If an endpoint with no shared key receives a Shared Key Identifier
* other than 0, it MUST silently discard all authenticated chunks. If
* the endpoint has at least one endpoint pair shared key for the peer,
* it MUST use the key specified by the Shared Key Identifier if a
* key has been configured for that Shared Key Identifier. If no
* endpoint pair shared key has been configured for that Shared Key
* Identifier, all authenticated chunks MUST be silently discarded.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* The return value is the disposition of the chunk.
*/
static sctp_ierror_t sctp_sf_authenticate(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
struct sctp_chunk *chunk)
{
struct sctp_authhdr *auth_hdr;
struct sctp_hmac *hmac;
unsigned int sig_len;
__u16 key_id;
__u8 *save_digest;
__u8 *digest;
/* Pull in the auth header, so we can do some more verification */
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
chunk->subh.auth_hdr = auth_hdr;
skb_pull(chunk->skb, sizeof(struct sctp_authhdr));
/* Make sure that we support the HMAC algorithm from the auth
* chunk.
*/
if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id))
return SCTP_IERROR_AUTH_BAD_HMAC;
/* Make sure that the provided shared key identifier has been
* configured
*/
key_id = ntohs(auth_hdr->shkey_id);
if (key_id != asoc->active_key_id && !sctp_auth_get_shkey(asoc, key_id))
return SCTP_IERROR_AUTH_BAD_KEYID;
/* Make sure that the length of the signature matches what
* we expect.
*/
sig_len = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_auth_chunk_t);
hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id));
if (sig_len != hmac->hmac_len)
return SCTP_IERROR_PROTO_VIOLATION;
/* Now that we've done validation checks, we can compute and
* verify the hmac. The steps involved are:
* 1. Save the digest from the chunk.
* 2. Zero out the digest in the chunk.
* 3. Compute the new digest
* 4. Compare saved and new digests.
*/
digest = auth_hdr->hmac;
skb_pull(chunk->skb, sig_len);
save_digest = kmemdup(digest, sig_len, GFP_ATOMIC);
if (!save_digest)
goto nomem;
memset(digest, 0, sig_len);
sctp_auth_calculate_hmac(asoc, chunk->skb,
(struct sctp_auth_chunk *)chunk->chunk_hdr,
GFP_ATOMIC);
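/* Note: memcmp() is not constant-time; a hardened variant could use
 * crypto_memneq() so that response timing leaks nothing about the
 * expected digest.
 */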
/* Discard the packet if the digests do not match */
if (memcmp(save_digest, digest, sig_len)) {
kfree(save_digest);
return SCTP_IERROR_BAD_SIG;
}
kfree(save_digest);
chunk->auth = 1;
return SCTP_IERROR_NO_ERROR;
nomem:
return SCTP_IERROR_NOMEM;
}
sctp_disposition_t sctp_sf_eat_auth(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_authhdr *auth_hdr;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *err_chunk;
sctp_ierror_t error;
/* Make sure that the peer has AUTH capable */
if (!asoc->peer.auth_capable)
return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* Make sure that the AUTH chunk has valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
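/* Keep a pointer to the auth header: sctp_sf_authenticate() pulls
 * the skb past it, but the bytes themselves stay valid, so shkey_id
 * can still be read through auth_hdr afterwards.
 */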
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
switch (error) {
case SCTP_IERROR_AUTH_BAD_HMAC:
/* Generate the ERROR chunk and discard the rest
* of the packet
*/
err_chunk = sctp_make_op_error(asoc, chunk,
SCTP_ERROR_UNSUP_HMAC,
&auth_hdr->hmac_id,
sizeof(__u16), 0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Fall Through */
case SCTP_IERROR_AUTH_BAD_KEYID:
case SCTP_IERROR_BAD_SIG:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
case SCTP_IERROR_NOMEM:
return SCTP_DISPOSITION_NOMEM;
default: /* Prevent gcc warnings */
break;
}
if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) {
struct sctp_ulpevent *ev;
ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id),
SCTP_AUTH_NEWKEY, GFP_ATOMIC);
if (!ev)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
}
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process an unknown chunk.
*
* Section: 3.2. Also, 2.1 in the implementor's guide.
*
* Chunk Types are encoded such that the highest-order two bits specify
* the action that must be taken if the processing endpoint does not
* recognize the Chunk Type.
*
* 00 - Stop processing this SCTP packet and discard it, do not process
* any further chunks within it.
*
* 01 - Stop processing this SCTP packet and discard it, do not process
* any further chunks within it, and report the unrecognized
* chunk in an 'Unrecognized Chunk Type'.
*
* 10 - Skip this chunk and continue processing.
*
* 11 - Skip this chunk and continue processing, but report in an ERROR
* Chunk using the 'Unrecognized Chunk Type' cause of error.
*
* The return value is the disposition of the chunk.
*/
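/* Worked example: ASCONF has chunk type 0xC1, so its two high-order
 * bits are 11 (0xC1 & SCTP_CID_ACTION_MASK == SCTP_CID_ACTION_SKIP_ERR)
 * and an endpoint that does not recognize it lands in the "skip and
 * report" case below.
 */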
sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *unk_chunk = arg;
struct sctp_chunk *err_chunk;
sctp_chunkhdr_t *hdr;
pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk);
if (!sctp_vtag_verify(unk_chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the chunk has a valid length.
* Since we don't know the chunk type, we use a general
* chunkhdr structure to make a comparison.
*/
if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
switch (type.chunk & SCTP_CID_ACTION_MASK) {
case SCTP_CID_ACTION_DISCARD:
/* Discard the packet. */
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case SCTP_CID_ACTION_DISCARD_ERR:
/* Generate an ERROR chunk as response. */
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
WORD_ROUND(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Discard the packet. */
sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
return SCTP_DISPOSITION_CONSUME;
case SCTP_CID_ACTION_SKIP:
/* Skip the chunk. */
return SCTP_DISPOSITION_DISCARD;
case SCTP_CID_ACTION_SKIP_ERR:
/* Generate an ERROR chunk as response. */
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
WORD_ROUND(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Skip the chunk. */
return SCTP_DISPOSITION_CONSUME;
default:
break;
}
return SCTP_DISPOSITION_DISCARD;
}
/*
* Discard the chunk.
*
* Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2
* [Too numerous to mention...]
* Verification Tag: No verification needed.
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_discard_chunk(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the chunk has a valid length.
* Since we don't know the chunk type, we use a general
* chunkhdr structure to make a comparison.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk);
return SCTP_DISPOSITION_DISCARD;
}
/*
* Discard the whole packet.
*
* Section: 8.4 2)
*
* 2) If the OOTB packet contains an ABORT chunk, the receiver MUST
* silently discard the OOTB packet and take no further action.
*
* Verification Tag: No verification necessary
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_pdiscard(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
}
/*
* The other end is violating protocol.
*
* Section: Not specified
* Verification Tag: Not specified
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* We simply tag the chunk as a violation. The state machine will log
* the violation and continue.
*/
sctp_disposition_t sctp_sf_violation(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
return SCTP_DISPOSITION_VIOLATION;
}
/*
* Common function to handle a protocol violation.
*/
static sctp_disposition_t sctp_sf_abort_violation(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
/* SCTP-AUTH, Section 6.3:
* It should be noted that if the receiver wants to tear
* down an association in an authenticated way only, the
* handling of malformed packets should not result in
* tearing down the association.
*
* This means that if we only want to abort associations
* in an authenticated way (i.e AUTH+ABORT), then we
* can't destroy this association just because the packet
* was malformed.
*/
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
goto discard;
/* Make the abort chunk. */
abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
if (!abort)
goto nomem;
if (asoc) {
/* Treat INIT-ACK as a special case during COOKIE-WAIT. */
if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK &&
!asoc->peer.i.init_tag) {
sctp_initack_chunk_t *initack;
initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
if (!sctp_chunk_length_valid(chunk,
sizeof(sctp_initack_chunk_t)))
abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T;
else {
unsigned int inittag;
inittag = ntohl(initack->init_hdr.init_tag);
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG,
SCTP_U32(inittag));
}
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNREFUSED));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
}
} else {
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (!packet)
goto nomem_pkt;
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
}
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
discard:
sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
return SCTP_DISPOSITION_ABORT;
nomem_pkt:
sctp_chunk_free(abort);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle a protocol violation when the chunk length is invalid.
* "Invalid" length is identified as smaller than the minimal length a
* given chunk can be. For example, a SACK chunk has invalid length
* if its length is set to be smaller than the size of sctp_sack_chunk_t.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*
* Section: Not specified
* Verification Tag: Nothing to do
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (reply_msg, msg_up, counters)
*
* Generate an ABORT chunk and terminate the association.
*/
static sctp_disposition_t sctp_sf_violation_chunklen(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The following chunk had invalid length:";
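/* sizeof(err_str) includes the trailing NUL, so the text travels
 * NUL-terminated inside the error cause payload.
 */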
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/*
 * Handle a protocol violation when the parameter length is invalid.
 * The length is considered invalid if it is smaller than the minimum
 * length of the given parameter, or if the accumulated length of
 * multiple parameters runs past the end of the chunk.
*/
static sctp_disposition_t sctp_sf_violation_paramlen(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, void *ext,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_paramhdr *param = ext;
struct sctp_chunk *abort = NULL;
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
goto discard;
/* Make the abort chunk. */
abort = sctp_make_violation_paramlen(asoc, chunk, param);
if (!abort)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
discard:
sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
return SCTP_DISPOSITION_ABORT;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Handle a protocol violation when the peer tries to advance the
 * Cumulative TSN Ack to a point beyond the max TSN we have sent.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*/
static sctp_disposition_t sctp_sf_violation_ctsn(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/* Handle the protocol violation of invalid chunk bundling. For example,
 * when we have an association and we receive a bundled INIT-ACK or
 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
* statement from the specs. Additionally, there might be an attacker
* on the path and we may not want to continue this communication.
*/
static sctp_disposition_t sctp_sf_violation_chunk(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The following chunk violates protocol:";
if (!asoc)
return sctp_sf_violation(net, ep, asoc, type, arg, commands);
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/***************************************************************************
* These are the state functions for handling primitive (Section 10) events.
***************************************************************************/
/*
* sctp_sf_do_prm_asoc
*
* Section: 10.1 ULP-to-SCTP
* B) Associate
*
* Format: ASSOCIATE(local SCTP instance name, destination transport addr,
* outbound stream count)
* -> association id [,destination transport addr list] [,outbound stream
* count]
*
* This primitive allows the upper layer to initiate an association to a
* specific peer endpoint.
*
* The peer endpoint shall be specified by one of the transport addresses
* which defines the endpoint (see Section 1.4). If the local SCTP
* instance has not been initialized, the ASSOCIATE is considered an
* error.
* [This is not relevant for the kernel implementation since we do all
 * initialization at boot time. If we hadn't initialized, we wouldn't
* get anywhere near this code.]
*
* An association id, which is a local handle to the SCTP association,
* will be returned on successful establishment of the association. If
* SCTP is not able to open an SCTP association with the peer endpoint,
* an error is returned.
* [In the kernel implementation, the struct sctp_association needs to
* be created BEFORE causing this primitive to run.]
*
* Other association parameters may be returned, including the
* complete destination transport addresses of the peer as well as the
* outbound stream count of the local endpoint. One of the transport
 * addresses from the returned destination addresses will be selected by
* the local endpoint as default primary path for sending SCTP packets
* to this peer. The returned "destination transport addr list" can
* be used by the ULP to change the default primary path or to force
* sending a packet to a specific transport address. [All of this
* stuff happens when the INIT ACK arrives. This is a NON-BLOCKING
* function.]
*
* Mandatory attributes:
*
* o local SCTP instance name - obtained from the INITIALIZE operation.
* [This is the argument asoc.]
* o destination transport addr - specified as one of the transport
* addresses of the peer endpoint with which the association is to be
* established.
* [This is asoc->peer.active_path.]
* o outbound stream count - the number of outbound streams the ULP
* would like to open towards this peer endpoint.
* [BUG: This is not currently implemented.]
* Optional attributes:
*
* None.
*
* The return value is a disposition.
*/
sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl;
struct sctp_association *my_asoc;
/* The comment below says that we enter COOKIE-WAIT AFTER
* sending the INIT, but that doesn't actually work in our
* implementation...
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
/* RFC 2960 5.1 Normal Establishment of an Association
*
* A) "A" first sends an INIT chunk to "Z". In the INIT, "A"
* must provide its Verification Tag (Tag_A) in the Initiate
* Tag field. Tag_A SHOULD be a random number in the range of
* 1 to 4294967295 (see 5.3.1 for Tag value selection). ...
*/
repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
if (!repl)
goto nomem;
/* Choose transport for INIT. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* Cast away the const modifier, as we want to just
 * rerun it through as a side effect.
*/
my_asoc = (struct sctp_association *)asoc;
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));
/* After sending the INIT, "A" starts the T1-init timer and
* enters the COOKIE-WAIT state.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process the SEND primitive.
*
* Section: 10.1 ULP-to-SCTP
* E) Send
*
* Format: SEND(association id, buffer address, byte count [,context]
* [,stream id] [,life time] [,destination transport address]
* [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
* -> result
*
* This is the main method to send user data via SCTP.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* o buffer address - the location where the user message to be
* transmitted is stored;
*
* o byte count - The size of the user data in number of bytes;
*
* Optional attributes:
*
* o context - an optional 32 bit integer that will be carried in the
* sending failure notification to the ULP if the transportation of
* this User Message fails.
*
* o stream id - to indicate which stream to send the data on. If not
* specified, stream 0 will be used.
*
* o life time - specifies the life time of the user data. The user data
* will not be sent by SCTP after the life time expires. This
* parameter can be used to avoid efforts to transmit stale
* user messages. SCTP notifies the ULP if the data cannot be
* initiated to transport (i.e. sent to the destination via SCTP's
* send primitive) within the life time variable. However, the
* user data will be transmitted if SCTP has attempted to transmit a
* chunk before the life time expired.
*
* o destination transport address - specified as one of the destination
* transport addresses of the peer endpoint to which this packet
* should be sent. Whenever possible, SCTP should use this destination
* transport address for sending the packets, instead of the current
* primary path.
*
* o unorder flag - this flag, if present, indicates that the user
* would like the data delivered in an unordered fashion to the peer
* (i.e., the U flag is set to 1 on all DATA chunks carrying this
* message).
*
* o no-bundle flag - instructs SCTP not to bundle this user data with
* other outbound DATA chunks. SCTP MAY still bundle even when
* this flag is present, when faced with network congestion.
*
* o payload protocol-id - A 32 bit unsigned integer that is to be
* passed to the peer indicating the type of payload protocol data
* being transmitted. This value is passed as opaque data by SCTP.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_prm_send(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_datamsg *msg = arg;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process the SHUTDOWN primitive.
*
* Section: 10.1:
* C) Shutdown
*
* Format: SHUTDOWN(association id)
* -> result
*
* Gracefully closes an association. Any locally queued user data
* will be delivered to the peer. The association will be terminated only
* after the peer acknowledges all the SCTP packets sent. A success code
* will be returned on successful termination of the association. If
* attempting to terminate the association results in a failure, an error
* code shall be returned.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* Optional attributes:
*
* None.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
int disposition;
/* From 9.2 Shutdown of an Association
* Upon receipt of the SHUTDOWN primitive from its upper
* layer, the endpoint enters SHUTDOWN-PENDING state and
* remains there until all outstanding data has been
* acknowledged by its peer. The endpoint accepts no new data
* from its upper layer, but retransmits data to the far end
* if necessary to fill gaps.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
arg, commands);
}
return disposition;
}
/*
* Process the ABORT primitive.
*
* Section: 10.1:
* C) Abort
*
* Format: Abort(association id [, cause code])
* -> result
*
* Ungracefully closes an association. Any locally queued user data
* will be discarded and an ABORT chunk is sent to the peer. A success code
* will be returned on successful abortion of the association. If
* attempting to abort the association results in a failure, an error
* code shall be returned.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
 * Optional attributes:
 *
 * o cause code - reason for the abort to be passed to the peer
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_1_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* From 9.1 Abort of an Association
 * Upon receipt of the ABORT primitive from its upper
 * layer, the endpoint enters the CLOSED state and
 * discards all outstanding data. The endpoint accepts no
 * new data from its upper layer; unlike SHUTDOWN, nothing
 * is retransmitted to the far end.
 */
struct sctp_chunk *abort = arg;
sctp_disposition_t retval;
retval = SCTP_DISPOSITION_CONSUME;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_USER_ABORT));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return retval;
}
/* We tried an illegal operation on an association which is closed. */
sctp_disposition_t sctp_sf_error_closed(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL));
return SCTP_DISPOSITION_CONSUME;
}
/* We tried an illegal operation on an association which is shutting
* down.
*/
sctp_disposition_t sctp_sf_error_shutdown(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR,
SCTP_ERROR(-ESHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_cookie_wait_prm_shutdown
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
 * The RFC does not explicitly address this issue, but this is the route
 * through the state table when someone issues a shutdown while in
 * COOKIE_WAIT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
/*
* sctp_cookie_echoed_prm_shutdown
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
 * The RFC does not explicitly address this issue, but this is the route
 * through the state table when someone issues a shutdown while in
 * COOKIE_ECHOED state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we should be able to use
* common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
}
/*
* sctp_sf_cookie_wait_prm_abort
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
 * The RFC does not explicitly address this issue, but this is the route
 * through the state table when someone issues an abort while in
 * COOKIE_WAIT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *abort = arg;
sctp_disposition_t retval;
/* Stop T1-init timer */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
retval = SCTP_DISPOSITION_CONSUME;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNREFUSED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_USER_ABORT));
return retval;
}
/*
* sctp_sf_cookie_echoed_prm_abort
*
* Section: 4 Note: 3
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
 * The RFC does not explicitly address this issue, but this is the route
 * through the state table when someone issues an abort while in
 * COOKIE_ECHOED state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we should be able to use
* common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
}
/*
* sctp_sf_shutdown_pending_prm_abort
*
* Inputs
* (endpoint, asoc)
*
 * The RFC does not explicitly address this issue, but this is the route
 * through the state table when someone issues an abort while in
 * SHUTDOWN-PENDING state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
}
/*
* sctp_sf_shutdown_sent_prm_abort
*
* Inputs
* (endpoint, asoc)
*
 * The RFC does not explicitly address this issue, but this is the route
 * through the state table when someone issues an abort while in
 * SHUTDOWN-SENT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Stop the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
}
/*
 * sctp_sf_shutdown_ack_sent_prm_abort
 *
 * Inputs
 * (endpoint, asoc)
 *
 * The RFC does not explicitly address this issue, but this is the route
 * through the state table when someone issues an abort while in
 * SHUTDOWN-ACK-SENT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* The same T2 timer, so we should be able to use
* common function with the SHUTDOWN-SENT state.
*/
return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
}
/*
* Process the REQUESTHEARTBEAT primitive
*
* 10.1 ULP-to-SCTP
* J) Request Heartbeat
*
* Format: REQUESTHEARTBEAT(association id, destination transport address)
*
* -> result
*
* Instructs the local endpoint to perform a HeartBeat on the specified
* destination transport address of the given association. The returned
* result should indicate whether the transmission of the HEARTBEAT
* chunk to the destination address is successful.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* o destination transport address - the transport address of the
* association on which a heartbeat should be issued.
*/
sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
(struct sctp_transport *)arg, commands))
return SCTP_DISPOSITION_NOMEM;
/*
* RFC 2960 (bis), section 8.3
*
* D) Request an on-demand HEARTBEAT on a specific destination
* transport address of a given association.
*
* The endpoint should increment the respective error counter of
* the destination transport address each time a HEARTBEAT is sent
* to that address and not acknowledged within one RTO.
*
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
SCTP_TRANSPORT(arg));
return SCTP_DISPOSITION_CONSUME;
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* When an endpoint has an ASCONF signaled change to be sent to the
* remote endpoint it should do A1 to A9
*/
sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Ignore the primitive event
*
* The return value is the disposition of the primitive.
*/
sctp_disposition_t sctp_sf_ignore_primitive(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
pr_debug("%s: primitive type:%d is ignored\n", __func__,
type.primitive);
return SCTP_DISPOSITION_DISCARD;
}
/***************************************************************************
* These are the state functions for the OTHER events.
***************************************************************************/
/*
* When the SCTP stack has no more user data to send or retransmit, this
* notification is given to the user. Also, at the time when a user app
* subscribes to this event, if there is no data to be sent or
 * retransmitted, the stack will immediately send up this notification.
*/
sctp_disposition_t sctp_sf_do_no_pending_tsn(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_ulpevent *event;
event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
if (!event)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Start the shutdown negotiation.
*
* From Section 9.2:
* Once all its outstanding data has been acknowledged, the endpoint
* shall send a SHUTDOWN chunk to its peer including in the Cumulative
* TSN Ack field the last sequential TSN it has received from the peer.
* It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT
* state. If the timer expires, the endpoint must re-send the SHUTDOWN
* with the updated last sequential TSN received from its peer.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply;
/* Once all its outstanding data has been acknowledged, the
* endpoint shall send a SHUTDOWN chunk to its peer including
* in the Cumulative TSN Ack field the last sequential TSN it
* has received from the peer.
*/
reply = sctp_make_shutdown(asoc, NULL);
if (!reply)
goto nomem;
/* Set the transport for the SHUTDOWN chunk and the timeout for the
* T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* It shall then start the T2-shutdown timer */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* RFC 4960 Section 9.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
 * 'T5-shutdown-guard' to bound the overall time for the shutdown sequence.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* and enter the SHUTDOWN-SENT state. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT));
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Generate a SHUTDOWN ACK now that everything is SACK'd.
*
* From Section 9.2:
*
* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
* shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own,
* entering the SHUTDOWN-ACK-SENT state. If the timer expires, the
* endpoint must re-send the SHUTDOWN ACK.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
struct sctp_chunk *reply;
/* There are 2 ways of getting here:
* 1) called in response to a SHUTDOWN chunk
* 2) called when SCTP_EVENT_NO_PENDING_TSN event is issued.
*
 * In case (2), the arg parameter is set to NULL, so we need
 * to check that we have a chunk before accessing its fields.
*/
if (chunk) {
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
}
/* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
* shall send a SHUTDOWN ACK ...
*/
reply = sctp_make_shutdown_ack(asoc, chunk);
if (!reply)
goto nomem;
/* Set the transport for the SHUTDOWN ACK chunk and the timeout for
* the T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* and start/restart a T2-shutdown timer of its own, */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* Enter the SHUTDOWN-ACK-SENT state. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT));
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Ignore the event defined as other
*
* The return value is the disposition of the event.
*/
sctp_disposition_t sctp_sf_ignore_other(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
pr_debug("%s: the event other type:%d is ignored\n",
__func__, type.other);
return SCTP_DISPOSITION_DISCARD;
}
/************************************************************
* These are the state functions for handling timeout events.
************************************************************/
/*
* RTX Timeout
*
* Section: 6.3.3 Handle T3-rtx Expiration
*
* Whenever the retransmission timer T3-rtx expires for a destination
* address, do the following:
* [See below]
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = arg;
SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
if (asoc->overall_error_count >= asoc->max_retrans) {
if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
/*
* We are here likely because the receiver had its rwnd
* closed for a while and we have not been able to
* transmit the locally queued data within the maximum
* retransmission attempts limit. Start the T5
* shutdown guard timer to give the receiver one last
* chance and some additional time to recover before
* aborting.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
}
/* E1) For the destination address for which the timer
* expires, adjust its ssthresh with rules defined in Section
* 7.2.3 and set the cwnd <- MTU.
*/
/* E2) For the destination address for which the timer
* expires, set RTO <- RTO * 2 ("back off the timer"). The
* maximum value discussed in rule C7 above (RTO.max) may be
* used to provide an upper bound to this doubling operation.
*/
/* E3) Determine how many of the earliest (i.e., lowest TSN)
* outstanding DATA chunks for the address for which the
* T3-rtx has expired will fit into a single packet, subject
* to the MTU constraint for the path corresponding to the
* destination transport address to which the retransmission
* is being sent (this may be different from the address for
* which the timer expires [see Section 6.4]). Call this
* value K. Bundle and retransmit those K DATA chunks in a
* single packet to the destination endpoint.
*
* Note: Any DATA chunks that were sent to the address for
* which the T3-rtx timer expired but did not fit in one MTU
* (rule E3 above), should be marked for retransmission and
* sent as soon as cwnd allows (normally when a SACK arrives).
*/
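/* Note: rules E1-E3 above are not coded inline; they are carried out
 * by the side effects of the STRIKE and RETRAN commands queued below
 * (cwnd/ssthresh adjustment, RTO backoff, and retransmission of the
 * earliest outstanding DATA chunks).
 */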
/* Do some failure management (Section 8.2). */
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
/* NB: Rules E4 and F1 are implicit in R1. */
sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Generate delayed SACK on timeout
*
* Section: 6.2 Acknowledgement on Reception of DATA Chunks
*
* The guidelines on delayed acknowledgement algorithm specified in
* Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
* acknowledgement SHOULD be generated for at least every second packet
* (not every second DATA chunk) received, and SHOULD be generated
* within 200 ms of the arrival of any unacknowledged DATA chunk. In
* some situations it may be beneficial for an SCTP transmitter to be
* more conservative than the algorithms detailed in this document
* allow. However, an SCTP transmitter MUST NOT be more aggressive than
* the following algorithms allow.
*/
sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_sf_t1_init_timer_expire
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* RFC 2960 Section 4 Notes
* 2) If the T1-init timer expires, the endpoint MUST retransmit INIT
* and re-start the T1-init timer without changing state. This MUST
* be repeated up to 'Max.Init.Retransmits' times. After that, the
* endpoint MUST abort the initialization process and report the
* error to SCTP user.
*
* Outputs
* (timers, events)
*
*/
sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl = NULL;
struct sctp_bind_addr *bp;
int attempts = asoc->init_err_counter + 1;
pr_debug("%s: timer T1 expired (INIT)\n", __func__);
SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
if (attempts <= asoc->max_init_attempts) {
bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
if (!repl)
return SCTP_DISPOSITION_NOMEM;
/* Choose transport for INIT. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* Issue a side effect to do the needed accounting. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
pr_debug("%s: giving up on INIT, attempts:%d "
"max_init_attempts:%d\n", __func__, attempts,
asoc->max_init_attempts);
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
}
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_sf_t1_cookie_timer_expire
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* RFC 2960 Section 4 Notes
* 3) If the T1-cookie timer expires, the endpoint MUST retransmit
* COOKIE ECHO and re-start the T1-cookie timer without changing
* state. This MUST be repeated up to 'Max.Init.Retransmits' times.
* After that, the endpoint MUST abort the initialization process and
* report the error to SCTP user.
*
* Outputs
* (timers, events)
*
*/
sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl = NULL;
int attempts = asoc->init_err_counter + 1;
pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__);
SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
if (attempts <= asoc->max_init_attempts) {
repl = sctp_make_cookie_echo(asoc, NULL);
if (!repl)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* Issue a side effect to do the needed accounting. */
sctp_add_cmd_sf(commands, SCTP_CMD_COOKIEECHO_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
}
return SCTP_DISPOSITION_CONSUME;
}
/* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN
* with the updated last sequential TSN received from its peer.
*
* An endpoint should limit the number of retransmissions of the
* SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'.
* If this threshold is exceeded the endpoint should destroy the TCB and
* MUST report the peer endpoint unreachable to the upper layer (and
* thus the association enters the CLOSED state). The reception of any
* packet from its peer (i.e. as the peer sends all of its queued DATA
* chunks) should clear the endpoint's retransmission count and restart
* the T2-Shutdown timer, giving its peer ample opportunity to transmit
* all of its queued DATA chunks that have not yet been sent.
*/
sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply = NULL;
pr_debug("%s: timer T2 expired\n", __func__);
SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
((struct sctp_association *)asoc)->shutdown_retries++;
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
switch (asoc->state) {
case SCTP_STATE_SHUTDOWN_SENT:
reply = sctp_make_shutdown(asoc, NULL);
break;
case SCTP_STATE_SHUTDOWN_ACK_SENT:
reply = sctp_make_shutdown_ack(asoc, NULL);
break;
default:
BUG();
break;
}
if (!reply)
goto nomem;
/* Do some failure management (Section 8.2).
* If the transport to which the SHUTDOWN was last sent has been
* removed, don't do failure management.
*/
if (asoc->shutdown_last_sent_to)
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
SCTP_TRANSPORT(asoc->shutdown_last_sent_to));
/* Set the transport for the SHUTDOWN/ACK chunk and the timeout for
* the T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* Restart the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* If the T4 RTO timer expires the endpoint should do B1 to B5
*/
sctp_disposition_t sctp_sf_t4_timer_expire(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = asoc->addip_last_asconf;
struct sctp_transport *transport = chunk->transport;
SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
/* ADDIP 4.1 B1) Increment the error counters and perform path failure
* detection on the appropriate destination address as defined in
* RFC2960 [5] section 8.1 and 8.2.
*/
if (transport)
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
SCTP_TRANSPORT(transport));
/* Reconfig T4 timer and transport. */
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
/* ADDIP 4.1 B2) Increment the association error counters and perform
* endpoint failure detection on the association as defined in
* RFC2960 [5] section 8.1 and 8.2.
* The association error counter is incremented in SCTP_CMD_STRIKE.
*/
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
/* ADDIP 4.1 B3) Back-off the destination address RTO value to which
* the ASCONF chunk was sent by doubling the RTO timer value.
* This is done in SCTP_CMD_STRIKE.
*/
/* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
* choose an alternate destination address (please refer to RFC2960
* [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
* chunk, it MUST be the same (including its serial number) as the last
* ASCONF sent.
*/
sctp_chunk_hold(asoc->addip_last_asconf);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(asoc->addip_last_asconf));
/* ADDIP 4.1 B5) Restart the T-4 RTO timer. Note that if a different
* destination is selected, then the RTO used will be that of the new
* destination address.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
return SCTP_DISPOSITION_CONSUME;
}
/* sctpimpguide-05 Section 2.12.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
* At the expiration of this timer the sender SHOULD abort the association
* by sending an ABORT chunk.
*/
sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply = NULL;
pr_debug("%s: timer T5 expired\n", __func__);
SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
reply = sctp_make_abort(asoc, NULL, 0);
if (!reply)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Handle expiration of AUTOCLOSE timer. When the autoclose timer expires,
* the association is automatically closed by starting the shutdown process.
* The work that needs to be done is the same as when SHUTDOWN is initiated
* by the user, so this routine looks the same as sctp_sf_do_9_2_prm_shutdown().
*/
sctp_disposition_t sctp_sf_autoclose_timer_expire(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
int disposition;
SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
/* From 9.2 Shutdown of an Association
* Upon receipt of the SHUTDOWN primitive from its upper
* layer, the endpoint enters SHUTDOWN-PENDING state and
* remains there until all outstanding data has been
* acknowledged by its peer. The endpoint accepts no new data
* from its upper layer, but retransmits data to the far end
* if necessary to fill gaps.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
arg, commands);
}
return disposition;
}
/*****************************************************************************
* These are sa state functions which could apply to all types of events.
****************************************************************************/
/*
* This table entry is not implemented.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_not_impl(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return SCTP_DISPOSITION_NOT_IMPL;
}
/*
* This table entry represents a bug.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_bug(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return SCTP_DISPOSITION_BUG;
}
/*
* This table entry represents the firing of a timer in the wrong state.
* Since timer deletion cannot be guaranteed, a timer may still end up
* firing when the association is in the wrong state. Such an event
* should be ignored, so as to prevent any re-arming of the timer.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_timer_ignore(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
pr_debug("%s: timer %d ignored\n", __func__, type.chunk);
return SCTP_DISPOSITION_CONSUME;
}
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
/* Pull the SACK chunk based on the SACK header. */
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
{
struct sctp_sackhdr *sack;
unsigned int len;
__u16 num_blocks;
__u16 num_dup_tsns;
/* Protect ourselves from reading too far into
* the skb from a bogus sender.
*/
sack = (struct sctp_sackhdr *) chunk->skb->data;
num_blocks = ntohs(sack->num_gap_ack_blocks);
num_dup_tsns = ntohs(sack->num_dup_tsns);
len = sizeof(struct sctp_sackhdr);
len += (num_blocks + num_dup_tsns) * sizeof(__u32);
if (len > chunk->skb->len)
return NULL;
skb_pull(chunk->skb, len);
return sack;
}
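/* Illustrative arithmetic for the bound check above (assumes the
* 12-byte on-the-wire sctp_sackhdr: cum_tsn_ack + a_rwnd + two 16-bit
* counts): a SACK claiming num_gap_ack_blocks == 2 and
* num_dup_tsns == 3 must carry at least
*
*     len = 12 + (2 + 3) * sizeof(__u32) = 32 bytes,
*
* so any shorter skb is rejected as bogus before skb_pull() runs.
*/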
/* Create an ABORT packet to be sent as a response, with the specified
* error causes.
*/
static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
const void *payload,
size_t paylen)
{
struct sctp_packet *packet;
struct sctp_chunk *abort;
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (packet) {
/* Make an ABORT.
* The T bit will be set if the asoc is NULL.
*/
abort = sctp_make_abort(asoc, chunk, paylen);
if (!abort) {
sctp_ootb_pkt_free(packet);
return NULL;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
/* Add specified error causes, i.e., payload, to the
* end of the chunk.
*/
sctp_addto_chunk(abort, paylen, payload);
/* Set the skb to the belonging sock for accounting. */
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
}
return packet;
}
/* Allocate a packet for responding in the OOTB conditions. */
static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk)
{
struct sctp_packet *packet;
struct sctp_transport *transport;
__u16 sport;
__u16 dport;
__u32 vtag;
/* Get the source and destination port from the inbound packet. */
sport = ntohs(chunk->sctp_hdr->dest);
dport = ntohs(chunk->sctp_hdr->source);
/* The V-tag is going to be the same as the inbound packet if no
* association exists, otherwise, use the peer's vtag.
*/
if (asoc) {
/* Special case the INIT-ACK as there is no peer's vtag
* yet.
*/
switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT_ACK:
{
sctp_initack_chunk_t *initack;
initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
vtag = ntohl(initack->init_hdr.init_tag);
break;
}
default:
vtag = asoc->peer.i.init_tag;
break;
}
} else {
/* Special case the INIT and stale COOKIE_ECHO as there is no
* vtag yet.
*/
switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT:
{
sctp_init_chunk_t *init;
init = (sctp_init_chunk_t *)chunk->chunk_hdr;
vtag = ntohl(init->init_hdr.init_tag);
break;
}
default:
vtag = ntohl(chunk->sctp_hdr->vtag);
break;
}
}
/* Make a transport for the bucket, Eliza... */
transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
if (!transport)
goto nomem;
/* Cache a route for the transport with the chunk's destination as
* the source address.
*/
sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
sctp_sk(net->sctp.ctl_sock));
packet = sctp_packet_init(&transport->packet, transport, sport, dport);
packet = sctp_packet_config(packet, vtag, 0);
return packet;
nomem:
return NULL;
}
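/* Summary of the vtag choice above:
*
*     asoc    chunk type       vtag used
*     ----    ----------       ---------
*     yes     INIT ACK         init_tag carried in the INIT ACK itself
*     yes     anything else    peer's init_tag from the association
*     no      INIT             init_tag carried in the INIT itself
*     no      anything else    vtag reflected from the inbound packet
*/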
/* Free the packet allocated earlier for responding in the OOTB condition. */
void sctp_ootb_pkt_free(struct sctp_packet *packet)
{
sctp_transport_free(packet->transport);
}
/* Send a stale cookie error when an invalid COOKIE ECHO chunk is found */
static void sctp_send_stale_cookie_err(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk)
{
struct sctp_packet *packet;
if (err_chunk) {
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (packet) {
struct sctp_signed_cookie *cookie;
/* Override the OOTB vtag from the cookie. */
cookie = chunk->subh.cookie_hdr;
packet->vtag = cookie->c.peer_vtag;
/* Set the skb to the belonging sock for accounting. */
err_chunk->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, err_chunk);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
} else
sctp_chunk_free (err_chunk);
}
}
/* Process a data chunk */
static int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands)
{
sctp_datahdr_t *data_hdr;
struct sctp_chunk *err;
size_t datalen;
sctp_verb_t deliver;
int tmp;
__u32 tsn;
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
struct sock *sk = asoc->base.sk;
struct net *net = sock_net(sk);
u16 ssn;
u16 sid;
u8 ordered = 0;
data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
tsn = ntohl(data_hdr->tsn);
pr_debug("%s: TSN 0x%x\n", __func__, tsn);
/* ASSERT: Now skb->data is really the user data. */
/* Process ECN based congestion.
*
* Since the chunk structure is reused for all chunks within
* a packet, we use ecn_ce_done to track if we've already
* done CE processing for this packet.
*
* We need to do ECN processing even if we plan to discard the
* chunk later.
*/
if (!chunk->ecn_ce_done) {
struct sctp_af *af;
chunk->ecn_ce_done = 1;
af = sctp_get_af_specific(
ipver2af(ip_hdr(chunk->skb)->version));
if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
/* Do real work as a side effect. */
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
SCTP_U32(tsn));
}
}
tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
if (tmp < 0) {
/* The TSN is too high--silently discard the chunk and
* count on it getting retransmitted later.
*/
if (chunk->asoc)
chunk->asoc->stats.outofseqtsns++;
return SCTP_IERROR_HIGH_TSN;
} else if (tmp > 0) {
/* This is a duplicate. Record it. */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
return SCTP_IERROR_DUP_TSN;
}
/* This is a new TSN. */
/* Discard if there is no room in the receive window.
* Actually, allow a little bit of overflow (up to an MTU).
*/
datalen = ntohs(chunk->chunk_hdr->length);
datalen -= sizeof(sctp_data_chunk_t);
deliver = SCTP_CMD_CHUNK_ULP;
/* Think about partial delivery. */
if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
/* Even if we don't accept this chunk there is
* memory pressure.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
}
/* Spill over rwnd a little bit. Note: While allowed, this spill over
* seems a bit troublesome in that frag_point varies based on
* PMTU. In cases such as loopback, this might be a rather
* large spill over.
*/
if ((!chunk->data_accepted) && (!asoc->rwnd ||
(datalen > asoc->rwnd + asoc->frag_point))) {
/* If this is the next TSN, consider reneging to make
* room. Note: Playing nice with a confused sender. A
* malicious sender can still eat up all our buffer
* space and in the future we may want to detect and
* do more drastic reneging.
*/
if (sctp_tsnmap_has_gap(map) &&
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
pr_debug("%s: reneging for tsn:%u\n", __func__, tsn);
deliver = SCTP_CMD_RENEGE;
} else {
pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n",
__func__, tsn, datalen, asoc->rwnd);
return SCTP_IERROR_IGNORE_TSN;
}
}
/*
* Also try to renege to limit our memory usage in the event that
* we are under memory pressure.
* If we can't renege, don't worry about it; the sk_rmem_schedule
* in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
* memory usage too much.
*/
if (*sk->sk_prot_creator->memory_pressure) {
if (sctp_tsnmap_has_gap(map) &&
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
pr_debug("%s: under pressure, reneging for tsn:%u\n",
__func__, tsn);
deliver = SCTP_CMD_RENEGE;
}
}
/*
* Section 3.3.10.9 No User Data (9)
*
* Cause of error
* ---------------
* No User Data: This error cause is returned to the originator of a
* DATA chunk if a received DATA chunk has no user data.
*/
if (unlikely(0 == datalen)) {
err = sctp_make_abort_no_data(asoc, chunk, tsn);
if (err) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_DATA));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_IERROR_NO_DATA;
}
chunk->data_accepted = 1;
/* Note: Some chunks may get undercounted (if we drop) or overcounted
* if we renege and the chunk arrives again.
*/
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
if (chunk->asoc)
chunk->asoc->stats.iuodchunks++;
} else {
SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
if (chunk->asoc)
chunk->asoc->stats.iodchunks++;
ordered = 1;
}
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* If an endpoint receives a DATA chunk with an invalid stream
* identifier, it shall acknowledge the reception of the DATA chunk
* following the normal procedure, immediately send an ERROR chunk
* with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
* and discard the DATA chunk.
*/
sid = ntohs(data_hdr->stream);
if (sid >= asoc->c.sinit_max_instreams) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
&data_hdr->stream,
sizeof(data_hdr->stream),
sizeof(u16));
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
return SCTP_IERROR_BAD_STREAM;
}
/* Check to see if the SSN is possible for this TSN.
* The biggest gap we can record is 4K wide. Since SSNs wrap
* at an unsigned short, there is no way that an SSN can
* wrap for a valid TSN. We can simply check whether the current
* SSN is smaller than the next expected one. If it is, it wrapped
* and is invalid.
*/
ssn = ntohs(data_hdr->ssn);
if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
return SCTP_IERROR_PROTO_VIOLATION;
}
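/* Sketch of the wraparound compare used above (the kernel's own
* SSN_lt() macro may be written differently; this only illustrates
* serial-number arithmetic on 16-bit SSNs):
*
*     static inline int ssn_lt_sketch(__u16 a, __u16 b)
*     {
*             return (__s16)(a - b) < 0;
*     }
*
* e.g. a = 0xFFFE, b = 0x0001 gives (__s16)0xFFFD == -3 < 0, so
* 0xFFFE still counts as "before" 0x0001 across the 16-bit wrap.
*/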
/* Send the data up to the user. Note: Schedule the
* SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
* chunk needs the updated rwnd.
*/
sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
return SCTP_IERROR_NO_ERROR;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2027_0 |
crossvul-cpp_data_bad_5844_2 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* RAW - implementation of IP "raw" sockets.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Fixes:
* Alan Cox : verify_area() fixed up
* Alan Cox : ICMP error handling
* Alan Cox : EMSGSIZE if you send too big a packet
* Alan Cox : Now uses generic datagrams and shared
* skbuff library. No more peek crashes,
* no more backlogs
* Alan Cox : Checks sk->broadcast.
* Alan Cox : Uses skb_free_datagram/skb_copy_datagram
* Alan Cox : Raw passes ip options too
* Alan Cox : Setsocketopt added
* Alan Cox : Fixed error return for broadcasts
* Alan Cox : Removed wake_up calls
* Alan Cox : Use ttl/tos
* Alan Cox : Cleaned up old debugging
* Alan Cox : Use new kernel side addresses
* Arnt Gulbrandsen : Fixed MSG_DONTROUTE in raw sockets.
* Alan Cox : BSD style RAW socket demultiplexing.
* Alan Cox : Beginnings of mrouted support.
* Alan Cox : Added IP_HDRINCL option.
* Alan Cox : Skip broadcast check if BSDism set.
* David S. Miller : New socket lookup architecture.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/aio.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/sockios.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/mroute.h>
#include <linux/netdevice.h>
#include <linux/in_route.h>
#include <linux/route.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/snmp.h>
#include <net/tcp_states.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
static struct raw_hashinfo raw_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
};
void raw_hash_sk(struct sock *sk)
{
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
struct hlist_head *head;
head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
write_lock_bh(&h->lock);
sk_add_node(sk, head);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_hash_sk);
void raw_unhash_sk(struct sock *sk)
{
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
write_lock_bh(&h->lock);
if (sk_del_node_init(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_unhash_sk);
static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
unsigned short num, __be32 raddr, __be32 laddr, int dif)
{
sk_for_each_from(sk) {
struct inet_sock *inet = inet_sk(sk);
if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
!(inet->inet_daddr && inet->inet_daddr != raddr) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found; /* gotcha */
}
sk = NULL;
found:
return sk;
}
/*
* 0 - deliver
* 1 - block
*/
static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
{
struct icmphdr _hdr;
const struct icmphdr *hdr;
hdr = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_hdr), &_hdr);
if (!hdr)
return 1;
if (hdr->type < 32) {
__u32 data = raw_sk(sk)->filter.data;
return ((1U << hdr->type) & data) != 0;
}
/* Do not block unknown ICMP types */
return 0;
}
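/* Usage sketch (assumes the standard ICMP_FILTER semantics documented
* in raw(7)): a bit set in filter.data blocks the matching ICMP type,
* so a user process can drop echo replies on its raw socket with
*
*     struct icmp_filter flt = { .data = 1U << ICMP_ECHOREPLY };
*     setsockopt(fd, SOL_RAW, ICMP_FILTER, &flt, sizeof(flt));
*
* Types >= 32 fall outside the 32-bit mask and are always delivered,
* as the check above shows.
*/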
/* IP input processing comes here for RAW socket delivery.
* Caller owns SKB, so we must make clones.
*
* RFC 1122: SHOULD pass TOS value up to the transport layer.
* -> It does. And not only TOS, but all IP header.
*/
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
struct sock *sk;
struct hlist_head *head;
int delivered = 0;
struct net *net;
read_lock(&raw_v4_hashinfo.lock);
head = &raw_v4_hashinfo.ht[hash];
if (hlist_empty(head))
goto out;
net = dev_net(skb->dev);
sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
iph->saddr, iph->daddr,
skb->dev->ifindex);
while (sk) {
delivered = 1;
if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
/* Not releasing hash table! */
if (clone)
raw_rcv(sk, clone);
}
sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
iph->saddr, iph->daddr,
skb->dev->ifindex);
}
out:
read_unlock(&raw_v4_hashinfo.lock);
return delivered;
}
int raw_local_deliver(struct sk_buff *skb, int protocol)
{
int hash;
struct sock *raw_sk;
hash = protocol & (RAW_HTABLE_SIZE - 1);
raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
/* If there may be a raw socket we must check it; if not, we
* couldn't care less.
*/
if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
raw_sk = NULL;
return raw_sk != NULL;
}
static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
{
struct inet_sock *inet = inet_sk(sk);
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
int err = 0;
int harderr = 0;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
ipv4_sk_update_pmtu(skb, sk, info);
else if (type == ICMP_REDIRECT) {
ipv4_sk_redirect(skb, sk);
return;
}
/* Report error on raw socket, if:
1. User requested ip_recverr.
2. Socket is connected (otherwise the error indication
is useless without ip_recverr) and the error is hard.
*/
if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
switch (type) {
default:
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
case ICMP_SOURCE_QUENCH:
return;
case ICMP_PARAMETERPROB:
err = EPROTO;
harderr = 1;
break;
case ICMP_DEST_UNREACH:
err = EHOSTUNREACH;
if (code > NR_ICMP_UNREACH)
break;
err = icmp_err_convert[code].errno;
harderr = icmp_err_convert[code].fatal;
if (code == ICMP_FRAG_NEEDED) {
harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
err = EMSGSIZE;
}
}
if (inet->recverr) {
const struct iphdr *iph = (const struct iphdr *)skb->data;
u8 *payload = skb->data + (iph->ihl << 2);
if (inet->hdrincl)
payload = skb->data;
ip_icmp_error(sk, skb, err, 0, info, payload);
}
if (inet->recverr || harderr) {
sk->sk_err = err;
sk->sk_error_report(sk);
}
}
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
int hash;
struct sock *raw_sk;
const struct iphdr *iph;
struct net *net;
hash = protocol & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v4_hashinfo.lock);
raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
if (raw_sk != NULL) {
iph = (const struct iphdr *)skb->data;
net = dev_net(skb->dev);
while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
iph->daddr, iph->saddr,
skb->dev->ifindex)) != NULL) {
raw_err(raw_sk, skb, info);
raw_sk = sk_next(raw_sk);
iph = (const struct iphdr *)skb->data;
}
}
read_unlock(&raw_v4_hashinfo.lock);
}
static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
/* Charge it to the socket. */
ipv4_pktinfo_prepare(sk, skb);
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return NET_RX_SUCCESS;
}
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
nf_reset(skb);
skb_push(skb, skb->data - skb_network_header(skb));
raw_rcv_skb(sk, skb);
return 0;
}
static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
void *from, size_t length,
struct rtable **rtp,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct iphdr *iph;
struct sk_buff *skb;
unsigned int iphlen;
int err;
struct rtable *rt = *rtp;
int hlen, tlen;
if (length > rt->dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
hlen = LL_RESERVED_SPACE(rt->dst.dev);
tlen = rt->dst.dev->needed_tailroom;
skb = sock_alloc_send_skb(sk,
length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
skb_reserve(skb, hlen);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
skb_reset_network_header(skb);
iph = ip_hdr(skb);
skb_put(skb, length);
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
err = -EFAULT;
if (memcpy_fromiovecend((void *)iph, from, 0, length))
goto error_free;
iphlen = iph->ihl * 4;
/*
* We don't want to modify the ip header, but we do need to
* be sure that it won't cause problems later along the network
* stack. Specifically we want to make sure that iph->ihl is a
* sane value. If ihl points beyond the length of the buffer passed
* in, reject the frame as invalid
*/
err = -EINVAL;
if (iphlen > length)
goto error_free;
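/* Worked example: a crafted header claiming ihl == 15 advertises a
* 60-byte header (15 * 4); if the caller only supplied length == 40,
* then iphlen (60) > length (40) and the frame is rejected here rather
* than letting later code read past the buffer.
*/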
if (iphlen >= sizeof(*iph)) {
if (!iph->saddr)
iph->saddr = fl4->saddr;
iph->check = 0;
iph->tot_len = htons(length);
if (!iph->id)
ip_select_ident(skb, &rt->dst, NULL);
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto error;
out:
return 0;
error_free:
kfree_skb(skb);
error:
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
return err;
}
static int raw_probe_proto_opt(struct flowi4 *fl4, struct msghdr *msg)
{
struct iovec *iov;
u8 __user *type = NULL;
u8 __user *code = NULL;
int probed = 0;
unsigned int i;
if (!msg->msg_iov)
return 0;
for (i = 0; i < msg->msg_iovlen; i++) {
iov = &msg->msg_iov[i];
if (!iov)
continue;
switch (fl4->flowi4_proto) {
case IPPROTO_ICMP:
/* check if one-byte field is readable or not. */
if (iov->iov_base && iov->iov_len < 1)
break;
if (!type) {
type = iov->iov_base;
/* check if code field is readable or not. */
if (iov->iov_len > 1)
code = type + 1;
} else if (!code)
code = iov->iov_base;
if (type && code) {
if (get_user(fl4->fl4_icmp_type, type) ||
get_user(fl4->fl4_icmp_code, code))
return -EFAULT;
probed = 1;
}
break;
default:
probed = 1;
break;
}
if (probed)
break;
}
return 0;
}
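/* Note on the probing above: the first two bytes of any ICMP message
* are the type and code fields,
*
*     struct icmphdr {
*             __u8    type;
*             __u8    code;
*             __sum16 checksum;
*             ...
*     };
*
* so the loop peeks at bytes 0 and 1 of the user iovecs to classify
* the packet for the flow lookup before the payload is copied in.
*/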
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
struct flowi4 fl4;
int free = 0;
__be32 daddr;
__be32 saddr;
u8 tos;
int err;
struct ip_options_data opt_copy;
err = -EMSGSIZE;
if (len > 0xFFFF)
goto out;
/*
* Check the flags.
*/
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */
goto out; /* compatibility */
/*
* Get and verify the address.
*/
if (msg->msg_namelen) {
struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
err = -EINVAL;
if (msg->msg_namelen < sizeof(*usin))
goto out;
if (usin->sin_family != AF_INET) {
pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n",
__func__, current->comm);
err = -EAFNOSUPPORT;
if (usin->sin_family)
goto out;
}
daddr = usin->sin_addr.s_addr;
/* ANK: I did not forget to get protocol from port field.
* I just do not know who uses this weirdness.
* IP_HDRINCL is much more convenient.
*/
} else {
err = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->inet_daddr;
}
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
goto out;
if (ipc.opt)
free = 1;
}
saddr = ipc.addr;
ipc.addr = daddr;
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
if (ipc.opt) {
err = -EINVAL;
/* Linux does not mangle headers on raw sockets,
* so that IP options + IP_HDRINCL is nonsense.
*/
if (inet->hdrincl)
goto done;
if (ipc.opt->opt.srr) {
if (!daddr)
goto done;
daddr = ipc.opt->opt.faddr;
}
}
tos = get_rtconn_flags(&ipc, sk);
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
} else if (!ipc.oif)
ipc.oif = inet->uc_index;
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
(inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl4, msg);
if (err)
goto done;
}
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
goto done;
}
err = -EACCES;
if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
goto done;
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = raw_send_hdrinc(sk, &fl4, msg->msg_iov, len,
&rt, msg->msg_flags);
else {
if (!ipc.addr)
ipc.addr = fl4.daddr;
lock_sock(sk);
err = ip_append_data(sk, &fl4, ip_generic_getfrag,
msg->msg_iov, len, 0,
&ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE)) {
err = ip_push_pending_frames(sk, &fl4);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
}
release_sock(sk);
}
done:
if (free)
kfree(ipc.opt);
ip_rt_put(rt);
out:
if (err < 0)
return err;
return len;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
static void raw_close(struct sock *sk, long timeout)
{
/*
* Raw sockets may have direct kernel references. Kill them.
*/
ip_ra_control(sk, 0, NULL);
sk_common_release(sk);
}
static void raw_destroy(struct sock *sk)
{
lock_sock(sk);
ip_flush_pending_frames(sk);
release_sock(sk);
}
/* This gets rid of all the nasties in af_inet. -DaveM */
static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
int ret = -EINVAL;
int chk_addr_ret;
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
goto out;
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
ret = -EADDRNOTAVAIL;
if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
sk_dst_reset(sk);
ret = 0;
out: return ret;
}
/*
* This should be easy: if there is something there,
* we return it; otherwise we block.
*/
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*sin);
if (flags & MSG_ERRQUEUE) {
err = ip_recv_error(sk, msg, len);
goto out;
}
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err)
return err;
return copied;
}
static int raw_init(struct sock *sk)
{
struct raw_sock *rp = raw_sk(sk);
if (inet_sk(sk)->inet_num == IPPROTO_ICMP)
memset(&rp->filter, 0, sizeof(rp->filter));
return 0;
}
static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
{
if (optlen > sizeof(struct icmp_filter))
optlen = sizeof(struct icmp_filter);
if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
return -EFAULT;
return 0;
}
static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
{
int len, ret = -EFAULT;
if (get_user(len, optlen))
goto out;
ret = -EINVAL;
if (len < 0)
goto out;
if (len > sizeof(struct icmp_filter))
len = sizeof(struct icmp_filter);
ret = -EFAULT;
if (put_user(len, optlen) ||
copy_to_user(optval, &raw_sk(sk)->filter, len))
goto out;
ret = 0;
out: return ret;
}
static int do_raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_seticmpfilter(sk, optval, optlen);
}
return -ENOPROTOOPT;
}
static int raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_RAW)
return ip_setsockopt(sk, level, optname, optval, optlen);
return do_raw_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_RAW)
return compat_ip_setsockopt(sk, level, optname, optval, optlen);
return do_raw_setsockopt(sk, level, optname, optval, optlen);
}
#endif
static int do_raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_geticmpfilter(sk, optval, optlen);
}
return -ENOPROTOOPT;
}
static int raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_RAW)
return ip_getsockopt(sk, level, optname, optval, optlen);
return do_raw_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_RAW)
return compat_ip_getsockopt(sk, level, optname, optval, optlen);
return do_raw_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ: {
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
default:
#ifdef CONFIG_IP_MROUTE
return ipmr_ioctl(sk, cmd, (void __user *)arg);
#else
return -ENOIOCTLCMD;
#endif
}
}
#ifdef CONFIG_COMPAT
static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IP_MROUTE
return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
struct proto raw_prot = {
.name = "RAW",
.owner = THIS_MODULE,
.close = raw_close,
.destroy = raw_destroy,
.connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
.setsockopt = raw_setsockopt,
.getsockopt = raw_getsockopt,
.sendmsg = raw_sendmsg,
.recvmsg = raw_recvmsg,
.bind = raw_bind,
.backlog_rcv = raw_rcv_skb,
.release_cb = ip4_datagram_release_cb,
.hash = raw_hash_sk,
.unhash = raw_unhash_sk,
.obj_size = sizeof(struct raw_sock),
.h.raw_hash = &raw_v4_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_raw_setsockopt,
.compat_getsockopt = compat_raw_getsockopt,
.compat_ioctl = compat_raw_ioctl,
#endif
};
#ifdef CONFIG_PROC_FS
static struct sock *raw_get_first(struct seq_file *seq)
{
struct sock *sk;
struct raw_iter_state *state = raw_seq_private(seq);
for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
++state->bucket) {
sk_for_each(sk, &state->h->ht[state->bucket])
if (sock_net(sk) == seq_file_net(seq))
goto found;
}
sk = NULL;
found:
return sk;
}
static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
{
struct raw_iter_state *state = raw_seq_private(seq);
do {
sk = sk_next(sk);
try_again:
;
} while (sk && sock_net(sk) != seq_file_net(seq));
if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
sk = sk_head(&state->h->ht[state->bucket]);
goto try_again;
}
return sk;
}
static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
{
struct sock *sk = raw_get_first(seq);
if (sk)
while (pos && (sk = raw_get_next(seq, sk)) != NULL)
--pos;
return pos ? NULL : sk;
}
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
{
struct raw_iter_state *state = raw_seq_private(seq);
read_lock(&state->h->lock);
return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL_GPL(raw_seq_start);
void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock *sk;
if (v == SEQ_START_TOKEN)
sk = raw_get_first(seq);
else
sk = raw_get_next(seq, v);
++*pos;
return sk;
}
EXPORT_SYMBOL_GPL(raw_seq_next);
void raw_seq_stop(struct seq_file *seq, void *v)
{
struct raw_iter_state *state = raw_seq_private(seq);
read_unlock(&state->h->lock);
}
EXPORT_SYMBOL_GPL(raw_seq_stop);
static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
struct inet_sock *inet = inet_sk(sp);
__be32 dest = inet->inet_daddr,
src = inet->inet_rcv_saddr;
__u16 destp = 0,
srcp = inet->inet_num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
static int raw_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops\n");
else
raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
return 0;
}
static const struct seq_operations raw_seq_ops = {
.start = raw_seq_start,
.next = raw_seq_next,
.stop = raw_seq_stop,
.show = raw_seq_show,
};
int raw_seq_open(struct inode *ino, struct file *file,
struct raw_hashinfo *h, const struct seq_operations *ops)
{
int err;
struct raw_iter_state *i;
err = seq_open_net(ino, file, ops, sizeof(struct raw_iter_state));
if (err < 0)
return err;
i = raw_seq_private((struct seq_file *)file->private_data);
i->h = h;
return 0;
}
EXPORT_SYMBOL_GPL(raw_seq_open);
static int raw_v4_seq_open(struct inode *inode, struct file *file)
{
return raw_seq_open(inode, file, &raw_v4_hashinfo, &raw_seq_ops);
}
static const struct file_operations raw_seq_fops = {
.owner = THIS_MODULE,
.open = raw_v4_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static __net_init int raw_init_net(struct net *net)
{
if (!proc_create("raw", S_IRUGO, net->proc_net, &raw_seq_fops))
return -ENOMEM;
return 0;
}
static __net_exit void raw_exit_net(struct net *net)
{
remove_proc_entry("raw", net->proc_net);
}
static __net_initdata struct pernet_operations raw_net_ops = {
.init = raw_init_net,
.exit = raw_exit_net,
};
int __init raw_proc_init(void)
{
return register_pernet_subsys(&raw_net_ops);
}
void __init raw_proc_exit(void)
{
unregister_pernet_subsys(&raw_net_ops);
}
#endif /* CONFIG_PROC_FS */
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5844_2 |
crossvul-cpp_data_good_2441_0 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
#include "k5-int.h"
#include <kadm5/admin.h>
#include <syslog.h>
#include <adm_proto.h> /* krb5_klog_syslog */
#include <stdio.h>
#include <errno.h>
#include "kadm5/server_internal.h" /* XXX for kadm5_server_handle_t */
#include "misc.h"
#ifndef GETSOCKNAME_ARG3_TYPE
#define GETSOCKNAME_ARG3_TYPE int
#endif
#define RFC3244_VERSION 0xff80
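/*
* Wire layout parsed by process_chpw_request() below (kpasswd
* protocol; all multi-byte fields are big-endian):
*
*     +---------+---------+----------------+--------+---------------------+
*     | len (2) | vno (2) | AP-REQ len (2) | AP-REQ | KRB-PRIV ciphertext |
*     +---------+---------+----------------+--------+---------------------+
*
* vno is 1 for a classic change-password request and 0xff80
* (RFC3244_VERSION) for an RFC 3244 set-password request. The reply
* mirrors this shape, with vno fixed to 1 and an AP-REP (possibly
* empty) in place of the AP-REQ, followed by a KRB-PRIV or KRB-ERROR.
*/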
static krb5_error_code
process_chpw_request(krb5_context context, void *server_handle, char *realm,
krb5_keytab keytab, const krb5_fulladdr *local_faddr,
const krb5_fulladdr *remote_faddr, krb5_data *req,
krb5_data *rep)
{
krb5_error_code ret;
char *ptr;
unsigned int plen, vno;
krb5_data ap_req, ap_rep = empty_data();
krb5_data cipher = empty_data(), clear = empty_data();
krb5_auth_context auth_context = NULL;
krb5_principal changepw = NULL;
krb5_principal client, target = NULL;
krb5_ticket *ticket = NULL;
krb5_replay_data replay;
krb5_error krberror;
int numresult;
char strresult[1024];
char *clientstr = NULL, *targetstr = NULL;
const char *errmsg = NULL;
size_t clen;
char *cdots;
struct sockaddr_storage ss;
socklen_t salen;
char addrbuf[100];
krb5_address *addr = remote_faddr->address;
*rep = empty_data();
if (req->length < 4) {
/* either this, or the server is printing bad messages,
or the caller passed in garbage */
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated", sizeof(strresult));
goto bailout;
}
ptr = req->data;
/* verify length */
plen = (*ptr++ & 0xff);
plen = (plen<<8) | (*ptr++ & 0xff);
if (plen != req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request length was inconsistent",
sizeof(strresult));
goto bailout;
}
/* verify version number */
vno = (*ptr++ & 0xff) ;
vno = (vno<<8) | (*ptr++ & 0xff);
if (vno != 1 && vno != RFC3244_VERSION) {
ret = KRB5KDC_ERR_BAD_PVNO;
numresult = KRB5_KPASSWD_BAD_VERSION;
snprintf(strresult, sizeof(strresult),
"Request contained unknown protocol version number %d", vno);
goto bailout;
}
/* read, check ap-req length */
ap_req.length = (*ptr++ & 0xff);
ap_req.length = (ap_req.length<<8) | (*ptr++ & 0xff);
if (ptr + ap_req.length >= req->data + req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated in AP-REQ",
sizeof(strresult));
goto bailout;
}
/* verify ap_req */
ap_req.data = ptr;
ptr += ap_req.length;
ret = krb5_auth_con_init(context, &auth_context);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_auth_con_setflags(context, auth_context,
KRB5_AUTH_CONTEXT_DO_SEQUENCE);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_build_principal(context, &changepw, strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed building kadmin/changepw principal",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_rd_req(context, &auth_context, &ap_req, changepw, keytab,
NULL, &ticket);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed reading application request",
sizeof(strresult));
goto chpwfail;
}
/* construct the ap-rep */
ret = krb5_mk_rep(context, auth_context, &ap_rep);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed replying to application request",
sizeof(strresult));
goto chpwfail;
}
/* decrypt the ChangePasswdData */
cipher.length = (req->data + req->length) - ptr;
cipher.data = ptr;
/*
* Don't set a remote address in auth_context before calling krb5_rd_priv,
* so that we can work against clients behind a NAT. Reflection attacks
* aren't a concern since we use sequence numbers and since our requests
* don't look anything like our responses. Also don't set a local address,
* since we don't know what interface the request was received on.
*/
ret = krb5_rd_priv(context, auth_context, &cipher, &clear, &replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed decrypting request", sizeof(strresult));
goto chpwfail;
}
client = ticket->enc_part2->client;
/* decode ChangePasswdData for setpw requests */
if (vno == RFC3244_VERSION) {
krb5_data *clear_data;
ret = decode_krb5_setpw_req(&clear, &clear_data, &target);
if (ret != 0) {
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Failed decoding ChangePasswdData",
sizeof(strresult));
goto chpwfail;
}
zapfree(clear.data, clear.length);
clear = *clear_data;
free(clear_data);
if (target != NULL) {
ret = krb5_unparse_name(context, target, &targetstr);
if (ret != 0) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing target name for log",
sizeof(strresult));
goto chpwfail;
}
}
}
ret = krb5_unparse_name(context, client, &clientstr);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing client name for log",
sizeof(strresult));
goto chpwfail;
}
/* for cpw, verify that this is an AS_REQ ticket */
if (vno == 1 &&
(ticket->enc_part2->flags & TKT_FLG_INITIAL) == 0) {
numresult = KRB5_KPASSWD_INITIAL_FLAG_NEEDED;
strlcpy(strresult, "Ticket must be derived from a password",
sizeof(strresult));
goto chpwfail;
}
/* change the password */
ptr = k5memdup0(clear.data, clear.length, &ret);
ret = schpw_util_wrapper(server_handle, client, target,
(ticket->enc_part2->flags & TKT_FLG_INITIAL) != 0,
ptr, NULL, strresult, sizeof(strresult));
if (ret)
errmsg = krb5_get_error_message(context, ret);
/* zap the password */
zapfree(clear.data, clear.length);
zapfree(ptr, clear.length);
clear = empty_data();
clen = strlen(clientstr);
trunc_name(&clen, &cdots);
switch (addr->addrtype) {
case ADDRTYPE_INET: {
struct sockaddr_in *sin = ss2sin(&ss);
sin->sin_family = AF_INET;
memcpy(&sin->sin_addr, addr->contents, addr->length);
sin->sin_port = htons(remote_faddr->port);
salen = sizeof(*sin);
break;
}
case ADDRTYPE_INET6: {
struct sockaddr_in6 *sin6 = ss2sin6(&ss);
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr, addr->contents, addr->length);
sin6->sin6_port = htons(remote_faddr->port);
salen = sizeof(*sin6);
break;
}
default: {
struct sockaddr *sa = ss2sa(&ss);
sa->sa_family = AF_UNSPEC;
salen = sizeof(*sa);
break;
}
}
if (getnameinfo(ss2sa(&ss), salen,
addrbuf, sizeof(addrbuf), NULL, 0,
NI_NUMERICHOST | NI_NUMERICSERV) != 0)
strlcpy(addrbuf, "<unprintable>", sizeof(addrbuf));
if (vno == RFC3244_VERSION) {
size_t tlen;
char *tdots;
const char *targetp;
if (target == NULL) {
tlen = clen;
tdots = cdots;
targetp = targetstr;
} else {
tlen = strlen(targetstr);
trunc_name(&tlen, &tdots);
targetp = clientstr;
}
krb5_klog_syslog(LOG_NOTICE, _("setpw request from %s by %.*s%s for "
"%.*s%s: %s"), addrbuf, (int) clen,
clientstr, cdots, (int) tlen, targetp, tdots,
errmsg ? errmsg : "success");
} else {
krb5_klog_syslog(LOG_NOTICE, _("chpw request from %s for %.*s%s: %s"),
addrbuf, (int) clen, clientstr, cdots,
errmsg ? errmsg : "success");
}
switch (ret) {
case KADM5_AUTH_CHANGEPW:
numresult = KRB5_KPASSWD_ACCESSDENIED;
break;
case KADM5_PASS_Q_TOOSHORT:
case KADM5_PASS_REUSE:
case KADM5_PASS_Q_CLASS:
case KADM5_PASS_Q_DICT:
case KADM5_PASS_Q_GENERIC:
case KADM5_PASS_TOOSOON:
numresult = KRB5_KPASSWD_SOFTERROR;
break;
case 0:
numresult = KRB5_KPASSWD_SUCCESS;
strlcpy(strresult, "", sizeof(strresult));
break;
default:
numresult = KRB5_KPASSWD_HARDERROR;
break;
}
chpwfail:
clear.length = 2 + strlen(strresult);
clear.data = (char *) malloc(clear.length);
ptr = clear.data;
*ptr++ = (numresult>>8) & 0xff;
*ptr++ = numresult & 0xff;
memcpy(ptr, strresult, strlen(strresult));
cipher = empty_data();
if (ap_rep.length) {
ret = krb5_auth_con_setaddrs(context, auth_context,
local_faddr->address, NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult,
"Failed storing client and server internet addresses",
sizeof(strresult));
} else {
ret = krb5_mk_priv(context, auth_context, &clear, &cipher,
&replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed encrypting reply",
sizeof(strresult));
}
}
}
/* If no KRB-PRIV was constructed, then we need a KRB-ERROR.
If this fails, just bail; there's nothing else we can do. */
if (cipher.length == 0) {
/* clear out ap_rep now, so that it won't be inserted in the
reply */
if (ap_rep.length) {
free(ap_rep.data);
ap_rep = empty_data();
}
krberror.ctime = 0;
krberror.cusec = 0;
krberror.susec = 0;
ret = krb5_timeofday(context, &krberror.stime);
if (ret)
goto bailout;
/* This is really icky, but it's what all the other callers
of mk_error do. */
krberror.error = ret;
krberror.error -= ERROR_TABLE_BASE_krb5;
if (krberror.error < 0 || krberror.error > 128)
krberror.error = KRB_ERR_GENERIC;
krberror.client = NULL;
ret = krb5_build_principal(context, &krberror.server,
strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret)
goto bailout;
krberror.text.length = 0;
krberror.e_data = clear;
ret = krb5_mk_error(context, &krberror, &cipher);
krb5_free_principal(context, krberror.server);
if (ret)
goto bailout;
}
/* construct the reply */
ret = alloc_data(rep, 6 + ap_rep.length + cipher.length);
if (ret)
goto bailout;
ptr = rep->data;
/* length */
*ptr++ = (rep->length>>8) & 0xff;
*ptr++ = rep->length & 0xff;
/* version == 0x0001 big-endian */
*ptr++ = 0;
*ptr++ = 1;
/* ap_rep length, big-endian */
*ptr++ = (ap_rep.length>>8) & 0xff;
*ptr++ = ap_rep.length & 0xff;
/* ap-rep data */
if (ap_rep.length) {
memcpy(ptr, ap_rep.data, ap_rep.length);
ptr += ap_rep.length;
}
/* krb-priv or krb-error */
memcpy(ptr, cipher.data, cipher.length);
bailout:
krb5_auth_con_free(context, auth_context);
krb5_free_principal(context, changepw);
krb5_free_ticket(context, ticket);
free(ap_rep.data);
free(clear.data);
free(cipher.data);
krb5_free_principal(context, target);
krb5_free_unparsed_name(context, targetstr);
krb5_free_unparsed_name(context, clientstr);
krb5_free_error_message(context, errmsg);
return ret;
}
/* Dispatch routine for set/change password */
void
dispatch(void *handle, struct sockaddr *local_saddr,
const krb5_fulladdr *remote_faddr, krb5_data *request, int is_tcp,
verto_ctx *vctx, loop_respond_fn respond, void *arg)
{
krb5_error_code ret;
krb5_keytab kt = NULL;
kadm5_server_handle_t server_handle = (kadm5_server_handle_t)handle;
krb5_fulladdr local_faddr;
krb5_address **local_kaddrs = NULL, local_kaddr_buf;
krb5_data *response = NULL;
if (local_saddr == NULL) {
ret = krb5_os_localaddr(server_handle->context, &local_kaddrs);
if (ret != 0)
goto egress;
local_faddr.address = local_kaddrs[0];
local_faddr.port = 0;
} else {
local_faddr.address = &local_kaddr_buf;
init_addr(&local_faddr, local_saddr);
}
ret = krb5_kt_resolve(server_handle->context, "KDB:", &kt);
if (ret != 0) {
krb5_klog_syslog(LOG_ERR, _("chpw: Couldn't open admin keytab %s"),
krb5_get_error_message(server_handle->context, ret));
goto egress;
}
response = k5alloc(sizeof(krb5_data), &ret);
if (response == NULL)
goto egress;
ret = process_chpw_request(server_handle->context,
handle,
server_handle->params.realm,
kt,
&local_faddr,
remote_faddr,
request,
response);
egress:
if (ret)
krb5_free_data(server_handle->context, response);
krb5_free_addresses(server_handle->context, local_kaddrs);
krb5_kt_close(server_handle->context, kt);
(*respond)(arg, ret, ret == 0 ? response : NULL);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2441_0 |
crossvul-cpp_data_bad_4718_2 | /*
* verify.c:
*
* Author:
* Mono Project (http://www.mono-project.com)
*
* Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
* Copyright 2004-2009 Novell, Inc (http://www.novell.com)
*/
#include <config.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/verify.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/reflection.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/metadata.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/mono-basic-block.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/monobitset.h>
#include <string.h>
#include <signal.h>
#include <ctype.h>
static MiniVerifierMode verifier_mode = MONO_VERIFIER_MODE_OFF;
static gboolean verify_all = FALSE;
/*
* Set the desired level of checks for the verifier.
*
*/
void
mono_verifier_set_mode (MiniVerifierMode mode)
{
verifier_mode = mode;
}
void
mono_verifier_enable_verify_all ()
{
verify_all = TRUE;
}
#ifndef DISABLE_VERIFIER
/*
* Pull the list of opcodes
*/
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
a = i,
enum {
#include "mono/cil/opcode.def"
LAST = 0xff
};
#undef OPDEF
#ifdef MONO_VERIFIER_DEBUG
#define VERIFIER_DEBUG(code) do { code } while (0)
#else
#define VERIFIER_DEBUG(code)
#endif
//////////////////////////////////////////////////////////////////
#define IS_STRICT_MODE(ctx) (((ctx)->level & MONO_VERIFY_NON_STRICT) == 0)
#define IS_FAIL_FAST_MODE(ctx) (((ctx)->level & MONO_VERIFY_FAIL_FAST) == MONO_VERIFY_FAIL_FAST)
#define IS_SKIP_VISIBILITY(ctx) (((ctx)->level & MONO_VERIFY_SKIP_VISIBILITY) == MONO_VERIFY_SKIP_VISIBILITY)
#define IS_REPORT_ALL_ERRORS(ctx) (((ctx)->level & MONO_VERIFY_REPORT_ALL_ERRORS) == MONO_VERIFY_REPORT_ALL_ERRORS)
#define CLEAR_PREFIX(ctx, prefix) do { (ctx)->prefix_set &= ~(prefix); } while (0)
#define ADD_VERIFY_INFO(__ctx, __msg, __status, __exception) \
do { \
MonoVerifyInfoExtended *vinfo = g_new (MonoVerifyInfoExtended, 1); \
vinfo->info.status = __status; \
vinfo->info.message = ( __msg ); \
vinfo->exception_type = (__exception); \
(__ctx)->list = g_slist_prepend ((__ctx)->list, vinfo); \
} while (0)
//TODO support MONO_VERIFY_REPORT_ALL_ERRORS
#define ADD_VERIFY_ERROR(__ctx, __msg) \
do { \
ADD_VERIFY_INFO(__ctx, __msg, MONO_VERIFY_ERROR, MONO_EXCEPTION_INVALID_PROGRAM); \
(__ctx)->valid = 0; \
} while (0)
#define CODE_NOT_VERIFIABLE(__ctx, __msg) \
do { \
if ((__ctx)->verifiable || IS_REPORT_ALL_ERRORS (__ctx)) { \
ADD_VERIFY_INFO(__ctx, __msg, MONO_VERIFY_NOT_VERIFIABLE, MONO_EXCEPTION_UNVERIFIABLE_IL); \
(__ctx)->verifiable = 0; \
if (IS_FAIL_FAST_MODE (__ctx)) \
(__ctx)->valid = 0; \
} \
} while (0)
#define ADD_VERIFY_ERROR2(__ctx, __msg, __exception) \
do { \
ADD_VERIFY_INFO(__ctx, __msg, MONO_VERIFY_ERROR, __exception); \
(__ctx)->valid = 0; \
} while (0)
#define CODE_NOT_VERIFIABLE2(__ctx, __msg, __exception) \
do { \
if ((__ctx)->verifiable || IS_REPORT_ALL_ERRORS (__ctx)) { \
ADD_VERIFY_INFO(__ctx, __msg, MONO_VERIFY_NOT_VERIFIABLE, __exception); \
(__ctx)->verifiable = 0; \
if (IS_FAIL_FAST_MODE (__ctx)) \
(__ctx)->valid = 0; \
} \
} while (0)
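/* Summary of the macro family above: the ADD_VERIFY_ERROR* macros record a
 * hard error and clear ctx->valid, while the CODE_NOT_VERIFIABLE* macros only
 * clear ctx->verifiable (and also clear ctx->valid when fail-fast mode is
 * enabled), so a method can end up invalid, merely unverifiable, or both. */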
#define CHECK_ADD4_OVERFLOW_UN(a, b) ((guint32)(0xFFFFFFFFU) - (guint32)(b) < (guint32)(a))
#define CHECK_ADD8_OVERFLOW_UN(a, b) ((guint64)(0xFFFFFFFFFFFFFFFFUL) - (guint64)(b) < (guint64)(a))
#if SIZEOF_VOID_P == 4
#define CHECK_ADDP_OVERFLOW_UN(a,b) CHECK_ADD4_OVERFLOW_UN(a, b)
#else
#define CHECK_ADDP_OVERFLOW_UN(a,b) CHECK_ADD8_OVERFLOW_UN(a, b)
#endif
#define ADDP_IS_GREATER_OR_OVF(a, b, c) (((a) + (b) > (c)) || CHECK_ADDP_OVERFLOW_UN (a, b))
#define ADD_IS_GREATER_OR_OVF(a, b, c) (((a) + (b) > (c)) || CHECK_ADD4_OVERFLOW_UN (a, b))
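/* The unsigned overflow checks above use the identity that, for unsigned
 * arithmetic, a + b wraps around exactly when a > MAX - b. For example,
 * CHECK_ADD4_OVERFLOW_UN (0xFFFFFFF0U, 0x20U) is true since
 * 0xFFFFFFFFU - 0x20U == 0xFFFFFFDFU, which is smaller than 0xFFFFFFF0U. */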
/*Flags to be used with ILCodeDesc::flags */
enum {
/*Instruction has not been processed.*/
IL_CODE_FLAG_NOT_PROCESSED = 0,
/*Instruction was decoded by mono_method_verify loop.*/
IL_CODE_FLAG_SEEN = 1,
/*Instruction was target of a branch or is at a protected block boundary.*/
IL_CODE_FLAG_WAS_TARGET = 2,
/*Used by stack_init to avoid double-initializing each entry.*/
IL_CODE_FLAG_STACK_INITED = 4,
/*Used by merge_stacks to decide if it should just copy the eval stack.*/
IL_CODE_STACK_MERGED = 8,
/*This instruction is part of the delegate construction sequence; it cannot be the target of a branch.*/
IL_CODE_DELEGATE_SEQUENCE = 0x10,
/*This is a delegate created from a ldftn to a non final virtual method*/
IL_CODE_LDFTN_DELEGATE_NONFINAL_VIRTUAL = 0x20,
/*This is a call to a non final virtual method*/
IL_CODE_CALL_NONFINAL_VIRTUAL = 0x40,
};
typedef enum {
RESULT_VALID,
RESULT_UNVERIFIABLE,
RESULT_INVALID
} verify_result_t;
typedef struct {
MonoType *type;
int stype;
MonoMethod *method;
} ILStackDesc;
typedef struct {
ILStackDesc *stack;
guint16 size, max_size;
guint16 flags;
} ILCodeDesc;
typedef struct {
int max_args;
int max_stack;
int verifiable;
int valid;
int level;
int code_size;
ILCodeDesc *code;
ILCodeDesc eval;
MonoType **params;
GSList *list;
/*Allocated fnptr MonoType that should be freed by us.*/
GSList *funptrs;
/*Type dup'ed exception types from catch blocks.*/
GSList *exception_types;
int num_locals;
MonoType **locals;
/*TODO get rid of target here, need_merge in mono_method_verify and hoist the merging code in the branching code*/
int target;
guint32 ip_offset;
MonoMethodSignature *signature;
MonoMethodHeader *header;
MonoGenericContext *generic_context;
MonoImage *image;
MonoMethod *method;
/*This flag helps solve a corner case of delegate verification: you cannot have a "starg 0"
*on a method that creates a delegate for a non-final virtual method using ldftn*/
gboolean has_this_store;
/*This flag tracks whether the constructor of the parent class has been called.
*If the this pointer is pushed on the eval stack inside a reference type constructor and
* super_ctor_called is false, the uninitialized flag is set on the pushed value.
*
* Popping an uninitialized this ptr from the eval stack is an unverifiable operation unless
* the safe variant is used. Only a few opcodes can use it: dup, pop, ldfld, stfld and a call to a constructor.
*/
gboolean super_ctor_called;
guint32 prefix_set;
gboolean has_flags;
MonoType *constrained_type;
} VerifyContext;
static void
merge_stacks (VerifyContext *ctx, ILCodeDesc *from, ILCodeDesc *to, gboolean start, gboolean external);
static int
get_stack_type (MonoType *type);
static gboolean
mono_delegate_signature_equal (MonoMethodSignature *delegate_sig, MonoMethodSignature *method_sig, gboolean is_static_ldftn);
static gboolean
mono_class_is_valid_generic_instantiation (VerifyContext *ctx, MonoClass *klass);
static gboolean
mono_method_is_valid_generic_instantiation (VerifyContext *ctx, MonoMethod *method);
static MonoGenericParam*
verifier_get_generic_param_from_type (VerifyContext *ctx, MonoType *type);
//////////////////////////////////////////////////////////////////
enum {
TYPE_INV = 0, /* leave at 0. */
TYPE_I4 = 1,
TYPE_I8 = 2,
TYPE_NATIVE_INT = 3,
TYPE_R8 = 4,
/* Used by operator tables to resolve pointer types (managed & unmanaged) and by unmanaged pointer types*/
TYPE_PTR = 5,
/* value types and classes */
TYPE_COMPLEX = 6,
/* Number of types, used to define the size of the tables*/
TYPE_MAX = 6,
/* Used by tables to signal that a result is not verifiable*/
NON_VERIFIABLE_RESULT = 0x80,
/*Mask used to extract just the type, excluding flags */
TYPE_MASK = 0x0F,
/* The stack type is a managed pointer, unmask the value to res */
POINTER_MASK = 0x100,
/*Stack type with the pointer mask*/
RAW_TYPE_MASK = 0x10F,
/* Controlled Mutability Manager Pointer */
CMMP_MASK = 0x200,
/* The stack type is a null literal*/
NULL_LITERAL_MASK = 0x400,
/**Used by ldarg.0 and family to let delegate verification happen.*/
THIS_POINTER_MASK = 0x800,
/**Signals that this is a boxed value type*/
BOXED_MASK = 0x1000,
/*This is an uninitialized this ref*/
UNINIT_THIS_MASK = 0x2000,
};
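/* The flag bits above combine with the TYPE_* codes, so a single stype value
 * can encode e.g. TYPE_COMPLEX | BOXED_MASK for a boxed value type, or
 * TYPE_I4 | POINTER_MASK for a managed pointer to an int32. */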
static const char* const
type_names [TYPE_MAX + 1] = {
"Invalid",
"Int32",
"Int64",
"Native Int",
"Float64",
"Native Pointer",
"Complex"
};
enum {
PREFIX_UNALIGNED = 1,
PREFIX_VOLATILE = 2,
PREFIX_TAIL = 4,
PREFIX_CONSTRAINED = 8,
PREFIX_READONLY = 16
};
//////////////////////////////////////////////////////////////////
#ifdef ENABLE_VERIFIER_STATS
#define MEM_ALLOC(amt) do { allocated_memory += (amt); working_set += (amt); } while (0)
#define MEM_FREE(amt) do { working_set -= (amt); } while (0)
static int allocated_memory;
static int working_set;
static int max_allocated_memory;
static int max_working_set;
static int total_allocated_memory;
static void
finish_collect_stats (void)
{
max_allocated_memory = MAX (max_allocated_memory, allocated_memory);
max_working_set = MAX (max_working_set, working_set);
total_allocated_memory += allocated_memory;
allocated_memory = working_set = 0;
}
static void
init_verifier_stats (void)
{
static gboolean inited;
if (!inited) {
inited = TRUE;
mono_counters_register ("Maximum memory allocated during verification", MONO_COUNTER_METADATA | MONO_COUNTER_INT, &max_allocated_memory);
mono_counters_register ("Maximum memory used during verification", MONO_COUNTER_METADATA | MONO_COUNTER_INT, &max_working_set);
mono_counters_register ("Total memory allocated for verification", MONO_COUNTER_METADATA | MONO_COUNTER_INT, &total_allocated_memory);
}
}
#else
#define MEM_ALLOC(amt) do {} while (0)
#define MEM_FREE(amt) do { } while (0)
#define finish_collect_stats()
#define init_verifier_stats()
#endif
//////////////////////////////////////////////////////////////////
/*Token validation macros and functions */
#define IS_MEMBER_REF(token) (mono_metadata_token_table (token) == MONO_TABLE_MEMBERREF)
#define IS_METHOD_DEF(token) (mono_metadata_token_table (token) == MONO_TABLE_METHOD)
#define IS_METHOD_SPEC(token) (mono_metadata_token_table (token) == MONO_TABLE_METHODSPEC)
#define IS_FIELD_DEF(token) (mono_metadata_token_table (token) == MONO_TABLE_FIELD)
#define IS_TYPE_REF(token) (mono_metadata_token_table (token) == MONO_TABLE_TYPEREF)
#define IS_TYPE_DEF(token) (mono_metadata_token_table (token) == MONO_TABLE_TYPEDEF)
#define IS_TYPE_SPEC(token) (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC)
#define IS_METHOD_DEF_OR_REF_OR_SPEC(token) (IS_METHOD_DEF (token) || IS_MEMBER_REF (token) || IS_METHOD_SPEC (token))
#define IS_TYPE_DEF_OR_REF_OR_SPEC(token) (IS_TYPE_DEF (token) || IS_TYPE_REF (token) || IS_TYPE_SPEC (token))
#define IS_FIELD_DEF_OR_REF(token) (IS_FIELD_DEF (token) || IS_MEMBER_REF (token))
/*
* Verify if @token refers to a valid row on its table.
*/
static gboolean
token_bounds_check (MonoImage *image, guint32 token)
{
if (image->dynamic)
return mono_reflection_is_valid_dynamic_token ((MonoDynamicImage*)image, token);
return image->tables [mono_metadata_token_table (token)].rows >= mono_metadata_token_index (token);
}
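/* Note: metadata row indices (RIDs) are 1-based per ECMA-335, which is why the
 * check above accepts tokens whose index is exactly equal to the row count. */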
static MonoType *
mono_type_create_fnptr_from_mono_method (VerifyContext *ctx, MonoMethod *method)
{
MonoType *res = g_new0 (MonoType, 1);
MEM_ALLOC (sizeof (MonoType));
//FIXME use mono_method_get_signature_full
res->data.method = mono_method_signature (method);
res->type = MONO_TYPE_FNPTR;
ctx->funptrs = g_slist_prepend (ctx->funptrs, res);
return res;
}
/*
* mono_type_is_enum_type:
*
* Returns TRUE if @type is an enum type.
*/
static gboolean
mono_type_is_enum_type (MonoType *type)
{
if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype)
return TRUE;
if (type->type == MONO_TYPE_GENERICINST && type->data.generic_class->container_class->enumtype)
return TRUE;
return FALSE;
}
/*
* mono_type_is_value_type:
*
* Returns TRUE if @type is a value type named @namespace.@name.
*
*/
static gboolean
mono_type_is_value_type (MonoType *type, const char *namespace, const char *name)
{
return type->type == MONO_TYPE_VALUETYPE &&
!strcmp (namespace, type->data.klass->name_space) &&
!strcmp (name, type->data.klass->name);
}
/*
* Returns TRUE if @type is VAR or MVAR
*/
static gboolean
mono_type_is_generic_argument (MonoType *type)
{
return type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR;
}
/*
* mono_type_get_underlying_type_any:
*
* This function is just like mono_type_get_underlying_type but it doesn't care whether the type is byref.
*
* Returns the underlying type of @type regardless of whether it is byref.
*/
static MonoType*
mono_type_get_underlying_type_any (MonoType *type)
{
if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype)
return mono_class_enum_basetype (type->data.klass);
if (type->type == MONO_TYPE_GENERICINST && type->data.generic_class->container_class->enumtype)
return mono_class_enum_basetype (type->data.generic_class->container_class);
return type;
}
static G_GNUC_UNUSED const char*
mono_type_get_stack_name (MonoType *type)
{
return type_names [get_stack_type (type) & TYPE_MASK];
}
#define CTOR_REQUIRED_FLAGS (METHOD_ATTRIBUTE_SPECIAL_NAME | METHOD_ATTRIBUTE_RT_SPECIAL_NAME)
#define CTOR_INVALID_FLAGS (METHOD_ATTRIBUTE_STATIC)
static gboolean
mono_method_is_constructor (MonoMethod *method)
{
return ((method->flags & CTOR_REQUIRED_FLAGS) == CTOR_REQUIRED_FLAGS &&
!(method->flags & CTOR_INVALID_FLAGS) &&
!strcmp (".ctor", method->name));
}
static gboolean
mono_class_has_default_constructor (MonoClass *klass)
{
MonoMethod *method;
int i;
mono_class_setup_methods (klass);
if (klass->exception_type)
return FALSE;
for (i = 0; i < klass->method.count; ++i) {
method = klass->methods [i];
if (mono_method_is_constructor (method) &&
mono_method_signature (method) &&
mono_method_signature (method)->param_count == 0 &&
(method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC)
return TRUE;
}
return FALSE;
}
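/* mono_class_has_default_constructor() backs the .ctor special-constraint
 * check performed below in is_valid_generic_instantiation: a reference type
 * used as a generic argument with the constructor constraint must expose a
 * public parameterless constructor. */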
/*
* Verify if @type is valid for the given @context.
* This function checks for VAR and MVAR references that are invalid under the current generic context.
*/
static gboolean
mono_type_is_valid_type_in_context (MonoType *type, MonoGenericContext *context)
{
int i;
MonoGenericInst *inst;
switch (type->type) {
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
if (!context)
return FALSE;
inst = type->type == MONO_TYPE_VAR ? context->class_inst : context->method_inst;
if (!inst || mono_type_get_generic_param_num (type) >= inst->type_argc)
return FALSE;
break;
case MONO_TYPE_SZARRAY:
return mono_type_is_valid_type_in_context (&type->data.klass->byval_arg, context);
case MONO_TYPE_ARRAY:
return mono_type_is_valid_type_in_context (&type->data.array->eklass->byval_arg, context);
case MONO_TYPE_PTR:
return mono_type_is_valid_type_in_context (type->data.type, context);
case MONO_TYPE_GENERICINST:
inst = type->data.generic_class->context.class_inst;
if (!inst->is_open)
break;
for (i = 0; i < inst->type_argc; ++i)
if (!mono_type_is_valid_type_in_context (inst->type_argv [i], context))
return FALSE;
break;
case MONO_TYPE_CLASS:
case MONO_TYPE_VALUETYPE: {
MonoClass *klass = type->data.klass;
/*
* It's possible to encode generic-ish types in such a way that they disguise themselves as class or valuetype.
* Fixing the type decoding is really tricky since in some cases this behavior is needed, for example, to
* have a 'class' type pointing to a 'genericinst' class.
*
* For the runtime these non-canonical (weird) encodings work fine; the worst they can cause is some
* reflection oddities, which are harmless, at least to security.
*/
if (klass->byval_arg.type != type->type)
return mono_type_is_valid_type_in_context (&klass->byval_arg, context);
break;
}
}
return TRUE;
}
/*This function returns NULL if the type is not instantiatable*/
static MonoType*
verifier_inflate_type (VerifyContext *ctx, MonoType *type, MonoGenericContext *context)
{
MonoError error;
MonoType *result;
result = mono_class_inflate_generic_type_checked (type, context, &error);
if (!mono_error_ok (&error)) {
mono_error_cleanup (&error);
return NULL;
}
return result;
}
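/* is_valid_generic_instantiation checks, for each non-variable type argument:
 * that it loaded cleanly, that it is not a generic type definition (except in
 * open instantiations), and that it honours the special constraints
 * (valuetype, reference type, default constructor) as well as each declared
 * class/interface constraint after inflating the constraint in @context. */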
static gboolean
is_valid_generic_instantiation (MonoGenericContainer *gc, MonoGenericContext *context, MonoGenericInst *ginst)
{
MonoError error;
int i;
if (ginst->type_argc != gc->type_argc)
return FALSE;
for (i = 0; i < gc->type_argc; ++i) {
MonoGenericParamInfo *param_info = mono_generic_container_get_param_info (gc, i);
MonoClass *paramClass;
MonoClass **constraints;
MonoType *param_type = ginst->type_argv [i];
/*it's not our job to validate type variables*/
if (mono_type_is_generic_argument (param_type))
continue;
paramClass = mono_class_from_mono_type (param_type);
if (paramClass->exception_type != MONO_EXCEPTION_NONE)
return FALSE;
/* A GTD can't be a generic argument.
*
* Due to how types are encoded we must check for the case of a genericinst MonoType and GTD MonoClass.
* This happens in cases such as: class Foo<T> { void X() { new Bar<T> (); } }
*
* Open instantiations can have GTDs, as happens when one type is instantiated with another's params
* and the former expands into the latter. For example:
* class B<K> {}
* class A<T>: B<K> {}
* The type A<K> has a parent B<K>, which is inflated into the GTD B<>.
* Since A<K> is open, and thus not instantiatable, this is valid.
*/
if (paramClass->generic_container && param_type->type != MONO_TYPE_GENERICINST && !ginst->is_open)
return FALSE;
/*it's not safe to call mono_class_init from here*/
if (paramClass->generic_class && !paramClass->inited) {
if (!mono_class_is_valid_generic_instantiation (NULL, paramClass))
return FALSE;
}
if (!param_info->constraints && !(param_info->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK))
continue;
if ((param_info->flags & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) && (!paramClass->valuetype || mono_class_is_nullable (paramClass)))
return FALSE;
if ((param_info->flags & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) && paramClass->valuetype)
return FALSE;
if ((param_info->flags & GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT) && !paramClass->valuetype && !mono_class_has_default_constructor (paramClass))
return FALSE;
if (!param_info->constraints)
continue;
for (constraints = param_info->constraints; *constraints; ++constraints) {
MonoClass *ctr = *constraints;
MonoType *inflated;
inflated = mono_class_inflate_generic_type_checked (&ctr->byval_arg, context, &error);
if (!mono_error_ok (&error)) {
mono_error_cleanup (&error);
return FALSE;
}
ctr = mono_class_from_mono_type (inflated);
mono_metadata_free_type (inflated);
if (!mono_class_is_assignable_from_slow (ctr, paramClass))
return FALSE;
}
}
return TRUE;
}
/*
* Return true if @candidate is constraint compatible with @target.
*
* This means that @candidate's constraints are a superset of @target's constraints.
*/
static gboolean
mono_generic_param_is_constraint_compatible (VerifyContext *ctx, MonoGenericParam *target, MonoGenericParam *candidate, MonoClass *candidate_param_class, MonoGenericContext *context)
{
MonoGenericParamInfo *tinfo = mono_generic_param_info (target);
MonoGenericParamInfo *cinfo = mono_generic_param_info (candidate);
int tmask = tinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK;
int cmask = cinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK;
if ((tmask & cmask) != tmask)
return FALSE;
if (tinfo->constraints) {
MonoClass **target_class, **candidate_class;
for (target_class = tinfo->constraints; *target_class; ++target_class) {
MonoClass *tc;
MonoType *inflated = verifier_inflate_type (ctx, &(*target_class)->byval_arg, context);
if (!inflated)
return FALSE;
tc = mono_class_from_mono_type (inflated);
mono_metadata_free_type (inflated);
/*
* A constraint from @target might inflate into @candidate itself, and in that case we don't need
* to check its constraints since it satisfies the constraint by itself.
*/
if (mono_metadata_type_equal (&tc->byval_arg, &candidate_param_class->byval_arg))
continue;
if (!cinfo->constraints)
return FALSE;
for (candidate_class = cinfo->constraints; *candidate_class; ++candidate_class) {
MonoClass *cc;
inflated = verifier_inflate_type (ctx, &(*candidate_class)->byval_arg, ctx->generic_context);
if (!inflated)
return FALSE;
cc = mono_class_from_mono_type (inflated);
mono_metadata_free_type (inflated);
if (mono_class_is_assignable_from (tc, cc))
break;
/*
* This happens when we have the following:
*
* Bar<K> where K : IFace
* Foo<T, U> where T : U where U : IFace
* ...
* Bar<T> <- T here satisfies K's constraint transitively through U's constraint
*
*/
if (mono_type_is_generic_argument (&cc->byval_arg)) {
MonoGenericParam *other_candidate = verifier_get_generic_param_from_type (ctx, &cc->byval_arg);
if (mono_generic_param_is_constraint_compatible (ctx, target, other_candidate, cc, context)) {
break;
}
}
}
if (!*candidate_class)
return FALSE;
}
}
return TRUE;
}
static MonoGenericParam*
verifier_get_generic_param_from_type (VerifyContext *ctx, MonoType *type)
{
MonoGenericContainer *gc;
MonoMethod *method = ctx->method;
int num;
num = mono_type_get_generic_param_num (type);
if (type->type == MONO_TYPE_VAR) {
MonoClass *gtd = method->klass;
if (gtd->generic_class)
gtd = gtd->generic_class->container_class;
gc = gtd->generic_container;
} else { //MVAR
MonoMethod *gmd = method;
if (method->is_inflated)
gmd = ((MonoMethodInflated*)method)->declaring;
gc = mono_method_get_generic_container (gmd);
}
if (!gc)
return NULL;
return mono_generic_container_get_param (gc, num);
}
/*
* Verify if @type is valid for the given @ctx verification context.
* This function checks for VAR and MVAR references that are invalid under the current generic context.
* This means the referenced generic parameter must exist and be in range for the context.
*/
static gboolean
is_valid_type_in_context (VerifyContext *ctx, MonoType *type)
{
return mono_type_is_valid_type_in_context (type, ctx->generic_context);
}
static gboolean
is_valid_generic_instantiation_in_context (VerifyContext *ctx, MonoGenericInst *ginst)
{
int i;
for (i = 0; i < ginst->type_argc; ++i) {
MonoType *type = ginst->type_argv [i];
if (!is_valid_type_in_context (ctx, type))
return FALSE;
}
return TRUE;
}
static gboolean
generic_arguments_respect_constraints (VerifyContext *ctx, MonoGenericContainer *gc, MonoGenericContext *context, MonoGenericInst *ginst)
{
int i;
for (i = 0; i < ginst->type_argc; ++i) {
MonoType *type = ginst->type_argv [i];
MonoGenericParam *target = mono_generic_container_get_param (gc, i);
MonoGenericParam *candidate;
MonoClass *candidate_class;
if (!mono_type_is_generic_argument (type))
continue;
if (!is_valid_type_in_context (ctx, type))
return FALSE;
candidate = verifier_get_generic_param_from_type (ctx, type);
candidate_class = mono_class_from_mono_type (type);
if (!mono_generic_param_is_constraint_compatible (ctx, target, candidate, candidate_class, context))
return FALSE;
}
return TRUE;
}
static gboolean
mono_method_repect_method_constraints (VerifyContext *ctx, MonoMethod *method)
{
MonoMethodInflated *gmethod = (MonoMethodInflated *)method;
MonoGenericInst *ginst = gmethod->context.method_inst;
MonoGenericContainer *gc = mono_method_get_generic_container (gmethod->declaring);
return !gc || generic_arguments_respect_constraints (ctx, gc, &gmethod->context, ginst);
}
static gboolean
mono_class_repect_method_constraints (VerifyContext *ctx, MonoClass *klass)
{
MonoGenericClass *gklass = klass->generic_class;
MonoGenericInst *ginst = gklass->context.class_inst;
MonoGenericContainer *gc = gklass->container_class->generic_container;
return !gc || generic_arguments_respect_constraints (ctx, gc, &gklass->context, ginst);
}
static gboolean
mono_method_is_valid_generic_instantiation (VerifyContext *ctx, MonoMethod *method)
{
MonoMethodInflated *gmethod = (MonoMethodInflated *)method;
MonoGenericInst *ginst = gmethod->context.method_inst;
MonoGenericContainer *gc = mono_method_get_generic_container (gmethod->declaring);
if (!gc) /*non-generic inflated method - it's part of a generic type */
return TRUE;
if (ctx && !is_valid_generic_instantiation_in_context (ctx, ginst))
return FALSE;
return is_valid_generic_instantiation (gc, &gmethod->context, ginst);
}
static gboolean
mono_class_is_valid_generic_instantiation (VerifyContext *ctx, MonoClass *klass)
{
MonoGenericClass *gklass = klass->generic_class;
MonoGenericInst *ginst = gklass->context.class_inst;
MonoGenericContainer *gc = gklass->container_class->generic_container;
if (ctx && !is_valid_generic_instantiation_in_context (ctx, ginst))
return FALSE;
return is_valid_generic_instantiation (gc, &gklass->context, ginst);
}
static gboolean
mono_type_is_valid_in_context (VerifyContext *ctx, MonoType *type)
{
MonoClass *klass;
if (type == NULL) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid null type at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return FALSE;
}
if (!is_valid_type_in_context (ctx, type)) {
char *str = mono_type_full_name (type);
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic type (%s%s) (argument out of range or %s is not generic) at 0x%04x",
type->type == MONO_TYPE_VAR ? "!" : "!!",
str,
type->type == MONO_TYPE_VAR ? "class" : "method",
ctx->ip_offset),
MONO_EXCEPTION_BAD_IMAGE);
g_free (str);
return FALSE;
}
klass = mono_class_from_mono_type (type);
mono_class_init (klass);
if (mono_loader_get_last_error () || klass->exception_type != MONO_EXCEPTION_NONE) {
if (klass->generic_class && !mono_class_is_valid_generic_instantiation (NULL, klass))
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic instantiation of type %s.%s at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
else
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Could not load type %s.%s at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
mono_loader_clear_error ();
return FALSE;
}
if (klass->generic_class && klass->generic_class->container_class->exception_type != MONO_EXCEPTION_NONE) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Could not load type %s.%s at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
return FALSE;
}
if (!klass->generic_class)
return TRUE;
if (!mono_class_is_valid_generic_instantiation (ctx, klass)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic type instantiation of type %s.%s at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
return FALSE;
}
if (!mono_class_repect_method_constraints (ctx, klass)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic type instantiation of type %s.%s (generic args don't respect target's constraints) at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
return FALSE;
}
return TRUE;
}
static verify_result_t
mono_method_is_valid_in_context (VerifyContext *ctx, MonoMethod *method)
{
if (!mono_type_is_valid_in_context (ctx, &method->klass->byval_arg))
return RESULT_INVALID;
if (!method->is_inflated)
return RESULT_VALID;
if (!mono_method_is_valid_generic_instantiation (ctx, method)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic method instantiation of method %s.%s::%s at 0x%04x", method->klass->name_space, method->klass->name, method->name, ctx->ip_offset), MONO_EXCEPTION_UNVERIFIABLE_IL);
return RESULT_INVALID;
}
if (!mono_method_repect_method_constraints (ctx, method)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid generic method instantiation of method %s.%s::%s (generic args don't respect target's constraints) at 0x%04x", method->klass->name_space, method->klass->name, method->name, ctx->ip_offset));
return RESULT_UNVERIFIABLE;
}
return RESULT_VALID;
}
static MonoClassField*
verifier_load_field (VerifyContext *ctx, int token, MonoClass **out_klass, const char *opcode) {
MonoClassField *field;
MonoClass *klass = NULL;
if (!IS_FIELD_DEF_OR_REF (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid field token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
field = mono_field_from_token (ctx->image, token, &klass, ctx->generic_context);
if (!field || !field->parent || !klass || mono_loader_get_last_error ()) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Cannot load field from token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
mono_loader_clear_error ();
return NULL;
}
if (!mono_type_is_valid_in_context (ctx, &klass->byval_arg))
return NULL;
if (mono_field_get_flags (field) & FIELD_ATTRIBUTE_LITERAL) {
char *type_name = mono_type_get_full_name (field->parent);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Cannot reference literal field %s::%s at 0x%04x", type_name, field->name, ctx->ip_offset));
g_free (type_name);
return NULL;
}
*out_klass = klass;
return field;
}
static MonoMethod*
verifier_load_method (VerifyContext *ctx, int token, const char *opcode) {
MonoMethod* method;
if (!IS_METHOD_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid method token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
method = mono_get_method_full (ctx->image, token, NULL, ctx->generic_context);
if (!method || mono_loader_get_last_error ()) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Cannot load method from token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
mono_loader_clear_error ();
return NULL;
}
if (mono_method_is_valid_in_context (ctx, method) == RESULT_INVALID)
return NULL;
return method;
}
static MonoType*
verifier_load_type (VerifyContext *ctx, int token, const char *opcode) {
MonoType* type;
if (!IS_TYPE_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid type token 0x%08x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
type = mono_type_get_full (ctx->image, token, ctx->generic_context);
if (!type || mono_loader_get_last_error ()) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Cannot load type from token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
mono_loader_clear_error ();
return NULL;
}
if (!mono_type_is_valid_in_context (ctx, type))
return NULL;
return type;
}
/* stack_slot_get_type:
*
* Returns the stack type of @value. This value includes POINTER_MASK.
*
* Use this function for checks that must account for a managed pointer.
*/
static gint32
stack_slot_get_type (ILStackDesc *value)
{
return value->stype & RAW_TYPE_MASK;
}
/* stack_slot_get_underlying_type:
*
* Returns the stack type of @value. This value does not include POINTER_MASK.
*
* Use this function in cases where the fact that the value could be a managed pointer is
* irrelevant. For example, a field load doesn't care whether the value on the stack is a managed pointer.
*/
static gint32
stack_slot_get_underlying_type (ILStackDesc *value)
{
return value->stype & TYPE_MASK;
}
/* stack_slot_is_managed_pointer:
*
* Returns TRUE if @value is a managed pointer.
*/
static gboolean
stack_slot_is_managed_pointer (ILStackDesc *value)
{
return (value->stype & POINTER_MASK) == POINTER_MASK;
}
/* stack_slot_is_managed_mutability_pointer:
*
* Returns TRUE if @value is a managed mutability pointer.
*/
static G_GNUC_UNUSED gboolean
stack_slot_is_managed_mutability_pointer (ILStackDesc *value)
{
return (value->stype & CMMP_MASK) == CMMP_MASK;
}
/* stack_slot_is_null_literal:
*
* Returns TRUE if @value is the null literal.
*/
static gboolean
stack_slot_is_null_literal (ILStackDesc *value)
{
return (value->stype & NULL_LITERAL_MASK) == NULL_LITERAL_MASK;
}
/* stack_slot_is_this_pointer:
*
* Returns TRUE if @value is the this pointer.
*/
static gboolean
stack_slot_is_this_pointer (ILStackDesc *value)
{
return (value->stype & THIS_POINTER_MASK) == THIS_POINTER_MASK;
}
/* stack_slot_is_boxed_value:
*
* Returns TRUE if @value is a boxed value.
*/
static gboolean
stack_slot_is_boxed_value (ILStackDesc *value)
{
return (value->stype & BOXED_MASK) == BOXED_MASK;
}
static const char *
stack_slot_get_name (ILStackDesc *value)
{
return type_names [value->stype & TYPE_MASK];
}
#define APPEND_WITH_PREDICATE(PRED,NAME) do {\
if (PRED (value)) { \
if (!first) \
g_string_append (str, ", "); \
g_string_append (str, NAME); \
first = FALSE; \
} } while (0)
static char*
stack_slot_stack_type_full_name (ILStackDesc *value)
{
GString *str = g_string_new ("");
char *result;
gboolean has_pred = FALSE, first = TRUE;
if ((value->stype & TYPE_MASK) != value->stype) {
g_string_append(str, "[");
APPEND_WITH_PREDICATE (stack_slot_is_this_pointer, "this");
APPEND_WITH_PREDICATE (stack_slot_is_boxed_value, "boxed");
APPEND_WITH_PREDICATE (stack_slot_is_null_literal, "null");
APPEND_WITH_PREDICATE (stack_slot_is_managed_mutability_pointer, "cmmp");
APPEND_WITH_PREDICATE (stack_slot_is_managed_pointer, "mp");
has_pred = TRUE;
}
if (mono_type_is_generic_argument (value->type) && !stack_slot_is_boxed_value (value)) {
if (!has_pred)
g_string_append(str, "[");
if (!first)
g_string_append (str, ", ");
g_string_append (str, "unboxed");
has_pred = TRUE;
}
if (has_pred)
g_string_append(str, "] ");
g_string_append (str, stack_slot_get_name (value));
result = str->str;
g_string_free (str, FALSE);
return result;
}
static char*
stack_slot_full_name (ILStackDesc *value)
{
char *type_name = mono_type_full_name (value->type);
char *stack_name = stack_slot_stack_type_full_name (value);
char *res = g_strdup_printf ("%s (%s)", type_name, stack_name);
g_free (type_name);
g_free (stack_name);
return res;
}
//////////////////////////////////////////////////////////////////
void
mono_free_verify_list (GSList *list)
{
MonoVerifyInfoExtended *info;
GSList *tmp;
for (tmp = list; tmp; tmp = tmp->next) {
info = tmp->data;
g_free (info->info.message);
g_free (info);
}
g_slist_free (list);
}
#define ADD_ERROR(list,msg) \
do { \
MonoVerifyInfoExtended *vinfo = g_new (MonoVerifyInfoExtended, 1); \
vinfo->info.status = MONO_VERIFY_ERROR; \
vinfo->info.message = (msg); \
(list) = g_slist_prepend ((list), vinfo); \
} while (0)
#define ADD_WARN(list,code,msg) \
do { \
MonoVerifyInfoExtended *vinfo = g_new (MonoVerifyInfoExtended, 1); \
vinfo->info.status = (code); \
vinfo->info.message = (msg); \
(list) = g_slist_prepend ((list), vinfo); \
} while (0)
#define ADD_INVALID(list,msg) \
do { \
MonoVerifyInfoExtended *vinfo = g_new (MonoVerifyInfoExtended, 1); \
vinfo->status = MONO_VERIFY_ERROR; \
vinfo->message = (msg); \
(list) = g_slist_prepend ((list), vinfo); \
/*G_BREAKPOINT ();*/ \
goto invalid_cil; \
} while (0)
#define CHECK_STACK_UNDERFLOW(num) \
do { \
if (cur_stack < (num)) \
ADD_INVALID (list, g_strdup_printf ("Stack underflow at 0x%04x (%d items instead of %d)", ip_offset, cur_stack, (num))); \
} while (0)
#define CHECK_STACK_OVERFLOW() \
do { \
if (cur_stack >= max_stack) \
ADD_INVALID (list, g_strdup_printf ("Maxstack exceeded at 0x%04x", ip_offset)); \
} while (0)
static int
in_any_block (MonoMethodHeader *header, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return 1;
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return 1;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return 1;
}
return 0;
}
/*
* in_any_exception_block:
*
* Returns TRUE if @offset is part of any exception clause (filter, handler, catch, finally or fault).
*/
static gboolean
in_any_exception_block (MonoMethodHeader *header, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return TRUE;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return TRUE;
}
return FALSE;
}
/*
* is_valid_branch_instruction:
*
* Verify if it's valid to perform a branch from @offset to @target.
* This should be used with br and brtrue/false.
* It returns 0 if valid, 1 for unverifiable and 2 for invalid.
* The major difference from other similar functions is that branching into a
* finally/fault block is invalid instead of just unverifiable.
*/
static int
is_valid_branch_instruction (MonoMethodHeader *header, guint offset, guint target)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
/*branching into a finally block is invalid*/
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY || clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) &&
!MONO_OFFSET_IN_HANDLER (clause, offset) &&
MONO_OFFSET_IN_HANDLER (clause, target))
return 2;
if (clause->try_offset != target && (MONO_OFFSET_IN_CLAUSE (clause, offset) ^ MONO_OFFSET_IN_CLAUSE (clause, target)))
return 1;
if (MONO_OFFSET_IN_HANDLER (clause, offset) ^ MONO_OFFSET_IN_HANDLER (clause, target))
return 1;
if (MONO_OFFSET_IN_FILTER (clause, offset) ^ MONO_OFFSET_IN_FILTER (clause, target))
return 1;
}
return 0;
}
/*
* is_valid_cmp_branch_instruction:
*
* Verify if it's valid to perform a branch from @offset to @target.
* This should be used with binary comparison branching instruction, like beq, bge and similars.
* It returns 0 if valid, 1 for unverifiable and 2 for invalid.
*
* The major differences from other similar functions are that most errors lead to invalid
* code and only branching out of finally, filter or fault clauses is unverifiable.
*/
static int
is_valid_cmp_branch_instruction (MonoMethodHeader *header, guint offset, guint target)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
/*branching out of a handler or finally*/
if (clause->flags != MONO_EXCEPTION_CLAUSE_NONE &&
MONO_OFFSET_IN_HANDLER (clause, offset) &&
!MONO_OFFSET_IN_HANDLER (clause, target))
return 1;
if (clause->try_offset != target && (MONO_OFFSET_IN_CLAUSE (clause, offset) ^ MONO_OFFSET_IN_CLAUSE (clause, target)))
return 2;
if (MONO_OFFSET_IN_HANDLER (clause, offset) ^ MONO_OFFSET_IN_HANDLER (clause, target))
return 2;
if (MONO_OFFSET_IN_FILTER (clause, offset) ^ MONO_OFFSET_IN_FILTER (clause, target))
return 2;
}
return 0;
}
/*
* A leave can't escape a finally block
*/
static int
is_correct_leave (MonoMethodHeader *header, guint offset, guint target)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY && MONO_OFFSET_IN_HANDLER (clause, offset) && !MONO_OFFSET_IN_HANDLER (clause, target))
return 0;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return 0;
}
return 1;
}
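/* Note that besides finally blocks, the check above also rejects a leave
 * whose source sits inside a filter expression. */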
/*
* A rethrow can't happen outside of a catch handler.
*/
static int
is_correct_rethrow (MonoMethodHeader *header, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return 1;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return 1;
}
return 0;
}
/*
* An endfinally can't happen outside of a finally/fault handler.
*/
static int
is_correct_endfinally (MonoMethodHeader *header, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_HANDLER (clause, offset) && (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT || clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY))
return 1;
}
return 0;
}
/*
* An endfilter can only happen inside a filter clause.
* In non-strict mode an endfilter is allowed inside the handler clause too.
*/
static MonoExceptionClause *
is_correct_endfilter (VerifyContext *ctx, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < ctx->header->num_clauses; ++i) {
clause = &ctx->header->clauses [i];
if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER)
continue;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return clause;
if (!IS_STRICT_MODE (ctx) && MONO_OFFSET_IN_HANDLER (clause, offset))
return clause;
}
return NULL;
}
/*
* A non-strict endfilter can happen inside a try block or any handler block.
*/
static int
is_unverifiable_endfilter (VerifyContext *ctx, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < ctx->header->num_clauses; ++i) {
clause = &ctx->header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return 1;
}
return 0;
}
static gboolean
is_valid_bool_arg (ILStackDesc *arg)
{
if (stack_slot_is_managed_pointer (arg) || stack_slot_is_boxed_value (arg) || stack_slot_is_null_literal (arg))
return TRUE;
switch (stack_slot_get_underlying_type (arg)) {
case TYPE_I4:
case TYPE_I8:
case TYPE_NATIVE_INT:
case TYPE_PTR:
return TRUE;
case TYPE_COMPLEX:
g_assert (arg->type);
switch (arg->type->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
return TRUE;
case MONO_TYPE_GENERICINST:
/*We need to check if the container class
* of the generic type is a valuetype, iow:
* is it a "class Foo<T>" or a "struct Foo<T>"?
*/
return !arg->type->data.generic_class->container_class->valuetype;
}
default:
return FALSE;
}
}
/*Type manipulation helper*/
/*Returns the byref version of the supplied MonoType*/
static MonoType*
mono_type_get_type_byref (MonoType *type)
{
if (type->byref)
return type;
return &mono_class_from_mono_type (type)->this_arg;
}
/*Returns the byval version of the supplied MonoType*/
static MonoType*
mono_type_get_type_byval (MonoType *type)
{
if (!type->byref)
return type;
return &mono_class_from_mono_type (type)->byval_arg;
}
static MonoType*
mono_type_from_stack_slot (ILStackDesc *slot)
{
if (stack_slot_is_managed_pointer (slot))
return mono_type_get_type_byref (slot->type);
return slot->type;
}
/*Stack manipulation code*/
static void
ensure_stack_size (ILCodeDesc *stack, int required)
{
int new_size = 8;
ILStackDesc *tmp;
if (required < stack->max_size)
return;
/* We don't have to worry about the exponential growth since stack_copy prunes unused space */
new_size = MAX (8, MAX (required, stack->max_size * 2));
g_assert (new_size >= stack->size);
g_assert (new_size >= required);
tmp = g_new0 (ILStackDesc, new_size);
MEM_ALLOC (sizeof (ILStackDesc) * new_size);
if (stack->stack) {
if (stack->size)
memcpy (tmp, stack->stack, stack->size * sizeof (ILStackDesc));
g_free (stack->stack);
MEM_FREE (sizeof (ILStackDesc) * stack->max_size);
}
stack->stack = tmp;
stack->max_size = new_size;
}
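/* Growth policy: capacity at least doubles on each resize, with a floor of 8
 * slots, and MEM_ALLOC/MEM_FREE keep the (optional) verifier statistics in
 * sync with the reallocation. */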
static void
stack_init (VerifyContext *ctx, ILCodeDesc *state)
{
if (state->flags & IL_CODE_FLAG_STACK_INITED)
return;
state->size = state->max_size = 0;
state->flags |= IL_CODE_FLAG_STACK_INITED;
}
static void
stack_copy (ILCodeDesc *to, ILCodeDesc *from)
{
ensure_stack_size (to, from->size);
to->size = from->size;
/*stack copy happens at merge points, which have small stacks*/
if (from->size)
memcpy (to->stack, from->stack, sizeof (ILStackDesc) * from->size);
}
static void
copy_stack_value (ILStackDesc *to, ILStackDesc *from)
{
to->stype = from->stype;
to->type = from->type;
to->method = from->method;
}
static int
check_underflow (VerifyContext *ctx, int size)
{
if (ctx->eval.size < size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Stack underflow, required %d, but have %d at 0x%04x", size, ctx->eval.size, ctx->ip_offset));
return 0;
}
return 1;
}
static int
check_overflow (VerifyContext *ctx)
{
if (ctx->eval.size >= ctx->max_stack) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method doesn't have stack-depth %d at 0x%04x", ctx->eval.size + 1, ctx->ip_offset));
return 0;
}
return 1;
}
/*This rejects PTR, FNPTR and TYPEDBYREF*/
static gboolean
check_unmanaged_pointer (VerifyContext *ctx, ILStackDesc *value)
{
if (stack_slot_get_type (value) == TYPE_PTR) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Unmanaged pointer is not a verifiable type at 0x%04x", ctx->ip_offset));
return 0;
}
return 1;
}
/*TODO verify if MONO_TYPE_TYPEDBYREF is not allowed here as well.*/
static gboolean
check_unverifiable_type (VerifyContext *ctx, MonoType *type)
{
if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Unmanaged pointer is not a verifiable type at 0x%04x", ctx->ip_offset));
return 0;
}
return 1;
}
static ILStackDesc *
stack_push (VerifyContext *ctx)
{
g_assert (ctx->eval.size < ctx->max_stack);
g_assert (ctx->eval.size <= ctx->eval.max_size);
ensure_stack_size (&ctx->eval, ctx->eval.size + 1);
return & ctx->eval.stack [ctx->eval.size++];
}
static ILStackDesc *
stack_push_val (VerifyContext *ctx, int stype, MonoType *type)
{
ILStackDesc *top = stack_push (ctx);
top->stype = stype;
top->type = type;
return top;
}
static ILStackDesc *
stack_pop (VerifyContext *ctx)
{
ILStackDesc *ret;
g_assert (ctx->eval.size > 0);
ret = ctx->eval.stack + --ctx->eval.size;
if ((ret->stype & UNINIT_THIS_MASK) == UNINIT_THIS_MASK)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Found use of uninitialized 'this ptr' ref at 0x%04x", ctx->ip_offset));
return ret;
}
/* This function allows safely popping an uninitialized this ptr from
* the eval stack without marking the method as unverifiable.
*/
static ILStackDesc *
stack_pop_safe (VerifyContext *ctx)
{
g_assert (ctx->eval.size > 0);
return ctx->eval.stack + --ctx->eval.size;
}
/*Positive number distance from stack top. [0] is stack top, [1] is the one below*/
static ILStackDesc*
stack_peek (VerifyContext *ctx, int distance)
{
g_assert (ctx->eval.size - distance > 0);
return ctx->eval.stack + (ctx->eval.size - 1 - distance);
}
static ILStackDesc *
stack_push_stack_val (VerifyContext *ctx, ILStackDesc *value)
{
ILStackDesc *top = stack_push (ctx);
copy_stack_value (top, value);
return top;
}
/* Returns the MonoType associated with the token, or NULL if it is invalid.
*
* A boxable type can be either a reference or value type, but cannot be a byref type or an unmanaged pointer.
*/
static MonoType*
get_boxable_mono_type (VerifyContext* ctx, int token, const char *opcode)
{
MonoType *type;
MonoClass *class;
if (!(type = verifier_load_type (ctx, token, opcode)))
return NULL;
if (type->byref && type->type != MONO_TYPE_TYPEDBYREF) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid use of byref type for %s at 0x%04x", opcode, ctx->ip_offset));
return NULL;
}
if (type->type == MONO_TYPE_VOID) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid use of void type for %s at 0x%04x", opcode, ctx->ip_offset));
return NULL;
}
if (type->type == MONO_TYPE_TYPEDBYREF)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid use of typedbyref for %s at 0x%04x", opcode, ctx->ip_offset));
if (!(class = mono_class_from_mono_type (type)))
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Could not retrieve type token for %s at 0x%04x", opcode, ctx->ip_offset));
if (class->generic_container && type->type != MONO_TYPE_GENERICINST)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use the generic type definition in a boxable type position for %s at 0x%04x", opcode, ctx->ip_offset));
check_unverifiable_type (ctx, type);
return type;
}
/*operation result tables */
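/* A minimal sketch of how these tables are presumably consulted when a binary
 * opcode is verified (the exact call sites are elsewhere in this file); the
 * indices are the TYPE_* codes of the two operands shifted down by one, since
 * TYPE_INV occupies slot 0 and the tables only cover TYPE_I4..TYPE_COMPLEX:
 *
 *   unsigned char res = table [stack_slot_get_underlying_type (a) - 1]
 *                             [stack_slot_get_underlying_type (b) - 1];
 *   if ((res & TYPE_MASK) == TYPE_INV)
 *           ... reject the operand pair ...
 *   else if (res & NON_VERIFIABLE_RESULT)
 *           ... accept, but mark the method unverifiable ...
 */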
static const unsigned char bin_op_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_R8, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char add_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_R8, TYPE_INV, TYPE_INV},
{TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char sub_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_R8, TYPE_INV, TYPE_INV},
{TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_NATIVE_INT | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char int_bin_op_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char shift_op_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_I8, TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char cmp_br_op [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_I4, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char cmp_br_eq_op [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_I4 | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_I4 | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_I4, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_I4},
};
static const unsigned char add_ovf_un_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char sub_ovf_un_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_NATIVE_INT | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char bin_ovf_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
#ifdef MONO_VERIFIER_DEBUG
/*debug helpers */
static void
dump_stack_value (ILStackDesc *value)
{
printf ("[(%x)(%x)", value->type->type, value->stype);
if (stack_slot_is_this_pointer (value))
printf ("[this] ");
if (stack_slot_is_boxed_value (value))
printf ("[boxed] ");
if (stack_slot_is_null_literal (value))
printf ("[null] ");
if (stack_slot_is_managed_mutability_pointer (value))
printf ("Controled Mutability MP: ");
if (stack_slot_is_managed_pointer (value))
printf ("Managed Pointer to: ");
switch (stack_slot_get_underlying_type (value)) {
case TYPE_INV:
printf ("invalid type]");
return;
case TYPE_I4:
printf ("int32]");
return;
case TYPE_I8:
printf ("int64]");
return;
case TYPE_NATIVE_INT:
printf ("native int]");
return;
case TYPE_R8:
printf ("float64]");
return;
case TYPE_PTR:
printf ("unmanaged pointer]");
return;
case TYPE_COMPLEX:
switch (value->type->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_VALUETYPE:
printf ("complex] (%s)", value->type->data.klass->name);
return;
case MONO_TYPE_STRING:
printf ("complex] (string)");
return;
case MONO_TYPE_OBJECT:
printf ("complex] (object)");
return;
case MONO_TYPE_SZARRAY:
printf ("complex] (%s [])", value->type->data.klass->name);
return;
case MONO_TYPE_ARRAY:
printf ("complex] (%s [%d %d %d])",
value->type->data.array->eklass->name,
value->type->data.array->rank,
value->type->data.array->numsizes,
value->type->data.array->numlobounds);
return;
case MONO_TYPE_GENERICINST:
printf ("complex] (inst of %s )", value->type->data.generic_class->container_class->name);
return;
case MONO_TYPE_VAR:
printf ("complex] (type generic param !%d - %s) ", value->type->data.generic_param->num, mono_generic_param_info (value->type->data.generic_param)->name);
return;
case MONO_TYPE_MVAR:
printf ("complex] (method generic param !!%d - %s) ", value->type->data.generic_param->num, mono_generic_param_info (value->type->data.generic_param)->name);
return;
default: {
//should be a boxed value
char * name = mono_type_full_name (value->type);
printf ("complex] %s", name);
g_free (name);
return;
}
}
default:
printf ("unknown stack %x type]\n", value->stype);
g_assert_not_reached ();
}
}
static void
dump_stack_state (ILCodeDesc *state)
{
int i;
printf ("(%d) ", state->size);
for (i = 0; i < state->size; ++i)
dump_stack_value (state->stack + i);
printf ("\n");
}
#endif
/*Returns TRUE if candidate array type can be assigned to target.
*Both parameters MUST be of type MONO_TYPE_ARRAY (target->type == MONO_TYPE_ARRAY)
*/
static gboolean
is_array_type_compatible (MonoType *target, MonoType *candidate)
{
MonoArrayType *left = target->data.array;
MonoArrayType *right = candidate->data.array;
g_assert (target->type == MONO_TYPE_ARRAY);
g_assert (candidate->type == MONO_TYPE_ARRAY);
if (left->rank != right->rank)
return FALSE;
return mono_class_is_assignable_from (left->eklass, right->eklass);
}
static int
get_stack_type (MonoType *type)
{
int mask = 0;
int type_kind = type->type;
if (type->byref)
mask = POINTER_MASK;
/*TODO handle CMMP_MASK */
handle_enum:
switch (type_kind) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return TYPE_I4 | mask;
case MONO_TYPE_I:
case MONO_TYPE_U:
return TYPE_NATIVE_INT | mask;
/* FIXME: the spec says that you cannot have a pointer to a method pointer; do we need to check this here? */
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
case MONO_TYPE_TYPEDBYREF:
return TYPE_PTR | mask;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return TYPE_COMPLEX | mask;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return TYPE_I8 | mask;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
return TYPE_R8 | mask;
case MONO_TYPE_GENERICINST:
case MONO_TYPE_VALUETYPE:
if (mono_type_is_enum_type (type)) {
type = mono_type_get_underlying_type_any (type);
if (!type)
return FALSE;
type_kind = type->type;
goto handle_enum;
} else {
return TYPE_COMPLEX | mask;
}
default:
return TYPE_INV;
}
}
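/* Examples of the mapping performed by get_stack_type():
 *   int32/bool/char        -> TYPE_I4
 *   native int (I/U)       -> TYPE_NATIVE_INT
 *   string/object/arrays   -> TYPE_COMPLEX
 *   int32& (byref)         -> TYPE_I4 | POINTER_MASK
 * Enums are mapped through their underlying type first. */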
/* convert MonoType to ILStackDesc format (stype) */
static gboolean
set_stack_value (VerifyContext *ctx, ILStackDesc *stack, MonoType *type, int take_addr)
{
int mask = 0;
int type_kind = type->type;
if (type->byref || take_addr)
mask = POINTER_MASK;
/* TODO handle CMMP_MASK */
handle_enum:
stack->type = type;
switch (type_kind) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
stack->stype = TYPE_I4 | mask;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
stack->stype = TYPE_NATIVE_INT | mask;
break;
/*FIXME: Do we need to check if it's a pointer to the method pointer? The spec says it's illegal to have that.*/
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
case MONO_TYPE_TYPEDBYREF:
stack->stype = TYPE_PTR | mask;
break;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
stack->stype = TYPE_COMPLEX | mask;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
stack->stype = TYPE_I8 | mask;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
stack->stype = TYPE_R8 | mask;
break;
case MONO_TYPE_GENERICINST:
case MONO_TYPE_VALUETYPE:
if (mono_type_is_enum_type (type)) {
MonoType *utype = mono_type_get_underlying_type_any (type);
if (!utype) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Could not resolve underlying type of %x at %d", type->type, ctx->ip_offset));
return FALSE;
}
type = utype;
type_kind = type->type;
goto handle_enum;
} else {
stack->stype = TYPE_COMPLEX | mask;
break;
}
default:
VERIFIER_DEBUG ( printf ("unknown type 0x%02x in eval stack type\n", type->type); );
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Illegal value set on stack 0x%02x at %d", type->type, ctx->ip_offset));
return FALSE;
}
return TRUE;
}
/*
* init_stack_with_value_at_exception_boundary:
*
* Initialize the stack and push a given type.
* The instruction is marked as being on the exception boundary.
*/
static void
init_stack_with_value_at_exception_boundary (VerifyContext *ctx, ILCodeDesc *code, MonoClass *klass)
{
MonoError error;
MonoType *type = mono_class_inflate_generic_type_checked (&klass->byval_arg, ctx->generic_context, &error);
if (!mono_error_ok (&error)) {
char *name = mono_type_get_full_name (klass);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid class %s used for exception", name));
g_free (name);
mono_error_cleanup (&error);
return;
}
if (!ctx->max_stack) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Stack overflow at 0x%04x", ctx->ip_offset));
return;
}
stack_init (ctx, code);
ensure_stack_size (code, 1);
set_stack_value (ctx, code->stack, type, FALSE);
ctx->exception_types = g_slist_prepend (ctx->exception_types, type);
code->size = 1;
code->flags |= IL_CODE_FLAG_WAS_TARGET;
if (mono_type_is_generic_argument (type))
code->stack->stype |= BOXED_MASK;
}
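/* The BOXED_MASK applied above for generic arguments reflects that the
 * incoming exception object is a (boxed) object reference even when the
 * catch clause names a type variable. */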
/*Verify if type 'candidate' can be stored in type 'target'.
*
* If strict, check the underlying types rather than the verification stack types.
*/
static gboolean
verify_type_compatibility_full (VerifyContext *ctx, MonoType *target, MonoType *candidate, gboolean strict)
{
#define IS_ONE_OF3(T, A, B, C) (T == A || T == B || T == C)
#define IS_ONE_OF2(T, A, B) (T == A || T == B)
MonoType *original_candidate = candidate;
VERIFIER_DEBUG ( printf ("checking type compatibility %s x %s strict %d\n", mono_type_full_name (target), mono_type_full_name (candidate), strict); );
/*only one is byref */
if (candidate->byref ^ target->byref) {
/* converting from native int to byref*/
if (get_stack_type (candidate) == TYPE_NATIVE_INT && target->byref) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("using byref native int at 0x%04x", ctx->ip_offset));
return TRUE;
}
return FALSE;
}
strict |= target->byref;
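/* Storing through a byref target requires an exact underlying-type match, so
* byref always forces strict checking. */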
/*From now on we don't care about byref anymore, so it's ok to discard it here*/
candidate = mono_type_get_underlying_type_any (candidate);
handle_enum:
switch (target->type) {
case MONO_TYPE_VOID:
return candidate->type == MONO_TYPE_VOID;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
if (strict)
return IS_ONE_OF3 (candidate->type, MONO_TYPE_I1, MONO_TYPE_U1, MONO_TYPE_BOOLEAN);
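/* Fall through: in non-strict mode 8-bit ints verify as I4 below. */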
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
if (strict)
return IS_ONE_OF3 (candidate->type, MONO_TYPE_I2, MONO_TYPE_U2, MONO_TYPE_CHAR);
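/* Fall through: in non-strict mode 16-bit ints also verify as I4 below. */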
case MONO_TYPE_I4:
case MONO_TYPE_U4: {
gboolean is_native_int = IS_ONE_OF2 (candidate->type, MONO_TYPE_I, MONO_TYPE_U);
gboolean is_int4 = IS_ONE_OF2 (candidate->type, MONO_TYPE_I4, MONO_TYPE_U4);
if (strict)
return is_native_int || is_int4;
return is_native_int || get_stack_type (candidate) == TYPE_I4;
}
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return IS_ONE_OF2 (candidate->type, MONO_TYPE_I8, MONO_TYPE_U8);
case MONO_TYPE_R4:
case MONO_TYPE_R8:
if (strict)
return candidate->type == target->type;
return IS_ONE_OF2 (candidate->type, MONO_TYPE_R4, MONO_TYPE_R8);
case MONO_TYPE_I:
case MONO_TYPE_U: {
gboolean is_native_int = IS_ONE_OF2 (candidate->type, MONO_TYPE_I, MONO_TYPE_U);
gboolean is_int4 = IS_ONE_OF2 (candidate->type, MONO_TYPE_I4, MONO_TYPE_U4);
if (strict)
return is_native_int || is_int4;
return is_native_int || get_stack_type (candidate) == TYPE_I4;
}
case MONO_TYPE_PTR:
if (candidate->type != MONO_TYPE_PTR)
return FALSE;
/* check the underlying type */
return verify_type_compatibility_full (ctx, target->data.type, candidate->data.type, TRUE);
case MONO_TYPE_FNPTR: {
MonoMethodSignature *left, *right;
if (candidate->type != MONO_TYPE_FNPTR)
return FALSE;
left = mono_type_get_signature (target);
right = mono_type_get_signature (candidate);
return mono_metadata_signature_equal (left, right) && left->call_convention == right->call_convention;
}
case MONO_TYPE_GENERICINST: {
MonoClass *target_klass;
MonoClass *candidate_klass;
if (mono_type_is_enum_type (target)) {
target = mono_type_get_underlying_type_any (target);
if (!target)
return FALSE;
goto handle_enum;
}
/*
* VAR / MVAR compatibility must be checked by verify_stack_type_compatibility
* to take boxing status into account.
*/
if (mono_type_is_generic_argument (original_candidate))
return FALSE;
target_klass = mono_class_from_mono_type (target);
candidate_klass = mono_class_from_mono_type (candidate);
if (mono_class_is_nullable (target_klass)) {
if (!mono_class_is_nullable (candidate_klass))
return FALSE;
return target_klass == candidate_klass;
}
return mono_class_is_assignable_from (target_klass, candidate_klass);
}
case MONO_TYPE_STRING:
return candidate->type == MONO_TYPE_STRING;
case MONO_TYPE_CLASS:
/*
* VAR / MVAR compatibility must be checked by verify_stack_type_compatibility
* to take boxing status into account.
*/
if (mono_type_is_generic_argument (original_candidate))
return FALSE;
if (candidate->type == MONO_TYPE_VALUETYPE)
return FALSE;
/* If candidate is an enum it should return true for System.Enum and supertypes.
* That's why we use the original type here and not the underlying type.
*/
return mono_class_is_assignable_from (target->data.klass, mono_class_from_mono_type (original_candidate));
case MONO_TYPE_OBJECT:
return MONO_TYPE_IS_REFERENCE (candidate);
case MONO_TYPE_SZARRAY: {
MonoClass *left;
MonoClass *right;
if (candidate->type != MONO_TYPE_SZARRAY)
return FALSE;
left = mono_class_from_mono_type (target);
right = mono_class_from_mono_type (candidate);
return mono_class_is_assignable_from (left, right);
}
case MONO_TYPE_ARRAY:
if (candidate->type != MONO_TYPE_ARRAY)
return FALSE;
return is_array_type_compatible (target, candidate);
case MONO_TYPE_TYPEDBYREF:
return candidate->type == MONO_TYPE_TYPEDBYREF;
case MONO_TYPE_VALUETYPE: {
MonoClass *target_klass;
MonoClass *candidate_klass;
if (candidate->type == MONO_TYPE_CLASS)
return FALSE;
target_klass = mono_class_from_mono_type (target);
candidate_klass = mono_class_from_mono_type (candidate);
if (target_klass == candidate_klass)
return TRUE;
if (mono_type_is_enum_type (target)) {
target = mono_type_get_underlying_type_any (target);
if (!target)
return FALSE;
goto handle_enum;
}
return FALSE;
}
case MONO_TYPE_VAR:
if (candidate->type != MONO_TYPE_VAR)
return FALSE;
return mono_type_get_generic_param_num (candidate) == mono_type_get_generic_param_num (target);
case MONO_TYPE_MVAR:
if (candidate->type != MONO_TYPE_MVAR)
return FALSE;
return mono_type_get_generic_param_num (candidate) == mono_type_get_generic_param_num (target);
default:
VERIFIER_DEBUG ( printf ("unknown store type %d\n", target->type); );
g_assert_not_reached ();
return FALSE;
}
return TRUE; /* unreachable: every case in the switch above returns */
#undef IS_ONE_OF3
#undef IS_ONE_OF2
}
static gboolean
verify_type_compatibility (VerifyContext *ctx, MonoType *target, MonoType *candidate)
{
return verify_type_compatibility_full (ctx, target, candidate, FALSE);
}
/*
* Returns the generic param bound to the context being verified.
*
*/
static MonoGenericParam*
get_generic_param (VerifyContext *ctx, MonoType *param)
{
guint16 param_num = mono_type_get_generic_param_num (param);
if (param->type == MONO_TYPE_VAR) {
if (!ctx->generic_context->class_inst || ctx->generic_context->class_inst->type_argc <= param_num) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid generic type argument %d", param_num));
return NULL;
}
return ctx->generic_context->class_inst->type_argv [param_num]->data.generic_param;
}
/*param must be an MVAR */
if (!ctx->generic_context->method_inst || ctx->generic_context->method_inst->type_argc <= param_num) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid generic method argument %d", param_num));
return NULL;
}
return ctx->generic_context->method_inst->type_argv [param_num]->data.generic_param;
}
static gboolean
recursive_boxed_constraint_type_check (VerifyContext *ctx, MonoType *type, MonoClass *constraint_class, int recursion_level)
{
MonoType *constraint_type = &constraint_class->byval_arg;
if (recursion_level <= 0)
return FALSE;
if (verify_type_compatibility_full (ctx, type, mono_type_get_type_byval (constraint_type), FALSE))
return TRUE;
if (mono_type_is_generic_argument (constraint_type)) {
MonoGenericParam *param = get_generic_param (ctx, constraint_type);
MonoClass **class;
if (!param)
return FALSE;
for (class = mono_generic_param_info (param)->constraints; class && *class; ++class) {
if (recursive_boxed_constraint_type_check (ctx, type, *class, recursion_level - 1))
return TRUE;
}
}
return FALSE;
}
/*
* is_compatible_boxed_valuetype:
*
* Returns TRUE if @candidate / @stack is a valid boxed valuetype.
*
* @type The source type. It is tested to be of the proper type.
* @candidate type of the boxed valuetype.
* @stack stack slot of the boxed valuetype, separate from @candidate since one could be changed before calling this function
* @strict if TRUE candidate must be boxed compatible to the target type
*
*/
static gboolean
is_compatible_boxed_valuetype (VerifyContext *ctx, MonoType *type, MonoType *candidate, ILStackDesc *stack, gboolean strict)
{
if (!stack_slot_is_boxed_value (stack))
return FALSE;
if (type->byref || candidate->byref)
return FALSE;
if (mono_type_is_generic_argument (candidate)) {
MonoGenericParam *param = get_generic_param (ctx, candidate);
MonoClass **class;
if (!param)
return FALSE;
for (class = mono_generic_param_info (param)->constraints; class && *class; ++class) {
/*256 should be enough since there can't be more than 255 generic arguments.*/
if (recursive_boxed_constraint_type_check (ctx, type, *class, 256))
return TRUE;
}
}
if (mono_type_is_generic_argument (type))
return FALSE;
if (!strict)
return TRUE;
return MONO_TYPE_IS_REFERENCE (type) && mono_class_is_assignable_from (mono_class_from_mono_type (type), mono_class_from_mono_type (candidate));
}
static int
verify_stack_type_compatibility_full (VerifyContext *ctx, MonoType *type, ILStackDesc *stack, gboolean drop_byref, gboolean valuetype_must_be_boxed)
{
MonoType *candidate = mono_type_from_stack_slot (stack);
if (MONO_TYPE_IS_REFERENCE (type) && !type->byref && stack_slot_is_null_literal (stack))
return TRUE;
if (is_compatible_boxed_valuetype (ctx, type, candidate, stack, TRUE))
return TRUE;
if (valuetype_must_be_boxed && !stack_slot_is_boxed_value (stack) && !MONO_TYPE_IS_REFERENCE (candidate))
return FALSE;
if (!valuetype_must_be_boxed && stack_slot_is_boxed_value (stack))
return FALSE;
if (drop_byref)
return verify_type_compatibility_full (ctx, type, mono_type_get_type_byval (candidate), FALSE);
return verify_type_compatibility_full (ctx, type, candidate, FALSE);
}
static int
verify_stack_type_compatibility (VerifyContext *ctx, MonoType *type, ILStackDesc *stack)
{
return verify_stack_type_compatibility_full (ctx, type, stack, FALSE, FALSE);
}
static gboolean
mono_delegate_type_equal (MonoType *target, MonoType *candidate)
{
if (candidate->byref ^ target->byref)
return FALSE;
switch (target->type) {
case MONO_TYPE_VOID:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_STRING:
case MONO_TYPE_TYPEDBYREF:
return candidate->type == target->type;
case MONO_TYPE_PTR:
return mono_delegate_type_equal (target->data.type, candidate->data.type);
case MONO_TYPE_FNPTR:
if (candidate->type != MONO_TYPE_FNPTR)
return FALSE;
return mono_delegate_signature_equal (mono_type_get_signature (target), mono_type_get_signature (candidate), FALSE);
case MONO_TYPE_GENERICINST: {
MonoClass *target_klass;
MonoClass *candidate_klass;
target_klass = mono_class_from_mono_type (target);
candidate_klass = mono_class_from_mono_type (candidate);
/*FIXME handle nullables and enum*/
return mono_class_is_assignable_from (target_klass, candidate_klass);
}
case MONO_TYPE_OBJECT:
return MONO_TYPE_IS_REFERENCE (candidate);
case MONO_TYPE_CLASS:
return mono_class_is_assignable_from (target->data.klass, mono_class_from_mono_type (candidate));
case MONO_TYPE_SZARRAY:
if (candidate->type != MONO_TYPE_SZARRAY)
return FALSE;
return mono_class_is_assignable_from (mono_class_from_mono_type (target)->element_class, mono_class_from_mono_type (candidate)->element_class);
case MONO_TYPE_ARRAY:
if (candidate->type != MONO_TYPE_ARRAY)
return FALSE;
return is_array_type_compatible (target, candidate);
case MONO_TYPE_VALUETYPE:
/*FIXME handle nullables and enum*/
return mono_class_from_mono_type (candidate) == mono_class_from_mono_type (target);
case MONO_TYPE_VAR:
return candidate->type == MONO_TYPE_VAR && mono_type_get_generic_param_num (target) == mono_type_get_generic_param_num (candidate);
case MONO_TYPE_MVAR:
return candidate->type == MONO_TYPE_MVAR && mono_type_get_generic_param_num (target) == mono_type_get_generic_param_num (candidate);
default:
VERIFIER_DEBUG ( printf ("Unknown type %d. Implement me!\n", target->type); );
g_assert_not_reached ();
return FALSE;
}
}
static gboolean
mono_delegate_param_equal (MonoType *delegate, MonoType *method)
{
if (mono_metadata_type_equal_full (delegate, method, TRUE))
return TRUE;
return mono_delegate_type_equal (method, delegate);
}
static gboolean
mono_delegate_ret_equal (MonoType *delegate, MonoType *method)
{
if (mono_metadata_type_equal_full (delegate, method, TRUE))
return TRUE;
return mono_delegate_type_equal (delegate, method);
}
/*
* mono_delegate_signature_equal:
*
* Compare two signatures in the way expected by delegates.
*
* This function exists only because it must ignore the 'has_this' part of the signature.
*
* FIXME can this function be eliminated and proper metadata functionality be used?
*/
static gboolean
mono_delegate_signature_equal (MonoMethodSignature *delegate_sig, MonoMethodSignature *method_sig, gboolean is_static_ldftn)
{
int i;
int method_offset = is_static_ldftn ? 1 : 0;
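/* For a closed static delegate (ldftn over a static method with a bound first
* argument) delegate parameter i maps to method parameter i + 1. */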
if (delegate_sig->param_count + method_offset != method_sig->param_count)
return FALSE;
if (delegate_sig->call_convention != method_sig->call_convention)
return FALSE;
for (i = 0; i < delegate_sig->param_count; i++) {
MonoType *p1 = delegate_sig->params [i];
MonoType *p2 = method_sig->params [i + method_offset];
if (!mono_delegate_param_equal (p1, p2))
return FALSE;
}
if (!mono_delegate_ret_equal (delegate_sig->ret, method_sig->ret))
return FALSE;
return TRUE;
}
gboolean
mono_verifier_is_signature_compatible (MonoMethodSignature *target, MonoMethodSignature *candidate)
{
return mono_delegate_signature_equal (target, candidate, FALSE);
}
/*
* verify_ldftn_delegate:
*
* Verify properties of ldftn based delegates.
*/
static void
verify_ldftn_delegate (VerifyContext *ctx, MonoClass *delegate, ILStackDesc *value, ILStackDesc *funptr)
{
MonoMethod *method = funptr->method;
/*ldftn on non-final virtuals is only allowed if the method is not static,
* the object is the this arg (comes from a ldarg.0), and there is no starg.0.
* This rule doesn't apply if the object on the stack is a boxed valuetype.
*/
if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !(method->flags & METHOD_ATTRIBUTE_FINAL) && !(method->klass->flags & TYPE_ATTRIBUTE_SEALED) && !stack_slot_is_boxed_value (value)) {
/*A starg.0 must not happen; we fail here only in fail-fast mode to avoid double error reports*/
if (IS_FAIL_FAST_MODE (ctx) && ctx->has_this_store)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid ldftn with virtual function in method with stdarg 0 at 0x%04x", ctx->ip_offset));
/*current method must not be static*/
if (ctx->method->flags & METHOD_ATTRIBUTE_STATIC)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid ldftn with virtual function at 0x%04x", ctx->ip_offset));
/*value is the this pointer, loaded using ldarg.0 */
if (!stack_slot_is_this_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid object argument, it is not the this pointer, to ldftn with virtual method at 0x%04x", ctx->ip_offset));
ctx->code [ctx->ip_offset].flags |= IL_CODE_LDFTN_DELEGATE_NONFINAL_VIRTUAL;
}
}
/*
* verify_delegate_compatibility:
*
* Verify delegate creation sequence.
*
*/
static void
verify_delegate_compatibility (VerifyContext *ctx, MonoClass *delegate, ILStackDesc *value, ILStackDesc *funptr)
{
#define IS_VALID_OPCODE(offset, opcode) (ip [ip_offset - offset] == opcode && (ctx->code [ip_offset - offset].flags & IL_CODE_FLAG_SEEN))
#define IS_LOAD_FUN_PTR(kind) (IS_VALID_OPCODE (6, CEE_PREFIX1) && ip [ip_offset - 5] == kind)
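/* ldftn/ldvirtftn are 2-byte opcodes (0xFE prefix) followed by a 4-byte token,
* so the check looks 6 bytes back for the prefix and 5 for the second byte. */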
MonoMethod *invoke, *method;
const guint8 *ip = ctx->header->code;
guint32 ip_offset = ctx->ip_offset;
gboolean is_static_ldftn = FALSE, is_first_arg_bound = FALSE;
if (stack_slot_get_type (funptr) != TYPE_PTR || !funptr->method) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid function pointer parameter for delegate constructor at 0x%04x", ctx->ip_offset));
return;
}
invoke = mono_get_delegate_invoke (delegate);
method = funptr->method;
if (!method || !mono_method_signature (method)) {
char *name = mono_type_get_full_name (delegate);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid method on stack to create delegate %s construction at 0x%04x", name, ctx->ip_offset));
g_free (name);
return;
}
if (!invoke || !mono_method_signature (invoke)) {
char *name = mono_type_get_full_name (delegate);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Delegate type %s with bad Invoke method at 0x%04x", name, ctx->ip_offset));
g_free (name);
return;
}
is_static_ldftn = (ip_offset > 5 && IS_LOAD_FUN_PTR (CEE_LDFTN)) && method->flags & METHOD_ATTRIBUTE_STATIC;
if (is_static_ldftn)
is_first_arg_bound = mono_method_signature (invoke)->param_count + 1 == mono_method_signature (method)->param_count;
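/* One extra parameter on the target method means the stack value is bound as
* the method's first argument (a closed static delegate). */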
if (!mono_delegate_signature_equal (mono_method_signature (invoke), mono_method_signature (method), is_first_arg_bound)) {
char *fun_sig = mono_signature_get_desc (mono_method_signature (method), FALSE);
char *invoke_sig = mono_signature_get_desc (mono_method_signature (invoke), FALSE);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Function pointer signature '%s' doesn't match delegate's signature '%s' at 0x%04x", fun_sig, invoke_sig, ctx->ip_offset));
g_free (fun_sig);
g_free (invoke_sig);
}
/*
* Delegate code sequences:
* [-6] ldftn token
* newobj ...
*
*
* [-7] dup
* [-6] ldvirtftn token
* newobj ...
*
* ldftn sequence:*/
if (ip_offset > 5 && IS_LOAD_FUN_PTR (CEE_LDFTN)) {
verify_ldftn_delegate (ctx, delegate, value, funptr);
} else if (ip_offset > 6 && IS_VALID_OPCODE (7, CEE_DUP) && IS_LOAD_FUN_PTR (CEE_LDVIRTFTN)) {
ctx->code [ip_offset - 6].flags |= IL_CODE_DELEGATE_SEQUENCE;
} else {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid code sequence for delegate creation at 0x%04x", ctx->ip_offset));
}
ctx->code [ip_offset].flags |= IL_CODE_DELEGATE_SEQUENCE;
//general tests
if (is_first_arg_bound) {
if (mono_method_signature (method)->param_count == 0 || !verify_stack_type_compatibility_full (ctx, mono_method_signature (method)->params [0], value, FALSE, TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("This object not compatible with function pointer for delegate creation at 0x%04x", ctx->ip_offset));
} else {
if (method->flags & METHOD_ATTRIBUTE_STATIC) {
if (!stack_slot_is_null_literal (value)) /* is_first_arg_bound is always FALSE in this branch */
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Non-null this args used with static function for delegate creation at 0x%04x", ctx->ip_offset));
} else {
if (!verify_stack_type_compatibility_full (ctx, &method->klass->byval_arg, value, FALSE, TRUE) && !stack_slot_is_null_literal (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("This object not compatible with function pointer for delegate creation at 0x%04x", ctx->ip_offset));
}
}
if (stack_slot_get_type (value) != TYPE_COMPLEX)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid first parameter for delegate creation at 0x%04x", ctx->ip_offset));
#undef IS_VALID_OPCODE
#undef IS_LOAD_FUN_PTR
}
/* Implement the opcode checks. */
static void
push_arg (VerifyContext *ctx, unsigned int arg, int take_addr)
{
ILStackDesc *top;
if (arg >= ctx->max_args) {
if (take_addr)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method doesn't have argument %d", arg + 1));
else {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Method doesn't have argument %d", arg + 1));
if (check_overflow (ctx)) //FIXME: what sane value could we ever push?
stack_push_val (ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
}
} else if (check_overflow (ctx)) {
/*We must let the value be pushed, otherwise we would get an underflow error*/
check_unverifiable_type (ctx, ctx->params [arg]);
if (ctx->params [arg]->byref && take_addr)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("ByRef of ByRef at 0x%04x", ctx->ip_offset));
top = stack_push (ctx);
if (!set_stack_value (ctx, top, ctx->params [arg], take_addr))
return;
if (arg == 0 && !(ctx->method->flags & METHOD_ATTRIBUTE_STATIC)) {
if (take_addr)
ctx->has_this_store = TRUE;
else
top->stype |= THIS_POINTER_MASK;
if (mono_method_is_constructor (ctx->method) && !ctx->super_ctor_called && !ctx->method->klass->valuetype)
top->stype |= UNINIT_THIS_MASK;
}
}
}
static void
push_local (VerifyContext *ctx, guint32 arg, int take_addr)
{
if (arg >= ctx->num_locals) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method doesn't have local %d", arg + 1));
} else if (check_overflow (ctx)) {
/*We must let the value be pushed, otherwise we would get an underflow error*/
check_unverifiable_type (ctx, ctx->locals [arg]);
if (ctx->locals [arg]->byref && take_addr)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("ByRef of ByRef at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), ctx->locals [arg], take_addr);
}
}
static void
store_arg (VerifyContext *ctx, guint32 arg)
{
ILStackDesc *value;
if (arg >= ctx->max_args) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Method doesn't have argument %d at 0x%04x", arg + 1, ctx->ip_offset));
if (check_underflow (ctx, 1))
stack_pop (ctx);
return;
}
if (check_underflow (ctx, 1)) {
value = stack_pop (ctx);
if (!verify_stack_type_compatibility (ctx, ctx->params [arg], value)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible type %s in argument store at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
}
}
if (arg == 0 && !(ctx->method->flags & METHOD_ATTRIBUTE_STATIC))
ctx->has_this_store = 1;
}
static void
store_local (VerifyContext *ctx, guint32 arg)
{
ILStackDesc *value;
if (arg >= ctx->num_locals) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method doesn't have local var %d at 0x%04x", arg + 1, ctx->ip_offset));
return;
}
/*TODO verify definite assignment */
if (check_underflow (ctx, 1)) {
value = stack_pop (ctx);
if (!verify_stack_type_compatibility (ctx, ctx->locals [arg], value)) {
char *expected = mono_type_full_name (ctx->locals [arg]);
char *found = stack_slot_full_name (value);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible type '%s' on stack cannot be stored to local %d with type '%s' at 0x%04x",
found,
arg,
expected,
ctx->ip_offset));
g_free (expected);
g_free (found);
}
}
}
/*FIXME add and sub need special care here*/
static void
do_binop (VerifyContext *ctx, unsigned int opcode, const unsigned char table [TYPE_MAX][TYPE_MAX])
{
ILStackDesc *a, *b, *top;
int idxa, idxb, complexMerge = 0;
unsigned char res;
if (!check_underflow (ctx, 2))
return;
b = stack_pop (ctx);
a = stack_pop (ctx);
idxa = stack_slot_get_underlying_type (a);
if (stack_slot_is_managed_pointer (a)) {
idxa = TYPE_PTR;
complexMerge = 1;
}
idxb = stack_slot_get_underlying_type (b);
if (stack_slot_is_managed_pointer (b)) {
idxb = TYPE_PTR;
complexMerge = 2;
}
--idxa;
--idxb;
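/* The TYPE_* constants seem to start at 1 while the lookup tables are 0-based,
* hence the decrement before indexing (an assumption inferred from this code). */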
res = table [idxa][idxb];
VERIFIER_DEBUG ( printf ("binop res %d\n", res); );
VERIFIER_DEBUG ( printf ("idxa %d idxb %d\n", idxa, idxb); );
top = stack_push (ctx);
if (res == TYPE_INV) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Binary instruction applyed to ill formed stack (%s x %s)", stack_slot_get_name (a), stack_slot_get_name (b)));
copy_stack_value (top, a);
return;
}
if (res & NON_VERIFIABLE_RESULT) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Binary instruction is not verifiable (%s x %s)", stack_slot_get_name (a), stack_slot_get_name (b)));
res = res & ~NON_VERIFIABLE_RESULT;
}
if (complexMerge && res == TYPE_PTR) {
if (complexMerge == 1)
copy_stack_value (top, a);
else if (complexMerge == 2)
copy_stack_value (top, b);
/*
* There is no need to merge the type of two pointers.
* The only valid operation is subtraction, which returns a native
* int as result and can be used with any 2 pointer kinds.
* This is valid according to Partition III 1.1.4
*/
} else
top->stype = res;
}
static void
do_boolean_branch_op (VerifyContext *ctx, int delta)
{
int target = ctx->ip_offset + delta;
ILStackDesc *top;
VERIFIER_DEBUG ( printf ("boolean branch offset %d delta %d target %d\n", ctx->ip_offset, delta, target); );
if (target < 0 || target >= ctx->code_size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Boolean branch target out of code at 0x%04x", ctx->ip_offset));
return;
}
switch (is_valid_branch_instruction (ctx->header, ctx->ip_offset, target)) {
case 1:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
break;
case 2:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
return;
}
ctx->target = target;
if (!check_underflow (ctx, 1))
return;
top = stack_pop (ctx);
if (!is_valid_bool_arg (top))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Argument type %s not valid for brtrue/brfalse at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
check_unmanaged_pointer (ctx, top);
}
static gboolean
stack_slot_is_complex_type_not_reference_type (ILStackDesc *slot)
{
return stack_slot_get_type (slot) == TYPE_COMPLEX && !MONO_TYPE_IS_REFERENCE (slot->type) && !stack_slot_is_boxed_value (slot);
}
static void
do_branch_op (VerifyContext *ctx, signed int delta, const unsigned char table [TYPE_MAX][TYPE_MAX])
{
ILStackDesc *a, *b;
int idxa, idxb;
unsigned char res;
int target = ctx->ip_offset + delta;
VERIFIER_DEBUG ( printf ("branch offset %d delta %d target %d\n", ctx->ip_offset, delta, target); );
if (target < 0 || target >= ctx->code_size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target out of code at 0x%04x", ctx->ip_offset));
return;
}
switch (is_valid_cmp_branch_instruction (ctx->header, ctx->ip_offset, target)) {
case 1: /*FIXME use constants and not magic numbers.*/
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
break;
case 2:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
return;
}
ctx->target = target;
if (!check_underflow (ctx, 2))
return;
b = stack_pop (ctx);
a = stack_pop (ctx);
idxa = stack_slot_get_underlying_type (a);
if (stack_slot_is_managed_pointer (a))
idxa = TYPE_PTR;
idxb = stack_slot_get_underlying_type (b);
if (stack_slot_is_managed_pointer (b))
idxb = TYPE_PTR;
if (stack_slot_is_complex_type_not_reference_type (a) || stack_slot_is_complex_type_not_reference_type (b)) {
res = TYPE_INV;
} else {
--idxa;
--idxb;
res = table [idxa][idxb];
}
VERIFIER_DEBUG ( printf ("branch res %d\n", res); );
VERIFIER_DEBUG ( printf ("idxa %d idxb %d\n", idxa, idxb); );
if (res == TYPE_INV) {
CODE_NOT_VERIFIABLE (ctx,
g_strdup_printf ("Compare and Branch instruction applyed to ill formed stack (%s x %s) at 0x%04x", stack_slot_get_name (a), stack_slot_get_name (b), ctx->ip_offset));
} else if (res & NON_VERIFIABLE_RESULT) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Compare and Branch instruction is not verifiable (%s x %s) at 0x%04x", stack_slot_get_name (a), stack_slot_get_name (b), ctx->ip_offset));
res = res & ~NON_VERIFIABLE_RESULT;
}
}
static void
do_cmp_op (VerifyContext *ctx, const unsigned char table [TYPE_MAX][TYPE_MAX], guint32 opcode)
{
ILStackDesc *a, *b;
int idxa, idxb;
unsigned char res;
if (!check_underflow (ctx, 2))
return;
b = stack_pop (ctx);
a = stack_pop (ctx);
if (opcode == CEE_CGT_UN) {
if (stack_slot_get_type (a) == TYPE_COMPLEX && stack_slot_get_type (b) == TYPE_COMPLEX) {
stack_push_val (ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
return;
}
}
idxa = stack_slot_get_underlying_type (a);
if (stack_slot_is_managed_pointer (a))
idxa = TYPE_PTR;
idxb = stack_slot_get_underlying_type (b);
if (stack_slot_is_managed_pointer (b))
idxb = TYPE_PTR;
if (stack_slot_is_complex_type_not_reference_type (a) || stack_slot_is_complex_type_not_reference_type (b)) {
res = TYPE_INV;
} else {
--idxa;
--idxb;
res = table [idxa][idxb];
}
if (res == TYPE_INV) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Compare instruction applied to an ill-formed stack (%s x %s) at 0x%04x", stack_slot_get_name (a), stack_slot_get_name (b), ctx->ip_offset));
} else if (res & NON_VERIFIABLE_RESULT) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Compare instruction is not verifiable (%s x %s) at 0x%04x", stack_slot_get_name (a), stack_slot_get_name (b), ctx->ip_offset));
res = res & ~NON_VERIFIABLE_RESULT;
}
stack_push_val (ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
}
static void
do_ret (VerifyContext *ctx)
{
MonoType *ret = ctx->signature->ret;
VERIFIER_DEBUG ( printf ("checking ret\n"); );
if (ret->type != MONO_TYPE_VOID) {
ILStackDesc *top;
if (!check_underflow (ctx, 1))
return;
top = stack_pop (ctx);
if (!verify_stack_type_compatibility (ctx, ctx->signature->ret, top)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible return value on stack with method signature ret at 0x%04x", ctx->ip_offset));
return;
}
if (ret->byref || ret->type == MONO_TYPE_TYPEDBYREF || mono_type_is_value_type (ret, "System", "ArgIterator") || mono_type_is_value_type (ret, "System", "RuntimeArgumentHandle"))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Method returns byref, TypedReference, ArgIterator or RuntimeArgumentHandle at 0x%04x", ctx->ip_offset));
}
if (ctx->eval.size > 0) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Stack not empty (%d) after ret at 0x%04x", ctx->eval.size, ctx->ip_offset));
}
if (in_any_block (ctx->header, ctx->ip_offset))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("ret cannot escape exception blocks at 0x%04x", ctx->ip_offset));
}
/*
* FIXME we need to fix the case of a non-virtual instance method defined in the parent but called using a token pointing to a subclass.
* This is illegal, but mono_get_method_full decodes it.
* TODO handle calling .ctor outside one, or calling the .ctor of a class other than this or super
*/
static void
do_invoke_method (VerifyContext *ctx, int method_token, gboolean virtual)
{
int param_count, i;
MonoMethodSignature *sig;
ILStackDesc *value;
MonoMethod *method;
gboolean virt_check_this = FALSE;
gboolean constrained = ctx->prefix_set & PREFIX_CONSTRAINED;
if (!(method = verifier_load_method (ctx, method_token, virtual ? "callvirt" : "call")))
return;
if (virtual) {
CLEAR_PREFIX (ctx, PREFIX_CONSTRAINED);
if (method->klass->valuetype) // && !constrained ???
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use callvirtual with valuetype method at 0x%04x", ctx->ip_offset));
if ((method->flags & METHOD_ATTRIBUTE_STATIC))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use callvirtual with static method at 0x%04x", ctx->ip_offset));
} else {
if (method->flags & METHOD_ATTRIBUTE_ABSTRACT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use call with an abstract method at 0x%04x", ctx->ip_offset));
if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !(method->flags & METHOD_ATTRIBUTE_FINAL) && !(method->klass->flags & TYPE_ATTRIBUTE_SEALED)) {
virt_check_this = TRUE;
ctx->code [ctx->ip_offset].flags |= IL_CODE_CALL_NONFINAL_VIRTUAL;
}
}
if (!(sig = mono_method_get_signature_full (method, ctx->image, method_token, ctx->generic_context)))
sig = mono_method_get_signature (method, ctx->image, method_token);
if (!sig) {
char *name = mono_type_get_full_name (method->klass);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Could not resolve signature of %s:%s at 0x%04x", name, method->name, ctx->ip_offset));
g_free (name);
return;
}
param_count = sig->param_count + sig->hasthis;
if (!check_underflow (ctx, param_count))
return;
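/* Arguments were pushed left to right, so pop them in reverse: the top of the
* stack holds the last declared parameter. */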
for (i = sig->param_count - 1; i >= 0; --i) {
VERIFIER_DEBUG ( printf ("verifying argument %d\n", i); );
value = stack_pop (ctx);
if (!verify_stack_type_compatibility (ctx, sig->params[i], value)) {
char *stack_name = stack_slot_full_name (value);
char *sig_name = mono_type_full_name (sig->params [i]);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible parameter with function signature: Calling method with signature (%s) but for argument %d there is a (%s) on stack at 0x%04x", sig_name, i, stack_name, ctx->ip_offset));
g_free (stack_name);
g_free (sig_name);
}
if (stack_slot_is_managed_mutability_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer as argument of %s at 0x%04x", virtual ? "callvirt" : "call", ctx->ip_offset));
if ((ctx->prefix_set & PREFIX_TAIL) && stack_slot_is_managed_pointer (value)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Cannot pass a byref argument to a tail %s at 0x%04x", virtual ? "callvirt" : "call", ctx->ip_offset));
return;
}
}
if (sig->hasthis) {
MonoType *type = &method->klass->byval_arg;
ILStackDesc copy;
if (mono_method_is_constructor (method) && !method->klass->valuetype) {
if (!mono_method_is_constructor (ctx->method))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a constructor outside one at 0x%04x", ctx->ip_offset));
if (method->klass != ctx->method->klass->parent && method->klass != ctx->method->klass)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a constructor to a type diferent that this or super at 0x%04x", ctx->ip_offset));
ctx->super_ctor_called = TRUE;
value = stack_pop_safe (ctx);
if ((value->stype & THIS_POINTER_MASK) != THIS_POINTER_MASK)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid 'this ptr' argument for constructor at 0x%04x", ctx->ip_offset));
} else {
value = stack_pop (ctx);
}
copy_stack_value (&copy, value);
//TODO we should extract this to a 'drop_byref_argument' and use everywhere
//Other parts of the code suffer from the same issue of open-coding this byref dropping.
copy.type = mono_type_get_type_byval (copy.type);
copy.stype &= ~POINTER_MASK;
if (virt_check_this && !stack_slot_is_this_pointer (value) && !(method->klass->valuetype || stack_slot_is_boxed_value (value)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use the call opcode with a non-final virtual method on an object diferent thant the this pointer at 0x%04x", ctx->ip_offset));
if (constrained && virtual) {
if (!stack_slot_is_managed_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Object is not a managed pointer for a constrained call at 0x%04x", ctx->ip_offset));
if (!mono_metadata_type_equal_full (mono_type_get_type_byval (value->type), ctx->constrained_type, TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Object not compatible with constrained type at 0x%04x", ctx->ip_offset));
copy.stype |= BOXED_MASK;
} else {
if (stack_slot_is_managed_pointer (value) && !mono_class_from_mono_type (value->type)->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a reference type using a managed pointer to the this arg at 0x%04x", ctx->ip_offset));
if (!virtual && mono_class_from_mono_type (value->type)->valuetype && !method->klass->valuetype && !stack_slot_is_boxed_value (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a valuetype baseclass at 0x%04x", ctx->ip_offset));
if (virtual && mono_class_from_mono_type (value->type)->valuetype && !stack_slot_is_boxed_value (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a valuetype with callvirt at 0x%04x", ctx->ip_offset));
if (method->klass->valuetype && (stack_slot_is_boxed_value (value) || !stack_slot_is_managed_pointer (value)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a boxed or literal valuetype to call a valuetype method at 0x%04x", ctx->ip_offset));
}
if (!verify_stack_type_compatibility (ctx, type, &copy)) {
char *expected = mono_type_full_name (type);
char *effective = stack_slot_full_name (&copy);
char *method_name = mono_method_full_name (method, TRUE);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible this argument on stack with method signature expected '%s' but got '%s' for a call to '%s' at 0x%04x",
expected, effective, method_name, ctx->ip_offset));
g_free (method_name);
g_free (effective);
g_free (expected);
}
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_method_full (ctx->method, method, mono_class_from_mono_type (value->type))) {
char *name = mono_method_full_name (method, TRUE);
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Method %s is not accessible at 0x%04x", name, ctx->ip_offset), MONO_EXCEPTION_METHOD_ACCESS);
g_free (name);
}
} else if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_method_full (ctx->method, method, NULL)) {
char *name = mono_method_full_name (method, TRUE);
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Method %s is not accessible at 0x%04x", name, ctx->ip_offset), MONO_EXCEPTION_METHOD_ACCESS);
g_free (name);
}
if (sig->ret->type != MONO_TYPE_VOID) {
if (!mono_type_is_valid_in_context (ctx, sig->ret))
return;
if (check_overflow (ctx)) {
value = stack_push (ctx);
set_stack_value (ctx, value, sig->ret, FALSE);
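/* A readonly. prefixed call is only meaningful for an array type's Address
* method; its result is a controlled-mutability managed pointer (CMMP_MASK). */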
if ((ctx->prefix_set & PREFIX_READONLY) && method->klass->rank && !strcmp (method->name, "Address")) {
ctx->prefix_set &= ~PREFIX_READONLY;
value->stype |= CMMP_MASK;
}
}
}
if ((ctx->prefix_set & PREFIX_TAIL)) {
if (!mono_delegate_ret_equal (mono_method_signature (ctx->method)->ret, sig->ret))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Tail call with incompatible return type at 0x%04x", ctx->ip_offset));
if (ctx->header->code [ctx->ip_offset + 5] != CEE_RET)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Tail call not followed by ret at 0x%04x", ctx->ip_offset));
}
}
static void
do_push_static_field (VerifyContext *ctx, int token, gboolean take_addr)
{
MonoClassField *field;
MonoClass *klass;
if (!check_overflow (ctx))
return;
if (!take_addr)
CLEAR_PREFIX (ctx, PREFIX_VOLATILE);
if (!(field = verifier_load_field (ctx, token, &klass, take_addr ? "ldsflda" : "ldsfld")))
return;
if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Cannot load non static field at 0x%04x", ctx->ip_offset));
return;
}
/*taking the address of initonly field only works from the static constructor */
if (take_addr && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY) &&
!(field->parent == ctx->method->klass && (ctx->method->flags & (METHOD_ATTRIBUTE_SPECIAL_NAME | METHOD_ATTRIBUTE_STATIC)) && !strcmp (".cctor", ctx->method->name)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot take the address of a init-only field at 0x%04x", ctx->ip_offset));
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_field_full (ctx->method, field, NULL))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Type at stack is not accessible at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_FIELD_ACCESS);
set_stack_value (ctx, stack_push (ctx), field->type, take_addr);
}
static void
do_store_static_field (VerifyContext *ctx, int token) {
MonoClassField *field;
MonoClass *klass;
ILStackDesc *value;
CLEAR_PREFIX (ctx, PREFIX_VOLATILE);
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (!(field = verifier_load_field (ctx, token, &klass, "stsfld")))
return;
if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Cannot store non static field at 0x%04x", ctx->ip_offset));
return;
}
if (field->type->type == MONO_TYPE_TYPEDBYREF) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Typedbyref field is an unverfiable type in store static field at 0x%04x", ctx->ip_offset));
return;
}
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_field_full (ctx->method, field, NULL))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Type at stack is not accessible at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_FIELD_ACCESS);
if (!verify_stack_type_compatibility (ctx, field->type, value)) {
char *stack_name = stack_slot_full_name (value);
char *field_name = mono_type_full_name (field->type);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible type in static field store expected '%s' but found '%s' at 0x%04x",
field_name, stack_name, ctx->ip_offset));
g_free (field_name);
g_free (stack_name);
}
}
static gboolean
check_is_valid_type_for_field_ops (VerifyContext *ctx, int token, ILStackDesc *obj, MonoClassField **ret_field, const char *opcode)
{
MonoClassField *field;
MonoClass *klass;
gboolean is_pointer;
/*must be a reference type, a managed pointer, an unmanaged pointer, or a valuetype*/
if (!(field = verifier_load_field (ctx, token, &klass, opcode)))
return FALSE;
*ret_field = field;
//the value on stack is going to be used as a pointer
is_pointer = stack_slot_get_type (obj) == TYPE_PTR || (stack_slot_get_type (obj) == TYPE_NATIVE_INT && !get_stack_type (&field->parent->byval_arg));
if (field->type->type == MONO_TYPE_TYPEDBYREF) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Typedbyref field is an unverfiable type at 0x%04x", ctx->ip_offset));
return FALSE;
}
g_assert (obj->type);
/*The value on the stack must be a subclass of the defining type of the field*/
/* we need to check if we can load the field from the stack value*/
if (is_pointer) {
if (stack_slot_get_underlying_type (obj) == TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Native int is not a verifiable type to reference a field at 0x%04x", ctx->ip_offset));
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_field_full (ctx->method, field, NULL))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Type at stack is not accessible at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_FIELD_ACCESS);
} else {
if (!field->parent->valuetype && stack_slot_is_managed_pointer (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type at stack is a managed pointer to a reference type and is not compatible to reference the field at 0x%04x", ctx->ip_offset));
/*a value type can be loaded from a value or a managed pointer, but not a boxed object*/
if (field->parent->valuetype && stack_slot_is_boxed_value (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type at stack is a boxed valuetype and is not compatible to reference the field at 0x%04x", ctx->ip_offset));
if (!stack_slot_is_null_literal (obj) && !verify_stack_type_compatibility_full (ctx, &field->parent->byval_arg, obj, TRUE, FALSE)) {
char *found = stack_slot_full_name (obj);
char *expected = mono_type_full_name (&field->parent->byval_arg);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Expected type '%s' but found '%s' referencing the 'this' argument at 0x%04x", expected, found, ctx->ip_offset));
g_free (found);
g_free (expected);
}
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_field_full (ctx->method, field, mono_class_from_mono_type (obj->type)))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Type at stack is not accessible at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_FIELD_ACCESS);
}
check_unmanaged_pointer (ctx, obj);
return TRUE;
}
static void
do_push_field (VerifyContext *ctx, int token, gboolean take_addr)
{
ILStackDesc *obj;
MonoClassField *field;
if (!take_addr)
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (ctx, 1))
return;
obj = stack_pop_safe (ctx);
if (!check_is_valid_type_for_field_ops (ctx, token, obj, &field, take_addr ? "ldflda" : "ldfld"))
return;
if (take_addr && field->parent->valuetype && !stack_slot_is_managed_pointer (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot take the address of a temporary value-type at 0x%04x", ctx->ip_offset));
if (take_addr && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY) &&
!(field->parent == ctx->method->klass && mono_method_is_constructor (ctx->method)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot take the address of a init-only field at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), field->type, take_addr);
}
static void
do_store_field (VerifyContext *ctx, int token)
{
ILStackDesc *value, *obj;
MonoClassField *field;
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (ctx, 2))
return;
value = stack_pop (ctx);
obj = stack_pop_safe (ctx);
if (!check_is_valid_type_for_field_ops (ctx, token, obj, &field, "stfld"))
return;
if (!verify_stack_type_compatibility (ctx, field->type, value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible type %s in field store at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
}
/*TODO proper handling of Nullable<T>*/
static void
do_box_value (VerifyContext *ctx, int klass_token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, klass_token, "box");
MonoClass *klass;
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
/*box is a nop for reference types*/
if (stack_slot_get_underlying_type (value) == TYPE_COMPLEX && MONO_TYPE_IS_REFERENCE (value->type) && MONO_TYPE_IS_REFERENCE (type)) {
stack_push_stack_val (ctx, value)->stype |= BOXED_MASK;
return;
}
if (!verify_stack_type_compatibility (ctx, type, value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for boxing operation at 0x%04x", ctx->ip_offset));
klass = mono_class_from_mono_type (type);
if (mono_class_is_nullable (klass))
type = &mono_class_get_nullable_param (klass)->byval_arg;
stack_push_val (ctx, TYPE_COMPLEX | BOXED_MASK, type);
}
static void
do_unbox_value (VerifyContext *ctx, int klass_token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, klass_token, "unbox");
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
if (!mono_class_from_mono_type (type)->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid reference type for unbox at 0x%04x", ctx->ip_offset));
value = stack_pop (ctx);
/*Value should be: a boxed valuetype or a reference type*/
if (!(stack_slot_get_type (value) == TYPE_COMPLEX &&
(stack_slot_is_boxed_value (value) || !mono_class_from_mono_type (value->type)->valuetype)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type %s at stack for unbox operation at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
set_stack_value (ctx, value = stack_push (ctx), mono_type_get_type_byref (type), FALSE);
value->stype |= CMMP_MASK;
}
static void
do_unbox_any (VerifyContext *ctx, int klass_token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, klass_token, "unbox.any");
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
/*Value should be: a boxed valuetype or a reference type*/
if (!(stack_slot_get_type (value) == TYPE_COMPLEX &&
(stack_slot_is_boxed_value (value) || !mono_class_from_mono_type (value->type)->valuetype)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type %s at stack for unbox.any operation at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), type, FALSE);
}
static void
do_unary_math_op (VerifyContext *ctx, int op)
{
ILStackDesc *value;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
switch (stack_slot_get_type (value)) {
case TYPE_I4:
case TYPE_I8:
case TYPE_NATIVE_INT:
break;
case TYPE_R8:
if (op == CEE_NEG)
break;
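/* Fall through when op is not neg: floating point is only valid for neg, so
* anything else is rejected below. */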
case TYPE_COMPLEX: /*only enums are ok*/
if (mono_type_is_enum_type (value->type))
break;
default:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for unary not at 0x%04x", ctx->ip_offset));
}
stack_push_stack_val (ctx, value);
}
static void
do_conversion (VerifyContext *ctx, int kind)
{
ILStackDesc *value;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
switch (stack_slot_get_type (value)) {
case TYPE_I4:
case TYPE_I8:
case TYPE_NATIVE_INT:
case TYPE_R8:
break;
default:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type (%s) at stack for conversion operation. Numeric type expected at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
}
switch (kind) {
case TYPE_I4:
stack_push_val (ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
break;
case TYPE_I8:
stack_push_val (ctx,TYPE_I8, &mono_defaults.int64_class->byval_arg);
break;
case TYPE_R8:
stack_push_val (ctx, TYPE_R8, &mono_defaults.double_class->byval_arg);
break;
case TYPE_NATIVE_INT:
stack_push_val (ctx, TYPE_NATIVE_INT, &mono_defaults.int_class->byval_arg);
break;
default:
g_error ("unknown type %02x in conversion", kind);
}
}
static void
do_load_token (VerifyContext *ctx, int token)
{
gpointer handle;
MonoClass *handle_class;
if (!check_overflow (ctx))
return;
switch (token & 0xff000000) {
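/* The high byte of a metadata token selects the table it refers to; only
* these tables are valid operands for ldtoken. */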
case MONO_TOKEN_TYPE_DEF:
case MONO_TOKEN_TYPE_REF:
case MONO_TOKEN_TYPE_SPEC:
case MONO_TOKEN_FIELD_DEF:
case MONO_TOKEN_METHOD_DEF:
case MONO_TOKEN_METHOD_SPEC:
case MONO_TOKEN_MEMBER_REF:
if (!token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Table index out of range 0x%x for token %x for ldtoken at 0x%04x", mono_metadata_token_index (token), token, ctx->ip_offset));
return;
}
break;
default:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid table 0x%x for token 0x%x for ldtoken at 0x%04x", mono_metadata_token_table (token), token, ctx->ip_offset));
return;
}
handle = mono_ldtoken (ctx->image, token, &handle_class, ctx->generic_context);
if (!handle) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid token 0x%x for ldtoken at 0x%04x", token, ctx->ip_offset));
return;
}
if (handle_class == mono_defaults.typehandle_class) {
mono_type_is_valid_in_context (ctx, (MonoType*)handle);
} else if (handle_class == mono_defaults.methodhandle_class) {
mono_method_is_valid_in_context (ctx, (MonoMethod*)handle);
} else if (handle_class == mono_defaults.fieldhandle_class) {
mono_type_is_valid_in_context (ctx, &((MonoClassField*)handle)->parent->byval_arg);
} else {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid ldtoken type %x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
}
stack_push_val (ctx, TYPE_COMPLEX, mono_class_get_type (handle_class));
}
static void
do_ldobj_value (VerifyContext *ctx, int token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, token, "ldobj");
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (!stack_slot_is_managed_pointer (value)
&& stack_slot_get_type (value) != TYPE_NATIVE_INT
&& !(stack_slot_get_type (value) == TYPE_PTR && value->type->type != MONO_TYPE_FNPTR)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid argument %s to ldobj at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
return;
}
if (stack_slot_get_type (value) == TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Using native pointer to ldobj at 0x%04x", ctx->ip_offset));
/*We have a byval on the stack, but the comparison must be strict. */
if (!verify_type_compatibility_full (ctx, type, mono_type_get_type_byval (value->type), TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for ldojb operation at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), type, FALSE);
}
static void
do_stobj (VerifyContext *ctx, int token)
{
ILStackDesc *dest, *src;
MonoType *type = get_boxable_mono_type (ctx, token, "stobj");
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!type)
return;
if (!check_underflow (ctx, 2))
return;
src = stack_pop (ctx);
dest = stack_pop (ctx);
if (stack_slot_is_managed_mutability_pointer (dest))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with stobj at 0x%04x", ctx->ip_offset));
if (!stack_slot_is_managed_pointer (dest))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid destination of stobj operation at 0x%04x", ctx->ip_offset));
if (stack_slot_is_boxed_value (src) && !MONO_TYPE_IS_REFERENCE (src->type) && !MONO_TYPE_IS_REFERENCE (type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use stobj with a boxed source value that is not a reference type at 0x%04x", ctx->ip_offset));
if (!verify_stack_type_compatibility (ctx, type, src)) {
char *type_name = mono_type_full_name (type);
char *src_name = stack_slot_full_name (src);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Token '%s' and source '%s' of stobj don't match ' at 0x%04x", type_name, src_name, ctx->ip_offset));
g_free (type_name);
g_free (src_name);
}
if (!verify_type_compatibility (ctx, mono_type_get_type_byval (dest->type), type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Destination and token types of stobj don't match at 0x%04x", ctx->ip_offset));
}
static void
do_cpobj (VerifyContext *ctx, int token)
{
ILStackDesc *dest, *src;
MonoType *type = get_boxable_mono_type (ctx, token, "cpobj");
if (!type)
return;
if (!check_underflow (ctx, 2))
return;
src = stack_pop (ctx);
dest = stack_pop (ctx);
if (!stack_slot_is_managed_pointer (src))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid source of cpobj operation at 0x%04x", ctx->ip_offset));
if (!stack_slot_is_managed_pointer (dest))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid destination of cpobj operation at 0x%04x", ctx->ip_offset));
if (stack_slot_is_managed_mutability_pointer (dest))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with cpobj at 0x%04x", ctx->ip_offset));
if (!verify_type_compatibility (ctx, type, mono_type_get_type_byval (src->type)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Token and source types of cpobj don't match at 0x%04x", ctx->ip_offset));
if (!verify_type_compatibility (ctx, mono_type_get_type_byval (dest->type), type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Destination and token types of cpobj don't match at 0x%04x", ctx->ip_offset));
}
static void
do_initobj (VerifyContext *ctx, int token)
{
ILStackDesc *obj;
MonoType *stack, *type = get_boxable_mono_type (ctx, token, "initobj");
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
obj = stack_pop (ctx);
if (!stack_slot_is_managed_pointer (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid object address for initobj at 0x%04x", ctx->ip_offset));
if (stack_slot_is_managed_mutability_pointer (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with initobj at 0x%04x", ctx->ip_offset));
stack = mono_type_get_type_byval (obj->type);
if (MONO_TYPE_IS_REFERENCE (stack)) {
if (!verify_type_compatibility (ctx, stack, type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type token of initobj not compatible with value on stack at 0x%04x", ctx->ip_offset));
else if (IS_STRICT_MODE (ctx) && !mono_metadata_type_equal (type, stack))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type token of initobj not compatible with value on stack at 0x%04x", ctx->ip_offset));
} else if (!verify_type_compatibility (ctx, stack, type)) {
char *expected_name = mono_type_full_name (type);
char *stack_name = mono_type_full_name (stack);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Initobj %s not compatible with value on stack %s at 0x%04x", expected_name, stack_name, ctx->ip_offset));
g_free (expected_name);
g_free (stack_name);
}
}
static void
do_newobj (VerifyContext *ctx, int token)
{
ILStackDesc *value;
int i;
MonoMethodSignature *sig;
MonoMethod *method;
gboolean is_delegate = FALSE;
if (!(method = verifier_load_method (ctx, token, "newobj")))
return;
if (!mono_method_is_constructor (method)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method from token 0x%08x not a constructor at 0x%04x", token, ctx->ip_offset));
return;
}
if (method->klass->flags & (TYPE_ATTRIBUTE_ABSTRACT | TYPE_ATTRIBUTE_INTERFACE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Trying to instantiate an abstract or interface type at 0x%04x", ctx->ip_offset));
if (!mono_method_can_access_method_full (ctx->method, method, NULL)) {
char *from = mono_method_full_name (ctx->method, TRUE);
char *to = mono_method_full_name (method, TRUE);
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Constructor %s not visible from %s at 0x%04x", to, from, ctx->ip_offset), MONO_EXCEPTION_METHOD_ACCESS);
g_free (from);
g_free (to);
}
//FIXME use mono_method_get_signature_full
sig = mono_method_signature (method);
if (!sig) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid constructor signature to newobj at 0x%04x", ctx->ip_offset));
return;
}
if (!sig->hasthis) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid constructor signature missing hasthis at 0x%04x", ctx->ip_offset));
return;
}
if (!check_underflow (ctx, sig->param_count))
return;
is_delegate = method->klass->parent == mono_defaults.multicastdelegate_class;
if (is_delegate) {
ILStackDesc *funptr;
//first arg is object, second arg is fun ptr
if (sig->param_count != 2) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid delegate constructor at 0x%04x", ctx->ip_offset));
return;
}
funptr = stack_pop (ctx);
value = stack_pop (ctx);
verify_delegate_compatibility (ctx, method->klass, value, funptr);
} else {
for (i = sig->param_count - 1; i >= 0; --i) {
VERIFIER_DEBUG ( printf ("verifying constructor argument %d\n", i); );
value = stack_pop (ctx);
if (!verify_stack_type_compatibility (ctx, sig->params [i], value)) {
char *stack_name = stack_slot_full_name (value);
char *sig_name = mono_type_full_name (sig->params [i]);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible parameter value with constructor signature: %s X %s at 0x%04x", sig_name, stack_name, ctx->ip_offset));
g_free (stack_name);
g_free (sig_name);
}
if (stack_slot_is_managed_mutability_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer as argument of newobj at 0x%04x", ctx->ip_offset));
}
}
if (check_overflow (ctx))
set_stack_value (ctx, stack_push (ctx), &method->klass->byval_arg, FALSE);
}
static void
do_cast (VerifyContext *ctx, int token, const char *opcode) {
ILStackDesc *value;
MonoType *type;
gboolean is_boxed;
gboolean do_box;
if (!check_underflow (ctx, 1))
return;
if (!(type = get_boxable_mono_type (ctx, token, opcode)))
return;
if (type->byref) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid %s type at 0x%04x", opcode, ctx->ip_offset));
return;
}
value = stack_pop (ctx);
is_boxed = stack_slot_is_boxed_value (value);
if (stack_slot_is_managed_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value for %s at 0x%04x", opcode, ctx->ip_offset));
else if (!MONO_TYPE_IS_REFERENCE (value->type) && !is_boxed) {
char *name = stack_slot_full_name (value);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Expected a reference type on stack for %s but found %s at 0x%04x", opcode, name, ctx->ip_offset));
g_free (name);
}
switch (value->type->type) {
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
case MONO_TYPE_TYPEDBYREF:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value for %s at 0x%04x", opcode, ctx->ip_offset));
}
do_box = is_boxed || mono_type_is_generic_argument(type) || mono_class_from_mono_type (type)->valuetype;
stack_push_val (ctx, TYPE_COMPLEX | (do_box ? BOXED_MASK : 0), type);
}
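/*
* mono_type_from_opcode:
*
* Return the element MonoType implied by a ldind/stind/ldelem/stelem opcode.
*/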
static MonoType *
mono_type_from_opcode (int opcode) {
switch (opcode) {
case CEE_LDIND_I1:
case CEE_LDIND_U1:
case CEE_STIND_I1:
case CEE_LDELEM_I1:
case CEE_LDELEM_U1:
case CEE_STELEM_I1:
return &mono_defaults.sbyte_class->byval_arg;
case CEE_LDIND_I2:
case CEE_LDIND_U2:
case CEE_STIND_I2:
case CEE_LDELEM_I2:
case CEE_LDELEM_U2:
case CEE_STELEM_I2:
return &mono_defaults.int16_class->byval_arg;
case CEE_LDIND_I4:
case CEE_LDIND_U4:
case CEE_STIND_I4:
case CEE_LDELEM_I4:
case CEE_LDELEM_U4:
case CEE_STELEM_I4:
return &mono_defaults.int32_class->byval_arg;
case CEE_LDIND_I8:
case CEE_STIND_I8:
case CEE_LDELEM_I8:
case CEE_STELEM_I8:
return &mono_defaults.int64_class->byval_arg;
case CEE_LDIND_R4:
case CEE_STIND_R4:
case CEE_LDELEM_R4:
case CEE_STELEM_R4:
return &mono_defaults.single_class->byval_arg;
case CEE_LDIND_R8:
case CEE_STIND_R8:
case CEE_LDELEM_R8:
case CEE_STELEM_R8:
return &mono_defaults.double_class->byval_arg;
case CEE_LDIND_I:
case CEE_STIND_I:
case CEE_LDELEM_I:
case CEE_STELEM_I:
return &mono_defaults.int_class->byval_arg;
case CEE_LDIND_REF:
case CEE_STIND_REF:
case CEE_LDELEM_REF:
case CEE_STELEM_REF:
return &mono_defaults.object_class->byval_arg;
default:
g_error ("unknown opcode %02x in mono_type_from_opcode ", opcode);
return NULL;
}
}
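/*
* do_load_indirect:
*
* Verify the ldind.* family of opcodes. The operand must be a managed
* pointer whose pointee is compatible with the type implied by @opcode.
*/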
static void
do_load_indirect (VerifyContext *ctx, int opcode)
{
ILStackDesc *value;
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (!stack_slot_is_managed_pointer (value)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Load indirect not using a manager pointer at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), mono_type_from_opcode (opcode), FALSE);
return;
}
if (opcode == CEE_LDIND_REF) {
if (stack_slot_get_underlying_type (value) != TYPE_COMPLEX || mono_class_from_mono_type (value->type)->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for ldind_ref expected object byref operation at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), mono_type_get_type_byval (value->type), FALSE);
} else {
if (!verify_type_compatibility_full (ctx, mono_type_from_opcode (opcode), mono_type_get_type_byval (value->type), TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for ldind 0x%x operation at 0x%04x", opcode, ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), mono_type_from_opcode (opcode), FALSE);
}
}
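/*
* do_store_indirect:
*
* Verify the stind.* family of opcodes. Pops a value and an address and
* checks both against the type implied by @opcode.
*/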
static void
do_store_indirect (VerifyContext *ctx, int opcode)
{
ILStackDesc *addr, *val;
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (ctx, 2))
return;
val = stack_pop (ctx);
addr = stack_pop (ctx);
check_unmanaged_pointer (ctx, addr);
if (!stack_slot_is_managed_pointer (addr) && stack_slot_get_type (addr) != TYPE_PTR) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid non-pointer argument to stind at 0x%04x", ctx->ip_offset));
return;
}
if (stack_slot_is_managed_mutability_pointer (addr)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with stind at 0x%04x", ctx->ip_offset));
return;
}
if (!verify_type_compatibility_full (ctx, mono_type_from_opcode (opcode), mono_type_get_type_byval (addr->type), TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid addr type at stack for stind 0x%x operation at 0x%04x", opcode, ctx->ip_offset));
if (!verify_stack_type_compatibility (ctx, mono_type_from_opcode (opcode), val))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value type at stack for stind 0x%x operation at 0x%04x", opcode, ctx->ip_offset));
}
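/*
* do_newarr:
*
* Verify the newarr opcode. Pops the array length (int32 or native int)
* and pushes a zero-based, one-dimensional array of the token's type.
*/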
static void
do_newarr (VerifyContext *ctx, int token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, token, "newarr");
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (stack_slot_get_type (value) != TYPE_I4 && stack_slot_get_type (value) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Array size type on stack (%s) is not a verifiable type at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), mono_class_get_type (mono_array_class_get (mono_class_from_mono_type (type), 1)), FALSE);
}
/*FIXME handle arrays that are not 0-indexed*/
static void
do_ldlen (VerifyContext *ctx)
{
ILStackDesc *value;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (stack_slot_get_type (value) != TYPE_COMPLEX || value->type->type != MONO_TYPE_SZARRAY)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type for ldlen at 0x%04x", ctx->ip_offset));
stack_push_val (ctx, TYPE_NATIVE_INT, &mono_defaults.int_class->byval_arg);
}
/*FIXME handle arrays that are not 0-indexed*/
/*FIXME handle readonly prefix and CMMP*/
static void
do_ldelema (VerifyContext *ctx, int klass_token)
{
ILStackDesc *index, *array, *res;
MonoType *type = get_boxable_mono_type (ctx, klass_token, "ldelema");
gboolean valid;
if (!type)
return;
if (!check_underflow (ctx, 2))
return;
index = stack_pop (ctx);
array = stack_pop (ctx);
if (stack_slot_get_type (index) != TYPE_I4 && stack_slot_get_type (index) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Index type(%s) for ldelema is not an int or a native int at 0x%04x", stack_slot_get_name (index), ctx->ip_offset));
if (!stack_slot_is_null_literal (array)) {
if (stack_slot_get_type (array) != TYPE_COMPLEX || array->type->type != MONO_TYPE_SZARRAY)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type(%s) for ldelema at 0x%04x", stack_slot_get_name (array), ctx->ip_offset));
else {
if (get_stack_type (type) == TYPE_I4 || get_stack_type (type) == TYPE_NATIVE_INT) {
valid = verify_type_compatibility_full (ctx, type, &array->type->data.klass->byval_arg, TRUE);
} else {
valid = mono_metadata_type_equal (type, &array->type->data.klass->byval_arg);
}
if (!valid)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type on stack for ldelema at 0x%04x", ctx->ip_offset));
}
}
res = stack_push (ctx);
set_stack_value (ctx, res, type, TRUE);
if (ctx->prefix_set & PREFIX_READONLY) {
ctx->prefix_set &= ~PREFIX_READONLY;
res->stype |= CMMP_MASK;
}
}
/*
* FIXME handle arrays that are not 0-indexed
* FIXME handle readonly prefix and CMMP
*/
static void
do_ldelem (VerifyContext *ctx, int opcode, int token)
{
#define IS_ONE_OF2(T, A, B) ((T) == (A) || (T) == (B))
ILStackDesc *index, *array;
MonoType *type;
if (!check_underflow (ctx, 2))
return;
if (opcode == CEE_LDELEM) {
if (!(type = verifier_load_type (ctx, token, "ldelem.any"))) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Type (0x%08x) not found at 0x%04x", token, ctx->ip_offset));
return;
}
} else {
type = mono_type_from_opcode (opcode);
}
index = stack_pop (ctx);
array = stack_pop (ctx);
if (stack_slot_get_type (index) != TYPE_I4 && stack_slot_get_type (index) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Index type(%s) for ldelem.X is not an int or a native int at 0x%04x", stack_slot_get_name (index), ctx->ip_offset));
if (!stack_slot_is_null_literal (array)) {
if (stack_slot_get_type (array) != TYPE_COMPLEX || array->type->type != MONO_TYPE_SZARRAY)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type(%s) for ldelem.X at 0x%04x", stack_slot_get_name (array), ctx->ip_offset));
else {
if (opcode == CEE_LDELEM_REF) {
if (array->type->data.klass->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type is not a reference type for ldelem.ref 0x%04x", ctx->ip_offset));
type = &array->type->data.klass->byval_arg;
} else {
MonoType *candidate = &array->type->data.klass->byval_arg;
if (IS_STRICT_MODE (ctx)) {
MonoType *underlying_type = mono_type_get_underlying_type_any (type);
MonoType *underlying_candidate = mono_type_get_underlying_type_any (candidate);
if ((IS_ONE_OF2 (underlying_type->type, MONO_TYPE_I4, MONO_TYPE_U4) && IS_ONE_OF2 (underlying_candidate->type, MONO_TYPE_I, MONO_TYPE_U)) ||
(IS_ONE_OF2 (underlying_candidate->type, MONO_TYPE_I4, MONO_TYPE_U4) && IS_ONE_OF2 (underlying_type->type, MONO_TYPE_I, MONO_TYPE_U)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type on stack for ldelem.X at 0x%04x", ctx->ip_offset));
}
if (!verify_type_compatibility_full (ctx, type, candidate, TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type on stack for ldelem.X at 0x%04x", ctx->ip_offset));
}
}
}
set_stack_value (ctx, stack_push (ctx), type, FALSE);
#undef IS_ONE_OF2
}
/*
* FIXME handle arrays that are not 0-indexed
*/
static void
do_stelem (VerifyContext *ctx, int opcode, int token)
{
ILStackDesc *index, *array, *value;
MonoType *type;
if (!check_underflow (ctx, 3))
return;
if (opcode == CEE_STELEM) {
if (!(type = verifier_load_type (ctx, token, "stelem.any"))) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Type (0x%08x) not found at 0x%04x", token, ctx->ip_offset));
return;
}
} else {
type = mono_type_from_opcode (opcode);
}
value = stack_pop (ctx);
index = stack_pop (ctx);
array = stack_pop (ctx);
if (stack_slot_get_type (index) != TYPE_I4 && stack_slot_get_type (index) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Index type(%s) for stdelem.X is not an int or a native int at 0x%04x", stack_slot_get_name (index), ctx->ip_offset));
if (!stack_slot_is_null_literal (array)) {
if (stack_slot_get_type (array) != TYPE_COMPLEX || array->type->type != MONO_TYPE_SZARRAY) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type(%s) for stelem.X at 0x%04x", stack_slot_get_name (array), ctx->ip_offset));
} else {
if (opcode == CEE_STELEM_REF) {
if (array->type->data.klass->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type is not a reference type for stelem.ref 0x%04x", ctx->ip_offset));
} else if (!verify_type_compatibility_full (ctx, &array->type->data.klass->byval_arg, type, TRUE)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type on stack for stdelem.X at 0x%04x", ctx->ip_offset));
}
}
}
if (opcode == CEE_STELEM_REF) {
if (!stack_slot_is_boxed_value (value) && mono_class_from_mono_type (value->type)->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value is not a reference type for stelem.ref 0x%04x", ctx->ip_offset));
} else {
if (!verify_stack_type_compatibility (ctx, type, value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value on stack for stelem.X at 0x%04x", ctx->ip_offset));
if (stack_slot_is_boxed_value (value) && !MONO_TYPE_IS_REFERENCE (value->type) && !MONO_TYPE_IS_REFERENCE (type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use stelem with a boxed source value that is not a reference type at 0x%04x", ctx->ip_offset));
}
}
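/*
* do_throw:
*
* Verify the throw opcode. The operand must be an object reference (or
* the null literal); the eval stack is left empty afterwards.
*/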
static void
do_throw (VerifyContext *ctx)
{
ILStackDesc *exception;
if (!check_underflow (ctx, 1))
return;
exception = stack_pop (ctx);
if (!stack_slot_is_null_literal (exception) && !(stack_slot_get_type (exception) == TYPE_COMPLEX && !mono_class_from_mono_type (exception->type)->valuetype))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type on stack for throw, expected reference type at 0x%04x", ctx->ip_offset));
if (mono_type_is_generic_argument (exception->type) && !stack_slot_is_boxed_value (exception)) {
char *name = mono_type_full_name (exception->type);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type on stack for throw, expected reference type but found unboxed %s at 0x%04x ", name, ctx->ip_offset));
g_free (name);
}
/*The stack is left empty after a throw*/
ctx->eval.size = 0;
}
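/*
* do_endfilter:
*
* Verify the endfilter opcode. It must be the last instruction of a
* filter clause and, in strict mode, the stack must hold a single int32.
*/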
static void
do_endfilter (VerifyContext *ctx)
{
MonoExceptionClause *clause;
if (IS_STRICT_MODE (ctx)) {
if (ctx->eval.size != 1)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Stack size must have one item for endfilter at 0x%04x", ctx->ip_offset));
if (ctx->eval.size >= 1 && stack_slot_get_type (stack_pop (ctx)) != TYPE_I4)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Stack item type is not an int32 for endfilter at 0x%04x", ctx->ip_offset));
}
if ((clause = is_correct_endfilter (ctx, ctx->ip_offset))) {
if (IS_STRICT_MODE (ctx)) {
if (ctx->ip_offset != clause->handler_offset - 2)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("endfilter is not the last instruction of the filter clause at 0x%04x", ctx->ip_offset));
} else {
if ((ctx->ip_offset != clause->handler_offset - 2) && !MONO_OFFSET_IN_HANDLER (clause, ctx->ip_offset))
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("endfilter is not the last instruction of the filter clause at 0x%04x", ctx->ip_offset));
}
} else {
if (IS_STRICT_MODE (ctx) && !is_unverifiable_endfilter (ctx, ctx->ip_offset))
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("endfilter outside filter clause at 0x%04x", ctx->ip_offset));
else
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("endfilter outside filter clause at 0x%04x", ctx->ip_offset));
}
ctx->eval.size = 0;
}
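/*
* do_leave:
*
* Verify the leave and leave.s opcodes. @delta is relative to the start
* of the instruction. The eval stack is emptied.
*/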
static void
do_leave (VerifyContext *ctx, int delta)
{
int target = ((gint32)ctx->ip_offset) + delta;
if (target >= ctx->code_size || target < 0)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target out of code at 0x%04x", ctx->ip_offset));
if (!is_correct_leave (ctx->header, ctx->ip_offset, target))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Leave not allowed in finally block at 0x%04x", ctx->ip_offset));
ctx->eval.size = 0;
ctx->target = target;
}
/*
* do_static_branch:
*
* Verify br and br.s opcodes.
*/
static void
do_static_branch (VerifyContext *ctx, int delta)
{
int target = ctx->ip_offset + delta;
if (target < 0 || target >= ctx->code_size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("branch target out of code at 0x%04x", ctx->ip_offset));
return;
}
switch (is_valid_branch_instruction (ctx->header, ctx->ip_offset, target)) {
case 1:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
break;
case 2:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
break;
}
ctx->target = target;
}
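/*
* do_switch:
*
* Verify the switch opcode. @count jump targets of 4 bytes each follow
* at @data; every target is bounds- and branch-checked, then merged.
*/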
static void
do_switch (VerifyContext *ctx, int count, const unsigned char *data)
{
int i, base = ctx->ip_offset + 5 + count * 4;
ILStackDesc *value;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (stack_slot_get_type (value) != TYPE_I4 && stack_slot_get_type (value) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid argument to switch at 0x%04x", ctx->ip_offset));
for (i = 0; i < count; ++i) {
int target = base + read32 (data + i * 4);
if (target < 0 || target >= ctx->code_size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Switch target %x out of code at 0x%04x", i, ctx->ip_offset));
return;
}
switch (is_valid_branch_instruction (ctx->header, ctx->ip_offset, target)) {
case 1:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Switch target %x escapes out of exception block at 0x%04x", i, ctx->ip_offset));
break;
case 2:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Switch target %x escapes out of exception block at 0x%04x", i, ctx->ip_offset));
return;
}
merge_stacks (ctx, &ctx->eval, &ctx->code [target], FALSE, TRUE);
}
}
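/*
* do_load_function_ptr:
*
* Verify the ldftn (@virtual == FALSE) and ldvirtftn (@virtual == TRUE)
* opcodes and push a function pointer to the loaded method.
*/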
static void
do_load_function_ptr (VerifyContext *ctx, guint32 token, gboolean virtual)
{
ILStackDesc *top;
MonoMethod *method;
if (virtual && !check_underflow (ctx, 1))
return;
if (!virtual && !check_overflow (ctx))
return;
if (!IS_METHOD_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid token %x for ldftn at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return;
}
if (!(method = verifier_load_method (ctx, token, virtual ? "ldvirtfrn" : "ldftn")))
return;
if (mono_method_is_constructor (method))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use ldftn with a constructor at 0x%04x", ctx->ip_offset));
if (virtual) {
top = stack_pop (ctx);
if (stack_slot_get_type (top) != TYPE_COMPLEX || top->type->type == MONO_TYPE_VALUETYPE)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid argument to ldvirtftn at 0x%04x", ctx->ip_offset));
if (method->flags & METHOD_ATTRIBUTE_STATIC)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use ldvirtftn with a static method at 0x%04x", ctx->ip_offset));
if (!verify_stack_type_compatibility (ctx, &method->klass->byval_arg, top))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Unexpected object for ldvirtftn at 0x%04x", ctx->ip_offset));
}
if (!mono_method_can_access_method_full (ctx->method, method, NULL))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Loaded method is not visible for ldftn/ldvirtftn at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_METHOD_ACCESS);
top = stack_push_val(ctx, TYPE_PTR, mono_type_create_fnptr_from_mono_method (ctx, method));
top->method = method;
}
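/*
* do_sizeof:
*
* Verify the sizeof opcode and push an unsigned int32 with the size of
* the type given by @token.
*/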
static void
do_sizeof (VerifyContext *ctx, int token)
{
MonoType *type;
if (!IS_TYPE_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid type token %x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return;
}
if (!(type = verifier_load_type (ctx, token, "sizeof")))
return;
if (type->byref && type->type != MONO_TYPE_TYPEDBYREF) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid use of byref type at 0x%04x", ctx->ip_offset));
return;
}
if (type->type == MONO_TYPE_VOID) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid use of void type at 0x%04x", ctx->ip_offset));
return;
}
if (check_overflow (ctx))
set_stack_value (ctx, stack_push (ctx), &mono_defaults.uint32_class->byval_arg, FALSE);
}
/* Stack top can be of any type; the runtime doesn't care and treats everything as an int. */
static void
do_localloc (VerifyContext *ctx)
{
ILStackDesc *top;
if (ctx->eval.size != 1) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Stack must have only size item in localloc at 0x%04x", ctx->ip_offset));
return;
}
if (in_any_exception_block (ctx->header, ctx->ip_offset)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Stack must have only size item in localloc at 0x%04x", ctx->ip_offset));
return;
}
/*TODO verify top type*/
top = stack_pop (ctx);
set_stack_value (ctx, stack_push (ctx), &mono_defaults.int_class->byval_arg, FALSE);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Instruction localloc in never verifiable at 0x%04x", ctx->ip_offset));
}
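/*
* do_ldstr:
*
* Verify the ldstr opcode: @token must be a valid string token with a
* verifiable string signature; pushes a System.String reference.
*/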
static void
do_ldstr (VerifyContext *ctx, guint32 token)
{
GSList *error = NULL;
if (mono_metadata_token_code (token) != MONO_TOKEN_STRING) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid string token %x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return;
}
if (!ctx->image->dynamic && !mono_verifier_verify_string_signature (ctx->image, mono_metadata_token_index (token), &error)) {
if (error)
ctx->list = g_slist_concat (ctx->list, error);
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid string index %x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return;
}
if (check_overflow (ctx))
stack_push_val (ctx, TYPE_COMPLEX, &mono_defaults.string_class->byval_arg);
}
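/*
* do_refanyval:
*
* Verify the refanyval opcode. Pops a typedref and pushes a managed
* pointer to the type given by @token.
*/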
static void
do_refanyval (VerifyContext *ctx, int token)
{
ILStackDesc *top;
MonoType *type;
if (!check_underflow (ctx, 1))
return;
if (!(type = get_boxable_mono_type (ctx, token, "refanyval")))
return;
top = stack_pop (ctx);
if (top->stype != TYPE_PTR || top->type->type != MONO_TYPE_TYPEDBYREF)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Expected a typedref as argument for refanyval, but found %s at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), type, TRUE);
}
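/*
* do_refanytype:
*
* Verify the refanytype opcode. Pops a typedref and pushes its
* RuntimeTypeHandle.
*/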
static void
do_refanytype (VerifyContext *ctx)
{
ILStackDesc *top;
if (!check_underflow (ctx, 1))
return;
top = stack_pop (ctx);
if (top->stype != TYPE_PTR || top->type->type != MONO_TYPE_TYPEDBYREF)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Expected a typedref as argument for refanytype, but found %s at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), &mono_defaults.typehandle_class->byval_arg, FALSE);
}
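/*
* do_mkrefany:
*
* Verify the mkrefany opcode. Pops a managed pointer compatible with
* the type given by @token and pushes a System.TypedReference.
*/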
static void
do_mkrefany (VerifyContext *ctx, int token)
{
ILStackDesc *top;
MonoType *type;
if (!check_underflow (ctx, 1))
return;
if (!(type = get_boxable_mono_type (ctx, token, "refanyval")))
return;
top = stack_pop (ctx);
if (stack_slot_is_managed_mutability_pointer (top))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with mkrefany at 0x%04x", ctx->ip_offset));
if (!stack_slot_is_managed_pointer (top)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Expected a managed pointer for mkrefany, but found %s at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
} else {
MonoType *stack_type = mono_type_get_type_byval (top->type);
if (MONO_TYPE_IS_REFERENCE (type) && !mono_metadata_type_equal (type, stack_type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type not compatible for mkrefany at 0x%04x", ctx->ip_offset));
if (!MONO_TYPE_IS_REFERENCE (type) && !verify_type_compatibility_full (ctx, type, stack_type, TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type not compatible for mkrefany at 0x%04x", ctx->ip_offset));
}
set_stack_value (ctx, stack_push (ctx), &mono_defaults.typed_reference_class->byval_arg, FALSE);
}
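/*
* do_ckfinite:
*
* Verify the ckfinite opcode. The stack top must be a floating point
* value; it is pushed back unchanged.
*/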
static void
do_ckfinite (VerifyContext *ctx)
{
ILStackDesc *top;
if (!check_underflow (ctx, 1))
return;
top = stack_pop (ctx);
if (stack_slot_get_underlying_type (top) != TYPE_R8)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Expected float32 or float64 on stack for ckfinit but found %s at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
stack_push_stack_val (ctx, top);
}
/*
* merge_stacks:
*
* Merge the stacks and perform compatibility checks. The merge checks whether
* the types in @from are mergeable with the corresponding types in @to.
*
* @from holds the new values for a given control path
* @to holds the current values of a given control path
*
* TODO we can eliminate the @from argument as all callers pass &ctx->eval
*/
static void
merge_stacks (VerifyContext *ctx, ILCodeDesc *from, ILCodeDesc *to, gboolean start, gboolean external)
{
MonoError error;
int i, j, k;
stack_init (ctx, to);
if (start) {
if (to->flags == IL_CODE_FLAG_NOT_PROCESSED)
from->size = 0;
else
stack_copy (&ctx->eval, to);
goto end_verify;
} else if (!(to->flags & IL_CODE_STACK_MERGED)) {
stack_copy (to, &ctx->eval);
goto end_verify;
}
VERIFIER_DEBUG ( printf ("performing stack merge %d x %d\n", from->size, to->size); );
if (from->size != to->size) {
VERIFIER_DEBUG ( printf ("different stack sizes %d x %d at 0x%04x\n", from->size, to->size, ctx->ip_offset); );
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Could not merge stacks, different sizes (%d x %d) at 0x%04x", from->size, to->size, ctx->ip_offset));
goto end_verify;
}
//FIXME we need to preserve CMMP attributes
//FIXME we must take null literals into consideration.
for (i = 0; i < from->size; ++i) {
ILStackDesc *new_slot = from->stack + i;
ILStackDesc *old_slot = to->stack + i;
MonoType *new_type = mono_type_from_stack_slot (new_slot);
MonoType *old_type = mono_type_from_stack_slot (old_slot);
MonoClass *old_class = mono_class_from_mono_type (old_type);
MonoClass *new_class = mono_class_from_mono_type (new_type);
MonoClass *match_class = NULL;
// S := T then U = S (new value is compatible with current value, keep current)
if (verify_stack_type_compatibility (ctx, old_type, new_slot)) {
copy_stack_value (new_slot, old_slot);
continue;
}
// T := S then U = T (old value is compatible with current value, use new)
if (verify_stack_type_compatibility (ctx, new_type, old_slot)) {
copy_stack_value (old_slot, new_slot);
continue;
}
if (mono_type_is_generic_argument (old_type) || mono_type_is_generic_argument (new_type)) {
char *old_name = stack_slot_full_name (old_slot);
char *new_name = stack_slot_full_name (new_slot);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Could not merge stack at depth %d, types not compatible: %s X %s at 0x%04x", i, old_name, new_name, ctx->ip_offset));
g_free (old_name);
g_free (new_name);
goto end_verify;
}
//both are reference types, use closest common super type
if (!mono_class_from_mono_type (old_type)->valuetype
&& !mono_class_from_mono_type (new_type)->valuetype
&& !stack_slot_is_managed_pointer (old_slot)
&& !stack_slot_is_managed_pointer (new_slot)) {
for (j = MIN (old_class->idepth, new_class->idepth) - 1; j > 0; --j) {
if (mono_metadata_type_equal (&old_class->supertypes [j]->byval_arg, &new_class->supertypes [j]->byval_arg)) {
match_class = old_class->supertypes [j];
goto match_found;
}
}
mono_class_setup_interfaces (old_class, &error);
if (!mono_error_ok (&error)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot merge stacks due to a TypeLoadException %s at 0x%04x", mono_error_get_message (&error), ctx->ip_offset));
mono_error_cleanup (&error);
goto end_verify;
}
for (j = 0; j < old_class->interface_count; ++j) {
for (k = 0; k < new_class->interface_count; ++k) {
if (mono_metadata_type_equal (&old_class->interfaces [j]->byval_arg, &new_class->interfaces [k]->byval_arg)) {
match_class = old_class->interfaces [j];
goto match_found;
}
}
}
//No decent super type found, use object
match_class = mono_defaults.object_class;
goto match_found;
} else if (is_compatible_boxed_valuetype (ctx,old_type, new_type, new_slot, FALSE) || is_compatible_boxed_valuetype (ctx, new_type, old_type, old_slot, FALSE)) {
match_class = mono_defaults.object_class;
goto match_found;
}
{
char *old_name = stack_slot_full_name (old_slot);
char *new_name = stack_slot_full_name (new_slot);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Could not merge stack at depth %d, types not compatible: %s X %s at 0x%04x", i, old_name, new_name, ctx->ip_offset));
g_free (old_name);
g_free (new_name);
}
set_stack_value (ctx, old_slot, &new_class->byval_arg, stack_slot_is_managed_pointer (old_slot));
goto end_verify;
match_found:
g_assert (match_class);
set_stack_value (ctx, old_slot, &match_class->byval_arg, stack_slot_is_managed_pointer (old_slot));
set_stack_value (ctx, new_slot, &match_class->byval_arg, stack_slot_is_managed_pointer (old_slot));
continue;
}
end_verify:
if (external)
to->flags |= IL_CODE_FLAG_WAS_TARGET;
to->flags |= IL_CODE_STACK_MERGED;
}
#define HANDLER_START(clause) ((clause)->flags == MONO_EXCEPTION_CLAUSE_FILTER ? (clause)->data.filter_offset : (clause)->handler_offset)
#define IS_CATCH_OR_FILTER(clause) ((clause)->flags == MONO_EXCEPTION_CLAUSE_FILTER || (clause)->flags == MONO_EXCEPTION_CLAUSE_NONE)
/*
* is_clause_in_range :
*
* Returns TRUE if either the protected block or the handler of @clause is in the @start - @end range.
*/
static gboolean
is_clause_in_range (MonoExceptionClause *clause, guint32 start, guint32 end)
{
if (clause->try_offset >= start && clause->try_offset < end)
return TRUE;
if (HANDLER_START (clause) >= start && HANDLER_START (clause) < end)
return TRUE;
return FALSE;
}
/*
* is_clause_inside_range :
*
* Returns TRUE if @clause lies completely inside the @start - @end range.
*/
static gboolean
is_clause_inside_range (MonoExceptionClause *clause, guint32 start, guint32 end)
{
if (clause->try_offset < start || (clause->try_offset + clause->try_len) > end)
return FALSE;
if (HANDLER_START (clause) < start || (clause->handler_offset + clause->handler_len) > end)
return FALSE;
return TRUE;
}
/*
* is_clause_nested :
*
* Returns TRUE if @nested is nested in @clause.
*/
static gboolean
is_clause_nested (MonoExceptionClause *clause, MonoExceptionClause *nested)
{
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER && is_clause_inside_range (nested, clause->data.filter_offset, clause->handler_offset))
return TRUE;
return is_clause_inside_range (nested, clause->try_offset, clause->try_offset + clause->try_len) ||
is_clause_inside_range (nested, clause->handler_offset, clause->handler_offset + clause->handler_len);
}
/* Test the relationship between 2 exception clauses. Following P.1 12.4.2.7 of ECMA,
* each pair of exception clauses must have one of the following properties:
* - one is fully nested inside the other (the outer must not be a filter clause) (the nested one must come earlier)
* - completely disjoint (none of the 3 regions of each entry overlaps with the other 3)
* - mutual protection (the protected blocks are EXACTLY the same, the handlers are disjoint, and all handlers are catch or all handlers are filter)
*/
static void
verify_clause_relationship (VerifyContext *ctx, MonoExceptionClause *clause, MonoExceptionClause *to_test)
{
/*clause is nested*/
if (to_test->flags == MONO_EXCEPTION_CLAUSE_FILTER && is_clause_inside_range (clause, to_test->data.filter_offset, to_test->handler_offset)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Exception clause inside filter"));
return;
}
/*wrong nesting order.*/
if (is_clause_nested (clause, to_test)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Nested exception clause appears after enclosing clause"));
return;
}
/*mutual protection*/
if (clause->try_offset == to_test->try_offset && clause->try_len == to_test->try_len) {
/*handlers are not disjoint*/
if (is_clause_in_range (to_test, HANDLER_START (clause), clause->handler_offset + clause->handler_len)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Exception handlers overlap"));
return;
}
/* handlers are not catch or filter */
if (!IS_CATCH_OR_FILTER (clause) || !IS_CATCH_OR_FILTER (to_test)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Exception clauses with shared protected block are neither catch or filter"));
return;
}
/*OK*/
return;
}
/*not completely disjoint*/
if ((is_clause_in_range (to_test, clause->try_offset, clause->try_offset + clause->try_len) ||
is_clause_in_range (to_test, HANDLER_START (clause), clause->handler_offset + clause->handler_len)) && !is_clause_nested (to_test, clause))
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Exception clauses overlap"));
}
#define code_bounds_check(size) \
if (ADDP_IS_GREATER_OR_OVF (ip, size, end)) {\
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Code overrun starting with 0x%x at 0x%04x", *ip, ctx.ip_offset)); \
break; \
} \
static gboolean
mono_opcode_is_prefix (int op)
{
switch (op) {
case MONO_CEE_UNALIGNED_:
case MONO_CEE_VOLATILE_:
case MONO_CEE_TAIL_:
case MONO_CEE_CONSTRAINED_:
case MONO_CEE_READONLY_:
return TRUE;
}
return FALSE;
}
/*
* FIXME: need to distinguish between valid and verifiable.
* Need to keep track of types on the stack.
* Verify types for opcodes.
*/
GSList*
mono_method_verify (MonoMethod *method, int level)
{
MonoError error;
const unsigned char *ip, *code_start;
const unsigned char *end;
MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
int i, n, need_merge = 0, start = 0;
guint token, ip_offset = 0, prefix = 0;
MonoGenericContext *generic_context = NULL;
MonoImage *image;
VerifyContext ctx;
GSList *tmp;
VERIFIER_DEBUG ( printf ("Verify IL for method %s %s %s\n", method->klass->name_space, method->klass->name, method->name); );
init_verifier_stats ();
if (method->iflags & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
(method->flags & (METHOD_ATTRIBUTE_PINVOKE_IMPL | METHOD_ATTRIBUTE_ABSTRACT))) {
return NULL;
}
memset (&ctx, 0, sizeof (VerifyContext));
//FIXME use mono_method_get_signature_full
ctx.signature = mono_method_signature (method);
if (!ctx.signature) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Could not decode method signature"));
finish_collect_stats ();
return ctx.list;
}
ctx.header = mono_method_get_header (method);
if (!ctx.header) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Could not decode method header"));
finish_collect_stats ();
return ctx.list;
}
ctx.method = method;
code_start = ip = ctx.header->code;
end = ip + ctx.header->code_size;
ctx.image = image = method->klass->image;
ctx.max_args = ctx.signature->param_count + ctx.signature->hasthis;
ctx.max_stack = ctx.header->max_stack;
ctx.verifiable = ctx.valid = 1;
ctx.level = level;
ctx.code = g_new (ILCodeDesc, ctx.header->code_size);
ctx.code_size = ctx.header->code_size;
MEM_ALLOC (sizeof (ILCodeDesc) * ctx.header->code_size);
memset(ctx.code, 0, sizeof (ILCodeDesc) * ctx.header->code_size);
ctx.num_locals = ctx.header->num_locals;
ctx.locals = g_memdup (ctx.header->locals, sizeof (MonoType*) * ctx.header->num_locals);
MEM_ALLOC (sizeof (MonoType*) * ctx.header->num_locals);
if (ctx.num_locals > 0 && !ctx.header->init_locals)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Method with locals variable but without init locals set"));
ctx.params = g_new (MonoType*, ctx.max_args);
MEM_ALLOC (sizeof (MonoType*) * ctx.max_args);
if (ctx.signature->hasthis)
ctx.params [0] = method->klass->valuetype ? &method->klass->this_arg : &method->klass->byval_arg;
memcpy (ctx.params + ctx.signature->hasthis, ctx.signature->params, sizeof (MonoType *) * ctx.signature->param_count);
if (ctx.signature->is_inflated)
ctx.generic_context = generic_context = mono_method_get_context (method);
if (!generic_context && (method->klass->generic_container || method->is_generic)) {
if (method->is_generic)
ctx.generic_context = generic_context = &(mono_method_get_generic_container (method)->context);
else
ctx.generic_context = generic_context = &method->klass->generic_container->context;
}
for (i = 0; i < ctx.num_locals; ++i) {
MonoType *uninflated = ctx.locals [i];
ctx.locals [i] = mono_class_inflate_generic_type_checked (ctx.locals [i], ctx.generic_context, &error);
if (!mono_error_ok (&error)) {
char *name = mono_type_full_name (ctx.locals [i] ? ctx.locals [i] : uninflated);
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid local %d of type %s", i, name));
g_free (name);
mono_error_cleanup (&error);
/* we must not free (in cleanup) what was not yet allocated (but only copied) */
ctx.num_locals = i;
ctx.max_args = 0;
goto cleanup;
}
}
for (i = 0; i < ctx.max_args; ++i) {
MonoType *uninflated = ctx.params [i];
ctx.params [i] = mono_class_inflate_generic_type_checked (ctx.params [i], ctx.generic_context, &error);
if (!mono_error_ok (&error)) {
char *name = mono_type_full_name (ctx.params [i] ? ctx.params [i] : uninflated);
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid parameter %d of type %s", i, name));
g_free (name);
mono_error_cleanup (&error);
/* we must not free (in cleanup) what was not yet allocated (but only copied) */
ctx.max_args = i;
goto cleanup;
}
}
stack_init (&ctx, &ctx.eval);
for (i = 0; i < ctx.num_locals; ++i) {
if (!mono_type_is_valid_in_context (&ctx, ctx.locals [i]))
break;
if (get_stack_type (ctx.locals [i]) == TYPE_INV) {
char *name = mono_type_full_name (ctx.locals [i]);
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid local %i of type %s", i, name));
g_free (name);
break;
}
}
for (i = 0; i < ctx.max_args; ++i) {
if (!mono_type_is_valid_in_context (&ctx, ctx.params [i]))
break;
if (get_stack_type (ctx.params [i]) == TYPE_INV) {
char *name = mono_type_full_name (ctx.params [i]);
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid parameter %i of type %s", i, name));
g_free (name);
break;
}
}
if (!ctx.valid)
goto cleanup;
for (i = 0; i < ctx.header->num_clauses && ctx.valid; ++i) {
MonoExceptionClause *clause = ctx.header->clauses + i;
VERIFIER_DEBUG (printf ("clause try %x len %x filter at %x handler at %x len %x\n", clause->try_offset, clause->try_len, clause->data.filter_offset, clause->handler_offset, clause->handler_len); );
if (clause->try_offset > ctx.code_size || ADD_IS_GREATER_OR_OVF (clause->try_offset, clause->try_len, ctx.code_size))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("try clause out of bounds at 0x%04x", clause->try_offset));
if (clause->try_len <= 0)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("try clause len <= 0 at 0x%04x", clause->try_offset));
if (clause->handler_offset > ctx.code_size || ADD_IS_GREATER_OR_OVF (clause->handler_offset, clause->handler_len, ctx.code_size))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("handler clause out of bounds at 0x%04x", clause->handler_offset));
if (clause->handler_len <= 0)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("handler clause len <= 0 at 0x%04x", clause->handler_offset));
if (clause->try_offset < clause->handler_offset && ADD_IS_GREATER_OR_OVF (clause->try_offset, clause->try_len, HANDLER_START (clause)))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("try block (at 0x%04x) includes handler block (at 0x%04x)", clause->try_offset, clause->handler_offset));
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
if (clause->data.filter_offset > ctx.code_size)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("filter clause out of bounds at 0x%04x", clause->try_offset));
if (clause->data.filter_offset >= clause->handler_offset)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("filter clause must come before the handler clause at 0x%04x", clause->data.filter_offset));
}
for (n = i + 1; n < ctx.header->num_clauses && ctx.valid; ++n)
verify_clause_relationship (&ctx, clause, ctx.header->clauses + n);
if (!ctx.valid)
break;
ctx.code [clause->try_offset].flags |= IL_CODE_FLAG_WAS_TARGET;
if (clause->try_offset + clause->try_len < ctx.code_size)
ctx.code [clause->try_offset + clause->try_len].flags |= IL_CODE_FLAG_WAS_TARGET;
if (clause->handler_offset + clause->handler_len < ctx.code_size)
ctx.code [clause->handler_offset + clause->handler_len].flags |= IL_CODE_FLAG_WAS_TARGET;
if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) {
if (!clause->data.catch_class) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Catch clause %d with invalid type", i));
break;
}
init_stack_with_value_at_exception_boundary (&ctx, ctx.code + clause->handler_offset, clause->data.catch_class);
}
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
init_stack_with_value_at_exception_boundary (&ctx, ctx.code + clause->data.filter_offset, mono_defaults.exception_class);
init_stack_with_value_at_exception_boundary (&ctx, ctx.code + clause->handler_offset, mono_defaults.exception_class);
}
}
original_bb = bb = mono_basic_block_split (method, &error);
if (!mono_error_ok (&error)) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid branch target: %s", mono_error_get_message (&error)));
mono_error_cleanup (&error);
goto cleanup;
}
g_assert (bb);
while (ip < end && ctx.valid) {
int op_size;
ip_offset = ip - code_start;
{
const unsigned char *ip_copy = ip;
int op;
if (ip_offset > bb->end) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Branch or EH block at [0x%04x] targets middle instruction at 0x%04x", bb->end, ip_offset));
goto cleanup;
}
if (ip_offset == bb->end)
bb = bb->next;
op_size = mono_opcode_value_and_size (&ip_copy, end, &op);
if (op_size == -1) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction %x at 0x%04x", *ip, ip_offset));
goto cleanup;
}
if (ADD_IS_GREATER_OR_OVF (ip_offset, op_size, bb->end)) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Branch or EH block targets middle of instruction at 0x%04x", ip_offset));
goto cleanup;
}
/*Last Instruction*/
if (ip_offset + op_size == bb->end && mono_opcode_is_prefix (op)) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Branch or EH block targets between prefix '%s' and instruction at 0x%04x", mono_opcode_name (op), ip_offset));
goto cleanup;
}
}
ctx.ip_offset = ip_offset = ip - code_start;
/*We need to check for fallthrough in and out of protected blocks.
* For falling out, once a protected block ends, we check whether the start flag is set.
* Likewise for falling in, we check whether ip is the start of a protected block while start is not set.
* TODO convert these checks to be done using flags and not this loop
*/
for (i = 0; i < ctx.header->num_clauses && ctx.valid; ++i) {
MonoExceptionClause *clause = ctx.header->clauses + i;
if ((clause->try_offset + clause->try_len == ip_offset) && start == 0) {
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("fallthru off try block at 0x%04x", ip_offset));
start = 1;
}
if ((clause->handler_offset + clause->handler_len == ip_offset) && start == 0) {
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("fallout of handler block at 0x%04x", ip_offset));
else
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("fallout of handler block at 0x%04x", ip_offset));
start = 1;
}
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER && clause->handler_offset == ip_offset && start == 0) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("fallout of filter block at 0x%04x", ip_offset));
start = 1;
}
if (clause->handler_offset == ip_offset && start == 0) {
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("fallthru handler block at 0x%04x", ip_offset));
start = 1;
}
if (clause->try_offset == ip_offset && ctx.eval.size > 0 && start == 0) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Try to enter try block with a non-empty stack at 0x%04x", ip_offset));
start = 1;
}
}
/*This must be done after fallthrough detection, otherwise it won't happen.*/
if (bb->dead) {
/*FIXME remove this once we move all bad branch checking code to use BB only*/
ctx.code [ip_offset].flags |= IL_CODE_FLAG_SEEN;
ip += op_size;
continue;
}
if (!ctx.valid)
break;
if (need_merge) {
VERIFIER_DEBUG ( printf ("extra merge needed! 0x%04x \n", ctx.target); );
merge_stacks (&ctx, &ctx.eval, &ctx.code [ctx.target], FALSE, TRUE);
need_merge = 0;
}
merge_stacks (&ctx, &ctx.eval, &ctx.code[ip_offset], start, FALSE);
start = 0;
/*TODO we can fast detect a forward branch or exception block targeting code after prefix, we should fail fast*/
#ifdef MONO_VERIFIER_DEBUG
{
char *discode;
discode = mono_disasm_code_one (NULL, method, ip, NULL);
discode [strlen (discode) - 1] = 0; /* no \n */
g_print ("[%d] %-29s (%d)\n", ip_offset, discode, ctx.eval.size);
g_free (discode);
}
dump_stack_state (&ctx.code [ip_offset]);
dump_stack_state (&ctx.eval);
#endif
switch (*ip) {
case CEE_NOP:
case CEE_BREAK:
++ip;
break;
case CEE_LDARG_0:
case CEE_LDARG_1:
case CEE_LDARG_2:
case CEE_LDARG_3:
push_arg (&ctx, *ip - CEE_LDARG_0, FALSE);
++ip;
break;
case CEE_LDARG_S:
case CEE_LDARGA_S:
code_bounds_check (2);
push_arg (&ctx, ip [1], *ip == CEE_LDARGA_S);
ip += 2;
break;
case CEE_ADD_OVF_UN:
do_binop (&ctx, *ip, add_ovf_un_table);
++ip;
break;
case CEE_SUB_OVF_UN:
do_binop (&ctx, *ip, sub_ovf_un_table);
++ip;
break;
case CEE_ADD_OVF:
case CEE_SUB_OVF:
case CEE_MUL_OVF:
case CEE_MUL_OVF_UN:
do_binop (&ctx, *ip, bin_ovf_table);
++ip;
break;
case CEE_ADD:
do_binop (&ctx, *ip, add_table);
++ip;
break;
case CEE_SUB:
do_binop (&ctx, *ip, sub_table);
++ip;
break;
case CEE_MUL:
case CEE_DIV:
case CEE_REM:
do_binop (&ctx, *ip, bin_op_table);
++ip;
break;
case CEE_AND:
case CEE_DIV_UN:
case CEE_OR:
case CEE_REM_UN:
case CEE_XOR:
do_binop (&ctx, *ip, int_bin_op_table);
++ip;
break;
case CEE_SHL:
case CEE_SHR:
case CEE_SHR_UN:
do_binop (&ctx, *ip, shift_op_table);
++ip;
break;
case CEE_POP:
if (!check_underflow (&ctx, 1))
break;
stack_pop_safe (&ctx);
++ip;
break;
case CEE_RET:
do_ret (&ctx);
++ip;
start = 1;
break;
case CEE_LDLOC_0:
case CEE_LDLOC_1:
case CEE_LDLOC_2:
case CEE_LDLOC_3:
/*TODO support definite assignment verification? */
push_local (&ctx, *ip - CEE_LDLOC_0, FALSE);
++ip;
break;
case CEE_STLOC_0:
case CEE_STLOC_1:
case CEE_STLOC_2:
case CEE_STLOC_3:
store_local (&ctx, *ip - CEE_STLOC_0);
++ip;
break;
case CEE_STLOC_S:
code_bounds_check (2);
store_local (&ctx, ip [1]);
ip += 2;
break;
case CEE_STARG_S:
code_bounds_check (2);
store_arg (&ctx, ip [1]);
ip += 2;
break;
case CEE_LDC_I4_M1:
case CEE_LDC_I4_0:
case CEE_LDC_I4_1:
case CEE_LDC_I4_2:
case CEE_LDC_I4_3:
case CEE_LDC_I4_4:
case CEE_LDC_I4_5:
case CEE_LDC_I4_6:
case CEE_LDC_I4_7:
case CEE_LDC_I4_8:
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
++ip;
break;
case CEE_LDC_I4_S:
code_bounds_check (2);
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
ip += 2;
break;
case CEE_LDC_I4:
code_bounds_check (5);
if (check_overflow (&ctx))
stack_push_val (&ctx,TYPE_I4, &mono_defaults.int32_class->byval_arg);
ip += 5;
break;
case CEE_LDC_I8:
code_bounds_check (9);
if (check_overflow (&ctx))
stack_push_val (&ctx,TYPE_I8, &mono_defaults.int64_class->byval_arg);
ip += 9;
break;
case CEE_LDC_R4:
code_bounds_check (5);
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_R8, &mono_defaults.double_class->byval_arg);
ip += 5;
break;
case CEE_LDC_R8:
code_bounds_check (9);
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_R8, &mono_defaults.double_class->byval_arg);
ip += 9;
break;
case CEE_LDNULL:
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_COMPLEX | NULL_LITERAL_MASK, &mono_defaults.object_class->byval_arg);
++ip;
break;
case CEE_BEQ_S:
case CEE_BNE_UN_S:
code_bounds_check (2);
do_branch_op (&ctx, (signed char)ip [1] + 2, cmp_br_eq_op);
ip += 2;
need_merge = 1;
break;
case CEE_BGE_S:
case CEE_BGT_S:
case CEE_BLE_S:
case CEE_BLT_S:
case CEE_BGE_UN_S:
case CEE_BGT_UN_S:
case CEE_BLE_UN_S:
case CEE_BLT_UN_S:
code_bounds_check (2);
do_branch_op (&ctx, (signed char)ip [1] + 2, cmp_br_op);
ip += 2;
need_merge = 1;
break;
case CEE_BEQ:
case CEE_BNE_UN:
code_bounds_check (5);
do_branch_op (&ctx, (gint32)read32 (ip + 1) + 5, cmp_br_eq_op);
ip += 5;
need_merge = 1;
break;
case CEE_BGE:
case CEE_BGT:
case CEE_BLE:
case CEE_BLT:
case CEE_BGE_UN:
case CEE_BGT_UN:
case CEE_BLE_UN:
case CEE_BLT_UN:
code_bounds_check (5);
do_branch_op (&ctx, (gint32)read32 (ip + 1) + 5, cmp_br_op);
ip += 5;
need_merge = 1;
break;
case CEE_LDLOC_S:
case CEE_LDLOCA_S:
code_bounds_check (2);
push_local (&ctx, ip[1], *ip == CEE_LDLOCA_S);
ip += 2;
break;
case CEE_UNUSED99:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Use of the `unused' opcode"));
++ip;
break;
case CEE_DUP: {
ILStackDesc *top;
if (!check_underflow (&ctx, 1))
break;
if (!check_overflow (&ctx))
break;
top = stack_push (&ctx);
copy_stack_value (top, stack_peek (&ctx, 1));
++ip;
break;
}
case CEE_JMP:
code_bounds_check (5);
if (ctx.eval.size)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Eval stack must be empty in jmp at 0x%04x", ip_offset));
token = read32 (ip + 1);
if (in_any_block (ctx.header, ip_offset))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("jmp cannot escape exception blocks at 0x%04x", ip_offset));
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Intruction jmp is not verifiable at 0x%04x", ctx.ip_offset));
/*
* FIXME: check signature, retval, arguments etc.
*/
ip += 5;
break;
case CEE_CALL:
case CEE_CALLVIRT:
code_bounds_check (5);
do_invoke_method (&ctx, read32 (ip + 1), *ip == CEE_CALLVIRT);
ip += 5;
break;
case CEE_CALLI:
code_bounds_check (5);
token = read32 (ip + 1);
/*
* FIXME: check signature, retval, arguments etc.
* FIXME: check requirements for tail call
*/
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Intruction calli is not verifiable at 0x%04x", ctx.ip_offset));
ip += 5;
break;
case CEE_BR_S:
code_bounds_check (2);
do_static_branch (&ctx, (signed char)ip [1] + 2);
need_merge = 1;
ip += 2;
start = 1;
break;
case CEE_BRFALSE_S:
case CEE_BRTRUE_S:
code_bounds_check (2);
do_boolean_branch_op (&ctx, (signed char)ip [1] + 2);
ip += 2;
need_merge = 1;
break;
case CEE_BR:
code_bounds_check (5);
do_static_branch (&ctx, (gint32)read32 (ip + 1) + 5);
need_merge = 1;
ip += 5;
start = 1;
break;
case CEE_BRFALSE:
case CEE_BRTRUE:
code_bounds_check (5);
do_boolean_branch_op (&ctx, (gint32)read32 (ip + 1) + 5);
ip += 5;
need_merge = 1;
break;
case CEE_SWITCH: {
guint32 entries;
code_bounds_check (5);
entries = read32 (ip + 1);
if (entries > 0xFFFFFFFFU / sizeof (guint32))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Too many switch entries %x at 0x%04x", entries, ctx.ip_offset));
ip += 5;
code_bounds_check (sizeof (guint32) * entries);
do_switch (&ctx, entries, ip);
ip += sizeof (guint32) * entries;
break;
}
case CEE_LDIND_I1:
case CEE_LDIND_U1:
case CEE_LDIND_I2:
case CEE_LDIND_U2:
case CEE_LDIND_I4:
case CEE_LDIND_U4:
case CEE_LDIND_I8:
case CEE_LDIND_I:
case CEE_LDIND_R4:
case CEE_LDIND_R8:
case CEE_LDIND_REF:
do_load_indirect (&ctx, *ip);
++ip;
break;
case CEE_STIND_REF:
case CEE_STIND_I1:
case CEE_STIND_I2:
case CEE_STIND_I4:
case CEE_STIND_I8:
case CEE_STIND_R4:
case CEE_STIND_R8:
case CEE_STIND_I:
do_store_indirect (&ctx, *ip);
++ip;
break;
case CEE_NOT:
case CEE_NEG:
do_unary_math_op (&ctx, *ip);
++ip;
break;
case CEE_CONV_I1:
case CEE_CONV_I2:
case CEE_CONV_I4:
case CEE_CONV_U1:
case CEE_CONV_U2:
case CEE_CONV_U4:
do_conversion (&ctx, TYPE_I4);
++ip;
break;
case CEE_CONV_I8:
case CEE_CONV_U8:
do_conversion (&ctx, TYPE_I8);
++ip;
break;
case CEE_CONV_R4:
case CEE_CONV_R8:
case CEE_CONV_R_UN:
do_conversion (&ctx, TYPE_R8);
++ip;
break;
case CEE_CONV_I:
case CEE_CONV_U:
do_conversion (&ctx, TYPE_NATIVE_INT);
++ip;
break;
case CEE_CPOBJ:
code_bounds_check (5);
do_cpobj (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDOBJ:
code_bounds_check (5);
do_ldobj_value (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDSTR:
code_bounds_check (5);
do_ldstr (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_NEWOBJ:
code_bounds_check (5);
do_newobj (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CASTCLASS:
case CEE_ISINST:
code_bounds_check (5);
do_cast (&ctx, read32 (ip + 1), *ip == CEE_CASTCLASS ? "castclass" : "isinst");
ip += 5;
break;
case CEE_UNUSED58:
case CEE_UNUSED1:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Use of the `unused' opcode"));
++ip;
break;
case CEE_UNBOX:
code_bounds_check (5);
do_unbox_value (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_THROW:
do_throw (&ctx);
start = 1;
++ip;
break;
case CEE_LDFLD:
case CEE_LDFLDA:
code_bounds_check (5);
do_push_field (&ctx, read32 (ip + 1), *ip == CEE_LDFLDA);
ip += 5;
break;
case CEE_LDSFLD:
case CEE_LDSFLDA:
code_bounds_check (5);
do_push_static_field (&ctx, read32 (ip + 1), *ip == CEE_LDSFLDA);
ip += 5;
break;
case CEE_STFLD:
code_bounds_check (5);
do_store_field (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_STSFLD:
code_bounds_check (5);
do_store_static_field (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_STOBJ:
code_bounds_check (5);
do_stobj (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CONV_OVF_I1_UN:
case CEE_CONV_OVF_I2_UN:
case CEE_CONV_OVF_I4_UN:
case CEE_CONV_OVF_U1_UN:
case CEE_CONV_OVF_U2_UN:
case CEE_CONV_OVF_U4_UN:
do_conversion (&ctx, TYPE_I4);
++ip;
break;
case CEE_CONV_OVF_I8_UN:
case CEE_CONV_OVF_U8_UN:
do_conversion (&ctx, TYPE_I8);
++ip;
break;
case CEE_CONV_OVF_I_UN:
case CEE_CONV_OVF_U_UN:
do_conversion (&ctx, TYPE_NATIVE_INT);
++ip;
break;
case CEE_BOX:
code_bounds_check (5);
do_box_value (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_NEWARR:
code_bounds_check (5);
do_newarr (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDLEN:
do_ldlen (&ctx);
++ip;
break;
case CEE_LDELEMA:
code_bounds_check (5);
do_ldelema (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDELEM_I1:
case CEE_LDELEM_U1:
case CEE_LDELEM_I2:
case CEE_LDELEM_U2:
case CEE_LDELEM_I4:
case CEE_LDELEM_U4:
case CEE_LDELEM_I8:
case CEE_LDELEM_I:
case CEE_LDELEM_R4:
case CEE_LDELEM_R8:
case CEE_LDELEM_REF:
do_ldelem (&ctx, *ip, 0);
++ip;
break;
case CEE_STELEM_I:
case CEE_STELEM_I1:
case CEE_STELEM_I2:
case CEE_STELEM_I4:
case CEE_STELEM_I8:
case CEE_STELEM_R4:
case CEE_STELEM_R8:
case CEE_STELEM_REF:
do_stelem (&ctx, *ip, 0);
++ip;
break;
case CEE_LDELEM:
code_bounds_check (5);
do_ldelem (&ctx, *ip, read32 (ip + 1));
ip += 5;
break;
case CEE_STELEM:
code_bounds_check (5);
do_stelem (&ctx, *ip, read32 (ip + 1));
ip += 5;
break;
case CEE_UNBOX_ANY:
code_bounds_check (5);
do_unbox_any (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CONV_OVF_I1:
case CEE_CONV_OVF_U1:
case CEE_CONV_OVF_I2:
case CEE_CONV_OVF_U2:
case CEE_CONV_OVF_I4:
case CEE_CONV_OVF_U4:
do_conversion (&ctx, TYPE_I4);
++ip;
break;
case CEE_CONV_OVF_I8:
case CEE_CONV_OVF_U8:
do_conversion (&ctx, TYPE_I8);
++ip;
break;
case CEE_CONV_OVF_I:
case CEE_CONV_OVF_U:
do_conversion (&ctx, TYPE_NATIVE_INT);
++ip;
break;
case CEE_REFANYVAL:
code_bounds_check (5);
do_refanyval (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CKFINITE:
do_ckfinite (&ctx);
++ip;
break;
case CEE_MKREFANY:
code_bounds_check (5);
do_mkrefany (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDTOKEN:
code_bounds_check (5);
do_load_token (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_ENDFINALLY:
if (!is_correct_endfinally (ctx.header, ip_offset))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("endfinally must be used inside a finally/fault handler at 0x%04x", ctx.ip_offset));
ctx.eval.size = 0;
start = 1;
++ip;
break;
case CEE_LEAVE:
code_bounds_check (5);
do_leave (&ctx, read32 (ip + 1) + 5);
ip += 5;
start = 1;
need_merge = 1;
break;
case CEE_LEAVE_S:
code_bounds_check (2);
do_leave (&ctx, (signed char)ip [1] + 2);
ip += 2;
start = 1;
need_merge = 1;
break;
case CEE_PREFIX1:
code_bounds_check (2);
++ip;
switch (*ip) {
case CEE_STLOC:
code_bounds_check (3);
store_local (&ctx, read16 (ip + 1));
ip += 3;
break;
case CEE_CEQ:
do_cmp_op (&ctx, cmp_br_eq_op, *ip);
++ip;
break;
case CEE_CGT:
case CEE_CGT_UN:
case CEE_CLT:
case CEE_CLT_UN:
do_cmp_op (&ctx, cmp_br_op, *ip);
++ip;
break;
case CEE_STARG:
code_bounds_check (3);
store_arg (&ctx, read16 (ip + 1) );
ip += 3;
break;
case CEE_ARGLIST:
if (!check_overflow (&ctx))
break;
if (ctx.signature->call_convention != MONO_CALL_VARARG)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Cannot use arglist on method without VARGARG calling convention at 0x%04x", ctx.ip_offset));
set_stack_value (&ctx, stack_push (&ctx), &mono_defaults.argumenthandle_class->byval_arg, FALSE);
++ip;
break;
case CEE_LDFTN:
code_bounds_check (5);
do_load_function_ptr (&ctx, read32 (ip + 1), FALSE);
ip += 5;
break;
case CEE_LDVIRTFTN:
code_bounds_check (5);
do_load_function_ptr (&ctx, read32 (ip + 1), TRUE);
ip += 5;
break;
case CEE_LDARG:
case CEE_LDARGA:
code_bounds_check (3);
push_arg (&ctx, read16 (ip + 1), *ip == CEE_LDARGA);
ip += 3;
break;
case CEE_LDLOC:
case CEE_LDLOCA:
code_bounds_check (3);
push_local (&ctx, read16 (ip + 1), *ip == CEE_LDLOCA);
ip += 3;
break;
case CEE_LOCALLOC:
do_localloc (&ctx);
++ip;
break;
case CEE_UNUSED56:
case CEE_UNUSED57:
case CEE_UNUSED70:
case CEE_UNUSED:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Use of the `unused' opcode"));
++ip;
break;
case CEE_ENDFILTER:
do_endfilter (&ctx);
start = 1;
++ip;
break;
case CEE_UNALIGNED_:
code_bounds_check (2);
prefix |= PREFIX_UNALIGNED;
ip += 2;
break;
case CEE_VOLATILE_:
prefix |= PREFIX_VOLATILE;
++ip;
break;
case CEE_TAIL_:
prefix |= PREFIX_TAIL;
++ip;
if (ip < end && (*ip != CEE_CALL && *ip != CEE_CALLI && *ip != CEE_CALLVIRT))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("tail prefix must be used only with call opcodes at 0x%04x", ip_offset));
break;
case CEE_INITOBJ:
code_bounds_check (5);
do_initobj (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CONSTRAINED_:
code_bounds_check (5);
ctx.constrained_type = get_boxable_mono_type (&ctx, read32 (ip + 1), "constrained.");
prefix |= PREFIX_CONSTRAINED;
ip += 5;
break;
case CEE_READONLY_:
prefix |= PREFIX_READONLY;
ip++;
break;
case CEE_CPBLK:
CLEAR_PREFIX (&ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (&ctx, 3))
break;
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Instruction cpblk is not verifiable at 0x%04x", ctx.ip_offset));
ip++;
break;
case CEE_INITBLK:
CLEAR_PREFIX (&ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (&ctx, 3))
break;
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Instruction initblk is not verifiable at 0x%04x", ctx.ip_offset));
ip++;
break;
case CEE_NO_:
ip += 2;
break;
case CEE_RETHROW:
if (!is_correct_rethrow (ctx.header, ip_offset))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("rethrow must be used inside a catch handler at 0x%04x", ctx.ip_offset));
ctx.eval.size = 0;
start = 1;
++ip;
break;
case CEE_SIZEOF:
code_bounds_check (5);
do_sizeof (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_REFANYTYPE:
do_refanytype (&ctx);
++ip;
break;
default:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction FE %x at 0x%04x", *ip, ctx.ip_offset));
++ip;
}
break;
default:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction %x at 0x%04x", *ip, ctx.ip_offset));
++ip;
}
/*TODO we can fast detect a forward branch or exception block targeting code after prefix, we should fail fast*/
if (prefix) {
if (!ctx.prefix_set) //first prefix
ctx.code [ctx.ip_offset].flags |= IL_CODE_FLAG_SEEN;
ctx.prefix_set |= prefix;
ctx.has_flags = TRUE;
prefix = 0;
} else {
if (!ctx.has_flags)
ctx.code [ctx.ip_offset].flags |= IL_CODE_FLAG_SEEN;
if (ctx.prefix_set & PREFIX_CONSTRAINED)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction after constrained prefix at 0x%04x", ctx.ip_offset));
if (ctx.prefix_set & PREFIX_READONLY)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction after readonly prefix at 0x%04x", ctx.ip_offset));
if (ctx.prefix_set & PREFIX_VOLATILE)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction after volatile prefix at 0x%04x", ctx.ip_offset));
if (ctx.prefix_set & PREFIX_UNALIGNED)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction after unaligned prefix at 0x%04x", ctx.ip_offset));
ctx.prefix_set = prefix = 0;
ctx.has_flags = FALSE;
}
}
/*
* if ip != end we overflowed: mark as error.
*/
if ((ip != end || !start) && ctx.verifiable && !ctx.list) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Run ahead of method code at 0x%04x", ip_offset));
}
/*We should guard against the last decoded opcode, otherwise we might add errors that don't make sense.*/
for (i = 0; i < ctx.code_size && i < ip_offset; ++i) {
if (ctx.code [i].flags & IL_CODE_FLAG_WAS_TARGET) {
if (!(ctx.code [i].flags & IL_CODE_FLAG_SEEN))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Branch or exception block target middle of intruction at 0x%04x", i));
if (ctx.code [i].flags & IL_CODE_DELEGATE_SEQUENCE)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Branch to delegate code sequence at 0x%04x", i));
}
if ((ctx.code [i].flags & IL_CODE_LDFTN_DELEGATE_NONFINAL_VIRTUAL) && ctx.has_this_store)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Invalid ldftn with virtual function in method with stdarg 0 at 0x%04x", i));
if ((ctx.code [i].flags & IL_CODE_CALL_NONFINAL_VIRTUAL) && ctx.has_this_store)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Invalid call to a non-final virtual function in method with stdarg.0 or ldarga.0 at 0x%04x", i));
}
if (mono_method_is_constructor (ctx.method) && !ctx.super_ctor_called && !ctx.method->klass->valuetype && ctx.method->klass != mono_defaults.object_class) {
char *method_name = mono_method_full_name (ctx.method, TRUE);
char *type = mono_type_get_full_name (ctx.method->klass);
if (ctx.method->klass->parent && ctx.method->klass->parent->exception_type != MONO_EXCEPTION_NONE)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Constructor %s for type %s not calling base type ctor due to a TypeLoadException on base type.", method_name, type));
else
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Constructor %s for type %s not calling base type ctor.", method_name, type));
g_free (method_name);
g_free (type);
}
cleanup:
if (ctx.code) {
for (i = 0; i < ctx.header->code_size; ++i) {
if (ctx.code [i].stack)
g_free (ctx.code [i].stack);
}
}
for (tmp = ctx.funptrs; tmp; tmp = tmp->next)
g_free (tmp->data);
g_slist_free (ctx.funptrs);
for (tmp = ctx.exception_types; tmp; tmp = tmp->next)
mono_metadata_free_type (tmp->data);
g_slist_free (ctx.exception_types);
for (i = 0; i < ctx.num_locals; ++i) {
if (ctx.locals [i])
mono_metadata_free_type (ctx.locals [i]);
}
for (i = 0; i < ctx.max_args; ++i) {
if (ctx.params [i])
mono_metadata_free_type (ctx.params [i]);
}
if (ctx.eval.stack)
g_free (ctx.eval.stack);
if (ctx.code)
g_free (ctx.code);
g_free (ctx.locals);
g_free (ctx.params);
mono_basic_block_free (original_bb);
mono_metadata_free_mh (ctx.header);
finish_collect_stats ();
return ctx.list;
}
char*
mono_verify_corlib ()
{
/* This is a public API function so cannot be removed */
return NULL;
}
/*
* Returns true if @method needs to be verified.
*
*/
gboolean
mono_verifier_is_enabled_for_method (MonoMethod *method)
{
return mono_verifier_is_enabled_for_class (method->klass) && method->wrapper_type == MONO_WRAPPER_NONE;
}
/*
* Returns true if @klass needs to be verified.
*
*/
gboolean
mono_verifier_is_enabled_for_class (MonoClass *klass)
{
return verify_all || (verifier_mode > MONO_VERIFIER_MODE_OFF && !(klass->image->assembly && klass->image->assembly->in_gac) && klass->image != mono_defaults.corlib);
}
gboolean
mono_verifier_is_enabled_for_image (MonoImage *image)
{
return verify_all || verifier_mode > MONO_VERIFIER_MODE_OFF;
}
gboolean
mono_verifier_is_method_full_trust (MonoMethod *method)
{
return mono_verifier_is_class_full_trust (method->klass);
}
/*
* Returns whether @klass is under full trust.
*
* TODO This code doesn't take CAS into account.
*
* Under verify_all, all user code must be verifiable if no security option was set.
*
*/
gboolean
mono_verifier_is_class_full_trust (MonoClass *klass)
{
/* under CoreCLR, code is trusted if it is part of the "platform"; otherwise all code inside the GAC is trusted */
gboolean trusted_location = (mono_security_get_mode () != MONO_SECURITY_MODE_CORE_CLR) ?
(klass->image->assembly && klass->image->assembly->in_gac) : mono_security_core_clr_is_platform_image (klass->image);
if (verify_all && verifier_mode == MONO_VERIFIER_MODE_OFF)
return trusted_location || klass->image == mono_defaults.corlib;
return verifier_mode < MONO_VERIFIER_MODE_VERIFIABLE || trusted_location || klass->image == mono_defaults.corlib;
}
GSList*
mono_method_verify_with_current_settings (MonoMethod *method, gboolean skip_visibility)
{
return mono_method_verify (method,
(verifier_mode != MONO_VERIFIER_MODE_STRICT ? MONO_VERIFY_NON_STRICT: 0)
| (!mono_verifier_is_method_full_trust (method) ? MONO_VERIFY_FAIL_FAST : 0)
| (skip_visibility ? MONO_VERIFY_SKIP_VISIBILITY : 0));
}
static int
get_field_end (MonoClassField *field)
{
int align;
int size = mono_type_size (field->type, &align);
if (size == 0)
size = 4; /*FIXME Is this a safe bet?*/
return size + field->offset;
}
static gboolean
verify_class_for_overlapping_reference_fields (MonoClass *class)
{
int i = 0, j;
gpointer iter = NULL;
MonoClassField *field;
gboolean is_fulltrust = mono_verifier_is_class_full_trust (class);
/*We can't skip types with !has_references since this is calculated after we have run.*/
if (!((class->flags & TYPE_ATTRIBUTE_LAYOUT_MASK) == TYPE_ATTRIBUTE_EXPLICIT_LAYOUT))
return TRUE;
/*We must check for stuff overlapping reference fields.
The outer loop uses mono_class_get_fields to ensure that MonoClass:fields get inited.
*/
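/*
 * Illustrative example (not part of the original source): with
 * explicit layout this check rejects a type where a scalar overlaps
 * an object reference, e.g. the C# equivalent of:
 *
 *   [StructLayout (LayoutKind.Explicit)]
 *   struct Evil {
 *       [FieldOffset (0)] object o;
 *       [FieldOffset (2)] int i;    // overlaps the reference in 'o'
 *   }
 *
 * Two references sharing the same offset are tolerated only for
 * full-trust code (see the is_fulltrust test below).
 */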
while ((field = mono_class_get_fields (class, &iter))) {
int fieldEnd = get_field_end (field);
gboolean is_valuetype = !MONO_TYPE_IS_REFERENCE (field->type);
++i;
if (mono_field_is_deleted (field) || (field->type->attrs & FIELD_ATTRIBUTE_STATIC))
continue;
for (j = i; j < class->field.count; ++j) {
MonoClassField *other = &class->fields [j];
int otherEnd = get_field_end (other);
if (mono_field_is_deleted (other) || (is_valuetype && !MONO_TYPE_IS_REFERENCE (other->type)) || (other->type->attrs & FIELD_ATTRIBUTE_STATIC))
continue;
if (!is_valuetype && MONO_TYPE_IS_REFERENCE (other->type) && field->offset == other->offset && is_fulltrust)
continue;
if ((otherEnd > field->offset && otherEnd <= fieldEnd) || (other->offset >= field->offset && other->offset < fieldEnd))
return FALSE;
}
}
return TRUE;
}
static guint
field_hash (gconstpointer key)
{
const MonoClassField *field = key;
return g_str_hash (field->name) ^ mono_metadata_type_hash (field->type); /**/
}
static gboolean
field_equals (gconstpointer _a, gconstpointer _b)
{
const MonoClassField *a = _a;
const MonoClassField *b = _b;
return !strcmp (a->name, b->name) && mono_metadata_type_equal (a->type, b->type);
}
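/*
 * Note (added for clarity): this hash/equality pair treats two fields
 * as duplicates only when both name and type match, so a class
 * declaring "int x" twice is rejected by verify_class_fields below,
 * while "int x" and "long x" may coexist.
 */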
static gboolean
verify_class_fields (MonoClass *class)
{
gpointer iter = NULL;
MonoClassField *field;
MonoGenericContext *context = mono_class_get_context (class);
GHashTable *unique_fields = g_hash_table_new_full (&field_hash, &field_equals, NULL, NULL);
if (class->generic_container)
context = &class->generic_container->context;
while ((field = mono_class_get_fields (class, &iter)) != NULL) {
if (!mono_type_is_valid_type_in_context (field->type, context)) {
g_hash_table_destroy (unique_fields);
return FALSE;
}
if (g_hash_table_lookup (unique_fields, field)) {
g_hash_table_destroy (unique_fields);
return FALSE;
}
g_hash_table_insert (unique_fields, field, field);
}
g_hash_table_destroy (unique_fields);
return TRUE;
}
static gboolean
verify_interfaces (MonoClass *class)
{
int i;
for (i = 0; i < class->interface_count; ++i) {
MonoClass *iface = class->interfaces [i];
if (!(iface->flags & TYPE_ATTRIBUTE_INTERFACE))
return FALSE;
}
return TRUE;
}
static gboolean
verify_valuetype_layout_with_target (MonoClass *class, MonoClass *target_class)
{
int type;
gpointer iter = NULL;
MonoClassField *field;
MonoClass *field_class;
if (!class->valuetype)
return TRUE;
type = class->byval_arg.type;
/*primitive type fields are not properly decoded*/
if ((type >= MONO_TYPE_BOOLEAN && type <= MONO_TYPE_R8) || (type >= MONO_TYPE_I && type <= MONO_TYPE_U))
return TRUE;
while ((field = mono_class_get_fields (class, &iter)) != NULL) {
if (!field->type)
return FALSE;
if (field->type->attrs & (FIELD_ATTRIBUTE_STATIC | FIELD_ATTRIBUTE_HAS_FIELD_RVA))
continue;
field_class = mono_class_get_generic_type_definition (mono_class_from_mono_type (field->type));
if (field_class == target_class || class == field_class || !verify_valuetype_layout_with_target (field_class, target_class))
return FALSE;
}
return TRUE;
}
static gboolean
verify_valuetype_layout (MonoClass *class)
{
gboolean res;
res = verify_valuetype_layout_with_target (class, class);
return res;
}
static gboolean
recursive_mark_constraint_args (MonoBitSet *used_args, MonoGenericContainer *gc, MonoType *type)
{
int idx;
MonoClass **constraints;
MonoGenericParamInfo *param_info;
g_assert (mono_type_is_generic_argument (type));
idx = mono_type_get_generic_param_num (type);
if (mono_bitset_test_fast (used_args, idx))
return FALSE;
mono_bitset_set_fast (used_args, idx);
param_info = mono_generic_container_get_param_info (gc, idx);
if (!param_info->constraints)
return TRUE;
for (constraints = param_info->constraints; *constraints; ++constraints) {
MonoClass *ctr = *constraints;
MonoType *constraint_type = &ctr->byval_arg;
if (mono_type_is_generic_argument (constraint_type) && !recursive_mark_constraint_args (used_args, gc, constraint_type))
return FALSE;
}
return TRUE;
}
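/*
 * Example (added for clarity): the recursive marking above rejects
 * cyclic constraints such as the C# declaration
 *
 *   class C<T, U> where T : U where U : T { }
 *
 * since following the constraint chain revisits an already-marked
 * generic argument.
 */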
static gboolean
verify_generic_parameters (MonoClass *class)
{
int i;
MonoGenericContainer *gc = class->generic_container;
MonoBitSet *used_args = mono_bitset_new (gc->type_argc, 0);
for (i = 0; i < gc->type_argc; ++i) {
MonoGenericParamInfo *param_info = mono_generic_container_get_param_info (gc, i);
MonoClass **constraints;
if (!param_info->constraints)
continue;
mono_bitset_clear_all (used_args);
mono_bitset_set_fast (used_args, i);
for (constraints = param_info->constraints; *constraints; ++constraints) {
MonoClass *ctr = *constraints;
MonoType *constraint_type = &ctr->byval_arg;
if (!mono_type_is_valid_type_in_context (constraint_type, &gc->context))
goto fail;
if (mono_type_is_generic_argument (constraint_type) && !recursive_mark_constraint_args (used_args, gc, constraint_type))
goto fail;
if (ctr->generic_class && !mono_class_is_valid_generic_instantiation (NULL, ctr))
goto fail;
}
}
mono_bitset_free (used_args);
return TRUE;
fail:
mono_bitset_free (used_args);
return FALSE;
}
/*
* Check if the class is verifiable.
*
* Right now there are no conditions that make a class valid but not verifiable. Both overlapping reference
* field and invalid generic instantiation are fatal errors.
*
* This method must be safe to call from mono_class_init, and all code must be careful about that.
*
*/
gboolean
mono_verifier_verify_class (MonoClass *class)
{
/*Neither <Module>, object nor interfaces have a parent.*/
if (!class->parent &&
class != mono_defaults.object_class &&
!MONO_CLASS_IS_INTERFACE (class) &&
(!class->image->dynamic && class->type_token != 0x2000001)) /*<Module> is the first type in the assembly*/
return FALSE;
if (class->parent) {
if (MONO_CLASS_IS_INTERFACE (class->parent))
return FALSE;
if (!class->generic_class && class->parent->generic_container)
return FALSE;
}
if (class->generic_container && (class->flags & TYPE_ATTRIBUTE_LAYOUT_MASK) == TYPE_ATTRIBUTE_EXPLICIT_LAYOUT)
return FALSE;
if (class->generic_container && !verify_generic_parameters (class))
return FALSE;
if (!verify_class_for_overlapping_reference_fields (class))
return FALSE;
if (class->generic_class && !mono_class_is_valid_generic_instantiation (NULL, class))
return FALSE;
if (class->generic_class == NULL && !verify_class_fields (class))
return FALSE;
if (class->valuetype && !verify_valuetype_layout (class))
return FALSE;
if (!verify_interfaces (class))
return FALSE;
return TRUE;
}
gboolean
mono_verifier_class_is_valid_generic_instantiation (MonoClass *class)
{
return mono_class_is_valid_generic_instantiation (NULL, class);
}
#else
gboolean
mono_verifier_verify_class (MonoClass *class)
{
/* The verifier was disabled at compile time */
return TRUE;
}
GSList*
mono_method_verify_with_current_settings (MonoMethod *method, gboolean skip_visibility)
{
/* The verifier was disabled at compile time */
return NULL;
}
gboolean
mono_verifier_is_class_full_trust (MonoClass *klass)
{
/* The verifier was disabled at compile time */
return TRUE;
}
gboolean
mono_verifier_is_method_full_trust (MonoMethod *method)
{
/* The verifier was disabled at compile time */
return TRUE;
}
gboolean
mono_verifier_is_enabled_for_image (MonoImage *image)
{
/* The verifier was disabled at compile time */
return FALSE;
}
gboolean
mono_verifier_is_enabled_for_class (MonoClass *klass)
{
/* The verifier was disabled at compile time */
return FALSE;
}
gboolean
mono_verifier_is_enabled_for_method (MonoMethod *method)
{
/* The verifier was disabled at compile time */
return FALSE;
}
GSList*
mono_method_verify (MonoMethod *method, int level)
{
/* The verifier was disabled at compile time */
return NULL;
}
void
mono_free_verify_list (GSList *list)
{
/* The verifier was disabled at compile time */
/* will always be null if verifier is disabled */
}
gboolean
mono_verifier_class_is_valid_generic_instantiation (MonoClass *class)
{
return TRUE;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_4718_2 |
crossvul-cpp_data_good_1658_3 | /*
* ntp_util.c - stuff I didn't have any other place for
*/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include "ntpd.h"
#include "ntp_unixtime.h"
#include "ntp_filegen.h"
#include "ntp_if.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"
#include "ntp_calendar.h"
#include "lib_strbuf.h"
#include <stdio.h>
#include <ctype.h>
#include <sys/types.h>
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_IEEEFP_H
# include <ieeefp.h>
#endif
#ifdef HAVE_MATH_H
# include <math.h>
#endif
#ifdef DOSYNCTODR
# if !defined(VMS)
# include <sys/resource.h>
# endif /* VMS */
#endif
#if defined(VMS)
# include <descrip.h>
#endif /* VMS */
/*
* Defines used by the leapseconds stuff
*/
#define MAX_TAI 100 /* max TAI offset (s) */
#define L_DAY 86400UL /* seconds per day */
#define L_YEAR (L_DAY * 365) /* seconds per year */
#define L_LYEAR (L_YEAR + L_DAY) /* seconds per leap year */
#define L_4YEAR (L_LYEAR + 3 * L_YEAR) /* seconds per four-year leap cycle */
#define L_CENT (L_4YEAR * 25) /* seconds per century */
/*
* This contains odds and ends, including the hourly stats, various
* configuration items, leapseconds stuff, etc.
*/
/*
* File names
*/
static char *key_file_name; /* keys file name */
char *leapseconds_file_name; /* leapseconds file name */
char *stats_drift_file; /* frequency file name */
static char *stats_temp_file; /* temp frequency file name */
static double wander_resid; /* last frequency update */
double wander_threshold = 1e-7; /* initial frequency threshold */
/*
* Statistics file stuff
*/
#ifndef NTP_VAR
# ifndef SYS_WINNT
# define NTP_VAR "/var/NTP/" /* NOTE the trailing '/' */
# else
# define NTP_VAR "c:\\var\\ntp\\" /* NOTE the trailing '\\' */
# endif /* SYS_WINNT */
#endif
#ifndef MAXPATHLEN
# define MAXPATHLEN 256
#endif
static char statsdir[MAXPATHLEN] = NTP_VAR;
static FILEGEN peerstats;
static FILEGEN loopstats;
static FILEGEN clockstats;
static FILEGEN rawstats;
static FILEGEN sysstats;
static FILEGEN protostats;
static FILEGEN cryptostats;
static FILEGEN timingstats;
/*
* This controls whether stats are written to the fileset. Provided
* so that ntpdc can turn off stats when the file system fills up.
*/
int stats_control;
/*
* Last frequency written to file.
*/
static double prev_drift_comp; /* last frequency update */
/*
* Function prototypes
*/
static int leap_file(FILE *);
static void record_sys_stats(void);
void ntpd_time_stepped(void);
/*
* Prototypes
*/
#ifdef DEBUG
void uninit_util(void);
#endif
/*
* uninit_util - free memory allocated by init_util
*/
#ifdef DEBUG
void
uninit_util(void)
{
#if defined(_MSC_VER) && defined (_DEBUG)
_CrtCheckMemory();
#endif
if (stats_drift_file) {
free(stats_drift_file);
free(stats_temp_file);
stats_drift_file = NULL;
stats_temp_file = NULL;
}
if (key_file_name) {
free(key_file_name);
key_file_name = NULL;
}
filegen_unregister("peerstats");
filegen_unregister("loopstats");
filegen_unregister("clockstats");
filegen_unregister("rawstats");
filegen_unregister("sysstats");
filegen_unregister("protostats");
#ifdef AUTOKEY
filegen_unregister("cryptostats");
#endif /* AUTOKEY */
#ifdef DEBUG_TIMING
filegen_unregister("timingstats");
#endif /* DEBUG_TIMING */
#if defined(_MSC_VER) && defined (_DEBUG)
_CrtCheckMemory();
#endif
}
#endif /* DEBUG */
/*
* init_util - initialize the util module of ntpd
*/
void
init_util(void)
{
filegen_register(statsdir, "peerstats", &peerstats);
filegen_register(statsdir, "loopstats", &loopstats);
filegen_register(statsdir, "clockstats", &clockstats);
filegen_register(statsdir, "rawstats", &rawstats);
filegen_register(statsdir, "sysstats", &sysstats);
filegen_register(statsdir, "protostats", &protostats);
filegen_register(statsdir, "cryptostats", &cryptostats);
filegen_register(statsdir, "timingstats", &timingstats);
/*
* register with libntp ntp_set_tod() to call us back
* when time is stepped.
*/
step_callback = &ntpd_time_stepped;
#ifdef DEBUG
atexit(&uninit_util);
#endif /* DEBUG */
}
/*
* hourly_stats - print some interesting stats
*/
void
write_stats(void)
{
FILE *fp;
#ifdef DOSYNCTODR
struct timeval tv;
#if !defined(VMS)
int prio_set;
#endif
#ifdef HAVE_GETCLOCK
struct timespec ts;
#endif
int o_prio;
/*
* Sometimes having a Sun can be a drag.
*
* The kernel variable dosynctodr controls whether the system's
* soft clock is kept in sync with the battery clock. If it
* is zero, then the soft clock is not synced, and the battery
* clock is simply left to rot. That means that when the system
* reboots, the battery clock (which has probably gone wacky)
* sets the soft clock. That means ntpd starts off with a very
* confused idea of what time it is. It then takes a large
* amount of time to figure out just how wacky the battery clock
* has made things drift, etc, etc. The solution is to make the
* battery clock sync up to system time. The way to do THAT is
* to simply set the time of day to the current time of day, but
* as quickly as possible. This may, or may not be a sensible
* thing to do.
*
* CAVEAT: settimeofday() steps the sun clock by about 800 us,
* so setting DOSYNCTODR seems a bad idea in the
* case of us resolution
*/
#if !defined(VMS)
/*
* (prr) getpriority returns -1 on error, but -1 is also a valid
* return value (!), so instead we have to zero errno before the
* call and check it for non-zero afterwards.
*/
errno = 0;
prio_set = 0;
o_prio = getpriority(PRIO_PROCESS,0); /* Save setting */
/*
* (prr) if getpriority succeeded, call setpriority to raise
* scheduling priority as high as possible. If that succeeds
* as well, set the prio_set flag so we remember to reset
* priority to its previous value below. Note that on Solaris
* 2.6 (and beyond?), both getpriority and setpriority will fail
* with ESRCH, because sched_setscheduler (called from main) put
* us in the real-time scheduling class which setpriority
* doesn't know about. Being in the real-time class is better
* than anything setpriority can do, anyhow, so this error is
* silently ignored.
*/
if ((errno == 0) && (setpriority(PRIO_PROCESS,0,-20) == 0))
prio_set = 1; /* overdrive */
#endif /* VMS */
#ifdef HAVE_GETCLOCK
(void) getclock(TIMEOFDAY, &ts);
tv.tv_sec = ts.tv_sec;
tv.tv_usec = ts.tv_nsec / 1000;
#else /* not HAVE_GETCLOCK */
GETTIMEOFDAY(&tv,(struct timezone *)NULL);
#endif /* not HAVE_GETCLOCK */
if (ntp_set_tod(&tv,(struct timezone *)NULL) != 0)
msyslog(LOG_ERR, "can't sync battery time: %m");
#if !defined(VMS)
if (prio_set)
setpriority(PRIO_PROCESS, 0, o_prio); /* downshift */
#endif /* VMS */
#endif /* DOSYNCTODR */
record_sys_stats();
if (stats_drift_file != 0) {
/*
* When the frequency file is written, initialize the
* prev_drift_comp and wander_resid. Thereafter,
* reduce the wander_resid by half each hour. When
* the difference between the prev_drift_comp and
* drift_comp is less than the wander_resid, update
* the frequency file. This minimizes file writes to
* nonvolatile storage.
*/
#ifdef DEBUG
if (debug)
printf("write_stats: frequency %.6lf thresh %.6lf, freq %.6lf\n",
(prev_drift_comp - drift_comp) * 1e6, wander_resid *
1e6, drift_comp * 1e6);
#endif
if (fabs(prev_drift_comp - drift_comp) < wander_resid) {
wander_resid *= 0.5;
return;
}
prev_drift_comp = drift_comp;
wander_resid = wander_threshold;
if ((fp = fopen(stats_temp_file, "w")) == NULL) {
msyslog(LOG_ERR, "frequency file %s: %m",
stats_temp_file);
return;
}
fprintf(fp, "%.3f\n", drift_comp * 1e6);
(void)fclose(fp);
/* atomic */
#ifdef SYS_WINNT
if (_unlink(stats_drift_file)) /* rename semantics differ under NT */
msyslog(LOG_WARNING,
"Unable to remove prior drift file %s, %m",
stats_drift_file);
#endif /* SYS_WINNT */
#ifndef NO_RENAME
if (rename(stats_temp_file, stats_drift_file))
msyslog(LOG_WARNING,
"Unable to rename temp drift file %s to %s, %m",
stats_temp_file, stats_drift_file);
#else
/* we have no rename(); NFS or FTP is in use */
if ((fp = fopen(stats_drift_file, "w")) ==
NULL) {
msyslog(LOG_ERR,
"frequency file %s: %m",
stats_drift_file);
return;
}
#endif
#if defined(VMS)
/* PURGE */
{
$DESCRIPTOR(oldvers,";-1");
struct dsc$descriptor driftdsc = {
strlen(stats_drift_file), 0, 0,
stats_drift_file };
while(lib$delete_file(&oldvers,
&driftdsc) & 1);
}
#endif
}
}
/*
* stats_config - configure the stats operation
*/
void
stats_config(
int item,
const char *invalue /* only one type so far */
)
{
FILE *fp;
const char *value;
int len;
double old_drift;
#ifndef VMS
const char temp_ext[] = ".TEMP";
#else
const char temp_ext[] = "-TEMP";
#endif
/*
* Expand environment strings under Windows NT; since the
* command interpreter doesn't do this, the program must.
*/
#ifdef SYS_WINNT
char newvalue[MAX_PATH], parameter[MAX_PATH];
if (!ExpandEnvironmentStrings(invalue, newvalue, MAX_PATH)) {
switch (item) {
case STATS_FREQ_FILE:
strncpy(parameter, "STATS_FREQ_FILE",
sizeof(parameter));
break;
case STATS_LEAP_FILE:
strncpy(parameter, "STATS_LEAP_FILE",
sizeof(parameter));
break;
case STATS_STATSDIR:
strncpy(parameter, "STATS_STATSDIR",
sizeof(parameter));
break;
case STATS_PID_FILE:
strncpy(parameter, "STATS_PID_FILE",
sizeof(parameter));
break;
default:
strncpy(parameter, "UNKNOWN",
sizeof(parameter));
break;
}
value = invalue;
msyslog(LOG_ERR,
"ExpandEnvironmentStrings(%s) failed: %m\n",
parameter);
} else {
value = newvalue;
}
#else
value = invalue;
#endif /* SYS_WINNT */
switch (item) {
/*
* Open and read frequency file.
*/
case STATS_FREQ_FILE:
if (!value || (len = strlen(value)) == 0)
break;
stats_drift_file = erealloc(stats_drift_file, len + 1);
stats_temp_file = erealloc(stats_temp_file,
len + sizeof(".TEMP"));
memcpy(stats_drift_file, value, (size_t)(len+1));
memcpy(stats_temp_file, value, (size_t)len);
memcpy(stats_temp_file + len, temp_ext, sizeof(temp_ext));
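/*
 * Illustrative example (not from the original source): with
 * value "/var/lib/ntp/ntp.drift" the code above yields
 * stats_temp_file "/var/lib/ntp/ntp.drift.TEMP" (or "...-TEMP"
 * on VMS).
 */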
/*
* Open drift file and read frequency. If the file is
* missing or contains errors, tell the loop to reset.
*/
if ((fp = fopen(stats_drift_file, "r")) == NULL)
break;
if (fscanf(fp, "%lf", &old_drift) != 1) {
msyslog(LOG_ERR,
"format error frequency file %s",
stats_drift_file);
fclose(fp);
break;
}
fclose(fp);
loop_config(LOOP_FREQ, old_drift);
prev_drift_comp = drift_comp;
break;
/*
* Specify statistics directory.
*/
case STATS_STATSDIR:
/*
* HMS: the following test is insufficient:
* - value may be missing the DIR_SEP
* - we still need the filename after it
*/
if (strlen(value) >= sizeof(statsdir)) {
msyslog(LOG_ERR,
"statsdir too long (>%d, sigh)",
(int)sizeof(statsdir) - 1);
} else {
l_fp now;
int add_dir_sep;
int value_l = strlen(value);
/* Add a DIR_SEP unless we already have one. */
if (value_l == 0)
add_dir_sep = 0;
else
add_dir_sep = (DIR_SEP !=
value[value_l - 1]);
if (add_dir_sep)
snprintf(statsdir, sizeof(statsdir),
"%s%c", value, DIR_SEP);
else
snprintf(statsdir, sizeof(statsdir),
"%s", value);
get_systime(&now);
if (peerstats.prefix == &statsdir[0] &&
peerstats.fp != NULL) {
fclose(peerstats.fp);
peerstats.fp = NULL;
filegen_setup(&peerstats, now.l_ui);
}
if (loopstats.prefix == &statsdir[0] &&
loopstats.fp != NULL) {
fclose(loopstats.fp);
loopstats.fp = NULL;
filegen_setup(&loopstats, now.l_ui);
}
if (clockstats.prefix == &statsdir[0] &&
clockstats.fp != NULL) {
fclose(clockstats.fp);
clockstats.fp = NULL;
filegen_setup(&clockstats, now.l_ui);
}
if (rawstats.prefix == &statsdir[0] &&
rawstats.fp != NULL) {
fclose(rawstats.fp);
rawstats.fp = NULL;
filegen_setup(&rawstats, now.l_ui);
}
if (sysstats.prefix == &statsdir[0] &&
sysstats.fp != NULL) {
fclose(sysstats.fp);
sysstats.fp = NULL;
filegen_setup(&sysstats, now.l_ui);
}
if (protostats.prefix == &statsdir[0] &&
protostats.fp != NULL) {
fclose(protostats.fp);
protostats.fp = NULL;
filegen_setup(&protostats, now.l_ui);
}
#ifdef AUTOKEY
if (cryptostats.prefix == &statsdir[0] &&
cryptostats.fp != NULL) {
fclose(cryptostats.fp);
cryptostats.fp = NULL;
filegen_setup(&cryptostats, now.l_ui);
}
#endif /* AUTOKEY */
#ifdef DEBUG_TIMING
if (timingstats.prefix == &statsdir[0] &&
timingstats.fp != NULL) {
fclose(timingstats.fp);
timingstats.fp = NULL;
filegen_setup(&timingstats, now.l_ui);
}
#endif /* DEBUG_TIMING */
}
break;
/*
* Open pid file.
*/
case STATS_PID_FILE:
if ((fp = fopen(value, "w")) == NULL) {
msyslog(LOG_ERR, "pid file %s: %m",
value);
break;
}
fprintf(fp, "%d", (int)getpid());
fclose(fp);
break;
/*
* Read leapseconds file.
*/
case STATS_LEAP_FILE:
if ((fp = fopen(value, "r")) == NULL) {
msyslog(LOG_ERR, "leapseconds file %s: %m",
value);
break;
}
if (leap_file(fp) < 0)
msyslog(LOG_ERR,
"format error leapseconds file %s",
value);
else
mprintf_event(EVNT_TAI, NULL,
"%d leap %s expire %s", leap_tai,
fstostr(leap_sec),
fstostr(leap_expire));
fclose(fp);
break;
default:
/* oh well */
break;
}
}
/*
* record_peer_stats - write peer statistics to file
*
* file format:
* day (MJD)
* time (s past UTC midnight)
* IP address
* status word (hex)
* offset
* delay
* dispersion
* jitter
*/
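/*
 * Example line (illustrative values only):
 *
 *   48773 10847.650 127.127.4.1 9714 -0.001605 0.000358 0.000813 0.000958
 *
 * i.e. MJD 48773, 10847.650 s past UTC midnight, peer 127.127.4.1,
 * status word 0x9714, then offset, delay, dispersion and jitter in
 * seconds.
 */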
void
record_peer_stats(
sockaddr_u *addr,
int status,
double offset, /* offset */
double delay, /* delay */
double dispersion, /* dispersion */
double jitter /* jitter */
)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&peerstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
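/*
 * now.l_ui is NTP seconds since 1900; dividing by 86400 gives
 * whole days, and adding MJD_1900 (the Modified Julian Day of
 * the NTP epoch) converts that to an MJD. The remainder left in
 * now is seconds past UTC midnight. The same split is used by
 * all the record_*_stats routines below.
 */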
if (peerstats.fp != NULL) {
fprintf(peerstats.fp,
"%lu %s %s %x %.9f %.9f %.9f %.9f\n", day,
ulfptoa(&now, 3), stoa(addr), status, offset,
delay, dispersion, jitter);
fflush(peerstats.fp);
}
}
/*
* record_loop_stats - write loop filter statistics to file
*
* file format:
* day (MJD)
* time (s past midnight)
* offset
* frequency (PPM)
* jitter
* wander (PPM)
* time constant (log2)
*/
void
record_loop_stats(
double offset, /* offset */
double freq, /* frequency (PPM) */
double jitter, /* jitter */
double wander, /* wander (PPM) */
int spoll
)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&loopstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (loopstats.fp != NULL) {
fprintf(loopstats.fp, "%lu %s %.9f %.3f %.9f %.6f %d\n",
day, ulfptoa(&now, 3), offset, freq * 1e6, jitter,
wander * 1e6, spoll);
fflush(loopstats.fp);
}
}
/*
* record_clock_stats - write clock statistics to file
*
* file format:
* day (MJD)
* time (s past midnight)
* IP address
* text message
*/
void
record_clock_stats(
sockaddr_u *addr,
const char *text /* timecode string */
)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&clockstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (clockstats.fp != NULL) {
fprintf(clockstats.fp, "%lu %s %s %s\n", day,
ulfptoa(&now, 3), stoa(addr), text);
fflush(clockstats.fp);
}
}
/*
* mprintf_clock_stats - write clock statistics to file with
* msnprintf-style formatting.
*/
int
mprintf_clock_stats(
sockaddr_u *addr,
const char *fmt,
...
)
{
va_list ap;
int rc;
char msg[512];
va_start(ap, fmt);
rc = mvsnprintf(msg, sizeof(msg), fmt, ap);
va_end(ap);
if (stats_control)
record_clock_stats(addr, msg);
return rc;
}
/*
* record_raw_stats - write raw timestamps to file
*
* file format
* day (MJD)
* time (s past midnight)
* peer (source) IP address
* local (destination) IP address
* t1 t2 t3 t4 timestamps
*/
void
record_raw_stats(
sockaddr_u *srcadr,
sockaddr_u *dstadr,
l_fp *t1, /* originate timestamp */
l_fp *t2, /* receive timestamp */
l_fp *t3, /* transmit timestamp */
l_fp *t4 /* destination timestamp */
)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&rawstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (rawstats.fp != NULL) {
fprintf(rawstats.fp, "%lu %s %s %s %s %s %s %s\n", day,
ulfptoa(&now, 3), stoa(srcadr), dstadr ?
stoa(dstadr) : "-", ulfptoa(t1, 9), ulfptoa(t2, 9),
ulfptoa(t3, 9), ulfptoa(t4, 9));
fflush(rawstats.fp);
}
}
/*
* record_sys_stats - write system statistics to file
*
* file format
* day (MJD)
* time (s past midnight)
* time since reset
* packets received
* packets for this host
* current version
* old version
* access denied
* bad length or format
* bad authentication
* declined
* rate exceeded
* KoD sent
*/
void
record_sys_stats(void)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&sysstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (sysstats.fp != NULL) {
fprintf(sysstats.fp,
"%lu %s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
day, ulfptoa(&now, 3), current_time - sys_stattime,
sys_received, sys_processed, sys_newversion,
sys_oldversion, sys_restricted, sys_badlength,
sys_badauth, sys_declined, sys_limitrejected,
sys_kodsent);
fflush(sysstats.fp);
proto_clr_stats();
}
}
/*
* record_proto_stats - write system statistics to file
*
* file format
* day (MJD)
* time (s past midnight)
* text message
*/
void
record_proto_stats(
char *str /* text string */
)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&protostats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (protostats.fp != NULL) {
fprintf(protostats.fp, "%lu %s %s\n", day,
ulfptoa(&now, 3), str);
fflush(protostats.fp);
}
}
#ifdef AUTOKEY
/*
* record_crypto_stats - write crypto statistics to file
*
* file format:
* day (mjd)
* time (s past midnight)
* peer ip address
* text message
*/
void
record_crypto_stats(
sockaddr_u *addr,
const char *text /* text message */
)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&cryptostats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (cryptostats.fp != NULL) {
if (addr == NULL)
fprintf(cryptostats.fp, "%lu %s 0.0.0.0 %s\n",
day, ulfptoa(&now, 3), text);
else
fprintf(cryptostats.fp, "%lu %s %s %s\n",
day, ulfptoa(&now, 3), stoa(addr), text);
fflush(cryptostats.fp);
}
}
#endif /* AUTOKEY */
#ifdef DEBUG_TIMING
/*
* record_timing_stats - write timing statistics to file
*
* file format:
* day (mjd)
* time (s past midnight)
* text message
*/
void
record_timing_stats(
const char *text /* text message */
)
{
static unsigned int flshcnt;
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&timingstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (timingstats.fp != NULL) {
fprintf(timingstats.fp, "%lu %s %s\n", day, lfptoa(&now,
3), text);
if (++flshcnt % 100 == 0)
fflush(timingstats.fp);
}
}
#endif
/*
* leap_file - read leapseconds file
*
* Read the ERTS leapsecond file in NIST text format and extract the
* NTP seconds of the latest leap and TAI offset after the leap.
*/
static int
leap_file(
FILE *fp /* file handle */
)
{
char buf[NTP_MAXSTRLEN]; /* file line buffer */
u_long leap; /* NTP time at leap */
u_long expire; /* NTP time when file expires */
int offset; /* TAI offset at leap (s) */
int i;
/*
* Read and parse the leapseconds file. Empty lines and comments
* are ignored. A line beginning with #@ contains the file
* expiration time in NTP seconds. Other lines begin with two
* integers followed by junk or comments. The first integer is
* the NTP seconds at the leap, the second is the TAI offset
* after the leap.
*/
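/*
 * Illustrative excerpt (not shipped with this source) of the
 * file format described above:
 *
 *   #@ 3610569600
 *   2272060800 10 # 1 Jan 1972
 *   2287785600 11 # 1 Jul 1972
 *
 * The "#@" line is the expiration time; each data line gives the
 * NTP second of a leap and the TAI offset in effect after it.
 */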
offset = 0;
leap = 0;
expire = 0;
i = 10;
while (fgets(buf, NTP_MAXSTRLEN - 1, fp) != NULL) {
if (strlen(buf) < 1)
continue;
if (buf[0] == '#') {
if (strlen(buf) < 3)
continue;
/*
* Note the '@' flag was used only in the 2006
* table; previous to that the flag was '$'.
*/
if (buf[1] == '@' || buf[1] == '$') {
if (sscanf(&buf[2], "%lu", &expire) !=
1)
return (-1);
continue;
}
}
if (sscanf(buf, "%lu %d", &leap, &offset) == 2) {
/*
* Valid offsets must increase by one for each
* leap.
*/
if (i++ != offset)
return (-1);
}
}
/*
* There must be at least one leap.
*/
if (i == 10)
return (-1);
leap_tai = offset;
leap_sec = leap;
leap_expire = expire;
return (0);
}
/*
* leap_month - returns seconds until the end of the month.
*/
u_long
leap_month(
u_long sec /* current NTP second */
)
{
int leap;
int32 year, month;
u_int32 ndays;
ntpcal_split tmp;
vint64 tvl;
/* --*-- expand time and split to days */
tvl = ntpcal_ntp_to_ntp(sec, NULL);
tmp = ntpcal_daysplit(&tvl);
/* --*-- split to years and days in year */
tmp = ntpcal_split_eradays(tmp.hi + DAY_NTP_STARTS - 1, &leap);
year = tmp.hi;
/* --*-- split days of year to month */
tmp = ntpcal_split_yeardays(tmp.lo, leap);
month = tmp.hi;
/* --*-- get nominal start of next month */
ndays = ntpcal_edate_to_eradays(year, month+1, 0) + 1 - DAY_NTP_STARTS;
return (u_int32)(ndays*SECSPERDAY - sec);
}
/*
* getauthkeys - read the authentication keys from the specified file
*/
void
getauthkeys(
const char *keyfile
)
{
int len;
len = strlen(keyfile);
if (!len)
return;
#ifndef SYS_WINNT
key_file_name = erealloc(key_file_name, len + 1);
memcpy(key_file_name, keyfile, len + 1);
#else
key_file_name = erealloc(key_file_name, _MAX_PATH);
if (len + 1 > _MAX_PATH)
return;
if (!ExpandEnvironmentStrings(keyfile, key_file_name,
_MAX_PATH)) {
msyslog(LOG_ERR,
"ExpandEnvironmentStrings(KEY_FILE) failed: %m");
strncpy(key_file_name, keyfile, _MAX_PATH);
}
#endif /* SYS_WINNT */
authreadkeys(key_file_name);
}
/*
* rereadkeys - read the authentication key file over again.
*/
void
rereadkeys(void)
{
if (NULL != key_file_name)
authreadkeys(key_file_name);
}
#if notyet
/*
* ntp_exit - document explicitly that ntpd has exited
*/
void
ntp_exit(int retval)
{
msyslog(LOG_ERR, "EXITING with return code %d", retval);
exit(retval);
}
#endif
/*
* fstostr - prettyprint NTP seconds
*/
char * fstostr(
time_t ntp_stamp
)
{
char * buf;
struct tm * tm;
time_t unix_stamp;
LIB_GETBUF(buf);
unix_stamp = ntp_stamp - JAN_1970;
tm = gmtime(&unix_stamp);
if (NULL == tm)
#ifdef WAIT_FOR_NTP_CRYPTO_C_CALLERS_ABLE_TO_HANDLE_MORE_THAN_20_CHARS
msnprintf(buf, LIB_BUFLENGTH, "gmtime: %m");
#else
strncpy(buf, "gmtime() error", LIB_BUFLENGTH);
#endif
else
snprintf(buf, LIB_BUFLENGTH, "%04d%02d%02d%02d%02d",
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min);
return buf;
}
/*
* ntpd_time_stepped is called back by step_systime(), allowing ntpd
* to do any one-time processing necessitated by the step.
*/
void
ntpd_time_stepped(void)
{
u_int saved_mon_enabled;
/*
* flush the monitor MRU list which contains l_fp timestamps
* which should not be compared across the step.
*/
if (MON_OFF != mon_enabled) {
saved_mon_enabled = mon_enabled;
mon_stop(MON_OFF);
mon_start(saved_mon_enabled);
}
/* inform interpolating Windows code to allow time to go back */
#ifdef SYS_WINNT
win_time_stepped();
#endif
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_1658_3 |
crossvul-cpp_data_good_2174_1 | /*
* Copyright (c) Christos Zoulas 2003.
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "file.h"
#ifndef lint
FILE_RCSID("@(#)$File: readelf.c,v 1.103 2014/05/02 02:25:10 christos Exp $")
#endif
#ifdef BUILTIN_ELF
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "readelf.h"
#include "magic.h"
#ifdef ELFCORE
private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t,
off_t, int *);
#endif
private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t,
off_t, int *, int);
private int doshn(struct magic_set *, int, int, int, off_t, int, size_t,
off_t, int *, int, int);
private size_t donote(struct magic_set *, void *, size_t, size_t, int,
int, size_t, int *);
#define ELF_ALIGN(a) ((((a) + align - 1) / align) * align)
#define isquote(c) (strchr("'\"`", (c)) != NULL)
private uint16_t getu16(int, uint16_t);
private uint32_t getu32(int, uint32_t);
private uint64_t getu64(int, uint64_t);
private uint16_t
getu16(int swap, uint16_t value)
{
union {
uint16_t ui;
char c[2];
} retval, tmpval;
if (swap) {
tmpval.ui = value;
retval.c[0] = tmpval.c[1];
retval.c[1] = tmpval.c[0];
return retval.ui;
} else
return value;
}
private uint32_t
getu32(int swap, uint32_t value)
{
union {
uint32_t ui;
char c[4];
} retval, tmpval;
if (swap) {
tmpval.ui = value;
retval.c[0] = tmpval.c[3];
retval.c[1] = tmpval.c[2];
retval.c[2] = tmpval.c[1];
retval.c[3] = tmpval.c[0];
return retval.ui;
} else
return value;
}
private uint64_t
getu64(int swap, uint64_t value)
{
union {
uint64_t ui;
char c[8];
} retval, tmpval;
if (swap) {
tmpval.ui = value;
retval.c[0] = tmpval.c[7];
retval.c[1] = tmpval.c[6];
retval.c[2] = tmpval.c[5];
retval.c[3] = tmpval.c[4];
retval.c[4] = tmpval.c[3];
retval.c[5] = tmpval.c[2];
retval.c[6] = tmpval.c[1];
retval.c[7] = tmpval.c[0];
return retval.ui;
} else
return value;
}
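/*
 * Example (added for clarity): with swap set, getu32(1, 0x11223344)
 * returns 0x44332211 -- the byte-reversed value needed when the ELF
 * file's byte order differs from the host's.
 */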
#define elf_getu16(swap, value) getu16(swap, value)
#define elf_getu32(swap, value) getu32(swap, value)
#define elf_getu64(swap, value) getu64(swap, value)
#define xsh_addr (clazz == ELFCLASS32 \
? (void *)&sh32 \
: (void *)&sh64)
#define xsh_sizeof (clazz == ELFCLASS32 \
? sizeof(sh32) \
: sizeof(sh64))
#define xsh_size (size_t)(clazz == ELFCLASS32 \
? elf_getu32(swap, sh32.sh_size) \
: elf_getu64(swap, sh64.sh_size))
#define xsh_offset (off_t)(clazz == ELFCLASS32 \
? elf_getu32(swap, sh32.sh_offset) \
: elf_getu64(swap, sh64.sh_offset))
#define xsh_type (clazz == ELFCLASS32 \
? elf_getu32(swap, sh32.sh_type) \
: elf_getu32(swap, sh64.sh_type))
#define xsh_name (clazz == ELFCLASS32 \
? elf_getu32(swap, sh32.sh_name) \
: elf_getu32(swap, sh64.sh_name))
#define xph_addr (clazz == ELFCLASS32 \
? (void *) &ph32 \
: (void *) &ph64)
#define xph_sizeof (clazz == ELFCLASS32 \
? sizeof(ph32) \
: sizeof(ph64))
#define xph_type (clazz == ELFCLASS32 \
? elf_getu32(swap, ph32.p_type) \
: elf_getu32(swap, ph64.p_type))
#define xph_offset (off_t)(clazz == ELFCLASS32 \
? elf_getu32(swap, ph32.p_offset) \
: elf_getu64(swap, ph64.p_offset))
#define xph_align (size_t)((clazz == ELFCLASS32 \
? (off_t) (ph32.p_align ? \
elf_getu32(swap, ph32.p_align) : 4) \
: (off_t) (ph64.p_align ? \
elf_getu64(swap, ph64.p_align) : 4)))
#define xph_filesz (size_t)((clazz == ELFCLASS32 \
? elf_getu32(swap, ph32.p_filesz) \
: elf_getu64(swap, ph64.p_filesz)))
#define xnh_addr (clazz == ELFCLASS32 \
? (void *)&nh32 \
: (void *)&nh64)
#define xph_memsz (size_t)((clazz == ELFCLASS32 \
? elf_getu32(swap, ph32.p_memsz) \
: elf_getu64(swap, ph64.p_memsz)))
#define xnh_sizeof (clazz == ELFCLASS32 \
? sizeof nh32 \
: sizeof nh64)
#define xnh_type (clazz == ELFCLASS32 \
? elf_getu32(swap, nh32.n_type) \
: elf_getu32(swap, nh64.n_type))
#define xnh_namesz (clazz == ELFCLASS32 \
? elf_getu32(swap, nh32.n_namesz) \
: elf_getu32(swap, nh64.n_namesz))
#define xnh_descsz (clazz == ELFCLASS32 \
? elf_getu32(swap, nh32.n_descsz) \
: elf_getu32(swap, nh64.n_descsz))
#define prpsoffsets(i) (clazz == ELFCLASS32 \
? prpsoffsets32[i] \
: prpsoffsets64[i])
#define xcap_addr (clazz == ELFCLASS32 \
? (void *)&cap32 \
: (void *)&cap64)
#define xcap_sizeof (clazz == ELFCLASS32 \
? sizeof cap32 \
: sizeof cap64)
#define xcap_tag (clazz == ELFCLASS32 \
? elf_getu32(swap, cap32.c_tag) \
: elf_getu64(swap, cap64.c_tag))
#define xcap_val (clazz == ELFCLASS32 \
? elf_getu32(swap, cap32.c_un.c_val) \
: elf_getu64(swap, cap64.c_un.c_val))
#ifdef ELFCORE
/*
* Try larger offsets first to avoid false matches
* from earlier data that happen to look like strings.
*/
static const size_t prpsoffsets32[] = {
#ifdef USE_NT_PSINFO
104, /* SunOS 5.x (command line) */
88, /* SunOS 5.x (short name) */
#endif /* USE_NT_PSINFO */
100, /* SunOS 5.x (command line) */
84, /* SunOS 5.x (short name) */
44, /* Linux (command line) */
28, /* Linux 2.0.36 (short name) */
8, /* FreeBSD */
};
static const size_t prpsoffsets64[] = {
#ifdef USE_NT_PSINFO
152, /* SunOS 5.x (command line) */
136, /* SunOS 5.x (short name) */
#endif /* USE_NT_PSINFO */
136, /* SunOS 5.x, 64-bit (command line) */
120, /* SunOS 5.x, 64-bit (short name) */
56, /* Linux (command line) */
40, /* Linux (tested on core from 2.4.x, short name) */
16, /* FreeBSD, 64-bit */
};
#define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0])
#define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0])
#define NOFFSETS (clazz == ELFCLASS32 ? NOFFSETS32 : NOFFSETS64)
/*
* Look through the program headers of an executable image, searching
* for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or
* "FreeBSD"; if one is found, try looking in various places in its
* contents for a 16-character string containing only printable
* characters - if found, that string should be the name of the program
* that dropped core. Note: right after that 16-character string is,
* at least in SunOS 5.x (and possibly other SVR4-flavored systems) and
* Linux, a longer string (80 characters, in 5.x, probably other
* SVR4-flavored systems, and Linux) containing the start of the
* command line for that program.
*
* SunOS 5.x core files contain two PT_NOTE sections, with the types
* NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the
* same info about the command name and command line, so it probably
* isn't worthwhile to look for NT_PSINFO, but the offsets are provided
* above (see USE_NT_PSINFO), in case we ever decide to do so. The
* NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent;
* the SunOS 5.x file command relies on this (and prefers the latter).
*
* The signal number probably appears in a section of type NT_PRSTATUS,
* but that's also rather OS-dependent, in ways that are harder to
* dissect with heuristics, so I'm not bothering with the signal number.
* (I suppose the signal number could be of interest in situations where
* you don't have the binary of the program that dropped core; if you
* *do* have that binary, the debugger will probably tell you what
* signal it was.)
*/
#define OS_STYLE_SVR4 0
#define OS_STYLE_FREEBSD 1
#define OS_STYLE_NETBSD 2
private const char os_style_names[][8] = {
"SVR4",
"FreeBSD",
"NetBSD",
};
#define FLAGS_DID_CORE 0x01
#define FLAGS_DID_NOTE 0x02
#define FLAGS_DID_BUILD_ID 0x04
#define FLAGS_DID_CORE_STYLE 0x08
#define FLAGS_IS_CORE 0x10
private int
dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
int num, size_t size, off_t fsize, int *flags)
{
Elf32_Phdr ph32;
Elf64_Phdr ph64;
size_t offset, len;
unsigned char nbuf[BUFSIZ];
ssize_t bufsize;
if (size != xph_sizeof) {
if (file_printf(ms, ", corrupted program header size") == -1)
return -1;
return 0;
}
/*
* Loop through all the program headers.
*/
for ( ; num; num--) {
if (pread(fd, xph_addr, xph_sizeof, off) == -1) {
file_badread(ms);
return -1;
}
off += size;
if (xph_offset > fsize) {
/* Perhaps warn here */
continue;
}
if (xph_type != PT_NOTE)
continue;
/*
* This is a PT_NOTE section; loop through all the notes
* in the section.
*/
len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf);
if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) {
file_badread(ms);
return -1;
}
offset = 0;
for (;;) {
if (offset >= (size_t)bufsize)
break;
offset = donote(ms, nbuf, offset, (size_t)bufsize,
clazz, swap, 4, flags);
if (offset == 0)
break;
}
}
return 0;
}
#endif
static void
do_note_netbsd_version(struct magic_set *ms, int swap, void *v)
{
uint32_t desc;
(void)memcpy(&desc, v, sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, ", for NetBSD") == -1)
return;
/*
* The version number used to be stuck as 199905, and was thus
* basically content-free. Newer versions of NetBSD have fixed
* this and now use the encoding of __NetBSD_Version__:
*
* MMmmrrpp00
*
* M = major version
* m = minor version
* r = release ["",A-Z,Z[A-Z] but numeric]
* p = patchlevel
*/
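/*
 * Worked example (illustrative): desc == 599003500 decodes as
 * major 5, minor 99, release 0, patch 35, and is printed as
 * "5.99.35" by the code below.
 */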
if (desc > 100000000U) {
uint32_t ver_patch = (desc / 100) % 100;
uint32_t ver_rel = (desc / 10000) % 100;
uint32_t ver_min = (desc / 1000000) % 100;
uint32_t ver_maj = desc / 100000000;
if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1)
return;
if (ver_rel == 0 && ver_patch != 0) {
if (file_printf(ms, ".%u", ver_patch) == -1)
return;
} else if (ver_rel != 0) {
while (ver_rel > 26) {
if (file_printf(ms, "Z") == -1)
return;
ver_rel -= 26;
}
if (file_printf(ms, "%c", 'A' + ver_rel - 1)
== -1)
return;
}
}
}
static void
do_note_freebsd_version(struct magic_set *ms, int swap, void *v)
{
uint32_t desc;
(void)memcpy(&desc, v, sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, ", for FreeBSD") == -1)
return;
/*
* Contents is __FreeBSD_version, whose relation to OS
* versions is defined by a huge table in the Porter's
* Handbook. This is the general scheme:
*
* Releases:
* Mmp000 (before 4.10)
* Mmi0p0 (before 5.0)
* Mmm0p0
*
* Development branches:
* Mmpxxx (before 4.6)
* Mmp1xx (before 4.10)
* Mmi1xx (before 5.0)
* M000xx (pre-M.0)
* Mmm1xx
*
* M = major version
* m = minor version
* i = minor version increment (491000 -> 4.10)
* p = patchlevel
* x = revision
*
* The first release of FreeBSD to use ELF by default
* was version 3.0.
*/
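/*
 * Worked example (illustrative): desc == 802000 takes the final
 * branch below (802000 / 100000 = 8, 802000 / 1000 % 100 = 2,
 * no patch digit) and is printed as "8.2".
 */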
if (desc == 460002) {
if (file_printf(ms, " 4.6.2") == -1)
return;
} else if (desc < 460100) {
if (file_printf(ms, " %d.%d", desc / 100000,
desc / 10000 % 10) == -1)
return;
if (desc / 1000 % 10 > 0)
if (file_printf(ms, ".%d", desc / 1000 % 10) == -1)
return;
if ((desc % 1000 > 0) || (desc % 100000 == 0))
if (file_printf(ms, " (%d)", desc) == -1)
return;
} else if (desc < 500000) {
if (file_printf(ms, " %d.%d", desc / 100000,
desc / 10000 % 10 + desc / 1000 % 10) == -1)
return;
if (desc / 100 % 10 > 0) {
if (file_printf(ms, " (%d)", desc) == -1)
return;
} else if (desc / 10 % 10 > 0) {
if (file_printf(ms, ".%d", desc / 10 % 10) == -1)
return;
}
} else {
if (file_printf(ms, " %d.%d", desc / 100000,
desc / 1000 % 100) == -1)
return;
if ((desc / 100 % 10 > 0) ||
(desc % 100000 / 100 == 0)) {
if (file_printf(ms, " (%d)", desc) == -1)
return;
} else if (desc / 10 % 10 > 0) {
if (file_printf(ms, ".%d", desc / 10 % 10) == -1)
return;
}
}
}
private size_t
donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size,
int clazz, int swap, size_t align, int *flags)
{
Elf32_Nhdr nh32;
Elf64_Nhdr nh64;
size_t noff, doff;
#ifdef ELFCORE
int os_style = -1;
#endif
uint32_t namesz, descsz;
unsigned char *nbuf = CAST(unsigned char *, vbuf);
if (xnh_sizeof + offset > size) {
/*
* We're out of note headers.
*/
return xnh_sizeof + offset;
}
(void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof);
offset += xnh_sizeof;
namesz = xnh_namesz;
descsz = xnh_descsz;
if ((namesz == 0) && (descsz == 0)) {
/*
* We're out of note headers.
*/
return (offset >= size) ? offset : size;
}
if (namesz & 0x80000000) {
(void)file_printf(ms, ", bad note name size 0x%lx",
(unsigned long)namesz);
return offset;
}
if (descsz & 0x80000000) {
(void)file_printf(ms, ", bad note description size 0x%lx",
(unsigned long)descsz);
return offset;
}
noff = offset;
doff = ELF_ALIGN(offset + namesz);
if (offset + namesz > size) {
/*
* We're past the end of the buffer.
*/
return doff;
}
offset = ELF_ALIGN(doff + descsz);
if (doff + descsz > size) {
/*
* We're past the end of the buffer.
*/
return (offset >= size) ? offset : size;
}
if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) ==
(FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID))
goto core;
if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 &&
xnh_type == NT_GNU_VERSION && descsz == 2) {
file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]);
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
xnh_type == NT_GNU_VERSION && descsz == 16) {
uint32_t desc[4];
(void)memcpy(desc, &nbuf[doff], sizeof(desc));
if (file_printf(ms, ", for GNU/") == -1)
return size;
switch (elf_getu32(swap, desc[0])) {
case GNU_OS_LINUX:
if (file_printf(ms, "Linux") == -1)
return size;
break;
case GNU_OS_HURD:
if (file_printf(ms, "Hurd") == -1)
return size;
break;
case GNU_OS_SOLARIS:
if (file_printf(ms, "Solaris") == -1)
return size;
break;
case GNU_OS_KFREEBSD:
if (file_printf(ms, "kFreeBSD") == -1)
return size;
break;
case GNU_OS_KNETBSD:
if (file_printf(ms, "kNetBSD") == -1)
return size;
break;
default:
if (file_printf(ms, "<unknown>") == -1)
return size;
}
if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]),
elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1)
return size;
*flags |= FLAGS_DID_NOTE;
return size;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) {
uint8_t desc[20];
uint32_t i;
if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? "md5/uuid" :
"sha1") == -1)
return size;
(void)memcpy(desc, &nbuf[doff], descsz);
for (i = 0; i < descsz; i++)
if (file_printf(ms, "%02x", desc[i]) == -1)
return size;
*flags |= FLAGS_DID_BUILD_ID;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 &&
xnh_type == NT_NETBSD_PAX && descsz == 4) {
static const char *pax[] = {
"+mprotect",
"-mprotect",
"+segvguard",
"-segvguard",
"+ASLR",
"-ASLR",
};
uint32_t desc;
size_t i;
int did = 0;
(void)memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (desc && file_printf(ms, ", PaX: ") == -1)
return size;
for (i = 0; i < __arraycount(pax); i++) {
if (((1 << i) & desc) == 0)
continue;
if (file_printf(ms, "%s%s", did++ ? "," : "",
pax[i]) == -1)
return size;
}
}
if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) {
switch (xnh_type) {
case NT_NETBSD_VERSION:
if (descsz == 4) {
do_note_netbsd_version(ms, swap, &nbuf[doff]);
*flags |= FLAGS_DID_NOTE;
return size;
}
break;
case NT_NETBSD_MARCH:
if (file_printf(ms, ", compiled for: %.*s", (int)descsz,
(const char *)&nbuf[doff]) == -1)
return size;
break;
case NT_NETBSD_CMODEL:
if (file_printf(ms, ", compiler model: %.*s",
(int)descsz, (const char *)&nbuf[doff]) == -1)
return size;
break;
default:
if (file_printf(ms, ", note=%u", xnh_type) == -1)
return size;
break;
}
return size;
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) {
if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) {
do_note_freebsd_version(ms, swap, &nbuf[doff]);
*flags |= FLAGS_DID_NOTE;
return size;
}
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 &&
xnh_type == NT_OPENBSD_VERSION && descsz == 4) {
if (file_printf(ms, ", for OpenBSD") == -1)
return size;
/* Content of note is always 0 */
*flags |= FLAGS_DID_NOTE;
return size;
}
if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 &&
xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) {
uint32_t desc;
if (file_printf(ms, ", for DragonFly") == -1)
return size;
(void)memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, " %d.%d.%d", desc / 100000,
desc / 10000 % 10, desc % 10000) == -1)
return size;
*flags |= FLAGS_DID_NOTE;
return size;
}
core:
/*
* Sigh. The 2.0.36 kernel in Debian 2.1, at
* least, doesn't correctly implement name
* sections, in core dumps, as specified by
* the "Program Linking" section of "UNIX(R) System
* V Release 4 Programmer's Guide: ANSI C and
* Programming Support Tools", because my copy
* clearly says "The first 'namesz' bytes in 'name'
* contain a *null-terminated* [emphasis mine]
* character representation of the entry's owner
* or originator", but the 2.0.36 kernel code
* doesn't include the terminating null in the
* name....
*/
if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) ||
(namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) {
os_style = OS_STYLE_SVR4;
}
if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) {
os_style = OS_STYLE_FREEBSD;
}
if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11)
== 0)) {
os_style = OS_STYLE_NETBSD;
}
#ifdef ELFCORE
if ((*flags & FLAGS_DID_CORE) != 0)
return size;
if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) {
if (file_printf(ms, ", %s-style", os_style_names[os_style])
== -1)
return size;
*flags |= FLAGS_DID_CORE_STYLE;
}
switch (os_style) {
case OS_STYLE_NETBSD:
if (xnh_type == NT_NETBSD_CORE_PROCINFO) {
uint32_t signo;
/*
* Extract the program name. It is at
* offset 0x7c, and is up to 32-bytes,
* including the terminating NUL.
*/
if (file_printf(ms, ", from '%.31s'",
&nbuf[doff + 0x7c]) == -1)
return size;
/*
* Extract the signal number. It is at
* offset 0x08.
*/
(void)memcpy(&signo, &nbuf[doff + 0x08],
sizeof(signo));
if (file_printf(ms, " (signal %u)",
elf_getu32(swap, signo)) == -1)
return size;
*flags |= FLAGS_DID_CORE;
return size;
}
break;
default:
if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) {
size_t i, j;
unsigned char c;
/*
* Extract the program name. We assume
* it to be 16 characters (that's what it
* is in SunOS 5.x and Linux).
*
* Unfortunately, it's at a different offset
* in various OSes, so try multiple offsets.
* If the characters aren't all printable,
* reject it.
*/
for (i = 0; i < NOFFSETS; i++) {
unsigned char *cname, *cp;
size_t reloffset = prpsoffsets(i);
size_t noffset = doff + reloffset;
size_t k;
for (j = 0; j < 16; j++, noffset++,
reloffset++) {
/*
* Make sure we're not past
* the end of the buffer; if
* we are, just give up.
*/
if (noffset >= size)
goto tryanother;
/*
* Make sure we're not past
* the end of the contents;
* if we are, this obviously
* isn't the right offset.
*/
if (reloffset >= descsz)
goto tryanother;
c = nbuf[noffset];
if (c == '\0') {
/*
* A '\0' at the
* beginning is
* obviously wrong.
* Any other '\0'
* means we're done.
*/
if (j == 0)
goto tryanother;
else
break;
} else {
/*
* A nonprintable
* character is also
* wrong.
*/
if (!isprint(c) || isquote(c))
goto tryanother;
}
}
/*
* Well, that worked.
*/
/*
* Try next offsets, in case this match is
* in the middle of a string.
*/
for (k = i + 1 ; k < NOFFSETS ; k++) {
size_t no;
int adjust = 1;
if (prpsoffsets(k) >= prpsoffsets(i))
continue;
for (no = doff + prpsoffsets(k);
no < doff + prpsoffsets(i); no++)
adjust = adjust
&& isprint(nbuf[no]);
if (adjust)
i = k;
}
cname = (unsigned char *)
&nbuf[doff + prpsoffsets(i)];
for (cp = cname; *cp && isprint(*cp); cp++)
continue;
/*
* Linux apparently appends a space at the end
* of the command line: remove it.
*/
while (cp > cname && isspace(cp[-1]))
cp--;
if (file_printf(ms, ", from '%.*s'",
(int)(cp - cname), cname) == -1)
return size;
*flags |= FLAGS_DID_CORE;
return size;
tryanother:
;
}
}
break;
}
#endif
return offset;
}
/* SunOS 5.x hardware capability descriptions */
typedef struct cap_desc {
uint64_t cd_mask;
const char *cd_name;
} cap_desc_t;
static const cap_desc_t cap_desc_sparc[] = {
{ AV_SPARC_MUL32, "MUL32" },
{ AV_SPARC_DIV32, "DIV32" },
{ AV_SPARC_FSMULD, "FSMULD" },
{ AV_SPARC_V8PLUS, "V8PLUS" },
{ AV_SPARC_POPC, "POPC" },
{ AV_SPARC_VIS, "VIS" },
{ AV_SPARC_VIS2, "VIS2" },
{ AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" },
{ AV_SPARC_FMAF, "FMAF" },
{ AV_SPARC_FJFMAU, "FJFMAU" },
{ AV_SPARC_IMA, "IMA" },
{ 0, NULL }
};
static const cap_desc_t cap_desc_386[] = {
{ AV_386_FPU, "FPU" },
{ AV_386_TSC, "TSC" },
{ AV_386_CX8, "CX8" },
{ AV_386_SEP, "SEP" },
{ AV_386_AMD_SYSC, "AMD_SYSC" },
{ AV_386_CMOV, "CMOV" },
{ AV_386_MMX, "MMX" },
{ AV_386_AMD_MMX, "AMD_MMX" },
{ AV_386_AMD_3DNow, "AMD_3DNow" },
{ AV_386_AMD_3DNowx, "AMD_3DNowx" },
{ AV_386_FXSR, "FXSR" },
{ AV_386_SSE, "SSE" },
{ AV_386_SSE2, "SSE2" },
{ AV_386_PAUSE, "PAUSE" },
{ AV_386_SSE3, "SSE3" },
{ AV_386_MON, "MON" },
{ AV_386_CX16, "CX16" },
{ AV_386_AHF, "AHF" },
{ AV_386_TSCP, "TSCP" },
{ AV_386_AMD_SSE4A, "AMD_SSE4A" },
{ AV_386_POPCNT, "POPCNT" },
{ AV_386_AMD_LZCNT, "AMD_LZCNT" },
{ AV_386_SSSE3, "SSSE3" },
{ AV_386_SSE4_1, "SSE4.1" },
{ AV_386_SSE4_2, "SSE4.2" },
{ 0, NULL }
};
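/*
 * Illustrative sketch, not part of the original source: how a
 * cap_desc_t table is typically consumed.  Each capability bit that
 * matches a known mask is printed by name and cleared, so whatever
 * remains afterwards is unknown.  Printing to a FILE * is an
 * assumption for the example; the real code goes through file_printf().
 */
static void
example_describe_caps(uint64_t caps, const cap_desc_t *cdp, FILE *fp)
{
	for (; cdp->cd_name != NULL; cdp++) {
		if (caps & cdp->cd_mask) {
			fprintf(fp, " %s", cdp->cd_name);
			caps &= ~cdp->cd_mask;
		}
	}
	if (caps)
		fprintf(fp, " (unknown 0x%llx)", (unsigned long long)caps);
}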
private int
doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num,
size_t size, off_t fsize, int *flags, int mach, int strtab)
{
Elf32_Shdr sh32;
Elf64_Shdr sh64;
int stripped = 1;
void *nbuf;
off_t noff, coff, name_off;
	uint64_t cap_hw1 = 0;	/* SunOS 5.x hardware capabilities */
	uint64_t cap_sf1 = 0;	/* SunOS 5.x software capabilities */
char name[50];
if (size != xsh_sizeof) {
if (file_printf(ms, ", corrupted section header size") == -1)
return -1;
return 0;
}
/* Read offset of name section to be able to read section names later */
if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) == -1) {
file_badread(ms);
return -1;
}
name_off = xsh_offset;
for ( ; num; num--) {
/* Read the name of this section. */
if (pread(fd, name, sizeof(name), name_off + xsh_name) == -1) {
file_badread(ms);
return -1;
}
name[sizeof(name) - 1] = '\0';
if (strcmp(name, ".debug_info") == 0)
stripped = 0;
if (pread(fd, xsh_addr, xsh_sizeof, off) == -1) {
file_badread(ms);
return -1;
}
off += size;
/* Things we can determine before we seek */
switch (xsh_type) {
case SHT_SYMTAB:
#if 0
case SHT_DYNSYM:
#endif
stripped = 0;
break;
default:
if (xsh_offset > fsize) {
/* Perhaps warn here */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xsh_type) {
case SHT_NOTE:
if ((nbuf = malloc(xsh_size)) == NULL) {
file_error(ms, errno, "Cannot allocate memory"
" for note");
return -1;
}
if (pread(fd, nbuf, xsh_size, xsh_offset) == -1) {
file_badread(ms);
free(nbuf);
return -1;
}
noff = 0;
for (;;) {
if (noff >= (off_t)xsh_size)
break;
noff = donote(ms, nbuf, (size_t)noff,
xsh_size, clazz, swap, 4, flags);
if (noff == 0)
break;
}
free(nbuf);
break;
case SHT_SUNW_cap:
switch (mach) {
case EM_SPARC:
case EM_SPARCV9:
case EM_IA_64:
case EM_386:
case EM_AMD64:
break;
default:
goto skip;
}
if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) {
file_badseek(ms);
return -1;
}
coff = 0;
for (;;) {
Elf32_Cap cap32;
Elf64_Cap cap64;
char cbuf[/*CONSTCOND*/
MAX(sizeof cap32, sizeof cap64)];
if ((coff += xcap_sizeof) > (off_t)xsh_size)
break;
if (read(fd, cbuf, (size_t)xcap_sizeof) !=
(ssize_t)xcap_sizeof) {
file_badread(ms);
return -1;
}
if (cbuf[0] == 'A') {
#ifdef notyet
char *p = cbuf + 1;
uint32_t len, tag;
memcpy(&len, p, sizeof(len));
p += 4;
len = getu32(swap, len);
if (memcmp("gnu", p, 3) != 0) {
if (file_printf(ms,
", unknown capability %.3s", p)
== -1)
return -1;
break;
}
p += strlen(p) + 1;
tag = *p++;
memcpy(&len, p, sizeof(len));
p += 4;
len = getu32(swap, len);
if (tag != 1) {
if (file_printf(ms, ", unknown gnu"
" capability tag %d", tag)
== -1)
return -1;
break;
}
// gnu attributes
#endif
break;
}
(void)memcpy(xcap_addr, cbuf, xcap_sizeof);
switch (xcap_tag) {
case CA_SUNW_NULL:
break;
case CA_SUNW_HW_1:
cap_hw1 |= xcap_val;
break;
case CA_SUNW_SF_1:
cap_sf1 |= xcap_val;
break;
default:
if (file_printf(ms,
", with unknown capability "
"0x%" INT64_T_FORMAT "x = 0x%"
INT64_T_FORMAT "x",
(unsigned long long)xcap_tag,
(unsigned long long)xcap_val) == -1)
return -1;
break;
}
}
/*FALLTHROUGH*/
skip:
default:
break;
}
}
if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1)
return -1;
if (cap_hw1) {
const cap_desc_t *cdp;
switch (mach) {
case EM_SPARC:
case EM_SPARC32PLUS:
case EM_SPARCV9:
cdp = cap_desc_sparc;
break;
case EM_386:
case EM_IA_64:
case EM_AMD64:
cdp = cap_desc_386;
break;
default:
cdp = NULL;
break;
}
if (file_printf(ms, ", uses") == -1)
return -1;
if (cdp) {
while (cdp->cd_name) {
if (cap_hw1 & cdp->cd_mask) {
if (file_printf(ms,
" %s", cdp->cd_name) == -1)
return -1;
cap_hw1 &= ~cdp->cd_mask;
}
++cdp;
}
if (cap_hw1)
if (file_printf(ms,
" unknown hardware capability 0x%"
INT64_T_FORMAT "x",
(unsigned long long)cap_hw1) == -1)
return -1;
} else {
if (file_printf(ms,
" hardware capability 0x%" INT64_T_FORMAT "x",
(unsigned long long)cap_hw1) == -1)
return -1;
}
}
if (cap_sf1) {
if (cap_sf1 & SF1_SUNW_FPUSED) {
if (file_printf(ms,
(cap_sf1 & SF1_SUNW_FPKNWN)
? ", uses frame pointer"
: ", not known to use frame pointer") == -1)
return -1;
}
cap_sf1 &= ~SF1_SUNW_MASK;
if (cap_sf1)
if (file_printf(ms,
", with unknown software capability 0x%"
INT64_T_FORMAT "x",
(unsigned long long)cap_sf1) == -1)
return -1;
}
return 0;
}
/*
 * Look through the program headers of an executable image: a
 * PT_DYNAMIC header means it is dynamically linked (otherwise we
 * report it as statically linked), and a PT_INTERP header means it
 * uses shared libraries.
 */
private int
dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
int num, size_t size, off_t fsize, int *flags, int sh_num)
{
Elf32_Phdr ph32;
Elf64_Phdr ph64;
const char *linking_style = "statically";
const char *shared_libraries = "";
unsigned char nbuf[BUFSIZ];
ssize_t bufsize;
size_t offset, align, len;
if (size != xph_sizeof) {
if (file_printf(ms, ", corrupted program header size") == -1)
return -1;
return 0;
}
for ( ; num; num--) {
if (pread(fd, xph_addr, xph_sizeof, off) == -1) {
file_badread(ms);
return -1;
}
off += size;
/* Things we can determine before we seek */
switch (xph_type) {
case PT_DYNAMIC:
linking_style = "dynamically";
break;
case PT_INTERP:
shared_libraries = " (uses shared libs)";
break;
default:
if (xph_offset > fsize) {
/* Maybe warn here? */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xph_type) {
case PT_NOTE:
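			/*
			 * An alignment value with the high bit set
			 * cannot be genuine; report it and fall back
			 * to the traditional 4-byte note alignment.
			 */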
if ((align = xph_align) & 0x80000000UL) {
if (file_printf(ms,
", invalid note alignment 0x%lx",
(unsigned long)align) == -1)
return -1;
align = 4;
}
if (sh_num)
break;
			/*
			 * This is a PT_NOTE segment; loop through
			 * all the notes it contains.
			 */
len = xph_filesz < sizeof(nbuf) ? xph_filesz
: sizeof(nbuf);
bufsize = pread(fd, nbuf, len, xph_offset);
if (bufsize == -1) {
file_badread(ms);
return -1;
}
offset = 0;
for (;;) {
if (offset >= (size_t)bufsize)
break;
offset = donote(ms, nbuf, offset,
(size_t)bufsize, clazz, swap, align,
flags);
if (offset == 0)
break;
}
break;
default:
break;
}
}
if (file_printf(ms, ", %s linked%s", linking_style, shared_libraries)
== -1)
return -1;
return 0;
}
protected int
file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf,
size_t nbytes)
{
union {
int32_t l;
char c[sizeof (int32_t)];
} u;
int clazz;
int swap;
struct stat st;
off_t fsize;
int flags = 0;
Elf32_Ehdr elf32hdr;
Elf64_Ehdr elf64hdr;
uint16_t type;
if (ms->flags & (MAGIC_MIME|MAGIC_APPLE))
return 0;
  /*
   * ELF executables have multiple section headers in arbitrary
   * file locations, so file(1) cannot determine this easily.
   * Instead we traverse through all section headers until a
   * symbol-table section is found, or else conclude that the
   * binary is stripped.
   * Return immediately if it's not ELF (so we avoid pipe2file unless needed).
   */
if (buf[EI_MAG0] != ELFMAG0
|| (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1)
|| buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3)
return 0;
/*
* If we cannot seek, it must be a pipe, socket or fifo.
*/
if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE))
fd = file_pipe2file(ms, fd, buf, nbytes);
if (fstat(fd, &st) == -1) {
file_badread(ms);
return -1;
}
fsize = st.st_size;
clazz = buf[EI_CLASS];
switch (clazz) {
case ELFCLASS32:
#undef elf_getu
#define elf_getu(a, b) elf_getu32(a, b)
#undef elfhdr
#define elfhdr elf32hdr
#include "elfclass.h"
case ELFCLASS64:
#undef elf_getu
#define elf_getu(a, b) elf_getu64(a, b)
#undef elfhdr
#define elfhdr elf64hdr
#include "elfclass.h"
default:
if (file_printf(ms, ", unknown class %d", clazz) == -1)
return -1;
break;
}
return 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2174_1 |
crossvul-cpp_data_bad_1389_0 | /*
* This file is part of Cockpit.
*
* Copyright (C) 2013 Red Hat, Inc.
*
* Cockpit is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* Cockpit is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
*/
#include "config.h"
#include "cockpitauth.h"
#include "cockpitws.h"
#include "websocket/websocket.h"
#include "common/cockpitauthorize.h"
#include "common/cockpitconf.h"
#include "common/cockpiterror.h"
#include "common/cockpithex.h"
#include "common/cockpitlog.h"
#include "common/cockpitjson.h"
#include "common/cockpitmemory.h"
#include "common/cockpitpipe.h"
#include "common/cockpitpipetransport.h"
#include "common/cockpitsystem.h"
#include "common/cockpitunixfd.h"
#include "common/cockpitwebserver.h"
#include <security/pam_appl.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#define ACTION_SSH "remote-login-ssh"
#define ACTION_NONE "none"
#define LOCAL_SESSION "local-session"
/* Some tunables that can be set from tests */
const gchar *cockpit_ws_session_program =
PACKAGE_LIBEXEC_DIR "/cockpit-session";
const gchar *cockpit_ws_ssh_program =
PACKAGE_LIBEXEC_DIR "/cockpit-ssh";
/* Timeout of authenticated session when no connections */
guint cockpit_ws_service_idle = 15;
/* Timeout of everything when no one is connected */
guint cockpit_ws_process_idle = 90;
/* The amount of time a spawned process has to complete authentication */
guint cockpit_ws_auth_process_timeout = 30;
guint cockpit_ws_auth_response_timeout = 60;
/* Maximum number of pending authentication requests */
const gchar *cockpit_ws_max_startups = NULL;
static guint max_startups = 10;
static guint sig__idling = 0;
/* Tristate tracking whether gssapi works properly: -1 = unknown, 0 = unavailable, 1 = available */
static gint gssapi_available = -1;
G_DEFINE_TYPE (CockpitAuth, cockpit_auth, G_TYPE_OBJECT)
typedef struct {
gint refs;
gchar *name;
gchar *cookie;
CockpitAuth *auth;
CockpitWebService *service;
gboolean initialized;
guint timeout_tag;
gulong idling_sig;
gulong destroy_sig;
/* Used during authentication */
CockpitTransport *transport;
gulong control_sig;
gulong close_sig;
guint client_timeout;
guint authorize_timeout;
/* An open /login request from client */
GSimpleAsyncResult *result;
/* An authorization header from client */
gchar *authorization;
/* An authorize challenge from session */
JsonObject *authorize;
/* The conversation in progress */
gchar *conversation;
} CockpitSession;
static void
cockpit_session_reset (gpointer data)
{
CockpitSession *session = data;
GSimpleAsyncResult *result;
char *conversation;
CockpitAuth *self;
char *cookie;
if (session->result)
{
result = session->result;
session->result = NULL;
g_simple_async_result_complete (result);
g_object_unref (result);
}
if (session->authorization)
{
cockpit_memory_clear (session->authorization, -1);
g_free (session->authorization);
session->authorization = NULL;
}
conversation = session->conversation;
session->conversation = NULL;
cookie = session->cookie;
session->cookie = NULL;
self = session->auth;
  /* No accessing session after this point: removing the hash table entries below may drop the last reference and free it */
if (cookie)
{
g_hash_table_remove (self->sessions, cookie);
g_free (cookie);
}
if (conversation)
{
g_hash_table_remove (self->conversations, conversation);
g_free (conversation);
}
}
static CockpitSession *
cockpit_session_ref (CockpitSession *session)
{
session->refs++;
return session;
}
static void
on_web_service_gone (gpointer data,
GObject *where_the_object_was)
{
CockpitSession *session = data;
session->service = NULL;
cockpit_session_reset (session);
}
static void
cockpit_session_unref (gpointer data)
{
CockpitSession *session = data;
CockpitCreds *creds;
GObject *object;
session->refs--;
if (session->refs > 0)
return;
cockpit_session_reset (data);
g_free (session->name);
g_free (session->cookie);
if (session->authorize)
json_object_unref (session->authorize);
if (session->transport)
{
if (session->control_sig)
g_signal_handler_disconnect (session->transport, session->control_sig);
if (session->close_sig)
g_signal_handler_disconnect (session->transport, session->close_sig);
g_object_unref (session->transport);
}
if (session->service)
{
creds = cockpit_web_service_get_creds (session->service);
object = G_OBJECT (session->service);
session->service = NULL;
if (creds)
cockpit_creds_poison (creds);
if (session->idling_sig)
g_signal_handler_disconnect (object, session->idling_sig);
if (session->destroy_sig)
g_signal_handler_disconnect (object, session->destroy_sig);
g_object_weak_unref (object, on_web_service_gone, session);
g_object_run_dispose (object);
g_object_unref (object);
}
if (session->timeout_tag)
g_source_remove (session->timeout_tag);
g_free (session);
}
static void
byte_array_clear_and_free (gpointer data)
{
GByteArray *buffer = data;
cockpit_memory_clear (buffer->data, buffer->len);
g_byte_array_free (buffer, TRUE);
}
static void
cockpit_auth_finalize (GObject *object)
{
CockpitAuth *self = COCKPIT_AUTH (object);
if (self->timeout_tag)
g_source_remove (self->timeout_tag);
g_bytes_unref (self->key);
g_hash_table_remove_all (self->sessions);
g_hash_table_remove_all (self->conversations);
g_hash_table_destroy (self->sessions);
g_hash_table_destroy (self->conversations);
G_OBJECT_CLASS (cockpit_auth_parent_class)->finalize (object);
}
static gboolean
on_process_timeout (gpointer data)
{
CockpitAuth *self = COCKPIT_AUTH (data);
self->timeout_tag = 0;
if (g_hash_table_size (self->sessions) == 0)
{
g_debug ("auth is idle");
g_signal_emit (self, sig__idling, 0);
}
return FALSE;
}
static void
cockpit_auth_init (CockpitAuth *self)
{
static const gsize key_len = 128;
gpointer key;
key = cockpit_authorize_nonce (key_len);
if (!key)
g_error ("couldn't read random key, startup aborted");
self->key = g_bytes_new_take (key, key_len);
self->sessions = g_hash_table_new_full (g_str_hash, g_str_equal,
NULL, cockpit_session_unref);
self->conversations = g_hash_table_new_full (g_str_hash, g_str_equal,
NULL, cockpit_session_unref);
self->timeout_tag = g_timeout_add_seconds (cockpit_ws_process_idle,
on_process_timeout, self);
self->startups = 0;
self->max_startups = max_startups;
self->max_startups_begin = max_startups;
self->max_startups_rate = 100;
}
gchar *
cockpit_auth_nonce (CockpitAuth *self)
{
const guchar *key;
gsize len;
guint64 seed;
seed = self->nonce_seed++;
key = g_bytes_get_data (self->key, &len);
return g_compute_hmac_for_data (G_CHECKSUM_SHA256, key, len,
(guchar *)&seed, sizeof (seed));
}
static gchar *
get_remote_address (GIOStream *io)
{
GSocketAddress *remote = NULL;
GSocketConnection *connection = NULL;
GIOStream *base;
gchar *result = NULL;
if (G_IS_TLS_CONNECTION (io))
{
g_object_get (io, "base-io-stream", &base, NULL);
if (G_IS_SOCKET_CONNECTION (base))
connection = g_object_ref (base);
g_object_unref (base);
}
else if (G_IS_SOCKET_CONNECTION (io))
{
connection = g_object_ref (io);
}
if (connection)
remote = g_socket_connection_get_remote_address (connection, NULL);
if (remote && G_IS_INET_SOCKET_ADDRESS (remote))
result = g_inet_address_to_string (g_inet_socket_address_get_address (G_INET_SOCKET_ADDRESS (remote)));
if (remote)
g_object_unref (remote);
if (connection)
g_object_unref (connection);
return result;
}
gchar *
cockpit_auth_steal_authorization (GHashTable *headers,
GIOStream *connection,
gchar **ret_type,
gchar **ret_conversation)
{
char *type = NULL;
gchar *ret = NULL;
gchar *line;
gpointer key;
g_assert (headers != NULL);
g_assert (ret_conversation != NULL);
g_assert (ret_type != NULL);
/* Avoid copying as it can contain passwords */
if (g_hash_table_lookup_extended (headers, "Authorization", &key, (gpointer *)&line))
{
g_hash_table_steal (headers, "Authorization");
g_free (key);
}
else
{
      /*
       * If we don't yet know whether Negotiate authentication is
       * possible, ask our session to try Negotiate auth, but
       * without any input data.
       */
if (gssapi_available != 0)
line = g_strdup ("Negotiate");
else
return NULL;
}
/* Dig out the authorization type */
if (!cockpit_authorize_type (line, &type))
goto out;
/* If this is a conversation, get that part out too */
if (g_str_equal (type, "x-conversation"))
{
if (!cockpit_authorize_subject (line, ret_conversation))
goto out;
}
/*
* So for negotiate authentication, conversation happens on a
* single connection. Yes that's right, GSSAPI, NTLM, and all
* those nice mechanisms are keep-alive based, not HTTP request based.
*/
else if (g_str_equal (type, "negotiate"))
{
/* Resume an already running conversation? */
if (ret_conversation && connection)
*ret_conversation = g_strdup (g_object_get_data (G_OBJECT (connection), type));
}
if (ret_type)
{
*ret_type = type;
type = NULL;
}
ret = line;
line = NULL;
out:
g_free (line);
g_free (type);
return ret;
}
static const gchar *
type_option (const gchar *type,
const gchar *option,
const gchar *default_str)
{
if (type && cockpit_conf_string (type, option))
return cockpit_conf_string (type, option);
return default_str;
}
static guint
timeout_option (const gchar *name,
const gchar *type,
guint default_value)
{
return cockpit_conf_guint (type, name, default_value,
MAX_AUTH_TIMEOUT, MIN_AUTH_TIMEOUT);
}
static const gchar *
application_parse_host (const gchar *application)
{
const gchar *prefix = "cockpit+=";
gint len = strlen (prefix);
g_return_val_if_fail (application != NULL, NULL);
if (g_str_has_prefix (application, prefix) && application[len] != '\0')
return application + len;
else
return NULL;
}
static gchar *
application_cookie_name (const gchar *application)
{
const gchar *host = application_parse_host (application);
gchar *cookie_name = NULL;
if (host)
cookie_name = g_strdup_printf ("machine-cockpit+%s", host);
else
cookie_name = g_strdup (application);
return cookie_name;
}
/* Struct for adding tty later */
typedef struct {
int io;
} ChildData;
static void
session_child_setup (gpointer data)
{
ChildData *child = data;
if (dup2 (child->io, 0) < 0 || dup2 (child->io, 1) < 0)
{
g_printerr ("couldn't set child stdin/stout file descriptors\n");
_exit (127);
}
close (child->io);
if (cockpit_unix_fd_close_all (3, -1) < 0)
{
g_printerr ("couldn't close file descriptors: %m\n");
_exit (127);
}
}
static CockpitTransport *
session_start_process (const gchar **argv,
const gchar **env)
{
CockpitTransport *transport = NULL;
CockpitPipe *pipe = NULL;
GError *error = NULL;
ChildData child;
gboolean ret;
GPid pid = 0;
int fds[2];
g_debug ("spawning %s", argv[0]);
/* The main stdin/stdout for the socket ... both are read/writable */
if (socketpair (PF_LOCAL, SOCK_STREAM, 0, fds) < 0)
{
g_warning ("couldn't create loopback socket: %s", g_strerror (errno));
return NULL;
}
child.io = fds[0];
ret = g_spawn_async_with_pipes (NULL, (gchar **)argv, (gchar **)env,
G_SPAWN_DO_NOT_REAP_CHILD | G_SPAWN_LEAVE_DESCRIPTORS_OPEN,
session_child_setup, &child,
&pid, NULL, NULL, NULL, &error);
close (fds[0]);
if (!ret)
{
g_message ("couldn't launch cockpit session: %s: %s", argv[0], error->message);
g_error_free (error);
close (fds[1]);
return NULL;
}
pipe = g_object_new (COCKPIT_TYPE_PIPE,
"in-fd", fds[1],
"out-fd", fds[1],
"pid", pid,
"name", argv[0],
NULL);
transport = cockpit_pipe_transport_new (pipe);
g_object_unref (pipe);
return transport;
}
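/*
 * Descriptive note (inferred from the code above): fds[0] becomes the
 * child's stdin and stdout via session_child_setup(), while fds[1]
 * stays in cockpit-ws wrapped in a CockpitPipe, giving one
 * bidirectional message stream between the two processes.
 */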
static void
send_authorize_reply (CockpitTransport *transport,
const gchar *cookie,
const gchar *authorization)
{
const gchar *fields[] = {
"command", "authorize",
"cookie", cookie,
"response", authorization,
NULL
};
GBytes *payload;
const gchar *delim;
GByteArray *buffer;
JsonNode *node;
gchar *encoded;
gsize length;
guint i;
buffer = g_byte_array_new ();
for (i = 0; fields[i] != NULL; i++)
{
if (i % 2 == 0)
delim = buffer->len == 0 ? "{" : ",";
else
delim = ":";
g_byte_array_append (buffer, (guchar *)delim, 1);
node = json_node_init_string (json_node_new (JSON_NODE_VALUE), fields[i]);
encoded = cockpit_json_write (node, &length);
g_byte_array_append (buffer, (guchar *)encoded, length);
cockpit_memory_clear ((guchar *)json_node_get_string (node), -1);
json_node_free (node);
g_free (encoded);
}
g_byte_array_append (buffer, (guchar *)"}", 1);
payload = g_bytes_new_with_free_func (buffer->data, buffer->len,
byte_array_clear_and_free, buffer);
cockpit_transport_send (transport, NULL, payload);
g_bytes_unref (payload);
}
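/*
 * For reference (inferred from the loop above, not normative): the
 * frame assembled field-by-field here is equivalent to
 *
 *   {"command":"authorize","cookie":"<cookie>","response":"<response>"}
 *
 * Building it piecewise lets the JSON node copies and the finished
 * buffer be wiped with cockpit_memory_clear(), so the credential never
 * lingers in an intermediate allocation.
 */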
static gboolean
reply_authorize_challenge (CockpitSession *session)
{
const gchar *challenge = NULL;
char *authorize_type = NULL;
char *authorization_type = NULL;
const gchar *cookie = NULL;
const gchar *response = NULL;
JsonObject *login_data = NULL;
gboolean ret = FALSE;
if (!session->authorize)
goto out;
if (!cockpit_json_get_string (session->authorize, "cookie", NULL, &cookie) ||
!cockpit_json_get_string (session->authorize, "challenge", NULL, &challenge) ||
!cockpit_json_get_string (session->authorize, "response", NULL, &response))
goto out;
if (response && !cookie)
{
cockpit_memory_clear (session->authorization, -1);
g_free (session->authorization);
session->authorization = g_strdup (response);
ret = TRUE;
goto out;
}
if (!challenge || !cookie)
goto out;
if (!cockpit_authorize_type (challenge, &authorize_type))
goto out;
/* Handle prompting for login data */
if (g_str_equal (authorize_type, "x-login-data"))
{
if (cockpit_json_get_object (session->authorize, "login-data", NULL, &login_data) && login_data)
cockpit_creds_set_login_data (cockpit_web_service_get_creds (session->service), login_data);
ret = TRUE;
goto out;
}
if (!session->authorization)
goto out;
if (cockpit_authorize_type (session->authorization, &authorization_type) &&
(g_str_equal (authorize_type, "*") || g_str_equal (authorize_type, authorization_type)))
{
send_authorize_reply (session->transport, cookie, session->authorization);
cockpit_memory_clear (session->authorization, -1);
g_free (session->authorization);
session->authorization = NULL;
ret = TRUE;
}
out:
free (authorize_type);
free (authorization_type);
return ret;
}
static gboolean
on_authorize_timeout (gpointer data)
{
CockpitSession *session = data;
CockpitTransport *transport = cockpit_web_service_get_transport (session->service);
session->timeout_tag = 0;
g_message ("%s: session timed out during authentication", session->name);
cockpit_transport_close (transport, "timeout");
return FALSE;
}
static void
reset_authorize_timeout (CockpitSession *session,
gboolean waiting_for_client)
{
guint seconds = waiting_for_client ? session->client_timeout : session->authorize_timeout;
if (session->timeout_tag)
g_source_remove (session->timeout_tag);
session->timeout_tag = g_timeout_add_seconds (seconds, on_authorize_timeout, session);
}
static void
propagate_problem_to_error (CockpitSession *session,
JsonObject *options,
const gchar *problem,
const gchar *message,
GError **error)
{
char *type = NULL;
const char *pw_result = NULL;
JsonObject *auth_results = NULL;
g_return_if_fail (error != NULL);
if (g_str_equal (problem, "authentication-unavailable") &&
cockpit_authorize_type (session->authorization, &type) &&
g_str_equal (type, "negotiate"))
{
g_debug ("%s: negotiate authentication not available", session->name);
g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Negotiate authentication not available");
gssapi_available = 0;
}
else if (g_str_equal (problem, "authentication-failed") ||
g_str_equal (problem, "authentication-unavailable"))
{
cockpit_json_get_object (options, "auth-method-results", NULL, &auth_results);
if (auth_results)
{
cockpit_json_get_string (auth_results, "password", NULL, &pw_result);
if (!pw_result || g_strcmp0 (pw_result, "no-server-support") == 0)
{
g_clear_error (error);
g_set_error (error, COCKPIT_ERROR,
COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Authentication failed: authentication-not-supported");
}
}
if (*error == NULL)
{
g_debug ("%s: %s %s", session->name, problem, message);
g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Authentication failed");
}
}
else if (g_str_equal (problem, "no-host") ||
g_str_equal (problem, "invalid-hostkey") ||
g_str_equal (problem, "unknown-hostkey") ||
g_str_equal (problem, "unknown-host") ||
g_str_equal (problem, "terminated"))
{
g_debug ("%s: %s", session->name, problem);
g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Authentication failed: %s", problem);
}
else if (g_str_equal (problem, "access-denied"))
{
g_debug ("permission denied %s", message);
g_set_error_literal (error, COCKPIT_ERROR, COCKPIT_ERROR_PERMISSION_DENIED,
message ? message : "Permission denied");
}
else
{
g_debug ("%s: errored %s: %s", session->name, problem, message);
if (message)
{
g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_FAILED,
"Authentication failed: %s: %s", problem, message);
}
else
{
g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_FAILED,
"Authentication failed: %s", problem);
}
}
free (type);
}
static gboolean
on_transport_control (CockpitTransport *transport,
const char *command,
const gchar *channel,
JsonObject *options,
GBytes *payload,
gpointer user_data)
{
CockpitSession *session = user_data;
const gchar *problem = NULL;
const gchar *message = NULL;
GSimpleAsyncResult *result;
GError *error = NULL;
gboolean ret = TRUE;
if (g_str_equal (command, "init"))
{
g_debug ("session initialized");
g_signal_handler_disconnect (session->transport, session->control_sig);
g_signal_handler_disconnect (session->transport, session->close_sig);
session->control_sig = session->close_sig = 0;
session->initialized = TRUE;
if (cockpit_json_get_string (options, "problem", NULL, &problem) && problem)
{
if (!cockpit_json_get_string (options, "message", NULL, &message))
message = NULL;
propagate_problem_to_error (session, options, problem, message, &error);
if (session->result)
{
g_simple_async_result_take_error (session->result, error);
}
else
{
g_message ("ignoring failure from session process: %s", error->message);
g_error_free (error);
}
}
ret = FALSE; /* Let this message be handled elsewhere */
}
else if (g_str_equal (command, "authorize"))
{
const gchar *challenge;
/* handle x-host-key challenge */
if (cockpit_json_get_string (options, "challenge", NULL, &challenge) && g_strcmp0 (challenge, "x-host-key") == 0)
{
const gchar *cookie;
g_return_val_if_fail (cockpit_json_get_string (options, "cookie", NULL, &cookie), FALSE);
/* return a negative answer; we handle unknown hosts interactively, or want to fail on them */
g_debug ("received x-host-key authorize challenge");
send_authorize_reply (session->transport, cookie, "");
return TRUE;
}
/* handle login ("*") challenge */
g_debug ("received authorize challenge");
if (session->authorize)
json_object_unref (session->authorize);
session->authorize = json_object_ref (options);
if (reply_authorize_challenge (session))
return TRUE;
}
else
{
g_message ("unexpected \"%s\" control message from session before \"init\"", command);
cockpit_transport_close (transport, "protocol-error");
}
if (session->result)
{
result = session->result;
session->result = NULL;
g_simple_async_result_complete (result);
g_object_unref (result);
}
return ret;
}
static void
on_transport_closed (CockpitTransport *transport,
const gchar *problem,
gpointer user_data)
{
CockpitSession *session = user_data;
GSimpleAsyncResult *result;
CockpitPipe *pipe;
GError *error = NULL;
gint status = 0;
if (g_strcmp0 (problem, "timeout") == 0)
{
g_message ("%s: authentication timed out", session->name);
g_set_error (&error, COCKPIT_ERROR, COCKPIT_ERROR_FAILED,
"Authentication failed: Timeout");
}
else if (!session->initialized)
{
pipe = cockpit_pipe_transport_get_pipe (COCKPIT_PIPE_TRANSPORT (transport));
if (cockpit_pipe_get_pid (pipe, NULL))
status = cockpit_pipe_exit_status (pipe);
g_debug ("%s: authentication process exited: %d; problem %s", session->name, status, problem);
if (problem)
{
g_set_error (&error, COCKPIT_ERROR, COCKPIT_ERROR_FAILED,
g_strcmp0 (problem, "no-cockpit") == 0
? "The cockpit package is not installed"
: "Internal error in login process");
}
else
{
g_set_error (&error, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Authentication failed");
}
}
if (!error)
{
g_message ("%s: authentication process failed", session->name);
g_set_error (&error, COCKPIT_ERROR, COCKPIT_ERROR_FAILED,
"Authentication internal error");
}
if (session->result)
{
result = session->result;
session->result = NULL;
g_simple_async_result_take_error (result, error);
g_simple_async_result_complete (result);
g_object_unref (result);
}
else
{
g_message ("ignoring failure from session process: %s", error->message);
cockpit_session_reset (session);
g_error_free (error);
}
}
static CockpitCreds *
build_session_credentials (CockpitAuth *self,
GIOStream *connection,
GHashTable *headers,
const char *application,
const char *type,
const char *authorization)
{
CockpitCreds *creds;
const gchar *authorized;
char *user = NULL;
char *raw = NULL;
GBytes *password = NULL;
gchar *remote_peer = NULL;
gchar *csrf_token = NULL;
/* Prepare various credentials */
if (g_strcmp0 (type, "basic") == 0)
{
raw = cockpit_authorize_parse_basic (authorization, &user);
/* If "password" is in the X-Authorize header, we can keep the password for use */
authorized = g_hash_table_lookup (headers, "X-Authorize");
if (!authorized)
authorized = "";
if (strstr (authorized, "password") && raw)
{
password = g_bytes_new_take (raw, strlen (raw));
raw = NULL;
}
}
remote_peer = get_remote_address (connection);
csrf_token = cockpit_auth_nonce (self);
creds = cockpit_creds_new (application,
COCKPIT_CRED_USER, user,
COCKPIT_CRED_PASSWORD, password,
COCKPIT_CRED_RHOST, remote_peer,
COCKPIT_CRED_CSRF_TOKEN, csrf_token,
NULL);
g_free (remote_peer);
if (raw)
{
cockpit_memory_clear (raw, strlen (raw));
free (raw);
}
g_free (csrf_token);
if (password)
g_bytes_unref (password);
free (user);
return creds;
}
static gboolean
on_session_timeout (gpointer data)
{
CockpitSession *session = data;
session->timeout_tag = 0;
if (!session->service || cockpit_web_service_get_idling (session->service))
{
g_info ("session timed out");
cockpit_session_reset (session);
}
return FALSE;
}
static void
on_web_service_idling (CockpitWebService *service,
gpointer data)
{
CockpitSession *session = data;
if (session->timeout_tag)
g_source_remove (session->timeout_tag);
g_debug ("session is idle");
  /*
   * The minimum amount of time within which a request must make use of
   * this new web service, otherwise it will just go away.
   */
session->timeout_tag = g_timeout_add_seconds (cockpit_ws_service_idle,
on_session_timeout,
session);
/*
* Also reset the timer which checks whether anything is going on in the
* entire process or not.
*/
if (session->auth->timeout_tag)
g_source_remove (session->auth->timeout_tag);
session->auth->timeout_tag = g_timeout_add_seconds (cockpit_ws_process_idle,
on_process_timeout, session->auth);
}
static void
on_web_service_destroy (CockpitWebService *service,
gpointer data)
{
on_web_service_idling (service, data);
cockpit_session_reset (data);
}
static CockpitSession *
cockpit_session_create (CockpitAuth *self,
const gchar *name,
CockpitCreds *creds,
CockpitTransport *transport)
{
CockpitSession *session;
session = g_new0 (CockpitSession, 1);
session->refs = 1;
session->name = g_path_get_basename (name);
session->auth = self;
session->service = cockpit_web_service_new (creds, transport);
session->idling_sig = g_signal_connect (session->service, "idling",
G_CALLBACK (on_web_service_idling), session);
session->destroy_sig = g_signal_connect (session->service, "destroy",
G_CALLBACK (on_web_service_destroy), session);
g_object_weak_ref (G_OBJECT (session->service), on_web_service_gone, session);
session->transport = g_object_ref (transport);
session->control_sig = g_signal_connect (transport, "control", G_CALLBACK (on_transport_control), session);
session->close_sig = g_signal_connect (transport, "closed", G_CALLBACK (on_transport_closed), session);
return session;
}
static CockpitSession *
cockpit_session_launch (CockpitAuth *self,
GIOStream *connection,
GHashTable *headers,
const gchar *type,
const gchar *authorization,
const gchar *application,
GError **error)
{
CockpitTransport *transport = NULL;
CockpitSession *session = NULL;
CockpitCreds *creds = NULL;
const gchar *host;
const gchar *action;
const gchar *command;
const gchar *section;
const gchar *program_default;
gchar **env = g_get_environ ();
const gchar *argv[] = {
"command",
"host",
NULL,
};
host = application_parse_host (application);
action = type_option (type, "action", "localhost");
if (g_strcmp0 (action, ACTION_NONE) == 0)
{
g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Authentication disabled");
goto out;
}
/* These are the credentials we'll carry around for this session */
creds = build_session_credentials (self, connection, headers,
application, type, authorization);
if (host)
section = COCKPIT_CONF_SSH_SECTION;
else if (self->login_loopback && g_strcmp0 (type, "basic") == 0)
section = COCKPIT_CONF_SSH_SECTION;
else if (g_strcmp0 (action, ACTION_SSH) == 0)
section = COCKPIT_CONF_SSH_SECTION;
else
section = type;
if (g_strcmp0 (section, COCKPIT_CONF_SSH_SECTION) == 0)
{
if (!host)
host = type_option (COCKPIT_CONF_SSH_SECTION, "host", "127.0.0.1");
program_default = cockpit_ws_ssh_program;
}
else
{
program_default = cockpit_ws_session_program;
}
command = type_option (section, "command", program_default);
if (cockpit_creds_get_rhost (creds))
{
env = g_environ_setenv (env, "COCKPIT_REMOTE_PEER",
cockpit_creds_get_rhost (creds),
TRUE);
}
argv[0] = command;
argv[1] = host ? host : "localhost";
transport = session_start_process (argv, (const gchar **)env);
if (!transport)
{
g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_FAILED,
"Authentication failed to start");
goto out;
}
session = cockpit_session_create (self, argv[0], creds, transport);
/* How long to wait for the auth process to send some data */
session->authorize_timeout = timeout_option ("timeout", section, cockpit_ws_auth_process_timeout);
  /* How long to wait for a response from the client to an auth prompt */
session->client_timeout = timeout_option ("response-timeout", section, cockpit_ws_auth_response_timeout);
out:
g_strfreev (env);
if (creds)
cockpit_creds_unref (creds);
if (transport)
g_object_unref (transport);
return session;
}
static gboolean
build_authorize_challenge (CockpitAuth *self,
JsonObject *authorize,
GIOStream *connection,
GHashTable *headers,
JsonObject **body,
gchar **conversation)
{
const gchar *challenge = NULL;
gchar *type = NULL;
JsonObject *object;
GList *l, *names;
if (!cockpit_json_get_string (authorize, "challenge", NULL, &challenge) ||
!cockpit_authorize_type (challenge, &type))
{
g_message ("invalid \"challenge\" field in \"authorize\" message");
return FALSE;
}
g_hash_table_replace (headers, g_strdup ("WWW-Authenticate"), g_strdup (challenge));
*conversation = NULL;
if (g_str_equal (type, "negotiate"))
{
gssapi_available = 1;
*conversation = cockpit_auth_nonce (self);
if (connection)
g_object_set_data_full (G_OBJECT (connection), "negotiate", g_strdup (*conversation), g_free);
}
else if (g_str_equal (type, "x-conversation"))
{
cockpit_authorize_subject (challenge, conversation);
}
object = json_object_new ();
names = json_object_get_members (authorize);
for (l = names; l != NULL; l = g_list_next (l))
{
if (!g_str_equal (l->data, "challenge") && !g_str_equal (l->data, "cookie"))
json_object_set_member (object, l->data, json_object_dup_member (authorize, l->data));
}
if (body)
*body = object;
g_list_free (names);
g_free (type);
return TRUE;
}
static void
authorize_logger (const char *data)
{
g_message ("%s", data);
}
static void
cockpit_auth_class_init (CockpitAuthClass *klass)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
gobject_class->finalize = cockpit_auth_finalize;
sig__idling = g_signal_new ("idling", COCKPIT_TYPE_AUTH, G_SIGNAL_RUN_FIRST,
0, NULL, NULL, NULL, G_TYPE_NONE, 0);
cockpit_authorize_logger (authorize_logger, 0);
}
static char *
base64_decode_string (const char *enc)
{
if (enc == NULL)
return NULL;
char *dec = g_strdup (enc);
gsize len;
g_base64_decode_inplace (dec, &len);
dec[len] = '\0';
return dec;
}
static CockpitSession *
session_for_headers (CockpitAuth *self,
const gchar *path,
GHashTable *in_headers)
{
gchar *cookie = NULL;
gchar *raw = NULL;
const char *prefix = "v=2;k=";
CockpitSession *ret = NULL;
gchar *application;
gchar *cookie_name = NULL;
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (in_headers != NULL, FALSE);
application = cockpit_auth_parse_application (path, NULL);
if (!application)
return NULL;
cookie_name = application_cookie_name (application);
raw = cockpit_web_server_parse_cookie (in_headers, cookie_name);
if (raw)
{
cookie = base64_decode_string (raw);
if (cookie != NULL)
{
if (g_str_has_prefix (cookie, prefix))
ret = g_hash_table_lookup (self->sessions, cookie);
else
g_debug ("invalid or unsupported cookie: %s", cookie);
/* We must never find the default session based on a cookie */
g_assert (!ret || !g_str_equal (ret->cookie, LOCAL_SESSION));
g_assert (!ret || !g_str_equal (ret->name, LOCAL_SESSION));
g_free (cookie);
}
g_free (raw);
}
/* Check for a default session for auto-login */
if (!ret)
ret = g_hash_table_lookup (self->sessions, LOCAL_SESSION);
g_free (application);
g_free (cookie_name);
return ret;
}
CockpitWebService *
cockpit_auth_check_cookie (CockpitAuth *self,
const gchar *path,
GHashTable *in_headers)
{
CockpitSession *session;
CockpitCreds *creds;
session = session_for_headers (self, path, in_headers);
if (session)
{
creds = cockpit_web_service_get_creds (session->service);
g_debug ("received %s credential cookie for session",
cockpit_creds_get_application (creds));
return g_object_ref (session->service);
}
else
{
g_debug ("received unknown/invalid credential cookie");
return NULL;
}
}
void
cockpit_auth_local_async (CockpitAuth *self,
const gchar *user,
CockpitPipe *pipe,
GAsyncReadyCallback callback,
gpointer user_data)
{
CockpitTransport *transport;
CockpitSession *session;
CockpitCreds *creds;
gchar *csrf_token;
g_return_if_fail (COCKPIT_IS_AUTH (self));
g_return_if_fail (COCKPIT_IS_PIPE (pipe));
g_return_if_fail (user != NULL);
transport = cockpit_pipe_transport_new (pipe);
csrf_token = cockpit_auth_nonce (self);
creds = cockpit_creds_new ("cockpit",
COCKPIT_CRED_USER, user,
COCKPIT_CRED_RHOST, "localhost",
COCKPIT_CRED_CSRF_TOKEN, csrf_token,
NULL);
session = cockpit_session_create (self, cockpit_pipe_get_name (pipe), creds, transport);
session->cookie = g_strdup (LOCAL_SESSION);
g_hash_table_insert (self->sessions, session->cookie, session);
session->result = g_simple_async_result_new (G_OBJECT (self), callback, user_data,
cockpit_auth_local_async);
g_free (csrf_token);
g_object_unref (transport);
cockpit_creds_unref (creds);
}
gboolean
cockpit_auth_local_finish (CockpitAuth *self,
GAsyncResult *result,
GError **error)
{
g_return_val_if_fail (g_simple_async_result_is_valid (result, G_OBJECT (self),
cockpit_auth_local_async), FALSE);
return g_simple_async_result_propagate_error (G_SIMPLE_ASYNC_RESULT (result), error);
}
/*
 * Returns TRUE if auth can proceed, FALSE otherwise.
 * Dropping starts at max_startups_begin pending connections, with a
 * probability of (max_startups_rate/100); the probability increases
 * linearly until all connections are dropped once
 * startups > max_startups.
 */
static gboolean
can_start_auth (CockpitAuth *self)
{
int p, r;
/* 0 means unlimited */
if (self->max_startups == 0)
return TRUE;
/* Under soft limit */
if (self->startups <= self->max_startups_begin)
return TRUE;
/* Over hard limit */
if (self->startups > self->max_startups)
return FALSE;
/* If rate is 100, soft limit is hard limit */
if (self->max_startups_rate == 100)
return FALSE;
p = 100 - self->max_startups_rate;
p *= self->startups - self->max_startups_begin;
p /= self->max_startups - self->max_startups_begin;
p += self->max_startups_rate;
r = g_random_int_range (0, 100);
g_debug ("calculating if auth can start: (%u:%u:%u): p %d, r %d",
self->max_startups_begin, self->max_startups_rate,
self->max_startups, p, r);
return (r < p) ? FALSE : TRUE;
}
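/*
 * Illustrative sketch, not part of the original source: the linear
 * ramp above extracted into a pure helper so the arithmetic can be
 * checked in isolation (the name and the numbers are hypothetical).
 * For example, with begin=10, rate=30, max=100 and 55 pending
 * startups: p = (100-30) * (55-10) / (100-10) + 30 = 65, i.e. a 65%
 * chance of dropping the connection.
 */
static int
example_drop_probability (guint startups,
                          guint begin,
                          guint rate,
                          guint max)
{
  int p;

  if (startups <= begin)
    return 0;       /* under the soft limit: never drop */
  if (startups > max || rate == 100)
    return 100;     /* over the hard limit: always drop */

  p = 100 - rate;
  p *= startups - begin;
  p /= max - begin;
  p += rate;
  return p;
}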
void
cockpit_auth_login_async (CockpitAuth *self,
const gchar *path,
GIOStream *connection,
GHashTable *headers,
GAsyncReadyCallback callback,
gpointer user_data)
{
GSimpleAsyncResult *result = NULL;
CockpitSession *session;
GError *error = NULL;
gchar *type = NULL;
gchar *conversation = NULL;
gchar *authorization = NULL;
gchar *application = NULL;
g_return_if_fail (path != NULL);
g_return_if_fail (headers != NULL);
self->startups++;
result = g_simple_async_result_new (G_OBJECT (self), callback, user_data,
cockpit_auth_login_async);
if (!can_start_auth (self))
{
g_message ("Request dropped; too many startup connections: %u", self->startups);
g_simple_async_result_set_error (result, COCKPIT_ERROR, COCKPIT_ERROR_FAILED,
"Connection closed by host");
g_simple_async_result_complete_in_idle (result);
goto out;
}
application = cockpit_auth_parse_application (path, NULL);
authorization = cockpit_auth_steal_authorization (headers, connection, &type, &conversation);
if (!application || !authorization)
{
g_simple_async_result_set_error (result, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Authentication required");
g_simple_async_result_complete_in_idle (result);
goto out;
}
if (conversation)
{
session = g_hash_table_lookup (self->conversations, conversation);
if (!session)
{
g_simple_async_result_set_error (result, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Invalid conversation token");
g_simple_async_result_complete_in_idle (result);
goto out;
}
g_simple_async_result_set_op_res_gpointer (result, cockpit_session_ref (session), cockpit_session_unref);
}
else
{
session = cockpit_session_launch (self, connection, headers, type, authorization, application, &error);
if (!session)
{
g_simple_async_result_take_error (result, error);
g_simple_async_result_complete_in_idle (result);
goto out;
}
g_simple_async_result_set_op_res_gpointer (result, session, cockpit_session_unref);
}
cockpit_session_reset (session);
session->result = g_object_ref (result);
session->authorization = authorization;
authorization = NULL;
if (conversation && !reply_authorize_challenge (session))
{
g_simple_async_result_set_error (result, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Invalid conversation reply");
g_simple_async_result_complete_in_idle (result);
goto out;
}
reset_authorize_timeout (session, FALSE);
out:
g_free (type);
g_free (application);
g_free (conversation);
if (authorization)
{
cockpit_memory_clear (authorization, -1);
g_free (authorization);
}
g_object_unref (result);
}
JsonObject *
cockpit_auth_login_finish (CockpitAuth *self,
GAsyncResult *result,
GIOStream *connection,
GHashTable *headers,
GError **error)
{
JsonObject *body = NULL;
CockpitCreds *creds = NULL;
CockpitSession *session = NULL;
gboolean force_secure;
gchar *cookie_name;
gchar *cookie_b64;
gchar *header;
gchar *id;
g_return_val_if_fail (g_simple_async_result_is_valid (result, G_OBJECT (self),
cockpit_auth_login_async), NULL);
g_object_ref (result);
session = g_simple_async_result_get_op_res_gpointer (G_SIMPLE_ASYNC_RESULT (result));
if (g_simple_async_result_propagate_error (G_SIMPLE_ASYNC_RESULT (result), error))
goto out;
g_return_val_if_fail (session != NULL, NULL);
g_return_val_if_fail (session->result == NULL, NULL);
cockpit_session_reset (session);
if (session->authorize)
{
if (build_authorize_challenge (self, session->authorize, connection,
headers, &body, &session->conversation))
{
if (session->conversation)
{
reset_authorize_timeout (session, TRUE);
g_hash_table_replace (self->conversations, session->conversation, cockpit_session_ref (session));
}
}
}
if (session->initialized)
{
      /* Start off in the idling state, and begin a timeout within which the caller must start using the session */
on_web_service_idling (session->service, session);
creds = cockpit_web_service_get_creds (session->service);
id = cockpit_auth_nonce (self);
session->cookie = g_strdup_printf ("v=2;k=%s", id);
g_hash_table_insert (self->sessions, session->cookie, cockpit_session_ref (session));
g_free (id);
if (headers)
{
force_secure = connection ? !G_IS_SOCKET_CONNECTION (connection) : TRUE;
cookie_name = application_cookie_name (cockpit_creds_get_application (creds));
cookie_b64 = g_base64_encode ((guint8 *)session->cookie, strlen (session->cookie));
header = g_strdup_printf ("%s=%s; Path=/; %s HttpOnly",
cookie_name, cookie_b64,
force_secure ? " Secure;" : "");
g_free (cookie_b64);
g_free (cookie_name);
g_hash_table_insert (headers, g_strdup ("Set-Cookie"), header);
}
if (body)
json_object_unref (body);
body = cockpit_creds_to_json (creds);
}
else
{
g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
"Authentication failed");
}
out:
self->startups--;
g_object_unref (result);
/* Successful login */
if (creds)
g_info ("logged in user session");
return body;
}
CockpitAuth *
cockpit_auth_new (gboolean login_loopback)
{
CockpitAuth *self = g_object_new (COCKPIT_TYPE_AUTH, NULL);
const gchar *max_startups_conf;
gint count = 0;
self->login_loopback = login_loopback;
if (cockpit_ws_max_startups == NULL)
max_startups_conf = cockpit_conf_string ("WebService", "MaxStartups");
else
max_startups_conf = cockpit_ws_max_startups;
self->max_startups = max_startups;
self->max_startups_begin = max_startups;
self->max_startups_rate = 100;
if (max_startups_conf)
{
count = sscanf (max_startups_conf, "%u:%u:%u",
&self->max_startups_begin,
&self->max_startups_rate,
&self->max_startups);
      /* If not all three numbers are given, use the
       * first as a hard limit */
if (count == 1 || count == 2)
{
self->max_startups = self->max_startups_begin;
self->max_startups_rate = 100;
}
if (count < 1 || count > 3 ||
self->max_startups_begin > self->max_startups ||
self->max_startups_rate > 100 || self->max_startups_rate < 1)
{
g_warning ("Illegal MaxStartups spec: %s. Reverting to defaults", max_startups_conf);
self->max_startups = max_startups;
self->max_startups_begin = max_startups;
self->max_startups_rate = 100;
}
}
return self;
}
gchar *
cockpit_auth_parse_application (const gchar *path,
gboolean *is_host)
{
const gchar *pos;
gchar *tmp = NULL;
gchar *val = NULL;
g_return_val_if_fail (path != NULL, NULL);
g_return_val_if_fail (path[0] == '/', NULL);
path += 1;
/* We are being embedded as a specific application */
if (g_str_has_prefix (path, "cockpit+") && path[8] != '\0')
{
pos = strchr (path, '/');
if (pos)
val = g_strndup (path, pos - path);
else
val = g_strdup (path);
}
else if (path[0] == '=' && path[1] != '\0')
{
pos = strchr (path, '/');
if (pos)
{
tmp = g_strndup (path, pos - path);
val = g_strdup_printf ("cockpit+%s", tmp);
}
else
{
val = g_strdup_printf ("cockpit+%s", path);
}
}
else
{
val = g_strdup ("cockpit");
}
if (is_host)
*is_host = application_parse_host (val) != NULL;
g_free (tmp);
return val;
}
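/*
 * Illustrative sketch, not part of the original source: a few worked
 * cases of the parsing above, written as assertions (the paths are
 * hypothetical).  Note that the "=host" form keeps the '=' in the
 * application name, which is exactly what application_parse_host()
 * later looks for.
 */
static void
example_parse_application_cases (void)
{
  gchar *val;

  val = cockpit_auth_parse_application ("/cockpit/login", NULL);
  g_assert_cmpstr (val, ==, "cockpit");
  g_free (val);

  val = cockpit_auth_parse_application ("/cockpit+app/page", NULL);
  g_assert_cmpstr (val, ==, "cockpit+app");
  g_free (val);

  val = cockpit_auth_parse_application ("/=host.example/page", NULL);
  g_assert_cmpstr (val, ==, "cockpit+=host.example");
  g_free (val);
}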
gchar *
cockpit_auth_empty_cookie_value (const gchar *path)
{
gchar *application = cockpit_auth_parse_application (path, NULL);
gchar *cookie = application_cookie_name (application);
gchar *cookie_line = g_strdup_printf ("%s=deleted; PATH=/; HttpOnly", cookie);
g_free (application);
g_free (cookie);
return cookie_line;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_1389_0 |
crossvul-cpp_data_good_2214_0 | /*
* LZ4 Decompressor for Linux kernel
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
*
* Based on LZ4 implementation by Yann Collet.
*
* LZ4 - Fast LZ compression algorithm
* Copyright (C) 2011-2012, Yann Collet.
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You can contact the author at :
* - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
* - LZ4 source repository : http://code.google.com/p/lz4/
*/
#ifndef STATIC
#include <linux/module.h>
#include <linux/kernel.h>
#endif
#include <linux/lz4.h>
#include <asm/unaligned.h>
#include "lz4defs.h"
static int lz4_uncompress(const char *source, char *dest, int osize)
{
const BYTE *ip = (const BYTE *) source;
const BYTE *ref;
BYTE *op = (BYTE *) dest;
BYTE * const oend = op + osize;
BYTE *cpy;
unsigned token;
size_t length;
size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif
while (1) {
/* get runlength */
token = *ip++;
length = (token >> ML_BITS);
if (length == RUN_MASK) {
size_t len;
len = *ip++;
for (; len == 255; length += 255)
len = *ip++;
if (unlikely(length > (size_t)(length + len)))
goto _output_error;
length += len;
}
/* copy literals */
cpy = op + length;
if (unlikely(cpy > oend - COPYLENGTH)) {
			/*
			 * Error: not enough room for another match
			 * (min 4) + 5 literals
			 */
if (cpy != oend)
goto _output_error;
memcpy(op, ip, length);
ip += length;
break; /* EOF */
}
LZ4_WILDCOPY(ip, op, cpy);
ip -= (op - cpy);
op = cpy;
/* get offset */
LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
ip += 2;
/* Error: offset create reference outside destination buffer */
if (unlikely(ref < (BYTE *const) dest))
goto _output_error;
/* get matchlength */
length = token & ML_MASK;
if (length == ML_MASK) {
for (; *ip == 255; length += 255)
ip++;
length += *ip++;
}
/* copy repeated sequence */
if (unlikely((op - ref) < STEPSIZE)) {
#if LZ4_ARCH64
size_t dec64 = dec64table[op - ref];
#else
const int dec64 = 0;
#endif
op[0] = ref[0];
op[1] = ref[1];
op[2] = ref[2];
op[3] = ref[3];
op += 4;
ref += 4;
ref -= dec32table[op-ref];
PUT4(ref, op);
op += STEPSIZE - 4;
ref -= dec64;
} else {
LZ4_COPYSTEP(ref, op);
}
cpy = op + length - (STEPSIZE - 4);
if (cpy > (oend - COPYLENGTH)) {
/* Error: request to write beyond destination buffer */
if (cpy > oend)
goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
op = cpy;
/*
* Check EOF (should never happen, since last 5 bytes
* are supposed to be literals)
*/
if (op == oend)
goto _output_error;
continue;
}
LZ4_SECURECOPY(ref, op, cpy);
op = cpy; /* correction */
}
/* end of decoding */
return (int) (((char *)ip) - source);
/* write overflow error detected */
_output_error:
return (int) (-(((char *)ip) - source));
}
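/*
 * Illustrative sketch, not part of the original source: how a sequence
 * token splits in the loop above, assuming the usual lz4defs.h values
 * ML_BITS == 4 and ML_MASK == 15.  The high nibble is the literal run
 * length and the low nibble the match length; a saturated nibble (15)
 * means additional 255-valued continuation bytes follow.
 */
static inline void example_split_token(unsigned token,
				       size_t *literals, size_t *matchlen)
{
	*literals = token >> 4;		/* token >> ML_BITS */
	*matchlen = token & 15;		/* token & ML_MASK  */
}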
static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
int isize, size_t maxoutputsize)
{
const BYTE *ip = (const BYTE *) source;
const BYTE *const iend = ip + isize;
const BYTE *ref;
BYTE *op = (BYTE *) dest;
BYTE * const oend = op + maxoutputsize;
BYTE *cpy;
size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif
/* Main Loop */
while (ip < iend) {
unsigned token;
size_t length;
/* get runlength */
token = *ip++;
length = (token >> ML_BITS);
if (length == RUN_MASK) {
int s = 255;
while ((ip < iend) && (s == 255)) {
s = *ip++;
length += s;
}
}
/* copy literals */
cpy = op + length;
if ((cpy > oend - COPYLENGTH) ||
(ip + length > iend - COPYLENGTH)) {
if (cpy > oend)
goto _output_error;/* writes beyond buffer */
			/*
			 * Error: the LZ4 format requires all input to
			 * be consumed at this stage.
			 */
			if (ip + length != iend)
				goto _output_error;
			memcpy(op, ip, length);
			op += length;
			break;	/* necessarily EOF, due to parsing restrictions */
}
LZ4_WILDCOPY(ip, op, cpy);
ip -= (op - cpy);
op = cpy;
/* get offset */
LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
ip += 2;
		/*
		 * Error: offset creates a reference outside
		 * of the destination buffer.
		 */
		if (ref < (BYTE * const) dest)
			goto _output_error;
/* get matchlength */
length = (token & ML_MASK);
if (length == ML_MASK) {
while (ip < iend) {
int s = *ip++;
length += s;
if (s == 255)
continue;
break;
}
}
/* copy repeated sequence */
if (unlikely((op - ref) < STEPSIZE)) {
#if LZ4_ARCH64
size_t dec64 = dec64table[op - ref];
#else
const int dec64 = 0;
#endif
op[0] = ref[0];
op[1] = ref[1];
op[2] = ref[2];
op[3] = ref[3];
op += 4;
ref += 4;
ref -= dec32table[op - ref];
PUT4(ref, op);
op += STEPSIZE - 4;
ref -= dec64;
} else {
LZ4_COPYSTEP(ref, op);
}
cpy = op + length - (STEPSIZE-4);
if (cpy > oend - COPYLENGTH) {
if (cpy > oend)
goto _output_error; /* write outside of buf */
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
op = cpy;
/*
* Check EOF (should never happen, since last 5 bytes
* are supposed to be literals)
*/
if (op == oend)
goto _output_error;
continue;
}
LZ4_SECURECOPY(ref, op, cpy);
op = cpy; /* correction */
}
/* end of decoding */
return (int) (((char *) op) - dest);
/* write overflow error detected */
_output_error:
return (int) (-(((char *) ip) - source));
}
int lz4_decompress(const unsigned char *src, size_t *src_len,
unsigned char *dest, size_t actual_dest_len)
{
int ret = -1;
int input_len = 0;
input_len = lz4_uncompress(src, dest, actual_dest_len);
if (input_len < 0)
goto exit_0;
*src_len = input_len;
return 0;
exit_0:
return ret;
}
#ifndef STATIC
EXPORT_SYMBOL(lz4_decompress);
#endif
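/*
 * Illustrative usage sketch, not part of the original source: callers
 * of lz4_decompress() must already know the exact decompressed size;
 * on success *src_len reports how many input bytes were consumed.
 * The wrapper name below is hypothetical.
 */
static int example_decompress(const unsigned char *src,
			      unsigned char *dst, size_t dst_len)
{
	size_t consumed = 0;

	if (lz4_decompress(src, &consumed, dst, dst_len) < 0)
		return -1;	/* corrupt or truncated input */
	return 0;
}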
int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
unsigned char *dest, size_t *dest_len)
{
int ret = -1;
int out_len = 0;
out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len,
*dest_len);
if (out_len < 0)
goto exit_0;
*dest_len = out_len;
return 0;
exit_0:
return ret;
}
#ifndef STATIC
EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 Decompressor");
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2214_0 |
crossvul-cpp_data_bad_2015_0 | /*
* 32bit Socket syscall emulation. Based on arch/sparc64/kernel/sys_sparc32.c.
*
* Copyright (C) 2000 VA Linux Co
* Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2000,2001 Andi Kleen, SuSE Labs
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/icmpv6.h>
#include <linux/socket.h>
#include <linux/syscalls.h>
#include <linux/filter.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/export.h>
#include <net/scm.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <asm/uaccess.h>
#include <net/compat.h>
static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
struct compat_iovec __user *uiov32,
int niov)
{
int tot_len = 0;
while (niov > 0) {
compat_uptr_t buf;
compat_size_t len;
if (get_user(len, &uiov32->iov_len) ||
get_user(buf, &uiov32->iov_base))
return -EFAULT;
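		/*
		 * Clamp each segment so the running total can never
		 * exceed INT_MAX: the total is returned as an int, and
		 * overflowing it would read back as an error.
		 */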
if (len > INT_MAX - tot_len)
len = INT_MAX - tot_len;
tot_len += len;
kiov->iov_base = compat_ptr(buf);
kiov->iov_len = (__kernel_size_t) len;
uiov32++;
kiov++;
niov--;
}
return tot_len;
}
int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
{
compat_uptr_t tmp1, tmp2, tmp3;
if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) ||
__get_user(tmp1, &umsg->msg_name) ||
__get_user(kmsg->msg_namelen, &umsg->msg_namelen) ||
__get_user(tmp2, &umsg->msg_iov) ||
__get_user(kmsg->msg_iovlen, &umsg->msg_iovlen) ||
__get_user(tmp3, &umsg->msg_control) ||
__get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
__get_user(kmsg->msg_flags, &umsg->msg_flags))
return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
kmsg->msg_name = compat_ptr(tmp1);
kmsg->msg_iov = compat_ptr(tmp2);
kmsg->msg_control = compat_ptr(tmp3);
return 0;
}
/* I've named the args so it is easy to tell whose space the pointers are in. */
int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
struct sockaddr_storage *kern_address, int mode)
{
int tot_len;
if (kern_msg->msg_namelen) {
if (mode == VERIFY_READ) {
int err = move_addr_to_kernel(kern_msg->msg_name,
kern_msg->msg_namelen,
kern_address);
if (err < 0)
return err;
}
if (kern_msg->msg_name)
kern_msg->msg_name = kern_address;
} else
kern_msg->msg_name = NULL;
tot_len = iov_from_user_compat_to_kern(kern_iov,
(struct compat_iovec __user *)kern_msg->msg_iov,
kern_msg->msg_iovlen);
if (tot_len >= 0)
kern_msg->msg_iov = kern_iov;
return tot_len;
}
/* Bleech... */
#define CMSG_COMPAT_ALIGN(len) ALIGN((len), sizeof(s32))
#define CMSG_COMPAT_DATA(cmsg) \
((void __user *)((char __user *)(cmsg) + CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr))))
#define CMSG_COMPAT_SPACE(len) \
(CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)) + CMSG_COMPAT_ALIGN(len))
#define CMSG_COMPAT_LEN(len) \
(CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)) + (len))
#define CMSG_COMPAT_FIRSTHDR(msg) \
(((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
(struct compat_cmsghdr __user *)((msg)->msg_control) : \
(struct compat_cmsghdr __user *)NULL)
#define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
((ucmlen) >= sizeof(struct compat_cmsghdr) && \
(ucmlen) <= (unsigned long) \
((mhdr)->msg_controllen - \
((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
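/*
 * Worked sizing example (a reading aid; assumes the usual 12-byte
 * struct compat_cmsghdr, i.e. three 32-bit fields): for a one-int
 * payload (len == 4),
 *	CMSG_COMPAT_LEN(4)   == 12 + 4 == 16
 *	CMSG_COMPAT_SPACE(4) == 12 + CMSG_COMPAT_ALIGN(4) == 16
 * while the native 64-bit CMSG_LEN(4) is 20 and CMSG_SPACE(4) is 24,
 * which is why the lengths must be translated rather than copied through.
 */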
static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
struct compat_cmsghdr __user *cmsg, int cmsg_len)
{
char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
msg->msg_controllen)
return NULL;
return (struct compat_cmsghdr __user *)ptr;
}
/* There is a lot of hair here because the alignment rules (and
* thus placement) of cmsg headers and length are different for
* 32-bit apps. -DaveM
*/
int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
unsigned char *stackbuf, int stackbuf_size)
{
struct compat_cmsghdr __user *ucmsg;
struct cmsghdr *kcmsg, *kcmsg_base;
compat_size_t ucmlen;
__kernel_size_t kcmlen, tmp;
int err = -EFAULT;
kcmlen = 0;
kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf;
ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
while (ucmsg != NULL) {
if (get_user(ucmlen, &ucmsg->cmsg_len))
return -EFAULT;
/* Catch bogons. */
if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
return -EINVAL;
tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) +
CMSG_ALIGN(sizeof(struct cmsghdr)));
tmp = CMSG_ALIGN(tmp);
kcmlen += tmp;
ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
}
if (kcmlen == 0)
return -EINVAL;
/* The kcmlen holds the 64-bit version of the control length.
* It may not be modified as we do not stick it into the kmsg
* until we have successfully copied over all of the data
* from the user.
*/
if (kcmlen > stackbuf_size)
kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
if (kcmsg == NULL)
return -ENOBUFS;
/* Now copy them over neatly. */
memset(kcmsg, 0, kcmlen);
ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
while (ucmsg != NULL) {
if (__get_user(ucmlen, &ucmsg->cmsg_len))
goto Efault;
if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
goto Einval;
tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) +
CMSG_ALIGN(sizeof(struct cmsghdr)));
if ((char *)kcmsg_base + kcmlen - (char *)kcmsg < CMSG_ALIGN(tmp))
goto Einval;
kcmsg->cmsg_len = tmp;
tmp = CMSG_ALIGN(tmp);
if (__get_user(kcmsg->cmsg_level, &ucmsg->cmsg_level) ||
__get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type) ||
copy_from_user(CMSG_DATA(kcmsg),
CMSG_COMPAT_DATA(ucmsg),
(ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg)))))
goto Efault;
/* Advance. */
kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp);
ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
}
/* Ok, looks like we made it. Hook it up and return success. */
kmsg->msg_control = kcmsg_base;
kmsg->msg_controllen = kcmlen;
return 0;
Einval:
err = -EINVAL;
Efault:
if (kcmsg_base != (struct cmsghdr *)stackbuf)
sock_kfree_s(sk, kcmsg_base, kcmlen);
return err;
}
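/*
 * Worked conversion for the loop above (a reading aid, assuming a 64-bit
 * kernel): a 32-bit cmsg carrying one int arrives with ucmlen == 16
 * (12-byte compat header plus 4-byte payload). The payload is thus
 * 16 - 12 == 4 bytes, tmp == 4 + CMSG_ALIGN(sizeof(struct cmsghdr))
 * == 4 + 16 == 20 becomes the kernel cmsg_len, and the write pointer
 * then advances by CMSG_ALIGN(20) == 24.
 */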
int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
{
struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
struct compat_cmsghdr cmhdr;
struct compat_timeval ctv;
struct compat_timespec cts[3];
int cmlen;
if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
kmsg->msg_flags |= MSG_CTRUNC;
return 0; /* XXX: return error? check spec. */
}
if (!COMPAT_USE_64BIT_TIME) {
if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
struct timeval *tv = (struct timeval *)data;
ctv.tv_sec = tv->tv_sec;
ctv.tv_usec = tv->tv_usec;
data = &ctv;
len = sizeof(ctv);
}
if (level == SOL_SOCKET &&
(type == SCM_TIMESTAMPNS || type == SCM_TIMESTAMPING)) {
int count = type == SCM_TIMESTAMPNS ? 1 : 3;
int i;
struct timespec *ts = (struct timespec *)data;
for (i = 0; i < count; i++) {
cts[i].tv_sec = ts[i].tv_sec;
cts[i].tv_nsec = ts[i].tv_nsec;
}
data = &cts;
len = sizeof(cts[0]) * count;
}
}
cmlen = CMSG_COMPAT_LEN(len);
if (kmsg->msg_controllen < cmlen) {
kmsg->msg_flags |= MSG_CTRUNC;
cmlen = kmsg->msg_controllen;
}
cmhdr.cmsg_level = level;
cmhdr.cmsg_type = type;
cmhdr.cmsg_len = cmlen;
if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
return -EFAULT;
if (copy_to_user(CMSG_COMPAT_DATA(cm), data, cmlen - sizeof(struct compat_cmsghdr)))
return -EFAULT;
cmlen = CMSG_COMPAT_SPACE(len);
if (kmsg->msg_controllen < cmlen)
cmlen = kmsg->msg_controllen;
kmsg->msg_control += cmlen;
kmsg->msg_controllen -= cmlen;
return 0;
}
void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
{
struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
int fdnum = scm->fp->count;
struct file **fp = scm->fp->fp;
int __user *cmfptr;
int err = 0, i;
if (fdnum < fdmax)
fdmax = fdnum;
for (i = 0, cmfptr = (int __user *) CMSG_COMPAT_DATA(cm); i < fdmax; i++, cmfptr++) {
int new_fd;
err = security_file_receive(fp[i]);
if (err)
break;
err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & kmsg->msg_flags
? O_CLOEXEC : 0);
if (err < 0)
break;
new_fd = err;
err = put_user(new_fd, cmfptr);
if (err) {
put_unused_fd(new_fd);
break;
}
/* Bump the usage count and install the file. */
fd_install(new_fd, get_file(fp[i]));
}
if (i > 0) {
int cmlen = CMSG_COMPAT_LEN(i * sizeof(int));
err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
if (!err)
err = put_user(cmlen, &cm->cmsg_len);
if (!err) {
cmlen = CMSG_COMPAT_SPACE(i * sizeof(int));
kmsg->msg_control += cmlen;
kmsg->msg_controllen -= cmlen;
}
}
if (i < fdnum)
kmsg->msg_flags |= MSG_CTRUNC;
/*
* All of the files that fit in the message have had their
* usage counts incremented, so we just free the list.
*/
__scm_destroy(scm);
}
static int do_set_attach_filter(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
compat_uptr_t ptr;
u16 len;
if (!access_ok(VERIFY_READ, fprog32, sizeof(*fprog32)) ||
!access_ok(VERIFY_WRITE, kfprog, sizeof(struct sock_fprog)) ||
__get_user(len, &fprog32->len) ||
__get_user(ptr, &fprog32->filter) ||
__put_user(len, &kfprog->len) ||
__put_user(compat_ptr(ptr), &kfprog->filter))
return -EFAULT;
return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
sizeof(struct sock_fprog));
}
static int do_set_sock_timeout(struct socket *sock, int level,
int optname, char __user *optval, unsigned int optlen)
{
struct compat_timeval __user *up = (struct compat_timeval __user *)optval;
struct timeval ktime;
mm_segment_t old_fs;
int err;
if (optlen < sizeof(*up))
return -EINVAL;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
__get_user(ktime.tv_sec, &up->tv_sec) ||
__get_user(ktime.tv_usec, &up->tv_usec))
return -EFAULT;
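/* Flip the address limit so sock_setsockopt()'s user-copy checks accept
 * the kernel-resident timeval below; the old limit is restored right
 * after the call. */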
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
set_fs(old_fs);
return err;
}
static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (optname == SO_ATTACH_FILTER)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
return do_set_sock_timeout(sock, level, optname, optval, optlen);
return sock_setsockopt(sock, level, optname, optval, optlen);
}
asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
struct socket *sock = sockfd_lookup(fd, &err);
if (sock) {
err = security_socket_setsockopt(sock, level, optname);
if (err) {
sockfd_put(sock);
return err;
}
if (level == SOL_SOCKET)
err = compat_sock_setsockopt(sock, level,
optname, optval, optlen);
else if (sock->ops->compat_setsockopt)
err = sock->ops->compat_setsockopt(sock, level,
optname, optval, optlen);
else
err = sock->ops->setsockopt(sock, level,
optname, optval, optlen);
sockfd_put(sock);
}
return err;
}
static int do_get_sock_timeout(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct compat_timeval __user *up;
struct timeval ktime;
mm_segment_t old_fs;
int len, err;
up = (struct compat_timeval __user *) optval;
if (get_user(len, optlen))
return -EFAULT;
if (len < sizeof(*up))
return -EINVAL;
len = sizeof(ktime);
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
set_fs(old_fs);
if (!err) {
if (put_user(sizeof(*up), optlen) ||
!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__put_user(ktime.tv_sec, &up->tv_sec) ||
__put_user(ktime.tv_usec, &up->tv_usec))
err = -EFAULT;
}
return err;
}
static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
return do_get_sock_timeout(sock, level, optname, optval, optlen);
return sock_getsockopt(sock, level, optname, optval, optlen);
}
int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
struct compat_timeval __user *ctv;
int err;
struct timeval tv;
if (COMPAT_USE_64BIT_TIME)
return sock_get_timestamp(sk, userstamp);
ctv = (struct compat_timeval __user *) userstamp;
err = -ENOENT;
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
tv = ktime_to_timeval(sk->sk_stamp);
if (tv.tv_sec == -1)
return err;
if (tv.tv_sec == 0) {
sk->sk_stamp = ktime_get_real();
tv = ktime_to_timeval(sk->sk_stamp);
}
err = 0;
if (put_user(tv.tv_sec, &ctv->tv_sec) ||
put_user(tv.tv_usec, &ctv->tv_usec))
err = -EFAULT;
return err;
}
EXPORT_SYMBOL(compat_sock_get_timestamp);
int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
struct compat_timespec __user *ctv;
int err;
struct timespec ts;
if (COMPAT_USE_64BIT_TIME)
return sock_get_timestampns(sk, userstamp);
ctv = (struct compat_timespec __user *) userstamp;
err = -ENOENT;
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
ts = ktime_to_timespec(sk->sk_stamp);
if (ts.tv_sec == -1)
return err;
if (ts.tv_sec == 0) {
sk->sk_stamp = ktime_get_real();
ts = ktime_to_timespec(sk->sk_stamp);
}
err = 0;
if (put_user(ts.tv_sec, &ctv->tv_sec) ||
put_user(ts.tv_nsec, &ctv->tv_nsec))
err = -EFAULT;
return err;
}
EXPORT_SYMBOL(compat_sock_get_timestampns);
asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
char __user *optval, int __user *optlen)
{
int err;
struct socket *sock = sockfd_lookup(fd, &err);
if (sock) {
err = security_socket_getsockopt(sock, level, optname);
if (err) {
sockfd_put(sock);
return err;
}
if (level == SOL_SOCKET)
err = compat_sock_getsockopt(sock, level,
optname, optval, optlen);
else if (sock->ops->compat_getsockopt)
err = sock->ops->compat_getsockopt(sock, level,
optname, optval, optlen);
else
err = sock->ops->getsockopt(sock, level,
optname, optval, optlen);
sockfd_put(sock);
}
return err;
}
struct compat_group_req {
__u32 gr_interface;
struct __kernel_sockaddr_storage gr_group
__attribute__ ((aligned(4)));
} __packed;
struct compat_group_source_req {
__u32 gsr_interface;
struct __kernel_sockaddr_storage gsr_group
__attribute__ ((aligned(4)));
struct __kernel_sockaddr_storage gsr_source
__attribute__ ((aligned(4)));
} __packed;
struct compat_group_filter {
__u32 gf_interface;
struct __kernel_sockaddr_storage gf_group
__attribute__ ((aligned(4)));
__u32 gf_fmode;
__u32 gf_numsrc;
struct __kernel_sockaddr_storage gf_slist[1]
__attribute__ ((aligned(4)));
} __packed;
#define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \
sizeof(struct __kernel_sockaddr_storage))
int compat_mc_setsockopt(struct sock *sock, int level, int optname,
char __user *optval, unsigned int optlen,
int (*setsockopt)(struct sock *, int, int, char __user *, unsigned int))
{
char __user *koptval = optval;
int koptlen = optlen;
switch (optname) {
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
struct compat_group_req __user *gr32 = (void *)optval;
struct group_req __user *kgr =
compat_alloc_user_space(sizeof(struct group_req));
u32 interface;
if (!access_ok(VERIFY_READ, gr32, sizeof(*gr32)) ||
!access_ok(VERIFY_WRITE, kgr, sizeof(struct group_req)) ||
__get_user(interface, &gr32->gr_interface) ||
__put_user(interface, &kgr->gr_interface) ||
copy_in_user(&kgr->gr_group, &gr32->gr_group,
sizeof(kgr->gr_group)))
return -EFAULT;
koptval = (char __user *)kgr;
koptlen = sizeof(struct group_req);
break;
}
case MCAST_JOIN_SOURCE_GROUP:
case MCAST_LEAVE_SOURCE_GROUP:
case MCAST_BLOCK_SOURCE:
case MCAST_UNBLOCK_SOURCE:
{
struct compat_group_source_req __user *gsr32 = (void *)optval;
struct group_source_req __user *kgsr = compat_alloc_user_space(
sizeof(struct group_source_req));
u32 interface;
if (!access_ok(VERIFY_READ, gsr32, sizeof(*gsr32)) ||
!access_ok(VERIFY_WRITE, kgsr,
sizeof(struct group_source_req)) ||
__get_user(interface, &gsr32->gsr_interface) ||
__put_user(interface, &kgsr->gsr_interface) ||
copy_in_user(&kgsr->gsr_group, &gsr32->gsr_group,
sizeof(kgsr->gsr_group)) ||
copy_in_user(&kgsr->gsr_source, &gsr32->gsr_source,
sizeof(kgsr->gsr_source)))
return -EFAULT;
koptval = (char __user *)kgsr;
koptlen = sizeof(struct group_source_req);
break;
}
case MCAST_MSFILTER:
{
struct compat_group_filter __user *gf32 = (void *)optval;
struct group_filter __user *kgf;
u32 interface, fmode, numsrc;
if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) ||
__get_user(interface, &gf32->gf_interface) ||
__get_user(fmode, &gf32->gf_fmode) ||
__get_user(numsrc, &gf32->gf_numsrc))
return -EFAULT;
koptlen = optlen + sizeof(struct group_filter) -
sizeof(struct compat_group_filter);
if (koptlen < GROUP_FILTER_SIZE(numsrc))
return -EINVAL;
kgf = compat_alloc_user_space(koptlen);
if (!access_ok(VERIFY_WRITE, kgf, koptlen) ||
__put_user(interface, &kgf->gf_interface) ||
__put_user(fmode, &kgf->gf_fmode) ||
__put_user(numsrc, &kgf->gf_numsrc) ||
copy_in_user(&kgf->gf_group, &gf32->gf_group,
sizeof(kgf->gf_group)) ||
(numsrc && copy_in_user(kgf->gf_slist, gf32->gf_slist,
numsrc * sizeof(kgf->gf_slist[0]))))
return -EFAULT;
koptval = (char __user *)kgf;
break;
}
default:
break;
}
return setsockopt(sock, level, optname, koptval, koptlen);
}
EXPORT_SYMBOL(compat_mc_setsockopt);
int compat_mc_getsockopt(struct sock *sock, int level, int optname,
char __user *optval, int __user *optlen,
int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
{
struct compat_group_filter __user *gf32 = (void *)optval;
struct group_filter __user *kgf;
int __user *koptlen;
u32 interface, fmode, numsrc;
int klen, ulen, err;
if (optname != MCAST_MSFILTER)
return getsockopt(sock, level, optname, optval, optlen);
koptlen = compat_alloc_user_space(sizeof(*koptlen));
if (!access_ok(VERIFY_READ, optlen, sizeof(*optlen)) ||
__get_user(ulen, optlen))
return -EFAULT;
/* adjust len for pad */
klen = ulen + sizeof(*kgf) - sizeof(*gf32);
if (klen < GROUP_FILTER_SIZE(0))
return -EINVAL;
if (!access_ok(VERIFY_WRITE, koptlen, sizeof(*koptlen)) ||
__put_user(klen, koptlen))
return -EFAULT;
/* have to allow space for previous compat_alloc_user_space, too */
kgf = compat_alloc_user_space(klen+sizeof(*optlen));
if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) ||
__get_user(interface, &gf32->gf_interface) ||
__get_user(fmode, &gf32->gf_fmode) ||
__get_user(numsrc, &gf32->gf_numsrc) ||
__put_user(interface, &kgf->gf_interface) ||
__put_user(fmode, &kgf->gf_fmode) ||
__put_user(numsrc, &kgf->gf_numsrc) ||
copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group)))
return -EFAULT;
err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen);
if (err)
return err;
if (!access_ok(VERIFY_READ, koptlen, sizeof(*koptlen)) ||
__get_user(klen, koptlen))
return -EFAULT;
ulen = klen - (sizeof(*kgf)-sizeof(*gf32));
if (!access_ok(VERIFY_WRITE, optlen, sizeof(*optlen)) ||
__put_user(ulen, optlen))
return -EFAULT;
if (!access_ok(VERIFY_READ, kgf, klen) ||
!access_ok(VERIFY_WRITE, gf32, ulen) ||
__get_user(interface, &kgf->gf_interface) ||
__get_user(fmode, &kgf->gf_fmode) ||
__get_user(numsrc, &kgf->gf_numsrc) ||
__put_user(interface, &gf32->gf_interface) ||
__put_user(fmode, &gf32->gf_fmode) ||
__put_user(numsrc, &gf32->gf_numsrc))
return -EFAULT;
if (numsrc) {
int copylen;
klen -= GROUP_FILTER_SIZE(0);
copylen = numsrc * sizeof(gf32->gf_slist[0]);
if (copylen > klen)
copylen = klen;
if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
return -EFAULT;
}
return err;
}
EXPORT_SYMBOL(compat_mc_getsockopt);
/* Argument list sizes for compat_sys_socketcall */
#define AL(x) ((x) * sizeof(u32))
static unsigned char nas[21] = {
AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
AL(4), AL(5), AL(4)
};
#undef AL
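/*
 * Example (a reading aid): compat_sys_socketcall() below copies nas[call]
 * bytes of 32-bit arguments up front, so a SYS_SENDTO call, which takes
 * six arguments, pulls AL(6) == 24 bytes from the user-space array.
 */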
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
{
return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int __user *addrlen)
{
return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
}
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
struct compat_timespec __user *timeout)
{
int datagrams;
struct timespec ktspec;
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
if (COMPAT_USE_64BIT_TIME)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT,
(struct timespec *) timeout);
if (timeout == NULL)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL);
if (get_compat_timespec(&ktspec, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, &ktspec);
if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
datagrams = -EFAULT;
return datagrams;
}
asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
{
int ret;
u32 a[6];
u32 a0, a1;
if (call < SYS_SOCKET || call > SYS_SENDMMSG)
return -EINVAL;
if (copy_from_user(a, args, nas[call]))
return -EFAULT;
a0 = a[0];
a1 = a[1];
switch (call) {
case SYS_SOCKET:
ret = sys_socket(a0, a1, a[2]);
break;
case SYS_BIND:
ret = sys_bind(a0, compat_ptr(a1), a[2]);
break;
case SYS_CONNECT:
ret = sys_connect(a0, compat_ptr(a1), a[2]);
break;
case SYS_LISTEN:
ret = sys_listen(a0, a1);
break;
case SYS_ACCEPT:
ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), 0);
break;
case SYS_GETSOCKNAME:
ret = sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2]));
break;
case SYS_GETPEERNAME:
ret = sys_getpeername(a0, compat_ptr(a1), compat_ptr(a[2]));
break;
case SYS_SOCKETPAIR:
ret = sys_socketpair(a0, a1, a[2], compat_ptr(a[3]));
break;
case SYS_SEND:
ret = sys_send(a0, compat_ptr(a1), a[2], a[3]);
break;
case SYS_SENDTO:
ret = sys_sendto(a0, compat_ptr(a1), a[2], a[3], compat_ptr(a[4]), a[5]);
break;
case SYS_RECV:
ret = compat_sys_recv(a0, compat_ptr(a1), a[2], a[3]);
break;
case SYS_RECVFROM:
ret = compat_sys_recvfrom(a0, compat_ptr(a1), a[2], a[3],
compat_ptr(a[4]), compat_ptr(a[5]));
break;
case SYS_SHUTDOWN:
ret = sys_shutdown(a0, a1);
break;
case SYS_SETSOCKOPT:
ret = compat_sys_setsockopt(a0, a1, a[2],
compat_ptr(a[3]), a[4]);
break;
case SYS_GETSOCKOPT:
ret = compat_sys_getsockopt(a0, a1, a[2],
compat_ptr(a[3]), compat_ptr(a[4]));
break;
case SYS_SENDMSG:
ret = compat_sys_sendmsg(a0, compat_ptr(a1), a[2]);
break;
case SYS_SENDMMSG:
ret = compat_sys_sendmmsg(a0, compat_ptr(a1), a[2], a[3]);
break;
case SYS_RECVMSG:
ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
break;
case SYS_RECVMMSG:
ret = compat_sys_recvmmsg(a0, compat_ptr(a1), a[2], a[3],
compat_ptr(a[4]));
break;
case SYS_ACCEPT4:
ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_2015_0 |
crossvul-cpp_data_bad_763_0 | /*
Copyright 2016 Christian Hoene, Symonics GmbH
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "reader.h"
/*
*
00000370 42 54 4c 46 00 08 00 5b 01 00 00 00 2d 00 00 07 |BTLF...[....-...|
00000380 00 00 00 f8 ea 72 15 00 c9 03 00 00 00 26 00 00 |.....r.......&..|
00000390 14 00 00 00 32 32 7c 17 00 22 02 00 00 00 32 00 |....22|.."....2.|
000003a0 00 0b 00 00 00 07 ef 9c 26 00 bb 01 00 00 00 46 |........&......F|
000003b0 00 00 09 00 00 00 e5 f6 ba 26 00 45 03 00 00 00 |.........&.E....|
000003c0 34 00 00 11 00 00 00 f6 71 f0 2e 00 a3 02 00 00 |4.......q.......|
000003d0 00 3e 00 00 0d 00 00 00 61 36 dc 36 00 79 03 00 |.>......a6.6.y..|
000003e0 00 00 35 00 00 12 00 00 00 97 1b 4e 45 00 88 01 |..5........NE...|
000003f0 00 00 00 33 00 00 08 00 00 00 56 d7 d0 47 00 ae |...3......V..G..|
00000400 03 00 00 00 1b 00 00 13 00 00 00 2f 03 50 5a 00 |.........../.PZ.|
00000410 22 01 00 00 00 39 00 00 06 00 00 00 b7 88 37 66 |"....9........7f|
00000420 00 01 03 00 00 00 28 00 00 0f 00 00 00 dc aa 47 |......(........G|
00000430 66 00 16 04 00 00 00 2c 00 00 15 00 00 00 6b 54 |f......,......kT|
00000440 7d 77 00 fd 00 00 00 00 25 00 00 05 00 00 00 7d |}w......%......}|
00000450 0c 8c 9e 00 29 03 00 00 00 1c 00 00 10 00 00 00 |....)...........|
00000460 4c f3 0e a0 00 16 00 00 00 00 25 00 00 00 00 00 |L.........%.....|
00000470 00 e7 30 2d ab 00 01 02 00 00 00 21 00 00 0a 00 |..0-.......!....|
00000480 00 00 35 b5 69 b0 00 e1 02 00 00 00 20 00 00 0e |..5.i....... ...|
00000490 00 00 00 2b c5 8b c4 00 3b 00 00 00 00 20 00 00 |...+....;.... ..|
000004a0 01 00 00 00 09 a0 74 cc 00 93 00 00 00 00 2f 00 |......t......./.|
000004b0 00 03 00 00 00 3f 48 ef d6 00 5b 00 00 00 00 38 |.....?H...[....8|
000004c0 00 00 02 00 00 00 f1 7e 7d dd 00 54 02 00 00 00 |.......~}..T....|
000004d0 4f 00 00 0c 00 00 00 48 35 ff f5 00 c2 00 00 00 |O......H5.......|
000004e0 00 3b 00 00 04 00 00 00 ad 61 4e ff 63 42 f7 73 |.;.......aN.cB.s|
000004f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*
00000570 42 54 4c 46 00 09 00 16 00 00 00 00 25 00 00 00 |BTLF........%...|
00000580 00 00 00 00 3b 00 00 00 00 20 00 00 01 00 00 00 |....;.... ......|
00000590 00 5b 00 00 00 00 38 00 00 02 00 00 00 00 93 00 |.[....8.........|
000005a0 00 00 00 2f 00 00 03 00 00 00 00 c2 00 00 00 00 |.../............|
000005b0 3b 00 00 04 00 00 00 00 fd 00 00 00 00 25 00 00 |;............%..|
000005c0 05 00 00 00 00 22 01 00 00 00 39 00 00 06 00 00 |....."....9.....|
000005d0 00 00 5b 01 00 00 00 2d 00 00 07 00 00 00 00 88 |..[....-........|
000005e0 01 00 00 00 33 00 00 08 00 00 00 00 bb 01 00 00 |....3...........|
000005f0 00 46 00 00 09 00 00 00 00 01 02 00 00 00 21 00 |.F............!.|
00000600 00 0a 00 00 00 00 22 02 00 00 00 32 00 00 0b 00 |......"....2....|
00000610 00 00 00 54 02 00 00 00 4f 00 00 0c 00 00 00 00 |...T....O.......|
00000620 a3 02 00 00 00 3e 00 00 0d 00 00 00 00 e1 02 00 |.....>..........|
00000630 00 00 20 00 00 0e 00 00 00 00 01 03 00 00 00 28 |.. ............(|
00000640 00 00 0f 00 00 00 00 29 03 00 00 00 1c 00 00 10 |.......)........|
00000650 00 00 00 00 45 03 00 00 00 34 00 00 11 00 00 00 |....E....4......|
00000660 00 79 03 00 00 00 35 00 00 12 00 00 00 00 ae 03 |.y....5.........|
00000670 00 00 00 1b 00 00 13 00 00 00 00 c9 03 00 00 00 |................|
00000680 26 00 00 14 00 00 00 00 16 04 00 00 00 2c 00 00 |&............,..|
00000690 15 00 00 00 d3 c7 19 a0 00 00 00 00 00 00 00 00 |................|
000006a0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*/
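/*
 * Layout note (inferred from the parser below, not quoted from the HDF5
 * specification): each type-5 record in the dumps above occupies 11 bytes,
 * a 4-byte hash of the link name followed by a 7-byte heap id.
 */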
static int readBTLF(struct READER *reader, struct BTREE *btree,
int number_of_records, union RECORD *records) {
int i;
uint8_t type, message_flags;
uint32_t creation_order, hash_of_name;
uint64_t heap_id;
char buf[4];
UNUSED(heap_id);
UNUSED(hash_of_name);
UNUSED(creation_order);
UNUSED(message_flags);
/* read signature */
if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "BTLF", 4)) {
log("cannot read signature of BTLF\n");
return MYSOFA_INVALID_FORMAT;
} log("%08lX %.4s\n", (uint64_t )ftell(reader->fhd) - 4, buf);
if (fgetc(reader->fhd) != 0) {
log("object BTLF must have version 0\n");
return MYSOFA_INVALID_FORMAT;
}
type = (uint8_t)fgetc(reader->fhd);
for (i = 0; i < number_of_records; i++) {
switch (type) {
case 5:
records->type5.hash_of_name = (uint32_t)readValue(reader, 4);
records->type5.heap_id = readValue(reader, 7);
log(" type5 %08X %14lX\n", records->type5.hash_of_name,
records->type5.heap_id);
records++;
break;
case 6:
/*creation_order = */readValue(reader, 8);
/*heap_id = */readValue(reader, 7);
break;
case 8:
/*heap_id = */readValue(reader, 8);
/*message_flags = */fgetc(reader->fhd);
/*creation_order = */readValue(reader, 4);
/*hash_of_name = */readValue(reader, 4);
break;
case 9:
/*heap_id = */readValue(reader, 8);
/*message_flags = */fgetc(reader->fhd);
/*creation_order = */readValue(reader, 4);
break;
default:
log("object BTLF has unknown type %d\n", type);
return MYSOFA_INVALID_FORMAT;
}
}
/* fseeko(reader->fhd, bthd->root_node_address + bthd->node_size, SEEK_SET); skip checksum */
return MYSOFA_OK;
}
/* III.A.2. Disk Format: Level 1A2 - Version 2 B-trees
000002d0 32 1d 42 54 48 44 00 08 00 02 00 00 11 00 00 00 |2.BTHD..........|
000002e0 64 28 70 03 00 00 00 00 00 00 16 00 16 00 00 00 |d(p.............|
000002f0 00 00 00 00 30 12 d9 6e 42 54 48 44 00 09 00 02 |....0..nBTHD....|
00000300 00 00 0d 00 00 00 64 28 70 05 00 00 00 00 00 00 |......d(p.......|
00000310 16 00 16 00 00 00 00 00 00 00 e2 0d 76 5c 46 53 |............v\FS|
*/
int btreeRead(struct READER *reader, struct BTREE *btree) {
char buf[4];
/* read signature */
if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "BTHD", 4)) {
log("cannot read signature of BTHD\n");
return MYSOFA_INVALID_FORMAT;
} log("%08lX %.4s\n", (uint64_t )ftell(reader->fhd) - 4, buf);
if (fgetc(reader->fhd) != 0) {
log("object BTHD must have version 0\n");
return MYSOFA_INVALID_FORMAT;
}
btree->type = (uint8_t)fgetc(reader->fhd);
btree->node_size = (uint32_t)readValue(reader, 4);
btree->record_size = (uint16_t)readValue(reader, 2);
btree->depth = (uint16_t)readValue(reader, 2);
btree->split_percent = (uint8_t)fgetc(reader->fhd);
btree->merge_percent = (uint8_t)fgetc(reader->fhd);
btree->root_node_address = (uint64_t)readValue(reader,
reader->superblock.size_of_offsets);
btree->number_of_records = (uint16_t)readValue(reader, 2);
if (btree->number_of_records > 0x1000)
return MYSOFA_UNSUPPORTED_FORMAT;
btree->total_number = (uint64_t)readValue(reader, reader->superblock.size_of_lengths);
/* fseek(reader->fhd, 4, SEEK_CUR); skip checksum */
if (btree->total_number > 0x10000000)
return MYSOFA_NO_MEMORY;
btree->records = malloc(sizeof(btree->records[0]) * btree->total_number);
if (!btree->records)
return MYSOFA_NO_MEMORY;
memset(btree->records, 0, sizeof(btree->records[0]) * btree->total_number);
/* read records */
if (fseek(reader->fhd, btree->root_node_address, SEEK_SET) < 0)
return errno;
return readBTLF(reader, btree, btree->number_of_records, btree->records);
}
void btreeFree(struct BTREE *btree) {
free(btree->records);
}
/* III.A.1. Disk Format: Level 1A1 - Version 1 B-trees
*
*/
int treeRead(struct READER *reader, struct DATAOBJECT *data) {
int i, j, err, olen, elements, size, x, y, z, b, e, dy, dz, sx, sy, sz, dzy,
szy;
char *input, *output;
uint8_t node_type, node_level;
uint16_t entries_used;
uint32_t size_of_chunk;
uint32_t filter_mask;
uint64_t address_of_left_sibling, address_of_right_sibling, start[4],
child_pointer, key, store;
char buf[4];
UNUSED(node_level);
UNUSED(address_of_right_sibling);
UNUSED(address_of_left_sibling);
UNUSED(key);
if (data->ds.dimensionality > 3) {
log("TREE dimensions > 3");
return MYSOFA_INVALID_FORMAT;
}
/* read signature */
if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "TREE", 4)) {
log("cannot read signature of TREE\n");
return MYSOFA_INVALID_FORMAT;
} log("%08lX %.4s\n", (uint64_t )ftell(reader->fhd) - 4, buf);
node_type = (uint8_t)fgetc(reader->fhd);
node_level = (uint8_t)fgetc(reader->fhd);
entries_used = (uint16_t)readValue(reader, 2);
if (entries_used > 0x1000)
return MYSOFA_UNSUPPORTED_FORMAT;
address_of_left_sibling = readValue(reader,
reader->superblock.size_of_offsets);
address_of_right_sibling = readValue(reader,
reader->superblock.size_of_offsets);
elements = 1;
for (j = 0; j < data->ds.dimensionality; j++)
elements *= data->datalayout_chunk[j];
dy = data->datalayout_chunk[1];
dz = data->datalayout_chunk[2];
sx = data->ds.dimension_size[0];
sy = data->ds.dimension_size[1];
sz = data->ds.dimension_size[2];
dzy = dz * dy;
szy = sz * sy;
size = data->datalayout_chunk[data->ds.dimensionality];
log("elements %d size %d\n",elements,size);
if (!(output = malloc(elements * size))) {
return MYSOFA_NO_MEMORY;
}
for (e = 0; e < entries_used * 2; e++) {
if (node_type == 0) {
key = readValue(reader, reader->superblock.size_of_lengths);
} else {
size_of_chunk = (uint32_t)readValue(reader, 4);
filter_mask = (uint32_t)readValue(reader, 4);
if (filter_mask) {
log("TREE all filters must be enabled\n");
free(output);
return MYSOFA_INVALID_FORMAT;
}
for (j = 0; j < data->ds.dimensionality; j++) {
start[j] = readValue(reader, 8);
log("start %d %lu\n",j,start[j]);
}
if (readValue(reader, 8)) {
break;
}
child_pointer = readValue(reader,
reader->superblock.size_of_offsets);
log(" data at %lX len %u\n", child_pointer, size_of_chunk);
/* read data */
store = ftell(reader->fhd);
if (fseek(reader->fhd, child_pointer, SEEK_SET) < 0) {
free(output);
return errno;
}
if (!(input = malloc(size_of_chunk))) {
free(output);
return MYSOFA_NO_MEMORY;
}
if (fread(input, 1, size_of_chunk, reader->fhd) != size_of_chunk) {
free(output);
free(input);
return MYSOFA_INVALID_FORMAT;
}
olen = elements * size;
err = gunzip(size_of_chunk, input, &olen, output);
free(input);
log(" gunzip %d %d %d\n",err, olen, elements*size);
if (err || olen != elements * size) {
free(output);
return MYSOFA_INVALID_FORMAT;
}
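/* The inflated chunk appears byte-shuffled: output[i] holds byte
 * b = i / elements of element i % elements, consistent with HDF5's
 * shuffle filter (an inference from the index math, not stated in
 * this file). The loops below re-interleave those bytes while
 * clipping the chunk against the dataset bounds. */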
switch (data->ds.dimensionality) {
case 1:
for (i = 0; i < olen; i++) {
b = i / elements;
x = i % elements + start[0];
if (x < sx) {
j = x * size + b;
((char*)data->data)[j] = output[i];
}
}
break;
case 2:
for (i = 0; i < olen; i++) {
b = i / elements;
x = i % elements;
y = x % dy + start[1];
x = x / dy + start[0];
if (y < sy && x < sx) {
j = ((x * sy + y) * size) + b;
((char*)data->data)[j] = output[i];
}
}
break;
case 3:
for (i = 0; i < olen; i++) {
b = i / elements;
x = i % elements;
z = x % dz + start[2];
y = (x / dz) % dy + start[1];
x = (x / dzy) + start[0];
if (z < sz && y < sy && x < sx) {
j = (x * szy + y * sz + z) * size + b;
((char*)data->data)[j] = output[i];
}
}
break;
default:
log("invalid dim\n");
free(output);
return MYSOFA_INTERNAL_ERROR;
}
if (fseek(reader->fhd, store, SEEK_SET) < 0) {
free(output);
return errno;
}
}
}
free(output);
if (fseek(reader->fhd, 4, SEEK_CUR) < 0) /* skip checksum */
return errno;
return MYSOFA_OK;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_763_0 |
crossvul-cpp_data_bad_4900_0 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
/*
* Code to handle user-settable options. This is all pretty much table-
* driven. Checklist for adding a new option:
* - Put it in the options array below (copy an existing entry).
* - For a global option: Add a variable for it in option.h.
* - For a buffer or window local option:
* - Add a PV_XX entry to the enum below.
* - Add a variable to the window or buffer struct in structs.h.
* - For a window option, add some code to copy_winopt().
* - For a buffer option, add some code to buf_copy_options().
* - For a buffer string option, add code to check_buf_options().
* - If it's a numeric option, add any necessary bounds checks to do_set().
* - If it's a list of flags, add some code in do_set(), search for WW_ALL.
* - When adding an option with expansion (P_EXPAND), but with a different
* default for Vi and Vim (no P_VI_DEF), add some code at VIMEXP.
* - Add documentation! One line in doc/quickref.txt, full description in
* options.txt, and any other related places.
* - Add an entry in runtime/optwin.vim.
* When making changes:
* - Adjust the help for the option in doc/option.txt.
* - When an entry has the P_VIM flag, or is lacking the P_VI_DEF flag, add a
* comment at the help for the 'compatible' option.
*/
#define IN_OPTION_C
#include "vim.h"
/*
* The options that are local to a window or buffer have "indir" set to one of
* these values. Special values:
* PV_NONE: global option.
* PV_WIN is added: window-local option
* PV_BUF is added: buffer-local option
* PV_BOTH is added: global option which also has a local value.
*/
#define PV_BOTH 0x1000
#define PV_WIN 0x2000
#define PV_BUF 0x4000
#define PV_MASK 0x0fff
#define OPT_WIN(x) (idopt_T)(PV_WIN + (int)(x))
#define OPT_BUF(x) (idopt_T)(PV_BUF + (int)(x))
#define OPT_BOTH(x) (idopt_T)(PV_BOTH + (int)(x))
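/*
 * Worked example (a reading aid): PV_AR below is OPT_BOTH(OPT_BUF(BV_AR)),
 * that is PV_BOTH | PV_BUF | BV_AR, marking 'autoread' as a buffer-local
 * option that also keeps a global value; masking with PV_MASK recovers
 * the BV_AR index again.
 */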
/*
* Definition of the PV_ values for buffer-local options.
* The BV_ values are defined in option.h.
*/
#define PV_AI OPT_BUF(BV_AI)
#define PV_AR OPT_BOTH(OPT_BUF(BV_AR))
#define PV_BKC OPT_BOTH(OPT_BUF(BV_BKC))
#ifdef FEAT_QUICKFIX
# define PV_BH OPT_BUF(BV_BH)
# define PV_BT OPT_BUF(BV_BT)
# define PV_EFM OPT_BOTH(OPT_BUF(BV_EFM))
# define PV_GP OPT_BOTH(OPT_BUF(BV_GP))
# define PV_MP OPT_BOTH(OPT_BUF(BV_MP))
#endif
#define PV_BIN OPT_BUF(BV_BIN)
#define PV_BL OPT_BUF(BV_BL)
#ifdef FEAT_MBYTE
# define PV_BOMB OPT_BUF(BV_BOMB)
#endif
#define PV_CI OPT_BUF(BV_CI)
#ifdef FEAT_CINDENT
# define PV_CIN OPT_BUF(BV_CIN)
# define PV_CINK OPT_BUF(BV_CINK)
# define PV_CINO OPT_BUF(BV_CINO)
#endif
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
# define PV_CINW OPT_BUF(BV_CINW)
#endif
#define PV_CM OPT_BOTH(OPT_BUF(BV_CM))
#ifdef FEAT_FOLDING
# define PV_CMS OPT_BUF(BV_CMS)
#endif
#ifdef FEAT_COMMENTS
# define PV_COM OPT_BUF(BV_COM)
#endif
#ifdef FEAT_INS_EXPAND
# define PV_CPT OPT_BUF(BV_CPT)
# define PV_DICT OPT_BOTH(OPT_BUF(BV_DICT))
# define PV_TSR OPT_BOTH(OPT_BUF(BV_TSR))
#endif
#ifdef FEAT_COMPL_FUNC
# define PV_CFU OPT_BUF(BV_CFU)
#endif
#ifdef FEAT_FIND_ID
# define PV_DEF OPT_BOTH(OPT_BUF(BV_DEF))
# define PV_INC OPT_BOTH(OPT_BUF(BV_INC))
#endif
#define PV_EOL OPT_BUF(BV_EOL)
#define PV_FIXEOL OPT_BUF(BV_FIXEOL)
#define PV_EP OPT_BOTH(OPT_BUF(BV_EP))
#define PV_ET OPT_BUF(BV_ET)
#ifdef FEAT_MBYTE
# define PV_FENC OPT_BUF(BV_FENC)
#endif
#if defined(FEAT_BEVAL) && defined(FEAT_EVAL)
# define PV_BEXPR OPT_BOTH(OPT_BUF(BV_BEXPR))
#endif
#ifdef FEAT_EVAL
# define PV_FEX OPT_BUF(BV_FEX)
#endif
#define PV_FF OPT_BUF(BV_FF)
#define PV_FLP OPT_BUF(BV_FLP)
#define PV_FO OPT_BUF(BV_FO)
#ifdef FEAT_AUTOCMD
# define PV_FT OPT_BUF(BV_FT)
#endif
#define PV_IMI OPT_BUF(BV_IMI)
#define PV_IMS OPT_BUF(BV_IMS)
#if defined(FEAT_CINDENT) && defined(FEAT_EVAL)
# define PV_INDE OPT_BUF(BV_INDE)
# define PV_INDK OPT_BUF(BV_INDK)
#endif
#if defined(FEAT_FIND_ID) && defined(FEAT_EVAL)
# define PV_INEX OPT_BUF(BV_INEX)
#endif
#define PV_INF OPT_BUF(BV_INF)
#define PV_ISK OPT_BUF(BV_ISK)
#ifdef FEAT_CRYPT
# define PV_KEY OPT_BUF(BV_KEY)
#endif
#ifdef FEAT_KEYMAP
# define PV_KMAP OPT_BUF(BV_KMAP)
#endif
#define PV_KP OPT_BOTH(OPT_BUF(BV_KP))
#ifdef FEAT_LISP
# define PV_LISP OPT_BUF(BV_LISP)
# define PV_LW OPT_BOTH(OPT_BUF(BV_LW))
#endif
#define PV_MA OPT_BUF(BV_MA)
#define PV_ML OPT_BUF(BV_ML)
#define PV_MOD OPT_BUF(BV_MOD)
#define PV_MPS OPT_BUF(BV_MPS)
#define PV_NF OPT_BUF(BV_NF)
#ifdef FEAT_COMPL_FUNC
# define PV_OFU OPT_BUF(BV_OFU)
#endif
#define PV_PATH OPT_BOTH(OPT_BUF(BV_PATH))
#define PV_PI OPT_BUF(BV_PI)
#ifdef FEAT_TEXTOBJ
# define PV_QE OPT_BUF(BV_QE)
#endif
#define PV_RO OPT_BUF(BV_RO)
#ifdef FEAT_SMARTINDENT
# define PV_SI OPT_BUF(BV_SI)
#endif
#define PV_SN OPT_BUF(BV_SN)
#ifdef FEAT_SYN_HL
# define PV_SMC OPT_BUF(BV_SMC)
# define PV_SYN OPT_BUF(BV_SYN)
#endif
#ifdef FEAT_SPELL
# define PV_SPC OPT_BUF(BV_SPC)
# define PV_SPF OPT_BUF(BV_SPF)
# define PV_SPL OPT_BUF(BV_SPL)
#endif
#define PV_STS OPT_BUF(BV_STS)
#ifdef FEAT_SEARCHPATH
# define PV_SUA OPT_BUF(BV_SUA)
#endif
#define PV_SW OPT_BUF(BV_SW)
#define PV_SWF OPT_BUF(BV_SWF)
#define PV_TAGS OPT_BOTH(OPT_BUF(BV_TAGS))
#define PV_TC OPT_BOTH(OPT_BUF(BV_TC))
#define PV_TS OPT_BUF(BV_TS)
#define PV_TW OPT_BUF(BV_TW)
#define PV_TX OPT_BUF(BV_TX)
#ifdef FEAT_PERSISTENT_UNDO
# define PV_UDF OPT_BUF(BV_UDF)
#endif
#define PV_WM OPT_BUF(BV_WM)
/*
* Definition of the PV_ values for window-local options.
* The WV_ values are defined in option.h.
*/
#define PV_LIST OPT_WIN(WV_LIST)
#ifdef FEAT_ARABIC
# define PV_ARAB OPT_WIN(WV_ARAB)
#endif
#ifdef FEAT_LINEBREAK
# define PV_BRI OPT_WIN(WV_BRI)
# define PV_BRIOPT OPT_WIN(WV_BRIOPT)
#endif
#ifdef FEAT_DIFF
# define PV_DIFF OPT_WIN(WV_DIFF)
#endif
#ifdef FEAT_FOLDING
# define PV_FDC OPT_WIN(WV_FDC)
# define PV_FEN OPT_WIN(WV_FEN)
# define PV_FDI OPT_WIN(WV_FDI)
# define PV_FDL OPT_WIN(WV_FDL)
# define PV_FDM OPT_WIN(WV_FDM)
# define PV_FML OPT_WIN(WV_FML)
# define PV_FDN OPT_WIN(WV_FDN)
# ifdef FEAT_EVAL
# define PV_FDE OPT_WIN(WV_FDE)
# define PV_FDT OPT_WIN(WV_FDT)
# endif
# define PV_FMR OPT_WIN(WV_FMR)
#endif
#ifdef FEAT_LINEBREAK
# define PV_LBR OPT_WIN(WV_LBR)
#endif
#define PV_NU OPT_WIN(WV_NU)
#define PV_RNU OPT_WIN(WV_RNU)
#ifdef FEAT_LINEBREAK
# define PV_NUW OPT_WIN(WV_NUW)
#endif
#if defined(FEAT_WINDOWS) && defined(FEAT_QUICKFIX)
# define PV_PVW OPT_WIN(WV_PVW)
#endif
#ifdef FEAT_RIGHTLEFT
# define PV_RL OPT_WIN(WV_RL)
# define PV_RLC OPT_WIN(WV_RLC)
#endif
#ifdef FEAT_SCROLLBIND
# define PV_SCBIND OPT_WIN(WV_SCBIND)
#endif
#define PV_SCROLL OPT_WIN(WV_SCROLL)
#ifdef FEAT_SPELL
# define PV_SPELL OPT_WIN(WV_SPELL)
#endif
#ifdef FEAT_SYN_HL
# define PV_CUC OPT_WIN(WV_CUC)
# define PV_CUL OPT_WIN(WV_CUL)
# define PV_CC OPT_WIN(WV_CC)
#endif
#ifdef FEAT_STL_OPT
# define PV_STL OPT_BOTH(OPT_WIN(WV_STL))
#endif
#define PV_UL OPT_BOTH(OPT_BUF(BV_UL))
#ifdef FEAT_WINDOWS
# define PV_WFH OPT_WIN(WV_WFH)
# define PV_WFW OPT_WIN(WV_WFW)
#endif
#define PV_WRAP OPT_WIN(WV_WRAP)
#ifdef FEAT_CURSORBIND
# define PV_CRBIND OPT_WIN(WV_CRBIND)
#endif
#ifdef FEAT_CONCEAL
# define PV_COCU OPT_WIN(WV_COCU)
# define PV_COLE OPT_WIN(WV_COLE)
#endif
#ifdef FEAT_SIGNS
# define PV_SCL OPT_WIN(WV_SCL)
#endif
/* WV_ and BV_ values get typecasted to this for the "indir" field */
typedef enum
{
PV_NONE = 0,
PV_MAXVAL = 0xffff /* to avoid warnings for value out of range */
} idopt_T;
/*
* Options local to a window have a value local to a buffer and global to all
* buffers. Indicate this by setting "var" to VAR_WIN.
*/
#define VAR_WIN ((char_u *)-1)
/*
* These are the global values for options which are also local to a buffer.
* Only to be used in option.c!
*/
static int p_ai;
static int p_bin;
#ifdef FEAT_MBYTE
static int p_bomb;
#endif
#if defined(FEAT_QUICKFIX)
static char_u *p_bh;
static char_u *p_bt;
#endif
static int p_bl;
static int p_ci;
#ifdef FEAT_CINDENT
static int p_cin;
static char_u *p_cink;
static char_u *p_cino;
#endif
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
static char_u *p_cinw;
#endif
#ifdef FEAT_COMMENTS
static char_u *p_com;
#endif
#ifdef FEAT_FOLDING
static char_u *p_cms;
#endif
#ifdef FEAT_INS_EXPAND
static char_u *p_cpt;
#endif
#ifdef FEAT_COMPL_FUNC
static char_u *p_cfu;
static char_u *p_ofu;
#endif
static int p_eol;
static int p_fixeol;
static int p_et;
#ifdef FEAT_MBYTE
static char_u *p_fenc;
#endif
static char_u *p_ff;
static char_u *p_fo;
static char_u *p_flp;
#ifdef FEAT_AUTOCMD
static char_u *p_ft;
#endif
static long p_iminsert;
static long p_imsearch;
#if defined(FEAT_FIND_ID) && defined(FEAT_EVAL)
static char_u *p_inex;
#endif
#if defined(FEAT_CINDENT) && defined(FEAT_EVAL)
static char_u *p_inde;
static char_u *p_indk;
#endif
#if defined(FEAT_EVAL)
static char_u *p_fex;
#endif
static int p_inf;
static char_u *p_isk;
#ifdef FEAT_CRYPT
static char_u *p_key;
#endif
#ifdef FEAT_LISP
static int p_lisp;
#endif
static int p_ml;
static int p_ma;
static int p_mod;
static char_u *p_mps;
static char_u *p_nf;
static int p_pi;
#ifdef FEAT_TEXTOBJ
static char_u *p_qe;
#endif
static int p_ro;
#ifdef FEAT_SMARTINDENT
static int p_si;
#endif
static int p_sn;
static long p_sts;
#if defined(FEAT_SEARCHPATH)
static char_u *p_sua;
#endif
static long p_sw;
static int p_swf;
#ifdef FEAT_SYN_HL
static long p_smc;
static char_u *p_syn;
#endif
#ifdef FEAT_SPELL
static char_u *p_spc;
static char_u *p_spf;
static char_u *p_spl;
#endif
static long p_ts;
static long p_tw;
static int p_tx;
#ifdef FEAT_PERSISTENT_UNDO
static int p_udf;
#endif
static long p_wm;
#ifdef FEAT_KEYMAP
static char_u *p_keymap;
#endif
/* Saved values for when 'bin' is set. */
static int p_et_nobin;
static int p_ml_nobin;
static long p_tw_nobin;
static long p_wm_nobin;
/* Saved values for when 'paste' is set */
static int p_ai_nopaste;
static int p_et_nopaste;
static long p_sts_nopaste;
static long p_tw_nopaste;
static long p_wm_nopaste;
struct vimoption
{
char *fullname; /* full option name */
char *shortname; /* permissible abbreviation */
long_u flags; /* see below */
char_u *var; /* global option: pointer to variable;
* window-local option: VAR_WIN;
* buffer-local option: global value */
idopt_T indir; /* global option: PV_NONE;
* local option: indirect option index */
char_u *def_val[2]; /* default values for variable (vi and vim) */
#ifdef FEAT_EVAL
scid_T scriptID; /* script in which the option was last set */
# define SCRIPTID_INIT , 0
#else
# define SCRIPTID_INIT
#endif
};
#define VI_DEFAULT 0 /* def_val[VI_DEFAULT] is Vi default value */
#define VIM_DEFAULT 1 /* def_val[VIM_DEFAULT] is Vim default value */
/*
* Flags
*/
#define P_BOOL 0x01 /* the option is boolean */
#define P_NUM 0x02 /* the option is numeric */
#define P_STRING 0x04 /* the option is a string */
#define P_ALLOCED 0x08 /* the string option is in allocated memory,
must use free_string_option() when
assigning new value. Not set if default is
the same. */
#define P_EXPAND 0x10 /* environment expansion. NOTE: P_EXPAND can
never be used for local or hidden options! */
#define P_NODEFAULT 0x40 /* don't set to default value */
#define P_DEF_ALLOCED 0x80 /* default value is in allocated memory, must
use vim_free() when assigning new value */
#define P_WAS_SET 0x100 /* option has been set/reset */
#define P_NO_MKRC 0x200 /* don't include in :mkvimrc output */
#define P_VI_DEF 0x400 /* Use Vi default for Vim */
#define P_VIM 0x800 /* Vim option, reset when 'cp' set */
/* when option changed, what to display: */
#define P_RSTAT 0x1000 /* redraw status lines */
#define P_RWIN 0x2000 /* redraw current window */
#define P_RBUF 0x4000 /* redraw current buffer */
#define P_RALL 0x6000 /* redraw all windows */
#define P_RCLR 0x7000 /* clear and redraw all */
#define P_COMMA 0x8000 /* comma separated list */
#define P_ONECOMMA 0x18000L /* P_COMMA and cannot have two consecutive
* commas */
#define P_NODUP 0x20000L /* don't allow duplicate strings */
#define P_FLAGLIST 0x40000L /* list of single-char flags */
#define P_SECURE 0x80000L /* cannot change in modeline or secure mode */
#define P_GETTEXT 0x100000L /* expand default value with _() */
#define P_NOGLOB 0x200000L /* do not use local value for global vimrc */
#define P_NFNAME 0x400000L /* only normal file name chars allowed */
#define P_INSECURE 0x800000L /* option was set from a modeline */
#define P_PRI_MKRC 0x1000000L /* priority for :mkvimrc (setting option has
side effects) */
#define P_NO_ML 0x2000000L /* not allowed in modeline */
#define P_CURSWANT 0x4000000L /* update curswant required; not needed when
* there is a redraw flag */
#define ISK_LATIN1 (char_u *)"@,48-57,_,192-255"
/* 'isprint' for latin1 is also used for MS-Windows cp1252, where 0x80 is used
* for the currency sign. */
#if defined(MSWIN)
# define ISP_LATIN1 (char_u *)"@,~-255"
#else
# define ISP_LATIN1 (char_u *)"@,161-255"
#endif
/* Make the string as short as possible when compiling with few features. */
#if defined(FEAT_DIFF) || defined(FEAT_FOLDING) || defined(FEAT_SPELL) \
|| defined(FEAT_WINDOWS) || defined(FEAT_CLIPBOARD) \
|| defined(FEAT_INS_EXPAND) || defined(FEAT_SYN_HL) || defined(FEAT_CONCEAL)
# define HIGHLIGHT_INIT "8:SpecialKey,~:EndOfBuffer,@:NonText,d:Directory,e:ErrorMsg,i:IncSearch,l:Search,m:MoreMsg,M:ModeMsg,n:LineNr,N:CursorLineNr,r:Question,s:StatusLine,S:StatusLineNC,c:VertSplit,t:Title,v:Visual,V:VisualNOS,w:WarningMsg,W:WildMenu,f:Folded,F:FoldColumn,A:DiffAdd,C:DiffChange,D:DiffDelete,T:DiffText,>:SignColumn,-:Conceal,B:SpellBad,P:SpellCap,R:SpellRare,L:SpellLocal,+:Pmenu,=:PmenuSel,x:PmenuSbar,X:PmenuThumb,*:TabLine,#:TabLineSel,_:TabLineFill,!:CursorColumn,.:CursorLine,o:ColorColumn"
#else
# define HIGHLIGHT_INIT "8:SpecialKey,@:NonText,d:Directory,e:ErrorMsg,i:IncSearch,l:Search,m:MoreMsg,M:ModeMsg,n:LineNr,N:CursorLineNr,r:Question,s:StatusLine,S:StatusLineNC,t:Title,v:Visual,w:WarningMsg,W:WildMenu,>:SignColumn,*:TabLine,#:TabLineSel,_:TabLineFill"
#endif
/*
* options[] is initialized here.
* The order of the options MUST be alphabetic for ":set all" and findoption().
* All option names MUST start with a lowercase letter (for findoption()).
* Exception: "t_" options are at the end.
* The options with a NULL variable are 'hidden': a set command for them is
* ignored and they are not printed.
*/
static struct vimoption options[] =
{
{"aleph", "al", P_NUM|P_VI_DEF|P_CURSWANT,
#ifdef FEAT_RIGHTLEFT
(char_u *)&p_aleph, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{
#if (defined(WIN3264)) && !defined(FEAT_GUI_W32)
(char_u *)128L,
#else
(char_u *)224L,
#endif
(char_u *)0L} SCRIPTID_INIT},
{"antialias", "anti", P_BOOL|P_VI_DEF|P_VIM|P_RCLR,
#if defined(FEAT_GUI) && defined(MACOS_X)
(char_u *)&p_antialias, PV_NONE,
{(char_u *)FALSE, (char_u *)FALSE}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)FALSE}
#endif
SCRIPTID_INIT},
{"arabic", "arab", P_BOOL|P_VI_DEF|P_VIM|P_CURSWANT,
#ifdef FEAT_ARABIC
(char_u *)VAR_WIN, PV_ARAB,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"arabicshape", "arshape", P_BOOL|P_VI_DEF|P_VIM|P_RCLR,
#ifdef FEAT_ARABIC
(char_u *)&p_arshape, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"allowrevins", "ari", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_RIGHTLEFT
(char_u *)&p_ari, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"altkeymap", "akm", P_BOOL|P_VI_DEF,
#ifdef FEAT_FKMAP
(char_u *)&p_altkeymap, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"ambiwidth", "ambw", P_STRING|P_VI_DEF|P_RCLR,
#if defined(FEAT_MBYTE)
(char_u *)&p_ambw, PV_NONE,
{(char_u *)"single", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
#ifdef FEAT_AUTOCHDIR
{"autochdir", "acd", P_BOOL|P_VI_DEF,
(char_u *)&p_acd, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
#endif
{"autoindent", "ai", P_BOOL|P_VI_DEF,
(char_u *)&p_ai, PV_AI,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"autoprint", "ap", P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"autoread", "ar", P_BOOL|P_VI_DEF,
(char_u *)&p_ar, PV_AR,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"autowrite", "aw", P_BOOL|P_VI_DEF,
(char_u *)&p_aw, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"autowriteall","awa", P_BOOL|P_VI_DEF,
(char_u *)&p_awa, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"background", "bg", P_STRING|P_VI_DEF|P_RCLR,
(char_u *)&p_bg, PV_NONE,
{
#if (defined(WIN3264)) && !defined(FEAT_GUI)
(char_u *)"dark",
#else
(char_u *)"light",
#endif
(char_u *)0L} SCRIPTID_INIT},
{"backspace", "bs", P_STRING|P_VI_DEF|P_VIM|P_ONECOMMA|P_NODUP,
(char_u *)&p_bs, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"backup", "bk", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_bk, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"backupcopy", "bkc", P_STRING|P_VIM|P_ONECOMMA|P_NODUP,
(char_u *)&p_bkc, PV_BKC,
#ifdef UNIX
{(char_u *)"yes", (char_u *)"auto"}
#else
{(char_u *)"auto", (char_u *)"auto"}
#endif
SCRIPTID_INIT},
{"backupdir", "bdir", P_STRING|P_EXPAND|P_VI_DEF|P_ONECOMMA
|P_NODUP|P_SECURE,
(char_u *)&p_bdir, PV_NONE,
{(char_u *)DFLT_BDIR, (char_u *)0L} SCRIPTID_INIT},
{"backupext", "bex", P_STRING|P_VI_DEF|P_NFNAME,
(char_u *)&p_bex, PV_NONE,
{
#ifdef VMS
(char_u *)"_",
#else
(char_u *)"~",
#endif
(char_u *)0L} SCRIPTID_INIT},
{"backupskip", "bsk", P_STRING|P_VI_DEF|P_ONECOMMA,
#ifdef FEAT_WILDIGN
(char_u *)&p_bsk, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
#ifdef FEAT_BEVAL
{"balloondelay","bdlay",P_NUM|P_VI_DEF,
(char_u *)&p_bdlay, PV_NONE,
{(char_u *)600L, (char_u *)0L} SCRIPTID_INIT},
{"ballooneval", "beval",P_BOOL|P_VI_DEF|P_NO_MKRC,
(char_u *)&p_beval, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
# ifdef FEAT_EVAL
{"balloonexpr", "bexpr", P_STRING|P_ALLOCED|P_VI_DEF|P_VIM,
(char_u *)&p_bexpr, PV_BEXPR,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
# endif
#endif
{"beautify", "bf", P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"belloff", "bo", P_STRING|P_VI_DEF|P_COMMA|P_NODUP,
(char_u *)&p_bo, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"binary", "bin", P_BOOL|P_VI_DEF|P_RSTAT,
(char_u *)&p_bin, PV_BIN,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"bioskey", "biosk",P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"bomb", NULL, P_BOOL|P_NO_MKRC|P_VI_DEF|P_RSTAT,
#ifdef FEAT_MBYTE
(char_u *)&p_bomb, PV_BOMB,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"breakat", "brk", P_STRING|P_VI_DEF|P_RALL|P_FLAGLIST,
#ifdef FEAT_LINEBREAK
(char_u *)&p_breakat, PV_NONE,
{(char_u *)" \t!@*-+;:,./?", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"breakindent", "bri", P_BOOL|P_VI_DEF|P_VIM|P_RWIN,
#ifdef FEAT_LINEBREAK
(char_u *)VAR_WIN, PV_BRI,
{(char_u *)FALSE, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"breakindentopt", "briopt", P_STRING|P_ALLOCED|P_VI_DEF|P_RBUF
|P_ONECOMMA|P_NODUP,
#ifdef FEAT_LINEBREAK
(char_u *)VAR_WIN, PV_BRIOPT,
{(char_u *)"", (char_u *)NULL}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)"", (char_u *)NULL}
#endif
SCRIPTID_INIT},
{"browsedir", "bsdir",P_STRING|P_VI_DEF,
#ifdef FEAT_BROWSE
(char_u *)&p_bsdir, PV_NONE,
{(char_u *)"last", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"bufhidden", "bh", P_STRING|P_ALLOCED|P_VI_DEF|P_NOGLOB,
#if defined(FEAT_QUICKFIX)
(char_u *)&p_bh, PV_BH,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"buflisted", "bl", P_BOOL|P_VI_DEF|P_NOGLOB,
(char_u *)&p_bl, PV_BL,
{(char_u *)1L, (char_u *)0L}
SCRIPTID_INIT},
{"buftype", "bt", P_STRING|P_ALLOCED|P_VI_DEF|P_NOGLOB,
#if defined(FEAT_QUICKFIX)
(char_u *)&p_bt, PV_BT,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"casemap", "cmp", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_MBYTE
(char_u *)&p_cmp, PV_NONE,
{(char_u *)"internal,keepascii", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"cdpath", "cd", P_STRING|P_EXPAND|P_VI_DEF|P_COMMA|P_NODUP,
#ifdef FEAT_SEARCHPATH
(char_u *)&p_cdpath, PV_NONE,
{(char_u *)",,", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"cedit", NULL, P_STRING,
#ifdef FEAT_CMDWIN
(char_u *)&p_cedit, PV_NONE,
{(char_u *)"", (char_u *)CTRL_F_STR}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"charconvert", "ccv", P_STRING|P_VI_DEF|P_SECURE,
#if defined(FEAT_MBYTE) && defined(FEAT_EVAL)
(char_u *)&p_ccv, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"cindent", "cin", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_CINDENT
(char_u *)&p_cin, PV_CIN,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"cinkeys", "cink", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_CINDENT
(char_u *)&p_cink, PV_CINK,
{(char_u *)"0{,0},0),:,0#,!^F,o,O,e", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"cinoptions", "cino", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_CINDENT
(char_u *)&p_cino, PV_CINO,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"cinwords", "cinw", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA|P_NODUP,
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
(char_u *)&p_cinw, PV_CINW,
{(char_u *)"if,else,while,do,for,switch",
(char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"clipboard", "cb", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_CLIPBOARD
(char_u *)&p_cb, PV_NONE,
# ifdef FEAT_XCLIPBOARD
{(char_u *)"autoselect,exclude:cons\\|linux",
(char_u *)0L}
# else
{(char_u *)"", (char_u *)0L}
# endif
#else
(char_u *)NULL, PV_NONE,
{(char_u *)"", (char_u *)0L}
#endif
SCRIPTID_INIT},
{"cmdheight", "ch", P_NUM|P_VI_DEF|P_RALL,
(char_u *)&p_ch, PV_NONE,
{(char_u *)1L, (char_u *)0L} SCRIPTID_INIT},
{"cmdwinheight", "cwh", P_NUM|P_VI_DEF,
#ifdef FEAT_CMDWIN
(char_u *)&p_cwh, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)7L, (char_u *)0L} SCRIPTID_INIT},
{"colorcolumn", "cc", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP|P_RWIN,
#ifdef FEAT_SYN_HL
(char_u *)VAR_WIN, PV_CC,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"columns", "co", P_NUM|P_NODEFAULT|P_NO_MKRC|P_VI_DEF|P_RCLR,
(char_u *)&Columns, PV_NONE,
{(char_u *)80L, (char_u *)0L} SCRIPTID_INIT},
{"comments", "com", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA
|P_NODUP|P_CURSWANT,
#ifdef FEAT_COMMENTS
(char_u *)&p_com, PV_COM,
{(char_u *)"s1:/*,mb:*,ex:*/,://,b:#,:%,:XCOMM,n:>,fb:-",
(char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"commentstring", "cms", P_STRING|P_ALLOCED|P_VI_DEF|P_CURSWANT,
#ifdef FEAT_FOLDING
(char_u *)&p_cms, PV_CMS,
{(char_u *)"/*%s*/", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
/* P_PRI_MKRC isn't needed here, optval_default()
* always returns TRUE for 'compatible' */
{"compatible", "cp", P_BOOL|P_RALL,
(char_u *)&p_cp, PV_NONE,
{(char_u *)TRUE, (char_u *)FALSE} SCRIPTID_INIT},
{"complete", "cpt", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_INS_EXPAND
(char_u *)&p_cpt, PV_CPT,
{(char_u *)".,w,b,u,t,i", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"concealcursor","cocu", P_STRING|P_ALLOCED|P_RWIN|P_VI_DEF,
#ifdef FEAT_CONCEAL
(char_u *)VAR_WIN, PV_COCU,
{(char_u *)"", (char_u *)NULL}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"conceallevel","cole", P_NUM|P_RWIN|P_VI_DEF,
#ifdef FEAT_CONCEAL
(char_u *)VAR_WIN, PV_COLE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)0L, (char_u *)0L}
SCRIPTID_INIT},
{"completefunc", "cfu", P_STRING|P_ALLOCED|P_VI_DEF|P_SECURE,
#ifdef FEAT_COMPL_FUNC
(char_u *)&p_cfu, PV_CFU,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"completeopt", "cot", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_INS_EXPAND
(char_u *)&p_cot, PV_NONE,
{(char_u *)"menu,preview", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"confirm", "cf", P_BOOL|P_VI_DEF,
#if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG)
(char_u *)&p_confirm, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"conskey", "consk",P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"copyindent", "ci", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_ci, PV_CI,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"cpoptions", "cpo", P_STRING|P_VIM|P_RALL|P_FLAGLIST,
(char_u *)&p_cpo, PV_NONE,
{(char_u *)CPO_VI, (char_u *)CPO_VIM}
SCRIPTID_INIT},
{"cryptmethod", "cm", P_STRING|P_ALLOCED|P_VI_DEF,
#ifdef FEAT_CRYPT
(char_u *)&p_cm, PV_CM,
{(char_u *)"zip", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"cscopepathcomp", "cspc", P_NUM|P_VI_DEF|P_VIM,
#ifdef FEAT_CSCOPE
(char_u *)&p_cspc, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"cscopeprg", "csprg", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
#ifdef FEAT_CSCOPE
(char_u *)&p_csprg, PV_NONE,
{(char_u *)"cscope", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"cscopequickfix", "csqf", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#if defined(FEAT_CSCOPE) && defined(FEAT_QUICKFIX)
(char_u *)&p_csqf, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"cscoperelative", "csre", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_CSCOPE
(char_u *)&p_csre, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"cscopetag", "cst", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_CSCOPE
(char_u *)&p_cst, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"cscopetagorder", "csto", P_NUM|P_VI_DEF|P_VIM,
#ifdef FEAT_CSCOPE
(char_u *)&p_csto, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"cscopeverbose", "csverb", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_CSCOPE
(char_u *)&p_csverbose, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"cursorbind", "crb", P_BOOL|P_VI_DEF,
#ifdef FEAT_CURSORBIND
(char_u *)VAR_WIN, PV_CRBIND,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"cursorcolumn", "cuc", P_BOOL|P_VI_DEF|P_RWIN,
#ifdef FEAT_SYN_HL
(char_u *)VAR_WIN, PV_CUC,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"cursorline", "cul", P_BOOL|P_VI_DEF|P_RWIN,
#ifdef FEAT_SYN_HL
(char_u *)VAR_WIN, PV_CUL,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"debug", NULL, P_STRING|P_VI_DEF,
(char_u *)&p_debug, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"define", "def", P_STRING|P_ALLOCED|P_VI_DEF|P_CURSWANT,
#ifdef FEAT_FIND_ID
(char_u *)&p_def, PV_DEF,
{(char_u *)"^\\s*#\\s*define", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"delcombine", "deco", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_MBYTE
(char_u *)&p_deco, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"dictionary", "dict", P_STRING|P_EXPAND|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_INS_EXPAND
(char_u *)&p_dict, PV_DICT,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"diff", NULL, P_BOOL|P_VI_DEF|P_RWIN|P_NOGLOB,
#ifdef FEAT_DIFF
(char_u *)VAR_WIN, PV_DIFF,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"diffexpr", "dex", P_STRING|P_VI_DEF|P_SECURE|P_CURSWANT,
#if defined(FEAT_DIFF) && defined(FEAT_EVAL)
(char_u *)&p_dex, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"diffopt", "dip", P_STRING|P_ALLOCED|P_VI_DEF|P_RWIN|P_ONECOMMA
|P_NODUP,
#ifdef FEAT_DIFF
(char_u *)&p_dip, PV_NONE,
{(char_u *)"filler", (char_u *)NULL}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)"", (char_u *)NULL}
#endif
SCRIPTID_INIT},
{"digraph", "dg", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_DIGRAPHS
(char_u *)&p_dg, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"directory", "dir", P_STRING|P_EXPAND|P_VI_DEF|P_ONECOMMA
|P_NODUP|P_SECURE,
(char_u *)&p_dir, PV_NONE,
{(char_u *)DFLT_DIR, (char_u *)0L} SCRIPTID_INIT},
{"display", "dy", P_STRING|P_VI_DEF|P_ONECOMMA|P_RALL|P_NODUP,
(char_u *)&p_dy, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"eadirection", "ead", P_STRING|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_ead, PV_NONE,
{(char_u *)"both", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"edcompatible","ed", P_BOOL|P_VI_DEF,
(char_u *)&p_ed, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"emoji", "emo", P_BOOL|P_VI_DEF|P_RCLR,
#if defined(FEAT_MBYTE)
(char_u *)&p_emoji, PV_NONE,
{(char_u *)TRUE, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"encoding", "enc", P_STRING|P_VI_DEF|P_RCLR|P_NO_ML,
#ifdef FEAT_MBYTE
(char_u *)&p_enc, PV_NONE,
{(char_u *)ENC_DFLT, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"endofline", "eol", P_BOOL|P_NO_MKRC|P_VI_DEF|P_RSTAT,
(char_u *)&p_eol, PV_EOL,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"equalalways", "ea", P_BOOL|P_VI_DEF|P_RALL,
(char_u *)&p_ea, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"equalprg", "ep", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_ep, PV_EP,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"errorbells", "eb", P_BOOL|P_VI_DEF,
(char_u *)&p_eb, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"errorfile", "ef", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
#ifdef FEAT_QUICKFIX
(char_u *)&p_ef, PV_NONE,
{(char_u *)DFLT_ERRORFILE, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"errorformat", "efm", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_QUICKFIX
(char_u *)&p_efm, PV_EFM,
{(char_u *)DFLT_EFM, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"esckeys", "ek", P_BOOL|P_VIM,
(char_u *)&p_ek, PV_NONE,
{(char_u *)FALSE, (char_u *)TRUE} SCRIPTID_INIT},
{"eventignore", "ei", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_AUTOCMD
(char_u *)&p_ei, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"expandtab", "et", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_et, PV_ET,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"exrc", "ex", P_BOOL|P_VI_DEF|P_SECURE,
(char_u *)&p_exrc, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"fileencoding","fenc", P_STRING|P_ALLOCED|P_VI_DEF|P_RSTAT|P_RBUF
|P_NO_MKRC,
#ifdef FEAT_MBYTE
(char_u *)&p_fenc, PV_FENC,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"fileencodings","fencs", P_STRING|P_VI_DEF|P_ONECOMMA,
#ifdef FEAT_MBYTE
(char_u *)&p_fencs, PV_NONE,
{(char_u *)"ucs-bom", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"fileformat", "ff", P_STRING|P_ALLOCED|P_VI_DEF|P_RSTAT|P_NO_MKRC
|P_CURSWANT,
(char_u *)&p_ff, PV_FF,
{(char_u *)DFLT_FF, (char_u *)0L} SCRIPTID_INIT},
{"fileformats", "ffs", P_STRING|P_VIM|P_ONECOMMA|P_NODUP,
(char_u *)&p_ffs, PV_NONE,
{(char_u *)DFLT_FFS_VI, (char_u *)DFLT_FFS_VIM}
SCRIPTID_INIT},
{"fileignorecase", "fic", P_BOOL|P_VI_DEF,
(char_u *)&p_fic, PV_NONE,
{
#ifdef CASE_INSENSITIVE_FILENAME
(char_u *)TRUE,
#else
(char_u *)FALSE,
#endif
(char_u *)0L} SCRIPTID_INIT},
{"filetype", "ft", P_STRING|P_ALLOCED|P_VI_DEF|P_NOGLOB|P_NFNAME,
#ifdef FEAT_AUTOCMD
(char_u *)&p_ft, PV_FT,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"fillchars", "fcs", P_STRING|P_VI_DEF|P_RALL|P_ONECOMMA|P_NODUP,
#if defined(FEAT_WINDOWS) || defined(FEAT_FOLDING)
(char_u *)&p_fcs, PV_NONE,
{(char_u *)"vert:|,fold:-", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)"", (char_u *)0L}
#endif
SCRIPTID_INIT},
{"fixendofline", "fixeol", P_BOOL|P_VI_DEF|P_RSTAT,
(char_u *)&p_fixeol, PV_FIXEOL,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"fkmap", "fk", P_BOOL|P_VI_DEF,
#ifdef FEAT_FKMAP
(char_u *)&p_fkmap, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"flash", "fl", P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
#ifdef FEAT_FOLDING
{"foldclose", "fcl", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP|P_RWIN,
(char_u *)&p_fcl, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"foldcolumn", "fdc", P_NUM|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_FDC,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"foldenable", "fen", P_BOOL|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_FEN,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"foldexpr", "fde", P_STRING|P_ALLOCED|P_VIM|P_VI_DEF|P_RWIN,
# ifdef FEAT_EVAL
(char_u *)VAR_WIN, PV_FDE,
{(char_u *)"0", (char_u *)NULL}
# else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
# endif
SCRIPTID_INIT},
{"foldignore", "fdi", P_STRING|P_ALLOCED|P_VIM|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_FDI,
{(char_u *)"#", (char_u *)NULL} SCRIPTID_INIT},
{"foldlevel", "fdl", P_NUM|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_FDL,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"foldlevelstart","fdls", P_NUM|P_VI_DEF|P_CURSWANT,
(char_u *)&p_fdls, PV_NONE,
{(char_u *)-1L, (char_u *)0L} SCRIPTID_INIT},
{"foldmarker", "fmr", P_STRING|P_ALLOCED|P_VIM|P_VI_DEF|
P_RWIN|P_ONECOMMA|P_NODUP,
(char_u *)VAR_WIN, PV_FMR,
{(char_u *)"{{{,}}}", (char_u *)NULL}
SCRIPTID_INIT},
{"foldmethod", "fdm", P_STRING|P_ALLOCED|P_VIM|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_FDM,
{(char_u *)"manual", (char_u *)NULL} SCRIPTID_INIT},
{"foldminlines","fml", P_NUM|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_FML,
{(char_u *)1L, (char_u *)0L} SCRIPTID_INIT},
{"foldnestmax", "fdn", P_NUM|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_FDN,
{(char_u *)20L, (char_u *)0L} SCRIPTID_INIT},
{"foldopen", "fdo", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP|P_CURSWANT,
(char_u *)&p_fdo, PV_NONE,
{(char_u *)"block,hor,mark,percent,quickfix,search,tag,undo",
(char_u *)0L} SCRIPTID_INIT},
{"foldtext", "fdt", P_STRING|P_ALLOCED|P_VIM|P_VI_DEF|P_RWIN,
# ifdef FEAT_EVAL
(char_u *)VAR_WIN, PV_FDT,
{(char_u *)"foldtext()", (char_u *)NULL}
# else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
# endif
SCRIPTID_INIT},
#endif
{"formatexpr", "fex", P_STRING|P_ALLOCED|P_VI_DEF|P_VIM,
#ifdef FEAT_EVAL
(char_u *)&p_fex, PV_FEX,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"formatoptions","fo", P_STRING|P_ALLOCED|P_VIM|P_FLAGLIST,
(char_u *)&p_fo, PV_FO,
{(char_u *)DFLT_FO_VI, (char_u *)DFLT_FO_VIM}
SCRIPTID_INIT},
{"formatlistpat","flp", P_STRING|P_ALLOCED|P_VI_DEF,
(char_u *)&p_flp, PV_FLP,
{(char_u *)"^\\s*\\d\\+[\\]:.)}\\t ]\\s*",
(char_u *)0L} SCRIPTID_INIT},
{"formatprg", "fp", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_fp, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"fsync", "fs", P_BOOL|P_SECURE|P_VI_DEF,
#ifdef HAVE_FSYNC
(char_u *)&p_fs, PV_NONE,
{(char_u *)TRUE, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"gdefault", "gd", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_gd, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"graphic", "gr", P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"grepformat", "gfm", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_QUICKFIX
(char_u *)&p_gefm, PV_NONE,
{(char_u *)DFLT_GREPFORMAT, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"grepprg", "gp", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
#ifdef FEAT_QUICKFIX
(char_u *)&p_gp, PV_GP,
{
# ifdef WIN3264
/* may be changed to "grep -n" in os_win32.c */
(char_u *)"findstr /n",
# else
# ifdef UNIX
/* Add an extra file name so that grep will always
* insert a file name in the match line. */
(char_u *)"grep -n $* /dev/null",
# else
# ifdef VMS
(char_u *)"SEARCH/NUMBERS ",
# else
(char_u *)"grep -n ",
# endif
# endif
# endif
(char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
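	    /* As with 'grepprg' above, platform-dependent defaults are
	     * selected by nesting #ifdefs inside the initializer itself. */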
{"guicursor", "gcr", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef CURSOR_SHAPE
(char_u *)&p_guicursor, PV_NONE,
{
# ifdef FEAT_GUI
(char_u *)"n-v-c:block-Cursor/lCursor,ve:ver35-Cursor,o:hor50-Cursor,i-ci:ver25-Cursor/lCursor,r-cr:hor20-Cursor/lCursor,sm:block-Cursor-blinkwait175-blinkoff150-blinkon175",
# else /* Win32 console */
(char_u *)"n-v-c:block,o:hor50,i-ci:hor15,r-cr:hor30,sm:block",
# endif
(char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"guifont", "gfn", P_STRING|P_VI_DEF|P_RCLR|P_ONECOMMA|P_NODUP,
#ifdef FEAT_GUI
(char_u *)&p_guifont, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"guifontset", "gfs", P_STRING|P_VI_DEF|P_RCLR|P_ONECOMMA,
#if defined(FEAT_GUI) && defined(FEAT_XFONTSET)
(char_u *)&p_guifontset, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"guifontwide", "gfw", P_STRING|P_VI_DEF|P_RCLR|P_ONECOMMA|P_NODUP,
#if defined(FEAT_GUI) && defined(FEAT_MBYTE)
(char_u *)&p_guifontwide, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"guiheadroom", "ghr", P_NUM|P_VI_DEF,
#if defined(FEAT_GUI_GTK) || defined(FEAT_GUI_X11)
(char_u *)&p_ghr, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)50L, (char_u *)0L} SCRIPTID_INIT},
{"guioptions", "go", P_STRING|P_VI_DEF|P_RALL|P_FLAGLIST,
#if defined(FEAT_GUI)
(char_u *)&p_go, PV_NONE,
# if defined(UNIX) && !defined(MACOS)
{(char_u *)"aegimrLtT", (char_u *)0L}
# else
{(char_u *)"egmrLtT", (char_u *)0L}
# endif
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"guipty", NULL, P_BOOL|P_VI_DEF,
#if defined(FEAT_GUI)
(char_u *)&p_guipty, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"guitablabel", "gtl", P_STRING|P_VI_DEF|P_RWIN,
#if defined(FEAT_GUI_TABLINE)
(char_u *)&p_gtl, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"guitabtooltip", "gtt", P_STRING|P_VI_DEF|P_RWIN,
#if defined(FEAT_GUI_TABLINE)
(char_u *)&p_gtt, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"hardtabs", "ht", P_NUM|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"helpfile", "hf", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_hf, PV_NONE,
{(char_u *)DFLT_HELPFILE, (char_u *)0L}
SCRIPTID_INIT},
{"helpheight", "hh", P_NUM|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_hh, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)20L, (char_u *)0L} SCRIPTID_INIT},
{"helplang", "hlg", P_STRING|P_VI_DEF|P_ONECOMMA,
#ifdef FEAT_MULTI_LANG
(char_u *)&p_hlg, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"hidden", "hid", P_BOOL|P_VI_DEF,
(char_u *)&p_hid, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"highlight", "hl", P_STRING|P_VI_DEF|P_RCLR|P_ONECOMMA|P_NODUP,
(char_u *)&p_hl, PV_NONE,
{(char_u *)HIGHLIGHT_INIT, (char_u *)0L}
SCRIPTID_INIT},
{"history", "hi", P_NUM|P_VIM,
(char_u *)&p_hi, PV_NONE,
{(char_u *)0L, (char_u *)50L} SCRIPTID_INIT},
{"hkmap", "hk", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_RIGHTLEFT
(char_u *)&p_hkmap, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"hkmapp", "hkp", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_RIGHTLEFT
(char_u *)&p_hkmapp, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"hlsearch", "hls", P_BOOL|P_VI_DEF|P_VIM|P_RALL,
(char_u *)&p_hls, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"icon", NULL, P_BOOL|P_VI_DEF,
#ifdef FEAT_TITLE
(char_u *)&p_icon, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"iconstring", NULL, P_STRING|P_VI_DEF,
#ifdef FEAT_TITLE
(char_u *)&p_iconstring, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"ignorecase", "ic", P_BOOL|P_VI_DEF,
(char_u *)&p_ic, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"imactivatefunc","imaf",P_STRING|P_VI_DEF|P_SECURE,
# if defined(FEAT_EVAL) && defined(FEAT_XIM) && defined(FEAT_GUI_GTK)
(char_u *)&p_imaf, PV_NONE,
{(char_u *)"", (char_u *)NULL}
# else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
# endif
SCRIPTID_INIT},
{"imactivatekey","imak",P_STRING|P_VI_DEF,
#if defined(FEAT_XIM) && defined(FEAT_GUI_GTK)
(char_u *)&p_imak, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"imcmdline", "imc", P_BOOL|P_VI_DEF,
#ifdef USE_IM_CONTROL
(char_u *)&p_imcmdline, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"imdisable", "imd", P_BOOL|P_VI_DEF,
#ifdef USE_IM_CONTROL
(char_u *)&p_imdisable, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
#ifdef __sgi
{(char_u *)TRUE, (char_u *)0L}
#else
{(char_u *)FALSE, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"iminsert", "imi", P_NUM|P_VI_DEF,
(char_u *)&p_iminsert, PV_IMI,
#ifdef B_IMODE_IM
{(char_u *)B_IMODE_IM, (char_u *)0L}
#else
{(char_u *)B_IMODE_NONE, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"imsearch", "ims", P_NUM|P_VI_DEF,
(char_u *)&p_imsearch, PV_IMS,
#ifdef B_IMODE_IM
{(char_u *)B_IMODE_IM, (char_u *)0L}
#else
{(char_u *)B_IMODE_NONE, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"imstatusfunc","imsf",P_STRING|P_VI_DEF|P_SECURE,
# if defined(FEAT_EVAL) && defined(FEAT_XIM) && defined(FEAT_GUI_GTK)
(char_u *)&p_imsf, PV_NONE,
{(char_u *)"", (char_u *)NULL}
# else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
# endif
SCRIPTID_INIT},
{"include", "inc", P_STRING|P_ALLOCED|P_VI_DEF,
#ifdef FEAT_FIND_ID
(char_u *)&p_inc, PV_INC,
{(char_u *)"^\\s*#\\s*include", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"includeexpr", "inex", P_STRING|P_ALLOCED|P_VI_DEF,
#if defined(FEAT_FIND_ID) && defined(FEAT_EVAL)
(char_u *)&p_inex, PV_INEX,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"incsearch", "is", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_is, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"indentexpr", "inde", P_STRING|P_ALLOCED|P_VI_DEF|P_VIM,
#if defined(FEAT_CINDENT) && defined(FEAT_EVAL)
(char_u *)&p_inde, PV_INDE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"indentkeys", "indk", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA|P_NODUP,
#if defined(FEAT_CINDENT) && defined(FEAT_EVAL)
(char_u *)&p_indk, PV_INDK,
{(char_u *)"0{,0},:,0#,!^F,o,O,e", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"infercase", "inf", P_BOOL|P_VI_DEF,
(char_u *)&p_inf, PV_INF,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"insertmode", "im", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_im, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"isfname", "isf", P_STRING|P_VI_DEF|P_COMMA|P_NODUP,
(char_u *)&p_isf, PV_NONE,
{
#ifdef BACKSLASH_IN_FILENAME
		    /* Excluded are: '&' and '^', which are special in
		     * cmd.exe, and '(' and ')', which are used in text to
		     * separate file names. */
(char_u *)"@,48-57,/,\\,.,-,_,+,,,#,$,%,{,},[,],:,@-@,!,~,=",
#else
# ifdef AMIGA
(char_u *)"@,48-57,/,.,-,_,+,,,$,:",
# else
# ifdef VMS
(char_u *)"@,48-57,/,.,-,_,+,,,#,$,%,<,>,[,],:,;,~",
# else /* UNIX et al. */
# ifdef EBCDIC
(char_u *)"@,240-249,/,.,-,_,+,,,#,$,%,~,=",
# else
(char_u *)"@,48-57,/,.,-,_,+,,,#,$,%,~,=",
# endif
# endif
# endif
#endif
(char_u *)0L} SCRIPTID_INIT},
{"isident", "isi", P_STRING|P_VI_DEF|P_COMMA|P_NODUP,
(char_u *)&p_isi, PV_NONE,
{
#if defined(MSWIN)
(char_u *)"@,48-57,_,128-167,224-235",
#else
# ifdef EBCDIC
	/* TODO: EBCDIC: check this! @ == isalpha() */
(char_u *)"@,240-249,_,66-73,81-89,98-105,"
"112-120,128,140-142,156,158,172,"
"174,186,191,203-207,219-225,235-239,"
"251-254",
# else
(char_u *)"@,48-57,_,192-255",
# endif
#endif
(char_u *)0L} SCRIPTID_INIT},
{"iskeyword", "isk", P_STRING|P_ALLOCED|P_VIM|P_COMMA|P_NODUP,
(char_u *)&p_isk, PV_ISK,
{
#ifdef EBCDIC
(char_u *)"@,240-249,_",
	/* TODO: EBCDIC: check this! @ == isalpha() */
(char_u *)"@,240-249,_,66-73,81-89,98-105,"
"112-120,128,140-142,156,158,172,"
"174,186,191,203-207,219-225,235-239,"
"251-254",
#else
(char_u *)"@,48-57,_",
# if defined(MSWIN)
(char_u *)"@,48-57,_,128-167,224-235"
# else
ISK_LATIN1
# endif
#endif
} SCRIPTID_INIT},
{"isprint", "isp", P_STRING|P_VI_DEF|P_RALL|P_COMMA|P_NODUP,
(char_u *)&p_isp, PV_NONE,
{
#if defined(MSWIN) || (defined(MACOS) && !defined(MACOS_X)) \
|| defined(VMS)
(char_u *)"@,~-255",
#else
# ifdef EBCDIC
/* all chars above 63 are printable */
(char_u *)"63-255",
# else
ISP_LATIN1,
# endif
#endif
(char_u *)0L} SCRIPTID_INIT},
{"joinspaces", "js", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_js, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"key", NULL, P_STRING|P_ALLOCED|P_VI_DEF|P_NO_MKRC,
#ifdef FEAT_CRYPT
(char_u *)&p_key, PV_KEY,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"keymap", "kmp", P_STRING|P_ALLOCED|P_VI_DEF|P_RBUF|P_RSTAT|P_NFNAME|P_PRI_MKRC,
#ifdef FEAT_KEYMAP
(char_u *)&p_keymap, PV_KMAP,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)"", (char_u *)0L}
#endif
SCRIPTID_INIT},
{"keymodel", "km", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
(char_u *)&p_km, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"keywordprg", "kp", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_kp, PV_KP,
{
#ifdef MSWIN
(char_u *)":help",
#else
# ifdef VMS
(char_u *)"help",
# else
# ifdef USEMAN_S
(char_u *)"man -s",
# else
(char_u *)"man",
# endif
# endif
#endif
(char_u *)0L} SCRIPTID_INIT},
{"langmap", "lmap", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP|P_SECURE,
#ifdef FEAT_LANGMAP
(char_u *)&p_langmap, PV_NONE,
{(char_u *)"", /* unmatched } */
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL,
#endif
(char_u *)0L} SCRIPTID_INIT},
{"langmenu", "lm", P_STRING|P_VI_DEF|P_NFNAME,
#if defined(FEAT_MENU) && defined(FEAT_MULTI_LANG)
(char_u *)&p_lm, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"langnoremap", "lnr", P_BOOL|P_VI_DEF,
#ifdef FEAT_LANGMAP
(char_u *)&p_lnr, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"langremap", "lrm", P_BOOL|P_VI_DEF,
#ifdef FEAT_LANGMAP
(char_u *)&p_lrm, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"laststatus", "ls", P_NUM|P_VI_DEF|P_RALL,
#ifdef FEAT_WINDOWS
(char_u *)&p_ls, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)1L, (char_u *)0L} SCRIPTID_INIT},
{"lazyredraw", "lz", P_BOOL|P_VI_DEF,
(char_u *)&p_lz, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"linebreak", "lbr", P_BOOL|P_VI_DEF|P_RWIN,
#ifdef FEAT_LINEBREAK
(char_u *)VAR_WIN, PV_LBR,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"lines", NULL, P_NUM|P_NODEFAULT|P_NO_MKRC|P_VI_DEF|P_RCLR,
(char_u *)&Rows, PV_NONE,
{
#if defined(WIN3264)
(char_u *)25L,
#else
(char_u *)24L,
#endif
(char_u *)0L} SCRIPTID_INIT},
{"linespace", "lsp", P_NUM|P_VI_DEF|P_RCLR,
#ifdef FEAT_GUI
(char_u *)&p_linespace, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
#ifdef FEAT_GUI_W32
{(char_u *)1L, (char_u *)0L}
#else
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"lisp", NULL, P_BOOL|P_VI_DEF,
#ifdef FEAT_LISP
(char_u *)&p_lisp, PV_LISP,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"lispwords", "lw", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_LISP
(char_u *)&p_lispwords, PV_LW,
{(char_u *)LISPWORD_VALUE, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)"", (char_u *)0L}
#endif
SCRIPTID_INIT},
{"list", NULL, P_BOOL|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_LIST,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"listchars", "lcs", P_STRING|P_VI_DEF|P_RALL|P_ONECOMMA|P_NODUP,
(char_u *)&p_lcs, PV_NONE,
{(char_u *)"eol:$", (char_u *)0L} SCRIPTID_INIT},
{"loadplugins", "lpl", P_BOOL|P_VI_DEF,
(char_u *)&p_lpl, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
#if defined(DYNAMIC_LUA)
{"luadll", NULL, P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_luadll, PV_NONE,
{(char_u *)DYNAMIC_LUA_DLL, (char_u *)0L}
SCRIPTID_INIT},
#endif
#ifdef FEAT_GUI_MAC
{"macatsui", NULL, P_BOOL|P_VI_DEF|P_RCLR,
(char_u *)&p_macatsui, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
#endif
{"magic", NULL, P_BOOL|P_VI_DEF,
(char_u *)&p_magic, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"makeef", "mef", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
#ifdef FEAT_QUICKFIX
(char_u *)&p_mef, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"makeprg", "mp", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
#ifdef FEAT_QUICKFIX
(char_u *)&p_mp, PV_MP,
# ifdef VMS
{(char_u *)"MMS", (char_u *)0L}
# else
{(char_u *)"make", (char_u *)0L}
# endif
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"matchpairs", "mps", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA|P_NODUP,
(char_u *)&p_mps, PV_MPS,
{(char_u *)"(:),{:},[:]", (char_u *)0L}
SCRIPTID_INIT},
{"matchtime", "mat", P_NUM|P_VI_DEF,
(char_u *)&p_mat, PV_NONE,
{(char_u *)5L, (char_u *)0L} SCRIPTID_INIT},
{"maxcombine", "mco", P_NUM|P_VI_DEF|P_CURSWANT,
#ifdef FEAT_MBYTE
(char_u *)&p_mco, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)2, (char_u *)0L} SCRIPTID_INIT},
{"maxfuncdepth", "mfd", P_NUM|P_VI_DEF,
#ifdef FEAT_EVAL
(char_u *)&p_mfd, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)100L, (char_u *)0L} SCRIPTID_INIT},
{"maxmapdepth", "mmd", P_NUM|P_VI_DEF,
(char_u *)&p_mmd, PV_NONE,
{(char_u *)1000L, (char_u *)0L} SCRIPTID_INIT},
{"maxmem", "mm", P_NUM|P_VI_DEF,
(char_u *)&p_mm, PV_NONE,
{(char_u *)DFLT_MAXMEM, (char_u *)0L}
SCRIPTID_INIT},
{"maxmempattern","mmp", P_NUM|P_VI_DEF,
(char_u *)&p_mmp, PV_NONE,
{(char_u *)1000L, (char_u *)0L} SCRIPTID_INIT},
{"maxmemtot", "mmt", P_NUM|P_VI_DEF,
(char_u *)&p_mmt, PV_NONE,
{(char_u *)DFLT_MAXMEMTOT, (char_u *)0L}
SCRIPTID_INIT},
{"menuitems", "mis", P_NUM|P_VI_DEF,
#ifdef FEAT_MENU
(char_u *)&p_mis, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)25L, (char_u *)0L} SCRIPTID_INIT},
{"mesg", NULL, P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"mkspellmem", "msm", P_STRING|P_VI_DEF|P_EXPAND|P_SECURE,
#ifdef FEAT_SPELL
(char_u *)&p_msm, PV_NONE,
{(char_u *)"460000,2000,500", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"modeline", "ml", P_BOOL|P_VIM,
(char_u *)&p_ml, PV_ML,
{(char_u *)FALSE, (char_u *)TRUE} SCRIPTID_INIT},
{"modelines", "mls", P_NUM|P_VI_DEF,
(char_u *)&p_mls, PV_NONE,
{(char_u *)5L, (char_u *)0L} SCRIPTID_INIT},
{"modifiable", "ma", P_BOOL|P_VI_DEF|P_NOGLOB,
(char_u *)&p_ma, PV_MA,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"modified", "mod", P_BOOL|P_NO_MKRC|P_VI_DEF|P_RSTAT,
(char_u *)&p_mod, PV_MOD,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"more", NULL, P_BOOL|P_VIM,
(char_u *)&p_more, PV_NONE,
{(char_u *)FALSE, (char_u *)TRUE} SCRIPTID_INIT},
{"mouse", NULL, P_STRING|P_VI_DEF|P_FLAGLIST,
(char_u *)&p_mouse, PV_NONE,
{
#if defined(WIN3264)
(char_u *)"a",
#else
(char_u *)"",
#endif
(char_u *)0L} SCRIPTID_INIT},
{"mousefocus", "mousef", P_BOOL|P_VI_DEF,
#ifdef FEAT_GUI
(char_u *)&p_mousef, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"mousehide", "mh", P_BOOL|P_VI_DEF,
#ifdef FEAT_GUI
(char_u *)&p_mh, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"mousemodel", "mousem", P_STRING|P_VI_DEF,
(char_u *)&p_mousem, PV_NONE,
{
#if defined(MSWIN)
(char_u *)"popup",
#else
# if defined(MACOS)
(char_u *)"popup_setpos",
# else
(char_u *)"extend",
# endif
#endif
(char_u *)0L} SCRIPTID_INIT},
{"mouseshape", "mouses", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_MOUSESHAPE
(char_u *)&p_mouseshape, PV_NONE,
{(char_u *)"i-r:beam,s:updown,sd:udsizing,vs:leftright,vd:lrsizing,m:no,ml:up-arrow,v:rightup-arrow", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"mousetime", "mouset", P_NUM|P_VI_DEF,
(char_u *)&p_mouset, PV_NONE,
{(char_u *)500L, (char_u *)0L} SCRIPTID_INIT},
{"mzquantum", "mzq", P_NUM,
#ifdef FEAT_MZSCHEME
(char_u *)&p_mzq, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)100L, (char_u *)100L} SCRIPTID_INIT},
{"novice", NULL, P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"nrformats", "nf", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA|P_NODUP,
(char_u *)&p_nf, PV_NF,
{(char_u *)"bin,octal,hex", (char_u *)0L}
SCRIPTID_INIT},
{"number", "nu", P_BOOL|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_NU,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"numberwidth", "nuw", P_NUM|P_RWIN|P_VIM,
#ifdef FEAT_LINEBREAK
(char_u *)VAR_WIN, PV_NUW,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)8L, (char_u *)4L} SCRIPTID_INIT},
{"omnifunc", "ofu", P_STRING|P_ALLOCED|P_VI_DEF|P_SECURE,
#ifdef FEAT_COMPL_FUNC
(char_u *)&p_ofu, PV_OFU,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"open", NULL, P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"opendevice", "odev", P_BOOL|P_VI_DEF,
#if defined(MSWIN)
(char_u *)&p_odev, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)FALSE}
SCRIPTID_INIT},
{"operatorfunc", "opfunc", P_STRING|P_VI_DEF|P_SECURE,
(char_u *)&p_opfunc, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"optimize", "opt", P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"osfiletype", "oft", P_STRING|P_ALLOCED|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"packpath", "pp", P_STRING|P_VI_DEF|P_EXPAND|P_ONECOMMA|P_NODUP
|P_SECURE,
(char_u *)&p_pp, PV_NONE,
{(char_u *)DFLT_RUNTIMEPATH, (char_u *)0L}
SCRIPTID_INIT},
{"paragraphs", "para", P_STRING|P_VI_DEF,
(char_u *)&p_para, PV_NONE,
{(char_u *)"IPLPPPQPP TPHPLIPpLpItpplpipbp",
(char_u *)0L} SCRIPTID_INIT},
{"paste", NULL, P_BOOL|P_VI_DEF|P_PRI_MKRC,
(char_u *)&p_paste, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"pastetoggle", "pt", P_STRING|P_VI_DEF,
(char_u *)&p_pt, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"patchexpr", "pex", P_STRING|P_VI_DEF|P_SECURE,
#if defined(FEAT_DIFF) && defined(FEAT_EVAL)
(char_u *)&p_pex, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"patchmode", "pm", P_STRING|P_VI_DEF|P_NFNAME,
(char_u *)&p_pm, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"path", "pa", P_STRING|P_EXPAND|P_VI_DEF|P_COMMA|P_NODUP,
(char_u *)&p_path, PV_PATH,
{
#if defined(AMIGA) || defined(MSWIN)
(char_u *)".,,",
#else
(char_u *)".,/usr/include,,",
#endif
(char_u *)0L} SCRIPTID_INIT},
#if defined(DYNAMIC_PERL)
{"perldll", NULL, P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_perldll, PV_NONE,
{(char_u *)DYNAMIC_PERL_DLL, (char_u *)0L}
SCRIPTID_INIT},
#endif
{"preserveindent", "pi", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_pi, PV_PI,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"previewheight", "pvh", P_NUM|P_VI_DEF,
#if defined(FEAT_WINDOWS) && defined(FEAT_QUICKFIX)
(char_u *)&p_pvh, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)12L, (char_u *)0L} SCRIPTID_INIT},
{"previewwindow", "pvw", P_BOOL|P_VI_DEF|P_RSTAT|P_NOGLOB,
#if defined(FEAT_WINDOWS) && defined(FEAT_QUICKFIX)
(char_u *)VAR_WIN, PV_PVW,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"printdevice", "pdev", P_STRING|P_VI_DEF|P_SECURE,
#ifdef FEAT_PRINTER
(char_u *)&p_pdev, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"printencoding", "penc", P_STRING|P_VI_DEF,
#ifdef FEAT_POSTSCRIPT
(char_u *)&p_penc, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"printexpr", "pexpr", P_STRING|P_VI_DEF,
#ifdef FEAT_POSTSCRIPT
(char_u *)&p_pexpr, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"printfont", "pfn", P_STRING|P_VI_DEF,
#ifdef FEAT_PRINTER
(char_u *)&p_pfn, PV_NONE,
{
# ifdef MSWIN
(char_u *)"Courier_New:h10",
# else
(char_u *)"courier",
# endif
(char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"printheader", "pheader", P_STRING|P_VI_DEF|P_GETTEXT,
#ifdef FEAT_PRINTER
(char_u *)&p_header, PV_NONE,
{(char_u *)N_("%<%f%h%m%=Page %N"), (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"printmbcharset", "pmbcs", P_STRING|P_VI_DEF,
#if defined(FEAT_POSTSCRIPT) && defined(FEAT_MBYTE)
(char_u *)&p_pmcs, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"printmbfont", "pmbfn", P_STRING|P_VI_DEF,
#if defined(FEAT_POSTSCRIPT) && defined(FEAT_MBYTE)
(char_u *)&p_pmfn, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"printoptions", "popt", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_PRINTER
(char_u *)&p_popt, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"prompt", NULL, P_BOOL|P_VI_DEF,
(char_u *)&p_prompt, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"pumheight", "ph", P_NUM|P_VI_DEF,
#ifdef FEAT_INS_EXPAND
(char_u *)&p_ph, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
#if defined(DYNAMIC_PYTHON3)
{"pythonthreedll", NULL, P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_py3dll, PV_NONE,
{(char_u *)DYNAMIC_PYTHON3_DLL, (char_u *)0L}
SCRIPTID_INIT},
#endif
#if defined(DYNAMIC_PYTHON)
{"pythondll", NULL, P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_pydll, PV_NONE,
{(char_u *)DYNAMIC_PYTHON_DLL, (char_u *)0L}
SCRIPTID_INIT},
#endif
{"quoteescape", "qe", P_STRING|P_ALLOCED|P_VI_DEF,
#ifdef FEAT_TEXTOBJ
(char_u *)&p_qe, PV_QE,
{(char_u *)"\\", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"readonly", "ro", P_BOOL|P_VI_DEF|P_RSTAT|P_NOGLOB,
(char_u *)&p_ro, PV_RO,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"redraw", NULL, P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"redrawtime", "rdt", P_NUM|P_VI_DEF,
#ifdef FEAT_RELTIME
(char_u *)&p_rdt, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)2000L, (char_u *)0L} SCRIPTID_INIT},
{"regexpengine", "re", P_NUM|P_VI_DEF,
(char_u *)&p_re, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"relativenumber", "rnu", P_BOOL|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_RNU,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"remap", NULL, P_BOOL|P_VI_DEF,
(char_u *)&p_remap, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"renderoptions", "rop", P_STRING|P_ONECOMMA|P_RCLR|P_VI_DEF,
#ifdef FEAT_RENDER_OPTIONS
(char_u *)&p_rop, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"report", NULL, P_NUM|P_VI_DEF,
(char_u *)&p_report, PV_NONE,
{(char_u *)2L, (char_u *)0L} SCRIPTID_INIT},
{"restorescreen", "rs", P_BOOL|P_VI_DEF,
#ifdef WIN3264
(char_u *)&p_rs, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"revins", "ri", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_RIGHTLEFT
(char_u *)&p_ri, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"rightleft", "rl", P_BOOL|P_VI_DEF|P_RWIN,
#ifdef FEAT_RIGHTLEFT
(char_u *)VAR_WIN, PV_RL,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"rightleftcmd", "rlc", P_STRING|P_ALLOCED|P_VI_DEF|P_RWIN,
#ifdef FEAT_RIGHTLEFT
(char_u *)VAR_WIN, PV_RLC,
{(char_u *)"search", (char_u *)NULL}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
#if defined(DYNAMIC_RUBY)
{"rubydll", NULL, P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_rubydll, PV_NONE,
{(char_u *)DYNAMIC_RUBY_DLL, (char_u *)0L}
SCRIPTID_INIT},
#endif
{"ruler", "ru", P_BOOL|P_VI_DEF|P_VIM|P_RSTAT,
#ifdef FEAT_CMDL_INFO
(char_u *)&p_ru, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"rulerformat", "ruf", P_STRING|P_VI_DEF|P_ALLOCED|P_RSTAT,
#ifdef FEAT_STL_OPT
(char_u *)&p_ruf, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"runtimepath", "rtp", P_STRING|P_VI_DEF|P_EXPAND|P_ONECOMMA|P_NODUP
|P_SECURE,
(char_u *)&p_rtp, PV_NONE,
{(char_u *)DFLT_RUNTIMEPATH, (char_u *)0L}
SCRIPTID_INIT},
{"scroll", "scr", P_NUM|P_NO_MKRC|P_VI_DEF,
(char_u *)VAR_WIN, PV_SCROLL,
{(char_u *)12L, (char_u *)0L} SCRIPTID_INIT},
{"scrollbind", "scb", P_BOOL|P_VI_DEF,
#ifdef FEAT_SCROLLBIND
(char_u *)VAR_WIN, PV_SCBIND,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"scrolljump", "sj", P_NUM|P_VI_DEF|P_VIM,
(char_u *)&p_sj, PV_NONE,
{(char_u *)1L, (char_u *)0L} SCRIPTID_INIT},
{"scrolloff", "so", P_NUM|P_VI_DEF|P_VIM|P_RALL,
(char_u *)&p_so, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"scrollopt", "sbo", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_SCROLLBIND
(char_u *)&p_sbo, PV_NONE,
{(char_u *)"ver,jump", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"sections", "sect", P_STRING|P_VI_DEF,
(char_u *)&p_sections, PV_NONE,
{(char_u *)"SHNHH HUnhsh", (char_u *)0L}
SCRIPTID_INIT},
{"secure", NULL, P_BOOL|P_VI_DEF|P_SECURE,
(char_u *)&p_secure, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"selection", "sel", P_STRING|P_VI_DEF,
(char_u *)&p_sel, PV_NONE,
{(char_u *)"inclusive", (char_u *)0L}
SCRIPTID_INIT},
{"selectmode", "slm", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
(char_u *)&p_slm, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"sessionoptions", "ssop", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_SESSION
(char_u *)&p_ssop, PV_NONE,
{(char_u *)"blank,buffers,curdir,folds,help,options,tabpages,winsize",
(char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"shell", "sh", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_sh, PV_NONE,
{
#ifdef VMS
(char_u *)"-",
#else
# if defined(WIN3264)
(char_u *)"", /* set in set_init_1() */
# else
(char_u *)"sh",
# endif
#endif /* VMS */
(char_u *)0L} SCRIPTID_INIT},
{"shellcmdflag","shcf", P_STRING|P_VI_DEF|P_SECURE,
(char_u *)&p_shcf, PV_NONE,
{
#if defined(MSWIN)
(char_u *)"/c",
#else
(char_u *)"-c",
#endif
(char_u *)0L} SCRIPTID_INIT},
{"shellpipe", "sp", P_STRING|P_VI_DEF|P_SECURE,
#ifdef FEAT_QUICKFIX
(char_u *)&p_sp, PV_NONE,
{
#if defined(UNIX)
(char_u *)"| tee",
#else
(char_u *)">",
#endif
(char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"shellquote", "shq", P_STRING|P_VI_DEF|P_SECURE,
(char_u *)&p_shq, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"shellredir", "srr", P_STRING|P_VI_DEF|P_SECURE,
(char_u *)&p_srr, PV_NONE,
{(char_u *)">", (char_u *)0L} SCRIPTID_INIT},
{"shellslash", "ssl", P_BOOL|P_VI_DEF,
#ifdef BACKSLASH_IN_FILENAME
(char_u *)&p_ssl, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"shelltemp", "stmp", P_BOOL,
(char_u *)&p_stmp, PV_NONE,
{(char_u *)FALSE, (char_u *)TRUE} SCRIPTID_INIT},
{"shelltype", "st", P_NUM|P_VI_DEF,
#ifdef AMIGA
(char_u *)&p_st, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"shellxquote", "sxq", P_STRING|P_VI_DEF|P_SECURE,
(char_u *)&p_sxq, PV_NONE,
{
#if defined(UNIX) && defined(USE_SYSTEM)
(char_u *)"\"",
#else
(char_u *)"",
#endif
(char_u *)0L} SCRIPTID_INIT},
{"shellxescape", "sxe", P_STRING|P_VI_DEF|P_SECURE,
(char_u *)&p_sxe, PV_NONE,
{
#if defined(WIN3264)
(char_u *)"\"&|<>()@^",
#else
(char_u *)"",
#endif
(char_u *)0L} SCRIPTID_INIT},
{"shiftround", "sr", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_sr, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"shiftwidth", "sw", P_NUM|P_VI_DEF,
(char_u *)&p_sw, PV_SW,
{(char_u *)8L, (char_u *)0L} SCRIPTID_INIT},
{"shortmess", "shm", P_STRING|P_VIM|P_FLAGLIST,
(char_u *)&p_shm, PV_NONE,
{(char_u *)"", (char_u *)"filnxtToO"}
SCRIPTID_INIT},
{"shortname", "sn", P_BOOL|P_VI_DEF,
(char_u *)&p_sn, PV_SN,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"showbreak", "sbr", P_STRING|P_VI_DEF|P_RALL,
#ifdef FEAT_LINEBREAK
(char_u *)&p_sbr, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"showcmd", "sc", P_BOOL|P_VIM,
#ifdef FEAT_CMDL_INFO
(char_u *)&p_sc, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE,
#ifdef UNIX
(char_u *)FALSE
#else
(char_u *)TRUE
#endif
} SCRIPTID_INIT},
{"showfulltag", "sft", P_BOOL|P_VI_DEF,
(char_u *)&p_sft, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"showmatch", "sm", P_BOOL|P_VI_DEF,
(char_u *)&p_sm, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"showmode", "smd", P_BOOL|P_VIM,
(char_u *)&p_smd, PV_NONE,
{(char_u *)FALSE, (char_u *)TRUE} SCRIPTID_INIT},
{"showtabline", "stal", P_NUM|P_VI_DEF|P_RALL,
#ifdef FEAT_WINDOWS
(char_u *)&p_stal, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)1L, (char_u *)0L} SCRIPTID_INIT},
{"sidescroll", "ss", P_NUM|P_VI_DEF,
(char_u *)&p_ss, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"sidescrolloff", "siso", P_NUM|P_VI_DEF|P_VIM|P_RBUF,
(char_u *)&p_siso, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"signcolumn", "scl", P_STRING|P_ALLOCED|P_VI_DEF|P_RWIN,
#ifdef FEAT_SIGNS
(char_u *)VAR_WIN, PV_SCL,
{(char_u *)"auto", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"slowopen", "slow", P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"smartcase", "scs", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_scs, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"smartindent", "si", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_SMARTINDENT
(char_u *)&p_si, PV_SI,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"smarttab", "sta", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_sta, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"softtabstop", "sts", P_NUM|P_VI_DEF|P_VIM,
(char_u *)&p_sts, PV_STS,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"sourceany", NULL, P_BOOL|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"spell", NULL, P_BOOL|P_VI_DEF|P_RWIN,
#ifdef FEAT_SPELL
(char_u *)VAR_WIN, PV_SPELL,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"spellcapcheck", "spc", P_STRING|P_ALLOCED|P_VI_DEF|P_RBUF,
#ifdef FEAT_SPELL
(char_u *)&p_spc, PV_SPC,
{(char_u *)"[.?!]\\_[\\])'\" ]\\+", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"spellfile", "spf", P_STRING|P_EXPAND|P_ALLOCED|P_VI_DEF|P_SECURE
|P_ONECOMMA,
#ifdef FEAT_SPELL
(char_u *)&p_spf, PV_SPF,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"spelllang", "spl", P_STRING|P_ALLOCED|P_VI_DEF|P_ONECOMMA
|P_RBUF|P_EXPAND,
#ifdef FEAT_SPELL
(char_u *)&p_spl, PV_SPL,
{(char_u *)"en", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"spellsuggest", "sps", P_STRING|P_VI_DEF|P_EXPAND|P_SECURE|P_ONECOMMA,
#ifdef FEAT_SPELL
(char_u *)&p_sps, PV_NONE,
{(char_u *)"best", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"splitbelow", "sb", P_BOOL|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_sb, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"splitright", "spr", P_BOOL|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_spr, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"startofline", "sol", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_sol, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"statusline" ,"stl", P_STRING|P_VI_DEF|P_ALLOCED|P_RSTAT,
#ifdef FEAT_STL_OPT
(char_u *)&p_stl, PV_STL,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"suffixes", "su", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
(char_u *)&p_su, PV_NONE,
{(char_u *)".bak,~,.o,.h,.info,.swp,.obj",
(char_u *)0L} SCRIPTID_INIT},
{"suffixesadd", "sua", P_STRING|P_VI_DEF|P_ALLOCED|P_ONECOMMA|P_NODUP,
#ifdef FEAT_SEARCHPATH
(char_u *)&p_sua, PV_SUA,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"swapfile", "swf", P_BOOL|P_VI_DEF|P_RSTAT,
(char_u *)&p_swf, PV_SWF,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"swapsync", "sws", P_STRING|P_VI_DEF,
(char_u *)&p_sws, PV_NONE,
{(char_u *)"fsync", (char_u *)0L} SCRIPTID_INIT},
{"switchbuf", "swb", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
(char_u *)&p_swb, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"synmaxcol", "smc", P_NUM|P_VI_DEF|P_RBUF,
#ifdef FEAT_SYN_HL
(char_u *)&p_smc, PV_SMC,
{(char_u *)3000L, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"syntax", "syn", P_STRING|P_ALLOCED|P_VI_DEF|P_NOGLOB|P_NFNAME,
#ifdef FEAT_SYN_HL
(char_u *)&p_syn, PV_SYN,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"tabline", "tal", P_STRING|P_VI_DEF|P_RALL,
#ifdef FEAT_STL_OPT
(char_u *)&p_tal, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"tabpagemax", "tpm", P_NUM|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_tpm, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)10L, (char_u *)0L} SCRIPTID_INIT},
{"tabstop", "ts", P_NUM|P_VI_DEF|P_RBUF,
(char_u *)&p_ts, PV_TS,
{(char_u *)8L, (char_u *)0L} SCRIPTID_INIT},
{"tagbsearch", "tbs", P_BOOL|P_VI_DEF,
(char_u *)&p_tbs, PV_NONE,
#ifdef VMS /* binary searching doesn't appear to work on VMS */
{(char_u *)0L, (char_u *)0L}
#else
{(char_u *)TRUE, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"tagcase", "tc", P_STRING|P_VIM,
(char_u *)&p_tc, PV_TC,
{(char_u *)"followic", (char_u *)"followic"} SCRIPTID_INIT},
{"taglength", "tl", P_NUM|P_VI_DEF,
(char_u *)&p_tl, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"tagrelative", "tr", P_BOOL|P_VIM,
(char_u *)&p_tr, PV_NONE,
{(char_u *)FALSE, (char_u *)TRUE} SCRIPTID_INIT},
{"tags", "tag", P_STRING|P_EXPAND|P_VI_DEF|P_ONECOMMA|P_NODUP,
(char_u *)&p_tags, PV_TAGS,
{
#if defined(FEAT_EMACS_TAGS) && !defined(CASE_INSENSITIVE_FILENAME)
(char_u *)"./tags,./TAGS,tags,TAGS",
#else
(char_u *)"./tags,tags",
#endif
(char_u *)0L} SCRIPTID_INIT},
{"tagstack", "tgst", P_BOOL|P_VI_DEF,
(char_u *)&p_tgst, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
#if defined(DYNAMIC_TCL)
{"tcldll", NULL, P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_tcldll, PV_NONE,
{(char_u *)DYNAMIC_TCL_DLL, (char_u *)0L}
SCRIPTID_INIT},
#endif
{"term", NULL, P_STRING|P_EXPAND|P_NODEFAULT|P_NO_MKRC|P_VI_DEF|P_RALL,
(char_u *)&T_NAME, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"termbidi", "tbidi", P_BOOL|P_VI_DEF,
#ifdef FEAT_ARABIC
(char_u *)&p_tbidi, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"termencoding", "tenc", P_STRING|P_VI_DEF|P_RCLR,
#ifdef FEAT_MBYTE
(char_u *)&p_tenc, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"termguicolors", "tgc", P_BOOL|P_VI_DEF|P_VIM|P_RCLR,
#ifdef FEAT_TERMGUICOLORS
(char_u *)&p_tgc, PV_NONE,
{(char_u *)FALSE, (char_u *)FALSE}
#else
(char_u*)NULL, PV_NONE,
{(char_u *)FALSE, (char_u *)FALSE}
#endif
SCRIPTID_INIT},
{"terse", NULL, P_BOOL|P_VI_DEF,
(char_u *)&p_terse, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"textauto", "ta", P_BOOL|P_VIM,
(char_u *)&p_ta, PV_NONE,
{(char_u *)DFLT_TEXTAUTO, (char_u *)TRUE}
SCRIPTID_INIT},
{"textmode", "tx", P_BOOL|P_VI_DEF|P_NO_MKRC,
(char_u *)&p_tx, PV_TX,
{
#ifdef USE_CRNL
(char_u *)TRUE,
#else
(char_u *)FALSE,
#endif
(char_u *)0L} SCRIPTID_INIT},
{"textwidth", "tw", P_NUM|P_VI_DEF|P_VIM|P_RBUF,
(char_u *)&p_tw, PV_TW,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"thesaurus", "tsr", P_STRING|P_EXPAND|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_INS_EXPAND
(char_u *)&p_tsr, PV_TSR,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"tildeop", "top", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_to, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"timeout", "to", P_BOOL|P_VI_DEF,
(char_u *)&p_timeout, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"timeoutlen", "tm", P_NUM|P_VI_DEF,
(char_u *)&p_tm, PV_NONE,
{(char_u *)1000L, (char_u *)0L} SCRIPTID_INIT},
{"title", NULL, P_BOOL|P_VI_DEF,
#ifdef FEAT_TITLE
(char_u *)&p_title, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"titlelen", NULL, P_NUM|P_VI_DEF,
#ifdef FEAT_TITLE
(char_u *)&p_titlelen, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)85L, (char_u *)0L} SCRIPTID_INIT},
{"titleold", NULL, P_STRING|P_VI_DEF|P_GETTEXT|P_SECURE|P_NO_MKRC,
#ifdef FEAT_TITLE
(char_u *)&p_titleold, PV_NONE,
{(char_u *)N_("Thanks for flying Vim"),
(char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"titlestring", NULL, P_STRING|P_VI_DEF,
#ifdef FEAT_TITLE
(char_u *)&p_titlestring, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
#if defined(FEAT_TOOLBAR) && !defined(FEAT_GUI_W32)
{"toolbar", "tb", P_STRING|P_ONECOMMA|P_VI_DEF|P_NODUP,
(char_u *)&p_toolbar, PV_NONE,
{(char_u *)"icons,tooltips", (char_u *)0L}
SCRIPTID_INIT},
#endif
#if defined(FEAT_TOOLBAR) && defined(FEAT_GUI_GTK)
{"toolbariconsize", "tbis", P_STRING|P_VI_DEF,
(char_u *)&p_tbis, PV_NONE,
{(char_u *)"small", (char_u *)0L} SCRIPTID_INIT},
#endif
{"ttimeout", NULL, P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_ttimeout, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"ttimeoutlen", "ttm", P_NUM|P_VI_DEF,
(char_u *)&p_ttm, PV_NONE,
{(char_u *)-1L, (char_u *)0L} SCRIPTID_INIT},
{"ttybuiltin", "tbi", P_BOOL|P_VI_DEF,
(char_u *)&p_tbi, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"ttyfast", "tf", P_BOOL|P_NO_MKRC|P_VI_DEF,
(char_u *)&p_tf, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"ttymouse", "ttym", P_STRING|P_NODEFAULT|P_NO_MKRC|P_VI_DEF,
#if defined(FEAT_MOUSE) && (defined(UNIX) || defined(VMS))
(char_u *)&p_ttym, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"ttyscroll", "tsl", P_NUM|P_VI_DEF,
(char_u *)&p_ttyscroll, PV_NONE,
{(char_u *)999L, (char_u *)0L} SCRIPTID_INIT},
{"ttytype", "tty", P_STRING|P_EXPAND|P_NODEFAULT|P_NO_MKRC|P_VI_DEF|P_RALL,
(char_u *)&T_NAME, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"undodir", "udir", P_STRING|P_EXPAND|P_ONECOMMA|P_NODUP|P_SECURE
|P_VI_DEF,
#ifdef FEAT_PERSISTENT_UNDO
(char_u *)&p_udir, PV_NONE,
{(char_u *)".", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"undofile", "udf", P_BOOL|P_VI_DEF|P_VIM,
#ifdef FEAT_PERSISTENT_UNDO
(char_u *)&p_udf, PV_UDF,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"undolevels", "ul", P_NUM|P_VI_DEF,
(char_u *)&p_ul, PV_UL,
{
#if defined(UNIX) || defined(WIN3264) || defined(VMS)
(char_u *)1000L,
#else
(char_u *)100L,
#endif
(char_u *)0L} SCRIPTID_INIT},
{"undoreload", "ur", P_NUM|P_VI_DEF,
(char_u *)&p_ur, PV_NONE,
				    {(char_u *)10000L, (char_u *)0L} SCRIPTID_INIT},
{"updatecount", "uc", P_NUM|P_VI_DEF,
(char_u *)&p_uc, PV_NONE,
{(char_u *)200L, (char_u *)0L} SCRIPTID_INIT},
{"updatetime", "ut", P_NUM|P_VI_DEF,
(char_u *)&p_ut, PV_NONE,
{(char_u *)4000L, (char_u *)0L} SCRIPTID_INIT},
{"verbose", "vbs", P_NUM|P_VI_DEF,
(char_u *)&p_verbose, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"verbosefile", "vfile", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
(char_u *)&p_vfile, PV_NONE,
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"viewdir", "vdir", P_STRING|P_EXPAND|P_VI_DEF|P_SECURE,
#ifdef FEAT_SESSION
(char_u *)&p_vdir, PV_NONE,
{(char_u *)DFLT_VDIR, (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"viewoptions", "vop", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_SESSION
(char_u *)&p_vop, PV_NONE,
{(char_u *)"folds,options,cursor", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"viminfo", "vi", P_STRING|P_ONECOMMA|P_NODUP|P_SECURE,
#ifdef FEAT_VIMINFO
(char_u *)&p_viminfo, PV_NONE,
#if defined(MSWIN)
{(char_u *)"", (char_u *)"'100,<50,s10,h,rA:,rB:"}
#else
# ifdef AMIGA
{(char_u *)"",
(char_u *)"'100,<50,s10,h,rdf0:,rdf1:,rdf2:"}
# else
{(char_u *)"", (char_u *)"'100,<50,s10,h"}
# endif
#endif
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"virtualedit", "ve", P_STRING|P_ONECOMMA|P_NODUP|P_VI_DEF
|P_VIM|P_CURSWANT,
#ifdef FEAT_VIRTUALEDIT
(char_u *)&p_ve, PV_NONE,
{(char_u *)"", (char_u *)""}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"visualbell", "vb", P_BOOL|P_VI_DEF,
(char_u *)&p_vb, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"w300", NULL, P_NUM|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"w1200", NULL, P_NUM|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"w9600", NULL, P_NUM|P_VI_DEF,
(char_u *)NULL, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"warn", NULL, P_BOOL|P_VI_DEF,
(char_u *)&p_warn, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"weirdinvert", "wiv", P_BOOL|P_VI_DEF|P_RCLR,
(char_u *)&p_wiv, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"whichwrap", "ww", P_STRING|P_VIM|P_ONECOMMA|P_FLAGLIST,
(char_u *)&p_ww, PV_NONE,
{(char_u *)"", (char_u *)"b,s"} SCRIPTID_INIT},
{"wildchar", "wc", P_NUM|P_VIM,
(char_u *)&p_wc, PV_NONE,
{(char_u *)(long)Ctrl_E, (char_u *)(long)TAB}
SCRIPTID_INIT},
{"wildcharm", "wcm", P_NUM|P_VI_DEF,
(char_u *)&p_wcm, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"wildignore", "wig", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
#ifdef FEAT_WILDIGN
(char_u *)&p_wig, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
{"wildignorecase", "wic", P_BOOL|P_VI_DEF,
(char_u *)&p_wic, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"wildmenu", "wmnu", P_BOOL|P_VI_DEF,
#ifdef FEAT_WILDMENU
(char_u *)&p_wmnu, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"wildmode", "wim", P_STRING|P_VI_DEF|P_ONECOMMA|P_NODUP,
(char_u *)&p_wim, PV_NONE,
{(char_u *)"full", (char_u *)0L} SCRIPTID_INIT},
{"wildoptions", "wop", P_STRING|P_VI_DEF,
#ifdef FEAT_CMDL_COMPL
(char_u *)&p_wop, PV_NONE,
{(char_u *)"", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"winaltkeys", "wak", P_STRING|P_VI_DEF,
#ifdef FEAT_WAK
(char_u *)&p_wak, PV_NONE,
{(char_u *)"menu", (char_u *)0L}
#else
(char_u *)NULL, PV_NONE,
{(char_u *)NULL, (char_u *)0L}
#endif
SCRIPTID_INIT},
{"window", "wi", P_NUM|P_VI_DEF,
(char_u *)&p_window, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"winheight", "wh", P_NUM|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_wh, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)1L, (char_u *)0L} SCRIPTID_INIT},
{"winfixheight", "wfh", P_BOOL|P_VI_DEF|P_RSTAT,
#ifdef FEAT_WINDOWS
(char_u *)VAR_WIN, PV_WFH,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"winfixwidth", "wfw", P_BOOL|P_VI_DEF|P_RSTAT,
#ifdef FEAT_WINDOWS
(char_u *)VAR_WIN, PV_WFW,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"winminheight", "wmh", P_NUM|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_wmh, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)1L, (char_u *)0L} SCRIPTID_INIT},
{"winminwidth", "wmw", P_NUM|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_wmw, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)1L, (char_u *)0L} SCRIPTID_INIT},
{"winwidth", "wiw", P_NUM|P_VI_DEF,
#ifdef FEAT_WINDOWS
(char_u *)&p_wiw, PV_NONE,
#else
(char_u *)NULL, PV_NONE,
#endif
{(char_u *)20L, (char_u *)0L} SCRIPTID_INIT},
{"wrap", NULL, P_BOOL|P_VI_DEF|P_RWIN,
(char_u *)VAR_WIN, PV_WRAP,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"wrapmargin", "wm", P_NUM|P_VI_DEF,
(char_u *)&p_wm, PV_WM,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
{"wrapscan", "ws", P_BOOL|P_VI_DEF,
(char_u *)&p_ws, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"write", NULL, P_BOOL|P_VI_DEF,
(char_u *)&p_write, PV_NONE,
{(char_u *)TRUE, (char_u *)0L} SCRIPTID_INIT},
{"writeany", "wa", P_BOOL|P_VI_DEF,
(char_u *)&p_wa, PV_NONE,
{(char_u *)FALSE, (char_u *)0L} SCRIPTID_INIT},
{"writebackup", "wb", P_BOOL|P_VI_DEF|P_VIM,
(char_u *)&p_wb, PV_NONE,
{
#ifdef FEAT_WRITEBACKUP
(char_u *)TRUE,
#else
(char_u *)FALSE,
#endif
(char_u *)0L} SCRIPTID_INIT},
{"writedelay", "wd", P_NUM|P_VI_DEF,
(char_u *)&p_wd, PV_NONE,
{(char_u *)0L, (char_u *)0L} SCRIPTID_INIT},
/* terminal output codes */
#define p_term(sss, vvv) {sss, NULL, P_STRING|P_VI_DEF|P_RALL|P_SECURE, \
(char_u *)&vvv, PV_NONE, \
{(char_u *)"", (char_u *)0L} SCRIPTID_INIT},
p_term("t_AB", T_CAB)
p_term("t_AF", T_CAF)
p_term("t_AL", T_CAL)
p_term("t_al", T_AL)
p_term("t_bc", T_BC)
p_term("t_cd", T_CD)
p_term("t_ce", T_CE)
p_term("t_cl", T_CL)
p_term("t_cm", T_CM)
p_term("t_Ce", T_UCE)
p_term("t_Co", T_CCO)
p_term("t_CS", T_CCS)
p_term("t_Cs", T_UCS)
p_term("t_cs", T_CS)
#ifdef FEAT_WINDOWS
p_term("t_CV", T_CSV)
#endif
p_term("t_da", T_DA)
p_term("t_db", T_DB)
p_term("t_DL", T_CDL)
p_term("t_dl", T_DL)
p_term("t_EI", T_CEI)
p_term("t_fs", T_FS)
p_term("t_IE", T_CIE)
p_term("t_IS", T_CIS)
p_term("t_ke", T_KE)
p_term("t_ks", T_KS)
p_term("t_le", T_LE)
p_term("t_mb", T_MB)
p_term("t_md", T_MD)
p_term("t_me", T_ME)
p_term("t_mr", T_MR)
p_term("t_ms", T_MS)
p_term("t_nd", T_ND)
p_term("t_op", T_OP)
p_term("t_RB", T_RBG)
p_term("t_RI", T_CRI)
p_term("t_RV", T_CRV)
p_term("t_Sb", T_CSB)
p_term("t_se", T_SE)
p_term("t_Sf", T_CSF)
p_term("t_SI", T_CSI)
p_term("t_so", T_SO)
p_term("t_SR", T_CSR)
p_term("t_sr", T_SR)
p_term("t_te", T_TE)
p_term("t_ti", T_TI)
p_term("t_ts", T_TS)
p_term("t_u7", T_U7)
p_term("t_ue", T_UE)
p_term("t_us", T_US)
p_term("t_ut", T_UT)
p_term("t_vb", T_VB)
p_term("t_ve", T_VE)
p_term("t_vi", T_VI)
p_term("t_vs", T_VS)
p_term("t_WP", T_CWP)
p_term("t_WS", T_CWS)
p_term("t_xn", T_XN)
p_term("t_xs", T_XS)
p_term("t_ZH", T_CZH)
p_term("t_ZR", T_CZR)
p_term("t_8f", T_8F)
p_term("t_8b", T_8B)
/* terminal key codes are not in here */
/* end marker */
{NULL, NULL, 0, NULL, PV_NONE, {NULL, NULL} SCRIPTID_INIT}
};
#define PARAM_COUNT (sizeof(options) / sizeof(struct vimoption))
#ifdef FEAT_MBYTE
static char *(p_ambw_values[]) = {"single", "double", NULL};
#endif
static char *(p_bg_values[]) = {"light", "dark", NULL};
static char *(p_nf_values[]) = {"bin", "octal", "hex", "alpha", NULL};
static char *(p_ff_values[]) = {FF_UNIX, FF_DOS, FF_MAC, NULL};
#ifdef FEAT_CRYPT
static char *(p_cm_values[]) = {"zip", "blowfish", "blowfish2", NULL};
#endif
#ifdef FEAT_CMDL_COMPL
static char *(p_wop_values[]) = {"tagfile", NULL};
#endif
#ifdef FEAT_WAK
static char *(p_wak_values[]) = {"yes", "menu", "no", NULL};
#endif
static char *(p_mousem_values[]) = {"extend", "popup", "popup_setpos", "mac", NULL};
static char *(p_sel_values[]) = {"inclusive", "exclusive", "old", NULL};
static char *(p_slm_values[]) = {"mouse", "key", "cmd", NULL};
static char *(p_km_values[]) = {"startsel", "stopsel", NULL};
#ifdef FEAT_BROWSE
static char *(p_bsdir_values[]) = {"current", "last", "buffer", NULL};
#endif
#ifdef FEAT_SCROLLBIND
static char *(p_scbopt_values[]) = {"ver", "hor", "jump", NULL};
#endif
static char *(p_debug_values[]) = {"msg", "throw", "beep", NULL};
#ifdef FEAT_WINDOWS
static char *(p_ead_values[]) = {"both", "ver", "hor", NULL};
#endif
#if defined(FEAT_QUICKFIX)
# ifdef FEAT_AUTOCMD
static char *(p_buftype_values[]) = {"nofile", "nowrite", "quickfix", "help", "acwrite", NULL};
# else
static char *(p_buftype_values[]) = {"nofile", "nowrite", "quickfix", "help", NULL};
# endif
static char *(p_bufhidden_values[]) = {"hide", "unload", "delete", "wipe", NULL};
#endif
static char *(p_bs_values[]) = {"indent", "eol", "start", NULL};
#ifdef FEAT_FOLDING
static char *(p_fdm_values[]) = {"manual", "expr", "marker", "indent", "syntax",
# ifdef FEAT_DIFF
"diff",
# endif
NULL};
static char *(p_fcl_values[]) = {"all", NULL};
#endif
#ifdef FEAT_INS_EXPAND
static char *(p_cot_values[]) = {"menu", "menuone", "longest", "preview", "noinsert", "noselect", NULL};
#endif
#ifdef FEAT_SIGNS
static char *(p_scl_values[]) = {"yes", "no", "auto", NULL};
#endif
static void set_option_default(int opt_idx, int opt_flags, int compatible);
static void set_options_default(int opt_flags);
static char_u *term_bg_default(void);
static void did_set_option(int opt_idx, int opt_flags, int new_value);
static char_u *illegal_char(char_u *, int);
static int string_to_key(char_u *arg);
#ifdef FEAT_CMDWIN
static char_u *check_cedit(void);
#endif
#ifdef FEAT_TITLE
static void did_set_title(int icon);
#endif
static char_u *option_expand(int opt_idx, char_u *val);
static void didset_options(void);
static void didset_options2(void);
static void check_string_option(char_u **pp);
#if defined(FEAT_EVAL) || defined(PROTO)
static long_u *insecure_flag(int opt_idx, int opt_flags);
#else
# define insecure_flag(opt_idx, opt_flags) (&options[opt_idx].flags)
#endif
static void set_string_option_global(int opt_idx, char_u **varp);
static char_u *set_string_option(int opt_idx, char_u *value, int opt_flags);
static char_u *did_set_string_option(int opt_idx, char_u **varp, int new_value_alloced, char_u *oldval, char_u *errbuf, int opt_flags);
static char_u *set_chars_option(char_u **varp);
#ifdef FEAT_SYN_HL
static int int_cmp(const void *a, const void *b);
#endif
#ifdef FEAT_CLIPBOARD
static char_u *check_clipboard_option(void);
#endif
#ifdef FEAT_SPELL
static char_u *did_set_spell_option(int is_spellfile);
static char_u *compile_cap_prog(synblock_T *synblock);
#endif
#ifdef FEAT_EVAL
static void set_option_scriptID_idx(int opt_idx, int opt_flags, int id);
#endif
static char_u *set_bool_option(int opt_idx, char_u *varp, int value, int opt_flags);
static char_u *set_num_option(int opt_idx, char_u *varp, long value, char_u *errbuf, size_t errbuflen, int opt_flags);
static void check_redraw(long_u flags);
static int findoption(char_u *);
static int find_key_option(char_u *);
static void showoptions(int all, int opt_flags);
static int optval_default(struct vimoption *, char_u *varp);
static void showoneopt(struct vimoption *, int opt_flags);
static int put_setstring(FILE *fd, char *cmd, char *name, char_u **valuep, int expand);
static int put_setnum(FILE *fd, char *cmd, char *name, long *valuep);
static int put_setbool(FILE *fd, char *cmd, char *name, int value);
static int istermoption(struct vimoption *);
static char_u *get_varp_scope(struct vimoption *p, int opt_flags);
static char_u *get_varp(struct vimoption *);
static void option_value2string(struct vimoption *, int opt_flags);
static void check_winopt(winopt_T *wop);
static int wc_use_keyname(char_u *varp, long *wcp);
#ifdef FEAT_LANGMAP
static void langmap_init(void);
static void langmap_set(void);
#endif
static void paste_option_changed(void);
static void compatible_set(void);
#ifdef FEAT_LINEBREAK
static void fill_breakat_flags(void);
#endif
static int opt_strings_flags(char_u *val, char **values, unsigned *flagp, int list);
static int check_opt_strings(char_u *val, char **values, int list);
static int check_opt_wim(void);
#ifdef FEAT_LINEBREAK
static int briopt_check(win_T *wp);
#endif
/*
* Initialize the options, first part.
*
* Called only once from main(), just after creating the first buffer.
*/
void
set_init_1(void)
{
char_u *p;
int opt_idx;
long_u n;
#ifdef FEAT_LANGMAP
langmap_init();
#endif
/* Be Vi compatible by default */
p_cp = TRUE;
/* Use POSIX compatibility when $VIM_POSIX is set. */
if (mch_getenv((char_u *)"VIM_POSIX") != NULL)
{
set_string_default("cpo", (char_u *)CPO_ALL);
set_string_default("shm", (char_u *)"A");
}
/*
* Find default value for 'shell' option.
* Don't use it if it is empty.
*/
if (((p = mch_getenv((char_u *)"SHELL")) != NULL && *p != NUL)
#if defined(MSWIN)
|| ((p = mch_getenv((char_u *)"COMSPEC")) != NULL && *p != NUL)
# ifdef WIN3264
|| ((p = (char_u *)default_shell()) != NULL && *p != NUL)
# endif
#endif
)
set_string_default("sh", p);
#ifdef FEAT_WILDIGN
/*
* Set the default for 'backupskip' to include environment variables for
* temp files.
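* Example (UNIX): with only $TMPDIR set, to "/var/tmp", the default
* for 'backupskip' becomes "/tmp/*,/var/tmp/*".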
*/
{
# ifdef UNIX
static char *(names[4]) = {"", "TMPDIR", "TEMP", "TMP"};
# else
static char *(names[3]) = {"TMPDIR", "TEMP", "TMP"};
# endif
int len;
garray_T ga;
int mustfree;
ga_init2(&ga, 1, 100);
for (n = 0; n < (long)(sizeof(names) / sizeof(char *)); ++n)
{
mustfree = FALSE;
# ifdef UNIX
if (*names[n] == NUL)
p = (char_u *)"/tmp";
else
# endif
p = vim_getenv((char_u *)names[n], &mustfree);
if (p != NULL && *p != NUL)
{
/* First time count the NUL, otherwise count the ','. */
len = (int)STRLEN(p) + 3;
if (ga_grow(&ga, len) == OK)
{
if (ga.ga_len > 0)
STRCAT(ga.ga_data, ",");
STRCAT(ga.ga_data, p);
add_pathsep(ga.ga_data);
STRCAT(ga.ga_data, "*");
ga.ga_len += len;
}
}
if (mustfree)
vim_free(p);
}
if (ga.ga_data != NULL)
{
set_string_default("bsk", ga.ga_data);
vim_free(ga.ga_data);
}
}
#endif
/*
* 'maxmemtot' and 'maxmem' may have to be adjusted for available memory
*/
opt_idx = findoption((char_u *)"maxmemtot");
if (opt_idx >= 0)
{
#if !defined(HAVE_AVAIL_MEM) && !defined(HAVE_TOTAL_MEM)
if (options[opt_idx].def_val[VI_DEFAULT] == (char_u *)0L)
#endif
{
#ifdef HAVE_AVAIL_MEM
/* Use amount of memory available at this moment. */
n = (mch_avail_mem(FALSE) >> 1);
#else
# ifdef HAVE_TOTAL_MEM
/* Use amount of memory available to Vim. */
n = (mch_total_mem(FALSE) >> 1);
# else
n = (0x7fffffff >> 11);
# endif
#endif
options[opt_idx].def_val[VI_DEFAULT] = (char_u *)n;
opt_idx = findoption((char_u *)"maxmem");
if (opt_idx >= 0)
{
#if !defined(HAVE_AVAIL_MEM) && !defined(HAVE_TOTAL_MEM)
if ((long)(long_i)options[opt_idx].def_val[VI_DEFAULT] > (long)n
|| (long)(long_i)options[opt_idx].def_val[VI_DEFAULT] == 0L)
#endif
options[opt_idx].def_val[VI_DEFAULT] = (char_u *)n;
}
}
}
#ifdef FEAT_SEARCHPATH
{
char_u *cdpath;
char_u *buf;
int i;
int j;
int mustfree = FALSE;
/* Initialize the 'cdpath' option's default value. */
cdpath = vim_getenv((char_u *)"CDPATH", &mustfree);
if (cdpath != NULL)
{
buf = alloc((unsigned)((STRLEN(cdpath) << 1) + 2));
if (buf != NULL)
{
buf[0] = ','; /* start with ",", current dir first */
j = 1;
for (i = 0; cdpath[i] != NUL; ++i)
{
if (vim_ispathlistsep(cdpath[i]))
buf[j++] = ',';
else
{
if (cdpath[i] == ' ' || cdpath[i] == ',')
buf[j++] = '\\';
buf[j++] = cdpath[i];
}
}
buf[j] = NUL;
opt_idx = findoption((char_u *)"cdpath");
if (opt_idx >= 0)
{
options[opt_idx].def_val[VI_DEFAULT] = buf;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
else
vim_free(buf); /* cannot happen */
}
if (mustfree)
vim_free(cdpath);
}
}
#endif
#if defined(FEAT_POSTSCRIPT) && (defined(MSWIN) || defined(VMS) || defined(EBCDIC) || defined(MAC) || defined(hpux))
/* Set print encoding on platforms that don't default to latin1 */
set_string_default("penc",
# if defined(MSWIN)
(char_u *)"cp1252"
# else
# ifdef VMS
(char_u *)"dec-mcs"
# else
# ifdef EBCDIC
(char_u *)"ebcdic-uk"
# else
# ifdef MAC
(char_u *)"mac-roman"
# else /* HPUX */
(char_u *)"hp-roman8"
# endif
# endif
# endif
# endif
);
#endif
#ifdef FEAT_POSTSCRIPT
/* 'printexpr' must be allocated to be able to evaluate it. */
set_string_default("pexpr",
# if defined(MSWIN)
(char_u *)"system('copy' . ' ' . v:fname_in . (&printdevice == '' ? ' LPT1:' : (' \"' . &printdevice . '\"'))) . delete(v:fname_in)"
# else
# ifdef VMS
(char_u *)"system('print/delete' . (&printdevice == '' ? '' : ' /queue=' . &printdevice) . ' ' . v:fname_in)"
# else
(char_u *)"system('lpr' . (&printdevice == '' ? '' : ' -P' . &printdevice) . ' ' . v:fname_in) . delete(v:fname_in) + v:shell_error"
# endif
# endif
);
#endif
/*
* Set all the options (except the terminal options) to their default
* value. Also set the global value for local options.
*/
set_options_default(0);
#ifdef FEAT_GUI
if (found_reverse_arg)
set_option_value((char_u *)"bg", 0L, (char_u *)"dark", 0);
#endif
curbuf->b_p_initialized = TRUE;
curbuf->b_p_ar = -1; /* no local 'autoread' value */
curbuf->b_p_ul = NO_LOCAL_UNDOLEVEL;
check_buf_options(curbuf);
check_win_options(curwin);
check_options();
/* Must be before option_expand(), because that one needs vim_isIDc() */
didset_options();
#ifdef FEAT_SPELL
/* Use the current chartab for the generic chartab. This is not in
* didset_options() because it only depends on 'encoding'. */
init_spell_chartab();
#endif
/*
* Expand environment variables and things like "~" for the defaults.
* If option_expand() returns non-NULL the variable is expanded. This can
* only happen for non-indirect options.
* Also set the default to the expanded value, so ":set" does not list
* them.
* Don't set the P_ALLOCED flag, because we don't want to free the
* default.
*/
for (opt_idx = 0; !istermoption(&options[opt_idx]); opt_idx++)
{
if ((options[opt_idx].flags & P_GETTEXT)
&& options[opt_idx].var != NULL)
p = (char_u *)_(*(char **)options[opt_idx].var);
else
p = option_expand(opt_idx, NULL);
if (p != NULL && (p = vim_strsave(p)) != NULL)
{
*(char_u **)options[opt_idx].var = p;
/* VIMEXP
* Defaults for all expanded options are currently the same for Vi
* and Vim. When this changes, add some code here! Also need to
* split P_DEF_ALLOCED in two.
*/
if (options[opt_idx].flags & P_DEF_ALLOCED)
vim_free(options[opt_idx].def_val[VI_DEFAULT]);
options[opt_idx].def_val[VI_DEFAULT] = p;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
}
save_file_ff(curbuf); /* Buffer is unchanged */
#if defined(FEAT_ARABIC)
/* Detect use of mlterm.
* Mlterm is a terminal emulator akin to xterm that has some special
* abilities (bidi namely).
* NOTE: mlterm's author is being asked to 'set' a variable
* instead of an environment variable due to inheritance.
*/
if (mch_getenv((char_u *)"MLTERM") != NULL)
set_option_value((char_u *)"tbidi", 1L, NULL, 0);
#endif
didset_options2();
#ifdef FEAT_MBYTE
# if defined(WIN3264) && defined(FEAT_GETTEXT)
/*
* If $LANG isn't set, try to get a good value for it. This makes the
* right language be used automatically. Don't do this for English.
*/
if (mch_getenv((char_u *)"LANG") == NULL)
{
char buf[20];
/* Could use LOCALE_SISO639LANGNAME, but it's not in Win95.
* LOCALE_SABBREVLANGNAME gives us three letters, like "enu", we use
* only the first two. */
n = GetLocaleInfo(LOCALE_USER_DEFAULT, LOCALE_SABBREVLANGNAME,
(LPTSTR)buf, 20);
if (n >= 2 && STRNICMP(buf, "en", 2) != 0)
{
/* There are a few exceptions (probably more) */
if (STRNICMP(buf, "cht", 3) == 0 || STRNICMP(buf, "zht", 3) == 0)
STRCPY(buf, "zh_TW");
else if (STRNICMP(buf, "chs", 3) == 0
|| STRNICMP(buf, "zhc", 3) == 0)
STRCPY(buf, "zh_CN");
else if (STRNICMP(buf, "jp", 2) == 0)
STRCPY(buf, "ja");
else
buf[2] = NUL; /* truncate to two-letter code */
vim_setenv((char_u *)"LANG", (char_u *)buf);
}
}
# else
# ifdef MACOS_CONVERT
/* Moved to os_mac_conv.c to avoid dependency problems. */
mac_lang_init();
# endif
# endif
/* enc_locale() will try to find the encoding of the current locale. */
p = enc_locale();
if (p != NULL)
{
char_u *save_enc;
/* Try setting 'encoding' and check if the value is valid.
* If not, go back to the default "latin1". */
save_enc = p_enc;
p_enc = p;
if (STRCMP(p_enc, "gb18030") == 0)
{
/* We don't support "gb18030", but "cp936" is a good substitute
* for practical purposes, thus use that. It is not made an alias,
* so that conversion between gb18030 and utf-8 remains possible. */
p_enc = vim_strsave((char_u *)"cp936");
vim_free(p);
}
if (mb_init() == NULL)
{
opt_idx = findoption((char_u *)"encoding");
if (opt_idx >= 0)
{
options[opt_idx].def_val[VI_DEFAULT] = p_enc;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
#if defined(MSWIN) || defined(MACOS) || defined(VMS)
if (STRCMP(p_enc, "latin1") == 0
# ifdef FEAT_MBYTE
|| enc_utf8
# endif
)
{
/* Adjust the default for 'isprint' and 'iskeyword' to match
* latin1. Also set the defaults for when 'nocompatible' is
* set. */
set_string_option_direct((char_u *)"isp", -1,
ISP_LATIN1, OPT_FREE, SID_NONE);
set_string_option_direct((char_u *)"isk", -1,
ISK_LATIN1, OPT_FREE, SID_NONE);
opt_idx = findoption((char_u *)"isp");
if (opt_idx >= 0)
options[opt_idx].def_val[VIM_DEFAULT] = ISP_LATIN1;
opt_idx = findoption((char_u *)"isk");
if (opt_idx >= 0)
options[opt_idx].def_val[VIM_DEFAULT] = ISK_LATIN1;
(void)init_chartab();
}
#endif
# if defined(WIN3264) && !defined(FEAT_GUI)
/* Win32 console: When GetACP() returns a different value from
* GetConsoleCP() set 'termencoding'. */
if (GetACP() != GetConsoleCP())
{
char buf[50];
sprintf(buf, "cp%ld", (long)GetConsoleCP());
p_tenc = vim_strsave((char_u *)buf);
if (p_tenc != NULL)
{
opt_idx = findoption((char_u *)"termencoding");
if (opt_idx >= 0)
{
options[opt_idx].def_val[VI_DEFAULT] = p_tenc;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
convert_setup(&input_conv, p_tenc, p_enc);
convert_setup(&output_conv, p_enc, p_tenc);
}
else
p_tenc = empty_option;
}
# endif
# if defined(WIN3264) && defined(FEAT_MBYTE)
/* $HOME may have characters in active code page. */
init_homedir();
# endif
}
else
{
vim_free(p_enc);
p_enc = save_enc;
}
}
#endif
#ifdef FEAT_MULTI_LANG
/* Set the default for 'helplang'. */
set_helplang_default(get_mess_lang());
#endif
}
/*
* Set an option to its default value.
* This does not take care of side effects!
*/
static void
set_option_default(
int opt_idx,
int opt_flags, /* OPT_FREE, OPT_LOCAL and/or OPT_GLOBAL */
int compatible) /* use Vi default value */
{
char_u *varp; /* pointer to variable for current option */
int dvi; /* index in def_val[] */
long_u flags;
long_u *flagsp;
int both = (opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0;
varp = get_varp_scope(&(options[opt_idx]), both ? OPT_LOCAL : opt_flags);
flags = options[opt_idx].flags;
if (varp != NULL) /* skip hidden option, nothing to do for it */
{
dvi = ((flags & P_VI_DEF) || compatible) ? VI_DEFAULT : VIM_DEFAULT;
if (flags & P_STRING)
{
/* Use set_string_option_direct() for local options to handle
* freeing and allocating the value. */
if (options[opt_idx].indir != PV_NONE)
set_string_option_direct(NULL, opt_idx,
options[opt_idx].def_val[dvi], opt_flags, 0);
else
{
if ((opt_flags & OPT_FREE) && (flags & P_ALLOCED))
free_string_option(*(char_u **)(varp));
*(char_u **)varp = options[opt_idx].def_val[dvi];
options[opt_idx].flags &= ~P_ALLOCED;
}
}
else if (flags & P_NUM)
{
if (options[opt_idx].indir == PV_SCROLL)
win_comp_scroll(curwin);
else
{
*(long *)varp = (long)(long_i)options[opt_idx].def_val[dvi];
/* May also set global value for local option. */
if (both)
*(long *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL) =
*(long *)varp;
}
}
else /* P_BOOL */
{
/* the cast to long is required for Manx C, long_i is needed for
* MSVC */
*(int *)varp = (int)(long)(long_i)options[opt_idx].def_val[dvi];
#ifdef UNIX
/* 'modeline' defaults to off for root */
if (options[opt_idx].indir == PV_ML && getuid() == ROOT_UID)
*(int *)varp = FALSE;
#endif
/* May also set global value for local option. */
if (both)
*(int *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL) =
*(int *)varp;
}
/* The default value is not insecure. */
flagsp = insecure_flag(opt_idx, opt_flags);
*flagsp = *flagsp & ~P_INSECURE;
}
#ifdef FEAT_EVAL
set_option_scriptID_idx(opt_idx, opt_flags, current_SID);
#endif
}
/*
* Set all options (except terminal options) to their default value.
* When "opt_flags" is non-zero skip 'encoding'.
*/
static void
set_options_default(
int opt_flags) /* OPT_FREE, OPT_LOCAL and/or OPT_GLOBAL */
{
int i;
#ifdef FEAT_WINDOWS
win_T *wp;
tabpage_T *tp;
#endif
for (i = 0; !istermoption(&options[i]); i++)
if (!(options[i].flags & P_NODEFAULT)
#if defined(FEAT_MBYTE) || defined(FEAT_CRYPT)
&& (opt_flags == 0
|| (TRUE
# if defined(FEAT_MBYTE)
&& options[i].var != (char_u *)&p_enc
# endif
# if defined(FEAT_CRYPT)
&& options[i].var != (char_u *)&p_cm
&& options[i].var != (char_u *)&p_key
# endif
))
#endif
)
set_option_default(i, opt_flags, p_cp);
#ifdef FEAT_WINDOWS
/* The 'scroll' option must be computed for all windows. */
FOR_ALL_TAB_WINDOWS(tp, wp)
win_comp_scroll(wp);
#else
win_comp_scroll(curwin);
#endif
#ifdef FEAT_CINDENT
parse_cino(curbuf);
#endif
}
/*
* Set the Vi-default value of a string option.
* Used for 'sh', 'backupskip' and 'term'.
*/
void
set_string_default(char *name, char_u *val)
{
char_u *p;
int opt_idx;
p = vim_strsave(val);
if (p != NULL) /* we don't want a NULL */
{
opt_idx = findoption((char_u *)name);
if (opt_idx >= 0)
{
if (options[opt_idx].flags & P_DEF_ALLOCED)
vim_free(options[opt_idx].def_val[VI_DEFAULT]);
options[opt_idx].def_val[VI_DEFAULT] = p;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
}
}
/*
* Set the Vi-default value of a number option.
* Used for 'lines' and 'columns'.
*/
void
set_number_default(char *name, long val)
{
int opt_idx;
opt_idx = findoption((char_u *)name);
if (opt_idx >= 0)
options[opt_idx].def_val[VI_DEFAULT] = (char_u *)(long_i)val;
}
#if defined(EXITFREE) || defined(PROTO)
/*
* Free all options.
*/
void
free_all_options(void)
{
int i;
for (i = 0; !istermoption(&options[i]); i++)
{
if (options[i].indir == PV_NONE)
{
/* global option: free value and default value. */
if (options[i].flags & P_ALLOCED && options[i].var != NULL)
free_string_option(*(char_u **)options[i].var);
if (options[i].flags & P_DEF_ALLOCED)
free_string_option(options[i].def_val[VI_DEFAULT]);
}
else if (options[i].var != VAR_WIN
&& (options[i].flags & P_STRING))
/* buffer-local option: free global value */
free_string_option(*(char_u **)options[i].var);
}
}
#endif
/*
* Initialize the options, part two: After getting Rows and Columns and
* setting 'term'.
*/
void
set_init_2(void)
{
int idx;
/*
* 'scroll' defaults to half the window height. Note that this default is
* wrong when the window height changes.
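* Example: with Rows == 24 the computed default for 'scroll' is 12.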
*/
set_number_default("scroll", (long)((long_u)Rows >> 1));
idx = findoption((char_u *)"scroll");
if (idx >= 0 && !(options[idx].flags & P_WAS_SET))
set_option_default(idx, OPT_LOCAL, p_cp);
comp_col();
/*
* 'window' is only for backwards compatibility with Vi.
* Default is Rows - 1.
*/
if (!option_was_set((char_u *)"window"))
p_window = Rows - 1;
set_number_default("window", Rows - 1);
/* For DOS console the default is always black. */
#if !((defined(WIN3264)) && !defined(FEAT_GUI))
/*
* If 'background' wasn't set by the user, try guessing the value,
* depending on the terminal name. Only need to check for terminals
* with a dark background, that can handle color.
*/
idx = findoption((char_u *)"bg");
if (idx >= 0 && !(options[idx].flags & P_WAS_SET)
&& *term_bg_default() == 'd')
{
set_string_option_direct(NULL, idx, (char_u *)"dark", OPT_FREE, 0);
/* don't mark it as set, when starting the GUI it may be
* changed again */
options[idx].flags &= ~P_WAS_SET;
}
#endif
#ifdef CURSOR_SHAPE
parse_shape_opt(SHAPE_CURSOR); /* set cursor shapes from 'guicursor' */
#endif
#ifdef FEAT_MOUSESHAPE
parse_shape_opt(SHAPE_MOUSE); /* set mouse shapes from 'mouseshape' */
#endif
#ifdef FEAT_PRINTER
(void)parse_printoptions(); /* parse 'printoptions' default value */
#endif
}
/*
* Return "dark" or "light" depending on the kind of terminal.
* This is just guessing! Recognized are:
* "linux" Linux console
* "screen.linux" Linux console with screen
* "cygwin" Cygwin shell
* "putty" Putty program
* We also check the COLORFGBG environment variable, which is set by
* rxvt and derivatives. This variable contains either two or three
* values separated by semicolons; we want the last value in either
* case. If this value is 0-6 or 8, our background is dark.
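* Example: COLORFGBG="15;default;0" has "0" as its last value, so we
* guess a dark background.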
*/
static char_u *
term_bg_default(void)
{
#if defined(WIN3264)
/* DOS console nearly always black */
return (char_u *)"dark";
#else
char_u *p;
if (STRCMP(T_NAME, "linux") == 0
|| STRCMP(T_NAME, "screen.linux") == 0
|| STRCMP(T_NAME, "cygwin") == 0
|| STRCMP(T_NAME, "putty") == 0
|| ((p = mch_getenv((char_u *)"COLORFGBG")) != NULL
&& (p = vim_strrchr(p, ';')) != NULL
&& ((p[1] >= '0' && p[1] <= '6') || p[1] == '8')
&& p[2] == NUL))
return (char_u *)"dark";
return (char_u *)"light";
#endif
}
/*
* Initialize the options, part three: After reading the .vimrc
*/
void
set_init_3(void)
{
#if defined(UNIX) || defined(WIN3264)
/*
* Set 'shellpipe' and 'shellredir', depending on the 'shell' option.
* This is done after other initializations, where 'shell' might have been
* set, but only if they have not been set before.
*/
char_u *p;
int idx_srr;
int do_srr;
# ifdef FEAT_QUICKFIX
int idx_sp;
int do_sp;
# endif
idx_srr = findoption((char_u *)"srr");
if (idx_srr < 0)
do_srr = FALSE;
else
do_srr = !(options[idx_srr].flags & P_WAS_SET);
# ifdef FEAT_QUICKFIX
idx_sp = findoption((char_u *)"sp");
if (idx_sp < 0)
do_sp = FALSE;
else
do_sp = !(options[idx_sp].flags & P_WAS_SET);
# endif
p = get_isolated_shell_name();
if (p != NULL)
{
/*
* Default for p_sp is "| tee", for p_srr is ">".
* For known shells it is changed here to include stderr.
*/
if ( fnamecmp(p, "csh") == 0
|| fnamecmp(p, "tcsh") == 0
# if defined(WIN3264) /* also check with .exe extension */
|| fnamecmp(p, "csh.exe") == 0
|| fnamecmp(p, "tcsh.exe") == 0
# endif
)
{
# if defined(FEAT_QUICKFIX)
if (do_sp)
{
# ifdef WIN3264
p_sp = (char_u *)">&";
# else
p_sp = (char_u *)"|& tee";
# endif
options[idx_sp].def_val[VI_DEFAULT] = p_sp;
}
# endif
if (do_srr)
{
p_srr = (char_u *)">&";
options[idx_srr].def_val[VI_DEFAULT] = p_srr;
}
}
else
/* Always use Bourne shell style redirection if we reach this */
if ( fnamecmp(p, "sh") == 0
|| fnamecmp(p, "ksh") == 0
|| fnamecmp(p, "mksh") == 0
|| fnamecmp(p, "pdksh") == 0
|| fnamecmp(p, "zsh") == 0
|| fnamecmp(p, "zsh-beta") == 0
|| fnamecmp(p, "bash") == 0
|| fnamecmp(p, "fish") == 0
# ifdef WIN3264
|| fnamecmp(p, "cmd") == 0
|| fnamecmp(p, "sh.exe") == 0
|| fnamecmp(p, "ksh.exe") == 0
|| fnamecmp(p, "mksh.exe") == 0
|| fnamecmp(p, "pdksh.exe") == 0
|| fnamecmp(p, "zsh.exe") == 0
|| fnamecmp(p, "zsh-beta.exe") == 0
|| fnamecmp(p, "bash.exe") == 0
|| fnamecmp(p, "cmd.exe") == 0
# endif
)
{
# if defined(FEAT_QUICKFIX)
if (do_sp)
{
# ifdef WIN3264
p_sp = (char_u *)">%s 2>&1";
# else
p_sp = (char_u *)"2>&1| tee";
# endif
options[idx_sp].def_val[VI_DEFAULT] = p_sp;
}
# endif
if (do_srr)
{
p_srr = (char_u *)">%s 2>&1";
options[idx_srr].def_val[VI_DEFAULT] = p_srr;
}
}
vim_free(p);
}
#endif
#if defined(WIN3264)
/*
* Set 'shellcmdflag', 'shellxquote', and 'shellquote' depending on the
* 'shell' option.
* This is done after other initializations, where 'shell' might have been
* set, but only if they have not been set before. Default for p_shcf is
* "/c", for p_shq is "". For "sh" like shells it is changed here to
* "-c" and "\"". And for Win32 we need to set p_sxq instead.
*/
if (strstr((char *)gettail(p_sh), "sh") != NULL)
{
int idx3;
idx3 = findoption((char_u *)"shcf");
if (idx3 >= 0 && !(options[idx3].flags & P_WAS_SET))
{
p_shcf = (char_u *)"-c";
options[idx3].def_val[VI_DEFAULT] = p_shcf;
}
# ifdef WIN3264
/* Somehow Win32 requires the quotes around the redirection too */
idx3 = findoption((char_u *)"sxq");
if (idx3 >= 0 && !(options[idx3].flags & P_WAS_SET))
{
p_sxq = (char_u *)"\"";
options[idx3].def_val[VI_DEFAULT] = p_sxq;
}
# else
idx3 = findoption((char_u *)"shq");
if (idx3 >= 0 && !(options[idx3].flags & P_WAS_SET))
{
p_shq = (char_u *)"\"";
options[idx3].def_val[VI_DEFAULT] = p_shq;
}
# endif
}
else if (strstr((char *)gettail(p_sh), "cmd.exe") != NULL)
{
int idx3;
/*
* cmd.exe on Windows will strip the first and last double quote given
* on the command line, e.g. most of the time things like:
* cmd /c "my path/to/echo" "my args to echo"
* become:
* my path/to/echo" "my args to echo
* when executed.
*
* To avoid this, set shellxquote to surround the command in
* parentheses. This appears to make most commands work, without
* breaking commands that worked previously, such as
* '"path with spaces/cmd" "a&b"'.
*/
idx3 = findoption((char_u *)"sxq");
if (idx3 >= 0 && !(options[idx3].flags & P_WAS_SET))
{
p_sxq = (char_u *)"(";
options[idx3].def_val[VI_DEFAULT] = p_sxq;
}
idx3 = findoption((char_u *)"shcf");
if (idx3 >= 0 && !(options[idx3].flags & P_WAS_SET))
{
p_shcf = (char_u *)"/c";
options[idx3].def_val[VI_DEFAULT] = p_shcf;
}
}
#endif
if (bufempty())
{
int idx_ffs = findoption((char_u *)"ffs");
/* Apply the first entry of 'fileformats' to the initial buffer. */
if (idx_ffs >= 0 && (options[idx_ffs].flags & P_WAS_SET))
set_fileformat(default_fileformat(), OPT_LOCAL);
}
#ifdef FEAT_TITLE
set_title_defaults();
#endif
}
#if defined(FEAT_MULTI_LANG) || defined(PROTO)
/*
* When 'helplang' is still at its default value, set it to "lang".
* Only the first two characters of "lang" are used.
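* Example: "de_DE.UTF-8" results in "de"; "zh_TW" is special-cased
* below to "tw".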
*/
void
set_helplang_default(char_u *lang)
{
int idx;
if (lang == NULL || STRLEN(lang) < 2) /* safety check */
return;
idx = findoption((char_u *)"hlg");
if (idx >= 0 && !(options[idx].flags & P_WAS_SET))
{
if (options[idx].flags & P_ALLOCED)
free_string_option(p_hlg);
p_hlg = vim_strsave(lang);
if (p_hlg == NULL)
p_hlg = empty_option;
else
{
/* zh_CN becomes "cn", zh_TW becomes "tw". */
if (STRNICMP(p_hlg, "zh_", 3) == 0 && STRLEN(p_hlg) >= 5)
{
p_hlg[0] = TOLOWER_ASC(p_hlg[3]);
p_hlg[1] = TOLOWER_ASC(p_hlg[4]);
}
p_hlg[2] = NUL;
}
options[idx].flags |= P_ALLOCED;
}
}
#endif
#ifdef FEAT_GUI
static char_u *gui_bg_default(void);
static char_u *
gui_bg_default(void)
{
if (gui_get_lightness(gui.back_pixel) < 127)
return (char_u *)"dark";
return (char_u *)"light";
}
/*
* Option initializations that can only be done after opening the GUI window.
*/
void
init_gui_options(void)
{
/* Set the 'background' option according to the lightness of the
* background color, unless the user has set it already. */
if (!option_was_set((char_u *)"bg") && STRCMP(p_bg, gui_bg_default()) != 0)
{
set_option_value((char_u *)"bg", 0L, gui_bg_default(), 0);
highlight_changed();
}
}
#endif
#ifdef FEAT_TITLE
/*
* 'title' and 'icon' only default to true if they have not been set or reset
* in .vimrc and we can read the old value.
* When 'title' and 'icon' have been reset in .vimrc, we won't even check if
* they can be reset. This reduces startup time when using X on a remote
* machine.
*/
void
set_title_defaults(void)
{
int idx1;
long val;
/*
* If GUI is (going to be) used, we can always set the window title and
* icon name. Saves a bit of time, because the X11 display server does
* not need to be contacted.
*/
idx1 = findoption((char_u *)"title");
if (idx1 >= 0 && !(options[idx1].flags & P_WAS_SET))
{
#ifdef FEAT_GUI
if (gui.starting || gui.in_use)
val = TRUE;
else
#endif
val = mch_can_restore_title();
options[idx1].def_val[VI_DEFAULT] = (char_u *)(long_i)val;
p_title = val;
}
idx1 = findoption((char_u *)"icon");
if (idx1 >= 0 && !(options[idx1].flags & P_WAS_SET))
{
#ifdef FEAT_GUI
if (gui.starting || gui.in_use)
val = TRUE;
else
#endif
val = mch_can_restore_icon();
options[idx1].def_val[VI_DEFAULT] = (char_u *)(long_i)val;
p_icon = val;
}
}
#endif
/*
* Parse 'arg' for option settings.
*
* 'arg' may be IObuff, but only when no errors can occur and the option
* does not need to be expanded with option_expand().
* "opt_flags":
* 0 for ":set"
* OPT_GLOBAL for ":setglobal"
* OPT_LOCAL for ":setlocal" and a modeline
* OPT_MODELINE for a modeline
* OPT_WINONLY to only set window-local options
* OPT_NOWIN to skip setting window-local options
*
* returns FAIL if an error is detected, OK otherwise
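*
* Example: do_set((char_u *)"number sw=4", 0) has the same effect as
* the user typing ":set number sw=4".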
*/
int
do_set(
char_u *arg, /* option string (may be written to!) */
int opt_flags)
{
int opt_idx;
char_u *errmsg;
char_u errbuf[80];
char_u *startarg;
int prefix; /* 1: nothing, 0: "no", 2: "inv" in front of name */
int nextchar; /* next non-white char after option name */
int afterchar; /* character just after option name */
int len;
int i;
varnumber_T value;
int key;
long_u flags; /* flags for current option */
char_u *varp = NULL; /* pointer to variable for current option */
int did_show = FALSE; /* already showed one value */
int adding; /* "opt+=arg" */
int prepending; /* "opt^=arg" */
int removing; /* "opt-=arg" */
int cp_val = 0;
char_u key_name[2];
if (*arg == NUL)
{
showoptions(0, opt_flags);
did_show = TRUE;
goto theend;
}
while (*arg != NUL) /* loop to process all options */
{
errmsg = NULL;
startarg = arg; /* remember for error message */
if (STRNCMP(arg, "all", 3) == 0 && !isalpha(arg[3])
&& !(opt_flags & OPT_MODELINE))
{
/*
* ":set all" show all options.
* ":set all&" set all options to their default value.
*/
arg += 3;
if (*arg == '&')
{
++arg;
/* Only for :set command set global value of local options. */
set_options_default(OPT_FREE | opt_flags);
didset_options();
didset_options2();
redraw_all_later(CLEAR);
}
else
{
showoptions(1, opt_flags);
did_show = TRUE;
}
}
else if (STRNCMP(arg, "termcap", 7) == 0 && !(opt_flags & OPT_MODELINE))
{
showoptions(2, opt_flags);
show_termcodes();
did_show = TRUE;
arg += 7;
}
else
{
prefix = 1;
if (STRNCMP(arg, "no", 2) == 0 && STRNCMP(arg, "novice", 6) != 0)
{
prefix = 0;
arg += 2;
}
else if (STRNCMP(arg, "inv", 3) == 0)
{
prefix = 2;
arg += 3;
}
/* find end of name */
key = 0;
if (*arg == '<')
{
nextchar = 0;
opt_idx = -1;
/* look out for <t_>;> */
if (arg[1] == 't' && arg[2] == '_' && arg[3] && arg[4])
len = 5;
else
{
len = 1;
while (arg[len] != NUL && arg[len] != '>')
++len;
}
if (arg[len] != '>')
{
errmsg = e_invarg;
goto skip;
}
arg[len] = NUL; /* put NUL after name */
if (arg[1] == 't' && arg[2] == '_') /* could be term code */
opt_idx = findoption(arg + 1);
arg[len++] = '>'; /* restore '>' */
if (opt_idx == -1)
key = find_key_option(arg + 1);
}
else
{
len = 0;
/*
* The two characters after "t_" need not be alphanumeric.
*/
if (arg[0] == 't' && arg[1] == '_' && arg[2] && arg[3])
len = 4;
else
while (ASCII_ISALNUM(arg[len]) || arg[len] == '_')
++len;
nextchar = arg[len];
arg[len] = NUL; /* put NUL after name */
opt_idx = findoption(arg);
arg[len] = nextchar; /* restore nextchar */
if (opt_idx == -1)
key = find_key_option(arg);
}
/* remember character after option name */
afterchar = arg[len];
/* skip white space, allow ":set ai ?" */
while (vim_iswhite(arg[len]))
++len;
adding = FALSE;
prepending = FALSE;
removing = FALSE;
if (arg[len] != NUL && arg[len + 1] == '=')
{
if (arg[len] == '+')
{
adding = TRUE; /* "+=" */
++len;
}
else if (arg[len] == '^')
{
prepending = TRUE; /* "^=" */
++len;
}
else if (arg[len] == '-')
{
removing = TRUE; /* "-=" */
++len;
}
}
nextchar = arg[len];
if (opt_idx == -1 && key == 0) /* found a mismatch: skip */
{
errmsg = (char_u *)N_("E518: Unknown option");
goto skip;
}
if (opt_idx >= 0)
{
if (options[opt_idx].var == NULL) /* hidden option: skip */
{
/* Only give an error message when requesting the value of
* a hidden option, ignore setting it. */
if (vim_strchr((char_u *)"=:!&<", nextchar) == NULL
&& (!(options[opt_idx].flags & P_BOOL)
|| nextchar == '?'))
errmsg = (char_u *)N_("E519: Option not supported");
goto skip;
}
flags = options[opt_idx].flags;
varp = get_varp_scope(&(options[opt_idx]), opt_flags);
}
else
{
flags = P_STRING;
if (key < 0)
{
key_name[0] = KEY2TERMCAP0(key);
key_name[1] = KEY2TERMCAP1(key);
}
else
{
key_name[0] = KS_KEY;
key_name[1] = (key & 0xff);
}
}
/* Skip all options that are not window-local (used when showing
* an already loaded buffer in a window). */
if ((opt_flags & OPT_WINONLY)
&& (opt_idx < 0 || options[opt_idx].var != VAR_WIN))
goto skip;
/* Skip all options that are window-local (used for :vimgrep). */
if ((opt_flags & OPT_NOWIN) && opt_idx >= 0
&& options[opt_idx].var == VAR_WIN)
goto skip;
/* Disallow changing some options from modelines. */
if (opt_flags & OPT_MODELINE)
{
if (flags & (P_SECURE | P_NO_ML))
{
errmsg = (char_u *)_("E520: Not allowed in a modeline");
goto skip;
}
#ifdef FEAT_DIFF
/* In diff mode some options are overruled. This avoids that
* 'foldmethod' becomes "marker" instead of "diff" and that
* "wrap" gets set. */
if (curwin->w_p_diff
&& opt_idx >= 0 /* shut up coverity warning */
&& (options[opt_idx].indir == PV_FDM
|| options[opt_idx].indir == PV_WRAP))
goto skip;
#endif
}
#ifdef HAVE_SANDBOX
/* Disallow changing some options in the sandbox */
if (sandbox != 0 && (flags & P_SECURE))
{
errmsg = (char_u *)_(e_sandbox);
goto skip;
}
#endif
if (vim_strchr((char_u *)"?=:!&<", nextchar) != NULL)
{
arg += len;
cp_val = p_cp;
if (nextchar == '&' && arg[1] == 'v' && arg[2] == 'i')
{
if (arg[3] == 'm') /* "opt&vim": set to Vim default */
{
cp_val = FALSE;
arg += 3;
}
else /* "opt&vi": set to Vi default */
{
cp_val = TRUE;
arg += 2;
}
}
if (vim_strchr((char_u *)"?!&<", nextchar) != NULL
&& arg[1] != NUL && !vim_iswhite(arg[1]))
{
errmsg = e_trailing;
goto skip;
}
}
/*
* allow '=' and ':' for historical reasons (MS-DOS command.com
* allows only one '=' character per "set" command line; grrr). (jw)
*/
if (nextchar == '?'
|| (prefix == 1
&& vim_strchr((char_u *)"=:&<", nextchar) == NULL
&& !(flags & P_BOOL)))
{
/*
* print value
*/
if (did_show)
msg_putchar('\n'); /* cursor below last one */
else
{
gotocmdline(TRUE); /* cursor at status line */
did_show = TRUE; /* remember that we did a line */
}
if (opt_idx >= 0)
{
showoneopt(&options[opt_idx], opt_flags);
#ifdef FEAT_EVAL
if (p_verbose > 0)
{
/* Mention where the option was last set. */
if (varp == options[opt_idx].var)
last_set_msg(options[opt_idx].scriptID);
else if ((int)options[opt_idx].indir & PV_WIN)
last_set_msg(curwin->w_p_scriptID[
(int)options[opt_idx].indir & PV_MASK]);
else if ((int)options[opt_idx].indir & PV_BUF)
last_set_msg(curbuf->b_p_scriptID[
(int)options[opt_idx].indir & PV_MASK]);
}
#endif
}
else
{
char_u *p;
p = find_termcode(key_name);
if (p == NULL)
{
errmsg = (char_u *)N_("E846: Key code not set");
goto skip;
}
else
(void)show_one_termcode(key_name, p, TRUE);
}
if (nextchar != '?'
&& nextchar != NUL && !vim_iswhite(afterchar))
errmsg = e_trailing;
}
else
{
if (flags & P_BOOL) /* boolean */
{
if (nextchar == '=' || nextchar == ':')
{
errmsg = e_invarg;
goto skip;
}
/*
* ":set opt!": invert
* ":set opt&": reset to default value
* ":set opt<": reset to global value
*/
if (nextchar == '!')
value = *(int *)(varp) ^ 1;
else if (nextchar == '&')
value = (int)(long)(long_i)options[opt_idx].def_val[
((flags & P_VI_DEF) || cp_val)
? VI_DEFAULT : VIM_DEFAULT];
else if (nextchar == '<')
{
/* For 'autoread' -1 means to use global value. */
if ((int *)varp == &curbuf->b_p_ar
&& opt_flags == OPT_LOCAL)
value = -1;
else
value = *(int *)get_varp_scope(&(options[opt_idx]),
OPT_GLOBAL);
}
else
{
/*
* ":set invopt": invert
* ":set opt" or ":set noopt": set or reset
*/
if (nextchar != NUL && !vim_iswhite(afterchar))
{
errmsg = e_trailing;
goto skip;
}
if (prefix == 2) /* inv */
value = *(int *)(varp) ^ 1;
else
value = prefix;
}
errmsg = set_bool_option(opt_idx, varp, (int)value,
opt_flags);
}
else /* numeric or string */
{
if (vim_strchr((char_u *)"=:&<", nextchar) == NULL
|| prefix != 1)
{
errmsg = e_invarg;
goto skip;
}
if (flags & P_NUM) /* numeric */
{
/*
* Different ways to set a number option:
* & set to default value
* < set to global value
* <xx> accept special key codes for 'wildchar'
* c accept any non-digit for 'wildchar'
* [-]0-9 set number
* other error
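* Examples: ":set sw=4", ":set ul&", ":set sw<" and
* ":set wc=<Tab>".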
*/
++arg;
if (nextchar == '&')
value = (long)(long_i)options[opt_idx].def_val[
((flags & P_VI_DEF) || cp_val)
? VI_DEFAULT : VIM_DEFAULT];
else if (nextchar == '<')
{
/* For 'undolevels' NO_LOCAL_UNDOLEVEL means to
* use the global value. */
if ((long *)varp == &curbuf->b_p_ul
&& opt_flags == OPT_LOCAL)
value = NO_LOCAL_UNDOLEVEL;
else
value = *(long *)get_varp_scope(
&(options[opt_idx]), OPT_GLOBAL);
}
else if (((long *)varp == &p_wc
|| (long *)varp == &p_wcm)
&& (*arg == '<'
|| *arg == '^'
|| ((!arg[1] || vim_iswhite(arg[1]))
&& !VIM_ISDIGIT(*arg))))
{
value = string_to_key(arg);
if (value == 0 && (long *)varp != &p_wcm)
{
errmsg = e_invarg;
goto skip;
}
}
else if (*arg == '-' || VIM_ISDIGIT(*arg))
{
/* Allow negative (for 'undolevels'), octal and
* hex numbers. */
vim_str2nr(arg, NULL, &i, STR2NR_ALL,
&value, NULL, 0);
if (arg[i] != NUL && !vim_iswhite(arg[i]))
{
errmsg = e_invarg;
goto skip;
}
}
else
{
errmsg = (char_u *)N_("E521: Number required after =");
goto skip;
}
if (adding)
value = *(long *)varp + value;
if (prepending)
value = *(long *)varp * value;
if (removing)
value = *(long *)varp - value;
errmsg = set_num_option(opt_idx, varp, value,
errbuf, sizeof(errbuf), opt_flags);
}
else if (opt_idx >= 0) /* string */
{
char_u *save_arg = NULL;
char_u *s = NULL;
char_u *oldval = NULL; /* previous value of *varp */
char_u *newval;
char_u *origval = NULL;
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
char_u *saved_origval = NULL;
#endif
unsigned newlen;
int comma;
int bs;
int new_value_alloced; /* new string option
was allocated */
/* When using ":set opt=val" for a global option
* with a local value the local value will be
* reset, use the global value here. */
if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0
&& ((int)options[opt_idx].indir & PV_BOTH))
varp = options[opt_idx].var;
/* The old value is kept until we are sure that the
* new value is valid. */
oldval = *(char_u **)varp;
if (nextchar == '&') /* set to default val */
{
newval = options[opt_idx].def_val[
((flags & P_VI_DEF) || cp_val)
? VI_DEFAULT : VIM_DEFAULT];
if ((char_u **)varp == &p_bg)
{
/* guess the value of 'background' */
#ifdef FEAT_GUI
if (gui.in_use)
newval = gui_bg_default();
else
#endif
newval = term_bg_default();
}
/* expand environment variables and ~ (since the
* default value was already expanded, only
* required when an environment variable was set
* later). */
if (newval == NULL)
newval = empty_option;
else
{
s = option_expand(opt_idx, newval);
if (s == NULL)
s = newval;
newval = vim_strsave(s);
}
new_value_alloced = TRUE;
}
else if (nextchar == '<') /* set to global val */
{
newval = vim_strsave(*(char_u **)get_varp_scope(
&(options[opt_idx]), OPT_GLOBAL));
new_value_alloced = TRUE;
}
else
{
++arg; /* jump to after the '=' or ':' */
/*
* Set 'keywordprg' to ":help" if an empty
* value was passed to :set by the user.
* Misuse errbuf[] for the resulting string.
*/
if (varp == (char_u *)&p_kp
&& (*arg == NUL || *arg == ' '))
{
STRCPY(errbuf, ":help");
save_arg = arg;
arg = errbuf;
}
/*
* Convert 'backspace' number to string, for
* adding, prepending and removing string.
*/
else if (varp == (char_u *)&p_bs
&& VIM_ISDIGIT(**(char_u **)varp))
{
i = getdigits((char_u **)varp);
switch (i)
{
case 0:
*(char_u **)varp = empty_option;
break;
case 1:
*(char_u **)varp = vim_strsave(
(char_u *)"indent,eol");
break;
case 2:
*(char_u **)varp = vim_strsave(
(char_u *)"indent,eol,start");
break;
}
vim_free(oldval);
oldval = *(char_u **)varp;
}
/*
* Convert 'whichwrap' number to string, for
* backwards compatibility with Vim 3.0.
* Misuse errbuf[] for the resulting string.
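* Example: ":set ww=7" is converted to ww="b,s,h,l" (bits 1|2|4).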
*/
else if (varp == (char_u *)&p_ww
&& VIM_ISDIGIT(*arg))
{
*errbuf = NUL;
i = getdigits(&arg);
if (i & 1)
STRCAT(errbuf, "b,");
if (i & 2)
STRCAT(errbuf, "s,");
if (i & 4)
STRCAT(errbuf, "h,l,");
if (i & 8)
STRCAT(errbuf, "<,>,");
if (i & 16)
STRCAT(errbuf, "[,],");
if (*errbuf != NUL) /* remove trailing , */
errbuf[STRLEN(errbuf) - 1] = NUL;
save_arg = arg;
arg = errbuf;
}
/*
* Remove '>' before 'dir' and 'bdir', for
* backwards compatibility with version 3.0
*/
else if ( *arg == '>'
&& (varp == (char_u *)&p_dir
|| varp == (char_u *)&p_bdir))
{
++arg;
}
/* When setting the local value of a global
* option, the old value may be the global value. */
if (((int)options[opt_idx].indir & PV_BOTH)
&& (opt_flags & OPT_LOCAL))
origval = *(char_u **)get_varp(
&options[opt_idx]);
else
origval = oldval;
/*
* Copy the new string into allocated memory.
* Can't use set_string_option_direct(), because
* we need to remove the backslashes.
*/
/* get a bit too much */
newlen = (unsigned)STRLEN(arg) + 1;
if (adding || prepending || removing)
newlen += (unsigned)STRLEN(origval) + 1;
newval = alloc(newlen);
if (newval == NULL) /* out of mem, don't change */
break;
s = newval;
/*
* Copy the string, skip over escaped chars.
* For MS-DOS and WIN32 backslashes before normal
* file name characters are not removed, and keep
* backslash at start, for "\\machine\path", but
* do remove it for "\\\\machine\\path".
* The reverse is found in ExpandOldSetting().
*/
while (*arg && !vim_iswhite(*arg))
{
if (*arg == '\\' && arg[1] != NUL
#ifdef BACKSLASH_IN_FILENAME
&& !((flags & P_EXPAND)
&& vim_isfilec(arg[1])
&& (arg[1] != '\\'
|| (s == newval
&& arg[2] != '\\')))
#endif
)
++arg; /* remove backslash */
#ifdef FEAT_MBYTE
if (has_mbyte
&& (i = (*mb_ptr2len)(arg)) > 1)
{
/* copy multibyte char */
mch_memmove(s, arg, (size_t)i);
arg += i;
s += i;
}
else
#endif
*s++ = *arg++;
}
*s = NUL;
/*
* Expand environment variables and ~.
* Don't do it when adding without inserting a
* comma.
*/
if (!(adding || prepending || removing)
|| (flags & P_COMMA))
{
s = option_expand(opt_idx, newval);
if (s != NULL)
{
vim_free(newval);
newlen = (unsigned)STRLEN(s) + 1;
if (adding || prepending || removing)
newlen += (unsigned)STRLEN(origval) + 1;
newval = alloc(newlen);
if (newval == NULL)
break;
STRCPY(newval, s);
}
}
/* locate newval[] in origval[] when removing it
* and when adding to avoid duplicates */
i = 0; /* init for GCC */
if (removing || (flags & P_NODUP))
{
i = (int)STRLEN(newval);
bs = 0;
for (s = origval; *s; ++s)
{
if ((!(flags & P_COMMA)
|| s == origval
|| (s[-1] == ',' && !(bs & 1)))
&& STRNCMP(s, newval, i) == 0
&& (!(flags & P_COMMA)
|| s[i] == ','
|| s[i] == NUL))
break;
/* Count backslashes. Only a comma preceded by an even
* number of backslashes, or by a single backslash that
* itself follows a comma, is recognized as a
* separator. */
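/* Example: in "a\,b,c" the first comma is escaped by the
* backslash, so the items are "a,b" and "c". */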
if ((s > origval + 1
&& s[-1] == '\\'
&& s[-2] != ',')
|| (s == origval + 1
&& s[-1] == '\\'))
++bs;
else
bs = 0;
}
/* do not add if already there */
if ((adding || prepending) && *s)
{
prepending = FALSE;
adding = FALSE;
STRCPY(newval, origval);
}
}
/* concatenate the two strings; add a ',' if
* needed */
if (adding || prepending)
{
comma = ((flags & P_COMMA) && *origval != NUL
&& *newval != NUL);
if (adding)
{
i = (int)STRLEN(origval);
/* strip a trailing comma, otherwise it results in two */
if (comma && i > 1
&& (flags & P_ONECOMMA) == P_ONECOMMA
&& origval[i - 1] == ','
&& origval[i - 2] != '\\')
i--;
mch_memmove(newval + i + comma, newval,
STRLEN(newval) + 1);
mch_memmove(newval, origval, (size_t)i);
}
else
{
i = (int)STRLEN(newval);
STRMOVE(newval + i + comma, origval);
}
if (comma)
newval[i] = ',';
}
/* Remove newval[] from origval[]. (Note: "i" has
* been set above and is used here). */
if (removing)
{
STRCPY(newval, origval);
if (*s)
{
/* may need to remove a comma */
if (flags & P_COMMA)
{
if (s == origval)
{
/* include comma after string */
if (s[i] == ',')
++i;
}
else
{
/* include comma before string */
--s;
++i;
}
}
STRMOVE(newval + (s - origval), s + i);
}
}
if (flags & P_FLAGLIST)
{
/* Remove flags that appear twice. */
for (s = newval; *s; ++s)
{
/* if options have P_FLAGLIST and
* P_ONECOMMA such as 'whichwrap' */
if (flags & P_ONECOMMA)
{
if (*s != ',' && *(s + 1) == ','
&& vim_strchr(s + 2, *s) != NULL)
{
/* Remove the duplicated value and
* the next comma. */
STRMOVE(s, s + 2);
s -= 2;
}
}
else
{
if ((!(flags & P_COMMA) || *s != ',')
&& vim_strchr(s + 1, *s) != NULL)
{
STRMOVE(s, s + 1);
--s;
}
}
}
}
if (save_arg != NULL) /* number for 'whichwrap' */
arg = save_arg;
new_value_alloced = TRUE;
}
/* Set the new value. */
*(char_u **)(varp) = newval;
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
if (!starting
# ifdef FEAT_CRYPT
&& options[opt_idx].indir != PV_KEY
# endif
&& origval != NULL)
/* origval may be freed by
* did_set_string_option(), make a copy. */
saved_origval = vim_strsave(origval);
#endif
/* Handle side effects, and set the global value for
* ":set" on local options. */
errmsg = did_set_string_option(opt_idx, (char_u **)varp,
new_value_alloced, oldval, errbuf, opt_flags);
/* If error detected, print the error message. */
if (errmsg != NULL)
{
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
vim_free(saved_origval);
#endif
goto skip;
}
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
if (saved_origval != NULL)
{
char_u buf_type[7];
sprintf((char *)buf_type, "%s",
(opt_flags & OPT_LOCAL) ? "local" : "global");
set_vim_var_string(VV_OPTION_NEW,
*(char_u **)varp, -1);
set_vim_var_string(VV_OPTION_OLD, saved_origval, -1);
set_vim_var_string(VV_OPTION_TYPE, buf_type, -1);
apply_autocmds(EVENT_OPTIONSET,
(char_u *)options[opt_idx].fullname,
NULL, FALSE, NULL);
reset_v_option_vars();
vim_free(saved_origval);
}
#endif
}
else /* key code option */
{
char_u *p;
if (nextchar == '&')
{
if (add_termcap_entry(key_name, TRUE) == FAIL)
errmsg = (char_u *)N_("E522: Not found in termcap");
}
else
{
++arg; /* jump to after the '=' or ':' */
for (p = arg; *p && !vim_iswhite(*p); ++p)
if (*p == '\\' && p[1] != NUL)
++p;
nextchar = *p;
*p = NUL;
add_termcode(key_name, arg, FALSE);
*p = nextchar;
}
if (full_screen)
ttest(FALSE);
redraw_all_later(CLEAR);
}
}
if (opt_idx >= 0)
did_set_option(opt_idx, opt_flags,
!prepending && !adding && !removing);
}
skip:
/*
* Advance to next argument.
* - skip until a blank found, taking care of backslashes
* - skip blanks
* - skip one "=val" argument (for hidden options ":set gfn =xx")
*/
for (i = 0; i < 2 ; ++i)
{
while (*arg != NUL && !vim_iswhite(*arg))
if (*arg++ == '\\' && *arg != NUL)
++arg;
arg = skipwhite(arg);
if (*arg != '=')
break;
}
}
if (errmsg != NULL)
{
vim_strncpy(IObuff, (char_u *)_(errmsg), IOSIZE - 1);
i = (int)STRLEN(IObuff) + 2;
if (i + (arg - startarg) < IOSIZE)
{
/* append the argument with the error */
STRCAT(IObuff, ": ");
mch_memmove(IObuff + i, startarg, (arg - startarg));
IObuff[i + (arg - startarg)] = NUL;
}
/* make sure all characters are printable */
trans_characters(IObuff, IOSIZE);
++no_wait_return; /* wait_return done later */
emsg(IObuff); /* show error highlighted */
--no_wait_return;
return FAIL;
}
arg = skipwhite(arg);
}
theend:
if (silent_mode && did_show)
{
/* After displaying option values in silent mode. */
silent_mode = FALSE;
info_message = TRUE; /* use mch_msg(), not mch_errmsg() */
msg_putchar('\n');
cursor_on(); /* msg_start() switches it off */
out_flush();
silent_mode = TRUE;
info_message = FALSE; /* use mch_msg(), not mch_errmsg() */
}
return OK;
}
/*
* Call this when an option has been given a new value through a user command.
* Sets the P_WAS_SET flag and takes care of the P_INSECURE flag.
*/
static void
did_set_option(
int opt_idx,
int opt_flags, /* possibly with OPT_MODELINE */
int new_value) /* value was replaced completely */
{
long_u *p;
options[opt_idx].flags |= P_WAS_SET;
/* When an option is set in the sandbox, from a modeline or in secure mode
* set the P_INSECURE flag. Otherwise, if a new value is stored reset the
* flag. */
p = insecure_flag(opt_idx, opt_flags);
if (secure
#ifdef HAVE_SANDBOX
|| sandbox != 0
#endif
|| (opt_flags & OPT_MODELINE))
*p = *p | P_INSECURE;
else if (new_value)
*p = *p & ~P_INSECURE;
}
static char_u *
illegal_char(char_u *errbuf, int c)
{
if (errbuf == NULL)
return (char_u *)"";
sprintf((char *)errbuf, _("E539: Illegal character <%s>"),
(char *)transchar(c));
return errbuf;
}
/*
* Convert a key name or string into a key value.
* Used for 'wildchar' and 'cedit' options.
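* Example: "<Tab>" gives the <Tab> key code, "^F" gives CTRL-F and
* "x" gives 'x'.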
*/
static int
string_to_key(char_u *arg)
{
if (*arg == '<')
return find_key_option(arg + 1);
if (*arg == '^')
return Ctrl_chr(arg[1]);
return *arg;
}
#ifdef FEAT_CMDWIN
/*
* Check value of 'cedit' and set cedit_key.
* Returns NULL if value is OK, error message otherwise.
*/
static char_u *
check_cedit(void)
{
int n;
if (*p_cedit == NUL)
cedit_key = -1;
else
{
n = string_to_key(p_cedit);
if (vim_isprintc(n))
return e_invarg;
cedit_key = n;
}
return NULL;
}
#endif
#ifdef FEAT_TITLE
/*
* When changing 'title', 'titlestring', 'icon' or 'iconstring', call
* maketitle() to create and display it.
* When switching the title or icon off, call mch_restore_title() to get
* the old value back.
*/
static void
did_set_title(
int icon) /* Did set icon instead of title */
{
if (starting != NO_SCREEN
#ifdef FEAT_GUI
&& !gui.starting
#endif
)
{
maketitle();
if (icon)
{
if (!p_icon)
mch_restore_title(2);
}
else
{
if (!p_title)
mch_restore_title(1);
}
}
}
#endif
/*
* set_options_bin - called when 'bin' changes value.
*/
void
set_options_bin(
int oldval,
int newval,
int opt_flags) /* OPT_LOCAL and/or OPT_GLOBAL */
{
/*
* The option values that are changed when 'bin' changes are
* copied when 'bin' is set and restored when 'bin' is reset.
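* Example: ":set bin" zeroes 'textwidth' and 'wrapmargin' and resets
* 'modeline' and 'expandtab'; ":set nobin" restores the saved values.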
*/
if (newval)
{
if (!oldval) /* switched on */
{
if (!(opt_flags & OPT_GLOBAL))
{
curbuf->b_p_tw_nobin = curbuf->b_p_tw;
curbuf->b_p_wm_nobin = curbuf->b_p_wm;
curbuf->b_p_ml_nobin = curbuf->b_p_ml;
curbuf->b_p_et_nobin = curbuf->b_p_et;
}
if (!(opt_flags & OPT_LOCAL))
{
p_tw_nobin = p_tw;
p_wm_nobin = p_wm;
p_ml_nobin = p_ml;
p_et_nobin = p_et;
}
}
if (!(opt_flags & OPT_GLOBAL))
{
curbuf->b_p_tw = 0; /* no automatic line wrap */
curbuf->b_p_wm = 0; /* no automatic line wrap */
curbuf->b_p_ml = 0; /* no modelines */
curbuf->b_p_et = 0; /* no expandtab */
}
if (!(opt_flags & OPT_LOCAL))
{
p_tw = 0;
p_wm = 0;
p_ml = FALSE;
p_et = FALSE;
p_bin = TRUE; /* needed when called for the "-b" argument */
}
}
else if (oldval) /* switched off */
{
if (!(opt_flags & OPT_GLOBAL))
{
curbuf->b_p_tw = curbuf->b_p_tw_nobin;
curbuf->b_p_wm = curbuf->b_p_wm_nobin;
curbuf->b_p_ml = curbuf->b_p_ml_nobin;
curbuf->b_p_et = curbuf->b_p_et_nobin;
}
if (!(opt_flags & OPT_LOCAL))
{
p_tw = p_tw_nobin;
p_wm = p_wm_nobin;
p_ml = p_ml_nobin;
p_et = p_et_nobin;
}
}
}
#ifdef FEAT_VIMINFO
/*
* Find the parameter represented by the given character (eg ', :, ", or /),
* and return its associated value in the 'viminfo' string.
* Only works for number parameters, not for 'r' or 'n'.
* If the parameter is not specified in the string or there is no following
* number, return -1.
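* Example: with 'viminfo' set to "'100,<50,s10,h",
* get_viminfo_parameter('\'') returns 100 and
* get_viminfo_parameter('/') returns -1.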
*/
int
get_viminfo_parameter(int type)
{
char_u *p;
p = find_viminfo_parameter(type);
if (p != NULL && VIM_ISDIGIT(*p))
return atoi((char *)p);
return -1;
}
/*
* Find the parameter represented by the given character (eg ''', ':', '"', or
* '/') in the 'viminfo' option and return a pointer to the string after it.
* Return NULL if the parameter is not specified in the string.
*/
char_u *
find_viminfo_parameter(int type)
{
char_u *p;
for (p = p_viminfo; *p; ++p)
{
if (*p == type)
return p + 1;
if (*p == 'n') /* 'n' is always the last one */
break;
p = vim_strchr(p, ','); /* skip until next ',' */
if (p == NULL) /* hit the end without finding parameter */
break;
}
return NULL;
}
#endif
/*
* Expand environment variables for some string options.
* These string options cannot be indirect!
* If "val" is NULL expand the current value of the option.
* Return pointer to NameBuff, or NULL when not expanded.
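* Example: for 'backupdir' a value like "~/backup" would expand to
* the user's home directory, e.g. "/home/user/backup" (illustrative
* path only).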
*/
static char_u *
option_expand(int opt_idx, char_u *val)
{
/* if option doesn't need expansion nothing to do */
if (!(options[opt_idx].flags & P_EXPAND) || options[opt_idx].var == NULL)
return NULL;
/* If val is longer than MAXPATHL no meaningful expansion can be done,
* expand_env() would truncate the string. */
if (val != NULL && STRLEN(val) > MAXPATHL)
return NULL;
if (val == NULL)
val = *(char_u **)options[opt_idx].var;
/*
* Expanding this with NameBuff, expand_env() must not be passed IObuff.
* Escape spaces when expanding 'tags', they are used to separate file
* names.
* For 'spellsuggest' expand after "file:".
*/
expand_env_esc(val, NameBuff, MAXPATHL,
(char_u **)options[opt_idx].var == &p_tags, FALSE,
#ifdef FEAT_SPELL
(char_u **)options[opt_idx].var == &p_sps ? (char_u *)"file:" :
#endif
NULL);
if (STRCMP(NameBuff, val) == 0) /* they are the same */
return NULL;
return NameBuff;
}
/*
* After setting various option values: recompute variables that depend on
* option values.
*/
static void
didset_options(void)
{
/* initialize the table for 'iskeyword' et al. */
(void)init_chartab();
#ifdef FEAT_MBYTE
(void)opt_strings_flags(p_cmp, p_cmp_values, &cmp_flags, TRUE);
#endif
(void)opt_strings_flags(p_bkc, p_bkc_values, &bkc_flags, TRUE);
(void)opt_strings_flags(p_bo, p_bo_values, &bo_flags, TRUE);
#ifdef FEAT_SESSION
(void)opt_strings_flags(p_ssop, p_ssop_values, &ssop_flags, TRUE);
(void)opt_strings_flags(p_vop, p_ssop_values, &vop_flags, TRUE);
#endif
#ifdef FEAT_FOLDING
(void)opt_strings_flags(p_fdo, p_fdo_values, &fdo_flags, TRUE);
#endif
(void)opt_strings_flags(p_dy, p_dy_values, &dy_flags, TRUE);
(void)opt_strings_flags(p_tc, p_tc_values, &tc_flags, FALSE);
#ifdef FEAT_VIRTUALEDIT
(void)opt_strings_flags(p_ve, p_ve_values, &ve_flags, TRUE);
#endif
#if defined(FEAT_MOUSE) && (defined(UNIX) || defined(VMS))
(void)opt_strings_flags(p_ttym, p_ttym_values, &ttym_flags, FALSE);
#endif
#ifdef FEAT_SPELL
(void)spell_check_msm();
(void)spell_check_sps();
(void)compile_cap_prog(curwin->w_s);
(void)did_set_spell_option(TRUE);
#endif
#if defined(FEAT_TOOLBAR) && !defined(FEAT_GUI_W32)
(void)opt_strings_flags(p_toolbar, p_toolbar_values, &toolbar_flags, TRUE);
#endif
#if defined(FEAT_TOOLBAR) && defined(FEAT_GUI_GTK)
(void)opt_strings_flags(p_tbis, p_tbis_values, &tbis_flags, FALSE);
#endif
#ifdef FEAT_CMDWIN
/* set cedit_key */
(void)check_cedit();
#endif
#ifdef FEAT_LINEBREAK
briopt_check(curwin);
#endif
#ifdef FEAT_LINEBREAK
/* initialize the table for 'breakat'. */
fill_breakat_flags();
#endif
}
/*
* More side effects of setting options.
*/
static void
didset_options2(void)
{
/* Initialize the highlight_attr[] table. */
(void)highlight_changed();
/* Parse default for 'wildmode' */
check_opt_wim();
(void)set_chars_option(&p_lcs);
#if defined(FEAT_WINDOWS) || defined(FEAT_FOLDING)
/* Parse default for 'fillchars'. */
(void)set_chars_option(&p_fcs);
#endif
#ifdef FEAT_CLIPBOARD
/* Parse default for 'clipboard' */
(void)check_clipboard_option();
#endif
}
/*
* Check for string options that are NULL (normally only termcap options).
*/
void
check_options(void)
{
int opt_idx;
for (opt_idx = 0; options[opt_idx].fullname != NULL; opt_idx++)
if ((options[opt_idx].flags & P_STRING) && options[opt_idx].var != NULL)
check_string_option((char_u **)get_varp(&(options[opt_idx])));
}
/*
* Check string options in a buffer for NULL value.
*/
void
check_buf_options(buf_T *buf)
{
#if defined(FEAT_QUICKFIX)
check_string_option(&buf->b_p_bh);
check_string_option(&buf->b_p_bt);
#endif
#ifdef FEAT_MBYTE
check_string_option(&buf->b_p_fenc);
#endif
check_string_option(&buf->b_p_ff);
#ifdef FEAT_FIND_ID
check_string_option(&buf->b_p_def);
check_string_option(&buf->b_p_inc);
# ifdef FEAT_EVAL
check_string_option(&buf->b_p_inex);
# endif
#endif
#if defined(FEAT_CINDENT) && defined(FEAT_EVAL)
check_string_option(&buf->b_p_inde);
check_string_option(&buf->b_p_indk);
#endif
#if defined(FEAT_BEVAL) && defined(FEAT_EVAL)
check_string_option(&buf->b_p_bexpr);
#endif
#if defined(FEAT_CRYPT)
check_string_option(&buf->b_p_cm);
#endif
#if defined(FEAT_EVAL)
check_string_option(&buf->b_p_fex);
#endif
#ifdef FEAT_CRYPT
check_string_option(&buf->b_p_key);
#endif
check_string_option(&buf->b_p_kp);
check_string_option(&buf->b_p_mps);
check_string_option(&buf->b_p_fo);
check_string_option(&buf->b_p_flp);
check_string_option(&buf->b_p_isk);
#ifdef FEAT_COMMENTS
check_string_option(&buf->b_p_com);
#endif
#ifdef FEAT_FOLDING
check_string_option(&buf->b_p_cms);
#endif
check_string_option(&buf->b_p_nf);
#ifdef FEAT_TEXTOBJ
check_string_option(&buf->b_p_qe);
#endif
#ifdef FEAT_SYN_HL
check_string_option(&buf->b_p_syn);
check_string_option(&buf->b_s.b_syn_isk);
#endif
#ifdef FEAT_SPELL
check_string_option(&buf->b_s.b_p_spc);
check_string_option(&buf->b_s.b_p_spf);
check_string_option(&buf->b_s.b_p_spl);
#endif
#ifdef FEAT_SEARCHPATH
check_string_option(&buf->b_p_sua);
#endif
#ifdef FEAT_CINDENT
check_string_option(&buf->b_p_cink);
check_string_option(&buf->b_p_cino);
parse_cino(buf);
#endif
#ifdef FEAT_AUTOCMD
check_string_option(&buf->b_p_ft);
#endif
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
check_string_option(&buf->b_p_cinw);
#endif
#ifdef FEAT_INS_EXPAND
check_string_option(&buf->b_p_cpt);
#endif
#ifdef FEAT_COMPL_FUNC
check_string_option(&buf->b_p_cfu);
check_string_option(&buf->b_p_ofu);
#endif
#ifdef FEAT_KEYMAP
check_string_option(&buf->b_p_keymap);
#endif
#ifdef FEAT_QUICKFIX
check_string_option(&buf->b_p_gp);
check_string_option(&buf->b_p_mp);
check_string_option(&buf->b_p_efm);
#endif
check_string_option(&buf->b_p_ep);
check_string_option(&buf->b_p_path);
check_string_option(&buf->b_p_tags);
check_string_option(&buf->b_p_tc);
#ifdef FEAT_INS_EXPAND
check_string_option(&buf->b_p_dict);
check_string_option(&buf->b_p_tsr);
#endif
#ifdef FEAT_LISP
check_string_option(&buf->b_p_lw);
#endif
check_string_option(&buf->b_p_bkc);
}
/*
* Free the string allocated for an option.
* Checks for the string being empty_option.  This may happen if we ran out
* of memory: vim_strsave() returned NULL, and the NULL was then replaced by
* empty_option in check_options().
* Does NOT check for P_ALLOCED flag!
*/
void
free_string_option(char_u *p)
{
if (p != empty_option)
vim_free(p);
}
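/*
* Free the string for an option and set it to empty_option.
*/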
void
clear_string_option(char_u **pp)
{
if (*pp != empty_option)
vim_free(*pp);
*pp = empty_option;
}
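/*
* When a string option is NULL (possibly after running out of memory), set
* it to empty_option, so that using it does not dereference a NULL pointer.
*/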
static void
check_string_option(char_u **pp)
{
if (*pp == NULL)
*pp = empty_option;
}
/*
* Mark a terminal option as allocated, found by a pointer into term_strings[].
*/
void
set_term_option_alloced(char_u **p)
{
int opt_idx;
for (opt_idx = 1; options[opt_idx].fullname != NULL; opt_idx++)
if (options[opt_idx].var == (char_u *)p)
{
options[opt_idx].flags |= P_ALLOCED;
return;
}
return; /* cannot happen: didn't find it! */
}
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Return TRUE when option "opt" was set from a modeline or in secure mode.
* Return FALSE when it wasn't.
* Return -1 for an unknown option.
*/
int
was_set_insecurely(char_u *opt, int opt_flags)
{
int idx = findoption(opt);
long_u *flagp;
if (idx >= 0)
{
flagp = insecure_flag(idx, opt_flags);
return (*flagp & P_INSECURE) != 0;
}
EMSG2(_(e_intern2), "was_set_insecurely()");
return -1;
}
/*
* Get a pointer to the flags used for the P_INSECURE flag of option
* "opt_idx". For some local options a local flags field is used.
*/
static long_u *
insecure_flag(int opt_idx, int opt_flags)
{
if (opt_flags & OPT_LOCAL)
switch ((int)options[opt_idx].indir)
{
#ifdef FEAT_STL_OPT
case PV_STL: return &curwin->w_p_stl_flags;
#endif
#ifdef FEAT_EVAL
# ifdef FEAT_FOLDING
case PV_FDE: return &curwin->w_p_fde_flags;
case PV_FDT: return &curwin->w_p_fdt_flags;
# endif
# ifdef FEAT_BEVAL
case PV_BEXPR: return &curbuf->b_p_bexpr_flags;
# endif
# if defined(FEAT_CINDENT)
case PV_INDE: return &curbuf->b_p_inde_flags;
# endif
case PV_FEX: return &curbuf->b_p_fex_flags;
# ifdef FEAT_FIND_ID
case PV_INEX: return &curbuf->b_p_inex_flags;
# endif
#endif
}
/* Nothing special, return global flags field. */
return &options[opt_idx].flags;
}
#endif
#ifdef FEAT_TITLE
static void redraw_titles(void);
/*
* Redraw the window title and/or tab page text later.
*/
static void redraw_titles(void)
{
need_maketitle = TRUE;
# ifdef FEAT_WINDOWS
redraw_tabline = TRUE;
# endif
}
#endif
/*
* Set a string option to a new value (without checking the effect).
* The string is copied into allocated memory.
* if ("opt_idx" == -1) "name" is used, otherwise "opt_idx" is used.
* When "set_sid" is zero set the scriptID to current_SID. When "set_sid" is
* SID_NONE don't set the scriptID. Otherwise set the scriptID to "set_sid".
*/
void
set_string_option_direct(
char_u *name,
int opt_idx,
char_u *val,
int opt_flags, /* OPT_FREE, OPT_LOCAL and/or OPT_GLOBAL */
int set_sid UNUSED)
{
char_u *s;
char_u **varp;
int both = (opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0;
int idx = opt_idx;
if (idx == -1) /* use name */
{
idx = findoption(name);
if (idx < 0) /* not found (should not happen) */
{
EMSG2(_(e_intern2), "set_string_option_direct()");
EMSG2(_("For option %s"), name);
return;
}
}
if (options[idx].var == NULL) /* can't set hidden option */
return;
s = vim_strsave(val);
if (s != NULL)
{
varp = (char_u **)get_varp_scope(&(options[idx]),
both ? OPT_LOCAL : opt_flags);
if ((opt_flags & OPT_FREE) && (options[idx].flags & P_ALLOCED))
free_string_option(*varp);
*varp = s;
/* For buffer/window local option may also set the global value. */
if (both)
set_string_option_global(idx, varp);
options[idx].flags |= P_ALLOCED;
/* When setting both values of a global option with a local value,
* make the local value empty, so that the global value is used. */
if (((int)options[idx].indir & PV_BOTH) && both)
{
free_string_option(*varp);
*varp = empty_option;
}
# ifdef FEAT_EVAL
if (set_sid != SID_NONE)
set_option_scriptID_idx(idx, opt_flags,
set_sid == 0 ? current_SID : set_sid);
# endif
}
}
/*
* Set global value for string option when it's a local option.
*/
static void
set_string_option_global(
int opt_idx, /* option index */
char_u **varp) /* pointer to option variable */
{
char_u **p, *s;
/* the global value is always allocated */
if (options[opt_idx].var == VAR_WIN)
p = (char_u **)GLOBAL_WO(varp);
else
p = (char_u **)options[opt_idx].var;
if (options[opt_idx].indir != PV_NONE
&& p != varp
&& (s = vim_strsave(*varp)) != NULL)
{
free_string_option(*p);
*p = s;
}
}
/*
* Set a string option to a new value, and handle the effects.
*
* Returns NULL on success or error message on error.
*/
static char_u *
set_string_option(
int opt_idx,
char_u *value,
int opt_flags) /* OPT_LOCAL and/or OPT_GLOBAL */
{
char_u *s;
char_u **varp;
char_u *oldval;
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
char_u *saved_oldval = NULL;
#endif
char_u *r = NULL;
if (options[opt_idx].var == NULL) /* don't set hidden option */
return NULL;
s = vim_strsave(value);
if (s != NULL)
{
varp = (char_u **)get_varp_scope(&(options[opt_idx]),
(opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0
? (((int)options[opt_idx].indir & PV_BOTH)
? OPT_GLOBAL : OPT_LOCAL)
: opt_flags);
oldval = *varp;
*varp = s;
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
if (!starting
# ifdef FEAT_CRYPT
&& options[opt_idx].indir != PV_KEY
# endif
)
saved_oldval = vim_strsave(oldval);
#endif
if ((r = did_set_string_option(opt_idx, varp, TRUE, oldval, NULL,
opt_flags)) == NULL)
did_set_option(opt_idx, opt_flags, TRUE);
/* call the OptionSet autocommand after handling side effects */
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
if (saved_oldval != NULL)
{
char_u buf_type[7];
sprintf((char *)buf_type, "%s",
(opt_flags & OPT_LOCAL) ? "local" : "global");
set_vim_var_string(VV_OPTION_NEW, *varp, -1);
set_vim_var_string(VV_OPTION_OLD, saved_oldval, -1);
set_vim_var_string(VV_OPTION_TYPE, buf_type, -1);
apply_autocmds(EVENT_OPTIONSET, (char_u *)options[opt_idx].fullname, NULL, FALSE, NULL);
reset_v_option_vars();
vim_free(saved_oldval);
}
#endif
}
return r;
}
/*
* Handle string options that need some action to perform when changed.
* Returns NULL for success, or an error message for an error.
*/
static char_u *
did_set_string_option(
int opt_idx, /* index in options[] table */
char_u **varp, /* pointer to the option variable */
int new_value_alloced, /* new value was allocated */
char_u *oldval, /* previous value of the option */
char_u *errbuf, /* buffer for errors, or NULL */
int opt_flags) /* OPT_LOCAL and/or OPT_GLOBAL */
{
char_u *errmsg = NULL;
char_u *s, *p;
int did_chartab = FALSE;
char_u **gvarp;
long_u free_oldval = (options[opt_idx].flags & P_ALLOCED);
#ifdef FEAT_GUI
/* set when changing an option that only requires a redraw in the GUI */
int redraw_gui_only = FALSE;
#endif
/* Get the global option to compare with, otherwise we would have to check
* two values for all local options. */
gvarp = (char_u **)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL);
/* Disallow changing some options from secure mode */
if ((secure
#ifdef HAVE_SANDBOX
|| sandbox != 0
#endif
) && (options[opt_idx].flags & P_SECURE))
{
errmsg = e_secure;
}
/* Check for a "normal" file name in some options. Disallow a path
* separator (slash and/or backslash), wildcards and characters that are
* often illegal in a file name. */
else if ((options[opt_idx].flags & P_NFNAME)
&& vim_strpbrk(*varp, (char_u *)"/\\*?[|<>") != NULL)
{
errmsg = e_invarg;
}
/* 'term' */
else if (varp == &T_NAME)
{
if (T_NAME[0] == NUL)
errmsg = (char_u *)N_("E529: Cannot set 'term' to empty string");
#ifdef FEAT_GUI
else if (gui.in_use)
errmsg = (char_u *)N_("E530: Cannot change term in GUI");
else if (term_is_gui(T_NAME))
errmsg = (char_u *)N_("E531: Use \":gui\" to start the GUI");
#endif
else if (set_termname(T_NAME) == FAIL)
errmsg = (char_u *)N_("E522: Not found in termcap");
else
/* Screen colors may have changed. */
redraw_later_clear();
}
/* 'backupcopy' */
else if (gvarp == &p_bkc)
{
char_u *bkc = p_bkc;
unsigned int *flags = &bkc_flags;
if (opt_flags & OPT_LOCAL)
{
bkc = curbuf->b_p_bkc;
flags = &curbuf->b_bkc_flags;
}
if ((opt_flags & OPT_LOCAL) && *bkc == NUL)
/* make the local value empty: use the global value */
*flags = 0;
else
{
if (opt_strings_flags(bkc, p_bkc_values, flags, TRUE) != OK)
errmsg = e_invarg;
if ((((int)*flags & BKC_AUTO) != 0)
+ (((int)*flags & BKC_YES) != 0)
+ (((int)*flags & BKC_NO) != 0) != 1)
{
/* Must have exactly one of "auto", "yes" and "no". */
(void)opt_strings_flags(oldval, p_bkc_values, flags, TRUE);
errmsg = e_invarg;
}
}
}
/* 'backupext' and 'patchmode' */
else if (varp == &p_bex || varp == &p_pm)
{
if (STRCMP(*p_bex == '.' ? p_bex + 1 : p_bex,
*p_pm == '.' ? p_pm + 1 : p_pm) == 0)
errmsg = (char_u *)N_("E589: 'backupext' and 'patchmode' are equal");
}
#ifdef FEAT_LINEBREAK
/* 'breakindentopt' */
else if (varp == &curwin->w_p_briopt)
{
if (briopt_check(curwin) == FAIL)
errmsg = e_invarg;
}
#endif
/*
* 'isident', 'iskeyword', 'isprint' or 'isfname' option: refill g_chartab[].
* If the new option value is invalid, use the old value.  'lisp' option:
* refill g_chartab[] for the '-' char.
*/
else if ( varp == &p_isi
|| varp == &(curbuf->b_p_isk)
|| varp == &p_isp
|| varp == &p_isf)
{
if (init_chartab() == FAIL)
{
did_chartab = TRUE; /* need to restore it below */
errmsg = e_invarg; /* error in value */
}
}
/* 'helpfile' */
else if (varp == &p_hf)
{
/* May compute new values for $VIM and $VIMRUNTIME */
if (didset_vim)
{
vim_setenv((char_u *)"VIM", (char_u *)"");
didset_vim = FALSE;
}
if (didset_vimruntime)
{
vim_setenv((char_u *)"VIMRUNTIME", (char_u *)"");
didset_vimruntime = FALSE;
}
}
#ifdef FEAT_SYN_HL
/* 'colorcolumn' */
else if (varp == &curwin->w_p_cc)
errmsg = check_colorcolumn(curwin);
#endif
#ifdef FEAT_MULTI_LANG
/* 'helplang' */
else if (varp == &p_hlg)
{
/* Check for "", "ab", "ab,cd", etc. */
for (s = p_hlg; *s != NUL; s += 3)
{
if (s[1] == NUL || ((s[2] != ',' || s[3] == NUL) && s[2] != NUL))
{
errmsg = e_invarg;
break;
}
if (s[2] == NUL)
break;
}
}
#endif
/* 'highlight' */
else if (varp == &p_hl)
{
if (highlight_changed() == FAIL)
errmsg = e_invarg; /* invalid flags */
}
/* 'nrformats' */
else if (gvarp == &p_nf)
{
if (check_opt_strings(*varp, p_nf_values, TRUE) != OK)
errmsg = e_invarg;
}
#ifdef FEAT_SESSION
/* 'sessionoptions' */
else if (varp == &p_ssop)
{
if (opt_strings_flags(p_ssop, p_ssop_values, &ssop_flags, TRUE) != OK)
errmsg = e_invarg;
if ((ssop_flags & SSOP_CURDIR) && (ssop_flags & SSOP_SESDIR))
{
/* Don't allow both "sesdir" and "curdir". */
(void)opt_strings_flags(oldval, p_ssop_values, &ssop_flags, TRUE);
errmsg = e_invarg;
}
}
/* 'viewoptions' */
else if (varp == &p_vop)
{
if (opt_strings_flags(p_vop, p_ssop_values, &vop_flags, TRUE) != OK)
errmsg = e_invarg;
}
#endif
/* 'scrollopt' */
#ifdef FEAT_SCROLLBIND
else if (varp == &p_sbo)
{
if (check_opt_strings(p_sbo, p_scbopt_values, TRUE) != OK)
errmsg = e_invarg;
}
#endif
/* 'ambiwidth' and 'emoji' */
#ifdef FEAT_MBYTE
else if (varp == &p_ambw || varp == &p_emoji)
{
if (check_opt_strings(p_ambw, p_ambw_values, FALSE) != OK)
errmsg = e_invarg;
else if (set_chars_option(&p_lcs) != NULL)
errmsg = (char_u *)_("E834: Conflicts with value of 'listchars'");
# if defined(FEAT_WINDOWS) || defined(FEAT_FOLDING)
else if (set_chars_option(&p_fcs) != NULL)
errmsg = (char_u *)_("E835: Conflicts with value of 'fillchars'");
# endif
}
#endif
/* 'background' */
else if (varp == &p_bg)
{
if (check_opt_strings(p_bg, p_bg_values, FALSE) == OK)
{
#ifdef FEAT_EVAL
int dark = (*p_bg == 'd');
#endif
init_highlight(FALSE, FALSE);
#ifdef FEAT_EVAL
if (dark != (*p_bg == 'd')
&& get_var_value((char_u *)"g:colors_name") != NULL)
{
/* The color scheme must have set 'background' back to another
* value, that's not what we want here. Disable the color
* scheme and set the colors again. */
do_unlet((char_u *)"g:colors_name", TRUE);
free_string_option(p_bg);
p_bg = vim_strsave((char_u *)(dark ? "dark" : "light"));
check_string_option(&p_bg);
init_highlight(FALSE, FALSE);
}
#endif
}
else
errmsg = e_invarg;
}
/* 'wildmode' */
else if (varp == &p_wim)
{
if (check_opt_wim() == FAIL)
errmsg = e_invarg;
}
#ifdef FEAT_CMDL_COMPL
/* 'wildoptions' */
else if (varp == &p_wop)
{
if (check_opt_strings(p_wop, p_wop_values, TRUE) != OK)
errmsg = e_invarg;
}
#endif
#ifdef FEAT_WAK
/* 'winaltkeys' */
else if (varp == &p_wak)
{
if (*p_wak == NUL
|| check_opt_strings(p_wak, p_wak_values, FALSE) != OK)
errmsg = e_invarg;
# ifdef FEAT_MENU
# ifdef FEAT_GUI_MOTIF
else if (gui.in_use)
gui_motif_set_mnemonics(p_wak[0] == 'y' || p_wak[0] == 'm');
# else
# ifdef FEAT_GUI_GTK
else if (gui.in_use)
gui_gtk_set_mnemonics(p_wak[0] == 'y' || p_wak[0] == 'm');
# endif
# endif
# endif
}
#endif
#ifdef FEAT_AUTOCMD
/* 'eventignore' */
else if (varp == &p_ei)
{
if (check_ei() == FAIL)
errmsg = e_invarg;
}
#endif
#ifdef FEAT_MBYTE
/* 'encoding' and 'fileencoding' */
else if (varp == &p_enc || gvarp == &p_fenc || varp == &p_tenc)
{
if (gvarp == &p_fenc)
{
if (!curbuf->b_p_ma && opt_flags != OPT_GLOBAL)
errmsg = e_modifiable;
else if (vim_strchr(*varp, ',') != NULL)
/* No comma allowed in 'fileencoding'; catches confusing it
* with 'fileencodings'. */
errmsg = e_invarg;
else
{
# ifdef FEAT_TITLE
/* May show a "+" in the title now. */
redraw_titles();
# endif
/* Add 'fileencoding' to the swap file. */
ml_setflags(curbuf);
}
}
if (errmsg == NULL)
{
/* canonize the value, so that STRCMP() can be used on it */
p = enc_canonize(*varp);
if (p != NULL)
{
vim_free(*varp);
*varp = p;
}
if (varp == &p_enc)
{
errmsg = mb_init();
# ifdef FEAT_TITLE
redraw_titles();
# endif
}
}
# if defined(FEAT_GUI_GTK)
if (errmsg == NULL && varp == &p_tenc && gui.in_use)
{
/* GTK+ 2 uses only a single encoding, and that is UTF-8. */
if (STRCMP(p_tenc, "utf-8") != 0)
errmsg = (char_u *)N_("E617: Cannot be changed in the GTK+ 2 GUI");
}
# endif
if (errmsg == NULL)
{
# ifdef FEAT_KEYMAP
/* When 'keymap' is used and 'encoding' changes, reload the keymap
* (with another encoding). */
if (varp == &p_enc && *curbuf->b_p_keymap != NUL)
(void)keymap_init();
# endif
/* When 'termencoding' is not empty and 'encoding' changes or when
* 'termencoding' changes, need to setup for keyboard input and
* display output conversion. */
if (((varp == &p_enc && *p_tenc != NUL) || varp == &p_tenc))
{
convert_setup(&input_conv, p_tenc, p_enc);
convert_setup(&output_conv, p_enc, p_tenc);
}
# if defined(WIN3264) && defined(FEAT_MBYTE)
/* $HOME may have characters in active code page. */
if (varp == &p_enc)
init_homedir();
# endif
}
}
#endif
#if defined(FEAT_POSTSCRIPT)
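/* 'printencoding' */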
else if (varp == &p_penc)
{
/* Canonize 'printencoding' if it is a standard Vim encoding name */
p = enc_canonize(p_penc);
if (p != NULL)
{
vim_free(p_penc);
p_penc = p;
}
else
{
/* Canonization failed: at least ensure lower case and '-' for '_' */
for (s = p_penc; *s != NUL; s++)
{
if (*s == '_')
*s = '-';
else
*s = TOLOWER_ASC(*s);
}
}
}
#endif
#if defined(FEAT_XIM) && defined(FEAT_GUI_GTK)
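/* 'imactivatekey' */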
else if (varp == &p_imak)
{
if (gui.in_use && !im_xim_isvalid_imactivate())
errmsg = e_invarg;
}
#endif
#ifdef FEAT_KEYMAP
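/* 'keymap' */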
else if (varp == &curbuf->b_p_keymap)
{
/* load or unload key mapping tables */
errmsg = keymap_init();
if (errmsg == NULL)
{
if (*curbuf->b_p_keymap != NUL)
{
/* Installed a new keymap, switch on using it. */
curbuf->b_p_iminsert = B_IMODE_LMAP;
if (curbuf->b_p_imsearch != B_IMODE_USE_INSERT)
curbuf->b_p_imsearch = B_IMODE_LMAP;
}
else
{
/* Cleared the keymap, may reset 'iminsert' and 'imsearch'. */
if (curbuf->b_p_iminsert == B_IMODE_LMAP)
curbuf->b_p_iminsert = B_IMODE_NONE;
if (curbuf->b_p_imsearch == B_IMODE_LMAP)
curbuf->b_p_imsearch = B_IMODE_USE_INSERT;
}
if ((opt_flags & OPT_LOCAL) == 0)
{
set_iminsert_global();
set_imsearch_global();
}
# ifdef FEAT_WINDOWS
status_redraw_curbuf();
# endif
}
}
#endif
/* 'fileformat' */
else if (gvarp == &p_ff)
{
if (!curbuf->b_p_ma && !(opt_flags & OPT_GLOBAL))
errmsg = e_modifiable;
else if (check_opt_strings(*varp, p_ff_values, FALSE) != OK)
errmsg = e_invarg;
else
{
/* may also change 'textmode' */
if (get_fileformat(curbuf) == EOL_DOS)
curbuf->b_p_tx = TRUE;
else
curbuf->b_p_tx = FALSE;
#ifdef FEAT_TITLE
redraw_titles();
#endif
/* update flag in swap file */
ml_setflags(curbuf);
/* Redraw needed when switching to/from "mac": a CR in the text
* will be displayed differently. */
if (get_fileformat(curbuf) == EOL_MAC || *oldval == 'm')
redraw_curbuf_later(NOT_VALID);
}
}
/* 'fileformats' */
else if (varp == &p_ffs)
{
if (check_opt_strings(p_ffs, p_ff_values, TRUE) != OK)
errmsg = e_invarg;
else
{
/* also change 'textauto' */
if (*p_ffs == NUL)
p_ta = FALSE;
else
p_ta = TRUE;
}
}
#if defined(FEAT_CRYPT)
/* 'cryptkey' */
else if (gvarp == &p_key)
{
# if defined(FEAT_CMDHIST)
/* Make sure the ":set" command doesn't show the new value in the
* history. */
remove_key_from_history();
# endif
if (STRCMP(curbuf->b_p_key, oldval) != 0)
/* Need to update the swapfile. */
ml_set_crypt_key(curbuf, oldval,
*curbuf->b_p_cm == NUL ? p_cm : curbuf->b_p_cm);
}
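/* 'cryptmethod' */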
else if (gvarp == &p_cm)
{
if (opt_flags & OPT_LOCAL)
p = curbuf->b_p_cm;
else
p = p_cm;
if (check_opt_strings(p, p_cm_values, TRUE) != OK)
errmsg = e_invarg;
else if (crypt_self_test() == FAIL)
errmsg = e_invarg;
else
{
/* When setting the global value to empty, make it "zip". */
if (*p_cm == NUL)
{
if (new_value_alloced)
free_string_option(p_cm);
p_cm = vim_strsave((char_u *)"zip");
new_value_alloced = TRUE;
}
/* When using ":set cm=name" the local value is going to be empty.
* Do that here, otherwise the crypt functions will still use the
* local value. */
if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0)
{
free_string_option(curbuf->b_p_cm);
curbuf->b_p_cm = empty_option;
}
/* Need to update the swapfile when the effective method changed.
* Set "s" to the effective old value, "p" to the effective new
* method and compare. */
if ((opt_flags & OPT_LOCAL) && *oldval == NUL)
s = p_cm; /* was previously using the global value */
else
s = oldval;
if (*curbuf->b_p_cm == NUL)
p = p_cm; /* is now using the global value */
else
p = curbuf->b_p_cm;
if (STRCMP(s, p) != 0)
ml_set_crypt_key(curbuf, curbuf->b_p_key, s);
/* If the global value changes need to update the swapfile for all
* buffers using that value. */
if ((opt_flags & OPT_GLOBAL) && STRCMP(p_cm, oldval) != 0)
{
buf_T *buf;
FOR_ALL_BUFFERS(buf)
if (buf != curbuf && *buf->b_p_cm == NUL)
ml_set_crypt_key(buf, buf->b_p_key, oldval);
}
}
}
#endif
/* 'matchpairs' */
else if (gvarp == &p_mps)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
for (p = *varp; *p != NUL; ++p)
{
int x2 = -1;
int x3 = -1;
if (*p != NUL)
p += mb_ptr2len(p);
if (*p != NUL)
x2 = *p++;
if (*p != NUL)
{
x3 = mb_ptr2char(p);
p += mb_ptr2len(p);
}
if (x2 != ':' || x3 == -1 || (*p != NUL && *p != ','))
{
errmsg = e_invarg;
break;
}
if (*p == NUL)
break;
}
}
else
#endif
{
/* Check for "x:y,x:y" */
for (p = *varp; *p != NUL; p += 4)
{
if (p[1] != ':' || p[2] == NUL || (p[3] != NUL && p[3] != ','))
{
errmsg = e_invarg;
break;
}
if (p[3] == NUL)
break;
}
}
}
#ifdef FEAT_COMMENTS
/* 'comments' */
else if (gvarp == &p_com)
{
for (s = *varp; *s; )
{
while (*s && *s != ':')
{
if (vim_strchr((char_u *)COM_ALL, *s) == NULL
&& !VIM_ISDIGIT(*s) && *s != '-')
{
errmsg = illegal_char(errbuf, *s);
break;
}
++s;
}
if (*s++ == NUL)
errmsg = (char_u *)N_("E524: Missing colon");
else if (*s == ',' || *s == NUL)
errmsg = (char_u *)N_("E525: Zero length string");
if (errmsg != NULL)
break;
while (*s && *s != ',')
{
if (*s == '\\' && s[1] != NUL)
++s;
++s;
}
s = skip_to_option_part(s);
}
}
#endif
/* 'listchars' */
else if (varp == &p_lcs)
{
errmsg = set_chars_option(varp);
}
#if defined(FEAT_WINDOWS) || defined(FEAT_FOLDING)
/* 'fillchars' */
else if (varp == &p_fcs)
{
errmsg = set_chars_option(varp);
}
#endif
#ifdef FEAT_CMDWIN
/* 'cedit' */
else if (varp == &p_cedit)
{
errmsg = check_cedit();
}
#endif
/* 'verbosefile' */
else if (varp == &p_vfile)
{
verbose_stop();
if (*p_vfile != NUL && verbose_open() == FAIL)
errmsg = e_invarg;
}
#ifdef FEAT_VIMINFO
/* 'viminfo' */
else if (varp == &p_viminfo)
{
for (s = p_viminfo; *s;)
{
/* Check it's a valid character */
if (vim_strchr((char_u *)"!\"%'/:<@cfhnrs", *s) == NULL)
{
errmsg = illegal_char(errbuf, *s);
break;
}
if (*s == 'n') /* name is always last one */
{
break;
}
else if (*s == 'r') /* skip until next ',' */
{
while (*++s && *s != ',')
;
}
else if (*s == '%')
{
/* optional number */
while (vim_isdigit(*++s))
;
}
else if (*s == '!' || *s == 'h' || *s == 'c')
++s; /* no extra chars */
else /* must have a number */
{
while (vim_isdigit(*++s))
;
if (!VIM_ISDIGIT(*(s - 1)))
{
if (errbuf != NULL)
{
sprintf((char *)errbuf,
_("E526: Missing number after <%s>"),
transchar_byte(*(s - 1)));
errmsg = errbuf;
}
else
errmsg = (char_u *)"";
break;
}
}
if (*s == ',')
++s;
else if (*s)
{
if (errbuf != NULL)
errmsg = (char_u *)N_("E527: Missing comma");
else
errmsg = (char_u *)"";
break;
}
}
if (*p_viminfo && errmsg == NULL && get_viminfo_parameter('\'') < 0)
errmsg = (char_u *)N_("E528: Must specify a ' value");
}
#endif /* FEAT_VIMINFO */
/* terminal options */
else if (istermoption(&options[opt_idx]) && full_screen)
{
/* ":set t_Co=0" and ":set t_Co=1" do ":set t_Co=" */
if (varp == &T_CCO)
{
int colors = atoi((char *)T_CCO);
/* Only reinitialize colors if t_Co value has really changed to
* avoid expensive reload of colorscheme if t_Co is set to the
* same value multiple times. */
if (colors != t_colors)
{
t_colors = colors;
if (t_colors <= 1)
{
if (new_value_alloced)
vim_free(T_CCO);
T_CCO = empty_option;
}
/* We now have a different color setup, initialize it again. */
init_highlight(TRUE, FALSE);
}
}
ttest(FALSE);
if (varp == &T_ME)
{
out_str(T_ME);
redraw_later(CLEAR);
#if defined(WIN3264) && !defined(FEAT_GUI_W32)
/* Since t_me has been set, this probably means that the user
* wants to use this as the default colors.  Need to reset the
* default background/foreground colors. */
mch_set_normal_colors();
#endif
}
}
#ifdef FEAT_LINEBREAK
/* 'showbreak' */
else if (varp == &p_sbr)
{
for (s = p_sbr; *s; )
{
if (ptr2cells(s) != 1)
errmsg = (char_u *)N_("E595: contains unprintable or wide character");
mb_ptr_adv(s);
}
}
#endif
#ifdef FEAT_GUI
/* 'guifont' */
else if (varp == &p_guifont)
{
if (gui.in_use)
{
p = p_guifont;
# if defined(FEAT_GUI_GTK)
/*
* Put up a font dialog and let the user select a new value.
* If this is cancelled, go back to the old value but don't
* give an error message.
*/
if (STRCMP(p, "*") == 0)
{
p = gui_mch_font_dialog(oldval);
if (new_value_alloced)
free_string_option(p_guifont);
p_guifont = (p != NULL) ? p : vim_strsave(oldval);
new_value_alloced = TRUE;
}
# endif
if (p != NULL && gui_init_font(p_guifont, FALSE) != OK)
{
# if defined(FEAT_GUI_MSWIN) || defined(FEAT_GUI_PHOTON)
if (STRCMP(p_guifont, "*") == 0)
{
/* Dialog was cancelled: Keep the old value without giving
* an error message. */
if (new_value_alloced)
free_string_option(p_guifont);
p_guifont = vim_strsave(oldval);
new_value_alloced = TRUE;
}
else
# endif
errmsg = (char_u *)N_("E596: Invalid font(s)");
}
}
redraw_gui_only = TRUE;
}
# ifdef FEAT_XFONTSET
else if (varp == &p_guifontset)
{
if (STRCMP(p_guifontset, "*") == 0)
errmsg = (char_u *)N_("E597: can't select fontset");
else if (gui.in_use && gui_init_font(p_guifontset, TRUE) != OK)
errmsg = (char_u *)N_("E598: Invalid fontset");
redraw_gui_only = TRUE;
}
# endif
# ifdef FEAT_MBYTE
else if (varp == &p_guifontwide)
{
if (STRCMP(p_guifontwide, "*") == 0)
errmsg = (char_u *)N_("E533: can't select wide font");
else if (gui_get_wide_font() == FAIL)
errmsg = (char_u *)N_("E534: Invalid wide font");
redraw_gui_only = TRUE;
}
# endif
#endif
#ifdef CURSOR_SHAPE
/* 'guicursor' */
else if (varp == &p_guicursor)
errmsg = parse_shape_opt(SHAPE_CURSOR);
#endif
#ifdef FEAT_MOUSESHAPE
/* 'mouseshape' */
else if (varp == &p_mouseshape)
{
errmsg = parse_shape_opt(SHAPE_MOUSE);
update_mouseshape(-1);
}
#endif
#ifdef FEAT_PRINTER
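/* 'printoptions' */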
else if (varp == &p_popt)
errmsg = parse_printoptions();
# if defined(FEAT_MBYTE) && defined(FEAT_POSTSCRIPT)
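/* 'printmbfont' */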
else if (varp == &p_pmfn)
errmsg = parse_printmbfont();
# endif
#endif
#ifdef FEAT_LANGMAP
/* 'langmap' */
else if (varp == &p_langmap)
langmap_set();
#endif
#ifdef FEAT_LINEBREAK
/* 'breakat' */
else if (varp == &p_breakat)
fill_breakat_flags();
#endif
#ifdef FEAT_TITLE
/* 'titlestring' and 'iconstring' */
else if (varp == &p_titlestring || varp == &p_iconstring)
{
# ifdef FEAT_STL_OPT
int flagval = (varp == &p_titlestring) ? STL_IN_TITLE : STL_IN_ICON;
/* NULL => statusline syntax */
if (vim_strchr(*varp, '%') && check_stl_option(*varp) == NULL)
stl_syntax |= flagval;
else
stl_syntax &= ~flagval;
# endif
did_set_title(varp == &p_iconstring);
}
#endif
#ifdef FEAT_GUI
/* 'guioptions' */
else if (varp == &p_go)
{
gui_init_which_components(oldval);
redraw_gui_only = TRUE;
}
#endif
#if defined(FEAT_GUI_TABLINE)
/* 'guitablabel' */
else if (varp == &p_gtl)
{
redraw_tabline = TRUE;
redraw_gui_only = TRUE;
}
/* 'guitabtooltip' */
else if (varp == &p_gtt)
{
redraw_gui_only = TRUE;
}
#endif
#if defined(FEAT_MOUSE_TTY) && (defined(UNIX) || defined(VMS))
/* 'ttymouse' */
else if (varp == &p_ttym)
{
/* Switch the mouse off before changing the escape sequences used for
* that. */
mch_setmouse(FALSE);
if (opt_strings_flags(p_ttym, p_ttym_values, &ttym_flags, FALSE) != OK)
errmsg = e_invarg;
else
check_mouse_termcode();
if (termcap_active)
setmouse(); /* may switch it on again */
}
#endif
/* 'selection' */
else if (varp == &p_sel)
{
if (*p_sel == NUL
|| check_opt_strings(p_sel, p_sel_values, FALSE) != OK)
errmsg = e_invarg;
}
/* 'selectmode' */
else if (varp == &p_slm)
{
if (check_opt_strings(p_slm, p_slm_values, TRUE) != OK)
errmsg = e_invarg;
}
#ifdef FEAT_BROWSE
/* 'browsedir' */
else if (varp == &p_bsdir)
{
if (check_opt_strings(p_bsdir, p_bsdir_values, FALSE) != OK
&& !mch_isdir(p_bsdir))
errmsg = e_invarg;
}
#endif
/* 'keymodel' */
else if (varp == &p_km)
{
if (check_opt_strings(p_km, p_km_values, TRUE) != OK)
errmsg = e_invarg;
else
{
km_stopsel = (vim_strchr(p_km, 'o') != NULL);
km_startsel = (vim_strchr(p_km, 'a') != NULL);
}
}
/* 'mousemodel' */
else if (varp == &p_mousem)
{
if (check_opt_strings(p_mousem, p_mousem_values, FALSE) != OK)
errmsg = e_invarg;
#if defined(FEAT_GUI_MOTIF) && defined(FEAT_MENU) && (XmVersion <= 1002)
else if (*p_mousem != *oldval)
/* Changed from "extend" to "popup" or "popup_setpos" or vv: need
* to create or delete the popup menus. */
gui_motif_update_mousemodel(root_menu);
#endif
}
/* 'switchbuf' */
else if (varp == &p_swb)
{
if (opt_strings_flags(p_swb, p_swb_values, &swb_flags, TRUE) != OK)
errmsg = e_invarg;
}
/* 'debug' */
else if (varp == &p_debug)
{
if (check_opt_strings(p_debug, p_debug_values, TRUE) != OK)
errmsg = e_invarg;
}
/* 'display' */
else if (varp == &p_dy)
{
if (opt_strings_flags(p_dy, p_dy_values, &dy_flags, TRUE) != OK)
errmsg = e_invarg;
else
(void)init_chartab();
}
#ifdef FEAT_WINDOWS
/* 'eadirection' */
else if (varp == &p_ead)
{
if (check_opt_strings(p_ead, p_ead_values, FALSE) != OK)
errmsg = e_invarg;
}
#endif
#ifdef FEAT_CLIPBOARD
/* 'clipboard' */
else if (varp == &p_cb)
errmsg = check_clipboard_option();
#endif
#ifdef FEAT_SPELL
/* When 'spelllang' or 'spellfile' is set and there is a window for this
* buffer in which 'spell' is set, load the word lists. */
else if (varp == &(curwin->w_s->b_p_spl)
|| varp == &(curwin->w_s->b_p_spf))
{
errmsg = did_set_spell_option(varp == &(curwin->w_s->b_p_spf));
}
/* When 'spellcapcheck' is set compile the regexp program. */
else if (varp == &(curwin->w_s->b_p_spc))
{
errmsg = compile_cap_prog(curwin->w_s);
}
/* 'spellsuggest' */
else if (varp == &p_sps)
{
if (spell_check_sps() != OK)
errmsg = e_invarg;
}
/* 'mkspellmem' */
else if (varp == &p_msm)
{
if (spell_check_msm() != OK)
errmsg = e_invarg;
}
#endif
#ifdef FEAT_QUICKFIX
/* When 'bufhidden' is set, check for valid value. */
else if (gvarp == &p_bh)
{
if (check_opt_strings(curbuf->b_p_bh, p_bufhidden_values, FALSE) != OK)
errmsg = e_invarg;
}
/* When 'buftype' is set, check for valid value. */
else if (gvarp == &p_bt)
{
if (check_opt_strings(curbuf->b_p_bt, p_buftype_values, FALSE) != OK)
errmsg = e_invarg;
else
{
# ifdef FEAT_WINDOWS
if (curwin->w_status_height)
{
curwin->w_redr_status = TRUE;
redraw_later(VALID);
}
# endif
curbuf->b_help = (curbuf->b_p_bt[0] == 'h');
# ifdef FEAT_TITLE
redraw_titles();
# endif
}
}
#endif
#ifdef FEAT_STL_OPT
/* 'statusline' or 'rulerformat' */
else if (gvarp == &p_stl || varp == &p_ruf)
{
int wid;
if (varp == &p_ruf) /* reset ru_wid first */
ru_wid = 0;
s = *varp;
if (varp == &p_ruf && *s == '%')
{
/* set ru_wid if 'ruf' starts with "%99(" */
if (*++s == '-') /* ignore a '-' */
s++;
wid = getdigits(&s);
if (wid && *s == '(' && (errmsg = check_stl_option(p_ruf)) == NULL)
ru_wid = wid;
else
errmsg = check_stl_option(p_ruf);
}
/* check 'statusline' only if it doesn't start with "%!" */
else if (varp == &p_ruf || s[0] != '%' || s[1] != '!')
errmsg = check_stl_option(s);
if (varp == &p_ruf && errmsg == NULL)
comp_col();
}
#endif
#ifdef FEAT_INS_EXPAND
/* check if it is a valid value for 'complete' -- Acevedo */
else if (gvarp == &p_cpt)
{
for (s = *varp; *s;)
{
while (*s == ',' || *s == ' ')
s++;
if (!*s)
break;
if (vim_strchr((char_u *)".wbuksid]tU", *s) == NULL)
{
errmsg = illegal_char(errbuf, *s);
break;
}
if (*++s != NUL && *s != ',' && *s != ' ')
{
if (s[-1] == 'k' || s[-1] == 's')
{
/* skip optional filename after 'k' and 's' */
while (*s && *s != ',' && *s != ' ')
{
if (*s == '\\')
++s;
++s;
}
}
else
{
if (errbuf != NULL)
{
sprintf((char *)errbuf,
_("E535: Illegal character after <%c>"),
*--s);
errmsg = errbuf;
}
else
errmsg = (char_u *)"";
break;
}
}
}
}
/* 'completeopt' */
else if (varp == &p_cot)
{
if (check_opt_strings(p_cot, p_cot_values, TRUE) != OK)
errmsg = e_invarg;
else
completeopt_was_set();
}
#endif /* FEAT_INS_EXPAND */
#ifdef FEAT_SIGNS
/* 'signcolumn' */
else if (varp == &curwin->w_p_scl)
{
if (check_opt_strings(*varp, p_scl_values, FALSE) != OK)
errmsg = e_invarg;
}
#endif
#if defined(FEAT_TOOLBAR) && !defined(FEAT_GUI_W32)
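/* 'toolbar' */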
else if (varp == &p_toolbar)
{
if (opt_strings_flags(p_toolbar, p_toolbar_values,
&toolbar_flags, TRUE) != OK)
errmsg = e_invarg;
else
{
out_flush();
gui_mch_show_toolbar((toolbar_flags &
(TOOLBAR_TEXT | TOOLBAR_ICONS)) != 0);
}
}
#endif
#if defined(FEAT_TOOLBAR) && defined(FEAT_GUI_GTK)
/* 'toolbariconsize': GTK+ 2 only */
else if (varp == &p_tbis)
{
if (opt_strings_flags(p_tbis, p_tbis_values, &tbis_flags, FALSE) != OK)
errmsg = e_invarg;
else
{
out_flush();
gui_mch_show_toolbar((toolbar_flags &
(TOOLBAR_TEXT | TOOLBAR_ICONS)) != 0);
}
}
#endif
/* 'pastetoggle': translate key codes like in a mapping */
else if (varp == &p_pt)
{
if (*p_pt)
{
(void)replace_termcodes(p_pt, &p, TRUE, TRUE, FALSE);
if (p != NULL)
{
if (new_value_alloced)
free_string_option(p_pt);
p_pt = p;
new_value_alloced = TRUE;
}
}
}
/* 'backspace' */
else if (varp == &p_bs)
{
if (VIM_ISDIGIT(*p_bs))
{
if (*p_bs > '2' || p_bs[1] != NUL)
errmsg = e_invarg;
}
else if (check_opt_strings(p_bs, p_bs_values, TRUE) != OK)
errmsg = e_invarg;
}
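/* 'belloff' */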
else if (varp == &p_bo)
{
if (opt_strings_flags(p_bo, p_bo_values, &bo_flags, TRUE) != OK)
errmsg = e_invarg;
}
/* 'tagcase' */
else if (gvarp == &p_tc)
{
unsigned int *flags;
if (opt_flags & OPT_LOCAL)
{
p = curbuf->b_p_tc;
flags = &curbuf->b_tc_flags;
}
else
{
p = p_tc;
flags = &tc_flags;
}
if ((opt_flags & OPT_LOCAL) && *p == NUL)
/* make the local value empty: use the global value */
*flags = 0;
else if (*p == NUL
|| opt_strings_flags(p, p_tc_values, flags, FALSE) != OK)
errmsg = e_invarg;
}
#ifdef FEAT_MBYTE
/* 'casemap' */
else if (varp == &p_cmp)
{
if (opt_strings_flags(p_cmp, p_cmp_values, &cmp_flags, TRUE) != OK)
errmsg = e_invarg;
}
#endif
#ifdef FEAT_DIFF
/* 'diffopt' */
else if (varp == &p_dip)
{
if (diffopt_changed() == FAIL)
errmsg = e_invarg;
}
#endif
#ifdef FEAT_FOLDING
/* 'foldmethod' */
else if (gvarp == &curwin->w_allbuf_opt.wo_fdm)
{
if (check_opt_strings(*varp, p_fdm_values, FALSE) != OK
|| *curwin->w_p_fdm == NUL)
errmsg = e_invarg;
else
{
foldUpdateAll(curwin);
if (foldmethodIsDiff(curwin))
newFoldLevel();
}
}
# ifdef FEAT_EVAL
/* 'foldexpr' */
else if (varp == &curwin->w_p_fde)
{
if (foldmethodIsExpr(curwin))
foldUpdateAll(curwin);
}
# endif
/* 'foldmarker' */
else if (gvarp == &curwin->w_allbuf_opt.wo_fmr)
{
p = vim_strchr(*varp, ',');
if (p == NULL)
errmsg = (char_u *)N_("E536: comma required");
else if (p == *varp || p[1] == NUL)
errmsg = e_invarg;
else if (foldmethodIsMarker(curwin))
foldUpdateAll(curwin);
}
/* 'commentstring' */
else if (gvarp == &p_cms)
{
if (**varp != NUL && strstr((char *)*varp, "%s") == NULL)
errmsg = (char_u *)N_("E537: 'commentstring' must be empty or contain %s");
}
/* 'foldopen' */
else if (varp == &p_fdo)
{
if (opt_strings_flags(p_fdo, p_fdo_values, &fdo_flags, TRUE) != OK)
errmsg = e_invarg;
}
/* 'foldclose' */
else if (varp == &p_fcl)
{
if (check_opt_strings(p_fcl, p_fcl_values, TRUE) != OK)
errmsg = e_invarg;
}
/* 'foldignore' */
else if (gvarp == &curwin->w_allbuf_opt.wo_fdi)
{
if (foldmethodIsIndent(curwin))
foldUpdateAll(curwin);
}
#endif
#ifdef FEAT_VIRTUALEDIT
/* 'virtualedit' */
else if (varp == &p_ve)
{
if (opt_strings_flags(p_ve, p_ve_values, &ve_flags, TRUE) != OK)
errmsg = e_invarg;
else if (STRCMP(p_ve, oldval) != 0)
{
/* Recompute cursor position in case the new 've' setting
* changes something. */
validate_virtcol();
coladvance(curwin->w_virtcol);
}
}
#endif
#if defined(FEAT_CSCOPE) && defined(FEAT_QUICKFIX)
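/* 'cscopequickfix' */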
else if (varp == &p_csqf)
{
if (p_csqf != NULL)
{
p = p_csqf;
while (*p != NUL)
{
if (vim_strchr((char_u *)CSQF_CMDS, *p) == NULL
|| p[1] == NUL
|| vim_strchr((char_u *)CSQF_FLAGS, p[1]) == NULL
|| (p[2] != NUL && p[2] != ','))
{
errmsg = e_invarg;
break;
}
else if (p[2] == NUL)
break;
else
p += 3;
}
}
}
#endif
#ifdef FEAT_CINDENT
/* 'cinoptions' */
else if (gvarp == &p_cino)
{
/* TODO: recognize errors */
parse_cino(curbuf);
}
#endif
#if defined(FEAT_RENDER_OPTIONS)
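/* 'renderoptions' */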
else if (varp == &p_rop && gui.in_use)
{
if (!gui_mch_set_rendering_options(p_rop))
errmsg = e_invarg;
}
#endif
/* Options that are a list of flags. */
else
{
p = NULL;
if (varp == &p_ww) /* 'whichwrap' */
p = (char_u *)WW_ALL;
else if (varp == &p_shm) /* 'shortmess' */
p = (char_u *)SHM_ALL;
else if (varp == &(p_cpo)) /* 'cpoptions' */
p = (char_u *)CPO_ALL;
else if (varp == &(curbuf->b_p_fo)) /* 'formatoptions' */
p = (char_u *)FO_ALL;
#ifdef FEAT_CONCEAL
else if (varp == &curwin->w_p_cocu)
p = (char_u *)COCU_ALL;
#endif
else if (varp == &p_mouse)
{
#ifdef FEAT_MOUSE
p = (char_u *)MOUSE_ALL;
#else
if (*p_mouse != NUL)
errmsg = (char_u *)N_("E538: No mouse support");
#endif
}
#if defined(FEAT_GUI)
else if (varp == &p_go)
p = (char_u *)GO_ALL;
#endif
if (p != NULL)
{
for (s = *varp; *s; ++s)
if (vim_strchr(p, *s) == NULL)
{
errmsg = illegal_char(errbuf, *s);
break;
}
}
}
/*
* If error detected, restore the previous value.
*/
if (errmsg != NULL)
{
if (new_value_alloced)
free_string_option(*varp);
*varp = oldval;
/*
* When resetting some values, need to act on it.
*/
if (did_chartab)
(void)init_chartab();
if (varp == &p_hl)
(void)highlight_changed();
}
else
{
#ifdef FEAT_EVAL
/* Remember where the option was set. */
set_option_scriptID_idx(opt_idx, opt_flags, current_SID);
#endif
/*
* Free string options that are in allocated memory.
* Use "free_oldval", because recursiveness may change the flags under
* our fingers (esp. init_highlight()).
*/
if (free_oldval)
free_string_option(oldval);
if (new_value_alloced)
options[opt_idx].flags |= P_ALLOCED;
else
options[opt_idx].flags &= ~P_ALLOCED;
if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0
&& ((int)options[opt_idx].indir & PV_BOTH))
{
/* global option with local value set to use global value; free
* the local value and make it empty */
p = get_varp_scope(&(options[opt_idx]), OPT_LOCAL);
free_string_option(*(char_u **)p);
*(char_u **)p = empty_option;
}
/* May set global value for local option. */
else if (!(opt_flags & OPT_LOCAL) && opt_flags != OPT_GLOBAL)
set_string_option_global(opt_idx, varp);
#ifdef FEAT_AUTOCMD
/*
* Trigger the autocommand only after setting the flags.
*/
# ifdef FEAT_SYN_HL
/* When 'syntax' is set, load the syntax of that name */
if (varp == &(curbuf->b_p_syn))
{
apply_autocmds(EVENT_SYNTAX, curbuf->b_p_syn,
curbuf->b_fname, TRUE, curbuf);
}
else
# endif
if (varp == &(curbuf->b_p_ft))
{
/* 'filetype' is set, trigger the FileType autocommand */
did_filetype = TRUE;
apply_autocmds(EVENT_FILETYPE, curbuf->b_p_ft,
curbuf->b_fname, TRUE, curbuf);
}
#endif
#ifdef FEAT_SPELL
if (varp == &(curwin->w_s->b_p_spl))
{
char_u fname[200];
char_u *q = curwin->w_s->b_p_spl;
/* Skip the first name if it is "cjk". */
if (STRNCMP(q, "cjk,", 4) == 0)
q += 4;
/*
* Source the spell/LANG.vim in 'runtimepath'.
* They could set 'spellcapcheck' depending on the language.
* Use the first name in 'spelllang' up to '_region' or
* '.encoding'.
*/
for (p = q; *p != NUL; ++p)
if (vim_strchr((char_u *)"_.,", *p) != NULL)
break;
vim_snprintf((char *)fname, 200, "spell/%.*s.vim", (int)(p - q), q);
source_runtime(fname, DIP_ALL);
}
#endif
}
#ifdef FEAT_MOUSE
if (varp == &p_mouse)
{
# ifdef FEAT_MOUSE_TTY
if (*p_mouse == NUL)
mch_setmouse(FALSE); /* switch mouse off */
else
# endif
setmouse(); /* in case 'mouse' changed */
}
#endif
if (curwin->w_curswant != MAXCOL
&& (options[opt_idx].flags & (P_CURSWANT | P_RALL)) != 0)
curwin->w_set_curswant = TRUE;
#ifdef FEAT_GUI
/* check redraw when it's not a GUI option or the GUI is active. */
if (!redraw_gui_only || gui.in_use)
#endif
check_redraw(options[opt_idx].flags);
return errmsg;
}
#if defined(FEAT_SYN_HL) || defined(PROTO)
/*
* Simple int comparison function for use with qsort()
*/
static int
int_cmp(const void *a, const void *b)
{
return *(const int *)a - *(const int *)b;
}
/*
* Handle setting 'colorcolumn' or 'textwidth' in window "wp".
* Returns error message, NULL if it's OK.
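* E.g. ":set colorcolumn=+1,80" marks the column after 'textwidth' (when set)
* and column 80.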
*/
char_u *
check_colorcolumn(win_T *wp)
{
char_u *s;
int col;
int count = 0;
int color_cols[256];
int i;
int j = 0;
if (wp->w_buffer == NULL)
return NULL; /* buffer was closed */
for (s = wp->w_p_cc; *s != NUL && count < 255;)
{
if (*s == '-' || *s == '+')
{
/* -N and +N: add to 'textwidth' */
col = (*s == '-') ? -1 : 1;
++s;
if (!VIM_ISDIGIT(*s))
return e_invarg;
col = col * getdigits(&s);
if (wp->w_buffer->b_p_tw == 0)
goto skip; /* 'textwidth' not set, skip this item */
col += wp->w_buffer->b_p_tw;
if (col < 0)
goto skip;
}
else if (VIM_ISDIGIT(*s))
col = getdigits(&s);
else
return e_invarg;
color_cols[count++] = col - 1; /* 1-based to 0-based */
skip:
if (*s == NUL)
break;
if (*s != ',')
return e_invarg;
if (*++s == NUL)
return e_invarg; /* illegal trailing comma as in "set cc=80," */
}
vim_free(wp->w_p_cc_cols);
if (count == 0)
wp->w_p_cc_cols = NULL;
else
{
wp->w_p_cc_cols = (int *)alloc((unsigned)sizeof(int) * (count + 1));
if (wp->w_p_cc_cols != NULL)
{
/* sort the columns for faster usage on screen redraw inside
* win_line() */
qsort(color_cols, count, sizeof(int), int_cmp);
for (i = 0; i < count; ++i)
/* skip duplicates */
if (j == 0 || wp->w_p_cc_cols[j - 1] != color_cols[i])
wp->w_p_cc_cols[j++] = color_cols[i];
wp->w_p_cc_cols[j] = -1; /* end marker */
}
}
return NULL; /* no error */
}
#endif
/*
* Handle setting 'listchars' or 'fillchars'.
* Returns error message, NULL if it's OK.
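* E.g. ":set listchars=tab:>-,trail:." sets lcs_tab1 to '>', lcs_tab2 to '-'
* and lcs_trail to '.'.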
*/
static char_u *
set_chars_option(char_u **varp)
{
int round, i, len, entries;
char_u *p, *s;
int c1, c2 = 0;
struct charstab
{
int *cp;
char *name;
};
#if defined(FEAT_WINDOWS) || defined(FEAT_FOLDING)
static struct charstab filltab[] =
{
{&fill_stl, "stl"},
{&fill_stlnc, "stlnc"},
{&fill_vert, "vert"},
{&fill_fold, "fold"},
{&fill_diff, "diff"},
};
#endif
static struct charstab lcstab[] =
{
{&lcs_eol, "eol"},
{&lcs_ext, "extends"},
{&lcs_nbsp, "nbsp"},
{&lcs_prec, "precedes"},
{&lcs_space, "space"},
{&lcs_tab2, "tab"},
{&lcs_trail, "trail"},
#ifdef FEAT_CONCEAL
{&lcs_conceal, "conceal"},
#else
{NULL, "conceal"},
#endif
};
struct charstab *tab;
#if defined(FEAT_WINDOWS) || defined(FEAT_FOLDING)
if (varp == &p_lcs)
#endif
{
tab = lcstab;
entries = sizeof(lcstab) / sizeof(struct charstab);
}
#if defined(FEAT_WINDOWS) || defined(FEAT_FOLDING)
else
{
tab = filltab;
entries = sizeof(filltab) / sizeof(struct charstab);
}
#endif
/* first round: check for valid value, second round: assign values */
for (round = 0; round <= 1; ++round)
{
if (round > 0)
{
/* The value has been checked to be valid: set the defaults, space for
* 'fillchars' and NUL for 'listchars' */
for (i = 0; i < entries; ++i)
if (tab[i].cp != NULL)
*(tab[i].cp) = (varp == &p_lcs ? NUL : ' ');
if (varp == &p_lcs)
lcs_tab1 = NUL;
#if defined(FEAT_WINDOWS) || defined(FEAT_FOLDING)
else
fill_diff = '-';
#endif
}
p = *varp;
while (*p)
{
for (i = 0; i < entries; ++i)
{
len = (int)STRLEN(tab[i].name);
if (STRNCMP(p, tab[i].name, len) == 0
&& p[len] == ':'
&& p[len + 1] != NUL)
{
s = p + len + 1;
#ifdef FEAT_MBYTE
c1 = mb_ptr2char_adv(&s);
if (mb_char2cells(c1) > 1)
continue;
#else
c1 = *s++;
#endif
if (tab[i].cp == &lcs_tab2)
{
if (*s == NUL)
continue;
#ifdef FEAT_MBYTE
c2 = mb_ptr2char_adv(&s);
if (mb_char2cells(c2) > 1)
continue;
#else
c2 = *s++;
#endif
}
if (*s == ',' || *s == NUL)
{
if (round)
{
if (tab[i].cp == &lcs_tab2)
{
lcs_tab1 = c1;
lcs_tab2 = c2;
}
else if (tab[i].cp != NULL)
*(tab[i].cp) = c1;
}
p = s;
break;
}
}
}
if (i == entries)
return e_invarg;
if (*p == ',')
++p;
}
}
return NULL; /* no error */
}
#ifdef FEAT_STL_OPT
/*
* Check validity of options with the 'statusline' format.
* Return error message or NULL.
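* E.g. "%-10.20F" and "%{expr}" items are accepted; "%(" groups must be
* balanced.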
*/
char_u *
check_stl_option(char_u *s)
{
int itemcnt = 0;
int groupdepth = 0;
static char_u errbuf[80];
while (*s && itemcnt < STL_MAX_ITEM)
{
/* Check for valid keys after % sequences */
while (*s && *s != '%')
s++;
if (!*s)
break;
s++;
if (*s != '%' && *s != ')')
++itemcnt;
if (*s == '%' || *s == STL_TRUNCMARK || *s == STL_MIDDLEMARK)
{
s++;
continue;
}
if (*s == ')')
{
s++;
if (--groupdepth < 0)
break;
continue;
}
if (*s == '-')
s++;
while (VIM_ISDIGIT(*s))
s++;
if (*s == STL_USER_HL)
continue;
if (*s == '.')
{
s++;
while (*s && VIM_ISDIGIT(*s))
s++;
}
if (*s == '(')
{
groupdepth++;
continue;
}
if (vim_strchr(STL_ALL, *s) == NULL)
{
return illegal_char(errbuf, *s);
}
if (*s == '{')
{
s++;
while (*s != '}' && *s)
s++;
if (*s != '}')
return (char_u *)N_("E540: Unclosed expression sequence");
}
}
if (itemcnt >= STL_MAX_ITEM)
return (char_u *)N_("E541: too many items");
if (groupdepth != 0)
return (char_u *)N_("E542: unbalanced groups");
return NULL;
}
#endif
#ifdef FEAT_CLIPBOARD
/*
* Extract the items in the 'clipboard' option and set global values.
*/
static char_u *
check_clipboard_option(void)
{
int new_unnamed = 0;
int new_autoselect_star = FALSE;
int new_autoselect_plus = FALSE;
int new_autoselectml = FALSE;
int new_html = FALSE;
regprog_T *new_exclude_prog = NULL;
char_u *errmsg = NULL;
char_u *p;
for (p = p_cb; *p != NUL; )
{
if (STRNCMP(p, "unnamed", 7) == 0 && (p[7] == ',' || p[7] == NUL))
{
new_unnamed |= CLIP_UNNAMED;
p += 7;
}
else if (STRNCMP(p, "unnamedplus", 11) == 0
&& (p[11] == ',' || p[11] == NUL))
{
new_unnamed |= CLIP_UNNAMED_PLUS;
p += 11;
}
else if (STRNCMP(p, "autoselect", 10) == 0
&& (p[10] == ',' || p[10] == NUL))
{
new_autoselect_star = TRUE;
p += 10;
}
else if (STRNCMP(p, "autoselectplus", 14) == 0
&& (p[14] == ',' || p[14] == NUL))
{
new_autoselect_plus = TRUE;
p += 14;
}
else if (STRNCMP(p, "autoselectml", 12) == 0
&& (p[12] == ',' || p[12] == NUL))
{
new_autoselectml = TRUE;
p += 12;
}
else if (STRNCMP(p, "html", 4) == 0 && (p[4] == ',' || p[4] == NUL))
{
new_html = TRUE;
p += 4;
}
else if (STRNCMP(p, "exclude:", 8) == 0 && new_exclude_prog == NULL)
{
p += 8;
new_exclude_prog = vim_regcomp(p, RE_MAGIC);
if (new_exclude_prog == NULL)
errmsg = e_invarg;
break;
}
else
{
errmsg = e_invarg;
break;
}
if (*p == ',')
++p;
}
if (errmsg == NULL)
{
clip_unnamed = new_unnamed;
clip_autoselect_star = new_autoselect_star;
clip_autoselect_plus = new_autoselect_plus;
clip_autoselectml = new_autoselectml;
clip_html = new_html;
vim_regfree(clip_exclude_prog);
clip_exclude_prog = new_exclude_prog;
#ifdef FEAT_GUI_GTK
if (gui.in_use)
{
gui_gtk_set_selection_targets();
gui_gtk_set_dnd_targets();
}
#endif
}
else
vim_regfree(new_exclude_prog);
return errmsg;
}
#endif
#ifdef FEAT_SPELL
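/*
* Handle a change of 'spelllang' or, when "is_spellfile" is TRUE,
* 'spellfile': check the new value and reload the spell word lists for
* windows showing the current buffer with 'spell' set.
*/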
static char_u *
did_set_spell_option(int is_spellfile)
{
char_u *errmsg = NULL;
win_T *wp;
int l;
if (is_spellfile)
{
l = (int)STRLEN(curwin->w_s->b_p_spf);
if (l > 0 && (l < 4
|| STRCMP(curwin->w_s->b_p_spf + l - 4, ".add") != 0))
errmsg = e_invarg;
}
if (errmsg == NULL)
{
FOR_ALL_WINDOWS(wp)
if (wp->w_buffer == curbuf && wp->w_p_spell)
{
errmsg = did_set_spelllang(wp);
# ifdef FEAT_WINDOWS
break;
# endif
}
}
return errmsg;
}
/*
* Set curbuf->b_cap_prog to the regexp program for 'spellcapcheck'.
* Return error message when failed, NULL when OK.
*/
static char_u *
compile_cap_prog(synblock_T *synblock)
{
regprog_T *rp = synblock->b_cap_prog;
char_u *re;
if (*synblock->b_p_spc == NUL)
synblock->b_cap_prog = NULL;
else
{
/* Prepend a ^ so that we only match at one column */
re = concat_str((char_u *)"^", synblock->b_p_spc);
if (re != NULL)
{
synblock->b_cap_prog = vim_regcomp(re, RE_MAGIC);
vim_free(re);
if (synblock->b_cap_prog == NULL)
{
synblock->b_cap_prog = rp; /* restore the previous program */
return e_invarg;
}
}
}
vim_regfree(rp);
return NULL;
}
#endif
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Set the scriptID for an option, taking care of setting the buffer- or
* window-local value.
*/
static void
set_option_scriptID_idx(int opt_idx, int opt_flags, int id)
{
int both = (opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0;
int indir = (int)options[opt_idx].indir;
/* Remember where the option was set.  For local options this needs to be
* done in the buffer or window structure. */
if (both || (opt_flags & OPT_GLOBAL) || (indir & (PV_BUF|PV_WIN)) == 0)
options[opt_idx].scriptID = id;
if (both || (opt_flags & OPT_LOCAL))
{
if (indir & PV_BUF)
curbuf->b_p_scriptID[indir & PV_MASK] = id;
else if (indir & PV_WIN)
curwin->w_p_scriptID[indir & PV_MASK] = id;
}
}
#endif
/*
* Set the value of a boolean option, and take care of side effects.
* Returns NULL for success, or an error message for an error.
*/
static char_u *
set_bool_option(
int opt_idx, /* index in options[] table */
char_u *varp, /* pointer to the option variable */
int value, /* new value */
int opt_flags) /* OPT_LOCAL and/or OPT_GLOBAL */
{
int old_value = *(int *)varp;
/* Disallow changing some options from secure mode */
if ((secure
#ifdef HAVE_SANDBOX
|| sandbox != 0
#endif
) && (options[opt_idx].flags & P_SECURE))
return e_secure;
*(int *)varp = value; /* set the new value */
#ifdef FEAT_EVAL
/* Remember where the option was set. */
set_option_scriptID_idx(opt_idx, opt_flags, current_SID);
#endif
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
/* May set global value for local option. */
if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0)
*(int *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL) = value;
/*
* Handle side effects of changing a bool option.
*/
/* 'compatible' */
if ((int *)varp == &p_cp)
{
compatible_set();
}
#ifdef FEAT_LANGMAP
if ((int *)varp == &p_lrm)
/* 'langremap' -> !'langnoremap' */
p_lnr = !p_lrm;
else if ((int *)varp == &p_lnr)
/* 'langnoremap' -> !'langremap' */
p_lrm = !p_lnr;
#endif
#ifdef FEAT_PERSISTENT_UNDO
/* 'undofile' */
else if ((int *)varp == &curbuf->b_p_udf || (int *)varp == &p_udf)
{
/* Only take action when the option was set.  When it is reset we do
* not delete the undo file; the option may be set again without making
* any changes in between. */
if (curbuf->b_p_udf || p_udf)
{
char_u hash[UNDO_HASH_SIZE];
buf_T *save_curbuf = curbuf;
FOR_ALL_BUFFERS(curbuf)
{
/* When 'undofile' is set globally: for every buffer, otherwise
* only for the current buffer: Try to read in the undofile,
* if one exists, the buffer wasn't changed and the buffer was
* loaded */
if ((curbuf == save_curbuf
|| (opt_flags & OPT_GLOBAL) || opt_flags == 0)
&& !curbufIsChanged() && curbuf->b_ml.ml_mfp != NULL)
{
u_compute_hash(hash);
u_read_undo(NULL, hash, curbuf->b_fname);
}
}
curbuf = save_curbuf;
}
}
#endif
else if ((int *)varp == &curbuf->b_p_ro)
{
/* when 'readonly' is reset globally, also reset readonlymode */
if (!curbuf->b_p_ro && (opt_flags & OPT_LOCAL) == 0)
readonlymode = FALSE;
/* when 'readonly' is set may give W10 again */
if (curbuf->b_p_ro)
curbuf->b_did_warn = FALSE;
#ifdef FEAT_TITLE
redraw_titles();
#endif
}
#ifdef FEAT_GUI
else if ((int *)varp == &p_mh)
{
if (!p_mh)
gui_mch_mousehide(FALSE);
}
#endif
#ifdef FEAT_TITLE
/* when 'modifiable' is changed, redraw the window title */
else if ((int *)varp == &curbuf->b_p_ma)
{
redraw_titles();
}
/* when 'endofline' is changed, redraw the window title */
else if ((int *)varp == &curbuf->b_p_eol)
{
redraw_titles();
}
/* when 'fixeol' is changed, redraw the window title */
else if ((int *)varp == &curbuf->b_p_fixeol)
{
redraw_titles();
}
# ifdef FEAT_MBYTE
/* when 'bomb' is changed, redraw the window title and tab page text */
else if ((int *)varp == &curbuf->b_p_bomb)
{
redraw_titles();
}
# endif
#endif
/* when 'bin' is set also set some other options */
else if ((int *)varp == &curbuf->b_p_bin)
{
set_options_bin(old_value, curbuf->b_p_bin, opt_flags);
#ifdef FEAT_TITLE
redraw_titles();
#endif
}
#ifdef FEAT_AUTOCMD
/* when 'buflisted' changes, trigger autocommands */
else if ((int *)varp == &curbuf->b_p_bl && old_value != curbuf->b_p_bl)
{
apply_autocmds(curbuf->b_p_bl ? EVENT_BUFADD : EVENT_BUFDELETE,
NULL, NULL, TRUE, curbuf);
}
#endif
/* when 'swf' is set, create swapfile, when reset remove swapfile */
else if ((int *)varp == &curbuf->b_p_swf)
{
if (curbuf->b_p_swf && p_uc)
ml_open_file(curbuf); /* create the swap file */
else
/* no need to reset curbuf->b_may_swap, ml_open_file() will check
* buf->b_p_swf */
mf_close_file(curbuf, TRUE); /* remove the swap file */
}
/* when 'terse' is set change 'shortmess' */
else if ((int *)varp == &p_terse)
{
char_u *p;
p = vim_strchr(p_shm, SHM_SEARCH);
/* insert 's' in p_shm */
if (p_terse && p == NULL)
{
STRCPY(IObuff, p_shm);
STRCAT(IObuff, "s");
set_string_option_direct((char_u *)"shm", -1, IObuff, OPT_FREE, 0);
}
/* remove 's' from p_shm */
else if (!p_terse && p != NULL)
STRMOVE(p, p + 1);
}
/* when 'paste' is set or reset also change other options */
else if ((int *)varp == &p_paste)
{
paste_option_changed();
}
/* when 'insertmode' is set from an autocommand need to do work here */
else if ((int *)varp == &p_im)
{
if (p_im)
{
if ((State & INSERT) == 0)
need_start_insertmode = TRUE;
stop_insert_mode = FALSE;
}
/* only reset if it was set previously */
else if (old_value)
{
need_start_insertmode = FALSE;
stop_insert_mode = TRUE;
if (restart_edit != 0 && mode_displayed)
clear_cmdline = TRUE; /* remove "(insert)" */
restart_edit = 0;
}
}
/* when 'ignorecase' is set or reset and 'hlsearch' is set, redraw */
else if ((int *)varp == &p_ic && p_hls)
{
redraw_all_later(SOME_VALID);
}
#ifdef FEAT_SEARCH_EXTRA
/* when 'hlsearch' is set or reset: reset no_hlsearch */
else if ((int *)varp == &p_hls)
{
SET_NO_HLSEARCH(FALSE);
}
#endif
#ifdef FEAT_SCROLLBIND
/* when 'scrollbind' is set: snapshot the current position to avoid a jump
* at the end of normal_cmd() */
else if ((int *)varp == &curwin->w_p_scb)
{
if (curwin->w_p_scb)
{
do_check_scrollbind(FALSE);
curwin->w_scbind_pos = curwin->w_topline;
}
}
#endif
#if defined(FEAT_WINDOWS) && defined(FEAT_QUICKFIX)
/* There can be only one window with 'previewwindow' set. */
else if ((int *)varp == &curwin->w_p_pvw)
{
if (curwin->w_p_pvw)
{
win_T *win;
FOR_ALL_WINDOWS(win)
if (win->w_p_pvw && win != curwin)
{
curwin->w_p_pvw = FALSE;
return (char_u *)N_("E590: A preview window already exists");
}
}
}
#endif
/* when 'textmode' is set or reset also change 'fileformat' */
else if ((int *)varp == &curbuf->b_p_tx)
{
set_fileformat(curbuf->b_p_tx ? EOL_DOS : EOL_UNIX, opt_flags);
}
/* when 'textauto' is set or reset also change 'fileformats' */
else if ((int *)varp == &p_ta)
set_string_option_direct((char_u *)"ffs", -1,
p_ta ? (char_u *)DFLT_FFS_VIM : (char_u *)"",
OPT_FREE | opt_flags, 0);
/*
* When 'lisp' option changes include/exclude '-' in
* keyword characters.
*/
#ifdef FEAT_LISP
else if (varp == (char_u *)&(curbuf->b_p_lisp))
{
(void)buf_init_chartab(curbuf, FALSE); /* ignore errors */
}
#endif
#ifdef FEAT_TITLE
/* when 'title' changed, may need to change the title; same for 'icon' */
else if ((int *)varp == &p_title)
{
did_set_title(FALSE);
}
else if ((int *)varp == &p_icon)
{
did_set_title(TRUE);
}
#endif
else if ((int *)varp == &curbuf->b_changed)
{
if (!value)
save_file_ff(curbuf); /* Buffer is unchanged */
#ifdef FEAT_TITLE
redraw_titles();
#endif
#ifdef FEAT_AUTOCMD
modified_was_set = value;
#endif
}
#ifdef BACKSLASH_IN_FILENAME
else if ((int *)varp == &p_ssl)
{
if (p_ssl)
{
psepc = '/';
psepcN = '\\';
pseps[0] = '/';
}
else
{
psepc = '\\';
psepcN = '/';
pseps[0] = '\\';
}
/* need to adjust the file name arguments and buffer names. */
buflist_slash_adjust();
alist_slash_adjust();
# ifdef FEAT_EVAL
scriptnames_slash_adjust();
# endif
}
#endif
/* If 'wrap' is set, set w_leftcol to zero. */
else if ((int *)varp == &curwin->w_p_wrap)
{
if (curwin->w_p_wrap)
curwin->w_leftcol = 0;
}
#ifdef FEAT_WINDOWS
else if ((int *)varp == &p_ea)
{
if (p_ea && !old_value)
win_equal(curwin, FALSE, 0);
}
#endif
else if ((int *)varp == &p_wiv)
{
/*
* When 'weirdinvert' changed, set/reset 't_xs'.
* Then set 'weirdinvert' according to value of 't_xs'.
*/
if (p_wiv && !old_value)
T_XS = (char_u *)"y";
else if (!p_wiv && old_value)
T_XS = empty_option;
p_wiv = (*T_XS != NUL);
}
#ifdef FEAT_BEVAL
else if ((int *)varp == &p_beval)
{
if (p_beval && !old_value)
gui_mch_enable_beval_area(balloonEval);
else if (!p_beval && old_value)
gui_mch_disable_beval_area(balloonEval);
}
#endif
#ifdef FEAT_AUTOCHDIR
else if ((int *)varp == &p_acd)
{
/* Change directories when the 'acd' option is set now. */
DO_AUTOCHDIR
}
#endif
#ifdef FEAT_DIFF
/* 'diff' */
else if ((int *)varp == &curwin->w_p_diff)
{
/* May add or remove the buffer from the list of diff buffers. */
diff_buf_adjust(curwin);
# ifdef FEAT_FOLDING
if (foldmethodIsDiff(curwin))
foldUpdateAll(curwin);
# endif
}
#endif
#ifdef USE_IM_CONTROL
/* 'imdisable' */
else if ((int *)varp == &p_imdisable)
{
/* Only de-activate it here, it will be enabled when changing mode. */
if (p_imdisable)
im_set_active(FALSE);
else if (State & INSERT)
/* When the option is set from an autocommand, it may need to take
* effect right away. */
im_set_active(curbuf->b_p_iminsert == B_IMODE_IM);
}
#endif
#ifdef FEAT_SPELL
/* 'spell' */
else if ((int *)varp == &curwin->w_p_spell)
{
if (curwin->w_p_spell)
{
char_u *errmsg = did_set_spelllang(curwin);
if (errmsg != NULL)
EMSG(_(errmsg));
}
}
#endif
#ifdef FEAT_FKMAP
else if ((int *)varp == &p_altkeymap)
{
if (old_value != p_altkeymap)
{
if (!p_altkeymap)
{
p_hkmap = p_fkmap;
p_fkmap = 0;
}
else
{
p_fkmap = p_hkmap;
p_hkmap = 0;
}
(void)init_chartab();
}
}
/*
* In case some second language keymapping options have changed, check
* and correct the setting in a consistent way.
*/
/*
* If hkmap or fkmap are set, reset Arabic keymapping.
*/
if ((p_hkmap || p_fkmap) && p_altkeymap)
{
p_altkeymap = p_fkmap;
# ifdef FEAT_ARABIC
curwin->w_p_arab = FALSE;
# endif
(void)init_chartab();
}
/*
* If hkmap set, reset Farsi keymapping.
*/
if (p_hkmap && p_altkeymap)
{
p_altkeymap = 0;
p_fkmap = 0;
# ifdef FEAT_ARABIC
curwin->w_p_arab = FALSE;
# endif
(void)init_chartab();
}
/*
* If fkmap set, reset Hebrew keymapping.
*/
if (p_fkmap && !p_altkeymap)
{
p_altkeymap = 1;
p_hkmap = 0;
# ifdef FEAT_ARABIC
curwin->w_p_arab = FALSE;
# endif
(void)init_chartab();
}
#endif
#ifdef FEAT_ARABIC
if ((int *)varp == &curwin->w_p_arab)
{
if (curwin->w_p_arab)
{
/*
* 'arabic' is set, handle various sub-settings.
*/
if (!p_tbidi)
{
/* set rightleft mode */
if (!curwin->w_p_rl)
{
curwin->w_p_rl = TRUE;
changed_window_setting();
}
/* Enable Arabic shaping (major part of what Arabic requires) */
if (!p_arshape)
{
p_arshape = TRUE;
redraw_later_clear();
}
}
	/* Arabic requires a utf-8 encoding, inform the user if it's not
	 * set. */
if (STRCMP(p_enc, "utf-8") != 0)
{
static char *w_arabic = N_("W17: Arabic requires UTF-8, do ':set encoding=utf-8'");
msg_source(hl_attr(HLF_W));
MSG_ATTR(_(w_arabic), hl_attr(HLF_W));
#ifdef FEAT_EVAL
set_vim_var_string(VV_WARNINGMSG, (char_u *)_(w_arabic), -1);
#endif
}
# ifdef FEAT_MBYTE
/* set 'delcombine' */
p_deco = TRUE;
# endif
# ifdef FEAT_KEYMAP
/* Force-set the necessary keymap for arabic */
set_option_value((char_u *)"keymap", 0L, (char_u *)"arabic",
OPT_LOCAL);
# endif
# ifdef FEAT_FKMAP
p_altkeymap = 0;
p_hkmap = 0;
p_fkmap = 0;
(void)init_chartab();
# endif
}
else
{
/*
* 'arabic' is reset, handle various sub-settings.
*/
if (!p_tbidi)
{
/* reset rightleft mode */
if (curwin->w_p_rl)
{
curwin->w_p_rl = FALSE;
changed_window_setting();
}
/* 'arabicshape' isn't reset, it is a global option and
* another window may still need it "on". */
}
/* 'delcombine' isn't reset, it is a global option and another
* window may still want it "on". */
# ifdef FEAT_KEYMAP
/* Revert to the default keymap */
curbuf->b_p_iminsert = B_IMODE_NONE;
curbuf->b_p_imsearch = B_IMODE_USE_INSERT;
# endif
}
}
#endif
#ifdef FEAT_TERMGUICOLORS
/* 'termguicolors' */
else if ((int *)varp == &p_tgc)
{
# ifdef FEAT_GUI
if (!gui.in_use && !gui.starting)
# endif
highlight_gui_started();
}
#endif
/*
* End of handling side effects for bool options.
*/
/* after handling side effects, call autocommand */
options[opt_idx].flags |= P_WAS_SET;
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
if (!starting)
{
char_u buf_old[2], buf_new[2], buf_type[7];
vim_snprintf((char *)buf_old, 2, "%d", old_value ? TRUE: FALSE);
vim_snprintf((char *)buf_new, 2, "%d", value ? TRUE: FALSE);
vim_snprintf((char *)buf_type, 7, "%s", (opt_flags & OPT_LOCAL) ? "local" : "global");
set_vim_var_string(VV_OPTION_NEW, buf_new, -1);
set_vim_var_string(VV_OPTION_OLD, buf_old, -1);
set_vim_var_string(VV_OPTION_TYPE, buf_type, -1);
apply_autocmds(EVENT_OPTIONSET, (char_u *) options[opt_idx].fullname, NULL, FALSE, NULL);
reset_v_option_vars();
}
#endif
comp_col(); /* in case 'ruler' or 'showcmd' changed */
if (curwin->w_curswant != MAXCOL
&& (options[opt_idx].flags & (P_CURSWANT | P_RALL)) != 0)
curwin->w_set_curswant = TRUE;
check_redraw(options[opt_idx].flags);
return NULL;
}
/*
* Set the value of a number option, and take care of side effects.
* Returns NULL for success, or an error message for an error.
*/
static char_u *
set_num_option(
int opt_idx, /* index in options[] table */
char_u *varp, /* pointer to the option variable */
long value, /* new value */
char_u *errbuf, /* buffer for error messages */
size_t errbuflen, /* length of "errbuf" */
int opt_flags) /* OPT_LOCAL, OPT_GLOBAL and
OPT_MODELINE */
{
char_u *errmsg = NULL;
long old_value = *(long *)varp;
long old_Rows = Rows; /* remember old Rows */
long old_Columns = Columns; /* remember old Columns */
long *pp = (long *)varp;
/* Disallow changing some options from secure mode. */
if ((secure
#ifdef HAVE_SANDBOX
|| sandbox != 0
#endif
) && (options[opt_idx].flags & P_SECURE))
return e_secure;
*pp = value;
#ifdef FEAT_EVAL
/* Remember where the option was set. */
set_option_scriptID_idx(opt_idx, opt_flags, current_SID);
#endif
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
if (curbuf->b_p_sw < 0)
{
errmsg = e_positive;
curbuf->b_p_sw = curbuf->b_p_ts;
}
/*
* Number options that need some action when changed
*/
#ifdef FEAT_WINDOWS
if (pp == &p_wh || pp == &p_hh)
{
if (p_wh < 1)
{
errmsg = e_positive;
p_wh = 1;
}
if (p_wmh > p_wh)
{
errmsg = e_winheight;
p_wh = p_wmh;
}
if (p_hh < 0)
{
errmsg = e_positive;
p_hh = 0;
}
/* Change window height NOW */
if (lastwin != firstwin)
{
if (pp == &p_wh && curwin->w_height < p_wh)
win_setheight((int)p_wh);
if (pp == &p_hh && curbuf->b_help && curwin->w_height < p_hh)
win_setheight((int)p_hh);
}
}
/* 'winminheight' */
else if (pp == &p_wmh)
{
if (p_wmh < 0)
{
errmsg = e_positive;
p_wmh = 0;
}
if (p_wmh > p_wh)
{
errmsg = e_winheight;
p_wmh = p_wh;
}
win_setminheight();
}
# ifdef FEAT_WINDOWS
else if (pp == &p_wiw)
{
if (p_wiw < 1)
{
errmsg = e_positive;
p_wiw = 1;
}
if (p_wmw > p_wiw)
{
errmsg = e_winwidth;
p_wiw = p_wmw;
}
/* Change window width NOW */
if (lastwin != firstwin && curwin->w_width < p_wiw)
win_setwidth((int)p_wiw);
}
/* 'winminwidth' */
else if (pp == &p_wmw)
{
if (p_wmw < 0)
{
errmsg = e_positive;
p_wmw = 0;
}
if (p_wmw > p_wiw)
{
errmsg = e_winwidth;
p_wmw = p_wiw;
}
win_setminheight();
}
# endif
#endif
#ifdef FEAT_WINDOWS
/* (re)set last window status line */
else if (pp == &p_ls)
{
last_status(FALSE);
}
/* (re)set tab page line */
else if (pp == &p_stal)
{
shell_new_rows(); /* recompute window positions and heights */
}
#endif
#ifdef FEAT_GUI
else if (pp == &p_linespace)
{
/* Recompute gui.char_height and resize the Vim window to keep the
* same number of lines. */
if (gui.in_use && gui_mch_adjust_charheight() == OK)
gui_set_shellsize(FALSE, FALSE, RESIZE_VERT);
}
#endif
#ifdef FEAT_FOLDING
/* 'foldlevel' */
else if (pp == &curwin->w_p_fdl)
{
if (curwin->w_p_fdl < 0)
curwin->w_p_fdl = 0;
newFoldLevel();
}
/* 'foldminlines' */
else if (pp == &curwin->w_p_fml)
{
foldUpdateAll(curwin);
}
/* 'foldnestmax' */
else if (pp == &curwin->w_p_fdn)
{
if (foldmethodIsSyntax(curwin) || foldmethodIsIndent(curwin))
foldUpdateAll(curwin);
}
/* 'foldcolumn' */
else if (pp == &curwin->w_p_fdc)
{
if (curwin->w_p_fdc < 0)
{
errmsg = e_positive;
curwin->w_p_fdc = 0;
}
else if (curwin->w_p_fdc > 12)
{
errmsg = e_invarg;
curwin->w_p_fdc = 12;
}
}
#endif /* FEAT_FOLDING */
#if defined(FEAT_FOLDING) || defined(FEAT_CINDENT)
/* 'shiftwidth' or 'tabstop' */
else if (pp == &curbuf->b_p_sw || pp == &curbuf->b_p_ts)
{
# ifdef FEAT_FOLDING
if (foldmethodIsIndent(curwin))
foldUpdateAll(curwin);
# endif
# ifdef FEAT_CINDENT
/* When 'shiftwidth' changes, or it's zero and 'tabstop' changes:
* parse 'cinoptions'. */
if (pp == &curbuf->b_p_sw || curbuf->b_p_sw == 0)
parse_cino(curbuf);
# endif
}
#endif
#ifdef FEAT_MBYTE
/* 'maxcombine' */
else if (pp == &p_mco)
{
if (p_mco > MAX_MCO)
p_mco = MAX_MCO;
else if (p_mco < 0)
p_mco = 0;
screenclear(); /* will re-allocate the screen */
}
#endif
else if (pp == &curbuf->b_p_iminsert)
{
if (curbuf->b_p_iminsert < 0 || curbuf->b_p_iminsert > B_IMODE_LAST)
{
errmsg = e_invarg;
curbuf->b_p_iminsert = B_IMODE_NONE;
}
p_iminsert = curbuf->b_p_iminsert;
if (termcap_active) /* don't do this in the alternate screen */
showmode();
#if defined(FEAT_WINDOWS) && defined(FEAT_KEYMAP)
/* Show/unshow value of 'keymap' in status lines. */
status_redraw_curbuf();
#endif
}
else if (pp == &p_window)
{
if (p_window < 1)
p_window = 1;
else if (p_window >= Rows)
p_window = Rows - 1;
}
else if (pp == &curbuf->b_p_imsearch)
{
if (curbuf->b_p_imsearch < -1 || curbuf->b_p_imsearch > B_IMODE_LAST)
{
errmsg = e_invarg;
curbuf->b_p_imsearch = B_IMODE_NONE;
}
p_imsearch = curbuf->b_p_imsearch;
}
#ifdef FEAT_TITLE
/* if 'titlelen' has changed, redraw the title */
else if (pp == &p_titlelen)
{
if (p_titlelen < 0)
{
errmsg = e_positive;
p_titlelen = 85;
}
if (starting != NO_SCREEN && old_value != p_titlelen)
need_maketitle = TRUE;
}
#endif
/* if p_ch changed value, change the command line height */
else if (pp == &p_ch)
{
if (p_ch < 1)
{
errmsg = e_positive;
p_ch = 1;
}
if (p_ch > Rows - min_rows() + 1)
p_ch = Rows - min_rows() + 1;
/* Only compute the new window layout when startup has been
* completed. Otherwise the frame sizes may be wrong. */
if (p_ch != old_value && full_screen
#ifdef FEAT_GUI
&& !gui.starting
#endif
)
command_height();
}
/* when 'updatecount' changes from zero to non-zero, open swap files */
else if (pp == &p_uc)
{
if (p_uc < 0)
{
errmsg = e_positive;
p_uc = 100;
}
if (p_uc && !old_value)
ml_open_files();
}
#ifdef FEAT_CONCEAL
else if (pp == &curwin->w_p_cole)
{
if (curwin->w_p_cole < 0)
{
errmsg = e_positive;
curwin->w_p_cole = 0;
}
else if (curwin->w_p_cole > 3)
{
errmsg = e_invarg;
curwin->w_p_cole = 3;
}
}
#endif
#ifdef MZSCHEME_GUI_THREADS
else if (pp == &p_mzq)
mzvim_reset_timer();
#endif
/* sync undo before 'undolevels' changes */
else if (pp == &p_ul)
{
/* use the old value, otherwise u_sync() may not work properly */
p_ul = old_value;
u_sync(TRUE);
p_ul = value;
}
else if (pp == &curbuf->b_p_ul)
{
/* use the old value, otherwise u_sync() may not work properly */
curbuf->b_p_ul = old_value;
u_sync(TRUE);
curbuf->b_p_ul = value;
}
#ifdef FEAT_LINEBREAK
/* 'numberwidth' must be positive */
else if (pp == &curwin->w_p_nuw)
{
if (curwin->w_p_nuw < 1)
{
errmsg = e_positive;
curwin->w_p_nuw = 1;
}
if (curwin->w_p_nuw > 10)
{
errmsg = e_invarg;
curwin->w_p_nuw = 10;
}
curwin->w_nrwidth_line_count = 0; /* trigger a redraw */
}
#endif
else if (pp == &curbuf->b_p_tw)
{
if (curbuf->b_p_tw < 0)
{
errmsg = e_positive;
curbuf->b_p_tw = 0;
}
#ifdef FEAT_SYN_HL
# ifdef FEAT_WINDOWS
{
win_T *wp;
tabpage_T *tp;
FOR_ALL_TAB_WINDOWS(tp, wp)
check_colorcolumn(wp);
}
# else
check_colorcolumn(curwin);
# endif
#endif
}
/*
* Check the bounds for numeric options here
*/
if (Rows < min_rows() && full_screen)
{
if (errbuf != NULL)
{
vim_snprintf((char *)errbuf, errbuflen,
_("E593: Need at least %d lines"), min_rows());
errmsg = errbuf;
}
Rows = min_rows();
}
if (Columns < MIN_COLUMNS && full_screen)
{
if (errbuf != NULL)
{
vim_snprintf((char *)errbuf, errbuflen,
_("E594: Need at least %d columns"), MIN_COLUMNS);
errmsg = errbuf;
}
Columns = MIN_COLUMNS;
}
limit_screen_size();
/*
* If the screen (shell) height has been changed, assume it is the
* physical screenheight.
*/
if (old_Rows != Rows || old_Columns != Columns)
{
/* Changing the screen size is not allowed while updating the screen. */
if (updating_screen)
*pp = old_value;
else if (full_screen
#ifdef FEAT_GUI
&& !gui.starting
#endif
)
set_shellsize((int)Columns, (int)Rows, TRUE);
else
{
/* Postpone the resizing; check the size and cmdline position for
* messages. */
check_shellsize();
if (cmdline_row > Rows - p_ch && Rows > p_ch)
cmdline_row = Rows - p_ch;
}
if (p_window >= Rows || !option_was_set((char_u *)"window"))
p_window = Rows - 1;
}
if (curbuf->b_p_ts <= 0)
{
errmsg = e_positive;
curbuf->b_p_ts = 8;
}
if (p_tm < 0)
{
errmsg = e_positive;
p_tm = 0;
}
if ((curwin->w_p_scr <= 0
|| (curwin->w_p_scr > curwin->w_height
&& curwin->w_height > 0))
&& full_screen)
{
if (pp == &(curwin->w_p_scr))
{
if (curwin->w_p_scr != 0)
errmsg = e_scroll;
win_comp_scroll(curwin);
}
/* If 'scroll' became invalid because of a side effect silently adjust
* it. */
else if (curwin->w_p_scr <= 0)
curwin->w_p_scr = 1;
else /* curwin->w_p_scr > curwin->w_height */
curwin->w_p_scr = curwin->w_height;
}
if (p_hi < 0)
{
errmsg = e_positive;
p_hi = 0;
}
else if (p_hi > 10000)
{
errmsg = e_invarg;
p_hi = 10000;
}
if (p_re < 0 || p_re > 2)
{
errmsg = e_invarg;
p_re = 0;
}
if (p_report < 0)
{
errmsg = e_positive;
p_report = 1;
}
if ((p_sj < -100 || p_sj >= Rows) && full_screen)
{
if (Rows != old_Rows) /* Rows changed, just adjust p_sj */
p_sj = Rows / 2;
else
{
errmsg = e_scroll;
p_sj = 1;
}
}
if (p_so < 0 && full_screen)
{
errmsg = e_scroll;
p_so = 0;
}
if (p_siso < 0 && full_screen)
{
errmsg = e_positive;
p_siso = 0;
}
#ifdef FEAT_CMDWIN
if (p_cwh < 1)
{
errmsg = e_positive;
p_cwh = 1;
}
#endif
if (p_ut < 0)
{
errmsg = e_positive;
p_ut = 2000;
}
if (p_ss < 0)
{
errmsg = e_positive;
p_ss = 0;
}
/* May set global value for local option. */
if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0)
*(long *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL) = *pp;
options[opt_idx].flags |= P_WAS_SET;
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
if (!starting && errmsg == NULL)
{
	char_u buf_old[NUMBUFLEN], buf_new[NUMBUFLEN], buf_type[7];
	/* use sizeof() so that even the most negative long value fits */
	vim_snprintf((char *)buf_old, sizeof(buf_old), "%ld", old_value);
	vim_snprintf((char *)buf_new, sizeof(buf_new), "%ld", value);
vim_snprintf((char *)buf_type, 7, "%s", (opt_flags & OPT_LOCAL) ? "local" : "global");
set_vim_var_string(VV_OPTION_NEW, buf_new, -1);
set_vim_var_string(VV_OPTION_OLD, buf_old, -1);
set_vim_var_string(VV_OPTION_TYPE, buf_type, -1);
apply_autocmds(EVENT_OPTIONSET, (char_u *) options[opt_idx].fullname, NULL, FALSE, NULL);
reset_v_option_vars();
}
#endif
comp_col(); /* in case 'columns' or 'ls' changed */
if (curwin->w_curswant != MAXCOL
&& (options[opt_idx].flags & (P_CURSWANT | P_RALL)) != 0)
curwin->w_set_curswant = TRUE;
check_redraw(options[opt_idx].flags);
return errmsg;
}
/*
* Called after an option changed: check if something needs to be redrawn.
*/
static void
check_redraw(long_u flags)
{
/* Careful: P_RCLR and P_RALL are a combination of other P_ flags */
int doclear = (flags & P_RCLR) == P_RCLR;
int all = ((flags & P_RALL) == P_RALL || doclear);
#ifdef FEAT_WINDOWS
if ((flags & P_RSTAT) || all) /* mark all status lines dirty */
status_redraw_all();
#endif
if ((flags & P_RBUF) || (flags & P_RWIN) || all)
changed_window_setting();
if (flags & P_RBUF)
redraw_curbuf_later(NOT_VALID);
if (doclear)
redraw_all_later(CLEAR);
else if (all)
redraw_all_later(NOT_VALID);
}
/*
* Find index for option 'arg'.
* Return -1 if not found.
*/
static int
findoption(char_u *arg)
{
int opt_idx;
char *s, *p;
static short quick_tab[27] = {0, 0}; /* quick access table */
int is_term_opt;
/*
* For first call: Initialize the quick-access table.
* It contains the index for the first option that starts with a certain
* letter. There are 26 letters, plus the first "t_" option.
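     * For example, quick_tab[CharOrdLow('w')] holds the index of the first
     * option whose name starts with 'w' and quick_tab[26] the index of the
     * first terminal option ("t_..."), so a lookup only scans that slice
     * of the options[] table.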
*/
if (quick_tab[1] == 0)
{
p = options[0].fullname;
for (opt_idx = 1; (s = options[opt_idx].fullname) != NULL; opt_idx++)
{
if (s[0] != p[0])
{
if (s[0] == 't' && s[1] == '_')
quick_tab[26] = opt_idx;
else
quick_tab[CharOrdLow(s[0])] = opt_idx;
}
p = s;
}
}
/*
* Check for name starting with an illegal character.
*/
#ifdef EBCDIC
if (!islower(arg[0]))
#else
if (arg[0] < 'a' || arg[0] > 'z')
#endif
return -1;
is_term_opt = (arg[0] == 't' && arg[1] == '_');
if (is_term_opt)
opt_idx = quick_tab[26];
else
opt_idx = quick_tab[CharOrdLow(arg[0])];
for ( ; (s = options[opt_idx].fullname) != NULL; opt_idx++)
{
if (STRCMP(arg, s) == 0) /* match full name */
break;
}
if (s == NULL && !is_term_opt)
{
opt_idx = quick_tab[CharOrdLow(arg[0])];
for ( ; options[opt_idx].fullname != NULL; opt_idx++)
{
s = options[opt_idx].shortname;
if (s != NULL && STRCMP(arg, s) == 0) /* match short name */
break;
s = NULL;
}
}
if (s == NULL)
opt_idx = -1;
return opt_idx;
}
#if defined(FEAT_EVAL) || defined(FEAT_TCL) || defined(FEAT_MZSCHEME)
/*
* Get the value for an option.
*
* Returns:
* Number or Toggle option: 1, *numval gets value.
* String option: 0, *stringval gets allocated string.
* Hidden Number or Toggle option: -1.
* hidden String option: -2.
* unknown option: -3.
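 *
 * A minimal usage sketch (the option name and message are only
 * illustrations):
 *
 *	long	nval;
 *	if (get_option_value((char_u *)"shiftwidth", &nval, NULL, 0) == 1)
 *	    smsg((char_u *)"sw=%ld", nval);
 *
 * For a string option pass a "stringval" pointer; the returned string is
 * allocated and must be freed by the caller with vim_free().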
*/
int
get_option_value(
char_u *name,
long *numval,
char_u **stringval, /* NULL when only checking existence */
int opt_flags)
{
int opt_idx;
char_u *varp;
opt_idx = findoption(name);
if (opt_idx < 0) /* unknown option */
return -3;
varp = get_varp_scope(&(options[opt_idx]), opt_flags);
if (options[opt_idx].flags & P_STRING)
{
if (varp == NULL) /* hidden option */
return -2;
if (stringval != NULL)
{
#ifdef FEAT_CRYPT
/* never return the value of the crypt key */
if ((char_u **)varp == &curbuf->b_p_key
&& **(char_u **)(varp) != NUL)
*stringval = vim_strsave((char_u *)"*****");
else
#endif
*stringval = vim_strsave(*(char_u **)(varp));
}
return 0;
}
if (varp == NULL) /* hidden option */
return -1;
if (options[opt_idx].flags & P_NUM)
*numval = *(long *)varp;
else
{
/* Special case: 'modified' is b_changed, but we also want to consider
* it set when 'ff' or 'fenc' changed. */
if ((int *)varp == &curbuf->b_changed)
*numval = curbufIsChanged();
else
*numval = (long) *(int *)varp;
}
return 1;
}
#endif
#if defined(FEAT_PYTHON) || defined(FEAT_PYTHON3) || defined(PROTO)
/*
 * Returns the option attributes and its value.  Unlike the function above,
 * it returns either the global or the local value of the option, depending
 * on what was requested, but it never returns the global value when the
 * local one was requested, or vice versa.  Nor does it return a
 * buffer-local value when a window-local one was requested.
 *
 * Pretends that the option is absent if it is not present in the requested
 * scope (i.e. has no global, window-local or buffer-local value, depending
 * on opt_type).
 *
 * Returned flags:
 *	0	hidden or unknown option, or an option that does not have
 *		the requested type (see SREQ_* in vim.h)
 *	other	see SOPT_* in vim.h
*
* Possible opt_type values: see SREQ_* in vim.h
*/
int
get_option_value_strict(
char_u *name,
long *numval,
char_u **stringval, /* NULL when only obtaining attributes */
int opt_type,
void *from)
{
int opt_idx;
char_u *varp = NULL;
struct vimoption *p;
int r = 0;
opt_idx = findoption(name);
if (opt_idx < 0)
return 0;
p = &(options[opt_idx]);
/* Hidden option */
if (p->var == NULL)
return 0;
if (p->flags & P_BOOL)
r |= SOPT_BOOL;
else if (p->flags & P_NUM)
r |= SOPT_NUM;
else if (p->flags & P_STRING)
r |= SOPT_STRING;
if (p->indir == PV_NONE)
{
if (opt_type == SREQ_GLOBAL)
r |= SOPT_GLOBAL;
else
return 0; /* Did not request global-only option */
}
else
{
if (p->indir & PV_BOTH)
r |= SOPT_GLOBAL;
else if (opt_type == SREQ_GLOBAL)
return 0; /* Requested global option */
if (p->indir & PV_WIN)
{
if (opt_type == SREQ_BUF)
return 0; /* Did not request window-local option */
else
r |= SOPT_WIN;
}
else if (p->indir & PV_BUF)
{
if (opt_type == SREQ_WIN)
return 0; /* Did not request buffer-local option */
else
r |= SOPT_BUF;
}
}
if (stringval == NULL)
return r;
if (opt_type == SREQ_GLOBAL)
varp = p->var;
else
{
if (opt_type == SREQ_BUF)
{
/* Special case: 'modified' is b_changed, but we also want to
* consider it set when 'ff' or 'fenc' changed. */
if (p->indir == PV_MOD)
{
*numval = bufIsChanged((buf_T *) from);
varp = NULL;
}
#ifdef FEAT_CRYPT
else if (p->indir == PV_KEY)
{
/* never return the value of the crypt key */
*stringval = NULL;
varp = NULL;
}
#endif
else
{
aco_save_T aco;
aucmd_prepbuf(&aco, (buf_T *) from);
varp = get_varp(p);
aucmd_restbuf(&aco);
}
}
else if (opt_type == SREQ_WIN)
{
win_T *save_curwin;
save_curwin = curwin;
curwin = (win_T *) from;
curbuf = curwin->w_buffer;
varp = get_varp(p);
curwin = save_curwin;
curbuf = curwin->w_buffer;
}
if (varp == p->var)
return (r | SOPT_UNSET);
}
if (varp != NULL)
{
if (p->flags & P_STRING)
*stringval = vim_strsave(*(char_u **)(varp));
else if (p->flags & P_NUM)
*numval = *(long *) varp;
else
*numval = *(int *)varp;
}
return r;
}
/*
 * Iterate over options.  The first argument is a pointer to a pointer to a
 * structure inside the options[] array, the second is the option type, as
 * in the function above.
 *
 * If the first argument points to NULL it is assumed that iteration has
 * just started and the caller needs the very first value.
 * If the first argument points to the end marker the function returns NULL
 * and sets the first argument to NULL.
 *
 * Returns the full option name for the current option on each call.
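 *
 * A usage sketch (print_name() is a hypothetical callback):
 *
 *	void	*iter = NULL;
 *	char_u	*name;
 *	while ((name = option_iter_next(&iter, SREQ_BUF)) != NULL)
 *	    print_name(name);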
*/
char_u *
option_iter_next(void **option, int opt_type)
{
struct vimoption *ret = NULL;
do
{
if (*option == NULL)
*option = (void *) options;
else if (((struct vimoption *) (*option))->fullname == NULL)
{
*option = NULL;
return NULL;
}
else
*option = (void *) (((struct vimoption *) (*option)) + 1);
ret = ((struct vimoption *) (*option));
/* Hidden option */
if (ret->var == NULL)
{
ret = NULL;
continue;
}
switch (opt_type)
{
case SREQ_GLOBAL:
if (!(ret->indir == PV_NONE || ret->indir & PV_BOTH))
ret = NULL;
break;
case SREQ_BUF:
if (!(ret->indir & PV_BUF))
ret = NULL;
break;
case SREQ_WIN:
if (!(ret->indir & PV_WIN))
ret = NULL;
break;
default:
EMSG2(_(e_intern2), "option_iter_next()");
return NULL;
}
}
while (ret == NULL);
return (char_u *)ret->fullname;
}
#endif
/*
* Set the value of option "name".
* Use "string" for string options, use "number" for other options.
*
* Returns NULL on success or error message on error.
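 *
 * Examples (the second mirrors the 'keymap' call in set_bool_option()
 * above):
 *	set_option_value((char_u *)"list", 1L, NULL, OPT_LOCAL);
 *	set_option_value((char_u *)"keymap", 0L, (char_u *)"arabic", OPT_LOCAL);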
*/
char_u *
set_option_value(
char_u *name,
long number,
char_u *string,
int opt_flags) /* OPT_LOCAL or 0 (both) */
{
int opt_idx;
char_u *varp;
long_u flags;
opt_idx = findoption(name);
if (opt_idx < 0)
EMSG2(_("E355: Unknown option: %s"), name);
else
{
flags = options[opt_idx].flags;
#ifdef HAVE_SANDBOX
/* Disallow changing some options in the sandbox */
if (sandbox > 0 && (flags & P_SECURE))
{
EMSG(_(e_sandbox));
return NULL;
}
#endif
if (flags & P_STRING)
return set_string_option(opt_idx, string, opt_flags);
else
{
varp = get_varp_scope(&(options[opt_idx]), opt_flags);
if (varp != NULL) /* hidden option is not changed */
{
if (number == 0 && string != NULL)
{
int idx;
		    /* Either we are given a string or we are setting the
		     * option to zero. */
for (idx = 0; string[idx] == '0'; ++idx)
;
if (string[idx] != NUL || idx == 0)
{
/* There's another character after zeros or the string
* is empty. In both cases, we are trying to set a
* num option using a string. */
EMSG3(_("E521: Number required: &%s = '%s'"),
name, string);
return NULL; /* do nothing as we hit an error */
}
}
if (flags & P_NUM)
return set_num_option(opt_idx, varp, number,
NULL, 0, opt_flags);
else
return set_bool_option(opt_idx, varp, (int)number,
opt_flags);
}
}
}
return NULL;
}
/*
* Get the terminal code for a terminal option.
* Returns NULL when not found.
*/
char_u *
get_term_code(char_u *tname)
{
int opt_idx;
char_u *varp;
    if (tname[0] != 't' || tname[1] != '_'
			|| tname[2] == NUL || tname[3] == NUL)
return NULL;
if ((opt_idx = findoption(tname)) >= 0)
{
varp = get_varp(&(options[opt_idx]));
if (varp != NULL)
varp = *(char_u **)(varp);
return varp;
}
return find_termcode(tname + 2);
}
char_u *
get_highlight_default(void)
{
int i;
i = findoption((char_u *)"hl");
if (i >= 0)
return options[i].def_val[VI_DEFAULT];
return (char_u *)NULL;
}
#if defined(FEAT_MBYTE) || defined(PROTO)
char_u *
get_encoding_default(void)
{
int i;
i = findoption((char_u *)"enc");
if (i >= 0)
return options[i].def_val[VI_DEFAULT];
return (char_u *)NULL;
}
#endif
/*
* Translate a string like "t_xx", "<t_xx>" or "<S-Tab>" to a key number.
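 * For example, "t_kd" yields TERMCAP2KEY('k', 'd').  For "<S-Tab>" the
 * caller passes a pointer to just past the '<' and find_special_key()
 * does the translation; keys with modifiers that cannot be folded into a
 * single key number yield 0.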
*/
static int
find_key_option(char_u *arg)
{
int key;
int modifiers;
/*
* Don't use get_special_key_code() for t_xx, we don't want it to call
* add_termcap_entry().
*/
if (arg[0] == 't' && arg[1] == '_' && arg[2] && arg[3])
key = TERMCAP2KEY(arg[2], arg[3]);
else
{
--arg; /* put arg at the '<' */
modifiers = 0;
key = find_special_key(&arg, &modifiers, TRUE, TRUE, FALSE);
if (modifiers) /* can't handle modifiers here */
key = 0;
}
return key;
}
/*
* if 'all' == 0: show changed options
* if 'all' == 1: show all normal options
* if 'all' == 2: show all terminal options
*/
static void
showoptions(
int all,
int opt_flags) /* OPT_LOCAL and/or OPT_GLOBAL */
{
struct vimoption *p;
int col;
int isterm;
char_u *varp;
struct vimoption **items;
int item_count;
int run;
int row, rows;
int cols;
int i;
int len;
#define INC 20
#define GAP 3
items = (struct vimoption **)alloc((unsigned)(sizeof(struct vimoption *) *
PARAM_COUNT));
if (items == NULL)
return;
/* Highlight title */
if (all == 2)
MSG_PUTS_TITLE(_("\n--- Terminal codes ---"));
else if (opt_flags & OPT_GLOBAL)
MSG_PUTS_TITLE(_("\n--- Global option values ---"));
else if (opt_flags & OPT_LOCAL)
MSG_PUTS_TITLE(_("\n--- Local option values ---"));
else
MSG_PUTS_TITLE(_("\n--- Options ---"));
/*
* do the loop two times:
* 1. display the short items
* 2. display the long items (only strings and numbers)
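 *    (short items are laid out in columns of INC cells each, long items
 *    get a whole line each)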
*/
for (run = 1; run <= 2 && !got_int; ++run)
{
/*
* collect the items in items[]
*/
item_count = 0;
for (p = &options[0]; p->fullname != NULL; p++)
{
varp = NULL;
isterm = istermoption(p);
if (opt_flags != 0)
{
if (p->indir != PV_NONE && !isterm)
varp = get_varp_scope(p, opt_flags);
}
else
varp = get_varp(p);
if (varp != NULL
&& ((all == 2 && isterm)
|| (all == 1 && !isterm)
|| (all == 0 && !optval_default(p, varp))))
{
if (p->flags & P_BOOL)
		len = 1;	/* a toggle option always fits */
else
{
option_value2string(p, opt_flags);
len = (int)STRLEN(p->fullname) + vim_strsize(NameBuff) + 1;
}
if ((len <= INC - GAP && run == 1) ||
(len > INC - GAP && run == 2))
items[item_count++] = p;
}
}
/*
* display the items
*/
if (run == 1)
{
cols = (Columns + GAP - 3) / INC;
if (cols == 0)
cols = 1;
rows = (item_count + cols - 1) / cols;
}
else /* run == 2 */
rows = item_count;
for (row = 0; row < rows && !got_int; ++row)
{
msg_putchar('\n'); /* go to next line */
if (got_int) /* 'q' typed in more */
break;
col = 0;
for (i = row; i < item_count; i += rows)
{
msg_col = col; /* make columns */
showoneopt(items[i], opt_flags);
col += INC;
}
out_flush();
ui_breakcheck();
}
}
vim_free(items);
}
/*
* Return TRUE if option "p" has its default value.
*/
static int
optval_default(struct vimoption *p, char_u *varp)
{
int dvi;
if (varp == NULL)
return TRUE; /* hidden option is always at default */
dvi = ((p->flags & P_VI_DEF) || p_cp) ? VI_DEFAULT : VIM_DEFAULT;
if (p->flags & P_NUM)
return (*(long *)varp == (long)(long_i)p->def_val[dvi]);
if (p->flags & P_BOOL)
/* the cast to long is required for Manx C, long_i is
* needed for MSVC */
return (*(int *)varp == (int)(long)(long_i)p->def_val[dvi]);
/* P_STRING */
return (STRCMP(*(char_u **)varp, p->def_val[dvi]) == 0);
}
/*
* showoneopt: show the value of one option
* must not be called with a hidden option!
*/
static void
showoneopt(
struct vimoption *p,
int opt_flags) /* OPT_LOCAL or OPT_GLOBAL */
{
char_u *varp;
int save_silent = silent_mode;
silent_mode = FALSE;
info_message = TRUE; /* use mch_msg(), not mch_errmsg() */
varp = get_varp_scope(p, opt_flags);
/* for 'modified' we also need to check if 'ff' or 'fenc' changed. */
if ((p->flags & P_BOOL) && ((int *)varp == &curbuf->b_changed
? !curbufIsChanged() : !*(int *)varp))
MSG_PUTS("no");
else if ((p->flags & P_BOOL) && *(int *)varp < 0)
MSG_PUTS("--");
else
MSG_PUTS(" ");
MSG_PUTS(p->fullname);
if (!(p->flags & P_BOOL))
{
msg_putchar('=');
/* put value string in NameBuff */
option_value2string(p, opt_flags);
msg_outtrans(NameBuff);
}
silent_mode = save_silent;
info_message = FALSE;
}
/*
* Write modified options as ":set" commands to a file.
*
* There are three values for "opt_flags":
* OPT_GLOBAL: Write global option values and fresh values of
* buffer-local options (used for start of a session
* file).
* OPT_GLOBAL + OPT_LOCAL: Idem, add fresh values of window-local options for
* curwin (used for a vimrc file).
* OPT_LOCAL: Write buffer-local option values for curbuf, fresh
* and local values for window-local options of
* curwin. Local values are also written when at the
* default value, because a modeline or autocommand
* may have set them when doing ":edit file" and the
* user has set them back at the default or fresh
* value.
* When "local_only" is TRUE, don't write fresh
* values, only local values (for ":mkview").
* (fresh value = value used for a new buffer or window for a local option).
*
* Return FAIL on error, OK otherwise.
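 *
 * Typical lines written, depending on option type and scope (the values
 * are only illustrations):
 *	set nowrap
 *	set shiftwidth=4
 *	setlocal fdm=marker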
*/
int
makeset(FILE *fd, int opt_flags, int local_only)
{
struct vimoption *p;
char_u *varp; /* currently used value */
    char_u	*varp_fresh;		/* fresh value */
    char_u	*varp_local = NULL;	/* local value */
char *cmd;
int round;
int pri;
/*
* The options that don't have a default (terminal name, columns, lines)
* are never written. Terminal options are also not written.
* Do the loop over "options[]" twice: once for options with the
* P_PRI_MKRC flag and once without.
*/
for (pri = 1; pri >= 0; --pri)
{
for (p = &options[0]; !istermoption(p); p++)
if (!(p->flags & P_NO_MKRC)
&& !istermoption(p)
&& ((pri == 1) == ((p->flags & P_PRI_MKRC) != 0)))
{
/* skip global option when only doing locals */
if (p->indir == PV_NONE && !(opt_flags & OPT_GLOBAL))
continue;
/* Do not store options like 'bufhidden' and 'syntax' in a vimrc
* file, they are always buffer-specific. */
if ((opt_flags & OPT_GLOBAL) && (p->flags & P_NOGLOB))
continue;
/* Global values are only written when not at the default value. */
varp = get_varp_scope(p, opt_flags);
if ((opt_flags & OPT_GLOBAL) && optval_default(p, varp))
continue;
round = 2;
if (p->indir != PV_NONE)
{
if (p->var == VAR_WIN)
{
/* skip window-local option when only doing globals */
if (!(opt_flags & OPT_LOCAL))
continue;
/* When fresh value of window-local option is not at the
* default, need to write it too. */
if (!(opt_flags & OPT_GLOBAL) && !local_only)
{
varp_fresh = get_varp_scope(p, OPT_GLOBAL);
if (!optval_default(p, varp_fresh))
{
round = 1;
varp_local = varp;
varp = varp_fresh;
}
}
}
}
/* Round 1: fresh value for window-local options.
* Round 2: other values */
for ( ; round <= 2; varp = varp_local, ++round)
{
if (round == 1 || (opt_flags & OPT_GLOBAL))
cmd = "set";
else
cmd = "setlocal";
if (p->flags & P_BOOL)
{
if (put_setbool(fd, cmd, p->fullname, *(int *)varp) == FAIL)
return FAIL;
}
else if (p->flags & P_NUM)
{
if (put_setnum(fd, cmd, p->fullname, (long *)varp) == FAIL)
return FAIL;
}
else /* P_STRING */
{
#if defined(FEAT_SYN_HL) || defined(FEAT_AUTOCMD)
int do_endif = FALSE;
/* Don't set 'syntax' and 'filetype' again if the value is
* already right, avoids reloading the syntax file. */
if (
# if defined(FEAT_SYN_HL)
p->indir == PV_SYN
# if defined(FEAT_AUTOCMD)
||
# endif
# endif
# if defined(FEAT_AUTOCMD)
p->indir == PV_FT
# endif
)
{
if (fprintf(fd, "if &%s != '%s'", p->fullname,
*(char_u **)(varp)) < 0
|| put_eol(fd) < 0)
return FAIL;
do_endif = TRUE;
}
#endif
if (put_setstring(fd, cmd, p->fullname, (char_u **)varp,
(p->flags & P_EXPAND) != 0) == FAIL)
return FAIL;
#if defined(FEAT_SYN_HL) || defined(FEAT_AUTOCMD)
if (do_endif)
{
if (put_line(fd, "endif") == FAIL)
return FAIL;
}
#endif
}
}
}
}
return OK;
}
#if defined(FEAT_FOLDING) || defined(PROTO)
/*
* Generate set commands for the local fold options only. Used when
* 'sessionoptions' or 'viewoptions' contains "folds" but not "options".
*/
int
makefoldset(FILE *fd)
{
if (put_setstring(fd, "setlocal", "fdm", &curwin->w_p_fdm, FALSE) == FAIL
# ifdef FEAT_EVAL
|| put_setstring(fd, "setlocal", "fde", &curwin->w_p_fde, FALSE)
== FAIL
# endif
|| put_setstring(fd, "setlocal", "fmr", &curwin->w_p_fmr, FALSE)
== FAIL
|| put_setstring(fd, "setlocal", "fdi", &curwin->w_p_fdi, FALSE)
== FAIL
|| put_setnum(fd, "setlocal", "fdl", &curwin->w_p_fdl) == FAIL
|| put_setnum(fd, "setlocal", "fml", &curwin->w_p_fml) == FAIL
|| put_setnum(fd, "setlocal", "fdn", &curwin->w_p_fdn) == FAIL
|| put_setbool(fd, "setlocal", "fen", curwin->w_p_fen) == FAIL
)
return FAIL;
return OK;
}
#endif
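/*
 * Write "cmd name=value" for a string option to file "fd", escaping the
 * value with put_escstr().  'pastetoggle' is written as key names; when
 * "expand" is TRUE the home directory in the value is shortened to "~".
 * Returns FAIL when writing fails, OK otherwise.
 */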
static int
put_setstring(
FILE *fd,
char *cmd,
char *name,
char_u **valuep,
int expand)
{
char_u *s;
char_u *buf;
if (fprintf(fd, "%s %s=", cmd, name) < 0)
return FAIL;
if (*valuep != NULL)
{
/* Output 'pastetoggle' as key names. For other
* options some characters have to be escaped with
* CTRL-V or backslash */
if (valuep == &p_pt)
{
s = *valuep;
while (*s != NUL)
if (put_escstr(fd, str2special(&s, FALSE), 2) == FAIL)
return FAIL;
}
else if (expand)
{
buf = alloc(MAXPATHL);
if (buf == NULL)
return FAIL;
home_replace(NULL, *valuep, buf, MAXPATHL, FALSE);
if (put_escstr(fd, buf, 2) == FAIL)
{
vim_free(buf);
return FAIL;
}
vim_free(buf);
}
else if (put_escstr(fd, *valuep, 2) == FAIL)
return FAIL;
}
if (put_eol(fd) < 0)
return FAIL;
return OK;
}
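/*
 * Write "cmd name=value" for a number option to file "fd".  'wildchar'
 * and 'wildcharm' are written as a key name instead of a number.
 * Returns FAIL when writing fails, OK otherwise.
 */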
static int
put_setnum(
FILE *fd,
char *cmd,
char *name,
long *valuep)
{
long wc;
if (fprintf(fd, "%s %s=", cmd, name) < 0)
return FAIL;
if (wc_use_keyname((char_u *)valuep, &wc))
{
/* print 'wildchar' and 'wildcharm' as a key name */
if (fputs((char *)get_special_key_name((int)wc, 0), fd) < 0)
return FAIL;
}
else if (fprintf(fd, "%ld", *valuep) < 0)
return FAIL;
if (put_eol(fd) < 0)
return FAIL;
return OK;
}
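/*
 * Write "cmd name" or "cmd noname" for a boolean option to file "fd".
 * A negative value means a global/local option using the global value;
 * nothing is written for it.
 * Returns FAIL when writing fails, OK otherwise.
 */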
static int
put_setbool(
FILE *fd,
char *cmd,
char *name,
int value)
{
if (value < 0) /* global/local option using global value */
return OK;
if (fprintf(fd, "%s %s%s", cmd, value ? "" : "no", name) < 0
|| put_eol(fd) < 0)
return FAIL;
return OK;
}
/*
* Clear all the terminal options.
* If the option has been allocated, free the memory.
* Terminal options are never hidden or indirect.
*/
void
clear_termoptions(void)
{
/*
* Reset a few things before clearing the old options. This may cause
* outputting a few things that the terminal doesn't understand, but the
* screen will be cleared later, so this is OK.
*/
#ifdef FEAT_MOUSE_TTY
mch_setmouse(FALSE); /* switch mouse off */
#endif
#ifdef FEAT_TITLE
mch_restore_title(3); /* restore window titles */
#endif
#if defined(FEAT_XCLIPBOARD) && defined(FEAT_GUI)
/* When starting the GUI close the display opened for the clipboard.
* After restoring the title, because that will need the display. */
if (gui.starting)
clear_xterm_clip();
#endif
stoptermcap(); /* stop termcap mode */
free_termoptions();
}
void
free_termoptions(void)
{
struct vimoption *p;
for (p = &options[0]; p->fullname != NULL; p++)
if (istermoption(p))
{
if (p->flags & P_ALLOCED)
free_string_option(*(char_u **)(p->var));
if (p->flags & P_DEF_ALLOCED)
free_string_option(p->def_val[VI_DEFAULT]);
*(char_u **)(p->var) = empty_option;
p->def_val[VI_DEFAULT] = empty_option;
p->flags &= ~(P_ALLOCED|P_DEF_ALLOCED);
}
clear_termcodes();
}
/*
* Free the string for one term option, if it was allocated.
* Set the string to empty_option and clear allocated flag.
* "var" points to the option value.
*/
void
free_one_termoption(char_u *var)
{
struct vimoption *p;
for (p = &options[0]; p->fullname != NULL; p++)
if (p->var == var)
{
if (p->flags & P_ALLOCED)
free_string_option(*(char_u **)(p->var));
*(char_u **)(p->var) = empty_option;
p->flags &= ~P_ALLOCED;
break;
}
}
/*
* Set the terminal option defaults to the current value.
* Used after setting the terminal name.
*/
void
set_term_defaults(void)
{
struct vimoption *p;
for (p = &options[0]; p->fullname != NULL; p++)
{
if (istermoption(p) && p->def_val[VI_DEFAULT] != *(char_u **)(p->var))
{
if (p->flags & P_DEF_ALLOCED)
{
free_string_option(p->def_val[VI_DEFAULT]);
p->flags &= ~P_DEF_ALLOCED;
}
p->def_val[VI_DEFAULT] = *(char_u **)(p->var);
if (p->flags & P_ALLOCED)
{
p->flags |= P_DEF_ALLOCED;
p->flags &= ~P_ALLOCED; /* don't free the value now */
}
}
}
}
/*
* return TRUE if 'p' starts with 't_'
*/
static int
istermoption(struct vimoption *p)
{
return (p->fullname[0] == 't' && p->fullname[1] == '_');
}
/*
* Compute columns for ruler and shown command. 'sc_col' is also used to
* decide what the maximum length of a message on the status line can be.
* If there is a status line for the last window, 'sc_col' is independent
* of 'ru_col'.
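 *
 * A worked example, assuming SHOWCMD_COLS is 10: with 'ruler' and
 * 'showcmd' set, no last status line and Columns = 80, ru_col becomes
 * 80 - 18 = 62 and sc_col becomes 80 - 28 = 52, so the ruler starts at
 * column 62 with the shown command just left of it.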
*/
#define COL_RULER 17 /* columns needed by standard ruler */
void
comp_col(void)
{
#if defined(FEAT_CMDL_INFO) && defined(FEAT_WINDOWS)
int last_has_status = (p_ls == 2 || (p_ls == 1 && firstwin != lastwin));
sc_col = 0;
ru_col = 0;
if (p_ru)
{
#ifdef FEAT_STL_OPT
ru_col = (ru_wid ? ru_wid : COL_RULER) + 1;
#else
ru_col = COL_RULER + 1;
#endif
/* no last status line, adjust sc_col */
if (!last_has_status)
sc_col = ru_col;
}
if (p_sc)
{
sc_col += SHOWCMD_COLS;
if (!p_ru || last_has_status) /* no need for separating space */
++sc_col;
}
sc_col = Columns - sc_col;
ru_col = Columns - ru_col;
if (sc_col <= 0) /* screen too narrow, will become a mess */
sc_col = 1;
if (ru_col <= 0)
ru_col = 1;
#else
sc_col = Columns;
ru_col = Columns;
#endif
}
/*
* Unset local option value, similar to ":set opt<".
*/
void
unset_global_local_option(char_u *name, void *from)
{
struct vimoption *p;
int opt_idx;
buf_T *buf = (buf_T *)from;
opt_idx = findoption(name);
if (opt_idx < 0)
return;
p = &(options[opt_idx]);
switch ((int)p->indir)
{
/* global option with local value: use local value if it's been set */
case PV_EP:
clear_string_option(&buf->b_p_ep);
break;
case PV_KP:
clear_string_option(&buf->b_p_kp);
break;
case PV_PATH:
clear_string_option(&buf->b_p_path);
break;
case PV_AR:
buf->b_p_ar = -1;
break;
case PV_BKC:
clear_string_option(&buf->b_p_bkc);
buf->b_bkc_flags = 0;
break;
case PV_TAGS:
clear_string_option(&buf->b_p_tags);
break;
case PV_TC:
clear_string_option(&buf->b_p_tc);
buf->b_tc_flags = 0;
break;
#ifdef FEAT_FIND_ID
case PV_DEF:
clear_string_option(&buf->b_p_def);
break;
case PV_INC:
clear_string_option(&buf->b_p_inc);
break;
#endif
#ifdef FEAT_INS_EXPAND
case PV_DICT:
clear_string_option(&buf->b_p_dict);
break;
case PV_TSR:
clear_string_option(&buf->b_p_tsr);
break;
#endif
#ifdef FEAT_QUICKFIX
case PV_EFM:
clear_string_option(&buf->b_p_efm);
break;
case PV_GP:
clear_string_option(&buf->b_p_gp);
break;
case PV_MP:
clear_string_option(&buf->b_p_mp);
break;
#endif
#if defined(FEAT_BEVAL) && defined(FEAT_EVAL)
case PV_BEXPR:
clear_string_option(&buf->b_p_bexpr);
break;
#endif
#if defined(FEAT_CRYPT)
case PV_CM:
clear_string_option(&buf->b_p_cm);
break;
#endif
#ifdef FEAT_STL_OPT
case PV_STL:
clear_string_option(&((win_T *)from)->w_p_stl);
break;
#endif
case PV_UL:
buf->b_p_ul = NO_LOCAL_UNDOLEVEL;
break;
#ifdef FEAT_LISP
case PV_LW:
clear_string_option(&buf->b_p_lw);
break;
#endif
}
}
/*
* Get pointer to option variable, depending on local or global scope.
*/
static char_u *
get_varp_scope(struct vimoption *p, int opt_flags)
{
if ((opt_flags & OPT_GLOBAL) && p->indir != PV_NONE)
{
if (p->var == VAR_WIN)
return (char_u *)GLOBAL_WO(get_varp(p));
return p->var;
}
if ((opt_flags & OPT_LOCAL) && ((int)p->indir & PV_BOTH))
{
switch ((int)p->indir)
{
#ifdef FEAT_QUICKFIX
case PV_EFM: return (char_u *)&(curbuf->b_p_efm);
case PV_GP: return (char_u *)&(curbuf->b_p_gp);
case PV_MP: return (char_u *)&(curbuf->b_p_mp);
#endif
case PV_EP: return (char_u *)&(curbuf->b_p_ep);
case PV_KP: return (char_u *)&(curbuf->b_p_kp);
case PV_PATH: return (char_u *)&(curbuf->b_p_path);
case PV_AR: return (char_u *)&(curbuf->b_p_ar);
case PV_TAGS: return (char_u *)&(curbuf->b_p_tags);
case PV_TC: return (char_u *)&(curbuf->b_p_tc);
#ifdef FEAT_FIND_ID
case PV_DEF: return (char_u *)&(curbuf->b_p_def);
case PV_INC: return (char_u *)&(curbuf->b_p_inc);
#endif
#ifdef FEAT_INS_EXPAND
case PV_DICT: return (char_u *)&(curbuf->b_p_dict);
case PV_TSR: return (char_u *)&(curbuf->b_p_tsr);
#endif
#if defined(FEAT_BEVAL) && defined(FEAT_EVAL)
case PV_BEXPR: return (char_u *)&(curbuf->b_p_bexpr);
#endif
#if defined(FEAT_CRYPT)
case PV_CM: return (char_u *)&(curbuf->b_p_cm);
#endif
#ifdef FEAT_STL_OPT
case PV_STL: return (char_u *)&(curwin->w_p_stl);
#endif
case PV_UL: return (char_u *)&(curbuf->b_p_ul);
#ifdef FEAT_LISP
case PV_LW: return (char_u *)&(curbuf->b_p_lw);
#endif
case PV_BKC: return (char_u *)&(curbuf->b_p_bkc);
}
return NULL; /* "cannot happen" */
}
return get_varp(p);
}
/*
* Get pointer to option variable.
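 * For a global option with a local value (e.g. 'equalprg'), the local
 * variable is returned only when the local value has been set; otherwise
 * the global variable is used.  get_equalprg() below applies the same
 * fallback.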
*/
static char_u *
get_varp(struct vimoption *p)
{
/* hidden option, always return NULL */
if (p->var == NULL)
return NULL;
switch ((int)p->indir)
{
case PV_NONE: return p->var;
/* global option with local value: use local value if it's been set */
case PV_EP: return *curbuf->b_p_ep != NUL
? (char_u *)&curbuf->b_p_ep : p->var;
case PV_KP: return *curbuf->b_p_kp != NUL
? (char_u *)&curbuf->b_p_kp : p->var;
case PV_PATH: return *curbuf->b_p_path != NUL
? (char_u *)&(curbuf->b_p_path) : p->var;
case PV_AR: return curbuf->b_p_ar >= 0
? (char_u *)&(curbuf->b_p_ar) : p->var;
case PV_TAGS: return *curbuf->b_p_tags != NUL
? (char_u *)&(curbuf->b_p_tags) : p->var;
case PV_TC: return *curbuf->b_p_tc != NUL
? (char_u *)&(curbuf->b_p_tc) : p->var;
case PV_BKC: return *curbuf->b_p_bkc != NUL
? (char_u *)&(curbuf->b_p_bkc) : p->var;
#ifdef FEAT_FIND_ID
case PV_DEF: return *curbuf->b_p_def != NUL
? (char_u *)&(curbuf->b_p_def) : p->var;
case PV_INC: return *curbuf->b_p_inc != NUL
? (char_u *)&(curbuf->b_p_inc) : p->var;
#endif
#ifdef FEAT_INS_EXPAND
case PV_DICT: return *curbuf->b_p_dict != NUL
? (char_u *)&(curbuf->b_p_dict) : p->var;
case PV_TSR: return *curbuf->b_p_tsr != NUL
? (char_u *)&(curbuf->b_p_tsr) : p->var;
#endif
#ifdef FEAT_QUICKFIX
case PV_EFM: return *curbuf->b_p_efm != NUL
? (char_u *)&(curbuf->b_p_efm) : p->var;
case PV_GP: return *curbuf->b_p_gp != NUL
? (char_u *)&(curbuf->b_p_gp) : p->var;
case PV_MP: return *curbuf->b_p_mp != NUL
? (char_u *)&(curbuf->b_p_mp) : p->var;
#endif
#if defined(FEAT_BEVAL) && defined(FEAT_EVAL)
case PV_BEXPR: return *curbuf->b_p_bexpr != NUL
? (char_u *)&(curbuf->b_p_bexpr) : p->var;
#endif
#if defined(FEAT_CRYPT)
case PV_CM: return *curbuf->b_p_cm != NUL
? (char_u *)&(curbuf->b_p_cm) : p->var;
#endif
#ifdef FEAT_STL_OPT
case PV_STL: return *curwin->w_p_stl != NUL
? (char_u *)&(curwin->w_p_stl) : p->var;
#endif
case PV_UL: return curbuf->b_p_ul != NO_LOCAL_UNDOLEVEL
? (char_u *)&(curbuf->b_p_ul) : p->var;
#ifdef FEAT_LISP
case PV_LW: return *curbuf->b_p_lw != NUL
? (char_u *)&(curbuf->b_p_lw) : p->var;
#endif
#ifdef FEAT_ARABIC
case PV_ARAB: return (char_u *)&(curwin->w_p_arab);
#endif
case PV_LIST: return (char_u *)&(curwin->w_p_list);
#ifdef FEAT_SPELL
case PV_SPELL: return (char_u *)&(curwin->w_p_spell);
#endif
#ifdef FEAT_SYN_HL
case PV_CUC: return (char_u *)&(curwin->w_p_cuc);
case PV_CUL: return (char_u *)&(curwin->w_p_cul);
case PV_CC: return (char_u *)&(curwin->w_p_cc);
#endif
#ifdef FEAT_DIFF
case PV_DIFF: return (char_u *)&(curwin->w_p_diff);
#endif
#ifdef FEAT_FOLDING
case PV_FDC: return (char_u *)&(curwin->w_p_fdc);
case PV_FEN: return (char_u *)&(curwin->w_p_fen);
case PV_FDI: return (char_u *)&(curwin->w_p_fdi);
case PV_FDL: return (char_u *)&(curwin->w_p_fdl);
case PV_FDM: return (char_u *)&(curwin->w_p_fdm);
case PV_FML: return (char_u *)&(curwin->w_p_fml);
case PV_FDN: return (char_u *)&(curwin->w_p_fdn);
# ifdef FEAT_EVAL
case PV_FDE: return (char_u *)&(curwin->w_p_fde);
case PV_FDT: return (char_u *)&(curwin->w_p_fdt);
# endif
case PV_FMR: return (char_u *)&(curwin->w_p_fmr);
#endif
case PV_NU: return (char_u *)&(curwin->w_p_nu);
case PV_RNU: return (char_u *)&(curwin->w_p_rnu);
#ifdef FEAT_LINEBREAK
case PV_NUW: return (char_u *)&(curwin->w_p_nuw);
#endif
#ifdef FEAT_WINDOWS
case PV_WFH: return (char_u *)&(curwin->w_p_wfh);
case PV_WFW: return (char_u *)&(curwin->w_p_wfw);
#endif
#if defined(FEAT_WINDOWS) && defined(FEAT_QUICKFIX)
case PV_PVW: return (char_u *)&(curwin->w_p_pvw);
#endif
#ifdef FEAT_RIGHTLEFT
case PV_RL: return (char_u *)&(curwin->w_p_rl);
case PV_RLC: return (char_u *)&(curwin->w_p_rlc);
#endif
case PV_SCROLL: return (char_u *)&(curwin->w_p_scr);
case PV_WRAP: return (char_u *)&(curwin->w_p_wrap);
#ifdef FEAT_LINEBREAK
case PV_LBR: return (char_u *)&(curwin->w_p_lbr);
case PV_BRI: return (char_u *)&(curwin->w_p_bri);
case PV_BRIOPT: return (char_u *)&(curwin->w_p_briopt);
#endif
#ifdef FEAT_SCROLLBIND
case PV_SCBIND: return (char_u *)&(curwin->w_p_scb);
#endif
#ifdef FEAT_CURSORBIND
case PV_CRBIND: return (char_u *)&(curwin->w_p_crb);
#endif
#ifdef FEAT_CONCEAL
case PV_COCU: return (char_u *)&(curwin->w_p_cocu);
case PV_COLE: return (char_u *)&(curwin->w_p_cole);
#endif
case PV_AI: return (char_u *)&(curbuf->b_p_ai);
case PV_BIN: return (char_u *)&(curbuf->b_p_bin);
#ifdef FEAT_MBYTE
case PV_BOMB: return (char_u *)&(curbuf->b_p_bomb);
#endif
#if defined(FEAT_QUICKFIX)
case PV_BH: return (char_u *)&(curbuf->b_p_bh);
case PV_BT: return (char_u *)&(curbuf->b_p_bt);
#endif
case PV_BL: return (char_u *)&(curbuf->b_p_bl);
case PV_CI: return (char_u *)&(curbuf->b_p_ci);
#ifdef FEAT_CINDENT
case PV_CIN: return (char_u *)&(curbuf->b_p_cin);
case PV_CINK: return (char_u *)&(curbuf->b_p_cink);
case PV_CINO: return (char_u *)&(curbuf->b_p_cino);
#endif
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
case PV_CINW: return (char_u *)&(curbuf->b_p_cinw);
#endif
#ifdef FEAT_COMMENTS
case PV_COM: return (char_u *)&(curbuf->b_p_com);
#endif
#ifdef FEAT_FOLDING
case PV_CMS: return (char_u *)&(curbuf->b_p_cms);
#endif
#ifdef FEAT_INS_EXPAND
case PV_CPT: return (char_u *)&(curbuf->b_p_cpt);
#endif
#ifdef FEAT_COMPL_FUNC
case PV_CFU: return (char_u *)&(curbuf->b_p_cfu);
case PV_OFU: return (char_u *)&(curbuf->b_p_ofu);
#endif
case PV_EOL: return (char_u *)&(curbuf->b_p_eol);
case PV_FIXEOL: return (char_u *)&(curbuf->b_p_fixeol);
case PV_ET: return (char_u *)&(curbuf->b_p_et);
#ifdef FEAT_MBYTE
case PV_FENC: return (char_u *)&(curbuf->b_p_fenc);
#endif
case PV_FF: return (char_u *)&(curbuf->b_p_ff);
#ifdef FEAT_AUTOCMD
case PV_FT: return (char_u *)&(curbuf->b_p_ft);
#endif
case PV_FO: return (char_u *)&(curbuf->b_p_fo);
case PV_FLP: return (char_u *)&(curbuf->b_p_flp);
case PV_IMI: return (char_u *)&(curbuf->b_p_iminsert);
case PV_IMS: return (char_u *)&(curbuf->b_p_imsearch);
case PV_INF: return (char_u *)&(curbuf->b_p_inf);
case PV_ISK: return (char_u *)&(curbuf->b_p_isk);
#ifdef FEAT_FIND_ID
# ifdef FEAT_EVAL
case PV_INEX: return (char_u *)&(curbuf->b_p_inex);
# endif
#endif
#if defined(FEAT_CINDENT) && defined(FEAT_EVAL)
case PV_INDE: return (char_u *)&(curbuf->b_p_inde);
case PV_INDK: return (char_u *)&(curbuf->b_p_indk);
#endif
#ifdef FEAT_EVAL
case PV_FEX: return (char_u *)&(curbuf->b_p_fex);
#endif
#ifdef FEAT_CRYPT
case PV_KEY: return (char_u *)&(curbuf->b_p_key);
#endif
#ifdef FEAT_LISP
case PV_LISP: return (char_u *)&(curbuf->b_p_lisp);
#endif
case PV_ML: return (char_u *)&(curbuf->b_p_ml);
case PV_MPS: return (char_u *)&(curbuf->b_p_mps);
case PV_MA: return (char_u *)&(curbuf->b_p_ma);
case PV_MOD: return (char_u *)&(curbuf->b_changed);
case PV_NF: return (char_u *)&(curbuf->b_p_nf);
case PV_PI: return (char_u *)&(curbuf->b_p_pi);
#ifdef FEAT_TEXTOBJ
case PV_QE: return (char_u *)&(curbuf->b_p_qe);
#endif
case PV_RO: return (char_u *)&(curbuf->b_p_ro);
#ifdef FEAT_SMARTINDENT
case PV_SI: return (char_u *)&(curbuf->b_p_si);
#endif
case PV_SN: return (char_u *)&(curbuf->b_p_sn);
case PV_STS: return (char_u *)&(curbuf->b_p_sts);
#ifdef FEAT_SEARCHPATH
case PV_SUA: return (char_u *)&(curbuf->b_p_sua);
#endif
case PV_SWF: return (char_u *)&(curbuf->b_p_swf);
#ifdef FEAT_SYN_HL
case PV_SMC: return (char_u *)&(curbuf->b_p_smc);
case PV_SYN: return (char_u *)&(curbuf->b_p_syn);
#endif
#ifdef FEAT_SPELL
case PV_SPC: return (char_u *)&(curwin->w_s->b_p_spc);
case PV_SPF: return (char_u *)&(curwin->w_s->b_p_spf);
case PV_SPL: return (char_u *)&(curwin->w_s->b_p_spl);
#endif
case PV_SW: return (char_u *)&(curbuf->b_p_sw);
case PV_TS: return (char_u *)&(curbuf->b_p_ts);
case PV_TW: return (char_u *)&(curbuf->b_p_tw);
case PV_TX: return (char_u *)&(curbuf->b_p_tx);
#ifdef FEAT_PERSISTENT_UNDO
case PV_UDF: return (char_u *)&(curbuf->b_p_udf);
#endif
case PV_WM: return (char_u *)&(curbuf->b_p_wm);
#ifdef FEAT_KEYMAP
case PV_KMAP: return (char_u *)&(curbuf->b_p_keymap);
#endif
#ifdef FEAT_SIGNS
case PV_SCL: return (char_u *)&(curwin->w_p_scl);
#endif
default: EMSG(_("E356: get_varp ERROR"));
}
/* always return a valid pointer to avoid a crash! */
return (char_u *)&(curbuf->b_p_wm);
}
/*
* Get the value of 'equalprg', either the buffer-local one or the global one.
*/
char_u *
get_equalprg(void)
{
if (*curbuf->b_p_ep == NUL)
return p_ep;
return curbuf->b_p_ep;
}
#if defined(FEAT_WINDOWS) || defined(PROTO)
/*
* Copy options from one window to another.
* Used when splitting a window.
*/
void
win_copy_options(win_T *wp_from, win_T *wp_to)
{
copy_winopt(&wp_from->w_onebuf_opt, &wp_to->w_onebuf_opt);
copy_winopt(&wp_from->w_allbuf_opt, &wp_to->w_allbuf_opt);
# ifdef FEAT_RIGHTLEFT
# ifdef FEAT_FKMAP
/* Is this right? */
wp_to->w_farsi = wp_from->w_farsi;
# endif
# endif
#if defined(FEAT_LINEBREAK)
briopt_check(wp_to);
#endif
}
#endif
/*
* Copy the options from one winopt_T to another.
* Doesn't free the old option values in "to", use clear_winopt() for that.
* The 'scroll' option is not copied, because it depends on the window height.
* The 'previewwindow' option is reset, there can be only one preview window.
*/
void
copy_winopt(winopt_T *from, winopt_T *to)
{
#ifdef FEAT_ARABIC
to->wo_arab = from->wo_arab;
#endif
to->wo_list = from->wo_list;
to->wo_nu = from->wo_nu;
to->wo_rnu = from->wo_rnu;
#ifdef FEAT_LINEBREAK
to->wo_nuw = from->wo_nuw;
#endif
#ifdef FEAT_RIGHTLEFT
to->wo_rl = from->wo_rl;
to->wo_rlc = vim_strsave(from->wo_rlc);
#endif
#ifdef FEAT_STL_OPT
to->wo_stl = vim_strsave(from->wo_stl);
#endif
to->wo_wrap = from->wo_wrap;
#ifdef FEAT_DIFF
to->wo_wrap_save = from->wo_wrap_save;
#endif
#ifdef FEAT_LINEBREAK
to->wo_lbr = from->wo_lbr;
to->wo_bri = from->wo_bri;
to->wo_briopt = vim_strsave(from->wo_briopt);
#endif
#ifdef FEAT_SCROLLBIND
to->wo_scb = from->wo_scb;
to->wo_scb_save = from->wo_scb_save;
#endif
#ifdef FEAT_CURSORBIND
to->wo_crb = from->wo_crb;
to->wo_crb_save = from->wo_crb_save;
#endif
#ifdef FEAT_SPELL
to->wo_spell = from->wo_spell;
#endif
#ifdef FEAT_SYN_HL
to->wo_cuc = from->wo_cuc;
to->wo_cul = from->wo_cul;
to->wo_cc = vim_strsave(from->wo_cc);
#endif
#ifdef FEAT_DIFF
to->wo_diff = from->wo_diff;
to->wo_diff_saved = from->wo_diff_saved;
#endif
#ifdef FEAT_CONCEAL
to->wo_cocu = vim_strsave(from->wo_cocu);
to->wo_cole = from->wo_cole;
#endif
#ifdef FEAT_FOLDING
to->wo_fdc = from->wo_fdc;
to->wo_fdc_save = from->wo_fdc_save;
to->wo_fen = from->wo_fen;
to->wo_fen_save = from->wo_fen_save;
to->wo_fdi = vim_strsave(from->wo_fdi);
to->wo_fml = from->wo_fml;
to->wo_fdl = from->wo_fdl;
to->wo_fdl_save = from->wo_fdl_save;
to->wo_fdm = vim_strsave(from->wo_fdm);
to->wo_fdm_save = from->wo_diff_saved
? vim_strsave(from->wo_fdm_save) : empty_option;
to->wo_fdn = from->wo_fdn;
# ifdef FEAT_EVAL
to->wo_fde = vim_strsave(from->wo_fde);
to->wo_fdt = vim_strsave(from->wo_fdt);
# endif
to->wo_fmr = vim_strsave(from->wo_fmr);
#endif
#ifdef FEAT_SIGNS
to->wo_scl = vim_strsave(from->wo_scl);
#endif
check_winopt(to); /* don't want NULL pointers */
}
/*
* Check string options in a window for a NULL value.
*/
void
check_win_options(win_T *win)
{
check_winopt(&win->w_onebuf_opt);
check_winopt(&win->w_allbuf_opt);
}
/*
* Check for NULL pointers in a winopt_T and replace them with empty_option.
*/
static void
check_winopt(winopt_T *wop UNUSED)
{
#ifdef FEAT_FOLDING
check_string_option(&wop->wo_fdi);
check_string_option(&wop->wo_fdm);
check_string_option(&wop->wo_fdm_save);
# ifdef FEAT_EVAL
check_string_option(&wop->wo_fde);
check_string_option(&wop->wo_fdt);
# endif
check_string_option(&wop->wo_fmr);
#endif
#ifdef FEAT_SIGNS
check_string_option(&wop->wo_scl);
#endif
#ifdef FEAT_RIGHTLEFT
check_string_option(&wop->wo_rlc);
#endif
#ifdef FEAT_STL_OPT
check_string_option(&wop->wo_stl);
#endif
#ifdef FEAT_SYN_HL
check_string_option(&wop->wo_cc);
#endif
#ifdef FEAT_CONCEAL
check_string_option(&wop->wo_cocu);
#endif
#ifdef FEAT_LINEBREAK
check_string_option(&wop->wo_briopt);
#endif
}
/*
* Free the allocated memory inside a winopt_T.
*/
void
clear_winopt(winopt_T *wop UNUSED)
{
#ifdef FEAT_FOLDING
clear_string_option(&wop->wo_fdi);
clear_string_option(&wop->wo_fdm);
clear_string_option(&wop->wo_fdm_save);
# ifdef FEAT_EVAL
clear_string_option(&wop->wo_fde);
clear_string_option(&wop->wo_fdt);
# endif
clear_string_option(&wop->wo_fmr);
#endif
#ifdef FEAT_SIGNS
clear_string_option(&wop->wo_scl);
#endif
#ifdef FEAT_LINEBREAK
clear_string_option(&wop->wo_briopt);
#endif
#ifdef FEAT_RIGHTLEFT
clear_string_option(&wop->wo_rlc);
#endif
#ifdef FEAT_STL_OPT
clear_string_option(&wop->wo_stl);
#endif
#ifdef FEAT_SYN_HL
clear_string_option(&wop->wo_cc);
#endif
#ifdef FEAT_CONCEAL
clear_string_option(&wop->wo_cocu);
#endif
}
/*
* Copy global option values to local options for one buffer.
* Used when creating a new buffer and sometimes when entering a buffer.
* flags:
* BCO_ENTER We will enter the buf buffer.
* BCO_ALWAYS Always copy the options, but only set b_p_initialized when
* appropriate.
* BCO_NOHELP Don't copy the values to a help buffer.
*/
void
buf_copy_options(buf_T *buf, int flags)
{
int should_copy = TRUE;
char_u *save_p_isk = NULL; /* init for GCC */
int dont_do_help;
int did_isk = FALSE;
/*
* Skip this when the option defaults have not been set yet. Happens when
* main() allocates the first buffer.
*/
if (p_cpo != NULL)
{
/*
* Always copy when entering and 'cpo' contains 'S'.
* Don't copy when already initialized.
* Don't copy when 'cpo' contains 's' and not entering.
* 'S' BCO_ENTER initialized 's' should_copy
* yes yes X X TRUE
* yes no yes X FALSE
* no X yes X FALSE
* X no no yes FALSE
* X no no no TRUE
* no yes no X TRUE
*/
if ((vim_strchr(p_cpo, CPO_BUFOPTGLOB) == NULL || !(flags & BCO_ENTER))
&& (buf->b_p_initialized
|| (!(flags & BCO_ENTER)
&& vim_strchr(p_cpo, CPO_BUFOPT) != NULL)))
should_copy = FALSE;
if (should_copy || (flags & BCO_ALWAYS))
{
/* Don't copy the options specific to a help buffer when
* BCO_NOHELP is given or the options were initialized already
* (jumping back to a help file with CTRL-T or CTRL-O) */
dont_do_help = ((flags & BCO_NOHELP) && buf->b_help)
|| buf->b_p_initialized;
if (dont_do_help) /* don't free b_p_isk */
{
save_p_isk = buf->b_p_isk;
buf->b_p_isk = NULL;
}
/*
* Always free the allocated strings.
* If not already initialized, set 'readonly' and copy 'fileformat'.
*/
if (!buf->b_p_initialized)
{
free_buf_options(buf, TRUE);
buf->b_p_ro = FALSE; /* don't copy readonly */
buf->b_p_tx = p_tx;
#ifdef FEAT_MBYTE
buf->b_p_fenc = vim_strsave(p_fenc);
#endif
switch (*p_ffs)
{
case 'm':
buf->b_p_ff = vim_strsave((char_u *)FF_MAC); break;
case 'd':
buf->b_p_ff = vim_strsave((char_u *)FF_DOS); break;
case 'u':
buf->b_p_ff = vim_strsave((char_u *)FF_UNIX); break;
default:
buf->b_p_ff = vim_strsave(p_ff);
}
if (buf->b_p_ff != NULL)
buf->b_start_ffc = *buf->b_p_ff;
#if defined(FEAT_QUICKFIX)
buf->b_p_bh = empty_option;
buf->b_p_bt = empty_option;
#endif
}
else
free_buf_options(buf, FALSE);
buf->b_p_ai = p_ai;
buf->b_p_ai_nopaste = p_ai_nopaste;
buf->b_p_sw = p_sw;
buf->b_p_tw = p_tw;
buf->b_p_tw_nopaste = p_tw_nopaste;
buf->b_p_tw_nobin = p_tw_nobin;
buf->b_p_wm = p_wm;
buf->b_p_wm_nopaste = p_wm_nopaste;
buf->b_p_wm_nobin = p_wm_nobin;
buf->b_p_bin = p_bin;
#ifdef FEAT_MBYTE
buf->b_p_bomb = p_bomb;
#endif
buf->b_p_fixeol = p_fixeol;
buf->b_p_et = p_et;
buf->b_p_et_nobin = p_et_nobin;
buf->b_p_et_nopaste = p_et_nopaste;
buf->b_p_ml = p_ml;
buf->b_p_ml_nobin = p_ml_nobin;
buf->b_p_inf = p_inf;
buf->b_p_swf = p_swf;
#ifdef FEAT_INS_EXPAND
buf->b_p_cpt = vim_strsave(p_cpt);
#endif
#ifdef FEAT_COMPL_FUNC
buf->b_p_cfu = vim_strsave(p_cfu);
buf->b_p_ofu = vim_strsave(p_ofu);
#endif
buf->b_p_sts = p_sts;
buf->b_p_sts_nopaste = p_sts_nopaste;
buf->b_p_sn = p_sn;
#ifdef FEAT_COMMENTS
buf->b_p_com = vim_strsave(p_com);
#endif
#ifdef FEAT_FOLDING
buf->b_p_cms = vim_strsave(p_cms);
#endif
buf->b_p_fo = vim_strsave(p_fo);
buf->b_p_flp = vim_strsave(p_flp);
buf->b_p_nf = vim_strsave(p_nf);
buf->b_p_mps = vim_strsave(p_mps);
#ifdef FEAT_SMARTINDENT
buf->b_p_si = p_si;
#endif
buf->b_p_ci = p_ci;
#ifdef FEAT_CINDENT
buf->b_p_cin = p_cin;
buf->b_p_cink = vim_strsave(p_cink);
buf->b_p_cino = vim_strsave(p_cino);
#endif
#ifdef FEAT_AUTOCMD
/* Don't copy 'filetype', it must be detected */
buf->b_p_ft = empty_option;
#endif
buf->b_p_pi = p_pi;
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
buf->b_p_cinw = vim_strsave(p_cinw);
#endif
#ifdef FEAT_LISP
buf->b_p_lisp = p_lisp;
#endif
#ifdef FEAT_SYN_HL
/* Don't copy 'syntax', it must be set */
buf->b_p_syn = empty_option;
buf->b_p_smc = p_smc;
buf->b_s.b_syn_isk = empty_option;
#endif
#ifdef FEAT_SPELL
buf->b_s.b_p_spc = vim_strsave(p_spc);
(void)compile_cap_prog(&buf->b_s);
buf->b_s.b_p_spf = vim_strsave(p_spf);
buf->b_s.b_p_spl = vim_strsave(p_spl);
#endif
#if defined(FEAT_CINDENT) && defined(FEAT_EVAL)
buf->b_p_inde = vim_strsave(p_inde);
buf->b_p_indk = vim_strsave(p_indk);
#endif
#if defined(FEAT_EVAL)
buf->b_p_fex = vim_strsave(p_fex);
#endif
#ifdef FEAT_CRYPT
buf->b_p_key = vim_strsave(p_key);
#endif
#ifdef FEAT_SEARCHPATH
buf->b_p_sua = vim_strsave(p_sua);
#endif
#ifdef FEAT_KEYMAP
buf->b_p_keymap = vim_strsave(p_keymap);
buf->b_kmap_state |= KEYMAP_INIT;
#endif
/* This isn't really an option, but copying the langmap and IME
* state from the current buffer is better than resetting it. */
buf->b_p_iminsert = p_iminsert;
buf->b_p_imsearch = p_imsearch;
/* options that are normally global but also have a local value
* are not copied, start using the global value */
buf->b_p_ar = -1;
buf->b_p_ul = NO_LOCAL_UNDOLEVEL;
buf->b_p_bkc = empty_option;
buf->b_bkc_flags = 0;
#ifdef FEAT_QUICKFIX
buf->b_p_gp = empty_option;
buf->b_p_mp = empty_option;
buf->b_p_efm = empty_option;
#endif
buf->b_p_ep = empty_option;
buf->b_p_kp = empty_option;
buf->b_p_path = empty_option;
buf->b_p_tags = empty_option;
buf->b_p_tc = empty_option;
buf->b_tc_flags = 0;
#ifdef FEAT_FIND_ID
buf->b_p_def = empty_option;
buf->b_p_inc = empty_option;
# ifdef FEAT_EVAL
buf->b_p_inex = vim_strsave(p_inex);
# endif
#endif
#ifdef FEAT_INS_EXPAND
buf->b_p_dict = empty_option;
buf->b_p_tsr = empty_option;
#endif
#ifdef FEAT_TEXTOBJ
buf->b_p_qe = vim_strsave(p_qe);
#endif
#if defined(FEAT_BEVAL) && defined(FEAT_EVAL)
buf->b_p_bexpr = empty_option;
#endif
#if defined(FEAT_CRYPT)
buf->b_p_cm = empty_option;
#endif
#ifdef FEAT_PERSISTENT_UNDO
buf->b_p_udf = p_udf;
#endif
#ifdef FEAT_LISP
buf->b_p_lw = empty_option;
#endif
/*
* Don't copy the options set by ex_help(), use the saved values,
* when going from a help buffer to a non-help buffer.
* Don't touch these at all when BCO_NOHELP is used and going from
* or to a help buffer.
*/
if (dont_do_help)
buf->b_p_isk = save_p_isk;
else
{
buf->b_p_isk = vim_strsave(p_isk);
did_isk = TRUE;
buf->b_p_ts = p_ts;
buf->b_help = FALSE;
#ifdef FEAT_QUICKFIX
if (buf->b_p_bt[0] == 'h')
clear_string_option(&buf->b_p_bt);
#endif
buf->b_p_ma = p_ma;
}
}
/*
* When the options should be copied (ignoring BCO_ALWAYS), set the
* flag that indicates that the options have been initialized.
*/
if (should_copy)
buf->b_p_initialized = TRUE;
}
check_buf_options(buf); /* make sure we don't have NULLs */
if (did_isk)
(void)buf_init_chartab(buf, FALSE);
}
/*
* Reset the 'modifiable' option and its default value.
*/
void
reset_modifiable(void)
{
int opt_idx;
curbuf->b_p_ma = FALSE;
p_ma = FALSE;
opt_idx = findoption((char_u *)"ma");
if (opt_idx >= 0)
options[opt_idx].def_val[VI_DEFAULT] = FALSE;
}
/*
* Set the global value for 'iminsert' to the local value.
*/
void
set_iminsert_global(void)
{
p_iminsert = curbuf->b_p_iminsert;
}
/*
* Set the global value for 'imsearch' to the local value.
*/
void
set_imsearch_global(void)
{
p_imsearch = curbuf->b_p_imsearch;
}
#if defined(FEAT_CMDL_COMPL) || defined(PROTO)
static int expand_option_idx = -1;
static char_u expand_option_name[5] = {'t', '_', NUL, NUL, NUL};
static int expand_option_flags = 0;
void
set_context_in_set_cmd(
expand_T *xp,
char_u *arg,
int opt_flags) /* OPT_GLOBAL and/or OPT_LOCAL */
{
int nextchar;
long_u flags = 0; /* init for GCC */
int opt_idx = 0; /* init for GCC */
char_u *p;
char_u *s;
int is_term_option = FALSE;
int key;
expand_option_flags = opt_flags;
xp->xp_context = EXPAND_SETTINGS;
if (*arg == NUL)
{
xp->xp_pattern = arg;
return;
}
p = arg + STRLEN(arg) - 1;
if (*p == ' ' && *(p - 1) != '\\')
{
xp->xp_pattern = p + 1;
return;
}
while (p > arg)
{
s = p;
/* count number of backslashes before ' ' or ',' */
if (*p == ' ' || *p == ',')
{
while (s > arg && *(s - 1) == '\\')
--s;
}
/* break at a space with an even number of backslashes */
if (*p == ' ' && ((p - s) & 1) == 0)
{
++p;
break;
}
--p;
}
if (STRNCMP(p, "no", 2) == 0 && STRNCMP(p, "novice", 6) != 0)
{
xp->xp_context = EXPAND_BOOL_SETTINGS;
p += 2;
}
if (STRNCMP(p, "inv", 3) == 0)
{
xp->xp_context = EXPAND_BOOL_SETTINGS;
p += 3;
}
xp->xp_pattern = arg = p;
if (*arg == '<')
{
while (*p != '>')
if (*p++ == NUL) /* expand terminal option name */
return;
key = get_special_key_code(arg + 1);
if (key == 0) /* unknown name */
{
xp->xp_context = EXPAND_NOTHING;
return;
}
nextchar = *++p;
is_term_option = TRUE;
expand_option_name[2] = KEY2TERMCAP0(key);
expand_option_name[3] = KEY2TERMCAP1(key);
}
else
{
if (p[0] == 't' && p[1] == '_')
{
p += 2;
if (*p != NUL)
++p;
if (*p == NUL)
return; /* expand option name */
nextchar = *++p;
is_term_option = TRUE;
expand_option_name[2] = p[-2];
expand_option_name[3] = p[-1];
}
else
{
/* Allow * wildcard */
while (ASCII_ISALNUM(*p) || *p == '_' || *p == '*')
p++;
if (*p == NUL)
return;
nextchar = *p;
*p = NUL;
opt_idx = findoption(arg);
*p = nextchar;
if (opt_idx == -1 || options[opt_idx].var == NULL)
{
xp->xp_context = EXPAND_NOTHING;
return;
}
flags = options[opt_idx].flags;
if (flags & P_BOOL)
{
xp->xp_context = EXPAND_NOTHING;
return;
}
}
}
/* handle "-=" and "+=" */
if ((nextchar == '-' || nextchar == '+' || nextchar == '^') && p[1] == '=')
{
++p;
nextchar = '=';
}
if ((nextchar != '=' && nextchar != ':')
|| xp->xp_context == EXPAND_BOOL_SETTINGS)
{
xp->xp_context = EXPAND_UNSUCCESSFUL;
return;
}
if (xp->xp_context != EXPAND_BOOL_SETTINGS && p[1] == NUL)
{
xp->xp_context = EXPAND_OLD_SETTING;
if (is_term_option)
expand_option_idx = -1;
else
expand_option_idx = opt_idx;
xp->xp_pattern = p + 1;
return;
}
xp->xp_context = EXPAND_NOTHING;
if (is_term_option || (flags & P_NUM))
return;
xp->xp_pattern = p + 1;
if (flags & P_EXPAND)
{
p = options[opt_idx].var;
if (p == (char_u *)&p_bdir
|| p == (char_u *)&p_dir
|| p == (char_u *)&p_path
|| p == (char_u *)&p_pp
|| p == (char_u *)&p_rtp
#ifdef FEAT_SEARCHPATH
|| p == (char_u *)&p_cdpath
#endif
#ifdef FEAT_SESSION
|| p == (char_u *)&p_vdir
#endif
)
{
xp->xp_context = EXPAND_DIRECTORIES;
if (p == (char_u *)&p_path
#ifdef FEAT_SEARCHPATH
|| p == (char_u *)&p_cdpath
#endif
)
xp->xp_backslash = XP_BS_THREE;
else
xp->xp_backslash = XP_BS_ONE;
}
else
{
xp->xp_context = EXPAND_FILES;
/* for 'tags' need three backslashes for a space */
if (p == (char_u *)&p_tags)
xp->xp_backslash = XP_BS_THREE;
else
xp->xp_backslash = XP_BS_ONE;
}
}
/* For an option that is a list of file names, find the start of the
* last file name. */
for (p = arg + STRLEN(arg) - 1; p > xp->xp_pattern; --p)
{
/* count number of backslashes before ' ' or ',' */
if (*p == ' ' || *p == ',')
{
s = p;
while (s > xp->xp_pattern && *(s - 1) == '\\')
--s;
if ((*p == ' ' && (xp->xp_backslash == XP_BS_THREE && (p - s) < 3))
|| (*p == ',' && (flags & P_COMMA) && ((p - s) & 1) == 0))
{
xp->xp_pattern = p + 1;
break;
}
}
#ifdef FEAT_SPELL
/* for 'spellsuggest' start at "file:" */
if (options[opt_idx].var == (char_u *)&p_sps
&& STRNCMP(p, "file:", 5) == 0)
{
xp->xp_pattern = p + 5;
break;
}
#endif
}
return;
}
int
ExpandSettings(
expand_T *xp,
regmatch_T *regmatch,
int *num_file,
char_u ***file)
{
int num_normal = 0; /* Nr of matching non-term-code settings */
int num_term = 0; /* Nr of matching terminal code settings */
int opt_idx;
int match;
int count = 0;
char_u *str;
int loop;
int is_term_opt;
char_u name_buf[MAX_KEY_NAME_LEN];
static char *(names[]) = {"all", "termcap"};
int ic = regmatch->rm_ic; /* remember the ignore-case flag */
/* do this loop twice:
* loop == 0: count the number of matching options
* loop == 1: copy the matching options into allocated memory
*/
for (loop = 0; loop <= 1; ++loop)
{
regmatch->rm_ic = ic;
if (xp->xp_context != EXPAND_BOOL_SETTINGS)
{
for (match = 0; match < (int)(sizeof(names) / sizeof(char *));
++match)
if (vim_regexec(regmatch, (char_u *)names[match], (colnr_T)0))
{
if (loop == 0)
num_normal++;
else
(*file)[count++] = vim_strsave((char_u *)names[match]);
}
}
for (opt_idx = 0; (str = (char_u *)options[opt_idx].fullname) != NULL;
opt_idx++)
{
if (options[opt_idx].var == NULL)
continue;
if (xp->xp_context == EXPAND_BOOL_SETTINGS
&& !(options[opt_idx].flags & P_BOOL))
continue;
is_term_opt = istermoption(&options[opt_idx]);
if (is_term_opt && num_normal > 0)
continue;
match = FALSE;
if (vim_regexec(regmatch, str, (colnr_T)0)
|| (options[opt_idx].shortname != NULL
&& vim_regexec(regmatch,
(char_u *)options[opt_idx].shortname, (colnr_T)0)))
match = TRUE;
else if (is_term_opt)
{
name_buf[0] = '<';
name_buf[1] = 't';
name_buf[2] = '_';
name_buf[3] = str[2];
name_buf[4] = str[3];
name_buf[5] = '>';
name_buf[6] = NUL;
if (vim_regexec(regmatch, name_buf, (colnr_T)0))
{
match = TRUE;
str = name_buf;
}
}
if (match)
{
if (loop == 0)
{
if (is_term_opt)
num_term++;
else
num_normal++;
}
else
(*file)[count++] = vim_strsave(str);
}
}
/*
* Check terminal key codes, these are not in the option table
*/
if (xp->xp_context != EXPAND_BOOL_SETTINGS && num_normal == 0)
{
for (opt_idx = 0; (str = get_termcode(opt_idx)) != NULL; opt_idx++)
{
if (!isprint(str[0]) || !isprint(str[1]))
continue;
name_buf[0] = 't';
name_buf[1] = '_';
name_buf[2] = str[0];
name_buf[3] = str[1];
name_buf[4] = NUL;
match = FALSE;
if (vim_regexec(regmatch, name_buf, (colnr_T)0))
match = TRUE;
else
{
name_buf[0] = '<';
name_buf[1] = 't';
name_buf[2] = '_';
name_buf[3] = str[0];
name_buf[4] = str[1];
name_buf[5] = '>';
name_buf[6] = NUL;
if (vim_regexec(regmatch, name_buf, (colnr_T)0))
match = TRUE;
}
if (match)
{
if (loop == 0)
num_term++;
else
(*file)[count++] = vim_strsave(name_buf);
}
}
/*
* Check special key names.
*/
regmatch->rm_ic = TRUE; /* ignore case here */
for (opt_idx = 0; (str = get_key_name(opt_idx)) != NULL; opt_idx++)
{
/* Defensive bounds check: skip key names that would not fit in
* name_buf together with the added '<', '>' and NUL. */
if (STRLEN(str) > sizeof(name_buf) - 3)
continue;
name_buf[0] = '<';
STRCPY(name_buf + 1, str);
STRCAT(name_buf, ">");
if (vim_regexec(regmatch, name_buf, (colnr_T)0))
{
if (loop == 0)
num_term++;
else
(*file)[count++] = vim_strsave(name_buf);
}
}
}
if (loop == 0)
{
if (num_normal > 0)
*num_file = num_normal;
else if (num_term > 0)
*num_file = num_term;
else
return OK;
*file = (char_u **)alloc((unsigned)(*num_file * sizeof(char_u *)));
if (*file == NULL)
{
*file = (char_u **)"";
return FAIL;
}
}
}
return OK;
}
int
ExpandOldSetting(int *num_file, char_u ***file)
{
char_u *var = NULL; /* init for GCC */
char_u *buf;
*num_file = 0;
*file = (char_u **)alloc((unsigned)sizeof(char_u *));
if (*file == NULL)
return FAIL;
/*
* For a terminal key code expand_option_idx is < 0.
*/
if (expand_option_idx < 0)
{
var = find_termcode(expand_option_name + 2);
if (var == NULL)
expand_option_idx = findoption(expand_option_name);
}
if (expand_option_idx >= 0)
{
/* put string of option value in NameBuff */
option_value2string(&options[expand_option_idx], expand_option_flags);
var = NameBuff;
}
else if (var == NULL)
var = (char_u *)"";
/* A backslash is required before some characters. This is the reverse of
* what happens in do_set(). */
buf = vim_strsave_escaped(var, escape_chars);
if (buf == NULL)
{
vim_free(*file);
*file = NULL;
return FAIL;
}
#ifdef BACKSLASH_IN_FILENAME
/* For MS-Windows et al. we don't double backslashes at the start and
* before a file name character. */
for (var = buf; *var != NUL; mb_ptr_adv(var))
if (var[0] == '\\' && var[1] == '\\'
&& expand_option_idx >= 0
&& (options[expand_option_idx].flags & P_EXPAND)
&& vim_isfilec(var[2])
&& (var[2] != '\\' || (var == buf && var[4] != '\\')))
STRMOVE(var, var + 1);
#endif
(*file)[0] = buf;
*num_file = 1;
return OK;
}
#endif
/*
* Get the value for the numeric or string option *opp in a nice format into
* NameBuff[]. Must not be called with a hidden option!
*/
static void
option_value2string(
struct vimoption *opp,
int opt_flags) /* OPT_GLOBAL and/or OPT_LOCAL */
{
char_u *varp;
varp = get_varp_scope(opp, opt_flags);
if (opp->flags & P_NUM)
{
long wc = 0;
if (wc_use_keyname(varp, &wc))
STRCPY(NameBuff, get_special_key_name((int)wc, 0));
else if (wc != 0)
STRCPY(NameBuff, transchar((int)wc));
else
sprintf((char *)NameBuff, "%ld", *(long *)varp);
}
else /* P_STRING */
{
varp = *(char_u **)(varp);
if (varp == NULL) /* just in case */
NameBuff[0] = NUL;
#ifdef FEAT_CRYPT
/* don't show the actual value of 'key', only that it's set */
else if (opp->var == (char_u *)&p_key && *varp)
STRCPY(NameBuff, "*****");
#endif
else if (opp->flags & P_EXPAND)
home_replace(NULL, varp, NameBuff, MAXPATHL, FALSE);
/* Translate 'pastetoggle' into special key names */
else if ((char_u **)opp->var == &p_pt)
str2specialbuf(p_pt, NameBuff, MAXPATHL);
else
vim_strncpy(NameBuff, varp, MAXPATHL - 1);
}
}
/*
* Return TRUE if "varp" points to 'wildchar' or 'wildcharm' and it can be
* printed as a keyname.
* "*wcp" is set to the value of the option if it's 'wildchar' or 'wildcharm'.
*/
static int
wc_use_keyname(char_u *varp, long *wcp)
{
if (((long *)varp == &p_wc) || ((long *)varp == &p_wcm))
{
*wcp = *(long *)varp;
if (IS_SPECIAL(*wcp) || find_special_key_in_table((int)*wcp) >= 0)
return TRUE;
}
return FALSE;
}
#if defined(FEAT_LANGMAP) || defined(PROTO)
/*
* Any character has an equivalent 'langmap' character. This is used for
* keyboards that have a special language mode that sends characters above
* 128 (although other characters can be translated too). The "to" field is a
* Vim command character. This avoids having to switch the keyboard back to
* ASCII mode when leaving Insert mode.
*
* langmap_mapchar[] maps any of 256 chars to an ASCII char used for Vim
* commands.
* When FEAT_MBYTE is defined langmap_mapga.ga_data is a sorted table of
* langmap_entry_T. This does the same as langmap_mapchar[] for characters >=
* 256.
*/
# if defined(FEAT_MBYTE) || defined(PROTO)
/*
* With multi-byte support use growarray for 'langmap' chars >= 256
*/
typedef struct
{
int from;
int to;
} langmap_entry_T;
static garray_T langmap_mapga;
static void langmap_set_entry(int from, int to);
/*
* Search for an entry in "langmap_mapga" for "from". If found set the "to"
* field. If not found insert a new entry at the appropriate location.
*/
static void
langmap_set_entry(int from, int to)
{
langmap_entry_T *entries = (langmap_entry_T *)(langmap_mapga.ga_data);
int a = 0;
int b = langmap_mapga.ga_len;
/* Do a binary search for an existing entry. */
while (a != b)
{
int i = (a + b) / 2;
int d = entries[i].from - from;
if (d == 0)
{
entries[i].to = to;
return;
}
if (d < 0)
a = i + 1;
else
b = i;
}
if (ga_grow(&langmap_mapga, 1) != OK)
return; /* out of memory */
/* insert new entry at position "a" */
entries = (langmap_entry_T *)(langmap_mapga.ga_data) + a;
mch_memmove(entries + 1, entries,
(langmap_mapga.ga_len - a) * sizeof(langmap_entry_T));
++langmap_mapga.ga_len;
entries[0].from = from;
entries[0].to = to;
}
/*
* Apply 'langmap' to multi-byte character "c" and return the result.
*/
int
langmap_adjust_mb(int c)
{
langmap_entry_T *entries = (langmap_entry_T *)(langmap_mapga.ga_data);
int a = 0;
int b = langmap_mapga.ga_len;
while (a != b)
{
int i = (a + b) / 2;
int d = entries[i].from - c;
if (d == 0)
return entries[i].to; /* found matching entry */
if (d < 0)
a = i + 1;
else
b = i;
}
return c; /* no entry found, return "c" unmodified */
}
# endif
static void
langmap_init(void)
{
int i;
for (i = 0; i < 256; i++)
langmap_mapchar[i] = i; /* we init with a one-to-one map */
# ifdef FEAT_MBYTE
ga_init2(&langmap_mapga, sizeof(langmap_entry_T), 8);
# endif
}
/*
* Called when langmap option is set; the language map can be
* changed at any time!
*/
static void
langmap_set(void)
{
char_u *p;
char_u *p2;
int from, to;
#ifdef FEAT_MBYTE
ga_clear(&langmap_mapga); /* clear the previous map first */
#endif
langmap_init(); /* back to one-to-one map */
for (p = p_langmap; p[0] != NUL; )
{
for (p2 = p; p2[0] != NUL && p2[0] != ',' && p2[0] != ';';
mb_ptr_adv(p2))
{
if (p2[0] == '\\' && p2[1] != NUL)
++p2;
}
if (p2[0] == ';')
++p2; /* abcd;ABCD form, p2 points to A */
else
p2 = NULL; /* aAbBcCdD form, p2 is NULL */
while (p[0])
{
if (p[0] == ',')
{
++p;
break;
}
if (p[0] == '\\' && p[1] != NUL)
++p;
#ifdef FEAT_MBYTE
from = (*mb_ptr2char)(p);
#else
from = p[0];
#endif
to = NUL;
if (p2 == NULL)
{
mb_ptr_adv(p);
if (p[0] != ',')
{
if (p[0] == '\\')
++p;
#ifdef FEAT_MBYTE
to = (*mb_ptr2char)(p);
#else
to = p[0];
#endif
}
}
else
{
if (p2[0] != ',')
{
if (p2[0] == '\\')
++p2;
#ifdef FEAT_MBYTE
to = (*mb_ptr2char)(p2);
#else
to = p2[0];
#endif
}
}
if (to == NUL)
{
EMSG2(_("E357: 'langmap': Matching character missing for %s"),
transchar(from));
return;
}
#ifdef FEAT_MBYTE
if (from >= 256)
langmap_set_entry(from, to);
else
#endif
langmap_mapchar[from & 255] = to;
/* Advance to next pair */
mb_ptr_adv(p);
if (p2 != NULL)
{
mb_ptr_adv(p2);
if (*p == ';')
{
p = p2;
if (p[0] != NUL)
{
if (p[0] != ',')
{
EMSG2(_("E358: 'langmap': Extra characters after semicolon: %s"), p);
return;
}
++p;
}
break;
}
}
}
}
}
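/*
 * Illustrative examples of the two 'langmap' forms parsed above:
 * ":set langmap=aA,bB" uses the pair form: 'a' maps to 'A', 'b' to 'B'.
 * ":set langmap=abc;ABC" uses the semicolon form: 'a'->'A', 'b'->'B',
 * 'c'->'C', matched by position.
 * A literal ',' or ';' in either half must be escaped with a backslash.
 */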
#endif
/*
* Return TRUE if format option 'x' is in effect.
* Take care of no formatting when 'paste' is set.
*/
int
has_format_option(int x)
{
if (p_paste)
return FALSE;
return (vim_strchr(curbuf->b_p_fo, x) != NULL);
}
/*
* Return TRUE if "x" is present in 'shortmess' option, or
* 'shortmess' contains 'a' and "x" is present in SHM_A.
*/
int
shortmess(int x)
{
return p_shm != NULL &&
( vim_strchr(p_shm, x) != NULL
|| (vim_strchr(p_shm, 'a') != NULL
&& vim_strchr((char_u *)SHM_A, x) != NULL));
}
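/*
 * Example: shortmess('f') is TRUE when 'shortmess' contains 'f' itself,
 * or when it contains 'a' and 'f' is one of the flags listed in SHM_A.
 */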
/*
* paste_option_changed() - Called after p_paste was set or reset.
*/
static void
paste_option_changed(void)
{
static int old_p_paste = FALSE;
static int save_sm = 0;
static int save_sta = 0;
#ifdef FEAT_CMDL_INFO
static int save_ru = 0;
#endif
#ifdef FEAT_RIGHTLEFT
static int save_ri = 0;
static int save_hkmap = 0;
#endif
buf_T *buf;
if (p_paste)
{
/*
* Paste switched from off to on.
* Save the current values, so they can be restored later.
*/
if (!old_p_paste)
{
/* save options for each buffer */
FOR_ALL_BUFFERS(buf)
{
buf->b_p_tw_nopaste = buf->b_p_tw;
buf->b_p_wm_nopaste = buf->b_p_wm;
buf->b_p_sts_nopaste = buf->b_p_sts;
buf->b_p_ai_nopaste = buf->b_p_ai;
buf->b_p_et_nopaste = buf->b_p_et;
}
/* save global options */
save_sm = p_sm;
save_sta = p_sta;
#ifdef FEAT_CMDL_INFO
save_ru = p_ru;
#endif
#ifdef FEAT_RIGHTLEFT
save_ri = p_ri;
save_hkmap = p_hkmap;
#endif
/* save global values for local buffer options */
p_ai_nopaste = p_ai;
p_et_nopaste = p_et;
p_sts_nopaste = p_sts;
p_tw_nopaste = p_tw;
p_wm_nopaste = p_wm;
}
/*
* Always set the option values, also when 'paste' is set when it is
* already on.
*/
/* set options for each buffer */
FOR_ALL_BUFFERS(buf)
{
buf->b_p_tw = 0; /* textwidth is 0 */
buf->b_p_wm = 0; /* wrapmargin is 0 */
buf->b_p_sts = 0; /* softtabstop is 0 */
buf->b_p_ai = 0; /* no auto-indent */
buf->b_p_et = 0; /* no expandtab */
}
/* set global options */
p_sm = 0; /* no showmatch */
p_sta = 0; /* no smarttab */
#ifdef FEAT_CMDL_INFO
# ifdef FEAT_WINDOWS
if (p_ru)
status_redraw_all(); /* redraw to remove the ruler */
# endif
p_ru = 0; /* no ruler */
#endif
#ifdef FEAT_RIGHTLEFT
p_ri = 0; /* no reverse insert */
p_hkmap = 0; /* no Hebrew keyboard */
#endif
/* set global values for local buffer options */
p_tw = 0;
p_wm = 0;
p_sts = 0;
p_ai = 0;
}
/*
* Paste switched from on to off: Restore saved values.
*/
else if (old_p_paste)
{
/* restore options for each buffer */
FOR_ALL_BUFFERS(buf)
{
buf->b_p_tw = buf->b_p_tw_nopaste;
buf->b_p_wm = buf->b_p_wm_nopaste;
buf->b_p_sts = buf->b_p_sts_nopaste;
buf->b_p_ai = buf->b_p_ai_nopaste;
buf->b_p_et = buf->b_p_et_nopaste;
}
/* restore global options */
p_sm = save_sm;
p_sta = save_sta;
#ifdef FEAT_CMDL_INFO
# ifdef FEAT_WINDOWS
if (p_ru != save_ru)
status_redraw_all(); /* redraw to draw the ruler */
# endif
p_ru = save_ru;
#endif
#ifdef FEAT_RIGHTLEFT
p_ri = save_ri;
p_hkmap = save_hkmap;
#endif
/* set global values for local buffer options */
p_ai = p_ai_nopaste;
p_et = p_et_nopaste;
p_sts = p_sts_nopaste;
p_tw = p_tw_nopaste;
p_wm = p_wm_nopaste;
}
old_p_paste = p_paste;
}
/*
* vimrc_found() - Called when a ".vimrc" or "VIMINIT" has been found.
*
* Reset 'compatible' and set the values for options that didn't get set yet
* to the Vim defaults.
* Don't do this if the 'compatible' option has been set or reset before.
* When "fname" is not NULL, use it to set $"envname" when it wasn't set yet.
*/
void
vimrc_found(char_u *fname, char_u *envname)
{
int opt_idx;
int dofree = FALSE;
char_u *p;
if (!option_was_set((char_u *)"cp"))
{
p_cp = FALSE;
for (opt_idx = 0; !istermoption(&options[opt_idx]); opt_idx++)
if (!(options[opt_idx].flags & (P_WAS_SET|P_VI_DEF)))
set_option_default(opt_idx, OPT_FREE, FALSE);
didset_options();
didset_options2();
}
if (fname != NULL)
{
p = vim_getenv(envname, &dofree);
if (p == NULL)
{
/* Set $MYVIMRC to the first vimrc file found. */
p = FullName_save(fname, FALSE);
if (p != NULL)
{
vim_setenv(envname, p);
vim_free(p);
}
}
else if (dofree)
vim_free(p);
}
}
/*
* Set 'compatible' on or off. Called for "-C" and "-N" command line arg.
*/
void
change_compatible(int on)
{
int opt_idx;
if (p_cp != on)
{
p_cp = on;
compatible_set();
}
opt_idx = findoption((char_u *)"cp");
if (opt_idx >= 0)
options[opt_idx].flags |= P_WAS_SET;
}
/*
* Return TRUE when option "name" has been set.
* Only works correctly for global options.
*/
int
option_was_set(char_u *name)
{
int idx;
idx = findoption(name);
if (idx < 0) /* unknown option */
return FALSE;
if (options[idx].flags & P_WAS_SET)
return TRUE;
return FALSE;
}
/*
* Reset the flag indicating option "name" was set.
*/
void
reset_option_was_set(char_u *name)
{
int idx = findoption(name);
if (idx >= 0)
options[idx].flags &= ~P_WAS_SET;
}
/*
* compatible_set() - Called when 'compatible' has been set or unset.
*
* When 'compatible' is set: Set all relevant options (those that have the
* P_VIM flag) to a Vi compatible value.
* When 'compatible' is unset: Set all options that have a different default
* for Vim (without the P_VI_DEF flag) to that default.
*/
static void
compatible_set(void)
{
int opt_idx;
for (opt_idx = 0; !istermoption(&options[opt_idx]); opt_idx++)
if ( ((options[opt_idx].flags & P_VIM) && p_cp)
|| (!(options[opt_idx].flags & P_VI_DEF) && !p_cp))
set_option_default(opt_idx, OPT_FREE, p_cp);
didset_options();
didset_options2();
}
#ifdef FEAT_LINEBREAK
# if defined(__BORLANDC__) && (__BORLANDC__ < 0x500)
/* Borland C++ screws up loop optimisation here (negri) */
#pragma option -O-l
# endif
/*
* fill_breakat_flags() -- called when 'breakat' changes value.
*/
static void
fill_breakat_flags(void)
{
char_u *p;
int i;
for (i = 0; i < 256; i++)
breakat_flags[i] = FALSE;
if (p_breakat != NULL)
for (p = p_breakat; *p; p++)
breakat_flags[*p] = TRUE;
}
# if defined(__BORLANDC__) && (__BORLANDC__ < 0x500)
#pragma option -O.l
# endif
#endif
/*
* Check an option that can be a range of string values.
*
* Return OK for correct value, FAIL otherwise.
* Empty is always OK.
*/
static int
check_opt_strings(
char_u *val,
char **values,
int list) /* when TRUE: accept a list of values */
{
return opt_strings_flags(val, values, NULL, list);
}
/*
* Handle an option that can be a range of string values.
* Set a flag in "*flagp" for each string present.
*
* Return OK for correct value, FAIL otherwise.
* Empty is always OK.
*/
static int
opt_strings_flags(
char_u *val, /* new value */
char **values, /* array of valid string values */
unsigned *flagp,
int list) /* when TRUE: accept a list of values */
{
int i;
int len;
unsigned new_flags = 0;
while (*val)
{
for (i = 0; ; ++i)
{
if (values[i] == NULL) /* val not found in values[] */
return FAIL;
len = (int)STRLEN(values[i]);
if (STRNCMP(values[i], val, len) == 0
&& ((list && val[len] == ',') || val[len] == NUL))
{
val += len + (val[len] == ',');
new_flags |= (1 << i);
break; /* check next item in val list */
}
}
}
if (flagp != NULL)
*flagp = new_flags;
return OK;
}
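/*
 * Usage sketch (the values array here is hypothetical, for illustration
 * only):
 * static char *p_xx_values[] = {"alpha", "beta", NULL};
 * unsigned flags;
 * opt_strings_flags((char_u *)"alpha,beta", p_xx_values, &flags, TRUE);
 * On return flags == 0x3: bit 0 for "alpha", bit 1 for "beta".
 */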
/*
* Read the 'wildmode' option, fill wim_flags[].
*/
static int
check_opt_wim(void)
{
char_u new_wim_flags[4];
char_u *p;
int i;
int idx = 0;
for (i = 0; i < 4; ++i)
new_wim_flags[i] = 0;
for (p = p_wim; *p; ++p)
{
for (i = 0; ASCII_ISALPHA(p[i]); ++i)
;
if (p[i] != NUL && p[i] != ',' && p[i] != ':')
return FAIL;
if (i == 7 && STRNCMP(p, "longest", 7) == 0)
new_wim_flags[idx] |= WIM_LONGEST;
else if (i == 4 && STRNCMP(p, "full", 4) == 0)
new_wim_flags[idx] |= WIM_FULL;
else if (i == 4 && STRNCMP(p, "list", 4) == 0)
new_wim_flags[idx] |= WIM_LIST;
else
return FAIL;
p += i;
if (*p == NUL)
break;
if (*p == ',')
{
if (idx == 3)
return FAIL;
++idx;
}
}
/* fill remaining entries with last flag */
while (idx < 3)
{
new_wim_flags[idx + 1] = new_wim_flags[idx];
++idx;
}
/* only when there are no errors, wim_flags[] is changed */
for (i = 0; i < 4; ++i)
wim_flags[i] = new_wim_flags[i];
return OK;
}
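/*
 * Example: ":set wildmode=list:longest,full" yields
 * wim_flags[0] == (WIM_LIST | WIM_LONGEST) and wim_flags[1] == WIM_FULL;
 * entries 2 and 3 are filled with the last value (WIM_FULL).
 */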
/*
* Check if backspacing over something is allowed.
*/
int
can_bs(
int what) /* BS_INDENT, BS_EOL or BS_START */
{
switch (*p_bs)
{
case '2': return TRUE;
case '1': return (what != BS_START);
case '0': return FALSE;
}
return vim_strchr(p_bs, what) != NULL;
}
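/*
 * Example: with ":set backspace=indent,eol" can_bs(BS_START) is FALSE
 * while can_bs(BS_INDENT) and can_bs(BS_EOL) are TRUE; the legacy
 * values "0", "1" and "2" are handled by the switch above.
 */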
/*
* Save the current values of 'fileformat' and 'fileencoding', so that we know
* the file must be considered changed when the value is different.
*/
void
save_file_ff(buf_T *buf)
{
buf->b_start_ffc = *buf->b_p_ff;
buf->b_start_eol = buf->b_p_eol;
#ifdef FEAT_MBYTE
buf->b_start_bomb = buf->b_p_bomb;
/* Only use free/alloc when necessary, they take time. */
if (buf->b_start_fenc == NULL
|| STRCMP(buf->b_start_fenc, buf->b_p_fenc) != 0)
{
vim_free(buf->b_start_fenc);
buf->b_start_fenc = vim_strsave(buf->b_p_fenc);
}
#endif
}
/*
* Return TRUE if 'fileformat' and/or 'fileencoding' has a different value
* from when editing started (save_file_ff() called).
* Also when 'endofline' was changed and 'binary' is set, or when 'bomb' was
* changed and 'binary' is not set.
* Also when 'endofline' was changed and 'fixeol' is not set.
* When "ignore_empty" is true don't consider a new, empty buffer to be
* changed.
*/
int
file_ff_differs(buf_T *buf, int ignore_empty)
{
/* In a buffer that was never loaded the options are not valid. */
if (buf->b_flags & BF_NEVERLOADED)
return FALSE;
if (ignore_empty
&& (buf->b_flags & BF_NEW)
&& buf->b_ml.ml_line_count == 1
&& *ml_get_buf(buf, (linenr_T)1, FALSE) == NUL)
return FALSE;
if (buf->b_start_ffc != *buf->b_p_ff)
return TRUE;
if ((buf->b_p_bin || !buf->b_p_fixeol) && buf->b_start_eol != buf->b_p_eol)
return TRUE;
#ifdef FEAT_MBYTE
if (!buf->b_p_bin && buf->b_start_bomb != buf->b_p_bomb)
return TRUE;
if (buf->b_start_fenc == NULL)
return (*buf->b_p_fenc != NUL);
return (STRCMP(buf->b_start_fenc, buf->b_p_fenc) != 0);
#else
return FALSE;
#endif
}
/*
* return OK if "p" is a valid fileformat name, FAIL otherwise.
*/
int
check_ff_value(char_u *p)
{
return check_opt_strings(p, p_ff_values, FALSE);
}
/*
* Return the effective shiftwidth value for buffer "buf", using the
* 'tabstop' value when 'shiftwidth' is zero.
*/
long
get_sw_value(buf_T *buf)
{
return buf->b_p_sw ? buf->b_p_sw : buf->b_p_ts;
}
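/* Example: with 'shiftwidth' 0 and 'tabstop' 8 in "buf", this returns 8. */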
/*
* Return the effective softtabstop value for the current buffer, using the
* 'tabstop' value when 'softtabstop' is negative.
*/
long
get_sts_value(void)
{
return curbuf->b_p_sts < 0 ? get_sw_value(curbuf) : curbuf->b_p_sts;
}
/*
* Check matchpairs option for "*initc".
* If there is a match set "*initc" to the matching character and "*findc" to
* the opposite character. Set "*backwards" to the direction.
* When "switchit" is TRUE swap the direction.
*/
void
find_mps_values(
int *initc,
int *findc,
int *backwards,
int switchit)
{
char_u *ptr;
ptr = curbuf->b_p_mps;
while (*ptr != NUL)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
char_u *prev;
if (mb_ptr2char(ptr) == *initc)
{
if (switchit)
{
*findc = *initc;
*initc = mb_ptr2char(ptr + mb_ptr2len(ptr) + 1);
*backwards = TRUE;
}
else
{
*findc = mb_ptr2char(ptr + mb_ptr2len(ptr) + 1);
*backwards = FALSE;
}
return;
}
prev = ptr;
ptr += mb_ptr2len(ptr) + 1;
if (mb_ptr2char(ptr) == *initc)
{
if (switchit)
{
*findc = *initc;
*initc = mb_ptr2char(prev);
*backwards = FALSE;
}
else
{
*findc = mb_ptr2char(prev);
*backwards = TRUE;
}
return;
}
ptr += mb_ptr2len(ptr);
}
else
#endif
{
if (*ptr == *initc)
{
if (switchit)
{
*backwards = TRUE;
*findc = *initc;
*initc = ptr[2];
}
else
{
*backwards = FALSE;
*findc = ptr[2];
}
return;
}
ptr += 2;
if (*ptr == *initc)
{
if (switchit)
{
*backwards = FALSE;
*findc = *initc;
*initc = ptr[-2];
}
else
{
*backwards = TRUE;
*findc = ptr[-2];
}
return;
}
++ptr;
}
if (*ptr == ',')
++ptr;
}
}
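/*
 * 'matchpairs' has the form "(:),{:},[:]": two characters separated by a
 * colon, pairs separated by commas. E.g. with *initc == '}' this sets
 * *findc to '{' and *backwards to TRUE (or the reverse when "switchit"
 * is given).
 */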
#if defined(FEAT_LINEBREAK) || defined(PROTO)
/*
* This is called when 'breakindentopt' is changed and when a window is
* initialized.
*/
static int
briopt_check(win_T *wp)
{
char_u *p;
int bri_shift = 0;
long bri_min = 20;
int bri_sbr = FALSE;
p = wp->w_p_briopt;
while (*p != NUL)
{
if (STRNCMP(p, "shift:", 6) == 0
&& ((p[6] == '-' && VIM_ISDIGIT(p[7])) || VIM_ISDIGIT(p[6])))
{
p += 6;
bri_shift = getdigits(&p);
}
else if (STRNCMP(p, "min:", 4) == 0 && VIM_ISDIGIT(p[4]))
{
p += 4;
bri_min = getdigits(&p);
}
else if (STRNCMP(p, "sbr", 3) == 0)
{
p += 3;
bri_sbr = TRUE;
}
if (*p != ',' && *p != NUL)
return FAIL;
if (*p == ',')
++p;
}
wp->w_p_brishift = bri_shift;
wp->w_p_brimin = bri_min;
wp->w_p_brisbr = bri_sbr;
return OK;
}
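/*
 * Example: ":setlocal breakindentopt=shift:2,min:40,sbr" results in
 * w_p_brishift == 2, w_p_brimin == 40 and w_p_brisbr == TRUE.
 */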
#endif
/*
* Get the local or global value of 'backupcopy'.
*/
unsigned int
get_bkc_value(buf_T *buf)
{
return buf->b_bkc_flags ? buf->b_bkc_flags : bkc_flags;
}
#if defined(FEAT_SIGNS) || defined(PROTO)
/*
* Return TRUE when window "wp" has a column to draw signs in.
*/
int
signcolumn_on(win_T *wp)
{
if (*wp->w_p_scl == 'n')
return FALSE;
if (*wp->w_p_scl == 'y')
return TRUE;
return (wp->w_buffer->b_signlist != NULL
# ifdef FEAT_NETBEANS_INTG
|| wp->w_buffer->b_has_sign_column
# endif
);
}
#endif
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Get window or buffer local options.
*/
dict_T *
get_winbuf_options(int bufopt)
{
dict_T *d;
int opt_idx;
d = dict_alloc();
if (d == NULL)
return NULL;
for (opt_idx = 0; !istermoption(&options[opt_idx]); opt_idx++)
{
struct vimoption *opt = &options[opt_idx];
if ((bufopt && (opt->indir & PV_BUF))
|| (!bufopt && (opt->indir & PV_WIN)))
{
char_u *varp = get_varp(opt);
if (varp != NULL)
{
if (opt->flags & P_STRING)
dict_add_nr_str(d, opt->fullname, 0L, *(char_u **)varp);
else if (opt->flags & P_NUM)
dict_add_nr_str(d, opt->fullname, *(long *)varp, NULL);
else
dict_add_nr_str(d, opt->fullname, *(int *)varp, NULL);
}
}
}
return d;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_4900_0 |
crossvul-cpp_data_bad_1399_0 | /* BGP attributes management routines.
* Copyright (C) 1996, 97, 98, 1999 Kunihiro Ishiguro
*
* This file is part of GNU Zebra.
*
* GNU Zebra is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* GNU Zebra is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <zebra.h>
#include "linklist.h"
#include "prefix.h"
#include "memory.h"
#include "vector.h"
#include "stream.h"
#include "log.h"
#include "hash.h"
#include "jhash.h"
#include "queue.h"
#include "table.h"
#include "filter.h"
#include "command.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_aspath.h"
#include "bgpd/bgp_community.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_label.h"
#include "bgpd/bgp_packet.h"
#include "bgpd/bgp_ecommunity.h"
#include "bgpd/bgp_lcommunity.h"
#include "bgpd/bgp_updgrp.h"
#include "bgpd/bgp_encap_types.h"
#if ENABLE_BGP_VNC
#include "bgpd/rfapi/bgp_rfapi_cfg.h"
#include "bgp_encap_types.h"
#include "bgp_vnc_types.h"
#endif
#include "bgp_encap_types.h"
#include "bgp_evpn.h"
#include "bgp_flowspec_private.h"
/* Attribute strings for logging. */
static const struct message attr_str[] = {
{BGP_ATTR_ORIGIN, "ORIGIN"},
{BGP_ATTR_AS_PATH, "AS_PATH"},
{BGP_ATTR_NEXT_HOP, "NEXT_HOP"},
{BGP_ATTR_MULTI_EXIT_DISC, "MULTI_EXIT_DISC"},
{BGP_ATTR_LOCAL_PREF, "LOCAL_PREF"},
{BGP_ATTR_ATOMIC_AGGREGATE, "ATOMIC_AGGREGATE"},
{BGP_ATTR_AGGREGATOR, "AGGREGATOR"},
{BGP_ATTR_COMMUNITIES, "COMMUNITY"},
{BGP_ATTR_ORIGINATOR_ID, "ORIGINATOR_ID"},
{BGP_ATTR_CLUSTER_LIST, "CLUSTER_LIST"},
{BGP_ATTR_DPA, "DPA"},
{BGP_ATTR_ADVERTISER, "ADVERTISER"},
{BGP_ATTR_RCID_PATH, "RCID_PATH"},
{BGP_ATTR_MP_REACH_NLRI, "MP_REACH_NLRI"},
{BGP_ATTR_MP_UNREACH_NLRI, "MP_UNREACH_NLRI"},
{BGP_ATTR_EXT_COMMUNITIES, "EXT_COMMUNITIES"},
{BGP_ATTR_AS4_PATH, "AS4_PATH"},
{BGP_ATTR_AS4_AGGREGATOR, "AS4_AGGREGATOR"},
{BGP_ATTR_AS_PATHLIMIT, "AS_PATHLIMIT"},
{BGP_ATTR_PMSI_TUNNEL, "PMSI_TUNNEL_ATTRIBUTE"},
{BGP_ATTR_ENCAP, "ENCAP"},
#if ENABLE_BGP_VNC
{BGP_ATTR_VNC, "VNC"},
#endif
{BGP_ATTR_LARGE_COMMUNITIES, "LARGE_COMMUNITY"},
{BGP_ATTR_PREFIX_SID, "PREFIX_SID"},
{0}};
static const struct message attr_flag_str[] = {
{BGP_ATTR_FLAG_OPTIONAL, "Optional"},
{BGP_ATTR_FLAG_TRANS, "Transitive"},
{BGP_ATTR_FLAG_PARTIAL, "Partial"},
/* bgp_attr_flags_diagnose() relies on this bit being last in
this list */
{BGP_ATTR_FLAG_EXTLEN, "Extended Length"},
{0}};
static struct hash *cluster_hash;
static void *cluster_hash_alloc(void *p)
{
const struct cluster_list *val = (const struct cluster_list *)p;
struct cluster_list *cluster;
cluster = XMALLOC(MTYPE_CLUSTER, sizeof(struct cluster_list));
cluster->length = val->length;
if (cluster->length) {
cluster->list = XMALLOC(MTYPE_CLUSTER_VAL, val->length);
memcpy(cluster->list, val->list, val->length);
} else
cluster->list = NULL;
cluster->refcnt = 0;
return cluster;
}
/* Cluster list related functions. */
static struct cluster_list *cluster_parse(struct in_addr *pnt, int length)
{
struct cluster_list tmp;
struct cluster_list *cluster;
tmp.length = length;
tmp.list = pnt;
cluster = hash_get(cluster_hash, &tmp, cluster_hash_alloc);
cluster->refcnt++;
return cluster;
}
int cluster_loop_check(struct cluster_list *cluster, struct in_addr originator)
{
int i;
for (i = 0; i < cluster->length / 4; i++)
if (cluster->list[i].s_addr == originator.s_addr)
return 1;
return 0;
}
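/*
 * Note: a CLUSTER_LIST is a sequence of 4-byte router IDs, so the
 * attribute length is assumed here to be a multiple of 4; any trailing
 * partial entry is ignored by the integer division above.
 */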
static unsigned int cluster_hash_key_make(void *p)
{
const struct cluster_list *cluster = p;
return jhash(cluster->list, cluster->length, 0);
}
static bool cluster_hash_cmp(const void *p1, const void *p2)
{
const struct cluster_list *cluster1 = p1;
const struct cluster_list *cluster2 = p2;
return (cluster1->length == cluster2->length
&& memcmp(cluster1->list, cluster2->list, cluster1->length)
== 0);
}
static void cluster_free(struct cluster_list *cluster)
{
if (cluster->list)
XFREE(MTYPE_CLUSTER_VAL, cluster->list);
XFREE(MTYPE_CLUSTER, cluster);
}
static struct cluster_list *cluster_intern(struct cluster_list *cluster)
{
struct cluster_list *find;
find = hash_get(cluster_hash, cluster, cluster_hash_alloc);
find->refcnt++;
return find;
}
void cluster_unintern(struct cluster_list *cluster)
{
if (cluster->refcnt)
cluster->refcnt--;
if (cluster->refcnt == 0) {
hash_release(cluster_hash, cluster);
cluster_free(cluster);
}
}
static void cluster_init(void)
{
cluster_hash = hash_create(cluster_hash_key_make, cluster_hash_cmp,
"BGP Cluster");
}
static void cluster_finish(void)
{
hash_clean(cluster_hash, (void (*)(void *))cluster_free);
hash_free(cluster_hash);
cluster_hash = NULL;
}
static struct hash *encap_hash = NULL;
#if ENABLE_BGP_VNC
static struct hash *vnc_hash = NULL;
#endif
struct bgp_attr_encap_subtlv *encap_tlv_dup(struct bgp_attr_encap_subtlv *orig)
{
struct bgp_attr_encap_subtlv *new;
struct bgp_attr_encap_subtlv *tail;
struct bgp_attr_encap_subtlv *p;
for (p = orig, tail = new = NULL; p; p = p->next) {
int size = sizeof(struct bgp_attr_encap_subtlv) + p->length;
if (tail) {
tail->next = XCALLOC(MTYPE_ENCAP_TLV, size);
tail = tail->next;
} else {
tail = new = XCALLOC(MTYPE_ENCAP_TLV, size);
}
assert(tail);
memcpy(tail, p, size);
tail->next = NULL;
}
return new;
}
static void encap_free(struct bgp_attr_encap_subtlv *p)
{
struct bgp_attr_encap_subtlv *next;
while (p) {
next = p->next;
p->next = NULL;
XFREE(MTYPE_ENCAP_TLV, p);
p = next;
}
}
void bgp_attr_flush_encap(struct attr *attr)
{
if (!attr)
return;
if (attr->encap_subtlvs) {
encap_free(attr->encap_subtlvs);
attr->encap_subtlvs = NULL;
}
#if ENABLE_BGP_VNC
if (attr->vnc_subtlvs) {
encap_free(attr->vnc_subtlvs);
attr->vnc_subtlvs = NULL;
}
#endif
}
/*
* Compare encap sub-tlv chains
*
* 1 = equivalent
* 0 = not equivalent
*
* This algorithm could be made faster if needed
*/
static int encap_same(const struct bgp_attr_encap_subtlv *h1,
const struct bgp_attr_encap_subtlv *h2)
{
const struct bgp_attr_encap_subtlv *p;
const struct bgp_attr_encap_subtlv *q;
if (h1 == h2)
return 1;
if (h1 == NULL || h2 == NULL)
return 0;
for (p = h1; p; p = p->next) {
for (q = h2; q; q = q->next) {
if ((p->type == q->type) && (p->length == q->length)
&& !memcmp(p->value, q->value, p->length)) {
break;
}
}
if (!q)
return 0;
}
for (p = h2; p; p = p->next) {
for (q = h1; q; q = q->next) {
if ((p->type == q->type) && (p->length == q->length)
&& !memcmp(p->value, q->value, p->length)) {
break;
}
}
if (!q)
return 0;
}
return 1;
}
static void *encap_hash_alloc(void *p)
{
/* Encap structure is already allocated. */
return p;
}
typedef enum {
ENCAP_SUBTLV_TYPE,
#if ENABLE_BGP_VNC
VNC_SUBTLV_TYPE
#endif
} encap_subtlv_type;
static struct bgp_attr_encap_subtlv *
encap_intern(struct bgp_attr_encap_subtlv *encap, encap_subtlv_type type)
{
struct bgp_attr_encap_subtlv *find;
struct hash *hash = encap_hash;
#if ENABLE_BGP_VNC
if (type == VNC_SUBTLV_TYPE)
hash = vnc_hash;
#endif
find = hash_get(hash, encap, encap_hash_alloc);
if (find != encap)
encap_free(encap);
find->refcnt++;
return find;
}
static void encap_unintern(struct bgp_attr_encap_subtlv **encapp,
encap_subtlv_type type)
{
struct bgp_attr_encap_subtlv *encap = *encapp;
if (encap->refcnt)
encap->refcnt--;
if (encap->refcnt == 0) {
struct hash *hash = encap_hash;
#if ENABLE_BGP_VNC
if (type == VNC_SUBTLV_TYPE)
hash = vnc_hash;
#endif
hash_release(hash, encap);
encap_free(encap);
*encapp = NULL;
}
}
static unsigned int encap_hash_key_make(void *p)
{
const struct bgp_attr_encap_subtlv *encap = p;
return jhash(encap->value, encap->length, 0);
}
static bool encap_hash_cmp(const void *p1, const void *p2)
{
return encap_same((const struct bgp_attr_encap_subtlv *)p1,
(const struct bgp_attr_encap_subtlv *)p2);
}
static void encap_init(void)
{
encap_hash = hash_create(encap_hash_key_make, encap_hash_cmp,
"BGP Encap Hash");
#if ENABLE_BGP_VNC
vnc_hash = hash_create(encap_hash_key_make, encap_hash_cmp,
"BGP VNC Hash");
#endif
}
static void encap_finish(void)
{
hash_clean(encap_hash, (void (*)(void *))encap_free);
hash_free(encap_hash);
encap_hash = NULL;
#if ENABLE_BGP_VNC
hash_clean(vnc_hash, (void (*)(void *))encap_free);
hash_free(vnc_hash);
vnc_hash = NULL;
#endif
}
static bool overlay_index_same(const struct attr *a1, const struct attr *a2)
{
if (!a1 && a2)
return false;
if (!a2 && a1)
return false;
if (!a1 && !a2)
return true;
return !memcmp(&(a1->evpn_overlay), &(a2->evpn_overlay),
sizeof(struct overlay_index));
}
/* Unknown transit attribute. */
static struct hash *transit_hash;
static void transit_free(struct transit *transit)
{
if (transit->val)
XFREE(MTYPE_TRANSIT_VAL, transit->val);
XFREE(MTYPE_TRANSIT, transit);
}
static void *transit_hash_alloc(void *p)
{
/* Transit structure is already allocated. */
return p;
}
static struct transit *transit_intern(struct transit *transit)
{
struct transit *find;
find = hash_get(transit_hash, transit, transit_hash_alloc);
if (find != transit)
transit_free(transit);
find->refcnt++;
return find;
}
void transit_unintern(struct transit *transit)
{
if (transit->refcnt)
transit->refcnt--;
if (transit->refcnt == 0) {
hash_release(transit_hash, transit);
transit_free(transit);
}
}
static unsigned int transit_hash_key_make(void *p)
{
const struct transit *transit = p;
return jhash(transit->val, transit->length, 0);
}
static bool transit_hash_cmp(const void *p1, const void *p2)
{
const struct transit *transit1 = p1;
const struct transit *transit2 = p2;
return (transit1->length == transit2->length
&& memcmp(transit1->val, transit2->val, transit1->length) == 0);
}
static void transit_init(void)
{
transit_hash = hash_create(transit_hash_key_make, transit_hash_cmp,
"BGP Transit Hash");
}
static void transit_finish(void)
{
hash_clean(transit_hash, (void (*)(void *))transit_free);
hash_free(transit_hash);
transit_hash = NULL;
}
/* Attribute hash routines. */
static struct hash *attrhash;
/* Shallow copy of an attribute.
* The referenced objects (aspath, communities, cluster list, ...) are
* shared with "orig", not duplicated; callers must manage their
* reference counts (see bgp_attr_intern()/bgp_attr_undup()).
*/
void bgp_attr_dup(struct attr *new, struct attr *orig)
{
*new = *orig;
}
unsigned long int attr_count(void)
{
return attrhash->count;
}
unsigned long int attr_unknown_count(void)
{
return transit_hash->count;
}
unsigned int attrhash_key_make(void *p)
{
const struct attr *attr = (struct attr *)p;
uint32_t key = 0;
#define MIX(val) key = jhash_1word(val, key)
#define MIX3(a, b, c) key = jhash_3words((a), (b), (c), key)
MIX3(attr->origin, attr->nexthop.s_addr, attr->med);
MIX3(attr->local_pref, attr->aggregator_as,
attr->aggregator_addr.s_addr);
MIX3(attr->weight, attr->mp_nexthop_global_in.s_addr,
attr->originator_id.s_addr);
MIX3(attr->tag, attr->label, attr->label_index);
if (attr->aspath)
MIX(aspath_key_make(attr->aspath));
if (attr->community)
MIX(community_hash_make(attr->community));
if (attr->lcommunity)
MIX(lcommunity_hash_make(attr->lcommunity));
if (attr->ecommunity)
MIX(ecommunity_hash_make(attr->ecommunity));
if (attr->cluster)
MIX(cluster_hash_key_make(attr->cluster));
if (attr->transit)
MIX(transit_hash_key_make(attr->transit));
if (attr->encap_subtlvs)
MIX(encap_hash_key_make(attr->encap_subtlvs));
#if ENABLE_BGP_VNC
if (attr->vnc_subtlvs)
MIX(encap_hash_key_make(attr->vnc_subtlvs));
#endif
MIX(attr->mp_nexthop_len);
key = jhash(attr->mp_nexthop_global.s6_addr, IPV6_MAX_BYTELEN, key);
key = jhash(attr->mp_nexthop_local.s6_addr, IPV6_MAX_BYTELEN, key);
MIX(attr->nh_ifindex);
MIX(attr->nh_lla_ifindex);
return key;
}
bool attrhash_cmp(const void *p1, const void *p2)
{
const struct attr *attr1 = p1;
const struct attr *attr2 = p2;
if (attr1->flag == attr2->flag && attr1->origin == attr2->origin
&& attr1->nexthop.s_addr == attr2->nexthop.s_addr
&& attr1->aspath == attr2->aspath
&& attr1->community == attr2->community && attr1->med == attr2->med
&& attr1->local_pref == attr2->local_pref
&& attr1->rmap_change_flags == attr2->rmap_change_flags) {
if (attr1->aggregator_as == attr2->aggregator_as
&& attr1->aggregator_addr.s_addr
== attr2->aggregator_addr.s_addr
&& attr1->weight == attr2->weight
&& attr1->tag == attr2->tag
&& attr1->label_index == attr2->label_index
&& attr1->mp_nexthop_len == attr2->mp_nexthop_len
&& attr1->ecommunity == attr2->ecommunity
&& attr1->lcommunity == attr2->lcommunity
&& attr1->cluster == attr2->cluster
&& attr1->transit == attr2->transit
&& (attr1->encap_tunneltype == attr2->encap_tunneltype)
&& encap_same(attr1->encap_subtlvs, attr2->encap_subtlvs)
#if ENABLE_BGP_VNC
&& encap_same(attr1->vnc_subtlvs, attr2->vnc_subtlvs)
#endif
&& IPV6_ADDR_SAME(&attr1->mp_nexthop_global,
&attr2->mp_nexthop_global)
&& IPV6_ADDR_SAME(&attr1->mp_nexthop_local,
&attr2->mp_nexthop_local)
&& IPV4_ADDR_SAME(&attr1->mp_nexthop_global_in,
&attr2->mp_nexthop_global_in)
&& IPV4_ADDR_SAME(&attr1->originator_id,
&attr2->originator_id)
&& overlay_index_same(attr1, attr2)
&& attr1->nh_ifindex == attr2->nh_ifindex
&& attr1->nh_lla_ifindex == attr2->nh_lla_ifindex)
return true;
}
return false;
}
static void attrhash_init(void)
{
attrhash =
hash_create(attrhash_key_make, attrhash_cmp, "BGP Attributes");
}
/*
* special for hash_clean below
*/
static void attr_vfree(void *attr)
{
XFREE(MTYPE_ATTR, attr);
}
static void attrhash_finish(void)
{
hash_clean(attrhash, attr_vfree);
hash_free(attrhash);
attrhash = NULL;
}
static void attr_show_all_iterator(struct hash_backet *backet, struct vty *vty)
{
struct attr *attr = backet->data;
vty_out(vty, "attr[%ld] nexthop %s\n", attr->refcnt,
inet_ntoa(attr->nexthop));
vty_out(vty, "\tflags: %" PRIu64 " med: %u local_pref: %u origin: %u weight: %u label: %u\n",
attr->flag, attr->med, attr->local_pref, attr->origin,
attr->weight, attr->label);
}
void attr_show_all(struct vty *vty)
{
hash_iterate(attrhash, (void (*)(struct hash_backet *,
void *))attr_show_all_iterator,
vty);
}
static void *bgp_attr_hash_alloc(void *p)
{
struct attr *val = (struct attr *)p;
struct attr *attr;
attr = XMALLOC(MTYPE_ATTR, sizeof(struct attr));
*attr = *val;
if (val->encap_subtlvs) {
val->encap_subtlvs = NULL;
}
#if ENABLE_BGP_VNC
if (val->vnc_subtlvs) {
val->vnc_subtlvs = NULL;
}
#endif
attr->refcnt = 0;
return attr;
}
/* Intern the argument attribute. */
struct attr *bgp_attr_intern(struct attr *attr)
{
struct attr *find;
/* Intern referenced structures. */
if (attr->aspath) {
if (!attr->aspath->refcnt)
attr->aspath = aspath_intern(attr->aspath);
else
attr->aspath->refcnt++;
}
if (attr->community) {
if (!attr->community->refcnt)
attr->community = community_intern(attr->community);
else
attr->community->refcnt++;
}
if (attr->ecommunity) {
if (!attr->ecommunity->refcnt)
attr->ecommunity = ecommunity_intern(attr->ecommunity);
else
attr->ecommunity->refcnt++;
}
if (attr->lcommunity) {
if (!attr->lcommunity->refcnt)
attr->lcommunity = lcommunity_intern(attr->lcommunity);
else
attr->lcommunity->refcnt++;
}
if (attr->cluster) {
if (!attr->cluster->refcnt)
attr->cluster = cluster_intern(attr->cluster);
else
attr->cluster->refcnt++;
}
if (attr->transit) {
if (!attr->transit->refcnt)
attr->transit = transit_intern(attr->transit);
else
attr->transit->refcnt++;
}
if (attr->encap_subtlvs) {
if (!attr->encap_subtlvs->refcnt)
attr->encap_subtlvs = encap_intern(attr->encap_subtlvs,
ENCAP_SUBTLV_TYPE);
else
attr->encap_subtlvs->refcnt++;
}
#if ENABLE_BGP_VNC
if (attr->vnc_subtlvs) {
if (!attr->vnc_subtlvs->refcnt)
attr->vnc_subtlvs = encap_intern(attr->vnc_subtlvs,
VNC_SUBTLV_TYPE);
else
attr->vnc_subtlvs->refcnt++;
}
#endif
/* At this point, attr only contains intern'd pointers. That means
* if we find it in attrhash, it has all the same pointers and we
* correctly updated the refcounts on these.
* If we don't find it, we need to allocate a new one, because in all
* cases this returns a new reference to a hashed attr; the input just
* wasn't in the hash yet. */
find = (struct attr *)hash_get(attrhash, attr, bgp_attr_hash_alloc);
find->refcnt++;
return find;
}
/* Make network statement's attribute. */
struct attr *bgp_attr_default_set(struct attr *attr, uint8_t origin)
{
memset(attr, 0, sizeof(struct attr));
attr->origin = origin;
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGIN);
attr->aspath = aspath_empty();
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH);
attr->weight = BGP_ATTR_DEFAULT_WEIGHT;
attr->tag = 0;
attr->label_index = BGP_INVALID_LABEL_INDEX;
attr->label = MPLS_INVALID_LABEL;
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
attr->mp_nexthop_len = IPV6_MAX_BYTELEN;
return attr;
}
/* Create the attributes for an aggregate */
struct attr *bgp_attr_aggregate_intern(struct bgp *bgp, uint8_t origin,
struct aspath *aspath,
struct community *community,
struct ecommunity *ecommunity,
struct lcommunity *lcommunity,
int as_set, uint8_t atomic_aggregate)
{
struct attr attr;
struct attr *new;
memset(&attr, 0, sizeof(struct attr));
/* Origin attribute. */
attr.origin = origin;
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGIN);
/* AS path attribute. */
if (aspath)
attr.aspath = aspath_intern(aspath);
else
attr.aspath = aspath_empty();
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH);
/* Next hop attribute. */
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
if (community) {
uint32_t gshut = COMMUNITY_GSHUT;
/* If we are not shutting down ourselves and we are
* aggregating a route that contains the GSHUT community we
* need to remove that community when creating the aggregate */
if (!bgp_flag_check(bgp, BGP_FLAG_GRACEFUL_SHUTDOWN)
&& community_include(community, gshut)) {
community_del_val(community, &gshut);
}
attr.community = community;
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);
}
if (ecommunity) {
attr.ecommunity = ecommunity;
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);
}
if (lcommunity) {
attr.lcommunity = lcommunity;
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES);
}
if (bgp_flag_check(bgp, BGP_FLAG_GRACEFUL_SHUTDOWN)) {
bgp_attr_add_gshut_community(&attr);
}
attr.label_index = BGP_INVALID_LABEL_INDEX;
attr.label = MPLS_INVALID_LABEL;
attr.weight = BGP_ATTR_DEFAULT_WEIGHT;
attr.mp_nexthop_len = IPV6_MAX_BYTELEN;
if (!as_set || atomic_aggregate)
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE);
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR);
if (CHECK_FLAG(bgp->config, BGP_CONFIG_CONFEDERATION))
attr.aggregator_as = bgp->confed_id;
else
attr.aggregator_as = bgp->as;
attr.aggregator_addr = bgp->router_id;
new = bgp_attr_intern(&attr);
aspath_unintern(&new->aspath);
return new;
}
/* Unintern just the sub-components of the attr, but not the attr */
void bgp_attr_unintern_sub(struct attr *attr)
{
/* aspath refcount should be decremented. */
if (attr->aspath)
aspath_unintern(&attr->aspath);
UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS_PATH));
if (attr->community)
community_unintern(&attr->community);
UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES));
if (attr->ecommunity)
ecommunity_unintern(&attr->ecommunity);
UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES));
if (attr->lcommunity)
lcommunity_unintern(&attr->lcommunity);
UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES));
if (attr->cluster)
cluster_unintern(attr->cluster);
UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST));
if (attr->transit)
transit_unintern(attr->transit);
if (attr->encap_subtlvs)
encap_unintern(&attr->encap_subtlvs, ENCAP_SUBTLV_TYPE);
#if ENABLE_BGP_VNC
if (attr->vnc_subtlvs)
encap_unintern(&attr->vnc_subtlvs, VNC_SUBTLV_TYPE);
#endif
}
/*
* We have some show commands that let you experimentally
* apply a route-map. When we apply the route-map
* we are resetting values but not saving them for
* posterity via intern'ing (because route-maps don't
* do that). At this point in time we need
* to compare the new attr to the old and, if the
* route-map has changed it, we need to, as Snoop Dogg says,
* drop it like it's hot.
*/
void bgp_attr_undup(struct attr *new, struct attr *old)
{
if (new->aspath != old->aspath)
aspath_free(new->aspath);
if (new->community != old->community)
community_free(&new->community);
if (new->ecommunity != old->ecommunity)
ecommunity_free(&new->ecommunity);
if (new->lcommunity != old->lcommunity)
lcommunity_free(&new->lcommunity);
}
/* Free bgp attribute and aspath. */
void bgp_attr_unintern(struct attr **pattr)
{
struct attr *attr = *pattr;
struct attr *ret;
struct attr tmp;
/* Decrement attribute reference. */
attr->refcnt--;
tmp = *attr;
/* If reference becomes zero then free attribute object. */
if (attr->refcnt == 0) {
ret = hash_release(attrhash, attr);
assert(ret != NULL);
XFREE(MTYPE_ATTR, attr);
*pattr = NULL;
}
bgp_attr_unintern_sub(&tmp);
}
void bgp_attr_flush(struct attr *attr)
{
if (attr->aspath && !attr->aspath->refcnt) {
aspath_free(attr->aspath);
attr->aspath = NULL;
}
if (attr->community && !attr->community->refcnt)
community_free(&attr->community);
if (attr->ecommunity && !attr->ecommunity->refcnt)
ecommunity_free(&attr->ecommunity);
if (attr->lcommunity && !attr->lcommunity->refcnt)
lcommunity_free(&attr->lcommunity);
if (attr->cluster && !attr->cluster->refcnt) {
cluster_free(attr->cluster);
attr->cluster = NULL;
}
if (attr->transit && !attr->transit->refcnt) {
transit_free(attr->transit);
attr->transit = NULL;
}
if (attr->encap_subtlvs && !attr->encap_subtlvs->refcnt) {
encap_free(attr->encap_subtlvs);
attr->encap_subtlvs = NULL;
}
#if ENABLE_BGP_VNC
if (attr->vnc_subtlvs && !attr->vnc_subtlvs->refcnt) {
encap_free(attr->vnc_subtlvs);
attr->vnc_subtlvs = NULL;
}
#endif
}
/* Implement draft-scudder-idr-optional-transitive behaviour and
* avoid resetting sessions for malformed attributes which are
* partial/optional, and hence where the error was likely not
* introduced by the sending neighbour.
*/
static bgp_attr_parse_ret_t
bgp_attr_malformed(struct bgp_attr_parser_args *args, uint8_t subcode,
bgp_size_t length)
{
struct peer *const peer = args->peer;
const uint8_t flags = args->flags;
/* startp and length must be special-cased, as whether or not to
* send the attribute data with the NOTIFY depends on the error,
* the caller therefore signals this with the separate length argument
*/
uint8_t *notify_datap = (length > 0 ? args->startp : NULL);
/* Only relax error handling for eBGP peers */
if (peer->sort != BGP_PEER_EBGP) {
bgp_notify_send_with_data(peer, BGP_NOTIFY_UPDATE_ERR, subcode,
notify_datap, length);
return BGP_ATTR_PARSE_ERROR;
}
/* Adjust the stream getp to the end of the attribute, in case we can
* still proceed but the caller hasn't read all the attribute.
*/
stream_set_getp(BGP_INPUT(peer),
(args->startp - STREAM_DATA(BGP_INPUT(peer)))
+ args->total);
switch (args->type) {
/* where an attribute is relatively inconsequential, e.g. it does not
* affect route selection, and can be safely ignored, then any such
* attributes which are malformed should just be ignored and the route
* processed as normal.
*/
case BGP_ATTR_AS4_AGGREGATOR:
case BGP_ATTR_AGGREGATOR:
case BGP_ATTR_ATOMIC_AGGREGATE:
return BGP_ATTR_PARSE_PROCEED;
/* Core attributes, particularly ones which may influence route
* selection, should always cause session resets
*/
case BGP_ATTR_ORIGIN:
case BGP_ATTR_AS_PATH:
case BGP_ATTR_NEXT_HOP:
case BGP_ATTR_MULTI_EXIT_DISC:
case BGP_ATTR_LOCAL_PREF:
case BGP_ATTR_COMMUNITIES:
case BGP_ATTR_ORIGINATOR_ID:
case BGP_ATTR_CLUSTER_LIST:
case BGP_ATTR_MP_REACH_NLRI:
case BGP_ATTR_MP_UNREACH_NLRI:
case BGP_ATTR_EXT_COMMUNITIES:
bgp_notify_send_with_data(peer, BGP_NOTIFY_UPDATE_ERR, subcode,
notify_datap, length);
return BGP_ATTR_PARSE_ERROR;
}
/* Partial optional attributes that are malformed should not cause
* the whole session to be reset. Instead treat it as a withdrawal
* of the routes, if possible.
*/
if (CHECK_FLAG(flags, BGP_ATTR_FLAG_TRANS)
&& CHECK_FLAG(flags, BGP_ATTR_FLAG_OPTIONAL)
&& CHECK_FLAG(flags, BGP_ATTR_FLAG_PARTIAL))
return BGP_ATTR_PARSE_WITHDRAW;
/* default to reset */
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
}
/* Find out what is wrong with the path attribute flag bits and log the error.
"Flag bits" here stand for Optional, Transitive and Partial, but not for
Extended Length. Checking O/T/P bits at once implies, that the attribute
being diagnosed is defined by RFC as either a "well-known" or an "optional,
non-transitive" attribute. */
static void
bgp_attr_flags_diagnose(struct bgp_attr_parser_args *args,
uint8_t desired_flags /* how RFC says it must be */
)
{
uint8_t seen = 0, i;
uint8_t real_flags = args->flags;
const uint8_t attr_code = args->type;
desired_flags &= ~BGP_ATTR_FLAG_EXTLEN;
real_flags &= ~BGP_ATTR_FLAG_EXTLEN;
for (i = 0; i <= 2; i++) /* O,T,P, but not E */
if (CHECK_FLAG(desired_flags, attr_flag_str[i].key)
!= CHECK_FLAG(real_flags, attr_flag_str[i].key)) {
flog_err(EC_BGP_ATTR_FLAG,
"%s attribute must%s be flagged as \"%s\"",
lookup_msg(attr_str, attr_code, NULL),
CHECK_FLAG(desired_flags, attr_flag_str[i].key)
? ""
: " not",
attr_flag_str[i].str);
seen = 1;
}
if (!seen) {
zlog_debug(
"Strange, %s called for attr %s, but no problem found with flags"
" (real flags 0x%x, desired 0x%x)",
__func__, lookup_msg(attr_str, attr_code, NULL),
real_flags, desired_flags);
}
}
/* Required flags for attributes. EXTLEN will be masked off when testing,
* as will PARTIAL for optional+transitive attributes.
*/
const uint8_t attr_flags_values[] = {
[BGP_ATTR_ORIGIN] = BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_AS_PATH] = BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_NEXT_HOP] = BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_MULTI_EXIT_DISC] = BGP_ATTR_FLAG_OPTIONAL,
[BGP_ATTR_LOCAL_PREF] = BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_ATOMIC_AGGREGATE] = BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_AGGREGATOR] = BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL,
[BGP_ATTR_COMMUNITIES] = BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL,
[BGP_ATTR_ORIGINATOR_ID] = BGP_ATTR_FLAG_OPTIONAL,
[BGP_ATTR_CLUSTER_LIST] = BGP_ATTR_FLAG_OPTIONAL,
[BGP_ATTR_MP_REACH_NLRI] = BGP_ATTR_FLAG_OPTIONAL,
[BGP_ATTR_MP_UNREACH_NLRI] = BGP_ATTR_FLAG_OPTIONAL,
[BGP_ATTR_EXT_COMMUNITIES] =
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_AS4_PATH] = BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_AS4_AGGREGATOR] =
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_PMSI_TUNNEL] = BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_LARGE_COMMUNITIES] =
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS,
[BGP_ATTR_PREFIX_SID] = BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS,
};
static const size_t attr_flags_values_max = array_size(attr_flags_values) - 1;
static int bgp_attr_flag_invalid(struct bgp_attr_parser_args *args)
{
uint8_t mask = BGP_ATTR_FLAG_EXTLEN;
const uint8_t flags = args->flags;
const uint8_t attr_code = args->type;
/* there may be attributes we don't know about */
if (attr_code > attr_flags_values_max)
return 0;
if (attr_flags_values[attr_code] == 0)
return 0;
/* RFC4271, "For well-known attributes, the Transitive bit MUST be set
* to
* 1."
*/
if (!CHECK_FLAG(BGP_ATTR_FLAG_OPTIONAL, flags)
&& !CHECK_FLAG(BGP_ATTR_FLAG_TRANS, flags)) {
flog_err(
EC_BGP_ATTR_FLAG,
"%s well-known attributes must have transitive flag set (%x)",
lookup_msg(attr_str, attr_code, NULL), flags);
return 1;
}
/* "For well-known attributes and for optional non-transitive
* attributes,
* the Partial bit MUST be set to 0."
*/
if (CHECK_FLAG(flags, BGP_ATTR_FLAG_PARTIAL)) {
if (!CHECK_FLAG(flags, BGP_ATTR_FLAG_OPTIONAL)) {
flog_err(EC_BGP_ATTR_FLAG,
"%s well-known attribute "
"must NOT have the partial flag set (%x)",
lookup_msg(attr_str, attr_code, NULL), flags);
return 1;
}
if (CHECK_FLAG(flags, BGP_ATTR_FLAG_OPTIONAL)
&& !CHECK_FLAG(flags, BGP_ATTR_FLAG_TRANS)) {
flog_err(EC_BGP_ATTR_FLAG,
"%s optional + transitive attribute "
"must NOT have the partial flag set (%x)",
lookup_msg(attr_str, attr_code, NULL), flags);
return 1;
}
}
/* Optional transitive attributes may go through speakers that don't
* recognise them and set the Partial bit.
*/
if (CHECK_FLAG(flags, BGP_ATTR_FLAG_OPTIONAL)
&& CHECK_FLAG(flags, BGP_ATTR_FLAG_TRANS))
SET_FLAG(mask, BGP_ATTR_FLAG_PARTIAL);
if ((flags & ~mask) == attr_flags_values[attr_code])
return 0;
bgp_attr_flags_diagnose(args, attr_flags_values[attr_code]);
return 1;
}
/* Get origin attribute of the update message. */
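/* ORIGIN is a single octet: 0 = IGP, 1 = EGP, 2 = INCOMPLETE
* (RFC 4271, sec. 5.1.1). */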
static bgp_attr_parse_ret_t bgp_attr_origin(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
/* If any recognized attribute has Attribute Length that conflicts
with the expected length (based on the attribute type code), then
the Error Subcode is set to Attribute Length Error. The Data
field contains the erroneous attribute (type, length and
value). */
if (length != 1) {
flog_err(EC_BGP_ATTR_LEN,
"Origin attribute length is not one %d", length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* Fetch origin attribute. */
attr->origin = stream_getc(BGP_INPUT(peer));
/* If the ORIGIN attribute has an undefined value, then the Error
Subcode is set to Invalid Origin Attribute. The Data field
contains the unrecognized attribute (type, length and value). */
if ((attr->origin != BGP_ORIGIN_IGP) && (attr->origin != BGP_ORIGIN_EGP)
&& (attr->origin != BGP_ORIGIN_INCOMPLETE)) {
flog_err(EC_BGP_ATTR_ORIGIN,
"Origin attribute value is invalid %d", attr->origin);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_INVAL_ORIGIN,
args->total);
}
/* Set origin attribute flag. */
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGIN);
return BGP_ATTR_PARSE_PROCEED;
}
/* Parse AS path information. This function is wrapper of
aspath_parse. */
static int bgp_attr_aspath(struct bgp_attr_parser_args *args)
{
struct attr *const attr = args->attr;
struct peer *const peer = args->peer;
const bgp_size_t length = args->length;
/*
* peer with AS4 capability => will get 4-byte AS numbers
* otherwise => 2-byte (16-bit) AS numbers
*/
attr->aspath = aspath_parse(peer->curr, length,
CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV));
/* In case of IBGP, length will be zero. */
if (!attr->aspath) {
flog_err(EC_BGP_ATTR_MAL_AS_PATH,
"Malformed AS path from %s, length is %d", peer->host,
length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
0);
}
/* Set aspath attribute flag. */
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH);
return BGP_ATTR_PARSE_PROCEED;
}
static bgp_attr_parse_ret_t bgp_attr_aspath_check(struct peer *const peer,
struct attr *const attr)
{
/* These checks were part of bgp_attr_aspath, but with as4 we should
* only check the aspath after any synthesis with as4_path has taken
* place. Otherwise we would check AS_PATH but then use the
* synthesized path, which is not right.
* So do the checks later, i.e. here.
*/
struct aspath *aspath;
/* Confederation sanity check. */
if ((peer->sort == BGP_PEER_CONFED
&& !aspath_left_confed_check(attr->aspath))
|| (peer->sort == BGP_PEER_EBGP
&& aspath_confed_check(attr->aspath))) {
flog_err(EC_BGP_ATTR_MAL_AS_PATH, "Malformed AS path from %s",
peer->host);
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_MAL_AS_PATH);
return BGP_ATTR_PARSE_ERROR;
}
/* First AS check for EBGP. */
if (CHECK_FLAG(peer->flags, PEER_FLAG_ENFORCE_FIRST_AS)) {
if (peer->sort == BGP_PEER_EBGP
&& !aspath_firstas_check(attr->aspath, peer->as)) {
flog_err(EC_BGP_ATTR_FIRST_AS,
"%s incorrect first AS (must be %u)",
peer->host, peer->as);
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_MAL_AS_PATH);
return BGP_ATTR_PARSE_ERROR;
}
}
/* local-as prepend */
if (peer->change_local_as
&& !CHECK_FLAG(peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND)) {
aspath = aspath_dup(attr->aspath);
aspath = aspath_add_seq(aspath, peer->change_local_as);
aspath_unintern(&attr->aspath);
attr->aspath = aspath_intern(aspath);
}
return BGP_ATTR_PARSE_PROCEED;
}
/* Parse AS4 path information. This function is another wrapper of
aspath_parse. */
static int bgp_attr_as4_path(struct bgp_attr_parser_args *args,
struct aspath **as4_path)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
*as4_path = aspath_parse(peer->curr, length, 1);
/* In case of IBGP, length will be zero. */
if (!*as4_path) {
flog_err(EC_BGP_ATTR_MAL_AS_PATH,
"Malformed AS4 path from %s, length is %d", peer->host,
length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_MAL_AS_PATH,
0);
}
/* Set aspath attribute flag. */
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH);
return BGP_ATTR_PARSE_PROCEED;
}
/* Nexthop attribute. */
static bgp_attr_parse_ret_t bgp_attr_nexthop(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
in_addr_t nexthop_h, nexthop_n;
/* Check nexthop attribute length. */
if (length != 4) {
flog_err(EC_BGP_ATTR_LEN,
"Nexthop attribute length isn't four [%d]", length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* According to section 6.3 of RFC 4271, a syntactically incorrect
NEXT_HOP attribute must result in a NOTIFICATION message (this is
implemented below). At the same time, a semantically incorrect
NEXT_HOP is more likely to be just logged locally (this is
implemented somewhere else). The UPDATE message gets ignored in any
of these cases. */
nexthop_n = stream_get_ipv4(peer->curr);
nexthop_h = ntohl(nexthop_n);
if ((IPV4_NET0(nexthop_h) || IPV4_NET127(nexthop_h)
|| IPV4_CLASS_DE(nexthop_h))
&& !BGP_DEBUG(
allow_martians,
ALLOW_MARTIANS)) /* loopbacks may be used in testing */
{
char buf[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &nexthop_n, buf, INET_ADDRSTRLEN);
flog_err(EC_BGP_ATTR_MARTIAN_NH, "Martian nexthop %s", buf);
return bgp_attr_malformed(
args, BGP_NOTIFY_UPDATE_INVAL_NEXT_HOP, args->total);
}
attr->nexthop.s_addr = nexthop_n;
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
return BGP_ATTR_PARSE_PROCEED;
}
/* MED attribute. */
static bgp_attr_parse_ret_t bgp_attr_med(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
/* Length check. */
if (length != 4) {
flog_err(EC_BGP_ATTR_LEN,
"MED attribute length isn't four [%d]", length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
attr->med = stream_getl(peer->curr);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC);
return BGP_ATTR_PARSE_PROCEED;
}
/* Local preference attribute. */
static bgp_attr_parse_ret_t
bgp_attr_local_pref(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
/* Length check. */
if (length != 4) {
flog_err(EC_BGP_ATTR_LEN,
"LOCAL_PREF attribute length isn't 4 [%u]", length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* If it is contained in an UPDATE message that is received from an
external peer, then this attribute MUST be ignored by the
receiving speaker. */
if (peer->sort == BGP_PEER_EBGP) {
stream_forward_getp(peer->curr, length);
return BGP_ATTR_PARSE_PROCEED;
}
attr->local_pref = stream_getl(peer->curr);
/* Set the local-pref flag. */
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF);
return BGP_ATTR_PARSE_PROCEED;
}
/* Atomic aggregate. */
static int bgp_attr_atomic(struct bgp_attr_parser_args *args)
{
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
/* Length check. */
if (length != 0) {
flog_err(EC_BGP_ATTR_LEN,
"ATOMIC_AGGREGATE attribute length isn't 0 [%u]",
length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* Set atomic aggregate flag. */
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE);
return BGP_ATTR_PARSE_PROCEED;
}
/* Aggregator attribute */
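/* AGGREGATOR carries the last AS that aggregated plus the aggregating
* router's IPv4 address: AS (2 octets) + addr (4) = 6 octets for a
* 2-byte-AS peer, or AS (4 octets) + addr (4) = 8 octets when the AS4
* capability was negotiated (RFC 6793). */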
static int bgp_attr_aggregator(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
int wantedlen = 6;
/* peer with AS4 will send 4 Byte AS, peer without will send 2 Byte */
if (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV))
wantedlen = 8;
if (length != wantedlen) {
flog_err(EC_BGP_ATTR_LEN,
"AGGREGATOR attribute length isn't %u [%u]", wantedlen,
length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
if (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV))
attr->aggregator_as = stream_getl(peer->curr);
else
attr->aggregator_as = stream_getw(peer->curr);
attr->aggregator_addr.s_addr = stream_get_ipv4(peer->curr);
/* Set aggregator flag. */
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR);
return BGP_ATTR_PARSE_PROCEED;
}
/* New Aggregator attribute */
static bgp_attr_parse_ret_t
bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args,
as_t *as4_aggregator_as,
struct in_addr *as4_aggregator_addr)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
if (length != 8) {
flog_err(EC_BGP_ATTR_LEN, "New Aggregator length is not 8 [%d]",
length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
0);
}
*as4_aggregator_as = stream_getl(peer->curr);
as4_aggregator_addr->s_addr = stream_get_ipv4(peer->curr);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR);
return BGP_ATTR_PARSE_PROCEED;
}
/* Munge AGGREGATOR and AS4_AGGREGATOR, AS_PATH and AS4_PATH.
*/
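/* BGP_AS_TRANS (23456) is the reserved 2-octet placeholder that an
* AS4-capable speaker substitutes for a 4-octet ASN when talking to a
* 2-octet-AS peer (RFC 6793); seeing it in AGGREGATOR signals that
* AS4_AGGREGATOR carries the real ASN. */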
static bgp_attr_parse_ret_t
bgp_attr_munge_as4_attrs(struct peer *const peer, struct attr *const attr,
struct aspath *as4_path, as_t as4_aggregator,
struct in_addr *as4_aggregator_addr)
{
int ignore_as4_path = 0;
struct aspath *newpath;
if (!attr->aspath) {
/* NULL aspath shouldn't be possible as bgp_attr_parse should
* have checked that all well-known, mandatory attributes were
* present.
*
* Can only be a problem with the peer itself - hard error
*/
return BGP_ATTR_PARSE_ERROR;
}
if (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV)) {
/* peer can do AS4, so we ignore AS4_PATH and AS4_AGGREGATOR
* if given.
* It is worth a warning though, because the peer really
* should not send them
*/
if (BGP_DEBUG(as4, AS4)) {
if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH)))
zlog_debug("[AS4] %s %s AS4_PATH", peer->host,
"AS4 capable peer, yet it sent");
if (attr->flag
& (ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR)))
zlog_debug("[AS4] %s %s AS4_AGGREGATOR",
peer->host,
"AS4 capable peer, yet it sent");
}
return BGP_ATTR_PARSE_PROCEED;
}
/* We have an AS2 (16-bit) peer. First, look for AS4_AGGREGATOR
* because that may override AS4_PATH
*/
if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR))) {
if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR))) {
/* received both.
* if the as_number in aggregator is not AS_TRANS,
* then AS4_AGGREGATOR and AS4_PATH shall be ignored
* and the Aggregator shall be taken as
* info on the aggregating node, and the AS_PATH
* shall be taken as the AS_PATH
* otherwise
* the Aggregator shall be ignored and the
* AS4_AGGREGATOR shall be taken as the
* Aggregating node and the AS_PATH is to be
* constructed "as in all other cases"
*/
if (attr->aggregator_as != BGP_AS_TRANS) {
/* ignore */
if (BGP_DEBUG(as4, AS4))
zlog_debug(
"[AS4] %s BGP not AS4 capable peer"
" send AGGREGATOR != AS_TRANS and"
" AS4_AGGREGATOR, so ignore"
" AS4_AGGREGATOR and AS4_PATH",
peer->host);
ignore_as4_path = 1;
} else {
/* "New_aggregator shall be taken as aggregator"
*/
attr->aggregator_as = as4_aggregator;
attr->aggregator_addr.s_addr =
as4_aggregator_addr->s_addr;
}
} else {
/* We received an AS4_AGGREGATOR but no AGGREGATOR.
* That is bogus - but reading the conditions
* we have to handle AS4_AGGREGATOR as if it were
* AGGREGATOR in that case
*/
if (BGP_DEBUG(as4, AS4))
zlog_debug(
"[AS4] %s BGP not AS4 capable peer send"
" AS4_AGGREGATOR but no AGGREGATOR, will take"
" it as if AGGREGATOR with AS_TRANS had been there",
peer->host);
attr->aggregator_as = as4_aggregator;
/* sweep it under the carpet and simulate a "good"
* AGGREGATOR */
attr->flag |= (ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR));
}
}
/* need to reconcile AS4_PATH and AS_PATH */
if (!ignore_as4_path
&& (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH)))) {
newpath = aspath_reconcile_as4(attr->aspath, as4_path);
if (!newpath)
return BGP_ATTR_PARSE_ERROR;
aspath_unintern(&attr->aspath);
attr->aspath = aspath_intern(newpath);
}
return BGP_ATTR_PARSE_PROCEED;
}
/* Community attribute. */
static bgp_attr_parse_ret_t
bgp_attr_community(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
if (length == 0) {
attr->community = NULL;
return BGP_ATTR_PARSE_PROCEED;
}
attr->community =
community_parse((uint32_t *)stream_pnt(peer->curr), length);
/* XXX: fix community_parse to use stream API and remove this */
stream_forward_getp(peer->curr, length);
if (!attr->community)
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
args->total);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);
return BGP_ATTR_PARSE_PROCEED;
}
/* Originator ID attribute. */
static bgp_attr_parse_ret_t
bgp_attr_originator_id(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
/* Length check. */
if (length != 4) {
flog_err(EC_BGP_ATTR_LEN, "Bad originator ID length %d",
length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
attr->originator_id.s_addr = stream_get_ipv4(peer->curr);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID);
return BGP_ATTR_PARSE_PROCEED;
}
/* Cluster list attribute. */
static bgp_attr_parse_ret_t
bgp_attr_cluster_list(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
/* Check length. */
if (length % 4) {
flog_err(EC_BGP_ATTR_LEN, "Bad cluster list length %d", length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
attr->cluster =
cluster_parse((struct in_addr *)stream_pnt(peer->curr), length);
/* XXX: Fix cluster_parse to use stream API and then remove this */
stream_forward_getp(peer->curr, length);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST);
return BGP_ATTR_PARSE_PROCEED;
}
/* Multiprotocol reachability information parse. */
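/* MP_REACH_NLRI wire format (RFC 4760):
*
*   Address Family Identifier            (2 octets)
*   Subsequent Address Family Identifier (1 octet)
*   Length of Next Hop Network Address   (1 octet)
*   Network Address of Next Hop          (variable)
*   Reserved                             (1 octet, defunct SNPA count)
*   NLRI                                 (variable)
*
* BGP_MP_REACH_MIN_SIZE below is the 5 octets of fixed header
* (AFI + SAFI + nexthop length + reserved octet).
*/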
int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
struct bgp_nlri *mp_update)
{
iana_afi_t pkt_afi;
afi_t afi;
iana_safi_t pkt_safi;
safi_t safi;
bgp_size_t nlri_len;
size_t start;
struct stream *s;
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
/* Set end of packet. */
s = BGP_INPUT(peer);
start = stream_get_getp(s);
/* safe to read statically sized header? */
#define BGP_MP_REACH_MIN_SIZE 5
#define LEN_LEFT (length - (stream_get_getp(s) - start))
if ((length > STREAM_READABLE(s)) || (length < BGP_MP_REACH_MIN_SIZE)) {
zlog_info("%s: %s sent invalid length, %lu", __func__,
peer->host, (unsigned long)length);
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
}
/* Load AFI, SAFI. */
pkt_afi = stream_getw(s);
pkt_safi = stream_getc(s);
/* Convert AFI, SAFI to internal values, check. */
if (bgp_map_afi_safi_iana2int(pkt_afi, pkt_safi, &afi, &safi)) {
/* Log if AFI or SAFI is unrecognized. This is not an error
* unless the attribute is otherwise malformed.
*/
if (bgp_debug_update(peer, NULL, NULL, 0))
zlog_debug(
"%s: MP_REACH received AFI %u or SAFI %u is unrecognized",
peer->host, pkt_afi, pkt_safi);
return BGP_ATTR_PARSE_ERROR;
}
/* Get nexthop length. */
attr->mp_nexthop_len = stream_getc(s);
if (LEN_LEFT < attr->mp_nexthop_len) {
zlog_info(
"%s: %s, MP nexthop length, %u, goes past end of attribute",
__func__, peer->host, attr->mp_nexthop_len);
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
}
/* Nexthop length check. */
switch (attr->mp_nexthop_len) {
case 0:
if (safi != SAFI_FLOWSPEC) {
zlog_info("%s: (%s) Wrong multiprotocol next hop length: %d",
__func__, peer->host, attr->mp_nexthop_len);
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
}
break;
case BGP_ATTR_NHLEN_VPNV4:
stream_getl(s); /* RD high */
stream_getl(s); /* RD low */
/*
* NOTE: intentional fall through
* - for consistency in rx processing
*
* The following comment is to signal GCC this intention
* and suppress the warning
*/
/* FALLTHRU */
case BGP_ATTR_NHLEN_IPV4:
stream_get(&attr->mp_nexthop_global_in, s, IPV4_MAX_BYTELEN);
/* Probably needed for RFC 2283 */
if (attr->nexthop.s_addr == 0)
memcpy(&attr->nexthop.s_addr,
&attr->mp_nexthop_global_in, IPV4_MAX_BYTELEN);
break;
case BGP_ATTR_NHLEN_IPV6_GLOBAL:
case BGP_ATTR_NHLEN_VPNV6_GLOBAL:
if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL) {
stream_getl(s); /* RD high */
stream_getl(s); /* RD low */
}
stream_get(&attr->mp_nexthop_global, s, IPV6_MAX_BYTELEN);
if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) {
if (!peer->nexthop.ifp) {
zlog_warn("%s: interface not set appropriately to handle some attributes",
peer->host);
return BGP_ATTR_PARSE_WITHDRAW;
}
attr->nh_ifindex = peer->nexthop.ifp->ifindex;
}
break;
case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
case BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL:
if (attr->mp_nexthop_len
== BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
stream_getl(s); /* RD high */
stream_getl(s); /* RD low */
}
stream_get(&attr->mp_nexthop_global, s, IPV6_MAX_BYTELEN);
if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) {
if (!peer->nexthop.ifp) {
zlog_warn("%s: interface not set appropriately to handle some attributes",
peer->host);
return BGP_ATTR_PARSE_WITHDRAW;
}
attr->nh_ifindex = peer->nexthop.ifp->ifindex;
}
if (attr->mp_nexthop_len
== BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
stream_getl(s); /* RD high */
stream_getl(s); /* RD low */
}
stream_get(&attr->mp_nexthop_local, s, IPV6_MAX_BYTELEN);
if (!IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_local)) {
char buf1[INET6_ADDRSTRLEN];
char buf2[INET6_ADDRSTRLEN];
if (bgp_debug_update(peer, NULL, NULL, 1))
zlog_debug(
"%s rcvd nexthops %s, %s -- ignoring non-LL value",
peer->host,
inet_ntop(AF_INET6,
&attr->mp_nexthop_global,
buf1, INET6_ADDRSTRLEN),
inet_ntop(AF_INET6,
&attr->mp_nexthop_local, buf2,
INET6_ADDRSTRLEN));
attr->mp_nexthop_len = IPV6_MAX_BYTELEN;
}
if (!peer->nexthop.ifp) {
zlog_warn("%s: Interface not set appropriately to handle this some attributes",
peer->host);
return BGP_ATTR_PARSE_WITHDRAW;
}
attr->nh_lla_ifindex = peer->nexthop.ifp->ifindex;
break;
default:
zlog_info("%s: (%s) Wrong multiprotocol next hop length: %d",
__func__, peer->host, attr->mp_nexthop_len);
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
}
if (!LEN_LEFT) {
zlog_info("%s: (%s) Failed to read SNPA and NLRI(s)", __func__,
peer->host);
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
}
{
uint8_t val;
if ((val = stream_getc(s)))
flog_warn(
EC_BGP_DEFUNCT_SNPA_LEN,
"%s sent non-zero value, %u, for defunct SNPA-length field",
peer->host, val);
}
/* must have nlri_len, what is left of the attribute */
nlri_len = LEN_LEFT;
if (nlri_len > STREAM_READABLE(s)) {
zlog_info("%s: (%s) Failed to read NLRI", __func__, peer->host);
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
}
if (!nlri_len) {
zlog_info("%s: (%s) No Reachability, Treating as a EOR marker",
__func__, peer->host);
mp_update->afi = afi;
mp_update->safi = safi;
return BGP_ATTR_PARSE_EOR;
}
mp_update->afi = afi;
mp_update->safi = safi;
mp_update->nlri = stream_pnt(s);
mp_update->length = nlri_len;
stream_forward_getp(s, nlri_len);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_MP_REACH_NLRI);
return BGP_ATTR_PARSE_PROCEED;
#undef LEN_LEFT
}
/* Multiprotocol unreachable parse */
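/* MP_UNREACH_NLRI wire format (RFC 4760): AFI (2 octets), SAFI
* (1 octet), then the withdrawn routes as NLRI; hence the 3-octet
* BGP_MP_UNREACH_MIN_SIZE below.
*/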
int bgp_mp_unreach_parse(struct bgp_attr_parser_args *args,
struct bgp_nlri *mp_withdraw)
{
struct stream *s;
iana_afi_t pkt_afi;
afi_t afi;
iana_safi_t pkt_safi;
safi_t safi;
uint16_t withdraw_len;
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
s = peer->curr;
#define BGP_MP_UNREACH_MIN_SIZE 3
if ((length > STREAM_READABLE(s)) || (length < BGP_MP_UNREACH_MIN_SIZE))
return BGP_ATTR_PARSE_ERROR_NOTIFYPLS;
pkt_afi = stream_getw(s);
pkt_safi = stream_getc(s);
/* Convert AFI, SAFI to internal values, check. */
if (bgp_map_afi_safi_iana2int(pkt_afi, pkt_safi, &afi, &safi)) {
/* Log if AFI or SAFI is unrecognized. This is not an error
* unless the attribute is otherwise malformed.
*/
if (bgp_debug_update(peer, NULL, NULL, 0))
zlog_debug(
"%s: MP_UNREACH received AFI %u or SAFI %u is unrecognized",
peer->host, pkt_afi, pkt_safi);
return BGP_ATTR_PARSE_ERROR;
}
withdraw_len = length - BGP_MP_UNREACH_MIN_SIZE;
mp_withdraw->afi = afi;
mp_withdraw->safi = safi;
mp_withdraw->nlri = stream_pnt(s);
mp_withdraw->length = withdraw_len;
stream_forward_getp(s, withdraw_len);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_MP_UNREACH_NLRI);
return BGP_ATTR_PARSE_PROCEED;
}
/* Large Community attribute. */
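/* Each Large Community is a fixed 12 octets: Global Administrator
* (4), Local Data Part 1 (4), Local Data Part 2 (4) - RFC 8092. */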
static bgp_attr_parse_ret_t
bgp_attr_large_community(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
/*
* Large community follows new attribute format.
*/
if (length == 0) {
attr->lcommunity = NULL;
/* Empty large-community doesn't seem to be invalid per se */
return BGP_ATTR_PARSE_PROCEED;
}
attr->lcommunity =
lcommunity_parse((uint8_t *)stream_pnt(peer->curr), length);
/* XXX: fix lcommunity_parse to use stream API */
stream_forward_getp(peer->curr, length);
if (!attr->lcommunity)
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
args->total);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES);
return BGP_ATTR_PARSE_PROCEED;
}
/* Extended Community attribute. */
static bgp_attr_parse_ret_t
bgp_attr_ext_communities(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
uint8_t sticky = 0;
if (length == 0) {
attr->ecommunity = NULL;
/* Empty extcomm doesn't seem to be invalid per se */
return BGP_ATTR_PARSE_PROCEED;
}
attr->ecommunity =
ecommunity_parse((uint8_t *)stream_pnt(peer->curr), length);
/* XXX: fix ecommunity_parse to use stream API */
stream_forward_getp(peer->curr, length);
if (!attr->ecommunity)
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
args->total);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);
/* Extract MAC mobility sequence number, if any. */
attr->mm_seqnum = bgp_attr_mac_mobility_seqnum(attr, &sticky);
attr->sticky = sticky;
/* Check if this is a Gateway MAC-IP advertisement */
attr->default_gw = bgp_attr_default_gw(attr);
/* Handle scenario where router flag ecommunity is not
* set but default gw ext community is present.
* Use default gateway, set and propagate R-bit.
*/
if (attr->default_gw)
attr->router_flag = 1;
/* Check EVPN Neighbor advertisement flags, R-bit */
bgp_attr_evpn_na_flag(attr, &attr->router_flag);
/* Extract the Rmac, if any */
bgp_attr_rmac(attr, &attr->rmac);
return BGP_ATTR_PARSE_PROCEED;
}
/* Parse Tunnel Encap attribute in an UPDATE */
static int bgp_attr_encap(uint8_t type, struct peer *peer, /* IN */
bgp_size_t length, /* IN: attr's length field */
struct attr *attr, /* IN: caller already allocated */
uint8_t flag, /* IN: attr's flags field */
uint8_t *startp)
{
bgp_size_t total;
uint16_t tunneltype = 0;
total = length + (CHECK_FLAG(flag, BGP_ATTR_FLAG_EXTLEN) ? 4 : 3);
if (!CHECK_FLAG(flag, BGP_ATTR_FLAG_TRANS)
|| !CHECK_FLAG(flag, BGP_ATTR_FLAG_OPTIONAL)) {
zlog_info(
"Tunnel Encap attribute flag isn't optional and transitive %d",
flag);
bgp_notify_send_with_data(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_FLAG_ERR,
startp, total);
return -1;
}
if (BGP_ATTR_ENCAP == type) {
/* read outer TLV type and length */
uint16_t tlv_length;
if (length < 4) {
zlog_info(
"Tunnel Encap attribute not long enough to contain outer T,L");
bgp_notify_send_with_data(
peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, startp, total);
return -1;
}
tunneltype = stream_getw(BGP_INPUT(peer));
tlv_length = stream_getw(BGP_INPUT(peer));
length -= 4;
if (tlv_length != length) {
zlog_info("%s: tlv_length(%d) != length(%d)", __func__,
tlv_length, length);
}
}
while (length >= 4) {
uint16_t subtype = 0;
uint16_t sublength = 0;
struct bgp_attr_encap_subtlv *tlv;
if (BGP_ATTR_ENCAP == type) {
subtype = stream_getc(BGP_INPUT(peer));
sublength = stream_getc(BGP_INPUT(peer));
length -= 2;
#if ENABLE_BGP_VNC
} else {
subtype = stream_getw(BGP_INPUT(peer));
sublength = stream_getw(BGP_INPUT(peer));
length -= 4;
#endif
}
if (sublength > length) {
zlog_info(
"Tunnel Encap attribute sub-tlv length %d exceeds remaining length %d",
sublength, length);
bgp_notify_send_with_data(
peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, startp, total);
return -1;
}
/* alloc and copy sub-tlv */
/* TBD make sure these are freed when attributes are released */
tlv = XCALLOC(MTYPE_ENCAP_TLV,
sizeof(struct bgp_attr_encap_subtlv) + sublength);
tlv->type = subtype;
tlv->length = sublength;
stream_get(tlv->value, peer->curr, sublength);
length -= sublength;
/* attach tlv to encap chain */
if (BGP_ATTR_ENCAP == type) {
struct bgp_attr_encap_subtlv *stlv_last;
for (stlv_last = attr->encap_subtlvs;
stlv_last && stlv_last->next;
stlv_last = stlv_last->next)
;
if (stlv_last) {
stlv_last->next = tlv;
} else {
attr->encap_subtlvs = tlv;
}
#if ENABLE_BGP_VNC
} else {
struct bgp_attr_encap_subtlv *stlv_last;
for (stlv_last = attr->vnc_subtlvs;
stlv_last && stlv_last->next;
stlv_last = stlv_last->next)
;
if (stlv_last) {
stlv_last->next = tlv;
} else {
attr->vnc_subtlvs = tlv;
}
#endif
}
}
if (BGP_ATTR_ENCAP == type) {
attr->encap_tunneltype = tunneltype;
}
if (length) {
/* spurious leftover data */
zlog_info(
"Tunnel Encap attribute length is bad: %d leftover octets",
length);
bgp_notify_send_with_data(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
startp, total);
return -1;
}
return 0;
}
/*
* Parse one Prefix-SID sub-TLV. Returns BGP_ATTR_PARSE_PROCEED on
* success, or an error code that needs to be passed up the stack.
*/
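/* Per draft-ietf-idr-bgp-prefix-sid, each sub-TLV starts with
* Type (1 octet) and Length (2 octets). The Label-Index sub-TLV
* value is RESERVED (1 octet), Flags (2 octets), Label Index
* (4 octets), which is what the 7-octet length check and the
* getc/getw/getl reads below correspond to. */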
static bgp_attr_parse_ret_t bgp_attr_psid_sub(int32_t type,
int32_t length,
struct bgp_attr_parser_args *args,
struct bgp_nlri *mp_update)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
uint32_t label_index;
struct in6_addr ipv6_sid;
uint32_t srgb_base;
uint32_t srgb_range;
int srgb_count;
if (type == BGP_PREFIX_SID_LABEL_INDEX) {
if (length != BGP_PREFIX_SID_LABEL_INDEX_LENGTH) {
flog_err(
EC_BGP_ATTR_LEN,
"Prefix SID label index length is %d instead of %d",
length, BGP_PREFIX_SID_LABEL_INDEX_LENGTH);
return bgp_attr_malformed(args,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* Ignore flags and reserved */
stream_getc(peer->curr);
stream_getw(peer->curr);
/* Fetch the label index and see if it is valid. */
label_index = stream_getl(peer->curr);
if (label_index == BGP_INVALID_LABEL_INDEX)
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
args->total);
/* Store label index; subsequently, we'll check on
* address-family */
attr->label_index = label_index;
/*
* Ignore the Label index attribute unless received for
* labeled-unicast
* SAFI.
*/
if (!mp_update->length
|| mp_update->safi != SAFI_LABELED_UNICAST)
attr->label_index = BGP_INVALID_LABEL_INDEX;
}
/* Placeholder code for the IPv6 SID type */
else if (type == BGP_PREFIX_SID_IPV6) {
if (length != BGP_PREFIX_SID_IPV6_LENGTH) {
flog_err(EC_BGP_ATTR_LEN,
"Prefix SID IPv6 length is %d instead of %d",
length, BGP_PREFIX_SID_IPV6_LENGTH);
return bgp_attr_malformed(args,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* Ignore reserved */
stream_getc(peer->curr);
stream_getw(peer->curr);
stream_get(&ipv6_sid, peer->curr, 16);
}
/* Placeholder code for the Originator SRGB type */
else if (type == BGP_PREFIX_SID_ORIGINATOR_SRGB) {
/* Ignore flags */
stream_getw(peer->curr);
length -= 2;
if (length % BGP_PREFIX_SID_ORIGINATOR_SRGB_LENGTH) {
flog_err(
EC_BGP_ATTR_LEN,
"Prefix SID Originator SRGB length is %d, it must be a multiple of %d ",
length, BGP_PREFIX_SID_ORIGINATOR_SRGB_LENGTH);
return bgp_attr_malformed(
args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
srgb_count = length / BGP_PREFIX_SID_ORIGINATOR_SRGB_LENGTH;
for (int i = 0; i < srgb_count; i++) {
stream_get(&srgb_base, peer->curr, 3);
stream_get(&srgb_range, peer->curr, 3);
}
}
return BGP_ATTR_PARSE_PROCEED;
}
/* Prefix SID attribute
* draft-ietf-idr-bgp-prefix-sid-05
*/
bgp_attr_parse_ret_t
bgp_attr_prefix_sid(int32_t tlength, struct bgp_attr_parser_args *args,
struct bgp_nlri *mp_update)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
bgp_attr_parse_ret_t ret;
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID);
while (tlength) {
int32_t type, length;
type = stream_getc(peer->curr);
length = stream_getw(peer->curr);
ret = bgp_attr_psid_sub(type, length, args, mp_update);
if (ret != BGP_ATTR_PARSE_PROCEED)
return ret;
/*
* Subtract length plus the 3 octets of T (1) and L (2),
* since length covers only the Value portion
*/
tlength -= length + 3;
if (tlength < 0) {
flog_err(
EC_BGP_ATTR_LEN,
"Prefix SID internal length %d causes us to read beyond the total Prefix SID length",
length);
return bgp_attr_malformed(args,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
}
return BGP_ATTR_PARSE_PROCEED;
}
/* PMSI tunnel attribute (RFC 6514)
* Basic validation checks done here.
*/
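/* PMSI tunnel attribute wire format (RFC 6514, sec. 5):
* Flags (1 octet), Tunnel Type (1), MPLS Label (3), Tunnel
* Identifier (variable). For ingress replication the identifier is
* the 4-octet endpoint address, giving the total length of 9 checked
* below. */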
static bgp_attr_parse_ret_t
bgp_attr_pmsi_tunnel(struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
uint8_t tnl_type;
/* Verify that the receiver is expecting "ingress replication" as we
* can only support that.
*/
if (length < 2) {
flog_err(EC_BGP_ATTR_LEN, "Bad PMSI tunnel attribute length %d",
length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
stream_getc(peer->curr); /* Flags */
tnl_type = stream_getc(peer->curr);
if (tnl_type > PMSI_TNLTYPE_MAX) {
flog_err(EC_BGP_ATTR_PMSI_TYPE,
"Invalid PMSI tunnel attribute type %d", tnl_type);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
args->total);
}
if (tnl_type == PMSI_TNLTYPE_INGR_REPL) {
if (length != 9) {
flog_err(EC_BGP_ATTR_PMSI_LEN,
"Bad PMSI tunnel attribute length %d for IR",
length);
return bgp_attr_malformed(
args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
}
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL);
attr->pmsi_tnl_type = tnl_type;
/* Forward read pointer of input stream. */
stream_forward_getp(peer->curr, length - 2);
return BGP_ATTR_PARSE_PROCEED;
}
/* BGP unknown attribute treatment. */
static bgp_attr_parse_ret_t bgp_attr_unknown(struct bgp_attr_parser_args *args)
{
bgp_size_t total = args->total;
struct transit *transit;
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
uint8_t *const startp = args->startp;
const uint8_t type = args->type;
const uint8_t flag = args->flags;
const bgp_size_t length = args->length;
if (bgp_debug_update(peer, NULL, NULL, 1))
zlog_debug(
"%s Unknown attribute is received (type %d, length %d)",
peer->host, type, length);
/* Forward read pointer of input stream. */
stream_forward_getp(peer->curr, length);
/* If any of the mandatory well-known attributes are not recognized,
then the Error Subcode is set to Unrecognized Well-known
Attribute. The Data field contains the unrecognized attribute
(type, length and value). */
if (!CHECK_FLAG(flag, BGP_ATTR_FLAG_OPTIONAL)) {
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_UNREC_ATTR,
args->total);
}
/* Unrecognized non-transitive optional attributes must be quietly
ignored and not passed along to other BGP peers. */
if (!CHECK_FLAG(flag, BGP_ATTR_FLAG_TRANS))
return BGP_ATTR_PARSE_PROCEED;
/* If a path with recognized transitive optional attribute is
accepted and passed along to other BGP peers and the Partial bit
in the Attribute Flags octet is set to 1 by some previous AS, it
is not set back to 0 by the current AS. */
SET_FLAG(*startp, BGP_ATTR_FLAG_PARTIAL);
/* Store transitive attribute to the end of attr->transit. */
if (!attr->transit)
attr->transit = XCALLOC(MTYPE_TRANSIT, sizeof(struct transit));
transit = attr->transit;
if (transit->val)
transit->val = XREALLOC(MTYPE_TRANSIT_VAL, transit->val,
transit->length + total);
else
transit->val = XMALLOC(MTYPE_TRANSIT_VAL, total);
memcpy(transit->val + transit->length, startp, total);
transit->length += total;
return BGP_ATTR_PARSE_PROCEED;
}
/* Well-known attribute check. */
static int bgp_attr_check(struct peer *peer, struct attr *attr)
{
uint8_t type = 0;
/* BGP Graceful-Restart End-of-RIB for IPv4 unicast is signaled as an
* empty UPDATE. */
if (CHECK_FLAG(peer->cap, PEER_CAP_RESTART_RCV) && !attr->flag)
return BGP_ATTR_PARSE_PROCEED;
/* "An UPDATE message that contains the MP_UNREACH_NLRI is not required
to carry any other path attributes.", though if MP_REACH_NLRI or NLRI
are present, it should. Check for any other attribute being present
instead.
*/
if ((!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MP_REACH_NLRI)) &&
CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MP_UNREACH_NLRI))))
return BGP_ATTR_PARSE_PROCEED;
if (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGIN)))
type = BGP_ATTR_ORIGIN;
if (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS_PATH)))
type = BGP_ATTR_AS_PATH;
/* RFC 2858 makes Next-Hop optional/ignored if MP_REACH_NLRI is
* present and the NLRI is empty. We can't easily check NLRI
* emptiness here though.
*/
if (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP))
&& !CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MP_REACH_NLRI)))
type = BGP_ATTR_NEXT_HOP;
if (peer->sort == BGP_PEER_IBGP
&& !CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)))
type = BGP_ATTR_LOCAL_PREF;
if (type) {
flog_warn(EC_BGP_MISSING_ATTRIBUTE,
"%s Missing well-known attribute %s.", peer->host,
lookup_msg(attr_str, type, NULL));
bgp_notify_send_with_data(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_MISS_ATTR, &type,
1);
return BGP_ATTR_PARSE_ERROR;
}
return BGP_ATTR_PARSE_PROCEED;
}
/* Read attribute of update packet. This function is called from
bgp_update_receive() in bgp_packet.c. */
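/* Each path attribute on the wire is (RFC 4271, sec. 4.3):
*
*   flags (1 octet) | type (1 octet) | length (1 or 2 octets) | value
*
* The length field is 2 octets iff the Extended Length flag (0x10)
* is set in the flags octet. For example, ORIGIN=IGP is typically
* encoded in 4 octets as 0x40 0x01 0x01 0x00 (well-known transitive,
* type 1, length 1, value 0).
*/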
bgp_attr_parse_ret_t bgp_attr_parse(struct peer *peer, struct attr *attr,
bgp_size_t size, struct bgp_nlri *mp_update,
struct bgp_nlri *mp_withdraw)
{
bgp_attr_parse_ret_t ret;
uint8_t flag = 0;
uint8_t type = 0;
bgp_size_t length;
uint8_t *startp, *endp;
uint8_t *attr_endp;
uint8_t seen[BGP_ATTR_BITMAP_SIZE];
/* we need the as4_path only until we have synthesized the as_path with
* it */
/* same goes for as4_aggregator */
struct aspath *as4_path = NULL;
as_t as4_aggregator = 0;
struct in_addr as4_aggregator_addr = {.s_addr = 0};
/* Initialize bitmap. */
memset(seen, 0, BGP_ATTR_BITMAP_SIZE);
/* End pointer of BGP attribute. */
endp = BGP_INPUT_PNT(peer) + size;
/* Get attributes to the end of attribute length. */
while (BGP_INPUT_PNT(peer) < endp) {
/* Check remaining length. */
if (endp - BGP_INPUT_PNT(peer) < BGP_ATTR_MIN_LEN) {
/* XXX warning: long int format, int arg (arg 5) */
flog_warn(
EC_BGP_ATTRIBUTE_TOO_SMALL,
"%s: error BGP attribute length %lu is smaller than min len",
peer->host,
(unsigned long)(endp
- stream_pnt(BGP_INPUT(peer))));
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
return BGP_ATTR_PARSE_ERROR;
}
/* Fetch attribute flag and type. */
startp = BGP_INPUT_PNT(peer);
/* "The lower-order four bits of the Attribute Flags octet are
unused. They MUST be zero when sent and MUST be ignored when
received." */
flag = 0xF0 & stream_getc(BGP_INPUT(peer));
type = stream_getc(BGP_INPUT(peer));
/* Check whether Extended-Length applies and is in bounds */
if (CHECK_FLAG(flag, BGP_ATTR_FLAG_EXTLEN)
&& ((endp - startp) < (BGP_ATTR_MIN_LEN + 1))) {
flog_warn(
EC_BGP_EXT_ATTRIBUTE_TOO_SMALL,
"%s: Extended length set, but just %lu bytes of attr header",
peer->host,
(unsigned long)(endp
- stream_pnt(BGP_INPUT(peer))));
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
return BGP_ATTR_PARSE_ERROR;
}
/* Check extended attribute length bit. */
if (CHECK_FLAG(flag, BGP_ATTR_FLAG_EXTLEN))
length = stream_getw(BGP_INPUT(peer));
else
length = stream_getc(BGP_INPUT(peer));
/* If any attribute appears more than once in the UPDATE
message, then the Error Subcode is set to Malformed Attribute
List. */
if (CHECK_BITMAP(seen, type)) {
flog_warn(
EC_BGP_ATTRIBUTE_REPEATED,
"%s: error BGP attribute type %d appears twice in a message",
peer->host, type);
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_MAL_ATTR);
return BGP_ATTR_PARSE_ERROR;
}
/* Set type to bitmap to check duplicate attribute. `type' is
unsigned char so it never overflows bitmap range. */
SET_BITMAP(seen, type);
/* Overflow check. */
attr_endp = BGP_INPUT_PNT(peer) + length;
if (attr_endp > endp) {
flog_warn(
EC_BGP_ATTRIBUTE_TOO_LARGE,
"%s: BGP type %d length %d is too large, attribute total length is %d. attr_endp is %p. endp is %p",
peer->host, type, length, size, attr_endp,
endp);
/*
* RFC 4271 6.3
* If any recognized attribute has an Attribute
* Length that conflicts with the expected length
* (based on the attribute type code), then the
* Error Subcode MUST be set to Attribute Length
* Error. The Data field MUST contain the erroneous
* attribute (type, length, and value).
* ----------
* We do not currently have a good way to determine the
* length of the attribute independent of the length
* received in the message. Instead we send the
* minimum between the amount of data we have and the
* amount specified by the attribute length field.
*
* Instead of directly passing in the packet buffer and
* offset we use the stream_get* functions to read into
* a stack buffer, since they perform bounds checking
* and we are working with untrusted data.
*/
unsigned char ndata[BGP_MAX_PACKET_SIZE];
memset(ndata, 0x00, sizeof(ndata));
size_t lfl =
CHECK_FLAG(flag, BGP_ATTR_FLAG_EXTLEN) ? 2 : 1;
/* Rewind to end of flag field */
stream_forward_getp(BGP_INPUT(peer), -(1 + lfl));
/* Type */
stream_get(&ndata[0], BGP_INPUT(peer), 1);
/* Length */
stream_get(&ndata[1], BGP_INPUT(peer), lfl);
/* Value */
size_t atl = attr_endp - startp;
size_t ndl = MIN(atl, STREAM_READABLE(BGP_INPUT(peer)));
stream_get(&ndata[lfl + 1], BGP_INPUT(peer), ndl);
bgp_notify_send_with_data(
peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR, ndata,
ndl + lfl + 1);
return BGP_ATTR_PARSE_ERROR;
}
struct bgp_attr_parser_args attr_args = {
.peer = peer,
.length = length,
.attr = attr,
.type = type,
.flags = flag,
.startp = startp,
.total = attr_endp - startp,
};
/* If any recognized attribute has Attribute Flags that conflict
with the Attribute Type Code, then the Error Subcode is set
to
Attribute Flags Error. The Data field contains the erroneous
attribute (type, length and value). */
if (bgp_attr_flag_invalid(&attr_args)) {
ret = bgp_attr_malformed(
&attr_args, BGP_NOTIFY_UPDATE_ATTR_FLAG_ERR,
attr_args.total);
if (ret == BGP_ATTR_PARSE_PROCEED)
continue;
return ret;
}
/* OK, check attribute and store its value. */
switch (type) {
case BGP_ATTR_ORIGIN:
ret = bgp_attr_origin(&attr_args);
break;
case BGP_ATTR_AS_PATH:
ret = bgp_attr_aspath(&attr_args);
break;
case BGP_ATTR_AS4_PATH:
ret = bgp_attr_as4_path(&attr_args, &as4_path);
break;
case BGP_ATTR_NEXT_HOP:
ret = bgp_attr_nexthop(&attr_args);
break;
case BGP_ATTR_MULTI_EXIT_DISC:
ret = bgp_attr_med(&attr_args);
break;
case BGP_ATTR_LOCAL_PREF:
ret = bgp_attr_local_pref(&attr_args);
break;
case BGP_ATTR_ATOMIC_AGGREGATE:
ret = bgp_attr_atomic(&attr_args);
break;
case BGP_ATTR_AGGREGATOR:
ret = bgp_attr_aggregator(&attr_args);
break;
case BGP_ATTR_AS4_AGGREGATOR:
ret = bgp_attr_as4_aggregator(&attr_args,
&as4_aggregator,
&as4_aggregator_addr);
break;
case BGP_ATTR_COMMUNITIES:
ret = bgp_attr_community(&attr_args);
break;
case BGP_ATTR_LARGE_COMMUNITIES:
ret = bgp_attr_large_community(&attr_args);
break;
case BGP_ATTR_ORIGINATOR_ID:
ret = bgp_attr_originator_id(&attr_args);
break;
case BGP_ATTR_CLUSTER_LIST:
ret = bgp_attr_cluster_list(&attr_args);
break;
case BGP_ATTR_MP_REACH_NLRI:
ret = bgp_mp_reach_parse(&attr_args, mp_update);
break;
case BGP_ATTR_MP_UNREACH_NLRI:
ret = bgp_mp_unreach_parse(&attr_args, mp_withdraw);
break;
case BGP_ATTR_EXT_COMMUNITIES:
ret = bgp_attr_ext_communities(&attr_args);
break;
#if ENABLE_BGP_VNC
case BGP_ATTR_VNC:
#endif
case BGP_ATTR_ENCAP:
ret = bgp_attr_encap(type, peer, length, attr, flag,
startp);
break;
case BGP_ATTR_PREFIX_SID:
ret = bgp_attr_prefix_sid(length,
&attr_args, mp_update);
break;
case BGP_ATTR_PMSI_TUNNEL:
ret = bgp_attr_pmsi_tunnel(&attr_args);
break;
default:
ret = bgp_attr_unknown(&attr_args);
break;
}
if (ret == BGP_ATTR_PARSE_ERROR_NOTIFYPLS) {
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_MAL_ATTR);
ret = BGP_ATTR_PARSE_ERROR;
}
if (ret == BGP_ATTR_PARSE_EOR) {
if (as4_path)
aspath_unintern(&as4_path);
return ret;
}
/* If hard error occurred immediately return to the caller. */
if (ret == BGP_ATTR_PARSE_ERROR) {
flog_warn(EC_BGP_ATTRIBUTE_PARSE_ERROR,
"%s: Attribute %s, parse error", peer->host,
lookup_msg(attr_str, type, NULL));
if (as4_path)
aspath_unintern(&as4_path);
return ret;
}
if (ret == BGP_ATTR_PARSE_WITHDRAW) {
flog_warn(
EC_BGP_ATTRIBUTE_PARSE_WITHDRAW,
"%s: Attribute %s, parse error - treating as withdrawal",
peer->host, lookup_msg(attr_str, type, NULL));
if (as4_path)
aspath_unintern(&as4_path);
return ret;
}
/* Check the fetched length. */
if (BGP_INPUT_PNT(peer) != attr_endp) {
flog_warn(EC_BGP_ATTRIBUTE_FETCH_ERROR,
"%s: BGP attribute %s, fetch error",
peer->host, lookup_msg(attr_str, type, NULL));
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
if (as4_path)
aspath_unintern(&as4_path);
return BGP_ATTR_PARSE_ERROR;
}
}
/* Check final read pointer is same as end pointer. */
if (BGP_INPUT_PNT(peer) != endp) {
flog_warn(EC_BGP_ATTRIBUTES_MISMATCH,
"%s: BGP attribute %s, length mismatch", peer->host,
lookup_msg(attr_str, type, NULL));
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
if (as4_path)
aspath_unintern(&as4_path);
return BGP_ATTR_PARSE_ERROR;
}
/* Check all mandatory well-known attributes are present */
if ((ret = bgp_attr_check(peer, attr)) < 0) {
if (as4_path)
aspath_unintern(&as4_path);
return ret;
}
/*
* At this point we can see whether we got AS4_PATH and/or
* AS4_AGGREGATOR from a 16-bit peer and act accordingly.
* We cannot do this before we've read all attributes because
* the as4 handling does not say whether AS4_PATH has to be sent
* after AS_PATH or not - nor when AS4_AGGREGATOR will be sent
* in relation to AGGREGATOR.
* So, to be defensive, we do not rely on any order: we read
* all attributes first, including the 32-bit ones, and only
* afterwards decide what, if anything, is to be done for as4.
*
* It is possible to not have an AS_PATH, e.g. GR EoR and sole
* MP_UNREACH_NLRI.
*/
/* actually... this doesn't ever return failure currently, but
* better safe than sorry */
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS_PATH))
&& bgp_attr_munge_as4_attrs(peer, attr, as4_path, as4_aggregator,
&as4_aggregator_addr)) {
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_MAL_ATTR);
if (as4_path)
aspath_unintern(&as4_path);
return BGP_ATTR_PARSE_ERROR;
}
/* At this stage, we have done all fiddling with as4, and the
* resulting info is in attr->aggregator resp. attr->aspath,
* so we can chuck as4_aggregator and as4_path altogether in
* order to save memory
*/
if (as4_path) {
aspath_unintern(&as4_path); /* unintern - it is in the hash */
/* The flag that we got this is still there, but that causes
* no trouble
*/
}
/*
* The "rest" of the code does nothing with as4_aggregator.
* There is no memory attached specifically which is not part
* of the attr, so ignoring it just means doing nothing.
*/
/*
* Finally do the checks on the aspath we did not do yet
* because we waited for a potentially synthesized aspath.
*/
if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS_PATH))) {
ret = bgp_attr_aspath_check(peer, attr);
if (ret != BGP_ATTR_PARSE_PROCEED)
return ret;
}
/* Finally intern unknown attribute. */
if (attr->transit)
attr->transit = transit_intern(attr->transit);
if (attr->encap_subtlvs)
attr->encap_subtlvs =
encap_intern(attr->encap_subtlvs, ENCAP_SUBTLV_TYPE);
#if ENABLE_BGP_VNC
if (attr->vnc_subtlvs)
attr->vnc_subtlvs =
encap_intern(attr->vnc_subtlvs, VNC_SUBTLV_TYPE);
#endif
return BGP_ATTR_PARSE_PROCEED;
}
size_t bgp_packet_mpattr_start(struct stream *s, struct peer *peer, afi_t afi,
safi_t safi, struct bpacket_attr_vec_arr *vecarr,
struct attr *attr)
{
size_t sizep;
iana_afi_t pkt_afi;
iana_safi_t pkt_safi;
afi_t nh_afi;
/* Always set the extended-length bit so the attribute length is encoded as 2 bytes */
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_MP_REACH_NLRI);
sizep = stream_get_endp(s);
stream_putw(s, 0); /* Marker: Attribute length. */
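/* Real length is patched in later by bgp_packet_mpattr_end() via
* stream_putw_at() once the nexthop and NLRI have been written. */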
/* Convert AFI, SAFI to values for packet. */
bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi);
stream_putw(s, pkt_afi); /* AFI */
stream_putc(s, pkt_safi); /* SAFI */
/* Nexthop AFI */
if (afi == AFI_IP
&& (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST))
nh_afi = peer_cap_enhe(peer, afi, safi) ? AFI_IP6 : AFI_IP;
else
nh_afi = BGP_NEXTHOP_AFI_FROM_NHLEN(attr->mp_nexthop_len);
/* Nexthop */
bpacket_attr_vec_arr_set_vec(vecarr, BGP_ATTR_VEC_NH, s, attr);
switch (nh_afi) {
case AFI_IP:
switch (safi) {
case SAFI_UNICAST:
case SAFI_MULTICAST:
case SAFI_LABELED_UNICAST:
stream_putc(s, 4);
stream_put_ipv4(s, attr->nexthop.s_addr);
break;
case SAFI_MPLS_VPN:
stream_putc(s, 12);
stream_putl(s, 0); /* RD = 0, per RFC */
stream_putl(s, 0);
stream_put(s, &attr->mp_nexthop_global_in, 4);
break;
case SAFI_ENCAP:
case SAFI_EVPN:
stream_putc(s, 4);
stream_put(s, &attr->mp_nexthop_global_in, 4);
break;
case SAFI_FLOWSPEC:
stream_putc(s, 0); /* no nexthop for flowspec */
default:
break;
}
break;
case AFI_IP6:
switch (safi) {
case SAFI_UNICAST:
case SAFI_MULTICAST:
case SAFI_LABELED_UNICAST:
case SAFI_EVPN: {
if (attr->mp_nexthop_len
== BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
stream_putc(s,
BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL);
stream_put(s, &attr->mp_nexthop_global,
IPV6_MAX_BYTELEN);
stream_put(s, &attr->mp_nexthop_local,
IPV6_MAX_BYTELEN);
} else {
stream_putc(s, IPV6_MAX_BYTELEN);
stream_put(s, &attr->mp_nexthop_global,
IPV6_MAX_BYTELEN);
}
} break;
case SAFI_MPLS_VPN: {
if (attr->mp_nexthop_len
== BGP_ATTR_NHLEN_IPV6_GLOBAL) {
stream_putc(s, 24);
stream_putl(s, 0); /* RD = 0, per RFC */
stream_putl(s, 0);
stream_put(s, &attr->mp_nexthop_global,
IPV6_MAX_BYTELEN);
} else if (attr->mp_nexthop_len
== BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
stream_putc(s, 48);
stream_putl(s, 0); /* RD = 0, per RFC */
stream_putl(s, 0);
stream_put(s, &attr->mp_nexthop_global,
IPV6_MAX_BYTELEN);
stream_putl(s, 0); /* RD = 0, per RFC */
stream_putl(s, 0);
stream_put(s, &attr->mp_nexthop_local,
IPV6_MAX_BYTELEN);
}
} break;
case SAFI_ENCAP:
stream_putc(s, IPV6_MAX_BYTELEN);
stream_put(s, &attr->mp_nexthop_global,
IPV6_MAX_BYTELEN);
break;
case SAFI_FLOWSPEC:
stream_putc(s, 0); /* no nexthop for flowspec */
default:
break;
}
break;
default:
if (safi != SAFI_FLOWSPEC)
flog_err(
EC_BGP_ATTR_NH_SEND_LEN,
"Bad nexthop when sending to %s, AFI %u SAFI %u nhlen %d",
peer->host, afi, safi, attr->mp_nexthop_len);
break;
}
/* SNPA */
stream_putc(s, 0);
return sizep;
}
void bgp_packet_mpattr_prefix(struct stream *s, afi_t afi, safi_t safi,
struct prefix *p, struct prefix_rd *prd,
mpls_label_t *label, uint32_t num_labels,
int addpath_encode, uint32_t addpath_tx_id,
struct attr *attr)
{
if (safi == SAFI_MPLS_VPN) {
if (addpath_encode)
stream_putl(s, addpath_tx_id);
/* Label, RD, Prefix write. */
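/* prefixlen is in bits; prepend 24 bits of label plus 64 bits of
* Route Distinguisher = 88 bits of header. */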
stream_putc(s, p->prefixlen + 88);
stream_put(s, label, BGP_LABEL_BYTES);
stream_put(s, prd->val, 8);
stream_put(s, &p->u.prefix, PSIZE(p->prefixlen));
} else if (afi == AFI_L2VPN && safi == SAFI_EVPN) {
/* EVPN prefix - contents depend on type */
bgp_evpn_encode_prefix(s, p, prd, label, num_labels, attr,
addpath_encode, addpath_tx_id);
} else if (safi == SAFI_LABELED_UNICAST) {
/* Prefix write with label. */
stream_put_labeled_prefix(s, p, label);
} else if (safi == SAFI_FLOWSPEC) {
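/* Flowspec NLRI length is 1 octet if below the size limit
* (240 per RFC 5575), else 2 octets with the top nibble set
* to 0xf. */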
if (PSIZE (p->prefixlen)+2 < FLOWSPEC_NLRI_SIZELIMIT)
stream_putc(s, PSIZE (p->prefixlen)+2);
else
stream_putw(s, (PSIZE (p->prefixlen)+2)|(0xf<<12));
stream_putc(s, 2);/* Filter type */
stream_putc(s, p->prefixlen);/* Prefix length */
stream_put(s, &p->u.prefix, PSIZE (p->prefixlen));
} else
stream_put_prefix_addpath(s, p, addpath_encode, addpath_tx_id);
}
size_t bgp_packet_mpattr_prefix_size(afi_t afi, safi_t safi, struct prefix *p)
{
int size = PSIZE(p->prefixlen);
if (safi == SAFI_MPLS_VPN)
size += 88;
else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
size += 232; // TODO: Maximum possible for type-2, type-3 and
// type-5
return size;
}
/*
* Encodes the tunnel encapsulation attribute,
* and with ENABLE_BGP_VNC the VNC attribute which uses
* almost the same TLV format
*/
static void bgp_packet_mpattr_tea(struct bgp *bgp, struct peer *peer,
struct stream *s, struct attr *attr,
uint8_t attrtype)
{
unsigned int attrlenfield = 0;
unsigned int attrhdrlen = 0;
struct bgp_attr_encap_subtlv *subtlvs;
struct bgp_attr_encap_subtlv *st;
const char *attrname;
if (!attr || (attrtype == BGP_ATTR_ENCAP
&& (!attr->encap_tunneltype
|| attr->encap_tunneltype == BGP_ENCAP_TYPE_MPLS)))
return;
switch (attrtype) {
case BGP_ATTR_ENCAP:
attrname = "Tunnel Encap";
subtlvs = attr->encap_subtlvs;
if (subtlvs == NULL) /* nothing to do */
return;
/*
* The tunnel encap attr has an "outer" tlv.
* T = tunneltype,
* L = total length of subtlvs,
* V = concatenated subtlvs.
*/
attrlenfield = 2 + 2; /* T + L */
attrhdrlen = 1 + 1; /* subTLV T + L */
break;
#if ENABLE_BGP_VNC
case BGP_ATTR_VNC:
attrname = "VNC";
subtlvs = attr->vnc_subtlvs;
if (subtlvs == NULL) /* nothing to do */
return;
attrlenfield = 0; /* no outer T + L */
attrhdrlen = 2 + 2; /* subTLV T + L */
break;
#endif
default:
assert(0);
}
/* compute attr length */
for (st = subtlvs; st; st = st->next) {
attrlenfield += (attrhdrlen + st->length);
}
if (attrlenfield > 0xffff) {
zlog_info("%s attribute is too long (length=%d), can't send it",
attrname, attrlenfield);
return;
}
if (attrlenfield > 0xff) {
/* 2-octet length field */
stream_putc(s,
BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, attrtype);
stream_putw(s, attrlenfield & 0xffff);
} else {
/* 1-octet length field */
stream_putc(s, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL);
stream_putc(s, attrtype);
stream_putc(s, attrlenfield & 0xff);
}
if (attrtype == BGP_ATTR_ENCAP) {
/* write outer T+L */
stream_putw(s, attr->encap_tunneltype);
stream_putw(s, attrlenfield - 4);
}
/* write each sub-tlv */
for (st = subtlvs; st; st = st->next) {
if (attrtype == BGP_ATTR_ENCAP) {
stream_putc(s, st->type);
stream_putc(s, st->length);
#if ENABLE_BGP_VNC
} else {
stream_putw(s, st->type);
stream_putw(s, st->length);
#endif
}
stream_put(s, st->value, st->length);
}
}
void bgp_packet_mpattr_end(struct stream *s, size_t sizep)
{
/* Set MP attribute length. Don't count the (2) bytes used to encode
the attr length */
stream_putw_at(s, sizep, (stream_get_endp(s) - sizep) - 2);
}
/* Make attribute packet. */
bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,
struct stream *s, struct attr *attr,
struct bpacket_attr_vec_arr *vecarr,
struct prefix *p, afi_t afi, safi_t safi,
struct peer *from, struct prefix_rd *prd,
mpls_label_t *label, uint32_t num_labels,
int addpath_encode, uint32_t addpath_tx_id)
{
size_t cp;
size_t aspath_sizep;
struct aspath *aspath;
int send_as4_path = 0;
int send_as4_aggregator = 0;
int use32bit = (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV)) ? 1 : 0;
if (!bgp)
bgp = peer->bgp;
/* Remember current pointer. */
cp = stream_get_endp(s);
if (p
&& !((afi == AFI_IP && safi == SAFI_UNICAST)
&& !peer_cap_enhe(peer, afi, safi))) {
size_t mpattrlen_pos = 0;
mpattrlen_pos = bgp_packet_mpattr_start(s, peer, afi, safi,
vecarr, attr);
bgp_packet_mpattr_prefix(s, afi, safi, p, prd, label,
num_labels, addpath_encode,
addpath_tx_id, attr);
bgp_packet_mpattr_end(s, mpattrlen_pos);
}
/* Origin attribute. */
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_ORIGIN);
stream_putc(s, 1);
stream_putc(s, attr->origin);
/* AS path attribute. */
/* If remote-peer is EBGP */
if (peer->sort == BGP_PEER_EBGP
&& (!CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_AS_PATH_UNCHANGED)
|| attr->aspath->segments == NULL)
&& (!CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_RSERVER_CLIENT))) {
aspath = aspath_dup(attr->aspath);
/* Even though we may not be configured for confederations we
* may have RXed an AS_PATH with AS_CONFED_SEQUENCE or
* AS_CONFED_SET */
aspath = aspath_delete_confed_seq(aspath);
if (CHECK_FLAG(bgp->config, BGP_CONFIG_CONFEDERATION)) {
/* Stuff our path CONFED_ID on the front */
aspath = aspath_add_seq(aspath, bgp->confed_id);
} else {
if (peer->change_local_as) {
/* If replace-as is specified, we only use the
change_local_as when advertising routes. */
if (!CHECK_FLAG(
peer->flags,
PEER_FLAG_LOCAL_AS_REPLACE_AS)) {
aspath = aspath_add_seq(aspath,
peer->local_as);
}
aspath = aspath_add_seq(aspath,
peer->change_local_as);
} else {
aspath = aspath_add_seq(aspath, peer->local_as);
}
}
} else if (peer->sort == BGP_PEER_CONFED) {
/* A confed member, so we need to do the AS_CONFED_SEQUENCE
* thing */
aspath = aspath_dup(attr->aspath);
aspath = aspath_add_confed_seq(aspath, peer->local_as);
} else
aspath = attr->aspath;
/* If the peer is not AS4 capable, then:
* - send the created AS_PATH out as AS4_PATH (optional, transitive),
* but ensure that no AS_CONFED_SEQUENCE and AS_CONFED_SET path
* segment types are in it (i.e. exclude them if they are there),
* AND do this only if there is at least one ASN > 65535 in the
* path!
* - send an AS_PATH out, but put 16-bit ASNs in it, not 32-bit, and
* change all ASNs > 65535 to BGP_AS_TRANS
*/
stream_putc(s, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_AS_PATH);
aspath_sizep = stream_get_endp(s);
stream_putw(s, 0);
stream_putw_at(s, aspath_sizep, aspath_put(s, aspath, use32bit));
/* OLD session may need NEW_AS_PATH sent, if there are 4-byte ASNs
* in the path
*/
if (!use32bit && aspath_has_as4(aspath))
send_as4_path =
1; /* we'll do this later, at the correct place */
/* Nexthop attribute. */
if (afi == AFI_IP && safi == SAFI_UNICAST
&& !peer_cap_enhe(peer, afi, safi)) {
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_NEXT_HOP);
bpacket_attr_vec_arr_set_vec(vecarr, BGP_ATTR_VEC_NH, s,
attr);
stream_putc(s, 4);
stream_put_ipv4(s, attr->nexthop.s_addr);
} else if (peer_cap_enhe(from, afi, safi)) {
/*
 * Likely this is the case when an IPv4 prefix was received with
 * Extended Next-hop capability and is now being advertised to
 * non-ENHE peers.  Setting the mandatory (ipv4) next-hop attribute
 * here enables implicit next-hop self with the correct (ipv4)
 * address family.
 */
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_NEXT_HOP);
bpacket_attr_vec_arr_set_vec(vecarr, BGP_ATTR_VEC_NH, s,
NULL);
stream_putc(s, 4);
stream_put_ipv4(s, 0);
}
}
/* MED attribute. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)
|| bgp->maxmed_active) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
stream_putc(s, BGP_ATTR_MULTI_EXIT_DISC);
stream_putc(s, 4);
stream_putl(s, (bgp->maxmed_active ? bgp->maxmed_value
: attr->med));
}
/* Local preference. */
if (peer->sort == BGP_PEER_IBGP || peer->sort == BGP_PEER_CONFED) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_LOCAL_PREF);
stream_putc(s, 4);
stream_putl(s, attr->local_pref);
}
/* Atomic aggregate. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE)) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_ATOMIC_AGGREGATE);
stream_putc(s, 0);
}
/* Aggregator. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)) {
/* Common to BGP_ATTR_AGGREGATOR, regardless of ASN size */
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_AGGREGATOR);
if (use32bit) {
/* AS4 capable peer */
stream_putc(s, 8);
stream_putl(s, attr->aggregator_as);
} else {
/* 2-byte AS peer */
stream_putc(s, 6);
/* Is ASN representable in 2-bytes? Or must AS_TRANS be
* used? */
if (attr->aggregator_as > 65535) {
stream_putw(s, BGP_AS_TRANS);
/* we have to send AS4_AGGREGATOR too; we'll do
 * that later in order to send attributes in
 * ascending order.
 */
send_as4_aggregator = 1;
} else
stream_putw(s, (uint16_t)attr->aggregator_as);
}
stream_put_ipv4(s, attr->aggregator_addr.s_addr);
}
/* Community attribute. */
if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_COMMUNITY)
&& (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES))) {
if (attr->community->size * 4 > 255) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_COMMUNITIES);
stream_putw(s, attr->community->size * 4);
} else {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_COMMUNITIES);
stream_putc(s, attr->community->size * 4);
}
stream_put(s, attr->community->val, attr->community->size * 4);
}
/*
* Large Community attribute.
*/
if (CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_SEND_LARGE_COMMUNITY)
&& (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES))) {
if (lcom_length(attr->lcommunity) > 255) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_LARGE_COMMUNITIES);
stream_putw(s, lcom_length(attr->lcommunity));
} else {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_LARGE_COMMUNITIES);
stream_putc(s, lcom_length(attr->lcommunity));
}
stream_put(s, attr->lcommunity->val,
lcom_length(attr->lcommunity));
}
/* Route Reflector. */
if (peer->sort == BGP_PEER_IBGP && from
&& from->sort == BGP_PEER_IBGP) {
/* Originator ID. */
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
stream_putc(s, BGP_ATTR_ORIGINATOR_ID);
stream_putc(s, 4);
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID))
stream_put_in_addr(s, &attr->originator_id);
else
stream_put_in_addr(s, &from->remote_id);
/* Cluster list. */
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
stream_putc(s, BGP_ATTR_CLUSTER_LIST);
if (attr->cluster) {
stream_putc(s, attr->cluster->length + 4);
/* If this peer configuration's parent BGP has
* cluster_id. */
if (bgp->config & BGP_CONFIG_CLUSTER_ID)
stream_put_in_addr(s, &bgp->cluster_id);
else
stream_put_in_addr(s, &bgp->router_id);
stream_put(s, attr->cluster->list,
attr->cluster->length);
} else {
stream_putc(s, 4);
/* If this peer configuration's parent BGP has
* cluster_id. */
if (bgp->config & BGP_CONFIG_CLUSTER_ID)
stream_put_in_addr(s, &bgp->cluster_id);
else
stream_put_in_addr(s, &bgp->router_id);
}
}
/* Extended Communities attribute. */
if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_EXT_COMMUNITY)
&& (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) {
if (peer->sort == BGP_PEER_IBGP
|| peer->sort == BGP_PEER_CONFED) {
if (attr->ecommunity->size * 8 > 255) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_EXT_COMMUNITIES);
stream_putw(s, attr->ecommunity->size * 8);
} else {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_EXT_COMMUNITIES);
stream_putc(s, attr->ecommunity->size * 8);
}
stream_put(s, attr->ecommunity->val,
attr->ecommunity->size * 8);
} else {
uint8_t *pnt;
int tbit;
int ecom_tr_size = 0;
int i;
for (i = 0; i < attr->ecommunity->size; i++) {
pnt = attr->ecommunity->val + (i * 8);
tbit = *pnt;
if (CHECK_FLAG(tbit,
ECOMMUNITY_FLAG_NON_TRANSITIVE))
continue;
ecom_tr_size++;
}
if (ecom_tr_size) {
if (ecom_tr_size * 8 > 255) {
stream_putc(
s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s,
BGP_ATTR_EXT_COMMUNITIES);
stream_putw(s, ecom_tr_size * 8);
} else {
stream_putc(
s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s,
BGP_ATTR_EXT_COMMUNITIES);
stream_putc(s, ecom_tr_size * 8);
}
for (i = 0; i < attr->ecommunity->size; i++) {
pnt = attr->ecommunity->val + (i * 8);
tbit = *pnt;
if (CHECK_FLAG(
tbit,
ECOMMUNITY_FLAG_NON_TRANSITIVE))
continue;
stream_put(s, pnt, 8);
}
}
}
}
/* Label index attribute. */
if (safi == SAFI_LABELED_UNICAST) {
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID)) {
uint32_t label_index;
label_index = attr->label_index;
if (label_index != BGP_INVALID_LABEL_INDEX) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_PREFIX_SID);
stream_putc(s, 10);
stream_putc(s, BGP_PREFIX_SID_LABEL_INDEX);
stream_putw(s,
BGP_PREFIX_SID_LABEL_INDEX_LENGTH);
stream_putc(s, 0); // reserved
stream_putw(s, 0); // flags
stream_putl(s, label_index);
}
}
}
if (send_as4_path) {
/* If the peer is NOT AS4 capable AND there are ASnums > 65535
 * in the path, THEN give out AS4_PATH. */
/* Get rid of all AS_CONFED_SEQUENCE and AS_CONFED_SET
* path segments!
* Hm, I wonder... confederation things *should* only be at
* the beginning of an aspath, right? Then we should use
* aspath_delete_confed_seq for this, because it is already
* there! (JK)
* Folks, talk to me: what is reasonable here!?
*/
aspath = aspath_delete_confed_seq(aspath);
stream_putc(s,
BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_AS4_PATH);
aspath_sizep = stream_get_endp(s);
stream_putw(s, 0);
stream_putw_at(s, aspath_sizep, aspath_put(s, aspath, 1));
}
if (aspath != attr->aspath)
aspath_free(aspath);
if (send_as4_aggregator) {
/* Send AS4_AGGREGATOR at this place; this code was moved here
 * in order to ensure the correct *ascending* order of
 * attributes.
 */
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_AS4_AGGREGATOR);
stream_putc(s, 8);
stream_putl(s, attr->aggregator_as);
stream_put_ipv4(s, attr->aggregator_addr.s_addr);
}
if (((afi == AFI_IP || afi == AFI_IP6)
&& (safi == SAFI_ENCAP || safi == SAFI_MPLS_VPN))
|| (afi == AFI_L2VPN && safi == SAFI_EVPN)) {
/* Tunnel Encap attribute */
bgp_packet_mpattr_tea(bgp, peer, s, attr, BGP_ATTR_ENCAP);
#if ENABLE_BGP_VNC
/* VNC attribute */
bgp_packet_mpattr_tea(bgp, peer, s, attr, BGP_ATTR_VNC);
#endif
}
/* PMSI Tunnel */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL)) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_PMSI_TUNNEL);
stream_putc(s, 9); // Length
stream_putc(s, 0); // Flags
stream_putc(s, PMSI_TNLTYPE_INGR_REPL); // IR (6)
stream_put(s, &(attr->label),
BGP_LABEL_BYTES); // MPLS Label / VXLAN VNI
stream_put_ipv4(s, attr->nexthop.s_addr);
// Unicast tunnel endpoint IP address
}
/* Unknown transit attribute. */
if (attr->transit)
stream_put(s, attr->transit->val, attr->transit->length);
/* Return total size of attribute. */
return stream_get_endp(s) - cp;
}
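/*
 * A minimal sketch of the per-attribute header layout emitted throughout
 * bgp_packet_attribute() above: one flags octet, one type octet, then a
 * 1-octet length unless BGP_ATTR_FLAG_EXTLEN is set, in which case the
 * length takes 2 octets (RFC 4271, section 4.3); the helper name is
 * hypothetical.
 */
#if 0 /* illustration only */
static void example_put_attr_header(struct stream *s, uint8_t flags,
uint8_t type, uint16_t len)
{
stream_putc(s, flags);
stream_putc(s, type);
if (flags & BGP_ATTR_FLAG_EXTLEN)
stream_putw(s, len); /* 2-octet length */
else
stream_putc(s, len & 0xff); /* 1-octet length */
}
#endif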
size_t bgp_packet_mpunreach_start(struct stream *s, afi_t afi, safi_t safi)
{
unsigned long attrlen_pnt;
iana_afi_t pkt_afi;
iana_safi_t pkt_safi;
/* Set extended bit always to encode the attribute length as 2 bytes */
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_MP_UNREACH_NLRI);
attrlen_pnt = stream_get_endp(s);
stream_putw(s, 0); /* Length of this attribute. */
/* Convert AFI, SAFI to values for packet. */
bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi);
stream_putw(s, pkt_afi);
stream_putc(s, pkt_safi);
return attrlen_pnt;
}
void bgp_packet_mpunreach_prefix(struct stream *s, struct prefix *p, afi_t afi,
safi_t safi, struct prefix_rd *prd,
mpls_label_t *label, uint32_t num_labels,
int addpath_encode, uint32_t addpath_tx_id,
struct attr *attr)
{
uint8_t wlabel[3] = {0x80, 0x00, 0x00};
if (safi == SAFI_LABELED_UNICAST) {
label = (mpls_label_t *)wlabel;
num_labels = 1;
}
bgp_packet_mpattr_prefix(s, afi, safi, p, prd, label, num_labels,
addpath_encode, addpath_tx_id, attr);
}
void bgp_packet_mpunreach_end(struct stream *s, size_t attrlen_pnt)
{
bgp_packet_mpattr_end(s, attrlen_pnt);
}
/* Initialization of attribute. */
void bgp_attr_init(void)
{
aspath_init();
attrhash_init();
community_init();
ecommunity_init();
lcommunity_init();
cluster_init();
transit_init();
encap_init();
}
void bgp_attr_finish(void)
{
aspath_finish();
attrhash_finish();
community_finish();
ecommunity_finish();
lcommunity_finish();
cluster_finish();
transit_finish();
encap_finish();
}
/* Make attribute packet. */
void bgp_dump_routes_attr(struct stream *s, struct attr *attr,
struct prefix *prefix)
{
unsigned long cp;
unsigned long len;
size_t aspath_lenp;
struct aspath *aspath;
int addpath_encode = 0;
uint32_t addpath_tx_id = 0;
/* Remember current pointer. */
cp = stream_get_endp(s);
/* Place holder of length. */
stream_putw(s, 0);
/* Origin attribute. */
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_ORIGIN);
stream_putc(s, 1);
stream_putc(s, attr->origin);
aspath = attr->aspath;
stream_putc(s, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_AS_PATH);
aspath_lenp = stream_get_endp(s);
stream_putw(s, 0);
stream_putw_at(s, aspath_lenp, aspath_put(s, aspath, 1));
/* Nexthop attribute. */
/* If it's an IPv6 prefix, don't dump the IPv4 nexthop to save space */
if (prefix != NULL && prefix->family != AF_INET6) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_NEXT_HOP);
stream_putc(s, 4);
stream_put_ipv4(s, attr->nexthop.s_addr);
}
/* MED attribute. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
stream_putc(s, BGP_ATTR_MULTI_EXIT_DISC);
stream_putc(s, 4);
stream_putl(s, attr->med);
}
/* Local preference. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_LOCAL_PREF);
stream_putc(s, 4);
stream_putl(s, attr->local_pref);
}
/* Atomic aggregate. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE)) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_ATOMIC_AGGREGATE);
stream_putc(s, 0);
}
/* Aggregator. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_AGGREGATOR);
stream_putc(s, 8);
stream_putl(s, attr->aggregator_as);
stream_put_ipv4(s, attr->aggregator_addr.s_addr);
}
/* Community attribute. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES)) {
if (attr->community->size * 4 > 255) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_COMMUNITIES);
stream_putw(s, attr->community->size * 4);
} else {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_COMMUNITIES);
stream_putc(s, attr->community->size * 4);
}
stream_put(s, attr->community->val, attr->community->size * 4);
}
/* Large Community attribute. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES)) {
if (lcom_length(attr->lcommunity) > 255) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_LARGE_COMMUNITIES);
stream_putw(s, lcom_length(attr->lcommunity));
} else {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_LARGE_COMMUNITIES);
stream_putc(s, lcom_length(attr->lcommunity));
}
stream_put(s, attr->lcommunity->val,
lcom_length(attr->lcommunity));
}
/* Add a MP_NLRI attribute to dump the IPv6 next hop */
if (prefix != NULL && prefix->family == AF_INET6
&& (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL
|| attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)) {
int sizep;
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
stream_putc(s, BGP_ATTR_MP_REACH_NLRI);
sizep = stream_get_endp(s);
/* MP header */
stream_putc(s, 0); /* Marker: Attribute length. */
stream_putw(s, AFI_IP6); /* AFI */
stream_putc(s, SAFI_UNICAST); /* SAFI */
/* Next hop */
stream_putc(s, attr->mp_nexthop_len);
stream_put(s, &attr->mp_nexthop_global, IPV6_MAX_BYTELEN);
if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
stream_put(s, &attr->mp_nexthop_local,
IPV6_MAX_BYTELEN);
/* SNPA */
stream_putc(s, 0);
/* Prefix */
stream_put_prefix_addpath(s, prefix, addpath_encode,
addpath_tx_id);
/* Set MP attribute length. */
stream_putc_at(s, sizep, (stream_get_endp(s) - sizep) - 1);
}
/* Prefix SID */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID)) {
if (attr->label_index != BGP_INVALID_LABEL_INDEX) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_PREFIX_SID);
stream_putc(s, 10);
stream_putc(s, BGP_PREFIX_SID_LABEL_INDEX);
stream_putc(s, BGP_PREFIX_SID_LABEL_INDEX_LENGTH);
stream_putc(s, 0); // reserved
stream_putw(s, 0); // flags
stream_putl(s, attr->label_index);
}
}
/* Return total size of attribute. */
len = stream_get_endp(s) - cp - 2;
stream_putw_at(s, cp, len);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_1399_0 |
crossvul-cpp_data_bad_2740_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% JJJ PPPP 222 %
% J P P 2 2 %
% J PPPP 22 %
% J J P 2 %
% JJ P 22222 %
% %
% %
% Read/Write JPEG-2000 Image Format %
% %
% Cristy %
% Nathan Brown %
% June 2001 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/semaphore.h"
#include "magick/static.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
#include <openjpeg.h>
#endif
/*
Forward declarations.
*/
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
static MagickBooleanType
WriteJP2Image(const ImageInfo *,Image *);
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s J 2 K %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsJ2K() returns MagickTrue if the image format type, identified by the
% magick string, is J2K.
%
% The format of the IsJ2K method is:
%
% MagickBooleanType IsJ2K(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsJ2K(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (memcmp(magick,"\xff\x4f\xff\x51",4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s J P 2 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsJP2() returns MagickTrue if the image format type, identified by the
% magick string, is JP2.
%
% The format of the IsJP2 method is:
%
% MagickBooleanType IsJP2(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsJP2(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (memcmp(magick,"\x0d\x0a\x87\x0a",4) == 0)
return(MagickTrue);
if (length < 12)
return(MagickFalse);
if (memcmp(magick,"\x00\x00\x00\x0c\x6a\x50\x20\x20\x0d\x0a\x87\x0a",12) == 0)
return(MagickTrue);
return(MagickFalse);
}
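/*
 * A usage sketch, assuming a hypothetical header buffer: the two checks
 * above distinguish a raw codestream (SOC marker ff 4f ff 51) from a JP2
 * container (12-byte signature box starting 00 00 00 0c 6a 50 20 20).
 */
#if 0 /* illustration only */
static const char *example_classify(const unsigned char *header,
const size_t length)
{
if (IsJ2K(header,length) != MagickFalse)
return("J2K codestream");
if (IsJP2(header,length) != MagickFalse)
return("JP2 container");
return("not JPEG-2000");
}
#endif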
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d J P 2 I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadJP2Image() reads a JPEG 2000 Image file (JP2) or JPEG 2000
% codestream (JPC) image file and returns it. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image or set of images.
%
% JP2 support was originally written by Nathan Brown, nathanbrown@letu.edu.
%
% The format of the ReadJP2Image method is:
%
% Image *ReadJP2Image(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
static void JP2ErrorHandler(const char *message,void *client_data)
{
ExceptionInfo
*exception;
exception=(ExceptionInfo *) client_data;
(void) ThrowMagickException(exception,GetMagickModule(),CoderError,
message,"`%s'","OpenJP2");
}
static OPJ_SIZE_T JP2ReadHandler(void *buffer,OPJ_SIZE_T length,void *context)
{
Image
*image;
ssize_t
count;
image=(Image *) context;
count=ReadBlob(image,(ssize_t) length,(unsigned char *) buffer);
if (count == 0)
return((OPJ_SIZE_T) -1);
return((OPJ_SIZE_T) count);
}
static OPJ_BOOL JP2SeekHandler(OPJ_OFF_T offset,void *context)
{
Image
*image;
image=(Image *) context;
return(SeekBlob(image,offset,SEEK_SET) < 0 ? OPJ_FALSE : OPJ_TRUE);
}
static OPJ_OFF_T JP2SkipHandler(OPJ_OFF_T offset,void *context)
{
Image
*image;
image=(Image *) context;
return(SeekBlob(image,offset,SEEK_CUR) < 0 ? -1 : offset);
}
static void JP2WarningHandler(const char *message,void *client_data)
{
ExceptionInfo
*exception;
exception=(ExceptionInfo *) client_data;
(void) ThrowMagickException(exception,GetMagickModule(),CoderWarning,
message,"`%s'","OpenJP2");
}
static OPJ_SIZE_T JP2WriteHandler(void *buffer,OPJ_SIZE_T length,void *context)
{
Image
*image;
ssize_t
count;
image=(Image *) context;
count=WriteBlob(image,(ssize_t) length,(unsigned char *) buffer);
return((OPJ_SIZE_T) count);
}
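/*
 * A minimal sketch of wiring the five handlers above into an OpenJPEG
 * stream, as ReadJP2Image() and WriteJP2Image() below do; the helper
 * name is hypothetical.
 */
#if 0 /* illustration only */
static opj_stream_t *example_make_stream(Image *image,OPJ_BOOL is_input)
{
opj_stream_t
*stream;

stream=opj_stream_create(OPJ_J2K_STREAM_CHUNK_SIZE,is_input);
if (stream == (opj_stream_t *) NULL)
return((opj_stream_t *) NULL);
opj_stream_set_read_function(stream,JP2ReadHandler);
opj_stream_set_write_function(stream,JP2WriteHandler);
opj_stream_set_seek_function(stream,JP2SeekHandler);
opj_stream_set_skip_function(stream,JP2SkipHandler);
opj_stream_set_user_data(stream,image,NULL);
return(stream);
}
#endif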
static Image *ReadJP2Image(const ImageInfo *image_info,ExceptionInfo *exception)
{
const char
*option;
Image
*image;
int
jp2_status;
MagickBooleanType
status;
opj_codec_t
*jp2_codec;
opj_codestream_index_t
*codestream_index = (opj_codestream_index_t *) NULL;
opj_dparameters_t
parameters;
opj_image_t
*jp2_image;
opj_stream_t
*jp2_stream;
register ssize_t
i;
ssize_t
y;
unsigned char
sans[4];
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize JP2 codec.
*/
if (ReadBlob(image,4,sans) != 4)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) SeekBlob(image,0,SEEK_SET);
if (LocaleCompare(image_info->magick,"JPT") == 0)
jp2_codec=opj_create_decompress(OPJ_CODEC_JPT);
else
if (IsJ2K(sans,4) != MagickFalse)
jp2_codec=opj_create_decompress(OPJ_CODEC_J2K);
else
jp2_codec=opj_create_decompress(OPJ_CODEC_JP2);
opj_set_warning_handler(jp2_codec,JP2WarningHandler,exception);
opj_set_error_handler(jp2_codec,JP2ErrorHandler,exception);
opj_set_default_decoder_parameters(¶meters);
option=GetImageOption(image_info,"jp2:reduce-factor");
if (option != (const char *) NULL)
parameters.cp_reduce=StringToInteger(option);
option=GetImageOption(image_info,"jp2:quality-layers");
if (option == (const char *) NULL)
option=GetImageOption(image_info,"jp2:layer-number");
if (option != (const char *) NULL)
parameters.cp_layer=StringToInteger(option);
if (opj_setup_decoder(jp2_codec,¶meters) == 0)
{
opj_destroy_codec(jp2_codec);
ThrowReaderException(DelegateError,"UnableToManageJP2Stream");
}
jp2_stream=opj_stream_create(OPJ_J2K_STREAM_CHUNK_SIZE,OPJ_TRUE);
opj_stream_set_read_function(jp2_stream,JP2ReadHandler);
opj_stream_set_write_function(jp2_stream,JP2WriteHandler);
opj_stream_set_seek_function(jp2_stream,JP2SeekHandler);
opj_stream_set_skip_function(jp2_stream,JP2SkipHandler);
opj_stream_set_user_data(jp2_stream,image,NULL);
opj_stream_set_user_data_length(jp2_stream,GetBlobSize(image));
if (opj_read_header(jp2_stream,jp2_codec,&jp2_image) == 0)
{
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
ThrowReaderException(DelegateError,"UnableToDecodeImageFile");
}
jp2_status=1;
if ((image->columns != 0) && (image->rows != 0))
{
/*
Extract an area from the image.
*/
jp2_status=opj_set_decode_area(jp2_codec,jp2_image,
(OPJ_INT32) image->extract_info.x,(OPJ_INT32) image->extract_info.y,
(OPJ_INT32) image->extract_info.x+(ssize_t) image->columns,
(OPJ_INT32) image->extract_info.y+(ssize_t) image->rows);
if (jp2_status == 0)
{
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
ThrowReaderException(DelegateError,"UnableToDecodeImageFile");
}
}
if ((image_info->number_scenes != 0) && (image_info->scene != 0))
jp2_status=opj_get_decoded_tile(jp2_codec,jp2_stream,jp2_image,
(unsigned int) image_info->scene-1);
else
if (image->ping == MagickFalse)
{
jp2_status=opj_decode(jp2_codec,jp2_stream,jp2_image);
if (jp2_status != 0)
jp2_status=opj_end_decompress(jp2_codec,jp2_stream);
}
if (jp2_status == 0)
{
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
ThrowReaderException(DelegateError,"UnableToDecodeImageFile");
}
opj_stream_destroy(jp2_stream);
for (i=0; i < (ssize_t) jp2_image->numcomps; i++)
{
if ((jp2_image->comps[0].dx == 0) || (jp2_image->comps[0].dy == 0) ||
(jp2_image->comps[0].dx != jp2_image->comps[i].dx) ||
(jp2_image->comps[0].dy != jp2_image->comps[i].dy) ||
(jp2_image->comps[0].prec != jp2_image->comps[i].prec) ||
(jp2_image->comps[0].sgnd != jp2_image->comps[i].sgnd) ||
(jp2_image->comps[i].data == NULL))
{
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
ThrowReaderException(CoderError,"IrregularChannelGeometryNotSupported")
}
}
/*
Convert JP2 image.
*/
image->columns=(size_t) jp2_image->comps[0].w;
image->rows=(size_t) jp2_image->comps[0].h;
image->depth=jp2_image->comps[0].prec;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
image->compression=JPEG2000Compression;
if (jp2_image->color_space == 2) /* OPJ_CLRSPC_GRAY */
{
SetImageColorspace(image,GRAYColorspace);
if (jp2_image->numcomps > 1)
image->matte=MagickTrue;
}
else
if (jp2_image->color_space == 3) /* OPJ_CLRSPC_SYCC */
SetImageColorspace(image,Rec601YCbCrColorspace);
if (jp2_image->numcomps > 3)
image->matte=MagickTrue;
if (jp2_image->icc_profile_buf != (unsigned char *) NULL)
{
StringInfo
*profile;
profile=BlobToStringInfo(jp2_image->icc_profile_buf,
jp2_image->icc_profile_len);
if (profile != (StringInfo *) NULL)
SetImageProfile(image,"icc",profile);
}
if (image->ping != MagickFalse)
{
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
opj_destroy_cstr_index(&codestream_index);
return(GetFirstImageInList(image));
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) jp2_image->numcomps; i++)
{
double
pixel,
scale;
scale=QuantumRange/(double) ((1UL << jp2_image->comps[i].prec)-1);
pixel=scale*(jp2_image->comps[i].data[y/jp2_image->comps[i].dy*
image->columns/jp2_image->comps[i].dx+x/jp2_image->comps[i].dx]+
(jp2_image->comps[i].sgnd ? 1UL << (jp2_image->comps[i].prec-1) : 0));
switch (i)
{
case 0:
{
q->red=ClampToQuantum(pixel);
q->green=q->red;
q->blue=q->red;
q->opacity=OpaqueOpacity;
break;
}
case 1:
{
if (jp2_image->numcomps == 2)
{
q->opacity=ClampToQuantum(QuantumRange-pixel);
break;
}
q->green=ClampToQuantum(pixel);
break;
}
case 2:
{
q->blue=ClampToQuantum(pixel);
break;
}
case 3:
{
q->opacity=ClampToQuantum(QuantumRange-pixel);
break;
}
}
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
/*
Free resources.
*/
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
opj_destroy_cstr_index(&codestream_index);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
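/*
 * A minimal sketch of the per-sample conversion inside the decode loop
 * above, assuming prec is smaller than the width of unsigned long: a
 * prec-bit sample, biased by 2^(prec-1) when signed, is scaled linearly
 * onto [0,QuantumRange].
 */
#if 0 /* illustration only */
static Quantum example_jp2_sample_to_quantum(int sample,
const unsigned int prec,const unsigned int sgnd)
{
double
scale;

scale=QuantumRange/(double) ((1UL << prec)-1);
if (sgnd != 0)
sample+=(int) (1UL << (prec-1)); /* remove the sign bias */
return(ClampToQuantum(scale*sample));
}
#endif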
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r J P 2 I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterJP2Image() adds attributes for the JP2 image format to the list of
% supported formats. The attributes include the image format tag, a
% method to read and/or write the format, whether the format supports the
% saving of more than one frame to the same file or blob, whether the format
% supports native in-memory I/O, and a brief description of the format.
%
% The format of the RegisterJP2Image method is:
%
% size_t RegisterJP2Image(void)
%
*/
ModuleExport size_t RegisterJP2Image(void)
{
char
version[MaxTextExtent];
MagickInfo
*entry;
*version='\0';
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
(void) FormatLocaleString(version,MaxTextExtent,"%s",opj_version());
#endif
entry=SetMagickInfo("JP2");
entry->description=ConstantString("JPEG-2000 File Format Syntax");
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jp2");
entry->module=ConstantString("JP2");
entry->magick=(IsImageFormatHandler *) IsJP2;
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJP2Image;
entry->encoder=(EncodeImageHandler *) WriteJP2Image;
#endif
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("J2C");
entry->description=ConstantString("JPEG-2000 Code Stream Syntax");
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jp2");
entry->module=ConstantString("JP2");
entry->magick=(IsImageFormatHandler *) IsJ2K;
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJP2Image;
entry->encoder=(EncodeImageHandler *) WriteJP2Image;
#endif
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("J2K");
entry->description=ConstantString("JPEG-2000 Code Stream Syntax");
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jp2");
entry->module=ConstantString("JP2");
entry->magick=(IsImageFormatHandler *) IsJ2K;
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJP2Image;
entry->encoder=(EncodeImageHandler *) WriteJP2Image;
#endif
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("JPM");
entry->description=ConstantString("JPEG-2000 Code Stream Syntax");
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jp2");
entry->module=ConstantString("JP2");
entry->magick=(IsImageFormatHandler *) IsJP2;
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJP2Image;
entry->encoder=(EncodeImageHandler *) WriteJP2Image;
#endif
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("JPT");
entry->description=ConstantString("JPEG-2000 File Format Syntax");
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jp2");
entry->module=ConstantString("JP2");
entry->magick=(IsImageFormatHandler *) IsJP2;
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJP2Image;
entry->encoder=(EncodeImageHandler *) WriteJP2Image;
#endif
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("JPC");
entry->description=ConstantString("JPEG-2000 Code Stream Syntax");
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jp2");
entry->module=ConstantString("JP2");
entry->magick=(IsImageFormatHandler *) IsJP2;
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJP2Image;
entry->encoder=(EncodeImageHandler *) WriteJP2Image;
#endif
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r J P 2 I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterJP2Image() removes format registrations made by the JP2 module
% from the list of supported formats.
%
% The format of the UnregisterJP2Image method is:
%
% UnregisterJP2Image(void)
%
*/
ModuleExport void UnregisterJP2Image(void)
{
(void) UnregisterMagickInfo("JPC");
(void) UnregisterMagickInfo("JPT");
(void) UnregisterMagickInfo("JPM");
(void) UnregisterMagickInfo("JP2");
(void) UnregisterMagickInfo("J2K");
}
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e J P 2 I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteJP2Image() writes an image in the JPEG 2000 image format.
%
% JP2 support originally written by Nathan Brown, nathanbrown@letu.edu
%
% The format of the WriteJP2Image method is:
%
% MagickBooleanType WriteJP2Image(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static void CinemaProfileCompliance(const opj_image_t *jp2_image,
opj_cparameters_t *parameters)
{
/*
Digital Cinema 4K profile compliant codestream.
*/
parameters->tile_size_on=OPJ_FALSE;
parameters->cp_tdx=1;
parameters->cp_tdy=1;
parameters->tp_flag='C';
parameters->tp_on=1;
parameters->cp_tx0=0;
parameters->cp_ty0=0;
parameters->image_offset_x0=0;
parameters->image_offset_y0=0;
parameters->cblockw_init=32;
parameters->cblockh_init=32;
parameters->csty|=0x01;
parameters->prog_order=OPJ_CPRL;
parameters->roi_compno=(-1);
parameters->subsampling_dx=1;
parameters->subsampling_dy=1;
parameters->irreversible=1;
if ((jp2_image->comps[0].w == 2048) || (jp2_image->comps[0].h == 1080))
{
/*
Digital Cinema 2K.
*/
parameters->cp_cinema=OPJ_CINEMA2K_24;
parameters->cp_rsiz=OPJ_CINEMA2K;
parameters->max_comp_size=1041666;
if (parameters->numresolution > 6)
parameters->numresolution=6;
}
if ((jp2_image->comps[0].w == 4096) || (jp2_image->comps[0].h == 2160))
{
/*
Digital Cinema 4K.
*/
parameters->cp_cinema=OPJ_CINEMA4K_24;
parameters->cp_rsiz=OPJ_CINEMA4K;
parameters->max_comp_size=1041666;
if (parameters->numresolution < 1)
parameters->numresolution=1;
if (parameters->numresolution > 7)
parameters->numresolution=7;
parameters->numpocs=2;
parameters->POC[0].tile=1;
parameters->POC[0].resno0=0;
parameters->POC[0].compno0=0;
parameters->POC[0].layno1=1;
parameters->POC[0].resno1=parameters->numresolution-1;
parameters->POC[0].compno1=3;
parameters->POC[0].prg1=OPJ_CPRL;
parameters->POC[1].tile=1;
parameters->POC[1].resno0=parameters->numresolution-1;
parameters->POC[1].compno0=0;
parameters->POC[1].layno1=1;
parameters->POC[1].resno1=parameters->numresolution;
parameters->POC[1].compno1=3;
parameters->POC[1].prg1=OPJ_CPRL;
}
parameters->tcp_numlayers=1;
parameters->tcp_rates[0]=((float) (jp2_image->numcomps*jp2_image->comps[0].w*
jp2_image->comps[0].h*jp2_image->comps[0].prec))/(parameters->max_comp_size*
8*jp2_image->comps[0].dx*jp2_image->comps[0].dy);
parameters->cp_disto_alloc=1;
}
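/*
 * Worked example for the rate set above, assuming dx == dy == 1 as this
 * function configures: a 2K 12-bit RGB frame has 3*2048*1080*12 =
 * 79,626,240 raw bits, while the DCI budget is max_comp_size*8 =
 * 1,041,666*8 = 8,333,328 bits per frame, giving tcp_rates[0] of about
 * 9.6, i.e. roughly 9.6:1 compression.
 */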
static MagickBooleanType WriteJP2Image(const ImageInfo *image_info,Image *image)
{
const char
*option,
*property;
int
jp2_status;
MagickBooleanType
status;
opj_codec_t
*jp2_codec;
OPJ_COLOR_SPACE
jp2_colorspace;
opj_cparameters_t
parameters;
opj_image_cmptparm_t
jp2_info[5];
opj_image_t
*jp2_image;
opj_stream_t
*jp2_stream;
register ssize_t
i;
ssize_t
y;
unsigned int
channels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
/*
Initialize JPEG 2000 encoder parameters.
*/
opj_set_default_encoder_parameters(¶meters);
for (i=1; i < 6; i++)
if (((size_t) (1 << (i+2)) > image->columns) &&
((size_t) (1 << (i+2)) > image->rows))
break;
parameters.numresolution=i;
option=GetImageOption(image_info,"jp2:number-resolutions");
if (option != (const char *) NULL)
parameters.numresolution=StringToInteger(option);
parameters.tcp_numlayers=1;
parameters.tcp_rates[0]=0; /* lossless */
parameters.cp_disto_alloc=1;
if ((image_info->quality != 0) && (image_info->quality != 100))
{
parameters.tcp_distoratio[0]=(double) image_info->quality;
parameters.cp_fixed_quality=OPJ_TRUE;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
int
flags;
/*
Set tile size.
*/
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
parameters.cp_tdx=(int) geometry.width;
parameters.cp_tdy=(int) geometry.width;
if ((flags & HeightValue) != 0)
parameters.cp_tdy=(int) geometry.height;
if ((flags & XValue) != 0)
parameters.cp_tx0=geometry.x;
if ((flags & YValue) != 0)
parameters.cp_ty0=geometry.y;
parameters.tile_size_on=OPJ_TRUE;
}
option=GetImageOption(image_info,"jp2:quality");
if (option != (const char *) NULL)
{
register const char
*p;
/*
Set quality PSNR.
*/
p=option;
for (i=0; sscanf(p,"%f",¶meters.tcp_distoratio[i]) == 1; i++)
{
if (i >= 99)
break; /* tcp_distoratio[] holds at most 100 layers */
while ((*p != '\0') && (*p != ','))
p++;
if (*p == '\0')
break;
p++;
}
parameters.tcp_numlayers=i+1;
parameters.cp_fixed_quality=OPJ_TRUE;
}
option=GetImageOption(image_info,"jp2:progression-order");
if (option != (const char *) NULL)
{
if (LocaleCompare(option,"LRCP") == 0)
parameters.prog_order=OPJ_LRCP;
if (LocaleCompare(option,"RLCP") == 0)
parameters.prog_order=OPJ_RLCP;
if (LocaleCompare(option,"RPCL") == 0)
parameters.prog_order=OPJ_RPCL;
if (LocaleCompare(option,"PCRL") == 0)
parameters.prog_order=OPJ_PCRL;
if (LocaleCompare(option,"CPRL") == 0)
parameters.prog_order=OPJ_CPRL;
}
option=GetImageOption(image_info,"jp2:rate");
if (option != (const char *) NULL)
{
register const char
*p;
/*
Set compression rate.
*/
p=option;
for (i=0; sscanf(p,"%f",¶meters.tcp_rates[i]) == 1; i++)
{
if (i >= 99)
break; /* tcp_rates[] holds at most 100 layers */
while ((*p != '\0') && (*p != ','))
p++;
if (*p == '\0')
break;
p++;
}
parameters.tcp_numlayers=i+1;
parameters.cp_disto_alloc=OPJ_TRUE;
}
if (image_info->sampling_factor != (const char *) NULL)
(void) sscanf(image_info->sampling_factor,"%d,%d",
¶meters.subsampling_dx,¶meters.subsampling_dy);
property=GetImageProperty(image,"comment");
if (property != (const char *) NULL)
parameters.cp_comment=ConstantString(property);
channels=3;
jp2_colorspace=OPJ_CLRSPC_SRGB;
if (image->colorspace == YUVColorspace)
{
jp2_colorspace=OPJ_CLRSPC_SYCC;
parameters.subsampling_dx=2;
}
else
{
if (IsGrayColorspace(image->colorspace) != MagickFalse)
{
channels=1;
jp2_colorspace=OPJ_CLRSPC_GRAY;
}
else
(void) TransformImageColorspace(image,sRGBColorspace);
if (image->matte != MagickFalse)
channels++;
}
parameters.tcp_mct=channels == 3 ? 1 : 0;
ResetMagickMemory(jp2_info,0,sizeof(jp2_info));
for (i=0; i < (ssize_t) channels; i++)
{
jp2_info[i].prec=(unsigned int) image->depth;
jp2_info[i].bpp=(unsigned int) image->depth;
if ((image->depth == 1) &&
((LocaleCompare(image_info->magick,"JPT") == 0) ||
(LocaleCompare(image_info->magick,"JP2") == 0)))
{
jp2_info[i].prec++; /* OpenJPEG returns exception for depth @ 1 */
jp2_info[i].bpp++;
}
jp2_info[i].sgnd=0;
jp2_info[i].dx=parameters.subsampling_dx;
jp2_info[i].dy=parameters.subsampling_dy;
jp2_info[i].w=(unsigned int) image->columns;
jp2_info[i].h=(unsigned int) image->rows;
}
jp2_image=opj_image_create(channels,jp2_info,jp2_colorspace);
if (jp2_image == (opj_image_t *) NULL)
ThrowWriterException(DelegateError,"UnableToEncodeImageFile");
jp2_image->x0=parameters.image_offset_x0;
jp2_image->y0=parameters.image_offset_y0;
jp2_image->x1=(unsigned int) (2*parameters.image_offset_x0+(image->columns-1)*
parameters.subsampling_dx+1);
jp2_image->y1=(unsigned int) (2*parameters.image_offset_y0+(image->rows-1)*
parameters.subsampling_dy+1);
if ((image->depth == 12) &&
((image->columns == 2048) || (image->rows == 1080) ||
(image->columns == 4096) || (image->rows == 2160)))
CinemaProfileCompliance(jp2_image,¶meters);
if (channels == 4)
jp2_image->comps[3].alpha=1;
else
if ((channels == 2) && (jp2_colorspace == OPJ_CLRSPC_GRAY))
jp2_image->comps[1].alpha=1;
/*
Convert to JP2 pixels.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) channels; i++)
{
double
scale;
register int
*q;
scale=(double) ((1UL << jp2_image->comps[i].prec)-1)/QuantumRange;
q=jp2_image->comps[i].data+(y/jp2_image->comps[i].dy*
image->columns/jp2_image->comps[i].dx+x/jp2_image->comps[i].dx);
switch (i)
{
case 0:
{
if (jp2_colorspace == OPJ_CLRSPC_GRAY)
{
*q=(int) (scale*GetPixelLuma(image,p));
break;
}
*q=(int) (scale*p->red);
break;
}
case 1:
{
if (jp2_colorspace == OPJ_CLRSPC_GRAY)
{
*q=(int) (scale*(QuantumRange-p->opacity));
break;
}
*q=(int) (scale*p->green);
break;
}
case 2:
{
*q=(int) (scale*p->blue);
break;
}
case 3:
{
*q=(int) (scale*(QuantumRange-p->opacity));
break;
}
}
}
p++;
}
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
if (LocaleCompare(image_info->magick,"JPT") == 0)
jp2_codec=opj_create_compress(OPJ_CODEC_JPT);
else
if (LocaleCompare(image_info->magick,"J2K") == 0)
jp2_codec=opj_create_compress(OPJ_CODEC_J2K);
else
jp2_codec=opj_create_compress(OPJ_CODEC_JP2);
opj_set_warning_handler(jp2_codec,JP2WarningHandler,&image->exception);
opj_set_error_handler(jp2_codec,JP2ErrorHandler,&image->exception);
opj_setup_encoder(jp2_codec,¶meters,jp2_image);
jp2_stream=opj_stream_create(OPJ_J2K_STREAM_CHUNK_SIZE,OPJ_FALSE);
if (jp2_stream == (opj_stream_t *) NULL)
ThrowWriterException(DelegateError,"UnableToEncodeImageFile");
opj_stream_set_read_function(jp2_stream,JP2ReadHandler);
opj_stream_set_write_function(jp2_stream,JP2WriteHandler);
opj_stream_set_seek_function(jp2_stream,JP2SeekHandler);
opj_stream_set_skip_function(jp2_stream,JP2SkipHandler);
opj_stream_set_user_data(jp2_stream,image,NULL);
jp2_status=opj_start_compress(jp2_codec,jp2_image,jp2_stream);
if (jp2_status == 0)
ThrowWriterException(DelegateError,"UnableToEncodeImageFile");
if ((opj_encode(jp2_codec,jp2_stream) == 0) ||
(opj_end_compress(jp2_codec,jp2_stream) == 0))
{
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
ThrowWriterException(DelegateError,"UnableToEncodeImageFile");
}
/*
Free resources.
*/
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
(void) CloseBlob(image);
return(MagickTrue);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_2740_0 |
crossvul-cpp_data_good_3979_0 | /* $OpenBSD: scp.c,v 1.209 2020/05/01 06:31:42 djm Exp $ */
/*
* scp - secure remote copy. This is basically patched BSD rcp which
* uses ssh to do the data transfer (instead of using rcmd).
*
* NOTE: This version should NOT be suid root. (This uses ssh to
* do the transfer and ssh has the necessary privileges.)
*
* 1995 Timo Rinne <tri@iki.fi>, Tatu Ylonen <ylo@cs.hut.fi>
*
* As far as I am concerned, the code I have written for this software
* can be used freely for any purpose. Any derived versions of this
* software must be clearly marked as such, and if the derived work is
* incompatible with the protocol description in the RFC file, it must be
* called by a name other than "ssh" or "Secure Shell".
*/
/*
* Copyright (c) 1999 Theo de Raadt. All rights reserved.
* Copyright (c) 1999 Aaron Campbell. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Parts from:
*
* Copyright (c) 1983, 1990, 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "includes.h"
#include <sys/types.h>
#ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#else
# ifdef HAVE_SYS_POLL_H
# include <sys/poll.h>
# endif
#endif
#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif
#include <sys/wait.h>
#include <sys/uio.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#ifdef HAVE_FNMATCH_H
#include <fnmatch.h>
#endif
#include <limits.h>
#include <locale.h>
#include <pwd.h>
#include <signal.h>
#include <stdarg.h>
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#if defined(HAVE_STRNVIS) && defined(HAVE_VIS_H) && !defined(BROKEN_STRNVIS)
#include <vis.h>
#endif
#include "xmalloc.h"
#include "ssh.h"
#include "atomicio.h"
#include "pathnames.h"
#include "log.h"
#include "misc.h"
#include "progressmeter.h"
#include "utf8.h"
extern char *__progname;
#define COPY_BUFLEN 16384
int do_cmd(char *host, char *remuser, int port, char *cmd, int *fdin, int *fdout);
int do_cmd2(char *host, char *remuser, int port, char *cmd, int fdin, int fdout);
/* Struct for addargs */
arglist args;
arglist remote_remote_args;
/* Bandwidth limit */
long long limit_kbps = 0;
struct bwlimit bwlimit;
/* Name of current file being transferred. */
char *curfile;
/* This is set to non-zero to enable verbose mode. */
int verbose_mode = 0;
/* This is set to zero if the progressmeter is not desired. */
int showprogress = 1;
/*
* This is set to non-zero if remote-remote copy should be piped
* through this process.
*/
int throughlocal = 0;
/* Non-standard port to use for the ssh connection or -1. */
int sshport = -1;
/* This is the program to execute for the secured connection. ("ssh" or -S) */
char *ssh_program = _PATH_SSH_PROGRAM;
/* This is used to store the pid of ssh_program */
pid_t do_cmd_pid = -1;
static void
killchild(int signo)
{
if (do_cmd_pid > 1) {
kill(do_cmd_pid, signo ? signo : SIGTERM);
waitpid(do_cmd_pid, NULL, 0);
}
if (signo)
_exit(1);
exit(1);
}
static void
suspchild(int signo)
{
int status;
if (do_cmd_pid > 1) {
kill(do_cmd_pid, signo);
while (waitpid(do_cmd_pid, &status, WUNTRACED) == -1 &&
errno == EINTR)
;
kill(getpid(), SIGSTOP);
}
}
static int
do_local_cmd(arglist *a)
{
u_int i;
int status;
pid_t pid;
if (a->num == 0)
fatal("do_local_cmd: no arguments");
if (verbose_mode) {
fprintf(stderr, "Executing:");
for (i = 0; i < a->num; i++)
fmprintf(stderr, " %s", a->list[i]);
fprintf(stderr, "\n");
}
if ((pid = fork()) == -1)
fatal("do_local_cmd: fork: %s", strerror(errno));
if (pid == 0) {
execvp(a->list[0], a->list);
perror(a->list[0]);
exit(1);
}
do_cmd_pid = pid;
ssh_signal(SIGTERM, killchild);
ssh_signal(SIGINT, killchild);
ssh_signal(SIGHUP, killchild);
while (waitpid(pid, &status, 0) == -1)
if (errno != EINTR)
fatal("do_local_cmd: waitpid: %s", strerror(errno));
do_cmd_pid = -1;
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
return (-1);
return (0);
}
/*
* This function executes the given command as the specified user on the
* given host. This returns < 0 if execution fails, and >= 0 otherwise. This
* assigns the input and output file descriptors on success.
*/
int
do_cmd(char *host, char *remuser, int port, char *cmd, int *fdin, int *fdout)
{
int pin[2], pout[2], reserved[2];
if (verbose_mode)
fmprintf(stderr,
"Executing: program %s host %s, user %s, command %s\n",
ssh_program, host,
remuser ? remuser : "(unspecified)", cmd);
if (port == -1)
port = sshport;
/*
* Reserve two descriptors so that the real pipes won't get
* descriptors 0 and 1 because that will screw up dup2 below.
*/
if (pipe(reserved) == -1)
fatal("pipe: %s", strerror(errno));
/* Create pipes for communicating with ssh. */
if (pipe(pin) == -1)
fatal("pipe: %s", strerror(errno));
if (pipe(pout) == -1)
fatal("pipe: %s", strerror(errno));
/* Free the reserved descriptors. */
close(reserved[0]);
close(reserved[1]);
ssh_signal(SIGTSTP, suspchild);
ssh_signal(SIGTTIN, suspchild);
ssh_signal(SIGTTOU, suspchild);
/* Fork a child to execute the command on the remote host using ssh. */
do_cmd_pid = fork();
if (do_cmd_pid == 0) {
/* Child. */
close(pin[1]);
close(pout[0]);
dup2(pin[0], 0);
dup2(pout[1], 1);
close(pin[0]);
close(pout[1]);
replacearg(&args, 0, "%s", ssh_program);
if (port != -1) {
addargs(&args, "-p");
addargs(&args, "%d", port);
}
if (remuser != NULL) {
addargs(&args, "-l");
addargs(&args, "%s", remuser);
}
addargs(&args, "--");
addargs(&args, "%s", host);
addargs(&args, "%s", cmd);
execvp(ssh_program, args.list);
perror(ssh_program);
exit(1);
} else if (do_cmd_pid == -1) {
fatal("fork: %s", strerror(errno));
}
/* Parent. Close the other side, and return the local side. */
close(pin[0]);
*fdout = pin[1];
close(pout[1]);
*fdin = pout[0];
ssh_signal(SIGTERM, killchild);
ssh_signal(SIGINT, killchild);
ssh_signal(SIGHUP, killchild);
return 0;
}
/*
* This function executes a command similar to do_cmd(), but expects the
* input and output descriptors to be setup by a previous call to do_cmd().
* This way the input and output of two commands can be connected.
*/
int
do_cmd2(char *host, char *remuser, int port, char *cmd, int fdin, int fdout)
{
pid_t pid;
int status;
if (verbose_mode)
fmprintf(stderr,
"Executing: 2nd program %s host %s, user %s, command %s\n",
ssh_program, host,
remuser ? remuser : "(unspecified)", cmd);
if (port == -1)
port = sshport;
/* Fork a child to execute the command on the remote host using ssh. */
pid = fork();
if (pid == 0) {
dup2(fdin, 0);
dup2(fdout, 1);
replacearg(&args, 0, "%s", ssh_program);
if (port != -1) {
addargs(&args, "-p");
addargs(&args, "%d", port);
}
if (remuser != NULL) {
addargs(&args, "-l");
addargs(&args, "%s", remuser);
}
addargs(&args, "-oBatchMode=yes");
addargs(&args, "--");
addargs(&args, "%s", host);
addargs(&args, "%s", cmd);
execvp(ssh_program, args.list);
perror(ssh_program);
exit(1);
} else if (pid == -1) {
fatal("fork: %s", strerror(errno));
}
while (waitpid(pid, &status, 0) == -1)
if (errno != EINTR)
fatal("do_cmd2: waitpid: %s", strerror(errno));
return 0;
}
typedef struct {
size_t cnt;
char *buf;
} BUF;
BUF *allocbuf(BUF *, int, int);
void lostconn(int);
int okname(char *);
void run_err(const char *,...);
int note_err(const char *,...);
void verifydir(char *);
struct passwd *pwd;
uid_t userid;
int errs, remin, remout;
int Tflag, pflag, iamremote, iamrecursive, targetshouldbedirectory;
#define CMDNEEDS 64
char cmd[CMDNEEDS]; /* must hold "rcp -r -p -d\0" */
int response(void);
void rsource(char *, struct stat *);
void sink(int, char *[], const char *);
void source(int, char *[]);
void tolocal(int, char *[]);
void toremote(int, char *[]);
void usage(void);
int
main(int argc, char **argv)
{
int ch, fflag, tflag, status, n;
char **newargv;
const char *errstr;
extern char *optarg;
extern int optind;
/* Ensure that fds 0, 1 and 2 are open or directed to /dev/null */
sanitise_stdfd();
seed_rng();
msetlocale();
/* Copy argv, because we modify it */
newargv = xcalloc(MAXIMUM(argc + 1, 1), sizeof(*newargv));
for (n = 0; n < argc; n++)
newargv[n] = xstrdup(argv[n]);
argv = newargv;
__progname = ssh_get_progname(argv[0]);
memset(&args, '\0', sizeof(args));
memset(&remote_remote_args, '\0', sizeof(remote_remote_args));
args.list = remote_remote_args.list = NULL;
addargs(&args, "%s", ssh_program);
addargs(&args, "-x");
addargs(&args, "-oForwardAgent=no");
addargs(&args, "-oPermitLocalCommand=no");
addargs(&args, "-oClearAllForwardings=yes");
addargs(&args, "-oRemoteCommand=none");
addargs(&args, "-oRequestTTY=no");
fflag = Tflag = tflag = 0;
while ((ch = getopt(argc, argv,
"dfl:prtTvBCc:i:P:q12346S:o:F:J:")) != -1) {
switch (ch) {
/* User-visible flags. */
case '1':
fatal("SSH protocol v.1 is no longer supported");
break;
case '2':
/* Ignored */
break;
case '4':
case '6':
case 'C':
addargs(&args, "-%c", ch);
addargs(&remote_remote_args, "-%c", ch);
break;
case '3':
throughlocal = 1;
break;
case 'o':
case 'c':
case 'i':
case 'F':
case 'J':
addargs(&remote_remote_args, "-%c", ch);
addargs(&remote_remote_args, "%s", optarg);
addargs(&args, "-%c", ch);
addargs(&args, "%s", optarg);
break;
case 'P':
sshport = a2port(optarg);
if (sshport <= 0)
fatal("bad port \"%s\"\n", optarg);
break;
case 'B':
addargs(&remote_remote_args, "-oBatchmode=yes");
addargs(&args, "-oBatchmode=yes");
break;
case 'l':
limit_kbps = strtonum(optarg, 1, 100 * 1024 * 1024,
&errstr);
if (errstr != NULL)
usage();
limit_kbps *= 1024; /* kbps */
bandwidth_limit_init(&bwlimit, limit_kbps, COPY_BUFLEN);
break;
case 'p':
pflag = 1;
break;
case 'r':
iamrecursive = 1;
break;
case 'S':
ssh_program = xstrdup(optarg);
break;
case 'v':
addargs(&args, "-v");
addargs(&remote_remote_args, "-v");
verbose_mode = 1;
break;
case 'q':
addargs(&args, "-q");
addargs(&remote_remote_args, "-q");
showprogress = 0;
break;
/* Server options. */
case 'd':
targetshouldbedirectory = 1;
break;
case 'f': /* "from" */
iamremote = 1;
fflag = 1;
break;
case 't': /* "to" */
iamremote = 1;
tflag = 1;
#ifdef HAVE_CYGWIN
setmode(0, O_BINARY);
#endif
break;
case 'T':
Tflag = 1;
break;
default:
usage();
}
}
argc -= optind;
argv += optind;
if ((pwd = getpwuid(userid = getuid())) == NULL)
fatal("unknown user %u", (u_int) userid);
if (!isatty(STDOUT_FILENO))
showprogress = 0;
if (pflag) {
/* Cannot pledge: -p allows setuid/setgid files... */
} else {
if (pledge("stdio rpath wpath cpath fattr tty proc exec",
NULL) == -1) {
perror("pledge");
exit(1);
}
}
remin = STDIN_FILENO;
remout = STDOUT_FILENO;
if (fflag) {
/* Follow "protocol", send data. */
(void) response();
source(argc, argv);
exit(errs != 0);
}
if (tflag) {
/* Receive data. */
sink(argc, argv, NULL);
exit(errs != 0);
}
if (argc < 2)
usage();
if (argc > 2)
targetshouldbedirectory = 1;
remin = remout = -1;
do_cmd_pid = -1;
/* Command to be executed on remote system using "ssh". */
(void) snprintf(cmd, sizeof cmd, "scp%s%s%s%s",
verbose_mode ? " -v" : "",
iamrecursive ? " -r" : "", pflag ? " -p" : "",
targetshouldbedirectory ? " -d" : "");
(void) ssh_signal(SIGPIPE, lostconn);
if (colon(argv[argc - 1])) /* Dest is remote host. */
toremote(argc, argv);
else {
if (targetshouldbedirectory)
verifydir(argv[argc - 1]);
tolocal(argc, argv); /* Dest is local host. */
}
/*
* Finally check the exit status of the ssh process, if one was forked
* and no error has occurred yet
*/
if (do_cmd_pid != -1 && errs == 0) {
if (remin != -1)
(void) close(remin);
if (remout != -1)
(void) close(remout);
if (waitpid(do_cmd_pid, &status, 0) == -1)
errs = 1;
else {
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
errs = 1;
}
}
exit(errs != 0);
}
/* Callback from atomicio6 to update progress meter and limit bandwidth */
static int
scpio(void *_cnt, size_t s)
{
off_t *cnt = (off_t *)_cnt;
*cnt += s;
refresh_progress_meter(0);
if (limit_kbps > 0)
bandwidth_limit(&bwlimit, s);
return 0;
}
static int
do_times(int fd, int verb, const struct stat *sb)
{
/* strlen(2^64) == 20; strlen(10^6) == 7 */
char buf[(20 + 7 + 2) * 2 + 2];
(void)snprintf(buf, sizeof(buf), "T%llu 0 %llu 0\n",
(unsigned long long) (sb->st_mtime < 0 ? 0 : sb->st_mtime),
(unsigned long long) (sb->st_atime < 0 ? 0 : sb->st_atime));
if (verb) {
fprintf(stderr, "File mtime %lld atime %lld\n",
(long long)sb->st_mtime, (long long)sb->st_atime);
fprintf(stderr, "Sending file timestamps: %s", buf);
}
(void) atomicio(vwrite, fd, buf, strlen(buf));
return (response());
}
static int
parse_scp_uri(const char *uri, char **userp, char **hostp, int *portp,
char **pathp)
{
int r;
r = parse_uri("scp", uri, userp, hostp, portp, pathp);
if (r == 0 && *pathp == NULL)
*pathp = xstrdup(".");
return r;
}
/* Appends a string to an array; returns 0 on success, -1 on alloc failure */
static int
append(char *cp, char ***ap, size_t *np)
{
char **tmp;
if ((tmp = reallocarray(*ap, *np + 1, sizeof(*tmp))) == NULL)
return -1;
tmp[(*np)] = cp;
(*np)++;
*ap = tmp;
return 0;
}
/*
* Finds the start and end of the first brace pair in the pattern.
* returns 0 on success or -1 for invalid patterns.
*/
static int
find_brace(const char *pattern, int *startp, int *endp)
{
int i;
int in_bracket, brace_level;
*startp = *endp = -1;
in_bracket = brace_level = 0;
for (i = 0; i < INT_MAX && *endp < 0 && pattern[i] != '\0'; i++) {
switch (pattern[i]) {
case '\\':
/* skip next character */
if (pattern[i + 1] != '\0')
i++;
break;
case '[':
in_bracket = 1;
break;
case ']':
in_bracket = 0;
break;
case '{':
if (in_bracket)
break;
if (pattern[i + 1] == '}') {
/* Protect a single {}, for find(1), like csh */
i++; /* skip */
break;
}
if (*startp == -1)
*startp = i;
brace_level++;
break;
case '}':
if (in_bracket)
break;
if (*startp < 0) {
/* Unbalanced brace */
return -1;
}
if (--brace_level <= 0)
*endp = i;
break;
}
}
/* unbalanced brackets/braces */
if (*endp < 0 && (*startp >= 0 || in_bracket))
return -1;
return 0;
}
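/*
* Worked example (illustrative, not part of the original source): for
* the pattern "a{b,c}d", find_brace() sets *startp = 1 (the '{') and
* *endp = 5 (the matching '}') and returns 0. For "a{b,c" it returns
* -1 because the brace is never closed. Braces inside [...] character
* classes or preceded by a backslash are ignored, as is a literal "{}".
*/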
/*
* Assembles and records a successfully-expanded pattern, returns -1 on
* alloc failure.
*/
static int
emit_expansion(const char *pattern, int brace_start, int brace_end,
int sel_start, int sel_end, char ***patternsp, size_t *npatternsp)
{
char *cp;
int o = 0, tail_len = strlen(pattern + brace_end + 1);
if ((cp = malloc(brace_start + (sel_end - sel_start) +
tail_len + 1)) == NULL)
return -1;
/* Pattern before initial brace */
if (brace_start > 0) {
memcpy(cp, pattern, brace_start);
o = brace_start;
}
/* Current braced selection */
if (sel_end - sel_start > 0) {
memcpy(cp + o, pattern + sel_start,
sel_end - sel_start);
o += sel_end - sel_start;
}
/* Remainder of pattern after closing brace */
if (tail_len > 0) {
memcpy(cp + o, pattern + brace_end + 1, tail_len);
o += tail_len;
}
cp[o] = '\0';
if (append(cp, patternsp, npatternsp) != 0) {
free(cp);
return -1;
}
return 0;
}
/*
* Expand the first encountered brace in pattern, appending the expanded
* patterns it yielded to the *patternsp array.
*
* Returns 0 on success or -1 on allocation failure.
*
* Signals whether expansion was performed via *expanded and whether
* pattern was invalid via *invalid.
*/
static int
brace_expand_one(const char *pattern, char ***patternsp, size_t *npatternsp,
int *expanded, int *invalid)
{
int i;
int in_bracket, brace_start, brace_end, brace_level;
int sel_start, sel_end;
*invalid = *expanded = 0;
if (find_brace(pattern, &brace_start, &brace_end) != 0) {
*invalid = 1;
return 0;
} else if (brace_start == -1)
return 0;
in_bracket = brace_level = 0;
for (i = sel_start = brace_start + 1; i < brace_end; i++) {
switch (pattern[i]) {
case '{':
if (in_bracket)
break;
brace_level++;
break;
case '}':
if (in_bracket)
break;
brace_level--;
break;
case '[':
in_bracket = 1;
break;
case ']':
in_bracket = 0;
break;
case '\\':
if (i < brace_end - 1)
i++; /* skip */
break;
}
if (pattern[i] == ',' || i == brace_end - 1) {
if (in_bracket || brace_level > 0)
continue;
/* End of a selection, emit an expanded pattern */
/* Adjust end index for last selection */
sel_end = (i == brace_end - 1) ? brace_end : i;
if (emit_expansion(pattern, brace_start, brace_end,
sel_start, sel_end, patternsp, npatternsp) != 0)
return -1;
/* move on to the next selection */
sel_start = i + 1;
continue;
}
}
if (in_bracket || brace_level > 0) {
*invalid = 1;
return 0;
}
/* success */
*expanded = 1;
return 0;
}
/* Expand braces from pattern. Returns 0 on success, -1 on failure */
static int
brace_expand(const char *pattern, char ***patternsp, size_t *npatternsp)
{
char *cp, *cp2, **active = NULL, **done = NULL;
size_t i, nactive = 0, ndone = 0;
int ret = -1, invalid = 0, expanded = 0;
*patternsp = NULL;
*npatternsp = 0;
/* Start the worklist with the original pattern */
if ((cp = strdup(pattern)) == NULL)
return -1;
if (append(cp, &active, &nactive) != 0) {
free(cp);
return -1;
}
while (nactive > 0) {
cp = active[nactive - 1];
nactive--;
if (brace_expand_one(cp, &active, &nactive,
&expanded, &invalid) == -1) {
free(cp);
goto fail;
}
if (invalid)
fatal("%s: invalid brace pattern \"%s\"", __func__, cp);
if (expanded) {
/*
* Current entry expanded to new entries on the
* active list; discard the progenitor pattern.
*/
free(cp);
continue;
}
/*
* Pattern did not expand; append the filename component to
* the completed list
*/
if ((cp2 = strrchr(cp, '/')) != NULL)
*cp2++ = '\0';
else
cp2 = cp;
if (append(xstrdup(cp2), &done, &ndone) != 0) {
free(cp);
goto fail;
}
free(cp);
}
/* success */
*patternsp = done;
*npatternsp = ndone;
done = NULL;
ndone = 0;
ret = 0;
fail:
for (i = 0; i < nactive; i++)
free(active[i]);
free(active);
for (i = 0; i < ndone; i++)
free(done[i]);
free(done);
return ret;
}
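/*
* Worked example (illustrative, not part of the original source):
* brace_expand("file.{c,h}", &pats, &npats) leaves npats == 2 with
* pats holding "file.h" and "file.c"; the LIFO worklist reverses the
* order of the selections. Only the component after the last '/' is
* kept, so "src/{a,b}.c" yields "b.c" and "a.c", since sink() matches
* against the bare filenames sent by the peer.
*/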
void
toremote(int argc, char **argv)
{
char *suser = NULL, *host = NULL, *src = NULL;
char *bp, *tuser, *thost, *targ;
int sport = -1, tport = -1;
arglist alist;
int i, r;
u_int j;
memset(&alist, '\0', sizeof(alist));
alist.list = NULL;
/* Parse target */
r = parse_scp_uri(argv[argc - 1], &tuser, &thost, &tport, &targ);
if (r == -1) {
fmprintf(stderr, "%s: invalid uri\n", argv[argc - 1]);
++errs;
goto out;
}
if (r != 0) {
if (parse_user_host_path(argv[argc - 1], &tuser, &thost,
&targ) == -1) {
fmprintf(stderr, "%s: invalid target\n", argv[argc - 1]);
++errs;
goto out;
}
}
if (tuser != NULL && !okname(tuser)) {
++errs;
goto out;
}
/* Parse source files */
for (i = 0; i < argc - 1; i++) {
free(suser);
free(host);
free(src);
r = parse_scp_uri(argv[i], &suser, &host, &sport, &src);
if (r == -1) {
fmprintf(stderr, "%s: invalid uri\n", argv[i]);
++errs;
continue;
}
if (r != 0) {
parse_user_host_path(argv[i], &suser, &host, &src);
}
if (suser != NULL && !okname(suser)) {
++errs;
continue;
}
if (host && throughlocal) { /* extended remote to remote */
xasprintf(&bp, "%s -f %s%s", cmd,
*src == '-' ? "-- " : "", src);
if (do_cmd(host, suser, sport, bp, &remin, &remout) < 0)
exit(1);
free(bp);
xasprintf(&bp, "%s -t %s%s", cmd,
*targ == '-' ? "-- " : "", targ);
if (do_cmd2(thost, tuser, tport, bp, remin, remout) < 0)
exit(1);
free(bp);
(void) close(remin);
(void) close(remout);
remin = remout = -1;
} else if (host) { /* standard remote to remote */
if (tport != -1 && tport != SSH_DEFAULT_PORT) {
/* This would require the remote support URIs */
fatal("target port not supported with two "
"remote hosts without the -3 option");
}
freeargs(&alist);
addargs(&alist, "%s", ssh_program);
addargs(&alist, "-x");
addargs(&alist, "-oClearAllForwardings=yes");
addargs(&alist, "-n");
for (j = 0; j < remote_remote_args.num; j++) {
addargs(&alist, "%s",
remote_remote_args.list[j]);
}
if (sport != -1) {
addargs(&alist, "-p");
addargs(&alist, "%d", sport);
}
if (suser) {
addargs(&alist, "-l");
addargs(&alist, "%s", suser);
}
addargs(&alist, "--");
addargs(&alist, "%s", host);
addargs(&alist, "%s", cmd);
addargs(&alist, "%s", src);
addargs(&alist, "%s%s%s:%s",
tuser ? tuser : "", tuser ? "@" : "",
thost, targ);
if (do_local_cmd(&alist) != 0)
errs = 1;
} else { /* local to remote */
if (remin == -1) {
xasprintf(&bp, "%s -t %s%s", cmd,
*targ == '-' ? "-- " : "", targ);
if (do_cmd(thost, tuser, tport, bp, &remin,
&remout) < 0)
exit(1);
if (response() < 0)
exit(1);
free(bp);
}
source(1, argv + i);
}
}
out:
free(tuser);
free(thost);
free(targ);
free(suser);
free(host);
free(src);
}
void
tolocal(int argc, char **argv)
{
char *bp, *host = NULL, *src = NULL, *suser = NULL;
arglist alist;
int i, r, sport = -1;
memset(&alist, '\0', sizeof(alist));
alist.list = NULL;
for (i = 0; i < argc - 1; i++) {
free(suser);
free(host);
free(src);
r = parse_scp_uri(argv[i], &suser, &host, &sport, &src);
if (r == -1) {
fmprintf(stderr, "%s: invalid uri\n", argv[i]);
++errs;
continue;
}
if (r != 0)
parse_user_host_path(argv[i], &suser, &host, &src);
if (suser != NULL && !okname(suser)) {
++errs;
continue;
}
if (!host) { /* Local to local. */
freeargs(&alist);
addargs(&alist, "%s", _PATH_CP);
if (iamrecursive)
addargs(&alist, "-r");
if (pflag)
addargs(&alist, "-p");
addargs(&alist, "--");
addargs(&alist, "%s", argv[i]);
addargs(&alist, "%s", argv[argc-1]);
if (do_local_cmd(&alist))
++errs;
continue;
}
/* Remote to local. */
xasprintf(&bp, "%s -f %s%s",
cmd, *src == '-' ? "-- " : "", src);
if (do_cmd(host, suser, sport, bp, &remin, &remout) < 0) {
free(bp);
++errs;
continue;
}
free(bp);
sink(1, argv + argc - 1, src);
(void) close(remin);
remin = remout = -1;
}
free(suser);
free(host);
free(src);
}
void
source(int argc, char **argv)
{
struct stat stb;
static BUF buffer;
BUF *bp;
off_t i, statbytes;
size_t amt, nr;
int fd = -1, haderr, indx;
char *last, *name, buf[PATH_MAX + 128], encname[PATH_MAX];
int len;
for (indx = 0; indx < argc; ++indx) {
name = argv[indx];
statbytes = 0;
len = strlen(name);
while (len > 1 && name[len-1] == '/')
name[--len] = '\0';
if ((fd = open(name, O_RDONLY|O_NONBLOCK, 0)) == -1)
goto syserr;
if (strchr(name, '\n') != NULL) {
strnvis(encname, name, sizeof(encname), VIS_NL);
name = encname;
}
if (fstat(fd, &stb) == -1) {
syserr: run_err("%s: %s", name, strerror(errno));
goto next;
}
if (stb.st_size < 0) {
run_err("%s: %s", name, "Negative file size");
goto next;
}
unset_nonblock(fd);
switch (stb.st_mode & S_IFMT) {
case S_IFREG:
break;
case S_IFDIR:
if (iamrecursive) {
rsource(name, &stb);
goto next;
}
/* FALLTHROUGH */
default:
run_err("%s: not a regular file", name);
goto next;
}
if ((last = strrchr(name, '/')) == NULL)
last = name;
else
++last;
curfile = last;
if (pflag) {
if (do_times(remout, verbose_mode, &stb) < 0)
goto next;
}
#define FILEMODEMASK (S_ISUID|S_ISGID|S_IRWXU|S_IRWXG|S_IRWXO)
snprintf(buf, sizeof buf, "C%04o %lld %s\n",
(u_int) (stb.st_mode & FILEMODEMASK),
(long long)stb.st_size, last);
if (verbose_mode)
fmprintf(stderr, "Sending file modes: %s", buf);
(void) atomicio(vwrite, remout, buf, strlen(buf));
if (response() < 0)
goto next;
if ((bp = allocbuf(&buffer, fd, COPY_BUFLEN)) == NULL) {
next: if (fd != -1) {
(void) close(fd);
fd = -1;
}
continue;
}
if (showprogress)
start_progress_meter(curfile, stb.st_size, &statbytes);
set_nonblock(remout);
for (haderr = i = 0; i < stb.st_size; i += bp->cnt) {
amt = bp->cnt;
if (i + (off_t)amt > stb.st_size)
amt = stb.st_size - i;
if (!haderr) {
if ((nr = atomicio(read, fd,
bp->buf, amt)) != amt) {
haderr = errno;
memset(bp->buf + nr, 0, amt - nr);
}
}
/* Keep writing after error to retain sync */
if (haderr) {
(void)atomicio(vwrite, remout, bp->buf, amt);
memset(bp->buf, 0, amt);
continue;
}
if (atomicio6(vwrite, remout, bp->buf, amt, scpio,
&statbytes) != amt)
haderr = errno;
}
unset_nonblock(remout);
if (fd != -1) {
if (close(fd) == -1 && !haderr)
haderr = errno;
fd = -1;
}
if (!haderr)
(void) atomicio(vwrite, remout, "", 1);
else
run_err("%s: %s", name, strerror(haderr));
(void) response();
if (showprogress)
stop_progress_meter();
}
}
void
rsource(char *name, struct stat *statp)
{
DIR *dirp;
struct dirent *dp;
char *last, *vect[1], path[PATH_MAX];
if (!(dirp = opendir(name))) {
run_err("%s: %s", name, strerror(errno));
return;
}
last = strrchr(name, '/');
if (last == NULL)
last = name;
else
last++;
if (pflag) {
if (do_times(remout, verbose_mode, statp) < 0) {
closedir(dirp);
return;
}
}
(void) snprintf(path, sizeof path, "D%04o %d %.1024s\n",
(u_int) (statp->st_mode & FILEMODEMASK), 0, last);
if (verbose_mode)
fmprintf(stderr, "Entering directory: %s", path);
(void) atomicio(vwrite, remout, path, strlen(path));
if (response() < 0) {
closedir(dirp);
return;
}
while ((dp = readdir(dirp)) != NULL) {
if (dp->d_ino == 0)
continue;
if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, ".."))
continue;
if (strlen(name) + 1 + strlen(dp->d_name) >= sizeof(path) - 1) {
run_err("%s/%s: name too long", name, dp->d_name);
continue;
}
(void) snprintf(path, sizeof path, "%s/%s", name, dp->d_name);
vect[0] = path;
source(1, vect);
}
(void) closedir(dirp);
(void) atomicio(vwrite, remout, "E\n", 2);
(void) response();
}
#define TYPE_OVERFLOW(type, val) \
((sizeof(type) == 4 && (val) > INT32_MAX) || \
(sizeof(type) == 8 && (val) > INT64_MAX) || \
(sizeof(type) != 4 && sizeof(type) != 8))
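/*
* Worked example (illustrative, not part of the original source): on a
* platform with a 32-bit time_t, a received mtime of 4102444800 (the
* year 2100) makes TYPE_OVERFLOW(time_t, 4102444800) true, so sink()
* below quietly drops the timestamp (setimes = 0) instead of letting
* the value wrap. The start-up check TYPE_OVERFLOW(time_t, 0) only
* rejects types that are neither 4 nor 8 bytes wide.
*/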
void
sink(int argc, char **argv, const char *src)
{
static BUF buffer;
struct stat stb;
BUF *bp;
off_t i;
size_t j, count;
int amt, exists, first, ofd;
mode_t mode, omode, mask;
off_t size, statbytes;
unsigned long long ull;
int setimes, targisdir, wrerr;
char ch, *cp, *np, *targ, *why, *vect[1], buf[2048], visbuf[2048];
char **patterns = NULL;
size_t n, npatterns = 0;
struct timeval tv[2];
#define atime tv[0]
#define mtime tv[1]
#define SCREWUP(str) { why = str; goto screwup; }
if (TYPE_OVERFLOW(time_t, 0) || TYPE_OVERFLOW(off_t, 0))
SCREWUP("Unexpected off_t/time_t size");
setimes = targisdir = 0;
mask = umask(0);
if (!pflag)
(void) umask(mask);
if (argc != 1) {
run_err("ambiguous target");
exit(1);
}
targ = *argv;
if (targetshouldbedirectory)
verifydir(targ);
(void) atomicio(vwrite, remout, "", 1);
if (stat(targ, &stb) == 0 && S_ISDIR(stb.st_mode))
targisdir = 1;
if (src != NULL && !iamrecursive && !Tflag) {
/*
* Prepare to try to restrict incoming filenames to match
* the requested destination file glob.
*/
if (brace_expand(src, &patterns, &npatterns) != 0)
fatal("%s: could not expand pattern", __func__);
}
for (first = 1;; first = 0) {
cp = buf;
if (atomicio(read, remin, cp, 1) != 1)
goto done;
if (*cp++ == '\n')
SCREWUP("unexpected <newline>");
do {
if (atomicio(read, remin, &ch, sizeof(ch)) != sizeof(ch))
SCREWUP("lost connection");
*cp++ = ch;
} while (cp < &buf[sizeof(buf) - 1] && ch != '\n');
*cp = 0;
if (verbose_mode)
fmprintf(stderr, "Sink: %s", buf);
if (buf[0] == '\01' || buf[0] == '\02') {
if (iamremote == 0) {
(void) snmprintf(visbuf, sizeof(visbuf),
NULL, "%s", buf + 1);
(void) atomicio(vwrite, STDERR_FILENO,
visbuf, strlen(visbuf));
}
if (buf[0] == '\02')
exit(1);
++errs;
continue;
}
if (buf[0] == 'E') {
(void) atomicio(vwrite, remout, "", 1);
goto done;
}
if (ch == '\n')
*--cp = 0;
cp = buf;
if (*cp == 'T') {
setimes++;
cp++;
if (!isdigit((unsigned char)*cp))
SCREWUP("mtime.sec not present");
ull = strtoull(cp, &cp, 10);
if (!cp || *cp++ != ' ')
SCREWUP("mtime.sec not delimited");
if (TYPE_OVERFLOW(time_t, ull))
setimes = 0; /* out of range */
mtime.tv_sec = ull;
mtime.tv_usec = strtol(cp, &cp, 10);
if (!cp || *cp++ != ' ' || mtime.tv_usec < 0 ||
mtime.tv_usec > 999999)
SCREWUP("mtime.usec not delimited");
if (!isdigit((unsigned char)*cp))
SCREWUP("atime.sec not present");
ull = strtoull(cp, &cp, 10);
if (!cp || *cp++ != ' ')
SCREWUP("atime.sec not delimited");
if (TYPE_OVERFLOW(time_t, ull))
setimes = 0; /* out of range */
atime.tv_sec = ull;
atime.tv_usec = strtol(cp, &cp, 10);
if (!cp || *cp++ != '\0' || atime.tv_usec < 0 ||
atime.tv_usec > 999999)
SCREWUP("atime.usec not delimited");
(void) atomicio(vwrite, remout, "", 1);
continue;
}
if (*cp != 'C' && *cp != 'D') {
/*
* Check for the case "rcp remote:foo\* local:bar".
* In this case, the line "No match." can be returned
* by the shell before the rcp command on the remote is
* executed so the ^Aerror_message convention isn't
* followed.
*/
if (first) {
run_err("%s", cp);
exit(1);
}
SCREWUP("expected control record");
}
mode = 0;
for (++cp; cp < buf + 5; cp++) {
if (*cp < '0' || *cp > '7')
SCREWUP("bad mode");
mode = (mode << 3) | (*cp - '0');
}
if (!pflag)
mode &= ~mask;
if (*cp++ != ' ')
SCREWUP("mode not delimited");
if (!isdigit((unsigned char)*cp))
SCREWUP("size not present");
ull = strtoull(cp, &cp, 10);
if (!cp || *cp++ != ' ')
SCREWUP("size not delimited");
if (TYPE_OVERFLOW(off_t, ull))
SCREWUP("size out of range");
size = (off_t)ull;
if (*cp == '\0' || strchr(cp, '/') != NULL ||
strcmp(cp, ".") == 0 || strcmp(cp, "..") == 0) {
run_err("error: unexpected filename: %s", cp);
exit(1);
}
if (npatterns > 0) {
for (n = 0; n < npatterns; n++) {
if (fnmatch(patterns[n], cp, 0) == 0)
break;
}
if (n >= npatterns)
SCREWUP("filename does not match request");
}
if (targisdir) {
static char *namebuf;
static size_t cursize;
size_t need;
need = strlen(targ) + strlen(cp) + 250;
if (need > cursize) {
free(namebuf);
namebuf = xmalloc(need);
cursize = need;
}
(void) snprintf(namebuf, need, "%s%s%s", targ,
strcmp(targ, "/") ? "/" : "", cp);
np = namebuf;
} else
np = targ;
curfile = cp;
exists = stat(np, &stb) == 0;
if (buf[0] == 'D') {
int mod_flag = pflag;
if (!iamrecursive)
SCREWUP("received directory without -r");
if (exists) {
if (!S_ISDIR(stb.st_mode)) {
errno = ENOTDIR;
goto bad;
}
if (pflag)
(void) chmod(np, mode);
} else {
/* Handle copying from a read-only
directory */
mod_flag = 1;
if (mkdir(np, mode | S_IRWXU) == -1)
goto bad;
}
vect[0] = xstrdup(np);
sink(1, vect, src);
if (setimes) {
setimes = 0;
if (utimes(vect[0], tv) == -1)
run_err("%s: set times: %s",
vect[0], strerror(errno));
}
if (mod_flag)
(void) chmod(vect[0], mode);
free(vect[0]);
continue;
}
omode = mode;
mode |= S_IWUSR;
if ((ofd = open(np, O_WRONLY|O_CREAT, mode)) == -1) {
bad: run_err("%s: %s", np, strerror(errno));
continue;
}
(void) atomicio(vwrite, remout, "", 1);
if ((bp = allocbuf(&buffer, ofd, COPY_BUFLEN)) == NULL) {
(void) close(ofd);
continue;
}
cp = bp->buf;
wrerr = 0;
/*
* NB. do not use run_err() unless immediately followed by
* exit() below as it may send a spurious reply that might
* desynchronise us from the peer. Use note_err() instead.
*/
statbytes = 0;
if (showprogress)
start_progress_meter(curfile, size, &statbytes);
set_nonblock(remin);
for (count = i = 0; i < size; i += bp->cnt) {
amt = bp->cnt;
if (i + amt > size)
amt = size - i;
count += amt;
do {
j = atomicio6(read, remin, cp, amt,
scpio, &statbytes);
if (j == 0) {
run_err("%s", j != EPIPE ?
strerror(errno) :
"dropped connection");
exit(1);
}
amt -= j;
cp += j;
} while (amt > 0);
if (count == bp->cnt) {
/* Keep reading so we stay sync'd up. */
if (!wrerr) {
if (atomicio(vwrite, ofd, bp->buf,
count) != count) {
note_err("%s: %s", np,
strerror(errno));
wrerr = 1;
}
}
count = 0;
cp = bp->buf;
}
}
unset_nonblock(remin);
if (count != 0 && !wrerr &&
atomicio(vwrite, ofd, bp->buf, count) != count) {
note_err("%s: %s", np, strerror(errno));
wrerr = 1;
}
if (!wrerr && (!exists || S_ISREG(stb.st_mode)) &&
ftruncate(ofd, size) != 0)
note_err("%s: truncate: %s", np, strerror(errno));
if (pflag) {
if (exists || omode != mode)
#ifdef HAVE_FCHMOD
if (fchmod(ofd, omode)) {
#else /* HAVE_FCHMOD */
if (chmod(np, omode)) {
#endif /* HAVE_FCHMOD */
note_err("%s: set mode: %s",
np, strerror(errno));
}
} else {
if (!exists && omode != mode)
#ifdef HAVE_FCHMOD
if (fchmod(ofd, omode & ~mask)) {
#else /* HAVE_FCHMOD */
if (chmod(np, omode & ~mask)) {
#endif /* HAVE_FCHMOD */
note_err("%s: set mode: %s",
np, strerror(errno));
}
}
if (close(ofd) == -1)
note_err("%s: close: %s", np, strerror(errno));
(void) response();
if (showprogress)
stop_progress_meter();
if (setimes && !wrerr) {
setimes = 0;
if (utimes(np, tv) == -1) {
note_err("%s: set times: %s",
np, strerror(errno));
}
}
/* If no error was noted then signal success for this file */
if (note_err(NULL) == 0)
(void) atomicio(vwrite, remout, "", 1);
}
done:
for (n = 0; n < npatterns; n++)
free(patterns[n]);
free(patterns);
return;
screwup:
for (n = 0; n < npatterns; n++)
free(patterns[n]);
free(patterns);
run_err("protocol error: %s", why);
exit(1);
}
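/*
* Illustrative protocol exchange (a sketch inferred from the parser
* above, not part of the original source). Every record is answered
* with one status byte: \0 = ok, \1 = error (message follows), \2 =
* fatal error.
*
*	sender				receiver
*	"T1700000000 0 1700000100 0\n"	\0
*	"C0644 12 hello.txt\n"		\0
*	<12 data bytes> \0		\0
*
* With -r, directories arrive as "D0755 0 subdir\n" ... "E\n" pairs
* and are handled by sink() calling itself recursively.
*/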
int
response(void)
{
char ch, *cp, resp, rbuf[2048], visbuf[2048];
if (atomicio(read, remin, &resp, sizeof(resp)) != sizeof(resp))
lostconn(0);
cp = rbuf;
switch (resp) {
case 0: /* ok */
return (0);
default:
*cp++ = resp;
/* FALLTHROUGH */
case 1: /* error, followed by error msg */
case 2: /* fatal error, "" */
do {
if (atomicio(read, remin, &ch, sizeof(ch)) != sizeof(ch))
lostconn(0);
*cp++ = ch;
} while (cp < &rbuf[sizeof(rbuf) - 1] && ch != '\n');
if (!iamremote) {
cp[-1] = '\0';
(void) snmprintf(visbuf, sizeof(visbuf),
NULL, "%s\n", rbuf);
(void) atomicio(vwrite, STDERR_FILENO,
visbuf, strlen(visbuf));
}
++errs;
if (resp == 1)
return (-1);
exit(1);
}
/* NOTREACHED */
}
void
usage(void)
{
(void) fprintf(stderr,
"usage: scp [-346BCpqrTv] [-c cipher] [-F ssh_config] [-i identity_file]\n"
" [-J destination] [-l limit] [-o ssh_option] [-P port]\n"
" [-S program] source ... target\n");
exit(1);
}
void
run_err(const char *fmt,...)
{
static FILE *fp;
va_list ap;
++errs;
if (fp != NULL || (remout != -1 && (fp = fdopen(remout, "w")))) {
(void) fprintf(fp, "%c", 0x01);
(void) fprintf(fp, "scp: ");
va_start(ap, fmt);
(void) vfprintf(fp, fmt, ap);
va_end(ap);
(void) fprintf(fp, "\n");
(void) fflush(fp);
}
if (!iamremote) {
va_start(ap, fmt);
vfmprintf(stderr, fmt, ap);
va_end(ap);
fprintf(stderr, "\n");
}
}
/*
* Notes a sink error for sending at the end of a file transfer. Returns 0 if
* no error has been noted or -1 otherwise. Use note_err(NULL) to flush
* any active error at the end of the transfer.
*/
int
note_err(const char *fmt, ...)
{
static char *emsg;
va_list ap;
/* Replay any previously-noted error */
if (fmt == NULL) {
if (emsg == NULL)
return 0;
run_err("%s", emsg);
free(emsg);
emsg = NULL;
return -1;
}
errs++;
/* Prefer first-noted error */
if (emsg != NULL)
return -1;
va_start(ap, fmt);
vasnmprintf(&emsg, INT_MAX, NULL, fmt, ap);
va_end(ap);
return -1;
}
void
verifydir(char *cp)
{
struct stat stb;
if (!stat(cp, &stb)) {
if (S_ISDIR(stb.st_mode))
return;
errno = ENOTDIR;
}
run_err("%s: %s", cp, strerror(errno));
killchild(0);
}
int
okname(char *cp0)
{
int c;
char *cp;
cp = cp0;
do {
c = (int)*cp;
if (c & 0200)
goto bad;
if (!isalpha(c) && !isdigit((unsigned char)c)) {
switch (c) {
case '\'':
case '"':
case '`':
case ' ':
case '#':
goto bad;
default:
break;
}
}
} while (*++cp);
return (1);
bad: fmprintf(stderr, "%s: invalid user name\n", cp0);
return (0);
}
BUF *
allocbuf(BUF *bp, int fd, int blksize)
{
size_t size;
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
struct stat stb;
if (fstat(fd, &stb) == -1) {
run_err("fstat: %s", strerror(errno));
return (0);
}
size = ROUNDUP(stb.st_blksize, blksize);
if (size == 0)
size = blksize;
#else /* HAVE_STRUCT_STAT_ST_BLKSIZE */
size = blksize;
#endif /* HAVE_STRUCT_STAT_ST_BLKSIZE */
if (bp->cnt >= size)
return (bp);
bp->buf = xrecallocarray(bp->buf, bp->cnt, size, 1);
bp->cnt = size;
return (bp);
}
void
lostconn(int signo)
{
if (!iamremote)
(void)write(STDERR_FILENO, "lost connection\n", 16);
if (signo)
_exit(1);
else
exit(1);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_3979_0 |
crossvul-cpp_data_bad_5845_20 | /*
* af_llc.c - LLC User Interface SAPs
* Description:
* Functions in this module are implementation of socket based llc
* communications for the Linux operating system. Support of llc class
* one and class two is provided via SOCK_DGRAM and SOCK_STREAM
* respectively.
*
* An llc2 connection is (mac + sap), only one llc2 sap connection
* is allowed per mac. Though one sap may have multiple mac + sap
* connections.
*
* Copyright (c) 2001 by Jay Schulist <jschlst@samba.org>
* 2002-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program can be redistributed or modified under the terms of the
* GNU General Public License as published by the Free Software Foundation.
* This program is distributed without any warranty or implied warranty
* of merchantability or fitness for a particular purpose.
*
* See the GNU General Public License for more details.
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/llc.h>
#include <net/llc_sap.h>
#include <net/llc_pdu.h>
#include <net/llc_conn.h>
#include <net/tcp_states.h>
/* remember: uninitialized global data is zeroed because its in .bss */
static u16 llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
static u16 llc_ui_sap_link_no_max[256];
static struct sockaddr_llc llc_ui_addrnull;
static const struct proto_ops llc_ui_ops;
static int llc_ui_wait_for_conn(struct sock *sk, long timeout);
static int llc_ui_wait_for_disc(struct sock *sk, long timeout);
static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
#if 0
#define dprintk(args...) printk(KERN_DEBUG args)
#else
#define dprintk(args...)
#endif
/* Maybe we'll add some more in the future. */
#define LLC_CMSG_PKTINFO 1
/**
* llc_ui_next_link_no - return the next unused link number for a sap
* @sap: Address of sap to get link number from.
*
* Return the next unused link number for a given sap.
*/
static inline u16 llc_ui_next_link_no(int sap)
{
return llc_ui_sap_link_no_max[sap]++;
}
/**
* llc_proto_type - return eth protocol for ARP header type
* @arphrd: ARP header type.
*
* Given an ARP header type return the corresponding ethernet protocol.
*/
static inline __be16 llc_proto_type(u16 arphrd)
{
return htons(ETH_P_802_2);
}
/**
* llc_ui_addr_null - determines if an address structure is null
* @addr: Address to test if null.
*/
static inline u8 llc_ui_addr_null(struct sockaddr_llc *addr)
{
return !memcmp(addr, &llc_ui_addrnull, sizeof(*addr));
}
/**
* llc_ui_header_len - return length of llc header based on operation
* @sk: Socket which contains a valid llc socket type.
* @addr: Complete sockaddr_llc structure received from the user.
*
* Provide the length of the llc header depending on what kind of
* operation the user would like to perform and the type of socket.
* Returns the correct llc header length.
*/
static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
{
u8 rc = LLC_PDU_LEN_U;
if (addr->sllc_test || addr->sllc_xid)
rc = LLC_PDU_LEN_U;
else if (sk->sk_type == SOCK_STREAM)
rc = LLC_PDU_LEN_I;
return rc;
}
/**
* llc_ui_send_data - send data via reliable llc2 connection
* @sk: Connection the socket is using.
* @skb: Data the user wishes to send.
* @noblock: can we block waiting for data?
*
* Send data via reliable llc2 connection.
* Returns 0 upon success, non-zero if action did not succeed.
*/
static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
{
struct llc_sock* llc = llc_sk(sk);
int rc = 0;
if (unlikely(llc_data_accept_state(llc->state) ||
llc->remote_busy_flag ||
llc->p_flag)) {
long timeout = sock_sndtimeo(sk, noblock);
rc = llc_ui_wait_for_busy_core(sk, timeout);
}
if (unlikely(!rc))
rc = llc_build_and_send_pkt(sk, skb);
return rc;
}
static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
{
sock_graft(sk, sock);
sk->sk_type = sock->type;
sock->ops = &llc_ui_ops;
}
static struct proto llc_proto = {
.name = "LLC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct llc_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
};
/**
* llc_ui_create - alloc and init a new llc_ui socket
* @net: network namespace (must be default network)
* @sock: Socket to initialize and attach allocated sk to.
* @protocol: Unused.
* @kern: on behalf of kernel or userspace
*
* Allocate and initialize a new llc_ui socket, validate the user wants a
* socket type we have available.
* Returns 0 upon success, negative upon failure.
*/
static int llc_ui_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
int rc = -ESOCKTNOSUPPORT;
if (!ns_capable(net->user_ns, CAP_NET_RAW))
return -EPERM;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) {
rc = -ENOMEM;
sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto);
if (sk) {
rc = 0;
llc_ui_sk_init(sock, sk);
}
}
return rc;
}
/**
* llc_ui_release - shutdown socket
* @sock: Socket to release.
*
* Shutdown and deallocate an existing socket.
*/
static int llc_ui_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct llc_sock *llc;
if (unlikely(sk == NULL))
goto out;
sock_hold(sk);
lock_sock(sk);
llc = llc_sk(sk);
dprintk("%s: closing local(%02X) remote(%02X)\n", __func__,
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
if (!sock_flag(sk, SOCK_ZAPPED))
llc_sap_remove_socket(llc->sap, sk);
release_sock(sk);
if (llc->dev)
dev_put(llc->dev);
sock_put(sk);
llc_sk_free(sk);
out:
return 0;
}
/**
* llc_ui_autoport - provide a dynamically allocated SAP number
*
* Provide the caller with a dynamically allocated SAP number according
* to the rules that are set in this function. Returns: 0, upon failure,
* SAP number otherwise.
*/
static int llc_ui_autoport(void)
{
struct llc_sap *sap;
int i, tries = 0;
while (tries < LLC_SAP_DYN_TRIES) {
for (i = llc_ui_sap_last_autoport;
i < LLC_SAP_DYN_STOP; i += 2) {
sap = llc_sap_find(i);
if (!sap) {
llc_ui_sap_last_autoport = i + 2;
goto out;
}
llc_sap_put(sap);
}
llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
tries++;
}
i = 0;
out:
return i;
}
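/*
* Illustrative behaviour (not part of the original source): successive
* calls hand out LLC_SAP_DYN_START, LLC_SAP_DYN_START + 2, and so on,
* skipping SAPs that llc_sap_find() reports as already open, wrapping
* back to LLC_SAP_DYN_START at most LLC_SAP_DYN_TRIES times before
* giving up and returning 0.
*/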
/**
* llc_ui_autobind - automatically bind a socket to a sap
* @sock: socket to bind
* @addr: address to connect to
*
* Used by llc_ui_connect and llc_ui_sendmsg when the user hasn't
* specifically used llc_ui_bind to bind to a specific address/sap
*
* Returns: 0 upon success, negative otherwise.
*/
static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
struct llc_sap *sap;
int rc = -EINVAL;
if (!sock_flag(sk, SOCK_ZAPPED))
goto out;
rc = -ENODEV;
if (sk->sk_bound_dev_if) {
llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
dev_put(llc->dev);
llc->dev = NULL;
}
} else
llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
if (!llc->dev)
goto out;
rc = -EUSERS;
llc->laddr.lsap = llc_ui_autoport();
if (!llc->laddr.lsap)
goto out;
rc = -EBUSY; /* some other network layer is using the sap */
sap = llc_sap_open(llc->laddr.lsap, NULL);
if (!sap)
goto out;
memcpy(llc->laddr.mac, llc->dev->dev_addr, IFHWADDRLEN);
memcpy(&llc->addr, addr, sizeof(llc->addr));
/* assign new connection to its SAP */
llc_sap_add_socket(sap, sk);
sock_reset_flag(sk, SOCK_ZAPPED);
rc = 0;
out:
return rc;
}
/**
* llc_ui_bind - bind a socket to a specific address.
* @sock: Socket to bind an address to.
* @uaddr: Address the user wants the socket bound to.
* @addrlen: Length of the uaddr structure.
*
* Bind a socket to a specific address. For llc a user may bind to a
* specific sap alone, or to a mac + sap pair.
* If the user desires to bind to a specific mac + sap, it is possible to
* have multiple sap connections via multiple macs.
* Bind and autobind for that matter must enforce the correct sap usage
* otherwise all hell will break loose.
* Returns: 0 upon success, negative otherwise.
*/
static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
{
struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
struct llc_sap *sap;
int rc = -EINVAL;
dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
if (unlikely(addr->sllc_family != AF_LLC))
goto out;
rc = -ENODEV;
rcu_read_lock();
if (sk->sk_bound_dev_if) {
llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
if (llc->dev) {
if (!addr->sllc_arphrd)
addr->sllc_arphrd = llc->dev->type;
if (is_zero_ether_addr(addr->sllc_mac))
memcpy(addr->sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
if (addr->sllc_arphrd != llc->dev->type ||
!ether_addr_equal(addr->sllc_mac,
llc->dev->dev_addr)) {
rc = -EINVAL;
llc->dev = NULL;
}
}
} else
llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
addr->sllc_mac);
if (llc->dev)
dev_hold(llc->dev);
rcu_read_unlock();
if (!llc->dev)
goto out;
if (!addr->sllc_sap) {
rc = -EUSERS;
addr->sllc_sap = llc_ui_autoport();
if (!addr->sllc_sap)
goto out;
}
sap = llc_sap_find(addr->sllc_sap);
if (!sap) {
sap = llc_sap_open(addr->sllc_sap, NULL);
rc = -EBUSY; /* some other network layer is using the sap */
if (!sap)
goto out;
} else {
struct llc_addr laddr, daddr;
struct sock *ask;
memset(&laddr, 0, sizeof(laddr));
memset(&daddr, 0, sizeof(daddr));
/*
* FIXME: check if the address is multicast,
* only SOCK_DGRAM can do this.
*/
memcpy(laddr.mac, addr->sllc_mac, IFHWADDRLEN);
laddr.lsap = addr->sllc_sap;
rc = -EADDRINUSE; /* mac + sap clash. */
ask = llc_lookup_established(sap, &daddr, &laddr);
if (ask) {
sock_put(ask);
goto out_put;
}
}
llc->laddr.lsap = addr->sllc_sap;
memcpy(llc->laddr.mac, addr->sllc_mac, IFHWADDRLEN);
memcpy(&llc->addr, addr, sizeof(llc->addr));
/* assign new connection to its SAP */
llc_sap_add_socket(sap, sk);
sock_reset_flag(sk, SOCK_ZAPPED);
rc = 0;
out_put:
llc_sap_put(sap);
out:
return rc;
}
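/*
* Illustrative userspace usage (an assumed sketch, not part of this
* file; local_mac[] is a placeholder for the interface's hardware
* address, and creating the socket requires CAP_NET_RAW, see
* llc_ui_create() above):
*
*	struct sockaddr_llc a;
*
*	int fd = socket(PF_LLC, SOCK_DGRAM, 0);
*	memset(&a, 0, sizeof(a));
*	a.sllc_family = AF_LLC;
*	a.sllc_arphrd = ARPHRD_ETHER;
*	a.sllc_sap = 0x42;
*	memcpy(a.sllc_mac, local_mac, IFHWADDRLEN);
*	bind(fd, (struct sockaddr *)&a, sizeof(a));
*
* Passing sllc_sap == 0 instead makes the kernel pick a dynamic SAP
* via llc_ui_autoport().
*/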
/**
* llc_ui_shutdown - shutdown a connect llc2 socket.
* @sock: Socket to shutdown.
* @how: What part of the socket to shutdown.
*
* Shutdown a connected llc2 socket. Currently this function only supports
* shutting down both sends and receives (how == 2); letting a user shut
* down only half of the connection is possible but not implemented.
* Returns: 0 upon success, negative otherwise.
*/
static int llc_ui_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int rc = -ENOTCONN;
lock_sock(sk);
if (unlikely(sk->sk_state != TCP_ESTABLISHED))
goto out;
rc = -EINVAL;
if (how != 2)
goto out;
rc = llc_send_disc(sk);
if (!rc)
rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
/* Wake up anyone sleeping in poll */
sk->sk_state_change(sk);
out:
release_sock(sk);
return rc;
}
/**
* llc_ui_connect - Connect to a remote llc2 mac + sap.
* @sock: Socket which will be connected to the remote destination.
* @uaddr: Remote and possibly the local address of the new connection.
* @addrlen: Size of uaddr structure.
* @flags: Operational flags specified by the user.
*
* Connect to a remote llc2 mac + sap. The caller must specify the
* destination mac and address to connect to. If the user hasn't previously
* called bind(2) with a smac the address of the first interface of the
* specified arp type will be used.
* This function will autobind if the user did not previously call bind.
* Returns: 0 upon success, negative otherwise.
*/
static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
int addrlen, int flags)
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
int rc = -EINVAL;
lock_sock(sk);
if (unlikely(addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
if (unlikely(addr->sllc_family != AF_LLC))
goto out;
if (unlikely(sk->sk_type != SOCK_STREAM))
goto out;
rc = -EALREADY;
if (unlikely(sock->state == SS_CONNECTING))
goto out;
/* bind connection to sap if user hasn't done it. */
if (sock_flag(sk, SOCK_ZAPPED)) {
/* bind to sap with null dev, exclusive */
rc = llc_ui_autobind(sock, addr);
if (rc)
goto out;
}
llc->daddr.lsap = addr->sllc_sap;
memcpy(llc->daddr.mac, addr->sllc_mac, IFHWADDRLEN);
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
llc->link = llc_ui_next_link_no(llc->sap->laddr.lsap);
rc = llc_establish_connection(sk, llc->dev->dev_addr,
addr->sllc_mac, addr->sllc_sap);
if (rc) {
dprintk("%s: llc_ui_send_conn failed :-(\n", __func__);
sock->state = SS_UNCONNECTED;
sk->sk_state = TCP_CLOSE;
goto out;
}
if (sk->sk_state == TCP_SYN_SENT) {
const long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if (!timeo || !llc_ui_wait_for_conn(sk, timeo))
goto out;
rc = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
}
if (sk->sk_state == TCP_CLOSE)
goto sock_error;
sock->state = SS_CONNECTED;
rc = 0;
out:
release_sock(sk);
return rc;
sock_error:
rc = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
goto out;
}
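/*
* Illustrative userspace usage (an assumed sketch, not part of this
* file; peer_mac[] is a placeholder): connecting a SOCK_STREAM llc2
* socket to a remote SAP/MAC, autobinding locally if bind(2) was not
* called first:
*
*	struct sockaddr_llc d;
*
*	memset(&d, 0, sizeof(d));
*	d.sllc_family = AF_LLC;
*	d.sllc_arphrd = ARPHRD_ETHER;
*	d.sllc_sap = 0x42;
*	memcpy(d.sllc_mac, peer_mac, IFHWADDRLEN);
*	connect(fd, (struct sockaddr *)&d, sizeof(d));
*/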
/**
* llc_ui_listen - allow a normal socket to accept incoming connections
* @sock: Socket to allow incoming connections on.
* @backlog: Number of connections to queue.
*
* Allow a normal socket to accept incoming connections.
* Returns 0 upon success, negative otherwise.
*/
static int llc_ui_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int rc = -EINVAL;
lock_sock(sk);
if (unlikely(sock->state != SS_UNCONNECTED))
goto out;
rc = -EOPNOTSUPP;
if (unlikely(sk->sk_type != SOCK_STREAM))
goto out;
rc = -EAGAIN;
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
rc = 0;
if (!(unsigned int)backlog) /* BSDism */
backlog = 1;
sk->sk_max_ack_backlog = backlog;
if (sk->sk_state != TCP_LISTEN) {
sk->sk_ack_backlog = 0;
sk->sk_state = TCP_LISTEN;
}
sk->sk_socket->flags |= __SO_ACCEPTCON;
out:
release_sock(sk);
return rc;
}
static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);
int rc = 0;
while (1) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE))
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
break;
rc = -EAGAIN;
if (!timeout)
break;
rc = 0;
}
finish_wait(sk_sleep(sk), &wait);
return rc;
}
static int llc_ui_wait_for_conn(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);
while (1) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT))
break;
if (signal_pending(current) || !timeout)
break;
}
finish_wait(sk_sleep(sk), &wait);
return timeout;
}
static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);
struct llc_sock *llc = llc_sk(sk);
int rc;
while (1) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
rc = 0;
if (sk_wait_event(sk, &timeout,
(sk->sk_shutdown & RCV_SHUTDOWN) ||
(!llc_data_accept_state(llc->state) &&
!llc->remote_busy_flag &&
!llc->p_flag)))
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
break;
rc = -EAGAIN;
if (!timeout)
break;
}
finish_wait(sk_sleep(sk), &wait);
return rc;
}
static int llc_wait_data(struct sock *sk, long timeo)
{
int rc;
while (1) {
/*
* POSIX 1003.1g mandates this order.
*/
rc = sock_error(sk);
if (rc)
break;
rc = 0;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
rc = -EAGAIN;
if (!timeo)
break;
rc = sock_intr_errno(timeo);
if (signal_pending(current))
break;
rc = 0;
if (sk_wait_data(sk, &timeo))
break;
}
return rc;
}
static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
{
struct llc_sock *llc = llc_sk(skb->sk);
if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
struct llc_pktinfo info;
info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
llc_pdu_decode_dsap(skb, &info.lpi_sap);
llc_pdu_decode_da(skb, info.lpi_mac);
put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);
}
}
/**
* llc_ui_accept - accept a new incoming connection.
* @sock: Socket which connections arrive on.
* @newsock: Socket to move incoming connection to.
* @flags: User specified operational flags.
*
* Accept a new incoming connection.
* Returns 0 upon success, negative otherwise.
*/
static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk, *newsk;
struct llc_sock *llc, *newllc;
struct sk_buff *skb;
int rc = -EOPNOTSUPP;
dprintk("%s: accepting on %02X\n", __func__,
llc_sk(sk)->laddr.lsap);
lock_sock(sk);
if (unlikely(sk->sk_type != SOCK_STREAM))
goto out;
rc = -EINVAL;
if (unlikely(sock->state != SS_UNCONNECTED ||
sk->sk_state != TCP_LISTEN))
goto out;
/* wait for a connection to arrive. */
if (skb_queue_empty(&sk->sk_receive_queue)) {
rc = llc_wait_data(sk, sk->sk_rcvtimeo);
if (rc)
goto out;
}
dprintk("%s: got a new connection on %02X\n", __func__,
llc_sk(sk)->laddr.lsap);
skb = skb_dequeue(&sk->sk_receive_queue);
rc = -EINVAL;
if (!skb->sk)
goto frees;
rc = 0;
newsk = skb->sk;
/* attach connection to a new socket. */
llc_ui_sk_init(newsock, newsk);
sock_reset_flag(newsk, SOCK_ZAPPED);
newsk->sk_state = TCP_ESTABLISHED;
newsock->state = SS_CONNECTED;
llc = llc_sk(sk);
newllc = llc_sk(newsk);
memcpy(&newllc->addr, &llc->addr, sizeof(newllc->addr));
newllc->link = llc_ui_next_link_no(newllc->laddr.lsap);
/* put original socket back into a clean listen state. */
sk->sk_state = TCP_LISTEN;
sk->sk_ack_backlog--;
dprintk("%s: ok success on %02X, client on %02X\n", __func__,
llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap);
frees:
kfree_skb(skb);
out:
release_sock(sk);
return rc;
}
/**
* llc_ui_recvmsg - copy received data to the socket user.
* @sock: Socket to copy data from.
* @msg: Various user space related information.
* @len: Size of user buffer.
* @flags: User specified flags.
*
* Copy received data to the socket user.
* Returns non-negative upon success, negative otherwise.
*/
static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name;
const int nonblock = flags & MSG_DONTWAIT;
struct sk_buff *skb = NULL;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
u32 *seq;
unsigned long used;
int target; /* Read at least this many bytes */
long timeo;
msg->msg_namelen = 0;
lock_sock(sk);
copied = -ENOTCONN;
if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
goto out;
timeo = sock_rcvtimeo(sk, nonblock);
seq = &llc->copied_seq;
if (flags & MSG_PEEK) {
peek_seq = llc->copied_seq;
seq = &peek_seq;
}
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
copied = 0;
do {
u32 offset;
/*
* We need to check signals first, to get correct SIGURG
* handling. FIXME: Need to check this doesn't impact 1003.1g
* and move it down to the bottom of the loop
*/
if (signal_pending(current)) {
if (copied)
break;
copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
break;
}
/* Next get a buffer. */
skb = skb_peek(&sk->sk_receive_queue);
if (skb) {
offset = *seq;
goto found_ok_skb;
}
/* Well, if we have backlog, try to process it now. */
if (copied >= target && !sk->sk_backlog.tail)
break;
if (copied) {
if (sk->sk_err ||
sk->sk_state == TCP_CLOSE ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
!timeo ||
(flags & MSG_PEEK))
break;
} else {
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
copied = sock_error(sk);
break;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) {
if (!sock_flag(sk, SOCK_DONE)) {
/*
* This occurs when the user tries to read
* from a never-connected socket.
*/
copied = -ENOTCONN;
break;
}
break;
}
if (!timeo) {
copied = -EAGAIN;
break;
}
}
if (copied >= target) { /* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
} else
sk_wait_data(sk, &timeo);
if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
current->comm,
task_pid_nr(current));
peek_seq = llc->copied_seq;
}
continue;
found_ok_skb:
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
used = len;
if (!(flags & MSG_TRUNC)) {
int rc = skb_copy_datagram_iovec(skb, offset,
msg->msg_iov, used);
if (rc) {
/* Exception. Bailout! */
if (!copied)
copied = -EFAULT;
break;
}
}
*seq += used;
copied += used;
len -= used;
/* For non-stream protocols we get one packet per recvmsg call */
if (sk->sk_type != SOCK_STREAM)
goto copy_uaddr;
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
sk_eat_skb(sk, skb, false);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
/* Partial read */
if (used + offset < skb->len)
continue;
} while (len > 0);
out:
release_sock(sk);
return copied;
copy_uaddr:
if (uaddr != NULL && skb != NULL) {
memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
msg->msg_namelen = sizeof(*uaddr);
}
if (llc_sk(sk)->cmsg_flags)
llc_cmsg_rcv(msg, skb);
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
sk_eat_skb(sk, skb, false);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
goto out;
}
/**
* llc_ui_sendmsg - Transmit data provided by the socket user.
* @sock: Socket to transmit data from.
* @msg: Various user related information.
* @len: Length of data to transmit.
*
* Transmit data provided by the socket user.
* Returns non-negative upon success, negative otherwise.
*/
static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
struct sockaddr_llc *addr = (struct sockaddr_llc *)msg->msg_name;
int flags = msg->msg_flags;
int noblock = flags & MSG_DONTWAIT;
struct sk_buff *skb;
size_t size = 0;
int rc = -EINVAL, copied = 0, hdrlen;
dprintk("%s: sending from %02X to %02X\n", __func__,
llc->laddr.lsap, llc->daddr.lsap);
lock_sock(sk);
if (addr) {
if (msg->msg_namelen < sizeof(*addr))
goto release;
} else {
if (llc_ui_addr_null(&llc->addr))
goto release;
addr = &llc->addr;
}
/* must bind connection to sap if user hasn't done it. */
if (sock_flag(sk, SOCK_ZAPPED)) {
/* bind to sap with null dev, exclusive. */
rc = llc_ui_autobind(sock, addr);
if (rc)
goto release;
}
hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
size = hdrlen + len;
if (size > llc->dev->mtu)
size = llc->dev->mtu;
copied = size - hdrlen;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
if (!skb)
goto release;
skb->dev = llc->dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
skb_reserve(skb, hdrlen);
rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied);
if (rc)
goto out;
if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
goto out;
}
if (addr->sllc_test) {
llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
goto out;
}
if (addr->sllc_xid) {
llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
goto out;
}
rc = -ENOPROTOOPT;
if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
goto out;
rc = llc_ui_send_data(sk, skb, noblock);
out:
if (rc) {
kfree_skb(skb);
release:
dprintk("%s: failed sending from %02X to %02X: %d\n",
__func__, llc->laddr.lsap, llc->daddr.lsap, rc);
}
release_sock(sk);
return rc ? : copied;
}
/**
* llc_ui_getname - return the address info of a socket
* @sock: Socket to get address of.
* @uaddr: Address structure to return information.
* @uaddrlen: Length of address structure.
* @peer: Does user want local or remote address information.
*
* Return the address information of a socket.
*/
static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddrlen, int peer)
{
struct sockaddr_llc sllc;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
int rc = -EBADF;
memset(&sllc, 0, sizeof(sllc));
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
*uaddrlen = sizeof(sllc);
if (peer) {
rc = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if(llc->dev)
sllc.sllc_arphrd = llc->dev->type;
sllc.sllc_sap = llc->daddr.lsap;
memcpy(&sllc.sllc_mac, &llc->daddr.mac, IFHWADDRLEN);
} else {
rc = -EINVAL;
if (!llc->sap)
goto out;
sllc.sllc_sap = llc->sap->laddr.lsap;
if (llc->dev) {
sllc.sllc_arphrd = llc->dev->type;
memcpy(&sllc.sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
}
}
rc = 0;
sllc.sllc_family = AF_LLC;
memcpy(uaddr, &sllc, sizeof(sllc));
out:
release_sock(sk);
return rc;
}
/**
* llc_ui_ioctl - io controls for PF_LLC
* @sock: Socket to get/set info
* @cmd: command
* @arg: optional argument for cmd
*
* get/set info on llc sockets
*/
static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
return -ENOIOCTLCMD;
}
/**
* llc_ui_setsockopt - set various connection specific parameters.
* @sock: Socket to set options on.
* @level: Socket level user is requesting operations on.
* @optname: Operation name.
* @optval: User provided operation data.
* @optlen: Length of optval.
*
* Set various connection specific parameters.
*/
static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
unsigned int opt;
int rc = -EINVAL;
lock_sock(sk);
if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
goto out;
rc = get_user(opt, (int __user *)optval);
if (rc)
goto out;
rc = -EINVAL;
switch (optname) {
case LLC_OPT_RETRY:
if (opt > LLC_OPT_MAX_RETRY)
goto out;
llc->n2 = opt;
break;
case LLC_OPT_SIZE:
if (opt > LLC_OPT_MAX_SIZE)
goto out;
llc->n1 = opt;
break;
case LLC_OPT_ACK_TMR_EXP:
if (opt > LLC_OPT_MAX_ACK_TMR_EXP)
goto out;
llc->ack_timer.expire = opt * HZ;
break;
case LLC_OPT_P_TMR_EXP:
if (opt > LLC_OPT_MAX_P_TMR_EXP)
goto out;
llc->pf_cycle_timer.expire = opt * HZ;
break;
case LLC_OPT_REJ_TMR_EXP:
if (opt > LLC_OPT_MAX_REJ_TMR_EXP)
goto out;
llc->rej_sent_timer.expire = opt * HZ;
break;
case LLC_OPT_BUSY_TMR_EXP:
if (opt > LLC_OPT_MAX_BUSY_TMR_EXP)
goto out;
llc->busy_state_timer.expire = opt * HZ;
break;
case LLC_OPT_TX_WIN:
if (opt > LLC_OPT_MAX_WIN)
goto out;
llc->k = opt;
break;
case LLC_OPT_RX_WIN:
if (opt > LLC_OPT_MAX_WIN)
goto out;
llc->rw = opt;
break;
case LLC_OPT_PKTINFO:
if (opt)
llc->cmsg_flags |= LLC_CMSG_PKTINFO;
else
llc->cmsg_flags &= ~LLC_CMSG_PKTINFO;
break;
default:
rc = -ENOPROTOOPT;
goto out;
}
rc = 0;
out:
release_sock(sk);
return rc;
}
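/*
* Illustrative userspace usage (an assumed sketch, not part of this
* file): enabling LLC_OPT_PKTINFO so that recvmsg() attaches the
* control message built in llc_cmsg_rcv() above:
*
*	int one = 1;
*
*	setsockopt(fd, SOL_LLC, LLC_OPT_PKTINFO, &one, sizeof(one));
*
* Every option at the SOL_LLC level takes exactly sizeof(int) bytes of
* option data; any other optlen is rejected with -EINVAL.
*/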
/**
* llc_ui_getsockopt - get connection specific socket info
* @sock: Socket to get information from.
* @level: Socket level user is requesting operations on.
* @optname: Operation name.
* @optval: Variable to return operation data in.
* @optlen: Length of optval.
*
* Get connection specific socket information.
*/
static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
int val = 0, len = 0, rc = -EINVAL;
lock_sock(sk);
if (unlikely(level != SOL_LLC))
goto out;
rc = get_user(len, optlen);
if (rc)
goto out;
rc = -EINVAL;
if (len != sizeof(int))
goto out;
switch (optname) {
case LLC_OPT_RETRY:
val = llc->n2; break;
case LLC_OPT_SIZE:
val = llc->n1; break;
case LLC_OPT_ACK_TMR_EXP:
val = llc->ack_timer.expire / HZ; break;
case LLC_OPT_P_TMR_EXP:
val = llc->pf_cycle_timer.expire / HZ; break;
case LLC_OPT_REJ_TMR_EXP:
val = llc->rej_sent_timer.expire / HZ; break;
case LLC_OPT_BUSY_TMR_EXP:
val = llc->busy_state_timer.expire / HZ; break;
case LLC_OPT_TX_WIN:
val = llc->k; break;
case LLC_OPT_RX_WIN:
val = llc->rw; break;
case LLC_OPT_PKTINFO:
val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0;
break;
default:
rc = -ENOPROTOOPT;
goto out;
}
rc = 0;
if (put_user(len, optlen) || copy_to_user(optval, &val, len))
rc = -EFAULT;
out:
release_sock(sk);
return rc;
}
static const struct net_proto_family llc_ui_family_ops = {
.family = PF_LLC,
.create = llc_ui_create,
.owner = THIS_MODULE,
};
static const struct proto_ops llc_ui_ops = {
.family = PF_LLC,
.owner = THIS_MODULE,
.release = llc_ui_release,
.bind = llc_ui_bind,
.connect = llc_ui_connect,
.socketpair = sock_no_socketpair,
.accept = llc_ui_accept,
.getname = llc_ui_getname,
.poll = datagram_poll,
.ioctl = llc_ui_ioctl,
.listen = llc_ui_listen,
.shutdown = llc_ui_shutdown,
.setsockopt = llc_ui_setsockopt,
.getsockopt = llc_ui_getsockopt,
.sendmsg = llc_ui_sendmsg,
.recvmsg = llc_ui_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static const char llc_proc_err_msg[] __initconst =
KERN_CRIT "LLC: Unable to register the proc_fs entries\n";
static const char llc_sysctl_err_msg[] __initconst =
KERN_CRIT "LLC: Unable to register the sysctl entries\n";
static const char llc_sock_err_msg[] __initconst =
KERN_CRIT "LLC: Unable to register the network family\n";
static int __init llc2_init(void)
{
int rc = proto_register(&llc_proto, 0);
if (rc != 0)
goto out;
llc_build_offset_table();
llc_station_init();
llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
rc = llc_proc_init();
if (rc != 0) {
printk(llc_proc_err_msg);
goto out_station;
}
rc = llc_sysctl_init();
if (rc) {
printk(llc_sysctl_err_msg);
goto out_proc;
}
rc = sock_register(&llc_ui_family_ops);
if (rc) {
printk(llc_sock_err_msg);
goto out_sysctl;
}
llc_add_pack(LLC_DEST_SAP, llc_sap_handler);
llc_add_pack(LLC_DEST_CONN, llc_conn_handler);
out:
return rc;
out_sysctl:
llc_sysctl_exit();
out_proc:
llc_proc_exit();
out_station:
llc_station_exit();
proto_unregister(&llc_proto);
goto out;
}
static void __exit llc2_exit(void)
{
llc_station_exit();
llc_remove_pack(LLC_DEST_SAP);
llc_remove_pack(LLC_DEST_CONN);
sock_unregister(PF_LLC);
llc_proc_exit();
llc_sysctl_exit();
proto_unregister(&llc_proto);
}
module_init(llc2_init);
module_exit(llc2_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Procom 1997, Jay Schullist 2001, Arnaldo C. Melo 2001-2003");
MODULE_DESCRIPTION("IEEE 802.2 PF_LLC support");
MODULE_ALIAS_NETPROTO(PF_LLC);
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_20 |
crossvul-cpp_data_good_364_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA L SSSSS %
% C A A L SS %
% C AAAAA L SSS %
% C A A L SS %
% CCCC A A LLLLL SSSSS %
% %
% %
% Read/Write CALS Raster Group 1 Image Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The CALS raster format is a standard developed by the Computer Aided
% Acquisition and Logistics Support (CALS) office of the United States
% Department of Defense to standardize graphics data interchange for
% electronic publishing, especially in the areas of technical graphics,
% CAD/CAM, and image processing applications.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#if defined(MAGICKCORE_TIFF_DELEGATE)
/*
Forward declarations.
*/
static MagickBooleanType
WriteCALSImage(const ImageInfo *,Image *,ExceptionInfo *);
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s C A L S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsCALS() returns MagickTrue if the image format type, identified by the
% magick string, is CALS Raster Group 1.
%
% The format of the IsCALS method is:
%
% MagickBooleanType IsCALS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsCALS(const unsigned char *magick,const size_t length)
{
if (length < 128)
return(MagickFalse);
if (LocaleNCompare((const char *) magick,"version: MIL-STD-1840",21) == 0)
return(MagickTrue);
if (LocaleNCompare((const char *) magick,"srcdocid:",9) == 0)
return(MagickTrue);
if (LocaleNCompare((const char *) magick,"rorient:",8) == 0)
return(MagickTrue);
return(MagickFalse);
}
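/*
  A minimal standalone sketch of the same magic test IsCALS() applies,
  assuming only plain C stdio and POSIX strncasecmp() (a hypothetical
  helper, not part of this coder): read the first 128-byte header record
  and probe the three prefixes accepted above.
*/
#include <stdio.h>
#include <strings.h>
static int LooksLikeCALS(const char *path)
{
  char
    magick[128];
  FILE
    *file;
  file=fopen(path,"rb");
  if (file == (FILE *) NULL)
    return(0);
  if (fread(magick,1,sizeof(magick),file) != sizeof(magick))
    {
      (void) fclose(file);
      return(0);
    }
  (void) fclose(file);
  if (strncasecmp(magick,"version: MIL-STD-1840",21) == 0)
    return(1);
  if (strncasecmp(magick,"srcdocid:",9) == 0)
    return(1);
  if (strncasecmp(magick,"rorient:",8) == 0)
    return(1);
  return(0);
}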
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d C A L S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadCALSImage() reads a CALS Raster Group 1 image file and
% returns it. It allocates the memory necessary for the new Image structure
% and returns a pointer to the new image.
%
% The format of the ReadCALSImage method is:
%
% Image *ReadCALSImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadCALSImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
filename[MagickPathExtent],
header[MagickPathExtent],
message[MagickPathExtent];
FILE
*file;
Image
*image;
ImageInfo
*read_info;
int
c,
unique_file;
MagickBooleanType
status;
register ssize_t
i;
unsigned long
density,
direction,
height,
orientation,
pel_path,
type,
width;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read CALS header.
*/
(void) memset(header,0,sizeof(header));
density=0;
direction=0;
orientation=1;
pel_path=0;
type=1;
width=0;
height=0;
for (i=0; i < 16; i++)
{
if (ReadBlob(image,128,(unsigned char *) header) != 128)
break;
switch (*header)
{
case 'R':
case 'r':
{
if (LocaleNCompare(header,"rdensty:",8) == 0)
{
(void) sscanf(header+8,"%lu",&density);
break;
}
if (LocaleNCompare(header,"rpelcnt:",8) == 0)
{
(void) sscanf(header+8,"%lu,%lu",&width,&height);
break;
}
if (LocaleNCompare(header,"rorient:",8) == 0)
{
(void) sscanf(header+8,"%lu,%lu",&pel_path,&direction);
if (pel_path == 90)
orientation=5;
else
if (pel_path == 180)
orientation=3;
else
if (pel_path == 270)
orientation=7;
if (direction == 90)
orientation++;
break;
}
if (LocaleNCompare(header,"rtype:",6) == 0)
{
(void) sscanf(header+6,"%lu",&type);
break;
}
break;
}
}
}
/*
Read CALS pixels.
*/
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
ThrowImageException(FileOpenError,"UnableToCreateTemporaryFile");
while ((c=ReadBlobByte(image)) != EOF)
if (fputc(c,file) != c)
break;
(void) fclose(file);
(void) CloseBlob(image);
image=DestroyImage(image);
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
(void) FormatLocaleString(read_info->filename,MagickPathExtent,"group4:%s",
filename);
(void) FormatLocaleString(message,MagickPathExtent,"%lux%lu",width,height);
(void) CloneString(&read_info->size,message);
(void) FormatLocaleString(message,MagickPathExtent,"%lu",density);
(void) CloneString(&read_info->density,message);
read_info->orientation=(OrientationType) orientation;
image=ReadImage(read_info,exception);
if (image != (Image *) NULL)
{
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick,"CALS",MagickPathExtent);
}
read_info=DestroyImageInfo(read_info);
(void) RelinquishUniqueFileResource(filename);
return(image);
}
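/*
  ReadCALSImage() above recognizes each 128-byte ASCII header record by
  prefix and extracts its fields with sscanf(). A minimal sketch of that
  step in isolation (a hypothetical helper, not part of this coder; the
  record is copied so sscanf() always sees a terminated string):
*/
#include <stdio.h>
#include <string.h>
static int ParseRPelCnt(const char *record,unsigned long *width,
  unsigned long *height)
{
  char
    buffer[129];
  /* records are fixed 128-byte ASCII fields, e.g. "rpelcnt: 002048,002048" */
  (void) memcpy(buffer,record,128);
  buffer[128]='\0';
  if (strncmp(buffer,"rpelcnt:",8) != 0)
    return(0);
  return(sscanf(buffer+8,"%lu,%lu",width,height) == 2);
}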
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r C A L S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterCALSImage() adds attributes for the CALS Raster Group 1 image
% format to the list of supported formats. The attributes include the
% image format tag, a method to read and/or write the format, whether the
% format supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief description
% of the format.
%
% The format of the RegisterCALSImage method is:
%
% size_t RegisterCALSImage(void)
%
*/
ModuleExport size_t RegisterCALSImage(void)
{
#define CALSDescription "Continuous Acquisition and Life-cycle Support Type 1"
#define CALSNote "Specified in MIL-R-28002 and MIL-PRF-28002"
MagickInfo
*entry;
entry=AcquireMagickInfo("CALS","CAL",CALSDescription);
entry->decoder=(DecodeImageHandler *) ReadCALSImage;
#if defined(MAGICKCORE_TIFF_DELEGATE)
entry->encoder=(EncodeImageHandler *) WriteCALSImage;
#endif
entry->flags^=CoderAdjoinFlag;
entry->magick=(IsImageFormatHandler *) IsCALS;
entry->note=ConstantString(CALSNote);
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("CALS","CALS",CALSDescription);
entry->decoder=(DecodeImageHandler *) ReadCALSImage;
#if defined(MAGICKCORE_TIFF_DELEGATE)
entry->encoder=(EncodeImageHandler *) WriteCALSImage;
#endif
entry->flags^=CoderAdjoinFlag;
entry->magick=(IsImageFormatHandler *) IsCALS;
entry->note=ConstantString(CALSNote);
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r C A L S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterCALSImage() removes format registrations made by the
% CALS module from the list of supported formats.
%
% The format of the UnregisterCALSImage method is:
%
% UnregisterCALSImage(void)
%
*/
ModuleExport void UnregisterCALSImage(void)
{
(void) UnregisterMagickInfo("CAL");
(void) UnregisterMagickInfo("CALS");
}
#if defined(MAGICKCORE_TIFF_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e C A L S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteCALSImage() writes an image to a file in CALS Raster Group 1 image
% format.
%
% The format of the WriteCALSImage method is:
%
% MagickBooleanType WriteCALSImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t WriteCALSRecord(Image *image,const char *data)
{
char
pad[128];
register const char
*p;
register ssize_t
i;
ssize_t
count;
i=0;
count=0;
if (data != (const char *) NULL)
{
p=data;
for (i=0; (i < 128) && (p[i] != '\0'); i++);
count=WriteBlob(image,(size_t) i,(const unsigned char *) data);
}
if (i < 128)
{
i=128-i;
(void) memset(pad,' ',(size_t) i);
count=WriteBlob(image,(size_t) i,(const unsigned char *) pad);
}
return(count);
}
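/*
  A minimal sketch of the same fixed-width record convention using plain
  C stdio (a hypothetical helper, not part of this coder): write the
  record text, then pad with spaces so every record occupies exactly 128
  bytes, as WriteCALSRecord() does above.
*/
#include <stdio.h>
#include <string.h>
static size_t WritePaddedRecord(FILE *file,const char *data)
{
  char
    pad[128];
  size_t
    count,
    i;
  count=0;
  i=0;
  if (data != (const char *) NULL)
    {
      for (i=0; (i < 128) && (data[i] != '\0'); i++);
      count=fwrite(data,1,i,file);
    }
  if (i < 128)
    {
      (void) memset(pad,' ',128-i);
      count+=fwrite(pad,1,128-i,file);
    }
  return(count);
}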
static MagickBooleanType WriteCALSImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
char
header[129];
Image
*group4_image;
ImageInfo
*write_info;
MagickBooleanType
status;
register ssize_t
i;
size_t
density,
length,
orient_x,
orient_y;
ssize_t
count;
unsigned char
*group4;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
Create standard CALS header.
*/
count=WriteCALSRecord(image,"srcdocid: NONE");
(void) count;
count=WriteCALSRecord(image,"dstdocid: NONE");
count=WriteCALSRecord(image,"txtfilid: NONE");
count=WriteCALSRecord(image,"figid: NONE");
count=WriteCALSRecord(image,"srcgph: NONE");
count=WriteCALSRecord(image,"doccls: NONE");
count=WriteCALSRecord(image,"rtype: 1");
orient_x=0;
orient_y=0;
switch (image->orientation)
{
case TopRightOrientation:
{
orient_x=180;
orient_y=270;
break;
}
case BottomRightOrientation:
{
orient_x=180;
orient_y=90;
break;
}
case BottomLeftOrientation:
{
orient_y=90;
break;
}
case LeftTopOrientation:
{
orient_x=270;
break;
}
case RightTopOrientation:
{
orient_x=270;
orient_y=180;
break;
}
case RightBottomOrientation:
{
orient_x=90;
orient_y=180;
break;
}
case LeftBottomOrientation:
{
orient_x=90;
break;
}
default:
{
orient_y=270;
break;
}
}
(void) FormatLocaleString(header,sizeof(header),"rorient: %03ld,%03ld",
(long) orient_x,(long) orient_y);
count=WriteCALSRecord(image,header);
(void) FormatLocaleString(header,sizeof(header),"rpelcnt: %06lu,%06lu",
(unsigned long) image->columns,(unsigned long) image->rows);
count=WriteCALSRecord(image,header);
density=200;
if (image_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
(void) ParseGeometry(image_info->density,&geometry_info);
density=(size_t) floor(geometry_info.rho+0.5);
}
(void) FormatLocaleString(header,sizeof(header),"rdensty: %04lu",
(unsigned long) density);
count=WriteCALSRecord(image,header);
count=WriteCALSRecord(image,"notes: NONE");
(void) memset(header,' ',128);
for (i=0; i < 5; i++)
(void) WriteBlob(image,128,(unsigned char *) header);
/*
Write CALS pixels.
*/
write_info=CloneImageInfo(image_info);
(void) CopyMagickString(write_info->filename,"GROUP4:",MagickPathExtent);
(void) CopyMagickString(write_info->magick,"GROUP4",MagickPathExtent);
group4_image=CloneImage(image,0,0,MagickTrue,exception);
if (group4_image == (Image *) NULL)
{
write_info=DestroyImageInfo(write_info);
(void) CloseBlob(image);
return(MagickFalse);
}
group4=(unsigned char *) ImageToBlob(write_info,group4_image,&length,
exception);
group4_image=DestroyImage(group4_image);
if (group4 == (unsigned char *) NULL)
{
write_info=DestroyImageInfo(write_info);
(void) CloseBlob(image);
return(MagickFalse);
}
write_info=DestroyImageInfo(write_info);
if (WriteBlob(image,length,group4) != (ssize_t) length)
status=MagickFalse;
group4=(unsigned char *) RelinquishMagickMemory(group4);
(void) CloseBlob(image);
return(status);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_364_0 |
crossvul-cpp_data_good_5844_5 | /*
* UDP over IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/ipv4/udp.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
* YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
static unsigned int udp6_ehashfn(struct net *net,
const struct in6_addr *laddr,
const u16 lport,
const struct in6_addr *faddr,
const __be16 fport)
{
static u32 udp6_ehash_secret __read_mostly;
static u32 udp_ipv6_hash_secret __read_mostly;
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
sizeof(udp6_ehash_secret));
net_get_random_once(&udp_ipv6_hash_secret,
sizeof(udp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
udp_ipv6_hash_secret + net_hash_mix(net));
}
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
int sk_ipv6only = ipv6_only_sock(sk);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
/* if both are mapped, treat as IPv4 */
if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
return (!sk2_ipv6only &&
(!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
if (addr_type2 == IPV6_ADDR_ANY &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
!(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
return 1;
if (sk2_rcv_saddr6 &&
ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
return 1;
return 0;
}
static unsigned int udp6_portaddr_hash(struct net *net,
const struct in6_addr *addr6,
unsigned int port)
{
unsigned int hash, mix = net_hash_mix(net);
if (ipv6_addr_any(addr6))
hash = jhash_1word(0, mix);
else if (ipv6_addr_v4mapped(addr6))
hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
else
hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
return hash ^ port;
}
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
unsigned int hash2_partial =
udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
static void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = udp6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
}
static inline int compute_score(struct sock *sk, struct net *net,
unsigned short hnum,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif)
{
int score = -1;
if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
sk->sk_family == PF_INET6) {
struct inet_sock *inet = inet_sk(sk);
score = 0;
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
}
return score;
}
#define SCORE2_MAX (1 + 1 + 1)
static inline int compute_score2(struct sock *sk, struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned short hnum,
int dif)
{
int score = -1;
if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
sk->sk_family == PF_INET6) {
struct inet_sock *inet = inet_sk(sk);
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score = 0;
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
}
return score;
}
/* called with read_rcu_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
begin:
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
} else if (score == SCORE2_MAX)
goto exact_match;
} else if (score == badness && reuseport) {
matches++;
if (((u64)hash * matches) >> 32 == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot2)
goto begin;
if (result) {
exact_match:
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
sock_put(result);
goto begin;
}
}
return result;
}
struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *udptable)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
rcu_read_lock();
if (hslot->count > 10) {
hash2 = udp6_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
&in6addr_any, hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = -1;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (((u64)hash * matches) >> 32 == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score(result, net, hnum, saddr, sport,
daddr, dport, dif) < badness)) {
sock_put(result);
goto begin;
}
}
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
struct sock *sk;
const struct ipv6hdr *iph = ipv6_hdr(skb);
if (unlikely(sk = skb_steal_sock(skb)))
return sk;
return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
udptable);
}
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
/*
 * This should be easy: if there is something there we
* return it, otherwise we block.
*/
int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
int is_udp4;
bool slow;
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udpv6_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
}
goto out_free;
}
if (!peeked) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
}
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (msg->msg_name) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *) msg->msg_name;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = udp_hdr(skb)->source;
sin6->sin6_flowinfo = 0;
if (is_udp4) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin6->sin6_addr);
sin6->sin6_scope_id = 0;
} else {
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
}
*addr_len = sizeof(*sin6);
}
if (is_udp4) {
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
} else {
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
}
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
} else {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
}
unlock_sock_fast(sk, slow);
if (noblock)
return -EAGAIN;
/* starting over for a new packet */
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info,
struct udp_table *udptable)
{
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
const struct in6_addr *daddr = &hdr->daddr;
struct udphdr *uh = (struct udphdr*)(skb->data+offset);
struct sock *sk;
int err;
sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest,
saddr, uh->source, inet6_iif(skb), udptable);
if (sk == NULL)
return;
if (type == ICMPV6_PKT_TOOBIG)
ip6_sk_update_pmtu(skb, sk, info);
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
goto out;
}
np = inet6_sk(sk);
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
goto out;
if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
goto out;
if (np->recverr)
ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
sk->sk_err = err;
sk->sk_error_report(sk);
out:
sock_put(sk);
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
}
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
return -1;
}
return 0;
}
static __inline__ void udpv6_err(struct sk_buff *skb,
struct inet6_skb_parm *opt, u8 type,
u8 code, int offset, __be32 info )
{
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
static struct static_key udpv6_encap_needed __read_mostly;
void udpv6_encap_enable(void)
{
if (!static_key_enabled(&udpv6_encap_needed))
static_key_slow_inc(&udpv6_encap_needed);
}
EXPORT_SYMBOL(udpv6_encap_enable);
int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
int ret;
ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
" %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
"too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto csum_error;
}
if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
goto drop;
skb_dst_drop(skb);
bh_lock_sock(sk);
rc = 0;
if (!sock_owned_by_user(sk))
rc = __udpv6_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
return rc;
csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
__be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr,
int dif)
{
struct hlist_nulls_node *node;
struct sock *s = sk;
unsigned short num = ntohs(loc_port);
sk_nulls_for_each_from(s, node) {
struct inet_sock *inet = inet_sk(s);
if (!net_eq(sock_net(s), net))
continue;
if (udp_sk(s)->udp_port_hash == num &&
s->sk_family == PF_INET6) {
if (inet->inet_dport) {
if (inet->inet_dport != rmt_port)
continue;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
continue;
if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
continue;
}
if (!inet6_mc_check(s, loc_addr, rmt_addr))
continue;
return s;
}
}
return NULL;
}
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
struct sk_buff *skb1 = NULL;
struct sock *sk;
unsigned int i;
for (i = 0; i < count; i++) {
sk = stack[i];
if (likely(skb1 == NULL))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
atomic_inc(&sk->sk_drops);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
}
if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
skb1 = NULL;
}
if (unlikely(skb1))
kfree_skb(skb1);
}
/*
* Note: called only from the BH handler context,
* so we don't need to lock the hashes.
*/
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr, const struct in6_addr *daddr,
struct udp_table *udptable)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
const struct udphdr *uh = udp_hdr(skb);
struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
int dif;
unsigned int i, count = 0;
spin_lock(&hslot->lock);
sk = sk_nulls_head(&hslot->head);
dif = inet6_iif(skb);
sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
while (sk) {
stack[count++] = sk;
sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
uh->source, saddr, dif);
if (unlikely(count == ARRAY_SIZE(stack))) {
if (!sk)
break;
flush_stack(stack, count, skb, ~0);
count = 0;
}
}
/*
* before releasing the lock, we must take reference on sockets
*/
for (i = 0; i < count; i++)
sock_hold(stack[i]);
spin_unlock(&hslot->lock);
if (count) {
flush_stack(stack, count, skb, count - 1);
for (i = 0; i < count; i++)
sock_put(stack[i]);
} else {
kfree_skb(skb);
}
return 0;
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
const struct in6_addr *saddr, *daddr;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto discard;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
/* Check for jumbo payload */
if (ulen == 0)
ulen = skb->len;
if (ulen < sizeof(*uh))
goto short_packet;
if (ulen < skb->len) {
if (pskb_trim_rcsum(skb, ulen))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
}
}
if (udp6_csum_init(skb, uh, proto))
goto csum_error;
/*
* Multicast receive code
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
saddr, daddr, udptable);
/* Unicast */
/*
* check socket cache ... must talk to Alan about his plans
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk != NULL) {
int ret;
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
if (ret > 0)
return -ret;
return 0;
}
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
if (udp_lib_checksum_complete(skb))
goto csum_error;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
short_packet:
LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
saddr,
ntohs(uh->source),
ulen,
skb->len,
daddr,
ntohs(uh->dest));
goto discard;
csum_error:
UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
* Throw away all pending data and cancel the corking. Socket is locked.
*/
static void udp_v6_flush_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
if (up->pending == AF_INET)
udp_flush_pending_frames(sk);
else if (up->pending) {
up->len = 0;
up->pending = 0;
ip6_flush_pending_frames(sk);
}
}
/**
* udp6_hwcsum_outgoing - handle outgoing HW checksumming
* @sk: socket we are sending on
* @skb: sk_buff containing the filled-in UDP header
* (checksum field must be zeroed out)
*/
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr, int len)
{
unsigned int offset;
struct udphdr *uh = udp_hdr(skb);
__wsum csum = 0;
if (skb_queue_len(&sk->sk_write_queue) == 1) {
/* Only one fragment on the socket. */
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
} else {
/*
* HW-checksum won't work as there are two or more
* fragments on the socket so that all csums of sk_buffs
* should be together
*/
offset = skb_transport_offset(skb);
skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb->ip_summed = CHECKSUM_NONE;
skb_queue_walk(&sk->sk_write_queue, skb) {
csum = csum_add(csum, skb->csum);
}
uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
/*
* Sending
*/
static int udp_v6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
struct udphdr *uh;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct flowi6 *fl6;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
__wsum csum = 0;
if (up->pending == AF_INET)
return udp_push_pending_frames(sk);
fl6 = &inet->cork.fl.u.ip6;
/* Grab the skbuff where UDP header space exists. */
if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = fl6->fl6_sport;
uh->dest = fl6->fl6_dport;
uh->len = htons(up->len);
uh->check = 0;
if (is_udplite)
csum = udplite_csum_outgoing(sk, skb);
else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
up->len);
goto send;
} else
csum = udp_csum_outgoing(sk, skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
up->len, fl6->flowi6_proto, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip6_push_pending_frames(sk);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
out:
up->len = 0;
up->pending = 0;
return err;
}
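/*
 * A standalone sketch (hypothetical, not kernel code) of the pseudo-header
 * checksum that csum_ipv6_magic() folds in above, per RFC 768 / RFC 2460:
 * a 16-bit one's-complement sum over source address, destination address,
 * upper-layer length and next header, then over the UDP datagram itself
 * (checksum field zeroed), with a result of 0 sent as all-ones
 * (cf. CSUM_MANGLED_0).
 */
static unsigned int csum16(const unsigned char *p, size_t len,
			   unsigned int sum)
{
	while (len > 1) {
		sum += (p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += p[0] << 8;
	return sum;
}
static unsigned short udp6_csum_example(const unsigned char saddr[16],
					const unsigned char daddr[16],
					const unsigned char *udp, size_t len)
{
	unsigned char ph[40];
	unsigned int sum;
	memcpy(ph, saddr, 16);
	memcpy(ph + 16, daddr, 16);
	ph[32] = len >> 24;
	ph[33] = len >> 16;
	ph[34] = len >> 8;
	ph[35] = len;
	ph[36] = ph[37] = ph[38] = 0;
	ph[39] = IPPROTO_UDP;		/* next header = 17 */
	sum = csum16(ph, sizeof(ph), 0);
	sum = csum16(udp, len, sum);	/* checksum field must be zero */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	sum = ~sum & 0xffff;
	return sum ? sum : 0xffff;	/* 0 is transmitted as all-ones */
}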
int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct in6_addr *daddr, *final_p, final;
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_len = msg->msg_namelen;
int ulen = len;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
/* destination address check */
if (sin6) {
if (addr_len < offsetof(struct sockaddr, sa_data))
return -EINVAL;
switch (sin6->sin6_family) {
case AF_INET6:
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
break;
case AF_INET:
goto do_udp_sendmsg;
case AF_UNSPEC:
msg->msg_name = sin6 = NULL;
msg->msg_namelen = addr_len = 0;
daddr = NULL;
break;
default:
return -EINVAL;
}
} else if (!up->pending) {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
} else
daddr = NULL;
if (daddr) {
if (ipv6_addr_v4mapped(daddr)) {
struct sockaddr_in sin;
sin.sin_family = AF_INET;
sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
sin.sin_addr.s_addr = daddr->s6_addr32[3];
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
return udp_sendmsg(iocb, sk, msg, len);
}
}
if (up->pending == AF_INET)
return udp_sendmsg(iocb, sk, msg, len);
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX - sizeof(struct udphdr))
return -EMSGSIZE;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET6)) {
release_sock(sk);
return -EAFNOSUPPORT;
}
dst = NULL;
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
memset(&fl6, 0, sizeof(fl6));
if (sin6) {
if (sin6->sin6_port == 0)
return -EINVAL;
fl6.fl6_dport = sin6->sin6_port;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
daddr = &flowlabel->dst;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
fl6.fl6_dport = inet->inet_dport;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
connected = 1;
}
if (!fl6.flowi6_oif)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
connected = 0;
}
if (opt == NULL)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = 0;
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
fl6.flowi6_oif = np->mcast_oif;
connected = 0;
} else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, true);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto out;
}
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl6.daddr))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
}
if (tclass < 0)
tclass = np->tclass;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
up->pending = AF_INET6;
do_append_data:
if (dontfrag < 0)
dontfrag = np->dontfrag;
up->len += ulen;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
(struct rt6_info*)dst,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
err = udp_v6_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
if (dst) {
if (connected) {
ip6_dst_store(sk, dst,
ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
&sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
&np->saddr :
#endif
NULL);
} else {
dst_release(dst);
}
dst = NULL;
}
if (err > 0)
err = np->recverr ? net_xmit_errno(err) : 0;
release_sock(sk);
out:
dst_release(dst);
fl6_sock_release(flowlabel);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
void udpv6_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
lock_sock(sk);
udp_v6_flush_pending_frames(sk);
release_sock(sk);
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
inet6_destroy_sock(sk);
}
/*
* Socket option code for UDP
*/
int udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static const struct inet6_protocol udpv6_protocol = {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
} else {
int bucket = ((struct udp_iter_state *)seq->private)->bucket;
struct inet_sock *inet = inet_sk(v);
__u16 srcp = ntohs(inet->inet_sport);
__u16 destp = ntohs(inet->inet_dport);
ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
}
return 0;
}
static const struct file_operations udp6_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
static struct udp_seq_afinfo udp6_seq_afinfo = {
.name = "udp6",
.family = AF_INET6,
.udp_table = &udp_table,
.seq_fops = &udp6_afinfo_seq_fops,
.seq_ops = {
.show = udp6_seq_show,
},
};
int __net_init udp6_proc_init(struct net *net)
{
return udp_proc_register(net, &udp6_seq_afinfo);
}
void udp6_proc_exit(struct net *net) {
udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
void udp_v6_clear_sk(struct sock *sk, int size)
{
struct inet_sock *inet = inet_sk(sk);
/* we do not want to clear pinet6 field, because of RCU lookups */
sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
memset(&inet->pinet6 + 1, 0, size);
}
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
.name = "UDPv6",
.owner = THIS_MODULE,
.close = udp_lib_close,
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
.backlog_rcv = __udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp6_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
.clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udpv6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udpv6_prot,
.ops = &inet6_dgram_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_PERMANENT,
};
int __init udpv6_init(void)
{
int ret;
ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
ret = inet6_register_protosw(&udpv6_protosw);
if (ret)
goto out_udpv6_protocol;
out:
return ret;
out_udpv6_protocol:
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5844_5 |
crossvul-cpp_data_good_2778_0 | /*
* MXF demuxer.
* Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* References
* SMPTE 336M KLV Data Encoding Protocol Using Key-Length-Value
* SMPTE 377M MXF File Format Specifications
* SMPTE 378M Operational Pattern 1a
* SMPTE 379M MXF Generic Container
* SMPTE 381M Mapping MPEG Streams into the MXF Generic Container
* SMPTE 382M Mapping AES3 and Broadcast Wave Audio into the MXF Generic Container
* SMPTE 383M Mapping DV-DIF Data to the MXF Generic Container
*
* Principle
* Search for Track numbers which will identify essence element KLV packets.
* Search for SourcePackage which define tracks which contains Track numbers.
* Material Package contains tracks with reference to SourcePackage tracks.
* Search for Descriptors (Picture, Sound) which contains codec info and parameters.
* Assign Descriptors to correct Tracks.
*
* Metadata reading functions read Local Tags, get InstanceUID(0x3C0A) then add MetaDataSet to MXFContext.
* Metadata parsing resolves Strong References to objects.
*
* Simple demuxer, only OP1A supported and some files might not work at all.
* Only tracks with associated descriptors will be decoded. "Highly Desirable" SMPTE 377M D.1
*/
#include <inttypes.h>
#include "libavutil/aes.h"
#include "libavutil/avassert.h"
#include "libavutil/mathematics.h"
#include "libavcodec/bytestream.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/parseutils.h"
#include "libavutil/timecode.h"
#include "avformat.h"
#include "internal.h"
#include "mxf.h"
typedef enum {
Header,
BodyPartition,
Footer
} MXFPartitionType;
typedef enum {
OP1a = 1,
OP1b,
OP1c,
OP2a,
OP2b,
OP2c,
OP3a,
OP3b,
OP3c,
OPAtom,
OPSONYOpt, /* FATE sample, violates the spec in places */
} MXFOP;
typedef struct MXFPartition {
int closed;
int complete;
MXFPartitionType type;
uint64_t previous_partition;
int index_sid;
int body_sid;
int64_t this_partition;
int64_t essence_offset; ///< absolute offset of essence
int64_t essence_length;
int32_t kag_size;
int64_t header_byte_count;
int64_t index_byte_count;
int pack_length;
int64_t pack_ofs; ///< absolute offset of pack in file, including run-in
} MXFPartition;
typedef struct MXFCryptoContext {
UID uid;
enum MXFMetadataSetType type;
UID source_container_ul;
} MXFCryptoContext;
typedef struct MXFStructuralComponent {
UID uid;
enum MXFMetadataSetType type;
UID source_package_ul;
UID source_package_uid;
UID data_definition_ul;
int64_t duration;
int64_t start_position;
int source_track_id;
} MXFStructuralComponent;
typedef struct MXFSequence {
UID uid;
enum MXFMetadataSetType type;
UID data_definition_ul;
UID *structural_components_refs;
int structural_components_count;
int64_t duration;
uint8_t origin;
} MXFSequence;
typedef struct MXFTimecodeComponent {
UID uid;
enum MXFMetadataSetType type;
int drop_frame;
int start_frame;
struct AVRational rate;
AVTimecode tc;
} MXFTimecodeComponent;
typedef struct {
UID uid;
enum MXFMetadataSetType type;
UID input_segment_ref;
} MXFPulldownComponent;
typedef struct {
UID uid;
enum MXFMetadataSetType type;
UID *structural_components_refs;
int structural_components_count;
int64_t duration;
} MXFEssenceGroup;
typedef struct {
UID uid;
enum MXFMetadataSetType type;
char *name;
char *value;
} MXFTaggedValue;
typedef struct {
UID uid;
enum MXFMetadataSetType type;
MXFSequence *sequence; /* mandatory, and only one */
UID sequence_ref;
int track_id;
char *name;
uint8_t track_number[4];
AVRational edit_rate;
int intra_only;
uint64_t sample_count;
int64_t original_duration; /* st->duration in SampleRate/EditRate units */
} MXFTrack;
typedef struct MXFDescriptor {
UID uid;
enum MXFMetadataSetType type;
UID essence_container_ul;
UID essence_codec_ul;
UID codec_ul;
AVRational sample_rate;
AVRational aspect_ratio;
int width;
int height; /* Field height, not frame height */
int frame_layout; /* See MXFFrameLayout enum */
int video_line_map[2];
#define MXF_FIELD_DOMINANCE_DEFAULT 0
#define MXF_FIELD_DOMINANCE_FF 1 /* coded first, displayed first */
#define MXF_FIELD_DOMINANCE_FL 2 /* coded first, displayed last */
int field_dominance;
int channels;
int bits_per_sample;
int64_t duration; /* ContainerDuration optional property */
unsigned int component_depth;
unsigned int horiz_subsampling;
unsigned int vert_subsampling;
UID *sub_descriptors_refs;
int sub_descriptors_count;
int linked_track_id;
uint8_t *extradata;
int extradata_size;
enum AVPixelFormat pix_fmt;
} MXFDescriptor;
typedef struct MXFIndexTableSegment {
UID uid;
enum MXFMetadataSetType type;
int edit_unit_byte_count;
int index_sid;
int body_sid;
AVRational index_edit_rate;
uint64_t index_start_position;
uint64_t index_duration;
int8_t *temporal_offset_entries;
int *flag_entries;
uint64_t *stream_offset_entries;
int nb_index_entries;
} MXFIndexTableSegment;
typedef struct MXFPackage {
UID uid;
enum MXFMetadataSetType type;
UID package_uid;
UID package_ul;
UID *tracks_refs;
int tracks_count;
MXFDescriptor *descriptor; /* only one */
UID descriptor_ref;
char *name;
UID *comment_refs;
int comment_count;
} MXFPackage;
typedef struct MXFMetadataSet {
UID uid;
enum MXFMetadataSetType type;
} MXFMetadataSet;
/* decoded index table */
typedef struct MXFIndexTable {
int index_sid;
int body_sid;
int nb_ptses; /* number of PTSes or total duration of index */
int64_t first_dts; /* DTS = EditUnit + first_dts */
int64_t *ptses; /* maps EditUnit -> PTS */
int nb_segments;
MXFIndexTableSegment **segments; /* sorted by IndexStartPosition */
AVIndexEntry *fake_index; /* used for calling ff_index_search_timestamp() */
int8_t *offsets; /* temporal offsets for display order to stored order conversion */
} MXFIndexTable;
typedef struct MXFContext {
MXFPartition *partitions;
unsigned partitions_count;
MXFOP op;
UID *packages_refs;
int packages_count;
MXFMetadataSet **metadata_sets;
int metadata_sets_count;
AVFormatContext *fc;
struct AVAES *aesc;
uint8_t *local_tags;
int local_tags_count;
uint64_t footer_partition;
KLVPacket current_klv_data;
int current_klv_index;
int run_in;
MXFPartition *current_partition;
int parsing_backward;
int64_t last_forward_tell;
int last_forward_partition;
int current_edit_unit;
int nb_index_tables;
MXFIndexTable *index_tables;
int edit_units_per_packet; ///< how many edit units to read at a time (PCM, OPAtom)
} MXFContext;
enum MXFWrappingScheme {
Frame,
Clip,
};
/* NOTE: klv_offset is not set (-1) for local keys */
typedef int MXFMetadataReadFunc(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset);
typedef struct MXFMetadataReadTableEntry {
const UID key;
MXFMetadataReadFunc *read;
int ctx_size;
enum MXFMetadataSetType type;
} MXFMetadataReadTableEntry;
static int mxf_read_close(AVFormatContext *s);
/* partial keys to match */
static const uint8_t mxf_header_partition_pack_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02 };
static const uint8_t mxf_essence_element_key[] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01 };
static const uint8_t mxf_avid_essence_element_key[] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0e,0x04,0x03,0x01 };
static const uint8_t mxf_canopus_essence_element_key[] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x0a,0x0e,0x0f,0x03,0x01 };
static const uint8_t mxf_system_item_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x03,0x01,0x04 };
static const uint8_t mxf_klv_key[] = { 0x06,0x0e,0x2b,0x34 };
/* complete keys to match */
static const uint8_t mxf_crypto_source_container_ul[] = { 0x06,0x0e,0x2b,0x34,0x01,0x01,0x01,0x09,0x06,0x01,0x01,0x02,0x02,0x00,0x00,0x00 };
static const uint8_t mxf_encrypted_triplet_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x04,0x01,0x07,0x0d,0x01,0x03,0x01,0x02,0x7e,0x01,0x00 };
static const uint8_t mxf_encrypted_essence_container[] = { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x07,0x0d,0x01,0x03,0x01,0x02,0x0b,0x01,0x00 };
static const uint8_t mxf_random_index_pack_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x11,0x01,0x00 };
static const uint8_t mxf_sony_mpeg4_extradata[] = { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0e,0x06,0x06,0x02,0x02,0x01,0x00,0x00 };
static const uint8_t mxf_avid_project_name[] = { 0xa5,0xfb,0x7b,0x25,0xf6,0x15,0x94,0xb9,0x62,0xfc,0x37,0x17,0x49,0x2d,0x42,0xbf };
static const uint8_t mxf_jp2k_rsiz[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x01,0x00 };
static const uint8_t mxf_indirect_value_utf16le[] = { 0x4c,0x00,0x02,0x10,0x01,0x00,0x00,0x00,0x00,0x06,0x0e,0x2b,0x34,0x01,0x04,0x01,0x01 };
static const uint8_t mxf_indirect_value_utf16be[] = { 0x42,0x01,0x10,0x02,0x00,0x00,0x00,0x00,0x00,0x06,0x0e,0x2b,0x34,0x01,0x04,0x01,0x01 };
#define IS_KLV_KEY(x, y) (!memcmp(x, y, sizeof(y)))
static void mxf_free_metadataset(MXFMetadataSet **ctx, int freectx)
{
MXFIndexTableSegment *seg;
switch ((*ctx)->type) {
case Descriptor:
av_freep(&((MXFDescriptor *)*ctx)->extradata);
break;
case MultipleDescriptor:
av_freep(&((MXFDescriptor *)*ctx)->sub_descriptors_refs);
break;
case Sequence:
av_freep(&((MXFSequence *)*ctx)->structural_components_refs);
break;
case EssenceGroup:
av_freep(&((MXFEssenceGroup *)*ctx)->structural_components_refs);
break;
case SourcePackage:
case MaterialPackage:
av_freep(&((MXFPackage *)*ctx)->tracks_refs);
av_freep(&((MXFPackage *)*ctx)->name);
av_freep(&((MXFPackage *)*ctx)->comment_refs);
break;
case TaggedValue:
av_freep(&((MXFTaggedValue *)*ctx)->name);
av_freep(&((MXFTaggedValue *)*ctx)->value);
break;
case Track:
av_freep(&((MXFTrack *)*ctx)->name);
break;
case IndexTableSegment:
seg = (MXFIndexTableSegment *)*ctx;
av_freep(&seg->temporal_offset_entries);
av_freep(&seg->flag_entries);
av_freep(&seg->stream_offset_entries);
default:
break;
}
if (freectx)
av_freep(ctx);
}
static int64_t klv_decode_ber_length(AVIOContext *pb)
{
uint64_t size = avio_r8(pb);
if (size & 0x80) { /* long form */
int bytes_num = size & 0x7f;
/* SMPTE 379M 5.3.4 guarantees that bytes_num must not exceed 8 bytes */
if (bytes_num > 8)
return AVERROR_INVALIDDATA;
size = 0;
while (bytes_num--)
size = size << 8 | avio_r8(pb);
}
return size;
}
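/* Illustrative sketch (not used by the demuxer): the same BER length rule as
 * klv_decode_ber_length() above, applied to a plain byte buffer so the
 * encoding is concrete. A first byte without the high bit set is the length
 * itself (short form); otherwise its low 7 bits give the number of length
 * bytes that follow, so {0x83,0x01,0x02,0x03} decodes to 0x010203. */
static inline int64_t klv_ber_length_from_buf(const uint8_t *buf, int buf_size)
{
    uint64_t size;
    int i = 0;
    if (buf_size < 1)
        return AVERROR_INVALIDDATA;
    size = buf[i++];
    if (size & 0x80) { /* long form */
        int bytes_num = size & 0x7f;
        /* same SMPTE 379M 5.3.4 bound as above */
        if (bytes_num > 8 || bytes_num > buf_size - i)
            return AVERROR_INVALIDDATA;
        size = 0;
        while (bytes_num--)
            size = size << 8 | buf[i++];
    }
    return size;
}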
static int mxf_read_sync(AVIOContext *pb, const uint8_t *key, unsigned size)
{
int i, b;
for (i = 0; i < size && !avio_feof(pb); i++) {
b = avio_r8(pb);
if (b == key[0])
i = 0;
else if (b != key[i])
i = -1;
}
return i == size;
}
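/* Worked example: feeding mxf_read_sync() the bytes 00 06 0e 2b 34 with
 * key = mxf_klv_key and size = 4 resets i to 0 on the 0x06, then advances i
 * once per matching byte; the loop exits with i == 4 and the function
 * returns 1, leaving pb positioned right after the partial key. */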
static int klv_read_packet(KLVPacket *klv, AVIOContext *pb)
{
    int64_t length;
    if (!mxf_read_sync(pb, mxf_klv_key, 4))
        return AVERROR_INVALIDDATA;
    klv->offset = avio_tell(pb) - 4;
    memcpy(klv->key, mxf_klv_key, 4);
    avio_read(pb, klv->key + 4, 12);
    length = klv_decode_ber_length(pb);
    /* klv_decode_ber_length() reports errors as AVERROR codes, not -1, and
     * klv->length is unsigned, so check the sign before storing it */
    if (length < 0)
        return length;
    klv->length = length;
    return 0;
}
static int mxf_get_stream_index(AVFormatContext *s, KLVPacket *klv)
{
int i;
for (i = 0; i < s->nb_streams; i++) {
MXFTrack *track = s->streams[i]->priv_data;
/* SMPTE 379M 7.3 */
if (track && !memcmp(klv->key + sizeof(mxf_essence_element_key), track->track_number, sizeof(track->track_number)))
return i;
}
/* return 0 if only one stream, for OP Atom files with 0 as track number */
return s->nb_streams == 1 ? 0 : -1;
}
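/* The comparison above matches the last 4 bytes of the essence element key
 * (bytes 12..15, its track-number part) against the per-track TrackNumber
 * that mxf_read_track() fills in from local tag 0x4804. */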
/* XXX: use AVBitStreamFilter */
static int mxf_get_d10_aes3_packet(AVIOContext *pb, AVStream *st, AVPacket *pkt, int64_t length)
{
const uint8_t *buf_ptr, *end_ptr;
uint8_t *data_ptr;
int i;
if (length > 61444) /* worst case PAL 1920 samples 8 channels */
return AVERROR_INVALIDDATA;
length = av_get_packet(pb, pkt, length);
if (length < 0)
return length;
data_ptr = pkt->data;
end_ptr = pkt->data + length;
buf_ptr = pkt->data + 4; /* skip SMPTE 331M header */
for (; end_ptr - buf_ptr >= st->codecpar->channels * 4; ) {
for (i = 0; i < st->codecpar->channels; i++) {
uint32_t sample = bytestream_get_le32(&buf_ptr);
if (st->codecpar->bits_per_coded_sample == 24)
bytestream_put_le24(&data_ptr, (sample >> 4) & 0xffffff);
else
bytestream_put_le16(&data_ptr, (sample >> 12) & 0xffff);
}
        buf_ptr += 32 - st->codecpar->channels*4; // SMPTE 331M always stores 8 channels
}
av_shrink_packet(pkt, data_ptr - pkt->data);
return 0;
}
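/* Worked example for the extraction above: each channel sample is one
 * little-endian 32-bit SMPTE 331M word. For 24-bit audio the payload sits in
 * bits 4..27, so the word 0x0ABCDEF0 yields (0x0ABCDEF0 >> 4) & 0xffffff =
 * 0xABCDEF; for 16-bit audio bits 12..27 give 0xABCD. After reading the
 * first `channels` words of each group, the fixed 32-byte stride skips the
 * remaining stored channels. */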
static int mxf_decrypt_triplet(AVFormatContext *s, AVPacket *pkt, KLVPacket *klv)
{
static const uint8_t checkv[16] = {0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b};
MXFContext *mxf = s->priv_data;
AVIOContext *pb = s->pb;
int64_t end = avio_tell(pb) + klv->length;
int64_t size;
uint64_t orig_size;
uint64_t plaintext_size;
uint8_t ivec[16];
uint8_t tmpbuf[16];
int index;
if (!mxf->aesc && s->key && s->keylen == 16) {
mxf->aesc = av_aes_alloc();
if (!mxf->aesc)
return AVERROR(ENOMEM);
av_aes_init(mxf->aesc, s->key, 128, 1);
}
// crypto context
avio_skip(pb, klv_decode_ber_length(pb));
// plaintext offset
klv_decode_ber_length(pb);
plaintext_size = avio_rb64(pb);
// source klv key
klv_decode_ber_length(pb);
avio_read(pb, klv->key, 16);
if (!IS_KLV_KEY(klv, mxf_essence_element_key))
return AVERROR_INVALIDDATA;
index = mxf_get_stream_index(s, klv);
if (index < 0)
return AVERROR_INVALIDDATA;
// source size
klv_decode_ber_length(pb);
orig_size = avio_rb64(pb);
if (orig_size < plaintext_size)
return AVERROR_INVALIDDATA;
// enc. code
size = klv_decode_ber_length(pb);
if (size < 32 || size - 32 < orig_size)
return AVERROR_INVALIDDATA;
avio_read(pb, ivec, 16);
avio_read(pb, tmpbuf, 16);
if (mxf->aesc)
av_aes_crypt(mxf->aesc, tmpbuf, tmpbuf, 1, ivec, 1);
if (memcmp(tmpbuf, checkv, 16))
av_log(s, AV_LOG_ERROR, "probably incorrect decryption key\n");
size -= 32;
size = av_get_packet(pb, pkt, size);
if (size < 0)
return size;
else if (size < plaintext_size)
return AVERROR_INVALIDDATA;
size -= plaintext_size;
if (mxf->aesc)
av_aes_crypt(mxf->aesc, &pkt->data[plaintext_size],
&pkt->data[plaintext_size], size >> 4, ivec, 1);
av_shrink_packet(pkt, orig_size);
pkt->stream_index = index;
avio_skip(pb, end - avio_tell(pb));
return 0;
}
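/* Shape of the encrypted triplet as parsed above, each field preceded by its
 * own BER length: cryptographic context reference (skipped), plaintext
 * offset (u64), source KLV key (16 bytes, must be an essence element key),
 * source size (u64), then the encrypted block: a 16-byte IV, a 16-byte check
 * value that must decrypt to "CHUKCHUKCHUKCHUK", and the payload, whose
 * first plaintext_size bytes are stored in the clear. */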
static int mxf_read_primer_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFContext *mxf = arg;
int item_num = avio_rb32(pb);
int item_len = avio_rb32(pb);
if (item_len != 18) {
avpriv_request_sample(pb, "Primer pack item length %d", item_len);
return AVERROR_PATCHWELCOME;
}
if (item_num > 65536 || item_num < 0) {
av_log(mxf->fc, AV_LOG_ERROR, "item_num %d is too large\n", item_num);
return AVERROR_INVALIDDATA;
}
if (mxf->local_tags)
av_log(mxf->fc, AV_LOG_VERBOSE, "Multiple primer packs\n");
av_free(mxf->local_tags);
mxf->local_tags_count = 0;
mxf->local_tags = av_calloc(item_num, item_len);
if (!mxf->local_tags)
return AVERROR(ENOMEM);
mxf->local_tags_count = item_num;
avio_read(pb, mxf->local_tags, item_num*item_len);
return 0;
}
static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFContext *mxf = arg;
MXFPartition *partition, *tmp_part;
UID op;
uint64_t footer_partition;
uint32_t nb_essence_containers;
tmp_part = av_realloc_array(mxf->partitions, mxf->partitions_count + 1, sizeof(*mxf->partitions));
if (!tmp_part)
return AVERROR(ENOMEM);
mxf->partitions = tmp_part;
if (mxf->parsing_backward) {
/* insert the new partition pack in the middle
* this makes the entries in mxf->partitions sorted by offset */
memmove(&mxf->partitions[mxf->last_forward_partition+1],
&mxf->partitions[mxf->last_forward_partition],
(mxf->partitions_count - mxf->last_forward_partition)*sizeof(*mxf->partitions));
partition = mxf->current_partition = &mxf->partitions[mxf->last_forward_partition];
} else {
mxf->last_forward_partition++;
partition = mxf->current_partition = &mxf->partitions[mxf->partitions_count];
}
memset(partition, 0, sizeof(*partition));
mxf->partitions_count++;
partition->pack_length = avio_tell(pb) - klv_offset + size;
partition->pack_ofs = klv_offset;
switch(uid[13]) {
case 2:
partition->type = Header;
break;
case 3:
partition->type = BodyPartition;
break;
case 4:
partition->type = Footer;
break;
default:
av_log(mxf->fc, AV_LOG_ERROR, "unknown partition type %i\n", uid[13]);
return AVERROR_INVALIDDATA;
}
/* consider both footers to be closed (there is only Footer and CompleteFooter) */
partition->closed = partition->type == Footer || !(uid[14] & 1);
partition->complete = uid[14] > 2;
avio_skip(pb, 4);
partition->kag_size = avio_rb32(pb);
partition->this_partition = avio_rb64(pb);
partition->previous_partition = avio_rb64(pb);
footer_partition = avio_rb64(pb);
partition->header_byte_count = avio_rb64(pb);
partition->index_byte_count = avio_rb64(pb);
partition->index_sid = avio_rb32(pb);
avio_skip(pb, 8);
partition->body_sid = avio_rb32(pb);
if (avio_read(pb, op, sizeof(UID)) != sizeof(UID)) {
av_log(mxf->fc, AV_LOG_ERROR, "Failed reading UID\n");
return AVERROR_INVALIDDATA;
}
nb_essence_containers = avio_rb32(pb);
if (partition->this_partition &&
partition->previous_partition == partition->this_partition) {
av_log(mxf->fc, AV_LOG_ERROR,
"PreviousPartition equal to ThisPartition %"PRIx64"\n",
partition->previous_partition);
/* override with the actual previous partition offset */
if (!mxf->parsing_backward && mxf->last_forward_partition > 1) {
MXFPartition *prev =
mxf->partitions + mxf->last_forward_partition - 2;
partition->previous_partition = prev->this_partition;
}
        /* if no previous body partition is found, point to the header
         * partition */
if (partition->previous_partition == partition->this_partition)
partition->previous_partition = 0;
av_log(mxf->fc, AV_LOG_ERROR,
"Overriding PreviousPartition with %"PRIx64"\n",
partition->previous_partition);
}
/* some files don't have FooterPartition set in every partition */
if (footer_partition) {
if (mxf->footer_partition && mxf->footer_partition != footer_partition) {
av_log(mxf->fc, AV_LOG_ERROR,
"inconsistent FooterPartition value: %"PRIu64" != %"PRIu64"\n",
mxf->footer_partition, footer_partition);
} else {
mxf->footer_partition = footer_partition;
}
}
av_log(mxf->fc, AV_LOG_TRACE,
"PartitionPack: ThisPartition = 0x%"PRIX64
", PreviousPartition = 0x%"PRIX64", "
"FooterPartition = 0x%"PRIX64", IndexSID = %i, BodySID = %i\n",
partition->this_partition,
partition->previous_partition, footer_partition,
partition->index_sid, partition->body_sid);
/* sanity check PreviousPartition if set */
//NOTE: this isn't actually enough, see mxf_seek_to_previous_partition()
if (partition->previous_partition &&
mxf->run_in + partition->previous_partition >= klv_offset) {
av_log(mxf->fc, AV_LOG_ERROR,
"PreviousPartition points to this partition or forward\n");
return AVERROR_INVALIDDATA;
}
if (op[12] == 1 && op[13] == 1) mxf->op = OP1a;
else if (op[12] == 1 && op[13] == 2) mxf->op = OP1b;
else if (op[12] == 1 && op[13] == 3) mxf->op = OP1c;
else if (op[12] == 2 && op[13] == 1) mxf->op = OP2a;
else if (op[12] == 2 && op[13] == 2) mxf->op = OP2b;
else if (op[12] == 2 && op[13] == 3) mxf->op = OP2c;
else if (op[12] == 3 && op[13] == 1) mxf->op = OP3a;
else if (op[12] == 3 && op[13] == 2) mxf->op = OP3b;
else if (op[12] == 3 && op[13] == 3) mxf->op = OP3c;
    else if (op[12] == 64 && op[13] == 1) mxf->op = OPSONYOpt;
else if (op[12] == 0x10) {
/* SMPTE 390m: "There shall be exactly one essence container"
* The following block deals with files that violate this, namely:
* 2011_DCPTEST_24FPS.V.mxf - two ECs, OP1a
* abcdefghiv016f56415e.mxf - zero ECs, OPAtom, output by Avid AirSpeed */
if (nb_essence_containers != 1) {
MXFOP op = nb_essence_containers ? OP1a : OPAtom;
/* only nag once */
if (!mxf->op)
av_log(mxf->fc, AV_LOG_WARNING,
"\"OPAtom\" with %"PRIu32" ECs - assuming %s\n",
nb_essence_containers,
op == OP1a ? "OP1a" : "OPAtom");
mxf->op = op;
} else
mxf->op = OPAtom;
} else {
av_log(mxf->fc, AV_LOG_ERROR, "unknown operational pattern: %02xh %02xh - guessing OP1a\n", op[12], op[13]);
mxf->op = OP1a;
}
if (partition->kag_size <= 0 || partition->kag_size > (1 << 20)) {
av_log(mxf->fc, AV_LOG_WARNING, "invalid KAGSize %"PRId32" - guessing ",
partition->kag_size);
if (mxf->op == OPSONYOpt)
partition->kag_size = 512;
else
partition->kag_size = 1;
av_log(mxf->fc, AV_LOG_WARNING, "%"PRId32"\n", partition->kag_size);
}
return 0;
}
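/* Partition pack value layout as consumed above: major/minor version
 * (2 x u16, skipped), KAGSize (u32), ThisPartition, PreviousPartition,
 * FooterPartition, HeaderByteCount and IndexByteCount (u64 each), IndexSID
 * (u32), BodyOffset (u64, skipped), BodySID (u32), the operational pattern
 * UL (16 bytes), and finally the essence container batch, of which only the
 * item count is read. */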
static int mxf_add_metadata_set(MXFContext *mxf, void *metadata_set)
{
MXFMetadataSet **tmp;
tmp = av_realloc_array(mxf->metadata_sets, mxf->metadata_sets_count + 1, sizeof(*mxf->metadata_sets));
if (!tmp)
return AVERROR(ENOMEM);
mxf->metadata_sets = tmp;
mxf->metadata_sets[mxf->metadata_sets_count] = metadata_set;
mxf->metadata_sets_count++;
return 0;
}
static int mxf_read_cryptographic_context(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFCryptoContext *cryptocontext = arg;
if (size != 16)
return AVERROR_INVALIDDATA;
if (IS_KLV_KEY(uid, mxf_crypto_source_container_ul))
avio_read(pb, cryptocontext->source_container_ul, 16);
return 0;
}
static int mxf_read_strong_ref_array(AVIOContext *pb, UID **refs, int *count)
{
*count = avio_rb32(pb);
*refs = av_calloc(*count, sizeof(UID));
if (!*refs) {
*count = 0;
return AVERROR(ENOMEM);
}
avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
avio_read(pb, (uint8_t *)*refs, *count * sizeof(UID));
return 0;
}
static inline int mxf_read_utf16_string(AVIOContext *pb, int size, char** str, int be)
{
int ret;
size_t buf_size;
if (size < 0 || size > INT_MAX/2)
return AVERROR(EINVAL);
buf_size = size + size / 2 + 1;
*str = av_malloc(buf_size);
if (!*str)
return AVERROR(ENOMEM);
if (be)
ret = avio_get_str16be(pb, size, *str, buf_size);
else
ret = avio_get_str16le(pb, size, *str, buf_size);
if (ret < 0) {
av_freep(str);
return ret;
}
return ret;
}
#define READ_STR16(type, big_endian) \
static int mxf_read_utf16 ## type ##_string(AVIOContext *pb, int size, char** str) \
{ \
return mxf_read_utf16_string(pb, size, str, big_endian); \
}
READ_STR16(be, 1)
READ_STR16(le, 0)
#undef READ_STR16
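/* For reference, READ_STR16(be, 1) expands to
 *
 *     static int mxf_read_utf16be_string(AVIOContext *pb, int size, char** str)
 *     {
 *         return mxf_read_utf16_string(pb, size, str, 1);
 *     }
 *
 * and READ_STR16(le, 0) to the analogous little-endian wrapper. */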
static int mxf_read_content_storage(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFContext *mxf = arg;
switch (tag) {
case 0x1901:
if (mxf->packages_refs)
av_log(mxf->fc, AV_LOG_VERBOSE, "Multiple packages_refs\n");
av_free(mxf->packages_refs);
return mxf_read_strong_ref_array(pb, &mxf->packages_refs, &mxf->packages_count);
}
return 0;
}
static int mxf_read_source_clip(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFStructuralComponent *source_clip = arg;
switch(tag) {
case 0x0202:
source_clip->duration = avio_rb64(pb);
break;
case 0x1201:
source_clip->start_position = avio_rb64(pb);
break;
case 0x1101:
/* UMID, only get last 16 bytes */
avio_read(pb, source_clip->source_package_ul, 16);
avio_read(pb, source_clip->source_package_uid, 16);
break;
case 0x1102:
source_clip->source_track_id = avio_rb32(pb);
break;
}
return 0;
}
static int mxf_read_timecode_component(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFTimecodeComponent *mxf_timecode = arg;
switch(tag) {
case 0x1501:
mxf_timecode->start_frame = avio_rb64(pb);
break;
case 0x1502:
mxf_timecode->rate = (AVRational){avio_rb16(pb), 1};
break;
case 0x1503:
mxf_timecode->drop_frame = avio_r8(pb);
break;
}
return 0;
}
static int mxf_read_pulldown_component(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFPulldownComponent *mxf_pulldown = arg;
switch(tag) {
case 0x0d01:
avio_read(pb, mxf_pulldown->input_segment_ref, 16);
break;
}
return 0;
}
static int mxf_read_track(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFTrack *track = arg;
switch(tag) {
case 0x4801:
track->track_id = avio_rb32(pb);
break;
case 0x4804:
avio_read(pb, track->track_number, 4);
break;
case 0x4802:
mxf_read_utf16be_string(pb, size, &track->name);
break;
case 0x4b01:
track->edit_rate.num = avio_rb32(pb);
track->edit_rate.den = avio_rb32(pb);
break;
case 0x4803:
avio_read(pb, track->sequence_ref, 16);
break;
}
return 0;
}
static int mxf_read_sequence(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFSequence *sequence = arg;
switch(tag) {
case 0x0202:
sequence->duration = avio_rb64(pb);
break;
case 0x0201:
avio_read(pb, sequence->data_definition_ul, 16);
break;
case 0x4b02:
sequence->origin = avio_r8(pb);
break;
case 0x1001:
return mxf_read_strong_ref_array(pb, &sequence->structural_components_refs,
&sequence->structural_components_count);
}
return 0;
}
static int mxf_read_essence_group(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFEssenceGroup *essence_group = arg;
switch (tag) {
case 0x0202:
essence_group->duration = avio_rb64(pb);
break;
case 0x0501:
return mxf_read_strong_ref_array(pb, &essence_group->structural_components_refs,
&essence_group->structural_components_count);
}
return 0;
}
static int mxf_read_package(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFPackage *package = arg;
switch(tag) {
case 0x4403:
return mxf_read_strong_ref_array(pb, &package->tracks_refs,
&package->tracks_count);
case 0x4401:
/* UMID */
avio_read(pb, package->package_ul, 16);
avio_read(pb, package->package_uid, 16);
break;
case 0x4701:
avio_read(pb, package->descriptor_ref, 16);
break;
case 0x4402:
return mxf_read_utf16be_string(pb, size, &package->name);
case 0x4406:
return mxf_read_strong_ref_array(pb, &package->comment_refs,
&package->comment_count);
}
return 0;
}
static int mxf_read_index_entry_array(AVIOContext *pb, MXFIndexTableSegment *segment)
{
int i, length;
segment->nb_index_entries = avio_rb32(pb);
length = avio_rb32(pb);
if(segment->nb_index_entries && length < 11)
return AVERROR_INVALIDDATA;
if (!(segment->temporal_offset_entries=av_calloc(segment->nb_index_entries, sizeof(*segment->temporal_offset_entries))) ||
!(segment->flag_entries = av_calloc(segment->nb_index_entries, sizeof(*segment->flag_entries))) ||
!(segment->stream_offset_entries = av_calloc(segment->nb_index_entries, sizeof(*segment->stream_offset_entries)))) {
av_freep(&segment->temporal_offset_entries);
av_freep(&segment->flag_entries);
return AVERROR(ENOMEM);
}
for (i = 0; i < segment->nb_index_entries; i++) {
if(avio_feof(pb))
return AVERROR_INVALIDDATA;
segment->temporal_offset_entries[i] = avio_r8(pb);
avio_r8(pb); /* KeyFrameOffset */
segment->flag_entries[i] = avio_r8(pb);
segment->stream_offset_entries[i] = avio_rb64(pb);
avio_skip(pb, length - 11);
}
return 0;
}
static int mxf_read_index_table_segment(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFIndexTableSegment *segment = arg;
switch(tag) {
case 0x3F05:
segment->edit_unit_byte_count = avio_rb32(pb);
av_log(NULL, AV_LOG_TRACE, "EditUnitByteCount %d\n", segment->edit_unit_byte_count);
break;
case 0x3F06:
segment->index_sid = avio_rb32(pb);
av_log(NULL, AV_LOG_TRACE, "IndexSID %d\n", segment->index_sid);
break;
case 0x3F07:
segment->body_sid = avio_rb32(pb);
av_log(NULL, AV_LOG_TRACE, "BodySID %d\n", segment->body_sid);
break;
case 0x3F0A:
av_log(NULL, AV_LOG_TRACE, "IndexEntryArray found\n");
return mxf_read_index_entry_array(pb, segment);
case 0x3F0B:
segment->index_edit_rate.num = avio_rb32(pb);
segment->index_edit_rate.den = avio_rb32(pb);
av_log(NULL, AV_LOG_TRACE, "IndexEditRate %d/%d\n", segment->index_edit_rate.num,
segment->index_edit_rate.den);
break;
case 0x3F0C:
segment->index_start_position = avio_rb64(pb);
av_log(NULL, AV_LOG_TRACE, "IndexStartPosition %"PRId64"\n", segment->index_start_position);
break;
case 0x3F0D:
segment->index_duration = avio_rb64(pb);
av_log(NULL, AV_LOG_TRACE, "IndexDuration %"PRId64"\n", segment->index_duration);
break;
}
return 0;
}
static void mxf_read_pixel_layout(AVIOContext *pb, MXFDescriptor *descriptor)
{
int code, value, ofs = 0;
char layout[16] = {0}; /* not for printing, may end up not terminated on purpose */
do {
code = avio_r8(pb);
value = avio_r8(pb);
av_log(NULL, AV_LOG_TRACE, "pixel layout: code %#x\n", code);
if (ofs <= 14) {
layout[ofs++] = code;
layout[ofs++] = value;
} else
break; /* don't read byte by byte on sneaky files filled with lots of non-zeroes */
} while (code != 0); /* SMPTE 377M E.2.46 */
ff_mxf_decode_pixel_layout(layout, &descriptor->pix_fmt);
}
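/* Illustrative layout stream (hypothetical component codes): the pairs
 * 'R',8,'G',8,'B',8 followed by the terminating code 0 would describe three
 * 8-bit components; ff_mxf_decode_pixel_layout() is then responsible for
 * mapping such a string onto an AVPixelFormat. */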
static int mxf_read_generic_descriptor(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFDescriptor *descriptor = arg;
int entry_count, entry_size;
switch(tag) {
case 0x3F01:
return mxf_read_strong_ref_array(pb, &descriptor->sub_descriptors_refs,
&descriptor->sub_descriptors_count);
case 0x3002: /* ContainerDuration */
descriptor->duration = avio_rb64(pb);
break;
case 0x3004:
avio_read(pb, descriptor->essence_container_ul, 16);
break;
case 0x3005:
avio_read(pb, descriptor->codec_ul, 16);
break;
case 0x3006:
descriptor->linked_track_id = avio_rb32(pb);
break;
case 0x3201: /* PictureEssenceCoding */
avio_read(pb, descriptor->essence_codec_ul, 16);
break;
case 0x3203:
descriptor->width = avio_rb32(pb);
break;
case 0x3202:
descriptor->height = avio_rb32(pb);
break;
case 0x320C:
descriptor->frame_layout = avio_r8(pb);
break;
case 0x320D:
entry_count = avio_rb32(pb);
entry_size = avio_rb32(pb);
if (entry_size == 4) {
if (entry_count > 0)
descriptor->video_line_map[0] = avio_rb32(pb);
else
descriptor->video_line_map[0] = 0;
if (entry_count > 1)
descriptor->video_line_map[1] = avio_rb32(pb);
else
descriptor->video_line_map[1] = 0;
} else
av_log(NULL, AV_LOG_WARNING, "VideoLineMap element size %d currently not supported\n", entry_size);
break;
case 0x320E:
descriptor->aspect_ratio.num = avio_rb32(pb);
descriptor->aspect_ratio.den = avio_rb32(pb);
break;
case 0x3212:
descriptor->field_dominance = avio_r8(pb);
break;
case 0x3301:
descriptor->component_depth = avio_rb32(pb);
break;
case 0x3302:
descriptor->horiz_subsampling = avio_rb32(pb);
break;
case 0x3308:
descriptor->vert_subsampling = avio_rb32(pb);
break;
case 0x3D03:
descriptor->sample_rate.num = avio_rb32(pb);
descriptor->sample_rate.den = avio_rb32(pb);
break;
case 0x3D06: /* SoundEssenceCompression */
avio_read(pb, descriptor->essence_codec_ul, 16);
break;
case 0x3D07:
descriptor->channels = avio_rb32(pb);
break;
case 0x3D01:
descriptor->bits_per_sample = avio_rb32(pb);
break;
case 0x3401:
mxf_read_pixel_layout(pb, descriptor);
break;
default:
/* Private uid used by SONY C0023S01.mxf */
if (IS_KLV_KEY(uid, mxf_sony_mpeg4_extradata)) {
if (descriptor->extradata)
av_log(NULL, AV_LOG_WARNING, "Duplicate sony_mpeg4_extradata\n");
av_free(descriptor->extradata);
descriptor->extradata_size = 0;
descriptor->extradata = av_malloc(size);
if (!descriptor->extradata)
return AVERROR(ENOMEM);
descriptor->extradata_size = size;
avio_read(pb, descriptor->extradata, size);
}
if (IS_KLV_KEY(uid, mxf_jp2k_rsiz)) {
uint32_t rsiz = avio_rb16(pb);
if (rsiz == FF_PROFILE_JPEG2000_DCINEMA_2K ||
rsiz == FF_PROFILE_JPEG2000_DCINEMA_4K)
descriptor->pix_fmt = AV_PIX_FMT_XYZ12;
}
break;
}
return 0;
}
static int mxf_read_indirect_value(void *arg, AVIOContext *pb, int size)
{
MXFTaggedValue *tagged_value = arg;
uint8_t key[17];
if (size <= 17)
return 0;
avio_read(pb, key, 17);
    /* TODO: handle other types of indirect values */
if (memcmp(key, mxf_indirect_value_utf16le, 17) == 0) {
return mxf_read_utf16le_string(pb, size - 17, &tagged_value->value);
} else if (memcmp(key, mxf_indirect_value_utf16be, 17) == 0) {
return mxf_read_utf16be_string(pb, size - 17, &tagged_value->value);
}
return 0;
}
static int mxf_read_tagged_value(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFTaggedValue *tagged_value = arg;
switch (tag){
case 0x5001:
return mxf_read_utf16be_string(pb, size, &tagged_value->name);
case 0x5003:
return mxf_read_indirect_value(tagged_value, pb, size);
}
return 0;
}
/*
 * Match a UID independently of the version byte (byte 7) over up to len common bytes
 * Returns: boolean
 */
static int mxf_match_uid(const UID key, const UID uid, int len)
{
int i;
for (i = 0; i < len; i++) {
if (i != 7 && key[i] != uid[i])
return 0;
}
return 1;
}
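/* Example: byte 7 of a UL is its version byte, so mxf_match_uid() treats
 * 06,0e,2b,34,04,01,01,0x02,... and 06,0e,2b,34,04,01,01,0x0a,... as equal
 * for any len, provided the remaining len - 1 bytes match. */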
static const MXFCodecUL *mxf_get_codec_ul(const MXFCodecUL *uls, UID *uid)
{
while (uls->uid[0]) {
if(mxf_match_uid(uls->uid, *uid, uls->matching_len))
break;
uls++;
}
return uls;
}
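/* Usage note: every UL table below ends in an all-zero sentinel whose id is
 * AV_CODEC_ID_NONE, so mxf_get_codec_ul() always returns a valid entry and
 * callers test the returned id rather than checking for NULL. */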
static void *mxf_resolve_strong_ref(MXFContext *mxf, UID *strong_ref, enum MXFMetadataSetType type)
{
int i;
if (!strong_ref)
return NULL;
for (i = 0; i < mxf->metadata_sets_count; i++) {
if (!memcmp(*strong_ref, mxf->metadata_sets[i]->uid, 16) &&
(type == AnyType || mxf->metadata_sets[i]->type == type)) {
return mxf->metadata_sets[i];
}
}
return NULL;
}
static const MXFCodecUL mxf_picture_essence_container_uls[] = {
// video essence container uls
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x07,0x0d,0x01,0x03,0x01,0x02,0x0c,0x01,0x00 }, 14, AV_CODEC_ID_JPEG2000 },
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x02,0x0d,0x01,0x03,0x01,0x02,0x10,0x60,0x01 }, 14, AV_CODEC_ID_H264 }, /* H.264 frame wrapped */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x02,0x0d,0x01,0x03,0x01,0x02,0x12,0x01,0x00 }, 14, AV_CODEC_ID_VC1 }, /* VC-1 frame wrapped */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x02,0x0d,0x01,0x03,0x01,0x02,0x04,0x60,0x01 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* MPEG-ES frame wrapped */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x01,0x04,0x01 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* Type D-10 mapping of 40Mbps 525/60-I */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x02,0x41,0x01 }, 14, AV_CODEC_ID_DVVIDEO }, /* DV 625 25mbps */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x05,0x00,0x00 }, 14, AV_CODEC_ID_RAWVIDEO }, /* uncompressed picture */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0a,0x0e,0x0f,0x03,0x01,0x02,0x20,0x01,0x01 }, 15, AV_CODEC_ID_HQ_HQA },
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0a,0x0e,0x0f,0x03,0x01,0x02,0x20,0x02,0x01 }, 15, AV_CODEC_ID_HQX },
{ { 0x06,0x0e,0x2b,0x34,0x01,0x01,0x01,0xff,0x4b,0x46,0x41,0x41,0x00,0x0d,0x4d,0x4f }, 14, AV_CODEC_ID_RAWVIDEO }, /* Legacy ?? Uncompressed Picture */
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, 0, AV_CODEC_ID_NONE },
};
/* EC ULs for intra-only formats */
static const MXFCodecUL mxf_intra_only_essence_container_uls[] = {
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x01,0x00,0x00 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* MXF-GC SMPTE D-10 mappings */
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, 0, AV_CODEC_ID_NONE },
};
/* intra-only PictureEssenceCoding ULs, where no corresponding EC UL exists */
static const MXFCodecUL mxf_intra_only_picture_essence_coding_uls[] = {
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x32,0x00,0x00 }, 14, AV_CODEC_ID_H264 }, /* H.264/MPEG-4 AVC Intra Profiles */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x07,0x04,0x01,0x02,0x02,0x03,0x01,0x01,0x00 }, 14, AV_CODEC_ID_JPEG2000 }, /* JPEG 2000 code stream */
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, 0, AV_CODEC_ID_NONE },
};
/* actual coded width for AVC-Intra to allow selecting correct SPS/PPS */
static const MXFCodecUL mxf_intra_only_picture_coded_width[] = {
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x32,0x21,0x01 }, 16, 1440 },
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x32,0x21,0x02 }, 16, 1440 },
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x32,0x21,0x03 }, 16, 1440 },
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x32,0x21,0x04 }, 16, 1440 },
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, 0, 0 },
};
static const MXFCodecUL mxf_sound_essence_container_uls[] = {
// sound essence container uls
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x06,0x01,0x00 }, 14, AV_CODEC_ID_PCM_S16LE }, /* BWF Frame wrapped */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x02,0x0d,0x01,0x03,0x01,0x02,0x04,0x40,0x01 }, 14, AV_CODEC_ID_MP2 }, /* MPEG-ES Frame wrapped, 0x40 ??? stream id */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x01,0x01,0x01 }, 14, AV_CODEC_ID_PCM_S16LE }, /* D-10 Mapping 50Mbps PAL Extended Template */
{ { 0x06,0x0e,0x2b,0x34,0x01,0x01,0x01,0xff,0x4b,0x46,0x41,0x41,0x00,0x0d,0x4d,0x4F }, 14, AV_CODEC_ID_PCM_S16LE }, /* 0001GL00.MXF.A1.mxf_opatom.mxf */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x03,0x04,0x02,0x02,0x02,0x03,0x03,0x01,0x00 }, 14, AV_CODEC_ID_AAC }, /* MPEG-2 AAC ADTS (legacy) */
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, 0, AV_CODEC_ID_NONE },
};
static const MXFCodecUL mxf_data_essence_container_uls[] = {
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x09,0x0d,0x01,0x03,0x01,0x02,0x0e,0x00,0x00 }, 16, 0 },
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, 0, AV_CODEC_ID_NONE },
};
static const char* const mxf_data_essence_descriptor[] = {
"vbi_vanc_smpte_436M",
};
static int mxf_get_sorted_table_segments(MXFContext *mxf, int *nb_sorted_segments, MXFIndexTableSegment ***sorted_segments)
{
int i, j, nb_segments = 0;
MXFIndexTableSegment **unsorted_segments;
int last_body_sid = -1, last_index_sid = -1, last_index_start = -1;
/* count number of segments, allocate arrays and copy unsorted segments */
for (i = 0; i < mxf->metadata_sets_count; i++)
if (mxf->metadata_sets[i]->type == IndexTableSegment)
nb_segments++;
if (!nb_segments)
return AVERROR_INVALIDDATA;
if (!(unsorted_segments = av_calloc(nb_segments, sizeof(*unsorted_segments))) ||
!(*sorted_segments = av_calloc(nb_segments, sizeof(**sorted_segments)))) {
av_freep(sorted_segments);
av_free(unsorted_segments);
return AVERROR(ENOMEM);
}
for (i = j = 0; i < mxf->metadata_sets_count; i++)
if (mxf->metadata_sets[i]->type == IndexTableSegment)
unsorted_segments[j++] = (MXFIndexTableSegment*)mxf->metadata_sets[i];
*nb_sorted_segments = 0;
/* sort segments by {BodySID, IndexSID, IndexStartPosition}, remove duplicates while we're at it */
for (i = 0; i < nb_segments; i++) {
int best = -1, best_body_sid = -1, best_index_sid = -1, best_index_start = -1;
uint64_t best_index_duration = 0;
for (j = 0; j < nb_segments; j++) {
MXFIndexTableSegment *s = unsorted_segments[j];
            /* Require a larger BodySID, IndexSID or IndexStartPosition than the previous entry. This removes duplicates.
             * Among the candidates we pick the smallest keys seen so far, unless this is the first such entry this time around.
             * If we come across an entry with the same IndexStartPosition but larger IndexDuration, then we'll prefer it over the one we currently have.
             */
if ((i == 0 || s->body_sid > last_body_sid || s->index_sid > last_index_sid || s->index_start_position > last_index_start) &&
(best == -1 || s->body_sid < best_body_sid || s->index_sid < best_index_sid || s->index_start_position < best_index_start ||
(s->index_start_position == best_index_start && s->index_duration > best_index_duration))) {
best = j;
best_body_sid = s->body_sid;
best_index_sid = s->index_sid;
best_index_start = s->index_start_position;
best_index_duration = s->index_duration;
}
}
/* no suitable entry found -> we're done */
if (best == -1)
break;
(*sorted_segments)[(*nb_sorted_segments)++] = unsorted_segments[best];
last_body_sid = best_body_sid;
last_index_sid = best_index_sid;
last_index_start = best_index_start;
}
av_free(unsorted_segments);
return 0;
}
/**
* Computes the absolute file offset of the given essence container offset
*/
static int mxf_absolute_bodysid_offset(MXFContext *mxf, int body_sid, int64_t offset, int64_t *offset_out)
{
int x;
int64_t offset_in = offset; /* for logging */
for (x = 0; x < mxf->partitions_count; x++) {
MXFPartition *p = &mxf->partitions[x];
if (p->body_sid != body_sid)
continue;
if (offset < p->essence_length || !p->essence_length) {
*offset_out = p->essence_offset + offset;
return 0;
}
offset -= p->essence_length;
}
av_log(mxf->fc, AV_LOG_ERROR,
"failed to find absolute offset of %"PRIX64" in BodySID %i - partial file?\n",
offset_in, body_sid);
return AVERROR_INVALIDDATA;
}
/**
* Returns the end position of the essence container with given BodySID, or zero if unknown
*/
static int64_t mxf_essence_container_end(MXFContext *mxf, int body_sid)
{
int x;
int64_t ret = 0;
for (x = 0; x < mxf->partitions_count; x++) {
MXFPartition *p = &mxf->partitions[x];
if (p->body_sid != body_sid)
continue;
if (!p->essence_length)
return 0;
ret = p->essence_offset + p->essence_length;
}
return ret;
}
/* EditUnit -> absolute offset */
static int mxf_edit_unit_absolute_offset(MXFContext *mxf, MXFIndexTable *index_table, int64_t edit_unit, int64_t *edit_unit_out, int64_t *offset_out, int nag)
{
int i;
int64_t offset_temp = 0;
for (i = 0; i < index_table->nb_segments; i++) {
MXFIndexTableSegment *s = index_table->segments[i];
edit_unit = FFMAX(edit_unit, s->index_start_position); /* clamp if trying to seek before start */
if (edit_unit < s->index_start_position + s->index_duration) {
int64_t index = edit_unit - s->index_start_position;
if (s->edit_unit_byte_count)
offset_temp += s->edit_unit_byte_count * index;
else if (s->nb_index_entries) {
if (s->nb_index_entries == 2 * s->index_duration + 1)
index *= 2; /* Avid index */
if (index < 0 || index >= s->nb_index_entries) {
av_log(mxf->fc, AV_LOG_ERROR, "IndexSID %i segment at %"PRId64" IndexEntryArray too small\n",
index_table->index_sid, s->index_start_position);
return AVERROR_INVALIDDATA;
}
offset_temp = s->stream_offset_entries[index];
} else {
av_log(mxf->fc, AV_LOG_ERROR, "IndexSID %i segment at %"PRId64" missing EditUnitByteCount and IndexEntryArray\n",
index_table->index_sid, s->index_start_position);
return AVERROR_INVALIDDATA;
}
if (edit_unit_out)
*edit_unit_out = edit_unit;
return mxf_absolute_bodysid_offset(mxf, index_table->body_sid, offset_temp, offset_out);
} else {
/* EditUnitByteCount == 0 for VBR indexes, which is fine since they use explicit StreamOffsets */
offset_temp += s->edit_unit_byte_count * s->index_duration;
}
}
if (nag)
av_log(mxf->fc, AV_LOG_ERROR, "failed to map EditUnit %"PRId64" in IndexSID %i to an offset\n", edit_unit, index_table->index_sid);
return AVERROR_INVALIDDATA;
}
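/* Worked example for the CBR branch above: given one segment with
 * IndexStartPosition = 0, EditUnitByteCount = 3000 and IndexDuration = 100,
 * EditUnit 42 maps to the relative essence offset 42 * 3000 = 126000, which
 * mxf_absolute_bodysid_offset() converts to a file offset by walking the
 * partitions that carry the table's BodySID. */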
static int mxf_compute_ptses_fake_index(MXFContext *mxf, MXFIndexTable *index_table)
{
int i, j, x;
int8_t max_temporal_offset = -128;
uint8_t *flags;
/* first compute how many entries we have */
for (i = 0; i < index_table->nb_segments; i++) {
MXFIndexTableSegment *s = index_table->segments[i];
if (!s->nb_index_entries) {
index_table->nb_ptses = 0;
return 0; /* no TemporalOffsets */
}
index_table->nb_ptses += s->index_duration;
}
/* paranoid check */
if (index_table->nb_ptses <= 0)
return 0;
if (!(index_table->ptses = av_calloc(index_table->nb_ptses, sizeof(int64_t))) ||
!(index_table->fake_index = av_calloc(index_table->nb_ptses, sizeof(AVIndexEntry))) ||
!(index_table->offsets = av_calloc(index_table->nb_ptses, sizeof(int8_t))) ||
!(flags = av_calloc(index_table->nb_ptses, sizeof(uint8_t)))) {
av_freep(&index_table->ptses);
av_freep(&index_table->fake_index);
av_freep(&index_table->offsets);
return AVERROR(ENOMEM);
}
/* we may have a few bad TemporalOffsets
* make sure the corresponding PTSes don't have the bogus value 0 */
for (x = 0; x < index_table->nb_ptses; x++)
index_table->ptses[x] = AV_NOPTS_VALUE;
/**
* We have this:
*
* x TemporalOffset
* 0: 0
* 1: 1
* 2: 1
* 3: -2
* 4: 1
* 5: 1
* 6: -2
*
* We want to transform it into this:
*
* x DTS PTS
* 0: -1 0
* 1: 0 3
* 2: 1 1
* 3: 2 2
* 4: 3 6
* 5: 4 4
* 6: 5 5
*
     * We do this by bucket sorting x by x+TemporalOffset[x] into index_table->ptses,
     * then setting index_table->first_dts = -max(TemporalOffset[x]).
* The latter makes DTS <= PTS.
*/
for (i = x = 0; i < index_table->nb_segments; i++) {
MXFIndexTableSegment *s = index_table->segments[i];
int index_delta = 1;
int n = s->nb_index_entries;
if (s->nb_index_entries == 2 * s->index_duration + 1) {
index_delta = 2; /* Avid index */
/* ignore the last entry - it's the size of the essence container */
n--;
}
for (j = 0; j < n; j += index_delta, x++) {
int offset = s->temporal_offset_entries[j] / index_delta;
int index = x + offset;
if (x >= index_table->nb_ptses) {
av_log(mxf->fc, AV_LOG_ERROR,
"x >= nb_ptses - IndexEntryCount %i < IndexDuration %"PRId64"?\n",
s->nb_index_entries, s->index_duration);
break;
}
flags[x] = !(s->flag_entries[j] & 0x30) ? AVINDEX_KEYFRAME : 0;
if (index < 0 || index >= index_table->nb_ptses) {
av_log(mxf->fc, AV_LOG_ERROR,
"index entry %i + TemporalOffset %i = %i, which is out of bounds\n",
x, offset, index);
continue;
}
index_table->offsets[x] = offset;
index_table->ptses[index] = x;
max_temporal_offset = FFMAX(max_temporal_offset, offset);
}
}
/* calculate the fake index table in display order */
for (x = 0; x < index_table->nb_ptses; x++) {
index_table->fake_index[x].timestamp = x;
if (index_table->ptses[x] != AV_NOPTS_VALUE)
index_table->fake_index[index_table->ptses[x]].flags = flags[x];
}
av_freep(&flags);
index_table->first_dts = -max_temporal_offset;
return 0;
}
/**
* Sorts and collects index table segments into index tables.
* Also computes PTSes if possible.
*/
static int mxf_compute_index_tables(MXFContext *mxf)
{
int i, j, k, ret, nb_sorted_segments;
MXFIndexTableSegment **sorted_segments = NULL;
AVStream *st = NULL;
for (i = 0; i < mxf->fc->nb_streams; i++) {
if (mxf->fc->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_DATA)
continue;
st = mxf->fc->streams[i];
break;
}
if ((ret = mxf_get_sorted_table_segments(mxf, &nb_sorted_segments, &sorted_segments)) ||
nb_sorted_segments <= 0) {
av_log(mxf->fc, AV_LOG_WARNING, "broken or empty index\n");
return 0;
}
/* sanity check and count unique BodySIDs/IndexSIDs */
for (i = 0; i < nb_sorted_segments; i++) {
if (i == 0 || sorted_segments[i-1]->index_sid != sorted_segments[i]->index_sid)
mxf->nb_index_tables++;
else if (sorted_segments[i-1]->body_sid != sorted_segments[i]->body_sid) {
av_log(mxf->fc, AV_LOG_ERROR, "found inconsistent BodySID\n");
ret = AVERROR_INVALIDDATA;
goto finish_decoding_index;
}
}
mxf->index_tables = av_mallocz_array(mxf->nb_index_tables,
sizeof(*mxf->index_tables));
if (!mxf->index_tables) {
av_log(mxf->fc, AV_LOG_ERROR, "failed to allocate index tables\n");
ret = AVERROR(ENOMEM);
goto finish_decoding_index;
}
/* distribute sorted segments to index tables */
for (i = j = 0; i < nb_sorted_segments; i++) {
if (i != 0 && sorted_segments[i-1]->index_sid != sorted_segments[i]->index_sid) {
/* next IndexSID */
j++;
}
mxf->index_tables[j].nb_segments++;
}
for (i = j = 0; j < mxf->nb_index_tables; i += mxf->index_tables[j++].nb_segments) {
MXFIndexTable *t = &mxf->index_tables[j];
t->segments = av_mallocz_array(t->nb_segments,
sizeof(*t->segments));
if (!t->segments) {
av_log(mxf->fc, AV_LOG_ERROR, "failed to allocate IndexTableSegment"
" pointer array\n");
ret = AVERROR(ENOMEM);
goto finish_decoding_index;
}
if (sorted_segments[i]->index_start_position)
av_log(mxf->fc, AV_LOG_WARNING, "IndexSID %i starts at EditUnit %"PRId64" - seeking may not work as expected\n",
sorted_segments[i]->index_sid, sorted_segments[i]->index_start_position);
memcpy(t->segments, &sorted_segments[i], t->nb_segments * sizeof(MXFIndexTableSegment*));
t->index_sid = sorted_segments[i]->index_sid;
t->body_sid = sorted_segments[i]->body_sid;
if ((ret = mxf_compute_ptses_fake_index(mxf, t)) < 0)
goto finish_decoding_index;
/* fix zero IndexDurations */
for (k = 0; k < t->nb_segments; k++) {
if (t->segments[k]->index_duration)
continue;
if (t->nb_segments > 1)
av_log(mxf->fc, AV_LOG_WARNING, "IndexSID %i segment %i has zero IndexDuration and there's more than one segment\n",
t->index_sid, k);
if (!st) {
av_log(mxf->fc, AV_LOG_WARNING, "no streams?\n");
break;
}
/* assume the first stream's duration is reasonable
* leave index_duration = 0 on further segments in case we have any (unlikely)
*/
t->segments[k]->index_duration = st->duration;
break;
}
}
ret = 0;
finish_decoding_index:
av_free(sorted_segments);
return ret;
}
static int mxf_is_intra_only(MXFDescriptor *descriptor)
{
return mxf_get_codec_ul(mxf_intra_only_essence_container_uls,
&descriptor->essence_container_ul)->id != AV_CODEC_ID_NONE ||
mxf_get_codec_ul(mxf_intra_only_picture_essence_coding_uls,
&descriptor->essence_codec_ul)->id != AV_CODEC_ID_NONE;
}
static int mxf_uid_to_str(UID uid, char **str)
{
int i;
char *p;
p = *str = av_mallocz(sizeof(UID) * 2 + 4 + 1);
if (!p)
return AVERROR(ENOMEM);
for (i = 0; i < sizeof(UID); i++) {
snprintf(p, 2 + 1, "%.2x", uid[i]);
p += 2;
if (i == 3 || i == 5 || i == 7 || i == 9) {
snprintf(p, 1 + 1, "-");
p++;
}
}
return 0;
}
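/* mxf_uid_to_str() renders a UID in UUID style: 32 lowercase hex digits with
 * dashes after byte indices 3, 5, 7 and 9, e.g.
 * "060e2b34-0205-0101-0d01-020101020100". mxf_umid_to_str() below instead
 * emits "0x" followed by the 64 uppercase hex digits of the UL and UID
 * concatenated, with no separators. */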
static int mxf_umid_to_str(UID ul, UID uid, char **str)
{
int i;
char *p;
p = *str = av_mallocz(sizeof(UID) * 4 + 2 + 1);
if (!p)
return AVERROR(ENOMEM);
snprintf(p, 2 + 1, "0x");
p += 2;
for (i = 0; i < sizeof(UID); i++) {
snprintf(p, 2 + 1, "%.2X", ul[i]);
p += 2;
}
for (i = 0; i < sizeof(UID); i++) {
snprintf(p, 2 + 1, "%.2X", uid[i]);
p += 2;
}
return 0;
}
static int mxf_add_umid_metadata(AVDictionary **pm, const char *key, MXFPackage* package)
{
char *str;
int ret;
if (!package)
return 0;
if ((ret = mxf_umid_to_str(package->package_ul, package->package_uid, &str)) < 0)
return ret;
av_dict_set(pm, key, str, AV_DICT_DONT_STRDUP_VAL);
return 0;
}
static int mxf_add_timecode_metadata(AVDictionary **pm, const char *key, AVTimecode *tc)
{
char buf[AV_TIMECODE_STR_SIZE];
av_dict_set(pm, key, av_timecode_make_string(tc, buf, 0), 0);
return 0;
}
static MXFTimecodeComponent* mxf_resolve_timecode_component(MXFContext *mxf, UID *strong_ref)
{
MXFStructuralComponent *component = NULL;
MXFPulldownComponent *pulldown = NULL;
component = mxf_resolve_strong_ref(mxf, strong_ref, AnyType);
if (!component)
return NULL;
switch (component->type) {
case TimecodeComponent:
return (MXFTimecodeComponent*)component;
    case PulldownComponent: /* timecode component may be located on a pulldown component */
pulldown = (MXFPulldownComponent*)component;
return mxf_resolve_strong_ref(mxf, &pulldown->input_segment_ref, TimecodeComponent);
default:
break;
}
return NULL;
}
static MXFPackage* mxf_resolve_source_package(MXFContext *mxf, UID package_uid)
{
MXFPackage *package = NULL;
int i;
for (i = 0; i < mxf->packages_count; i++) {
package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i], SourcePackage);
if (!package)
continue;
if (!memcmp(package->package_uid, package_uid, 16))
return package;
}
return NULL;
}
static MXFDescriptor* mxf_resolve_multidescriptor(MXFContext *mxf, MXFDescriptor *descriptor, int track_id)
{
MXFDescriptor *sub_descriptor = NULL;
int i;
if (!descriptor)
return NULL;
if (descriptor->type == MultipleDescriptor) {
for (i = 0; i < descriptor->sub_descriptors_count; i++) {
sub_descriptor = mxf_resolve_strong_ref(mxf, &descriptor->sub_descriptors_refs[i], Descriptor);
if (!sub_descriptor) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve sub descriptor strong ref\n");
continue;
}
if (sub_descriptor->linked_track_id == track_id) {
return sub_descriptor;
}
}
} else if (descriptor->type == Descriptor)
return descriptor;
return NULL;
}
static MXFStructuralComponent* mxf_resolve_essence_group_choice(MXFContext *mxf, MXFEssenceGroup *essence_group)
{
MXFStructuralComponent *component = NULL;
MXFPackage *package = NULL;
MXFDescriptor *descriptor = NULL;
int i;
if (!essence_group || !essence_group->structural_components_count)
return NULL;
    /* essence groups contain multiple representations of the same media;
       this returns the first component with a valid Descriptor, typically index 0 */
    for (i = 0; i < essence_group->structural_components_count; i++) {
component = mxf_resolve_strong_ref(mxf, &essence_group->structural_components_refs[i], SourceClip);
if (!component)
continue;
if (!(package = mxf_resolve_source_package(mxf, component->source_package_uid)))
continue;
descriptor = mxf_resolve_strong_ref(mxf, &package->descriptor_ref, Descriptor);
if (descriptor)
return component;
}
return NULL;
}
static MXFStructuralComponent* mxf_resolve_sourceclip(MXFContext *mxf, UID *strong_ref)
{
MXFStructuralComponent *component = NULL;
component = mxf_resolve_strong_ref(mxf, strong_ref, AnyType);
if (!component)
return NULL;
switch (component->type) {
case SourceClip:
return component;
case EssenceGroup:
return mxf_resolve_essence_group_choice(mxf, (MXFEssenceGroup*) component);
default:
break;
}
return NULL;
}
static int mxf_parse_package_comments(MXFContext *mxf, AVDictionary **pm, MXFPackage *package)
{
MXFTaggedValue *tag;
int size, i;
char *key = NULL;
for (i = 0; i < package->comment_count; i++) {
tag = mxf_resolve_strong_ref(mxf, &package->comment_refs[i], TaggedValue);
if (!tag || !tag->name || !tag->value)
continue;
size = strlen(tag->name) + 8 + 1;
key = av_mallocz(size);
if (!key)
return AVERROR(ENOMEM);
snprintf(key, size, "comment_%s", tag->name);
av_dict_set(pm, key, tag->value, AV_DICT_DONT_STRDUP_KEY);
}
return 0;
}
static int mxf_parse_physical_source_package(MXFContext *mxf, MXFTrack *source_track, AVStream *st)
{
MXFPackage *physical_package = NULL;
MXFTrack *physical_track = NULL;
MXFStructuralComponent *sourceclip = NULL;
MXFTimecodeComponent *mxf_tc = NULL;
int i, j, k;
AVTimecode tc;
int flags;
int64_t start_position;
for (i = 0; i < source_track->sequence->structural_components_count; i++) {
sourceclip = mxf_resolve_strong_ref(mxf, &source_track->sequence->structural_components_refs[i], SourceClip);
if (!sourceclip)
continue;
if (!(physical_package = mxf_resolve_source_package(mxf, sourceclip->source_package_uid)))
break;
mxf_add_umid_metadata(&st->metadata, "reel_umid", physical_package);
/* the name of physical source package is name of the reel or tape */
if (physical_package->name && physical_package->name[0])
av_dict_set(&st->metadata, "reel_name", physical_package->name, 0);
/* the source timecode is calculated by adding the start_position of the sourceclip from the file source package track
* to the start_frame of the timecode component located on one of the tracks of the physical source package.
*/
for (j = 0; j < physical_package->tracks_count; j++) {
if (!(physical_track = mxf_resolve_strong_ref(mxf, &physical_package->tracks_refs[j], Track))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
continue;
}
if (!(physical_track->sequence = mxf_resolve_strong_ref(mxf, &physical_track->sequence_ref, Sequence))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track sequence strong ref\n");
continue;
}
if (physical_track->edit_rate.num <= 0 ||
physical_track->edit_rate.den <= 0) {
av_log(mxf->fc, AV_LOG_WARNING,
"Invalid edit rate (%d/%d) found on structural"
" component #%d, defaulting to 25/1\n",
physical_track->edit_rate.num,
physical_track->edit_rate.den, i);
physical_track->edit_rate = (AVRational){25, 1};
}
for (k = 0; k < physical_track->sequence->structural_components_count; k++) {
if (!(mxf_tc = mxf_resolve_timecode_component(mxf, &physical_track->sequence->structural_components_refs[k])))
continue;
flags = mxf_tc->drop_frame == 1 ? AV_TIMECODE_FLAG_DROPFRAME : 0;
/* scale sourceclip start_position to match physical track edit rate */
start_position = av_rescale_q(sourceclip->start_position,
physical_track->edit_rate,
source_track->edit_rate);
if (av_timecode_init(&tc, mxf_tc->rate, flags, start_position + mxf_tc->start_frame, mxf->fc) == 0) {
mxf_add_timecode_metadata(&st->metadata, "timecode", &tc);
return 0;
}
}
}
}
return 0;
}
static int mxf_add_metadata_stream(MXFContext *mxf, MXFTrack *track)
{
MXFStructuralComponent *component = NULL;
const MXFCodecUL *codec_ul = NULL;
MXFPackage tmp_package;
AVStream *st;
int j;
for (j = 0; j < track->sequence->structural_components_count; j++) {
component = mxf_resolve_sourceclip(mxf, &track->sequence->structural_components_refs[j]);
if (!component)
continue;
break;
}
if (!component)
return 0;
st = avformat_new_stream(mxf->fc, NULL);
if (!st) {
av_log(mxf->fc, AV_LOG_ERROR, "could not allocate metadata stream\n");
return AVERROR(ENOMEM);
}
st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
st->codecpar->codec_id = AV_CODEC_ID_NONE;
st->id = track->track_id;
memcpy(&tmp_package.package_ul, component->source_package_ul, 16);
memcpy(&tmp_package.package_uid, component->source_package_uid, 16);
mxf_add_umid_metadata(&st->metadata, "file_package_umid", &tmp_package);
if (track->name && track->name[0])
av_dict_set(&st->metadata, "track_name", track->name, 0);
codec_ul = mxf_get_codec_ul(ff_mxf_data_definition_uls, &track->sequence->data_definition_ul);
av_dict_set(&st->metadata, "data_type", av_get_media_type_string(codec_ul->id), 0);
return 0;
}
static int mxf_parse_structural_metadata(MXFContext *mxf)
{
MXFPackage *material_package = NULL;
int i, j, k, ret;
av_log(mxf->fc, AV_LOG_TRACE, "metadata sets count %d\n", mxf->metadata_sets_count);
/* TODO: handle multiple material packages (OP3x) */
for (i = 0; i < mxf->packages_count; i++) {
material_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i], MaterialPackage);
if (material_package) break;
}
if (!material_package) {
av_log(mxf->fc, AV_LOG_ERROR, "no material package found\n");
return AVERROR_INVALIDDATA;
}
mxf_add_umid_metadata(&mxf->fc->metadata, "material_package_umid", material_package);
if (material_package->name && material_package->name[0])
av_dict_set(&mxf->fc->metadata, "material_package_name", material_package->name, 0);
mxf_parse_package_comments(mxf, &mxf->fc->metadata, material_package);
for (i = 0; i < material_package->tracks_count; i++) {
MXFPackage *source_package = NULL;
MXFTrack *material_track = NULL;
MXFTrack *source_track = NULL;
MXFTrack *temp_track = NULL;
MXFDescriptor *descriptor = NULL;
MXFStructuralComponent *component = NULL;
MXFTimecodeComponent *mxf_tc = NULL;
UID *essence_container_ul = NULL;
const MXFCodecUL *codec_ul = NULL;
const MXFCodecUL *container_ul = NULL;
const MXFCodecUL *pix_fmt_ul = NULL;
AVStream *st;
AVTimecode tc;
int flags;
if (!(material_track = mxf_resolve_strong_ref(mxf, &material_package->tracks_refs[i], Track))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track strong ref\n");
continue;
}
if ((component = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref, TimecodeComponent))) {
mxf_tc = (MXFTimecodeComponent*)component;
flags = mxf_tc->drop_frame == 1 ? AV_TIMECODE_FLAG_DROPFRAME : 0;
if (av_timecode_init(&tc, mxf_tc->rate, flags, mxf_tc->start_frame, mxf->fc) == 0) {
mxf_add_timecode_metadata(&mxf->fc->metadata, "timecode", &tc);
}
}
if (!(material_track->sequence = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref, Sequence))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track sequence strong ref\n");
continue;
}
for (j = 0; j < material_track->sequence->structural_components_count; j++) {
component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j], TimecodeComponent);
if (!component)
continue;
mxf_tc = (MXFTimecodeComponent*)component;
flags = mxf_tc->drop_frame == 1 ? AV_TIMECODE_FLAG_DROPFRAME : 0;
if (av_timecode_init(&tc, mxf_tc->rate, flags, mxf_tc->start_frame, mxf->fc) == 0) {
mxf_add_timecode_metadata(&mxf->fc->metadata, "timecode", &tc);
break;
}
}
/* TODO: handle multiple source clips, only finds first valid source clip */
if(material_track->sequence->structural_components_count > 1)
av_log(mxf->fc, AV_LOG_WARNING, "material track %d: has %d components\n",
material_track->track_id, material_track->sequence->structural_components_count);
for (j = 0; j < material_track->sequence->structural_components_count; j++) {
component = mxf_resolve_sourceclip(mxf, &material_track->sequence->structural_components_refs[j]);
if (!component)
continue;
source_package = mxf_resolve_source_package(mxf, component->source_package_uid);
if (!source_package) {
av_log(mxf->fc, AV_LOG_TRACE, "material track %d: no corresponding source package found\n", material_track->track_id);
break;
}
for (k = 0; k < source_package->tracks_count; k++) {
if (!(temp_track = mxf_resolve_strong_ref(mxf, &source_package->tracks_refs[k], Track))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
ret = AVERROR_INVALIDDATA;
goto fail_and_free;
}
if (temp_track->track_id == component->source_track_id) {
source_track = temp_track;
break;
}
}
if (!source_track) {
av_log(mxf->fc, AV_LOG_ERROR, "material track %d: no corresponding source track found\n", material_track->track_id);
break;
}
if(source_track && component)
break;
}
if (!source_track || !component || !source_package) {
if((ret = mxf_add_metadata_stream(mxf, material_track)))
goto fail_and_free;
continue;
}
if (!(source_track->sequence = mxf_resolve_strong_ref(mxf, &source_track->sequence_ref, Sequence))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track sequence strong ref\n");
ret = AVERROR_INVALIDDATA;
goto fail_and_free;
}
/* 0001GL00.MXF.A1.mxf_opatom.mxf has the same SourcePackageID as 0001GL.MXF.V1.mxf_opatom.mxf
* This would result in both files appearing to have two streams. Work around this by sanity checking DataDefinition */
if (memcmp(material_track->sequence->data_definition_ul, source_track->sequence->data_definition_ul, 16)) {
av_log(mxf->fc, AV_LOG_ERROR, "material track %d: DataDefinition mismatch\n", material_track->track_id);
continue;
}
st = avformat_new_stream(mxf->fc, NULL);
if (!st) {
av_log(mxf->fc, AV_LOG_ERROR, "could not allocate stream\n");
ret = AVERROR(ENOMEM);
goto fail_and_free;
}
st->id = material_track->track_id;
st->priv_data = source_track;
source_package->descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor_ref, AnyType);
descriptor = mxf_resolve_multidescriptor(mxf, source_package->descriptor, source_track->track_id);
        /* A SourceClip from an EssenceGroup may only be a single frame of essence data. The clip's duration is then how many
         * frames it's supposed to repeat for. Descriptor->duration, if present, contains the real duration of the essence data */
if (descriptor && descriptor->duration != AV_NOPTS_VALUE)
source_track->original_duration = st->duration = FFMIN(descriptor->duration, component->duration);
else
source_track->original_duration = st->duration = component->duration;
if (st->duration == -1)
st->duration = AV_NOPTS_VALUE;
st->start_time = component->start_position;
if (material_track->edit_rate.num <= 0 ||
material_track->edit_rate.den <= 0) {
av_log(mxf->fc, AV_LOG_WARNING,
"Invalid edit rate (%d/%d) found on stream #%d, "
"defaulting to 25/1\n",
material_track->edit_rate.num,
material_track->edit_rate.den, st->index);
material_track->edit_rate = (AVRational){25, 1};
}
avpriv_set_pts_info(st, 64, material_track->edit_rate.den, material_track->edit_rate.num);
/* ensure SourceTrack EditRate == MaterialTrack EditRate since only
* the former is accessible via st->priv_data */
source_track->edit_rate = material_track->edit_rate;
PRINT_KEY(mxf->fc, "data definition ul", source_track->sequence->data_definition_ul);
codec_ul = mxf_get_codec_ul(ff_mxf_data_definition_uls, &source_track->sequence->data_definition_ul);
st->codecpar->codec_type = codec_ul->id;
if (!descriptor) {
av_log(mxf->fc, AV_LOG_INFO, "source track %d: stream %d, no descriptor found\n", source_track->track_id, st->index);
continue;
}
PRINT_KEY(mxf->fc, "essence codec ul", descriptor->essence_codec_ul);
PRINT_KEY(mxf->fc, "essence container ul", descriptor->essence_container_ul);
essence_container_ul = &descriptor->essence_container_ul;
/* HACK: replacing the original key with mxf_encrypted_essence_container
* is not allowed according to s429-6, try to find correct information anyway */
if (IS_KLV_KEY(essence_container_ul, mxf_encrypted_essence_container)) {
av_log(mxf->fc, AV_LOG_INFO, "broken encrypted mxf file\n");
for (k = 0; k < mxf->metadata_sets_count; k++) {
MXFMetadataSet *metadata = mxf->metadata_sets[k];
if (metadata->type == CryptoContext) {
essence_container_ul = &((MXFCryptoContext *)metadata)->source_container_ul;
break;
}
}
}
/* TODO: drop PictureEssenceCoding and SoundEssenceCompression, only check EssenceContainer */
codec_ul = mxf_get_codec_ul(ff_mxf_codec_uls, &descriptor->essence_codec_ul);
st->codecpar->codec_id = (enum AVCodecID)codec_ul->id;
if (st->codecpar->codec_id == AV_CODEC_ID_NONE) {
codec_ul = mxf_get_codec_ul(ff_mxf_codec_uls, &descriptor->codec_ul);
st->codecpar->codec_id = (enum AVCodecID)codec_ul->id;
}
av_log(mxf->fc, AV_LOG_VERBOSE, "%s: Universal Label: ",
avcodec_get_name(st->codecpar->codec_id));
for (k = 0; k < 16; k++) {
av_log(mxf->fc, AV_LOG_VERBOSE, "%.2x",
descriptor->essence_codec_ul[k]);
if (!(k+1 & 19) || k == 5)
av_log(mxf->fc, AV_LOG_VERBOSE, ".");
}
av_log(mxf->fc, AV_LOG_VERBOSE, "\n");
mxf_add_umid_metadata(&st->metadata, "file_package_umid", source_package);
if (source_package->name && source_package->name[0])
av_dict_set(&st->metadata, "file_package_name", source_package->name, 0);
if (material_track->name && material_track->name[0])
av_dict_set(&st->metadata, "track_name", material_track->name, 0);
mxf_parse_physical_source_package(mxf, source_track, st);
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
source_track->intra_only = mxf_is_intra_only(descriptor);
container_ul = mxf_get_codec_ul(mxf_picture_essence_container_uls, essence_container_ul);
if (st->codecpar->codec_id == AV_CODEC_ID_NONE)
st->codecpar->codec_id = container_ul->id;
st->codecpar->width = descriptor->width;
st->codecpar->height = descriptor->height; /* Field height, not frame height */
switch (descriptor->frame_layout) {
case FullFrame:
st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
break;
case OneField:
/* Every other line is stored and needs to be duplicated. */
av_log(mxf->fc, AV_LOG_INFO, "OneField frame layout isn't currently supported\n");
                break; /* The correct thing to do here is fall through, but by breaking we might be
                          able to decode some streams at half the vertical resolution, rather than not at all.
                          It's also for compatibility with the old behavior. */
case MixedFields:
break;
case SegmentedFrame:
st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
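                /* fall through */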
case SeparateFields:
av_log(mxf->fc, AV_LOG_DEBUG, "video_line_map: (%d, %d), field_dominance: %d\n",
descriptor->video_line_map[0], descriptor->video_line_map[1],
descriptor->field_dominance);
if ((descriptor->video_line_map[0] > 0) && (descriptor->video_line_map[1] > 0)) {
/* Detect coded field order from VideoLineMap:
* (even, even) => bottom field coded first
* (even, odd) => top field coded first
* (odd, even) => top field coded first
* (odd, odd) => bottom field coded first
*/
if ((descriptor->video_line_map[0] + descriptor->video_line_map[1]) % 2) {
switch (descriptor->field_dominance) {
case MXF_FIELD_DOMINANCE_DEFAULT:
case MXF_FIELD_DOMINANCE_FF:
st->codecpar->field_order = AV_FIELD_TT;
break;
case MXF_FIELD_DOMINANCE_FL:
st->codecpar->field_order = AV_FIELD_TB;
break;
default:
avpriv_request_sample(mxf->fc,
"Field dominance %d support",
descriptor->field_dominance);
}
} else {
switch (descriptor->field_dominance) {
case MXF_FIELD_DOMINANCE_DEFAULT:
case MXF_FIELD_DOMINANCE_FF:
st->codecpar->field_order = AV_FIELD_BB;
break;
case MXF_FIELD_DOMINANCE_FL:
st->codecpar->field_order = AV_FIELD_BT;
break;
default:
avpriv_request_sample(mxf->fc,
"Field dominance %d support",
descriptor->field_dominance);
}
}
}
/* Turn field height into frame height. */
st->codecpar->height *= 2;
break;
default:
av_log(mxf->fc, AV_LOG_INFO, "Unknown frame layout type: %d\n", descriptor->frame_layout);
}
if (st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO) {
st->codecpar->format = descriptor->pix_fmt;
if (st->codecpar->format == AV_PIX_FMT_NONE) {
pix_fmt_ul = mxf_get_codec_ul(ff_mxf_pixel_format_uls,
&descriptor->essence_codec_ul);
st->codecpar->format = (enum AVPixelFormat)pix_fmt_ul->id;
if (st->codecpar->format == AV_PIX_FMT_NONE) {
st->codecpar->codec_tag = mxf_get_codec_ul(ff_mxf_codec_tag_uls,
&descriptor->essence_codec_ul)->id;
if (!st->codecpar->codec_tag) {
/* support files created before RP224v10 by defaulting to UYVY422
if subsampling is 4:2:2 and component depth is 8-bit */
if (descriptor->horiz_subsampling == 2 &&
descriptor->vert_subsampling == 1 &&
descriptor->component_depth == 8) {
st->codecpar->format = AV_PIX_FMT_UYVY422;
}
}
}
}
}
st->need_parsing = AVSTREAM_PARSE_HEADERS;
if (material_track->sequence->origin) {
av_dict_set_int(&st->metadata, "material_track_origin", material_track->sequence->origin, 0);
}
if (source_track->sequence->origin) {
av_dict_set_int(&st->metadata, "source_track_origin", source_track->sequence->origin, 0);
}
if (descriptor->aspect_ratio.num && descriptor->aspect_ratio.den)
st->display_aspect_ratio = descriptor->aspect_ratio;
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
container_ul = mxf_get_codec_ul(mxf_sound_essence_container_uls, essence_container_ul);
/* Only overwrite existing codec ID if it is unset or A-law, which is the default according to SMPTE RP 224. */
if (st->codecpar->codec_id == AV_CODEC_ID_NONE || (st->codecpar->codec_id == AV_CODEC_ID_PCM_ALAW && (enum AVCodecID)container_ul->id != AV_CODEC_ID_NONE))
st->codecpar->codec_id = (enum AVCodecID)container_ul->id;
st->codecpar->channels = descriptor->channels;
st->codecpar->bits_per_coded_sample = descriptor->bits_per_sample;
if (descriptor->sample_rate.den > 0) {
st->codecpar->sample_rate = descriptor->sample_rate.num / descriptor->sample_rate.den;
avpriv_set_pts_info(st, 64, descriptor->sample_rate.den, descriptor->sample_rate.num);
} else {
av_log(mxf->fc, AV_LOG_WARNING, "invalid sample rate (%d/%d) "
"found for stream #%d, time base forced to 1/48000\n",
descriptor->sample_rate.num, descriptor->sample_rate.den,
st->index);
avpriv_set_pts_info(st, 64, 1, 48000);
}
/* if duration is set, rescale it from EditRate to SampleRate */
if (st->duration != AV_NOPTS_VALUE)
st->duration = av_rescale_q(st->duration,
av_inv_q(material_track->edit_rate),
st->time_base);
/* TODO: implement AV_CODEC_ID_RAWAUDIO */
if (st->codecpar->codec_id == AV_CODEC_ID_PCM_S16LE) {
if (descriptor->bits_per_sample > 16 && descriptor->bits_per_sample <= 24)
st->codecpar->codec_id = AV_CODEC_ID_PCM_S24LE;
else if (descriptor->bits_per_sample == 32)
st->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
} else if (st->codecpar->codec_id == AV_CODEC_ID_PCM_S16BE) {
if (descriptor->bits_per_sample > 16 && descriptor->bits_per_sample <= 24)
st->codecpar->codec_id = AV_CODEC_ID_PCM_S24BE;
else if (descriptor->bits_per_sample == 32)
st->codecpar->codec_id = AV_CODEC_ID_PCM_S32BE;
} else if (st->codecpar->codec_id == AV_CODEC_ID_MP2) {
st->need_parsing = AVSTREAM_PARSE_FULL;
}
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA) {
int codec_id = mxf_get_codec_ul(mxf_data_essence_container_uls,
essence_container_ul)->id;
if (codec_id >= 0 &&
codec_id < FF_ARRAY_ELEMS(mxf_data_essence_descriptor)) {
av_dict_set(&st->metadata, "data_type",
mxf_data_essence_descriptor[codec_id], 0);
}
}
if (descriptor->extradata) {
if (!ff_alloc_extradata(st->codecpar, descriptor->extradata_size)) {
memcpy(st->codecpar->extradata, descriptor->extradata, descriptor->extradata_size);
}
} else if (st->codecpar->codec_id == AV_CODEC_ID_H264) {
int coded_width = mxf_get_codec_ul(mxf_intra_only_picture_coded_width,
&descriptor->essence_codec_ul)->id;
if (coded_width)
st->codecpar->width = coded_width;
ret = ff_generate_avci_extradata(st);
if (ret < 0)
return ret;
}
if (st->codecpar->codec_type != AVMEDIA_TYPE_DATA && (*essence_container_ul)[15] > 0x01) {
/* TODO: decode timestamps */
st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
}
}
ret = 0;
fail_and_free:
return ret;
}
static int64_t mxf_timestamp_to_int64(uint64_t timestamp)
{
struct tm time = { 0 };
time.tm_year = (timestamp >> 48) - 1900;
time.tm_mon = (timestamp >> 40 & 0xFF) - 1;
time.tm_mday = (timestamp >> 32 & 0xFF);
time.tm_hour = (timestamp >> 24 & 0xFF);
time.tm_min = (timestamp >> 16 & 0xFF);
time.tm_sec = (timestamp >> 8 & 0xFF);
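/* The timestamp packs, from the top bits down: a 16-bit year, then 8 bits
* each for month, day, hour, minute and second; the low byte is ignored
* here. e.g. the (hypothetical) value 0x07E20C1F173B3B00 decodes to
* 2018-12-31 23:59:59. */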
/* msvcrt versions of strftime call the invalid parameter handler
* (aborting the process if one isn't set) if the parameters are out
* of range. */
time.tm_mon = av_clip(time.tm_mon, 0, 11);
time.tm_mday = av_clip(time.tm_mday, 1, 31);
time.tm_hour = av_clip(time.tm_hour, 0, 23);
time.tm_min = av_clip(time.tm_min, 0, 59);
time.tm_sec = av_clip(time.tm_sec, 0, 59);
return (int64_t)av_timegm(&time) * 1000000;
}
#define SET_STR_METADATA(pb, name, str) do { \
if ((ret = mxf_read_utf16be_string(pb, size, &str)) < 0) \
return ret; \
av_dict_set(&s->metadata, name, str, AV_DICT_DONT_STRDUP_VAL); \
} while (0)
#define SET_UID_METADATA(pb, name, var, str) do { \
avio_read(pb, var, 16); \
if ((ret = mxf_uid_to_str(var, &str)) < 0) \
return ret; \
av_dict_set(&s->metadata, name, str, AV_DICT_DONT_STRDUP_VAL); \
} while (0)
#define SET_TS_METADATA(pb, name, var, str) do { \
var = avio_rb64(pb); \
if ((ret = avpriv_dict_set_timestamp(&s->metadata, name, mxf_timestamp_to_int64(var)) < 0)) \
return ret; \
} while (0)
static int mxf_read_identification_metadata(void *arg, AVIOContext *pb, int tag, int size, UID _uid, int64_t klv_offset)
{
MXFContext *mxf = arg;
AVFormatContext *s = mxf->fc;
int ret;
UID uid = { 0 };
char *str = NULL;
uint64_t ts;
switch (tag) {
case 0x3C01:
SET_STR_METADATA(pb, "company_name", str);
break;
case 0x3C02:
SET_STR_METADATA(pb, "product_name", str);
break;
case 0x3C04:
SET_STR_METADATA(pb, "product_version", str);
break;
case 0x3C05:
SET_UID_METADATA(pb, "product_uid", uid, str);
break;
case 0x3C06:
SET_TS_METADATA(pb, "modification_date", ts, str);
break;
case 0x3C08:
SET_STR_METADATA(pb, "application_platform", str);
break;
case 0x3C09:
SET_UID_METADATA(pb, "generation_uid", uid, str);
break;
case 0x3C0A:
SET_UID_METADATA(pb, "uid", uid, str);
break;
}
return 0;
}
static int mxf_read_preface_metadata(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
MXFContext *mxf = arg;
AVFormatContext *s = mxf->fc;
int ret;
char *str = NULL;
if (tag >= 0x8000 && (IS_KLV_KEY(uid, mxf_avid_project_name))) {
SET_STR_METADATA(pb, "project_name", str);
}
return 0;
}
static const MXFMetadataReadTableEntry mxf_metadata_read_table[] = {
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x05,0x01,0x00 }, mxf_read_primer_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x01,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x02,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x03,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x04,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x03,0x01,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x03,0x02,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x03,0x03,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x03,0x04,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x04,0x02,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x04,0x04,0x00 }, mxf_read_partition_pack },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x2f,0x00 }, mxf_read_preface_metadata },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x30,0x00 }, mxf_read_identification_metadata },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x18,0x00 }, mxf_read_content_storage, 0, AnyType },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x37,0x00 }, mxf_read_package, sizeof(MXFPackage), SourcePackage },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x36,0x00 }, mxf_read_package, sizeof(MXFPackage), MaterialPackage },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x0f,0x00 }, mxf_read_sequence, sizeof(MXFSequence), Sequence },
{ { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0D,0x01,0x01,0x01,0x01,0x01,0x05,0x00 }, mxf_read_essence_group, sizeof(MXFEssenceGroup), EssenceGroup},
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x11,0x00 }, mxf_read_source_clip, sizeof(MXFStructuralComponent), SourceClip },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3f,0x00 }, mxf_read_tagged_value, sizeof(MXFTaggedValue), TaggedValue },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x44,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), MultipleDescriptor },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x42,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* Generic Sound */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x28,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* CDCI */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x29,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* RGBA */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x48,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* Wave */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x47,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* AES3 */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x51,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* MPEG2VideoDescriptor */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x5c,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* VANC/VBI - SMPTE 436M */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x5e,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* MPEG2AudioDescriptor */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3A,0x00 }, mxf_read_track, sizeof(MXFTrack), Track }, /* Static Track */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3B,0x00 }, mxf_read_track, sizeof(MXFTrack), Track }, /* Generic Track */
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x14,0x00 }, mxf_read_timecode_component, sizeof(MXFTimecodeComponent), TimecodeComponent },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x0c,0x00 }, mxf_read_pulldown_component, sizeof(MXFPulldownComponent), PulldownComponent },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x04,0x01,0x02,0x02,0x00,0x00 }, mxf_read_cryptographic_context, sizeof(MXFCryptoContext), CryptoContext },
{ { 0x06,0x0e,0x2b,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x10,0x01,0x00 }, mxf_read_index_table_segment, sizeof(MXFIndexTableSegment), IndexTableSegment },
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, NULL, 0, AnyType },
};
static int mxf_metadataset_init(MXFMetadataSet *ctx, enum MXFMetadataSetType type)
{
switch (type){
case MultipleDescriptor:
case Descriptor:
((MXFDescriptor*)ctx)->pix_fmt = AV_PIX_FMT_NONE;
((MXFDescriptor*)ctx)->duration = AV_NOPTS_VALUE;
break;
default:
break;
}
return 0;
}
static int mxf_read_local_tags(MXFContext *mxf, KLVPacket *klv, MXFMetadataReadFunc *read_child, int ctx_size, enum MXFMetadataSetType type)
{
AVIOContext *pb = mxf->fc->pb;
MXFMetadataSet *ctx = ctx_size ? av_mallocz(ctx_size) : mxf;
uint64_t klv_end = avio_tell(pb) + klv->length;
if (!ctx)
return AVERROR(ENOMEM);
mxf_metadataset_init(ctx, type);
while (avio_tell(pb) + 4 < klv_end && !avio_feof(pb)) {
int ret;
int tag = avio_rb16(pb);
int size = avio_rb16(pb); /* KLV specified by 0x53 */
uint64_t next = avio_tell(pb) + size;
UID uid = {0};
av_log(mxf->fc, AV_LOG_TRACE, "local tag %#04x size %d\n", tag, size);
if (!size) { /* ignore empty tag, needed for some files with empty UMID tag */
av_log(mxf->fc, AV_LOG_ERROR, "local tag %#04x with 0 size\n", tag);
continue;
}
if (tag > 0x7FFF) { /* dynamic tag */
int i;
for (i = 0; i < mxf->local_tags_count; i++) {
int local_tag = AV_RB16(mxf->local_tags+i*18);
if (local_tag == tag) {
memcpy(uid, mxf->local_tags+i*18+2, 16);
av_log(mxf->fc, AV_LOG_TRACE, "local tag %#04x\n", local_tag);
PRINT_KEY(mxf->fc, "uid", uid);
}
}
}
if (ctx_size && tag == 0x3C0A) {
avio_read(pb, ctx->uid, 16);
} else if ((ret = read_child(ctx, pb, tag, size, uid, -1)) < 0) {
mxf_free_metadataset(&ctx, !!ctx_size);
return ret;
}
/* Accept the 64k local set limit being exceeded (Avid). Don't accept
* it extending past the end of the KLV though (zzuf5.mxf). */
if (avio_tell(pb) > klv_end) {
if (ctx_size) {
ctx->type = type;
mxf_free_metadataset(&ctx, !!ctx_size);
}
av_log(mxf->fc, AV_LOG_ERROR,
"local tag %#04x extends past end of local set @ %#"PRIx64"\n",
tag, klv->offset);
return AVERROR_INVALIDDATA;
} else if (avio_tell(pb) <= next) /* only seek forward, else this can loop for a long time */
avio_seek(pb, next, SEEK_SET);
}
if (ctx_size) ctx->type = type;
return ctx_size ? mxf_add_metadata_set(mxf, ctx) : 0;
}
/**
* Matches any partition pack key, in other words:
* - HeaderPartition
* - BodyPartition
* - FooterPartition
* @return non-zero if the key is a partition pack key, zero otherwise
*/
static int mxf_is_partition_pack_key(UID key)
{
//NOTE: this is a little lax since it doesn't constrain key[14]
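//key[13] selects the pack kind (2 = Header, 3 = Body, 4 = Footer; compare the
//partition pack keys in mxf_metadata_read_table), key[14] is the status byte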
return !memcmp(key, mxf_header_partition_pack_key, 13) &&
key[13] >= 2 && key[13] <= 4;
}
/**
* Parses a metadata KLV
* @return <0 on error, 0 otherwise
*/
static int mxf_parse_klv(MXFContext *mxf, KLVPacket klv, MXFMetadataReadFunc *read,
int ctx_size, enum MXFMetadataSetType type)
{
AVFormatContext *s = mxf->fc;
int res;
if (klv.key[5] == 0x53) {
res = mxf_read_local_tags(mxf, &klv, read, ctx_size, type);
} else {
uint64_t next = avio_tell(s->pb) + klv.length;
res = read(mxf, s->pb, 0, klv.length, klv.key, klv.offset);
/* only seek forward, else this can loop for a long time */
if (avio_tell(s->pb) > next) {
av_log(s, AV_LOG_ERROR, "read past end of KLV @ %#"PRIx64"\n",
klv.offset);
return AVERROR_INVALIDDATA;
}
avio_seek(s->pb, next, SEEK_SET);
}
if (res < 0) {
av_log(s, AV_LOG_ERROR, "error reading header metadata\n");
return res;
}
return 0;
}
/**
* Seeks to the previous partition and parses it, if possible
* @return <= 0 if we should stop parsing, > 0 if we should keep going
*/
static int mxf_seek_to_previous_partition(MXFContext *mxf)
{
AVIOContext *pb = mxf->fc->pb;
KLVPacket klv;
int64_t current_partition_ofs;
int ret;
if (!mxf->current_partition ||
mxf->run_in + mxf->current_partition->previous_partition <= mxf->last_forward_tell)
return 0; /* we've parsed all partitions */
/* seek to previous partition */
current_partition_ofs = mxf->current_partition->pack_ofs; //includes run-in
avio_seek(pb, mxf->run_in + mxf->current_partition->previous_partition, SEEK_SET);
mxf->current_partition = NULL;
av_log(mxf->fc, AV_LOG_TRACE, "seeking to previous partition\n");
/* Make sure this is actually a PartitionPack, and if so parse it.
* See deadlock2.mxf
*/
if ((ret = klv_read_packet(&klv, pb)) < 0) {
av_log(mxf->fc, AV_LOG_ERROR, "failed to read PartitionPack KLV\n");
return ret;
}
if (!mxf_is_partition_pack_key(klv.key)) {
av_log(mxf->fc, AV_LOG_ERROR, "PreviousPartition @ %" PRIx64 " isn't a PartitionPack\n", klv.offset);
return AVERROR_INVALIDDATA;
}
/* We can't just check ofs >= current_partition_ofs because PreviousPartition
* can point to just before the current partition, causing klv_read_packet()
* to sync back up to it. See deadlock3.mxf
*/
if (klv.offset >= current_partition_ofs) {
av_log(mxf->fc, AV_LOG_ERROR, "PreviousPartition for PartitionPack @ %"
PRIx64 " indirectly points to itself\n", current_partition_ofs);
return AVERROR_INVALIDDATA;
}
if ((ret = mxf_parse_klv(mxf, klv, mxf_read_partition_pack, 0, 0)) < 0)
return ret;
return 1;
}
/**
* Called when essence is encountered
* @return <= 0 if we should stop parsing, > 0 if we should keep going
*/
static int mxf_parse_handle_essence(MXFContext *mxf)
{
AVIOContext *pb = mxf->fc->pb;
int64_t ret;
if (mxf->parsing_backward) {
return mxf_seek_to_previous_partition(mxf);
} else {
if (!mxf->footer_partition) {
av_log(mxf->fc, AV_LOG_TRACE, "no FooterPartition\n");
return 0;
}
av_log(mxf->fc, AV_LOG_TRACE, "seeking to FooterPartition\n");
/* remember where we were so we don't end up seeking further back than this */
mxf->last_forward_tell = avio_tell(pb);
if (!(pb->seekable & AVIO_SEEKABLE_NORMAL)) {
av_log(mxf->fc, AV_LOG_INFO, "file is not seekable - not parsing FooterPartition\n");
return -1;
}
/* seek to FooterPartition and parse backward */
if ((ret = avio_seek(pb, mxf->run_in + mxf->footer_partition, SEEK_SET)) < 0) {
av_log(mxf->fc, AV_LOG_ERROR,
"failed to seek to FooterPartition @ 0x%" PRIx64
" (%"PRId64") - partial file?\n",
mxf->run_in + mxf->footer_partition, ret);
return ret;
}
mxf->current_partition = NULL;
mxf->parsing_backward = 1;
}
return 1;
}
/**
* Called when the next partition or EOF is encountered
* @return <= 0 if we should stop parsing, > 0 if we should keep going
*/
static int mxf_parse_handle_partition_or_eof(MXFContext *mxf)
{
return mxf->parsing_backward ? mxf_seek_to_previous_partition(mxf) : 1;
}
/**
* Figures out the proper offset and length of the essence container in each partition
*/
static void mxf_compute_essence_containers(MXFContext *mxf)
{
int x;
/* everything is already correct */
if (mxf->op == OPAtom)
return;
for (x = 0; x < mxf->partitions_count; x++) {
MXFPartition *p = &mxf->partitions[x];
if (!p->body_sid)
continue; /* BodySID == 0 -> no essence */
if (x >= mxf->partitions_count - 1)
break; /* FooterPartition - can't compute length (and we don't need to) */
/* essence container spans to the next partition */
p->essence_length = mxf->partitions[x+1].this_partition - p->essence_offset;
if (p->essence_length < 0) {
/* next ThisPartition < essence_offset */
p->essence_length = 0;
av_log(mxf->fc, AV_LOG_ERROR,
"partition %i: bad ThisPartition = %"PRIX64"\n",
x+1, mxf->partitions[x+1].this_partition);
}
}
}
static int64_t round_to_kag(int64_t position, int kag_size)
{
/* TODO: account for run-in? the spec isn't clear whether KAG should account for it */
/* NOTE: kag_size may be any integer between 1 and 2^10 */
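/* e.g. round_to_kag(1000, 512) == 1024, while the already aligned
* round_to_kag(1024, 512) == 1024 */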
int64_t ret = (position / kag_size) * kag_size;
return ret == position ? ret : ret + kag_size;
}
static int is_pcm(enum AVCodecID codec_id)
{
/* we only care about "normal" PCM codecs until we get samples */
return codec_id >= AV_CODEC_ID_PCM_S16LE && codec_id < AV_CODEC_ID_PCM_S24DAUD;
}
static AVStream* mxf_get_opatom_stream(MXFContext *mxf)
{
int i;
if (mxf->op != OPAtom)
return NULL;
for (i = 0; i < mxf->fc->nb_streams; i++) {
if (mxf->fc->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_DATA)
continue;
return mxf->fc->streams[i];
}
return NULL;
}
/**
* Deal with the case where EditUnitByteCount is very small for some audio
* atoms (2, 4, ...). In those cases we should read more than one
* sample per call to mxf_read_packet().
*/
static void mxf_handle_small_eubc(AVFormatContext *s)
{
MXFContext *mxf = s->priv_data;
/* assuming non-OPAtom == frame wrapped
* no sane writer would wrap 2 byte PCM packets with 20 byte headers.. */
AVStream *st = mxf_get_opatom_stream(mxf);
if (!st)
return;
/* expect PCM with exactly one index table segment and a small (< 32) EUBC */
if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO ||
!is_pcm(st->codecpar->codec_id) ||
mxf->nb_index_tables != 1 ||
mxf->index_tables[0].nb_segments != 1 ||
mxf->index_tables[0].segments[0]->edit_unit_byte_count >= 32)
return;
/* arbitrarily default to 48 kHz PAL audio frame size */
/* TODO: We could compute this from the ratio between the audio
* and video edit rates; for 48 kHz NTSC we could use the
* 1802-1802-1802-1802-1801 pattern. */
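/* 1920 samples = 48000 Hz / 25 fps, i.e. one PAL frame's worth of audio */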
mxf->edit_units_per_packet = 1920;
}
/**
* Deal with the case where OPAtom files do not have any IndexTableSegments.
*/
static int mxf_handle_missing_index_segment(MXFContext *mxf)
{
AVFormatContext *s = mxf->fc;
AVStream *st = NULL;
MXFIndexTableSegment *segment = NULL;
MXFPartition *p = NULL;
int essence_partition_count = 0;
int i, ret;
st = mxf_get_opatom_stream(mxf);
if (!st)
return 0;
/* TODO: support raw video without an index if such files exist */
if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO || !is_pcm(st->codecpar->codec_id))
return 0;
/* check if file already has a IndexTableSegment */
for (i = 0; i < mxf->metadata_sets_count; i++) {
if (mxf->metadata_sets[i]->type == IndexTableSegment)
return 0;
}
/* find the essence partition */
for (i = 0; i < mxf->partitions_count; i++) {
/* BodySID == 0 -> no essence */
if (!mxf->partitions[i].body_sid)
continue;
p = &mxf->partitions[i];
essence_partition_count++;
}
/* only handle files with a single essence partition */
if (essence_partition_count != 1)
return 0;
if (!(segment = av_mallocz(sizeof(*segment))))
return AVERROR(ENOMEM);
if ((ret = mxf_add_metadata_set(mxf, segment))) {
mxf_free_metadataset((MXFMetadataSet**)&segment, 1);
return ret;
}
segment->type = IndexTableSegment;
/* stream will be treated as small EditUnitByteCount */
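/* e.g. 16-bit stereo PCM gives (16 * 2) >> 3 = 4 bytes per edit unit,
* well under the 32 byte threshold used by mxf_handle_small_eubc() */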
segment->edit_unit_byte_count = (av_get_bits_per_sample(st->codecpar->codec_id) * st->codecpar->channels) >> 3;
segment->index_start_position = 0;
segment->index_duration = s->streams[0]->duration;
segment->index_sid = p->index_sid;
segment->body_sid = p->body_sid;
return 0;
}
static void mxf_read_random_index_pack(AVFormatContext *s)
{
MXFContext *mxf = s->priv_data;
uint32_t length;
int64_t file_size, max_rip_length, min_rip_length;
KLVPacket klv;
if (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL))
return;
file_size = avio_size(s->pb);
/* S377m says to check the RIP length for "silly" values, without defining "silly".
* The limit below assumes a file with nothing but partition packs and a RIP.
* Before changing this, consider that a muxer may place each sample in its own partition.
*
* 105 is the size of the smallest possible PartitionPack
* 12 is the size of each RIP entry
* 28 is the size of the RIP header and footer, assuming an 8-byte BER
*/
max_rip_length = ((file_size - mxf->run_in) / 105) * 12 + 28;
max_rip_length = FFMIN(max_rip_length, INT_MAX); //2 GiB and up is also silly
/* We're only interested in RIPs with at least two entries.. */
min_rip_length = 16+1+24+4;
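/* 45 bytes: a 16 byte key, a 1 byte BER length, two 12 byte entries
* and the 4 byte overall-length field */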
/* See S377m section 11 */
avio_seek(s->pb, file_size - 4, SEEK_SET);
length = avio_rb32(s->pb);
if (length < min_rip_length || length > max_rip_length)
goto end;
avio_seek(s->pb, file_size - length, SEEK_SET);
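/* the trailing 32-bit length covers the whole pack, so the difference of 20
* accounts for the 16 byte key plus (presumably) a 4 byte BER length */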
if (klv_read_packet(&klv, s->pb) < 0 ||
!IS_KLV_KEY(klv.key, mxf_random_index_pack_key) ||
klv.length != length - 20)
goto end;
avio_skip(s->pb, klv.length - 12);
mxf->footer_partition = avio_rb64(s->pb);
/* sanity check */
if (mxf->run_in + mxf->footer_partition >= file_size) {
av_log(s, AV_LOG_WARNING, "bad FooterPartition in RIP - ignoring\n");
mxf->footer_partition = 0;
}
end:
avio_seek(s->pb, mxf->run_in, SEEK_SET);
}
static int mxf_read_header(AVFormatContext *s)
{
MXFContext *mxf = s->priv_data;
KLVPacket klv;
int64_t essence_offset = 0;
int ret;
mxf->last_forward_tell = INT64_MAX;
mxf->edit_units_per_packet = 1;
if (!mxf_read_sync(s->pb, mxf_header_partition_pack_key, 14)) {
av_log(s, AV_LOG_ERROR, "could not find header partition pack key\n");
return AVERROR_INVALIDDATA;
}
avio_seek(s->pb, -14, SEEK_CUR);
mxf->fc = s;
mxf->run_in = avio_tell(s->pb);
mxf_read_random_index_pack(s);
while (!avio_feof(s->pb)) {
const MXFMetadataReadTableEntry *metadata;
if (klv_read_packet(&klv, s->pb) < 0) {
/* EOF - seek to previous partition or stop */
if(mxf_parse_handle_partition_or_eof(mxf) <= 0)
break;
else
continue;
}
PRINT_KEY(s, "read header", klv.key);
av_log(s, AV_LOG_TRACE, "size %"PRIu64" offset %#"PRIx64"\n", klv.length, klv.offset);
if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key) ||
IS_KLV_KEY(klv.key, mxf_essence_element_key) ||
IS_KLV_KEY(klv.key, mxf_avid_essence_element_key) ||
IS_KLV_KEY(klv.key, mxf_system_item_key)) {
if (!mxf->current_partition) {
av_log(mxf->fc, AV_LOG_ERROR, "found essence prior to first PartitionPack\n");
return AVERROR_INVALIDDATA;
}
if (!mxf->current_partition->essence_offset) {
/* for OP1a we compute essence_offset
* for OPAtom we point essence_offset after the KL (usually op1a_essence_offset + 20 or 25)
* TODO: for OP1a we could eliminate this entire if statement, always stopping parsing at op1a_essence_offset
* for OPAtom we still need the actual essence_offset though (the KL's length can vary)
*/
int64_t op1a_essence_offset =
round_to_kag(mxf->current_partition->this_partition +
mxf->current_partition->pack_length, mxf->current_partition->kag_size) +
round_to_kag(mxf->current_partition->header_byte_count, mxf->current_partition->kag_size) +
round_to_kag(mxf->current_partition->index_byte_count, mxf->current_partition->kag_size);
if (mxf->op == OPAtom) {
/* point essence_offset to the actual data
* OPAtom has all the essence in one big KLV
*/
mxf->current_partition->essence_offset = avio_tell(s->pb);
mxf->current_partition->essence_length = klv.length;
} else {
/* NOTE: op1a_essence_offset may be less than klv.offset (C0023S01.mxf) */
mxf->current_partition->essence_offset = op1a_essence_offset;
}
}
if (!essence_offset)
essence_offset = klv.offset;
/* seek to footer, previous partition or stop */
if (mxf_parse_handle_essence(mxf) <= 0)
break;
continue;
} else if (mxf_is_partition_pack_key(klv.key) && mxf->current_partition) {
/* next partition pack - keep going, seek to previous partition or stop */
if(mxf_parse_handle_partition_or_eof(mxf) <= 0)
break;
else if (mxf->parsing_backward)
continue;
/* we're still parsing forward. proceed to parsing this partition pack */
}
for (metadata = mxf_metadata_read_table; metadata->read; metadata++) {
if (IS_KLV_KEY(klv.key, metadata->key)) {
if ((ret = mxf_parse_klv(mxf, klv, metadata->read, metadata->ctx_size, metadata->type)) < 0)
goto fail;
break;
}
}
if (!metadata->read) {
av_log(s, AV_LOG_VERBOSE, "Dark key " PRIxUID "\n",
UID_ARG(klv.key));
avio_skip(s->pb, klv.length);
}
}
/* FIXME avoid seek */
if (!essence_offset) {
av_log(s, AV_LOG_ERROR, "no essence\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
avio_seek(s->pb, essence_offset, SEEK_SET);
mxf_compute_essence_containers(mxf);
/* we need to do this before computing the index tables
* to be able to fill in zero IndexDurations with st->duration */
if ((ret = mxf_parse_structural_metadata(mxf)) < 0)
goto fail;
mxf_handle_missing_index_segment(mxf);
if ((ret = mxf_compute_index_tables(mxf)) < 0)
goto fail;
if (mxf->nb_index_tables > 1) {
/* TODO: look up which IndexSID to use via EssenceContainerData */
av_log(mxf->fc, AV_LOG_INFO, "got %i index tables - only the first one (IndexSID %i) will be used\n",
mxf->nb_index_tables, mxf->index_tables[0].index_sid);
} else if (mxf->nb_index_tables == 0 && mxf->op == OPAtom) {
av_log(mxf->fc, AV_LOG_ERROR, "cannot demux OPAtom without an index\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
mxf_handle_small_eubc(s);
return 0;
fail:
mxf_read_close(s);
return ret;
}
/**
* Sets mxf->current_edit_unit based on what offset we're currently at.
* @return next_ofs if OK, <0 on error
*/
static int64_t mxf_set_current_edit_unit(MXFContext *mxf, int64_t current_offset)
{
int64_t last_ofs = -1, next_ofs = -1;
MXFIndexTable *t = &mxf->index_tables[0];
/* this is called from the OP1a demuxing logic, which means there
* may be no index tables */
if (mxf->nb_index_tables <= 0)
return -1;
/* find mxf->current_edit_unit so that the next edit unit starts ahead of current_offset */
while (mxf->current_edit_unit >= 0) {
if (mxf_edit_unit_absolute_offset(mxf, t, mxf->current_edit_unit + 1, NULL, &next_ofs, 0) < 0)
return -1;
if (next_ofs <= last_ofs) {
/* next_ofs didn't change or current_edit_unit wrapped around;
* this fixes the infinite loop on zzuf3.mxf */
av_log(mxf->fc, AV_LOG_ERROR,
"next_ofs didn't change. not deriving packet timestamps\n");
return -1;
}
if (next_ofs > current_offset)
break;
last_ofs = next_ofs;
mxf->current_edit_unit++;
}
/* not checking mxf->current_edit_unit >= t->nb_ptses here since CBR files may lack IndexEntryArrays */
if (mxf->current_edit_unit < 0)
return -1;
return next_ofs;
}
static int mxf_compute_sample_count(MXFContext *mxf, int stream_index,
uint64_t *sample_count)
{
int i, total = 0, size = 0;
AVStream *st = mxf->fc->streams[stream_index];
MXFTrack *track = st->priv_data;
AVRational time_base = av_inv_q(track->edit_rate);
AVRational sample_rate = av_inv_q(st->time_base);
const MXFSamplesPerFrame *spf = NULL;
if ((sample_rate.num / sample_rate.den) == 48000)
spf = ff_mxf_get_samples_per_frame(mxf->fc, time_base);
if (!spf) {
int remainder = (sample_rate.num * time_base.num) %
(time_base.den * sample_rate.den);
*sample_count = av_q2d(av_mul_q((AVRational){mxf->current_edit_unit, 1},
av_mul_q(sample_rate, time_base)));
if (remainder)
av_log(mxf->fc, AV_LOG_WARNING,
"seeking detected on stream #%d with time base (%d/%d) and "
"sample rate (%d/%d), audio pts won't be accurate.\n",
stream_index, time_base.num, time_base.den,
sample_rate.num, sample_rate.den);
return 0;
}
while (spf->samples_per_frame[size]) {
total += spf->samples_per_frame[size];
size++;
}
av_assert2(size);
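/* whole cycles of the pattern plus the remainder; e.g. with the common
* NTSC pattern {1602, 1601, 1602, 1601, 1602} (total 8008, size 5),
* edit unit 7 yields 1 * 8008 + 1602 + 1601 = 11211 samples */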
*sample_count = (mxf->current_edit_unit / size) * (uint64_t)total;
for (i = 0; i < mxf->current_edit_unit % size; i++) {
*sample_count += spf->samples_per_frame[i];
}
return 0;
}
static int mxf_set_audio_pts(MXFContext *mxf, AVCodecParameters *par,
AVPacket *pkt)
{
MXFTrack *track = mxf->fc->streams[pkt->stream_index]->priv_data;
int64_t bits_per_sample = par->bits_per_coded_sample;
if (!bits_per_sample)
bits_per_sample = av_get_bits_per_sample(par->codec_id);
pkt->pts = track->sample_count;
if ( par->channels <= 0
|| bits_per_sample <= 0
|| par->channels * (int64_t)bits_per_sample < 8)
return AVERROR(EINVAL);
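/* e.g. 16-bit stereo PCM: 2 * 16 / 8 = 4 bytes per sample frame,
* so a 7680 byte packet advances sample_count by 1920 */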
track->sample_count += pkt->size / (par->channels * (int64_t)bits_per_sample / 8);
return 0;
}
static int mxf_read_packet_old(AVFormatContext *s, AVPacket *pkt)
{
KLVPacket klv;
MXFContext *mxf = s->priv_data;
int ret;
while ((ret = klv_read_packet(&klv, s->pb)) == 0) {
PRINT_KEY(s, "read packet", klv.key);
av_log(s, AV_LOG_TRACE, "size %"PRIu64" offset %#"PRIx64"\n", klv.length, klv.offset);
if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key)) {
ret = mxf_decrypt_triplet(s, pkt, &klv);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "invalid encoded triplet\n");
return ret;
}
return 0;
}
if (IS_KLV_KEY(klv.key, mxf_essence_element_key) ||
IS_KLV_KEY(klv.key, mxf_canopus_essence_element_key) ||
IS_KLV_KEY(klv.key, mxf_avid_essence_element_key)) {
int index = mxf_get_stream_index(s, &klv);
int64_t next_ofs, next_klv;
AVStream *st;
MXFTrack *track;
AVCodecParameters *par;
if (index < 0) {
av_log(s, AV_LOG_ERROR,
"error getting stream index %"PRIu32"\n",
AV_RB32(klv.key + 12));
goto skip;
}
st = s->streams[index];
track = st->priv_data;
if (s->streams[index]->discard == AVDISCARD_ALL)
goto skip;
next_klv = avio_tell(s->pb) + klv.length;
next_ofs = mxf_set_current_edit_unit(mxf, klv.offset);
if (next_ofs >= 0 && next_klv > next_ofs) {
/* if this check is hit then it's possible OPAtom was treated as OP1a;
* truncate the packet since it's probably very large (>2 GiB is common) */
avpriv_request_sample(s,
"OPAtom misinterpreted as OP1a? "
"KLV for edit unit %i extending into "
"next edit unit",
mxf->current_edit_unit);
klv.length = next_ofs - avio_tell(s->pb);
}
/* check for an 8-channel AES3 element */
if (klv.key[12] == 0x06 && klv.key[13] == 0x01 && klv.key[14] == 0x10) {
ret = mxf_get_d10_aes3_packet(s->pb, s->streams[index],
pkt, klv.length);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "error reading D-10 aes3 frame\n");
return ret;
}
} else {
ret = av_get_packet(s->pb, pkt, klv.length);
if (ret < 0)
return ret;
}
pkt->stream_index = index;
pkt->pos = klv.offset;
par = st->codecpar;
if (par->codec_type == AVMEDIA_TYPE_VIDEO && next_ofs >= 0) {
/* mxf->current_edit_unit good - see if we have an
* index table to derive timestamps from */
MXFIndexTable *t = &mxf->index_tables[0];
if (mxf->nb_index_tables >= 1 && mxf->current_edit_unit < t->nb_ptses) {
pkt->dts = mxf->current_edit_unit + t->first_dts;
pkt->pts = t->ptses[mxf->current_edit_unit];
} else if (track && track->intra_only) {
/* intra-only -> PTS = EditUnit.
* let utils.c figure out DTS since it can be < PTS if low_delay = 0 (Sony IMX30) */
pkt->pts = mxf->current_edit_unit;
}
} else if (par->codec_type == AVMEDIA_TYPE_AUDIO) {
ret = mxf_set_audio_pts(mxf, par, pkt);
if (ret < 0)
return ret;
}
/* seek for truncated packets */
avio_seek(s->pb, next_klv, SEEK_SET);
return 0;
} else
skip:
avio_skip(s->pb, klv.length);
}
return avio_feof(s->pb) ? AVERROR_EOF : ret;
}
static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
MXFContext *mxf = s->priv_data;
int ret, size;
int64_t ret64, pos, next_pos;
AVStream *st;
MXFIndexTable *t;
int edit_units;
if (mxf->op != OPAtom)
return mxf_read_packet_old(s, pkt);
// If we have no streams then we basically are at EOF
st = mxf_get_opatom_stream(mxf);
if (!st)
return AVERROR_EOF;
/* OPAtom - clip wrapped demuxing */
/* NOTE: mxf_read_header() makes sure nb_index_tables > 0 for OPAtom */
t = &mxf->index_tables[0];
if (mxf->current_edit_unit >= st->duration)
return AVERROR_EOF;
edit_units = FFMIN(mxf->edit_units_per_packet, st->duration - mxf->current_edit_unit);
if ((ret = mxf_edit_unit_absolute_offset(mxf, t, mxf->current_edit_unit, NULL, &pos, 1)) < 0)
return ret;
/* compute size by finding the next edit unit or the end of the essence container
* not pretty, but it works */
if ((ret = mxf_edit_unit_absolute_offset(mxf, t, mxf->current_edit_unit + edit_units, NULL, &next_pos, 0)) < 0 &&
(next_pos = mxf_essence_container_end(mxf, t->body_sid)) <= 0) {
av_log(s, AV_LOG_ERROR, "unable to compute the size of the last packet\n");
return AVERROR_INVALIDDATA;
}
if ((size = next_pos - pos) <= 0) {
av_log(s, AV_LOG_ERROR, "bad size: %i\n", size);
return AVERROR_INVALIDDATA;
}
if ((ret64 = avio_seek(s->pb, pos, SEEK_SET)) < 0)
return ret64;
if ((size = av_get_packet(s->pb, pkt, size)) < 0)
return size;
pkt->stream_index = st->index;
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && t->ptses &&
mxf->current_edit_unit >= 0 && mxf->current_edit_unit < t->nb_ptses) {
pkt->dts = mxf->current_edit_unit + t->first_dts;
pkt->pts = t->ptses[mxf->current_edit_unit];
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
int ret = mxf_set_audio_pts(mxf, st->codecpar, pkt);
if (ret < 0)
return ret;
}
mxf->current_edit_unit += edit_units;
return 0;
}
static int mxf_read_close(AVFormatContext *s)
{
MXFContext *mxf = s->priv_data;
int i;
av_freep(&mxf->packages_refs);
for (i = 0; i < s->nb_streams; i++)
s->streams[i]->priv_data = NULL;
for (i = 0; i < mxf->metadata_sets_count; i++) {
mxf_free_metadataset(mxf->metadata_sets + i, 1);
}
av_freep(&mxf->partitions);
av_freep(&mxf->metadata_sets);
av_freep(&mxf->aesc);
av_freep(&mxf->local_tags);
if (mxf->index_tables) {
for (i = 0; i < mxf->nb_index_tables; i++) {
av_freep(&mxf->index_tables[i].segments);
av_freep(&mxf->index_tables[i].ptses);
av_freep(&mxf->index_tables[i].fake_index);
av_freep(&mxf->index_tables[i].offsets);
}
}
av_freep(&mxf->index_tables);
return 0;
}
static int mxf_probe(AVProbeData *p) {
const uint8_t *bufp = p->buf;
const uint8_t *end = p->buf + p->buf_size;
if (p->buf_size < sizeof(mxf_header_partition_pack_key))
return 0;
/* Must skip Run-In Sequence and search for MXF header partition pack key (SMPTE 377M 5.5) */
end -= sizeof(mxf_header_partition_pack_key);
for (; bufp < end;) {
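/* cheap pre-filter: reject most positions on byte 13 alone before
* doing the full 14 byte comparison below */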
if (!((bufp[13] - 1) & 0xF2)) {
if (AV_RN32(bufp ) == AV_RN32(mxf_header_partition_pack_key ) &&
AV_RN32(bufp+ 4) == AV_RN32(mxf_header_partition_pack_key+ 4) &&
AV_RN32(bufp+ 8) == AV_RN32(mxf_header_partition_pack_key+ 8) &&
AV_RN16(bufp+12) == AV_RN16(mxf_header_partition_pack_key+12))
return AVPROBE_SCORE_MAX;
bufp ++;
} else
bufp += 10;
}
return 0;
}
/* rudimentary byte seek */
/* XXX: use MXF Index */
static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
{
AVStream *st = s->streams[stream_index];
int64_t seconds;
MXFContext* mxf = s->priv_data;
int64_t seekpos;
int i, ret;
MXFIndexTable *t;
MXFTrack *source_track = st->priv_data;
if(st->codecpar->codec_type == AVMEDIA_TYPE_DATA)
return 0;
/* if audio then truncate sample_time to EditRate */
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
sample_time = av_rescale_q(sample_time, st->time_base,
av_inv_q(source_track->edit_rate));
if (mxf->nb_index_tables <= 0) {
if (!s->bit_rate)
return AVERROR_INVALIDDATA;
if (sample_time < 0)
sample_time = 0;
seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den);
seekpos = avio_seek(s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET);
if (seekpos < 0)
return seekpos;
ff_update_cur_dts(s, st, sample_time);
mxf->current_edit_unit = sample_time;
} else {
t = &mxf->index_tables[0];
/* clamp above zero, else ff_index_search_timestamp() returns negative;
* this also means we allow seeking before the start */
sample_time = FFMAX(sample_time, 0);
if (t->fake_index) {
/* The first frames may not be keyframes in presentation order, so
* we have to advance the target to be able to find the first
* keyframe backwards... */
if (!(flags & AVSEEK_FLAG_ANY) &&
(flags & AVSEEK_FLAG_BACKWARD) &&
t->ptses[0] != AV_NOPTS_VALUE &&
sample_time < t->ptses[0] &&
(t->fake_index[t->ptses[0]].flags & AVINDEX_KEYFRAME))
sample_time = t->ptses[0];
/* behave as if we have a proper index */
if ((sample_time = ff_index_search_timestamp(t->fake_index, t->nb_ptses, sample_time, flags)) < 0)
return sample_time;
/* get the stored order index from the display order index */
sample_time += t->offsets[sample_time];
} else {
/* no IndexEntryArray (one or more CBR segments)
* make sure we don't seek past the end */
sample_time = FFMIN(sample_time, source_track->original_duration - 1);
}
if ((ret = mxf_edit_unit_absolute_offset(mxf, t, sample_time, &sample_time, &seekpos, 1)) < 0)
return ret;
ff_update_cur_dts(s, st, sample_time);
mxf->current_edit_unit = sample_time;
avio_seek(s->pb, seekpos, SEEK_SET);
}
// Update all tracks sample count
for (i = 0; i < s->nb_streams; i++) {
AVStream *cur_st = s->streams[i];
MXFTrack *cur_track = cur_st->priv_data;
uint64_t current_sample_count = 0;
if (cur_st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
ret = mxf_compute_sample_count(mxf, i, &current_sample_count);
if (ret < 0)
return ret;
cur_track->sample_count = current_sample_count;
}
}
return 0;
}
AVInputFormat ff_mxf_demuxer = {
.name = "mxf",
.long_name = NULL_IF_CONFIG_SMALL("MXF (Material eXchange Format)"),
.flags = AVFMT_SEEK_TO_PTS,
.priv_data_size = sizeof(MXFContext),
.read_probe = mxf_probe,
.read_header = mxf_read_header,
.read_packet = mxf_read_packet,
.read_close = mxf_read_close,
.read_seek = mxf_read_seek,
};
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2778_0 |
crossvul-cpp_data_good_5845_5 | /*
* DDP: An implementation of the AppleTalk DDP protocol for
* Ethernet 'ELAP'.
*
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* With more than a little assistance from
*
* Wesley Craig <netatalk@umich.edu>
*
* Fixes:
* Neil Horman : Added missing device ioctls
* Michael Callahan : Made routing work
* Wesley Craig : Fix probing to listen to a
* passed node id.
* Alan Cox : Added send/recvmsg support
* Alan Cox : Moved at. to protinfo in
* socket.
* Alan Cox : Added firewall hooks.
* Alan Cox : Supports new ARPHRD_LOOPBACK
* Christer Weinigel : Routing and /proc fixes.
* Bradford Johnson : LocalTalk.
* Tom Dyas : Module support.
* Alan Cox : Hooks for PPP (based on the
* LocalTalk hook).
* Alan Cox : Posix bits
* Alan Cox/Mike Freeman : Possible fix to NBP problems
* Bradford Johnson : IP-over-DDP (experimental)
* Jay Schulist : Moved IP-over-DDP to its own
* driver file. (ipddp.c & ipddp.h)
* Jay Schulist : Made work as module with
* AppleTalk drivers, cleaned it.
* Rob Newberry : Added proxy AARP and AARP
* procfs, moved probing to AARP
* module.
* Adrian Sun/
* Michael Zuelsdorff : fix for net.0 packets. don't
* allow illegal ether/tokentalk
* port assignment. we lose a
* valid localtalk port as a
* result.
* Arnaldo C. de Melo : Cleanup, in preparation for
* shared skb support 8)
* Arnaldo C. de Melo : Move proc stuff to atalk_proc.c,
* use seq_file
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/termios.h> /* For TIOCOUTQ/INQ */
#include <linux/compat.h>
#include <linux/slab.h>
#include <net/datalink.h>
#include <net/psnap.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/route.h>
#include <linux/atalk.h>
#include <linux/highmem.h>
struct datalink_proto *ddp_dl, *aarp_dl;
static const struct proto_ops atalk_dgram_ops;
/**************************************************************************\
* *
* Handlers for the socket list. *
* *
\**************************************************************************/
HLIST_HEAD(atalk_sockets);
DEFINE_RWLOCK(atalk_sockets_lock);
static inline void __atalk_insert_socket(struct sock *sk)
{
sk_add_node(sk, &atalk_sockets);
}
static inline void atalk_remove_socket(struct sock *sk)
{
write_lock_bh(&atalk_sockets_lock);
sk_del_node_init(sk);
write_unlock_bh(&atalk_sockets_lock);
}
static struct sock *atalk_search_socket(struct sockaddr_at *to,
struct atalk_iface *atif)
{
struct sock *s;
read_lock_bh(&atalk_sockets_lock);
sk_for_each(s, &atalk_sockets) {
struct atalk_sock *at = at_sk(s);
if (to->sat_port != at->src_port)
continue;
if (to->sat_addr.s_net == ATADDR_ANYNET &&
to->sat_addr.s_node == ATADDR_BCAST)
goto found;
if (to->sat_addr.s_net == at->src_net &&
(to->sat_addr.s_node == at->src_node ||
to->sat_addr.s_node == ATADDR_BCAST ||
to->sat_addr.s_node == ATADDR_ANYNODE))
goto found;
/* XXXX.0 -- we got a request for this router. make sure
* that the node is appropriately set. */
if (to->sat_addr.s_node == ATADDR_ANYNODE &&
to->sat_addr.s_net != ATADDR_ANYNET &&
atif->address.s_node == at->src_node) {
to->sat_addr.s_node = atif->address.s_node;
goto found;
}
}
s = NULL;
found:
read_unlock_bh(&atalk_sockets_lock);
return s;
}
/**
* atalk_find_or_insert_socket - Try to find a socket matching ADDR
* @sk: socket to insert in the list if it is not there already
* @sat: address to search for
*
* Try to find a socket matching ADDR in the socket list, if found then return
* it. If not, insert SK into the socket list.
*
* This entire operation must execute atomically.
*/
static struct sock *atalk_find_or_insert_socket(struct sock *sk,
struct sockaddr_at *sat)
{
struct sock *s;
struct atalk_sock *at;
write_lock_bh(&atalk_sockets_lock);
sk_for_each(s, &atalk_sockets) {
at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
at->src_node == sat->sat_addr.s_node &&
at->src_port == sat->sat_port)
goto found;
}
s = NULL;
__atalk_insert_socket(sk); /* Wheee, it's free, assign and insert. */
found:
write_unlock_bh(&atalk_sockets_lock);
return s;
}
static void atalk_destroy_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
if (sk_has_allocations(sk)) {
sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
static inline void atalk_destroy_socket(struct sock *sk)
{
atalk_remove_socket(sk);
skb_queue_purge(&sk->sk_receive_queue);
if (sk_has_allocations(sk)) {
setup_timer(&sk->sk_timer, atalk_destroy_timer,
(unsigned long)sk);
sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/**************************************************************************\
* *
* Routing tables for the AppleTalk socket layer. *
* *
\**************************************************************************/
/* Anti-deadlock ordering is atalk_routes_lock --> iface_lock -DaveM */
struct atalk_route *atalk_routes;
DEFINE_RWLOCK(atalk_routes_lock);
struct atalk_iface *atalk_interfaces;
DEFINE_RWLOCK(atalk_interfaces_lock);
/* For probing devices or in a routerless network */
struct atalk_route atrtr_default;
/* AppleTalk interface control */
/*
* Drop a device. Doesn't drop any of its routes - that is the caller's
* problem. Called when we down the interface or delete the address.
*/
static void atif_drop_device(struct net_device *dev)
{
struct atalk_iface **iface = &atalk_interfaces;
struct atalk_iface *tmp;
write_lock_bh(&atalk_interfaces_lock);
while ((tmp = *iface) != NULL) {
if (tmp->dev == dev) {
*iface = tmp->next;
dev_put(dev);
kfree(tmp);
dev->atalk_ptr = NULL;
} else
iface = &tmp->next;
}
write_unlock_bh(&atalk_interfaces_lock);
}
static struct atalk_iface *atif_add_device(struct net_device *dev,
struct atalk_addr *sa)
{
struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL);
if (!iface)
goto out;
dev_hold(dev);
iface->dev = dev;
dev->atalk_ptr = iface;
iface->address = *sa;
iface->status = 0;
write_lock_bh(&atalk_interfaces_lock);
iface->next = atalk_interfaces;
atalk_interfaces = iface;
write_unlock_bh(&atalk_interfaces_lock);
out:
return iface;
}
/* Perform phase 2 AARP probing on our tentative address */
static int atif_probe_device(struct atalk_iface *atif)
{
int netrange = ntohs(atif->nets.nr_lastnet) -
ntohs(atif->nets.nr_firstnet) + 1;
int probe_net = ntohs(atif->address.s_net);
int probe_node = atif->address.s_node;
int netct, nodect;
/* Offset the network we start probing with */
if (probe_net == ATADDR_ANYNET) {
probe_net = ntohs(atif->nets.nr_firstnet);
if (netrange)
probe_net += jiffies % netrange;
}
if (probe_node == ATADDR_ANYNODE)
probe_node = jiffies & 0xFF;
/* Scan the networks */
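/* try every node on every network in the range, starting from a
* pseudo-random (jiffies based) offset so that concurrent hosts tend
* to probe different candidates first */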
atif->status |= ATIF_PROBE;
for (netct = 0; netct <= netrange; netct++) {
/* Sweep the available nodes from a given start */
atif->address.s_net = htons(probe_net);
for (nodect = 0; nodect < 256; nodect++) {
atif->address.s_node = (nodect + probe_node) & 0xFF;
if (atif->address.s_node > 0 &&
atif->address.s_node < 254) {
/* Probe a proposed address */
aarp_probe_network(atif);
if (!(atif->status & ATIF_PROBE_FAIL)) {
atif->status &= ~ATIF_PROBE;
return 0;
}
}
atif->status &= ~ATIF_PROBE_FAIL;
}
probe_net++;
if (probe_net > ntohs(atif->nets.nr_lastnet))
probe_net = ntohs(atif->nets.nr_firstnet);
}
atif->status &= ~ATIF_PROBE;
return -EADDRINUSE; /* Network is full... */
}
/* Perform AARP probing for a proxy address */
static int atif_proxy_probe_device(struct atalk_iface *atif,
struct atalk_addr* proxy_addr)
{
int netrange = ntohs(atif->nets.nr_lastnet) -
ntohs(atif->nets.nr_firstnet) + 1;
/* we probe the interface's network */
int probe_net = ntohs(atif->address.s_net);
int probe_node = ATADDR_ANYNODE; /* we'll take anything */
int netct, nodect;
/* Offset the network we start probing with */
if (probe_net == ATADDR_ANYNET) {
probe_net = ntohs(atif->nets.nr_firstnet);
if (netrange)
probe_net += jiffies % netrange;
}
if (probe_node == ATADDR_ANYNODE)
probe_node = jiffies & 0xFF;
/* Scan the networks */
for (netct = 0; netct <= netrange; netct++) {
/* Sweep the available nodes from a given start */
proxy_addr->s_net = htons(probe_net);
for (nodect = 0; nodect < 256; nodect++) {
proxy_addr->s_node = (nodect + probe_node) & 0xFF;
if (proxy_addr->s_node > 0 &&
proxy_addr->s_node < 254) {
/* Tell AARP to probe a proposed address */
int ret = aarp_proxy_probe_network(atif,
proxy_addr);
if (ret != -EADDRINUSE)
return ret;
}
}
probe_net++;
if (probe_net > ntohs(atif->nets.nr_lastnet))
probe_net = ntohs(atif->nets.nr_firstnet);
}
return -EADDRINUSE; /* Network is full... */
}
struct atalk_addr *atalk_find_dev_addr(struct net_device *dev)
{
struct atalk_iface *iface = dev->atalk_ptr;
return iface ? &iface->address : NULL;
}
static struct atalk_addr *atalk_find_primary(void)
{
struct atalk_iface *fiface = NULL;
struct atalk_addr *retval;
struct atalk_iface *iface;
/*
* Return a point-to-point interface only if
* there is no non-ptp interface available.
*/
read_lock_bh(&atalk_interfaces_lock);
for (iface = atalk_interfaces; iface; iface = iface->next) {
if (!fiface && !(iface->dev->flags & IFF_LOOPBACK))
fiface = iface;
if (!(iface->dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))) {
retval = &iface->address;
goto out;
}
}
if (fiface)
retval = &fiface->address;
else if (atalk_interfaces)
retval = &atalk_interfaces->address;
else
retval = NULL;
out:
read_unlock_bh(&atalk_interfaces_lock);
return retval;
}
/*
* Find a match for 'any network' - ie any of our interfaces with that
* node number will do just nicely.
*/
static struct atalk_iface *atalk_find_anynet(int node, struct net_device *dev)
{
struct atalk_iface *iface = dev->atalk_ptr;
if (!iface || iface->status & ATIF_PROBE)
goto out_err;
if (node != ATADDR_BCAST &&
iface->address.s_node != node &&
node != ATADDR_ANYNODE)
goto out_err;
out:
return iface;
out_err:
iface = NULL;
goto out;
}
/* Find a match for a specific network:node pair */
static struct atalk_iface *atalk_find_interface(__be16 net, int node)
{
struct atalk_iface *iface;
read_lock_bh(&atalk_interfaces_lock);
for (iface = atalk_interfaces; iface; iface = iface->next) {
if ((node == ATADDR_BCAST ||
node == ATADDR_ANYNODE ||
iface->address.s_node == node) &&
iface->address.s_net == net &&
!(iface->status & ATIF_PROBE))
break;
/* XXXX.0 -- net.0 returns the iface associated with net */
if (node == ATADDR_ANYNODE && net != ATADDR_ANYNET &&
ntohs(iface->nets.nr_firstnet) <= ntohs(net) &&
ntohs(net) <= ntohs(iface->nets.nr_lastnet))
break;
}
read_unlock_bh(&atalk_interfaces_lock);
return iface;
}
/*
* Find a route for an AppleTalk packet. This ought to get cached in
* the socket (later on...). We know about host routes and the fact
* that a route must be direct to broadcast.
*/
static struct atalk_route *atrtr_find(struct atalk_addr *target)
{
/*
* we must search through all routes unless we find a
* host route, because some host routes might overlap
* network routes
*/
struct atalk_route *net_route = NULL;
struct atalk_route *r;
read_lock_bh(&atalk_routes_lock);
for (r = atalk_routes; r; r = r->next) {
if (!(r->flags & RTF_UP))
continue;
if (r->target.s_net == target->s_net) {
if (r->flags & RTF_HOST) {
/*
* if this host route is for the target,
* then we're done
*/
if (r->target.s_node == target->s_node)
goto out;
} else
/*
* this route will work if there isn't a
* direct host route, so cache it
*/
net_route = r;
}
}
/*
* if we found a network route but not a direct host
* route, then return it
*/
if (net_route)
r = net_route;
else if (atrtr_default.dev)
r = &atrtr_default;
else /* No route can be found */
r = NULL;
out:
read_unlock_bh(&atalk_routes_lock);
return r;
}
/*
* Given an AppleTalk network, find the device to use. This can be
* a simple lookup.
*/
struct net_device *atrtr_get_dev(struct atalk_addr *sa)
{
struct atalk_route *atr = atrtr_find(sa);
return atr ? atr->dev : NULL;
}
/* Set up a default router */
static void atrtr_set_default(struct net_device *dev)
{
atrtr_default.dev = dev;
atrtr_default.flags = RTF_UP;
atrtr_default.gateway.s_net = htons(0);
atrtr_default.gateway.s_node = 0;
}
/*
* Add a router. Basically make sure it looks valid and stuff the
* entry in the list. While it uses netranges we always set them to one
* entry to work like netatalk.
*/
static int atrtr_create(struct rtentry *r, struct net_device *devhint)
{
struct sockaddr_at *ta = (struct sockaddr_at *)&r->rt_dst;
struct sockaddr_at *ga = (struct sockaddr_at *)&r->rt_gateway;
struct atalk_route *rt;
struct atalk_iface *iface, *riface;
int retval = -EINVAL;
/*
* Fixme: Raise/Lower a routing change semaphore for these
* operations.
*/
/* Validate the request */
if (ta->sat_family != AF_APPLETALK ||
(!devhint && ga->sat_family != AF_APPLETALK))
goto out;
/* Now walk the routing table and make our decisions */
write_lock_bh(&atalk_routes_lock);
for (rt = atalk_routes; rt; rt = rt->next) {
if (r->rt_flags != rt->flags)
continue;
if (ta->sat_addr.s_net == rt->target.s_net) {
if (!(rt->flags & RTF_HOST))
break;
if (ta->sat_addr.s_node == rt->target.s_node)
break;
}
}
if (!devhint) {
riface = NULL;
read_lock_bh(&atalk_interfaces_lock);
for (iface = atalk_interfaces; iface; iface = iface->next) {
if (!riface &&
ntohs(ga->sat_addr.s_net) >=
ntohs(iface->nets.nr_firstnet) &&
ntohs(ga->sat_addr.s_net) <=
ntohs(iface->nets.nr_lastnet))
riface = iface;
if (ga->sat_addr.s_net == iface->address.s_net &&
ga->sat_addr.s_node == iface->address.s_node)
riface = iface;
}
read_unlock_bh(&atalk_interfaces_lock);
retval = -ENETUNREACH;
if (!riface)
goto out_unlock;
devhint = riface->dev;
}
if (!rt) {
rt = kzalloc(sizeof(*rt), GFP_ATOMIC);
retval = -ENOBUFS;
if (!rt)
goto out_unlock;
rt->next = atalk_routes;
atalk_routes = rt;
}
/* Fill in the routing entry */
rt->target = ta->sat_addr;
dev_hold(devhint);
rt->dev = devhint;
rt->flags = r->rt_flags;
rt->gateway = ga->sat_addr;
retval = 0;
out_unlock:
write_unlock_bh(&atalk_routes_lock);
out:
return retval;
}
/* Delete a route. Find it and discard it */
static int atrtr_delete(struct atalk_addr * addr)
{
struct atalk_route **r = &atalk_routes;
int retval = 0;
struct atalk_route *tmp;
write_lock_bh(&atalk_routes_lock);
while ((tmp = *r) != NULL) {
if (tmp->target.s_net == addr->s_net &&
(!(tmp->flags&RTF_GATEWAY) ||
tmp->target.s_node == addr->s_node)) {
*r = tmp->next;
dev_put(tmp->dev);
kfree(tmp);
goto out;
}
r = &tmp->next;
}
retval = -ENOENT;
out:
write_unlock_bh(&atalk_routes_lock);
return retval;
}
/*
* Called when a device is downed. Just throw away any routes
* via it.
*/
static void atrtr_device_down(struct net_device *dev)
{
struct atalk_route **r = &atalk_routes;
struct atalk_route *tmp;
write_lock_bh(&atalk_routes_lock);
while ((tmp = *r) != NULL) {
if (tmp->dev == dev) {
*r = tmp->next;
dev_put(dev);
kfree(tmp);
} else
r = &tmp->next;
}
write_unlock_bh(&atalk_routes_lock);
if (atrtr_default.dev == dev)
atrtr_set_default(NULL);
}
/* Actually down the interface */
static inline void atalk_dev_down(struct net_device *dev)
{
atrtr_device_down(dev); /* Remove all routes for the device */
aarp_device_down(dev); /* Remove AARP entries for the device */
atif_drop_device(dev); /* Remove the device */
}
/*
* A device event has occurred. Watch for devices going down and
* delete our use of them (iface and route).
*/
static int ddp_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event == NETDEV_DOWN)
/* Discard any use of this */
atalk_dev_down(dev);
return NOTIFY_DONE;
}
/* ioctl calls. Shouldn't even need touching */
/* Device configuration ioctl calls */
static int atif_ioctl(int cmd, void __user *arg)
{
static char aarp_mcast[6] = { 0x09, 0x00, 0x00, 0xFF, 0xFF, 0xFF };
struct ifreq atreq;
struct atalk_netrange *nr;
struct sockaddr_at *sa;
struct net_device *dev;
struct atalk_iface *atif;
int ct;
int limit;
struct rtentry rtdef;
int add_route;
if (copy_from_user(&atreq, arg, sizeof(atreq)))
return -EFAULT;
dev = __dev_get_by_name(&init_net, atreq.ifr_name);
if (!dev)
return -ENODEV;
sa = (struct sockaddr_at *)&atreq.ifr_addr;
atif = atalk_find_dev(dev);
switch (cmd) {
case SIOCSIFADDR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
if (dev->type != ARPHRD_ETHER &&
dev->type != ARPHRD_LOOPBACK &&
dev->type != ARPHRD_LOCALTLK &&
dev->type != ARPHRD_PPP)
return -EPROTONOSUPPORT;
nr = (struct atalk_netrange *)&sa->sat_zero[0];
add_route = 1;
/*
* if this is a point-to-point iface, and we already
* have an iface for this AppleTalk address, then we
* should not add a route
*/
if ((dev->flags & IFF_POINTOPOINT) &&
atalk_find_interface(sa->sat_addr.s_net,
sa->sat_addr.s_node)) {
printk(KERN_DEBUG "AppleTalk: point-to-point "
"interface added with "
"existing address\n");
add_route = 0;
}
/*
* Phase 1 is fine on LocalTalk but we don't do
* EtherTalk phase 1. Anyone wanting to add it, go ahead.
*/
if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
return -EPROTONOSUPPORT;
if (sa->sat_addr.s_node == ATADDR_BCAST ||
sa->sat_addr.s_node == 254)
return -EINVAL;
if (atif) {
/* Already setting address */
if (atif->status & ATIF_PROBE)
return -EBUSY;
atif->address.s_net = sa->sat_addr.s_net;
atif->address.s_node = sa->sat_addr.s_node;
atrtr_device_down(dev); /* Flush old routes */
} else {
atif = atif_add_device(dev, &sa->sat_addr);
if (!atif)
return -ENOMEM;
}
atif->nets = *nr;
/*
* Check if the chosen address is used. If so we
* error and atalkd will try another.
*/
if (!(dev->flags & IFF_LOOPBACK) &&
!(dev->flags & IFF_POINTOPOINT) &&
atif_probe_device(atif) < 0) {
atif_drop_device(dev);
return -EADDRINUSE;
}
/* Hey it worked - add the direct routes */
sa = (struct sockaddr_at *)&rtdef.rt_gateway;
sa->sat_family = AF_APPLETALK;
sa->sat_addr.s_net = atif->address.s_net;
sa->sat_addr.s_node = atif->address.s_node;
sa = (struct sockaddr_at *)&rtdef.rt_dst;
rtdef.rt_flags = RTF_UP;
sa->sat_family = AF_APPLETALK;
sa->sat_addr.s_node = ATADDR_ANYNODE;
if (dev->flags & IFF_LOOPBACK ||
dev->flags & IFF_POINTOPOINT)
rtdef.rt_flags |= RTF_HOST;
/* Routerless initial state */
if (nr->nr_firstnet == htons(0) &&
nr->nr_lastnet == htons(0xFFFE)) {
sa->sat_addr.s_net = atif->address.s_net;
atrtr_create(&rtdef, dev);
atrtr_set_default(dev);
} else {
limit = ntohs(nr->nr_lastnet);
if (limit - ntohs(nr->nr_firstnet) > 4096) {
printk(KERN_WARNING "Too many routes/"
"iface.\n");
return -EINVAL;
}
if (add_route)
for (ct = ntohs(nr->nr_firstnet);
ct <= limit; ct++) {
sa->sat_addr.s_net = htons(ct);
atrtr_create(&rtdef, dev);
}
}
dev_mc_add_global(dev, aarp_mcast);
return 0;
case SIOCGIFADDR:
if (!atif)
return -EADDRNOTAVAIL;
sa->sat_family = AF_APPLETALK;
sa->sat_addr = atif->address;
break;
case SIOCGIFBRDADDR:
if (!atif)
return -EADDRNOTAVAIL;
sa->sat_family = AF_APPLETALK;
sa->sat_addr.s_net = atif->address.s_net;
sa->sat_addr.s_node = ATADDR_BCAST;
break;
case SIOCATALKDIFADDR:
case SIOCDIFADDR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
atalk_dev_down(dev);
break;
case SIOCSARP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
/*
* for now, we only support proxy AARP on ELAP;
* we should be able to do it for LocalTalk, too.
*/
if (dev->type != ARPHRD_ETHER)
return -EPROTONOSUPPORT;
/*
* atif points to the current interface on this network;
* we aren't concerned about its current status (at
* least for now), but it has all the settings about
* the network we're going to probe. Consequently, it
* must exist.
*/
if (!atif)
return -EADDRNOTAVAIL;
nr = (struct atalk_netrange *)&(atif->nets);
/*
* Phase 1 is fine on Localtalk but we don't do
* Ethertalk phase 1. Anyone wanting to add it, go ahead.
*/
if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
return -EPROTONOSUPPORT;
if (sa->sat_addr.s_node == ATADDR_BCAST ||
sa->sat_addr.s_node == 254)
return -EINVAL;
/*
* Check if the chosen address is used. If so we
* error and ATCP will try another.
*/
if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0)
return -EADDRINUSE;
/*
* We now have an address on the local network, and
* the AARP code will defend it for us until we take it
* down. We don't set up any routes right now, because
* ATCP will install them manually via SIOCADDRT.
*/
break;
case SIOCDARP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
if (!atif)
return -EADDRNOTAVAIL;
/* give to aarp module to remove proxy entry */
aarp_proxy_remove(atif->dev, &(sa->sat_addr));
return 0;
}
return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0;
}
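/*
 * A minimal userspace sketch of driving the SIOCSIFADDR branch above,
 * the way atalkd-style tools do (illustrative only: the interface
 * name, net and node are made up, errors are unchecked, and
 * CAP_NET_ADMIN is required). The netrange travels packed into
 * sat_zero, exactly as atif_ioctl() reads it back out:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/atalk.h>
 *
 *	struct ifreq ifr = { };
 *	struct sockaddr_at *sa = (struct sockaddr_at *)&ifr.ifr_addr;
 *	struct atalk_netrange *nr =
 *		(struct atalk_netrange *)&sa->sat_zero[0];
 *	int fd = socket(AF_APPLETALK, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	sa->sat_family = AF_APPLETALK;
 *	sa->sat_addr.s_net = htons(1);
 *	sa->sat_addr.s_node = 42;	anything but 255 or 254
 *	nr->nr_phase = 2;		EtherTalk must be phase 2
 *	nr->nr_firstnet = htons(1);
 *	nr->nr_lastnet = htons(1);
 *	ioctl(fd, SIOCSIFADDR, &ifr);
 */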
/* Routing ioctl() calls */
static int atrtr_ioctl(unsigned int cmd, void __user *arg)
{
struct rtentry rt;
if (copy_from_user(&rt, arg, sizeof(rt)))
return -EFAULT;
switch (cmd) {
case SIOCDELRT:
if (rt.rt_dst.sa_family != AF_APPLETALK)
return -EINVAL;
return atrtr_delete(&((struct sockaddr_at *)
&rt.rt_dst)->sat_addr);
case SIOCADDRT: {
struct net_device *dev = NULL;
if (rt.rt_dev) {
char name[IFNAMSIZ];
if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
return -EFAULT;
name[IFNAMSIZ-1] = '\0';
dev = __dev_get_by_name(&init_net, name);
if (!dev)
return -ENODEV;
}
return atrtr_create(&rt, dev);
}
}
return -EINVAL;
}
/**************************************************************************\
* *
* Handling for system calls applied via the various interfaces to an *
* AppleTalk socket object. *
* *
\**************************************************************************/
/*
* Checksum: This is 'optional'. It's quite likely also a good
* candidate for assembler hackery 8)
*/
static unsigned long atalk_sum_partial(const unsigned char *data,
int len, unsigned long sum)
{
/* This ought to be unwrapped neatly. I'll trust gcc for now */
while (len--) {
sum += *data++;
sum = rol16(sum, 1);
}
return sum;
}
/* Checksum skb data -- similar to skb_checksum */
static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
int len, unsigned long sum)
{
int start = skb_headlen(skb);
struct sk_buff *frag_iter;
int i, copy;
/* checksum stuff in header space */
if ( (copy = start - offset) > 0) {
if (copy > len)
copy = len;
sum = atalk_sum_partial(skb->data + offset, copy, sum);
if ( (len -= copy) == 0)
return sum;
offset += copy;
}
/* checksum stuff in frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u8 *vaddr;
if (copy > len)
copy = len;
vaddr = kmap_atomic(skb_frag_page(frag));
sum = atalk_sum_partial(vaddr + frag->page_offset +
offset - start, copy, sum);
kunmap_atomic(vaddr);
if (!(len -= copy))
return sum;
offset += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
sum = atalk_sum_skb(frag_iter, offset - start,
copy, sum);
if ((len -= copy) == 0)
return sum;
offset += copy;
}
start = end;
}
BUG_ON(len > 0);
return sum;
}
static __be16 atalk_checksum(const struct sk_buff *skb, int len)
{
unsigned long sum;
/* skip the first 4 header bytes (len/hops and checksum fields) */
sum = atalk_sum_skb(skb, 4, len-4, 0);
/* Use 0xFFFF for 0. 0 itself means none */
return sum ? htons((unsigned short)sum) : htons(0xFFFF);
}
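/*
 * The DDP checksum is thus a byte-wise sum with a one-bit left rotate
 * of the low 16 bits after every byte, computed over everything past
 * the first 4 header bytes. A flat-buffer model of the same
 * arithmetic (illustrative sketch, not used by the driver):
 *
 *	unsigned int sum = 0;
 *	int i;
 *
 *	for (i = 4; i < len; i++) {
 *		sum = (sum + buf[i]) & 0xffff;
 *		sum = ((sum << 1) | (sum >> 15)) & 0xffff;
 *	}
 *	result = sum ? sum : 0xffff;	0 on the wire means "none"
 */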
static struct proto ddp_proto = {
.name = "DDP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct atalk_sock),
};
/*
* Create a socket. Initialise the socket, blank the addresses and
* set the state.
*/
static int atalk_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
int rc = -ESOCKTNOSUPPORT;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
/*
* We permit SOCK_DGRAM; RAW is an extension. It is trivial to do
* and gives you the full ELAP frame. Should be handy for CAP 8)
*/
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
goto out;
rc = -ENOMEM;
sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);
if (!sk)
goto out;
rc = 0;
sock->ops = &atalk_dgram_ops;
sock_init_data(sock, sk);
/* Checksums on by default; the socket starts out unbound (SOCK_ZAPPED) */
sock_set_flag(sk, SOCK_ZAPPED);
out:
return rc;
}
/* Free a socket. No work needed */
static int atalk_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
sock_hold(sk);
lock_sock(sk);
sock_orphan(sk);
sock->sk = NULL;
atalk_destroy_socket(sk);
release_sock(sk);
sock_put(sk);
}
return 0;
}
/**
* atalk_pick_and_bind_port - Pick a source port when one is not given
* @sk: socket to insert into the tables
* @sat: address to search for
*
* Pick a source port when one is not given. If we can find a suitable free
* one, we insert the socket into the tables using it.
*
* This whole operation must be atomic.
*/
static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
{
int retval;
write_lock_bh(&atalk_sockets_lock);
for (sat->sat_port = ATPORT_RESERVED;
sat->sat_port < ATPORT_LAST;
sat->sat_port++) {
struct sock *s;
sk_for_each(s, &atalk_sockets) {
struct atalk_sock *at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
at->src_node == sat->sat_addr.s_node &&
at->src_port == sat->sat_port)
goto try_next_port;
}
/* Wheee, it's free, assign and insert. */
__atalk_insert_socket(sk);
at_sk(sk)->src_port = sat->sat_port;
retval = 0;
goto out;
try_next_port:;
}
retval = -EBUSY;
out:
write_unlock_bh(&atalk_sockets_lock);
return retval;
}
static int atalk_autobind(struct sock *sk)
{
struct atalk_sock *at = at_sk(sk);
struct sockaddr_at sat;
struct atalk_addr *ap = atalk_find_primary();
int n = -EADDRNOTAVAIL;
if (!ap || ap->s_net == htons(ATADDR_ANYNET))
goto out;
at->src_net = sat.sat_addr.s_net = ap->s_net;
at->src_node = sat.sat_addr.s_node = ap->s_node;
n = atalk_pick_and_bind_port(sk, &sat);
if (!n)
sock_reset_flag(sk, SOCK_ZAPPED);
out:
return n;
}
/* Set the address 'our end' of the connection */
static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_at *addr = (struct sockaddr_at *)uaddr;
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
int err;
if (!sock_flag(sk, SOCK_ZAPPED) ||
addr_len != sizeof(struct sockaddr_at))
return -EINVAL;
if (addr->sat_family != AF_APPLETALK)
return -EAFNOSUPPORT;
lock_sock(sk);
if (addr->sat_addr.s_net == htons(ATADDR_ANYNET)) {
struct atalk_addr *ap = atalk_find_primary();
err = -EADDRNOTAVAIL;
if (!ap)
goto out;
at->src_net = addr->sat_addr.s_net = ap->s_net;
at->src_node = addr->sat_addr.s_node= ap->s_node;
} else {
err = -EADDRNOTAVAIL;
if (!atalk_find_interface(addr->sat_addr.s_net,
addr->sat_addr.s_node))
goto out;
at->src_net = addr->sat_addr.s_net;
at->src_node = addr->sat_addr.s_node;
}
if (addr->sat_port == ATADDR_ANYPORT) {
err = atalk_pick_and_bind_port(sk, addr);
if (err < 0)
goto out;
} else {
at->src_port = addr->sat_port;
err = -EADDRINUSE;
if (atalk_find_or_insert_socket(sk, addr))
goto out;
}
sock_reset_flag(sk, SOCK_ZAPPED);
err = 0;
out:
release_sock(sk);
return err;
}
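/*
 * A minimal userspace sketch of the bind path above (illustrative
 * only). Binding with ATADDR_ANYNET/ATADDR_ANYPORT makes the kernel
 * substitute the primary interface address and scan the
 * ATPORT_RESERVED..ATPORT_LAST range for a free port via
 * atalk_pick_and_bind_port():
 *
 *	struct sockaddr_at sat = { };
 *	int fd = socket(AF_APPLETALK, SOCK_DGRAM, 0);
 *
 *	sat.sat_family = AF_APPLETALK;
 *	sat.sat_addr.s_net = htons(ATADDR_ANYNET);
 *	sat.sat_addr.s_node = ATADDR_ANYNODE;
 *	sat.sat_port = ATADDR_ANYPORT;
 *	bind(fd, (struct sockaddr *)&sat, sizeof(sat));
 */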
/* Set the address we talk to */
static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
struct sockaddr_at *addr;
int err;
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(*addr))
return -EINVAL;
addr = (struct sockaddr_at *)uaddr;
if (addr->sat_family != AF_APPLETALK)
return -EAFNOSUPPORT;
if (addr->sat_addr.s_node == ATADDR_BCAST &&
!sock_flag(sk, SOCK_BROADCAST)) {
#if 1
pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
current->comm);
#else
return -EACCES;
#endif
}
lock_sock(sk);
err = -EBUSY;
if (sock_flag(sk, SOCK_ZAPPED))
if (atalk_autobind(sk) < 0)
goto out;
err = -ENETUNREACH;
if (!atrtr_get_dev(&addr->sat_addr))
goto out;
at->dest_port = addr->sat_port;
at->dest_net = addr->sat_addr.s_net;
at->dest_node = addr->sat_addr.s_node;
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
err = 0;
out:
release_sock(sk);
return err;
}
/*
* Find the name of an AppleTalk socket. Just copy the right
* fields into the sockaddr.
*/
static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sockaddr_at sat;
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
int err;
lock_sock(sk);
err = -ENOBUFS;
if (sock_flag(sk, SOCK_ZAPPED))
if (atalk_autobind(sk) < 0)
goto out;
*uaddr_len = sizeof(struct sockaddr_at);
memset(&sat, 0, sizeof(sat));
if (peer) {
err = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
sat.sat_addr.s_net = at->dest_net;
sat.sat_addr.s_node = at->dest_node;
sat.sat_port = at->dest_port;
} else {
sat.sat_addr.s_net = at->src_net;
sat.sat_addr.s_node = at->src_node;
sat.sat_port = at->src_port;
}
err = 0;
sat.sat_family = AF_APPLETALK;
memcpy(uaddr, &sat, sizeof(sat));
out:
release_sock(sk);
return err;
}
#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
static __inline__ int is_ip_over_ddp(struct sk_buff *skb)
{
return skb->data[12] == 22;
}
static int handle_ip_over_ddp(struct sk_buff *skb)
{
struct net_device *dev = __dev_get_by_name(&init_net, "ipddp0");
struct net_device_stats *stats;
/* This needs to be able to handle ipddpN devices, not just ipddp0 */
if (!dev) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb->protocol = htons(ETH_P_IP);
skb_pull(skb, 13);
skb->dev = dev;
skb_reset_transport_header(skb);
stats = netdev_priv(dev);
stats->rx_packets++;
stats->rx_bytes += skb->len + 13;
return netif_rx(skb); /* Send the SKB up to a higher place. */
}
#else
/* make it easy for gcc to optimize this test out, i.e. kill the code */
#define is_ip_over_ddp(skb) 0
#define handle_ip_over_ddp(skb) 0
#endif
static int atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
struct ddpehdr *ddp, __u16 len_hops, int origlen)
{
struct atalk_route *rt;
struct atalk_addr ta;
/*
* Don't route multicast, etc., packets, or packets sent to "this
* network"
*/
if (skb->pkt_type != PACKET_HOST || !ddp->deh_dnet) {
/*
* FIXME:
*
* Can it ever happen that a packet is from a PPP iface and
* needs to be broadcast onto the default network?
*/
if (dev->type == ARPHRD_PPP)
printk(KERN_DEBUG "AppleTalk: didn't forward broadcast "
"packet received from PPP iface\n");
goto free_it;
}
ta.s_net = ddp->deh_dnet;
ta.s_node = ddp->deh_dnode;
/* Route the packet */
rt = atrtr_find(&ta);
/* increment hops count */
len_hops += 1 << 10;
if (!rt || !(len_hops & (15 << 10)))
goto free_it;
/* FIXME: use skb->cb to be able to use shared skbs */
/*
* Route goes through another gateway, so set the target to the
* gateway instead.
*/
if (rt->flags & RTF_GATEWAY) {
ta.s_net = rt->gateway.s_net;
ta.s_node = rt->gateway.s_node;
}
/* Fix up skb->len field */
skb_trim(skb, min_t(unsigned int, origlen,
(rt->dev->hard_header_len +
ddp_dl->header_length + (len_hops & 1023))));
/* FIXME: use skb->cb to be able to use shared skbs */
ddp->deh_len_hops = htons(len_hops);
/*
* Send the buffer onwards
*
* Now we must always be careful. If it's come from LocalTalk to
* EtherTalk it might not fit
*
* Order matters here: If a packet has to be copied to make a new
* headroom (rare hopefully) then it won't need unsharing.
*
* Note: the ddp pointer becomes invalid at the realloc.
*/
if (skb_headroom(skb) < 22) {
/* 22 bytes - 12 ether, 2 len, 3 802.2, 5 snap */
struct sk_buff *nskb = skb_realloc_headroom(skb, 32);
kfree_skb(skb);
skb = nskb;
} else
skb = skb_unshare(skb, GFP_ATOMIC);
/*
* If the buffer didn't vanish into the lack of space bitbucket we can
* send it.
*/
if (skb == NULL)
goto drop;
if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == NET_XMIT_DROP)
return NET_RX_DROP;
return NET_RX_SUCCESS;
free_it:
kfree_skb(skb);
drop:
return NET_RX_DROP;
}
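/*
 * deh_len_hops packs two fields into one 16-bit word: the datagram
 * length in the low 10 bits and the hop count in bits 10-13. That is
 * why the routing code increments hops with "len_hops += 1 << 10" and
 * drops the packet once "len_hops & (15 << 10)" is zero, i.e. once
 * the 4-bit hop field has wrapped past DDP_MAXHOPS. Worked example:
 *
 *	len_hops = 0x0c22:	len  = 0x0c22 & 1023 = 34 bytes
 *				hops = (0x0c22 >> 10) & 15 = 3
 *	len_hops += 1 << 10:	-> 0x1022, hops = 4, len unchanged
 */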
/**
* atalk_rcv - Receive a packet (in skb) from device dev
* @skb - packet received
* @dev - network device where the packet comes from
* @pt - packet type
*
* Receive a packet (in skb) from device dev. This has come from the SNAP
* decoder, and on entry skb->transport_header is the DDP header and
* skb->len is the DDP length. The physical headers have been extracted.
* PPP should probably pass frames marked as for this layer.
* [ie ARPHRD_ETHERTALK]
*/
static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct ddpehdr *ddp;
struct sock *sock;
struct atalk_iface *atif;
struct sockaddr_at tosat;
int origlen;
__u16 len_hops;
if (!net_eq(dev_net(dev), &init_net))
goto drop;
/* Don't mangle buffer if shared */
if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
goto out;
/* Size check and make sure header is contiguous */
if (!pskb_may_pull(skb, sizeof(*ddp)))
goto drop;
ddp = ddp_hdr(skb);
len_hops = ntohs(ddp->deh_len_hops);
/* Trim buffer in case of stray trailing data */
origlen = skb->len;
skb_trim(skb, min_t(unsigned int, skb->len, len_hops & 1023));
/*
* Size check to see if ddp->deh_len was crap
* (Otherwise we'll detonate most spectacularly
* in the middle of atalk_checksum() or recvmsg()).
*/
if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) {
pr_debug("AppleTalk: dropping corrupted frame (deh_len=%u, "
"skb->len=%u)\n", len_hops & 1023, skb->len);
goto drop;
}
/*
* Any checksums. Note we don't do htons() on this; it is assumed to
* already be valid in net byte order, as it is all over the networking code...
*/
if (ddp->deh_sum &&
atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum)
/* Not a valid AppleTalk frame - dustbin time */
goto drop;
/* Check the packet is aimed at us */
if (!ddp->deh_dnet) /* Net 0 is 'this network' */
atif = atalk_find_anynet(ddp->deh_dnode, dev);
else
atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode);
if (!atif) {
/* Not ours, so we route the packet via the correct
* AppleTalk iface
*/
return atalk_route_packet(skb, dev, ddp, len_hops, origlen);
}
/* if IP over DDP is not selected this code will be optimized out */
if (is_ip_over_ddp(skb))
return handle_ip_over_ddp(skb);
/*
* Which socket - atalk_search_socket() looks for a *full match*
* of the <net, node, port> tuple.
*/
tosat.sat_addr.s_net = ddp->deh_dnet;
tosat.sat_addr.s_node = ddp->deh_dnode;
tosat.sat_port = ddp->deh_dport;
sock = atalk_search_socket(&tosat, atif);
if (!sock) /* But not one of our sockets */
goto drop;
/* Queue packet (standard) */
skb->sk = sock;
if (sock_queue_rcv_skb(sock, skb) < 0)
goto drop;
return NET_RX_SUCCESS;
drop:
kfree_skb(skb);
out:
return NET_RX_DROP;
}
/*
* Receive a LocalTalk frame. We make some demands on the caller here.
* Caller must provide enough headroom on the packet to pull the short
* header and append a long one.
*/
static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
if (!net_eq(dev_net(dev), &init_net))
goto freeit;
/* Expand any short form frames */
if (skb_mac_header(skb)[2] == 1) {
struct ddpehdr *ddp;
/* Find our address */
struct atalk_addr *ap = atalk_find_dev_addr(dev);
if (!ap || skb->len < sizeof(__be16) || skb->len > 1023)
goto freeit;
/* Don't mangle buffer if shared */
if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
return 0;
/*
* The push leaves us with a ddpehdr, not an shdr, and
* handily presets the port bytes in the right place.
*/
ddp = (struct ddpehdr *) skb_push(skb, sizeof(*ddp) - 4);
/* Now fill in the long header */
/*
* These two first. The mac overlays the new source/dest
* network information so we MUST copy these before
* we write the network numbers !
*/
ddp->deh_dnode = skb_mac_header(skb)[0]; /* From physical header */
ddp->deh_snode = skb_mac_header(skb)[1]; /* From physical header */
ddp->deh_dnet = ap->s_net; /* Network number */
ddp->deh_snet = ap->s_net;
ddp->deh_sum = 0; /* No checksum */
/*
* Not sure about this bit...
*/
/* Non routable, so force a drop if we slip up later */
ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10));
}
skb_reset_transport_header(skb);
return atalk_rcv(skb, dev, pt, orig_dev);
freeit:
kfree_skb(skb);
return 0;
}
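/*
 * The short-to-long expansion relies on the header layouts: struct
 * ddpehdr is 12 bytes (len/hops 2, sum 2, dnet 2, snet 2, dnode 1,
 * snode 1, dport 1, sport 1), while the LocalTalk short form handled
 * here is 4 bytes (length 2, dest port 1, src port 1; the DDP type
 * byte stays in the payload, as with the long form). The
 * skb_push(skb, sizeof(*ddp) - 4) therefore grows the header by 8
 * bytes, leaving the old short header at offsets 8-11 of the new one:
 *
 *	old byte 0 (len hi) -> deh_dnode	overwritten from MAC hdr
 *	old byte 1 (len lo) -> deh_snode	overwritten from MAC hdr
 *	old byte 2 (dport)  -> deh_dport	already correct
 *	old byte 3 (sport)  -> deh_sport	already correct
 *
 * which is what "presets the port bytes in the right place" refers to.
 */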
static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
struct sockaddr_at *usat = (struct sockaddr_at *)msg->msg_name;
int flags = msg->msg_flags;
int loopback = 0;
struct sockaddr_at local_satalk, gsat;
struct sk_buff *skb;
struct net_device *dev;
struct ddpehdr *ddp;
int size;
struct atalk_route *rt;
int err;
if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
if (len > DDP_MAXSZ)
return -EMSGSIZE;
lock_sock(sk);
if (usat) {
err = -EBUSY;
if (sock_flag(sk, SOCK_ZAPPED))
if (atalk_autobind(sk) < 0)
goto out;
err = -EINVAL;
if (msg->msg_namelen < sizeof(*usat) ||
usat->sat_family != AF_APPLETALK)
goto out;
err = -EPERM;
/* netatalk didn't implement this check */
if (usat->sat_addr.s_node == ATADDR_BCAST &&
!sock_flag(sk, SOCK_BROADCAST)) {
goto out;
}
} else {
err = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
usat = &local_satalk;
usat->sat_family = AF_APPLETALK;
usat->sat_port = at->dest_port;
usat->sat_addr.s_node = at->dest_node;
usat->sat_addr.s_net = at->dest_net;
}
/* Build a packet */
SOCK_DEBUG(sk, "SK %p: Got address.\n", sk);
/* For headers */
size = sizeof(struct ddpehdr) + len + ddp_dl->header_length;
if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) {
rt = atrtr_find(&usat->sat_addr);
} else {
struct atalk_addr at_hint;
at_hint.s_node = 0;
at_hint.s_net = at->src_net;
rt = atrtr_find(&at_hint);
}
err = -ENETUNREACH;
if (!rt)
goto out;
dev = rt->dev;
SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
sk, size, dev->name);
size += dev->hard_header_len;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
lock_sock(sk);
if (!skb)
goto out;
skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
skb_reserve(skb, dev->hard_header_len);
skb->dev = dev;
SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
ddp = (struct ddpehdr *)skb_put(skb, sizeof(struct ddpehdr));
ddp->deh_len_hops = htons(len + sizeof(*ddp));
ddp->deh_dnet = usat->sat_addr.s_net;
ddp->deh_snet = at->src_net;
ddp->deh_dnode = usat->sat_addr.s_node;
ddp->deh_snode = at->src_node;
ddp->deh_dport = usat->sat_port;
ddp->deh_sport = at->src_port;
SOCK_DEBUG(sk, "SK %p: Copy user data (%Zd bytes).\n", sk, len);
err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (err) {
kfree_skb(skb);
err = -EFAULT;
goto out;
}
if (sk->sk_no_check == 1)
ddp->deh_sum = 0;
else
ddp->deh_sum = atalk_checksum(skb, len + sizeof(*ddp));
/*
* Loopback broadcast packets to non gateway targets (ie routes
* to group we are in)
*/
if (ddp->deh_dnode == ATADDR_BCAST &&
!(rt->flags & RTF_GATEWAY) && !(dev->flags & IFF_LOOPBACK)) {
struct sk_buff *skb2 = skb_copy(skb, GFP_KERNEL);
if (skb2) {
loopback = 1;
SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk);
/*
* If it fails it is queued/sent above in the aarp queue
*/
aarp_send_ddp(dev, skb2, &usat->sat_addr, NULL);
}
}
if (dev->flags & IFF_LOOPBACK || loopback) {
SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk);
/* loop back */
skb_orphan(skb);
if (ddp->deh_dnode == ATADDR_BCAST) {
struct atalk_addr at_lo;
at_lo.s_node = 0;
at_lo.s_net = 0;
rt = atrtr_find(&at_lo);
if (!rt) {
kfree_skb(skb);
err = -ENETUNREACH;
goto out;
}
dev = rt->dev;
skb->dev = dev;
}
ddp_dl->request(ddp_dl, skb, dev->dev_addr);
} else {
SOCK_DEBUG(sk, "SK %p: send out.\n", sk);
if (rt->flags & RTF_GATEWAY) {
gsat.sat_addr = rt->gateway;
usat = &gsat;
}
/*
* If it fails it is queued/sent above in the aarp queue
*/
aarp_send_ddp(dev, skb, &usat->sat_addr, NULL);
}
SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len);
out:
release_sock(sk);
return err ? : len;
}
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct ddpehdr *ddp;
int copied = 0;
int offset = 0;
int err = 0;
struct sk_buff *skb;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
lock_sock(sk);
if (!skb)
goto out;
/* FIXME: use skb->cb to be able to use shared skbs */
ddp = ddp_hdr(skb);
copied = ntohs(ddp->deh_len_hops) & 1023;
if (sk->sk_type != SOCK_RAW) {
offset = sizeof(*ddp);
copied -= offset;
}
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
if (!err && msg->msg_name) {
struct sockaddr_at *sat = msg->msg_name;
sat->sat_family = AF_APPLETALK;
sat->sat_port = ddp->deh_sport;
sat->sat_addr.s_node = ddp->deh_snode;
sat->sat_addr.s_net = ddp->deh_snet;
msg->msg_namelen = sizeof(*sat);
}
skb_free_datagram(sk, skb); /* Free the datagram. */
out:
release_sock(sk);
return err ? : copied;
}
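/*
 * A minimal userspace sketch of the two datagram paths above
 * (illustrative only; the destination net/node/port are made up and
 * errors are unchecked). Sending to ATADDR_BCAST additionally
 * requires SO_BROADCAST, which is what the checks in atalk_sendmsg()
 * and atalk_connect() enforce:
 *
 *	struct sockaddr_at to = { }, from;
 *	socklen_t fromlen = sizeof(from);
 *	char buf[DDP_MAXSZ];
 *	int fd = socket(AF_APPLETALK, SOCK_DGRAM, 0);
 *
 *	to.sat_family = AF_APPLETALK;
 *	to.sat_addr.s_net = htons(1);
 *	to.sat_addr.s_node = 42;
 *	to.sat_port = 129;
 *	sendto(fd, "hi", 2, 0, (struct sockaddr *)&to, sizeof(to));
 *	recvfrom(fd, buf, sizeof(buf), 0,
 *		 (struct sockaddr *)&from, &fromlen);
 */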
/*
* AppleTalk ioctl calls.
*/
static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int rc = -ENOIOCTLCMD;
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
switch (cmd) {
/* Protocol layer */
case TIOCOUTQ: {
long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
rc = put_user(amount, (int __user *)argp);
break;
}
case TIOCINQ: {
/*
* These two are safe on a single CPU system as only
* user tasks fiddle here
*/
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
long amount = 0;
if (skb)
amount = skb->len - sizeof(struct ddpehdr);
rc = put_user(amount, (int __user *)argp);
break;
}
case SIOCGSTAMP:
rc = sock_get_timestamp(sk, argp);
break;
case SIOCGSTAMPNS:
rc = sock_get_timestampns(sk, argp);
break;
/* Routing */
case SIOCADDRT:
case SIOCDELRT:
rc = -EPERM;
if (capable(CAP_NET_ADMIN))
rc = atrtr_ioctl(cmd, argp);
break;
/* Interface */
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFBRDADDR:
case SIOCATALKDIFADDR:
case SIOCDIFADDR:
case SIOCSARP: /* proxy AARP */
case SIOCDARP: /* proxy AARP */
rtnl_lock();
rc = atif_ioctl(cmd, argp);
rtnl_unlock();
break;
}
return rc;
}
#ifdef CONFIG_COMPAT
static int atalk_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
/*
* SIOCATALKDIFADDR is a SIOCPROTOPRIVATE ioctl number, so we
* cannot handle it in common code. The data we access in ifreq
* here is compatible, so we can simply call the native
* handler.
*/
if (cmd == SIOCATALKDIFADDR)
return atalk_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
return -ENOIOCTLCMD;
}
#endif
static const struct net_proto_family atalk_family_ops = {
.family = PF_APPLETALK,
.create = atalk_create,
.owner = THIS_MODULE,
};
static const struct proto_ops atalk_dgram_ops = {
.family = PF_APPLETALK,
.owner = THIS_MODULE,
.release = atalk_release,
.bind = atalk_bind,
.connect = atalk_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = atalk_getname,
.poll = datagram_poll,
.ioctl = atalk_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = atalk_compat_ioctl,
#endif
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = atalk_sendmsg,
.recvmsg = atalk_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static struct notifier_block ddp_notifier = {
.notifier_call = ddp_device_event,
};
static struct packet_type ltalk_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_LOCALTALK),
.func = ltalk_rcv,
};
static struct packet_type ppptalk_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_PPPTALK),
.func = atalk_rcv,
};
static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
/* Export symbols for use by drivers when AppleTalk is a module */
EXPORT_SYMBOL(atrtr_get_dev);
EXPORT_SYMBOL(atalk_find_dev_addr);
static const char atalk_err_snap[] __initconst =
KERN_CRIT "Unable to register DDP with SNAP.\n";
/* Called by proto.c on kernel start up */
static int __init atalk_init(void)
{
int rc = proto_register(&ddp_proto, 0);
if (rc != 0)
goto out;
(void)sock_register(&atalk_family_ops);
ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
if (!ddp_dl)
printk(atalk_err_snap);
dev_add_pack(<alk_packet_type);
dev_add_pack(&ppptalk_packet_type);
register_netdevice_notifier(&ddp_notifier);
aarp_proto_init();
atalk_proc_init();
atalk_register_sysctl();
out:
return rc;
}
module_init(atalk_init);
/*
* No explicit module reference count manipulation is needed in the
* protocol. The socket layer sets the module reference count for us
* and interface reference counting is done by the network device
* layer.
*
* Ergo, before the AppleTalk module can be removed, all AppleTalk
* sockets must be closed from user space.
*/
static void __exit atalk_exit(void)
{
#ifdef CONFIG_SYSCTL
atalk_unregister_sysctl();
#endif /* CONFIG_SYSCTL */
atalk_proc_exit();
aarp_cleanup_module(); /* General aarp clean-up. */
unregister_netdevice_notifier(&ddp_notifier);
dev_remove_pack(<alk_packet_type);
dev_remove_pack(&ppptalk_packet_type);
unregister_snap_client(ddp_dl);
sock_unregister(PF_APPLETALK);
proto_unregister(&ddp_proto);
}
module_exit(atalk_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
MODULE_DESCRIPTION("AppleTalk 0.20");
MODULE_ALIAS_NETPROTO(PF_APPLETALK);
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5845_5 |
crossvul-cpp_data_good_4716_0 | /*
* reflection.c: Routines for creating an image at runtime.
*
* Author:
* Paolo Molaro (lupus@ximian.com)
*
* Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
* Copyright 2004-2009 Novell, Inc (http://www.novell.com)
*
*/
#include <config.h>
#include "mono/utils/mono-digest.h"
#include "mono/utils/mono-membar.h"
#include "mono/metadata/reflection.h"
#include "mono/metadata/tabledefs.h"
#include "mono/metadata/metadata-internals.h"
#include <mono/metadata/profiler-private.h>
#include "mono/metadata/class-internals.h"
#include "mono/metadata/gc-internal.h"
#include "mono/metadata/tokentype.h"
#include "mono/metadata/domain-internals.h"
#include "mono/metadata/opcodes.h"
#include "mono/metadata/assembly.h"
#include "mono/metadata/object-internals.h"
#include <mono/metadata/exception.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/security-manager.h>
#include <stdio.h>
#include <glib.h>
#include <errno.h>
#include <time.h>
#include <string.h>
#include <ctype.h>
#include "image.h"
#include "cil-coff.h"
#include "mono-endian.h"
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/utils/mono-string.h>
#include <mono/utils/mono-error-internals.h>
#if HAVE_SGEN_GC
static void* reflection_info_desc = NULL;
#define MOVING_GC_REGISTER(addr) do { \
if (!reflection_info_desc) { \
gsize bmap = 1; \
reflection_info_desc = mono_gc_make_descr_from_bitmap (&bmap, 1); \
} \
mono_gc_register_root ((char*)(addr), sizeof (gpointer), reflection_info_desc); \
} while (0)
#else
#define MOVING_GC_REGISTER(addr)
#endif
typedef struct {
char *p;
char *buf;
char *end;
} SigBuffer;
#define TEXT_OFFSET 512
#define CLI_H_SIZE 136
#define FILE_ALIGN 512
#define VIRT_ALIGN 8192
#define START_TEXT_RVA 0x00002000
typedef struct {
MonoReflectionILGen *ilgen;
MonoReflectionType *rtype;
MonoArray *parameters;
MonoArray *generic_params;
MonoGenericContainer *generic_container;
MonoArray *pinfo;
MonoArray *opt_types;
guint32 attrs;
guint32 iattrs;
guint32 call_conv;
guint32 *table_idx; /* note: it's a pointer */
MonoArray *code;
MonoObject *type;
MonoString *name;
MonoBoolean init_locals;
MonoBoolean skip_visibility;
MonoArray *return_modreq;
MonoArray *return_modopt;
MonoArray *param_modreq;
MonoArray *param_modopt;
MonoArray *permissions;
MonoMethod *mhandle;
guint32 nrefs;
gpointer *refs;
/* for PInvoke */
int charset, extra_flags, native_cc;
MonoString *dll, *dllentry;
} ReflectionMethodBuilder;
typedef struct {
guint32 owner;
MonoReflectionGenericParam *gparam;
} GenericParamTableEntry;
const unsigned char table_sizes [MONO_TABLE_NUM] = {
MONO_MODULE_SIZE,
MONO_TYPEREF_SIZE,
MONO_TYPEDEF_SIZE,
0,
MONO_FIELD_SIZE,
0,
MONO_METHOD_SIZE,
0,
MONO_PARAM_SIZE,
MONO_INTERFACEIMPL_SIZE,
MONO_MEMBERREF_SIZE, /* 0x0A */
MONO_CONSTANT_SIZE,
MONO_CUSTOM_ATTR_SIZE,
MONO_FIELD_MARSHAL_SIZE,
MONO_DECL_SECURITY_SIZE,
MONO_CLASS_LAYOUT_SIZE,
MONO_FIELD_LAYOUT_SIZE, /* 0x10 */
MONO_STAND_ALONE_SIGNATURE_SIZE,
MONO_EVENT_MAP_SIZE,
0,
MONO_EVENT_SIZE,
MONO_PROPERTY_MAP_SIZE,
0,
MONO_PROPERTY_SIZE,
MONO_METHOD_SEMA_SIZE,
MONO_METHODIMPL_SIZE,
MONO_MODULEREF_SIZE, /* 0x1A */
MONO_TYPESPEC_SIZE,
MONO_IMPLMAP_SIZE,
MONO_FIELD_RVA_SIZE,
0,
0,
MONO_ASSEMBLY_SIZE, /* 0x20 */
MONO_ASSEMBLY_PROCESSOR_SIZE,
MONO_ASSEMBLYOS_SIZE,
MONO_ASSEMBLYREF_SIZE,
MONO_ASSEMBLYREFPROC_SIZE,
MONO_ASSEMBLYREFOS_SIZE,
MONO_FILE_SIZE,
MONO_EXP_TYPE_SIZE,
MONO_MANIFEST_SIZE,
MONO_NESTED_CLASS_SIZE,
MONO_GENERICPARAM_SIZE, /* 0x2A */
MONO_METHODSPEC_SIZE,
MONO_GENPARCONSTRAINT_SIZE
};
#ifndef DISABLE_REFLECTION_EMIT
static guint32 mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod *method, gboolean create_typespec);
static guint32 mono_image_get_methodbuilder_token (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb, gboolean create_methodspec);
static guint32 mono_image_get_ctorbuilder_token (MonoDynamicImage *assembly, MonoReflectionCtorBuilder *cb);
static guint32 mono_image_get_sighelper_token (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper);
static void ensure_runtime_vtable (MonoClass *klass);
static gpointer resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context);
static guint32 mono_image_get_methodref_token_for_methodbuilder (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *method);
static guint32 encode_generic_method_sig (MonoDynamicImage *assembly, MonoGenericContext *context);
static gpointer register_assembly (MonoDomain *domain, MonoReflectionAssembly *res, MonoAssembly *assembly);
static void reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb);
static void reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb);
#endif
static guint32 mono_image_typedef_or_ref (MonoDynamicImage *assembly, MonoType *type);
static guint32 mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec);
static void mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly);
static guint32 encode_marshal_blob (MonoDynamicImage *assembly, MonoReflectionMarshal *minfo);
static guint32 encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type);
static char* type_get_qualified_name (MonoType *type, MonoAssembly *ass);
static void encode_type (MonoDynamicImage *assembly, MonoType *type, SigBuffer *buf);
static void get_default_param_value_blobs (MonoMethod *method, char **blobs, guint32 *types);
static MonoObject *mono_get_object_from_blob (MonoDomain *domain, MonoType *type, const char *blob);
static MonoReflectionType *mono_reflection_type_get_underlying_system_type (MonoReflectionType* t);
static MonoType* mono_reflection_get_type_with_rootimage (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve);
static MonoReflectionType* mono_reflection_type_resolve_user_types (MonoReflectionType *type);
static gboolean is_sre_array (MonoClass *class);
static gboolean is_sre_byref (MonoClass *class);
static gboolean is_sre_pointer (MonoClass *class);
static gboolean is_sre_method_builder (MonoClass *class);
static gboolean is_sre_ctor_builder (MonoClass *class);
static gboolean is_sr_mono_method (MonoClass *class);
static gboolean is_sr_mono_cmethod (MonoClass *class);
static gboolean is_sr_mono_generic_method (MonoClass *class);
static gboolean is_sr_mono_generic_cmethod (MonoClass *class);
static gboolean is_sr_mono_field (MonoClass *class);
static gboolean is_sr_mono_property (MonoClass *class);
static gboolean is_sre_method_on_tb_inst (MonoClass *class);
static gboolean is_sre_ctor_on_tb_inst (MonoClass *class);
static guint32 mono_image_get_methodspec_token (MonoDynamicImage *assembly, MonoMethod *method);
static guint32 mono_image_get_inflated_method_token (MonoDynamicImage *assembly, MonoMethod *m);
static MonoMethod * inflate_method (MonoReflectionGenericClass *type, MonoObject *obj);
#define RESOLVE_TYPE(type) do { type = (void*)mono_reflection_type_resolve_user_types ((MonoReflectionType*)type); } while (0)
#define RESOLVE_ARRAY_TYPE_ELEMENT(array, index) do { \
MonoReflectionType *__type = mono_array_get (array, MonoReflectionType*, index); \
__type = mono_reflection_type_resolve_user_types (__type); \
mono_array_set (array, MonoReflectionType*, index, __type); \
} while (0)
#define mono_type_array_get_and_resolve(array, index) mono_reflection_type_get_handle ((MonoReflectionType*)mono_array_get (array, gpointer, index))
void
mono_reflection_init (void)
{
}
static void
sigbuffer_init (SigBuffer *buf, int size)
{
buf->buf = g_malloc (size);
buf->p = buf->buf;
buf->end = buf->buf + size;
}
static void
sigbuffer_make_room (SigBuffer *buf, int size)
{
if (buf->end - buf->p < size) {
int new_size = buf->end - buf->buf + size + 32;
char *p = g_realloc (buf->buf, new_size);
size = buf->p - buf->buf;
buf->buf = p;
buf->p = p + size;
buf->end = buf->buf + new_size;
}
}
static void
sigbuffer_add_value (SigBuffer *buf, guint32 val)
{
sigbuffer_make_room (buf, 6);
mono_metadata_encode_value (val, buf->p, &buf->p);
}
static void
sigbuffer_add_byte (SigBuffer *buf, guint8 val)
{
sigbuffer_make_room (buf, 1);
buf->p [0] = val;
buf->p++;
}
static void
sigbuffer_add_mem (SigBuffer *buf, char *p, guint32 size)
{
sigbuffer_make_room (buf, size);
memcpy (buf->p, p, size);
buf->p += size;
}
static void
sigbuffer_free (SigBuffer *buf)
{
g_free (buf->buf);
}
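/*
 * SigBuffer is used throughout this file in an init/append/flush
 * pattern; sigbuffer_make_room () grows the buffer on demand, so
 * callers never size it exactly. A minimal sketch of the pattern,
 * assuming a MonoDynamicImage *assembly in scope (illustrative only;
 * sigbuffer_add_to_blob_cached () is defined further down):
 *
 *	SigBuffer buf;
 *	guint32 idx;
 *
 *	sigbuffer_init (&buf, 32);
 *	sigbuffer_add_byte (&buf, 0x07);   e.g. a LOCAL_SIG marker
 *	sigbuffer_add_value (&buf, 2);     a compressed uint
 *	idx = sigbuffer_add_to_blob_cached (assembly, &buf);
 *	sigbuffer_free (&buf);
 */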
#ifndef DISABLE_REFLECTION_EMIT
/**
* image_g_malloc:
*
* Allocate memory from the @image mempool if it is non-NULL. Otherwise, allocate memory
* from the C heap.
*/
static gpointer
image_g_malloc (MonoImage *image, guint size)
{
if (image)
return mono_image_alloc (image, size);
else
return g_malloc (size);
}
#endif /* !DISABLE_REFLECTION_EMIT */
/**
* image_g_malloc0:
*
* Allocate memory from the @image mempool if it is non-NULL. Otherwise, allocate memory
* from the C heap.
*/
static gpointer
image_g_malloc0 (MonoImage *image, guint size)
{
if (image)
return mono_image_alloc0 (image, size);
else
return g_malloc0 (size);
}
#ifndef DISABLE_REFLECTION_EMIT
static char*
image_strdup (MonoImage *image, const char *s)
{
if (image)
return mono_image_strdup (image, s);
else
return g_strdup (s);
}
#endif
#define image_g_new(image,struct_type, n_structs) \
((struct_type *) image_g_malloc (image, ((gsize) sizeof (struct_type)) * ((gsize) (n_structs))))
#define image_g_new0(image,struct_type, n_structs) \
((struct_type *) image_g_malloc0 (image, ((gsize) sizeof (struct_type)) * ((gsize) (n_structs))))
static void
alloc_table (MonoDynamicTable *table, guint nrows)
{
table->rows = nrows;
g_assert (table->columns);
if (nrows + 1 >= table->alloc_rows) {
while (nrows + 1 >= table->alloc_rows) {
if (table->alloc_rows == 0)
table->alloc_rows = 16;
else
table->alloc_rows *= 2;
}
table->values = g_renew (guint32, table->values, (table->alloc_rows) * table->columns);
}
}
static void
make_room_in_stream (MonoDynamicStream *stream, int size)
{
if (size <= stream->alloc_size)
return;
while (stream->alloc_size <= size) {
if (stream->alloc_size < 4096)
stream->alloc_size = 4096;
else
stream->alloc_size *= 2;
}
stream->data = g_realloc (stream->data, stream->alloc_size);
}
static guint32
string_heap_insert (MonoDynamicStream *sh, const char *str)
{
guint32 idx;
guint32 len;
gpointer oldkey, oldval;
if (g_hash_table_lookup_extended (sh->hash, str, &oldkey, &oldval))
return GPOINTER_TO_UINT (oldval);
len = strlen (str) + 1;
idx = sh->index;
make_room_in_stream (sh, idx + len);
/*
* We strdup the string even though we also copy it into sh->data,
* so that the string pointers in the hash remain valid even if
* we need to realloc sh->data. We may want to avoid that later.
*/
g_hash_table_insert (sh->hash, g_strdup (str), GUINT_TO_POINTER (idx));
memcpy (sh->data + idx, str, len);
sh->index += len;
return idx;
}
static guint32
string_heap_insert_mstring (MonoDynamicStream *sh, MonoString *str)
{
char *name = mono_string_to_utf8 (str);
guint32 idx;
idx = string_heap_insert (sh, name);
g_free (name);
return idx;
}
#ifndef DISABLE_REFLECTION_EMIT
static void
string_heap_init (MonoDynamicStream *sh)
{
sh->index = 0;
sh->alloc_size = 4096;
sh->data = g_malloc (4096);
sh->hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
string_heap_insert (sh, "");
}
#endif
static guint32
mono_image_add_stream_data (MonoDynamicStream *stream, const char *data, guint32 len)
{
guint32 idx;
make_room_in_stream (stream, stream->index + len);
memcpy (stream->data + stream->index, data, len);
idx = stream->index;
stream->index += len;
/*
* align index? Not without adding an additional param that controls it since
* we may store a blob value in pieces.
*/
return idx;
}
static guint32
mono_image_add_stream_zero (MonoDynamicStream *stream, guint32 len)
{
guint32 idx;
make_room_in_stream (stream, stream->index + len);
memset (stream->data + stream->index, 0, len);
idx = stream->index;
stream->index += len;
return idx;
}
static void
stream_data_align (MonoDynamicStream *stream)
{
char buf [4] = {0};
guint32 count = stream->index % 4;
/* we assume the stream data will be aligned */
if (count)
mono_image_add_stream_data (stream, buf, 4 - count);
}
#ifndef DISABLE_REFLECTION_EMIT
static int
mono_blob_entry_hash (const char* str)
{
guint len, h;
const char *end;
len = mono_metadata_decode_blob_size (str, &str);
if (len > 0) {
end = str + len;
h = *str;
for (str += 1; str < end; str++)
h = (h << 5) - h + *str;
return h;
} else {
return 0;
}
}
static gboolean
mono_blob_entry_equal (const char *str1, const char *str2) {
int len, len2;
const char *end1;
const char *end2;
len = mono_metadata_decode_blob_size (str1, &end1);
len2 = mono_metadata_decode_blob_size (str2, &end2);
if (len != len2)
return 0;
return memcmp (end1, end2, len) == 0;
}
#endif
static guint32
add_to_blob_cached (MonoDynamicImage *assembly, char *b1, int s1, char *b2, int s2)
{
guint32 idx;
char *copy;
gpointer oldkey, oldval;
copy = g_malloc (s1+s2);
memcpy (copy, b1, s1);
memcpy (copy + s1, b2, s2);
if (g_hash_table_lookup_extended (assembly->blob_cache, copy, &oldkey, &oldval)) {
g_free (copy);
idx = GPOINTER_TO_UINT (oldval);
} else {
idx = mono_image_add_stream_data (&assembly->blob, b1, s1);
mono_image_add_stream_data (&assembly->blob, b2, s2);
g_hash_table_insert (assembly->blob_cache, copy, GUINT_TO_POINTER (idx));
}
return idx;
}
static guint32
sigbuffer_add_to_blob_cached (MonoDynamicImage *assembly, SigBuffer *buf)
{
char blob_size [8];
char *b = blob_size;
guint32 size = buf->p - buf->buf;
/* store length */
g_assert (size <= (buf->end - buf->buf));
mono_metadata_encode_value (size, b, &b);
return add_to_blob_cached (assembly, blob_size, b-blob_size, buf->buf, size);
}
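/*
 * The length prefix emitted by mono_metadata_encode_value () is the
 * ECMA-335 (II.23.2) compressed unsigned integer: values up to 0x7F
 * take one byte, values up to 0x3FFF take two bytes with the top bits
 * 10, and larger values take four bytes with the top bits 110.
 * Worked examples (byte sequences as stored):
 *
 *	3      -> 0x03
 *	0x80   -> 0x80 0x80		(0x8000 | value)
 *	0x4000 -> 0xC0 0x00 0x40 0x00	(0xC0000000 | value)
 */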
/*
* Copy len * nelem bytes from val to dest, swapping bytes to LE if necessary.
* dest may be misaligned.
*/
static void
swap_with_size (char *dest, const char* val, int len, int nelem) {
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
int elem;
for (elem = 0; elem < nelem; ++elem) {
switch (len) {
case 1:
*dest = *val;
break;
case 2:
dest [0] = val [1];
dest [1] = val [0];
break;
case 4:
dest [0] = val [3];
dest [1] = val [2];
dest [2] = val [1];
dest [3] = val [0];
break;
case 8:
dest [0] = val [7];
dest [1] = val [6];
dest [2] = val [5];
dest [3] = val [4];
dest [4] = val [3];
dest [5] = val [2];
dest [6] = val [1];
dest [7] = val [0];
break;
default:
g_assert_not_reached ();
}
dest += len;
val += len;
}
#else
memcpy (dest, val, len * nelem);
#endif
}
static guint32
add_mono_string_to_blob_cached (MonoDynamicImage *assembly, MonoString *str)
{
char blob_size [64];
char *b = blob_size;
guint32 idx = 0, len;
len = str->length * 2;
mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
{
char *swapped = g_malloc (2 * mono_string_length (str));
const char *p = (const char*)mono_string_chars (str);
swap_with_size (swapped, p, 2, mono_string_length (str));
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len);
g_free (swapped);
}
#else
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len);
#endif
return idx;
}
#ifndef DISABLE_REFLECTION_EMIT
static MonoClass *
default_class_from_mono_type (MonoType *type)
{
switch (type->type) {
case MONO_TYPE_OBJECT:
return mono_defaults.object_class;
case MONO_TYPE_VOID:
return mono_defaults.void_class;
case MONO_TYPE_BOOLEAN:
return mono_defaults.boolean_class;
case MONO_TYPE_CHAR:
return mono_defaults.char_class;
case MONO_TYPE_I1:
return mono_defaults.sbyte_class;
case MONO_TYPE_U1:
return mono_defaults.byte_class;
case MONO_TYPE_I2:
return mono_defaults.int16_class;
case MONO_TYPE_U2:
return mono_defaults.uint16_class;
case MONO_TYPE_I4:
return mono_defaults.int32_class;
case MONO_TYPE_U4:
return mono_defaults.uint32_class;
case MONO_TYPE_I:
return mono_defaults.int_class;
case MONO_TYPE_U:
return mono_defaults.uint_class;
case MONO_TYPE_I8:
return mono_defaults.int64_class;
case MONO_TYPE_U8:
return mono_defaults.uint64_class;
case MONO_TYPE_R4:
return mono_defaults.single_class;
case MONO_TYPE_R8:
return mono_defaults.double_class;
case MONO_TYPE_STRING:
return mono_defaults.string_class;
default:
g_warning ("default_class_from_mono_type: implement me 0x%02x\n", type->type);
g_assert_not_reached ();
}
return NULL;
}
#endif
static void
encode_generic_class (MonoDynamicImage *assembly, MonoGenericClass *gclass, SigBuffer *buf)
{
int i;
MonoGenericInst *class_inst;
MonoClass *klass;
g_assert (gclass);
class_inst = gclass->context.class_inst;
sigbuffer_add_value (buf, MONO_TYPE_GENERICINST);
klass = gclass->container_class;
sigbuffer_add_value (buf, klass->byval_arg.type);
sigbuffer_add_value (buf, mono_image_typedef_or_ref_full (assembly, &klass->byval_arg, FALSE));
sigbuffer_add_value (buf, class_inst->type_argc);
for (i = 0; i < class_inst->type_argc; ++i)
encode_type (assembly, class_inst->type_argv [i], buf);
}
static void
encode_type (MonoDynamicImage *assembly, MonoType *type, SigBuffer *buf)
{
if (!type) {
g_assert_not_reached ();
return;
}
if (type->byref)
sigbuffer_add_value (buf, MONO_TYPE_BYREF);
switch (type->type){
case MONO_TYPE_VOID:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_TYPEDBYREF:
sigbuffer_add_value (buf, type->type);
break;
case MONO_TYPE_PTR:
sigbuffer_add_value (buf, type->type);
encode_type (assembly, type->data.type, buf);
break;
case MONO_TYPE_SZARRAY:
sigbuffer_add_value (buf, type->type);
encode_type (assembly, &type->data.klass->byval_arg, buf);
break;
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_CLASS: {
MonoClass *k = mono_class_from_mono_type (type);
if (k->generic_container) {
MonoGenericClass *gclass = mono_metadata_lookup_generic_class (k, k->generic_container->context.class_inst, TRUE);
encode_generic_class (assembly, gclass, buf);
} else {
/*
* Make sure we use the correct type.
*/
sigbuffer_add_value (buf, k->byval_arg.type);
/*
* ensure only non-byref gets passed to mono_image_typedef_or_ref(),
* otherwise two typerefs could point to the same type, leading to
* verification errors.
*/
sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, &k->byval_arg));
}
break;
}
case MONO_TYPE_ARRAY:
sigbuffer_add_value (buf, type->type);
encode_type (assembly, &type->data.array->eklass->byval_arg, buf);
sigbuffer_add_value (buf, type->data.array->rank);
sigbuffer_add_value (buf, 0); /* FIXME: set to 0 for now */
sigbuffer_add_value (buf, 0);
break;
case MONO_TYPE_GENERICINST:
encode_generic_class (assembly, type->data.generic_class, buf);
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
sigbuffer_add_value (buf, type->type);
sigbuffer_add_value (buf, mono_type_get_generic_param_num (type));
break;
default:
g_error ("need to encode type %x", type->type);
}
}
static void
encode_reflection_type (MonoDynamicImage *assembly, MonoReflectionType *type, SigBuffer *buf)
{
if (!type) {
sigbuffer_add_value (buf, MONO_TYPE_VOID);
return;
}
encode_type (assembly, mono_reflection_type_get_handle (type), buf);
}
static void
encode_custom_modifiers (MonoDynamicImage *assembly, MonoArray *modreq, MonoArray *modopt, SigBuffer *buf)
{
int i;
if (modreq) {
for (i = 0; i < mono_array_length (modreq); ++i) {
MonoType *mod = mono_type_array_get_and_resolve (modreq, i);
sigbuffer_add_byte (buf, MONO_TYPE_CMOD_REQD);
sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, mod));
}
}
if (modopt) {
for (i = 0; i < mono_array_length (modopt); ++i) {
MonoType *mod = mono_type_array_get_and_resolve (modopt, i);
sigbuffer_add_byte (buf, MONO_TYPE_CMOD_OPT);
sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, mod));
}
}
}
#ifndef DISABLE_REFLECTION_EMIT
static guint32
method_encode_signature (MonoDynamicImage *assembly, MonoMethodSignature *sig)
{
SigBuffer buf;
int i;
guint32 nparams = sig->param_count;
guint32 idx;
if (!assembly->save)
return 0;
sigbuffer_init (&buf, 32);
/*
* FIXME: vararg, explicit_this, different call_conv values...
*/
idx = sig->call_convention;
if (sig->hasthis)
idx |= 0x20; /* hasthis */
if (sig->generic_param_count)
idx |= 0x10; /* generic */
sigbuffer_add_byte (&buf, idx);
if (sig->generic_param_count)
sigbuffer_add_value (&buf, sig->generic_param_count);
sigbuffer_add_value (&buf, nparams);
encode_type (assembly, sig->ret, &buf);
for (i = 0; i < nparams; ++i) {
if (i == sig->sentinelpos)
sigbuffer_add_byte (&buf, MONO_TYPE_SENTINEL);
encode_type (assembly, sig->params [i], &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
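/*
 * The leading byte built above is the ECMA-335 MethodDefSig
 * calling-convention byte: the low nibble is the calling convention,
 * 0x20 is HASTHIS and 0x10 is GENERIC. As a worked example, an
 * instance method "int32 Foo (string)" with the default calling
 * convention encodes as:
 *
 *	0x20	HASTHIS | DEFAULT
 *	0x01	one parameter
 *	0x08	ELEMENT_TYPE_I4 (return type)
 *	0x0e	ELEMENT_TYPE_STRING
 */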
#endif
static guint32
method_builder_encode_signature (MonoDynamicImage *assembly, ReflectionMethodBuilder *mb)
{
/*
* FIXME: reuse code from method_encode_signature().
*/
SigBuffer buf;
int i;
guint32 nparams = mb->parameters ? mono_array_length (mb->parameters): 0;
guint32 ngparams = mb->generic_params ? mono_array_length (mb->generic_params): 0;
guint32 notypes = mb->opt_types ? mono_array_length (mb->opt_types): 0;
guint32 idx;
sigbuffer_init (&buf, 32);
/* LAMESPEC: all the call conv spec is foobared */
idx = mb->call_conv & 0x60; /* has-this, explicit-this */
if (mb->call_conv & 2)
idx |= 0x5; /* vararg */
if (!(mb->attrs & METHOD_ATTRIBUTE_STATIC))
idx |= 0x20; /* hasthis */
if (ngparams)
idx |= 0x10; /* generic */
sigbuffer_add_byte (&buf, idx);
if (ngparams)
sigbuffer_add_value (&buf, ngparams);
sigbuffer_add_value (&buf, nparams + notypes);
encode_custom_modifiers (assembly, mb->return_modreq, mb->return_modopt, &buf);
encode_reflection_type (assembly, mb->rtype, &buf);
for (i = 0; i < nparams; ++i) {
MonoArray *modreq = NULL;
MonoArray *modopt = NULL;
MonoReflectionType *pt;
if (mb->param_modreq && (i < mono_array_length (mb->param_modreq)))
modreq = mono_array_get (mb->param_modreq, MonoArray*, i);
if (mb->param_modopt && (i < mono_array_length (mb->param_modopt)))
modopt = mono_array_get (mb->param_modopt, MonoArray*, i);
encode_custom_modifiers (assembly, modreq, modopt, &buf);
pt = mono_array_get (mb->parameters, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
if (notypes)
sigbuffer_add_byte (&buf, MONO_TYPE_SENTINEL);
for (i = 0; i < notypes; ++i) {
MonoReflectionType *pt;
pt = mono_array_get (mb->opt_types, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static guint32
encode_locals (MonoDynamicImage *assembly, MonoReflectionILGen *ilgen)
{
MonoDynamicTable *table;
guint32 *values;
guint32 idx, sig_idx;
guint nl = mono_array_length (ilgen->locals);
SigBuffer buf;
int i;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x07);
sigbuffer_add_value (&buf, nl);
for (i = 0; i < nl; ++i) {
MonoReflectionLocalBuilder *lb = mono_array_get (ilgen->locals, MonoReflectionLocalBuilder*, i);
if (lb->is_pinned)
sigbuffer_add_value (&buf, MONO_TYPE_PINNED);
encode_reflection_type (assembly, (MonoReflectionType*)lb->type, &buf);
}
sig_idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
if (assembly->standalonesig_cache == NULL)
assembly->standalonesig_cache = g_hash_table_new (NULL, NULL);
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->standalonesig_cache, GUINT_TO_POINTER (sig_idx)));
if (idx)
return idx;
table = &assembly->tables [MONO_TABLE_STANDALONESIG];
idx = table->next_idx ++;
table->rows ++;
alloc_table (table, table->rows);
values = table->values + idx * MONO_STAND_ALONE_SIGNATURE_SIZE;
values [MONO_STAND_ALONE_SIGNATURE] = sig_idx;
g_hash_table_insert (assembly->standalonesig_cache, GUINT_TO_POINTER (sig_idx), GUINT_TO_POINTER (idx));
return idx;
}
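/*
 * The 0x07 written first is the ECMA-335 LOCAL_SIG marker; it is
 * followed by the local count and one (optionally PINNED-prefixed)
 * type per local. For two locals "int32, pinned object" the encoded
 * blob body would be:
 *
 *	0x07 0x02 0x08 0x45 0x1c
 *
 * with 0x45 = ELEMENT_TYPE_PINNED and 0x1c = ELEMENT_TYPE_OBJECT.
 */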
static guint32
method_count_clauses (MonoReflectionILGen *ilgen)
{
guint32 num_clauses = 0;
int i;
MonoILExceptionInfo *ex_info;
for (i = 0; i < mono_array_length (ilgen->ex_handlers); ++i) {
ex_info = (MonoILExceptionInfo*)mono_array_addr (ilgen->ex_handlers, MonoILExceptionInfo, i);
if (ex_info->handlers)
num_clauses += mono_array_length (ex_info->handlers);
else
num_clauses++;
}
return num_clauses;
}
#ifndef DISABLE_REFLECTION_EMIT
static MonoExceptionClause*
method_encode_clauses (MonoImage *image, MonoDynamicImage *assembly, MonoReflectionILGen *ilgen, guint32 num_clauses)
{
MonoExceptionClause *clauses;
MonoExceptionClause *clause;
MonoILExceptionInfo *ex_info;
MonoILExceptionBlock *ex_block;
guint32 finally_start;
int i, j, clause_index;
clauses = image_g_new0 (image, MonoExceptionClause, num_clauses);
clause_index = 0;
for (i = mono_array_length (ilgen->ex_handlers) - 1; i >= 0; --i) {
ex_info = (MonoILExceptionInfo*)mono_array_addr (ilgen->ex_handlers, MonoILExceptionInfo, i);
finally_start = ex_info->start + ex_info->len;
if (!ex_info->handlers)
continue;
for (j = 0; j < mono_array_length (ex_info->handlers); ++j) {
ex_block = (MonoILExceptionBlock*)mono_array_addr (ex_info->handlers, MonoILExceptionBlock, j);
clause = &(clauses [clause_index]);
clause->flags = ex_block->type;
clause->try_offset = ex_info->start;
if (ex_block->type == MONO_EXCEPTION_CLAUSE_FINALLY)
clause->try_len = finally_start - ex_info->start;
else
clause->try_len = ex_info->len;
clause->handler_offset = ex_block->start;
clause->handler_len = ex_block->len;
if (ex_block->extype) {
clause->data.catch_class = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)ex_block->extype));
} else {
if (ex_block->type == MONO_EXCEPTION_CLAUSE_FILTER)
clause->data.filter_offset = ex_block->filter_offset;
else
clause->data.filter_offset = 0;
}
finally_start = ex_block->start + ex_block->len;
clause_index ++;
}
}
return clauses;
}
#endif /* !DISABLE_REFLECTION_EMIT */
static guint32
method_encode_code (MonoDynamicImage *assembly, ReflectionMethodBuilder *mb)
{
char flags = 0;
guint32 idx;
guint32 code_size;
gint32 max_stack, i;
gint32 num_locals = 0;
gint32 num_exception = 0;
gint maybe_small;
guint32 fat_flags;
char fat_header [12];
guint32 int_value;
guint16 short_value;
guint32 local_sig = 0;
guint32 header_size = 12;
MonoArray *code;
if ((mb->attrs & (METHOD_ATTRIBUTE_PINVOKE_IMPL | METHOD_ATTRIBUTE_ABSTRACT)) ||
(mb->iattrs & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME)))
return 0;
/*if (mb->name)
g_print ("Encode method %s\n", mono_string_to_utf8 (mb->name));*/
if (mb->ilgen) {
code = mb->ilgen->code;
code_size = mb->ilgen->code_len;
max_stack = mb->ilgen->max_stack;
num_locals = mb->ilgen->locals ? mono_array_length (mb->ilgen->locals) : 0;
if (mb->ilgen->ex_handlers)
num_exception = method_count_clauses (mb->ilgen);
} else {
code = mb->code;
if (code == NULL){
char *name = mono_string_to_utf8 (mb->name);
char *str = g_strdup_printf ("Method %s does not have any IL associated", name);
MonoException *exception = mono_get_exception_argument (NULL, str);
g_free (str);
g_free (name);
mono_raise_exception (exception);
}
code_size = mono_array_length (code);
max_stack = 8; /* we probably need to run a verifier on the code... */
}
stream_data_align (&assembly->code);
/* check for exceptions, maxstack, locals */
maybe_small = (max_stack <= 8) && (!num_locals) && (!num_exception);
if (maybe_small) {
if (code_size < 64 && !(code_size & 1)) {
flags = (code_size << 2) | 0x2;
} else if (code_size < 32 && (code_size & 1)) {
flags = (code_size << 2) | 0x6; /* LAMESPEC: see metadata.c */
} else {
goto fat_header;
}
idx = mono_image_add_stream_data (&assembly->code, &flags, 1);
/* add to the fixup todo list */
if (mb->ilgen && mb->ilgen->num_token_fixups)
mono_g_hash_table_insert (assembly->token_fixups, mb->ilgen, GUINT_TO_POINTER (idx + 1));
mono_image_add_stream_data (&assembly->code, mono_array_addr (code, char, 0), code_size);
return assembly->text_rva + idx;
}
fat_header:
if (num_locals)
local_sig = MONO_TOKEN_SIGNATURE | encode_locals (assembly, mb->ilgen);
/*
* FIXME: need to set also the header size in fat_flags.
* (and more sects and init locals flags)
*/
fat_flags = 0x03;
if (num_exception)
fat_flags |= METHOD_HEADER_MORE_SECTS;
if (mb->init_locals)
fat_flags |= METHOD_HEADER_INIT_LOCALS;
fat_header [0] = fat_flags;
fat_header [1] = (header_size / 4 ) << 4;
short_value = GUINT16_TO_LE (max_stack);
memcpy (fat_header + 2, &short_value, 2);
int_value = GUINT32_TO_LE (code_size);
memcpy (fat_header + 4, &int_value, 4);
int_value = GUINT32_TO_LE (local_sig);
memcpy (fat_header + 8, &int_value, 4);
idx = mono_image_add_stream_data (&assembly->code, fat_header, 12);
/* add to the fixup todo list */
if (mb->ilgen && mb->ilgen->num_token_fixups)
mono_g_hash_table_insert (assembly->token_fixups, mb->ilgen, GUINT_TO_POINTER (idx + 12));
mono_image_add_stream_data (&assembly->code, mono_array_addr (code, char, 0), code_size);
if (num_exception) {
unsigned char sheader [4];
MonoILExceptionInfo * ex_info;
MonoILExceptionBlock * ex_block;
int j;
stream_data_align (&assembly->code);
/* always use fat format for now */
sheader [0] = METHOD_HEADER_SECTION_FAT_FORMAT | METHOD_HEADER_SECTION_EHTABLE;
num_exception *= 6 * sizeof (guint32);
num_exception += 4; /* include the size of the header */
sheader [1] = num_exception & 0xff;
sheader [2] = (num_exception >> 8) & 0xff;
sheader [3] = (num_exception >> 16) & 0xff;
mono_image_add_stream_data (&assembly->code, (char*)sheader, 4);
/* fat header, so we are already aligned */
/* reverse order */
for (i = mono_array_length (mb->ilgen->ex_handlers) - 1; i >= 0; --i) {
ex_info = (MonoILExceptionInfo *)mono_array_addr (mb->ilgen->ex_handlers, MonoILExceptionInfo, i);
if (ex_info->handlers) {
int finally_start = ex_info->start + ex_info->len;
for (j = 0; j < mono_array_length (ex_info->handlers); ++j) {
guint32 val;
ex_block = (MonoILExceptionBlock*)mono_array_addr (ex_info->handlers, MonoILExceptionBlock, j);
/* the flags */
val = GUINT32_TO_LE (ex_block->type);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/* try offset */
val = GUINT32_TO_LE (ex_info->start);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/* FIXME: fault clauses probably need the same length fixup as finally */
if (ex_block->type == MONO_EXCEPTION_CLAUSE_FINALLY)
val = GUINT32_TO_LE (finally_start - ex_info->start);
else
val = GUINT32_TO_LE (ex_info->len);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/* handler offset */
val = GUINT32_TO_LE (ex_block->start);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/* handler len */
val = GUINT32_TO_LE (ex_block->len);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
finally_start = ex_block->start + ex_block->len;
if (ex_block->extype) {
val = mono_metadata_token_from_dor (mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)ex_block->extype)));
} else {
if (ex_block->type == MONO_EXCEPTION_CLAUSE_FILTER)
val = ex_block->filter_offset;
else
val = 0;
}
val = GUINT32_TO_LE (val);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/*g_print ("out clause %d: from %d len=%d, handler at %d, %d, finally_start=%d, ex_info->start=%d, ex_info->len=%d, ex_block->type=%d, j=%d, i=%d\n",
clause.flags, clause.try_offset, clause.try_len, clause.handler_offset, clause.handler_len, finally_start, ex_info->start, ex_info->len, ex_block->type, j, i);*/
}
} else {
g_error ("No clauses for ex info block %d", i);
}
}
}
return assembly->text_rva + idx;
}
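/*
 * Linearly scan column COL of TABLE_IDX for TOKEN and return the 1-based row
 * index, or 0 if not found. Row 0 of the values array is unused, which is why
 * the scan starts at table->values + table->columns.
 */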
static guint32
find_index_in_table (MonoDynamicImage *assembly, int table_idx, int col, guint32 token)
{
int i;
MonoDynamicTable *table;
guint32 *values;
table = &assembly->tables [table_idx];
g_assert (col < table->columns);
values = table->values + table->columns;
for (i = 1; i <= table->rows; ++i) {
if (values [col] == token)
return i;
values += table->columns;
}
return 0;
}
/*
* LOCKING: Acquires the loader lock.
*/
static MonoCustomAttrInfo*
lookup_custom_attr (MonoImage *image, gpointer member)
{
MonoCustomAttrInfo* res;
res = mono_image_property_lookup (image, member, MONO_PROP_DYNAMIC_CATTR);
if (!res)
return NULL;
return g_memdup (res, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * res->num_attrs);
}
static gboolean
custom_attr_visible (MonoImage *image, MonoReflectionCustomAttr *cattr)
{
/* FIXME: Need to do more checks */
if (cattr->ctor->method && (cattr->ctor->method->klass->image != image)) {
int visibility = cattr->ctor->method->klass->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK;
if ((visibility != TYPE_ATTRIBUTE_PUBLIC) && (visibility != TYPE_ATTRIBUTE_NESTED_PUBLIC))
return FALSE;
}
return TRUE;
}
static MonoCustomAttrInfo*
mono_custom_attrs_from_builders (MonoImage *alloc_img, MonoImage *image, MonoArray *cattrs)
{
int i, index, count, not_visible;
MonoCustomAttrInfo *ainfo;
MonoReflectionCustomAttr *cattr;
if (!cattrs)
return NULL;
/* FIXME: check in assembly the Run flag is set */
count = mono_array_length (cattrs);
/* Skip nonpublic attributes since MS.NET seems to do the same */
/* FIXME: This needs to be done more globally */
not_visible = 0;
for (i = 0; i < count; ++i) {
cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i);
if (!custom_attr_visible (image, cattr))
not_visible ++;
}
count -= not_visible;
ainfo = image_g_malloc0 (alloc_img, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * count);
ainfo->image = image;
ainfo->num_attrs = count;
ainfo->cached = alloc_img != NULL;
index = 0;
for (i = 0; i < count; ++i) {
cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i);
if (custom_attr_visible (image, cattr)) {
unsigned char *saved = mono_image_alloc (image, mono_array_length (cattr->data));
memcpy (saved, mono_array_addr (cattr->data, char, 0), mono_array_length (cattr->data));
ainfo->attrs [index].ctor = cattr->ctor->method;
ainfo->attrs [index].data = saved;
ainfo->attrs [index].data_size = mono_array_length (cattr->data);
index ++;
}
}
return ainfo;
}
#ifndef DISABLE_REFLECTION_EMIT
/*
* LOCKING: Acquires the loader lock.
*/
static void
mono_save_custom_attrs (MonoImage *image, void *obj, MonoArray *cattrs)
{
MonoCustomAttrInfo *ainfo, *tmp;
if (!cattrs || !mono_array_length (cattrs))
return;
ainfo = mono_custom_attrs_from_builders (image, image, cattrs);
mono_loader_lock ();
tmp = mono_image_property_lookup (image, obj, MONO_PROP_DYNAMIC_CATTR);
if (tmp)
mono_custom_attrs_free (tmp);
mono_image_property_insert (image, obj, MONO_PROP_DYNAMIC_CATTR, ainfo);
mono_loader_unlock ();
}
#endif
void
mono_custom_attrs_free (MonoCustomAttrInfo *ainfo)
{
if (!ainfo->cached)
g_free (ainfo);
}
/*
* idx is the table index of the object
* type is one of MONO_CUSTOM_ATTR_*
*/
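/*
 * The parent is stored as a HasCustomAttribute coded index:
 * (row index << MONO_CUSTOM_ATTR_BITS) | tag. For example, row 3 of a table
 * tagged MONO_CUSTOM_ATTR_METHODDEF would be encoded as
 * (3 << MONO_CUSTOM_ATTR_BITS) | MONO_CUSTOM_ATTR_METHODDEF.
 */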
static void
mono_image_add_cattrs (MonoDynamicImage *assembly, guint32 idx, guint32 type, MonoArray *cattrs)
{
MonoDynamicTable *table;
MonoReflectionCustomAttr *cattr;
guint32 *values;
guint32 count, i, token;
char blob_size [6];
char *p = blob_size;
/* it is legal to pass NULL for cattrs: this saves callers from having to check at every call site */
if (!cattrs)
return;
count = mono_array_length (cattrs);
table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
table->rows += count;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_CUSTOM_ATTR_SIZE;
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= type;
for (i = 0; i < count; ++i) {
cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i);
values [MONO_CUSTOM_ATTR_PARENT] = idx;
token = mono_image_create_token (assembly, (MonoObject*)cattr->ctor, FALSE, FALSE);
type = mono_metadata_token_index (token);
type <<= MONO_CUSTOM_ATTR_TYPE_BITS;
switch (mono_metadata_token_table (token)) {
case MONO_TABLE_METHOD:
type |= MONO_CUSTOM_ATTR_TYPE_METHODDEF;
break;
case MONO_TABLE_MEMBERREF:
type |= MONO_CUSTOM_ATTR_TYPE_MEMBERREF;
break;
default:
g_warning ("got wrong token in custom attr");
continue;
}
values [MONO_CUSTOM_ATTR_TYPE] = type;
p = blob_size;
mono_metadata_encode_value (mono_array_length (cattr->data), p, &p);
values [MONO_CUSTOM_ATTR_VALUE] = add_to_blob_cached (assembly, blob_size, p - blob_size,
mono_array_addr (cattr->data, char, 0), mono_array_length (cattr->data));
values += MONO_CUSTOM_ATTR_SIZE;
++table->next_idx;
}
}
static void
mono_image_add_decl_security (MonoDynamicImage *assembly, guint32 parent_token, MonoArray *permissions)
{
MonoDynamicTable *table;
guint32 *values;
guint32 count, i, idx;
MonoReflectionPermissionSet *perm;
if (!permissions)
return;
count = mono_array_length (permissions);
table = &assembly->tables [MONO_TABLE_DECLSECURITY];
table->rows += count;
alloc_table (table, table->rows);
for (i = 0; i < count; ++i) {
perm = (MonoReflectionPermissionSet*)mono_array_addr (permissions, MonoReflectionPermissionSet, i);
values = table->values + table->next_idx * MONO_DECL_SECURITY_SIZE;
idx = mono_metadata_token_index (parent_token);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
switch (mono_metadata_token_table (parent_token)) {
case MONO_TABLE_TYPEDEF:
idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
break;
case MONO_TABLE_METHOD:
idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
break;
case MONO_TABLE_ASSEMBLY:
idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY;
break;
default:
g_assert_not_reached ();
}
values [MONO_DECL_SECURITY_ACTION] = perm->action;
values [MONO_DECL_SECURITY_PARENT] = idx;
values [MONO_DECL_SECURITY_PERMISSIONSET] = add_mono_string_to_blob_cached (assembly, perm->pset);
++table->next_idx;
}
}
/*
* Fill in the MethodDef and ParamDef tables for a method.
* This is used for both normal methods and constructors.
*/
static void
mono_image_basic_method (ReflectionMethodBuilder *mb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint i, count;
/* room in this table is already allocated */
table = &assembly->tables [MONO_TABLE_METHOD];
*mb->table_idx = table->next_idx ++;
g_hash_table_insert (assembly->method_to_table_idx, mb->mhandle, GUINT_TO_POINTER ((*mb->table_idx)));
values = table->values + *mb->table_idx * MONO_METHOD_SIZE;
values [MONO_METHOD_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->name);
values [MONO_METHOD_FLAGS] = mb->attrs;
values [MONO_METHOD_IMPLFLAGS] = mb->iattrs;
values [MONO_METHOD_SIGNATURE] = method_builder_encode_signature (assembly, mb);
values [MONO_METHOD_RVA] = method_encode_code (assembly, mb);
table = &assembly->tables [MONO_TABLE_PARAM];
values [MONO_METHOD_PARAMLIST] = table->next_idx;
mono_image_add_decl_security (assembly,
mono_metadata_make_token (MONO_TABLE_METHOD, *mb->table_idx), mb->permissions);
if (mb->pinfo) {
MonoDynamicTable *mtable;
guint32 *mvalues;
mtable = &assembly->tables [MONO_TABLE_FIELDMARSHAL];
mvalues = mtable->values + mtable->next_idx * MONO_FIELD_MARSHAL_SIZE;
count = 0;
for (i = 0; i < mono_array_length (mb->pinfo); ++i) {
if (mono_array_get (mb->pinfo, gpointer, i))
count++;
}
table->rows += count;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_PARAM_SIZE;
for (i = 0; i < mono_array_length (mb->pinfo); ++i) {
MonoReflectionParamBuilder *pb;
if ((pb = mono_array_get (mb->pinfo, MonoReflectionParamBuilder*, i))) {
values [MONO_PARAM_FLAGS] = pb->attrs;
values [MONO_PARAM_SEQUENCE] = i;
if (pb->name != NULL) {
values [MONO_PARAM_NAME] = string_heap_insert_mstring (&assembly->sheap, pb->name);
} else {
values [MONO_PARAM_NAME] = 0;
}
values += MONO_PARAM_SIZE;
if (pb->marshal_info) {
mtable->rows++;
alloc_table (mtable, mtable->rows);
mvalues = mtable->values + mtable->rows * MONO_FIELD_MARSHAL_SIZE;
mvalues [MONO_FIELD_MARSHAL_PARENT] = (table->next_idx << MONO_HAS_FIELD_MARSHAL_BITS) | MONO_HAS_FIELD_MARSHAL_PARAMDEF;
mvalues [MONO_FIELD_MARSHAL_NATIVE_TYPE] = encode_marshal_blob (assembly, pb->marshal_info);
}
pb->table_idx = table->next_idx++;
if (pb->attrs & PARAM_ATTRIBUTE_HAS_DEFAULT) {
guint32 field_type = 0;
mtable = &assembly->tables [MONO_TABLE_CONSTANT];
mtable->rows ++;
alloc_table (mtable, mtable->rows);
mvalues = mtable->values + mtable->rows * MONO_CONSTANT_SIZE;
mvalues [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_PARAM | (pb->table_idx << MONO_HASCONSTANT_BITS);
mvalues [MONO_CONSTANT_VALUE] = encode_constant (assembly, pb->def_value, &field_type);
mvalues [MONO_CONSTANT_TYPE] = field_type;
mvalues [MONO_CONSTANT_PADDING] = 0;
}
}
}
}
}
#ifndef DISABLE_REFLECTION_EMIT
static void
reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb)
{
memset (rmb, 0, sizeof (ReflectionMethodBuilder));
rmb->ilgen = mb->ilgen;
rmb->rtype = mono_reflection_type_resolve_user_types ((MonoReflectionType*)mb->rtype);
rmb->parameters = mb->parameters;
rmb->generic_params = mb->generic_params;
rmb->generic_container = mb->generic_container;
rmb->opt_types = NULL;
rmb->pinfo = mb->pinfo;
rmb->attrs = mb->attrs;
rmb->iattrs = mb->iattrs;
rmb->call_conv = mb->call_conv;
rmb->code = mb->code;
rmb->type = mb->type;
rmb->name = mb->name;
rmb->table_idx = &mb->table_idx;
rmb->init_locals = mb->init_locals;
rmb->skip_visibility = FALSE;
rmb->return_modreq = mb->return_modreq;
rmb->return_modopt = mb->return_modopt;
rmb->param_modreq = mb->param_modreq;
rmb->param_modopt = mb->param_modopt;
rmb->permissions = mb->permissions;
rmb->mhandle = mb->mhandle;
rmb->nrefs = 0;
rmb->refs = NULL;
if (mb->dll) {
rmb->charset = mb->charset;
rmb->extra_flags = mb->extra_flags;
rmb->native_cc = mb->native_cc;
rmb->dllentry = mb->dllentry;
rmb->dll = mb->dll;
}
}
static void
reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb)
{
const char *name = mb->attrs & METHOD_ATTRIBUTE_STATIC ? ".cctor": ".ctor";
memset (rmb, 0, sizeof (ReflectionMethodBuilder));
rmb->ilgen = mb->ilgen;
rmb->rtype = mono_type_get_object (mono_domain_get (), &mono_defaults.void_class->byval_arg);
rmb->parameters = mb->parameters;
rmb->generic_params = NULL;
rmb->generic_container = NULL;
rmb->opt_types = NULL;
rmb->pinfo = mb->pinfo;
rmb->attrs = mb->attrs;
rmb->iattrs = mb->iattrs;
rmb->call_conv = mb->call_conv;
rmb->code = NULL;
rmb->type = mb->type;
rmb->name = mono_string_new (mono_domain_get (), name);
rmb->table_idx = &mb->table_idx;
rmb->init_locals = mb->init_locals;
rmb->skip_visibility = FALSE;
rmb->return_modreq = NULL;
rmb->return_modopt = NULL;
rmb->param_modreq = mb->param_modreq;
rmb->param_modopt = mb->param_modopt;
rmb->permissions = mb->permissions;
rmb->mhandle = mb->mhandle;
rmb->nrefs = 0;
rmb->refs = NULL;
}
static void
reflection_methodbuilder_from_dynamic_method (ReflectionMethodBuilder *rmb, MonoReflectionDynamicMethod *mb)
{
memset (rmb, 0, sizeof (ReflectionMethodBuilder));
rmb->ilgen = mb->ilgen;
rmb->rtype = mb->rtype;
rmb->parameters = mb->parameters;
rmb->generic_params = NULL;
rmb->generic_container = NULL;
rmb->opt_types = NULL;
rmb->pinfo = NULL;
rmb->attrs = mb->attrs;
rmb->iattrs = 0;
rmb->call_conv = mb->call_conv;
rmb->code = NULL;
rmb->type = (MonoObject *) mb->owner;
rmb->name = mb->name;
rmb->table_idx = NULL;
rmb->init_locals = mb->init_locals;
rmb->skip_visibility = mb->skip_visibility;
rmb->return_modreq = NULL;
rmb->return_modopt = NULL;
rmb->param_modreq = NULL;
rmb->param_modopt = NULL;
rmb->permissions = NULL;
rmb->mhandle = mb->mhandle;
rmb->nrefs = 0;
rmb->refs = NULL;
}
#endif
static void
mono_image_add_methodimpl (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb)
{
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)mb->type;
MonoDynamicTable *table;
guint32 *values;
guint32 tok;
if (!mb->override_method)
return;
table = &assembly->tables [MONO_TABLE_METHODIMPL];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_METHODIMPL_SIZE;
values [MONO_METHODIMPL_CLASS] = tb->table_idx;
values [MONO_METHODIMPL_BODY] = MONO_METHODDEFORREF_METHODDEF | (mb->table_idx << MONO_METHODDEFORREF_BITS);
tok = mono_image_create_token (assembly, (MonoObject*)mb->override_method, FALSE, FALSE);
switch (mono_metadata_token_table (tok)) {
case MONO_TABLE_MEMBERREF:
tok = (mono_metadata_token_index (tok) << MONO_METHODDEFORREF_BITS ) | MONO_METHODDEFORREF_METHODREF;
break;
case MONO_TABLE_METHOD:
tok = (mono_metadata_token_index (tok) << MONO_METHODDEFORREF_BITS ) | MONO_METHODDEFORREF_METHODDEF;
break;
default:
g_assert_not_reached ();
}
values [MONO_METHODIMPL_DECLARATION] = tok;
}
#ifndef DISABLE_REFLECTION_EMIT
static void
mono_image_get_method_info (MonoReflectionMethodBuilder *mb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
ReflectionMethodBuilder rmb;
int i;
reflection_methodbuilder_from_method_builder (&rmb, mb);
mono_image_basic_method (&rmb, assembly);
mb->table_idx = *rmb.table_idx;
if (mb->dll) { /* It's a P/Invoke method */
guint32 moduleref;
/* map CharSet values to on-disk values */
int ncharset = (mb->charset ? (mb->charset - 1) * 2 : 0);
int extra_flags = mb->extra_flags;
table = &assembly->tables [MONO_TABLE_IMPLMAP];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_IMPLMAP_SIZE;
values [MONO_IMPLMAP_FLAGS] = (mb->native_cc << 8) | ncharset | extra_flags;
values [MONO_IMPLMAP_MEMBER] = (mb->table_idx << 1) | 1; /* memberforwarded: method */
if (mb->dllentry)
values [MONO_IMPLMAP_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->dllentry);
else
values [MONO_IMPLMAP_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->name);
moduleref = string_heap_insert_mstring (&assembly->sheap, mb->dll);
if (!(values [MONO_IMPLMAP_SCOPE] = find_index_in_table (assembly, MONO_TABLE_MODULEREF, MONO_MODULEREF_NAME, moduleref))) {
table = &assembly->tables [MONO_TABLE_MODULEREF];
table->rows ++;
alloc_table (table, table->rows);
table->values [table->rows * MONO_MODULEREF_SIZE + MONO_MODULEREF_NAME] = moduleref;
values [MONO_IMPLMAP_SCOPE] = table->rows;
}
}
if (mb->generic_params) {
table = &assembly->tables [MONO_TABLE_GENERICPARAM];
table->rows += mono_array_length (mb->generic_params);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (mb->generic_params); ++i) {
guint32 owner = MONO_TYPEORMETHOD_METHOD | (mb->table_idx << MONO_TYPEORMETHOD_BITS);
mono_image_get_generic_param_info (
mono_array_get (mb->generic_params, gpointer, i), owner, assembly);
}
}
}
static void
mono_image_get_ctor_info (MonoDomain *domain, MonoReflectionCtorBuilder *mb, MonoDynamicImage *assembly)
{
ReflectionMethodBuilder rmb;
reflection_methodbuilder_from_ctor_builder (&rmb, mb);
mono_image_basic_method (&rmb, assembly);
mb->table_idx = *rmb.table_idx;
}
#endif
static char*
type_get_fully_qualified_name (MonoType *type)
{
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED);
}
static char*
type_get_qualified_name (MonoType *type, MonoAssembly *ass) {
MonoClass *klass;
MonoAssembly *ta;
klass = mono_class_from_mono_type (type);
if (!klass)
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_REFLECTION);
ta = klass->image->assembly;
if (ta->dynamic || (ta == ass)) {
if (klass->generic_class || klass->generic_container)
/* For generic type definitions, we want T, while REFLECTION returns T<K> */
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_FULL_NAME);
else
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_REFLECTION);
}
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED);
}
#ifndef DISABLE_REFLECTION_EMIT
/* field_image is the image against which any custom modifiers were encoded */
static guint32
fieldref_encode_signature (MonoDynamicImage *assembly, MonoImage *field_image, MonoType *type)
{
SigBuffer buf;
guint32 idx, i, token;
if (!assembly->save)
return 0;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x06); /* FIELD signature (ECMA-335 II.23.2.4) */
/* encode custom modifiers before the type */
if (type->num_mods) {
for (i = 0; i < type->num_mods; ++i) {
if (field_image) {
MonoClass *class = mono_class_get (field_image, type->modifiers [i].token);
g_assert (class);
token = mono_image_typedef_or_ref (assembly, &class->byval_arg);
} else {
token = type->modifiers [i].token;
}
if (type->modifiers [i].required)
sigbuffer_add_byte (&buf, MONO_TYPE_CMOD_REQD);
else
sigbuffer_add_byte (&buf, MONO_TYPE_CMOD_OPT);
sigbuffer_add_value (&buf, token);
}
}
encode_type (assembly, type, &buf);
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
#endif
static guint32
field_encode_signature (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb)
{
SigBuffer buf;
guint32 idx;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x06); /* FIELD signature */
/* custom modifiers are encoded before the field type */
encode_custom_modifiers (assembly, fb->modreq, fb->modopt, &buf);
encode_reflection_type (assembly, (MonoReflectionType*)fb->type, &buf);
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
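/*
 * Encode VAL as a length-prefixed blob and return its index in the blob heap.
 * *ret_type receives the corresponding ELEMENT_TYPE_* constant; enums and
 * generic instances are unwrapped to their underlying type first. Blob data
 * is stored little-endian regardless of the host byte order.
 */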
static guint32
encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type) {
char blob_size [64];
char *b = blob_size;
char *p, *box_val;
char* buf;
guint32 idx = 0, len = 0, dummy = 0;
#ifdef ARM_FPU_FPA
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
guint32 fpa_double [2];
guint32 *fpa_p;
#endif
#endif
p = buf = g_malloc (64);
if (!val) {
*ret_type = MONO_TYPE_CLASS;
len = 4;
box_val = (char*)&dummy;
} else {
box_val = ((char*)val) + sizeof (MonoObject);
*ret_type = val->vtable->klass->byval_arg.type;
}
handle_enum:
switch (*ret_type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
case MONO_TYPE_I1:
len = 1;
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
len = 2;
break;
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
len = 4;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
len = 8;
break;
case MONO_TYPE_R8:
len = 8;
#ifdef ARM_FPU_FPA
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
fpa_p = (guint32*)box_val;
fpa_double [0] = fpa_p [1];
fpa_double [1] = fpa_p [0];
box_val = (char*)fpa_double;
#endif
#endif
break;
case MONO_TYPE_VALUETYPE:
if (val->vtable->klass->enumtype) {
*ret_type = mono_class_enum_basetype (val->vtable->klass)->type;
goto handle_enum;
} else
g_error ("we can't encode valuetypes");
case MONO_TYPE_CLASS:
break;
case MONO_TYPE_STRING: {
MonoString *str = (MonoString*)val;
/* there is no signature */
len = str->length * 2;
mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
{
char *swapped = g_malloc (2 * mono_string_length (str));
const char *p = (const char*)mono_string_chars (str);
swap_with_size (swapped, p, 2, mono_string_length (str));
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len);
g_free (swapped);
}
#else
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len);
#endif
g_free (buf);
return idx;
}
case MONO_TYPE_GENERICINST:
*ret_type = val->vtable->klass->generic_class->container_class->byval_arg.type;
goto handle_enum;
default:
g_error ("we don't encode constant type 0x%02x yet", *ret_type);
}
/* there is no signature */
mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
idx = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size);
swap_with_size (blob_size, box_val, len, 1);
mono_image_add_stream_data (&assembly->blob, blob_size, len);
#else
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, box_val, len);
#endif
g_free (buf);
return idx;
}
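/*
 * Build the FieldMarshal blob for MINFO (ECMA-335 II.23.4): the native type
 * byte followed by type-specific data, e.g. the element count for fixed
 * arrays/strings or the marshaler type name and cookie for custom marshalers.
 * Returns the blob heap index of the cached signature.
 */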
static guint32
encode_marshal_blob (MonoDynamicImage *assembly, MonoReflectionMarshal *minfo) {
char *str;
SigBuffer buf;
guint32 idx, len;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, minfo->type);
switch (minfo->type) {
case MONO_NATIVE_BYVALTSTR:
case MONO_NATIVE_BYVALARRAY:
sigbuffer_add_value (&buf, minfo->count);
break;
case MONO_NATIVE_LPARRAY:
if (minfo->eltype || minfo->has_size) {
sigbuffer_add_value (&buf, minfo->eltype);
if (minfo->has_size) {
sigbuffer_add_value (&buf, minfo->param_num != -1? minfo->param_num: 0);
sigbuffer_add_value (&buf, minfo->count != -1? minfo->count: 0);
/* LAMESPEC: ElemMult is undocumented */
sigbuffer_add_value (&buf, minfo->param_num != -1? 1: 0);
}
}
break;
case MONO_NATIVE_SAFEARRAY:
if (minfo->eltype)
sigbuffer_add_value (&buf, minfo->eltype);
break;
case MONO_NATIVE_CUSTOM:
if (minfo->guid) {
str = mono_string_to_utf8 (minfo->guid);
len = strlen (str);
sigbuffer_add_value (&buf, len);
sigbuffer_add_mem (&buf, str, len);
g_free (str);
} else {
sigbuffer_add_value (&buf, 0);
}
/* native type name */
sigbuffer_add_value (&buf, 0);
/* custom marshaler type name */
if (minfo->marshaltype || minfo->marshaltyperef) {
if (minfo->marshaltyperef)
str = type_get_fully_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)minfo->marshaltyperef));
else
str = mono_string_to_utf8 (minfo->marshaltype);
len = strlen (str);
sigbuffer_add_value (&buf, len);
sigbuffer_add_mem (&buf, str, len);
g_free (str);
} else {
/* FIXME: Actually a bug, since this field is required. Punting for now ... */
sigbuffer_add_value (&buf, 0);
}
if (minfo->mcookie) {
str = mono_string_to_utf8 (minfo->mcookie);
len = strlen (str);
sigbuffer_add_value (&buf, len);
sigbuffer_add_mem (&buf, str, len);
g_free (str);
} else {
sigbuffer_add_value (&buf, 0);
}
break;
default:
break;
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static void
mono_image_get_field_info (MonoReflectionFieldBuilder *fb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
/* maybe this fixup should be done in the C# code */
if (fb->attrs & FIELD_ATTRIBUTE_LITERAL)
fb->attrs |= FIELD_ATTRIBUTE_HAS_DEFAULT;
table = &assembly->tables [MONO_TABLE_FIELD];
fb->table_idx = table->next_idx ++;
g_hash_table_insert (assembly->field_to_table_idx, fb->handle, GUINT_TO_POINTER (fb->table_idx));
values = table->values + fb->table_idx * MONO_FIELD_SIZE;
values [MONO_FIELD_NAME] = string_heap_insert_mstring (&assembly->sheap, fb->name);
values [MONO_FIELD_FLAGS] = fb->attrs;
values [MONO_FIELD_SIGNATURE] = field_encode_signature (assembly, fb);
if (fb->offset != -1) {
table = &assembly->tables [MONO_TABLE_FIELDLAYOUT];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_FIELD_LAYOUT_SIZE;
values [MONO_FIELD_LAYOUT_FIELD] = fb->table_idx;
values [MONO_FIELD_LAYOUT_OFFSET] = fb->offset;
}
if (fb->attrs & FIELD_ATTRIBUTE_LITERAL) {
guint32 field_type = 0;
table = &assembly->tables [MONO_TABLE_CONSTANT];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_CONSTANT_SIZE;
values [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_FIEDDEF | (fb->table_idx << MONO_HASCONSTANT_BITS);
values [MONO_CONSTANT_VALUE] = encode_constant (assembly, fb->def_value, &field_type);
values [MONO_CONSTANT_TYPE] = field_type;
values [MONO_CONSTANT_PADDING] = 0;
}
if (fb->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) {
guint32 rva_idx;
table = &assembly->tables [MONO_TABLE_FIELDRVA];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_FIELD_RVA_SIZE;
values [MONO_FIELD_RVA_FIELD] = fb->table_idx;
/*
* We store it in the code section because it's simpler for now.
*/
if (fb->rva_data) {
if (mono_array_length (fb->rva_data) >= 10)
stream_data_align (&assembly->code);
rva_idx = mono_image_add_stream_data (&assembly->code, mono_array_addr (fb->rva_data, char, 0), mono_array_length (fb->rva_data));
} else
rva_idx = mono_image_add_stream_zero (&assembly->code, mono_class_value_size (fb->handle->parent, NULL));
values [MONO_FIELD_RVA_RVA] = rva_idx + assembly->text_rva;
}
if (fb->marshal_info) {
table = &assembly->tables [MONO_TABLE_FIELDMARSHAL];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_FIELD_MARSHAL_SIZE;
values [MONO_FIELD_MARSHAL_PARENT] = (fb->table_idx << MONO_HAS_FIELD_MARSHAL_BITS) | MONO_HAS_FIELD_MARSHAL_FIELDSREF;
values [MONO_FIELD_MARSHAL_NATIVE_TYPE] = encode_marshal_blob (assembly, fb->marshal_info);
}
}
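/*
 * Build the PROPERTY signature (ECMA-335 II.23.2.5): the 0x08 calling
 * convention byte, the parameter count, the property type and then the
 * parameter types. The property type is taken from the getter's return type,
 * from the setter's last parameter, or from the builder itself when neither
 * accessor describes it.
 */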
static guint32
property_encode_signature (MonoDynamicImage *assembly, MonoReflectionPropertyBuilder *fb)
{
SigBuffer buf;
guint32 nparams = 0;
MonoReflectionMethodBuilder *mb = fb->get_method;
MonoReflectionMethodBuilder *smb = fb->set_method;
guint32 idx, i;
if (mb && mb->parameters)
nparams = mono_array_length (mb->parameters);
if (!mb && smb && smb->parameters)
nparams = mono_array_length (smb->parameters) - 1;
sigbuffer_init (&buf, 32);
sigbuffer_add_byte (&buf, 0x08);
sigbuffer_add_value (&buf, nparams);
if (mb) {
encode_reflection_type (assembly, (MonoReflectionType*)mb->rtype, &buf);
for (i = 0; i < nparams; ++i) {
MonoReflectionType *pt = mono_array_get (mb->parameters, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
} else if (smb && smb->parameters) {
/* the property type is the last param */
encode_reflection_type (assembly, mono_array_get (smb->parameters, MonoReflectionType*, nparams), &buf);
for (i = 0; i < nparams; ++i) {
MonoReflectionType *pt = mono_array_get (smb->parameters, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
} else {
encode_reflection_type (assembly, (MonoReflectionType*)fb->type, &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static void
mono_image_get_property_info (MonoReflectionPropertyBuilder *pb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint num_methods = 0;
guint32 semaidx;
/*
* we need to set things in the following tables:
* PROPERTYMAP (info already filled in _get_type_info ())
* PROPERTY (rows already preallocated in _get_type_info ())
* METHOD (method info already done with the generic method code)
* METHODSEMANTICS
*/
table = &assembly->tables [MONO_TABLE_PROPERTY];
pb->table_idx = table->next_idx ++;
values = table->values + pb->table_idx * MONO_PROPERTY_SIZE;
values [MONO_PROPERTY_NAME] = string_heap_insert_mstring (&assembly->sheap, pb->name);
values [MONO_PROPERTY_FLAGS] = pb->attrs;
values [MONO_PROPERTY_TYPE] = property_encode_signature (assembly, pb);
/* FIXME: we still don't handle 'other' methods */
if (pb->get_method) num_methods ++;
if (pb->set_method) num_methods ++;
table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
table->rows += num_methods;
alloc_table (table, table->rows);
if (pb->get_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_GETTER;
values [MONO_METHOD_SEMA_METHOD] = pb->get_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (pb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_PROPERTY;
}
if (pb->set_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_SETTER;
values [MONO_METHOD_SEMA_METHOD] = pb->set_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (pb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_PROPERTY;
}
}
static void
mono_image_get_event_info (MonoReflectionEventBuilder *eb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint num_methods = 0;
guint32 semaidx;
/*
* we need to set things in the following tables:
* EVENTMAP (info already filled in _get_type_info ())
* EVENT (rows already preallocated in _get_type_info ())
* METHOD (method info already done with the generic method code)
* METHODSEMANTICS
*/
table = &assembly->tables [MONO_TABLE_EVENT];
eb->table_idx = table->next_idx ++;
values = table->values + eb->table_idx * MONO_EVENT_SIZE;
values [MONO_EVENT_NAME] = string_heap_insert_mstring (&assembly->sheap, eb->name);
values [MONO_EVENT_FLAGS] = eb->attrs;
values [MONO_EVENT_TYPE] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (eb->type));
/*
* FIXME: we still don't handle 'other' methods
*/
if (eb->add_method) num_methods ++;
if (eb->remove_method) num_methods ++;
if (eb->raise_method) num_methods ++;
table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
table->rows += num_methods;
alloc_table (table, table->rows);
if (eb->add_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_ADD_ON;
values [MONO_METHOD_SEMA_METHOD] = eb->add_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
}
if (eb->remove_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_REMOVE_ON;
values [MONO_METHOD_SEMA_METHOD] = eb->remove_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
}
if (eb->raise_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_FIRE;
values [MONO_METHOD_SEMA_METHOD] = eb->raise_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
}
}
static void
encode_constraints (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 num_constraints, i;
guint32 *values;
guint32 table_idx;
table = &assembly->tables [MONO_TABLE_GENERICPARAMCONSTRAINT];
num_constraints = gparam->iface_constraints ?
mono_array_length (gparam->iface_constraints) : 0;
table->rows += num_constraints;
if (gparam->base_type)
table->rows++;
alloc_table (table, table->rows);
if (gparam->base_type) {
table_idx = table->next_idx ++;
values = table->values + table_idx * MONO_GENPARCONSTRAINT_SIZE;
values [MONO_GENPARCONSTRAINT_GENERICPAR] = owner;
values [MONO_GENPARCONSTRAINT_CONSTRAINT] = mono_image_typedef_or_ref (
assembly, mono_reflection_type_get_handle (gparam->base_type));
}
for (i = 0; i < num_constraints; i++) {
MonoReflectionType *constraint = mono_array_get (
gparam->iface_constraints, gpointer, i);
table_idx = table->next_idx ++;
values = table->values + table_idx * MONO_GENPARCONSTRAINT_SIZE;
values [MONO_GENPARCONSTRAINT_GENERICPAR] = owner;
values [MONO_GENPARCONSTRAINT_CONSTRAINT] = mono_image_typedef_or_ref (
assembly, mono_reflection_type_get_handle (constraint));
}
}
static void
mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly)
{
GenericParamTableEntry *entry;
/*
* The GenericParam table must be sorted according to the `owner' field.
* We need to do this sorting prior to writing the GenericParamConstraint
* table, since we have to use the final GenericParam table indices there
* and they must also be sorted.
*/
entry = g_new0 (GenericParamTableEntry, 1);
entry->owner = owner;
/* FIXME: track where gen_params should be freed and remove the GC root as well */
MOVING_GC_REGISTER (&entry->gparam);
entry->gparam = gparam;
g_ptr_array_add (assembly->gen_params, entry);
}
static void
write_generic_param_entry (MonoDynamicImage *assembly, GenericParamTableEntry *entry)
{
MonoDynamicTable *table;
MonoGenericParam *param;
guint32 *values;
guint32 table_idx;
table = &assembly->tables [MONO_TABLE_GENERICPARAM];
table_idx = table->next_idx ++;
values = table->values + table_idx * MONO_GENERICPARAM_SIZE;
param = mono_reflection_type_get_handle ((MonoReflectionType*)entry->gparam)->data.generic_param;
values [MONO_GENERICPARAM_OWNER] = entry->owner;
values [MONO_GENERICPARAM_FLAGS] = entry->gparam->attrs;
values [MONO_GENERICPARAM_NUMBER] = mono_generic_param_num (param);
values [MONO_GENERICPARAM_NAME] = string_heap_insert (&assembly->sheap, mono_generic_param_info (param)->name);
mono_image_add_cattrs (assembly, table_idx, MONO_CUSTOM_ATTR_GENERICPAR, entry->gparam->cattrs);
encode_constraints (entry->gparam, table_idx, assembly);
}
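/*
 * Return a ResolutionScope coded index for IMAGE, creating a ModuleRef row
 * for modules of the assembly being built and an AssemblyRef row (with
 * version, culture and public key token copied from the target) otherwise.
 * Results are cached in assembly->handleref.
 */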
static guint32
resolution_scope_from_image (MonoDynamicImage *assembly, MonoImage *image)
{
MonoDynamicTable *table;
guint32 token;
guint32 *values;
guint32 cols [MONO_ASSEMBLY_SIZE];
const char *pubkey;
guint32 publen;
if ((token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, image))))
return token;
if (image->assembly->dynamic && (image->assembly == assembly->image.assembly)) {
table = &assembly->tables [MONO_TABLE_MODULEREF];
token = table->next_idx ++;
table->rows ++;
alloc_table (table, table->rows);
values = table->values + token * MONO_MODULEREF_SIZE;
values [MONO_MODULEREF_NAME] = string_heap_insert (&assembly->sheap, image->module_name);
token <<= MONO_RESOLTION_SCOPE_BITS;
token |= MONO_RESOLTION_SCOPE_MODULEREF;
g_hash_table_insert (assembly->handleref, image, GUINT_TO_POINTER (token));
return token;
}
if (image->assembly->dynamic)
/* FIXME: */
memset (cols, 0, sizeof (cols));
else {
/* image->assembly->image is the manifest module */
image = image->assembly->image;
mono_metadata_decode_row (&image->tables [MONO_TABLE_ASSEMBLY], 0, cols, MONO_ASSEMBLY_SIZE);
}
table = &assembly->tables [MONO_TABLE_ASSEMBLYREF];
token = table->next_idx ++;
table->rows ++;
alloc_table (table, table->rows);
values = table->values + token * MONO_ASSEMBLYREF_SIZE;
values [MONO_ASSEMBLYREF_NAME] = string_heap_insert (&assembly->sheap, image->assembly_name);
values [MONO_ASSEMBLYREF_MAJOR_VERSION] = cols [MONO_ASSEMBLY_MAJOR_VERSION];
values [MONO_ASSEMBLYREF_MINOR_VERSION] = cols [MONO_ASSEMBLY_MINOR_VERSION];
values [MONO_ASSEMBLYREF_BUILD_NUMBER] = cols [MONO_ASSEMBLY_BUILD_NUMBER];
values [MONO_ASSEMBLYREF_REV_NUMBER] = cols [MONO_ASSEMBLY_REV_NUMBER];
values [MONO_ASSEMBLYREF_FLAGS] = 0;
values [MONO_ASSEMBLYREF_CULTURE] = 0;
values [MONO_ASSEMBLYREF_HASH_VALUE] = 0;
if (strcmp ("", image->assembly->aname.culture)) {
values [MONO_ASSEMBLYREF_CULTURE] = string_heap_insert (&assembly->sheap,
image->assembly->aname.culture);
}
if ((pubkey = mono_image_get_public_key (image, &publen))) {
guchar pubtoken [9];
pubtoken [0] = 8;
mono_digest_get_public_token (pubtoken + 1, (guchar*)pubkey, publen);
values [MONO_ASSEMBLYREF_PUBLIC_KEY] = mono_image_add_stream_data (&assembly->blob, (char*)pubtoken, 9);
} else {
values [MONO_ASSEMBLYREF_PUBLIC_KEY] = 0;
}
token <<= MONO_RESOLTION_SCOPE_BITS;
token |= MONO_RESOLTION_SCOPE_ASSEMBLYREF;
g_hash_table_insert (assembly->handleref, image, GUINT_TO_POINTER (token));
return token;
}
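/*
 * Return a TypeSpec coded token for TYPE, or 0 when the type can be
 * represented by a plain TypeRef/TypeDef. Only constructed types (pointers,
 * function pointers, arrays, generic instances, type variables) and open
 * generic definitions need a TypeSpec row.
 */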
static guint32
create_typespec (MonoDynamicImage *assembly, MonoType *type)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token;
SigBuffer buf;
if ((token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typespec, type))))
return token;
sigbuffer_init (&buf, 32);
switch (type->type) {
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
case MONO_TYPE_GENERICINST:
encode_type (assembly, type, &buf);
break;
case MONO_TYPE_CLASS:
case MONO_TYPE_VALUETYPE: {
MonoClass *k = mono_class_from_mono_type (type);
if (!k || !k->generic_container) {
sigbuffer_free (&buf);
return 0;
}
encode_type (assembly, type, &buf);
break;
}
default:
sigbuffer_free (&buf);
return 0;
}
table = &assembly->tables [MONO_TABLE_TYPESPEC];
if (assembly->save) {
token = sigbuffer_add_to_blob_cached (assembly, &buf);
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_TYPESPEC_SIZE;
values [MONO_TYPESPEC_SIGNATURE] = token;
}
sigbuffer_free (&buf);
token = MONO_TYPEDEFORREF_TYPESPEC | (table->next_idx << MONO_TYPEDEFORREF_BITS);
g_hash_table_insert (assembly->typespec, type, GUINT_TO_POINTER(token));
table->next_idx ++;
return token;
}
static guint32
mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, scope, enclosing;
MonoClass *klass;
/* if the type requires a typespec, we must try that first */
if (try_typespec && (token = create_typespec (assembly, type)))
return token;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typeref, type));
if (token)
return token;
klass = mono_class_from_mono_type (type);
g_assert (klass);
/*
* If it's in the same module and not a generic type parameter:
*/
if ((klass->image == &assembly->image) && (type->type != MONO_TYPE_VAR) &&
(type->type != MONO_TYPE_MVAR)) {
MonoReflectionTypeBuilder *tb = klass->reflection_info;
token = MONO_TYPEDEFORREF_TYPEDEF | (tb->table_idx << MONO_TYPEDEFORREF_BITS);
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), klass->reflection_info);
return token;
}
if (klass->nested_in) {
enclosing = mono_image_typedef_or_ref_full (assembly, &klass->nested_in->byval_arg, FALSE);
/* get the typeref idx of the enclosing type */
enclosing >>= MONO_TYPEDEFORREF_BITS;
scope = (enclosing << MONO_RESOLTION_SCOPE_BITS) | MONO_RESOLTION_SCOPE_TYPEREF;
} else {
scope = resolution_scope_from_image (assembly, klass->image);
}
table = &assembly->tables [MONO_TABLE_TYPEREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_TYPEREF_SIZE;
values [MONO_TYPEREF_SCOPE] = scope;
values [MONO_TYPEREF_NAME] = string_heap_insert (&assembly->sheap, klass->name);
values [MONO_TYPEREF_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space);
}
token = MONO_TYPEDEFORREF_TYPEREF | (table->next_idx << MONO_TYPEDEFORREF_BITS); /* typeref */
g_hash_table_insert (assembly->typeref, type, GUINT_TO_POINTER(token));
table->next_idx ++;
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), klass->reflection_info);
return token;
}
/*
* Despite the name, we handle also TypeSpec (with the above helper).
*/
static guint32
mono_image_typedef_or_ref (MonoDynamicImage *assembly, MonoType *type)
{
return mono_image_typedef_or_ref_full (assembly, type, TRUE);
}
#ifndef DISABLE_REFLECTION_EMIT
/*
* Insert a memberref row into the metadata: the token that points to the
* memberref is returned. Caching is done in the caller (mono_image_get_methodref_token() or
* mono_image_get_fieldref_token()).
* The sig param is an index to an already built signature.
*/
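/*
 * The MemberRef parent is a MemberRefParent coded index: the low bits carry
 * the table tag and the remaining bits the row, so e.g. a TypeRef in row 5
 * becomes MONO_MEMBERREF_PARENT_TYPEREF | (5 << MONO_MEMBERREF_PARENT_BITS).
 */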
static guint32
mono_image_get_memberref_token (MonoDynamicImage *assembly, MonoType *type, const char *name, guint32 sig)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, pclass;
guint32 parent;
parent = mono_image_typedef_or_ref (assembly, type);
switch (parent & MONO_TYPEDEFORREF_MASK) {
case MONO_TYPEDEFORREF_TYPEREF:
pclass = MONO_MEMBERREF_PARENT_TYPEREF;
break;
case MONO_TYPEDEFORREF_TYPESPEC:
pclass = MONO_MEMBERREF_PARENT_TYPESPEC;
break;
case MONO_TYPEDEFORREF_TYPEDEF:
pclass = MONO_MEMBERREF_PARENT_TYPEDEF;
break;
default:
g_warning ("unknown typeref or def token 0x%08x for %s", parent, name);
return 0;
}
/* extract the index */
parent >>= MONO_TYPEDEFORREF_BITS;
table = &assembly->tables [MONO_TABLE_MEMBERREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_MEMBERREF_SIZE;
values [MONO_MEMBERREF_CLASS] = pclass | (parent << MONO_MEMBERREF_PARENT_BITS);
values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name);
values [MONO_MEMBERREF_SIGNATURE] = sig;
}
token = MONO_TOKEN_MEMBER_REF | table->next_idx;
table->next_idx ++;
return token;
}
static guint32
mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod *method, gboolean create_typespec)
{
guint32 token;
MonoMethodSignature *sig;
create_typespec = create_typespec && method->is_generic && method->klass->image != &assembly->image;
if (create_typespec) {
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, GUINT_TO_POINTER (GPOINTER_TO_UINT (method) + 1)));
if (token)
return token;
}
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
if (token && !create_typespec)
return token;
g_assert (!method->is_inflated);
if (!token) {
/*
* A methodref signature can't contain an unmanaged calling convention.
*/
sig = mono_metadata_signature_dup (mono_method_signature (method));
if ((sig->call_convention != MONO_CALL_DEFAULT) && (sig->call_convention != MONO_CALL_VARARG))
sig->call_convention = MONO_CALL_DEFAULT;
token = mono_image_get_memberref_token (assembly, &method->klass->byval_arg,
method->name, method_encode_signature (assembly, sig));
g_free (sig);
g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
}
if (create_typespec) {
MonoDynamicTable *table = &assembly->tables [MONO_TABLE_METHODSPEC];
g_assert (mono_metadata_token_table (token) == MONO_TABLE_MEMBERREF);
token = (mono_metadata_token_index (token) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;
if (assembly->save) {
guint32 *values;
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
values [MONO_METHODSPEC_METHOD] = token;
values [MONO_METHODSPEC_SIGNATURE] = encode_generic_method_sig (assembly, &mono_method_get_generic_container (method)->context);
}
token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
table->next_idx ++;
/* methodspec and memberref tokens are different */
g_hash_table_insert (assembly->handleref, GUINT_TO_POINTER (GPOINTER_TO_UINT (method) + 1), GUINT_TO_POINTER (token));
return token;
}
return token;
}
static guint32
mono_image_get_methodref_token_for_methodbuilder (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *method)
{
guint32 token;
ReflectionMethodBuilder rmb;
char *name;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
if (token)
return token;
name = mono_string_to_utf8 (method->name);
reflection_methodbuilder_from_method_builder (&rmb, method);
/*
* A methodref signature can't contain an unmanaged calling convention.
* Since some flags are encoded as part of call_conv, we must mask them out before comparing.
*/
if ((rmb.call_conv & ~0x60) != MONO_CALL_DEFAULT && (rmb.call_conv & ~0x60) != MONO_CALL_VARARG)
rmb.call_conv = (rmb.call_conv & 0x60) | MONO_CALL_DEFAULT;
token = mono_image_get_memberref_token (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)rmb.type),
name, method_builder_encode_signature (assembly, &rmb));
g_free (name);
g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_varargs_method_token (MonoDynamicImage *assembly, guint32 original,
const gchar *name, guint32 sig)
{
MonoDynamicTable *table;
guint32 token;
guint32 *values;
table = &assembly->tables [MONO_TABLE_MEMBERREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_MEMBERREF_SIZE;
values [MONO_MEMBERREF_CLASS] = original;
values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name);
values [MONO_MEMBERREF_SIGNATURE] = sig;
}
token = MONO_TOKEN_MEMBER_REF | table->next_idx;
table->next_idx ++;
return token;
}
static guint32
encode_generic_method_definition_sig (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb)
{
SigBuffer buf;
int i;
guint32 nparams = mono_array_length (mb->generic_params);
guint32 idx;
if (!assembly->save)
return 0;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0xa); /* GENERICINST calling convention */
sigbuffer_add_value (&buf, nparams);
for (i = 0; i < nparams; i++) {
sigbuffer_add_value (&buf, MONO_TYPE_MVAR);
sigbuffer_add_value (&buf, i);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static guint32
mono_image_get_methodspec_token_for_generic_method_definition (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, mtoken = 0;
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->methodspec, mb));
if (token)
return token;
table = &assembly->tables [MONO_TABLE_METHODSPEC];
mtoken = mono_image_get_methodref_token_for_methodbuilder (assembly, mb);
switch (mono_metadata_token_table (mtoken)) {
case MONO_TABLE_MEMBERREF:
mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;
break;
case MONO_TABLE_METHOD:
mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODDEF;
break;
default:
g_assert_not_reached ();
}
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
values [MONO_METHODSPEC_METHOD] = mtoken;
values [MONO_METHODSPEC_SIGNATURE] = encode_generic_method_definition_sig (assembly, mb);
}
token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
table->next_idx ++;
mono_g_hash_table_insert (assembly->methodspec, mb, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_methodbuilder_token (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb, gboolean create_methodspec)
{
guint32 token;
if (mb->generic_params && create_methodspec)
return mono_image_get_methodspec_token_for_generic_method_definition (assembly, mb);
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, mb));
if (token)
return token;
token = mono_image_get_methodref_token_for_methodbuilder (assembly, mb);
g_hash_table_insert (assembly->handleref, mb, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_ctorbuilder_token (MonoDynamicImage *assembly, MonoReflectionCtorBuilder *mb)
{
guint32 token;
ReflectionMethodBuilder rmb;
char *name;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, mb));
if (token)
return token;
reflection_methodbuilder_from_ctor_builder (&rmb, mb);
name = mono_string_to_utf8 (rmb.name);
token = mono_image_get_memberref_token (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)rmb.type),
name, method_builder_encode_signature (assembly, &rmb));
g_free (name);
g_hash_table_insert (assembly->handleref, mb, GUINT_TO_POINTER(token));
return token;
}
#endif
static gboolean
is_field_on_inst (MonoClassField *field)
{
return (field->parent->generic_class && field->parent->generic_class->is_dynamic && ((MonoDynamicGenericClass*)field->parent->generic_class)->fields);
}
/*
* If FIELD is a field of a MonoDynamicGenericClass, return its non-inflated type.
*/
static MonoType*
get_field_on_inst_generic_type (MonoClassField *field)
{
MonoDynamicGenericClass *dgclass;
int field_index;
g_assert (is_field_on_inst (field));
dgclass = (MonoDynamicGenericClass*)field->parent->generic_class;
field_index = field - dgclass->fields;
g_assert (field_index >= 0 && field_index < dgclass->count_fields);
return dgclass->field_generic_types [field_index];
}
#ifndef DISABLE_REFLECTION_EMIT
static guint32
mono_image_get_fieldref_token (MonoDynamicImage *assembly, MonoReflectionField *f)
{
MonoType *type;
guint32 token;
MonoClassField *field;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, f));
if (token)
return token;
g_assert (f->field->parent);
field = f->field;
if (field->parent->generic_class && field->parent->generic_class->container_class && field->parent->generic_class->container_class->fields) {
int index = field - field->parent->fields;
type = field->parent->generic_class->container_class->fields [index].type;
} else {
if (is_field_on_inst (f->field))
type = get_field_on_inst_generic_type (f->field);
else
type = f->field->type;
}
token = mono_image_get_memberref_token (assembly, &f->field->parent->byval_arg,
mono_field_get_name (f->field),
fieldref_encode_signature (assembly, field->parent->image, type));
g_hash_table_insert (assembly->handleref, f, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_field_on_inst_token (MonoDynamicImage *assembly, MonoReflectionFieldOnTypeBuilderInst *f)
{
guint32 token;
MonoClass *klass;
MonoGenericClass *gclass;
MonoDynamicGenericClass *dgclass;
MonoReflectionFieldBuilder *fb = f->fb;
MonoType *type;
char *name;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, f));
if (token)
return token;
type = mono_reflection_type_get_handle ((MonoReflectionType*)f->inst);
klass = mono_class_from_mono_type (type);
gclass = type->data.generic_class;
g_assert (gclass->is_dynamic);
dgclass = (MonoDynamicGenericClass *) gclass;
name = mono_string_to_utf8 (fb->name);
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name,
field_encode_signature (assembly, fb));
g_free (name);
g_hash_table_insert (assembly->handleref, f, GUINT_TO_POINTER (token));
return token;
}
static guint32
mono_image_get_ctor_on_inst_token (MonoDynamicImage *assembly, MonoReflectionCtorOnTypeBuilderInst *c, gboolean create_methodspec)
{
guint32 sig, token;
MonoClass *klass;
MonoGenericClass *gclass;
MonoDynamicGenericClass *dgclass;
MonoReflectionCtorBuilder *cb = c->cb;
ReflectionMethodBuilder rmb;
MonoType *type;
char *name;
/* A ctor cannot be a generic method, so we can ignore create_methodspec */
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, c));
if (token)
return token;
type = mono_reflection_type_get_handle ((MonoReflectionType*)c->inst);
klass = mono_class_from_mono_type (type);
gclass = type->data.generic_class;
g_assert (gclass->is_dynamic);
dgclass = (MonoDynamicGenericClass *) gclass;
reflection_methodbuilder_from_ctor_builder (&rmb, cb);
name = mono_string_to_utf8 (rmb.name);
sig = method_builder_encode_signature (assembly, &rmb);
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, sig);
g_free (name);
g_hash_table_insert (assembly->handleref, c, GUINT_TO_POINTER (token));
return token;
}
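/*
 * Resolve M to a runtime MonoMethod. If the builder carries explicit method
 * type arguments, the declaring definition is inflated with a generic context
 * combining the class instantiation (if any) with those arguments.
 */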
static MonoMethod*
mono_reflection_method_on_tb_inst_get_handle (MonoReflectionMethodOnTypeBuilderInst *m)
{
MonoClass *klass;
MonoGenericContext tmp_context;
MonoType **type_argv;
MonoGenericInst *ginst;
MonoMethod *method, *inflated;
int count, i;
method = inflate_method (m->inst, (MonoObject*)m->mb);
klass = method->klass;
if (m->method_args == NULL)
return method;
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
count = mono_array_length (m->method_args);
type_argv = g_new0 (MonoType *, count);
for (i = 0; i < count; i++) {
MonoReflectionType *garg = mono_array_get (m->method_args, gpointer, i);
type_argv [i] = mono_reflection_type_get_handle (garg);
}
ginst = mono_metadata_get_generic_inst (count, type_argv);
g_free (type_argv);
tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
tmp_context.method_inst = ginst;
inflated = mono_class_inflate_generic_method (method, &tmp_context);
return inflated;
}
static guint32
mono_image_get_method_on_inst_token (MonoDynamicImage *assembly, MonoReflectionMethodOnTypeBuilderInst *m, gboolean create_methodspec)
{
guint32 sig, token;
MonoClass *klass;
MonoGenericClass *gclass;
MonoReflectionMethodBuilder *mb = m->mb;
ReflectionMethodBuilder rmb;
MonoType *type;
char *name;
if (m->method_args) {
MonoMethod *inflated;
inflated = mono_reflection_method_on_tb_inst_get_handle (m);
if (create_methodspec)
token = mono_image_get_methodspec_token (assembly, inflated);
else
token = mono_image_get_inflated_method_token (assembly, inflated);
return token;
}
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, m));
if (token)
return token;
type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst);
klass = mono_class_from_mono_type (type);
gclass = type->data.generic_class;
g_assert (gclass->is_dynamic);
reflection_methodbuilder_from_method_builder (&rmb, mb);
name = mono_string_to_utf8 (rmb.name);
sig = method_builder_encode_signature (assembly, &rmb);
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, sig);
g_free (name);
g_hash_table_insert (assembly->handleref, m, GUINT_TO_POINTER (token));
return token;
}
static guint32
encode_generic_method_sig (MonoDynamicImage *assembly, MonoGenericContext *context)
{
SigBuffer buf;
int i;
guint32 nparams = context->method_inst->type_argc;
guint32 idx;
if (!assembly->save)
return 0;
sigbuffer_init (&buf, 32);
/*
* FIXME: vararg, explicit_this, different call_conv values...
*/
sigbuffer_add_value (&buf, 0xa); /* 0xa is the GENERICINST calling convention */
sigbuffer_add_value (&buf, nparams);
for (i = 0; i < nparams; i++)
encode_type (assembly, context->method_inst->type_argv [i], &buf);
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
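/*
 * Emit a MethodSpec row for an inflated generic method: a MethodDefOrRef
 * coded index naming the generic definition plus a GENERICINST signature
 * listing the method type arguments. Non-generic inflated methods only need
 * the MemberRef token.
 */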
static guint32
method_encode_methodspec (MonoDynamicImage *assembly, MonoMethod *method)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, mtoken = 0, sig;
MonoMethodInflated *imethod;
MonoMethod *declaring;
table = &assembly->tables [MONO_TABLE_METHODSPEC];
g_assert (method->is_inflated);
imethod = (MonoMethodInflated *) method;
declaring = imethod->declaring;
sig = method_encode_signature (assembly, mono_method_signature (declaring));
mtoken = mono_image_get_memberref_token (assembly, &method->klass->byval_arg, declaring->name, sig);
if (!mono_method_signature (declaring)->generic_param_count)
return mtoken;
switch (mono_metadata_token_table (mtoken)) {
case MONO_TABLE_MEMBERREF:
mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;
break;
case MONO_TABLE_METHOD:
mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODDEF;
break;
default:
g_assert_not_reached ();
}
sig = encode_generic_method_sig (assembly, mono_method_get_context (method));
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
values [MONO_METHODSPEC_METHOD] = mtoken;
values [MONO_METHODSPEC_SIGNATURE] = sig;
}
token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
table->next_idx ++;
return token;
}
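/*
 * mono_image_get_methodspec_token:
 *
 * Return a token for the inflated method METHOD, cached in
 * assembly->handleref: a MethodSpec token for generic methods, a plain
 * MemberRef token otherwise.
 */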
static guint32
mono_image_get_methodspec_token (MonoDynamicImage *assembly, MonoMethod *method)
{
MonoMethodInflated *imethod;
guint32 token;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
if (token)
return token;
g_assert (method->is_inflated);
imethod = (MonoMethodInflated *) method;
if (mono_method_signature (imethod->declaring)->generic_param_count) {
token = method_encode_methodspec (assembly, method);
} else {
guint32 sig = method_encode_signature (
assembly, mono_method_signature (imethod->declaring));
token = mono_image_get_memberref_token (
assembly, &method->klass->byval_arg, method->name, sig);
}
g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
return token;
}
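/*
 * mono_image_get_inflated_method_token:
 *
 * Return a MemberRef token for the inflated method M, encoding the
 * signature of its generic definition.
 */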
static guint32
mono_image_get_inflated_method_token (MonoDynamicImage *assembly, MonoMethod *m)
{
MonoMethodInflated *imethod = (MonoMethodInflated *) m;
guint32 sig, token;
sig = method_encode_signature (assembly, mono_method_signature (imethod->declaring));
token = mono_image_get_memberref_token (
assembly, &m->klass->byval_arg, m->name, sig);
return token;
}
static guint32
create_generic_typespec (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb)
{
MonoDynamicTable *table;
MonoClass *klass;
MonoType *type;
guint32 *values;
guint32 token;
SigBuffer buf;
int count, i;
/*
* We're creating a TypeSpec for the TypeBuilder of a generic type declaration,
* i.e. what we'd normally use as the generic type in a TypeSpec signature.
* Because of this, we must not insert it into the `typeref' hash table.
*/
type = mono_reflection_type_get_handle ((MonoReflectionType*)tb);
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typespec, type));
if (token)
return token;
sigbuffer_init (&buf, 32);
g_assert (tb->generic_params);
klass = mono_class_from_mono_type (type);
if (tb->generic_container)
mono_reflection_create_generic_class (tb);
sigbuffer_add_value (&buf, MONO_TYPE_GENERICINST);
g_assert (klass->generic_container);
sigbuffer_add_value (&buf, klass->byval_arg.type);
sigbuffer_add_value (&buf, mono_image_typedef_or_ref_full (assembly, &klass->byval_arg, FALSE));
count = mono_array_length (tb->generic_params);
sigbuffer_add_value (&buf, count);
for (i = 0; i < count; i++) {
MonoReflectionGenericParam *gparam;
gparam = mono_array_get (tb->generic_params, MonoReflectionGenericParam *, i);
encode_type (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)gparam), &buf);
}
table = &assembly->tables [MONO_TABLE_TYPESPEC];
if (assembly->save) {
token = sigbuffer_add_to_blob_cached (assembly, &buf);
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_TYPESPEC_SIZE;
values [MONO_TYPESPEC_SIGNATURE] = token;
}
sigbuffer_free (&buf);
token = MONO_TYPEDEFORREF_TYPESPEC | (table->next_idx << MONO_TYPEDEFORREF_BITS);
g_hash_table_insert (assembly->typespec, type, GUINT_TO_POINTER(token));
table->next_idx ++;
return token;
}
/*
* Return a copy of TYPE, adding the custom modifiers in MODREQ and MODOPT.
*/
static MonoType*
add_custom_modifiers (MonoDynamicImage *assembly, MonoType *type, MonoArray *modreq, MonoArray *modopt)
{
int i, count, len, pos;
MonoType *t;
count = 0;
if (modreq)
count += mono_array_length (modreq);
if (modopt)
count += mono_array_length (modopt);
if (count == 0)
return mono_metadata_type_dup (NULL, type);
len = MONO_SIZEOF_TYPE + ((gint32)count) * sizeof (MonoCustomMod);
t = g_malloc (len);
memcpy (t, type, MONO_SIZEOF_TYPE);
t->num_mods = count;
pos = 0;
if (modreq) {
for (i = 0; i < mono_array_length (modreq); ++i) {
MonoType *mod = mono_type_array_get_and_resolve (modreq, i);
t->modifiers [pos].required = 1;
t->modifiers [pos].token = mono_image_typedef_or_ref (assembly, mod);
pos ++;
}
}
if (modopt) {
for (i = 0; i < mono_array_length (modopt); ++i) {
MonoType *mod = mono_type_array_get_and_resolve (modopt, i);
t->modifiers [pos].required = 0;
t->modifiers [pos].token = mono_image_typedef_or_ref (assembly, mod);
pos ++;
}
}
return t;
}
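/*
 * mono_image_get_generic_field_token:
 *
 * Return a MemberRef token for the field builder FB defined on a generic
 * TypeBuilder, using a TypeSpec for the instantiated owner as the parent.
 * Tokens are cached in assembly->handleref.
 */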
static guint32
mono_image_get_generic_field_token (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb)
{
MonoDynamicTable *table;
MonoClass *klass;
MonoType *custom = NULL;
guint32 *values;
guint32 token, pclass, parent, sig;
gchar *name;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, fb));
if (token)
return token;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle (fb->typeb));
name = mono_string_to_utf8 (fb->name);
/* fb->type does not include the custom modifiers */
/* FIXME: We should do this in one place when a fieldbuilder is created */
if (fb->modreq || fb->modopt) {
custom = add_custom_modifiers (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type), fb->modreq, fb->modopt);
sig = fieldref_encode_signature (assembly, NULL, custom);
g_free (custom);
} else {
sig = fieldref_encode_signature (assembly, NULL, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type));
}
parent = create_generic_typespec (assembly, (MonoReflectionTypeBuilder *) fb->typeb);
g_assert ((parent & MONO_TYPEDEFORREF_MASK) == MONO_TYPEDEFORREF_TYPESPEC);
pclass = MONO_MEMBERREF_PARENT_TYPESPEC;
parent >>= MONO_TYPEDEFORREF_BITS;
table = &assembly->tables [MONO_TABLE_MEMBERREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_MEMBERREF_SIZE;
values [MONO_MEMBERREF_CLASS] = pclass | (parent << MONO_MEMBERREF_PARENT_BITS);
values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name);
values [MONO_MEMBERREF_SIGNATURE] = sig;
}
token = MONO_TOKEN_MEMBER_REF | table->next_idx;
table->next_idx ++;
g_hash_table_insert (assembly->handleref, fb, GUINT_TO_POINTER(token));
g_free (name);
return token;
}
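/*
 * mono_reflection_encode_sighelper:
 *
 * Encode the standalone signature blob for HELPER. The leading byte holds
 * the calling convention flags of ECMA-335 II.23.2.3: HASTHIS is 0x20,
 * EXPLICITTHIS is 0x40 and VARARG is 0x05 for managed signatures, while the
 * unmanaged conventions start at C (0x01), hence the unmanaged_call_conv - 1
 * mapping below. Returns the blob heap index, or 0 when not saving.
 */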
static guint32
mono_reflection_encode_sighelper (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper)
{
SigBuffer buf;
guint32 nargs;
guint32 i, idx;
if (!assembly->save)
return 0;
/* FIXME: this means SignatureHelper.SignatureHelpType.HELPER_METHOD */
g_assert (helper->type == 2);
if (helper->arguments)
nargs = mono_array_length (helper->arguments);
else
nargs = 0;
sigbuffer_init (&buf, 32);
/* Encode calling convention */
/* Change Any to Standard */
if ((helper->call_conv & 0x03) == 0x03)
helper->call_conv = 0x01;
/* explicit_this implies has_this */
if (helper->call_conv & 0x40)
helper->call_conv |= 0x20;
if (helper->call_conv == 0) { /* Unmanaged */
idx = helper->unmanaged_call_conv - 1;
} else {
/* Managed */
idx = helper->call_conv & 0x60; /* has_this + explicit_this */
if (helper->call_conv & 0x02) /* varargs */
idx += 0x05;
}
sigbuffer_add_byte (&buf, idx);
sigbuffer_add_value (&buf, nargs);
encode_reflection_type (assembly, helper->return_type, &buf);
for (i = 0; i < nargs; ++i) {
MonoArray *modreqs = NULL;
MonoArray *modopts = NULL;
MonoReflectionType *pt;
if (helper->modreqs && (i < mono_array_length (helper->modreqs)))
modreqs = mono_array_get (helper->modreqs, MonoArray*, i);
if (helper->modopts && (i < mono_array_length (helper->modopts)))
modopts = mono_array_get (helper->modopts, MonoArray*, i);
encode_custom_modifiers (assembly, modreqs, modopts, &buf);
pt = mono_array_get (helper->arguments, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static guint32
mono_image_get_sighelper_token (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper)
{
guint32 idx;
MonoDynamicTable *table;
guint32 *values;
table = &assembly->tables [MONO_TABLE_STANDALONESIG];
idx = table->next_idx ++;
table->rows ++;
alloc_table (table, table->rows);
values = table->values + idx * MONO_STAND_ALONE_SIGNATURE_SIZE;
values [MONO_STAND_ALONE_SIGNATURE] =
mono_reflection_encode_sighelper (assembly, helper);
return idx;
}
static int
reflection_cc_to_file (int call_conv) {
switch (call_conv & 0x3) {
case 0:
case 1: return MONO_CALL_DEFAULT;
case 2: return MONO_CALL_VARARG;
default:
g_assert_not_reached ();
}
return 0;
}
#endif /* !DISABLE_REFLECTION_EMIT */
typedef struct {
MonoType *parent;
MonoMethodSignature *sig;
char *name;
guint32 token;
} ArrayMethod;
#ifndef DISABLE_REFLECTION_EMIT
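/*
 * mono_image_get_array_token:
 *
 * Return a MemberRef token for the array pseudo-method M (typically the
 * Get/Set/Address accessor or the constructor of an array type). Methods
 * created earlier are cached in assembly->array_methods and matched by
 * name, parent type and signature.
 */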
static guint32
mono_image_get_array_token (MonoDynamicImage *assembly, MonoReflectionArrayMethod *m)
{
guint32 nparams, i;
GList *tmp;
char *name;
MonoMethodSignature *sig;
ArrayMethod *am;
MonoType *mtype;
name = mono_string_to_utf8 (m->name);
nparams = mono_array_length (m->parameters);
sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + sizeof (MonoType*) * nparams);
sig->hasthis = 1;
sig->sentinelpos = -1;
sig->call_convention = reflection_cc_to_file (m->call_conv);
sig->param_count = nparams;
sig->ret = m->ret ? mono_reflection_type_get_handle (m->ret) : &mono_defaults.void_class->byval_arg;
mtype = mono_reflection_type_get_handle (m->parent);
for (i = 0; i < nparams; ++i)
sig->params [i] = mono_type_array_get_and_resolve (m->parameters, i);
for (tmp = assembly->array_methods; tmp; tmp = tmp->next) {
am = tmp->data;
if (strcmp (name, am->name) == 0 &&
mono_metadata_type_equal (am->parent, mtype) &&
mono_metadata_signature_equal (am->sig, sig)) {
g_free (name);
g_free (sig);
m->table_idx = am->token & 0xffffff;
return am->token;
}
}
am = g_new0 (ArrayMethod, 1);
am->name = name;
am->sig = sig;
am->parent = mtype;
am->token = mono_image_get_memberref_token (assembly, am->parent, name,
method_encode_signature (assembly, sig));
assembly->array_methods = g_list_prepend (assembly->array_methods, am);
m->table_idx = am->token & 0xffffff;
return am->token;
}
/*
* Insert into the metadata tables all the info about the TypeBuilder tb.
* Data in the tables is inserted in a predefined order, since some tables need to be sorted.
*/
static void
mono_image_get_type_info (MonoDomain *domain, MonoReflectionTypeBuilder *tb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint *values;
int i, is_object = 0, is_system = 0;
char *n;
table = &assembly->tables [MONO_TABLE_TYPEDEF];
values = table->values + tb->table_idx * MONO_TYPEDEF_SIZE;
values [MONO_TYPEDEF_FLAGS] = tb->attrs;
n = mono_string_to_utf8 (tb->name);
if (strcmp (n, "Object") == 0)
is_object++;
values [MONO_TYPEDEF_NAME] = string_heap_insert (&assembly->sheap, n);
g_free (n);
n = mono_string_to_utf8 (tb->nspace);
if (strcmp (n, "System") == 0)
is_system++;
values [MONO_TYPEDEF_NAMESPACE] = string_heap_insert (&assembly->sheap, n);
g_free (n);
if (tb->parent && !(is_system && is_object) &&
!(tb->attrs & TYPE_ATTRIBUTE_INTERFACE)) { /* interfaces don't have a parent */
values [MONO_TYPEDEF_EXTENDS] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent));
} else {
values [MONO_TYPEDEF_EXTENDS] = 0;
}
values [MONO_TYPEDEF_FIELD_LIST] = assembly->tables [MONO_TABLE_FIELD].next_idx;
values [MONO_TYPEDEF_METHOD_LIST] = assembly->tables [MONO_TABLE_METHOD].next_idx;
/*
* if we have explicit or sequential layout, output data in the
* ClassLayout table.
*/
if (((tb->attrs & TYPE_ATTRIBUTE_LAYOUT_MASK) != TYPE_ATTRIBUTE_AUTO_LAYOUT) &&
((tb->class_size > 0) || (tb->packing_size > 0))) {
table = &assembly->tables [MONO_TABLE_CLASSLAYOUT];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_CLASS_LAYOUT_SIZE;
values [MONO_CLASS_LAYOUT_PARENT] = tb->table_idx;
values [MONO_CLASS_LAYOUT_CLASS_SIZE] = tb->class_size;
values [MONO_CLASS_LAYOUT_PACKING_SIZE] = tb->packing_size;
}
/* handle interfaces */
if (tb->interfaces) {
table = &assembly->tables [MONO_TABLE_INTERFACEIMPL];
i = table->rows;
table->rows += mono_array_length (tb->interfaces);
alloc_table (table, table->rows);
values = table->values + (i + 1) * MONO_INTERFACEIMPL_SIZE;
for (i = 0; i < mono_array_length (tb->interfaces); ++i) {
MonoReflectionType* iface = (MonoReflectionType*) mono_array_get (tb->interfaces, gpointer, i);
values [MONO_INTERFACEIMPL_CLASS] = tb->table_idx;
values [MONO_INTERFACEIMPL_INTERFACE] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (iface));
values += MONO_INTERFACEIMPL_SIZE;
}
}
/* handle fields */
if (tb->fields) {
table = &assembly->tables [MONO_TABLE_FIELD];
table->rows += tb->num_fields;
alloc_table (table, table->rows);
for (i = 0; i < tb->num_fields; ++i)
mono_image_get_field_info (
mono_array_get (tb->fields, MonoReflectionFieldBuilder*, i), assembly);
}
/* handle constructors */
if (tb->ctors) {
table = &assembly->tables [MONO_TABLE_METHOD];
table->rows += mono_array_length (tb->ctors);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (tb->ctors); ++i)
mono_image_get_ctor_info (domain,
mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i), assembly);
}
/* handle methods */
if (tb->methods) {
table = &assembly->tables [MONO_TABLE_METHOD];
table->rows += tb->num_methods;
alloc_table (table, table->rows);
for (i = 0; i < tb->num_methods; ++i)
mono_image_get_method_info (
mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i), assembly);
}
/* Do the same for events and properties. */
if (tb->events && mono_array_length (tb->events)) {
table = &assembly->tables [MONO_TABLE_EVENT];
table->rows += mono_array_length (tb->events);
alloc_table (table, table->rows);
table = &assembly->tables [MONO_TABLE_EVENTMAP];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_EVENT_MAP_SIZE;
values [MONO_EVENT_MAP_PARENT] = tb->table_idx;
values [MONO_EVENT_MAP_EVENTLIST] = assembly->tables [MONO_TABLE_EVENT].next_idx;
for (i = 0; i < mono_array_length (tb->events); ++i)
mono_image_get_event_info (
mono_array_get (tb->events, MonoReflectionEventBuilder*, i), assembly);
}
if (tb->properties && mono_array_length (tb->properties)) {
table = &assembly->tables [MONO_TABLE_PROPERTY];
table->rows += mono_array_length (tb->properties);
alloc_table (table, table->rows);
table = &assembly->tables [MONO_TABLE_PROPERTYMAP];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_PROPERTY_MAP_SIZE;
values [MONO_PROPERTY_MAP_PARENT] = tb->table_idx;
values [MONO_PROPERTY_MAP_PROPERTY_LIST] = assembly->tables [MONO_TABLE_PROPERTY].next_idx;
for (i = 0; i < mono_array_length (tb->properties); ++i)
mono_image_get_property_info (
mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i), assembly);
}
/* handle generic parameters */
if (tb->generic_params) {
table = &assembly->tables [MONO_TABLE_GENERICPARAM];
table->rows += mono_array_length (tb->generic_params);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (tb->generic_params); ++i) {
guint32 owner = MONO_TYPEORMETHOD_TYPE | (tb->table_idx << MONO_TYPEORMETHOD_BITS);
mono_image_get_generic_param_info (
mono_array_get (tb->generic_params, MonoReflectionGenericParam*, i), owner, assembly);
}
}
mono_image_add_decl_security (assembly,
mono_metadata_make_token (MONO_TABLE_TYPEDEF, tb->table_idx), tb->permissions);
if (tb->subtypes) {
MonoDynamicTable *ntable;
ntable = &assembly->tables [MONO_TABLE_NESTEDCLASS];
ntable->rows += mono_array_length (tb->subtypes);
alloc_table (ntable, ntable->rows);
values = ntable->values + ntable->next_idx * MONO_NESTED_CLASS_SIZE;
for (i = 0; i < mono_array_length (tb->subtypes); ++i) {
MonoReflectionTypeBuilder *subtype = mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i);
values [MONO_NESTED_CLASS_NESTED] = subtype->table_idx;
values [MONO_NESTED_CLASS_ENCLOSING] = tb->table_idx;
/*g_print ("nesting %s (%d) in %s (%d) (rows %d/%d)\n",
mono_string_to_utf8 (subtype->name), subtype->table_idx,
mono_string_to_utf8 (tb->name), tb->table_idx,
ntable->next_idx, ntable->rows);*/
values += MONO_NESTED_CLASS_SIZE;
ntable->next_idx++;
}
}
}
#endif
static void
collect_types (GPtrArray *types, MonoReflectionTypeBuilder *type)
{
int i;
g_ptr_array_add (types, type); /* FIXME: GC object added to unmanaged memory */
if (!type->subtypes)
return;
for (i = 0; i < mono_array_length (type->subtypes); ++i) {
MonoReflectionTypeBuilder *subtype = mono_array_get (type->subtypes, MonoReflectionTypeBuilder*, i);
collect_types (types, subtype);
}
}
static gint
compare_types_by_table_idx (MonoReflectionTypeBuilder **type1, MonoReflectionTypeBuilder **type2)
{
if ((*type1)->table_idx < (*type2)->table_idx)
return -1;
else
if ((*type1)->table_idx > (*type2)->table_idx)
return 1;
else
return 0;
}
static void
params_add_cattrs (MonoDynamicImage *assembly, MonoArray *pinfo) {
int i;
if (!pinfo)
return;
for (i = 0; i < mono_array_length (pinfo); ++i) {
MonoReflectionParamBuilder *pb;
pb = mono_array_get (pinfo, MonoReflectionParamBuilder *, i);
if (!pb)
continue;
mono_image_add_cattrs (assembly, pb->table_idx, MONO_CUSTOM_ATTR_PARAMDEF, pb->cattrs);
}
}
static void
type_add_cattrs (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb) {
int i;
mono_image_add_cattrs (assembly, tb->table_idx, MONO_CUSTOM_ATTR_TYPEDEF, tb->cattrs);
if (tb->fields) {
for (i = 0; i < tb->num_fields; ++i) {
MonoReflectionFieldBuilder* fb;
fb = mono_array_get (tb->fields, MonoReflectionFieldBuilder*, i);
mono_image_add_cattrs (assembly, fb->table_idx, MONO_CUSTOM_ATTR_FIELDDEF, fb->cattrs);
}
}
if (tb->events) {
for (i = 0; i < mono_array_length (tb->events); ++i) {
MonoReflectionEventBuilder* eb;
eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i);
mono_image_add_cattrs (assembly, eb->table_idx, MONO_CUSTOM_ATTR_EVENT, eb->cattrs);
}
}
if (tb->properties) {
for (i = 0; i < mono_array_length (tb->properties); ++i) {
MonoReflectionPropertyBuilder* pb;
pb = mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i);
mono_image_add_cattrs (assembly, pb->table_idx, MONO_CUSTOM_ATTR_PROPERTY, pb->cattrs);
}
}
if (tb->ctors) {
for (i = 0; i < mono_array_length (tb->ctors); ++i) {
MonoReflectionCtorBuilder* cb;
cb = mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i);
mono_image_add_cattrs (assembly, cb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, cb->cattrs);
params_add_cattrs (assembly, cb->pinfo);
}
}
if (tb->methods) {
for (i = 0; i < tb->num_methods; ++i) {
MonoReflectionMethodBuilder* mb;
mb = mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i);
mono_image_add_cattrs (assembly, mb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, mb->cattrs);
params_add_cattrs (assembly, mb->pinfo);
}
}
if (tb->subtypes) {
for (i = 0; i < mono_array_length (tb->subtypes); ++i)
type_add_cattrs (assembly, mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i));
}
}
static void
module_add_cattrs (MonoDynamicImage *assembly, MonoReflectionModuleBuilder *moduleb)
{
int i;
mono_image_add_cattrs (assembly, moduleb->table_idx, MONO_CUSTOM_ATTR_MODULE, moduleb->cattrs);
if (moduleb->global_methods) {
for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) {
MonoReflectionMethodBuilder* mb = mono_array_get (moduleb->global_methods, MonoReflectionMethodBuilder*, i);
mono_image_add_cattrs (assembly, mb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, mb->cattrs);
params_add_cattrs (assembly, mb->pinfo);
}
}
if (moduleb->global_fields) {
for (i = 0; i < mono_array_length (moduleb->global_fields); ++i) {
MonoReflectionFieldBuilder *fb = mono_array_get (moduleb->global_fields, MonoReflectionFieldBuilder*, i);
mono_image_add_cattrs (assembly, fb->table_idx, MONO_CUSTOM_ATTR_FIELDDEF, fb->cattrs);
}
}
if (moduleb->types) {
for (i = 0; i < moduleb->num_types; ++i)
type_add_cattrs (assembly, mono_array_get (moduleb->types, MonoReflectionTypeBuilder*, i));
}
}
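/*
 * mono_image_fill_file_table:
 *
 * Emit a FILE table row for MODULE. The module file is hashed with SHA1
 * and the 20 byte digest, preceded by its compressed length, is stored in
 * the #Blob heap.
 */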
static void
mono_image_fill_file_table (MonoDomain *domain, MonoReflectionModule *module, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
char blob_size [6];
guchar hash [20];
char *b = blob_size;
char *dir, *path;
table = &assembly->tables [MONO_TABLE_FILE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_FILE_SIZE;
values [MONO_FILE_FLAGS] = FILE_CONTAINS_METADATA;
values [MONO_FILE_NAME] = string_heap_insert (&assembly->sheap, module->image->module_name);
if (module->image->dynamic) {
/* This depends on the fact that the main module is emitted last */
dir = mono_string_to_utf8 (((MonoReflectionModuleBuilder*)module)->assemblyb->dir);
path = g_strdup_printf ("%s%c%s", dir, G_DIR_SEPARATOR, module->image->module_name);
} else {
dir = NULL;
path = g_strdup (module->image->name);
}
mono_sha1_get_digest_from_file (path, hash);
g_free (dir);
g_free (path);
mono_metadata_encode_value (20, b, &b);
values [MONO_FILE_HASH_VALUE] = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size);
mono_image_add_stream_data (&assembly->blob, (char*)hash, 20);
table->next_idx ++;
}
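/*
 * mono_image_fill_module_table:
 *
 * Emit the single MODULE table row for MB. The MVID column is a 1-based
 * index into the #GUID heap, which is why the byte offset returned by
 * mono_image_add_stream_data () is divided by the 16 byte GUID size and
 * then incremented below.
 */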
static void
mono_image_fill_module_table (MonoDomain *domain, MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
int i;
table = &assembly->tables [MONO_TABLE_MODULE];
mb->table_idx = table->next_idx ++;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->module.name);
i = mono_image_add_stream_data (&assembly->guid, mono_array_addr (mb->guid, char, 0), 16);
i /= 16;
++i;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_GENERATION] = 0;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_MVID] = i;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_ENC] = 0;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_ENCBASE] = 0;
}
static guint32
mono_image_fill_export_table_from_class (MonoDomain *domain, MonoClass *klass,
guint32 module_index, guint32 parent_index, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint32 visib, res;
visib = klass->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK;
if (! ((visib & TYPE_ATTRIBUTE_PUBLIC) || (visib & TYPE_ATTRIBUTE_NESTED_PUBLIC)))
return 0;
table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_EXP_TYPE_SIZE;
values [MONO_EXP_TYPE_FLAGS] = klass->flags;
values [MONO_EXP_TYPE_TYPEDEF] = klass->type_token;
if (klass->nested_in)
values [MONO_EXP_TYPE_IMPLEMENTATION] = (parent_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE;
else
values [MONO_EXP_TYPE_IMPLEMENTATION] = (module_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_FILE;
values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name);
values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space);
res = table->next_idx;
table->next_idx ++;
/* Emit nested types */
if (klass->ext && klass->ext->nested_classes) {
GList *tmp;
for (tmp = klass->ext->nested_classes; tmp; tmp = tmp->next)
/* the row emitted above (res) is the parent of every nested sibling */
mono_image_fill_export_table_from_class (domain, tmp->data, module_index, res, assembly);
}
return res;
}
static void
mono_image_fill_export_table (MonoDomain *domain, MonoReflectionTypeBuilder *tb,
guint32 module_index, guint32 parent_index, MonoDynamicImage *assembly)
{
MonoClass *klass;
guint32 idx, i;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
klass->type_token = mono_metadata_make_token (MONO_TABLE_TYPEDEF, tb->table_idx);
idx = mono_image_fill_export_table_from_class (domain, klass, module_index,
parent_index, assembly);
/*
* Emit nested types
* We need to do this ourselves since klass->nested_classes is not set up.
*/
if (tb->subtypes) {
for (i = 0; i < mono_array_length (tb->subtypes); ++i)
mono_image_fill_export_table (domain, mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i), module_index, idx, assembly);
}
}
static void
mono_image_fill_export_table_from_module (MonoDomain *domain, MonoReflectionModule *module,
guint32 module_index, MonoDynamicImage *assembly)
{
MonoImage *image = module->image;
MonoTableInfo *t;
guint32 i;
t = &image->tables [MONO_TABLE_TYPEDEF];
for (i = 0; i < t->rows; ++i) {
MonoClass *klass = mono_class_get (image, mono_metadata_make_token (MONO_TABLE_TYPEDEF, i + 1));
if (klass->flags & TYPE_ATTRIBUTE_PUBLIC)
mono_image_fill_export_table_from_class (domain, klass, module_index, 0, assembly);
}
}
static guint32
add_exported_type (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly, MonoClass *klass)
{
MonoDynamicTable *table;
guint32 *values;
guint32 scope, idx, res, impl;
gboolean forwarder = TRUE;
if (klass->nested_in) {
impl = add_exported_type (assemblyb, assembly, klass->nested_in);
forwarder = FALSE;
} else {
scope = resolution_scope_from_image (assembly, klass->image);
g_assert ((scope & MONO_RESOLTION_SCOPE_MASK) == MONO_RESOLTION_SCOPE_ASSEMBLYREF);
idx = scope >> MONO_RESOLTION_SCOPE_BITS;
impl = (idx << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_ASSEMBLYREF;
}
table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_EXP_TYPE_SIZE;
values [MONO_EXP_TYPE_FLAGS] = forwarder ? TYPE_ATTRIBUTE_FORWARDER : 0;
values [MONO_EXP_TYPE_TYPEDEF] = 0;
values [MONO_EXP_TYPE_IMPLEMENTATION] = impl;
values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name);
values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space);
res = (table->next_idx << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE;
table->next_idx++;
return res;
}
static void
mono_image_fill_export_table_from_type_forwarders (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly)
{
MonoClass *klass;
int i;
if (!assemblyb->type_forwarders)
return;
for (i = 0; i < mono_array_length (assemblyb->type_forwarders); ++i) {
MonoReflectionType *t = mono_array_get (assemblyb->type_forwarders, MonoReflectionType *, i);
MonoType *type;
if (!t)
continue;
type = mono_reflection_type_get_handle (t);
g_assert (type);
klass = mono_class_from_mono_type (type);
add_exported_type (assemblyb, assembly, klass);
}
}
#define align_pointer(base,p)\
do {\
guint32 __diff = (unsigned char*)(p)-(unsigned char*)(base);\
if (__diff & 3)\
(p) += 4 - (__diff & 3);\
} while (0)
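/*
 * Example: if P points at BASE + 5, align_pointer () advances it to
 * BASE + 8; pointers that are already 4-aligned are left untouched.
 */
/*
 * qsort () callbacks used by build_compressed_metadata () to bring the
 * tables that the metadata spec requires to be sorted into their final
 * order before they are compressed.
 */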
static int
compare_constants (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_CONSTANT_PARENT] - b_values [MONO_CONSTANT_PARENT];
}
static int
compare_semantics (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
int assoc = a_values [MONO_METHOD_SEMA_ASSOCIATION] - b_values [MONO_METHOD_SEMA_ASSOCIATION];
if (assoc)
return assoc;
return a_values [MONO_METHOD_SEMA_SEMANTICS] - b_values [MONO_METHOD_SEMA_SEMANTICS];
}
static int
compare_custom_attrs (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_CUSTOM_ATTR_PARENT] - b_values [MONO_CUSTOM_ATTR_PARENT];
}
static int
compare_field_marshal (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_FIELD_MARSHAL_PARENT] - b_values [MONO_FIELD_MARSHAL_PARENT];
}
static int
compare_nested (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_NESTED_CLASS_NESTED] - b_values [MONO_NESTED_CLASS_NESTED];
}
static int
compare_genericparam (const void *a, const void *b)
{
const GenericParamTableEntry **a_entry = (const GenericParamTableEntry **) a;
const GenericParamTableEntry **b_entry = (const GenericParamTableEntry **) b;
if ((*b_entry)->owner == (*a_entry)->owner)
return
mono_type_get_generic_param_num (mono_reflection_type_get_handle ((MonoReflectionType*)(*a_entry)->gparam)) -
mono_type_get_generic_param_num (mono_reflection_type_get_handle ((MonoReflectionType*)(*b_entry)->gparam));
else
return (*a_entry)->owner - (*b_entry)->owner;
}
static int
compare_declsecurity_attrs (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_DECL_SECURITY_PARENT] - b_values [MONO_DECL_SECURITY_PARENT];
}
static int
compare_interface_impl (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
int klass = a_values [MONO_INTERFACEIMPL_CLASS] - b_values [MONO_INTERFACEIMPL_CLASS];
if (klass)
return klass;
return a_values [MONO_INTERFACEIMPL_INTERFACE] - b_values [MONO_INTERFACEIMPL_INTERFACE];
}
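/*
 * Pad a heap to a 4 byte boundary with zero bytes: for example, a stream
 * with index 13 gains 3 bytes of padding and ends up at index 16.
 */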
static void
pad_heap (MonoDynamicStream *sh)
{
if (sh->index & 3) {
int sz = 4 - (sh->index & 3);
memset (sh->data + sh->index, 0, sz);
sh->index += sz;
}
}
struct StreamDesc {
const char *name;
MonoDynamicStream *stream;
};
/*
* build_compressed_metadata() fills in the blob of data that represents the
* raw metadata as it will be saved in the PE file. The five streams are output
* and the metadata tables are compressed from the guint32 array representation
* to the compressed on-disk format.
*/
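/*
 * Rough layout of the blob emitted below (cf. ECMA-335 II.24.2, metadata
 * root):
 *
 *   "BSJB" signature, md versions, version string (padded to 4 bytes)
 *   flags, stream count, five stream headers (offset, size, name)
 *   #~ table stream: header, per-table row counts, compressed rows
 *   #Strings, #US, #Blob and #GUID heaps, each padded to 4 bytes
 */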
static void
build_compressed_metadata (MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
int i;
guint64 valid_mask = 0;
guint64 sorted_mask;
guint32 heapt_size = 0;
guint32 meta_size = 256; /* allow for header and other stuff */
guint32 table_offset;
guint32 ntables = 0;
guint64 *int64val;
guint32 *int32val;
guint16 *int16val;
MonoImage *meta;
unsigned char *p;
struct StreamDesc stream_desc [5];
qsort (assembly->gen_params->pdata, assembly->gen_params->len, sizeof (gpointer), compare_genericparam);
for (i = 0; i < assembly->gen_params->len; i++){
GenericParamTableEntry *entry = g_ptr_array_index (assembly->gen_params, i);
write_generic_param_entry (assembly, entry);
}
stream_desc [0].name = "#~";
stream_desc [0].stream = &assembly->tstream;
stream_desc [1].name = "#Strings";
stream_desc [1].stream = &assembly->sheap;
stream_desc [2].name = "#US";
stream_desc [2].stream = &assembly->us;
stream_desc [3].name = "#Blob";
stream_desc [3].stream = &assembly->blob;
stream_desc [4].name = "#GUID";
stream_desc [4].stream = &assembly->guid;
/* tables that are sorted */
sorted_mask = ((guint64)1 << MONO_TABLE_CONSTANT) | ((guint64)1 << MONO_TABLE_FIELDMARSHAL)
| ((guint64)1 << MONO_TABLE_METHODSEMANTICS) | ((guint64)1 << MONO_TABLE_CLASSLAYOUT)
| ((guint64)1 << MONO_TABLE_FIELDLAYOUT) | ((guint64)1 << MONO_TABLE_FIELDRVA)
| ((guint64)1 << MONO_TABLE_IMPLMAP) | ((guint64)1 << MONO_TABLE_NESTEDCLASS)
| ((guint64)1 << MONO_TABLE_METHODIMPL) | ((guint64)1 << MONO_TABLE_CUSTOMATTRIBUTE)
| ((guint64)1 << MONO_TABLE_DECLSECURITY) | ((guint64)1 << MONO_TABLE_GENERICPARAM)
| ((guint64)1 << MONO_TABLE_INTERFACEIMPL);
/* Compute table sizes */
/* the MonoImage has already been created in mono_image_basic_init() */
meta = &assembly->image;
/* sizes should be multiple of 4 */
pad_heap (&assembly->blob);
pad_heap (&assembly->guid);
pad_heap (&assembly->sheap);
pad_heap (&assembly->us);
/* Setup the info used by compute_sizes () */
meta->idx_blob_wide = assembly->blob.index >= 65536 ? 1 : 0;
meta->idx_guid_wide = assembly->guid.index >= 65536 ? 1 : 0;
meta->idx_string_wide = assembly->sheap.index >= 65536 ? 1 : 0;
meta_size += assembly->blob.index;
meta_size += assembly->guid.index;
meta_size += assembly->sheap.index;
meta_size += assembly->us.index;
for (i=0; i < MONO_TABLE_NUM; ++i)
meta->tables [i].rows = assembly->tables [i].rows;
for (i = 0; i < MONO_TABLE_NUM; i++){
if (meta->tables [i].rows == 0)
continue;
valid_mask |= (guint64)1 << i;
ntables ++;
meta->tables [i].row_size = mono_metadata_compute_size (
meta, i, &meta->tables [i].size_bitfield);
heapt_size += meta->tables [i].row_size * meta->tables [i].rows;
}
heapt_size += 24; /* #~ header size */
heapt_size += ntables * 4;
/* make multiple of 4 */
heapt_size += 3;
heapt_size &= ~3;
meta_size += heapt_size;
meta->raw_metadata = g_malloc0 (meta_size);
p = (unsigned char*)meta->raw_metadata;
/* the metadata signature */
*p++ = 'B'; *p++ = 'S'; *p++ = 'J'; *p++ = 'B';
/* version numbers and 4 bytes reserved */
int16val = (guint16*)p;
*int16val++ = GUINT16_TO_LE (meta->md_version_major);
*int16val = GUINT16_TO_LE (meta->md_version_minor);
p += 8;
/* version string */
int32val = (guint32*)p;
*int32val = GUINT32_TO_LE ((strlen (meta->version) + 3) & (~3)); /* needs to be multiple of 4 */
p += 4;
memcpy (p, meta->version, strlen (meta->version));
p += GUINT32_FROM_LE (*int32val);
align_pointer (meta->raw_metadata, p);
int16val = (guint16*)p;
*int16val++ = GUINT16_TO_LE (0); /* flags must be 0 */
*int16val = GUINT16_TO_LE (5); /* number of streams */
p += 4;
/*
* write the stream info.
*/
table_offset = (p - (unsigned char*)meta->raw_metadata) + 5 * 8 + 40; /* room needed for stream headers */
table_offset += 3; table_offset &= ~3;
assembly->tstream.index = heapt_size;
for (i = 0; i < 5; ++i) {
int32val = (guint32*)p;
stream_desc [i].stream->offset = table_offset;
*int32val++ = GUINT32_TO_LE (table_offset);
*int32val = GUINT32_TO_LE (stream_desc [i].stream->index);
table_offset += GUINT32_FROM_LE (*int32val);
table_offset += 3; table_offset &= ~3;
p += 8;
strcpy ((char*)p, stream_desc [i].name);
p += strlen (stream_desc [i].name) + 1;
align_pointer (meta->raw_metadata, p);
}
/*
* now copy the data, the table stream header and contents goes first.
*/
g_assert ((p - (unsigned char*)meta->raw_metadata) < assembly->tstream.offset);
p = (guchar*)meta->raw_metadata + assembly->tstream.offset;
int32val = (guint32*)p;
*int32val = GUINT32_TO_LE (0); /* reserved */
p += 4;
if (mono_framework_version () > 1) {
*p++ = 2; /* version */
*p++ = 0;
} else {
*p++ = 1; /* version */
*p++ = 0;
}
if (meta->idx_string_wide)
*p |= 0x01;
if (meta->idx_guid_wide)
*p |= 0x02;
if (meta->idx_blob_wide)
*p |= 0x04;
++p;
*p++ = 1; /* reserved */
int64val = (guint64*)p;
*int64val++ = GUINT64_TO_LE (valid_mask);
*int64val++ = GUINT64_TO_LE (valid_mask & sorted_mask); /* bitvector of sorted tables */
p += 16;
int32val = (guint32*)p;
for (i = 0; i < MONO_TABLE_NUM; i++){
if (meta->tables [i].rows == 0)
continue;
*int32val++ = GUINT32_TO_LE (meta->tables [i].rows);
}
p = (unsigned char*)int32val;
/* sort the tables that still need sorting */
table = &assembly->tables [MONO_TABLE_CONSTANT];
if (table->rows)
qsort (table->values + MONO_CONSTANT_SIZE, table->rows, sizeof (guint32) * MONO_CONSTANT_SIZE, compare_constants);
table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
if (table->rows)
qsort (table->values + MONO_METHOD_SEMA_SIZE, table->rows, sizeof (guint32) * MONO_METHOD_SEMA_SIZE, compare_semantics);
table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
if (table->rows)
qsort (table->values + MONO_CUSTOM_ATTR_SIZE, table->rows, sizeof (guint32) * MONO_CUSTOM_ATTR_SIZE, compare_custom_attrs);
table = &assembly->tables [MONO_TABLE_FIELDMARSHAL];
if (table->rows)
qsort (table->values + MONO_FIELD_MARSHAL_SIZE, table->rows, sizeof (guint32) * MONO_FIELD_MARSHAL_SIZE, compare_field_marshal);
table = &assembly->tables [MONO_TABLE_NESTEDCLASS];
if (table->rows)
qsort (table->values + MONO_NESTED_CLASS_SIZE, table->rows, sizeof (guint32) * MONO_NESTED_CLASS_SIZE, compare_nested);
/* Section 21.11 DeclSecurity in Partition II doesn't specify this to be sorted, but the MS implementation requires it */
table = &assembly->tables [MONO_TABLE_DECLSECURITY];
if (table->rows)
qsort (table->values + MONO_DECL_SECURITY_SIZE, table->rows, sizeof (guint32) * MONO_DECL_SECURITY_SIZE, compare_declsecurity_attrs);
table = &assembly->tables [MONO_TABLE_INTERFACEIMPL];
if (table->rows)
qsort (table->values + MONO_INTERFACEIMPL_SIZE, table->rows, sizeof (guint32) * MONO_INTERFACEIMPL_SIZE, compare_interface_impl);
/* compress the tables */
for (i = 0; i < MONO_TABLE_NUM; i++){
int row, col;
guint32 *values;
guint32 bitfield = meta->tables [i].size_bitfield;
if (!meta->tables [i].rows)
continue;
if (assembly->tables [i].columns != mono_metadata_table_count (bitfield))
g_error ("col count mismatch in %d: %d %d", i, assembly->tables [i].columns, mono_metadata_table_count (bitfield));
meta->tables [i].base = (char*)p;
for (row = 1; row <= meta->tables [i].rows; ++row) {
values = assembly->tables [i].values + row * assembly->tables [i].columns;
for (col = 0; col < assembly->tables [i].columns; ++col) {
switch (mono_metadata_table_size (bitfield, col)) {
case 1:
*p++ = values [col];
break;
case 2:
*p++ = values [col] & 0xff;
*p++ = (values [col] >> 8) & 0xff;
break;
case 4:
*p++ = values [col] & 0xff;
*p++ = (values [col] >> 8) & 0xff;
*p++ = (values [col] >> 16) & 0xff;
*p++ = (values [col] >> 24) & 0xff;
break;
default:
g_assert_not_reached ();
}
}
}
g_assert ((p - (const unsigned char*)meta->tables [i].base) == (meta->tables [i].rows * meta->tables [i].row_size));
}
g_assert (assembly->guid.offset + assembly->guid.index < meta_size);
memcpy (meta->raw_metadata + assembly->sheap.offset, assembly->sheap.data, assembly->sheap.index);
memcpy (meta->raw_metadata + assembly->us.offset, assembly->us.data, assembly->us.index);
memcpy (meta->raw_metadata + assembly->blob.offset, assembly->blob.data, assembly->blob.index);
memcpy (meta->raw_metadata + assembly->guid.offset, assembly->guid.data, assembly->guid.index);
assembly->meta_size = assembly->guid.offset + assembly->guid.index;
}
/*
* Some tables in metadata need to be sorted according to some criteria, but
* when methods and fields are first created with reflection, they may be assigned a token
* that doesn't correspond to the final token they will get assigned after the sorting.
* ILGenerator.cs keeps a fixup table that maps the position of tokens in the IL code stream
* with the reflection objects that represent them. Once all the tables are set up, the
* reflection objects will contain the correct table index. fixup_method() fixes up the
* tokens for the method with ILGenerator @ilgen.
*/
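/*
 * An IL token is stored as four little-endian bytes, e.g. 0x06000001 is
 * row 1 of the METHOD (0x06) table: target [3] below holds the table id
 * and only the three row index bytes get patched.
 */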
static void
fixup_method (MonoReflectionILGen *ilgen, gpointer value, MonoDynamicImage *assembly)
{
guint32 code_idx = GPOINTER_TO_UINT (value);
MonoReflectionILTokenInfo *iltoken;
MonoReflectionFieldBuilder *field;
MonoReflectionCtorBuilder *ctor;
MonoReflectionMethodBuilder *method;
MonoReflectionTypeBuilder *tb;
MonoReflectionArrayMethod *am;
guint32 i, idx = 0;
unsigned char *target;
for (i = 0; i < ilgen->num_token_fixups; ++i) {
iltoken = (MonoReflectionILTokenInfo *)mono_array_addr_with_size (ilgen->token_fixups, sizeof (MonoReflectionILTokenInfo), i);
target = (guchar*)assembly->code.data + code_idx + iltoken->code_pos;
switch (target [3]) {
case MONO_TABLE_FIELD:
if (!strcmp (iltoken->member->vtable->klass->name, "FieldBuilder")) {
field = (MonoReflectionFieldBuilder *)iltoken->member;
idx = field->table_idx;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoField")) {
MonoClassField *f = ((MonoReflectionField*)iltoken->member)->field;
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->field_to_table_idx, f));
} else {
g_assert_not_reached ();
}
break;
case MONO_TABLE_METHOD:
if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder")) {
method = (MonoReflectionMethodBuilder *)iltoken->member;
idx = method->table_idx;
} else if (!strcmp (iltoken->member->vtable->klass->name, "ConstructorBuilder")) {
ctor = (MonoReflectionCtorBuilder *)iltoken->member;
idx = ctor->table_idx;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoMethod") ||
!strcmp (iltoken->member->vtable->klass->name, "MonoCMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m));
} else {
g_assert_not_reached ();
}
break;
case MONO_TABLE_TYPEDEF:
if (strcmp (iltoken->member->vtable->klass->name, "TypeBuilder"))
g_assert_not_reached ();
tb = (MonoReflectionTypeBuilder *)iltoken->member;
idx = tb->table_idx;
break;
case MONO_TABLE_MEMBERREF:
if (!strcmp (iltoken->member->vtable->klass->name, "MonoArrayMethod")) {
am = (MonoReflectionArrayMethod*)iltoken->member;
idx = am->table_idx;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoMethod") ||
!strcmp (iltoken->member->vtable->klass->name, "MonoCMethod") ||
!strcmp (iltoken->member->vtable->klass->name, "MonoGenericMethod") ||
!strcmp (iltoken->member->vtable->klass->name, "MonoGenericCMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
g_assert (m->klass->generic_class || m->klass->generic_container);
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "FieldBuilder")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoField")) {
MonoClassField *f = ((MonoReflectionField*)iltoken->member)->field;
g_assert (is_field_on_inst (f));
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder") ||
!strcmp (iltoken->member->vtable->klass->name, "ConstructorBuilder")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "FieldOnTypeBuilderInst")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodOnTypeBuilderInst")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "ConstructorOnTypeBuilderInst")) {
continue;
} else {
g_assert_not_reached ();
}
break;
case MONO_TABLE_METHODSPEC:
if (!strcmp (iltoken->member->vtable->klass->name, "MonoGenericMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
g_assert (mono_method_signature (m)->generic_param_count);
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodOnTypeBuilderInst")) {
continue;
} else {
g_assert_not_reached ();
}
break;
default:
g_error ("got unexpected table 0x%02x in fixup", target [3]);
}
target [0] = idx & 0xff;
target [1] = (idx >> 8) & 0xff;
target [2] = (idx >> 16) & 0xff;
}
}
/*
* fixup_cattrs:
*
* The CUSTOM_ATTRIBUTE table might contain METHODDEF tokens whose final
* value is not known when the table is emitted.
*/
static void
fixup_cattrs (MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint32 type, i, idx, token;
MonoObject *ctor;
table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
for (i = 0; i < table->rows; ++i) {
values = table->values + ((i + 1) * MONO_CUSTOM_ATTR_SIZE);
type = values [MONO_CUSTOM_ATTR_TYPE];
if ((type & MONO_CUSTOM_ATTR_TYPE_MASK) == MONO_CUSTOM_ATTR_TYPE_METHODDEF) {
idx = type >> MONO_CUSTOM_ATTR_TYPE_BITS;
token = mono_metadata_make_token (MONO_TABLE_METHOD, idx);
ctor = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
g_assert (ctor);
if (!strcmp (ctor->vtable->klass->name, "MonoCMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)ctor)->method;
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m));
values [MONO_CUSTOM_ATTR_TYPE] = (idx << MONO_CUSTOM_ATTR_TYPE_BITS) | MONO_CUSTOM_ATTR_TYPE_METHODDEF;
}
}
}
}
static void
assembly_add_resource_manifest (MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly, MonoReflectionResource *rsrc, guint32 implementation)
{
MonoDynamicTable *table;
guint32 *values;
table = &assembly->tables [MONO_TABLE_MANIFESTRESOURCE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_MANIFEST_SIZE;
values [MONO_MANIFEST_OFFSET] = rsrc->offset;
values [MONO_MANIFEST_FLAGS] = rsrc->attrs;
values [MONO_MANIFEST_NAME] = string_heap_insert_mstring (&assembly->sheap, rsrc->name);
values [MONO_MANIFEST_IMPLEMENTATION] = implementation;
table->next_idx++;
}
static void
assembly_add_resource (MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly, MonoReflectionResource *rsrc)
{
MonoDynamicTable *table;
guint32 *values;
char blob_size [6];
guchar hash [20];
char *b = blob_size;
char *name, *sname;
guint32 idx, offset;
if (rsrc->filename) {
name = mono_string_to_utf8 (rsrc->filename);
sname = g_path_get_basename (name);
table = &assembly->tables [MONO_TABLE_FILE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_FILE_SIZE;
values [MONO_FILE_FLAGS] = FILE_CONTAINS_NO_METADATA;
values [MONO_FILE_NAME] = string_heap_insert (&assembly->sheap, sname);
g_free (sname);
mono_sha1_get_digest_from_file (name, hash);
mono_metadata_encode_value (20, b, &b);
values [MONO_FILE_HASH_VALUE] = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size);
mono_image_add_stream_data (&assembly->blob, (char*)hash, 20);
g_free (name);
idx = table->next_idx++;
rsrc->offset = 0;
idx = MONO_IMPLEMENTATION_FILE | (idx << MONO_IMPLEMENTATION_BITS);
} else {
char sizebuf [4];
char *data;
guint len;
if (rsrc->data) {
data = mono_array_addr (rsrc->data, char, 0);
len = mono_array_length (rsrc->data);
} else {
data = NULL;
len = 0;
}
offset = len;
sizebuf [0] = offset; sizebuf [1] = offset >> 8;
sizebuf [2] = offset >> 16; sizebuf [3] = offset >> 24;
rsrc->offset = mono_image_add_stream_data (&assembly->resources, sizebuf, 4);
mono_image_add_stream_data (&assembly->resources, data, len);
if (!mb->is_main)
/*
* The entry should be emitted into the MANIFESTRESOURCE table of
* the main module, but that needs to reference the FILE table
* which isn't emitted yet.
*/
return;
else
idx = 0;
}
assembly_add_resource_manifest (mb, assembly, rsrc, idx);
}
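/*
 * set_version_from_string:
 *
 * Parse an assembly version string into the four consecutive version
 * columns of the ASSEMBLY table row: for example "1.2.3.4" stores 1, 2, 3
 * and 4. A NULL version leaves all four components at 0.
 */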
static void
set_version_from_string (MonoString *version, guint32 *values)
{
gchar *ver, *p, *str;
guint32 i;
values [MONO_ASSEMBLY_MAJOR_VERSION] = 0;
values [MONO_ASSEMBLY_MINOR_VERSION] = 0;
values [MONO_ASSEMBLY_REV_NUMBER] = 0;
values [MONO_ASSEMBLY_BUILD_NUMBER] = 0;
if (!version)
return;
ver = str = mono_string_to_utf8 (version);
for (i = 0; i < 4; ++i) {
values [MONO_ASSEMBLY_MAJOR_VERSION + i] = strtol (ver, &p, 10);
switch (*p) {
case '.':
p++;
break;
case '*':
/* handle Revision and Build */
p++;
break;
}
ver = p;
}
g_free (str);
}
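/*
 * load_public_key:
 *
 * Copy the public key of the assembly into the #Blob heap, record it in
 * assembly->public_key and size assembly->strong_name so that space for
 * the strong name signature can be reserved in the image. Returns the
 * blob heap index to store in the ASSEMBLY table, or 0 if there is no key.
 */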
static guint32
load_public_key (MonoArray *pkey, MonoDynamicImage *assembly) {
gsize len;
guint32 token = 0;
char blob_size [6];
char *b = blob_size;
if (!pkey)
return token;
len = mono_array_length (pkey);
mono_metadata_encode_value (len, b, &b);
token = mono_image_add_stream_data (&assembly->blob, blob_size, b - blob_size);
mono_image_add_stream_data (&assembly->blob, mono_array_addr (pkey, char, 0), len);
assembly->public_key = g_malloc (len);
memcpy (assembly->public_key, mono_array_addr (pkey, char, 0), len);
assembly->public_key_len = len;
/* Special case: check for ECMA key (16 bytes) */
if ((len == MONO_ECMA_KEY_LENGTH) && mono_is_ecma_key (mono_array_addr (pkey, char, 0), len)) {
/* In this case we must reserve 128 bytes (1024 bits) for the signature */
assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH;
} else if (len >= MONO_PUBLIC_KEY_HEADER_LENGTH + MONO_MINIMUM_PUBLIC_KEY_LENGTH) {
/* minimum key size (in 2.0) is 384 bits */
assembly->strong_name_size = len - MONO_PUBLIC_KEY_HEADER_LENGTH;
} else {
/* FIXME - verifier */
g_warning ("Invalid public key length: %d bits (total: %d)", (int)MONO_PUBLIC_KEY_BIT_SIZE (len), (int)len);
assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH; /* to be safe */
}
assembly->strong_name = g_malloc0 (assembly->strong_name_size);
return token;
}
static void
mono_image_emit_manifest (MonoReflectionModuleBuilder *moduleb)
{
MonoDynamicTable *table;
MonoDynamicImage *assembly;
MonoReflectionAssemblyBuilder *assemblyb;
MonoDomain *domain;
guint32 *values;
int i;
guint32 module_index;
assemblyb = moduleb->assemblyb;
assembly = moduleb->dynamic_image;
domain = mono_object_domain (assemblyb);
/* Emit ASSEMBLY table */
table = &assembly->tables [MONO_TABLE_ASSEMBLY];
alloc_table (table, 1);
values = table->values + MONO_ASSEMBLY_SIZE;
values [MONO_ASSEMBLY_HASH_ALG] = assemblyb->algid? assemblyb->algid: ASSEMBLY_HASH_SHA1;
values [MONO_ASSEMBLY_NAME] = string_heap_insert_mstring (&assembly->sheap, assemblyb->name);
if (assemblyb->culture) {
values [MONO_ASSEMBLY_CULTURE] = string_heap_insert_mstring (&assembly->sheap, assemblyb->culture);
} else {
values [MONO_ASSEMBLY_CULTURE] = string_heap_insert (&assembly->sheap, "");
}
values [MONO_ASSEMBLY_PUBLIC_KEY] = load_public_key (assemblyb->public_key, assembly);
values [MONO_ASSEMBLY_FLAGS] = assemblyb->flags;
set_version_from_string (assemblyb->version, values);
/* Emit FILE + EXPORTED_TYPE table */
module_index = 0;
for (i = 0; i < mono_array_length (assemblyb->modules); ++i) {
int j;
MonoReflectionModuleBuilder *file_module =
mono_array_get (assemblyb->modules, MonoReflectionModuleBuilder*, i);
if (file_module != moduleb) {
mono_image_fill_file_table (domain, (MonoReflectionModule*)file_module, assembly);
module_index ++;
if (file_module->types) {
for (j = 0; j < file_module->num_types; ++j) {
MonoReflectionTypeBuilder *tb = mono_array_get (file_module->types, MonoReflectionTypeBuilder*, j);
mono_image_fill_export_table (domain, tb, module_index, 0, assembly);
}
}
}
}
if (assemblyb->loaded_modules) {
for (i = 0; i < mono_array_length (assemblyb->loaded_modules); ++i) {
MonoReflectionModule *file_module =
mono_array_get (assemblyb->loaded_modules, MonoReflectionModule*, i);
mono_image_fill_file_table (domain, file_module, assembly);
module_index ++;
mono_image_fill_export_table_from_module (domain, file_module, module_index, assembly);
}
}
if (assemblyb->type_forwarders)
mono_image_fill_export_table_from_type_forwarders (assemblyb, assembly);
/* Emit MANIFESTRESOURCE table */
module_index = 0;
for (i = 0; i < mono_array_length (assemblyb->modules); ++i) {
int j;
MonoReflectionModuleBuilder *file_module =
mono_array_get (assemblyb->modules, MonoReflectionModuleBuilder*, i);
/* The table for the main module is emitted later */
if (file_module != moduleb) {
module_index ++;
if (file_module->resources) {
int len = mono_array_length (file_module->resources);
for (j = 0; j < len; ++j) {
MonoReflectionResource* res = (MonoReflectionResource*)mono_array_addr (file_module->resources, MonoReflectionResource, j);
assembly_add_resource_manifest (file_module, assembly, res, MONO_IMPLEMENTATION_FILE | (module_index << MONO_IMPLEMENTATION_BITS));
}
}
}
}
}
#ifndef DISABLE_REFLECTION_EMIT_SAVE
/*
* mono_image_build_metadata() will fill the info in all the needed metadata tables
* for the modulebuilder @moduleb.
* At the end of the process, method and field tokens are fixed up and the
* on-disk compressed metadata representation is created.
*/
void
mono_image_build_metadata (MonoReflectionModuleBuilder *moduleb)
{
MonoDynamicTable *table;
MonoDynamicImage *assembly;
MonoReflectionAssemblyBuilder *assemblyb;
MonoDomain *domain;
GPtrArray *types;
guint32 *values;
int i, j;
assemblyb = moduleb->assemblyb;
assembly = moduleb->dynamic_image;
domain = mono_object_domain (assemblyb);
if (assembly->text_rva)
return;
assembly->text_rva = START_TEXT_RVA;
if (moduleb->is_main) {
mono_image_emit_manifest (moduleb);
}
table = &assembly->tables [MONO_TABLE_TYPEDEF];
table->rows = 1; /* .<Module> */
table->next_idx++;
alloc_table (table, table->rows);
/*
* Set the first entry.
*/
values = table->values + table->columns;
values [MONO_TYPEDEF_FLAGS] = 0;
values [MONO_TYPEDEF_NAME] = string_heap_insert (&assembly->sheap, "<Module>");
values [MONO_TYPEDEF_NAMESPACE] = string_heap_insert (&assembly->sheap, "");
values [MONO_TYPEDEF_EXTENDS] = 0;
values [MONO_TYPEDEF_FIELD_LIST] = 1;
values [MONO_TYPEDEF_METHOD_LIST] = 1;
/*
* handle global methods
* FIXME: test what to do when global methods are defined in multiple modules.
*/
if (moduleb->global_methods) {
table = &assembly->tables [MONO_TABLE_METHOD];
table->rows += mono_array_length (moduleb->global_methods);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (moduleb->global_methods); ++i)
mono_image_get_method_info (
mono_array_get (moduleb->global_methods, MonoReflectionMethodBuilder*, i), assembly);
}
if (moduleb->global_fields) {
table = &assembly->tables [MONO_TABLE_FIELD];
table->rows += mono_array_length (moduleb->global_fields);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (moduleb->global_fields); ++i)
mono_image_get_field_info (
mono_array_get (moduleb->global_fields, MonoReflectionFieldBuilder*, i), assembly);
}
table = &assembly->tables [MONO_TABLE_MODULE];
alloc_table (table, 1);
mono_image_fill_module_table (domain, moduleb, assembly);
/* Collect all types into a list sorted by their table_idx */
types = g_ptr_array_new ();
if (moduleb->types)
for (i = 0; i < moduleb->num_types; ++i) {
MonoReflectionTypeBuilder *type = mono_array_get (moduleb->types, MonoReflectionTypeBuilder*, i);
collect_types (types, type);
}
g_ptr_array_sort (types, (GCompareFunc)compare_types_by_table_idx);
table = &assembly->tables [MONO_TABLE_TYPEDEF];
table->rows += types->len;
alloc_table (table, table->rows);
/*
* Emit type names + namespaces at one place inside the string heap,
* so load_class_names () needs to touch fewer pages.
*/
for (i = 0; i < types->len; ++i) {
MonoReflectionTypeBuilder *tb = g_ptr_array_index (types, i);
string_heap_insert_mstring (&assembly->sheap, tb->nspace);
}
for (i = 0; i < types->len; ++i) {
MonoReflectionTypeBuilder *tb = g_ptr_array_index (types, i);
string_heap_insert_mstring (&assembly->sheap, tb->name);
}
for (i = 0; i < types->len; ++i) {
MonoReflectionTypeBuilder *type = g_ptr_array_index (types, i);
mono_image_get_type_info (domain, type, assembly);
}
/*
* table->rows is already set above and in mono_image_fill_module_table.
*/
/* add all the custom attributes at the end, once all the indexes are stable */
mono_image_add_cattrs (assembly, 1, MONO_CUSTOM_ATTR_ASSEMBLY, assemblyb->cattrs);
/* CAS assembly permissions */
if (assemblyb->permissions_minimum)
mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_minimum);
if (assemblyb->permissions_optional)
mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_optional);
if (assemblyb->permissions_refused)
mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_refused);
module_add_cattrs (assembly, moduleb);
/* fixup tokens */
mono_g_hash_table_foreach (assembly->token_fixups, (GHFunc)fixup_method, assembly);
/* Create the MethodImpl table. We do this after emitting all methods so we already know
* the final tokens and don't need another fixup pass. */
if (moduleb->global_methods) {
for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) {
MonoReflectionMethodBuilder *mb = mono_array_get (
moduleb->global_methods, MonoReflectionMethodBuilder*, i);
mono_image_add_methodimpl (assembly, mb);
}
}
for (i = 0; i < types->len; ++i) {
MonoReflectionTypeBuilder *type = g_ptr_array_index (types, i);
if (type->methods) {
for (j = 0; j < type->num_methods; ++j) {
MonoReflectionMethodBuilder *mb = mono_array_get (
type->methods, MonoReflectionMethodBuilder*, j);
mono_image_add_methodimpl (assembly, mb);
}
}
}
g_ptr_array_free (types, TRUE);
fixup_cattrs (assembly);
}
#else /* DISABLE_REFLECTION_EMIT_SAVE */
void
mono_image_build_metadata (MonoReflectionModuleBuilder *moduleb)
{
g_error ("This mono runtime was configured with --enable-minimal=reflection_emit_save, so saving of dynamic assemblies is not supported.");
}
#endif /* DISABLE_REFLECTION_EMIT_SAVE */
typedef struct {
guint32 import_lookup_table;
guint32 timestamp;
guint32 forwarder;
guint32 name_rva;
guint32 import_address_table_rva;
} MonoIDT;
typedef struct {
guint32 name_rva;
guint32 flags;
} MonoILT;
#ifndef DISABLE_REFLECTION_EMIT
/*
* mono_image_insert_string:
* @module: module builder object
* @str: a string
*
* Insert @str into the user string stream of @module.
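* Returns: a MONO_TOKEN_STRING token that can be embedded in the IL stream.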
*/
guint32
mono_image_insert_string (MonoReflectionModuleBuilder *module, MonoString *str)
{
MonoDynamicImage *assembly;
guint32 idx;
char buf [16];
char *b = buf;
MONO_ARCH_SAVE_REGS;
if (!module->dynamic_image)
mono_image_module_basic_init (module);
assembly = module->dynamic_image;
if (assembly->save) {
mono_metadata_encode_value (1 | (str->length * 2), b, &b);
idx = mono_image_add_stream_data (&assembly->us, buf, b-buf);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
{
char *swapped = g_malloc (2 * mono_string_length (str));
const char *p = (const char*)mono_string_chars (str);
swap_with_size (swapped, p, 2, mono_string_length (str));
mono_image_add_stream_data (&assembly->us, swapped, str->length * 2);
g_free (swapped);
}
#else
mono_image_add_stream_data (&assembly->us, (const char*)mono_string_chars (str), str->length * 2);
#endif
mono_image_add_stream_data (&assembly->us, "", 1);
} else {
idx = assembly->us.index ++;
}
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (MONO_TOKEN_STRING | idx), str);
return MONO_TOKEN_STRING | idx;
}
guint32
mono_image_create_method_token (MonoDynamicImage *assembly, MonoObject *obj, MonoArray *opt_param_types)
{
MonoClass *klass;
guint32 token = 0;
klass = obj->vtable->klass;
if (strcmp (klass->name, "MonoMethod") == 0) {
MonoMethod *method = ((MonoReflectionMethod *)obj)->method;
MonoMethodSignature *sig, *old;
guint32 sig_token, parent;
int nargs, i;
g_assert (opt_param_types && (mono_method_signature (method)->sentinelpos >= 0));
nargs = mono_array_length (opt_param_types);
old = mono_method_signature (method);
sig = mono_metadata_signature_alloc ( &assembly->image, old->param_count + nargs);
sig->hasthis = old->hasthis;
sig->explicit_this = old->explicit_this;
sig->call_convention = old->call_convention;
sig->generic_param_count = old->generic_param_count;
sig->param_count = old->param_count + nargs;
sig->sentinelpos = old->param_count;
sig->ret = old->ret;
for (i = 0; i < old->param_count; i++)
sig->params [i] = old->params [i];
for (i = 0; i < nargs; i++) {
MonoReflectionType *rt = mono_array_get (opt_param_types, MonoReflectionType *, i);
sig->params [old->param_count + i] = mono_reflection_type_get_handle (rt);
}
parent = mono_image_typedef_or_ref (assembly, &method->klass->byval_arg);
g_assert ((parent & MONO_TYPEDEFORREF_MASK) == MONO_MEMBERREF_PARENT_TYPEREF);
parent >>= MONO_TYPEDEFORREF_BITS;
parent <<= MONO_MEMBERREF_PARENT_BITS;
parent |= MONO_MEMBERREF_PARENT_TYPEREF;
sig_token = method_encode_signature (assembly, sig);
token = mono_image_get_varargs_method_token (assembly, parent, method->name, sig_token);
} else if (strcmp (klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
ReflectionMethodBuilder rmb;
guint32 parent, sig;
char *name;
reflection_methodbuilder_from_method_builder (&rmb, mb);
rmb.opt_types = opt_param_types;
sig = method_builder_encode_signature (assembly, &rmb);
parent = mono_image_create_token (assembly, obj, TRUE, TRUE);
g_assert (mono_metadata_token_table (parent) == MONO_TABLE_METHOD);
parent = mono_metadata_token_index (parent) << MONO_MEMBERREF_PARENT_BITS;
parent |= MONO_MEMBERREF_PARENT_METHODDEF;
name = mono_string_to_utf8 (rmb.name);
token = mono_image_get_varargs_method_token (
assembly, parent, name, sig);
g_free (name);
} else {
g_error ("requested method token for %s\n", klass->name);
}
return token;
}
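/*
 * Example: for a vararg target "void Foo (string fmt, __arglist)" called
 * as Foo ("x", __arglist (1, 2)), opt_param_types holds the two int32
 * types and the call-site signature encoded above is
 *   void (string, ..., int32, int32)
 * with sentinelpos marking where the fixed parameters end.
 */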
/*
* mono_image_create_token:
* @assembly: a dynamic assembly
* @obj: the MemberInfo (or *Builder) object to get a token for
* @register_token: Whether to register the token in the assembly->tokens hash.
*
* Get a token to insert in the IL code stream for the given MemberInfo.
* The metadata emission routines need to pass FALSE as REGISTER_TOKEN, since by
* that time the table_idx values have been recomputed, so registering the token
* would overwrite an existing entry.
*/
guint32
mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj,
gboolean create_methodspec, gboolean register_token)
{
MonoClass *klass;
guint32 token = 0;
klass = obj->vtable->klass;
/* Check for user defined reflection objects */
/* TypeDelegator is the only corlib type which doesn't look like a MonoReflectionType */
if (klass->image != mono_defaults.corlib || (strcmp (klass->name, "TypeDelegator") == 0))
mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported"));
if (strcmp (klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;
if (tb->module->dynamic_image == assembly && !tb->generic_params && !mb->generic_params)
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
else
token = mono_image_get_methodbuilder_token (assembly, mb, create_methodspec);
/*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/
} else if (strcmp (klass->name, "ConstructorBuilder") == 0) {
MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;
if (tb->module->dynamic_image == assembly && !tb->generic_params)
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
else
token = mono_image_get_ctorbuilder_token (assembly, mb);
/*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/
} else if (strcmp (klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)fb->typeb;
if (tb->generic_params) {
token = mono_image_get_generic_field_token (assembly, fb);
} else {
token = fb->table_idx | MONO_TOKEN_FIELD_DEF;
}
} else if (strcmp (klass->name, "TypeBuilder") == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj;
token = tb->table_idx | MONO_TOKEN_TYPE_DEF;
} else if (strcmp (klass->name, "MonoType") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
MonoClass *mc = mono_class_from_mono_type (type);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref_full (assembly, type, mc->generic_container == NULL));
} else if (strcmp (klass->name, "GenericTypeParameterBuilder") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "MonoGenericClass") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "MonoCMethod") == 0 ||
strcmp (klass->name, "MonoMethod") == 0 ||
strcmp (klass->name, "MonoGenericMethod") == 0 ||
strcmp (klass->name, "MonoGenericCMethod") == 0) {
MonoReflectionMethod *m = (MonoReflectionMethod *)obj;
if (m->method->is_inflated) {
if (create_methodspec)
token = mono_image_get_methodspec_token (assembly, m->method);
else
token = mono_image_get_inflated_method_token (assembly, m->method);
} else if ((m->method->klass->image == &assembly->image) &&
!m->method->klass->generic_class) {
static guint32 method_table_idx = 0xffffff;
if (m->method->klass->wastypebuilder) {
/* we use the same token as the one that was assigned
* to the MethodBuilder.
* FIXME: do the equivalent for Fields.
*/
token = m->method->token;
} else {
/*
* Each token should have a unique index, but the indexes are
* assigned by managed code, so we don't know about them. An
* easy solution is to count backwards...
*/
method_table_idx --;
token = MONO_TOKEN_METHOD_DEF | method_table_idx;
}
} else {
token = mono_image_get_methodref_token (assembly, m->method, create_methodspec);
}
/*g_print ("got token 0x%08x for %s\n", token, m->method->name);*/
} else if (strcmp (klass->name, "MonoField") == 0) {
MonoReflectionField *f = (MonoReflectionField *)obj;
if ((f->field->parent->image == &assembly->image) && !is_field_on_inst (f->field)) {
static guint32 field_table_idx = 0xffffff;
field_table_idx --;
token = MONO_TOKEN_FIELD_DEF | field_table_idx;
} else {
token = mono_image_get_fieldref_token (assembly, f);
}
/*g_print ("got token 0x%08x for %s\n", token, f->field->name);*/
} else if (strcmp (klass->name, "MonoArrayMethod") == 0) {
MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod *)obj;
token = mono_image_get_array_token (assembly, m);
} else if (strcmp (klass->name, "SignatureHelper") == 0) {
MonoReflectionSigHelper *s = (MonoReflectionSigHelper*)obj;
token = MONO_TOKEN_SIGNATURE | mono_image_get_sighelper_token (assembly, s);
} else if (strcmp (klass->name, "EnumBuilder") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "FieldOnTypeBuilderInst") == 0) {
MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj;
token = mono_image_get_field_on_inst_token (assembly, f);
} else if (strcmp (klass->name, "ConstructorOnTypeBuilderInst") == 0) {
MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj;
token = mono_image_get_ctor_on_inst_token (assembly, c, create_methodspec);
} else if (strcmp (klass->name, "MethodOnTypeBuilderInst") == 0) {
MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)obj;
token = mono_image_get_method_on_inst_token (assembly, m, create_methodspec);
} else if (is_sre_array (klass) || is_sre_byref (klass) || is_sre_pointer (klass)) {
MonoReflectionType *type = (MonoReflectionType *)obj;
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (type)));
} else {
g_error ("requested token for %s\n", klass->name);
}
if (register_token)
mono_image_register_token (assembly, token, obj);
return token;
}
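/*
 * Example: requesting a token while emitting IL (a sketch; "assembly"
 * and "method_obj" stand for a dynamic image and a reflection method
 * object the caller already holds):
 */
#if 0 /* illustrative sketch, not compiled */
guint32 tok = mono_image_create_token (assembly, method_obj,
                                       TRUE /* allow a MethodSpec for inflated methods */,
                                       TRUE /* record the token in assembly->tokens */);
#endif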
/*
* mono_image_register_token:
*
* Register the TOKEN->OBJ mapping in the mapping table in ASSEMBLY. This is required for
* the Module.ResolveXXXToken () methods to work.
*/
void
mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj)
{
MonoObject *prev = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
if (prev) {
/* There could be multiple MethodInfo objects with the same token */
//g_assert (prev == obj);
} else {
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj);
}
}
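/*
 * Example: the inverse lookup performed when a token is resolved back to
 * its object, the same pattern used in the function above (a sketch):
 */
#if 0 /* illustrative sketch, not compiled */
MonoObject *obj = mono_g_hash_table_lookup (assembly->tokens,
                                            GUINT_TO_POINTER (token));
#endif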
static MonoDynamicImage*
create_dynamic_mono_image (MonoDynamicAssembly *assembly, char *assembly_name, char *module_name)
{
static const guchar entrycode [16] = {0xff, 0x25, 0};
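/* 0xff 0x25 is the x86 "jmp dword ptr [disp32]" indirect jump used as the
 * unmanaged entry stub. Its 4-byte operand (bytes 2..5 of the code stream)
 * is patched later, in mono_image_create_pefile (), with the virtual
 * address of the IAT slot holding _CorExeMain/_CorDllMain. */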
MonoDynamicImage *image;
int i;
const char *version;
if (!strcmp (mono_get_runtime_info ()->framework_version, "2.1"))
version = "v2.0.50727"; /* HACK: SL 2 enforces the .net 2 metadata version */
else
version = mono_get_runtime_info ()->runtime_version;
#if HAVE_BOEHM_GC
image = GC_MALLOC (sizeof (MonoDynamicImage));
#else
image = g_new0 (MonoDynamicImage, 1);
#endif
mono_profiler_module_event (&image->image, MONO_PROFILE_START_LOAD);
/*g_print ("created image %p\n", image);*/
/* keep in sync with image.c */
image->image.name = assembly_name;
image->image.assembly_name = image->image.name; /* they may be different */
image->image.module_name = module_name;
image->image.version = g_strdup (version);
image->image.md_version_major = 1;
image->image.md_version_minor = 1;
image->image.dynamic = TRUE;
image->image.references = g_new0 (MonoAssembly*, 1);
image->image.references [0] = NULL;
mono_image_init (&image->image);
image->token_fixups = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC);
image->method_to_table_idx = g_hash_table_new (NULL, NULL);
image->field_to_table_idx = g_hash_table_new (NULL, NULL);
image->method_aux_hash = g_hash_table_new (NULL, NULL);
image->handleref = g_hash_table_new (NULL, NULL);
image->tokens = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
image->generic_def_objects = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
image->methodspec = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC);
image->typespec = g_hash_table_new ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal);
image->typeref = g_hash_table_new ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal);
image->blob_cache = g_hash_table_new ((GHashFunc)mono_blob_entry_hash, (GCompareFunc)mono_blob_entry_equal);
image->gen_params = g_ptr_array_new ();
/*g_print ("string heap create for image %p (%s)\n", image, module_name);*/
string_heap_init (&image->sheap);
mono_image_add_stream_data (&image->us, "", 1);
add_to_blob_cached (image, (char*) "", 1, NULL, 0);
/* import tables... */
mono_image_add_stream_data (&image->code, (char*)entrycode, sizeof (entrycode));
image->iat_offset = mono_image_add_stream_zero (&image->code, 8); /* two IAT entries */
image->idt_offset = mono_image_add_stream_zero (&image->code, 2 * sizeof (MonoIDT)); /* two IDT entries */
image->imp_names_offset = mono_image_add_stream_zero (&image->code, 2); /* flags for name entry */
mono_image_add_stream_data (&image->code, "_CorExeMain", 12);
mono_image_add_stream_data (&image->code, "mscoree.dll", 12);
image->ilt_offset = mono_image_add_stream_zero (&image->code, 8); /* two ILT entries */
stream_data_align (&image->code);
image->cli_header_offset = mono_image_add_stream_zero (&image->code, sizeof (MonoCLIHeader));
for (i=0; i < MONO_TABLE_NUM; ++i) {
image->tables [i].next_idx = 1;
image->tables [i].columns = table_sizes [i];
}
image->image.assembly = (MonoAssembly*)assembly;
image->run = assembly->run;
image->save = assembly->save;
image->pe_kind = 0x1; /* ILOnly */
image->machine = 0x14c; /* I386 */
mono_profiler_module_loaded (&image->image, MONO_PROFILE_OK);
return image;
}
#endif
static void
free_blob_cache_entry (gpointer key, gpointer val, gpointer user_data)
{
g_free (key);
}
void
mono_dynamic_image_free (MonoDynamicImage *image)
{
MonoDynamicImage *di = image;
GList *list;
int i;
if (di->methodspec)
mono_g_hash_table_destroy (di->methodspec);
if (di->typespec)
g_hash_table_destroy (di->typespec);
if (di->typeref)
g_hash_table_destroy (di->typeref);
if (di->handleref)
g_hash_table_destroy (di->handleref);
if (di->tokens)
mono_g_hash_table_destroy (di->tokens);
if (di->generic_def_objects)
mono_g_hash_table_destroy (di->generic_def_objects);
if (di->blob_cache) {
g_hash_table_foreach (di->blob_cache, free_blob_cache_entry, NULL);
g_hash_table_destroy (di->blob_cache);
}
if (di->standalonesig_cache)
g_hash_table_destroy (di->standalonesig_cache);
for (list = di->array_methods; list; list = list->next) {
ArrayMethod *am = (ArrayMethod *)list->data;
g_free (am->sig);
g_free (am->name);
g_free (am);
}
g_list_free (di->array_methods);
if (di->gen_params) {
for (i = 0; i < di->gen_params->len; i++) {
GenericParamTableEntry *entry = g_ptr_array_index (di->gen_params, i);
if (entry->gparam->type.type) {
MonoGenericParam *param = entry->gparam->type.type->data.generic_param;
g_free ((char*)mono_generic_param_info (param)->name);
g_free (param);
}
g_free (entry);
}
g_ptr_array_free (di->gen_params, TRUE);
}
if (di->token_fixups)
mono_g_hash_table_destroy (di->token_fixups);
if (di->method_to_table_idx)
g_hash_table_destroy (di->method_to_table_idx);
if (di->field_to_table_idx)
g_hash_table_destroy (di->field_to_table_idx);
if (di->method_aux_hash)
g_hash_table_destroy (di->method_aux_hash);
g_free (di->strong_name);
g_free (di->win32_res);
if (di->public_key)
g_free (di->public_key);
/*g_print ("string heap destroy for image %p\n", di);*/
mono_dynamic_stream_reset (&di->sheap);
mono_dynamic_stream_reset (&di->code);
mono_dynamic_stream_reset (&di->resources);
mono_dynamic_stream_reset (&di->us);
mono_dynamic_stream_reset (&di->blob);
mono_dynamic_stream_reset (&di->tstream);
mono_dynamic_stream_reset (&di->guid);
for (i = 0; i < MONO_TABLE_NUM; ++i) {
g_free (di->tables [i].values);
}
}
#ifndef DISABLE_REFLECTION_EMIT
/*
* mono_image_basic_init:
* @assembly: an assembly builder object
*
* Create the MonoImage that represents the assembly builder and setup some
* of the helper hash table and the basic metadata streams.
*/
void
mono_image_basic_init (MonoReflectionAssemblyBuilder *assemblyb)
{
MonoDynamicAssembly *assembly;
MonoDynamicImage *image;
MonoDomain *domain = mono_object_domain (assemblyb);
MONO_ARCH_SAVE_REGS;
if (assemblyb->dynamic_assembly)
return;
#if HAVE_BOEHM_GC
assembly = assemblyb->dynamic_assembly = GC_MALLOC (sizeof (MonoDynamicAssembly));
#else
assembly = assemblyb->dynamic_assembly = g_new0 (MonoDynamicAssembly, 1);
#endif
mono_profiler_assembly_event (&assembly->assembly, MONO_PROFILE_START_LOAD);
assembly->assembly.ref_count = 1;
assembly->assembly.dynamic = TRUE;
assembly->assembly.corlib_internal = assemblyb->corlib_internal;
assemblyb->assembly.assembly = (MonoAssembly*)assembly;
assembly->assembly.basedir = mono_string_to_utf8 (assemblyb->dir);
if (assemblyb->culture)
assembly->assembly.aname.culture = mono_string_to_utf8 (assemblyb->culture);
else
assembly->assembly.aname.culture = g_strdup ("");
if (assemblyb->version) {
char *vstr = mono_string_to_utf8 (assemblyb->version);
char **version = g_strsplit (vstr, ".", 4);
char **parts = version;
assembly->assembly.aname.major = atoi (*parts++);
assembly->assembly.aname.minor = atoi (*parts++);
assembly->assembly.aname.build = *parts != NULL ? atoi (*parts++) : 0;
assembly->assembly.aname.revision = *parts != NULL ? atoi (*parts) : 0;
g_strfreev (version);
g_free (vstr);
} else {
assembly->assembly.aname.major = 0;
assembly->assembly.aname.minor = 0;
assembly->assembly.aname.build = 0;
assembly->assembly.aname.revision = 0;
}
assembly->run = assemblyb->access != 2;
assembly->save = assemblyb->access != 1;
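/* AssemblyBuilderAccess: Run == 1, Save == 2, RunAndSave == 3, so the two
 * tests above mean "everything except Save-only can run" and "everything
 * except Run-only can be persisted". */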
assembly->domain = domain;
image = create_dynamic_mono_image (assembly, mono_string_to_utf8 (assemblyb->name), g_strdup ("RefEmit_YouForgotToDefineAModule"));
image->initial_image = TRUE;
assembly->assembly.aname.name = image->image.name;
assembly->assembly.image = &image->image;
if (assemblyb->pktoken && assemblyb->pktoken->max_length) {
/* -1 to correct for the trailing NULL byte */
if (assemblyb->pktoken->max_length != MONO_PUBLIC_KEY_TOKEN_LENGTH - 1) {
g_error ("Public key token length invalid for assembly %s: %i", assembly->assembly.aname.name, assemblyb->pktoken->max_length);
}
memcpy (&assembly->assembly.aname.public_key_token, mono_array_addr (assemblyb->pktoken, guint8, 0), assemblyb->pktoken->max_length);
}
mono_domain_assemblies_lock (domain);
domain->domain_assemblies = g_slist_prepend (domain->domain_assemblies, assembly);
mono_domain_assemblies_unlock (domain);
register_assembly (mono_object_domain (assemblyb), &assemblyb->assembly, &assembly->assembly);
mono_profiler_assembly_loaded (&assembly->assembly, MONO_PROFILE_OK);
mono_assembly_invoke_load_hook ((MonoAssembly*)assembly);
}
#endif /* !DISABLE_REFLECTION_EMIT */
#ifndef DISABLE_REFLECTION_EMIT_SAVE
static int
calc_section_size (MonoDynamicImage *assembly)
{
int nsections = 0;
/* alignment constraints */
mono_image_add_stream_zero (&assembly->code, 4 - (assembly->code.index % 4));
g_assert ((assembly->code.index % 4) == 0);
assembly->meta_size += 3;
assembly->meta_size &= ~3;
mono_image_add_stream_zero (&assembly->resources, 4 - (assembly->resources.index % 4));
g_assert ((assembly->resources.index % 4) == 0);
assembly->sections [MONO_SECTION_TEXT].size = assembly->meta_size + assembly->code.index + assembly->resources.index + assembly->strong_name_size;
assembly->sections [MONO_SECTION_TEXT].attrs = SECT_FLAGS_HAS_CODE | SECT_FLAGS_MEM_EXECUTE | SECT_FLAGS_MEM_READ;
nsections++;
if (assembly->win32_res) {
guint32 res_size = (assembly->win32_res_size + 3) & ~3;
assembly->sections [MONO_SECTION_RSRC].size = res_size;
assembly->sections [MONO_SECTION_RSRC].attrs = SECT_FLAGS_HAS_INITIALIZED_DATA | SECT_FLAGS_MEM_READ;
nsections++;
}
assembly->sections [MONO_SECTION_RELOC].size = 12;
assembly->sections [MONO_SECTION_RELOC].attrs = SECT_FLAGS_MEM_READ | SECT_FLAGS_MEM_DISCARDABLE | SECT_FLAGS_HAS_INITIALIZED_DATA;
nsections++;
return nsections;
}
typedef struct {
guint32 id;
guint32 offset;
GSList *children;
MonoReflectionWin32Resource *win32_res; /* Only for leaf nodes */
} ResTreeNode;
static int
resource_tree_compare_by_id (gconstpointer a, gconstpointer b)
{
ResTreeNode *t1 = (ResTreeNode*)a;
ResTreeNode *t2 = (ResTreeNode*)b;
return t1->id - t2->id;
}
/*
* resource_tree_create:
*
* Organize the resources into a resource tree.
*/
static ResTreeNode *
resource_tree_create (MonoArray *win32_resources)
{
ResTreeNode *tree, *res_node, *type_node, *lang_node;
GSList *l;
int i;
tree = g_new0 (ResTreeNode, 1);
for (i = 0; i < mono_array_length (win32_resources); ++i) {
MonoReflectionWin32Resource *win32_res =
(MonoReflectionWin32Resource*)mono_array_addr (win32_resources, MonoReflectionWin32Resource, i);
/* Create node */
/* FIXME: BUG: this stores managed references in unmanaged memory */
lang_node = g_new0 (ResTreeNode, 1);
lang_node->id = win32_res->lang_id;
lang_node->win32_res = win32_res;
/* Create type node if necessary */
type_node = NULL;
for (l = tree->children; l; l = l->next)
if (((ResTreeNode*)(l->data))->id == win32_res->res_type) {
type_node = (ResTreeNode*)l->data;
break;
}
if (!type_node) {
type_node = g_new0 (ResTreeNode, 1);
type_node->id = win32_res->res_type;
/*
* The resource types have to be sorted otherwise
* Windows Explorer can't display the version information.
*/
tree->children = g_slist_insert_sorted (tree->children,
type_node, resource_tree_compare_by_id);
}
/* Create res node if necessary */
res_node = NULL;
for (l = type_node->children; l; l = l->next)
if (((ResTreeNode*)(l->data))->id == win32_res->res_id) {
res_node = (ResTreeNode*)l->data;
break;
}
if (!res_node) {
res_node = g_new0 (ResTreeNode, 1);
res_node->id = win32_res->res_id;
type_node->children = g_slist_append (type_node->children, res_node);
}
res_node->children = g_slist_append (res_node->children, lang_node);
}
return tree;
}
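/*
 * Example: for a VERSIONINFO and a MANIFEST resource, the tree built
 * above has this shape (ids illustrative; 16 is RT_VERSION, 24 is
 * RT_MANIFEST):
 *
 *   root
 *    +- type 16
 *    |   +- res 1
 *    |       +- lang 0x0409 -> win32_res
 *    +- type 24
 *        +- res 1
 *            +- lang 0x0409 -> win32_res
 */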
/*
* resource_tree_encode:
*
* Encode the resource tree into the format used in the PE file.
*/
static void
resource_tree_encode (ResTreeNode *node, char *begin, char *p, char **endbuf)
{
char *entries;
MonoPEResourceDir dir;
MonoPEResourceDirEntry dir_entry;
MonoPEResourceDataEntry data_entry;
GSList *l;
guint32 res_id_entries;
/*
* For the format of the resource directory, see the article
* "An In-Depth Look into the Win32 Portable Executable File Format" by
* Matt Pietrek
*/
memset (&dir, 0, sizeof (dir));
memset (&dir_entry, 0, sizeof (dir_entry));
memset (&data_entry, 0, sizeof (data_entry));
g_assert (sizeof (dir) == 16);
g_assert (sizeof (dir_entry) == 8);
g_assert (sizeof (data_entry) == 16);
node->offset = p - begin;
/* IMAGE_RESOURCE_DIRECTORY */
res_id_entries = g_slist_length (node->children);
dir.res_id_entries = GUINT16_TO_LE (res_id_entries);
memcpy (p, &dir, sizeof (dir));
p += sizeof (dir);
/* Reserve space for entries */
entries = p;
p += sizeof (dir_entry) * res_id_entries;
/* Write children */
for (l = node->children; l; l = l->next) {
ResTreeNode *child = (ResTreeNode*)l->data;
if (child->win32_res) {
guint32 size;
child->offset = p - begin;
/* IMAGE_RESOURCE_DATA_ENTRY */
data_entry.rde_data_offset = GUINT32_TO_LE (p - begin + sizeof (data_entry));
size = mono_array_length (child->win32_res->res_data);
data_entry.rde_size = GUINT32_TO_LE (size);
memcpy (p, &data_entry, sizeof (data_entry));
p += sizeof (data_entry);
memcpy (p, mono_array_addr (child->win32_res->res_data, char, 0), size);
p += size;
} else {
resource_tree_encode (child, begin, p, &p);
}
}
/* IMAGE_RESOURCE_ENTRY */
for (l = node->children; l; l = l->next) {
ResTreeNode *child = (ResTreeNode*)l->data;
MONO_PE_RES_DIR_ENTRY_SET_NAME (dir_entry, FALSE, child->id);
MONO_PE_RES_DIR_ENTRY_SET_DIR (dir_entry, !child->win32_res, child->offset);
memcpy (entries, &dir_entry, sizeof (dir_entry));
entries += sizeof (dir_entry);
}
*endbuf = p;
}
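/*
 * Resulting layout for one directory level (sizes match the asserts
 * above): a 16 byte IMAGE_RESOURCE_DIRECTORY, then one 8 byte
 * IMAGE_RESOURCE_DIRECTORY_ENTRY per child, then the encoded children
 * (sub-directories, or a 16 byte IMAGE_RESOURCE_DATA_ENTRY followed by
 * the raw data for leaves). All offsets are relative to the start of the
 * .rsrc section; data offsets are rebased to RVAs later by
 * fixup_resource_directory ().
 */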
static void
resource_tree_free (ResTreeNode * node)
{
GSList * list;
for (list = node->children; list; list = list->next)
resource_tree_free ((ResTreeNode*)list->data);
g_slist_free(node->children);
g_free (node);
}
static void
assembly_add_win32_resources (MonoDynamicImage *assembly, MonoReflectionAssemblyBuilder *assemblyb)
{
char *buf;
char *p;
guint32 size, i;
MonoReflectionWin32Resource *win32_res;
ResTreeNode *tree;
if (!assemblyb->win32_resources)
return;
/*
* Resources are stored in a three level tree inside the PE file.
* - level one contains a node for each type of resource
* - level two contains a node for each resource
* - level three contains a node for each instance of a resource for a
* specific language.
*/
tree = resource_tree_create (assemblyb->win32_resources);
/* Estimate the size of the encoded tree */
size = 0;
for (i = 0; i < mono_array_length (assemblyb->win32_resources); ++i) {
win32_res = (MonoReflectionWin32Resource*)mono_array_addr (assemblyb->win32_resources, MonoReflectionWin32Resource, i);
size += mono_array_length (win32_res->res_data);
}
/* Directory structure */
size += mono_array_length (assemblyb->win32_resources) * 256;
p = buf = g_malloc (size);
resource_tree_encode (tree, p, p, &p);
g_assert (p - buf <= size);
assembly->win32_res = g_malloc (p - buf);
assembly->win32_res_size = p - buf;
memcpy (assembly->win32_res, buf, p - buf);
g_free (buf);
resource_tree_free (tree);
}
static void
fixup_resource_directory (char *res_section, char *p, guint32 rva)
{
MonoPEResourceDir *dir = (MonoPEResourceDir*)p;
int i;
p += sizeof (MonoPEResourceDir);
for (i = 0; i < GUINT16_FROM_LE (dir->res_named_entries) + GUINT16_FROM_LE (dir->res_id_entries); ++i) {
MonoPEResourceDirEntry *dir_entry = (MonoPEResourceDirEntry*)p;
char *child = res_section + MONO_PE_RES_DIR_ENTRY_DIR_OFFSET (*dir_entry);
if (MONO_PE_RES_DIR_ENTRY_IS_DIR (*dir_entry)) {
fixup_resource_directory (res_section, child, rva);
} else {
MonoPEResourceDataEntry *data_entry = (MonoPEResourceDataEntry*)child;
data_entry->rde_data_offset = GUINT32_TO_LE (GUINT32_FROM_LE (data_entry->rde_data_offset) + rva);
}
p += sizeof (MonoPEResourceDirEntry);
}
}
static void
checked_write_file (HANDLE f, gconstpointer buffer, guint32 numbytes)
{
guint32 dummy;
if (!WriteFile (f, buffer, numbytes, &dummy, NULL))
g_error ("WriteFile returned %d\n", GetLastError ());
}
/*
* mono_image_create_pefile:
* @mb: a module builder object
*
* This function creates the PE-COFF header, the image sections, the CLI header,
* etc. All the data is written in assembly->pefile where it can be easily
* retrieved later in chunks.
*/
void
mono_image_create_pefile (MonoReflectionModuleBuilder *mb, HANDLE file)
{
MonoMSDOSHeader *msdos;
MonoDotNetHeader *header;
MonoSectionTable *section;
MonoCLIHeader *cli_header;
guint32 size, image_size, virtual_base, text_offset;
guint32 header_start, section_start, file_offset, virtual_offset;
MonoDynamicImage *assembly;
MonoReflectionAssemblyBuilder *assemblyb;
MonoDynamicStream pefile_stream = {0};
MonoDynamicStream *pefile = &pefile_stream;
int i, nsections;
guint32 *rva, value;
guchar *p;
static const unsigned char msheader[] = {
0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
0x0e, 0x1f, 0xba, 0x0e, 0x00, 0xb4, 0x09, 0xcd, 0x21, 0xb8, 0x01, 0x4c, 0xcd, 0x21, 0x54, 0x68,
0x69, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x20, 0x63, 0x61, 0x6e, 0x6e, 0x6f,
0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x44, 0x4f, 0x53, 0x20,
0x6d, 0x6f, 0x64, 0x65, 0x2e, 0x0d, 0x0d, 0x0a, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
assemblyb = mb->assemblyb;
mono_image_basic_init (assemblyb);
assembly = mb->dynamic_image;
assembly->pe_kind = assemblyb->pe_kind;
assembly->machine = assemblyb->machine;
((MonoDynamicImage*)assemblyb->dynamic_assembly->assembly.image)->pe_kind = assemblyb->pe_kind;
((MonoDynamicImage*)assemblyb->dynamic_assembly->assembly.image)->machine = assemblyb->machine;
mono_image_build_metadata (mb);
if (mb->is_main && assemblyb->resources) {
int len = mono_array_length (assemblyb->resources);
for (i = 0; i < len; ++i)
assembly_add_resource (mb, assembly, (MonoReflectionResource*)mono_array_addr (assemblyb->resources, MonoReflectionResource, i));
}
if (mb->resources) {
int len = mono_array_length (mb->resources);
for (i = 0; i < len; ++i)
assembly_add_resource (mb, assembly, (MonoReflectionResource*)mono_array_addr (mb->resources, MonoReflectionResource, i));
}
build_compressed_metadata (assembly);
if (mb->is_main)
assembly_add_win32_resources (assembly, assemblyb);
nsections = calc_section_size (assembly);
/* The DOS header and stub */
g_assert (sizeof (MonoMSDOSHeader) == sizeof (msheader));
mono_image_add_stream_data (pefile, (char*)msheader, sizeof (msheader));
/* the dotnet header */
header_start = mono_image_add_stream_zero (pefile, sizeof (MonoDotNetHeader));
/* the section tables */
section_start = mono_image_add_stream_zero (pefile, sizeof (MonoSectionTable) * nsections);
file_offset = section_start + sizeof (MonoSectionTable) * nsections;
virtual_offset = VIRT_ALIGN;
image_size = 0;
for (i = 0; i < MONO_SECTION_MAX; ++i) {
if (!assembly->sections [i].size)
continue;
/* align offsets */
file_offset += FILE_ALIGN - 1;
file_offset &= ~(FILE_ALIGN - 1);
virtual_offset += VIRT_ALIGN - 1;
virtual_offset &= ~(VIRT_ALIGN - 1);
assembly->sections [i].offset = file_offset;
assembly->sections [i].rva = virtual_offset;
file_offset += assembly->sections [i].size;
virtual_offset += assembly->sections [i].size;
image_size += (assembly->sections [i].size + VIRT_ALIGN - 1) & ~(VIRT_ALIGN - 1);
}
file_offset += FILE_ALIGN - 1;
file_offset &= ~(FILE_ALIGN - 1);
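/* The pair of statements above is the usual power-of-two round-up:
 * (x + A - 1) & ~(A - 1) rounds x up to the next multiple of A. */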
image_size += section_start + sizeof (MonoSectionTable) * nsections;
/* back-patch info */
msdos = (MonoMSDOSHeader*)pefile->data;
msdos->pe_offset = GUINT32_FROM_LE (sizeof (MonoMSDOSHeader));
header = (MonoDotNetHeader*)(pefile->data + header_start);
header->pesig [0] = 'P';
header->pesig [1] = 'E';
header->coff.coff_machine = GUINT16_FROM_LE (assemblyb->machine);
header->coff.coff_sections = GUINT16_FROM_LE (nsections);
header->coff.coff_time = GUINT32_FROM_LE (time (NULL));
header->coff.coff_opt_header_size = GUINT16_FROM_LE (sizeof (MonoDotNetHeader) - sizeof (MonoCOFFHeader) - 4);
if (assemblyb->pekind == 1) {
/* it's a dll */
header->coff.coff_attributes = GUINT16_FROM_LE (0x210e);
} else {
/* it's an exe */
header->coff.coff_attributes = GUINT16_FROM_LE (0x010e);
}
virtual_base = 0x400000; /* FIXME: 0x10000000 if a DLL */
header->pe.pe_magic = GUINT16_FROM_LE (0x10B);
header->pe.pe_major = 6;
header->pe.pe_minor = 0;
size = assembly->sections [MONO_SECTION_TEXT].size;
size += FILE_ALIGN - 1;
size &= ~(FILE_ALIGN - 1);
header->pe.pe_code_size = GUINT32_FROM_LE(size);
size = assembly->sections [MONO_SECTION_RSRC].size;
size += FILE_ALIGN - 1;
size &= ~(FILE_ALIGN - 1);
header->pe.pe_data_size = GUINT32_FROM_LE(size);
g_assert (START_TEXT_RVA == assembly->sections [MONO_SECTION_TEXT].rva);
header->pe.pe_rva_code_base = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_TEXT].rva);
header->pe.pe_rva_data_base = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].rva);
/* pe_rva_entry_point always at the beginning of the text section */
header->pe.pe_rva_entry_point = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_TEXT].rva);
header->nt.pe_image_base = GUINT32_FROM_LE (virtual_base);
header->nt.pe_section_align = GUINT32_FROM_LE (VIRT_ALIGN);
header->nt.pe_file_alignment = GUINT32_FROM_LE (FILE_ALIGN);
header->nt.pe_os_major = GUINT16_FROM_LE (4);
header->nt.pe_os_minor = GUINT16_FROM_LE (0);
header->nt.pe_subsys_major = GUINT16_FROM_LE (4);
size = section_start;
size += FILE_ALIGN - 1;
size &= ~(FILE_ALIGN - 1);
header->nt.pe_header_size = GUINT32_FROM_LE (size);
size = image_size;
size += VIRT_ALIGN - 1;
size &= ~(VIRT_ALIGN - 1);
header->nt.pe_image_size = GUINT32_FROM_LE (size);
/*
// Translate the PEFileKind value to the value expected by the Windows loader
*/
{
short kind;
/*
// PEFileKinds.Dll == 1
// PEFileKinds.ConsoleApplication == 2
// PEFileKinds.WindowApplication == 3
//
// need to get:
// IMAGE_SUBSYSTEM_WINDOWS_GUI 2 // Image runs in the Windows GUI subsystem.
// IMAGE_SUBSYSTEM_WINDOWS_CUI 3 // Image runs in the Windows character subsystem.
*/
if (assemblyb->pekind == 3)
kind = 2;
else
kind = 3;
header->nt.pe_subsys_required = GUINT16_FROM_LE (kind);
}
header->nt.pe_stack_reserve = GUINT32_FROM_LE (0x00100000);
header->nt.pe_stack_commit = GUINT32_FROM_LE (0x00001000);
header->nt.pe_heap_reserve = GUINT32_FROM_LE (0x00100000);
header->nt.pe_heap_commit = GUINT32_FROM_LE (0x00001000);
header->nt.pe_loader_flags = GUINT32_FROM_LE (0);
header->nt.pe_data_dir_count = GUINT32_FROM_LE (16);
/* fill data directory entries */
header->datadir.pe_resource_table.size = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].size);
header->datadir.pe_resource_table.rva = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].rva);
header->datadir.pe_reloc_table.size = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RELOC].size);
header->datadir.pe_reloc_table.rva = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RELOC].rva);
header->datadir.pe_cli_header.size = GUINT32_FROM_LE (72);
header->datadir.pe_cli_header.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->cli_header_offset);
header->datadir.pe_iat.size = GUINT32_FROM_LE (8);
header->datadir.pe_iat.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->iat_offset);
/* patch entrypoint name */
if (assemblyb->pekind == 1)
memcpy (assembly->code.data + assembly->imp_names_offset + 2, "_CorDllMain", 12);
else
memcpy (assembly->code.data + assembly->imp_names_offset + 2, "_CorExeMain", 12);
/* patch imported function RVA name */
rva = (guint32*)(assembly->code.data + assembly->iat_offset);
*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->imp_names_offset);
/* the import table */
header->datadir.pe_import_table.size = GUINT32_FROM_LE (79); /* FIXME: magic number? */
header->datadir.pe_import_table.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->idt_offset);
/* patch imported dll RVA name and other entries in the dir */
rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, name_rva));
*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->imp_names_offset + 14); /* 14 is hint+strlen+1 of func name */
rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, import_address_table_rva));
*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->iat_offset);
rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, import_lookup_table));
*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->ilt_offset);
p = (guchar*)(assembly->code.data + assembly->ilt_offset);
value = (assembly->text_rva + assembly->imp_names_offset);
*p++ = (value) & 0xff;
*p++ = (value >> 8) & (0xff);
*p++ = (value >> 16) & (0xff);
*p++ = (value >> 24) & (0xff);
/* the CLI header info */
cli_header = (MonoCLIHeader*)(assembly->code.data + assembly->cli_header_offset);
cli_header->ch_size = GUINT32_FROM_LE (72);
cli_header->ch_runtime_major = GUINT16_FROM_LE (2);
if (mono_framework_version () > 1)
cli_header->ch_runtime_minor = GUINT16_FROM_LE (5);
else
cli_header->ch_runtime_minor = GUINT16_FROM_LE (0);
cli_header->ch_flags = GUINT32_FROM_LE (assemblyb->pe_kind);
if (assemblyb->entry_point) {
guint32 table_idx = 0;
if (!strcmp (assemblyb->entry_point->object.vtable->klass->name, "MethodBuilder")) {
MonoReflectionMethodBuilder *methodb = (MonoReflectionMethodBuilder*)assemblyb->entry_point;
table_idx = methodb->table_idx;
} else {
table_idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, assemblyb->entry_point->method));
}
cli_header->ch_entry_point = GUINT32_FROM_LE (table_idx | MONO_TOKEN_METHOD_DEF);
} else {
cli_header->ch_entry_point = GUINT32_FROM_LE (0);
}
/* The embedded managed resources */
text_offset = assembly->text_rva + assembly->code.index;
cli_header->ch_resources.rva = GUINT32_FROM_LE (text_offset);
cli_header->ch_resources.size = GUINT32_FROM_LE (assembly->resources.index);
text_offset += assembly->resources.index;
cli_header->ch_metadata.rva = GUINT32_FROM_LE (text_offset);
cli_header->ch_metadata.size = GUINT32_FROM_LE (assembly->meta_size);
text_offset += assembly->meta_size;
if (assembly->strong_name_size) {
cli_header->ch_strong_name.rva = GUINT32_FROM_LE (text_offset);
cli_header->ch_strong_name.size = GUINT32_FROM_LE (assembly->strong_name_size);
text_offset += assembly->strong_name_size;
}
/* write the section tables and section content */
section = (MonoSectionTable*)(pefile->data + section_start);
for (i = 0; i < MONO_SECTION_MAX; ++i) {
static const char section_names [][7] = {
".text", ".rsrc", ".reloc"
};
if (!assembly->sections [i].size)
continue;
strcpy (section->st_name, section_names [i]);
/*g_print ("output section %s (%d), size: %d\n", section->st_name, i, assembly->sections [i].size);*/
section->st_virtual_address = GUINT32_FROM_LE (assembly->sections [i].rva);
section->st_virtual_size = GUINT32_FROM_LE (assembly->sections [i].size);
section->st_raw_data_size = GUINT32_FROM_LE (GUINT32_TO_LE (section->st_virtual_size) + (FILE_ALIGN - 1));
section->st_raw_data_size &= GUINT32_FROM_LE (~(FILE_ALIGN - 1));
section->st_raw_data_ptr = GUINT32_FROM_LE (assembly->sections [i].offset);
section->st_flags = GUINT32_FROM_LE (assembly->sections [i].attrs);
section ++;
}
checked_write_file (file, pefile->data, pefile->index);
mono_dynamic_stream_reset (pefile);
for (i = 0; i < MONO_SECTION_MAX; ++i) {
if (!assembly->sections [i].size)
continue;
if (SetFilePointer (file, assembly->sections [i].offset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
g_error ("SetFilePointer returned %d\n", GetLastError ());
switch (i) {
case MONO_SECTION_TEXT:
/* patch entry point */
p = (guchar*)(assembly->code.data + 2);
value = (virtual_base + assembly->text_rva + assembly->iat_offset);
*p++ = (value) & 0xff;
*p++ = (value >> 8) & 0xff;
*p++ = (value >> 16) & 0xff;
*p++ = (value >> 24) & 0xff;
checked_write_file (file, assembly->code.data, assembly->code.index);
checked_write_file (file, assembly->resources.data, assembly->resources.index);
checked_write_file (file, assembly->image.raw_metadata, assembly->meta_size);
checked_write_file (file, assembly->strong_name, assembly->strong_name_size);
g_free (assembly->image.raw_metadata);
break;
case MONO_SECTION_RELOC: {
struct {
guint32 page_rva;
guint32 block_size;
guint16 type_and_offset;
guint16 term;
} reloc;
g_assert (sizeof (reloc) == 12);
reloc.page_rva = GUINT32_FROM_LE (assembly->text_rva);
reloc.block_size = GUINT32_FROM_LE (12);
/*
* the entrypoint is always at the start of the text section
* 3 is IMAGE_REL_BASED_HIGHLOW
* 2 is patch_size_rva - text_rva
*/
reloc.type_and_offset = GUINT16_FROM_LE ((3 << 12) + (2));
reloc.term = 0;
checked_write_file (file, &reloc, sizeof (reloc));
break;
}
case MONO_SECTION_RSRC:
if (assembly->win32_res) {
/* Fixup the offsets in the IMAGE_RESOURCE_DATA_ENTRY structures */
fixup_resource_directory (assembly->win32_res, assembly->win32_res, assembly->sections [i].rva);
checked_write_file (file, assembly->win32_res, assembly->win32_res_size);
}
break;
default:
g_assert_not_reached ();
}
}
/* check that the file is properly padded */
if (SetFilePointer (file, file_offset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
g_error ("SetFilePointer returned %d\n", GetLastError ());
if (! SetEndOfFile (file))
g_error ("SetEndOfFile returned %d\n", GetLastError ());
mono_dynamic_stream_reset (&assembly->code);
mono_dynamic_stream_reset (&assembly->us);
mono_dynamic_stream_reset (&assembly->blob);
mono_dynamic_stream_reset (&assembly->guid);
mono_dynamic_stream_reset (&assembly->sheap);
g_hash_table_foreach (assembly->blob_cache, (GHFunc)g_free, NULL);
g_hash_table_destroy (assembly->blob_cache);
assembly->blob_cache = NULL;
}
#else /* DISABLE_REFLECTION_EMIT_SAVE */
void
mono_image_create_pefile (MonoReflectionModuleBuilder *mb, HANDLE file)
{
g_assert_not_reached ();
}
#endif /* DISABLE_REFLECTION_EMIT_SAVE */
#ifndef DISABLE_REFLECTION_EMIT
MonoReflectionModule *
mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName)
{
char *name;
MonoImage *image;
MonoImageOpenStatus status;
MonoDynamicAssembly *assembly;
guint32 module_count;
MonoImage **new_modules;
gboolean *new_modules_loaded;
name = mono_string_to_utf8 (fileName);
image = mono_image_open (name, &status);
if (!image) {
MonoException *exc;
if (status == MONO_IMAGE_ERROR_ERRNO)
exc = mono_get_exception_file_not_found (fileName);
else
exc = mono_get_exception_bad_image_format (name);
g_free (name);
mono_raise_exception (exc);
}
g_free (name);
assembly = ab->dynamic_assembly;
image->assembly = (MonoAssembly*)assembly;
module_count = image->assembly->image->module_count;
new_modules = g_new0 (MonoImage *, module_count + 1);
new_modules_loaded = g_new0 (gboolean, module_count + 1);
if (image->assembly->image->modules)
memcpy (new_modules, image->assembly->image->modules, module_count * sizeof (MonoImage *));
if (image->assembly->image->modules_loaded)
memcpy (new_modules_loaded, image->assembly->image->modules_loaded, module_count * sizeof (gboolean));
new_modules [module_count] = image;
new_modules_loaded [module_count] = TRUE;
mono_image_addref (image);
g_free (image->assembly->image->modules);
image->assembly->image->modules = new_modules;
image->assembly->image->modules_loaded = new_modules_loaded;
image->assembly->image->module_count ++;
mono_assembly_load_references (image, &status);
if (status) {
mono_image_close (image);
mono_raise_exception (mono_get_exception_file_not_found (fileName));
}
return mono_module_get_object (mono_domain_get (), image);
}
#endif /* DISABLE_REFLECTION_EMIT */
/*
* We always need to return the same object for MethodInfo, FieldInfo etc.,
* but we need to consider the reflected type.
* Type uses a different hash, since it uses custom hash/equal functions.
*/
typedef struct {
gpointer item;
MonoClass *refclass;
} ReflectedEntry;
static gboolean
reflected_equal (gconstpointer a, gconstpointer b) {
const ReflectedEntry *ea = a;
const ReflectedEntry *eb = b;
return (ea->item == eb->item) && (ea->refclass == eb->refclass);
}
static guint
reflected_hash (gconstpointer a) {
const ReflectedEntry *ea = a;
return mono_aligned_addr_hash (ea->item);
}
#define CHECK_OBJECT(t,p,k) \
do { \
t _obj; \
ReflectedEntry e; \
e.item = (p); \
e.refclass = (k); \
mono_domain_lock (domain); \
if (!domain->refobject_hash) \
domain->refobject_hash = mono_g_hash_table_new_type (reflected_hash, reflected_equal, MONO_HASH_VALUE_GC); \
if ((_obj = mono_g_hash_table_lookup (domain->refobject_hash, &e))) { \
mono_domain_unlock (domain); \
return _obj; \
} \
mono_domain_unlock (domain); \
} while (0)
#ifdef HAVE_BOEHM_GC
/* ReflectedEntry doesn't need to be GC tracked */
#define ALLOC_REFENTRY g_new0 (ReflectedEntry, 1)
#define FREE_REFENTRY(entry) g_free ((entry))
#define REFENTRY_REQUIRES_CLEANUP
#else
#define ALLOC_REFENTRY mono_mempool_alloc (domain->mp, sizeof (ReflectedEntry))
/* FIXME: */
#define FREE_REFENTRY(entry)
#endif
#define CACHE_OBJECT(t,p,o,k) \
do { \
t _obj; \
ReflectedEntry pe; \
pe.item = (p); \
pe.refclass = (k); \
mono_domain_lock (domain); \
if (!domain->refobject_hash) \
domain->refobject_hash = mono_g_hash_table_new_type (reflected_hash, reflected_equal, MONO_HASH_VALUE_GC); \
_obj = mono_g_hash_table_lookup (domain->refobject_hash, &pe); \
if (!_obj) { \
ReflectedEntry *e = ALLOC_REFENTRY; \
e->item = (p); \
e->refclass = (k); \
mono_g_hash_table_insert (domain->refobject_hash, e,o); \
_obj = o; \
} \
mono_domain_unlock (domain); \
return _obj; \
} while (0)
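/*
 * Example: how the two macros pair up in the accessors below (a sketch;
 * MonoFoo/MonoReflectionFoo and build_wrapper are placeholder names, not
 * real runtime types or functions):
 */
#if 0 /* illustrative sketch, not compiled */
MonoReflectionFoo*
mono_foo_get_object (MonoDomain *domain, MonoFoo *foo)
{
	MonoReflectionFoo *res;
	CHECK_OBJECT (MonoReflectionFoo *, foo, NULL); /* returns any cached wrapper */
	res = build_wrapper (domain, foo);             /* hypothetical constructor */
	CACHE_OBJECT (MonoReflectionFoo *, foo, res, NULL); /* caches res and returns it */
}
#endif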
static void
clear_cached_object (MonoDomain *domain, gpointer o, MonoClass *klass)
{
mono_domain_lock (domain);
if (domain->refobject_hash) {
ReflectedEntry pe;
gpointer orig_pe, orig_value;
pe.item = o;
pe.refclass = klass;
if (mono_g_hash_table_lookup_extended (domain->refobject_hash, &pe, &orig_pe, &orig_value)) {
mono_g_hash_table_remove (domain->refobject_hash, &pe);
FREE_REFENTRY (orig_pe);
}
}
mono_domain_unlock (domain);
}
#ifdef REFENTRY_REQUIRES_CLEANUP
static void
cleanup_refobject_hash (gpointer key, gpointer value, gpointer user_data)
{
FREE_REFENTRY (key);
}
#endif
void
mono_reflection_cleanup_domain (MonoDomain *domain)
{
if (domain->refobject_hash) {
/*let's avoid scanning the whole hashtable if not needed*/
#ifdef REFENTRY_REQUIRES_CLEANUP
mono_g_hash_table_foreach (domain->refobject_hash, cleanup_refobject_hash, NULL);
#endif
mono_g_hash_table_destroy (domain->refobject_hash);
domain->refobject_hash = NULL;
}
}
#ifndef DISABLE_REFLECTION_EMIT
static gpointer
register_assembly (MonoDomain *domain, MonoReflectionAssembly *res, MonoAssembly *assembly)
{
CACHE_OBJECT (MonoReflectionAssembly *, assembly, res, NULL);
}
static gpointer
register_module (MonoDomain *domain, MonoReflectionModuleBuilder *res, MonoDynamicImage *module)
{
CACHE_OBJECT (MonoReflectionModuleBuilder *, module, res, NULL);
}
void
mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb)
{
MonoDynamicImage *image = moduleb->dynamic_image;
MonoReflectionAssemblyBuilder *ab = moduleb->assemblyb;
if (!image) {
MonoError error;
int module_count;
MonoImage **new_modules;
MonoImage *ass;
char *name, *fqname;
/*
* FIXME: we already created an image in mono_image_basic_init (), but
* we don't know which module it belongs to, since that is only
* determined at assembly save time.
*/
/*image = (MonoDynamicImage*)ab->dynamic_assembly->assembly.image; */
name = mono_string_to_utf8 (ab->name);
fqname = mono_string_to_utf8_checked (moduleb->module.fqname, &error);
if (!mono_error_ok (&error)) {
g_free (name);
mono_error_raise_exception (&error);
}
image = create_dynamic_mono_image (ab->dynamic_assembly, name, fqname);
moduleb->module.image = &image->image;
moduleb->dynamic_image = image;
register_module (mono_object_domain (moduleb), moduleb, image);
/* register the module with the assembly */
ass = ab->dynamic_assembly->assembly.image;
module_count = ass->module_count;
new_modules = g_new0 (MonoImage *, module_count + 1);
if (ass->modules)
memcpy (new_modules, ass->modules, module_count * sizeof (MonoImage *));
new_modules [module_count] = &image->image;
mono_image_addref (&image->image);
g_free (ass->modules);
ass->modules = new_modules;
ass->module_count ++;
}
}
void
mono_image_set_wrappers_type (MonoReflectionModuleBuilder *moduleb, MonoReflectionType *type)
{
MonoDynamicImage *image = moduleb->dynamic_image;
g_assert (type->type);
image->wrappers_type = mono_class_from_mono_type (type->type);
}
#endif
/*
* mono_assembly_get_object:
* @domain: an app domain
* @assembly: an assembly
*
* Return a System.Reflection.Assembly object representing the MonoAssembly @assembly.
*/
MonoReflectionAssembly*
mono_assembly_get_object (MonoDomain *domain, MonoAssembly *assembly)
{
static MonoClass *System_Reflection_Assembly;
MonoReflectionAssembly *res;
CHECK_OBJECT (MonoReflectionAssembly *, assembly, NULL);
if (!System_Reflection_Assembly)
System_Reflection_Assembly = mono_class_from_name (
mono_defaults.corlib, "System.Reflection", "Assembly");
res = (MonoReflectionAssembly *)mono_object_new (domain, System_Reflection_Assembly);
res->assembly = assembly;
CACHE_OBJECT (MonoReflectionAssembly *, assembly, res, NULL);
}
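/*
 * Example embedding use (a sketch): obtain the System.Reflection.Assembly
 * object for corlib in the current domain.
 */
#if 0 /* illustrative sketch, not compiled */
MonoReflectionAssembly *refasm =
	mono_assembly_get_object (mono_domain_get (),
	                          mono_image_get_assembly (mono_defaults.corlib));
#endif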
MonoReflectionModule*
mono_module_get_object (MonoDomain *domain, MonoImage *image)
{
static MonoClass *System_Reflection_Module;
MonoReflectionModule *res;
char* basename;
CHECK_OBJECT (MonoReflectionModule *, image, NULL);
if (!System_Reflection_Module)
System_Reflection_Module = mono_class_from_name (
mono_defaults.corlib, "System.Reflection", "Module");
res = (MonoReflectionModule *)mono_object_new (domain, System_Reflection_Module);
res->image = image;
MONO_OBJECT_SETREF (res, assembly, (MonoReflectionAssembly *) mono_assembly_get_object(domain, image->assembly));
MONO_OBJECT_SETREF (res, fqname, mono_string_new (domain, image->name));
basename = g_path_get_basename (image->name);
MONO_OBJECT_SETREF (res, name, mono_string_new (domain, basename));
MONO_OBJECT_SETREF (res, scopename, mono_string_new (domain, image->module_name));
g_free (basename);
if (image->assembly->image == image) {
res->token = mono_metadata_make_token (MONO_TABLE_MODULE, 1);
} else {
int i;
res->token = 0;
if (image->assembly->image->modules) {
for (i = 0; i < image->assembly->image->module_count; i++) {
if (image->assembly->image->modules [i] == image)
res->token = mono_metadata_make_token (MONO_TABLE_MODULEREF, i + 1);
}
g_assert (res->token);
}
}
CACHE_OBJECT (MonoReflectionModule *, image, res, NULL);
}
MonoReflectionModule*
mono_module_file_get_object (MonoDomain *domain, MonoImage *image, int table_index)
{
static MonoClass *System_Reflection_Module;
MonoReflectionModule *res;
MonoTableInfo *table;
guint32 cols [MONO_FILE_SIZE];
const char *name;
guint32 i, name_idx;
const char *val;
if (!System_Reflection_Module)
System_Reflection_Module = mono_class_from_name (
mono_defaults.corlib, "System.Reflection", "Module");
res = (MonoReflectionModule *)mono_object_new (domain, System_Reflection_Module);
table = &image->tables [MONO_TABLE_FILE];
g_assert (table_index < table->rows);
mono_metadata_decode_row (table, table_index, cols, MONO_FILE_SIZE);
res->image = NULL;
MONO_OBJECT_SETREF (res, assembly, (MonoReflectionAssembly *) mono_assembly_get_object(domain, image->assembly));
name = mono_metadata_string_heap (image, cols [MONO_FILE_NAME]);
/* Check whether the row has a corresponding row in the moduleref table */
table = &image->tables [MONO_TABLE_MODULEREF];
for (i = 0; i < table->rows; ++i) {
name_idx = mono_metadata_decode_row_col (table, i, MONO_MODULEREF_NAME);
val = mono_metadata_string_heap (image, name_idx);
if (strcmp (val, name) == 0)
res->image = image->modules [i];
}
MONO_OBJECT_SETREF (res, fqname, mono_string_new (domain, name));
MONO_OBJECT_SETREF (res, name, mono_string_new (domain, name));
MONO_OBJECT_SETREF (res, scopename, mono_string_new (domain, name));
res->is_resource = cols [MONO_FILE_FLAGS] & FILE_CONTAINS_NO_METADATA; /* bitwise flag test, not logical AND */
res->token = mono_metadata_make_token (MONO_TABLE_FILE, table_index + 1);
return res;
}
static gboolean
mymono_metadata_type_equal (MonoType *t1, MonoType *t2)
{
if ((t1->type != t2->type) ||
(t1->byref != t2->byref))
return FALSE;
switch (t1->type) {
case MONO_TYPE_VOID:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_STRING:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_OBJECT:
case MONO_TYPE_TYPEDBYREF:
return TRUE;
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
return t1->data.klass == t2->data.klass;
case MONO_TYPE_PTR:
return mymono_metadata_type_equal (t1->data.type, t2->data.type);
case MONO_TYPE_ARRAY:
if (t1->data.array->rank != t2->data.array->rank)
return FALSE;
return t1->data.array->eklass == t2->data.array->eklass;
case MONO_TYPE_GENERICINST: {
int i;
MonoGenericInst *i1 = t1->data.generic_class->context.class_inst;
MonoGenericInst *i2 = t2->data.generic_class->context.class_inst;
if (i1->type_argc != i2->type_argc)
return FALSE;
if (!mono_metadata_type_equal (&t1->data.generic_class->container_class->byval_arg,
&t2->data.generic_class->container_class->byval_arg))
return FALSE;
/* FIXME: we should probably just compare the instance pointers directly. */
for (i = 0; i < i1->type_argc; ++i) {
if (!mono_metadata_type_equal (i1->type_argv [i], i2->type_argv [i]))
return FALSE;
}
return TRUE;
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
return t1->data.generic_param == t2->data.generic_param;
default:
g_error ("implement type compare for %0x!", t1->type);
return FALSE;
}
return FALSE;
}
static guint
mymono_metadata_type_hash (MonoType *t1)
{
guint hash;
hash = t1->type;
hash |= t1->byref << 6; /* do not collide with t1->type values */
switch (t1->type) {
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
/* check if the distribution is good enough */
return ((hash << 5) - hash) ^ g_str_hash (t1->data.klass->name);
case MONO_TYPE_PTR:
return ((hash << 5) - hash) ^ mymono_metadata_type_hash (t1->data.type);
case MONO_TYPE_GENERICINST: {
int i;
MonoGenericInst *inst = t1->data.generic_class->context.class_inst;
hash += g_str_hash (t1->data.generic_class->container_class->name);
hash *= 13;
for (i = 0; i < inst->type_argc; ++i) {
hash += mymono_metadata_type_hash (inst->type_argv [i]);
hash *= 13;
}
return hash;
}
}
return hash;
}
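/* Note: "(hash << 5) - hash" above is simply hash * 31, the familiar
 * multiplicative string-hash step; the generic-instance case uses 13 as
 * its multiplier instead. */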
static MonoReflectionGenericClass*
mono_generic_class_get_object (MonoDomain *domain, MonoType *geninst)
{
static MonoClass *System_Reflection_MonoGenericClass;
MonoReflectionGenericClass *res;
MonoClass *klass, *gklass;
MonoGenericInst *ginst;
MonoArray *type_args;
int i;
if (!System_Reflection_MonoGenericClass) {
System_Reflection_MonoGenericClass = mono_class_from_name (
mono_defaults.corlib, "System.Reflection", "MonoGenericClass");
g_assert (System_Reflection_MonoGenericClass);
}
klass = mono_class_from_mono_type (geninst);
gklass = klass->generic_class->container_class;
mono_class_init (klass);
#ifdef HAVE_SGEN_GC
res = (MonoReflectionGenericClass *) mono_gc_alloc_pinned_obj (mono_class_vtable (domain, System_Reflection_MonoGenericClass), mono_class_instance_size (System_Reflection_MonoGenericClass));
#else
res = (MonoReflectionGenericClass *) mono_object_new (domain, System_Reflection_MonoGenericClass);
#endif
res->type.type = geninst;
g_assert (gklass->reflection_info);
g_assert (!strcmp (((MonoObject*)gklass->reflection_info)->vtable->klass->name, "TypeBuilder"));
MONO_OBJECT_SETREF (res, generic_type, gklass->reflection_info);
ginst = klass->generic_class->context.class_inst;
type_args = mono_array_new (domain, mono_defaults.systemtype_class, ginst->type_argc);
for (i = 0; i < ginst->type_argc; ++i)
mono_array_setref (type_args, i, mono_type_get_object (domain, ginst->type_argv [i]));
MONO_OBJECT_SETREF (res, type_arguments, type_args);
return res;
}
static gboolean
verify_safe_for_managed_space (MonoType *type)
{
switch (type->type) {
#ifdef DEBUG_HARDER
case MONO_TYPE_ARRAY:
return verify_safe_for_managed_space (&type->data.array->eklass->byval_arg);
case MONO_TYPE_PTR:
return verify_safe_for_managed_space (type->data.type);
case MONO_TYPE_SZARRAY:
return verify_safe_for_managed_space (&type->data.klass->byval_arg);
case MONO_TYPE_GENERICINST: {
MonoGenericInst *inst = type->data.generic_class->inst;
int i;
if (!inst->is_open)
break;
for (i = 0; i < inst->type_argc; ++i)
if (!verify_safe_for_managed_space (inst->type_argv [i]))
return FALSE;
break;
}
#endif
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
return TRUE;
}
return TRUE;
}
/*
* mono_type_get_object:
* @domain: an app domain
* @type: a type
*
* Return a System.MonoType object representing the type @type.
*/
MonoReflectionType*
mono_type_get_object (MonoDomain *domain, MonoType *type)
{
MonoReflectionType *res;
MonoClass *klass = mono_class_from_mono_type (type);
/* We must avoid using @type as it might have come
 * from a mono_metadata_type_dup and the caller
 * expects that it can be freed.
 * Use the equivalent type owned by the klass instead.
 */
type = klass->byval_arg.byref == type->byref ? &klass->byval_arg : &klass->this_arg;
/* void is very common */
if (type->type == MONO_TYPE_VOID && domain->typeof_void)
return (MonoReflectionType*)domain->typeof_void;
/*
* If the vtable of the given class was already created, we can use
* the MonoType from there and avoid all locking and hash table lookups.
*
* We cannot do this for TypeBuilders as mono_reflection_create_runtime_class expects
* that the resulting object is different.
*/
if (type == &klass->byval_arg && !klass->image->dynamic) {
MonoVTable *vtable = mono_class_try_get_vtable (domain, klass);
if (vtable && vtable->type)
return vtable->type;
}
mono_loader_lock (); /*FIXME mono_class_init and mono_class_vtable acquire it*/
mono_domain_lock (domain);
if (!domain->type_hash)
domain->type_hash = mono_g_hash_table_new_type ((GHashFunc)mymono_metadata_type_hash,
(GCompareFunc)mymono_metadata_type_equal, MONO_HASH_VALUE_GC);
if ((res = mono_g_hash_table_lookup (domain->type_hash, type))) {
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
}
/* Create a MonoGenericClass object for instantiations of not finished TypeBuilders */
if ((type->type == MONO_TYPE_GENERICINST) && type->data.generic_class->is_dynamic && !type->data.generic_class->container_class->wastypebuilder) {
res = (MonoReflectionType *)mono_generic_class_get_object (domain, type);
mono_g_hash_table_insert (domain->type_hash, type, res);
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
}
if (!verify_safe_for_managed_space (type)) {
mono_domain_unlock (domain);
mono_loader_unlock ();
mono_raise_exception (mono_get_exception_invalid_operation ("This type cannot be propagated to managed space"));
}
if (klass->reflection_info && !klass->wastypebuilder) {
gboolean is_type_done = TRUE;
/* Generic parameters have reflection_info set but they are not finished together with their enclosing type.
* We must ensure that once a type is finished we don't return a GenericTypeParameterBuilder.
* We can't simply close the types as this will interfere with other parts of the generics machinery.
*/
if (klass->byval_arg.type == MONO_TYPE_MVAR || klass->byval_arg.type == MONO_TYPE_VAR) {
MonoGenericParam *gparam = klass->byval_arg.data.generic_param;
if (gparam->owner && gparam->owner->is_method) {
MonoMethod *method = gparam->owner->owner.method;
if (method && mono_class_get_generic_type_definition (method->klass)->wastypebuilder)
is_type_done = FALSE;
} else if (gparam->owner && !gparam->owner->is_method) {
MonoClass *klass = gparam->owner->owner.klass;
if (klass && mono_class_get_generic_type_definition (klass)->wastypebuilder)
is_type_done = FALSE;
}
}
/* g_assert_not_reached (); */
/* should this be considered an error condition? */
if (is_type_done && !type->byref) {
mono_domain_unlock (domain);
mono_loader_unlock ();
return klass->reflection_info;
}
}
// FIXME: Get rid of this, do it in the icalls for Type
mono_class_init (klass);
#ifdef HAVE_SGEN_GC
res = (MonoReflectionType *)mono_gc_alloc_pinned_obj (mono_class_vtable (domain, mono_defaults.monotype_class), mono_class_instance_size (mono_defaults.monotype_class));
#else
res = (MonoReflectionType *)mono_object_new (domain, mono_defaults.monotype_class);
#endif
res->type = type;
mono_g_hash_table_insert (domain->type_hash, type, res);
if (type->type == MONO_TYPE_VOID)
domain->typeof_void = (MonoObject*)res;
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
}
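/*
 * A minimal usage sketch for mono_type_get_object () (illustrative only,
 * not part of the runtime):
 *
 *   MonoDomain *domain = mono_domain_get ();
 *   MonoReflectionType *rt = mono_type_get_object (domain, &klass->byval_arg);
 *
 * Repeated calls with the same canonical MonoType return the same managed
 * object, thanks to the per-domain type_hash cache used above.
 */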
/*
* mono_method_get_object:
* @domain: an app domain
* @method: a method
* @refclass: the reflected type (can be NULL)
*
* Return a System.Reflection.MonoMethod object representing the method @method.
*/
MonoReflectionMethod*
mono_method_get_object (MonoDomain *domain, MonoMethod *method, MonoClass *refclass)
{
/*
* We use the same C representation for methods and constructors, but the type
* name in C# is different.
*/
static MonoClass *System_Reflection_MonoMethod = NULL;
static MonoClass *System_Reflection_MonoCMethod = NULL;
static MonoClass *System_Reflection_MonoGenericMethod = NULL;
static MonoClass *System_Reflection_MonoGenericCMethod = NULL;
MonoClass *klass;
MonoReflectionMethod *ret;
if (method->is_inflated) {
MonoReflectionGenericMethod *gret;
refclass = method->klass;
CHECK_OBJECT (MonoReflectionMethod *, method, refclass);
if ((*method->name == '.') && (!strcmp (method->name, ".ctor") || !strcmp (method->name, ".cctor"))) {
if (!System_Reflection_MonoGenericCMethod)
System_Reflection_MonoGenericCMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoGenericCMethod");
klass = System_Reflection_MonoGenericCMethod;
} else {
if (!System_Reflection_MonoGenericMethod)
System_Reflection_MonoGenericMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoGenericMethod");
klass = System_Reflection_MonoGenericMethod;
}
gret = (MonoReflectionGenericMethod*)mono_object_new (domain, klass);
gret->method.method = method;
MONO_OBJECT_SETREF (gret, method.name, mono_string_new (domain, method->name));
MONO_OBJECT_SETREF (gret, method.reftype, mono_type_get_object (domain, &refclass->byval_arg));
CACHE_OBJECT (MonoReflectionMethod *, method, (MonoReflectionMethod*)gret, refclass);
}
if (!refclass)
refclass = method->klass;
CHECK_OBJECT (MonoReflectionMethod *, method, refclass);
if (*method->name == '.' && (strcmp (method->name, ".ctor") == 0 || strcmp (method->name, ".cctor") == 0)) {
if (!System_Reflection_MonoCMethod)
System_Reflection_MonoCMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoCMethod");
klass = System_Reflection_MonoCMethod;
}
else {
if (!System_Reflection_MonoMethod)
System_Reflection_MonoMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoMethod");
klass = System_Reflection_MonoMethod;
}
ret = (MonoReflectionMethod*)mono_object_new (domain, klass);
ret->method = method;
MONO_OBJECT_SETREF (ret, reftype, mono_type_get_object (domain, &refclass->byval_arg));
CACHE_OBJECT (MonoReflectionMethod *, method, ret, refclass);
}
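/*
 * Usage sketch (illustrative): passing NULL for @refclass makes the
 * method's own class the reflected type:
 *
 *   MonoReflectionMethod *rm = mono_method_get_object (domain, method, NULL);
 *
 * Constructors come back as MonoCMethod instances and inflated methods as
 * MonoGenericMethod/MonoGenericCMethod, as selected above.
 */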
/*
* mono_method_clear_object:
*
* Clear the cached reflection objects for the dynamic method METHOD.
*/
void
mono_method_clear_object (MonoDomain *domain, MonoMethod *method)
{
MonoClass *klass;
g_assert (method->dynamic);
klass = method->klass;
while (klass) {
clear_cached_object (domain, method, klass);
klass = klass->parent;
}
/* Added by mono_param_get_objects () */
clear_cached_object (domain, &(method->signature), NULL);
klass = method->klass;
while (klass) {
clear_cached_object (domain, &(method->signature), klass);
klass = klass->parent;
}
}
/*
* mono_field_get_object:
* @domain: an app domain
* @klass: a type
* @field: a field
*
* Return a System.Reflection.MonoField object representing the field @field
* in class @klass.
*/
MonoReflectionField*
mono_field_get_object (MonoDomain *domain, MonoClass *klass, MonoClassField *field)
{
MonoReflectionField *res;
static MonoClass *monofield_klass;
CHECK_OBJECT (MonoReflectionField *, field, klass);
if (!monofield_klass)
monofield_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoField");
res = (MonoReflectionField *)mono_object_new (domain, monofield_klass);
res->klass = klass;
res->field = field;
MONO_OBJECT_SETREF (res, name, mono_string_new (domain, mono_field_get_name (field)));
if (is_field_on_inst (field))
res->attrs = get_field_on_inst_generic_type (field)->attrs;
else
res->attrs = field->type->attrs;
MONO_OBJECT_SETREF (res, type, mono_type_get_object (domain, field->type));
CACHE_OBJECT (MonoReflectionField *, field, res, klass);
}
/*
* mono_property_get_object:
* @domain: an app domain
* @klass: a type
* @property: a property
*
* Return a System.Reflection.MonoProperty object representing the property @property
* in class @klass.
*/
MonoReflectionProperty*
mono_property_get_object (MonoDomain *domain, MonoClass *klass, MonoProperty *property)
{
MonoReflectionProperty *res;
static MonoClass *monoproperty_klass;
CHECK_OBJECT (MonoReflectionProperty *, property, klass);
if (!monoproperty_klass)
monoproperty_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoProperty");
res = (MonoReflectionProperty *)mono_object_new (domain, monoproperty_klass);
res->klass = klass;
res->property = property;
CACHE_OBJECT (MonoReflectionProperty *, property, res, klass);
}
/*
* mono_event_get_object:
* @domain: an app domain
* @klass: a type
* @event: an event
*
* Return a System.Reflection.MonoEvent object representing the event @event
* in class @klass.
*/
MonoReflectionEvent*
mono_event_get_object (MonoDomain *domain, MonoClass *klass, MonoEvent *event)
{
MonoReflectionEvent *res;
MonoReflectionMonoEvent *mono_event;
static MonoClass *monoevent_klass;
CHECK_OBJECT (MonoReflectionEvent *, event, klass);
if (!monoevent_klass)
monoevent_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoEvent");
mono_event = (MonoReflectionMonoEvent *)mono_object_new (domain, monoevent_klass);
mono_event->klass = klass;
mono_event->event = event;
res = (MonoReflectionEvent*)mono_event;
CACHE_OBJECT (MonoReflectionEvent *, event, res, klass);
}
/**
* mono_get_reflection_missing_object:
* @domain: Domain where the object lives
*
* Returns the System.Reflection.Missing.Value singleton object
* (of type System.Reflection.Missing).
*
* Used as the value for ParameterInfo.DefaultValue when the Optional
* flag is present.
*/
static MonoObject *
mono_get_reflection_missing_object (MonoDomain *domain)
{
MonoObject *obj;
static MonoClassField *missing_value_field = NULL;
if (!missing_value_field) {
MonoClass *missing_klass;
missing_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Missing");
mono_class_init (missing_klass);
missing_value_field = mono_class_get_field_from_name (missing_klass, "Value");
g_assert (missing_value_field);
}
obj = mono_field_get_value_object (domain, missing_value_field, NULL);
g_assert (obj);
return obj;
}
static MonoObject*
get_dbnull (MonoDomain *domain, MonoObject **dbnull)
{
if (!*dbnull)
*dbnull = mono_get_dbnull_object (domain);
return *dbnull;
}
static MonoObject*
get_reflection_missing (MonoDomain *domain, MonoObject **reflection_missing)
{
if (!*reflection_missing)
*reflection_missing = mono_get_reflection_missing_object (domain);
return *reflection_missing;
}
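/*
 * These two helpers lazily fetch the DBNull.Value and Missing.Value
 * singletons on first use, so the common case (parameters with real
 * default values) never touches them. Callers pass the address of a
 * stack-local cache, e.g. (illustrative):
 *
 *   MonoObject *dbnull = NULL;
 *   MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull));
 */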
/*
* mono_param_get_objects:
* @domain: an app domain
* @method: a method
*
* Return a System.Reflection.ParameterInfo array object representing the parameters
* in the method @method.
*/
MonoArray*
mono_param_get_objects_internal (MonoDomain *domain, MonoMethod *method, MonoClass *refclass)
{
static MonoClass *System_Reflection_ParameterInfo;
static MonoClass *System_Reflection_ParameterInfo_array;
MonoArray *res = NULL;
MonoReflectionMethod *member = NULL;
MonoReflectionParameter *param = NULL;
char **names, **blobs = NULL;
guint32 *types = NULL;
MonoType *type = NULL;
MonoObject *dbnull = NULL;
MonoObject *missing = NULL;
MonoMarshalSpec **mspecs;
MonoMethodSignature *sig;
MonoVTable *pinfo_vtable;
int i;
if (!System_Reflection_ParameterInfo_array) {
MonoClass *klass;
klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "ParameterInfo");
mono_memory_barrier ();
System_Reflection_ParameterInfo = klass;
klass = mono_array_class_get (klass, 1);
mono_memory_barrier ();
System_Reflection_ParameterInfo_array = klass;
}
if (!mono_method_signature (method)->param_count)
return mono_array_new_specific (mono_class_vtable (domain, System_Reflection_ParameterInfo_array), 0);
/* Note: the cache is keyed on the address of the method's signature,
* since MethodInfos are already cached with the method itself as the key.
*/
CHECK_OBJECT (MonoArray*, &(method->signature), refclass);
sig = mono_method_signature (method);
member = mono_method_get_object (domain, method, refclass);
names = g_new (char *, sig->param_count);
mono_method_get_param_names (method, (const char **) names);
mspecs = g_new (MonoMarshalSpec*, sig->param_count + 1);
mono_method_get_marshal_info (method, mspecs);
res = mono_array_new_specific (mono_class_vtable (domain, System_Reflection_ParameterInfo_array), sig->param_count);
pinfo_vtable = mono_class_vtable (domain, System_Reflection_ParameterInfo);
for (i = 0; i < sig->param_count; ++i) {
param = (MonoReflectionParameter *)mono_object_new_specific (pinfo_vtable);
MONO_OBJECT_SETREF (param, ClassImpl, mono_type_get_object (domain, sig->params [i]));
MONO_OBJECT_SETREF (param, MemberImpl, (MonoObject*)member);
MONO_OBJECT_SETREF (param, NameImpl, mono_string_new (domain, names [i]));
param->PositionImpl = i;
param->AttrsImpl = sig->params [i]->attrs;
if (!(param->AttrsImpl & PARAM_ATTRIBUTE_HAS_DEFAULT)) {
if (param->AttrsImpl & PARAM_ATTRIBUTE_OPTIONAL)
MONO_OBJECT_SETREF (param, DefaultValueImpl, get_reflection_missing (domain, &missing));
else
MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull));
} else {
if (!blobs) {
blobs = g_new0 (char *, sig->param_count);
types = g_new0 (guint32, sig->param_count);
get_default_param_value_blobs (method, blobs, types);
}
/* Build MonoType for the type from the Constant Table */
if (!type)
type = g_new0 (MonoType, 1);
type->type = types [i];
type->data.klass = NULL;
if (types [i] == MONO_TYPE_CLASS)
type->data.klass = mono_defaults.object_class;
else if ((sig->params [i]->type == MONO_TYPE_VALUETYPE) && sig->params [i]->data.klass->enumtype) {
/* For enums, types [i] contains the base type */
type->type = MONO_TYPE_VALUETYPE;
type->data.klass = mono_class_from_mono_type (sig->params [i]);
} else
type->data.klass = mono_class_from_mono_type (type);
MONO_OBJECT_SETREF (param, DefaultValueImpl, mono_get_object_from_blob (domain, type, blobs [i]));
/* Type in the Constant table is MONO_TYPE_CLASS for nulls */
if (types [i] != MONO_TYPE_CLASS && !param->DefaultValueImpl) {
if (param->AttrsImpl & PARAM_ATTRIBUTE_OPTIONAL)
MONO_OBJECT_SETREF (param, DefaultValueImpl, get_reflection_missing (domain, &missing));
else
MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull));
}
}
if (mspecs [i + 1])
MONO_OBJECT_SETREF (param, MarshalAsImpl, (MonoObject*)mono_reflection_marshal_from_marshal_spec (domain, method->klass, mspecs [i + 1]));
mono_array_setref (res, i, param);
}
g_free (names);
g_free (blobs);
g_free (types);
g_free (type);
for (i = mono_method_signature (method)->param_count; i >= 0; i--)
if (mspecs [i])
mono_metadata_free_marshal_spec (mspecs [i]);
g_free (mspecs);
CACHE_OBJECT (MonoArray *, &(method->signature), res, refclass);
}
MonoArray*
mono_param_get_objects (MonoDomain *domain, MonoMethod *method)
{
return mono_param_get_objects_internal (domain, method, NULL);
}
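/*
 * Usage sketch (illustrative): the returned array holds one ParameterInfo
 * per formal parameter of the method signature:
 *
 *   MonoArray *pinfos = mono_param_get_objects (domain, method);
 *   int n = mono_array_length (pinfos);
 */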
/*
* mono_method_body_get_object:
* @domain: an app domain
* @method: a method
*
* Return a System.Reflection.MethodBody object representing the method @method.
*/
MonoReflectionMethodBody*
mono_method_body_get_object (MonoDomain *domain, MonoMethod *method)
{
static MonoClass *System_Reflection_MethodBody = NULL;
static MonoClass *System_Reflection_LocalVariableInfo = NULL;
static MonoClass *System_Reflection_ExceptionHandlingClause = NULL;
MonoReflectionMethodBody *ret;
MonoMethodNormal *mn;
MonoMethodHeader *header;
MonoImage *image;
guint32 method_rva, local_var_sig_token;
char *ptr;
unsigned char format, flags;
int i;
if (!System_Reflection_MethodBody)
System_Reflection_MethodBody = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MethodBody");
if (!System_Reflection_LocalVariableInfo)
System_Reflection_LocalVariableInfo = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "LocalVariableInfo");
if (!System_Reflection_ExceptionHandlingClause)
System_Reflection_ExceptionHandlingClause = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "ExceptionHandlingClause");
CHECK_OBJECT (MonoReflectionMethodBody *, method, NULL);
if ((method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
(method->flags & METHOD_ATTRIBUTE_ABSTRACT) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME))
return NULL;
mn = (MonoMethodNormal *)method;
image = method->klass->image;
header = mono_method_get_header (method);
if (!image->dynamic) {
/* Obtain local vars signature token */
method_rva = mono_metadata_decode_row_col (&image->tables [MONO_TABLE_METHOD], mono_metadata_token_index (method->token) - 1, MONO_METHOD_RVA);
ptr = mono_image_rva_map (image, method_rva);
flags = *(const unsigned char *) ptr;
format = flags & METHOD_HEADER_FORMAT_MASK;
switch (format){
case METHOD_HEADER_TINY_FORMAT:
local_var_sig_token = 0;
break;
case METHOD_HEADER_FAT_FORMAT:
ptr += 2;
ptr += 2;
ptr += 4;
local_var_sig_token = read32 (ptr);
break;
default:
g_assert_not_reached ();
}
} else
local_var_sig_token = 0; //FIXME
ret = (MonoReflectionMethodBody*)mono_object_new (domain, System_Reflection_MethodBody);
ret->init_locals = header->init_locals;
ret->max_stack = header->max_stack;
ret->local_var_sig_token = local_var_sig_token;
MONO_OBJECT_SETREF (ret, il, mono_array_new_cached (domain, mono_defaults.byte_class, header->code_size));
memcpy (mono_array_addr (ret->il, guint8, 0), header->code, header->code_size);
/* Locals */
MONO_OBJECT_SETREF (ret, locals, mono_array_new_cached (domain, System_Reflection_LocalVariableInfo, header->num_locals));
for (i = 0; i < header->num_locals; ++i) {
MonoReflectionLocalVariableInfo *info = (MonoReflectionLocalVariableInfo*)mono_object_new (domain, System_Reflection_LocalVariableInfo);
MONO_OBJECT_SETREF (info, local_type, mono_type_get_object (domain, header->locals [i]));
info->is_pinned = header->locals [i]->pinned;
info->local_index = i;
mono_array_setref (ret->locals, i, info);
}
/* Exceptions */
MONO_OBJECT_SETREF (ret, clauses, mono_array_new_cached (domain, System_Reflection_ExceptionHandlingClause, header->num_clauses));
for (i = 0; i < header->num_clauses; ++i) {
MonoReflectionExceptionHandlingClause *info = (MonoReflectionExceptionHandlingClause*)mono_object_new (domain, System_Reflection_ExceptionHandlingClause);
MonoExceptionClause *clause = &header->clauses [i];
info->flags = clause->flags;
info->try_offset = clause->try_offset;
info->try_length = clause->try_len;
info->handler_offset = clause->handler_offset;
info->handler_length = clause->handler_len;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
info->filter_offset = clause->data.filter_offset;
else if (clause->data.catch_class)
MONO_OBJECT_SETREF (info, catch_type, mono_type_get_object (mono_domain_get (), &clause->data.catch_class->byval_arg));
mono_array_setref (ret->clauses, i, info);
}
CACHE_OBJECT (MonoReflectionMethodBody *, method, ret, NULL);
return ret;
}
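/*
 * Usage sketch (illustrative): methods without IL (pinvoke, abstract,
 * icall or runtime-implemented) yield NULL, so callers must check:
 *
 *   MonoReflectionMethodBody *body = mono_method_body_get_object (domain, method);
 *   if (body)
 *           ... use body->max_stack, body->il, body->locals, body->clauses ...
 */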
/**
* mono_get_dbnull_object:
* @domain: Domain where the object lives
*
* Returns the System.DBNull.Value singleton object
*
* Used as the value for ParameterInfo.DefaultValue
*/
MonoObject *
mono_get_dbnull_object (MonoDomain *domain)
{
MonoObject *obj;
static MonoClassField *dbnull_value_field = NULL;
if (!dbnull_value_field) {
MonoClass *dbnull_klass;
dbnull_klass = mono_class_from_name (mono_defaults.corlib, "System", "DBNull");
mono_class_init (dbnull_klass);
dbnull_value_field = mono_class_get_field_from_name (dbnull_klass, "Value");
g_assert (dbnull_value_field);
}
obj = mono_field_get_value_object (domain, dbnull_value_field, NULL);
g_assert (obj);
return obj;
}
static void
get_default_param_value_blobs (MonoMethod *method, char **blobs, guint32 *types)
{
guint32 param_index, i, lastp, crow = 0;
guint32 param_cols [MONO_PARAM_SIZE], const_cols [MONO_CONSTANT_SIZE];
gint32 idx;
MonoClass *klass = method->klass;
MonoImage *image = klass->image;
MonoMethodSignature *methodsig = mono_method_signature (method);
MonoTableInfo *constt;
MonoTableInfo *methodt;
MonoTableInfo *paramt;
if (!methodsig->param_count)
return;
mono_class_init (klass);
if (klass->image->dynamic) {
MonoReflectionMethodAux *aux;
if (method->is_inflated)
method = ((MonoMethodInflated*)method)->declaring;
aux = g_hash_table_lookup (((MonoDynamicImage*)method->klass->image)->method_aux_hash, method);
if (aux && aux->param_defaults) {
memcpy (blobs, &(aux->param_defaults [1]), methodsig->param_count * sizeof (char*));
memcpy (types, &(aux->param_default_types [1]), methodsig->param_count * sizeof (guint32));
}
return;
}
methodt = &klass->image->tables [MONO_TABLE_METHOD];
paramt = &klass->image->tables [MONO_TABLE_PARAM];
constt = &image->tables [MONO_TABLE_CONSTANT];
idx = mono_method_get_index (method) - 1;
g_assert (idx != -1);
param_index = mono_metadata_decode_row_col (methodt, idx, MONO_METHOD_PARAMLIST);
if (idx + 1 < methodt->rows)
lastp = mono_metadata_decode_row_col (methodt, idx + 1, MONO_METHOD_PARAMLIST);
else
lastp = paramt->rows + 1;
for (i = param_index; i < lastp; ++i) {
guint32 paramseq;
mono_metadata_decode_row (paramt, i - 1, param_cols, MONO_PARAM_SIZE);
paramseq = param_cols [MONO_PARAM_SEQUENCE];
if (!(param_cols [MONO_PARAM_FLAGS] & PARAM_ATTRIBUTE_HAS_DEFAULT))
continue;
crow = mono_metadata_get_constant_index (image, MONO_TOKEN_PARAM_DEF | i, crow + 1);
if (!crow) {
continue;
}
mono_metadata_decode_row (constt, crow - 1, const_cols, MONO_CONSTANT_SIZE);
blobs [paramseq - 1] = (gpointer) mono_metadata_blob_heap (image, const_cols [MONO_CONSTANT_VALUE]);
types [paramseq - 1] = const_cols [MONO_CONSTANT_TYPE];
}
return;
}
static MonoObject *
mono_get_object_from_blob (MonoDomain *domain, MonoType *type, const char *blob)
{
void *retval;
MonoClass *klass;
MonoObject *object;
MonoType *basetype = type;
if (!blob)
return NULL;
klass = mono_class_from_mono_type (type);
if (klass->valuetype) {
object = mono_object_new (domain, klass);
retval = ((gchar *) object + sizeof (MonoObject));
if (klass->enumtype)
basetype = mono_class_enum_basetype (klass);
} else {
retval = &object;
}
if (!mono_get_constant_value_from_blob (domain, basetype->type, blob, retval))
return object;
else
return NULL;
}
static int
assembly_name_to_aname (MonoAssemblyName *assembly, char *p) {
int found_sep;
char *s;
memset (assembly, 0, sizeof (MonoAssemblyName));
assembly->name = p;
assembly->culture = "";
memset (assembly->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH);
while (*p && (isalnum (*p) || *p == '.' || *p == '-' || *p == '_' || *p == '$' || *p == '@'))
p++;
found_sep = 0;
while (g_ascii_isspace (*p) || *p == ',') {
*p++ = 0;
found_sep = 1;
continue;
}
/* failed */
if (!found_sep)
return 1;
while (*p) {
if (*p == 'V' && g_ascii_strncasecmp (p, "Version=", 8) == 0) {
p += 8;
assembly->major = strtoul (p, &s, 10);
if (s == p || *s != '.')
return 1;
p = ++s;
assembly->minor = strtoul (p, &s, 10);
if (s == p || *s != '.')
return 1;
p = ++s;
assembly->build = strtoul (p, &s, 10);
if (s == p || *s != '.')
return 1;
p = ++s;
assembly->revision = strtoul (p, &s, 10);
if (s == p)
return 1;
p = s;
} else if (*p == 'C' && g_ascii_strncasecmp (p, "Culture=", 8) == 0) {
p += 8;
if (g_ascii_strncasecmp (p, "neutral", 7) == 0) {
assembly->culture = "";
p += 7;
} else {
assembly->culture = p;
while (*p && *p != ',') {
p++;
}
}
} else if (*p == 'P' && g_ascii_strncasecmp (p, "PublicKeyToken=", 15) == 0) {
p += 15;
if (strncmp (p, "null", 4) == 0) {
p += 4;
} else {
int len;
gchar *start = p;
while (*p && *p != ',') {
p++;
}
len = (p - start + 1);
if (len > MONO_PUBLIC_KEY_TOKEN_LENGTH)
len = MONO_PUBLIC_KEY_TOKEN_LENGTH;
g_strlcpy ((char*)assembly->public_key_token, start, len);
}
} else {
while (*p && *p != ',')
p++;
}
found_sep = 0;
while (g_ascii_isspace (*p) || *p == ',') {
*p++ = 0;
found_sep = 1;
continue;
}
/* failed */
if (!found_sep)
return 1;
}
return 0;
}
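/*
 * Example of the display-name form parsed above (illustrative):
 *
 *   "mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089"
 *
 * yields name = "mscorlib", major/minor/build/revision = 2/0/0/0,
 * culture = "" (neutral) and the token text copied into public_key_token.
 */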
/*
* mono_reflection_parse_type:
* @name: type name
*
* Parse a type name as accepted by the GetType () method and store the
* extracted information in the @info structure.
* The @name parameter will be mangled, so make a copy before passing it to
* this function. The fields in @info remain valid only as long as the memory
* pointed to by @name is valid.
*
* See also mono_type_get_name () below.
*
* Returns: 0 on parse error.
*/
static int
_mono_reflection_parse_type (char *name, char **endptr, gboolean is_recursed,
MonoTypeNameParse *info)
{
char *start, *p, *w, *temp, *last_point, *startn;
int in_modifiers = 0;
int isbyref = 0, rank, arity = 0, i;
start = p = w = name;
//FIXME could we just zero the whole struct? memset (&info, 0, sizeof (MonoTypeNameParse))
memset (&info->assembly, 0, sizeof (MonoAssemblyName));
info->name = info->name_space = NULL;
info->nested = NULL;
info->modifiers = NULL;
info->type_arguments = NULL;
/* last_point separates the namespace from the name */
last_point = NULL;
/* Skips spaces */
while (*p == ' ') p++, start++, w++, name++;
while (*p) {
switch (*p) {
case '+':
*p = 0; /* NULL terminate the name */
startn = p + 1;
info->nested = g_list_append (info->nested, startn);
/* we have parsed the nesting namespace + name */
if (info->name)
break;
if (last_point) {
info->name_space = start;
*last_point = 0;
info->name = last_point + 1;
} else {
info->name_space = (char *)"";
info->name = start;
}
break;
case '.':
last_point = p;
break;
case '\\':
++p;
break;
case '&':
case '*':
case '[':
case ',':
case ']':
in_modifiers = 1;
break;
case '`':
++p;
i = strtol (p, &temp, 10);
arity += i;
if (p == temp)
return 0;
p = temp-1;
break;
default:
break;
}
if (in_modifiers)
break;
// *w++ = *p++;
p++;
}
if (!info->name) {
if (last_point) {
info->name_space = start;
*last_point = 0;
info->name = last_point + 1;
} else {
info->name_space = (char *)"";
info->name = start;
}
}
while (*p) {
switch (*p) {
case '&':
if (isbyref) /* only one level allowed by the spec */
return 0;
isbyref = 1;
info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (0));
*p++ = 0;
break;
case '*':
info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (-1));
*p++ = 0;
break;
case '[':
if (arity != 0) {
*p++ = 0;
info->type_arguments = g_ptr_array_new ();
for (i = 0; i < arity; i++) {
MonoTypeNameParse *subinfo = g_new0 (MonoTypeNameParse, 1);
gboolean fqname = FALSE;
g_ptr_array_add (info->type_arguments, subinfo);
if (*p == '[') {
p++;
fqname = TRUE;
}
if (!_mono_reflection_parse_type (p, &p, TRUE, subinfo))
return 0;
/* MS is lenient on [] delimited parameters that aren't fqn - and F# uses them. */
if (fqname && (*p != ']')) {
char *aname;
if (*p != ',')
return 0;
*p++ = 0;
aname = p;
while (*p && (*p != ']'))
p++;
if (*p != ']')
return 0;
*p++ = 0;
while (*aname) {
if (g_ascii_isspace (*aname)) {
++aname;
continue;
}
break;
}
if (!*aname ||
!assembly_name_to_aname (&subinfo->assembly, aname))
return 0;
} else if (fqname && (*p == ']')) {
*p++ = 0;
}
if (i + 1 < arity) {
if (*p != ',')
return 0;
} else {
if (*p != ']')
return 0;
}
*p++ = 0;
}
arity = 0;
break;
}
rank = 1;
*p++ = 0;
while (*p) {
if (*p == ']')
break;
if (*p == ',')
rank++;
else if (*p == '*') /* '*' means unknown lower bound */
info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (-2));
else
return 0;
++p;
}
if (*p++ != ']')
return 0;
info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (rank));
break;
case ']':
if (is_recursed)
goto end;
return 0;
case ',':
if (is_recursed)
goto end;
*p++ = 0;
while (*p) {
if (g_ascii_isspace (*p)) {
++p;
continue;
}
break;
}
if (!*p)
return 0; /* missing assembly name */
if (!assembly_name_to_aname (&info->assembly, p))
return 0;
break;
default:
return 0;
}
if (info->assembly.name)
break;
}
// *w = 0; /* terminate class name */
end:
if (!info->name || !*info->name)
return 0;
if (endptr)
*endptr = p;
/* add other consistency checks */
return 1;
}
int
mono_reflection_parse_type (char *name, MonoTypeNameParse *info)
{
return _mono_reflection_parse_type (name, NULL, FALSE, info);
}
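/*
 * Example (illustrative) of a name the parser decomposes:
 *
 *   "System.Collections.Generic.List`1[[System.Int32, mscorlib]][]"
 *
 * gives name_space = "System.Collections.Generic", name = "List`1",
 * one type argument (System.Int32 resolved against mscorlib) and a
 * single rank-1 array modifier.
 */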
static MonoType*
_mono_reflection_get_type_from_info (MonoTypeNameParse *info, MonoImage *image, gboolean ignorecase)
{
gboolean type_resolve = FALSE;
MonoType *type;
MonoImage *rootimage = image;
if (info->assembly.name) {
MonoAssembly *assembly = mono_assembly_loaded (&info->assembly);
if (!assembly && image && image->assembly && mono_assembly_names_equal (&info->assembly, &image->assembly->aname))
/*
* This could happen in the AOT compiler case when the search hook is not
* installed.
*/
assembly = image->assembly;
if (!assembly) {
/* then we must load the assembly ourselves - see #60439 */
assembly = mono_assembly_load (&info->assembly, NULL, NULL);
if (!assembly)
return NULL;
}
image = assembly->image;
} else if (!image) {
image = mono_defaults.corlib;
}
type = mono_reflection_get_type_with_rootimage (rootimage, image, info, ignorecase, &type_resolve);
if (type == NULL && !info->assembly.name && image != mono_defaults.corlib) {
image = mono_defaults.corlib;
type = mono_reflection_get_type_with_rootimage (rootimage, image, info, ignorecase, &type_resolve);
}
return type;
}
static MonoType*
mono_reflection_get_type_internal (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase)
{
MonoClass *klass;
GList *mod;
int modval;
gboolean bounded = FALSE;
if (!image)
image = mono_defaults.corlib;
if (ignorecase)
klass = mono_class_from_name_case (image, info->name_space, info->name);
else
klass = mono_class_from_name (image, info->name_space, info->name);
if (!klass)
return NULL;
for (mod = info->nested; mod; mod = mod->next) {
gpointer iter = NULL;
MonoClass *parent;
parent = klass;
mono_class_init (parent);
while ((klass = mono_class_get_nested_types (parent, &iter))) {
if (ignorecase) {
if (mono_utf8_strcasecmp (klass->name, mod->data) == 0)
break;
} else {
if (strcmp (klass->name, mod->data) == 0)
break;
}
}
if (!klass)
break;
}
if (!klass)
return NULL;
mono_class_init (klass);
if (info->type_arguments) {
MonoType **type_args = g_new0 (MonoType *, info->type_arguments->len);
MonoReflectionType *the_type;
MonoType *instance;
int i;
for (i = 0; i < info->type_arguments->len; i++) {
MonoTypeNameParse *subinfo = g_ptr_array_index (info->type_arguments, i);
type_args [i] = _mono_reflection_get_type_from_info (subinfo, rootimage, ignorecase);
if (!type_args [i]) {
g_free (type_args);
return NULL;
}
}
the_type = mono_type_get_object (mono_domain_get (), &klass->byval_arg);
instance = mono_reflection_bind_generic_parameters (
the_type, info->type_arguments->len, type_args);
g_free (type_args);
if (!instance)
return NULL;
klass = mono_class_from_mono_type (instance);
}
for (mod = info->modifiers; mod; mod = mod->next) {
modval = GPOINTER_TO_UINT (mod->data);
if (!modval) { /* byref: must be last modifier */
return &klass->this_arg;
} else if (modval == -1) {
klass = mono_ptr_class_get (&klass->byval_arg);
} else if (modval == -2) {
bounded = TRUE;
} else { /* array rank */
klass = mono_bounded_array_class_get (klass, modval, bounded);
}
mono_class_init (klass);
}
return &klass->byval_arg;
}
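/*
 * Modifier encoding consumed above (as produced by _mono_reflection_parse_type):
 * 0 = byref (must be last), -1 = pointer, -2 = bounded-array marker, any
 * positive value = array rank. E.g. "Foo*[]" yields the modifier list (-1, 1),
 * i.e. a rank-1 array of pointers to Foo.
 */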
/*
* mono_reflection_get_type:
* @image: a metadata context
* @info: type description structure
* @ignorecase: flag for case-insensitive string compares
* @type_resolve: whether a TypeResolve handler was already tried
*
* Build a MonoType from the type description in @info.
*
*/
MonoType*
mono_reflection_get_type (MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve)
{
return mono_reflection_get_type_with_rootimage (image, image, info, ignorecase, type_resolve);
}
static MonoType*
mono_reflection_get_type_internal_dynamic (MonoImage *rootimage, MonoAssembly *assembly, MonoTypeNameParse *info, gboolean ignorecase)
{
MonoReflectionAssemblyBuilder *abuilder;
MonoType *type;
int i;
g_assert (assembly->dynamic);
abuilder = (MonoReflectionAssemblyBuilder*)mono_assembly_get_object (((MonoDynamicAssembly*)assembly)->domain, assembly);
/* Enumerate all modules */
type = NULL;
if (abuilder->modules) {
for (i = 0; i < mono_array_length (abuilder->modules); ++i) {
MonoReflectionModuleBuilder *mb = mono_array_get (abuilder->modules, MonoReflectionModuleBuilder*, i);
type = mono_reflection_get_type_internal (rootimage, &mb->dynamic_image->image, info, ignorecase);
if (type)
break;
}
}
if (!type && abuilder->loaded_modules) {
for (i = 0; i < mono_array_length (abuilder->loaded_modules); ++i) {
MonoReflectionModule *mod = mono_array_get (abuilder->loaded_modules, MonoReflectionModule*, i);
type = mono_reflection_get_type_internal (rootimage, mod->image, info, ignorecase);
if (type)
break;
}
}
return type;
}
MonoType*
mono_reflection_get_type_with_rootimage (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve)
{
MonoType *type;
MonoReflectionAssembly *assembly;
GString *fullName;
GList *mod;
if (image && image->dynamic)
type = mono_reflection_get_type_internal_dynamic (rootimage, image->assembly, info, ignorecase);
else
type = mono_reflection_get_type_internal (rootimage, image, info, ignorecase);
if (type)
return type;
if (!mono_domain_has_type_resolve (mono_domain_get ()))
return NULL;
if (type_resolve) {
if (*type_resolve)
return NULL;
else
*type_resolve = TRUE;
}
/* Reconstruct the type name */
fullName = g_string_new ("");
if (info->name_space && (info->name_space [0] != '\0'))
g_string_printf (fullName, "%s.%s", info->name_space, info->name);
else
g_string_printf (fullName, "%s", info->name);
for (mod = info->nested; mod; mod = mod->next)
g_string_append_printf (fullName, "+%s", (char*)mod->data);
assembly = mono_domain_try_type_resolve ( mono_domain_get (), fullName->str, NULL);
if (assembly) {
if (assembly->assembly->dynamic)
type = mono_reflection_get_type_internal_dynamic (rootimage, assembly->assembly, info, ignorecase);
else
type = mono_reflection_get_type_internal (rootimage, assembly->assembly->image,
info, ignorecase);
}
g_string_free (fullName, TRUE);
return type;
}
void
mono_reflection_free_type_info (MonoTypeNameParse *info)
{
g_list_free (info->modifiers);
g_list_free (info->nested);
if (info->type_arguments) {
int i;
for (i = 0; i < info->type_arguments->len; i++) {
MonoTypeNameParse *subinfo = g_ptr_array_index (info->type_arguments, i);
mono_reflection_free_type_info (subinfo);
/*We free the subinfo since it is allocated by _mono_reflection_parse_type*/
g_free (subinfo);
}
g_ptr_array_free (info->type_arguments, TRUE);
}
}
/*
* mono_reflection_type_from_name:
* @name: type name.
* @image: a metadata context (can be NULL).
*
* Retrieves a MonoType from its @name. If the name is not fully qualified,
* it defaults to getting the type from @image or, if @image is NULL or loading
* from it fails, uses corlib.
*
*/
MonoType*
mono_reflection_type_from_name (char *name, MonoImage *image)
{
MonoType *type = NULL;
MonoTypeNameParse info;
char *tmp;
/* Make a copy since parse_type modifies its argument */
tmp = g_strdup (name);
/*g_print ("requested type %s\n", str);*/
if (mono_reflection_parse_type (tmp, &info)) {
type = _mono_reflection_get_type_from_info (&info, image, FALSE);
}
g_free (tmp);
mono_reflection_free_type_info (&info);
return type;
}
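/*
 * Usage sketch (illustrative); unlike mono_reflection_parse_type (), this
 * helper copies the string itself, so a caller may pass read-only data:
 *
 *   MonoType *t = mono_reflection_type_from_name ((char*)"System.Int32", NULL);
 */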
/*
* mono_reflection_get_token:
*
* Return the metadata token of OBJ which should be an object
* representing a metadata element.
*/
guint32
mono_reflection_get_token (MonoObject *obj)
{
MonoClass *klass;
guint32 token = 0;
klass = obj->vtable->klass;
if (strcmp (klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
} else if (strcmp (klass->name, "ConstructorBuilder") == 0) {
MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj;
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
} else if (strcmp (klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj;
/* Call mono_image_create_token so the object gets added to the tokens hash table */
token = mono_image_create_token (((MonoReflectionTypeBuilder*)fb->typeb)->module->dynamic_image, obj, FALSE, TRUE);
} else if (strcmp (klass->name, "TypeBuilder") == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj;
token = tb->table_idx | MONO_TOKEN_TYPE_DEF;
} else if (strcmp (klass->name, "MonoType") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_class_from_mono_type (type)->type_token;
} else if (strcmp (klass->name, "MonoCMethod") == 0 ||
strcmp (klass->name, "MonoMethod") == 0 ||
strcmp (klass->name, "MonoGenericMethod") == 0 ||
strcmp (klass->name, "MonoGenericCMethod") == 0) {
MonoReflectionMethod *m = (MonoReflectionMethod *)obj;
if (m->method->is_inflated) {
MonoMethodInflated *inflated = (MonoMethodInflated *) m->method;
return inflated->declaring->token;
} else {
token = m->method->token;
}
} else if (strcmp (klass->name, "MonoField") == 0) {
MonoReflectionField *f = (MonoReflectionField*)obj;
if (is_field_on_inst (f->field)) {
MonoDynamicGenericClass *dgclass = (MonoDynamicGenericClass*)f->field->parent->generic_class;
int field_index = f->field - dgclass->fields;
MonoObject *obj;
g_assert (field_index >= 0 && field_index < dgclass->count_fields);
obj = dgclass->field_objects [field_index];
return mono_reflection_get_token (obj);
}
token = mono_class_get_field_token (f->field);
} else if (strcmp (klass->name, "MonoProperty") == 0) {
MonoReflectionProperty *p = (MonoReflectionProperty*)obj;
token = mono_class_get_property_token (p->property);
} else if (strcmp (klass->name, "MonoEvent") == 0) {
MonoReflectionMonoEvent *p = (MonoReflectionMonoEvent*)obj;
token = mono_class_get_event_token (p->event);
} else if (strcmp (klass->name, "ParameterInfo") == 0) {
MonoReflectionParameter *p = (MonoReflectionParameter*)obj;
MonoClass *member_class = mono_object_class (p->MemberImpl);
g_assert (mono_class_is_reflection_method_or_constructor (member_class));
token = mono_method_get_param_token (((MonoReflectionMethod*)p->MemberImpl)->method, p->PositionImpl);
} else if (strcmp (klass->name, "Module") == 0) {
MonoReflectionModule *m = (MonoReflectionModule*)obj;
token = m->token;
} else if (strcmp (klass->name, "Assembly") == 0) {
token = mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1);
} else {
gchar *msg = g_strdup_printf ("MetadataToken is not supported for type '%s.%s'", klass->name_space, klass->name);
MonoException *ex = mono_get_exception_not_implemented (msg);
g_free (msg);
mono_raise_exception (ex);
}
return token;
}
static void*
load_cattr_value (MonoImage *image, MonoType *t, const char *p, const char **end)
{
int slen, type = t->type;
MonoClass *tklass = t->data.klass;
handle_enum:
switch (type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN: {
MonoBoolean *bval = g_malloc (sizeof (MonoBoolean));
*bval = *p;
*end = p + 1;
return bval;
}
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2: {
guint16 *val = g_malloc (sizeof (guint16));
*val = read16 (p);
*end = p + 2;
return val;
}
#if SIZEOF_VOID_P == 4
case MONO_TYPE_U:
case MONO_TYPE_I:
#endif
case MONO_TYPE_R4:
case MONO_TYPE_U4:
case MONO_TYPE_I4: {
guint32 *val = g_malloc (sizeof (guint32));
*val = read32 (p);
*end = p + 4;
return val;
}
#if SIZEOF_VOID_P == 8
case MONO_TYPE_U: /* error out instead? this should probably not happen */
case MONO_TYPE_I:
#endif
case MONO_TYPE_U8:
case MONO_TYPE_I8: {
guint64 *val = g_malloc (sizeof (guint64));
*val = read64 (p);
*end = p + 8;
return val;
}
case MONO_TYPE_R8: {
double *val = g_malloc (sizeof (double));
readr8 (p, val);
*end = p + 8;
return val;
}
case MONO_TYPE_VALUETYPE:
if (t->data.klass->enumtype) {
type = mono_class_enum_basetype (t->data.klass)->type;
goto handle_enum;
} else {
g_error ("generic valutype %s not handled in custom attr value decoding", t->data.klass->name);
}
break;
case MONO_TYPE_STRING:
if (*p == (char)0xFF) {
*end = p + 1;
return NULL;
}
slen = mono_metadata_decode_value (p, &p);
*end = p + slen;
return mono_string_new_len (mono_domain_get (), p, slen);
case MONO_TYPE_CLASS: {
char *n;
MonoType *t;
if (*p == (char)0xFF) {
*end = p + 1;
return NULL;
}
handle_type:
slen = mono_metadata_decode_value (p, &p);
n = g_memdup (p, slen + 1);
n [slen] = 0;
t = mono_reflection_type_from_name (n, image);
if (!t)
g_warning ("Cannot load type '%s'", n);
g_free (n);
*end = p + slen;
if (t)
return mono_type_get_object (mono_domain_get (), t);
else
return NULL;
}
case MONO_TYPE_OBJECT: {
char subt = *p++;
MonoObject *obj;
MonoClass *subc = NULL;
void *val;
if (subt == 0x50) {
goto handle_type;
} else if (subt == 0x0E) {
type = MONO_TYPE_STRING;
goto handle_enum;
} else if (subt == 0x1D) {
MonoType simple_type = {{0}};
int etype = *p;
p ++;
if (etype == 0x51)
/* See Partition II, Appendix B3 */
etype = MONO_TYPE_OBJECT;
type = MONO_TYPE_SZARRAY;
simple_type.type = etype;
tklass = mono_class_from_mono_type (&simple_type);
goto handle_enum;
} else if (subt == 0x55) {
char *n;
MonoType *t;
slen = mono_metadata_decode_value (p, &p);
n = g_memdup (p, slen + 1);
n [slen] = 0;
t = mono_reflection_type_from_name (n, image);
if (!t)
g_error ("Cannot load type '%s'", n);
g_free (n);
p += slen;
subc = mono_class_from_mono_type (t);
} else if (subt >= MONO_TYPE_BOOLEAN && subt <= MONO_TYPE_R8) {
MonoType simple_type = {{0}};
simple_type.type = subt;
subc = mono_class_from_mono_type (&simple_type);
} else {
g_error ("Unknown type 0x%02x for object type encoding in custom attr", subt);
}
val = load_cattr_value (image, &subc->byval_arg, p, end);
obj = mono_object_new (mono_domain_get (), subc);
memcpy ((char*)obj + sizeof (MonoObject), val, mono_class_value_size (subc, NULL));
g_free (val);
return obj;
}
case MONO_TYPE_SZARRAY: {
MonoArray *arr;
guint32 i, alen, basetype;
alen = read32 (p);
p += 4;
if (alen == 0xffffffff) {
*end = p;
return NULL;
}
arr = mono_array_new (mono_domain_get(), tklass, alen);
basetype = tklass->byval_arg.type;
if (basetype == MONO_TYPE_VALUETYPE && tklass->enumtype)
basetype = mono_class_enum_basetype (tklass)->type;
switch (basetype)
{
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN:
for (i = 0; i < alen; i++) {
MonoBoolean val = *p++;
mono_array_set (arr, MonoBoolean, i, val);
}
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
for (i = 0; i < alen; i++) {
guint16 val = read16 (p);
mono_array_set (arr, guint16, i, val);
p += 2;
}
break;
case MONO_TYPE_R4:
case MONO_TYPE_U4:
case MONO_TYPE_I4:
for (i = 0; i < alen; i++) {
guint32 val = read32 (p);
mono_array_set (arr, guint32, i, val);
p += 4;
}
break;
case MONO_TYPE_R8:
for (i = 0; i < alen; i++) {
double val;
readr8 (p, &val);
mono_array_set (arr, double, i, val);
p += 8;
}
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
for (i = 0; i < alen; i++) {
guint64 val = read64 (p);
mono_array_set (arr, guint64, i, val);
p += 8;
}
break;
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
for (i = 0; i < alen; i++) {
MonoObject *item = load_cattr_value (image, &tklass->byval_arg, p, &p);
mono_array_setref (arr, i, item);
}
break;
default:
g_error ("Type 0x%02x not handled in custom attr array decoding", basetype);
}
*end=p;
return arr;
}
default:
g_error ("Type 0x%02x not handled in custom attr value decoding", type);
}
return NULL;
}
static MonoObject*
create_cattr_typed_arg (MonoType *t, MonoObject *val)
{
static MonoClass *klass;
static MonoMethod *ctor;
MonoObject *retval;
void *params [2], *unboxed;
if (!klass)
klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "CustomAttributeTypedArgument");
if (!ctor)
ctor = mono_class_get_method_from_name (klass, ".ctor", 2);
params [0] = mono_type_get_object (mono_domain_get (), t);
params [1] = val;
retval = mono_object_new (mono_domain_get (), klass);
unboxed = mono_object_unbox (retval);
mono_runtime_invoke (ctor, unboxed, params, NULL);
return retval;
}
static MonoObject*
create_cattr_named_arg (void *minfo, MonoObject *typedarg)
{
static MonoClass *klass;
static MonoMethod *ctor;
MonoObject *retval;
void *unboxed, *params [2];
if (!klass)
klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "CustomAttributeNamedArgument");
if (!ctor)
ctor = mono_class_get_method_from_name (klass, ".ctor", 2);
params [0] = minfo;
params [1] = typedarg;
retval = mono_object_new (mono_domain_get (), klass);
unboxed = mono_object_unbox (retval);
mono_runtime_invoke (ctor, unboxed, params, NULL);
return retval;
}
static gboolean
type_is_reference (MonoType *type)
{
switch (type->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_U:
case MONO_TYPE_I:
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8:
case MONO_TYPE_R4:
case MONO_TYPE_VALUETYPE:
return FALSE;
default:
return TRUE;
}
}
static void
free_param_data (MonoMethodSignature *sig, void **params) {
int i;
for (i = 0; i < sig->param_count; ++i) {
if (!type_is_reference (sig->params [i]))
g_free (params [i]);
}
}
/*
* Find the field index in the metadata FieldDef table.
*/
static guint32
find_field_index (MonoClass *klass, MonoClassField *field) {
int i;
for (i = 0; i < klass->field.count; ++i) {
if (field == &klass->fields [i])
return klass->field.first + 1 + i;
}
return 0;
}
/*
* Find the property index in the metadata Property table.
*/
static guint32
find_property_index (MonoClass *klass, MonoProperty *property) {
int i;
for (i = 0; i < klass->ext->property.count; ++i) {
if (property == &klass->ext->properties [i])
return klass->ext->property.first + 1 + i;
}
return 0;
}
/*
* Find the event index in the metadata Event table.
*/
static guint32
find_event_index (MonoClass *klass, MonoEvent *event) {
int i;
for (i = 0; i < klass->ext->event.count; ++i) {
if (event == &klass->ext->events [i])
return klass->ext->event.first + 1 + i;
}
return 0;
}
static MonoObject*
create_custom_attr (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len)
{
const char *p = (const char*)data;
const char *named;
guint32 i, j, num_named;
MonoObject *attr;
void *params_buf [32];
void **params;
MonoMethodSignature *sig;
mono_class_init (method->klass);
if (len == 0) {
attr = mono_object_new (mono_domain_get (), method->klass);
mono_runtime_invoke (method, attr, NULL, NULL);
return attr;
}
if (len < 2 || read16 (p) != 0x0001) /* Prolog */
return NULL;
/*g_print ("got attr %s\n", method->klass->name);*/
sig = mono_method_signature (method);
if (sig->param_count < 32)
params = params_buf;
else
/* Allocate using GC so it gets GC tracking */
params = mono_gc_alloc_fixed (sig->param_count * sizeof (void*), NULL);
/* skip prolog */
p += 2;
for (i = 0; i < mono_method_signature (method)->param_count; ++i) {
params [i] = load_cattr_value (image, mono_method_signature (method)->params [i], p, &p);
}
named = p;
attr = mono_object_new (mono_domain_get (), method->klass);
mono_runtime_invoke (method, attr, params, NULL);
free_param_data (method->signature, params);
num_named = read16 (named);
named += 2;
for (j = 0; j < num_named; j++) {
gint name_len;
char *name, named_type, data_type;
named_type = *named++;
data_type = *named++; /* type of data */
if (data_type == MONO_TYPE_SZARRAY)
data_type = *named++;
if (data_type == MONO_TYPE_ENUM) {
gint type_len;
char *type_name;
type_len = mono_metadata_decode_blob_size (named, &named);
type_name = g_malloc (type_len + 1);
memcpy (type_name, named, type_len);
type_name [type_len] = 0;
named += type_len;
/* FIXME: lookup the type and check type consistency */
g_free (type_name);
}
name_len = mono_metadata_decode_blob_size (named, &named);
name = g_malloc (name_len + 1);
memcpy (name, named, name_len);
name [name_len] = 0;
named += name_len;
if (named_type == 0x53) {
MonoClassField *field = mono_class_get_field_from_name (mono_object_class (attr), name);
void *val = load_cattr_value (image, field->type, named, &named);
mono_field_set_value (attr, field, val);
if (!type_is_reference (field->type))
g_free (val);
} else if (named_type == 0x54) {
MonoProperty *prop;
void *pparams [1];
MonoType *prop_type;
prop = mono_class_get_property_from_name (mono_object_class (attr), name);
/* can we have more than 1 arg in a custom attr named property? */
prop_type = prop->get? mono_method_signature (prop->get)->ret :
mono_method_signature (prop->set)->params [mono_method_signature (prop->set)->param_count - 1];
pparams [0] = load_cattr_value (image, prop_type, named, &named);
mono_property_set_value (prop, attr, pparams, NULL);
if (!type_is_reference (prop_type))
g_free (pparams [0]);
}
g_free (name);
}
if (params != params_buf)
mono_gc_free_fixed (params);
return attr;
}
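/*
 * Layout of the custom attribute blob decoded above, per ECMA-335 II.23.3
 * (summarized for reference):
 *
 *   prolog (2 bytes, 0x0001)
 *   fixed constructor arguments, one per ctor parameter
 *   num_named (2 bytes)
 *   named arguments: kind (0x53 = field, 0x54 = property), type, name, value
 */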
/*
* mono_reflection_create_custom_attr_data_args:
*
* Create an array of typed and named arguments from the cattr blob given by DATA.
* TYPED_ARGS and NAMED_ARGS will contain the objects representing the arguments,
* NAMED_ARG_INFO will contain information about the named arguments.
*/
void
mono_reflection_create_custom_attr_data_args (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len, MonoArray **typed_args, MonoArray **named_args, CattrNamedArg **named_arg_info)
{
MonoArray *typedargs, *namedargs;
MonoClass *attrklass;
MonoDomain *domain;
const char *p = (const char*)data;
const char *named;
guint32 i, j, num_named;
CattrNamedArg *arginfo = NULL;
mono_class_init (method->klass);
*typed_args = NULL;
*named_args = NULL;
*named_arg_info = NULL;
domain = mono_domain_get ();
if (len < 2 || read16 (p) != 0x0001) /* Prolog */
return;
typedargs = mono_array_new (domain, mono_get_object_class (), mono_method_signature (method)->param_count);
/* skip prolog */
p += 2;
for (i = 0; i < mono_method_signature (method)->param_count; ++i) {
MonoObject *obj;
void *val;
val = load_cattr_value (image, mono_method_signature (method)->params [i], p, &p);
obj = type_is_reference (mono_method_signature (method)->params [i]) ?
val : mono_value_box (domain, mono_class_from_mono_type (mono_method_signature (method)->params [i]), val);
mono_array_setref (typedargs, i, obj);
if (!type_is_reference (mono_method_signature (method)->params [i]))
g_free (val);
}
named = p;
num_named = read16 (named);
namedargs = mono_array_new (domain, mono_get_object_class (), num_named);
named += 2;
attrklass = method->klass;
arginfo = g_new0 (CattrNamedArg, num_named);
*named_arg_info = arginfo;
for (j = 0; j < num_named; j++) {
gint name_len;
char *name, named_type, data_type;
named_type = *named++;
data_type = *named++; /* type of data */
if (data_type == MONO_TYPE_SZARRAY)
data_type = *named++;
if (data_type == MONO_TYPE_ENUM) {
gint type_len;
char *type_name;
type_len = mono_metadata_decode_blob_size (named, &named);
type_name = g_malloc (type_len + 1);
memcpy (type_name, named, type_len);
type_name [type_len] = 0;
named += type_len;
/* FIXME: lookup the type and check type consistency */
g_free (type_name);
}
name_len = mono_metadata_decode_blob_size (named, &named);
name = g_malloc (name_len + 1);
memcpy (name, named, name_len);
name [name_len] = 0;
named += name_len;
if (named_type == 0x53) {
MonoObject *obj;
MonoClassField *field = mono_class_get_field_from_name (attrklass, name);
void *val;
arginfo [j].type = field->type;
arginfo [j].field = field;
val = load_cattr_value (image, field->type, named, &named);
obj = type_is_reference (field->type) ? val : mono_value_box (domain, mono_class_from_mono_type (field->type), val);
mono_array_setref (namedargs, j, obj);
if (!type_is_reference (field->type))
g_free (val);
} else if (named_type == 0x54) {
MonoObject *obj;
MonoType *prop_type;
MonoProperty *prop = mono_class_get_property_from_name (attrklass, name);
void *val;
prop_type = prop->get? mono_method_signature (prop->get)->ret :
mono_method_signature (prop->set)->params [mono_method_signature (prop->set)->param_count - 1];
arginfo [j].type = prop_type;
arginfo [j].prop = prop;
val = load_cattr_value (image, prop_type, named, &named);
obj = type_is_reference (prop_type) ? val : mono_value_box (domain, mono_class_from_mono_type (prop_type), val);
mono_array_setref (namedargs, j, obj);
if (!type_is_reference (prop_type))
g_free (val);
}
g_free (name);
}
*typed_args = typedargs;
*named_args = namedargs;
}
static MonoObject*
create_custom_attr_data (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len)
{
MonoArray *typedargs, *namedargs;
static MonoMethod *ctor;
MonoDomain *domain;
MonoObject *attr;
void *params [3];
CattrNamedArg *arginfo;
int i;
mono_class_init (method->klass);
if (!ctor)
ctor = mono_class_get_method_from_name (mono_defaults.customattribute_data_class, ".ctor", 3);
domain = mono_domain_get ();
if (len == 0) {
/* This is for Attributes with no parameters */
attr = mono_object_new (domain, mono_defaults.customattribute_data_class);
params [0] = mono_method_get_object (domain, method, NULL);
params [1] = params [2] = NULL;
/* attr is a CustomAttributeData instance, so invoke its 3-arg ctor, not the attribute's own constructor */
mono_runtime_invoke (ctor, attr, params, NULL);
return attr;
}
mono_reflection_create_custom_attr_data_args (image, method, data, len, &typedargs, &namedargs, &arginfo);
if (!typedargs || !namedargs)
return NULL;
for (i = 0; i < mono_method_signature (method)->param_count; ++i) {
MonoObject *obj = mono_array_get (typedargs, MonoObject*, i);
MonoObject *typedarg;
typedarg = create_cattr_typed_arg (mono_method_signature (method)->params [i], obj);
mono_array_setref (typedargs, i, typedarg);
}
for (i = 0; i < mono_array_length (namedargs); ++i) {
MonoObject *obj = mono_array_get (namedargs, MonoObject*, i);
MonoObject *typedarg, *namedarg, *minfo;
if (arginfo [i].prop)
minfo = (MonoObject*)mono_property_get_object (domain, NULL, arginfo [i].prop);
else
minfo = (MonoObject*)mono_field_get_object (domain, NULL, arginfo [i].field);
typedarg = create_cattr_typed_arg (arginfo [i].type, obj);
namedarg = create_cattr_named_arg (minfo, typedarg);
mono_array_setref (namedargs, i, namedarg);
}
attr = mono_object_new (domain, mono_defaults.customattribute_data_class);
params [0] = mono_method_get_object (domain, method, NULL);
params [1] = typedargs;
params [2] = namedargs;
mono_runtime_invoke (ctor, attr, params, NULL);
return attr;
}
MonoArray*
mono_custom_attrs_construct (MonoCustomAttrInfo *cinfo)
{
MonoArray *result;
MonoObject *attr;
int i;
result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, cinfo->num_attrs);
for (i = 0; i < cinfo->num_attrs; ++i) {
if (!cinfo->attrs [i].ctor)
/* The cattr type is not finished yet */
/* We should include the type name but cinfo doesn't contain it */
mono_raise_exception (mono_get_exception_type_load (NULL, NULL));
attr = create_custom_attr (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size);
mono_array_setref (result, i, attr);
}
return result;
}
static MonoArray*
mono_custom_attrs_construct_by_type (MonoCustomAttrInfo *cinfo, MonoClass *attr_klass)
{
MonoArray *result;
MonoObject *attr;
int i, n;
n = 0;
for (i = 0; i < cinfo->num_attrs; ++i) {
if (mono_class_is_assignable_from (attr_klass, cinfo->attrs [i].ctor->klass))
n ++;
}
result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, n);
n = 0;
for (i = 0; i < cinfo->num_attrs; ++i) {
if (mono_class_is_assignable_from (attr_klass, cinfo->attrs [i].ctor->klass)) {
attr = create_custom_attr (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size);
mono_array_setref (result, n, attr);
n ++;
}
}
return result;
}
static MonoArray*
mono_custom_attrs_data_construct (MonoCustomAttrInfo *cinfo)
{
MonoArray *result;
MonoObject *attr;
int i;
result = mono_array_new (mono_domain_get (), mono_defaults.customattribute_data_class, cinfo->num_attrs);
for (i = 0; i < cinfo->num_attrs; ++i) {
attr = create_custom_attr_data (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size);
mono_array_setref (result, i, attr);
}
return result;
}
/**
* mono_custom_attrs_from_index:
*
* Returns: NULL if no attributes are found or if a loading error occurs.
*/
MonoCustomAttrInfo*
mono_custom_attrs_from_index (MonoImage *image, guint32 idx)
{
guint32 mtoken, i, len;
guint32 cols [MONO_CUSTOM_ATTR_SIZE];
MonoTableInfo *ca;
MonoCustomAttrInfo *ainfo;
GList *tmp, *list = NULL;
const char *data;
ca = &image->tables [MONO_TABLE_CUSTOMATTRIBUTE];
i = mono_metadata_custom_attrs_from_index (image, idx);
if (!i)
return NULL;
i --;
while (i < ca->rows) {
if (mono_metadata_decode_row_col (ca, i, MONO_CUSTOM_ATTR_PARENT) != idx)
break;
list = g_list_prepend (list, GUINT_TO_POINTER (i));
++i;
}
len = g_list_length (list);
if (!len)
return NULL;
ainfo = g_malloc0 (MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * len);
ainfo->num_attrs = len;
ainfo->image = image;
for (i = 0, tmp = list; i < len; ++i, tmp = tmp->next) {
mono_metadata_decode_row (ca, GPOINTER_TO_UINT (tmp->data), cols, MONO_CUSTOM_ATTR_SIZE);
mtoken = cols [MONO_CUSTOM_ATTR_TYPE] >> MONO_CUSTOM_ATTR_TYPE_BITS;
switch (cols [MONO_CUSTOM_ATTR_TYPE] & MONO_CUSTOM_ATTR_TYPE_MASK) {
case MONO_CUSTOM_ATTR_TYPE_METHODDEF:
mtoken |= MONO_TOKEN_METHOD_DEF;
break;
case MONO_CUSTOM_ATTR_TYPE_MEMBERREF:
mtoken |= MONO_TOKEN_MEMBER_REF;
break;
default:
g_error ("Unknown table for custom attr type %08x", cols [MONO_CUSTOM_ATTR_TYPE]);
break;
}
ainfo->attrs [i].ctor = mono_get_method (image, mtoken, NULL);
if (!ainfo->attrs [i].ctor) {
g_warning ("Can't find custom attr constructor image: %s mtoken: 0x%08x", image->name, mtoken);
g_list_free (list);
g_free (ainfo);
return NULL;
}
data = mono_metadata_blob_heap (image, cols [MONO_CUSTOM_ATTR_VALUE]);
ainfo->attrs [i].data_size = mono_metadata_decode_value (data, &data);
ainfo->attrs [i].data = (guchar*)data;
}
g_list_free (list);
return ainfo;
}
MonoCustomAttrInfo*
mono_custom_attrs_from_method (MonoMethod *method)
{
guint32 idx;
/*
* An instantiated method has the same cattrs as the generic method definition.
*
* LAMESPEC: The .NET SRE throws an exception for instantiations of generic method builders
* Note that this stanza is not necessary for non-SRE types, but it's a micro-optimization
*/
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
if (method->dynamic || method->klass->image->dynamic)
return lookup_custom_attr (method->klass->image, method);
if (!method->token)
/* Synthetic methods */
return NULL;
idx = mono_method_get_index (method);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_METHODDEF;
return mono_custom_attrs_from_index (method->klass->image, idx);
}
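/*
 * Typical call pattern (illustrative): fetch the raw cattr info for a
 * method, materialize the attribute instances, then release the info:
 *
 *   MonoCustomAttrInfo *cinfo = mono_custom_attrs_from_method (method);
 *   if (cinfo) {
 *           MonoArray *attrs = mono_custom_attrs_construct (cinfo);
 *           ...
 *           mono_custom_attrs_free (cinfo);
 *   }
 */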
MonoCustomAttrInfo*
mono_custom_attrs_from_class (MonoClass *klass)
{
guint32 idx;
if (klass->generic_class)
klass = klass->generic_class->container_class;
if (klass->image->dynamic)
return lookup_custom_attr (klass->image, klass);
if (klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR) {
idx = mono_metadata_token_index (klass->sizes.generic_param_token);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_GENERICPAR;
} else {
idx = mono_metadata_token_index (klass->type_token);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_TYPEDEF;
}
return mono_custom_attrs_from_index (klass->image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_assembly (MonoAssembly *assembly)
{
guint32 idx;
if (assembly->image->dynamic)
return lookup_custom_attr (assembly->image, assembly);
idx = 1; /* there is only one assembly */
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_ASSEMBLY;
return mono_custom_attrs_from_index (assembly->image, idx);
}
static MonoCustomAttrInfo*
mono_custom_attrs_from_module (MonoImage *image)
{
guint32 idx;
if (image->dynamic)
return lookup_custom_attr (image, image);
idx = 1; /* there is only one module */
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_MODULE;
return mono_custom_attrs_from_index (image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_property (MonoClass *klass, MonoProperty *property)
{
guint32 idx;
if (klass->image->dynamic) {
property = mono_metadata_get_corresponding_property_from_generic_type_definition (property);
return lookup_custom_attr (klass->image, property);
}
idx = find_property_index (klass, property);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_PROPERTY;
return mono_custom_attrs_from_index (klass->image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_event (MonoClass *klass, MonoEvent *event)
{
guint32 idx;
if (klass->image->dynamic) {
event = mono_metadata_get_corresponding_event_from_generic_type_definition (event);
return lookup_custom_attr (klass->image, event);
}
idx = find_event_index (klass, event);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_EVENT;
return mono_custom_attrs_from_index (klass->image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_field (MonoClass *klass, MonoClassField *field)
{
guint32 idx;
if (klass->image->dynamic) {
field = mono_metadata_get_corresponding_field_from_generic_type_definition (field);
return lookup_custom_attr (klass->image, field);
}
idx = find_field_index (klass, field);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_FIELDDEF;
return mono_custom_attrs_from_index (klass->image, idx);
}
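/*
 * mono_custom_attrs_from_param:
 * @method: method owning the parameter
 * @param: 1-based parameter position
 *
 * Note on indexing (inferred from the callers in this file, which pass
 * PositionImpl + 1, not from a spec citation): @param is matched against
 * MONO_PARAM_SEQUENCE below, so the first parameter is 1.
 */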
MonoCustomAttrInfo*
mono_custom_attrs_from_param (MonoMethod *method, guint32 param)
{
MonoTableInfo *ca;
guint32 i, idx, method_index;
guint32 param_list, param_last, param_pos, found;
MonoImage *image;
MonoReflectionMethodAux *aux;
/*
* An instantiated method has the same cattrs as the generic method definition.
*
* LAMESPEC: The .NET SRE throws an exception for instantiations of generic method builders
* Note that this stanza is not necessary for non-SRE types, but it's a micro-optimization
*/
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
if (method->klass->image->dynamic) {
MonoCustomAttrInfo *res, *ainfo;
int size;
aux = g_hash_table_lookup (((MonoDynamicImage*)method->klass->image)->method_aux_hash, method);
if (!aux || !aux->param_cattr)
return NULL;
/* Need to copy since it will be freed later */
ainfo = aux->param_cattr [param];
if (!ainfo)
return NULL;
size = MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * ainfo->num_attrs;
res = g_malloc0 (size);
memcpy (res, ainfo, size);
return res;
}
image = method->klass->image;
method_index = mono_method_get_index (method);
ca = &image->tables [MONO_TABLE_METHOD];
param_list = mono_metadata_decode_row_col (ca, method_index - 1, MONO_METHOD_PARAMLIST);
if (method_index == ca->rows) {
ca = &image->tables [MONO_TABLE_PARAM];
param_last = ca->rows + 1;
} else {
param_last = mono_metadata_decode_row_col (ca, method_index, MONO_METHOD_PARAMLIST);
ca = &image->tables [MONO_TABLE_PARAM];
}
found = FALSE;
for (i = param_list; i < param_last; ++i) {
param_pos = mono_metadata_decode_row_col (ca, i - 1, MONO_PARAM_SEQUENCE);
if (param_pos == param) {
found = TRUE;
break;
}
}
if (!found)
return NULL;
idx = i;
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_PARAMDEF;
return mono_custom_attrs_from_index (image, idx);
}
gboolean
mono_custom_attrs_has_attr (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass)
{
int i;
MonoClass *klass;
for (i = 0; i < ainfo->num_attrs; ++i) {
klass = ainfo->attrs [i].ctor->klass;
if (mono_class_has_parent (klass, attr_klass) || (MONO_CLASS_IS_INTERFACE (attr_klass) && mono_class_is_assignable_from (attr_klass, klass)))
return TRUE;
}
return FALSE;
}
MonoObject*
mono_custom_attrs_get_attr (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass)
{
int i, attr_index;
MonoClass *klass;
MonoArray *attrs;
attr_index = -1;
for (i = 0; i < ainfo->num_attrs; ++i) {
klass = ainfo->attrs [i].ctor->klass;
if (mono_class_has_parent (klass, attr_klass)) {
attr_index = i;
break;
}
}
if (attr_index == -1)
return NULL;
attrs = mono_custom_attrs_construct (ainfo);
if (attrs)
return mono_array_get (attrs, MonoObject*, attr_index);
else
return NULL;
}
/*
* mono_reflection_get_custom_attrs_info:
* @obj: a reflection object handle
*
* Return the custom attribute info for attributes defined for the
* reflection handle @obj.
*
* FIXME this function leaks like a sieve for SRE objects.
*/
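/*
 * Ownership sketch for callers (this mirrors the pattern used by
 * mono_reflection_get_custom_attrs_by_type () below; a usage note, not an
 * additional API guarantee): the returned info must be freed with
 * mono_custom_attrs_free () unless it is marked as cached.
 *
 *   MonoCustomAttrInfo *cinfo = mono_reflection_get_custom_attrs_info (obj);
 *   if (cinfo) {
 *           gboolean has = mono_custom_attrs_has_attr (cinfo, attr_klass);
 *           if (!cinfo->cached)
 *                   mono_custom_attrs_free (cinfo);
 *   }
 */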
MonoCustomAttrInfo*
mono_reflection_get_custom_attrs_info (MonoObject *obj)
{
MonoClass *klass;
MonoCustomAttrInfo *cinfo = NULL;
klass = obj->vtable->klass;
if (klass == mono_defaults.monotype_class) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
klass = mono_class_from_mono_type (type);
cinfo = mono_custom_attrs_from_class (klass);
} else if (strcmp ("Assembly", klass->name) == 0) {
MonoReflectionAssembly *rassembly = (MonoReflectionAssembly*)obj;
cinfo = mono_custom_attrs_from_assembly (rassembly->assembly);
} else if (strcmp ("Module", klass->name) == 0) {
MonoReflectionModule *module = (MonoReflectionModule*)obj;
cinfo = mono_custom_attrs_from_module (module->image);
} else if (strcmp ("MonoProperty", klass->name) == 0) {
MonoReflectionProperty *rprop = (MonoReflectionProperty*)obj;
cinfo = mono_custom_attrs_from_property (rprop->property->parent, rprop->property);
} else if (strcmp ("MonoEvent", klass->name) == 0) {
MonoReflectionMonoEvent *revent = (MonoReflectionMonoEvent*)obj;
cinfo = mono_custom_attrs_from_event (revent->event->parent, revent->event);
} else if (strcmp ("MonoField", klass->name) == 0) {
MonoReflectionField *rfield = (MonoReflectionField*)obj;
cinfo = mono_custom_attrs_from_field (rfield->field->parent, rfield->field);
} else if ((strcmp ("MonoMethod", klass->name) == 0) || (strcmp ("MonoCMethod", klass->name) == 0)) {
MonoReflectionMethod *rmethod = (MonoReflectionMethod*)obj;
cinfo = mono_custom_attrs_from_method (rmethod->method);
} else if ((strcmp ("MonoGenericMethod", klass->name) == 0) || (strcmp ("MonoGenericCMethod", klass->name) == 0)) {
MonoReflectionMethod *rmethod = (MonoReflectionMethod*)obj;
cinfo = mono_custom_attrs_from_method (rmethod->method);
} else if (strcmp ("ParameterInfo", klass->name) == 0) {
MonoReflectionParameter *param = (MonoReflectionParameter*)obj;
MonoClass *member_class = mono_object_class (param->MemberImpl);
if (mono_class_is_reflection_method_or_constructor (member_class)) {
MonoReflectionMethod *rmethod = (MonoReflectionMethod*)param->MemberImpl;
cinfo = mono_custom_attrs_from_param (rmethod->method, param->PositionImpl + 1);
} else if (is_sr_mono_property (member_class)) {
MonoReflectionProperty *prop = (MonoReflectionProperty *)param->MemberImpl;
MonoMethod *method;
if (!(method = prop->property->get))
method = prop->property->set;
g_assert (method);
cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1);
} else if (is_sre_method_on_tb_inst (member_class)) {/*XXX This is a workaround for Compiler Context*/
MonoMethod *method = mono_reflection_method_on_tb_inst_get_handle ((MonoReflectionMethodOnTypeBuilderInst*)param->MemberImpl);
cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1);
} else if (is_sre_ctor_on_tb_inst (member_class)) { /*XXX This is a workaround for Compiler Context*/
MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)param->MemberImpl;
MonoMethod *method = NULL;
if (is_sre_ctor_builder (mono_object_class (c->cb)))
method = ((MonoReflectionCtorBuilder *)c->cb)->mhandle;
else if (is_sr_mono_cmethod (mono_object_class (c->cb)))
method = ((MonoReflectionMethod *)c->cb)->method;
else
g_error ("mono_reflection_get_custom_attrs_info:: can't handle a CTBI with base_method of type %s", mono_type_get_full_name (member_class));
cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1);
} else {
char *type_name = mono_type_get_full_name (member_class);
char *msg = g_strdup_printf ("Custom attributes on a ParamInfo with member %s are not supported", type_name);
MonoException *ex = mono_get_exception_not_supported (msg);
g_free (type_name);
g_free (msg);
mono_raise_exception (ex);
}
} else if (strcmp ("AssemblyBuilder", klass->name) == 0) {
MonoReflectionAssemblyBuilder *assemblyb = (MonoReflectionAssemblyBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, assemblyb->assembly.assembly->image, assemblyb->cattrs);
} else if (strcmp ("TypeBuilder", klass->name) == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, &tb->module->dynamic_image->image, tb->cattrs);
} else if (strcmp ("ModuleBuilder", klass->name) == 0) {
MonoReflectionModuleBuilder *mb = (MonoReflectionModuleBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, &mb->dynamic_image->image, mb->cattrs);
} else if (strcmp ("ConstructorBuilder", klass->name) == 0) {
MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, cb->mhandle->klass->image, cb->cattrs);
} else if (strcmp ("MethodBuilder", klass->name) == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, mb->mhandle->klass->image, mb->cattrs);
} else if (strcmp ("FieldBuilder", klass->name) == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, &((MonoReflectionTypeBuilder*)fb->typeb)->module->dynamic_image->image, fb->cattrs);
} else if (strcmp ("MonoGenericClass", klass->name) == 0) {
MonoReflectionGenericClass *gclass = (MonoReflectionGenericClass*)obj;
cinfo = mono_reflection_get_custom_attrs_info ((MonoObject*)gclass->generic_type);
} else { /* handle other types here... */
g_error ("get custom attrs not yet supported for %s", klass->name);
}
return cinfo;
}
/*
* mono_reflection_get_custom_attrs_by_type:
* @obj: a reflection object handle
*
* Return an array with all the custom attributes defined on the
* reflection handle @obj. If @attr_klass is non-NULL, only custom attributes
* of that type are returned. The objects are fully built. Return NULL if a loading error
* occurs.
*/
MonoArray*
mono_reflection_get_custom_attrs_by_type (MonoObject *obj, MonoClass *attr_klass)
{
MonoArray *result;
MonoCustomAttrInfo *cinfo;
cinfo = mono_reflection_get_custom_attrs_info (obj);
if (cinfo) {
if (attr_klass)
result = mono_custom_attrs_construct_by_type (cinfo, attr_klass);
else
result = mono_custom_attrs_construct (cinfo);
if (!cinfo->cached)
mono_custom_attrs_free (cinfo);
} else {
if (mono_loader_get_last_error ())
return NULL;
result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, 0);
}
return result;
}
/*
* mono_reflection_get_custom_attrs:
* @obj: a reflection object handle
*
* Return an array with all the custom attributes defined on the
* reflection handle @obj. The objects are fully built. Return NULL if a loading error
* occurs.
*/
MonoArray*
mono_reflection_get_custom_attrs (MonoObject *obj)
{
return mono_reflection_get_custom_attrs_by_type (obj, NULL);
}
/*
* mono_reflection_get_custom_attrs_data:
* @obj: a reflection obj handle
*
* Returns an array of System.Reflection.CustomAttributeData,
* which includes information about attributes reflected on
* types loaded using the Reflection Only methods.
*/
MonoArray*
mono_reflection_get_custom_attrs_data (MonoObject *obj)
{
MonoArray *result;
MonoCustomAttrInfo *cinfo;
cinfo = mono_reflection_get_custom_attrs_info (obj);
if (cinfo) {
result = mono_custom_attrs_data_construct (cinfo);
if (!cinfo->cached)
mono_custom_attrs_free (cinfo);
} else
result = mono_array_new (mono_domain_get (), mono_defaults.customattribute_data_class, 0);
return result;
}
static MonoReflectionType*
mono_reflection_type_get_underlying_system_type (MonoReflectionType* t)
{
MonoMethod *method_get_underlying_system_type;
method_get_underlying_system_type = mono_object_get_virtual_method ((MonoObject *) t,
mono_class_get_method_from_name (mono_object_class (t),
"get_UnderlyingSystemType",
0));
return (MonoReflectionType *) mono_runtime_invoke (method_get_underlying_system_type, t, NULL, NULL);
}
#ifndef DISABLE_REFLECTION_EMIT
static gboolean
is_corlib_type (MonoClass *class)
{
return class->image == mono_defaults.corlib;
}
static gboolean
is_usertype (MonoReflectionType *ref)
{
MonoClass *class = mono_object_class (ref);
return class->image != mono_defaults.corlib || strcmp ("TypeDelegator", class->name) == 0;
}
#define check_corlib_type_cached(_class, _namespace, _name) do { \
static MonoClass *cached_class; \
if (cached_class) \
return cached_class == _class; \
if (is_corlib_type (_class) && !strcmp (_name, _class->name) && !strcmp (_namespace, _class->name_space)) { \
cached_class = _class; \
return TRUE; \
} \
return FALSE; \
} while (0)
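/*
 * Each expansion of check_corlib_type_cached () gets its own function-local
 * static cache: the first corlib class matching (_namespace, _name) is
 * remembered, and every later call collapses to a pointer comparison. This
 * is only correct because each is_* predicate below probes exactly one
 * (namespace, name) pair.
 */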
static gboolean
is_sre_array (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "ArrayType");
}
static gboolean
is_sre_byref (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "ByRefType");
}
static gboolean
is_sre_pointer (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "PointerType");
}
static gboolean
is_sre_generic_instance (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoGenericClass");
}
static gboolean
is_sre_method_builder (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "MethodBuilder");
}
static gboolean
is_sre_ctor_builder (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "ConstructorBuilder");
}
static gboolean
is_sr_mono_method (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoMethod");
}
static gboolean
is_sr_mono_cmethod (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoCMethod");
}
static gboolean
is_sr_mono_generic_method (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoGenericMethod");
}
static gboolean
is_sr_mono_generic_cmethod (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoGenericCMethod");
}
static gboolean
is_sr_mono_property (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoProperty");
}
static gboolean
is_sre_method_on_tb_inst (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "MethodOnTypeBuilderInst");
}
static gboolean
is_sre_ctor_on_tb_inst (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "ConstructorOnTypeBuilderInst");
}
gboolean
mono_class_is_reflection_method_or_constructor (MonoClass *class)
{
return is_sr_mono_method (class) || is_sr_mono_cmethod (class) || is_sr_mono_generic_method (class) || is_sr_mono_generic_cmethod (class);
}
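/*
 * mono_reflection_type_get_handle:
 * @ref: a managed reflection type object
 *
 * Resolves @ref to its runtime MonoType*, caching the result in ref->type
 * where possible. User-defined System.Type subclasses (see is_usertype ()
 * above) are first mapped through get_UnderlyingSystemType; SRE array,
 * byref, pointer and generic-instance wrappers are handled explicitly below.
 */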
MonoType*
mono_reflection_type_get_handle (MonoReflectionType* ref)
{
MonoClass *class;
if (!ref)
return NULL;
if (ref->type)
return ref->type;
if (is_usertype (ref)) {
ref = mono_reflection_type_get_underlying_system_type (ref);
g_assert (!is_usertype (ref)); /*FIXME fail better*/
if (ref->type)
return ref->type;
}
class = mono_object_class (ref);
if (is_sre_array (class)) {
MonoType *res;
MonoReflectionArrayType *sre_array = (MonoReflectionArrayType*)ref;
MonoType *base = mono_reflection_type_get_handle (sre_array->element_type);
g_assert (base);
if (sre_array->rank == 0) //single dimentional array
res = &mono_array_class_get (mono_class_from_mono_type (base), 1)->byval_arg;
else
res = &mono_bounded_array_class_get (mono_class_from_mono_type (base), sre_array->rank, TRUE)->byval_arg;
sre_array->type.type = res;
return res;
} else if (is_sre_byref (class)) {
MonoType *res;
MonoReflectionDerivedType *sre_byref = (MonoReflectionDerivedType*)ref;
MonoType *base = mono_reflection_type_get_handle (sre_byref->element_type);
g_assert (base);
res = &mono_class_from_mono_type (base)->this_arg;
sre_byref->type.type = res;
return res;
} else if (is_sre_pointer (class)) {
MonoType *res;
MonoReflectionDerivedType *sre_pointer = (MonoReflectionDerivedType*)ref;
MonoType *base = mono_reflection_type_get_handle (sre_pointer->element_type);
g_assert (base);
res = &mono_ptr_class_get (base)->byval_arg;
sre_pointer->type.type = res;
return res;
} else if (is_sre_generic_instance (class)) {
MonoType *res, **types;
MonoReflectionGenericClass *gclass = (MonoReflectionGenericClass*)ref;
int i, count;
count = mono_array_length (gclass->type_arguments);
types = g_new0 (MonoType*, count);
for (i = 0; i < count; ++i) {
MonoReflectionType *t = mono_array_get (gclass->type_arguments, gpointer, i);
types [i] = mono_reflection_type_get_handle (t);
}
res = mono_reflection_bind_generic_parameters ((MonoReflectionType*)gclass->generic_type, count, types);
g_free (types);
g_assert (res);
gclass->type.type = res;
return res;
}
g_error ("Cannot handle corlib user type %s", mono_type_full_name (&mono_object_class(ref)->byval_arg));
return NULL;
}
static MonoReflectionType*
mono_reflection_type_resolve_user_types (MonoReflectionType *type)
{
if (!type || type->type)
return type;
if (is_usertype (type)) {
type = mono_reflection_type_get_underlying_system_type (type);
if (is_usertype (type))
mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported"));
}
return type;
}
void
mono_reflection_create_unmanaged_type (MonoReflectionType *type)
{
mono_reflection_type_get_handle (type);
}
/**
* LOCKING: Assumes the loader lock is held.
*/
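/*
 * Caller-side sketch of that locking contract (the same bracket used by
 * ctorbuilder_to_mono_method () further down; shown here as a usage note):
 *
 *   mono_loader_lock ();
 *   sig = ctor_builder_to_signature (klass->image, mb);
 *   mono_loader_unlock ();
 */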
static MonoMethodSignature*
parameters_to_signature (MonoImage *image, MonoArray *parameters) {
MonoMethodSignature *sig;
int count, i;
count = parameters? mono_array_length (parameters): 0;
sig = image_g_malloc0 (image, MONO_SIZEOF_METHOD_SIGNATURE + sizeof (MonoType*) * count);
sig->param_count = count;
sig->sentinelpos = -1; /* FIXME */
for (i = 0; i < count; ++i)
sig->params [i] = mono_type_array_get_and_resolve (parameters, i);
return sig;
}
/**
* LOCKING: Assumes the loader lock is held.
*/
static MonoMethodSignature*
ctor_builder_to_signature (MonoImage *image, MonoReflectionCtorBuilder *ctor) {
MonoMethodSignature *sig;
sig = parameters_to_signature (image, ctor->parameters);
sig->hasthis = ctor->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
sig->ret = &mono_defaults.void_class->byval_arg;
return sig;
}
/**
* LOCKING: Assumes the loader lock is held.
*/
static MonoMethodSignature*
method_builder_to_signature (MonoImage *image, MonoReflectionMethodBuilder *method) {
MonoMethodSignature *sig;
sig = parameters_to_signature (image, method->parameters);
sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
sig->ret = method->rtype? mono_reflection_type_get_handle ((MonoReflectionType*)method->rtype): &mono_defaults.void_class->byval_arg;
sig->generic_param_count = method->generic_params ? mono_array_length (method->generic_params) : 0;
return sig;
}
static MonoMethodSignature*
dynamic_method_to_signature (MonoReflectionDynamicMethod *method) {
MonoMethodSignature *sig;
sig = parameters_to_signature (NULL, method->parameters);
sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
sig->ret = method->rtype? mono_reflection_type_get_handle (method->rtype): &mono_defaults.void_class->byval_arg;
sig->generic_param_count = 0;
return sig;
}
static void
get_prop_name_and_type (MonoObject *prop, char **name, MonoType **type)
{
MonoClass *klass = mono_object_class (prop);
if (strcmp (klass->name, "PropertyBuilder") == 0) {
MonoReflectionPropertyBuilder *pb = (MonoReflectionPropertyBuilder *)prop;
*name = mono_string_to_utf8 (pb->name);
*type = mono_reflection_type_get_handle ((MonoReflectionType*)pb->type);
} else {
MonoReflectionProperty *p = (MonoReflectionProperty *)prop;
*name = g_strdup (p->property->name);
if (p->property->get)
*type = mono_method_signature (p->property->get)->ret;
else
*type = mono_method_signature (p->property->set)->params [mono_method_signature (p->property->set)->param_count - 1];
}
}
static void
get_field_name_and_type (MonoObject *field, char **name, MonoType **type)
{
MonoClass *klass = mono_object_class (field);
if (strcmp (klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)field;
*name = mono_string_to_utf8 (fb->name);
*type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
} else {
MonoReflectionField *f = (MonoReflectionField *)field;
*name = g_strdup (mono_field_get_name (f->field));
*type = f->field->type;
}
}
#endif /* !DISABLE_REFLECTION_EMIT */
/*
* Encode a value in a custom attribute stream of bytes.
* The value to encode is either supplied as an object in argument val
* (valuetypes are boxed), or as a pointer to the data in the
* argument argval.
* @type represents the type of the value
* @buffer is the start of the buffer
* @p the current position in the buffer
* @buflen contains the size of the buffer and is used to return the new buffer size
* if this needs to be realloced.
* @retbuffer and @retp return the start and the position of the buffer
*/
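/*
 * Caller-side sketch of the realloc protocol (taken from the encoding loops
 * below): since the buffer may be moved by g_realloc (), both the buffer
 * start and the write position must be threaded back on every call:
 *
 *   encode_cattr_value (assembly, buffer, p, &buffer, &p, &buflen, type, arg, NULL);
 */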
static void
encode_cattr_value (MonoAssembly *assembly, char *buffer, char *p, char **retbuffer, char **retp, guint32 *buflen, MonoType *type, MonoObject *arg, char *argval)
{
MonoTypeEnum simple_type;
if ((p-buffer) + 10 >= *buflen) {
char *newbuf;
*buflen *= 2;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
if (!argval)
argval = ((char*)arg + sizeof (MonoObject));
simple_type = type->type;
handle_enum:
switch (simple_type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
case MONO_TYPE_I1:
*p++ = *argval;
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
swap_with_size (p, argval, 2, 1);
p += 2;
break;
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
swap_with_size (p, argval, 4, 1);
p += 4;
break;
case MONO_TYPE_R8:
#if defined(ARM_FPU_FPA) && G_BYTE_ORDER == G_LITTLE_ENDIAN
p [0] = argval [4];
p [1] = argval [5];
p [2] = argval [6];
p [3] = argval [7];
p [4] = argval [0];
p [5] = argval [1];
p [6] = argval [2];
p [7] = argval [3];
#else
swap_with_size (p, argval, 8, 1);
#endif
p += 8;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
swap_with_size (p, argval, 8, 1);
p += 8;
break;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
simple_type = mono_class_enum_basetype (type->data.klass)->type;
goto handle_enum;
} else {
g_warning ("generic valutype %s not handled in custom attr value decoding", type->data.klass->name);
}
break;
case MONO_TYPE_STRING: {
char *str;
guint32 slen;
if (!arg) {
*p++ = 0xFF;
break;
}
str = mono_string_to_utf8 ((MonoString*)arg);
slen = strlen (str);
if ((p-buffer) + 10 + slen >= *buflen) {
char *newbuf;
*buflen *= 2;
*buflen += slen;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
mono_metadata_encode_value (slen, p, &p);
memcpy (p, str, slen);
p += slen;
g_free (str);
break;
}
case MONO_TYPE_CLASS: {
char *str;
guint32 slen;
if (!arg) {
*p++ = 0xFF;
break;
}
handle_type:
str = type_get_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)arg), NULL);
slen = strlen (str);
if ((p-buffer) + 10 + slen >= *buflen) {
char *newbuf;
*buflen *= 2;
*buflen += slen;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
mono_metadata_encode_value (slen, p, &p);
memcpy (p, str, slen);
p += slen;
g_free (str);
break;
}
case MONO_TYPE_SZARRAY: {
int len, i;
MonoClass *eclass, *arg_eclass;
if (!arg) {
*p++ = 0xff; *p++ = 0xff; *p++ = 0xff; *p++ = 0xff;
break;
}
len = mono_array_length ((MonoArray*)arg);
*p++ = len & 0xff;
*p++ = (len >> 8) & 0xff;
*p++ = (len >> 16) & 0xff;
*p++ = (len >> 24) & 0xff;
*retp = p;
*retbuffer = buffer;
eclass = type->data.klass;
arg_eclass = mono_object_class (arg)->element_class;
if (!eclass) {
/* Happens when we are called from the MONO_TYPE_OBJECT case below */
eclass = mono_defaults.object_class;
}
if (eclass == mono_defaults.object_class && arg_eclass->valuetype) {
char *elptr = mono_array_addr ((MonoArray*)arg, char, 0);
int elsize = mono_class_array_element_size (arg_eclass);
for (i = 0; i < len; ++i) {
encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &arg_eclass->byval_arg, NULL, elptr);
elptr += elsize;
}
} else if (eclass->valuetype && arg_eclass->valuetype) {
char *elptr = mono_array_addr ((MonoArray*)arg, char, 0);
int elsize = mono_class_array_element_size (eclass);
for (i = 0; i < len; ++i) {
encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &eclass->byval_arg, NULL, elptr);
elptr += elsize;
}
} else {
for (i = 0; i < len; ++i) {
encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &eclass->byval_arg, mono_array_get ((MonoArray*)arg, MonoObject*, i), NULL);
}
}
break;
}
case MONO_TYPE_OBJECT: {
MonoClass *klass;
char *str;
guint32 slen;
/*
* The parameter type is 'object' but the type of the actual
* argument is not. So we have to add type information to the blob
* too. This is completely undocumented in the spec.
*/
if (arg == NULL) {
*p++ = MONO_TYPE_STRING; // It's the same hack MS uses
*p++ = 0xFF;
break;
}
klass = mono_object_class (arg);
if (mono_object_isinst (arg, mono_defaults.systemtype_class)) {
*p++ = 0x50;
goto handle_type;
} else if (klass->enumtype) {
*p++ = 0x55;
} else if (klass == mono_defaults.string_class) {
simple_type = MONO_TYPE_STRING;
*p++ = 0x0E;
goto handle_enum;
} else if (klass->rank == 1) {
*p++ = 0x1D;
if (klass->element_class->byval_arg.type == MONO_TYPE_OBJECT)
/* See Partition II, Appendix B3 */
*p++ = 0x51;
else
*p++ = klass->element_class->byval_arg.type;
encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &klass->byval_arg, arg, NULL);
break;
} else if (klass->byval_arg.type >= MONO_TYPE_BOOLEAN && klass->byval_arg.type <= MONO_TYPE_R8) {
*p++ = simple_type = klass->byval_arg.type;
goto handle_enum;
} else {
g_error ("unhandled type in custom attr");
}
str = type_get_qualified_name (mono_class_get_type (klass), NULL);
slen = strlen (str);
if ((p-buffer) + 10 + slen >= *buflen) {
char *newbuf;
*buflen *= 2;
*buflen += slen;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
mono_metadata_encode_value (slen, p, &p);
memcpy (p, str, slen);
p += slen;
g_free (str);
simple_type = mono_class_enum_basetype (klass)->type;
goto handle_enum;
}
default:
g_error ("type 0x%02x not yet supported in custom attr encoder", simple_type);
}
*retp = p;
*retbuffer = buffer;
}
static void
encode_field_or_prop_type (MonoType *type, char *p, char **retp)
{
if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype) {
char *str = type_get_qualified_name (type, NULL);
int slen = strlen (str);
*p++ = 0x55;
/*
* This seems to be optional...
* *p++ = 0x80;
*/
mono_metadata_encode_value (slen, p, &p);
memcpy (p, str, slen);
p += slen;
g_free (str);
} else if (type->type == MONO_TYPE_OBJECT) {
*p++ = 0x51;
} else if (type->type == MONO_TYPE_CLASS) {
/* it should be a type: encode_cattr_value () has the check */
*p++ = 0x50;
} else {
mono_metadata_encode_value (type->type, p, &p);
if (type->type == MONO_TYPE_SZARRAY)
/* See the examples in Partition VI, Annex B */
encode_field_or_prop_type (&type->data.klass->byval_arg, p, &p);
}
*retp = p;
}
#ifndef DISABLE_REFLECTION_EMIT
static void
encode_named_val (MonoReflectionAssembly *assembly, char *buffer, char *p, char **retbuffer, char **retp, guint32 *buflen, MonoType *type, char *name, MonoObject *value)
{
int len;
/* Preallocate a large enough buffer */
if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype) {
char *str = type_get_qualified_name (type, NULL);
len = strlen (str);
g_free (str);
} else if (type->type == MONO_TYPE_SZARRAY && type->data.klass->enumtype) {
char *str = type_get_qualified_name (&type->data.klass->byval_arg, NULL);
len = strlen (str);
g_free (str);
} else {
len = 0;
}
len += strlen (name);
if ((p-buffer) + 20 + len >= *buflen) {
char *newbuf;
*buflen *= 2;
*buflen += len;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
encode_field_or_prop_type (type, p, &p);
len = strlen (name);
mono_metadata_encode_value (len, p, &p);
memcpy (p, name, len);
p += len;
encode_cattr_value (assembly->assembly, buffer, p, &buffer, &p, buflen, type, value, NULL);
*retp = p;
*retbuffer = buffer;
}
/*
* mono_reflection_get_custom_attrs_blob:
* @ctor: custom attribute constructor
* @ctorArgs: arguments of the constructor
* @properties:
* @propValues:
* @fields:
* @fieldValues:
*
* Creates the blob of data that needs to be saved in the metadata and that represents
* the custom attribute described by @ctor, @ctorArgs etc.
* Returns: a Byte array representing the blob of data.
*/
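/*
 * Blob layout produced below (this matches the writes in the function body;
 * ECMA-335 partition II, section 23.3 is the normative description):
 *
 *   u16 prolog = 0x0001            (emitted as bytes 01 00)
 *   fixed ctor arguments           (one encode_cattr_value () each)
 *   u16 NumNamed = properties + fields
 *   named arguments                (tagged 0x54 PROPERTY / 0x53 FIELD)
 */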
MonoArray*
mono_reflection_get_custom_attrs_blob (MonoReflectionAssembly *assembly, MonoObject *ctor, MonoArray *ctorArgs, MonoArray *properties, MonoArray *propValues, MonoArray *fields, MonoArray* fieldValues)
{
MonoArray *result;
MonoMethodSignature *sig;
MonoObject *arg;
char *buffer, *p;
guint32 buflen, i;
MONO_ARCH_SAVE_REGS;
if (strcmp (ctor->vtable->klass->name, "MonoCMethod")) {
/* sig is freed later so allocate it in the heap */
sig = ctor_builder_to_signature (NULL, (MonoReflectionCtorBuilder*)ctor);
} else {
sig = mono_method_signature (((MonoReflectionMethod*)ctor)->method);
}
g_assert (mono_array_length (ctorArgs) == sig->param_count);
buflen = 256;
p = buffer = g_malloc (buflen);
/* write the prolog */
*p++ = 1;
*p++ = 0;
for (i = 0; i < sig->param_count; ++i) {
arg = mono_array_get (ctorArgs, MonoObject*, i);
encode_cattr_value (assembly->assembly, buffer, p, &buffer, &p, &buflen, sig->params [i], arg, NULL);
}
i = 0;
if (properties)
i += mono_array_length (properties);
if (fields)
i += mono_array_length (fields);
*p++ = i & 0xff;
*p++ = (i >> 8) & 0xff;
if (properties) {
MonoObject *prop;
for (i = 0; i < mono_array_length (properties); ++i) {
MonoType *ptype;
char *pname;
prop = mono_array_get (properties, gpointer, i);
get_prop_name_and_type (prop, &pname, &ptype);
*p++ = 0x54; /* PROPERTY signature */
encode_named_val (assembly, buffer, p, &buffer, &p, &buflen, ptype, pname, (MonoObject*)mono_array_get (propValues, gpointer, i));
g_free (pname);
}
}
if (fields) {
MonoObject *field;
for (i = 0; i < mono_array_length (fields); ++i) {
MonoType *ftype;
char *fname;
field = mono_array_get (fields, gpointer, i);
get_field_name_and_type (field, &fname, &ftype);
*p++ = 0x53; /* FIELD signature */
encode_named_val (assembly, buffer, p, &buffer, &p, &buflen, ftype, fname, (MonoObject*)mono_array_get (fieldValues, gpointer, i));
g_free (fname);
}
}
g_assert (p - buffer <= buflen);
buflen = p - buffer;
result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen);
p = mono_array_addr (result, char, 0);
memcpy (p, buffer, buflen);
g_free (buffer);
if (strcmp (ctor->vtable->klass->name, "MonoCMethod"))
g_free (sig);
return result;
}
/*
* mono_reflection_setup_internal_class:
* @tb: a TypeBuilder object
*
* Creates a MonoClass that represents the TypeBuilder.
* This is a trick that lets us simplify a lot of reflection code
* (and will allow us to support Build and Run assemblies more easily).
*/
void
mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb)
{
MonoError error;
MonoClass *klass, *parent;
MONO_ARCH_SAVE_REGS;
RESOLVE_TYPE (tb->parent);
mono_loader_lock ();
if (tb->parent) {
/* check so we can compile corlib correctly */
if (strcmp (mono_object_class (tb->parent)->name, "TypeBuilder") == 0) {
/* mono_class_setup_mono_type () guarantees type->data.klass is valid */
parent = mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent)->data.klass;
} else {
parent = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent));
}
} else {
parent = NULL;
}
/* the type has already been created: it means we just have to change the parent */
if (tb->type.type) {
klass = mono_class_from_mono_type (tb->type.type);
klass->parent = NULL;
/* fool mono_class_setup_parent */
klass->supertypes = NULL;
mono_class_setup_parent (klass, parent);
mono_class_setup_mono_type (klass);
mono_loader_unlock ();
return;
}
klass = mono_image_alloc0 (&tb->module->dynamic_image->image, sizeof (MonoClass));
klass->image = &tb->module->dynamic_image->image;
klass->inited = 1; /* we lie to the runtime */
klass->name = mono_string_to_utf8_image (klass->image, tb->name, &error);
if (!mono_error_ok (&error))
goto failure;
klass->name_space = mono_string_to_utf8_image (klass->image, tb->nspace, &error);
if (!mono_error_ok (&error))
goto failure;
klass->type_token = MONO_TOKEN_TYPE_DEF | tb->table_idx;
klass->flags = tb->attrs;
mono_profiler_class_event (klass, MONO_PROFILE_START_LOAD);
klass->element_class = klass;
MOVING_GC_REGISTER (&klass->reflection_info);
klass->reflection_info = tb;
/* Put into cache so mono_class_get () will find it */
mono_image_add_to_name_cache (klass->image, klass->name_space, klass->name, tb->table_idx);
mono_g_hash_table_insert (tb->module->dynamic_image->tokens,
GUINT_TO_POINTER (MONO_TOKEN_TYPE_DEF | tb->table_idx), tb);
if (parent != NULL) {
mono_class_setup_parent (klass, parent);
} else if (strcmp (klass->name, "Object") == 0 && strcmp (klass->name_space, "System") == 0) {
const char *old_n = klass->name;
/* trick to get relative numbering right when compiling corlib */
klass->name = "BuildingObject";
mono_class_setup_parent (klass, mono_defaults.object_class);
klass->name = old_n;
}
if ((!strcmp (klass->name, "ValueType") && !strcmp (klass->name_space, "System")) ||
(!strcmp (klass->name, "Object") && !strcmp (klass->name_space, "System")) ||
(!strcmp (klass->name, "Enum") && !strcmp (klass->name_space, "System"))) {
klass->instance_size = sizeof (MonoObject);
klass->size_inited = 1;
mono_class_setup_vtable_general (klass, NULL, 0);
}
mono_class_setup_mono_type (klass);
mono_class_setup_supertypes (klass);
/*
* FIXME: handle interfaces.
*/
tb->type.type = &klass->byval_arg;
if (tb->nesting_type) {
g_assert (tb->nesting_type->type);
klass->nested_in = mono_class_from_mono_type (mono_reflection_type_get_handle (tb->nesting_type));
}
/*g_print ("setup %s as %s (%p)\n", klass->name, ((MonoObject*)tb)->vtable->klass->name, tb);*/
mono_profiler_class_loaded (klass, MONO_PROFILE_OK);
mono_loader_unlock ();
return;
failure:
mono_loader_unlock ();
mono_error_raise_exception (&error);
}
/*
* mono_reflection_setup_generic_class:
* @tb: a TypeBuilder object
*
* Setup the generic class before adding the first generic parameter.
*/
void
mono_reflection_setup_generic_class (MonoReflectionTypeBuilder *tb)
{
}
/*
* mono_reflection_create_generic_class:
* @tb: a TypeBuilder object
*
* Creates the generic class after all generic parameters have been added.
*/
void
mono_reflection_create_generic_class (MonoReflectionTypeBuilder *tb)
{
MonoClass *klass;
int count, i;
MONO_ARCH_SAVE_REGS;
klass = mono_class_from_mono_type (tb->type.type);
count = tb->generic_params ? mono_array_length (tb->generic_params) : 0;
if (klass->generic_container || (count == 0))
return;
g_assert (tb->generic_container && (tb->generic_container->owner.klass == klass));
klass->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer));
klass->generic_container->owner.klass = klass;
klass->generic_container->type_argc = count;
klass->generic_container->type_params = mono_image_alloc0 (klass->image, sizeof (MonoGenericParamFull) * count);
klass->is_generic = 1;
for (i = 0; i < count; i++) {
MonoReflectionGenericParam *gparam = mono_array_get (tb->generic_params, gpointer, i);
MonoGenericParamFull *param = (MonoGenericParamFull *) mono_reflection_type_get_handle ((MonoReflectionType*)gparam)->data.generic_param;
klass->generic_container->type_params [i] = *param;
/* Make sure we are a different type instance */
klass->generic_container->type_params [i].param.owner = klass->generic_container;
klass->generic_container->type_params [i].info.pklass = NULL;
klass->generic_container->type_params [i].info.flags = gparam->attrs;
g_assert (klass->generic_container->type_params [i].param.owner);
}
klass->generic_container->context.class_inst = mono_get_shared_generic_inst (klass->generic_container);
}
/*
* mono_reflection_create_internal_class:
* @tb: a TypeBuilder object
*
* Actually create the MonoClass that is associated with the TypeBuilder.
*/
void
mono_reflection_create_internal_class (MonoReflectionTypeBuilder *tb)
{
MonoClass *klass;
MONO_ARCH_SAVE_REGS;
klass = mono_class_from_mono_type (tb->type.type);
mono_loader_lock ();
if (klass->enumtype && mono_class_enum_basetype (klass) == NULL) {
MonoReflectionFieldBuilder *fb;
MonoClass *ec;
MonoType *enum_basetype;
g_assert (tb->fields != NULL);
g_assert (mono_array_length (tb->fields) >= 1);
fb = mono_array_get (tb->fields, MonoReflectionFieldBuilder*, 0);
if (!mono_type_is_valid_enum_basetype (mono_reflection_type_get_handle ((MonoReflectionType*)fb->type))) {
mono_loader_unlock ();
return;
}
enum_basetype = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
klass->element_class = mono_class_from_mono_type (enum_basetype);
if (!klass->element_class)
klass->element_class = mono_class_from_mono_type (enum_basetype);
/*
* get the element_class from the current corlib.
*/
ec = default_class_from_mono_type (enum_basetype);
klass->instance_size = ec->instance_size;
klass->size_inited = 1;
/*
* this is almost safe to do with enums and it's needed to be able
* to create objects of the enum type (for use in SetConstant).
*/
/* FIXME: Does this mean enums can't have method overrides ? */
mono_class_setup_vtable_general (klass, NULL, 0);
}
mono_loader_unlock ();
}
static MonoMarshalSpec*
mono_marshal_spec_from_builder (MonoImage *image, MonoAssembly *assembly,
MonoReflectionMarshal *minfo)
{
MonoMarshalSpec *res;
res = image_g_new0 (image, MonoMarshalSpec, 1);
res->native = minfo->type;
switch (minfo->type) {
case MONO_NATIVE_LPARRAY:
res->data.array_data.elem_type = minfo->eltype;
if (minfo->has_size) {
res->data.array_data.param_num = minfo->param_num;
res->data.array_data.num_elem = minfo->count;
res->data.array_data.elem_mult = minfo->param_num == -1 ? 0 : 1;
}
else {
res->data.array_data.param_num = -1;
res->data.array_data.num_elem = -1;
res->data.array_data.elem_mult = -1;
}
break;
case MONO_NATIVE_BYVALTSTR:
case MONO_NATIVE_BYVALARRAY:
res->data.array_data.num_elem = minfo->count;
break;
case MONO_NATIVE_CUSTOM:
if (minfo->marshaltyperef)
res->data.custom_data.custom_name =
type_get_fully_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)minfo->marshaltyperef));
if (minfo->mcookie)
res->data.custom_data.cookie = mono_string_to_utf8 (minfo->mcookie);
break;
default:
break;
}
return res;
}
#endif /* !DISABLE_REFLECTION_EMIT */
MonoReflectionMarshal*
mono_reflection_marshal_from_marshal_spec (MonoDomain *domain, MonoClass *klass,
MonoMarshalSpec *spec)
{
static MonoClass *System_Reflection_Emit_UnmanagedMarshalClass;
MonoReflectionMarshal *minfo;
MonoType *mtype;
if (!System_Reflection_Emit_UnmanagedMarshalClass) {
System_Reflection_Emit_UnmanagedMarshalClass = mono_class_from_name (
mono_defaults.corlib, "System.Reflection.Emit", "UnmanagedMarshal");
g_assert (System_Reflection_Emit_UnmanagedMarshalClass);
}
minfo = (MonoReflectionMarshal*)mono_object_new (domain, System_Reflection_Emit_UnmanagedMarshalClass);
minfo->type = spec->native;
switch (minfo->type) {
case MONO_NATIVE_LPARRAY:
minfo->eltype = spec->data.array_data.elem_type;
minfo->count = spec->data.array_data.num_elem;
minfo->param_num = spec->data.array_data.param_num;
break;
case MONO_NATIVE_BYVALTSTR:
case MONO_NATIVE_BYVALARRAY:
minfo->count = spec->data.array_data.num_elem;
break;
case MONO_NATIVE_CUSTOM:
if (spec->data.custom_data.custom_name) {
mtype = mono_reflection_type_from_name (spec->data.custom_data.custom_name, klass->image);
if (mtype)
MONO_OBJECT_SETREF (minfo, marshaltyperef, mono_type_get_object (domain, mtype));
MONO_OBJECT_SETREF (minfo, marshaltype, mono_string_new (domain, spec->data.custom_data.custom_name));
}
if (spec->data.custom_data.cookie)
MONO_OBJECT_SETREF (minfo, mcookie, mono_string_new (domain, spec->data.custom_data.cookie));
break;
default:
break;
}
return minfo;
}
#ifndef DISABLE_REFLECTION_EMIT
static MonoMethod*
reflection_methodbuilder_to_mono_method (MonoClass *klass,
ReflectionMethodBuilder *rmb,
MonoMethodSignature *sig)
{
MonoError error;
MonoMethod *m;
MonoMethodNormal *pm;
MonoMarshalSpec **specs;
MonoReflectionMethodAux *method_aux;
MonoImage *image;
gboolean dynamic;
int i;
mono_error_init (&error);
/*
* Methods created using a MethodBuilder should have their memory allocated
* inside the image mempool, while dynamic methods should have their memory
* malloc'd.
*/
dynamic = rmb->refs != NULL;
image = dynamic ? NULL : klass->image;
if (!dynamic)
g_assert (!klass->generic_class);
mono_loader_lock ();
if ((rmb->attrs & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
(rmb->iattrs & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL))
m = (MonoMethod *)image_g_new0 (image, MonoMethodPInvoke, 1);
else if (rmb->refs)
m = (MonoMethod *)image_g_new0 (image, MonoMethodWrapper, 1);
else
m = (MonoMethod *)image_g_new0 (image, MonoMethodNormal, 1);
pm = (MonoMethodNormal*)m;
m->dynamic = dynamic;
m->slot = -1;
m->flags = rmb->attrs;
m->iflags = rmb->iattrs;
m->name = mono_string_to_utf8_image (image, rmb->name, &error);
g_assert (mono_error_ok (&error));
m->klass = klass;
m->signature = sig;
m->skip_visibility = rmb->skip_visibility;
if (rmb->table_idx)
m->token = MONO_TOKEN_METHOD_DEF | (*rmb->table_idx);
if (m->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
if (klass == mono_defaults.string_class && !strcmp (m->name, ".ctor"))
m->string_ctor = 1;
m->signature->pinvoke = 1;
} else if (m->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) {
m->signature->pinvoke = 1;
method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);
method_aux->dllentry = rmb->dllentry ? mono_string_to_utf8_image (image, rmb->dllentry, &error) : image_strdup (image, m->name);
g_assert (mono_error_ok (&error));
method_aux->dll = mono_string_to_utf8_image (image, rmb->dll, &error);
g_assert (mono_error_ok (&error));
((MonoMethodPInvoke*)m)->piflags = (rmb->native_cc << 8) | (rmb->charset ? (rmb->charset - 1) * 2 : 0) | rmb->extra_flags;
if (klass->image->dynamic)
g_hash_table_insert (((MonoDynamicImage*)klass->image)->method_aux_hash, m, method_aux);
mono_loader_unlock ();
return m;
} else if (!(m->flags & METHOD_ATTRIBUTE_ABSTRACT) &&
!(m->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
MonoMethodHeader *header;
guint32 code_size;
gint32 max_stack, i;
gint32 num_locals = 0;
gint32 num_clauses = 0;
guint8 *code;
if (rmb->ilgen) {
code = mono_array_addr (rmb->ilgen->code, guint8, 0);
code_size = rmb->ilgen->code_len;
max_stack = rmb->ilgen->max_stack;
num_locals = rmb->ilgen->locals ? mono_array_length (rmb->ilgen->locals) : 0;
if (rmb->ilgen->ex_handlers)
num_clauses = method_count_clauses (rmb->ilgen);
} else {
if (rmb->code) {
code = mono_array_addr (rmb->code, guint8, 0);
code_size = mono_array_length (rmb->code);
/* we probably need to run a verifier on the code... */
max_stack = 8;
}
else {
code = NULL;
code_size = 0;
max_stack = 8;
}
}
header = image_g_malloc0 (image, MONO_SIZEOF_METHOD_HEADER + num_locals * sizeof (MonoType*));
header->code_size = code_size;
header->code = image_g_malloc (image, code_size);
memcpy ((char*)header->code, code, code_size);
header->max_stack = max_stack;
header->init_locals = rmb->init_locals;
header->num_locals = num_locals;
for (i = 0; i < num_locals; ++i) {
MonoReflectionLocalBuilder *lb =
mono_array_get (rmb->ilgen->locals, MonoReflectionLocalBuilder*, i);
header->locals [i] = image_g_new0 (image, MonoType, 1);
memcpy (header->locals [i], mono_reflection_type_get_handle ((MonoReflectionType*)lb->type), MONO_SIZEOF_TYPE);
}
header->num_clauses = num_clauses;
if (num_clauses) {
header->clauses = method_encode_clauses (image, (MonoDynamicImage*)klass->image,
rmb->ilgen, num_clauses);
}
pm->header = header;
}
if (rmb->generic_params) {
int count = mono_array_length (rmb->generic_params);
MonoGenericContainer *container;
container = rmb->generic_container;
if (container) {
m->is_generic = TRUE;
mono_method_set_generic_container (m, container);
}
container->type_argc = count;
container->type_params = image_g_new0 (image, MonoGenericParamFull, count);
container->owner.method = m;
for (i = 0; i < count; i++) {
MonoReflectionGenericParam *gp =
mono_array_get (rmb->generic_params, MonoReflectionGenericParam*, i);
MonoGenericParamFull *param = (MonoGenericParamFull *) mono_reflection_type_get_handle ((MonoReflectionType*)gp)->data.generic_param;
container->type_params [i] = *param;
}
if (klass->generic_container) {
container->parent = klass->generic_container;
container->context.class_inst = klass->generic_container->context.class_inst;
}
container->context.method_inst = mono_get_shared_generic_inst (container);
}
if (rmb->refs) {
MonoMethodWrapper *mw = (MonoMethodWrapper*)m;
int i;
void **data;
m->wrapper_type = MONO_WRAPPER_DYNAMIC_METHOD;
mw->method_data = data = image_g_new (image, gpointer, rmb->nrefs + 1);
data [0] = GUINT_TO_POINTER (rmb->nrefs);
for (i = 0; i < rmb->nrefs; ++i)
data [i + 1] = rmb->refs [i];
}
method_aux = NULL;
/* Parameter info */
if (rmb->pinfo) {
if (!method_aux)
method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);
method_aux->param_names = image_g_new0 (image, char *, mono_method_signature (m)->param_count + 1);
for (i = 0; i <= m->signature->param_count; ++i) {
MonoReflectionParamBuilder *pb;
if ((pb = mono_array_get (rmb->pinfo, MonoReflectionParamBuilder*, i))) {
if ((i > 0) && (pb->attrs)) {
/* Make a copy since it might point to a shared type structure */
m->signature->params [i - 1] = mono_metadata_type_dup (klass->image, m->signature->params [i - 1]);
m->signature->params [i - 1]->attrs = pb->attrs;
}
if (pb->attrs & PARAM_ATTRIBUTE_HAS_DEFAULT) {
MonoDynamicImage *assembly;
guint32 idx, def_type, len;
char *p;
const char *p2;
if (!method_aux->param_defaults) {
method_aux->param_defaults = image_g_new0 (image, guint8*, m->signature->param_count + 1);
method_aux->param_default_types = image_g_new0 (image, guint32, m->signature->param_count + 1);
}
assembly = (MonoDynamicImage*)klass->image;
idx = encode_constant (assembly, pb->def_value, &def_type);
/* Copy the data from the blob since it might get realloc-ed */
p = assembly->blob.data + idx;
len = mono_metadata_decode_blob_size (p, &p2);
len += p2 - p;
method_aux->param_defaults [i] = image_g_malloc (image, len);
method_aux->param_default_types [i] = def_type;
memcpy ((gpointer)method_aux->param_defaults [i], p, len);
}
if (pb->name) {
method_aux->param_names [i] = mono_string_to_utf8_image (image, pb->name, &error);
g_assert (mono_error_ok (&error));
}
if (pb->cattrs) {
if (!method_aux->param_cattr)
method_aux->param_cattr = image_g_new0 (image, MonoCustomAttrInfo*, m->signature->param_count + 1);
method_aux->param_cattr [i] = mono_custom_attrs_from_builders (image, klass->image, pb->cattrs);
}
}
}
}
/* Parameter marshalling */
specs = NULL;
if (rmb->pinfo)
for (i = 0; i < mono_array_length (rmb->pinfo); ++i) {
MonoReflectionParamBuilder *pb;
if ((pb = mono_array_get (rmb->pinfo, MonoReflectionParamBuilder*, i))) {
if (pb->marshal_info) {
if (specs == NULL)
specs = image_g_new0 (image, MonoMarshalSpec*, sig->param_count + 1);
specs [pb->position] =
mono_marshal_spec_from_builder (image, klass->image->assembly, pb->marshal_info);
}
}
}
if (specs != NULL) {
if (!method_aux)
method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);
method_aux->param_marshall = specs;
}
if (klass->image->dynamic && method_aux)
g_hash_table_insert (((MonoDynamicImage*)klass->image)->method_aux_hash, m, method_aux);
mono_loader_unlock ();
return m;
}
static MonoMethod*
ctorbuilder_to_mono_method (MonoClass *klass, MonoReflectionCtorBuilder* mb)
{
ReflectionMethodBuilder rmb;
MonoMethodSignature *sig;
mono_loader_lock ();
sig = ctor_builder_to_signature (klass->image, mb);
mono_loader_unlock ();
reflection_methodbuilder_from_ctor_builder (&rmb, mb);
mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig);
mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs);
/* If we are in a generic class, we might be called multiple times from inflate_method */
if (!((MonoDynamicImage*)klass->image)->save && !klass->generic_container) {
/* ilgen is no longer needed */
mb->ilgen = NULL;
}
return mb->mhandle;
}
static MonoMethod*
methodbuilder_to_mono_method (MonoClass *klass, MonoReflectionMethodBuilder* mb)
{
ReflectionMethodBuilder rmb;
MonoMethodSignature *sig;
mono_loader_lock ();
sig = method_builder_to_signature (klass->image, mb);
mono_loader_unlock ();
reflection_methodbuilder_from_method_builder (&rmb, mb);
mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig);
mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs);
/* If we are in a generic class, we might be called multiple times from inflate_method */
if (!((MonoDynamicImage*)klass->image)->save && !klass->generic_container) {
/* ilgen is no longer needed */
mb->ilgen = NULL;
}
return mb->mhandle;
}
static MonoClassField*
fieldbuilder_to_mono_class_field (MonoClass *klass, MonoReflectionFieldBuilder* fb)
{
MonoClassField *field;
MonoType *custom;
field = g_new0 (MonoClassField, 1);
field->name = mono_string_to_utf8 (fb->name);
if (fb->attrs || fb->modreq || fb->modopt) {
field->type = mono_metadata_type_dup (NULL, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type));
field->type->attrs = fb->attrs;
g_assert (klass->image->dynamic);
custom = add_custom_modifiers ((MonoDynamicImage*)klass->image, field->type, fb->modreq, fb->modopt);
g_free (field->type);
field->type = custom;
} else {
field->type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
}
if (fb->offset != -1)
field->offset = fb->offset;
field->parent = klass;
mono_save_custom_attrs (klass->image, field, fb->cattrs);
// FIXME: Can't store fb->def_value/RVA, is it needed for field_on_insts?
return field;
}
#endif
MonoType*
mono_reflection_bind_generic_parameters (MonoReflectionType *type, int type_argc, MonoType **types)
{
MonoClass *klass;
MonoReflectionTypeBuilder *tb = NULL;
gboolean is_dynamic = FALSE;
MonoDomain *domain;
MonoClass *geninst;
mono_loader_lock ();
domain = mono_object_domain (type);
if (!strcmp (((MonoObject *) type)->vtable->klass->name, "TypeBuilder")) {
tb = (MonoReflectionTypeBuilder *) type;
is_dynamic = TRUE;
} else if (!strcmp (((MonoObject *) type)->vtable->klass->name, "MonoGenericClass")) {
MonoReflectionGenericClass *rgi = (MonoReflectionGenericClass *) type;
tb = rgi->generic_type;
is_dynamic = TRUE;
}
/* FIXME: fix the CreateGenericParameters protocol to avoid the two-stage setup of TypeBuilders */
if (tb && tb->generic_container)
mono_reflection_create_generic_class (tb);
klass = mono_class_from_mono_type (mono_reflection_type_get_handle (type));
if (!klass->generic_container) {
mono_loader_unlock ();
return NULL;
}
if (klass->wastypebuilder) {
tb = (MonoReflectionTypeBuilder *) klass->reflection_info;
is_dynamic = TRUE;
}
mono_loader_unlock ();
geninst = mono_class_bind_generic_parameters (klass, type_argc, types, is_dynamic);
return &geninst->byval_arg;
}
MonoClass*
mono_class_bind_generic_parameters (MonoClass *klass, int type_argc, MonoType **types, gboolean is_dynamic)
{
MonoGenericClass *gclass;
MonoGenericInst *inst;
g_assert (klass->generic_container);
inst = mono_metadata_get_generic_inst (type_argc, types);
gclass = mono_metadata_lookup_generic_class (klass, inst, is_dynamic);
return mono_generic_class_get_class (gclass);
}
MonoReflectionMethod*
mono_reflection_bind_generic_method_parameters (MonoReflectionMethod *rmethod, MonoArray *types)
{
MonoClass *klass;
MonoMethod *method, *inflated;
MonoMethodInflated *imethod;
MonoGenericContext tmp_context;
MonoGenericInst *ginst;
MonoType **type_argv;
int count, i;
MONO_ARCH_SAVE_REGS;
if (!strcmp (rmethod->object.vtable->klass->name, "MethodBuilder")) {
#ifndef DISABLE_REFLECTION_EMIT
MonoReflectionMethodBuilder *mb = NULL;
MonoReflectionTypeBuilder *tb;
MonoClass *klass;
mb = (MonoReflectionMethodBuilder *) rmethod;
tb = (MonoReflectionTypeBuilder *) mb->type;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
method = methodbuilder_to_mono_method (klass, mb);
#else
g_assert_not_reached ();
method = NULL;
#endif
} else {
method = rmethod->method;
}
klass = method->klass;
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
count = mono_method_signature (method)->generic_param_count;
if (count != mono_array_length (types))
return NULL;
type_argv = g_new0 (MonoType *, count);
for (i = 0; i < count; i++) {
MonoReflectionType *garg = mono_array_get (types, gpointer, i);
type_argv [i] = mono_reflection_type_get_handle (garg);
}
ginst = mono_metadata_get_generic_inst (count, type_argv);
g_free (type_argv);
tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
tmp_context.method_inst = ginst;
inflated = mono_class_inflate_generic_method (method, &tmp_context);
imethod = (MonoMethodInflated *) inflated;
if (method->klass->image->dynamic) {
MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image;
/*
* This table maps metadata structures representing inflated methods/fields
* to the reflection objects representing their generic definitions.
*/
mono_loader_lock ();
mono_g_hash_table_insert (image->generic_def_objects, imethod, rmethod);
mono_loader_unlock ();
}
if (!mono_verifier_is_method_valid_generic_instantiation (inflated))
mono_raise_exception (mono_get_exception_argument ("typeArguments", "Invalid generic arguments"));
return mono_method_get_object (mono_object_domain (rmethod), inflated, NULL);
}
#ifndef DISABLE_REFLECTION_EMIT
static MonoMethod *
inflate_mono_method (MonoClass *klass, MonoMethod *method, MonoObject *obj)
{
MonoMethodInflated *imethod;
MonoGenericContext *context;
int i;
/*
* With generic code sharing the klass might not be inflated.
* This can happen because classes inflated with their own
* type arguments are "normalized" to the uninflated class.
*/
if (!klass->generic_class)
return method;
context = mono_class_get_context (klass);
if (klass->method.count) {
/* Find the already created inflated method */
for (i = 0; i < klass->method.count; ++i) {
g_assert (klass->methods [i]->is_inflated);
if (((MonoMethodInflated*)klass->methods [i])->declaring == method)
break;
}
g_assert (i < klass->method.count);
imethod = (MonoMethodInflated*)klass->methods [i];
} else {
imethod = (MonoMethodInflated *) mono_class_inflate_generic_method_full (method, klass, context);
}
if (method->is_generic && method->klass->image->dynamic) {
MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image;
mono_loader_lock ();
mono_g_hash_table_insert (image->generic_def_objects, imethod, obj);
mono_loader_unlock ();
}
return (MonoMethod *) imethod;
}
static MonoMethod *
inflate_method (MonoReflectionGenericClass *type, MonoObject *obj)
{
MonoMethod *method;
MonoClass *gklass;
gklass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)type->generic_type));
if (!strcmp (obj->vtable->klass->name, "MethodBuilder"))
if (((MonoReflectionMethodBuilder*)obj)->mhandle)
method = ((MonoReflectionMethodBuilder*)obj)->mhandle;
else
method = methodbuilder_to_mono_method (gklass, (MonoReflectionMethodBuilder *) obj);
else if (!strcmp (obj->vtable->klass->name, "ConstructorBuilder"))
method = ctorbuilder_to_mono_method (gklass, (MonoReflectionCtorBuilder *) obj);
else if (!strcmp (obj->vtable->klass->name, "MonoMethod") || !strcmp (obj->vtable->klass->name, "MonoCMethod"))
method = ((MonoReflectionMethod *) obj)->method;
else {
method = NULL; /* prevent compiler warning */
g_error ("can't handle type %s", obj->vtable->klass->name);
}
return inflate_mono_method (mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)type)), method, obj);
}
/* TODO: avoid saving custom attrs for generic classes as it's enough to have them on the generic type definition. */
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *methods,
MonoArray *ctors, MonoArray *fields, MonoArray *properties,
MonoArray *events)
{
MonoGenericClass *gclass;
MonoDynamicGenericClass *dgclass;
MonoClass *klass, *gklass;
MonoType *gtype;
int i;
MONO_ARCH_SAVE_REGS;
gtype = mono_reflection_type_get_handle ((MonoReflectionType*)type);
klass = mono_class_from_mono_type (gtype);
g_assert (gtype->type == MONO_TYPE_GENERICINST);
gclass = gtype->data.generic_class;
g_assert (gclass->is_dynamic);
dgclass = (MonoDynamicGenericClass *) gclass;
if (dgclass->initialized)
return;
gklass = gclass->container_class;
mono_class_init (gklass);
dgclass->count_methods = methods ? mono_array_length (methods) : 0;
dgclass->count_ctors = ctors ? mono_array_length (ctors) : 0;
dgclass->count_fields = fields ? mono_array_length (fields) : 0;
dgclass->count_properties = properties ? mono_array_length (properties) : 0;
dgclass->count_events = events ? mono_array_length (events) : 0;
dgclass->methods = g_new0 (MonoMethod *, dgclass->count_methods);
dgclass->ctors = g_new0 (MonoMethod *, dgclass->count_ctors);
dgclass->fields = g_new0 (MonoClassField, dgclass->count_fields);
dgclass->properties = g_new0 (MonoProperty, dgclass->count_properties);
dgclass->events = g_new0 (MonoEvent, dgclass->count_events);
dgclass->field_objects = g_new0 (MonoObject*, dgclass->count_fields);
dgclass->field_generic_types = g_new0 (MonoType*, dgclass->count_fields);
for (i = 0; i < dgclass->count_methods; i++) {
MonoObject *obj = mono_array_get (methods, gpointer, i);
dgclass->methods [i] = inflate_method (type, obj);
}
for (i = 0; i < dgclass->count_ctors; i++) {
MonoObject *obj = mono_array_get (ctors, gpointer, i);
dgclass->ctors [i] = inflate_method (type, obj);
}
for (i = 0; i < dgclass->count_fields; i++) {
MonoObject *obj = mono_array_get (fields, gpointer, i);
MonoClassField *field, *inflated_field = NULL;
if (!strcmp (obj->vtable->klass->name, "FieldBuilder"))
inflated_field = field = fieldbuilder_to_mono_class_field (klass, (MonoReflectionFieldBuilder *) obj);
else if (!strcmp (obj->vtable->klass->name, "MonoField"))
field = ((MonoReflectionField *) obj)->field;
else {
field = NULL; /* prevent compiler warning */
g_assert_not_reached ();
}
dgclass->fields [i] = *field;
dgclass->fields [i].parent = klass;
dgclass->fields [i].type = mono_class_inflate_generic_type (
field->type, mono_generic_class_get_context ((MonoGenericClass *) dgclass));
dgclass->field_generic_types [i] = field->type;
MOVING_GC_REGISTER (&dgclass->field_objects [i]);
dgclass->field_objects [i] = obj;
if (inflated_field) {
g_free (inflated_field);
} else {
dgclass->fields [i].name = g_strdup (dgclass->fields [i].name);
}
}
for (i = 0; i < dgclass->count_properties; i++) {
MonoObject *obj = mono_array_get (properties, gpointer, i);
MonoProperty *property = &dgclass->properties [i];
if (!strcmp (obj->vtable->klass->name, "PropertyBuilder")) {
MonoReflectionPropertyBuilder *pb = (MonoReflectionPropertyBuilder *) obj;
property->parent = klass;
property->attrs = pb->attrs;
property->name = mono_string_to_utf8 (pb->name);
if (pb->get_method)
property->get = inflate_method (type, (MonoObject *) pb->get_method);
if (pb->set_method)
property->set = inflate_method (type, (MonoObject *) pb->set_method);
} else if (!strcmp (obj->vtable->klass->name, "MonoProperty")) {
*property = *((MonoReflectionProperty *) obj)->property;
property->name = g_strdup (property->name);
if (property->get)
property->get = inflate_mono_method (klass, property->get, NULL);
if (property->set)
property->set = inflate_mono_method (klass, property->set, NULL);
} else
g_assert_not_reached ();
}
for (i = 0; i < dgclass->count_events; i++) {
MonoObject *obj = mono_array_get (events, gpointer, i);
MonoEvent *event = &dgclass->events [i];
if (!strcmp (obj->vtable->klass->name, "EventBuilder")) {
MonoReflectionEventBuilder *eb = (MonoReflectionEventBuilder *) obj;
event->parent = klass;
event->attrs = eb->attrs;
event->name = mono_string_to_utf8 (eb->name);
if (eb->add_method)
event->add = inflate_method (type, (MonoObject *) eb->add_method);
if (eb->remove_method)
event->remove = inflate_method (type, (MonoObject *) eb->remove_method);
} else if (!strcmp (obj->vtable->klass->name, "MonoEvent")) {
*event = *((MonoReflectionMonoEvent *) obj)->event;
event->name = g_strdup (event->name);
if (event->add)
event->add = inflate_mono_method (klass, event->add, NULL);
if (event->remove)
event->remove = inflate_mono_method (klass, event->remove, NULL);
} else
g_assert_not_reached ();
}
dgclass->initialized = TRUE;
}
static void
ensure_generic_class_runtime_vtable (MonoClass *klass)
{
MonoClass *gklass = klass->generic_class->container_class;
int i;
if (klass->wastypebuilder)
return;
ensure_runtime_vtable (gklass);
klass->method.count = gklass->method.count;
klass->methods = mono_image_alloc (klass->image, sizeof (MonoMethod*) * (klass->method.count + 1));
for (i = 0; i < klass->method.count; i++) {
klass->methods [i] = mono_class_inflate_generic_method_full (
gklass->methods [i], klass, mono_class_get_context (klass));
}
klass->interface_count = gklass->interface_count;
klass->interfaces = mono_image_alloc (klass->image, sizeof (MonoClass*) * klass->interface_count);
for (i = 0; i < klass->interface_count; ++i) {
MonoType *iface_type = mono_class_inflate_generic_type (&gklass->interfaces [i]->byval_arg, mono_class_get_context (klass));
klass->interfaces [i] = mono_class_from_mono_type (iface_type);
mono_metadata_free_type (iface_type);
ensure_runtime_vtable (klass->interfaces [i]);
}
klass->interfaces_inited = 1;
/* We can only finish with this klass once its parent has been finished as well */
if (gklass->wastypebuilder)
klass->wastypebuilder = TRUE;
return;
}
static void
ensure_runtime_vtable (MonoClass *klass)
{
MonoReflectionTypeBuilder *tb = klass->reflection_info;
int i, num, j;
if (!klass->image->dynamic || (!tb && !klass->generic_class) || klass->wastypebuilder)
return;
if (klass->parent)
ensure_runtime_vtable (klass->parent);
if (tb) {
num = tb->ctors? mono_array_length (tb->ctors): 0;
num += tb->num_methods;
klass->method.count = num;
klass->methods = mono_image_alloc (klass->image, sizeof (MonoMethod*) * num);
num = tb->ctors? mono_array_length (tb->ctors): 0;
for (i = 0; i < num; ++i)
klass->methods [i] = ctorbuilder_to_mono_method (klass, mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i));
num = tb->num_methods;
j = i;
for (i = 0; i < num; ++i)
klass->methods [j++] = methodbuilder_to_mono_method (klass, mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i));
if (tb->interfaces) {
klass->interface_count = mono_array_length (tb->interfaces);
klass->interfaces = mono_image_alloc (klass->image, sizeof (MonoClass*) * klass->interface_count);
for (i = 0; i < klass->interface_count; ++i) {
MonoType *iface = mono_type_array_get_and_resolve (tb->interfaces, i);
klass->interfaces [i] = mono_class_from_mono_type (iface);
ensure_runtime_vtable (klass->interfaces [i]);
}
klass->interfaces_inited = 1;
}
} else if (klass->generic_class){
ensure_generic_class_runtime_vtable (klass);
}
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
for (i = 0; i < klass->method.count; ++i)
klass->methods [i]->slot = i;
mono_class_setup_interface_offsets (klass);
mono_class_setup_interface_id (klass);
}
/*
* The generic vtable is needed even if image->run is not set since some
* runtime code like ves_icall_Type_GetMethodsByName depends on
* method->slot being defined.
*/
/*
* tb->methods must not be freed since it is used for determining
* overrides during dynamic vtable construction.
*/
}
static MonoMethod*
mono_reflection_method_get_handle (MonoObject *method)
{
MonoClass *class = mono_object_class (method);
if (is_sr_mono_method (class) || is_sr_mono_generic_method (class)) {
MonoReflectionMethod *sr_method = (MonoReflectionMethod*)method;
return sr_method->method;
}
if (is_sre_method_builder (class)) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)method;
return mb->mhandle;
}
if (is_sre_method_on_tb_inst (class)) {
MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)method;
MonoMethod *result;
/*FIXME move this to a proper method and unify with resolve_object*/
if (m->method_args) {
result = mono_reflection_method_on_tb_inst_get_handle (m);
} else {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst);
MonoClass *inflated_klass = mono_class_from_mono_type (type);
MonoMethod *mono_method;
if (is_sre_method_builder (mono_object_class (m->mb)))
mono_method = ((MonoReflectionMethodBuilder *)m->mb)->mhandle;
else if (is_sr_mono_method (mono_object_class (m->mb)))
mono_method = ((MonoReflectionMethod *)m->mb)->method;
else
g_error ("resolve_object:: can't handle a MTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (m->mb)));
result = inflate_mono_method (inflated_klass, mono_method, (MonoObject*)m->mb);
}
return result;
}
g_error ("Can't handle methods of type %s:%s", class->name_space, class->name);
return NULL;
}
void
mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides)
{
MonoReflectionTypeBuilder *tb;
int i, onum;
*overrides = NULL;
*num_overrides = 0;
g_assert (klass->image->dynamic);
if (!klass->reflection_info)
return;
g_assert (strcmp (((MonoObject*)klass->reflection_info)->vtable->klass->name, "TypeBuilder") == 0);
tb = (MonoReflectionTypeBuilder*)klass->reflection_info;
onum = 0;
if (tb->methods) {
for (i = 0; i < tb->num_methods; ++i) {
MonoReflectionMethodBuilder *mb =
mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i);
if (mb->override_method)
onum ++;
}
}
if (onum) {
*overrides = g_new0 (MonoMethod*, onum * 2);
onum = 0;
for (i = 0; i < tb->num_methods; ++i) {
MonoReflectionMethodBuilder *mb =
mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i);
if (mb->override_method) {
(*overrides) [onum * 2] = mono_reflection_method_get_handle ((MonoObject *)mb->override_method);
(*overrides) [onum * 2 + 1] = mb->mhandle;
g_assert (mb->mhandle);
onum ++;
}
}
}
*num_overrides = onum;
}
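/*
 * Layout note, derived from the loop above: the overrides array holds
 * onum pairs,
 *     overrides [2*k]     = handle of the method being overridden
 *     overrides [2*k + 1] = the TypeBuilder method that overrides it
 * so callers must walk it two entries at a time rather than as a flat
 * method list.
 */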
static void
typebuilder_setup_fields (MonoClass *klass, MonoError *error)
{
MonoReflectionTypeBuilder *tb = klass->reflection_info;
MonoReflectionFieldBuilder *fb;
MonoClassField *field;
MonoImage *image = klass->image;
const char *p, *p2;
int i;
guint32 len, idx, real_size = 0;
klass->field.count = tb->num_fields;
klass->field.first = 0;
mono_error_init (error);
if (tb->class_size) {
g_assert ((tb->packing_size & 0xfffffff0) == 0);
klass->packing_size = tb->packing_size;
real_size = klass->instance_size + tb->class_size;
}
if (!klass->field.count) {
klass->instance_size = MAX (klass->instance_size, real_size);
return;
}
klass->fields = image_g_new0 (image, MonoClassField, klass->field.count);
mono_class_alloc_ext (klass);
klass->ext->field_def_values = image_g_new0 (image, MonoFieldDefaultValue, klass->field.count);
/*
This is, guess what, a hack.
The issue is that the runtime doesn't know how to set up the fields of a typebuilder and crashes.
On the static path no field class is resolved, only types are built. This is the right thing to do,
but we suck.
Setting size_inited is harmless because we're doing the same job as mono_class_setup_fields anyway.
*/
klass->size_inited = 1;
for (i = 0; i < klass->field.count; ++i) {
fb = mono_array_get (tb->fields, gpointer, i);
field = &klass->fields [i];
field->name = mono_string_to_utf8_image (image, fb->name, error);
if (!mono_error_ok (error))
return;
if (fb->attrs) {
field->type = mono_metadata_type_dup (klass->image, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type));
field->type->attrs = fb->attrs;
} else {
field->type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
}
if ((fb->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) && fb->rva_data)
klass->ext->field_def_values [i].data = mono_array_addr (fb->rva_data, char, 0);
if (fb->offset != -1)
field->offset = fb->offset;
field->parent = klass;
fb->handle = field;
mono_save_custom_attrs (klass->image, field, fb->cattrs);
if (fb->def_value) {
MonoDynamicImage *assembly = (MonoDynamicImage*)klass->image;
field->type->attrs |= FIELD_ATTRIBUTE_HAS_DEFAULT;
idx = encode_constant (assembly, fb->def_value, &klass->ext->field_def_values [i].def_type);
/* Copy the data from the blob since it might get realloc-ed */
p = assembly->blob.data + idx;
len = mono_metadata_decode_blob_size (p, &p2);
len += p2 - p;
klass->ext->field_def_values [i].data = mono_image_alloc (image, len);
memcpy ((gpointer)klass->ext->field_def_values [i].data, p, len);
}
}
klass->instance_size = MAX (klass->instance_size, real_size);
mono_class_layout_fields (klass);
}
static void
typebuilder_setup_properties (MonoClass *klass, MonoError *error)
{
MonoReflectionTypeBuilder *tb = klass->reflection_info;
MonoReflectionPropertyBuilder *pb;
MonoImage *image = klass->image;
MonoProperty *properties;
int i;
mono_error_init (error);
if (!klass->ext)
klass->ext = image_g_new0 (image, MonoClassExt, 1);
klass->ext->property.count = tb->properties ? mono_array_length (tb->properties) : 0;
klass->ext->property.first = 0;
properties = image_g_new0 (image, MonoProperty, klass->ext->property.count);
klass->ext->properties = properties;
for (i = 0; i < klass->ext->property.count; ++i) {
pb = mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i);
properties [i].parent = klass;
properties [i].attrs = pb->attrs;
properties [i].name = mono_string_to_utf8_image (image, pb->name, error);
if (!mono_error_ok (error))
return;
if (pb->get_method)
properties [i].get = pb->get_method->mhandle;
if (pb->set_method)
properties [i].set = pb->set_method->mhandle;
mono_save_custom_attrs (klass->image, &properties [i], pb->cattrs);
}
}
MonoReflectionEvent *
mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb)
{
MonoEvent *event = g_new0 (MonoEvent, 1);
MonoClass *klass;
int j;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
event->parent = klass;
event->attrs = eb->attrs;
event->name = mono_string_to_utf8 (eb->name);
if (eb->add_method)
event->add = eb->add_method->mhandle;
if (eb->remove_method)
event->remove = eb->remove_method->mhandle;
if (eb->raise_method)
event->raise = eb->raise_method->mhandle;
if (eb->other_methods) {
event->other = g_new0 (MonoMethod*, mono_array_length (eb->other_methods) + 1);
for (j = 0; j < mono_array_length (eb->other_methods); ++j) {
MonoReflectionMethodBuilder *mb =
mono_array_get (eb->other_methods,
MonoReflectionMethodBuilder*, j);
event->other [j] = mb->mhandle;
}
}
return mono_event_get_object (mono_object_domain (tb), klass, event);
}
static void
typebuilder_setup_events (MonoClass *klass, MonoError *error)
{
MonoReflectionTypeBuilder *tb = klass->reflection_info;
MonoReflectionEventBuilder *eb;
MonoImage *image = klass->image;
MonoEvent *events;
int i, j;
mono_error_init (error);
if (!klass->ext)
klass->ext = image_g_new0 (image, MonoClassExt, 1);
klass->ext->event.count = tb->events ? mono_array_length (tb->events) : 0;
klass->ext->event.first = 0;
events = image_g_new0 (image, MonoEvent, klass->ext->event.count);
klass->ext->events = events;
for (i = 0; i < klass->ext->event.count; ++i) {
eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i);
events [i].parent = klass;
events [i].attrs = eb->attrs;
events [i].name = mono_string_to_utf8_image (image, eb->name, error);
if (!mono_error_ok (error))
return;
if (eb->add_method)
events [i].add = eb->add_method->mhandle;
if (eb->remove_method)
events [i].remove = eb->remove_method->mhandle;
if (eb->raise_method)
events [i].raise = eb->raise_method->mhandle;
if (eb->other_methods) {
events [i].other = image_g_new0 (image, MonoMethod*, mono_array_length (eb->other_methods) + 1);
for (j = 0; j < mono_array_length (eb->other_methods); ++j) {
MonoReflectionMethodBuilder *mb =
mono_array_get (eb->other_methods,
MonoReflectionMethodBuilder*, j);
events [i].other [j] = mb->mhandle;
}
}
mono_save_custom_attrs (klass->image, &events [i], eb->cattrs);
}
}
static gboolean
remove_instantiations_of (gpointer key,
gpointer value,
gpointer user_data)
{
MonoType *type = (MonoType*)key;
MonoClass *klass = (MonoClass*)user_data;
if ((type->type == MONO_TYPE_GENERICINST) && (type->data.generic_class->container_class == klass))
return TRUE;
else
return FALSE;
}
static void
check_array_for_usertypes (MonoArray *arr)
{
int i;
if (!arr)
return;
for (i = 0; i < mono_array_length (arr); ++i)
RESOLVE_ARRAY_TYPE_ELEMENT (arr, i);
}
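/*
 * Summary of the steps below: resolve any user-defined System.Type
 * subclasses referenced by the parent, the interfaces, the fields and
 * the method/ctor signatures; then, under the loader and domain locks,
 * set up the vtable, nested classes, fields, properties and events;
 * finally mark klass->wastypebuilder and flush stale MonoGenericClass
 * instantiations from the domain type cache.
 */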
MonoReflectionType*
mono_reflection_create_runtime_class (MonoReflectionTypeBuilder *tb)
{
MonoError error;
MonoClass *klass;
MonoDomain* domain;
MonoReflectionType* res;
int i, j;
MONO_ARCH_SAVE_REGS;
domain = mono_object_domain (tb);
klass = mono_class_from_mono_type (tb->type.type);
/*
* Check for user defined Type subclasses.
*/
RESOLVE_TYPE (tb->parent);
check_array_for_usertypes (tb->interfaces);
if (tb->fields) {
for (i = 0; i < mono_array_length (tb->fields); ++i) {
MonoReflectionFieldBuilder *fb = mono_array_get (tb->fields, gpointer, i);
if (fb) {
RESOLVE_TYPE (fb->type);
check_array_for_usertypes (fb->modreq);
check_array_for_usertypes (fb->modopt);
if (fb->marshal_info && fb->marshal_info->marshaltyperef)
RESOLVE_TYPE (fb->marshal_info->marshaltyperef);
}
}
}
if (tb->methods) {
for (i = 0; i < mono_array_length (tb->methods); ++i) {
MonoReflectionMethodBuilder *mb = mono_array_get (tb->methods, gpointer, i);
if (mb) {
RESOLVE_TYPE (mb->rtype);
check_array_for_usertypes (mb->return_modreq);
check_array_for_usertypes (mb->return_modopt);
check_array_for_usertypes (mb->parameters);
if (mb->param_modreq)
for (j = 0; j < mono_array_length (mb->param_modreq); ++j)
check_array_for_usertypes (mono_array_get (mb->param_modreq, MonoArray*, j));
if (mb->param_modopt)
for (j = 0; j < mono_array_length (mb->param_modopt); ++j)
check_array_for_usertypes (mono_array_get (mb->param_modopt, MonoArray*, j));
}
}
}
if (tb->ctors) {
for (i = 0; i < mono_array_length (tb->ctors); ++i) {
MonoReflectionCtorBuilder *mb = mono_array_get (tb->ctors, gpointer, i);
if (mb) {
check_array_for_usertypes (mb->parameters);
if (mb->param_modreq)
for (j = 0; j < mono_array_length (mb->param_modreq); ++j)
check_array_for_usertypes (mono_array_get (mb->param_modreq, MonoArray*, j));
if (mb->param_modopt)
for (j = 0; j < mono_array_length (mb->param_modopt); ++j)
check_array_for_usertypes (mono_array_get (mb->param_modopt, MonoArray*, j));
}
}
}
mono_save_custom_attrs (klass->image, klass, tb->cattrs);
/*
* we need to take the loader lock before the domain lock, since the
* loader lock will also be taken further inside.
* So, we need to keep the locking order correct.
*/
mono_loader_lock ();
mono_domain_lock (domain);
if (klass->wastypebuilder) {
mono_domain_unlock (domain);
mono_loader_unlock ();
return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
}
/*
* Fields to set in klass:
* the various flags: delegate/unicode/contextbound etc.
*/
klass->flags = tb->attrs;
klass->has_cctor = 1;
klass->has_finalize = 1;
#if 0
if (!((MonoDynamicImage*)klass->image)->run) {
if (klass->generic_container) {
/* FIXME: The code below can't handle generic classes */
klass->wastypebuilder = TRUE;
mono_loader_unlock ();
mono_domain_unlock (domain);
return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
}
}
#endif
/* enums are done right away */
if (!klass->enumtype)
ensure_runtime_vtable (klass);
if (tb->subtypes) {
for (i = 0; i < mono_array_length (tb->subtypes); ++i) {
MonoReflectionTypeBuilder *subtb = mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i);
mono_class_alloc_ext (klass);
klass->ext->nested_classes = g_list_prepend_image (klass->image, klass->ext->nested_classes, mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)subtb)));
}
}
klass->nested_classes_inited = TRUE;
/* fields and object layout */
if (klass->parent) {
if (!klass->parent->size_inited)
mono_class_init (klass->parent);
klass->instance_size = klass->parent->instance_size;
klass->sizes.class_size = 0;
klass->min_align = klass->parent->min_align;
/* if the type has no fields we won't call the field_setup
* routine which sets up klass->has_references.
*/
klass->has_references |= klass->parent->has_references;
} else {
klass->instance_size = sizeof (MonoObject);
klass->min_align = 1;
}
/* FIXME: handle packing_size and instance_size */
typebuilder_setup_fields (klass, &error);
if (!mono_error_ok (&error))
goto failure;
typebuilder_setup_properties (klass, &error);
if (!mono_error_ok (&error))
goto failure;
typebuilder_setup_events (klass, &error);
if (!mono_error_ok (&error))
goto failure;
klass->wastypebuilder = TRUE;
/*
* If we are a generic TypeBuilder, there might be instantiations in the type cache
* which have type System.Reflection.MonoGenericClass, but after the type is created,
* we want to return normal System.MonoType objects, so clear these out from the cache.
*/
if (domain->type_hash && klass->generic_container)
mono_g_hash_table_foreach_remove (domain->type_hash, remove_instantiations_of, klass);
mono_domain_unlock (domain);
mono_loader_unlock ();
if (klass->enumtype && !mono_class_is_valid_enum (klass)) {
mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL);
mono_raise_exception (mono_get_exception_type_load (tb->name, NULL));
}
res = mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
g_assert (res != (MonoReflectionType*)tb);
return res;
failure:
mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL);
klass->wastypebuilder = TRUE;
mono_domain_unlock (domain);
mono_loader_unlock ();
mono_error_raise_exception (&error);
return NULL;
}
void
mono_reflection_initialize_generic_parameter (MonoReflectionGenericParam *gparam)
{
MonoGenericParamFull *param;
MonoImage *image;
MonoClass *pklass;
MONO_ARCH_SAVE_REGS;
param = g_new0 (MonoGenericParamFull, 1);
if (gparam->mbuilder) {
if (!gparam->mbuilder->generic_container) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)gparam->mbuilder->type;
MonoClass *klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
gparam->mbuilder->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer));
gparam->mbuilder->generic_container->is_method = TRUE;
/*
* Cannot set owner.method, since the MonoMethod is not created yet.
* Set the image field instead, so type_in_image () works.
*/
gparam->mbuilder->generic_container->image = klass->image;
}
param->param.owner = gparam->mbuilder->generic_container;
} else if (gparam->tbuilder) {
if (!gparam->tbuilder->generic_container) {
MonoClass *klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)gparam->tbuilder));
gparam->tbuilder->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer));
gparam->tbuilder->generic_container->owner.klass = klass;
}
param->param.owner = gparam->tbuilder->generic_container;
}
param->info.name = mono_string_to_utf8 (gparam->name);
param->param.num = gparam->index;
image = &gparam->tbuilder->module->dynamic_image->image;
pklass = mono_class_from_generic_parameter ((MonoGenericParam *) param, image, gparam->mbuilder != NULL);
gparam->type.type = &pklass->byval_arg;
MOVING_GC_REGISTER (&pklass->reflection_info);
pklass->reflection_info = gparam; /* FIXME: GC pin gparam */
mono_image_lock (image);
image->reflection_info_unregister_classes = g_slist_prepend (image->reflection_info_unregister_classes, pklass);
mono_image_unlock (image);
}
MonoArray *
mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig)
{
MonoReflectionModuleBuilder *module = sig->module;
MonoDynamicImage *assembly = module != NULL ? module->dynamic_image : NULL;
guint32 na = sig->arguments ? mono_array_length (sig->arguments) : 0;
guint32 buflen, i;
MonoArray *result;
SigBuffer buf;
check_array_for_usertypes (sig->arguments);
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x07);
sigbuffer_add_value (&buf, na);
if (assembly != NULL){
for (i = 0; i < na; ++i) {
MonoReflectionType *type = mono_array_get (sig->arguments, MonoReflectionType*, i);
encode_reflection_type (assembly, type, &buf);
}
}
buflen = buf.p - buf.buf;
result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen);
memcpy (mono_array_addr (result, char, 0), buf.buf, buflen);
sigbuffer_free (&buf);
return result;
}
MonoArray *
mono_reflection_sighelper_get_signature_field (MonoReflectionSigHelper *sig)
{
MonoDynamicImage *assembly = sig->module->dynamic_image;
guint32 na = sig->arguments ? mono_array_length (sig->arguments) : 0;
guint32 buflen, i;
MonoArray *result;
SigBuffer buf;
check_array_for_usertypes (sig->arguments);
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x06);
for (i = 0; i < na; ++i) {
MonoReflectionType *type = mono_array_get (sig->arguments, MonoReflectionType*, i);
encode_reflection_type (assembly, type, &buf);
}
buflen = buf.p - buf.buf;
result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen);
memcpy (mono_array_addr (result, char, 0), buf.buf, buflen);
sigbuffer_free (&buf);
return result;
}
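/*
 * Note: the leading byte written into each signature buffer above is
 * the ECMA-335 signature kind - 0x07 for a LocalVarSig, 0x06 for a
 * FieldSig - followed by the encoded argument types (and, for locals,
 * their count).
 */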
void
mono_reflection_create_dynamic_method (MonoReflectionDynamicMethod *mb)
{
ReflectionMethodBuilder rmb;
MonoMethodSignature *sig;
MonoClass *klass;
GSList *l;
int i;
sig = dynamic_method_to_signature (mb);
reflection_methodbuilder_from_dynamic_method (&rmb, mb);
/*
* Resolve references.
*/
/*
* Every second entry in the refs array is reserved for storing handle_class,
* which is needed by the ldtoken implementation in the JIT.
*/
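/*
 * Illustrative layout (comment only): for nrefs == 4 the array ends up
 * as
 *     refs [0] = resolved handle    refs [1] = its handle_class
 *     refs [2] = resolved handle    refs [3] = its handle_class
 * matching the rmb.refs [i] / rmb.refs [i + 1] stores at the end of the
 * loop below.
 */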
rmb.nrefs = mb->nrefs;
rmb.refs = g_new0 (gpointer, mb->nrefs + 1);
for (i = 0; i < mb->nrefs; i += 2) {
MonoClass *handle_class;
gpointer ref;
MonoObject *obj = mono_array_get (mb->refs, MonoObject*, i);
if (strcmp (obj->vtable->klass->name, "DynamicMethod") == 0) {
MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)obj;
/*
* The referenced DynamicMethod should already be created by the managed
* code, except in the case of circular references. In that case, we store
* method in the refs array, and fix it up later when the referenced
* DynamicMethod is created.
*/
if (method->mhandle) {
ref = method->mhandle;
} else {
/* FIXME: GC object stored in unmanaged memory */
ref = method;
/* FIXME: GC object stored in unmanaged memory */
method->referenced_by = g_slist_append (method->referenced_by, mb);
}
handle_class = mono_defaults.methodhandle_class;
} else {
MonoException *ex = NULL;
ref = resolve_object (mb->module->image, obj, &handle_class, NULL);
if (!ref)
ex = mono_get_exception_type_load (NULL, NULL);
else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
ex = mono_security_core_clr_ensure_dynamic_method_resolved_object (ref, handle_class);
if (ex) {
g_free (rmb.refs);
mono_raise_exception (ex);
return;
}
}
rmb.refs [i] = ref; /* FIXME: GC object stored in unmanaged memory (change also resolve_object() signature) */
rmb.refs [i + 1] = handle_class;
}
klass = mb->owner ? mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)mb->owner)) : mono_defaults.object_class;
mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig);
/* Fix up refs entries pointing at us */
for (l = mb->referenced_by; l; l = l->next) {
MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)l->data;
MonoMethodWrapper *wrapper = (MonoMethodWrapper*)method->mhandle;
gpointer *data;
g_assert (method->mhandle);
data = (gpointer*)wrapper->method_data;
for (i = 0; i < GPOINTER_TO_UINT (data [0]); i += 2) {
if ((data [i + 1] == mb) && (data [i + 1 + 1] == mono_defaults.methodhandle_class))
data [i + 1] = mb->mhandle;
}
}
g_slist_free (mb->referenced_by);
g_free (rmb.refs);
/* ilgen is no longer needed */
mb->ilgen = NULL;
}
#endif /* DISABLE_REFLECTION_EMIT */
void
mono_reflection_destroy_dynamic_method (MonoReflectionDynamicMethod *mb)
{
g_assert (mb);
if (mb->mhandle)
mono_runtime_free_method (
mono_object_get_domain ((MonoObject*)mb), mb->mhandle);
}
/**
* mono_reflection_is_valid_dynamic_token:
*
* Returns TRUE if token is valid.
*/
gboolean
mono_reflection_is_valid_dynamic_token (MonoDynamicImage *image, guint32 token)
{
return mono_g_hash_table_lookup (image->tokens, GUINT_TO_POINTER (token)) != NULL;
}
#ifndef DISABLE_REFLECTION_EMIT
/**
* mono_reflection_lookup_dynamic_token:
*
* Finish the Builder object pointed to by TOKEN and return the corresponding
* runtime structure. If HANDLE_CLASS is not NULL, it is set to the class required by
* mono_ldtoken. If valid_token is TRUE, assert if it is not found in the token->object
* mapping table.
*
* LOCKING: Take the loader lock
*/
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context)
{
MonoDynamicImage *assembly = (MonoDynamicImage*)image;
MonoObject *obj;
MonoClass *klass;
mono_loader_lock ();
obj = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
mono_loader_unlock ();
if (!obj) {
if (valid_token)
g_error ("Could not find required dynamic token 0x%08x", token);
else
return NULL;
}
if (!handle_class)
handle_class = &klass;
return resolve_object (image, obj, handle_class, context);
}
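/*
 * Usage sketch (hypothetical caller, mirroring the contract documented
 * above):
 *
 *     MonoClass *handle_class;
 *     gpointer h = mono_reflection_lookup_dynamic_token (image, token,
 *             TRUE, &handle_class, NULL);
 *
 * the caller then dispatches on handle_class (typehandle_class,
 * methodhandle_class, fieldhandle_class, ...) which resolve_object
 * fills in.
 */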
/*
* ensure_complete_type:
*
* Ensure that KLASS is completed if it is a dynamic type, or references
* dynamic types.
*/
static void
ensure_complete_type (MonoClass *klass)
{
if (klass->image->dynamic && !klass->wastypebuilder) {
MonoReflectionTypeBuilder *tb = klass->reflection_info;
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
// Asserting here could break a lot of code
//g_assert (klass->wastypebuilder);
}
if (klass->generic_class) {
MonoGenericInst *inst = klass->generic_class->context.class_inst;
int i;
for (i = 0; i < inst->type_argc; ++i) {
ensure_complete_type (mono_class_from_mono_type (inst->type_argv [i]));
}
}
}
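/*
 * Example: completing an instantiation such as List<MyBuilderType> also
 * forces MyBuilderType itself to be completed, via the recursion over
 * context.class_inst above. (Type names are illustrative.)
 */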
static gpointer
resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context)
{
gpointer result = NULL;
if (strcmp (obj->vtable->klass->name, "String") == 0) {
result = mono_string_intern ((MonoString*)obj);
*handle_class = mono_defaults.string_class;
g_assert (result);
} else if (strcmp (obj->vtable->klass->name, "MonoType") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
if (context) {
MonoType *inflated = mono_class_inflate_generic_type (type, context);
result = mono_class_from_mono_type (inflated);
mono_metadata_free_type (inflated);
} else {
result = mono_class_from_mono_type (type);
}
*handle_class = mono_defaults.typehandle_class;
g_assert (result);
} else if (strcmp (obj->vtable->klass->name, "MonoMethod") == 0 ||
strcmp (obj->vtable->klass->name, "MonoCMethod") == 0 ||
strcmp (obj->vtable->klass->name, "MonoGenericCMethod") == 0 ||
strcmp (obj->vtable->klass->name, "MonoGenericMethod") == 0) {
result = ((MonoReflectionMethod*)obj)->method;
if (context)
result = mono_class_inflate_generic_method (result, context);
*handle_class = mono_defaults.methodhandle_class;
g_assert (result);
} else if (strcmp (obj->vtable->klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)obj;
result = mb->mhandle;
if (!result) {
/* Type is not yet created */
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
/*
* Hopefully this has been filled in by calling CreateType() on the
* TypeBuilder.
*/
/*
* TODO: This won't work if the application finishes another
* TypeBuilder instance instead of this one.
*/
result = mb->mhandle;
}
if (context)
result = mono_class_inflate_generic_method (result, context);
*handle_class = mono_defaults.methodhandle_class;
} else if (strcmp (obj->vtable->klass->name, "ConstructorBuilder") == 0) {
MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder*)obj;
result = cb->mhandle;
if (!result) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)cb->type;
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
result = cb->mhandle;
}
if (context)
result = mono_class_inflate_generic_method (result, context);
*handle_class = mono_defaults.methodhandle_class;
} else if (strcmp (obj->vtable->klass->name, "MonoField") == 0) {
MonoClassField *field = ((MonoReflectionField*)obj)->field;
ensure_complete_type (field->parent);
if (context) {
MonoType *inflated = mono_class_inflate_generic_type (&field->parent->byval_arg, context);
MonoClass *class = mono_class_from_mono_type (inflated);
MonoClassField *inflated_field;
gpointer iter = NULL;
mono_metadata_free_type (inflated);
while ((inflated_field = mono_class_get_fields (class, &iter))) {
if (!strcmp (field->name, inflated_field->name))
break;
}
g_assert (inflated_field && !strcmp (field->name, inflated_field->name));
result = inflated_field;
} else {
result = field;
}
*handle_class = mono_defaults.fieldhandle_class;
g_assert (result);
} else if (strcmp (obj->vtable->klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder*)obj;
result = fb->handle;
if (!result) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)fb->typeb;
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
result = fb->handle;
}
if (fb->handle && fb->handle->parent->generic_container) {
MonoClass *klass = fb->handle->parent;
MonoType *type = mono_class_inflate_generic_type (&klass->byval_arg, context);
MonoClass *inflated = mono_class_from_mono_type (type);
result = mono_class_get_field_from_name (inflated, mono_field_get_name (fb->handle));
g_assert (result);
mono_metadata_free_type (type);
}
*handle_class = mono_defaults.fieldhandle_class;
} else if (strcmp (obj->vtable->klass->name, "TypeBuilder") == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)obj;
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)tb);
MonoClass *klass;
klass = type->data.klass;
if (klass->wastypebuilder) {
/* Already created */
result = klass;
}
else {
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
result = type->data.klass;
g_assert (result);
}
*handle_class = mono_defaults.typehandle_class;
} else if (strcmp (obj->vtable->klass->name, "SignatureHelper") == 0) {
MonoReflectionSigHelper *helper = (MonoReflectionSigHelper*)obj;
MonoMethodSignature *sig;
int nargs, i;
if (helper->arguments)
nargs = mono_array_length (helper->arguments);
else
nargs = 0;
sig = mono_metadata_signature_alloc (image, nargs);
sig->explicit_this = helper->call_conv & 64 ? 1 : 0;
sig->hasthis = helper->call_conv & 32 ? 1 : 0;
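/*
 * Note: 64 and 32 are the managed CallingConventions.ExplicitThis
 * (0x40) and CallingConventions.HasThis (0x20) flag values; the 0x02
 * tested below is CallingConventions.VarArgs.
 */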
if (helper->unmanaged_call_conv) { /* unmanaged */
sig->call_convention = helper->unmanaged_call_conv - 1;
sig->pinvoke = TRUE;
} else if (helper->call_conv & 0x02) {
sig->call_convention = MONO_CALL_VARARG;
} else {
sig->call_convention = MONO_CALL_DEFAULT;
}
sig->param_count = nargs;
/* TODO: Copy type ? */
sig->ret = helper->return_type->type;
for (i = 0; i < nargs; ++i)
sig->params [i] = mono_type_array_get_and_resolve (helper->arguments, i);
result = sig;
*handle_class = NULL;
} else if (strcmp (obj->vtable->klass->name, "DynamicMethod") == 0) {
MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)obj;
/* Already created by the managed code */
g_assert (method->mhandle);
result = method->mhandle;
*handle_class = mono_defaults.methodhandle_class;
} else if (strcmp (obj->vtable->klass->name, "GenericTypeParameterBuilder") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
type = mono_class_inflate_generic_type (type, context);
result = mono_class_from_mono_type (type);
*handle_class = mono_defaults.typehandle_class;
g_assert (result);
mono_metadata_free_type (type);
} else if (strcmp (obj->vtable->klass->name, "MonoGenericClass") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
type = mono_class_inflate_generic_type (type, context);
result = mono_class_from_mono_type (type);
*handle_class = mono_defaults.typehandle_class;
g_assert (result);
mono_metadata_free_type (type);
} else if (strcmp (obj->vtable->klass->name, "FieldOnTypeBuilderInst") == 0) {
MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj;
MonoClass *inflated;
MonoType *type;
type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)f->inst), context);
inflated = mono_class_from_mono_type (type);
g_assert (f->fb->handle);
result = mono_class_get_field_from_name (inflated, mono_field_get_name (f->fb->handle));
g_assert (result);
mono_metadata_free_type (type);
*handle_class = mono_defaults.fieldhandle_class;
} else if (strcmp (obj->vtable->klass->name, "ConstructorOnTypeBuilderInst") == 0) {
MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj;
MonoType *type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)c->inst), context);
MonoClass *inflated_klass = mono_class_from_mono_type (type);
g_assert (c->cb->mhandle);
result = inflate_mono_method (inflated_klass, c->cb->mhandle, (MonoObject*)c->cb);
*handle_class = mono_defaults.methodhandle_class;
mono_metadata_free_type (type);
} else if (strcmp (obj->vtable->klass->name, "MethodOnTypeBuilderInst") == 0) {
MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)obj;
if (m->method_args) {
result = mono_reflection_method_on_tb_inst_get_handle (m);
} else {
MonoType *type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)m->inst), context);
MonoClass *inflated_klass = mono_class_from_mono_type (type);
g_assert (m->mb->mhandle);
result = inflate_mono_method (inflated_klass, m->mb->mhandle, (MonoObject*)m->mb);
mono_metadata_free_type (type);
}
*handle_class = mono_defaults.methodhandle_class;
} else if (strcmp (obj->vtable->klass->name, "MonoArrayMethod") == 0) {
MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod*)obj;
MonoType *mtype;
MonoClass *klass;
MonoMethod *method;
gpointer iter;
char *name;
mtype = mono_reflection_type_get_handle (m->parent);
klass = mono_class_from_mono_type (mtype);
/* Find the method */
name = mono_string_to_utf8 (m->name);
iter = NULL;
while ((method = mono_class_get_methods (klass, &iter))) {
if (!strcmp (method->name, name))
break;
}
g_free (name);
// FIXME:
g_assert (method);
// FIXME: Check parameters/return value etc. match
result = method;
*handle_class = mono_defaults.methodhandle_class;
} else if (is_sre_array (mono_object_get_class(obj)) ||
is_sre_byref (mono_object_get_class(obj)) ||
is_sre_pointer (mono_object_get_class(obj))) {
MonoReflectionType *ref_type = (MonoReflectionType *)obj;
MonoType *type = mono_reflection_type_get_handle (ref_type);
result = mono_class_from_mono_type (type);
*handle_class = mono_defaults.typehandle_class;
} else {
g_print ("%s\n", obj->vtable->klass->name);
g_assert_not_reached ();
}
return result;
}
#else /* DISABLE_REFLECTION_EMIT */
MonoArray*
mono_reflection_get_custom_attrs_blob (MonoReflectionAssembly *assembly, MonoObject *ctor, MonoArray *ctorArgs, MonoArray *properties, MonoArray *propValues, MonoArray *fields, MonoArray* fieldValues)
{
g_assert_not_reached ();
return NULL;
}
void
mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
}
void
mono_reflection_setup_generic_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
}
void
mono_reflection_create_generic_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
}
void
mono_reflection_create_internal_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
}
void
mono_image_basic_init (MonoReflectionAssemblyBuilder *assemblyb)
{
g_error ("This mono runtime was configured with --enable-minimal=reflection_emit, so System.Reflection.Emit is not supported.");
}
void
mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb)
{
g_assert_not_reached ();
}
void
mono_image_set_wrappers_type (MonoReflectionModuleBuilder *moduleb, MonoReflectionType *type)
{
g_assert_not_reached ();
}
MonoReflectionModule *
mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName)
{
g_assert_not_reached ();
return NULL;
}
guint32
mono_image_insert_string (MonoReflectionModuleBuilder *module, MonoString *str)
{
g_assert_not_reached ();
return 0;
}
guint32
mono_image_create_method_token (MonoDynamicImage *assembly, MonoObject *obj, MonoArray *opt_param_types)
{
g_assert_not_reached ();
return 0;
}
guint32
mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj,
gboolean create_methodspec, gboolean register_token)
{
g_assert_not_reached ();
return 0;
}
void
mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj)
{
}
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *methods,
MonoArray *ctors, MonoArray *fields, MonoArray *properties,
MonoArray *events)
{
g_assert_not_reached ();
}
void
mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides)
{
*overrides = NULL;
*num_overrides = 0;
}
MonoReflectionEvent *
mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb)
{
g_assert_not_reached ();
return NULL;
}
MonoReflectionType*
mono_reflection_create_runtime_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
return NULL;
}
void
mono_reflection_initialize_generic_parameter (MonoReflectionGenericParam *gparam)
{
g_assert_not_reached ();
}
MonoArray *
mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig)
{
g_assert_not_reached ();
return NULL;
}
MonoArray *
mono_reflection_sighelper_get_signature_field (MonoReflectionSigHelper *sig)
{
g_assert_not_reached ();
return NULL;
}
void
mono_reflection_create_dynamic_method (MonoReflectionDynamicMethod *mb)
{
}
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context)
{
return NULL;
}
MonoType*
mono_reflection_type_get_handle (MonoReflectionType* ref)
{
if (!ref)
return NULL;
return ref->type;
}
#endif /* DISABLE_REFLECTION_EMIT */
/* SECURITY_ACTION_* are defined in mono/metadata/tabledefs.h */
static const guint32 declsec_flags_map[] = {
0x00000000, /* empty */
MONO_DECLSEC_FLAG_REQUEST, /* SECURITY_ACTION_REQUEST (x01) */
MONO_DECLSEC_FLAG_DEMAND, /* SECURITY_ACTION_DEMAND (x02) */
MONO_DECLSEC_FLAG_ASSERT, /* SECURITY_ACTION_ASSERT (x03) */
MONO_DECLSEC_FLAG_DENY, /* SECURITY_ACTION_DENY (x04) */
MONO_DECLSEC_FLAG_PERMITONLY, /* SECURITY_ACTION_PERMITONLY (x05) */
MONO_DECLSEC_FLAG_LINKDEMAND, /* SECURITY_ACTION_LINKDEMAND (x06) */
MONO_DECLSEC_FLAG_INHERITANCEDEMAND, /* SECURITY_ACTION_INHERITANCEDEMAND (x07) */
MONO_DECLSEC_FLAG_REQUEST_MINIMUM, /* SECURITY_ACTION_REQUEST_MINIMUM (x08) */
MONO_DECLSEC_FLAG_REQUEST_OPTIONAL, /* SECURITY_ACTION_REQUEST_OPTIONAL (x09) */
MONO_DECLSEC_FLAG_REQUEST_REFUSE, /* SECURITY_ACTION_REQUEST_REFUSE (x0A) */
MONO_DECLSEC_FLAG_PREJIT_GRANT, /* SECURITY_ACTION_PREJIT_GRANT (x0B) */
MONO_DECLSEC_FLAG_PREJIT_DENY, /* SECURITY_ACTION_PREJIT_DENY (x0C) */
MONO_DECLSEC_FLAG_NONCAS_DEMAND, /* SECURITY_ACTION_NONCAS_DEMAND (x0D) */
MONO_DECLSEC_FLAG_NONCAS_LINKDEMAND, /* SECURITY_ACTION_NONCAS_LINKDEMAND (x0E) */
MONO_DECLSEC_FLAG_NONCAS_INHERITANCEDEMAND, /* SECURITY_ACTION_NONCAS_INHERITANCEDEMAND (x0F) */
MONO_DECLSEC_FLAG_LINKDEMAND_CHOICE, /* SECURITY_ACTION_LINKDEMAND_CHOICE (x10) */
MONO_DECLSEC_FLAG_INHERITANCEDEMAND_CHOICE, /* SECURITY_ACTION_INHERITANCEDEMAND_CHOICE (x11) */
MONO_DECLSEC_FLAG_DEMAND_CHOICE, /* SECURITY_ACTION_DEMAND_CHOICE (x12) */
};
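/*
 * Example: the table above is indexed directly by the
 * SECURITY_ACTION_* value, so
 *     declsec_flags_map [SECURITY_ACTION_DEMAND]
 * yields MONO_DECLSEC_FLAG_DEMAND; see mono_declsec_get_flags and
 * mono_declsec_get_class_action below.
 */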
/*
* Returns flags that include all available security actions associated with the handle.
* @token: metadata token (either for a class or a method)
* @image: image where the metadata resides.
*/
static guint32
mono_declsec_get_flags (MonoImage *image, guint32 token)
{
int index = mono_metadata_declsec_from_index (image, token);
MonoTableInfo *t = &image->tables [MONO_TABLE_DECLSECURITY];
guint32 result = 0;
guint32 action;
int i;
/* HasSecurity can be present for other, not specially encoded, attributes,
e.g. SuppressUnmanagedCodeSecurityAttribute */
if (index < 0)
return 0;
for (i = index; i < t->rows; i++) {
guint32 cols [MONO_DECL_SECURITY_SIZE];
mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE);
if (cols [MONO_DECL_SECURITY_PARENT] != token)
break;
action = cols [MONO_DECL_SECURITY_ACTION];
if ((action >= MONO_DECLSEC_ACTION_MIN) && (action <= MONO_DECLSEC_ACTION_MAX)) {
result |= declsec_flags_map [action];
} else {
g_assert_not_reached ();
}
}
return result;
}
/*
* Get the security actions (in the form of flags) associated with the specified method.
*
* @method: The method for which we want the declarative security flags.
* Return the declarative security flags for the method (only).
*
* Note: To keep MonoMethod size down we do not cache the declarative security flags
* (except for the stack modifiers which are kept in the MonoJitInfo structure)
*/
guint32
mono_declsec_flags_from_method (MonoMethod *method)
{
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
/* FIXME: No cache (for the moment) */
guint32 idx = mono_method_get_index (method);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
return mono_declsec_get_flags (method->klass->image, idx);
}
return 0;
}
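/*
 * Note: the idx built above is an ECMA-335 HasDeclSecurity coded index:
 * the table row index shifted left by MONO_HAS_DECL_SECURITY_BITS, with
 * the low bits tagging the parent table (TypeDef, MethodDef or
 * Assembly). E.g. for method row 5 the lookup key is
 * (5 << MONO_HAS_DECL_SECURITY_BITS) | MONO_HAS_DECL_SECURITY_METHODDEF.
 */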
/*
* Get the security actions (in the form of flags) associated with the specified class.
*
* @klass: The class for which we want the declarative security flags.
* Return the declarative security flags for the class.
*
* Note: We cache the flags inside the MonoClass structure as this will get
* called very often (at least for each method).
*/
guint32
mono_declsec_flags_from_class (MonoClass *klass)
{
if (klass->flags & TYPE_ATTRIBUTE_HAS_SECURITY) {
if (!klass->ext || !klass->ext->declsec_flags) {
guint32 idx;
idx = mono_metadata_token_index (klass->type_token);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
mono_loader_lock ();
mono_class_alloc_ext (klass);
mono_loader_unlock ();
/* we cache the flags on classes */
klass->ext->declsec_flags = mono_declsec_get_flags (klass->image, idx);
}
return klass->ext->declsec_flags;
}
return 0;
}
/*
* Get the security actions (in the form of flags) associated with the specified assembly.
*
* @assembly: The assembly for which we want the declarative security flags.
* Return the declarative security flags for the assembly.
*/
guint32
mono_declsec_flags_from_assembly (MonoAssembly *assembly)
{
guint32 idx = 1; /* there is only one assembly */
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY;
return mono_declsec_get_flags (assembly->image, idx);
}
/*
* Fill actions for the specific index (which may either be an encoded class token or
* an encoded method token) from the metadata image.
* Returns TRUE if some actions requiring code generation are present, FALSE otherwise.
*/
static MonoBoolean
fill_actions_from_index (MonoImage *image, guint32 token, MonoDeclSecurityActions* actions,
guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
MonoBoolean result = FALSE;
MonoTableInfo *t;
guint32 cols [MONO_DECL_SECURITY_SIZE];
int index = mono_metadata_declsec_from_index (image, token);
int i;
t = &image->tables [MONO_TABLE_DECLSECURITY];
for (i = index; i < t->rows; i++) {
mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE);
if (cols [MONO_DECL_SECURITY_PARENT] != token)
return result;
/* if present only replace (class) permissions with method permissions */
/* if empty accept either class or method permissions */
if (cols [MONO_DECL_SECURITY_ACTION] == id_std) {
if (!actions->demand.blob) {
const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
actions->demand.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
actions->demand.blob = (char*) (blob + 2);
actions->demand.size = mono_metadata_decode_blob_size (blob, &blob);
result = TRUE;
}
} else if (cols [MONO_DECL_SECURITY_ACTION] == id_noncas) {
if (!actions->noncasdemand.blob) {
const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
actions->noncasdemand.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
actions->noncasdemand.blob = (char*) (blob + 2);
actions->noncasdemand.size = mono_metadata_decode_blob_size (blob, &blob);
result = TRUE;
}
} else if (cols [MONO_DECL_SECURITY_ACTION] == id_choice) {
if (!actions->demandchoice.blob) {
const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
actions->demandchoice.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
actions->demandchoice.blob = (char*) (blob + 2);
actions->demandchoice.size = mono_metadata_decode_blob_size (blob, &blob);
result = TRUE;
}
}
}
return result;
}
static MonoBoolean
mono_declsec_get_class_demands_params (MonoClass *klass, MonoDeclSecurityActions* demands,
guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
guint32 idx = mono_metadata_token_index (klass->type_token);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
return fill_actions_from_index (klass->image, idx, demands, id_std, id_noncas, id_choice);
}
static MonoBoolean
mono_declsec_get_method_demands_params (MonoMethod *method, MonoDeclSecurityActions* demands,
guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
guint32 idx = mono_method_get_index (method);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
return fill_actions_from_index (method->klass->image, idx, demands, id_std, id_noncas, id_choice);
}
/*
* Collect all actions (that require generating code in mini) assigned to
* the specified method.
* Note: Don't use the content of actions if the function returns FALSE.
*/
MonoBoolean
mono_declsec_get_demands (MonoMethod *method, MonoDeclSecurityActions* demands)
{
guint32 mask = MONO_DECLSEC_FLAG_DEMAND | MONO_DECLSEC_FLAG_NONCAS_DEMAND |
MONO_DECLSEC_FLAG_DEMAND_CHOICE;
MonoBoolean result = FALSE;
guint32 flags;
/* quick exit if no declarative security is present in the metadata */
if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
return FALSE;
/* we want the original as the wrapper is "free" of the security information */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) {
method = mono_marshal_method_from_wrapper (method);
if (!method)
return FALSE;
}
/* First we look for method-level attributes */
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
mono_class_init (method->klass);
memset (demands, 0, sizeof (MonoDeclSecurityActions));
result = mono_declsec_get_method_demands_params (method, demands,
SECURITY_ACTION_DEMAND, SECURITY_ACTION_NONCASDEMAND, SECURITY_ACTION_DEMANDCHOICE);
}
/* Here we use (or create) the class declarative cache to look for demands */
flags = mono_declsec_flags_from_class (method->klass);
if (flags & mask) {
if (!result) {
mono_class_init (method->klass);
memset (demands, 0, sizeof (MonoDeclSecurityActions));
}
result |= mono_declsec_get_class_demands_params (method->klass, demands,
SECURITY_ACTION_DEMAND, SECURITY_ACTION_NONCASDEMAND, SECURITY_ACTION_DEMANDCHOICE);
}
/* The boolean return value is used as a shortcut in case nothing needs to
be generated (e.g. LinkDemand[Choice] and InheritanceDemand[Choice]) */
return result;
}
/*
* Collect all Link actions: LinkDemand, NonCasLinkDemand and LinkDemandChoice (2.0).
*
* Note: Don't use the content of actions if the function returns FALSE.
*/
MonoBoolean
mono_declsec_get_linkdemands (MonoMethod *method, MonoDeclSecurityActions* klass, MonoDeclSecurityActions *cmethod)
{
MonoBoolean result = FALSE;
guint32 flags;
/* quick exit if no declarative security is present in the metadata */
if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
return FALSE;
/* we want the original as the wrapper is "free" of the security information */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) {
method = mono_marshal_method_from_wrapper (method);
if (!method)
return FALSE;
}
/* results are independent - zeroize both */
memset (cmethod, 0, sizeof (MonoDeclSecurityActions));
memset (klass, 0, sizeof (MonoDeclSecurityActions));
/* First we look for method-level attributes */
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
mono_class_init (method->klass);
result = mono_declsec_get_method_demands_params (method, cmethod,
SECURITY_ACTION_LINKDEMAND, SECURITY_ACTION_NONCASLINKDEMAND, SECURITY_ACTION_LINKDEMANDCHOICE);
}
/* Here we use (or create) the class declarative cache to look for demands */
flags = mono_declsec_flags_from_class (method->klass);
if (flags & (MONO_DECLSEC_FLAG_LINKDEMAND | MONO_DECLSEC_FLAG_NONCAS_LINKDEMAND | MONO_DECLSEC_FLAG_LINKDEMAND_CHOICE)) {
mono_class_init (method->klass);
result |= mono_declsec_get_class_demands_params (method->klass, klass,
SECURITY_ACTION_LINKDEMAND, SECURITY_ACTION_NONCASLINKDEMAND, SECURITY_ACTION_LINKDEMANDCHOICE);
}
return result;
}
/*
* Collect all Inherit actions: InheritanceDemand, NonCasInheritanceDemand and InheritanceDemandChoice (2.0).
*
* @klass: the inherited class - this is the class that provides the security check (attributes)
* @demands: the structure filled with the collected inheritance demands
* Returns TRUE if inheritance demands (any kind) are present, FALSE otherwise.
*
* Note: Don't use the content of actions if the function returns FALSE.
*/
MonoBoolean
mono_declsec_get_inheritdemands_class (MonoClass *klass, MonoDeclSecurityActions* demands)
{
MonoBoolean result = FALSE;
guint32 flags;
/* quick exit if no declarative security is present in the metadata */
if (!klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
return FALSE;
/* Here we use (or create) the class declarative cache to look for demands */
flags = mono_declsec_flags_from_class (klass);
if (flags & (MONO_DECLSEC_FLAG_INHERITANCEDEMAND | MONO_DECLSEC_FLAG_NONCAS_INHERITANCEDEMAND | MONO_DECLSEC_FLAG_INHERITANCEDEMAND_CHOICE)) {
mono_class_init (klass);
memset (demands, 0, sizeof (MonoDeclSecurityActions));
result |= mono_declsec_get_class_demands_params (klass, demands,
SECURITY_ACTION_INHERITDEMAND, SECURITY_ACTION_NONCASINHERITANCE, SECURITY_ACTION_INHERITDEMANDCHOICE);
}
return result;
}
/*
* Collect all Inherit actions: InheritanceDemand, NonCasInheritanceDemand and InheritanceDemandChoice (2.0).
*
* Note: Don't use the content of actions if the function returns FALSE.
*/
MonoBoolean
mono_declsec_get_inheritdemands_method (MonoMethod *method, MonoDeclSecurityActions* demands)
{
/* quick exit if no declarative security is present in the metadata */
if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
return FALSE;
/* we want the original as the wrapper is "free" of the security information */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) {
method = mono_marshal_method_from_wrapper (method);
if (!method)
return FALSE;
}
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
mono_class_init (method->klass);
memset (demands, 0, sizeof (MonoDeclSecurityActions));
return mono_declsec_get_method_demands_params (method, demands,
SECURITY_ACTION_INHERITDEMAND, SECURITY_ACTION_NONCASINHERITANCE, SECURITY_ACTION_INHERITDEMANDCHOICE);
}
return FALSE;
}
static MonoBoolean
get_declsec_action (MonoImage *image, guint32 token, guint32 action, MonoDeclSecurityEntry *entry)
{
guint32 cols [MONO_DECL_SECURITY_SIZE];
MonoTableInfo *t;
int i;
int index = mono_metadata_declsec_from_index (image, token);
if (index == -1)
return FALSE;
t = &image->tables [MONO_TABLE_DECLSECURITY];
for (i = index; i < t->rows; i++) {
mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE);
/* shortcut - indexes are ordered */
if (token != cols [MONO_DECL_SECURITY_PARENT])
return FALSE;
if (cols [MONO_DECL_SECURITY_ACTION] == action) {
const char *metadata = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
entry->blob = (char*) (metadata + 2);
entry->size = mono_metadata_decode_blob_size (metadata, &metadata);
return TRUE;
}
}
return FALSE;
}
MonoBoolean
mono_declsec_get_method_action (MonoMethod *method, guint32 action, MonoDeclSecurityEntry *entry)
{
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
guint32 idx = mono_method_get_index (method);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
return get_declsec_action (method->klass->image, idx, action, entry);
}
return FALSE;
}
MonoBoolean
mono_declsec_get_class_action (MonoClass *klass, guint32 action, MonoDeclSecurityEntry *entry)
{
/* use cache */
guint32 flags = mono_declsec_flags_from_class (klass);
if (declsec_flags_map [action] & flags) {
guint32 idx = mono_metadata_token_index (klass->type_token);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
return get_declsec_action (klass->image, idx, action, entry);
}
return FALSE;
}
MonoBoolean
mono_declsec_get_assembly_action (MonoAssembly *assembly, guint32 action, MonoDeclSecurityEntry *entry)
{
guint32 idx = 1; /* there is only one assembly */
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY;
return get_declsec_action (assembly->image, idx, action, entry);
}
gboolean
mono_reflection_call_is_assignable_to (MonoClass *klass, MonoClass *oklass)
{
MonoObject *res, *exc;
void *params [1];
static MonoClass *System_Reflection_Emit_TypeBuilder = NULL;
static MonoMethod *method = NULL;
if (!System_Reflection_Emit_TypeBuilder) {
System_Reflection_Emit_TypeBuilder = mono_class_from_name (mono_defaults.corlib, "System.Reflection.Emit", "TypeBuilder");
g_assert (System_Reflection_Emit_TypeBuilder);
}
if (method == NULL) {
method = mono_class_get_method_from_name (System_Reflection_Emit_TypeBuilder, "IsAssignableTo", 1);
g_assert (method);
}
/*
* The result of mono_type_get_object () might be a System.MonoType but we
* need a TypeBuilder so use klass->reflection_info.
*/
g_assert (klass->reflection_info);
g_assert (!strcmp (((MonoObject*)(klass->reflection_info))->vtable->klass->name, "TypeBuilder"));
params [0] = mono_type_get_object (mono_domain_get (), &oklass->byval_arg);
res = mono_runtime_invoke (method, (MonoObject*)(klass->reflection_info), params, &exc);
if (exc)
return FALSE;
else
return *(MonoBoolean*)mono_object_unbox (res);
}
/* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 Intel Corp.
* Copyright (c) 2001-2002 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* This file is part of the SCTP kernel implementation
*
* These functions interface with the sockets layer to implement the
* SCTP Extensions for the Sockets API.
*
* Note that the descriptions from the specification are USER level
* functions--this file is the functions which populate the struct proto
* for SCTP which is the BOTTOM of the sockets interface.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Narasimha Budihal <narsi@refcode.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@us.ibm.com>
* Xingang Guo <xingang.guo@intel.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* WARNING: Please do not remove the SCTP_STATIC attribute from
* any of the functions below as they are used to export functions
* used by a project regression testsuite.
*/
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
size_t msg_len);
static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
struct sctp_association *, sctp_socket_type_t);
extern struct kmem_cache *sctp_bucket_cachep;
extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];
static int sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;
static void sctp_enter_memory_pressure(struct sock *sk)
{
sctp_memory_pressure = 1;
}
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
{
int amt;
if (asoc->ep->sndbuf_policy)
amt = asoc->sndbuf_used;
else
amt = sk_wmem_alloc_get(asoc->base.sk);
if (amt >= asoc->base.sk->sk_sndbuf) {
if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
amt = 0;
else {
amt = sk_stream_wspace(asoc->base.sk);
if (amt < 0)
amt = 0;
}
} else {
amt = asoc->base.sk->sk_sndbuf - amt;
}
return amt;
}
/* Increment the used sndbuf space count of the corresponding association by
* the size of the outgoing data chunk.
* Also, set the skb destructor for sndbuf accounting later.
*
* Since it is always 1-1 between chunk and skb, and also a new skb is always
* allocated for chunk bundling in sctp_packet_transmit(), we can use the
* destructor in the data chunk skb for the purpose of the sndbuf space
* tracking.
*/
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
struct sctp_association *asoc = chunk->asoc;
struct sock *sk = asoc->base.sk;
/* The sndbuf space is tracked per association. */
sctp_association_hold(asoc);
skb_set_owner_w(chunk->skb, sk);
chunk->skb->destructor = sctp_wfree;
/* Save the chunk pointer in skb for sctp_wfree to use later. */
*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
sk->sk_wmem_queued += chunk->skb->truesize;
sk_mem_charge(sk, chunk->skb->truesize);
}
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
{
struct sctp_af *af;
/* Verify basic sockaddr. */
af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
if (!af)
return -EINVAL;
/* Is this a valid SCTP address? */
if (!af->addr_valid(addr, sctp_sk(sk), NULL))
return -EINVAL;
if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
return -EINVAL;
return 0;
}
/* Look up the association by its id. If this is not a UDP-style
* socket, the ID field is always ignored.
*/
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
struct sctp_association *asoc = NULL;
/* If this is not a UDP-style socket, assoc id should be ignored. */
if (!sctp_style(sk, UDP)) {
/* Return NULL if the socket state is not ESTABLISHED. It
* could be a TCP-style listening socket or a socket which
* hasn't yet called connect() to establish an association.
*/
if (!sctp_sstate(sk, ESTABLISHED))
return NULL;
/* Get the first and the only association from the list. */
if (!list_empty(&sctp_sk(sk)->ep->asocs))
asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
struct sctp_association, asocs);
return asoc;
}
/* Otherwise this is a UDP-style socket. */
if (!id || (id == (sctp_assoc_t)-1))
return NULL;
spin_lock_bh(&sctp_assocs_id_lock);
asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
spin_unlock_bh(&sctp_assocs_id_lock);
if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
return NULL;
return asoc;
}
/* Look up the transport from an address and an assoc id. If both address and
* id are specified, the associations matching the address and the id should be
* the same.
*/
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
struct sockaddr_storage *addr,
sctp_assoc_t id)
{
struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
struct sctp_transport *transport;
union sctp_addr *laddr = (union sctp_addr *)addr;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
laddr,
&transport);
if (!addr_asoc)
return NULL;
id_asoc = sctp_id2assoc(sk, id);
if (id_asoc && (id_asoc != addr_asoc))
return NULL;
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
(union sctp_addr *)addr);
return transport;
}
/* API 3.1.2 bind() - UDP Style Syntax
* The syntax of bind() is,
*
* ret = bind(int sd, struct sockaddr *addr, int addrlen);
*
* sd - the socket descriptor returned by socket().
* addr - the address structure (struct sockaddr_in or struct
* sockaddr_in6 [RFC 2553]),
* addr_len - the size of the address structure.
*/
SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
int retval = 0;
sctp_lock_sock(sk);
SCTP_DEBUG_PRINTK("sctp_bind(sk: %p, addr: %p, addr_len: %d)\n",
sk, addr, addr_len);
/* Disallow binding twice. */
if (!sctp_sk(sk)->ep->base.bind_addr.port)
retval = sctp_do_bind(sk, (union sctp_addr *)addr,
addr_len);
else
retval = -EINVAL;
sctp_release_sock(sk);
return retval;
}
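/*
 * Illustrative user-space sketch of the UDP-style bind() documented above
 * (an assumption about the caller, not code from this file; sd is a socket
 * created with IPPROTO_SCTP):
 *
 * struct sockaddr_in sin;
 * memset(&sin, 0, sizeof(sin));
 * sin.sin_family = AF_INET;
 * sin.sin_port = htons(5000);
 * sin.sin_addr.s_addr = htonl(INADDR_ANY);
 * if (bind(sd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
 * 	perror("bind");
 */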
static long sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
union sctp_addr *addr, int len)
{
struct sctp_af *af;
/* Check minimum size. */
if (len < sizeof (struct sockaddr))
return NULL;
/* V4 mapped addresses are really of AF_INET family */
if (addr->sa.sa_family == AF_INET6 &&
ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
if (!opt->pf->af_supported(AF_INET, opt))
return NULL;
} else {
/* Does this PF support this AF? */
if (!opt->pf->af_supported(addr->sa.sa_family, opt))
return NULL;
}
/* If we get this far, af is valid. */
af = sctp_get_af_specific(addr->sa.sa_family);
if (len < af->sockaddr_len)
return NULL;
return af;
}
/* Bind a local address either to an endpoint or to an association. */
SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
struct sctp_bind_addr *bp = &ep->base.bind_addr;
struct sctp_af *af;
unsigned short snum;
int ret = 0;
/* Common sockaddr verification. */
af = sctp_sockaddr_af(sp, addr, len);
if (!af) {
SCTP_DEBUG_PRINTK("sctp_do_bind(sk: %p, newaddr: %p, len: %d) EINVAL\n",
sk, addr, len);
return -EINVAL;
}
snum = ntohs(addr->v4.sin_port);
SCTP_DEBUG_PRINTK_IPADDR("sctp_do_bind(sk: %p, new addr: ",
", port: %d, new port: %d, len: %d)\n",
sk,
addr,
bp->port, snum,
len);
/* PF specific bind() address verification. */
if (!sp->pf->bind_verify(sp, addr))
return -EADDRNOTAVAIL;
/* We must either be unbound, or bind to the same port.
* It's OK to allow 0 ports if we are already bound.
* We'll just inherit the already-bound port in this case
*/
if (bp->port) {
if (!snum)
snum = bp->port;
else if (snum != bp->port) {
SCTP_DEBUG_PRINTK("sctp_do_bind:"
" New port %d does not match existing port "
"%d.\n", snum, bp->port);
return -EINVAL;
}
}
if (snum && snum < PROT_SOCK &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
/* See if the address matches any of the addresses we may have
* already bound before checking against other endpoints.
*/
if (sctp_bind_addr_match(bp, addr, sp))
return -EINVAL;
/* Make sure we are allowed to bind here.
* The function sctp_get_port_local() does duplicate address
* detection.
*/
addr->v4.sin_port = htons(snum);
if ((ret = sctp_get_port_local(sk, addr))) {
return -EADDRINUSE;
}
/* Refresh ephemeral port. */
if (!bp->port)
bp->port = inet_sk(sk)->inet_num;
/* Add the address to the bind address list.
* Use GFP_ATOMIC since BHs will be disabled.
*/
ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);
/* Copy back into socket for getsockname() use. */
if (!ret) {
inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
af->to_sk_saddr(addr, sk);
}
return ret;
}
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
*
* R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
* at any one time. If a sender, after sending an ASCONF chunk, decides
* it needs to transfer another ASCONF Chunk, it MUST wait until the
* ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
* subsequent ASCONF. Note this restriction binds each side, so at any
* time two ASCONF may be in-transit on any given association (one sent
* from each endpoint).
*/
static int sctp_send_asconf(struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
struct net *net = sock_net(asoc->base.sk);
int retval = 0;
/* If there is an outstanding ASCONF chunk, queue it for later
* transmission.
*/
if (asoc->addip_last_asconf) {
list_add_tail(&chunk->list, &asoc->addip_chunk_list);
goto out;
}
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(chunk);
retval = sctp_primitive_ASCONF(net, asoc, chunk);
if (retval)
sctp_chunk_free(chunk);
else
asoc->addip_last_asconf = chunk;
out:
return retval;
}
/* Add a list of addresses as bind addresses to local endpoint or
* association.
*
* Basically run through each address specified in the addrs/addrcnt
* array/length pair, determine if it is IPv6 or IPv4 and call
* sctp_do_bind() on it.
*
* If any of them fails, then the operation will be reversed and the
* ones that were added will be removed.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
int cnt;
int retval = 0;
void *addr_buf;
struct sockaddr *sa_addr;
struct sctp_af *af;
SCTP_DEBUG_PRINTK("sctp_bindx_add (sk: %p, addrs: %p, addrcnt: %d)\n",
sk, addrs, addrcnt);
addr_buf = addrs;
for (cnt = 0; cnt < addrcnt; cnt++) {
/* The list may contain either IPv4 or IPv6 address;
* determine the address length for walking thru the list.
*/
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
if (!af) {
retval = -EINVAL;
goto err_bindx_add;
}
retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
af->sockaddr_len);
addr_buf += af->sockaddr_len;
err_bindx_add:
if (retval < 0) {
/* Failed. Clean up the ones that have been added */
if (cnt > 0)
sctp_bindx_rem(sk, addrs, cnt);
return retval;
}
}
return retval;
}
/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
* associations that are part of the endpoint indicating that a list of local
* addresses are added to the endpoint.
*
* If any of the addresses is already in the bind address list of the
* association, we do not send the chunk for that association. But it will not
* affect other associations.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_send_asconf_add_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
struct sctp_sockaddr_entry *laddr;
union sctp_addr *addr;
union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
struct list_head *p;
int i;
int retval = 0;
if (!net->sctp.addip_enable)
return retval;
sp = sctp_sk(sk);
ep = sp->ep;
SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
__func__, sk, addrs, addrcnt);
list_for_each_entry(asoc, &ep->asocs, asocs) {
if (!asoc->peer.asconf_capable)
continue;
if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
continue;
if (!sctp_state(asoc, ESTABLISHED))
continue;
/* Check if any address in the packed array of addresses is
* in the bind address list of the association. If so,
* do not send the asconf chunk to its peer, but continue with
* other associations.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af) {
retval = -EINVAL;
goto out;
}
if (sctp_assoc_lookup_laddr(asoc, addr))
break;
addr_buf += af->sockaddr_len;
}
if (i < addrcnt)
continue;
/* Use the first valid address in bind addr list of
* association as Address Parameter of ASCONF CHUNK.
*/
bp = &asoc->base.bind_addr;
p = bp->address_list.next;
laddr = list_entry(p, struct sctp_sockaddr_entry, list);
chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
addrcnt, SCTP_PARAM_ADD_IP);
if (!chunk) {
retval = -ENOMEM;
goto out;
}
/* Add the new addresses to the bind address list with
* use_as_src set to 0.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
memcpy(&saveaddr, addr, af->sockaddr_len);
retval = sctp_add_bind_addr(bp, &saveaddr,
SCTP_ADDR_NEW, GFP_ATOMIC);
addr_buf += af->sockaddr_len;
}
if (asoc->src_out_of_asoc_ok) {
struct sctp_transport *trans;
list_for_each_entry(trans,
&asoc->peer.transport_addr_list, transports) {
/* Clear the source and route cache */
dst_release(trans->dst);
trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
2*asoc->pathmtu, 4380));
trans->ssthresh = asoc->peer.i.a_rwnd;
trans->rto = asoc->rto_initial;
sctp_max_rto(asoc, trans);
trans->rtt = trans->srtt = trans->rttvar = 0;
sctp_transport_route(trans, NULL,
sctp_sk(asoc->base.sk));
}
}
retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
}
/* Remove a list of addresses from bind addresses list. Do not remove the
* last address.
*
* Basically run through each address specified in the addrs/addrcnt
* array/length pair, determine if it is IPv6 or IPv4 and call
* sctp_del_bind() on it.
*
* If any of them fails, then the operation will be reversed and the
* ones that were removed will be added back.
*
* At least one address has to be left; if only one address is
* available, the operation will return -EBUSY.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
int cnt;
struct sctp_bind_addr *bp = &ep->base.bind_addr;
int retval = 0;
void *addr_buf;
union sctp_addr *sa_addr;
struct sctp_af *af;
SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n",
sk, addrs, addrcnt);
addr_buf = addrs;
for (cnt = 0; cnt < addrcnt; cnt++) {
/* If the bind address list is empty or if there is only one
* bind address, there is nothing more to be removed (we need
* at least one address here).
*/
if (list_empty(&bp->address_list) ||
(sctp_list_single_entry(&bp->address_list))) {
retval = -EBUSY;
goto err_bindx_rem;
}
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
if (!af) {
retval = -EINVAL;
goto err_bindx_rem;
}
if (!af->addr_valid(sa_addr, sp, NULL)) {
retval = -EADDRNOTAVAIL;
goto err_bindx_rem;
}
if (sa_addr->v4.sin_port &&
sa_addr->v4.sin_port != htons(bp->port)) {
retval = -EINVAL;
goto err_bindx_rem;
}
if (!sa_addr->v4.sin_port)
sa_addr->v4.sin_port = htons(bp->port);
/* FIXME - There is probably a need to check if sk->sk_saddr and
* sk->sk_rcv_addr are currently set to one of the addresses to
* be removed. This is something which needs to be looked into
* when we are fixing the outstanding issues with multi-homing
* socket routing and failover schemes. Refer to comments in
* sctp_do_bind(). -daisy
*/
retval = sctp_del_bind_addr(bp, sa_addr);
addr_buf += af->sockaddr_len;
err_bindx_rem:
if (retval < 0) {
/* Failed. Add back the ones that have been removed */
if (cnt > 0)
sctp_bindx_add(sk, addrs, cnt);
return retval;
}
}
return retval;
}
/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
* the associations that are part of the endpoint indicating that a list of
* local addresses are removed from the endpoint.
*
* If any of the addresses is already in the bind address list of the
* association, we do not send the chunk for that association. But it will not
* affect other associations.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_send_asconf_del_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct sctp_transport *transport;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
union sctp_addr *laddr;
void *addr_buf;
struct sctp_af *af;
struct sctp_sockaddr_entry *saddr;
int i;
int retval = 0;
int stored = 0;
chunk = NULL;
if (!net->sctp.addip_enable)
return retval;
sp = sctp_sk(sk);
ep = sp->ep;
SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
__func__, sk, addrs, addrcnt);
list_for_each_entry(asoc, &ep->asocs, asocs) {
if (!asoc->peer.asconf_capable)
continue;
if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
continue;
if (!sctp_state(asoc, ESTABLISHED))
continue;
/* Check if any address in the packed array of addresses is
* not present in the bind address list of the association.
* If so, do not send the asconf chunk to its peer, but
* continue with other associations.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
if (!af) {
retval = -EINVAL;
goto out;
}
if (!sctp_assoc_lookup_laddr(asoc, laddr))
break;
addr_buf += af->sockaddr_len;
}
if (i < addrcnt)
continue;
/* Find one address in the association's bind address list
* that is not in the packed array of addresses. This is to
* make sure that we do not delete all the addresses in the
* association.
*/
bp = &asoc->base.bind_addr;
laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
addrcnt, sp);
if ((laddr == NULL) && (addrcnt == 1)) {
if (asoc->asconf_addr_del_pending)
continue;
asoc->asconf_addr_del_pending =
kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
if (asoc->asconf_addr_del_pending == NULL) {
retval = -ENOMEM;
goto out;
}
asoc->asconf_addr_del_pending->sa.sa_family =
addrs->sa_family;
asoc->asconf_addr_del_pending->v4.sin_port =
htons(bp->port);
if (addrs->sa_family == AF_INET) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)addrs;
asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
} else if (addrs->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)addrs;
asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
}
SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
" at %p\n", asoc, asoc->asconf_addr_del_pending,
asoc->asconf_addr_del_pending);
asoc->src_out_of_asoc_ok = 1;
stored = 1;
goto skip_mkasconf;
}
/* We do not need RCU protection throughout this loop
* because this is done under a socket lock from the
* setsockopt call.
*/
chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
SCTP_PARAM_DEL_IP);
if (!chunk) {
retval = -ENOMEM;
goto out;
}
skip_mkasconf:
/* Reset use_as_src flag for the addresses in the bind address
* list that are to be deleted.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, laddr))
saddr->state = SCTP_ADDR_DEL;
}
addr_buf += af->sockaddr_len;
}
/* Update the route and saddr entries for all the transports
* as some of the addresses in the bind address list are
* about to be deleted and cannot be used as source addresses.
*/
list_for_each_entry(transport, &asoc->peer.transport_addr_list,
transports) {
dst_release(transport->dst);
sctp_transport_route(transport, NULL,
sctp_sk(asoc->base.sk));
}
if (stored)
/* We don't need to transmit ASCONF */
continue;
retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
}
/* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
struct sock *sk = sctp_opt2sk(sp);
union sctp_addr *addr;
struct sctp_af *af;
/* It is safe to write port space in caller. */
addr = &addrw->a;
addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
af = sctp_get_af_specific(addr->sa.sa_family);
if (!af)
return -EINVAL;
if (sctp_verify_addr(sk, addr, af->sockaddr_len))
return -EINVAL;
if (addrw->state == SCTP_ADDR_NEW)
return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
else
return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
*
* API 8.1
* int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
* int flags);
*
* If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
* If the sd is an IPv6 socket, the addresses passed can either be IPv4
* or IPv6 addresses.
*
* A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
* Section 3.1.2 for this usage.
*
* addrs is a pointer to an array of one or more socket addresses. Each
* address is contained in its appropriate structure (i.e. struct
* sockaddr_in or struct sockaddr_in6) the family of the address type
* must be used to distinguish the address length (note that this
* representation is termed a "packed array" of addresses). The caller
* specifies the number of addresses in the array with addrcnt.
*
* On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
* -1, and sets errno to the appropriate error code.
*
* For SCTP, the port given in each socket address must be the same, or
* sctp_bindx() will fail, setting errno to EINVAL.
*
* The flags parameter is formed from the bitwise OR of zero or more of
* the following currently defined flags:
*
* SCTP_BINDX_ADD_ADDR
*
* SCTP_BINDX_REM_ADDR
*
* SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
* association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
* addresses from the association. The two flags are mutually exclusive;
* if both are given, sctp_bindx() will fail with EINVAL. A caller may
* not remove all addresses from an association; sctp_bindx() will
* reject such an attempt with EINVAL.
*
* An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
* additional addresses with an endpoint after calling bind(). Or use
* sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
* socket is associated with so that no new association accepted will be
* associated with those addresses. If the endpoint supports dynamic
* address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
* may cause the endpoint to send the appropriate message to the peer to
* change the peer's address lists.
*
* Adding and removing addresses from a connected association is
* optional functionality. Implementations that do not support this
* functionality should return EOPNOTSUPP.
*
* Basically do nothing but copying the addresses from user to kernel
* land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
* This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
* from userspace.
*
* We don't use copy_from_user() for optimization: we first do the
* sanity checks (buffer size -fast- and access check-healthy
* pointer); if all of those succeed, then we can alloc the memory
* (expensive operation) needed to copy the data to kernel. Then we do
* the copying without checking the user space area
* (__copy_from_user()).
*
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
* it.
*
* sk The sk of the socket
* addrs The pointer to the addresses in user land
* addrssize Size of the addrs buffer
* op Operation to perform (add or remove, see the flags of
* sctp_bindx)
*
* Returns 0 if ok, <0 errno code on error.
*/
SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
struct sockaddr __user *addrs,
int addrs_size, int op)
{
struct sockaddr *kaddrs;
int err;
int addrcnt = 0;
int walk_size = 0;
struct sockaddr *sa_addr;
void *addr_buf;
struct sctp_af *af;
SCTP_DEBUG_PRINTK("sctp_setsockopt_bindx: sk %p addrs %p"
" addrs_size %d opt %d\n", sk, addrs, addrs_size, op);
if (unlikely(addrs_size <= 0))
return -EINVAL;
/* Check the user passed a healthy pointer. */
if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
kaddrs = kmalloc(addrs_size, GFP_KERNEL);
if (unlikely(!kaddrs))
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
kfree(kaddrs);
return -EFAULT;
}
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
if (walk_size + sizeof(sa_family_t) > addrs_size) {
kfree(kaddrs);
return -EINVAL;
}
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
kfree(kaddrs);
return -EINVAL;
}
addrcnt++;
addr_buf += af->sockaddr_len;
walk_size += af->sockaddr_len;
}
/* Do the work. */
switch (op) {
case SCTP_BINDX_ADD_ADDR:
err = sctp_bindx_add(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
break;
case SCTP_BINDX_REM_ADDR:
err = sctp_bindx_rem(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
break;
default:
err = -EINVAL;
break;
}
out:
kfree(kaddrs);
return err;
}
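/*
 * Illustrative user-space use of the sctp_bindx() API serviced by this
 * handler (a sketch assuming the lksctp-tools wrapper; the packed array
 * holds two IPv4 addresses that must share one port, per the API 8.1
 * comment above):
 *
 * struct sockaddr_in addrs[2];
 * // fill both entries, same sin_port in each
 * if (sctp_bindx(sd, (struct sockaddr *)addrs, 2,
 * 		SCTP_BINDX_ADD_ADDR) < 0)
 * 	perror("sctp_bindx");
 */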
/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
*
* Common routine for handling connect() and sctp_connectx().
* Connect will come in with just a single address.
*/
static int __sctp_connect(struct sock* sk,
struct sockaddr *kaddrs,
int addrs_size,
sctp_assoc_t *assoc_id)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc = NULL;
struct sctp_association *asoc2;
struct sctp_transport *transport;
union sctp_addr to;
struct sctp_af *af;
sctp_scope_t scope;
long timeo;
int err = 0;
int addrcnt = 0;
int walk_size = 0;
union sctp_addr *sa_addr = NULL;
void *addr_buf;
unsigned short port;
unsigned int f_flags = 0;
sp = sctp_sk(sk);
ep = sp->ep;
/* connect() cannot be done on a socket that is already in ESTABLISHED
* state - UDP-style peeled off socket or a TCP-style socket that
* is already connected.
* It cannot be done even on a TCP-style listening socket.
*/
if (sctp_sstate(sk, ESTABLISHED) ||
(sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
err = -EISCONN;
goto out_free;
}
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
if (walk_size + sizeof(sa_family_t) > addrs_size) {
err = -EINVAL;
goto out_free;
}
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
err = -EINVAL;
goto out_free;
}
port = ntohs(sa_addr->v4.sin_port);
/* Save current address so we can work with it */
memcpy(&to, sa_addr, af->sockaddr_len);
err = sctp_verify_addr(sk, &to, af->sockaddr_len);
if (err)
goto out_free;
/* Make sure the destination port is correctly set
* in all addresses.
*/
if (asoc && asoc->peer.port && asoc->peer.port != port) {
err = -EINVAL;
goto out_free;
}
/* Check if there already is a matching association on the
* endpoint (other than the one created here).
*/
asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (asoc2 && asoc2 != asoc) {
if (asoc2->state >= SCTP_STATE_ESTABLISHED)
err = -EISCONN;
else
err = -EALREADY;
goto out_free;
}
/* If we could not find a matching association on the endpoint,
* make sure that there is no peeled-off association matching
* the peer address even on another socket.
*/
if (sctp_endpoint_is_peeled_off(ep, &to)) {
err = -EADDRNOTAVAIL;
goto out_free;
}
if (!asoc) {
/* If a bind() or sctp_bindx() is not called prior to
* an sctp_connectx() call, the system picks an
* ephemeral port and will choose an address set
* equivalent to binding with a wildcard address.
*/
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk)) {
err = -EAGAIN;
goto out_free;
}
} else {
/*
* If an unprivileged user inherits a 1-many
* style socket with open associations on a
* privileged port, it MAY be permitted to
* accept new associations, but it SHOULD NOT
* be permitted to open new associations.
*/
if (ep->base.bind_addr.port < PROT_SOCK &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_free;
}
}
scope = sctp_scope(&to);
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!asoc) {
err = -ENOMEM;
goto out_free;
}
err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
GFP_KERNEL);
if (err < 0) {
goto out_free;
}
}
/* Prime the peer's transport structures. */
transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
SCTP_UNKNOWN);
if (!transport) {
err = -ENOMEM;
goto out_free;
}
addrcnt++;
addr_buf += af->sockaddr_len;
walk_size += af->sockaddr_len;
}
/* In case the user of sctp_connectx() wants an association
* id back, assign one now.
*/
if (assoc_id) {
err = sctp_assoc_set_id(asoc, GFP_KERNEL);
if (err < 0)
goto out_free;
}
err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
if (err < 0) {
goto out_free;
}
/* Initialize sk's dport and daddr for getpeername() */
inet_sk(sk)->inet_dport = htons(asoc->peer.port);
af = sctp_get_af_specific(sa_addr->sa.sa_family);
af->to_sk_daddr(sa_addr, sk);
sk->sk_err = 0;
/* in-kernel sockets don't generally have a file allocated to them
* if all they do is call sock_create_kern().
*/
if (sk->sk_socket->file)
f_flags = sk->sk_socket->file->f_flags;
timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
err = sctp_wait_for_connect(asoc, &timeo);
if ((err == 0 || err == -EINPROGRESS) && assoc_id)
*assoc_id = asoc->assoc_id;
/* Don't free association on exit. */
asoc = NULL;
out_free:
SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
" kaddrs: %p err: %d\n",
asoc, kaddrs, err);
if (asoc) {
/* sctp_primitive_ASSOCIATE may have added this association
* to the hash table; try to unhash it, just in case. It's a
* no-op if it wasn't hashed, so we're safe.
*/
sctp_unhash_established(asoc);
sctp_association_free(asoc);
}
return err;
}
/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
*
* API 8.9
* int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
* sctp_assoc_t *asoc);
*
* If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
* If the sd is an IPv6 socket, the addresses passed can either be IPv4
* or IPv6 addresses.
*
* A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
* Section 3.1.2 for this usage.
*
* addrs is a pointer to an array of one or more socket addresses. Each
* address is contained in its appropriate structure (i.e. struct
* sockaddr_in or struct sockaddr_in6) the family of the address type
* must be used to distinguish the address length (note that this
* representation is termed a "packed array" of addresses). The caller
* specifies the number of addresses in the array with addrcnt.
*
* On success, sctp_connectx() returns 0. It also sets the assoc_id to
* the association id of the new association. On failure, sctp_connectx()
* returns -1, and sets errno to the appropriate error code. The assoc_id
* is not touched by the kernel.
*
* For SCTP, the port given in each socket address must be the same, or
* sctp_connectx() will fail, setting errno to EINVAL.
*
* An application can use sctp_connectx to initiate an association with
* an endpoint that is multi-homed. Much like sctp_bindx() this call
* allows a caller to specify multiple addresses at which a peer can be
* reached. The way the SCTP stack uses the list of addresses to set up
* the association is implementation dependent. This function only
* specifies that the stack will try to make use of all the addresses in
* the list when needed.
*
* Note that the list of addresses passed in is only used for setting up
* the association. It does not necessarily equal the set of addresses
* the peer uses for the resulting association. If the caller wants to
* find out the set of peer addresses, it must use sctp_getpaddrs() to
* retrieve them after the association has been set up.
*
* Basically do nothing but copying the addresses from user to kernel
* land and invoking sctp_connectx(). This is used for tunneling
* the sctp_connectx() request through sctp_setsockopt() from userspace.
*
* We don't use copy_from_user() for optimization: we first do the
* sanity checks (buffer size -fast- and access check-healthy
* pointer); if all of those succeed, then we can alloc the memory
* (expensive operation) needed to copy the data to kernel. Then we do
* the copying without checking the user space area
* (__copy_from_user()).
*
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
* it.
*
* sk The sk of the socket
* addrs The pointer to the addresses in user land
* addrssize Size of the addrs buffer
*
* Returns >=0 if ok, <0 errno code on error.
*/
SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
struct sockaddr __user *addrs,
int addrs_size,
sctp_assoc_t *assoc_id)
{
int err = 0;
struct sockaddr *kaddrs;
SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n",
__func__, sk, addrs, addrs_size);
if (unlikely(addrs_size <= 0))
return -EINVAL;
/* Check the user passed a healthy pointer. */
if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
kaddrs = kmalloc(addrs_size, GFP_KERNEL);
if (unlikely(!kaddrs))
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
err = -EFAULT;
} else {
err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
}
kfree(kaddrs);
return err;
}
/*
* This is an older interface. It's kept for backward compatibility
* to the option that doesn't provide association id.
*/
SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
struct sockaddr __user *addrs,
int addrs_size)
{
return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}
/*
* New interface for the API. Since the API is done with a socket
* option, to make it simple we feed the association id back as the
* return indication to the call. An error is always negative and an
* association id is always positive.
*/
SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
struct sockaddr __user *addrs,
int addrs_size)
{
sctp_assoc_t assoc_id = 0;
int err = 0;
err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
if (err)
return err;
else
return assoc_id;
}
/*
* New (hopefully final) interface for the API.
* We use the sctp_getaddrs_old structure so that the user-space library
* can avoid any unnecessary allocations. The only different part
* is that we store the actual length of the address buffer into the
* addrs_num structure member. That way we can re-use the existing
* code.
*/
SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_getaddrs_old param;
sctp_assoc_t assoc_id = 0;
int err = 0;
if (len < sizeof(param))
return -EINVAL;
if (copy_from_user(&param, optval, sizeof(param)))
return -EFAULT;
err = __sctp_setsockopt_connectx(sk,
(struct sockaddr __user *)param.addrs,
param.addr_num, &assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
return -EFAULT;
if (put_user(sizeof(assoc_id), optlen))
return -EFAULT;
}
return err;
}
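/*
 * Illustrative user-space call reaching the connectx handlers above (a
 * sketch assuming the lksctp-tools sctp_connectx() wrapper; addrs is a
 * packed array as described in the API 8.9 comment):
 *
 * sctp_assoc_t id;
 * if (sctp_connectx(sd, (struct sockaddr *)addrs, addrcnt, &id) == 0)
 * 	printf("association id %d\n", (int)id);
 */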
/* API 3.1.4 close() - UDP Style Syntax
* Applications use close() to perform graceful shutdown (as described in
* Section 10.1 of [SCTP]) on ALL the associations currently represented
* by a UDP-style socket.
*
* The syntax is
*
* ret = close(int sd);
*
* sd - the socket descriptor of the associations to be closed.
*
* To gracefully shutdown a specific association represented by the
* UDP-style socket, an application should use the sendmsg() call,
* passing no user data, but including the appropriate flag in the
* ancillary data (see Section xxxx).
*
* If sd in the close() call is a branched-off socket representing only
* one association, the shutdown is performed on that association only.
*
* 4.1.6 close() - TCP Style Syntax
*
* Applications use close() to gracefully close down an association.
*
* The syntax is:
*
* int close(int sd);
*
* sd - the socket descriptor of the association to be closed.
*
* After an application calls close() on a socket descriptor, no further
* socket operations will succeed on that descriptor.
*
* API 7.1.4 SO_LINGER
*
* An application using the TCP-style socket can use this option to
* perform the SCTP ABORT primitive. The linger option structure is:
*
* struct linger {
* int l_onoff; // option on/off
* int l_linger; // linger time
* };
*
* To enable the option, set l_onoff to 1. If the l_linger value is set
* to 0, calling close() is the same as the ABORT primitive. If the
* value is set to a negative value, the setsockopt() call will return
* an error. If the value is set to a positive value linger_time, the
* close() can be blocked for at most linger_time ms. If the graceful
* shutdown phase does not finish during this period, close() will
* return but the graceful shutdown phase continues in the system.
*/
SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
{
struct net *net = sock_net(sk);
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct list_head *pos, *temp;
unsigned int data_was_unread;
SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
sctp_lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;
ep = sctp_sk(sk)->ep;
/* Clean up any skbs sitting on the receive queue. */
data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
/* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
if (sctp_style(sk, TCP)) {
/* A closed association can still be in the list if
* it belongs to a TCP-style listening socket that is
* not yet accepted. If so, free it. If not, send an
* ABORT or SHUTDOWN based on the linger options.
*/
if (sctp_state(asoc, CLOSED)) {
sctp_unhash_established(asoc);
sctp_association_free(asoc);
continue;
}
}
if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
!skb_queue_empty(&asoc->ulpq.reasm) ||
(sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
struct sctp_chunk *chunk;
chunk = sctp_make_abort_user(asoc, NULL, 0);
if (chunk)
sctp_primitive_ABORT(net, asoc, chunk);
} else
sctp_primitive_SHUTDOWN(net, asoc, NULL);
}
/* On a TCP-style socket, block for at most linger_time if set. */
if (sctp_style(sk, TCP) && timeout)
sctp_wait_for_close(sk, timeout);
/* This will run the backlog queue. */
sctp_release_sock(sk);
/* Supposedly, no process has access to the socket, but
* the net layers still may.
*/
sctp_local_bh_disable();
sctp_bh_lock_sock(sk);
/* Hold the sock, since sk_common_release() will call sock_put()
* and we have just a little more cleanup.
*/
sock_hold(sk);
sk_common_release(sk);
sctp_bh_unlock_sock(sk);
sctp_local_bh_enable();
sock_put(sk);
SCTP_DBG_OBJCNT_DEC(sock);
}
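/*
 * Illustrative user-space sketch of the SO_LINGER behaviour documented
 * above (TCP-style socket): l_onoff=1 with l_linger=0 makes the following
 * close() act as the ABORT primitive.
 *
 * struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 * setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 * close(sd); // ABORT is sent instead of a graceful SHUTDOWN
 */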
/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
if (err == -EPIPE)
err = sock_error(sk) ? : -EPIPE;
if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
return err;
}
/* API 3.1.3 sendmsg() - UDP Style Syntax
*
* An application uses sendmsg() and recvmsg() calls to transmit data to
* and receive data from its peer.
*
* ssize_t sendmsg(int socket, const struct msghdr *message,
* int flags);
*
* socket - the socket descriptor of the endpoint.
* message - pointer to the msghdr structure which contains a single
* user message and possibly some ancillary data.
*
* See Section 5 for complete description of the data
* structures.
*
* flags - flags sent or received with the user message, see Section
* 5 for complete description of the flags.
*
* Note: This function could use a rewrite especially when explicit
* connect support comes in.
*/
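/*
 * Illustrative user-space sendmsg() sketch (an assumption about the
 * caller, not code from this file): the SCTP_SNDRCV ancillary data set up
 * here is what sctp_msghdr_parse() extracts below as cmsgs.info.
 *
 * char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 * struct msghdr mh;
 * struct cmsghdr *cm;
 * struct sctp_sndrcvinfo *si;
 *
 * memset(&mh, 0, sizeof(mh));
 * mh.msg_control = cbuf;
 * mh.msg_controllen = sizeof(cbuf);
 * cm = CMSG_FIRSTHDR(&mh);
 * cm->cmsg_level = IPPROTO_SCTP;
 * cm->cmsg_type = SCTP_SNDRCV;
 * cm->cmsg_len = CMSG_LEN(sizeof(*si));
 * si = (struct sctp_sndrcvinfo *)CMSG_DATA(cm);
 * memset(si, 0, sizeof(*si));
 * si->sinfo_stream = 1;
 * // set mh.msg_iov/msg_iovlen (and msg_name for UDP-style), then:
 * // sendmsg(sd, &mh, 0);
 */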
/* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t msg_len)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *new_asoc=NULL, *asoc=NULL;
struct sctp_transport *transport, *chunk_tp;
struct sctp_chunk *chunk;
union sctp_addr to;
struct sockaddr *msg_name = NULL;
struct sctp_sndrcvinfo default_sinfo;
struct sctp_sndrcvinfo *sinfo;
struct sctp_initmsg *sinit;
sctp_assoc_t associd = 0;
sctp_cmsgs_t cmsgs = { NULL };
int err;
sctp_scope_t scope;
long timeo;
__u16 sinfo_flags = 0;
struct sctp_datamsg *datamsg;
int msg_flags = msg->msg_flags;
SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %zu)\n",
sk, msg, msg_len);
err = 0;
sp = sctp_sk(sk);
ep = sp->ep;
SCTP_DEBUG_PRINTK("Using endpoint: %p.\n", ep);
/* We cannot send a message over a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
err = -EPIPE;
goto out_nounlock;
}
/* Parse out the SCTP CMSGs. */
err = sctp_msghdr_parse(msg, &cmsgs);
if (err) {
SCTP_DEBUG_PRINTK("msghdr parse err = %x\n", err);
goto out_nounlock;
}
/* Fetch the destination address for this packet. This
* address only selects the association--it is not necessarily
* the address we will send to.
* For a peeled-off socket, msg_name is ignored.
*/
if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
int msg_namelen = msg->msg_namelen;
err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
msg_namelen);
if (err)
return err;
if (msg_namelen > sizeof(to))
msg_namelen = sizeof(to);
memcpy(&to, msg->msg_name, msg_namelen);
msg_name = msg->msg_name;
}
sinfo = cmsgs.info;
sinit = cmsgs.init;
/* Did the user specify SNDRCVINFO? */
if (sinfo) {
sinfo_flags = sinfo->sinfo_flags;
associd = sinfo->sinfo_assoc_id;
}
SCTP_DEBUG_PRINTK("msg_len: %zu, sinfo_flags: 0x%x\n",
msg_len, sinfo_flags);
/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
err = -EINVAL;
goto out_nounlock;
}
/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
* length messages when SCTP_EOF|SCTP_ABORT is not set.
* If SCTP_ABORT is set, the message length could be non zero with
* the msg_iov set to the user abort reason.
*/
if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
(!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
err = -EINVAL;
goto out_nounlock;
}
/* If SCTP_ADDR_OVER is set, there must be an address
* specified in msg_name.
*/
if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
err = -EINVAL;
goto out_nounlock;
}
transport = NULL;
SCTP_DEBUG_PRINTK("About to look up association.\n");
sctp_lock_sock(sk);
/* If a msg_name has been specified, assume this is to be used. */
if (msg_name) {
/* Look for a matching association on the endpoint. */
asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (!asoc) {
/* If we could not find a matching association on the
* endpoint, make sure that it is not a TCP-style
* socket that already has an association or there is
* no peeled-off association on another socket.
*/
if ((sctp_style(sk, TCP) &&
sctp_sstate(sk, ESTABLISHED)) ||
sctp_endpoint_is_peeled_off(ep, &to)) {
err = -EADDRNOTAVAIL;
goto out_unlock;
}
}
} else {
asoc = sctp_id2assoc(sk, associd);
if (!asoc) {
err = -EPIPE;
goto out_unlock;
}
}
if (asoc) {
SCTP_DEBUG_PRINTK("Just looked up association: %p.\n", asoc);
/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
* socket that has an association in CLOSED state. This can
* happen when an accepted socket has an association that is
* already CLOSED.
*/
if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
err = -EPIPE;
goto out_unlock;
}
if (sinfo_flags & SCTP_EOF) {
SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
asoc);
sctp_primitive_SHUTDOWN(net, asoc, NULL);
err = 0;
goto out_unlock;
}
if (sinfo_flags & SCTP_ABORT) {
chunk = sctp_make_abort_user(asoc, msg, msg_len);
if (!chunk) {
err = -ENOMEM;
goto out_unlock;
}
SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
sctp_primitive_ABORT(net, asoc, chunk);
err = 0;
goto out_unlock;
}
}
/* Do we need to create the association? */
if (!asoc) {
SCTP_DEBUG_PRINTK("There is no association yet.\n");
if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
err = -EINVAL;
goto out_unlock;
}
/* Check for invalid stream against the stream counts,
* either the default or the user specified stream counts.
*/
if (sinfo) {
if (!sinit || !sinit->sinit_num_ostreams) {
/* Check against the defaults. */
if (sinfo->sinfo_stream >=
sp->initmsg.sinit_num_ostreams) {
err = -EINVAL;
goto out_unlock;
}
} else {
/* Check against the requested. */
if (sinfo->sinfo_stream >=
sinit->sinit_num_ostreams) {
err = -EINVAL;
goto out_unlock;
}
}
}
/*
* API 3.1.2 bind() - UDP Style Syntax
* If a bind() or sctp_bindx() is not called prior to a
* sendmsg() call that initiates a new association, the
* system picks an ephemeral port and will choose an address
* set equivalent to binding with a wildcard address.
*/
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk)) {
err = -EAGAIN;
goto out_unlock;
}
} else {
/*
* If an unprivileged user inherits a one-to-many
* style socket with open associations on a privileged
* port, it MAY be permitted to accept new associations,
* but it SHOULD NOT be permitted to open new
* associations.
*/
if (ep->base.bind_addr.port < PROT_SOCK &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_unlock;
}
}
scope = sctp_scope(&to);
new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!new_asoc) {
err = -ENOMEM;
goto out_unlock;
}
asoc = new_asoc;
err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
if (err < 0) {
err = -ENOMEM;
goto out_free;
}
/* If the SCTP_INIT ancillary data is specified, set all
* the association init values accordingly.
*/
if (sinit) {
if (sinit->sinit_num_ostreams) {
asoc->c.sinit_num_ostreams =
sinit->sinit_num_ostreams;
}
if (sinit->sinit_max_instreams) {
asoc->c.sinit_max_instreams =
sinit->sinit_max_instreams;
}
if (sinit->sinit_max_attempts) {
asoc->max_init_attempts
= sinit->sinit_max_attempts;
}
if (sinit->sinit_max_init_timeo) {
asoc->max_init_timeo =
msecs_to_jiffies(sinit->sinit_max_init_timeo);
}
}
/* Prime the peer's transport structures. */
transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
if (!transport) {
err = -ENOMEM;
goto out_free;
}
}
/* ASSERT: we have a valid association at this point. */
SCTP_DEBUG_PRINTK("We have a valid association.\n");
if (!sinfo) {
/* If the user didn't specify SNDRCVINFO, make up one with
* some defaults.
*/
memset(&default_sinfo, 0, sizeof(default_sinfo));
default_sinfo.sinfo_stream = asoc->default_stream;
default_sinfo.sinfo_flags = asoc->default_flags;
default_sinfo.sinfo_ppid = asoc->default_ppid;
default_sinfo.sinfo_context = asoc->default_context;
default_sinfo.sinfo_timetolive = asoc->default_timetolive;
default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
sinfo = &default_sinfo;
}
/* API 7.1.7, the sndbuf size per association bounds the
* maximum size of data that can be sent in a single send call.
*/
if (msg_len > sk->sk_sndbuf) {
err = -EMSGSIZE;
goto out_free;
}
if (asoc->pmtu_pending)
sctp_assoc_pending_pmtu(sk, asoc);
/* If fragmentation is disabled and the message length exceeds the
* association fragmentation point, return EMSGSIZE. The I-D
* does not specify what this error is, but this looks like
* a great fit.
*/
if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
err = -EMSGSIZE;
goto out_free;
}
/* Check for invalid stream. */
if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
err = -EINVAL;
goto out_free;
}
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
if (!sctp_wspace(asoc)) {
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
if (err)
goto out_free;
}
/* If an address is passed with the sendto/sendmsg call, it is used
* to override the primary destination address in the TCP model, or
* when SCTP_ADDR_OVER flag is set in the UDP model.
*/
if ((sctp_style(sk, TCP) && msg_name) ||
(sinfo_flags & SCTP_ADDR_OVER)) {
chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
if (!chunk_tp) {
err = -EINVAL;
goto out_free;
}
} else
chunk_tp = NULL;
/* Auto-connect, if we aren't connected already. */
if (sctp_state(asoc, CLOSED)) {
err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
if (err < 0)
goto out_free;
SCTP_DEBUG_PRINTK("We associated primitively.\n");
}
/* Break the message into multiple chunks of maximum size. */
datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
if (IS_ERR(datamsg)) {
err = PTR_ERR(datamsg);
goto out_free;
}
/* Now send the (possibly) fragmented message. */
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
sctp_chunk_hold(chunk);
/* Do accounting for the write space. */
sctp_set_owner_w(chunk);
chunk->transport = chunk_tp;
}
/* Send it to the lower layers. Note: all chunks
* must either fail or succeed. The lower layer
* works that way today. Keep it that way or this
* breaks.
*/
err = sctp_primitive_SEND(net, asoc, datamsg);
/* Did the lower layer accept the chunk? */
if (err)
sctp_datamsg_free(datamsg);
else
sctp_datamsg_put(datamsg);
SCTP_DEBUG_PRINTK("We sent primitively.\n");
if (err)
goto out_free;
else
err = msg_len;
/* If we are already past ASSOCIATE, the lower
* layers are responsible for association cleanup.
*/
goto out_unlock;
out_free:
if (new_asoc) {
sctp_unhash_established(asoc);
sctp_association_free(asoc);
}
out_unlock:
sctp_release_sock(sk);
out_nounlock:
return sctp_error(sk, msg_flags, err);
#if 0
do_sock_err:
if (msg_len)
err = msg_len;
else
err = sock_error(sk);
goto out;
do_interrupted:
if (msg_len)
err = msg_len;
goto out;
#endif /* 0 */
}
/* This is an extended version of skb_pull() that removes the data from the
* start of an skb even when data is spread across the list of skb's in the
* frag_list. len specifies the total amount of data that needs to be removed.
* When 'len' bytes could be removed from the skb, it returns 0.
* If 'len' exceeds the total skb length, it returns the number of bytes that
* could not be removed.
*/
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
struct sk_buff *list;
int skb_len = skb_headlen(skb);
int rlen;
if (len <= skb_len) {
__skb_pull(skb, len);
return 0;
}
len -= skb_len;
__skb_pull(skb, skb_len);
skb_walk_frags(skb, list) {
rlen = sctp_skb_pull(list, len);
skb->len -= (len-rlen);
skb->data_len -= (len-rlen);
if (!rlen)
return 0;
len = rlen;
}
return len;
}
/* API 3.1.3 recvmsg() - UDP Style Syntax
*
* ssize_t recvmsg(int socket, struct msghdr *message,
* int flags);
*
* socket - the socket descriptor of the endpoint.
* message - pointer to the msghdr structure which contains a single
* user message and possibly some ancillary data.
*
* See Section 5 for complete description of the data
* structures.
*
* flags - flags sent or received with the user message, see Section
* 5 for complete description of the flags.
*/
static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct sctp_ulpevent *event = NULL;
struct sctp_sock *sp = sctp_sk(sk);
struct sk_buff *skb;
int copied;
int err = 0;
int skb_len;
SCTP_DEBUG_PRINTK("sctp_recvmsg(%s: %p, %s: %p, %s: %zd, %s: %d, %s: "
"0x%x, %s: %p)\n", "sk", sk, "msghdr", msg,
"len", len, "knoblauch", noblock,
"flags", flags, "addr_len", addr_len);
sctp_lock_sock(sk);
if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
err = -ENOTCONN;
goto out;
}
skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
/* Get the total length of the skb including any skb's in the
* frag_list.
*/
skb_len = skb->len;
copied = skb_len;
if (copied > len)
copied = len;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
event = sctp_skb2event(skb);
if (err)
goto out_free;
sock_recv_ts_and_drops(msg, sk, skb);
if (sctp_ulpevent_is_notification(event)) {
msg->msg_flags |= MSG_NOTIFICATION;
sp->pf->event_msgname(event, msg->msg_name, addr_len);
} else {
sp->pf->skb_msgname(skb, msg->msg_name, addr_len);
}
/* Check if we allow SCTP_SNDRCVINFO. */
if (sp->subscribe.sctp_data_io_event)
sctp_ulpevent_read_sndrcvinfo(event, msg);
#if 0
/* FIXME: we should be calling IP/IPv6 layers. */
if (sk->sk_protinfo.af_inet.cmsg_flags)
ip_cmsg_recv(msg, skb);
#endif
err = copied;
/* If skb's length exceeds the user's buffer, update the skb and
* push it back to the receive_queue so that the next call to
* recvmsg() will return the remaining data. Don't set MSG_EOR.
*/
if (skb_len > copied) {
msg->msg_flags &= ~MSG_EOR;
if (flags & MSG_PEEK)
goto out_free;
sctp_skb_pull(skb, copied);
skb_queue_head(&sk->sk_receive_queue, skb);
/* When only partial message is copied to the user, increase
* rwnd by that amount. If all the data in the skb is read,
* rwnd is updated when the event is freed.
*/
if (!sctp_ulpevent_is_notification(event))
sctp_assoc_rwnd_increase(event->asoc, copied);
goto out;
} else if ((event->msg_flags & MSG_NOTIFICATION) ||
(event->msg_flags & MSG_EOR))
msg->msg_flags |= MSG_EOR;
else
msg->msg_flags &= ~MSG_EOR;
out_free:
if (flags & MSG_PEEK) {
/* Release the skb reference acquired after peeking the skb in
* sctp_skb_recv_datagram().
*/
kfree_skb(skb);
} else {
/* Free the event which includes releasing the reference to
* the owner of the skb, freeing the skb and updating the
* rwnd.
*/
sctp_ulpevent_free(event);
}
out:
sctp_release_sock(sk);
return err;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
*
* This option is an on/off flag. If enabled, no SCTP message
* fragmentation will be performed. Instead, if a message being sent
* exceeds the current PMTU size, the message will NOT be sent and
* an error will be indicated to the user.
*/
static int sctp_setsockopt_disable_fragments(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
int val;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
return 0;
}
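/* Usage sketch (illustrative only; assumes a hypothetical user-space SCTP
 * socket 'sd' and <netinet/sctp.h>): turn fragmentation off so that an
 * oversized send fails with an error instead of being fragmented.
 *
 *   int on = 1;
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *                  &on, sizeof(on)) < 0)
 *       perror("SCTP_DISABLE_FRAGMENTS");
 */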
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_association *asoc;
struct sctp_ulpevent *event;
if (optlen > sizeof(struct sctp_event_subscribe))
return -EINVAL;
if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
return -EFAULT;
/*
* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
* if there is no data to be sent or retransmit, the stack will
* immediately send up this notification.
*/
if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
&sctp_sk(sk)->subscribe)) {
asoc = sctp_id2assoc(sk, 0);
if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
event = sctp_ulpevent_make_sender_dry_event(asoc,
GFP_ATOMIC);
if (!event)
return -ENOMEM;
sctp_ulpq_tail_event(&asoc->ulpq, event);
}
}
return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
*
* This socket option is applicable to the UDP-style socket only. When
* set it will cause associations that are idle for more than the
* specified number of seconds to automatically close. An association
* being idle is defined as an association that has NOT sent or received
* user data. The special value of '0' indicates that no automatic
* close of any associations should be performed. The option expects an
* integer defining the number of seconds of idle time before an
* association is closed.
*/
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
if (optlen != sizeof(int))
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
return 0;
}
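/* Usage sketch (illustrative only; 'sd' is a hypothetical one-to-many
 * SCTP socket): close associations idle for 30 seconds. The setter above
 * rejects TCP-style sockets with EOPNOTSUPP, and a value of 0 disables
 * the feature.
 *
 *   int secs = 30;
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *                  &secs, sizeof(secs)) < 0)
 *       perror("SCTP_AUTOCLOSE");
 */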
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
*
* Applications can enable or disable heartbeats for any peer address of
* an association, modify an address's heartbeat interval, force a
* heartbeat to be sent immediately, and adjust the address's maximum
* number of retransmissions sent before an address is considered
* unreachable. The following structure is used to access and modify an
* address's parameters:
*
* struct sctp_paddrparams {
* sctp_assoc_t spp_assoc_id;
* struct sockaddr_storage spp_address;
* uint32_t spp_hbinterval;
* uint16_t spp_pathmaxrxt;
* uint32_t spp_pathmtu;
* uint32_t spp_sackdelay;
* uint32_t spp_flags;
* };
*
* spp_assoc_id - (one-to-many style socket) This is filled in by the
* application, and identifies the association for
* this query.
* spp_address - This specifies which address is of interest.
* spp_hbinterval - This contains the value of the heartbeat interval,
* in milliseconds. If a value of zero
* is present in this field then no changes are to
* be made to this parameter.
* spp_pathmaxrxt - This contains the maximum number of
* retransmissions before this address shall be
* considered unreachable. If a value of zero
* is present in this field then no changes are to
* be made to this parameter.
* spp_pathmtu - When Path MTU discovery is disabled the value
* specified here will be the "fixed" path mtu.
* Note that if the spp_address field is empty
* then all associations on this address will
* have this fixed path mtu set upon them.
*
* spp_sackdelay - When delayed sack is enabled, this value specifies
* the number of milliseconds that sacks will be delayed
* for. This value will apply to all addresses of an
* association if the spp_address field is empty. Note
* also that if delayed sack is enabled and this
* value is set to 0, no change is made to the last
* recorded delayed sack timer value.
*
* spp_flags - These flags are used to control various features
* on an association. The flag field may contain
* zero or more of the following options.
*
* SPP_HB_ENABLE - Enable heartbeats on the
* specified address. Note that if the address
* field is empty all addresses for the association
* have heartbeats enabled upon them.
*
* SPP_HB_DISABLE - Disable heartbeats on the
* specified address. Note that if the address
* field is empty all addresses for the association
* will have their heartbeats disabled. Note also
* that SPP_HB_ENABLE and SPP_HB_DISABLE are
* mutually exclusive, only one of these two should
* be specified. Enabling both fields will have
* undetermined results.
*
* SPP_HB_DEMAND - Request a user initiated heartbeat
* to be made immediately.
*
* SPP_HB_TIME_IS_ZERO - Specifies that the time for
* heartbeat delay is to be set to the value of 0
* milliseconds.
*
* SPP_PMTUD_ENABLE - This field will enable PMTU
* discovery upon the specified address. Note that
* if the address field is empty then all addresses
* on the association are affected.
*
* SPP_PMTUD_DISABLE - This field will disable PMTU
* discovery upon the specified address. Note that
* if the address field is empty then all addresses
* on the association are affected. Note also that
* SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
* exclusive. Enabling both will have undetermined
* results.
*
* SPP_SACKDELAY_ENABLE - Setting this flag turns
* on delayed sack. The time specified in spp_sackdelay
* is used to specify the sack delay for this address. Note
* that if spp_address is empty then all addresses will
* enable delayed sack and take on the sack delay
* value specified in spp_sackdelay.
* SPP_SACKDELAY_DISABLE - Setting this flag turns
* off delayed sack. If the spp_address field is blank then
* delayed sack is disabled for the entire association. Note
* also that this field is mutually exclusive to
* SPP_SACKDELAY_ENABLE, setting both will have undefined
* results.
*/
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
struct sctp_transport *trans,
struct sctp_association *asoc,
struct sctp_sock *sp,
int hb_change,
int pmtud_change,
int sackdelay_change)
{
int error;
if (params->spp_flags & SPP_HB_DEMAND && trans) {
struct net *net = sock_net(trans->asoc->base.sk);
error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
if (error)
return error;
}
/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
* this field is ignored. Note also that a value of zero indicates
* the current setting should be left unchanged.
*/
if (params->spp_flags & SPP_HB_ENABLE) {
/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
* set. This lets us use 0 value when this flag
* is set.
*/
if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
params->spp_hbinterval = 0;
if (params->spp_hbinterval ||
(params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
if (trans) {
trans->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
} else if (asoc) {
asoc->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
} else {
sp->hbinterval = params->spp_hbinterval;
}
}
}
if (hb_change) {
if (trans) {
trans->param_flags =
(trans->param_flags & ~SPP_HB) | hb_change;
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_HB) | hb_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_HB) | hb_change;
}
}
/* When Path MTU discovery is disabled the value specified here will
* be the "fixed" path mtu (i.e. the value of the spp_flags field must
* include the flag SPP_PMTUD_DISABLE for this field to have any
* effect).
*/
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
if (trans) {
trans->pathmtu = params->spp_pathmtu;
sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
sctp_frag_point(asoc, params->spp_pathmtu);
} else {
sp->pathmtu = params->spp_pathmtu;
}
}
if (pmtud_change) {
if (trans) {
int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
(params->spp_flags & SPP_PMTUD_ENABLE);
trans->param_flags =
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
if (update) {
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
}
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
}
}
/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
* value of this field is ignored. Note also that a value of zero
* indicates the current setting should be left unchanged.
*/
if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
if (trans) {
trans->sackdelay =
msecs_to_jiffies(params->spp_sackdelay);
} else if (asoc) {
asoc->sackdelay =
msecs_to_jiffies(params->spp_sackdelay);
} else {
sp->sackdelay = params->spp_sackdelay;
}
}
if (sackdelay_change) {
if (trans) {
trans->param_flags =
(trans->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
}
}
/* Note that a value of zero indicates the current setting should be
* left unchanged.
*/
if (params->spp_pathmaxrxt) {
if (trans) {
trans->pathmaxrxt = params->spp_pathmaxrxt;
} else if (asoc) {
asoc->pathmaxrxt = params->spp_pathmaxrxt;
} else {
sp->pathmaxrxt = params->spp_pathmaxrxt;
}
}
return 0;
}
static int sctp_setsockopt_peer_addr_params(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_paddrparams params;
struct sctp_transport *trans = NULL;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
int error;
int hb_change, pmtud_change, sackdelay_change;
if (optlen != sizeof(struct sctp_paddrparams))
return -EINVAL;
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
/* Validate flags and value parameters. */
hb_change = params.spp_flags & SPP_HB;
pmtud_change = params.spp_flags & SPP_PMTUD;
sackdelay_change = params.spp_flags & SPP_SACKDELAY;
if (hb_change == SPP_HB ||
pmtud_change == SPP_PMTUD ||
sackdelay_change == SPP_SACKDELAY ||
params.spp_sackdelay > 500 ||
(params.spp_pathmtu &&
params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
return -EINVAL;
/* If an address other than INADDR_ANY is specified, and
* no transport is found, then the request is invalid.
*/
if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans)
return -EINVAL;
}
/* Get the association. If assoc_id != 0 on a one-to-many style
* socket and no association was found, then the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
/* Heartbeat demand can only be sent on a transport or
* association, but not a socket.
*/
if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
return -EINVAL;
/* Process parameters. */
error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
hb_change, pmtud_change,
sackdelay_change);
if (error)
return error;
/* If changes are for association, also apply parameters to each
* transport.
*/
if (!trans && asoc) {
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports) {
sctp_apply_peer_addr_params(&params, trans, asoc, sp,
hb_change, pmtud_change,
sackdelay_change);
}
}
return 0;
}
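/* Usage sketch (illustrative only; 'sd' and 'assoc_id' are hypothetical
 * caller values): enable heartbeats at a 5000 ms interval on every
 * address of the association by leaving spp_address zeroed (wildcard).
 *
 *   struct sctp_paddrparams pp;
 *   memset(&pp, 0, sizeof(pp));
 *   pp.spp_assoc_id = assoc_id;
 *   pp.spp_hbinterval = 5000;       // milliseconds
 *   pp.spp_flags = SPP_HB_ENABLE;   // never together with SPP_HB_DISABLE
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *                  &pp, sizeof(pp)) < 0)
 *       perror("SCTP_PEER_ADDR_PARAMS");
 */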
/*
* 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
*
* This option will affect the way delayed acks are performed. This
* option allows you to get or set the delayed ack time, in
* milliseconds. It also allows changing the delayed ack frequency.
* Changing the frequency to 1 disables the delayed sack algorithm. If
* the assoc_id is 0, then this sets or gets the endpoints default
* values. If the assoc_id field is non-zero, then the set or get
* affects the specified association for the one to many model (the
* assoc_id field is ignored by the one to one model). Note that if
* sack_delay or sack_freq are 0 when setting this option, then the
* current values will remain unchanged.
*
* struct sctp_sack_info {
* sctp_assoc_t sack_assoc_id;
* uint32_t sack_delay;
* uint32_t sack_freq;
* };
*
* sack_assoc_id - This parameter indicates which association the user
* is performing an action upon. Note that if this field's value is
* zero then the endpoint's default value is changed (affecting future
* associations only).
*
* sack_delay - This parameter contains the number of milliseconds that
* the user is requesting the delayed ACK timer be set to. Note that
* this value is defined in the standard to be between 200 and 500
* milliseconds.
*
* sack_freq - This parameter contains the number of packets that must
* be received before a sack is sent without waiting for the delay
* timer to expire. The default value for this is 2, setting this
* value to 1 will disable the delayed sack algorithm.
*/
static int sctp_setsockopt_delayed_ack(struct sock *sk,
char __user *optval, unsigned int optlen)
{
struct sctp_sack_info params;
struct sctp_transport *trans = NULL;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen == sizeof(struct sctp_sack_info)) {
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
if (params.sack_delay == 0 && params.sack_freq == 0)
return 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
pr_warn("Use struct sctp_sack_info instead\n");
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
if (params.sack_delay == 0)
params.sack_freq = 1;
else
params.sack_freq = 0;
} else
return -EINVAL;
/* Validate value parameter. */
if (params.sack_delay > 500)
return -EINVAL;
/* Get the association. If sack_assoc_id != 0 on a one-to-many style
* socket and no association was found, then the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (params.sack_delay) {
if (asoc) {
asoc->sackdelay =
msecs_to_jiffies(params.sack_delay);
asoc->param_flags =
(asoc->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
} else {
sp->sackdelay = params.sack_delay;
sp->param_flags =
(sp->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
}
}
if (params.sack_freq == 1) {
if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_DISABLE;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_DISABLE;
}
} else if (params.sack_freq > 1) {
if (asoc) {
asoc->sackfreq = params.sack_freq;
asoc->param_flags =
(asoc->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
} else {
sp->sackfreq = params.sack_freq;
sp->param_flags =
(sp->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
}
}
/* If change is for association, also apply to each transport. */
if (asoc) {
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports) {
if (params.sack_delay) {
trans->sackdelay =
msecs_to_jiffies(params.sack_delay);
trans->param_flags =
(trans->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
}
if (params.sack_freq == 1) {
trans->param_flags =
(trans->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_DISABLE;
} else if (params.sack_freq > 1) {
trans->sackfreq = params.sack_freq;
trans->param_flags =
(trans->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
}
}
}
return 0;
}
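/* Usage sketch (illustrative only; 'sd' is a hypothetical socket): set a
 * 200 ms SACK delay and a SACK frequency of 2 on the endpoint defaults
 * (sack_assoc_id == 0). Note the setter above rejects delays over 500 ms.
 *
 *   struct sctp_sack_info si;
 *   memset(&si, 0, sizeof(si));
 *   si.sack_delay = 200;   // milliseconds
 *   si.sack_freq = 2;      // 1 would disable delayed SACKs
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *                  &si, sizeof(si)) < 0)
 *       perror("SCTP_DELAYED_SACK");
 */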
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
*
* Applications can specify protocol parameters for the default association
* initialization. The option name argument to setsockopt() and getsockopt()
* is SCTP_INITMSG.
*
* Setting initialization parameters is effective only on an unconnected
* socket (for UDP-style sockets only future associations are affected
* by the change). With TCP-style sockets, this option is inherited by
* sockets derived from a listener socket.
*/
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_initmsg sinit;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen != sizeof(struct sctp_initmsg))
return -EINVAL;
if (copy_from_user(&sinit, optval, optlen))
return -EFAULT;
if (sinit.sinit_num_ostreams)
sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
if (sinit.sinit_max_instreams)
sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
if (sinit.sinit_max_attempts)
sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
if (sinit.sinit_max_init_timeo)
sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
return 0;
}
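/* Usage sketch (illustrative only; 'sd' is a hypothetical socket):
 * request ten outbound streams and advertise room for ten inbound ones
 * on future associations. Zeroed fields keep their current values, as
 * the setter above shows.
 *
 *   struct sctp_initmsg im;
 *   memset(&im, 0, sizeof(im));
 *   im.sinit_num_ostreams = 10;
 *   im.sinit_max_instreams = 10;
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im)) < 0)
 *       perror("SCTP_INITMSG");
 */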
/*
* 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
*
* Applications that wish to use the sendto() system call may wish to
* specify a default set of parameters that would normally be supplied
* through the inclusion of ancillary data. This socket option allows
* such an application to set the default sctp_sndrcvinfo structure.
* The application that wishes to use this socket option simply passes
* in to this call the sctp_sndrcvinfo structure defined in Section
* 5.2.2. The input parameters accepted by this call include
* sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
* sinfo_timetolive. The user must provide the sinfo_assoc_id field in
* to this call if the caller is using the UDP model.
*/
static int sctp_setsockopt_default_send_param(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_sndrcvinfo info;
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen != sizeof(struct sctp_sndrcvinfo))
return -EINVAL;
if (copy_from_user(&info, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
asoc->default_stream = info.sinfo_stream;
asoc->default_flags = info.sinfo_flags;
asoc->default_ppid = info.sinfo_ppid;
asoc->default_context = info.sinfo_context;
asoc->default_timetolive = info.sinfo_timetolive;
} else {
sp->default_stream = info.sinfo_stream;
sp->default_flags = info.sinfo_flags;
sp->default_ppid = info.sinfo_ppid;
sp->default_context = info.sinfo_context;
sp->default_timetolive = info.sinfo_timetolive;
}
return 0;
}
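/* Usage sketch (illustrative only; 'sd' and 'assoc_id' are hypothetical
 * caller values): make stream 1 the default for plain send()/sendto() on
 * an association, so no SCTP_SNDRCV ancillary data is needed per call.
 *
 *   struct sctp_sndrcvinfo di;
 *   memset(&di, 0, sizeof(di));
 *   di.sinfo_stream = 1;
 *   di.sinfo_assoc_id = assoc_id;   // needed for the UDP model
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *                  &di, sizeof(di)) < 0)
 *       perror("SCTP_DEFAULT_SEND_PARAM");
 */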
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
*
* Requests that the local SCTP stack use the enclosed peer address as
* the association primary. The enclosed address must be one of the
* association peer's addresses.
*/
static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_prim prim;
struct sctp_transport *trans;
if (optlen != sizeof(struct sctp_prim))
return -EINVAL;
if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
return -EFAULT;
trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
if (!trans)
return -EINVAL;
sctp_assoc_set_primary(trans->asoc, trans);
return 0;
}
/*
* 7.1.5 SCTP_NODELAY
*
* Turn on/off any Nagle-like algorithm. This means that packets are
* generally sent as soon as possible and no unnecessary delays are
* introduced, at the cost of more packets in the network. Expects an
* integer boolean flag.
*/
static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
unsigned int optlen)
{
int val;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
return 0;
}
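/* Usage sketch (illustrative only; 'sd' is a hypothetical socket):
 * disable the Nagle-like delay so small packets go out immediately.
 *
 *   int on = 1;
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
 *       perror("SCTP_NODELAY");
 */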
/*
*
* 7.1.1 SCTP_RTOINFO
*
* The protocol parameters used to initialize and bound retransmission
* timeout (RTO) are tunable. The sctp_rtoinfo structure is used to access
* and modify these parameters.
* All parameters are time values, in milliseconds. A value of 0, when
* modifying the parameters, indicates that the current value should not
* be changed.
*
*/
static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
if (optlen != sizeof (struct sctp_rtoinfo))
return -EINVAL;
if (copy_from_user(&rtoinfo, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
/* Set the values to the specific association */
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial =
msecs_to_jiffies(rtoinfo.srto_initial);
if (rtoinfo.srto_max != 0)
asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
if (rtoinfo.srto_min != 0)
asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
} else {
/* If there is no association or the association-id is 0,
* set the values on the endpoint.
*/
struct sctp_sock *sp = sctp_sk(sk);
if (rtoinfo.srto_initial != 0)
sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
if (rtoinfo.srto_max != 0)
sp->rtoinfo.srto_max = rtoinfo.srto_max;
if (rtoinfo.srto_min != 0)
sp->rtoinfo.srto_min = rtoinfo.srto_min;
}
return 0;
}
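/* Usage sketch (illustrative only; 'sd' is a hypothetical socket): bound
 * the RTO between 100 ms and 2000 ms on the endpoint, leaving
 * srto_initial at its current value by passing 0.
 *
 *   struct sctp_rtoinfo ri;
 *   memset(&ri, 0, sizeof(ri));
 *   ri.srto_min = 100;
 *   ri.srto_max = 2000;
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &ri, sizeof(ri)) < 0)
 *       perror("SCTP_RTOINFO");
 */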
/*
*
* 7.1.2 SCTP_ASSOCINFO
*
* This option is used to tune the maximum retransmission attempts
* of the association.
* Returns an error if the new association retransmission value is
* greater than the sum of the retransmission values of the peer's
* addresses.
* See [SCTP] for more information.
*
*/
static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_assocparams assocparams;
struct sctp_association *asoc;
if (optlen != sizeof(struct sctp_assocparams))
return -EINVAL;
if (copy_from_user(&assocparams, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
/* Set the values to the specific association */
if (asoc) {
if (assocparams.sasoc_asocmaxrxt != 0) {
__u32 path_sum = 0;
int paths = 0;
struct sctp_transport *peer_addr;
list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
transports) {
path_sum += peer_addr->pathmaxrxt;
paths++;
}
/* Only validate asocmaxrxt if we have more than
* one path/transport. We do this because path
* retransmissions are only counted when we have more
* than one path.
*/
if (paths > 1 &&
assocparams.sasoc_asocmaxrxt > path_sum)
return -EINVAL;
asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
}
if (assocparams.sasoc_cookie_life != 0) {
asoc->cookie_life.tv_sec =
assocparams.sasoc_cookie_life / 1000;
asoc->cookie_life.tv_usec =
(assocparams.sasoc_cookie_life % 1000)
* 1000;
}
} else {
/* Set the values to the endpoint */
struct sctp_sock *sp = sctp_sk(sk);
if (assocparams.sasoc_asocmaxrxt != 0)
sp->assocparams.sasoc_asocmaxrxt =
assocparams.sasoc_asocmaxrxt;
if (assocparams.sasoc_cookie_life != 0)
sp->assocparams.sasoc_cookie_life =
assocparams.sasoc_cookie_life;
}
return 0;
}
/*
* 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
*
* This socket option is a boolean flag which turns on or off mapped V4
* addresses. If this option is turned on and the socket is type
* PF_INET6, then IPv4 addresses will be mapped to V6 representation.
* If this option is turned off, then no mapping will be done of V4
* addresses and a user will receive both PF_INET6 and PF_INET type
* addresses on the socket.
*/
static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
{
int val;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
if (val)
sp->v4mapped = 1;
else
sp->v4mapped = 0;
return 0;
}
/*
* 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
* This option will get or set the maximum size to put in any outgoing
* SCTP DATA chunk. If a message is larger than this size it will be
* fragmented by SCTP into the specified size. Note that the underlying
* SCTP implementation may fragment into smaller sized chunks when the
* PMTU of the underlying association is smaller than the value set by
* the user. The default value for this option is '0' which indicates
* the user is NOT limiting fragmentation and only the PMTU will affect
* SCTP's choice of DATA chunk size. Note also that values set larger
* than the maximum size of an IP datagram will effectively let SCTP
* control fragmentation (i.e. the same as setting this option to 0).
*
* The following structure is used to access and modify this parameter:
*
* struct sctp_assoc_value {
* sctp_assoc_t assoc_id;
* uint32_t assoc_value;
* };
*
* assoc_id: This parameter is ignored for one-to-one style sockets.
* For one-to-many style sockets this parameter indicates which
* association the user is performing an action upon. Note that if
* this field's value is zero then the endpoint's default value is
* changed (affecting future associations only).
* assoc_value: This parameter specifies the maximum size in bytes.
*/
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
int val;
if (optlen == sizeof(int)) {
pr_warn("Use of int in maxseg socket option deprecated\n");
pr_warn("Use struct sctp_assoc_value instead\n");
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
params.assoc_id = 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
val = params.assoc_value;
} else
return -EINVAL;
if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
return -EINVAL;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
if (val == 0) {
val = asoc->pathmtu;
val -= sp->pf->af->net_header_len;
val -= sizeof(struct sctphdr) +
sizeof(struct sctp_data_chunk);
}
asoc->user_frag = val;
asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
} else {
sp->user_frag = val;
}
return 0;
}
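/* Usage sketch (illustrative only; 'sd' is a hypothetical socket): cap
 * outgoing DATA chunks at 1200 bytes for the endpoint (assoc_id == 0).
 * A value of 0 would defer entirely to the PMTU, as described above.
 *
 *   struct sctp_assoc_value av;
 *   memset(&av, 0, sizeof(av));
 *   av.assoc_value = 1200;
 *   if (setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av)) < 0)
 *       perror("SCTP_MAXSEG");
 */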
/*
* 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
*
* Requests that the peer mark the enclosed address as the association
* primary. The enclosed address must be one of the association's
* locally bound addresses. The following structure is used to make a
* set primary request:
*/
static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_association *asoc = NULL;
struct sctp_setpeerprim prim;
struct sctp_chunk *chunk;
struct sctp_af *af;
int err;
sp = sctp_sk(sk);
if (!net->sctp.addip_enable)
return -EPERM;
if (optlen != sizeof(struct sctp_setpeerprim))
return -EINVAL;
if (copy_from_user(&prim, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
if (!asoc)
return -EINVAL;
if (!asoc->peer.asconf_capable)
return -EPERM;
if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
return -EPERM;
if (!sctp_state(asoc, ESTABLISHED))
return -ENOTCONN;
af = sctp_get_af_specific(prim.sspp_addr.ss_family);
if (!af)
return -EINVAL;
if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
return -EADDRNOTAVAIL;
if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
return -EADDRNOTAVAIL;
/* Create an ASCONF chunk with SET_PRIMARY parameter */
chunk = sctp_make_asconf_set_prim(asoc,
(union sctp_addr *)&prim.sspp_addr);
if (!chunk)
return -ENOMEM;
err = sctp_send_asconf(asoc, chunk);
SCTP_DEBUG_PRINTK("We set peer primary addr primitively.\n");
return err;
}
static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_setadaptation adaptation;
if (optlen != sizeof(struct sctp_setadaptation))
return -EINVAL;
if (copy_from_user(&adaptation, optval, optlen))
return -EFAULT;
sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
return 0;
}
/*
* 7.1.29. Set or Get the default context (SCTP_CONTEXT)
*
* The context field in the sctp_sndrcvinfo structure is normally only
* used when a failed message is retrieved holding the value that was
* sent down on the actual send call. This option allows the setting of
* a default context on an association basis that will be received on
* reading messages from the peer. This is especially helpful in the
* one-2-many model for an application to keep some reference to an
* internal state machine that is processing messages on the
* association. Note that the setting of this value only affects
* received messages from the peer and does not affect the value that is
* saved with outbound messages.
*/
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_sock *sp;
struct sctp_association *asoc;
if (optlen != sizeof(struct sctp_assoc_value))
return -EINVAL;
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
sp = sctp_sk(sk);
if (params.assoc_id != 0) {
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc)
return -EINVAL;
asoc->default_rcv_context = params.assoc_value;
} else {
sp->default_rcv_context = params.assoc_value;
}
return 0;
}
/*
* 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
*
* This option will, at a minimum, specify if the implementation is doing
* fragmented interleave. Fragmented interleave, for a one to many
* socket, is when subsequent calls to receive a message may return
* parts of messages from different associations. Some implementations
* may allow you to turn this value on or off. If so, when turned off,
* no fragment interleave will occur (which will cause a head of line
* blocking amongst multiple associations sharing the same one to many
* socket). When this option is turned on, then each receive call may
* come from a different association (thus the user must receive data
* with the extended calls (e.g. sctp_recvmsg) to keep track of which
* association each receive belongs to).
*
* This option takes a boolean value. A non-zero value indicates that
* fragmented interleave is on. A value of zero indicates that
* fragmented interleave is off.
*
* Note that it is important that an implementation that allows this
* option to be turned on, have it off by default. Otherwise an unaware
* application using the one to many model may become confused and act
* incorrectly.
*/
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
int val;
if (optlen != sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
return 0;
}
/*
* 8.1.21. Set or Get the SCTP Partial Delivery Point
* (SCTP_PARTIAL_DELIVERY_POINT)
*
* This option will set or get the SCTP partial delivery point. This
* point is the size of a message where the partial delivery API will be
* invoked to help free up rwnd space for the peer. Setting this to a
* lower value will cause partial deliveries to happen more often. The
* calls argument is an integer that sets or gets the partial delivery
* point. Note also that the call will fail if the user attempts to set
* this value larger than the socket receive buffer size.
*
* Note that any single message having a length smaller than or equal to
* the SCTP partial delivery point will be delivered in one single read
* call as long as the user provided buffer is large enough to hold the
* message.
*/
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
u32 val;
if (optlen != sizeof(u32))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
/* Note: the receive buffer is double what the user sets it to be;
* also, the initial rwnd is based on rcvbuf/2.
*/
if (val > (sk->sk_rcvbuf >> 1))
return -EINVAL;
sctp_sk(sk)->pd_point = val;
return 0; /* is this the right error code? */
}
/*
* 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
*
* This option will allow a user to change the maximum burst of packets
* that can be emitted by this association. Note that the default value
* is 4, and some implementations may restrict this setting so that it
* can only be lowered.
*
* NOTE: This text doesn't seem right. Do this on a socket basis with
* future associations inheriting the socket value.
*/
static int sctp_setsockopt_maxburst(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_sock *sp;
struct sctp_association *asoc;
int val;
int assoc_id = 0;
if (optlen == sizeof(int)) {
pr_warn("Use of int in max_burst socket option deprecated\n");
pr_warn("Use struct sctp_assoc_value instead\n");
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
val = params.assoc_value;
assoc_id = params.assoc_id;
} else
return -EINVAL;
sp = sctp_sk(sk);
if (assoc_id != 0) {
asoc = sctp_id2assoc(sk, assoc_id);
if (!asoc)
return -EINVAL;
asoc->max_burst = val;
} else
sp->max_burst = val;
return 0;
}
/*
* 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
*
* This set option adds a chunk type that the user is requesting to be
* received only in an authenticated way. Changes to the list of chunks
* will only affect future associations on the socket.
*/
static int sctp_setsockopt_auth_chunk(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct net *net = sock_net(sk);
struct sctp_authchunk val;
if (!net->sctp.auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authchunk))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
switch (val.sauth_chunk) {
case SCTP_CID_INIT:
case SCTP_CID_INIT_ACK:
case SCTP_CID_SHUTDOWN_COMPLETE:
case SCTP_CID_AUTH:
return -EINVAL;
}
/* add this chunk id to the endpoint */
return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk);
}
/*
* 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
*
* This option gets or sets the list of HMAC algorithms that the local
* endpoint requires the peer to use.
*/
static int sctp_setsockopt_hmac_ident(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct net *net = sock_net(sk);
struct sctp_hmacalgo *hmacs;
u32 idents;
int err;
if (!net->sctp.auth_enable)
return -EACCES;
if (optlen < sizeof(struct sctp_hmacalgo))
return -EINVAL;
hmacs = memdup_user(optval, optlen);
if (IS_ERR(hmacs))
return PTR_ERR(hmacs);
idents = hmacs->shmac_num_idents;
if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
(idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
err = -EINVAL;
goto out;
}
err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs);
out:
kfree(hmacs);
return err;
}
/*
* 7.1.20. Set a shared key (SCTP_AUTH_KEY)
*
* This option will set a shared secret key which is used to build an
* association shared key.
*/
static int sctp_setsockopt_auth_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct net *net = sock_net(sk);
struct sctp_authkey *authkey;
struct sctp_association *asoc;
int ret;
if (!net->sctp.auth_enable)
return -EACCES;
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
authkey = memdup_user(optval, optlen);
if (IS_ERR(authkey))
return PTR_ERR(authkey);
if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
ret = -EINVAL;
goto out;
}
asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
ret = -EINVAL;
goto out;
}
ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
out:
kzfree(authkey);
return ret;
}
/*
* 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
*
* This option will get or set the active shared key to be used to build
* the association shared key.
*/
static int sctp_setsockopt_active_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct net *net = sock_net(sk);
struct sctp_authkeyid val;
struct sctp_association *asoc;
if (!net->sctp.auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc,
val.scact_keynumber);
}
/*
* 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
*
* This set option will delete a shared secret key from use.
*/
static int sctp_setsockopt_del_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct net *net = sock_net(sk);
struct sctp_authkeyid val;
struct sctp_association *asoc;
if (!net->sctp.auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc,
val.scact_keynumber);
}
/*
* 8.1.23 SCTP_AUTO_ASCONF
*
* This option will enable or disable the use of the automatic generation of
* ASCONF chunks to add and delete addresses to an existing association. Note
* that this option has two caveats namely: a) it only affects sockets that
* are bound to all addresses available to the SCTP stack, and b) the system
* administrator may have an overriding control that turns the ASCONF feature
* off no matter what setting the socket option may have.
* This option expects an integer boolean flag, where a non-zero value turns on
* the option, and a zero value turns off the option.
* Note: in this implementation the socket option overrides the default
* parameter set by sysctl, as does the FreeBSD implementation.
*/
static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
unsigned int optlen)
{
int val;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
if (!sctp_is_ep_boundall(sk) && val)
return -EINVAL;
if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
return 0;
if (val == 0 && sp->do_auto_asconf) {
list_del(&sp->auto_asconf_list);
sp->do_auto_asconf = 0;
} else if (val && !sp->do_auto_asconf) {
list_add_tail(&sp->auto_asconf_list,
&sock_net(sk)->sctp.auto_asconf_splist);
sp->do_auto_asconf = 1;
}
return 0;
}
/*
* SCTP_PEER_ADDR_THLDS
*
* This option allows us to alter the partially failed threshold for one or all
* transports in an association. See Section 6.1 of:
* http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
*/
static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_paddrthlds val;
struct sctp_transport *trans;
struct sctp_association *asoc;
if (optlen < sizeof(struct sctp_paddrthlds))
return -EINVAL;
if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
sizeof(struct sctp_paddrthlds)))
return -EFAULT;
if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
asoc = sctp_id2assoc(sk, val.spt_assoc_id);
if (!asoc)
return -ENOENT;
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports) {
if (val.spt_pathmaxrxt)
trans->pathmaxrxt = val.spt_pathmaxrxt;
trans->pf_retrans = val.spt_pathpfthld;
}
if (val.spt_pathmaxrxt)
asoc->pathmaxrxt = val.spt_pathmaxrxt;
asoc->pf_retrans = val.spt_pathpfthld;
} else {
trans = sctp_addr_id2transport(sk, &val.spt_address,
val.spt_assoc_id);
if (!trans)
return -ENOENT;
if (val.spt_pathmaxrxt)
trans->pathmaxrxt = val.spt_pathmaxrxt;
trans->pf_retrans = val.spt_pathpfthld;
}
return 0;
}
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
* socket options. Socket options are used to change the default
* behavior of sockets calls. They are described in Section 7.
*
* The syntax is:
*
* ret = getsockopt(int sd, int level, int optname, void __user *optval,
* int __user *optlen);
* ret = setsockopt(int sd, int level, int optname, const void __user *optval,
* int optlen);
*
* sd - the socket descriptor.
* level - set to IPPROTO_SCTP for all SCTP options.
* optname - the option name.
* optval - the buffer to store the value of the option.
* optlen - the size of the buffer.
*/
SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
int retval = 0;
SCTP_DEBUG_PRINTK("sctp_setsockopt(sk: %p... optname: %d)\n",
sk, optname);
/* I can hardly begin to describe how wrong this is. This is
* so broken as to be worse than useless. The API draft
* REALLY is NOT helpful here... I am not convinced that the
* semantics of setsockopt() with a level OTHER THAN SOL_SCTP
* are at all well-founded.
*/
if (level != SOL_SCTP) {
struct sctp_af *af = sctp_sk(sk)->pf->af;
retval = af->setsockopt(sk, level, optname, optval, optlen);
goto out_nounlock;
}
sctp_lock_sock(sk);
switch (optname) {
case SCTP_SOCKOPT_BINDX_ADD:
/* 'optlen' is the size of the addresses buffer. */
retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
optlen, SCTP_BINDX_ADD_ADDR);
break;
case SCTP_SOCKOPT_BINDX_REM:
/* 'optlen' is the size of the addresses buffer. */
retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
optlen, SCTP_BINDX_REM_ADDR);
break;
case SCTP_SOCKOPT_CONNECTX_OLD:
/* 'optlen' is the size of the addresses buffer. */
retval = sctp_setsockopt_connectx_old(sk,
(struct sockaddr __user *)optval,
optlen);
break;
case SCTP_SOCKOPT_CONNECTX:
/* 'optlen' is the size of the addresses buffer. */
retval = sctp_setsockopt_connectx(sk,
(struct sockaddr __user *)optval,
optlen);
break;
case SCTP_DISABLE_FRAGMENTS:
retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
break;
case SCTP_EVENTS:
retval = sctp_setsockopt_events(sk, optval, optlen);
break;
case SCTP_AUTOCLOSE:
retval = sctp_setsockopt_autoclose(sk, optval, optlen);
break;
case SCTP_PEER_ADDR_PARAMS:
retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
break;
case SCTP_DELAYED_SACK:
retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
break;
case SCTP_PARTIAL_DELIVERY_POINT:
retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
break;
case SCTP_INITMSG:
retval = sctp_setsockopt_initmsg(sk, optval, optlen);
break;
case SCTP_DEFAULT_SEND_PARAM:
retval = sctp_setsockopt_default_send_param(sk, optval,
optlen);
break;
case SCTP_PRIMARY_ADDR:
retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
break;
case SCTP_SET_PEER_PRIMARY_ADDR:
retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
break;
case SCTP_NODELAY:
retval = sctp_setsockopt_nodelay(sk, optval, optlen);
break;
case SCTP_RTOINFO:
retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
break;
case SCTP_ASSOCINFO:
retval = sctp_setsockopt_associnfo(sk, optval, optlen);
break;
case SCTP_I_WANT_MAPPED_V4_ADDR:
retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
break;
case SCTP_MAXSEG:
retval = sctp_setsockopt_maxseg(sk, optval, optlen);
break;
case SCTP_ADAPTATION_LAYER:
retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
break;
case SCTP_CONTEXT:
retval = sctp_setsockopt_context(sk, optval, optlen);
break;
case SCTP_FRAGMENT_INTERLEAVE:
retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
break;
case SCTP_MAX_BURST:
retval = sctp_setsockopt_maxburst(sk, optval, optlen);
break;
case SCTP_AUTH_CHUNK:
retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
break;
case SCTP_HMAC_IDENT:
retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
break;
case SCTP_AUTH_KEY:
retval = sctp_setsockopt_auth_key(sk, optval, optlen);
break;
case SCTP_AUTH_ACTIVE_KEY:
retval = sctp_setsockopt_active_key(sk, optval, optlen);
break;
case SCTP_AUTH_DELETE_KEY:
retval = sctp_setsockopt_del_key(sk, optval, optlen);
break;
case SCTP_AUTO_ASCONF:
retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
break;
case SCTP_PEER_ADDR_THLDS:
retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
}
sctp_release_sock(sk);
out_nounlock:
return retval;
}
/* API 3.1.6 connect() - UDP Style Syntax
*
* An application may use the connect() call in the UDP model to initiate an
* association without sending data.
*
* The syntax is:
*
* ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
*
* sd: the socket descriptor to have a new association added to.
*
* nam: the address structure (either struct sockaddr_in or struct
* sockaddr_in6 defined in RFC2553 [7]).
*
* len: the size of the address.
*/
SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
int addr_len)
{
int err = 0;
struct sctp_af *af;
sctp_lock_sock(sk);
SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n",
__func__, sk, addr, addr_len);
/* Validate addr_len before calling common connect/connectx routine. */
af = sctp_get_af_specific(addr->sa_family);
if (!af || addr_len < af->sockaddr_len) {
err = -EINVAL;
} else {
/* Pass correct addr len to common routine (so it knows there
* is only one address being passed).
*/
err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
}
sctp_release_sock(sk);
return err;
}
/* FIXME: Write comments. */
SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
{
return -EOPNOTSUPP; /* STUB */
}
/* 4.1.4 accept() - TCP Style Syntax
*
* Applications use accept() call to remove an established SCTP
* association from the accept queue of the endpoint. A new socket
* descriptor will be returned from accept() to represent the newly
* formed association.
*/
SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
{
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sock *newsk = NULL;
struct sctp_association *asoc;
long timeo;
int error = 0;
sctp_lock_sock(sk);
sp = sctp_sk(sk);
ep = sp->ep;
if (!sctp_style(sk, TCP)) {
error = -EOPNOTSUPP;
goto out;
}
if (!sctp_sstate(sk, LISTENING)) {
error = -EINVAL;
goto out;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
error = sctp_wait_for_accept(sk, timeo);
if (error)
goto out;
/* We treat the list of associations on the endpoint as the accept
* queue and pick the first association on the list.
*/
asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
newsk = sp->pf->create_accept_sk(sk, asoc);
if (!newsk) {
error = -ENOMEM;
goto out;
}
/* Populate the fields of the newsk from the oldsk and migrate the
* asoc to the newsk.
*/
sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
out:
sctp_release_sock(sk);
*err = error;
return newsk;
}
/* The SCTP ioctl handler. */
SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
int rc = -ENOTCONN;
sctp_lock_sock(sk);
/*
* SEQPACKET-style sockets in LISTENING state are valid, for
* SCTP, so only discard TCP-style sockets in LISTENING state.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
goto out;
switch (cmd) {
case SIOCINQ: {
struct sk_buff *skb;
unsigned int amount = 0;
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL) {
/*
* We will only return the amount of this packet since
* that is all that will be read.
*/
amount = skb->len;
}
rc = put_user(amount, (int __user *)arg);
break;
}
default:
rc = -ENOIOCTLCMD;
break;
}
out:
sctp_release_sock(sk);
return rc;
}
/* This is the function which gets called during socket creation to
* initialize the SCTP-specific portion of the sock.
* The sock structure should already be zero-filled memory.
*/
SCTP_STATIC int sctp_init_sock(struct sock *sk)
{
struct net *net = sock_net(sk);
struct sctp_endpoint *ep;
struct sctp_sock *sp;
SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk);
sp = sctp_sk(sk);
/* Initialize the SCTP per socket area. */
switch (sk->sk_type) {
case SOCK_SEQPACKET:
sp->type = SCTP_SOCKET_UDP;
break;
case SOCK_STREAM:
sp->type = SCTP_SOCKET_TCP;
break;
default:
return -ESOCKTNOSUPPORT;
}
/* Initialize default send parameters. These parameters can be
* modified with the SCTP_DEFAULT_SEND_PARAM socket option.
*/
sp->default_stream = 0;
sp->default_ppid = 0;
sp->default_flags = 0;
sp->default_context = 0;
sp->default_timetolive = 0;
sp->default_rcv_context = 0;
sp->max_burst = net->sctp.max_burst;
sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
/* Initialize default setup parameters. These parameters
* can be modified with the SCTP_INITMSG socket option or
* overridden by the SCTP_INIT CMSG.
*/
sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
sp->initmsg.sinit_max_instreams = sctp_max_instreams;
sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
/* Initialize default RTO related parameters. These parameters can
* be modified for with the SCTP_RTOINFO socket option.
*/
sp->rtoinfo.srto_initial = net->sctp.rto_initial;
sp->rtoinfo.srto_max = net->sctp.rto_max;
sp->rtoinfo.srto_min = net->sctp.rto_min;
/* Initialize default association related parameters. These parameters
* can be modified with the SCTP_ASSOCINFO socket option.
*/
sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
sp->assocparams.sasoc_number_peer_destinations = 0;
sp->assocparams.sasoc_peer_rwnd = 0;
sp->assocparams.sasoc_local_rwnd = 0;
sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
/* Initialize default event subscriptions. By default, all the
* options are off.
*/
memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
/* Default Peer Address Parameters. These defaults can
* be modified via SCTP_PEER_ADDR_PARAMS
*/
sp->hbinterval = net->sctp.hb_interval;
sp->pathmaxrxt = net->sctp.max_retrans_path;
sp->pathmtu = 0; /* allow default discovery */
sp->sackdelay = net->sctp.sack_timeout;
sp->sackfreq = 2;
sp->param_flags = SPP_HB_ENABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
/* If enabled no SCTP message fragmentation will be performed.
* Configure through SCTP_DISABLE_FRAGMENTS socket option.
*/
sp->disable_fragments = 0;
/* Enable Nagle algorithm by default. */
sp->nodelay = 0;
/* Enable by default. */
sp->v4mapped = 1;
/* Auto-close idle associations after the configured
* number of seconds. A value of 0 disables this
* feature. Configure through the SCTP_AUTOCLOSE socket option,
* for UDP-style sockets only.
*/
sp->autoclose = 0;
/* User specified fragmentation limit. */
sp->user_frag = 0;
sp->adaptation_ind = 0;
sp->pf = sctp_get_pf_specific(sk->sk_family);
/* Control variables for partial data delivery. */
atomic_set(&sp->pd_mode, 0);
skb_queue_head_init(&sp->pd_lobby);
sp->frag_interleave = 0;
/* Create a per socket endpoint structure. Even if we
* change the data structure relationships, this may still
* be useful for storing pre-connect address information.
*/
ep = sctp_endpoint_new(sk, GFP_KERNEL);
if (!ep)
return -ENOMEM;
sp->ep = ep;
sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock);
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(net, sk->sk_prot, 1);
if (net->sctp.default_auto_asconf) {
list_add_tail(&sp->auto_asconf_list,
&net->sctp.auto_asconf_splist);
sp->do_auto_asconf = 1;
} else
sp->do_auto_asconf = 0;
local_bh_enable();
return 0;
}
/* Cleanup any SCTP per socket resources. */
SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
{
struct sctp_sock *sp;
SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);
/* Release our hold on the endpoint. */
sp = sctp_sk(sk);
if (sp->do_auto_asconf) {
sp->do_auto_asconf = 0;
list_del(&sp->auto_asconf_list);
}
sctp_endpoint_free(sp->ep);
local_bh_disable();
percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
/* API 4.1.7 shutdown() - TCP Style Syntax
* int shutdown(int socket, int how);
*
* sd - the socket descriptor of the association to be closed.
* how - Specifies the type of shutdown. The values are
* as follows:
* SHUT_RD
* Disables further receive operations. No SCTP
* protocol action is taken.
* SHUT_WR
* Disables further send operations, and initiates
* the SCTP shutdown sequence.
* SHUT_RDWR
* Disables further send and receive operations
* and initiates the SCTP shutdown sequence.
*/
SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
{
struct net *net = sock_net(sk);
struct sctp_endpoint *ep;
struct sctp_association *asoc;
if (!sctp_style(sk, TCP))
return;
if (how & SEND_SHUTDOWN) {
ep = sctp_sk(sk)->ep;
if (!list_empty(&ep->asocs)) {
asoc = list_entry(ep->asocs.next,
struct sctp_association, asocs);
sctp_primitive_SHUTDOWN(net, asoc, NULL);
}
}
}
/* 7.2.1 Association Status (SCTP_STATUS)
* Applications can retrieve current status information about an
* association, including association state, peer receiver window size,
* number of unacked data chunks, and number of data chunks pending
* receipt. This information is read-only.
*/
static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_status status;
struct sctp_association *asoc = NULL;
struct sctp_transport *transport;
sctp_assoc_t associd;
int retval = 0;
if (len < sizeof(status)) {
retval = -EINVAL;
goto out;
}
len = sizeof(status);
if (copy_from_user(&status, optval, len)) {
retval = -EFAULT;
goto out;
}
associd = status.sstat_assoc_id;
asoc = sctp_id2assoc(sk, associd);
if (!asoc) {
retval = -EINVAL;
goto out;
}
transport = asoc->peer.primary_path;
status.sstat_assoc_id = sctp_assoc2id(asoc);
status.sstat_state = asoc->state;
status.sstat_rwnd = asoc->peer.rwnd;
status.sstat_unackdata = asoc->unack_data;
status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
status.sstat_instrms = asoc->c.sinit_max_instreams;
status.sstat_outstrms = asoc->c.sinit_num_ostreams;
status.sstat_fragmentation_point = asoc->frag_point;
status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
transport->af_specific->sockaddr_len);
/* Map ipv4 address into v4-mapped-on-v6 address. */
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
(union sctp_addr *)&status.sstat_primary.spinfo_address);
status.sstat_primary.spinfo_state = transport->state;
status.sstat_primary.spinfo_cwnd = transport->cwnd;
status.sstat_primary.spinfo_srtt = transport->srtt;
status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
status.sstat_primary.spinfo_mtu = transport->pathmtu;
if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
status.sstat_primary.spinfo_state = SCTP_ACTIVE;
if (put_user(len, optlen)) {
retval = -EFAULT;
goto out;
}
SCTP_DEBUG_PRINTK("sctp_getsockopt_sctp_status(%d): %d %d %d\n",
len, status.sstat_state, status.sstat_rwnd,
status.sstat_assoc_id);
if (copy_to_user(optval, &status, len)) {
retval = -EFAULT;
goto out;
}
out:
return retval;
}
/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
*
* Applications can retrieve information about a specific peer address
* of an association, including its reachability state, congestion
* window, and retransmission timer values. This information is
* read-only.
*/
static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_paddrinfo pinfo;
struct sctp_transport *transport;
int retval = 0;
if (len < sizeof(pinfo)) {
retval = -EINVAL;
goto out;
}
len = sizeof(pinfo);
if (copy_from_user(&pinfo, optval, len)) {
retval = -EFAULT;
goto out;
}
transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
pinfo.spinfo_assoc_id);
if (!transport)
return -EINVAL;
pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
pinfo.spinfo_state = transport->state;
pinfo.spinfo_cwnd = transport->cwnd;
pinfo.spinfo_srtt = transport->srtt;
pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
pinfo.spinfo_mtu = transport->pathmtu;
if (pinfo.spinfo_state == SCTP_UNKNOWN)
pinfo.spinfo_state = SCTP_ACTIVE;
if (put_user(len, optlen)) {
retval = -EFAULT;
goto out;
}
if (copy_to_user(optval, &pinfo, len)) {
retval = -EFAULT;
goto out;
}
out:
return retval;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
*
* This option is an on/off flag. If enabled, no SCTP message
* fragmentation will be performed. Instead, if a message being sent
* exceeds the current PMTU size, the message will NOT be sent and
* an error will be indicated to the user.
*/
static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = (sctp_sk(sk)->disable_fragments == 1);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
*
* This socket option is used to specify various notifications and
* ancillary data the user wishes to receive.
*/
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
if (len <= 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
return -EFAULT;
return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
*
* This socket option is applicable to the UDP-style socket only. When
* set it will cause associations that are idle for more than the
* specified number of seconds to automatically close. An association
 * being idle is defined as an association that has NOT sent or received
* user data. The special value of '0' indicates that no automatic
* close of any associations should be performed. The option expects an
* integer defining the number of seconds of idle time before an
* association is closed.
*/
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
return -EFAULT;
return 0;
}
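/* Example: reading SCTP_AUTOCLOSE back from userspace. A minimal sketch;
 * `fd` is assumed to be a one-to-many (SOCK_SEQPACKET) socket, since the
 * code above rejects TCP-style sockets with EOPNOTSUPP:
 *
 *	int secs;
 *	socklen_t optlen = sizeof(secs);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, &optlen) == 0)
 *		printf("idle associations close after %d seconds (0 = never)\n",
 *		       secs);
 */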
/* Helper routine to branch off an association to a new socket. */
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
struct sctp_association *asoc = sctp_id2assoc(sk, id);
struct socket *sock;
struct sctp_af *af;
int err = 0;
if (!asoc)
return -EINVAL;
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
if (!sctp_style(sk, UDP))
return -EINVAL;
/* Create a new socket. */
err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
if (err < 0)
return err;
sctp_copy_sock(sock->sk, sk, asoc);
/* Make peeled-off sockets more like 1-1 accepted sockets.
* Set the daddr and initialize id to something more random
*/
af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family);
af->to_sk_daddr(&asoc->peer.primary_addr, sk);
/* Populate the fields of the newsk from the oldsk and migrate the
* asoc to the newsk.
*/
sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
*sockp = sock;
return err;
}
EXPORT_SYMBOL(sctp_do_peeloff);
static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
sctp_peeloff_arg_t peeloff;
struct socket *newsock;
struct file *newfile;
int retval = 0;
if (len < sizeof(sctp_peeloff_arg_t))
return -EINVAL;
len = sizeof(sctp_peeloff_arg_t);
if (copy_from_user(&peeloff, optval, len))
return -EFAULT;
retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
if (retval < 0)
goto out;
/* Map the socket to an unused fd that can be returned to the user. */
retval = get_unused_fd();
if (retval < 0) {
sock_release(newsock);
goto out;
}
newfile = sock_alloc_file(newsock, 0, NULL);
if (unlikely(IS_ERR(newfile))) {
put_unused_fd(retval);
sock_release(newsock);
return PTR_ERR(newfile);
}
SCTP_DEBUG_PRINTK("%s: sk: %p newsk: %p sd: %d\n",
__func__, sk, newsock->sk, retval);
/* Return the fd mapped to the new socket. */
if (put_user(len, optlen)) {
fput(newfile);
put_unused_fd(retval);
return -EFAULT;
}
peeloff.sd = retval;
if (copy_to_user(optval, &peeloff, len)) {
fput(newfile);
put_unused_fd(retval);
return -EFAULT;
}
fd_install(retval, newfile);
out:
return retval;
}
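/* Example: peeling off an association from userspace. A minimal sketch;
 * most applications call sctp_peeloff() from lksctp-tools, which wraps
 * this getsockopt. `fd` is assumed to be a one-to-many socket and `id` a
 * valid association id; on success, peeloff.sd receives the descriptor of
 * the new one-to-one socket:
 *
 *	sctp_peeloff_arg_t peeloff = { .associd = id, .sd = 0 };
 *	socklen_t optlen = sizeof(peeloff);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF,
 *		       &peeloff, &optlen) == 0)
 *		printf("peeled-off socket fd: %d\n", peeloff.sd);
 */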
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
*
* Applications can enable or disable heartbeats for any peer address of
* an association, modify an address's heartbeat interval, force a
* heartbeat to be sent immediately, and adjust the address's maximum
* number of retransmissions sent before an address is considered
* unreachable. The following structure is used to access and modify an
* address's parameters:
*
* struct sctp_paddrparams {
* sctp_assoc_t spp_assoc_id;
* struct sockaddr_storage spp_address;
* uint32_t spp_hbinterval;
* uint16_t spp_pathmaxrxt;
* uint32_t spp_pathmtu;
* uint32_t spp_sackdelay;
* uint32_t spp_flags;
* };
*
 * spp_assoc_id - (one-to-many style socket) This is filled in by the
* application, and identifies the association for
* this query.
* spp_address - This specifies which address is of interest.
* spp_hbinterval - This contains the value of the heartbeat interval,
* in milliseconds. If a value of zero
* is present in this field then no changes are to
* be made to this parameter.
* spp_pathmaxrxt - This contains the maximum number of
* retransmissions before this address shall be
* considered unreachable. If a value of zero
* is present in this field then no changes are to
* be made to this parameter.
* spp_pathmtu - When Path MTU discovery is disabled the value
* specified here will be the "fixed" path mtu.
* Note that if the spp_address field is empty
* then all associations on this address will
* have this fixed path mtu set upon them.
*
* spp_sackdelay - When delayed sack is enabled, this value specifies
* the number of milliseconds that sacks will be delayed
* for. This value will apply to all addresses of an
* association if the spp_address field is empty. Note
* also, that if delayed sack is enabled and this
* value is set to 0, no change is made to the last
* recorded delayed sack timer value.
*
* spp_flags - These flags are used to control various features
* on an association. The flag field may contain
* zero or more of the following options.
*
* SPP_HB_ENABLE - Enable heartbeats on the
* specified address. Note that if the address
* field is empty all addresses for the association
* have heartbeats enabled upon them.
*
* SPP_HB_DISABLE - Disable heartbeats on the
 * specified address. Note that if the address
* field is empty all addresses for the association
* will have their heartbeats disabled. Note also
* that SPP_HB_ENABLE and SPP_HB_DISABLE are
* mutually exclusive, only one of these two should
* be specified. Enabling both fields will have
* undetermined results.
*
* SPP_HB_DEMAND - Request a user initiated heartbeat
* to be made immediately.
*
* SPP_PMTUD_ENABLE - This field will enable PMTU
* discovery upon the specified address. Note that
 * if the address field is empty then all addresses
 * on the association are affected.
*
* SPP_PMTUD_DISABLE - This field will disable PMTU
* discovery upon the specified address. Note that
 * if the address field is empty then all addresses
 * on the association are affected. Note also that
* SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
* exclusive. Enabling both will have undetermined
* results.
*
* SPP_SACKDELAY_ENABLE - Setting this flag turns
* on delayed sack. The time specified in spp_sackdelay
* is used to specify the sack delay for this address. Note
* that if spp_address is empty then all addresses will
* enable delayed sack and take on the sack delay
* value specified in spp_sackdelay.
* SPP_SACKDELAY_DISABLE - Setting this flag turns
* off delayed sack. If the spp_address field is blank then
* delayed sack is disabled for the entire association. Note
* also that this field is mutually exclusive to
* SPP_SACKDELAY_ENABLE, setting both will have undefined
* results.
*/
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_paddrparams params;
struct sctp_transport *trans = NULL;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
if (len < sizeof(struct sctp_paddrparams))
return -EINVAL;
len = sizeof(struct sctp_paddrparams);
	if (copy_from_user(&params, optval, len))
return -EFAULT;
/* If an address other than INADDR_ANY is specified, and
* no transport is found, then the request is invalid.
*/
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans) {
SCTP_DEBUG_PRINTK("Failed no transport\n");
return -EINVAL;
}
}
/* Get association, if assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
SCTP_DEBUG_PRINTK("Failed no association\n");
return -EINVAL;
}
if (trans) {
/* Fetch transport values. */
params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
params.spp_pathmtu = trans->pathmtu;
params.spp_pathmaxrxt = trans->pathmaxrxt;
params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = trans->param_flags;
} else if (asoc) {
/* Fetch association values. */
params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
params.spp_pathmtu = asoc->pathmtu;
params.spp_pathmaxrxt = asoc->pathmaxrxt;
params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = asoc->param_flags;
} else {
/* Fetch socket values. */
params.spp_hbinterval = sp->hbinterval;
params.spp_pathmtu = sp->pathmtu;
params.spp_sackdelay = sp->sackdelay;
params.spp_pathmaxrxt = sp->pathmaxrxt;
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = sp->param_flags;
}
	if (copy_to_user(optval, &params, len))
return -EFAULT;
if (put_user(len, optlen))
return -EFAULT;
return 0;
}
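/* Example: fetching heartbeat parameters for one peer address. A minimal
 * sketch; `peer` is a hypothetical struct sockaddr_in holding one of the
 * association's peer addresses (an all-zero spp_address would return the
 * association or endpoint values instead, as coded above):
 *
 *	struct sctp_paddrparams params;
 *	socklen_t optlen = sizeof(params);
 *
 *	memset(&params, 0, sizeof(params));
 *	memcpy(&params.spp_address, &peer, sizeof(peer));
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &params, &optlen) == 0)
 *		printf("hbinterval=%u ms pathmaxrxt=%u\n",
 *		       params.spp_hbinterval, params.spp_pathmaxrxt);
 */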
/*
* 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
*
 * This option will affect the way delayed acks are performed. This
 * option allows you to get or set the delayed ack time, in
 * milliseconds. It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm. If
 * the assoc_id is 0, then this sets or gets the endpoint's default
 * values. If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model). Note that if
* sack_delay or sack_freq are 0 when setting this option, then the
* current values will remain unchanged.
*
* struct sctp_sack_info {
* sctp_assoc_t sack_assoc_id;
* uint32_t sack_delay;
* uint32_t sack_freq;
* };
*
 * sack_assoc_id - This parameter indicates which association the user
 * is performing an action upon. Note that if this field's value is
 * zero then the endpoint's default value is changed (affecting future
 * associations only).
*
* sack_delay - This parameter contains the number of milliseconds that
* the user is requesting the delayed ACK timer be set to. Note that
* this value is defined in the standard to be between 200 and 500
* milliseconds.
*
* sack_freq - This parameter contains the number of packets that must
* be received before a sack is sent without waiting for the delay
 * timer to expire. The default value for this is 2; setting this
* value to 1 will disable the delayed sack algorithm.
*/
static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_sack_info params;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
if (len >= sizeof(struct sctp_sack_info)) {
len = sizeof(struct sctp_sack_info);
		if (copy_from_user(&params, optval, len))
return -EFAULT;
} else if (len == sizeof(struct sctp_assoc_value)) {
pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
pr_warn("Use struct sctp_sack_info instead\n");
		if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
		return -EINVAL;
/* Get association, if sack_assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
/* Fetch association values. */
if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
params.sack_delay = jiffies_to_msecs(
asoc->sackdelay);
params.sack_freq = asoc->sackfreq;
} else {
params.sack_delay = 0;
params.sack_freq = 1;
}
} else {
/* Fetch socket values. */
if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
params.sack_delay = sp->sackdelay;
params.sack_freq = sp->sackfreq;
} else {
params.sack_delay = 0;
params.sack_freq = 1;
}
}
	if (copy_to_user(optval, &params, len))
return -EFAULT;
if (put_user(len, optlen))
return -EFAULT;
return 0;
}
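/* Example: reading the delayed SACK settings with the preferred
 * struct sctp_sack_info form. A minimal sketch; a sack_assoc_id of 0
 * queries the endpoint defaults on a one-to-many socket:
 *
 *	struct sctp_sack_info info = { .sack_assoc_id = 0 };
 *	socklen_t optlen = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *		       &info, &optlen) == 0)
 *		printf("delay=%u ms freq=%u\n", info.sack_delay,
 *		       info.sack_freq);
 */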
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
*
* Applications can specify protocol parameters for the default association
* initialization. The option name argument to setsockopt() and getsockopt()
* is SCTP_INITMSG.
*
* Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
* by the change). With TCP-style sockets, this option is inherited by
* sockets derived from a listener socket.
*/
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
if (len < sizeof(struct sctp_initmsg))
return -EINVAL;
len = sizeof(struct sctp_initmsg);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_association *asoc;
int cnt = 0;
struct sctp_getaddrs getaddrs;
struct sctp_transport *from;
void __user *to;
union sctp_addr temp;
struct sctp_sock *sp = sctp_sk(sk);
int addrlen;
size_t space_left;
int bytes_copied;
if (len < sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
return -EFAULT;
/* For UDP-style sockets, id specifies the association to query. */
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
to = optval + offsetof(struct sctp_getaddrs,addrs);
space_left = len - offsetof(struct sctp_getaddrs,addrs);
list_for_each_entry(from, &asoc->peer.transport_addr_list,
transports) {
memcpy(&temp, &from->ipaddr, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen)
return -ENOMEM;
if (copy_to_user(to, &temp, addrlen))
return -EFAULT;
to += addrlen;
cnt++;
space_left -= addrlen;
}
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
return -EFAULT;
bytes_copied = ((char __user *)to) - optval;
if (put_user(bytes_copied, optlen))
return -EFAULT;
return 0;
}
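/* Example: retrieving the packed peer address list. A minimal sketch; the
 * caller must size the buffer for struct sctp_getaddrs plus the packed
 * sockaddrs (16 entries is an arbitrary guess here, and lksctp-tools'
 * sctp_getpaddrs() hides this dance). walk_addrs() is hypothetical
 * application code:
 *
 *	char buf[sizeof(struct sctp_getaddrs) +
 *		 16 * sizeof(struct sockaddr_storage)];
 *	struct sctp_getaddrs *ga = (struct sctp_getaddrs *)buf;
 *	socklen_t optlen = sizeof(buf);
 *
 *	ga->assoc_id = id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS,
 *		       buf, &optlen) == 0)
 *		walk_addrs(ga->addrs, ga->addr_num);
 */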
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
size_t space_left, int *bytes_copied)
{
struct sctp_sockaddr_entry *addr;
union sctp_addr temp;
int cnt = 0;
int addrlen;
struct net *net = sock_net(sk);
rcu_read_lock();
list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
if (!addr->valid)
continue;
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
continue;
if ((PF_INET6 == sk->sk_family) &&
inet_v6_ipv6only(sk) &&
(AF_INET == addr->a.sa.sa_family))
continue;
memcpy(&temp, &addr->a, sizeof(temp));
if (!temp.v4.sin_port)
temp.v4.sin_port = htons(port);
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
&temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen) {
cnt = -ENOMEM;
break;
}
memcpy(to, &temp, addrlen);
to += addrlen;
		cnt++;
space_left -= addrlen;
*bytes_copied += addrlen;
}
rcu_read_unlock();
return cnt;
}
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_bind_addr *bp;
struct sctp_association *asoc;
int cnt = 0;
struct sctp_getaddrs getaddrs;
struct sctp_sockaddr_entry *addr;
void __user *to;
union sctp_addr temp;
struct sctp_sock *sp = sctp_sk(sk);
int addrlen;
int err = 0;
size_t space_left;
int bytes_copied = 0;
void *addrs;
void *buf;
if (len < sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
return -EFAULT;
/*
* For UDP-style sockets, id specifies the association to query.
* If the id field is set to the value '0' then the locally bound
* addresses are returned without regard to any particular
* association.
*/
if (0 == getaddrs.assoc_id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
} else {
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
}
to = optval + offsetof(struct sctp_getaddrs,addrs);
space_left = len - offsetof(struct sctp_getaddrs,addrs);
addrs = kmalloc(space_left, GFP_KERNEL);
if (!addrs)
return -ENOMEM;
/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
* addresses from the global local address list.
*/
if (sctp_list_single_entry(&bp->address_list)) {
addr = list_entry(bp->address_list.next,
struct sctp_sockaddr_entry, list);
if (sctp_is_any(sk, &addr->a)) {
cnt = sctp_copy_laddrs(sk, bp->port, addrs,
space_left, &bytes_copied);
if (cnt < 0) {
err = cnt;
goto out;
}
goto copy_getaddrs;
}
}
buf = addrs;
/* Protection on the bound address list is not needed since
* in the socket option context we hold a socket lock and
* thus the bound address list can't change.
*/
list_for_each_entry(addr, &bp->address_list, list) {
memcpy(&temp, &addr->a, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen) {
err = -ENOMEM; /*fixme: right error?*/
goto out;
}
memcpy(buf, &temp, addrlen);
buf += addrlen;
bytes_copied += addrlen;
		cnt++;
space_left -= addrlen;
}
copy_getaddrs:
if (copy_to_user(to, addrs, bytes_copied)) {
err = -EFAULT;
goto out;
}
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
err = -EFAULT;
goto out;
}
if (put_user(bytes_copied, optlen))
err = -EFAULT;
out:
kfree(addrs);
return err;
}
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
*
* Requests that the local SCTP stack use the enclosed peer address as
* the association primary. The enclosed address must be one of the
* association peer's addresses.
*/
static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_prim prim;
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
if (len < sizeof(struct sctp_prim))
return -EINVAL;
len = sizeof(struct sctp_prim);
if (copy_from_user(&prim, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
if (!asoc)
return -EINVAL;
if (!asoc->peer.primary_path)
return -ENOTCONN;
memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
asoc->peer.primary_path->af_specific->sockaddr_len);
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
(union sctp_addr *)&prim.ssp_addr);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &prim, len))
return -EFAULT;
return 0;
}
/*
* 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
*
* Requests that the local endpoint set the specified Adaptation Layer
* Indication parameter for all future INIT and INIT-ACK exchanges.
*/
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_setadaptation adaptation;
if (len < sizeof(struct sctp_setadaptation))
return -EINVAL;
len = sizeof(struct sctp_setadaptation);
adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &adaptation, len))
return -EFAULT;
return 0;
}
/*
*
* 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
*
* Applications that wish to use the sendto() system call may wish to
* specify a default set of parameters that would normally be supplied
* through the inclusion of ancillary data. This socket option allows
* such an application to set the default sctp_sndrcvinfo structure.
* The application that wishes to use this socket option simply passes
* in to this call the sctp_sndrcvinfo structure defined in Section
 * 5.2.2. The input parameters accepted by this call include
* sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
* sinfo_timetolive. The user must provide the sinfo_assoc_id field in
* to this call if the caller is using the UDP model.
*
 * For getsockopt(), it gets the default sctp_sndrcvinfo structure.
*/
static int sctp_getsockopt_default_send_param(struct sock *sk,
int len, char __user *optval,
int __user *optlen)
{
struct sctp_sndrcvinfo info;
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
if (len < sizeof(struct sctp_sndrcvinfo))
return -EINVAL;
len = sizeof(struct sctp_sndrcvinfo);
if (copy_from_user(&info, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
info.sinfo_stream = asoc->default_stream;
info.sinfo_flags = asoc->default_flags;
info.sinfo_ppid = asoc->default_ppid;
info.sinfo_context = asoc->default_context;
info.sinfo_timetolive = asoc->default_timetolive;
} else {
info.sinfo_stream = sp->default_stream;
info.sinfo_flags = sp->default_flags;
info.sinfo_ppid = sp->default_ppid;
info.sinfo_context = sp->default_context;
info.sinfo_timetolive = sp->default_timetolive;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &info, len))
return -EFAULT;
return 0;
}
/*
*
* 7.1.5 SCTP_NODELAY
*
* Turn on/off any Nagle-like algorithm. This means that packets are
* generally sent as soon as possible and no unnecessary delays are
* introduced, at the cost of more packets in the network. Expects an
* integer boolean flag.
*/
static int sctp_getsockopt_nodelay(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = (sctp_sk(sk)->nodelay == 1);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
*
* 7.1.1 SCTP_RTOINFO
*
* The protocol parameters used to initialize and bound retransmission
* timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
* and modify these parameters.
* All parameters are time values, in milliseconds. A value of 0, when
* modifying the parameters, indicates that the current value should not
* be changed.
*
*/
static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
char __user *optval,
				   int __user *optlen)
{
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
	if (len < sizeof(struct sctp_rtoinfo))
return -EINVAL;
len = sizeof(struct sctp_rtoinfo);
if (copy_from_user(&rtoinfo, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
/* Values corresponding to the specific association. */
if (asoc) {
rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
} else {
/* Values corresponding to the endpoint. */
struct sctp_sock *sp = sctp_sk(sk);
rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
rtoinfo.srto_max = sp->rtoinfo.srto_max;
rtoinfo.srto_min = sp->rtoinfo.srto_min;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &rtoinfo, len))
return -EFAULT;
return 0;
}
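/* Example: reading the RTO bounds. A minimal sketch; all values come back
 * in milliseconds and an srto_assoc_id of 0 returns the endpoint defaults:
 *
 *	struct sctp_rtoinfo rto = { .srto_assoc_id = 0 };
 *	socklen_t optlen = sizeof(rto);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &optlen) == 0)
 *		printf("rto initial=%u min=%u max=%u\n", rto.srto_initial,
 *		       rto.srto_min, rto.srto_max);
 */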
/*
*
* 7.1.2 SCTP_ASSOCINFO
*
* This option is used to tune the maximum retransmission attempts
* of the association.
* Returns an error if the new association retransmission value is
* greater than the sum of the retransmission value of the peer.
* See [SCTP] for more information.
*
*/
static int sctp_getsockopt_associnfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assocparams assocparams;
struct sctp_association *asoc;
struct list_head *pos;
int cnt = 0;
	if (len < sizeof(struct sctp_assocparams))
return -EINVAL;
len = sizeof(struct sctp_assocparams);
if (copy_from_user(&assocparams, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
	/* Values corresponding to the specific association */
if (asoc) {
assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
assocparams.sasoc_local_rwnd = asoc->a_rwnd;
		assocparams.sasoc_cookie_life = (asoc->cookie_life.tv_sec * 1000) +
						(asoc->cookie_life.tv_usec / 1000);
list_for_each(pos, &asoc->peer.transport_addr_list) {
			cnt++;
}
assocparams.sasoc_number_peer_destinations = cnt;
} else {
/* Values corresponding to the endpoint */
struct sctp_sock *sp = sctp_sk(sk);
assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
assocparams.sasoc_cookie_life =
sp->assocparams.sasoc_cookie_life;
		assocparams.sasoc_number_peer_destinations =
			sp->assocparams.sasoc_number_peer_destinations;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &assocparams, len))
return -EFAULT;
return 0;
}
/*
* 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
*
* This socket option is a boolean flag which turns on or off mapped V4
* addresses. If this option is turned on and the socket is type
* PF_INET6, then IPv4 addresses will be mapped to V6 representation.
* If this option is turned off, then no mapping will be done of V4
* addresses and a user will receive both PF_INET6 and PF_INET type
* addresses on the socket.
*/
static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val;
struct sctp_sock *sp = sctp_sk(sk);
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = sp->v4mapped;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 7.1.29. Set or Get the default context (SCTP_CONTEXT)
* (chapter and verse is quoted at sctp_setsockopt_context())
*/
static int sctp_getsockopt_context(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_sock *sp;
struct sctp_association *asoc;
if (len < sizeof(struct sctp_assoc_value))
return -EINVAL;
len = sizeof(struct sctp_assoc_value);
	if (copy_from_user(&params, optval, len))
return -EFAULT;
sp = sctp_sk(sk);
if (params.assoc_id != 0) {
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc)
return -EINVAL;
params.assoc_value = asoc->default_rcv_context;
} else {
params.assoc_value = sp->default_rcv_context;
}
if (put_user(len, optlen))
return -EFAULT;
	if (copy_to_user(optval, &params, len))
return -EFAULT;
return 0;
}
/*
* 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
* This option will get or set the maximum size to put in any outgoing
* SCTP DATA chunk. If a message is larger than this size it will be
* fragmented by SCTP into the specified size. Note that the underlying
* SCTP implementation may fragment into smaller sized chunks when the
* PMTU of the underlying association is smaller than the value set by
* the user. The default value for this option is '0' which indicates
 * the user is NOT limiting fragmentation and only the PMTU will affect
* SCTP's choice of DATA chunk size. Note also that values set larger
* than the maximum size of an IP datagram will effectively let SCTP
* control fragmentation (i.e. the same as setting this option to 0).
*
* The following structure is used to access and modify this parameter:
*
* struct sctp_assoc_value {
* sctp_assoc_t assoc_id;
* uint32_t assoc_value;
* };
*
* assoc_id: This parameter is ignored for one-to-one style sockets.
* For one-to-many style sockets this parameter indicates which
* association the user is performing an action upon. Note that if
 * this field's value is zero then the endpoint's default value is
 * changed (affecting future associations only).
* assoc_value: This parameter specifies the maximum size in bytes.
*/
static int sctp_getsockopt_maxseg(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
if (len == sizeof(int)) {
pr_warn("Use of int in maxseg socket option deprecated\n");
pr_warn("Use struct sctp_assoc_value instead\n");
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, sizeof(params)))
return -EFAULT;
} else
return -EINVAL;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc)
params.assoc_value = asoc->frag_point;
else
params.assoc_value = sctp_sk(sk)->user_frag;
if (put_user(len, optlen))
return -EFAULT;
if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
return -EFAULT;
} else {
		if (copy_to_user(optval, &params, len))
return -EFAULT;
}
return 0;
}
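/* Example: reading SCTP_MAXSEG with the non-deprecated struct form. A
 * minimal sketch; an assoc_value of 0 means fragmentation is bounded only
 * by the PMTU, as described above:
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0 };
 *	socklen_t optlen = sizeof(av);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen) == 0)
 *		printf("maxseg=%u bytes\n", av.assoc_value);
 */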
/*
* 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
* (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
*/
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = sctp_sk(sk)->frag_interleave;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 7.1.25. Set or Get the sctp partial delivery point
* (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
*/
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
u32 val;
if (len < sizeof(u32))
return -EINVAL;
len = sizeof(u32);
val = sctp_sk(sk)->pd_point;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
* (chapter and verse is quoted at sctp_setsockopt_maxburst())
*/
static int sctp_getsockopt_maxburst(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_sock *sp;
struct sctp_association *asoc;
if (len == sizeof(int)) {
pr_warn("Use of int in max_burst socket option deprecated\n");
pr_warn("Use struct sctp_assoc_value instead\n");
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
return -EINVAL;
sp = sctp_sk(sk);
if (params.assoc_id != 0) {
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc)
return -EINVAL;
params.assoc_value = asoc->max_burst;
} else
params.assoc_value = sp->max_burst;
if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
return -EFAULT;
} else {
		if (copy_to_user(optval, &params, len))
return -EFAULT;
}
return 0;
}
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct net *net = sock_net(sk);
struct sctp_hmacalgo __user *p = (void __user *)optval;
struct sctp_hmac_algo_param *hmacs;
__u16 data_len = 0;
u32 num_idents;
if (!net->sctp.auth_enable)
return -EACCES;
hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
if (len < sizeof(struct sctp_hmacalgo) + data_len)
return -EINVAL;
len = sizeof(struct sctp_hmacalgo) + data_len;
num_idents = data_len / sizeof(u16);
if (put_user(len, optlen))
return -EFAULT;
if (put_user(num_idents, &p->shmac_num_idents))
return -EFAULT;
if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_active_key(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct net *net = sock_net(sk);
struct sctp_authkeyid val;
struct sctp_association *asoc;
if (!net->sctp.auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc)
val.scact_keynumber = asoc->active_key_id;
else
val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
len = sizeof(struct sctp_authkeyid);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct net *net = sock_net(sk);
struct sctp_authchunks __user *p = (void __user *)optval;
struct sctp_authchunks val;
struct sctp_association *asoc;
struct sctp_chunks_param *ch;
u32 num_chunks = 0;
char __user *to;
if (!net->sctp.auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
return -EFAULT;
to = p->gauth_chunks;
asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
if (!asoc)
return -EINVAL;
ch = asoc->peer.peer_chunks;
if (!ch)
goto num;
/* See if the user provided enough room for all the data */
num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
	if (len < sizeof(struct sctp_authchunks) + num_chunks)
		return -EINVAL;
if (copy_to_user(to, ch->chunks, num_chunks))
return -EFAULT;
num:
len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
return 0;
}
static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct net *net = sock_net(sk);
struct sctp_authchunks __user *p = (void __user *)optval;
struct sctp_authchunks val;
struct sctp_association *asoc;
struct sctp_chunks_param *ch;
u32 num_chunks = 0;
char __user *to;
if (!net->sctp.auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
return -EFAULT;
to = p->gauth_chunks;
asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc)
		ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
else
ch = sctp_sk(sk)->ep->auth_chunk_list;
if (!ch)
goto num;
num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
if (len < sizeof(struct sctp_authchunks) + num_chunks)
return -EINVAL;
if (copy_to_user(to, ch->chunks, num_chunks))
return -EFAULT;
num:
len = sizeof(struct sctp_authchunks) + num_chunks;
if (put_user(len, optlen))
return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
return 0;
}
/*
* 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
* This option gets the current number of associations that are attached
 * to a one-to-many style socket. The option value is a uint32_t.
*/
static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
u32 val = 0;
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
if (len < sizeof(u32))
return -EINVAL;
len = sizeof(u32);
list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
val++;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 8.1.23 SCTP_AUTO_ASCONF
* See the corresponding setsockopt entry as description
*/
static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
int val = 0;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
val = 1;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* 8.2.6. Get the Current Identifiers of Associations
* (SCTP_GET_ASSOC_ID_LIST)
*
* This option gets the current list of SCTP association identifiers of
* the SCTP associations handled by a one-to-many style socket.
*/
static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
struct sctp_assoc_ids *ids;
u32 num = 0;
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
if (len < sizeof(struct sctp_assoc_ids))
return -EINVAL;
list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
num++;
}
if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
return -EINVAL;
len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
ids = kmalloc(len, GFP_KERNEL);
if (unlikely(!ids))
return -ENOMEM;
ids->gaids_number_of_ids = num;
num = 0;
list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
ids->gaids_assoc_id[num++] = asoc->assoc_id;
}
if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
kfree(ids);
return -EFAULT;
}
kfree(ids);
return 0;
}
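/* Example: listing association ids in two steps. A minimal sketch: query
 * SCTP_GET_ASSOC_NUMBER first to size the buffer, then fetch the list.
 * The count may change between the two calls, so real code should retry
 * on EINVAL; malloc failure handling is elided:
 *
 *	uint32_t i, num;
 *	socklen_t optlen = sizeof(num);
 *	struct sctp_assoc_ids *ids;
 *
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &num, &optlen);
 *	optlen = sizeof(*ids) + num * sizeof(sctp_assoc_t);
 *	ids = malloc(optlen);
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *		       ids, &optlen) == 0)
 *		for (i = 0; i < ids->gaids_number_of_ids; i++)
 *			printf("assoc id %d\n", ids->gaids_assoc_id[i]);
 */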
/*
* SCTP_PEER_ADDR_THLDS
*
* This option allows us to fetch the partially failed threshold for one or all
* transports in an association. See Section 6.1 of:
* http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
*/
static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
char __user *optval,
int len,
int __user *optlen)
{
struct sctp_paddrthlds val;
struct sctp_transport *trans;
struct sctp_association *asoc;
if (len < sizeof(struct sctp_paddrthlds))
return -EINVAL;
len = sizeof(struct sctp_paddrthlds);
if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
return -EFAULT;
if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
asoc = sctp_id2assoc(sk, val.spt_assoc_id);
if (!asoc)
return -ENOENT;
val.spt_pathpfthld = asoc->pf_retrans;
val.spt_pathmaxrxt = asoc->pathmaxrxt;
} else {
trans = sctp_addr_id2transport(sk, &val.spt_address,
val.spt_assoc_id);
if (!trans)
return -ENOENT;
val.spt_pathmaxrxt = trans->pathmaxrxt;
val.spt_pathpfthld = trans->pf_retrans;
}
if (put_user(len, optlen) || copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
/*
* SCTP_GET_ASSOC_STATS
*
* This option retrieves local per endpoint statistics. It is modeled
* after OpenSolaris' implementation
*/
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_stats sas;
struct sctp_association *asoc = NULL;
/* User must provide at least the assoc id */
if (len < sizeof(sctp_assoc_t))
return -EINVAL;
/* Allow the struct to grow and fill in as much as possible */
len = min_t(size_t, len, sizeof(sas));
if (copy_from_user(&sas, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
if (!asoc)
return -EINVAL;
sas.sas_rtxchunks = asoc->stats.rtxchunks;
sas.sas_gapcnt = asoc->stats.gapcnt;
sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
sas.sas_osacks = asoc->stats.osacks;
sas.sas_isacks = asoc->stats.isacks;
sas.sas_octrlchunks = asoc->stats.octrlchunks;
sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
sas.sas_oodchunks = asoc->stats.oodchunks;
sas.sas_iodchunks = asoc->stats.iodchunks;
sas.sas_ouodchunks = asoc->stats.ouodchunks;
sas.sas_iuodchunks = asoc->stats.iuodchunks;
sas.sas_idupchunks = asoc->stats.idupchunks;
sas.sas_opackets = asoc->stats.opackets;
sas.sas_ipackets = asoc->stats.ipackets;
	/* Highest max RTO observed in this period; 0 is returned if no
	 * RTO update has taken place, in which case obs_rto_ipaddr is
	 * bogus.
	 */
sas.sas_maxrto = asoc->stats.max_obs_rto;
memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
sizeof(struct sockaddr_storage));
/* Mark beginning of a new observation period */
asoc->stats.max_obs_rto = asoc->rto_min;
if (put_user(len, optlen))
return -EFAULT;
SCTP_DEBUG_PRINTK("sctp_getsockopt_assoc_stat(%d): %d\n",
len, sas.sas_assoc_id);
if (copy_to_user(optval, &sas, len))
return -EFAULT;
return 0;
}
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int retval = 0;
int len;
SCTP_DEBUG_PRINTK("sctp_getsockopt(sk: %p... optname: %d)\n",
sk, optname);
/* I can hardly begin to describe how wrong this is. This is
* so broken as to be worse than useless. The API draft
* REALLY is NOT helpful here... I am not convinced that the
* semantics of getsockopt() with a level OTHER THAN SOL_SCTP
* are at all well-founded.
*/
if (level != SOL_SCTP) {
struct sctp_af *af = sctp_sk(sk)->pf->af;
retval = af->getsockopt(sk, level, optname, optval, optlen);
return retval;
}
if (get_user(len, optlen))
return -EFAULT;
sctp_lock_sock(sk);
switch (optname) {
case SCTP_STATUS:
retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
break;
case SCTP_DISABLE_FRAGMENTS:
retval = sctp_getsockopt_disable_fragments(sk, len, optval,
optlen);
break;
case SCTP_EVENTS:
retval = sctp_getsockopt_events(sk, len, optval, optlen);
break;
case SCTP_AUTOCLOSE:
retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
break;
case SCTP_SOCKOPT_PEELOFF:
retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
break;
case SCTP_PEER_ADDR_PARAMS:
retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
optlen);
break;
case SCTP_DELAYED_SACK:
retval = sctp_getsockopt_delayed_ack(sk, len, optval,
optlen);
break;
case SCTP_INITMSG:
retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
break;
case SCTP_GET_PEER_ADDRS:
retval = sctp_getsockopt_peer_addrs(sk, len, optval,
optlen);
break;
case SCTP_GET_LOCAL_ADDRS:
retval = sctp_getsockopt_local_addrs(sk, len, optval,
optlen);
break;
case SCTP_SOCKOPT_CONNECTX3:
retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
break;
case SCTP_DEFAULT_SEND_PARAM:
retval = sctp_getsockopt_default_send_param(sk, len,
optval, optlen);
break;
case SCTP_PRIMARY_ADDR:
retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
break;
case SCTP_NODELAY:
retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
break;
case SCTP_RTOINFO:
retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
break;
case SCTP_ASSOCINFO:
retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
break;
case SCTP_I_WANT_MAPPED_V4_ADDR:
retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
break;
case SCTP_MAXSEG:
retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
break;
case SCTP_GET_PEER_ADDR_INFO:
retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
optlen);
break;
case SCTP_ADAPTATION_LAYER:
retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
optlen);
break;
case SCTP_CONTEXT:
retval = sctp_getsockopt_context(sk, len, optval, optlen);
break;
case SCTP_FRAGMENT_INTERLEAVE:
retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
optlen);
break;
case SCTP_PARTIAL_DELIVERY_POINT:
retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
optlen);
break;
case SCTP_MAX_BURST:
retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
break;
case SCTP_AUTH_KEY:
case SCTP_AUTH_CHUNK:
case SCTP_AUTH_DELETE_KEY:
retval = -EOPNOTSUPP;
break;
case SCTP_HMAC_IDENT:
retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
break;
case SCTP_AUTH_ACTIVE_KEY:
retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
break;
case SCTP_PEER_AUTH_CHUNKS:
retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
optlen);
break;
case SCTP_LOCAL_AUTH_CHUNKS:
retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
optlen);
break;
case SCTP_GET_ASSOC_NUMBER:
retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
break;
case SCTP_GET_ASSOC_ID_LIST:
retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
break;
case SCTP_AUTO_ASCONF:
retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
break;
case SCTP_PEER_ADDR_THLDS:
retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
break;
case SCTP_GET_ASSOC_STATS:
retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
}
sctp_release_sock(sk);
return retval;
}
static void sctp_hash(struct sock *sk)
{
/* STUB */
}
static void sctp_unhash(struct sock *sk)
{
/* STUB */
}
/* Check if port is acceptable. Possibly find first available port.
*
 * The port hash table is contained in the 'global' SCTP protocol storage
 * returned by struct sctp_protocol *sctp_get_protocol(). The hash
* table is an array of 4096 lists (sctp_bind_hashbucket). Each
* list (the list number is the port number hashed out, so as you
* would expect from a hash function, all the ports in a given list have
* such a number that hashes out to the same list number; you were
* expecting that, right?); so each list has a set of ports, with a
* link to the socket (struct sock) that uses it, the port number and
* a fastreuse flag (FIXME: NPI ipg).
*/
static struct sctp_bind_bucket *sctp_bucket_create(
struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
struct sctp_bind_hashbucket *head; /* hash list */
struct sctp_bind_bucket *pp; /* hash list port iterator */
struct hlist_node *node;
unsigned short snum;
int ret;
snum = ntohs(addr->v4.sin_port);
SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum);
sctp_local_bh_disable();
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
unsigned int rover;
inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;
rover = net_random() % remaining + low;
do {
rover++;
if ((rover < low) || (rover > high))
rover = low;
if (inet_is_reserved_local_port(rover))
continue;
index = sctp_phashfn(sock_net(sk), rover);
head = &sctp_port_hashtable[index];
sctp_spin_lock(&head->lock);
sctp_for_each_hentry(pp, node, &head->chain)
if ((pp->port == rover) &&
net_eq(sock_net(sk), pp->net))
goto next;
break;
next:
sctp_spin_unlock(&head->lock);
} while (--remaining > 0);
/* Exhausted local port range during search? */
ret = 1;
if (remaining <= 0)
goto fail;
/* OK, here is the one we will use. HEAD (the port
		 * hash table list entry) is non-NULL and we hold its
		 * lock.
*/
snum = rover;
} else {
		/* We are given a specific port number; we verify
		 * that it is not being used. If it is used, we will
		 * exhaust the search in the hash list corresponding
* to the port number (snum) - we detect that with the
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
sctp_spin_lock(&head->lock);
sctp_for_each_hentry(pp, node, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
goto pp_found;
}
}
pp = NULL;
goto pp_not_found;
pp_found:
if (!hlist_empty(&pp->owner)) {
/* We had a port hash table hit - there is an
* available port (pp != NULL) and it is being
* used by other socket (pp->owner not empty); that other
* socket is going to be sk2.
*/
int reuse = sk->sk_reuse;
struct sock *sk2;
SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
if (pp->fastreuse && sk->sk_reuse &&
sk->sk_state != SCTP_SS_LISTENING)
goto success;
/* Run through the list of sockets bound to the port
* (pp->port) [via the pointers bind_next and
* bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
* we get the endpoint they describe and run through
* the endpoint's list of IP (v4 or v6) addresses,
* comparing each of the addresses with the address of
* the socket sk. If we find a match, then that means
* that this port/socket (sk) combination are already
* in an endpoint.
*/
sk_for_each_bound(sk2, node, &pp->owner) {
struct sctp_endpoint *ep2;
ep2 = sctp_sk(sk2)->ep;
if (sk == sk2 ||
(reuse && sk2->sk_reuse &&
sk2->sk_state != SCTP_SS_LISTENING))
continue;
if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
sctp_sk(sk2), sctp_sk(sk))) {
ret = (long)sk2;
goto fail_unlock;
}
}
SCTP_DEBUG_PRINTK("sctp_get_port(): Found a match\n");
}
pp_not_found:
/* If there was a hash table miss, create a new port. */
ret = 1;
if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
goto fail_unlock;
/* In either case (hit or miss), make sure fastreuse is 1 only
* if sk->sk_reuse is too (that is, if the caller requested
* SO_REUSEADDR on this socket -sk-).
*/
if (hlist_empty(&pp->owner)) {
if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
pp->fastreuse = 1;
else
pp->fastreuse = 0;
} else if (pp->fastreuse &&
(!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
pp->fastreuse = 0;
/* We are set, so fill up all the data in the hash table
* entry, tie the socket list information with the rest of the
* sockets FIXME: Blurry, NPI (ipg).
*/
success:
if (!sctp_sk(sk)->bind_hash) {
inet_sk(sk)->inet_num = snum;
sk_add_bind_node(sk, &pp->owner);
sctp_sk(sk)->bind_hash = pp;
}
ret = 0;
fail_unlock:
sctp_spin_unlock(&head->lock);
fail:
sctp_local_bh_enable();
return ret;
}
/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
* port is requested.
*/
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
long ret;
union sctp_addr addr;
struct sctp_af *af = sctp_sk(sk)->pf->af;
/* Set up a dummy address struct from the sk. */
af->from_sk(&addr, sk);
addr.v4.sin_port = htons(snum);
/* Note: sk->sk_num gets filled in if ephemeral port request. */
ret = sctp_get_port_local(sk, &addr);
return ret ? 1 : 0;
}
/*
* Move a socket to LISTENING state.
*/
SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
struct crypto_hash *tfm = NULL;
char alg[32];
/* Allocate HMAC for generating cookie. */
if (!sp->hmac && sp->sctp_hmac_alg) {
sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
net_info_ratelimited("failed to load transform for %s: %ld\n",
sp->sctp_hmac_alg, PTR_ERR(tfm));
return -ENOSYS;
}
sctp_sk(sk)->hmac = tfm;
}
/*
* If a bind() or sctp_bindx() is not called prior to a listen()
* call that allows new associations to be accepted, the system
* picks an ephemeral port and will choose an address set equivalent
* to binding with a wildcard address.
*
* This is not currently spelled out in the SCTP sockets
* extensions draft, but follows the practice as seen in TCP
* sockets.
*
*/
sk->sk_state = SCTP_SS_LISTENING;
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk))
return -EAGAIN;
} else {
if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
sk->sk_state = SCTP_SS_CLOSED;
return -EADDRINUSE;
}
}
sk->sk_max_ack_backlog = backlog;
sctp_hash_endpoint(ep);
return 0;
}
/*
* 4.1.3 / 5.1.3 listen()
*
* By default, new associations are not accepted for UDP style sockets.
* An application uses listen() to mark a socket as being able to
* accept new associations.
*
* On TCP style sockets, applications use listen() to ready the SCTP
* endpoint for accepting inbound associations.
*
* On both types of endpoints a backlog of '0' disables listening.
*
* Move a socket to LISTENING state.
*/
int sctp_inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
int err = -EINVAL;
if (unlikely(backlog < 0))
return err;
sctp_lock_sock(sk);
/* Peeled-off sockets are not allowed to listen(). */
if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
goto out;
if (sock->state != SS_UNCONNECTED)
goto out;
/* If backlog is zero, disable listening. */
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
goto out;
err = 0;
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
if (sk->sk_reuse)
sctp_sk(sk)->bind_hash->fastreuse = 1;
goto out;
}
/* If we are already listening, just update the backlog */
if (sctp_sstate(sk, LISTENING))
sk->sk_max_ack_backlog = backlog;
else {
err = sctp_listen_start(sk, backlog);
if (err)
goto out;
}
err = 0;
out:
sctp_release_sock(sk);
return err;
}
/*
 * This function is modeled on the current datagram_poll() and
* tcp_poll(). Note that, based on these implementations, we don't
* lock the socket in this function, even though it seems that,
* ideally, locking or some other mechanisms can be used to ensure
* the integrity of the counters (sndbuf and wmem_alloc) used
* in this place. We assume that we don't need locks either until proven
* otherwise.
*
* Another thing to note is that we include the Async I/O support
* here, again, by modeling the current TCP/UDP code. We don't have
* a good way to test with it yet.
*/
unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct sctp_sock *sp = sctp_sk(sk);
unsigned int mask;
poll_wait(file, sk_sleep(sk), wait);
/* A TCP-style listening socket becomes readable when the accept queue
* is not empty.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
return (!list_empty(&sp->ep->asocs)) ?
(POLLIN | POLLRDNORM) : 0;
mask = 0;
	/* Are there any exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* Is it readable? Reconsider this code with TCP-style support. */
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* The association is either gone or not ready. */
if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
return mask;
/* Is it writable? */
if (sctp_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else {
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
/*
* Since the socket is not locked, the buffer
* might be made available after the writeable check and
* before the bit is set. This could cause a lost I/O
* signal. tcp_poll() has a race breaker for this race
* condition. Based on their implementation, we put
* in the following code to cover it as well.
*/
if (sctp_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
return mask;
}
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
static struct sctp_bind_bucket *sctp_bucket_create(
struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
{
struct sctp_bind_bucket *pp;
pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
if (pp) {
SCTP_DBG_OBJCNT_INC(bind_bucket);
pp->port = snum;
pp->fastreuse = 0;
INIT_HLIST_HEAD(&pp->owner);
pp->net = net;
hlist_add_head(&pp->node, &head->chain);
}
return pp;
}
/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
if (pp && hlist_empty(&pp->owner)) {
__hlist_del(&pp->node);
kmem_cache_free(sctp_bucket_cachep, pp);
SCTP_DBG_OBJCNT_DEC(bind_bucket);
}
}
/* Release this socket's reference to a local port. */
static inline void __sctp_put_port(struct sock *sk)
{
struct sctp_bind_hashbucket *head =
&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
inet_sk(sk)->inet_num)];
struct sctp_bind_bucket *pp;
sctp_spin_lock(&head->lock);
pp = sctp_sk(sk)->bind_hash;
__sk_del_bind_node(sk);
sctp_sk(sk)->bind_hash = NULL;
inet_sk(sk)->inet_num = 0;
sctp_bucket_destroy(pp);
sctp_spin_unlock(&head->lock);
}
void sctp_put_port(struct sock *sk)
{
sctp_local_bh_disable();
__sctp_put_port(sk);
sctp_local_bh_enable();
}
/*
 * The system picks an ephemeral port and chooses an address set equivalent
* to binding with a wildcard address.
* One of those addresses will be the primary address for the association.
* This automatically enables the multihoming capability of SCTP.
*/
static int sctp_autobind(struct sock *sk)
{
union sctp_addr autoaddr;
struct sctp_af *af;
__be16 port;
/* Initialize a local sockaddr structure to INADDR_ANY. */
af = sctp_sk(sk)->pf->af;
port = htons(inet_sk(sk)->inet_num);
af->inaddr_any(&autoaddr, port);
return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
}
/* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
*
* From RFC 2292
* 4.2 The cmsghdr Structure *
*
* When ancillary data is sent or received, any number of ancillary data
* objects can be specified by the msg_control and msg_controllen members of
* the msghdr structure, because each object is preceded by
* a cmsghdr structure defining the object's length (the cmsg_len member).
* Historically Berkeley-derived implementations have passed only one object
* at a time, but this API allows multiple objects to be
* passed in a single call to sendmsg() or recvmsg(). The following example
* shows two ancillary data objects in a control buffer.
*
 * |<--------------------------- msg_controllen -------------------------->|
 * |                                                                       |
 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
 * |                                   |                                   |
 * |<---------- cmsg_len ---------->|  |<--------- cmsg_len ----------->|  |
 * |<--------- CMSG_LEN() --------->|  |<-------- CMSG_LEN() ---------->|  |
 * |                                |  |                                |  |
 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 * |cmsg_|cmsg_|cmsg_|XX|           |XX|cmsg_|cmsg_|cmsg_|XX|           |XX|
 * |len  |level|type |XX|cmsg_data[]|XX|len  |level|type |XX|cmsg_data[]|XX|
 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 *                                                                      ^
 *                                                                      |
 *                                                             msg_control
 *                                                             points here
*/
SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
sctp_cmsgs_t *cmsgs)
{
struct cmsghdr *cmsg;
struct msghdr *my_msg = (struct msghdr *)msg;
for (cmsg = CMSG_FIRSTHDR(msg);
cmsg != NULL;
cmsg = CMSG_NXTHDR(my_msg, cmsg)) {
if (!CMSG_OK(my_msg, cmsg))
return -EINVAL;
/* Should we parse this header or ignore? */
if (cmsg->cmsg_level != IPPROTO_SCTP)
continue;
/* Strictly check lengths following example in SCM code. */
switch (cmsg->cmsg_type) {
case SCTP_INIT:
/* SCTP Socket API Extension
* 5.2.1 SCTP Initiation Structure (SCTP_INIT)
*
* This cmsghdr structure provides information for
* initializing new SCTP associations with sendmsg().
* The SCTP_INITMSG socket option uses this same data
* structure. This structure is not used for
* recvmsg().
*
* cmsg_level cmsg_type cmsg_data[]
* ------------ ------------ ----------------------
* IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
*/
if (cmsg->cmsg_len !=
CMSG_LEN(sizeof(struct sctp_initmsg)))
return -EINVAL;
cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg);
break;
case SCTP_SNDRCV:
/* SCTP Socket API Extension
* 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV)
*
* This cmsghdr structure specifies SCTP options for
* sendmsg() and describes SCTP header information
* about a received message through recvmsg().
*
* cmsg_level cmsg_type cmsg_data[]
* ------------ ------------ ----------------------
* IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
*/
if (cmsg->cmsg_len !=
CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
return -EINVAL;
cmsgs->info =
(struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
/* Minimally, validate the sinfo_flags. */
if (cmsgs->info->sinfo_flags &
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
SCTP_ABORT | SCTP_EOF))
return -EINVAL;
break;
default:
return -EINVAL;
}
}
return 0;
}
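
/* Illustrative sketch (not part of this kernel file): how a userspace
 * sender could lay out the single SCTP_INIT ancillary data object that
 * sctp_msghdr_parse() above expects.  Guarded out because it is
 * userspace code; everything beyond the standard cmsg(3) macros and
 * the <netinet/sctp.h> types is an assumption for illustration.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* buf must be at least CMSG_SPACE(sizeof(struct sctp_initmsg)) bytes. */
static void build_init_cmsg(struct msghdr *msg, char *buf)
{
	struct cmsghdr *cmsg;
	struct sctp_initmsg init;

	memset(&init, 0, sizeof(init));
	init.sinit_num_ostreams = 4;		/* example values only */
	init.sinit_max_instreams = 4;

	msg->msg_control = buf;
	msg->msg_controllen = CMSG_SPACE(sizeof(init));

	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_INIT;
	/* The parser above insists on this exact CMSG_LEN(). */
	cmsg->cmsg_len = CMSG_LEN(sizeof(init));
	memcpy(CMSG_DATA(cmsg), &init, sizeof(init));
}
#endif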
/*
 * Wait for a packet.
* Note: This function is the same function as in core/datagram.c
* with a few modifications to make lksctp work.
*/
static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
{
int error;
DEFINE_WAIT(wait);
prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
if (error)
goto out;
if (!skb_queue_empty(&sk->sk_receive_queue))
goto ready;
/* Socket shut down? */
if (sk->sk_shutdown & RCV_SHUTDOWN)
goto out;
/* Sequenced packets can come disconnected. If so we report the
* problem.
*/
error = -ENOTCONN;
/* Is there a good reason to think that we may receive some data? */
if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
goto out;
/* Handle signals. */
if (signal_pending(current))
goto interrupted;
	/* Let another process have a go, since we are going to sleep
	 * anyway.  Note: This may cause odd behaviors if the message
* does not fit in the user's buffer, but this seems to be the
* only way to honor MSG_DONTWAIT realistically.
*/
sctp_release_sock(sk);
*timeo_p = schedule_timeout(*timeo_p);
sctp_lock_sock(sk);
ready:
finish_wait(sk_sleep(sk), &wait);
return 0;
interrupted:
error = sock_intr_errno(*timeo_p);
out:
finish_wait(sk_sleep(sk), &wait);
*err = error;
return error;
}
/* Receive a datagram.
* Note: This is pretty much the same routine as in core/datagram.c
* with a few changes to make lksctp work.
*/
static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
int noblock, int *err)
{
int error;
struct sk_buff *skb;
long timeo;
timeo = sock_rcvtimeo(sk, noblock);
SCTP_DEBUG_PRINTK("Timeout: timeo: %ld, MAX: %ld.\n",
timeo, MAX_SCHEDULE_TIMEOUT);
do {
/* Again only user level code calls this function,
* so nothing interrupt level
* will suddenly eat the receive_queue.
*
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
if (flags & MSG_PEEK) {
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
atomic_inc(&skb->users);
spin_unlock_bh(&sk->sk_receive_queue.lock);
} else {
skb = skb_dequeue(&sk->sk_receive_queue);
}
if (skb)
return skb;
/* Caller is allowed not to check sk->sk_err before calling. */
error = sock_error(sk);
if (error)
goto no_packet;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
/* User doesn't want to wait. */
error = -EAGAIN;
if (!timeo)
goto no_packet;
} while (sctp_wait_for_packet(sk, err, &timeo) == 0);
return NULL;
no_packet:
*err = error;
return NULL;
}
/* If sndbuf has changed, wake up per association sndbuf waiters. */
static void __sctp_write_space(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
struct socket *sock = sk->sk_socket;
if ((sctp_wspace(asoc) > 0) && sock) {
if (waitqueue_active(&asoc->wait))
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
wait_queue_head_t *wq = sk_sleep(sk);
if (wq && waitqueue_active(wq))
wake_up_interruptible(wq);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock,
SOCK_WAKE_SPACE, POLL_OUT);
}
}
}
/* Do accounting for the sndbuf space.
* Decrement the used sndbuf space of the corresponding association by the
* data size which was just transmitted(freed).
*/
static void sctp_wfree(struct sk_buff *skb)
{
struct sctp_association *asoc;
struct sctp_chunk *chunk;
struct sock *sk;
/* Get the saved chunk pointer. */
chunk = *((struct sctp_chunk **)(skb->cb));
asoc = chunk->asoc;
sk = asoc->base.sk;
asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
/*
* This undoes what is done via sctp_set_owner_w and sk_mem_charge
*/
sk->sk_wmem_queued -= skb->truesize;
sk_mem_uncharge(sk, skb->truesize);
sock_wfree(skb);
__sctp_write_space(asoc);
sctp_association_put(asoc);
}
/* Do accounting for the receive space on the socket.
* Accounting for the association is done in ulpevent.c
* We set this as a destructor for the cloned data skbs so that
* accounting is done at the correct time.
*/
void sctp_sock_rfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct sctp_ulpevent *event = sctp_skb2event(skb);
atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
/*
* Mimic the behavior of sock_rfree
*/
sk_mem_uncharge(sk, event->rmem_len);
}
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
{
struct sock *sk = asoc->base.sk;
int err = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n",
asoc, (long)(*timeo_p), msg_len);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);
/* Wait on the association specific sndbuf space. */
for (;;) {
prepare_to_wait_exclusive(&asoc->wait, &wait,
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
goto do_interrupted;
if (msg_len <= sctp_wspace(asoc))
break;
		/* Let another process have a go, since we are going
		 * to sleep anyway.
*/
sctp_release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
BUG_ON(sk != asoc->base.sk);
sctp_lock_sock(sk);
*timeo_p = current_timeo;
}
out:
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
sctp_association_put(asoc);
return err;
do_error:
err = -EPIPE;
goto out;
do_interrupted:
err = sock_intr_errno(*timeo_p);
goto out;
do_nonblock:
err = -EAGAIN;
goto out;
}
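
/* For reference, sctp_wait_for_sndbuf() above is an instance of the
 * standard kernel sleep/wake idiom; a minimal generic skeleton of the
 * same pattern is sketched below (condition() and WAITQ are
 * placeholders, not symbols from this file).
 */
#if 0
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&WAITQ, &wait, TASK_INTERRUPTIBLE);
		if (condition())
			break;			/* what we slept for */
		if (signal_pending(current))
			break;			/* caller maps to -EINTR etc. */
		timeo = schedule_timeout(timeo);
	}
	finish_wait(&WAITQ, &wait);
#endif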
void sctp_data_ready(struct sock *sk, int len)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
struct sctp_association *asoc;
/* Wake up the tasks in each wait queue. */
list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
__sctp_write_space(asoc);
}
}
/* Is there any sndbuf space available on the socket?
*
* Note that sk_wmem_alloc is the sum of the send buffers on all of the
* associations on the same socket. For a UDP-style socket with
* multiple associations, it is possible for it to be "unwriteable"
* prematurely. I assume that this is acceptable because
* a premature "unwriteable" is better than an accidental "writeable" which
* would cause an unwanted block under certain circumstances. For the 1-1
* UDP-style sockets or TCP-style sockets, this code should work.
* - Daisy
*/
static int sctp_writeable(struct sock *sk)
{
int amt = 0;
amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amt < 0)
amt = 0;
return amt;
}
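
/* Worked example of the computation above (illustrative numbers): with
 * sk_sndbuf = 64K shared by two associations that have queued 40K each,
 * sk_wmem_alloc is 80K, so amt = 64K - 80K clamps to 0 and the socket
 * reports no space -- the premature "unwriteable" case described in the
 * comment above, even though neither association alone exceeds the limit.
 */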
/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
* returns immediately with EINPROGRESS.
*/
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
{
struct sock *sk = asoc->base.sk;
int err = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __func__, asoc,
(long)(*timeo_p));
/* Increment the association's refcnt. */
sctp_association_hold(asoc);
for (;;) {
prepare_to_wait_exclusive(&asoc->wait, &wait,
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
goto do_interrupted;
if (sctp_state(asoc, ESTABLISHED))
break;
		/* Let another process have a go, since we are going
		 * to sleep anyway.
*/
sctp_release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
sctp_lock_sock(sk);
*timeo_p = current_timeo;
}
out:
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
sctp_association_put(asoc);
return err;
do_error:
if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
err = -ETIMEDOUT;
else
err = -ECONNREFUSED;
goto out;
do_interrupted:
err = sock_intr_errno(*timeo_p);
goto out;
do_nonblock:
err = -EINPROGRESS;
goto out;
}
static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
struct sctp_endpoint *ep;
int err = 0;
DEFINE_WAIT(wait);
ep = sctp_sk(sk)->ep;
for (;;) {
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
sctp_release_sock(sk);
timeo = schedule_timeout(timeo);
sctp_lock_sock(sk);
}
err = -EINVAL;
if (!sctp_sstate(sk, LISTENING))
break;
err = 0;
if (!list_empty(&ep->asocs))
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
break;
err = -EAGAIN;
if (!timeo)
break;
}
finish_wait(sk_sleep(sk), &wait);
return err;
}
static void sctp_wait_for_close(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);
do {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
sctp_release_sock(sk);
timeout = schedule_timeout(timeout);
sctp_lock_sock(sk);
} while (!signal_pending(current) && timeout);
finish_wait(sk_sleep(sk), &wait);
}
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
struct sk_buff *frag;
if (!skb->data_len)
goto done;
/* Don't forget the fragments. */
skb_walk_frags(skb, frag)
sctp_skb_set_owner_r_frag(frag, sk);
done:
sctp_skb_set_owner_r(skb, sk);
}
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_sock *newinet;
newsk->sk_type = sk->sk_type;
newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
newsk->sk_flags = sk->sk_flags;
newsk->sk_no_check = sk->sk_no_check;
newsk->sk_reuse = sk->sk_reuse;
newsk->sk_shutdown = sk->sk_shutdown;
newsk->sk_destruct = inet_sock_destruct;
newsk->sk_family = sk->sk_family;
newsk->sk_protocol = IPPROTO_SCTP;
newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
newsk->sk_sndbuf = sk->sk_sndbuf;
newsk->sk_rcvbuf = sk->sk_rcvbuf;
newsk->sk_lingertime = sk->sk_lingertime;
newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
newsk->sk_sndtimeo = sk->sk_sndtimeo;
newinet = inet_sk(newsk);
/* Initialize sk's sport, dport, rcv_saddr and daddr for
* getsockname() and getpeername()
*/
newinet->inet_sport = inet->inet_sport;
newinet->inet_saddr = inet->inet_saddr;
newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
newinet->inet_dport = htons(asoc->peer.port);
newinet->pmtudisc = inet->pmtudisc;
newinet->inet_id = asoc->next_tsn ^ jiffies;
newinet->uc_ttl = inet->uc_ttl;
newinet->mc_loop = 1;
newinet->mc_ttl = 1;
newinet->mc_index = 0;
newinet->mc_list = NULL;
}
/* Populate the fields of the newsk from the oldsk and migrate the assoc
* and its messages to the newsk.
*/
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
struct sctp_association *assoc,
sctp_socket_type_t type)
{
struct sctp_sock *oldsp = sctp_sk(oldsk);
struct sctp_sock *newsp = sctp_sk(newsk);
struct sctp_bind_bucket *pp; /* hash list port iterator */
struct sctp_endpoint *newep = newsp->ep;
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
struct sctp_bind_hashbucket *head;
struct list_head tmplist;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
*/
newsk->sk_sndbuf = oldsk->sk_sndbuf;
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
/* Brute force copy old sctp opt. */
if (oldsp->do_auto_asconf) {
memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
inet_sk_copy_descendant(newsk, oldsk);
memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
} else
inet_sk_copy_descendant(newsk, oldsk);
/* Restore the ep value that was overwritten with the above structure
* copy.
*/
newsp->ep = newep;
newsp->hmac = NULL;
/* Hook this new socket in to the bind_hash list. */
head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
inet_sk(oldsk)->inet_num)];
sctp_local_bh_disable();
sctp_spin_lock(&head->lock);
pp = sctp_sk(oldsk)->bind_hash;
sk_add_bind_node(newsk, &pp->owner);
sctp_sk(newsk)->bind_hash = pp;
inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
sctp_spin_unlock(&head->lock);
sctp_local_bh_enable();
/* Copy the bind_addr list from the original endpoint to the new
* endpoint so that we can handle restarts properly
*/
sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
&oldsp->ep->base.bind_addr, GFP_KERNEL);
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
*/
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
sctp_skb_set_owner_r_frag(skb, newsk);
}
}
/* Clean up any messages pending delivery due to partial
* delivery. Three cases:
* 1) No partial deliver; no work.
* 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
* 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
*/
skb_queue_head_init(&newsp->pd_lobby);
atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
struct sk_buff_head *queue;
/* Decide which queue to move pd_lobby skbs to. */
if (assoc->ulpq.pd_mode) {
queue = &newsp->pd_lobby;
} else
queue = &newsk->sk_receive_queue;
/* Walk through the pd_lobby, looking for skbs that
		 * need to be moved to the new socket.
*/
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
sctp_skb_set_owner_r_frag(skb, newsk);
}
}
/* Clear up any skbs waiting for the partial
* delivery to finish.
*/
if (assoc->ulpq.pd_mode)
sctp_clear_pd(oldsk, NULL);
}
sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
sctp_skb_set_owner_r_frag(skb, newsk);
sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
sctp_skb_set_owner_r_frag(skb, newsk);
/* Set the type of socket to indicate that it is peeled off from the
* original UDP-style socket or created with the accept() call on a
	 * TCP-style socket.
*/
newsp->type = type;
/* Mark the new socket "in-use" by the user so that any packets
* that may arrive on the association after we've moved it are
* queued to the backlog. This prevents a potential race between
* backlog processing on the old socket and new-packet processing
* on the new socket.
*
* The caller has just allocated newsk so we can guarantee that other
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
sctp_assoc_migrate(assoc, newsk);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
*/
if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP))
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
sctp_release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP. */
struct proto sctp_prot = {
.name = "SCTP",
.owner = THIS_MODULE,
.close = sctp_close,
.connect = sctp_connect,
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
.init = sctp_init_sock,
.destroy = sctp_destroy_sock,
.shutdown = sctp_shutdown,
.setsockopt = sctp_setsockopt,
.getsockopt = sctp_getsockopt,
.sendmsg = sctp_sendmsg,
.recvmsg = sctp_recvmsg,
.bind = sctp_bind,
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
.get_port = sctp_get_port,
.obj_size = sizeof(struct sctp_sock),
.sysctl_mem = sysctl_sctp_mem,
.sysctl_rmem = sysctl_sctp_rmem,
.sysctl_wmem = sysctl_sctp_wmem,
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
.memory_allocated = &sctp_memory_allocated,
.sockets_allocated = &sctp_sockets_allocated,
};
#if IS_ENABLED(CONFIG_IPV6)
struct proto sctpv6_prot = {
.name = "SCTPv6",
.owner = THIS_MODULE,
.close = sctp_close,
.connect = sctp_connect,
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
.init = sctp_init_sock,
.destroy = sctp_destroy_sock,
.shutdown = sctp_shutdown,
.setsockopt = sctp_setsockopt,
.getsockopt = sctp_getsockopt,
.sendmsg = sctp_sendmsg,
.recvmsg = sctp_recvmsg,
.bind = sctp_bind,
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
.get_port = sctp_get_port,
.obj_size = sizeof(struct sctp6_sock),
.sysctl_mem = sysctl_sctp_mem,
.sysctl_rmem = sysctl_sctp_rmem,
.sysctl_wmem = sysctl_sctp_wmem,
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
.memory_allocated = &sctp_memory_allocated,
.sockets_allocated = &sctp_sockets_allocated,
};
#endif /* IS_ENABLED(CONFIG_IPV6) */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                        PPPP   IIIII   CCCC  TTTTT                           %
%                        P   P    I    C        T                             %
%                        PPPP     I    C        T                             %
%                        P        I    C        T                             %
%                        P      IIIII   CCCC    T                             %
%                                                                             %
%                                                                             %
%              Read/Write Apple Macintosh QuickDraw/PICT Format               %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1992                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://www.imagemagick.org/script/license.php                           %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/resource_.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
ImageMagick Macintosh PICT Methods.
*/
typedef struct _PICTCode
{
const char
*name;
ssize_t
length;
const char
*description;
} PICTCode;
typedef struct _PICTPixmap
{
short
version,
pack_type;
size_t
pack_size,
horizontal_resolution,
vertical_resolution;
short
pixel_type,
bits_per_pixel,
component_count,
component_size;
size_t
plane_bytes,
table,
reserved;
} PICTPixmap;
typedef struct _PICTRectangle
{
short
top,
left,
bottom,
right;
} PICTRectangle;
static const PICTCode
codes[] =
{
/* 0x00 */ { "NOP", 0, "nop" },
/* 0x01 */ { "Clip", 0, "clip" },
/* 0x02 */ { "BkPat", 8, "background pattern" },
/* 0x03 */ { "TxFont", 2, "text font (word)" },
/* 0x04 */ { "TxFace", 1, "text face (byte)" },
/* 0x05 */ { "TxMode", 2, "text mode (word)" },
/* 0x06 */ { "SpExtra", 4, "space extra (fixed point)" },
/* 0x07 */ { "PnSize", 4, "pen size (point)" },
/* 0x08 */ { "PnMode", 2, "pen mode (word)" },
/* 0x09 */ { "PnPat", 8, "pen pattern" },
/* 0x0a */ { "FillPat", 8, "fill pattern" },
/* 0x0b */ { "OvSize", 4, "oval size (point)" },
/* 0x0c */ { "Origin", 4, "dh, dv (word)" },
/* 0x0d */ { "TxSize", 2, "text size (word)" },
  /* 0x0e */ { "FgColor", 4, "foreground color (longword)" },
  /* 0x0f */ { "BkColor", 4, "background color (longword)" },
/* 0x10 */ { "TxRatio", 8, "numerator (point), denominator (point)" },
/* 0x11 */ { "Version", 1, "version (byte)" },
/* 0x12 */ { "BkPixPat", 0, "color background pattern" },
/* 0x13 */ { "PnPixPat", 0, "color pen pattern" },
/* 0x14 */ { "FillPixPat", 0, "color fill pattern" },
/* 0x15 */ { "PnLocHFrac", 2, "fractional pen position" },
/* 0x16 */ { "ChExtra", 2, "extra for each character" },
/* 0x17 */ { "reserved", 0, "reserved for Apple use" },
/* 0x18 */ { "reserved", 0, "reserved for Apple use" },
/* 0x19 */ { "reserved", 0, "reserved for Apple use" },
/* 0x1a */ { "RGBFgCol", 6, "RGB foreColor" },
/* 0x1b */ { "RGBBkCol", 6, "RGB backColor" },
/* 0x1c */ { "HiliteMode", 0, "hilite mode flag" },
/* 0x1d */ { "HiliteColor", 6, "RGB hilite color" },
/* 0x1e */ { "DefHilite", 0, "Use default hilite color" },
/* 0x1f */ { "OpColor", 6, "RGB OpColor for arithmetic modes" },
/* 0x20 */ { "Line", 8, "pnLoc (point), newPt (point)" },
/* 0x21 */ { "LineFrom", 4, "newPt (point)" },
/* 0x22 */ { "ShortLine", 6, "pnLoc (point, dh, dv (-128 .. 127))" },
/* 0x23 */ { "ShortLineFrom", 2, "dh, dv (-128 .. 127)" },
/* 0x24 */ { "reserved", -1, "reserved for Apple use" },
/* 0x25 */ { "reserved", -1, "reserved for Apple use" },
/* 0x26 */ { "reserved", -1, "reserved for Apple use" },
/* 0x27 */ { "reserved", -1, "reserved for Apple use" },
/* 0x28 */ { "LongText", 0, "txLoc (point), count (0..255), text" },
/* 0x29 */ { "DHText", 0, "dh (0..255), count (0..255), text" },
/* 0x2a */ { "DVText", 0, "dv (0..255), count (0..255), text" },
/* 0x2b */ { "DHDVText", 0, "dh, dv (0..255), count (0..255), text" },
/* 0x2c */ { "reserved", -1, "reserved for Apple use" },
/* 0x2d */ { "reserved", -1, "reserved for Apple use" },
/* 0x2e */ { "reserved", -1, "reserved for Apple use" },
/* 0x2f */ { "reserved", -1, "reserved for Apple use" },
/* 0x30 */ { "frameRect", 8, "rect" },
/* 0x31 */ { "paintRect", 8, "rect" },
/* 0x32 */ { "eraseRect", 8, "rect" },
/* 0x33 */ { "invertRect", 8, "rect" },
/* 0x34 */ { "fillRect", 8, "rect" },
/* 0x35 */ { "reserved", 8, "reserved for Apple use" },
/* 0x36 */ { "reserved", 8, "reserved for Apple use" },
/* 0x37 */ { "reserved", 8, "reserved for Apple use" },
/* 0x38 */ { "frameSameRect", 0, "rect" },
/* 0x39 */ { "paintSameRect", 0, "rect" },
/* 0x3a */ { "eraseSameRect", 0, "rect" },
/* 0x3b */ { "invertSameRect", 0, "rect" },
/* 0x3c */ { "fillSameRect", 0, "rect" },
/* 0x3d */ { "reserved", 0, "reserved for Apple use" },
/* 0x3e */ { "reserved", 0, "reserved for Apple use" },
/* 0x3f */ { "reserved", 0, "reserved for Apple use" },
/* 0x40 */ { "frameRRect", 8, "rect" },
/* 0x41 */ { "paintRRect", 8, "rect" },
/* 0x42 */ { "eraseRRect", 8, "rect" },
/* 0x43 */ { "invertRRect", 8, "rect" },
/* 0x44 */ { "fillRRrect", 8, "rect" },
/* 0x45 */ { "reserved", 8, "reserved for Apple use" },
/* 0x46 */ { "reserved", 8, "reserved for Apple use" },
/* 0x47 */ { "reserved", 8, "reserved for Apple use" },
/* 0x48 */ { "frameSameRRect", 0, "rect" },
/* 0x49 */ { "paintSameRRect", 0, "rect" },
/* 0x4a */ { "eraseSameRRect", 0, "rect" },
/* 0x4b */ { "invertSameRRect", 0, "rect" },
/* 0x4c */ { "fillSameRRect", 0, "rect" },
/* 0x4d */ { "reserved", 0, "reserved for Apple use" },
/* 0x4e */ { "reserved", 0, "reserved for Apple use" },
/* 0x4f */ { "reserved", 0, "reserved for Apple use" },
/* 0x50 */ { "frameOval", 8, "rect" },
/* 0x51 */ { "paintOval", 8, "rect" },
/* 0x52 */ { "eraseOval", 8, "rect" },
/* 0x53 */ { "invertOval", 8, "rect" },
/* 0x54 */ { "fillOval", 8, "rect" },
/* 0x55 */ { "reserved", 8, "reserved for Apple use" },
/* 0x56 */ { "reserved", 8, "reserved for Apple use" },
/* 0x57 */ { "reserved", 8, "reserved for Apple use" },
/* 0x58 */ { "frameSameOval", 0, "rect" },
/* 0x59 */ { "paintSameOval", 0, "rect" },
/* 0x5a */ { "eraseSameOval", 0, "rect" },
/* 0x5b */ { "invertSameOval", 0, "rect" },
/* 0x5c */ { "fillSameOval", 0, "rect" },
/* 0x5d */ { "reserved", 0, "reserved for Apple use" },
/* 0x5e */ { "reserved", 0, "reserved for Apple use" },
/* 0x5f */ { "reserved", 0, "reserved for Apple use" },
/* 0x60 */ { "frameArc", 12, "rect, startAngle, arcAngle" },
/* 0x61 */ { "paintArc", 12, "rect, startAngle, arcAngle" },
/* 0x62 */ { "eraseArc", 12, "rect, startAngle, arcAngle" },
/* 0x63 */ { "invertArc", 12, "rect, startAngle, arcAngle" },
/* 0x64 */ { "fillArc", 12, "rect, startAngle, arcAngle" },
/* 0x65 */ { "reserved", 12, "reserved for Apple use" },
/* 0x66 */ { "reserved", 12, "reserved for Apple use" },
/* 0x67 */ { "reserved", 12, "reserved for Apple use" },
/* 0x68 */ { "frameSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x69 */ { "paintSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6a */ { "eraseSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6b */ { "invertSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6c */ { "fillSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6d */ { "reserved", 4, "reserved for Apple use" },
/* 0x6e */ { "reserved", 4, "reserved for Apple use" },
/* 0x6f */ { "reserved", 4, "reserved for Apple use" },
/* 0x70 */ { "framePoly", 0, "poly" },
/* 0x71 */ { "paintPoly", 0, "poly" },
/* 0x72 */ { "erasePoly", 0, "poly" },
/* 0x73 */ { "invertPoly", 0, "poly" },
/* 0x74 */ { "fillPoly", 0, "poly" },
/* 0x75 */ { "reserved", 0, "reserved for Apple use" },
/* 0x76 */ { "reserved", 0, "reserved for Apple use" },
/* 0x77 */ { "reserved", 0, "reserved for Apple use" },
/* 0x78 */ { "frameSamePoly", 0, "poly (NYI)" },
/* 0x79 */ { "paintSamePoly", 0, "poly (NYI)" },
/* 0x7a */ { "eraseSamePoly", 0, "poly (NYI)" },
/* 0x7b */ { "invertSamePoly", 0, "poly (NYI)" },
/* 0x7c */ { "fillSamePoly", 0, "poly (NYI)" },
/* 0x7d */ { "reserved", 0, "reserved for Apple use" },
/* 0x7e */ { "reserved", 0, "reserved for Apple use" },
/* 0x7f */ { "reserved", 0, "reserved for Apple use" },
/* 0x80 */ { "frameRgn", 0, "region" },
/* 0x81 */ { "paintRgn", 0, "region" },
/* 0x82 */ { "eraseRgn", 0, "region" },
/* 0x83 */ { "invertRgn", 0, "region" },
/* 0x84 */ { "fillRgn", 0, "region" },
/* 0x85 */ { "reserved", 0, "reserved for Apple use" },
/* 0x86 */ { "reserved", 0, "reserved for Apple use" },
/* 0x87 */ { "reserved", 0, "reserved for Apple use" },
/* 0x88 */ { "frameSameRgn", 0, "region (NYI)" },
/* 0x89 */ { "paintSameRgn", 0, "region (NYI)" },
/* 0x8a */ { "eraseSameRgn", 0, "region (NYI)" },
/* 0x8b */ { "invertSameRgn", 0, "region (NYI)" },
/* 0x8c */ { "fillSameRgn", 0, "region (NYI)" },
/* 0x8d */ { "reserved", 0, "reserved for Apple use" },
/* 0x8e */ { "reserved", 0, "reserved for Apple use" },
/* 0x8f */ { "reserved", 0, "reserved for Apple use" },
/* 0x90 */ { "BitsRect", 0, "copybits, rect clipped" },
/* 0x91 */ { "BitsRgn", 0, "copybits, rgn clipped" },
/* 0x92 */ { "reserved", -1, "reserved for Apple use" },
/* 0x93 */ { "reserved", -1, "reserved for Apple use" },
/* 0x94 */ { "reserved", -1, "reserved for Apple use" },
/* 0x95 */ { "reserved", -1, "reserved for Apple use" },
/* 0x96 */ { "reserved", -1, "reserved for Apple use" },
/* 0x97 */ { "reserved", -1, "reserved for Apple use" },
/* 0x98 */ { "PackBitsRect", 0, "packed copybits, rect clipped" },
/* 0x99 */ { "PackBitsRgn", 0, "packed copybits, rgn clipped" },
/* 0x9a */ { "DirectBitsRect", 0, "PixMap, srcRect, dstRect, mode, PixData" },
/* 0x9b */ { "DirectBitsRgn", 0, "PixMap, srcRect, dstRect, mode, maskRgn, PixData" },
/* 0x9c */ { "reserved", -1, "reserved for Apple use" },
/* 0x9d */ { "reserved", -1, "reserved for Apple use" },
/* 0x9e */ { "reserved", -1, "reserved for Apple use" },
/* 0x9f */ { "reserved", -1, "reserved for Apple use" },
/* 0xa0 */ { "ShortComment", 2, "kind (word)" },
/* 0xa1 */ { "LongComment", 0, "kind (word), size (word), data" }
};
/*
Forward declarations.
*/
static MagickBooleanType
WritePICTImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DecodeImage decompresses an image via Macintosh pack bits decoding for
% Macintosh PICT images.
%
% The format of the DecodeImage method is:
%
%      unsigned char *DecodeImage(Image *blob,Image *image,
%        size_t bytes_per_line,const unsigned int bits_per_pixel,
%        size_t *extent)
%
% A description of each parameter follows:
%
% o blob,image: the address of a structure of type Image.
%
% o bytes_per_line: This integer identifies the number of bytes in a
% scanline.
%
% o bits_per_pixel: the number of bits in a pixel.
%
%    o extent: on return, the size in bytes of the allocated pixel buffer.
%
*/
static unsigned char *ExpandBuffer(unsigned char *pixels,
MagickSizeType *bytes_per_line,const unsigned int bits_per_pixel)
{
register ssize_t
i;
register unsigned char
*p,
*q;
static unsigned char
scanline[8*256];
p=pixels;
q=scanline;
switch (bits_per_pixel)
{
case 8:
case 16:
case 32:
return(pixels);
case 4:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 4) & 0xff;
*q++=(*p & 15);
p++;
}
*bytes_per_line*=2;
break;
}
case 2:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 6) & 0x03;
*q++=(*p >> 4) & 0x03;
*q++=(*p >> 2) & 0x03;
*q++=(*p & 3);
p++;
}
*bytes_per_line*=4;
break;
}
case 1:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 7) & 0x01;
*q++=(*p >> 6) & 0x01;
*q++=(*p >> 5) & 0x01;
*q++=(*p >> 4) & 0x01;
*q++=(*p >> 3) & 0x01;
*q++=(*p >> 2) & 0x01;
*q++=(*p >> 1) & 0x01;
*q++=(*p & 0x01);
p++;
}
*bytes_per_line*=8;
break;
}
default:
break;
}
return(scanline);
}
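
/*
  Worked example for ExpandBuffer() above: at 4 bits per pixel the input
  byte 0xB4 expands to the two pixel bytes 0x0B and 0x04 and
  *bytes_per_line doubles; at 1 bit per pixel each input byte yields eight
  0/1 bytes and *bytes_per_line grows eightfold.
*/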
static unsigned char *DecodeImage(Image *blob,Image *image,
size_t bytes_per_line,const unsigned int bits_per_pixel,size_t *extent)
{
MagickBooleanType
status;
MagickSizeType
number_pixels;
register ssize_t
i;
register unsigned char
*p,
*q;
size_t
bytes_per_pixel,
length,
row_bytes,
scanline_length,
width;
ssize_t
count,
j,
y;
unsigned char
*pixels,
*scanline;
/*
Determine pixel buffer size.
*/
if (bits_per_pixel <= 8)
bytes_per_line&=0x7fff;
width=image->columns;
bytes_per_pixel=1;
if (bits_per_pixel == 16)
{
bytes_per_pixel=2;
width*=2;
}
else
if (bits_per_pixel == 32)
width*=image->alpha_trait ? 4 : 3;
if (bytes_per_line == 0)
bytes_per_line=width;
row_bytes=(size_t) (image->columns | 0x8000);
if (image->storage_class == DirectClass)
row_bytes=(size_t) ((4*image->columns) | 0x8000);
/*
Allocate pixel and scanline buffer.
*/
pixels=(unsigned char *) AcquireQuantumMemory(image->rows,row_bytes*
sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
return((unsigned char *) NULL);
*extent=row_bytes*image->rows*sizeof(*pixels);
(void) memset(pixels,0,*extent);
scanline=(unsigned char *) AcquireQuantumMemory(row_bytes,2*
sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return((unsigned char *) NULL);
}
(void) memset(scanline,0,2*row_bytes*sizeof(*scanline));
status=MagickTrue;
if (bytes_per_line < 8)
{
/*
Pixels are already uncompressed.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=pixels+y*width*GetPixelChannels(image);
number_pixels=bytes_per_line;
count=ReadBlob(blob,(size_t) number_pixels,scanline);
if (count != (ssize_t) number_pixels)
{
status=MagickFalse;
break;
}
p=ExpandBuffer(scanline,&number_pixels,bits_per_pixel);
if ((q+number_pixels) > (pixels+(*extent)))
{
status=MagickFalse;
break;
}
(void) memcpy(q,p,(size_t) number_pixels);
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
if (status == MagickFalse)
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(pixels);
}
/*
Uncompress RLE pixels into uncompressed pixel buffer.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=pixels+y*width;
if (bytes_per_line > 200)
scanline_length=ReadBlobMSBShort(blob);
else
scanline_length=(size_t) ReadBlobByte(blob);
if ((scanline_length >= row_bytes) || (scanline_length == 0))
{
status=MagickFalse;
break;
}
count=ReadBlob(blob,scanline_length,scanline);
if (count != (ssize_t) scanline_length)
{
status=MagickFalse;
break;
}
for (j=0; j < (ssize_t) scanline_length; )
if ((scanline[j] & 0x80) == 0)
{
length=(size_t) ((scanline[j] & 0xff)+1);
number_pixels=length*bytes_per_pixel;
p=ExpandBuffer(scanline+j+1,&number_pixels,bits_per_pixel);
if ((q-pixels+number_pixels) <= *extent)
(void) memcpy(q,p,(size_t) number_pixels);
q+=number_pixels;
j+=(ssize_t) (length*bytes_per_pixel+1);
}
else
{
length=(size_t) (((scanline[j] ^ 0xff) & 0xff)+2);
number_pixels=bytes_per_pixel;
p=ExpandBuffer(scanline+j+1,&number_pixels,bits_per_pixel);
for (i=0; i < (ssize_t) length; i++)
{
if ((q-pixels+number_pixels) <= *extent)
(void) memcpy(q,p,(size_t) number_pixels);
q+=number_pixels;
}
j+=(ssize_t) bytes_per_pixel+1;
}
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
if (status == MagickFalse)
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(pixels);
}
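
/*
  A minimal standalone sketch (illustrative, not part of this coder) of
  the Macintosh PackBits scheme that DecodeImage() applies per scanline,
  for the 8-bit case with only essential bounds checks.  Guarded out;
  packbits_decode is a hypothetical name.
*/
#if 0
#include <stddef.h>

static size_t packbits_decode(const unsigned char *src,size_t src_len,
  unsigned char *dst,size_t dst_len)
{
  size_t
    i = 0,
    o = 0;

  while (i < src_len)
  {
    unsigned char
      header = src[i++];

    if ((header & 0x80) == 0)
      {
        size_t run = (size_t) header+1;  /* literal run of 1..128 bytes */

        if ((i+run > src_len) || (o+run > dst_len))
          break;
        while (run-- != 0)
          dst[o++]=src[i++];
      }
    else
      {
        size_t run = (size_t) ((header ^ 0xff) & 0xff)+2;  /* repeat 2..128 */

        if ((i >= src_len) || (o+run > dst_len))
          break;
        while (run-- != 0)
          dst[o++]=src[i];
        i++;
      }
  }
  return(o);  /* number of bytes produced */
}
#endif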
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeImage compresses an image via Macintosh pack bits encoding
% for Macintosh PICT images.
%
% The format of the EncodeImage method is:
%
% size_t EncodeImage(Image *image,const unsigned char *scanline,
% const size_t bytes_per_line,unsigned char *pixels)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o scanline: A pointer to an array of characters to pack.
%
% o bytes_per_line: the number of bytes in a scanline.
%
% o pixels: A pointer to an array of characters where the packed
% characters are stored.
%
*/
static size_t EncodeImage(Image *image,const unsigned char *scanline,
const size_t bytes_per_line,unsigned char *pixels)
{
#define MaxCount 128
#define MaxPackbitsRunlength 128
register const unsigned char
*p;
register ssize_t
i;
register unsigned char
*q;
size_t
length;
ssize_t
count,
repeat_count,
runlength;
unsigned char
index;
/*
Pack scanline.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(scanline != (unsigned char *) NULL);
assert(pixels != (unsigned char *) NULL);
count=0;
runlength=0;
p=scanline+(bytes_per_line-1);
q=pixels;
index=(*p);
for (i=(ssize_t) bytes_per_line-1; i >= 0; i--)
{
if (index == *p)
runlength++;
else
{
if (runlength < 3)
while (runlength > 0)
{
*q++=(unsigned char) index;
runlength--;
count++;
if (count == MaxCount)
{
*q++=(unsigned char) (MaxCount-1);
count-=MaxCount;
}
}
else
{
if (count > 0)
*q++=(unsigned char) (count-1);
count=0;
while (runlength > 0)
{
repeat_count=runlength;
if (repeat_count > MaxPackbitsRunlength)
repeat_count=MaxPackbitsRunlength;
*q++=(unsigned char) index;
*q++=(unsigned char) (257-repeat_count);
runlength-=repeat_count;
}
}
runlength=1;
}
index=(*p);
p--;
}
if (runlength < 3)
while (runlength > 0)
{
*q++=(unsigned char) index;
runlength--;
count++;
if (count == MaxCount)
{
*q++=(unsigned char) (MaxCount-1);
count-=MaxCount;
}
}
else
{
if (count > 0)
*q++=(unsigned char) (count-1);
count=0;
while (runlength > 0)
{
repeat_count=runlength;
if (repeat_count > MaxPackbitsRunlength)
repeat_count=MaxPackbitsRunlength;
*q++=(unsigned char) index;
*q++=(unsigned char) (257-repeat_count);
runlength-=repeat_count;
}
}
if (count > 0)
*q++=(unsigned char) (count-1);
/*
    Write the packed length followed by the packed pixel data.
*/
length=(size_t) (q-pixels);
if (bytes_per_line > 200)
{
(void) WriteBlobMSBShort(image,(unsigned short) length);
length+=2;
}
else
{
(void) WriteBlobByte(image,(unsigned char) length);
length++;
}
while (q != pixels)
{
q--;
(void) WriteBlobByte(image,*q);
}
return(length);
}
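
/*
  Worked example for EncodeImage() above: a run of five 0xAA bytes packs
  to the two bytes 0xFC 0xAA (0xFC = 257-5), while the literal sequence
  0x01 0x02 0x03 packs to 0x02 0x01 0x02 0x03 (count-1, then the bytes).
  The scanline is scanned from its end and the packed buffer is emitted in
  reverse, so the stream reads front-to-back on disk.
*/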
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P I C T %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPICT() returns MagickTrue if the image format type, identified by the
% magick string, is PICT.
%
%  The format of the IsPICT method is:
%
% MagickBooleanType IsPICT(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPICT(const unsigned char *magick,const size_t length)
{
if (length < 12)
return(MagickFalse);
/*
    Embedded OLE2 Macintosh files have "PICT" instead of the 512-byte
    platform header.
*/
if (memcmp(magick,"PICT",4) == 0)
return(MagickTrue);
if (length < 528)
return(MagickFalse);
if (memcmp(magick+522,"\000\021\002\377\014\000",6) == 0)
return(MagickTrue);
return(MagickFalse);
}
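
/*
  For reference, the six magic bytes tested above decode as PICT version 2
  opcodes: 0x0011 (picVersion) with data 0x02, a 0xFF pad byte, then the
  0x0C00 HeaderOp; they sit at offset 522 because the 512-byte platform
  header is followed by the 2-byte picSize and 8-byte frame rectangle.
*/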
#if !defined(macintosh)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPICTImage() reads an Apple Macintosh QuickDraw/PICT image file
% and returns it. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the ReadPICTImage method is:
%
% Image *ReadPICTImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixmap(Image *image,PICTPixmap *pixmap)
{
pixmap->version=(short) ReadBlobMSBShort(image);
pixmap->pack_type=(short) ReadBlobMSBShort(image);
pixmap->pack_size=ReadBlobMSBLong(image);
pixmap->horizontal_resolution=1UL*ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
pixmap->vertical_resolution=1UL*ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
pixmap->pixel_type=(short) ReadBlobMSBShort(image);
pixmap->bits_per_pixel=(short) ReadBlobMSBShort(image);
pixmap->component_count=(short) ReadBlobMSBShort(image);
pixmap->component_size=(short) ReadBlobMSBShort(image);
pixmap->plane_bytes=ReadBlobMSBLong(image);
pixmap->table=ReadBlobMSBLong(image);
pixmap->reserved=ReadBlobMSBLong(image);
if ((EOFBlob(image) != MagickFalse) || (pixmap->bits_per_pixel <= 0) ||
(pixmap->bits_per_pixel > 32) || (pixmap->component_count <= 0) ||
(pixmap->component_count > 4) || (pixmap->component_size <= 0))
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType ReadRectangle(Image *image,PICTRectangle *rectangle)
{
rectangle->top=(short) ReadBlobMSBShort(image);
rectangle->left=(short) ReadBlobMSBShort(image);
rectangle->bottom=(short) ReadBlobMSBShort(image);
rectangle->right=(short) ReadBlobMSBShort(image);
if ((EOFBlob(image) != MagickFalse) ||
((rectangle->bottom-rectangle->top) <= 0) ||
((rectangle->right-rectangle->left) <= 0))
return(MagickFalse);
return(MagickTrue);
}
static Image *ReadPICTImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define ThrowPICTException(exception,message) \
{ \
if (tile_image != (Image *) NULL) \
tile_image=DestroyImage(tile_image); \
if (read_info != (ImageInfo *) NULL) \
read_info=DestroyImageInfo(read_info); \
ThrowReaderException((exception),(message)); \
}
char
geometry[MagickPathExtent],
header_ole[4];
Image
*image,
*tile_image;
ImageInfo
*read_info;
int
c,
code;
MagickBooleanType
jpeg,
status;
PICTRectangle
frame;
PICTPixmap
pixmap;
Quantum
index;
register Quantum
*q;
register ssize_t
i,
x;
size_t
extent,
length;
ssize_t
count,
flags,
j,
version,
y;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read PICT header.
*/
read_info=(ImageInfo *) NULL;
tile_image=(Image *) NULL;
pixmap.bits_per_pixel=0;
pixmap.component_count=0;
/*
    Skip the header: 512 bytes for standard PICT, or 4 bytes ("PICT") for OLE2.
*/
header_ole[0]=ReadBlobByte(image);
header_ole[1]=ReadBlobByte(image);
header_ole[2]=ReadBlobByte(image);
header_ole[3]=ReadBlobByte(image);
if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) &&
(header_ole[2] == 0x43) && (header_ole[3] == 0x54 )))
for (i=0; i < 508; i++)
if (ReadBlobByte(image) == EOF)
break;
(void) ReadBlobMSBShort(image); /* skip picture size */
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
while ((c=ReadBlobByte(image)) == 0) ;
if (c != 0x11)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
version=(ssize_t) ReadBlobByte(image);
if (version == 2)
{
c=ReadBlobByte(image);
if (c != 0xff)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
}
else
if (version != 1)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) ||
(frame.bottom < 0) || (frame.left >= frame.right) ||
(frame.top >= frame.bottom))
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
/*
Create black canvas.
*/
flags=0;
image->depth=8;
image->columns=(size_t) (frame.right-frame.left);
image->rows=(size_t) (frame.bottom-frame.top);
image->resolution.x=DefaultResolution;
image->resolution.y=DefaultResolution;
image->units=UndefinedResolution;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status != MagickFalse)
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Interpret PICT opcodes.
*/
jpeg=MagickFalse;
for (code=0; EOFBlob(image) == MagickFalse; )
{
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((version == 1) || ((TellBlob(image) % 2) != 0))
code=ReadBlobByte(image);
if (version == 2)
code=ReadBlobMSBSignedShort(image);
if (code < 0)
break;
if (code == 0)
continue;
if (code > 0xa1)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code);
}
else
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" %04X %s: %s",code,codes[code].name,codes[code].description);
switch (code)
{
case 0x01:
{
/*
Clipping rectangle.
*/
length=ReadBlobMSBShort(image);
if (length != 0x000a)
{
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0))
break;
image->columns=(size_t) (frame.right-frame.left);
image->rows=(size_t) (frame.bottom-frame.top);
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status != MagickFalse)
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
break;
}
case 0x12:
case 0x13:
case 0x14:
{
ssize_t
pattern;
size_t
height,
width;
/*
Skip pattern definition.
*/
pattern=(ssize_t) ReadBlobMSBShort(image);
for (i=0; i < 8; i++)
if (ReadBlobByte(image) == EOF)
break;
if (pattern == 2)
{
for (i=0; i < 5; i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
if (pattern != 1)
ThrowPICTException(CorruptImageError,"UnknownPatternType");
length=ReadBlobMSBShort(image);
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if (ReadPixmap(image,&pixmap) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
image->depth=(size_t) pixmap.component_size;
image->resolution.x=1.0*pixmap.horizontal_resolution;
image->resolution.y=1.0*pixmap.vertical_resolution;
image->units=PixelsPerInchResolution;
(void) ReadBlobMSBLong(image);
flags=(ssize_t) ReadBlobMSBShort(image);
length=ReadBlobMSBShort(image);
for (i=0; i <= (ssize_t) length; i++)
(void) ReadBlobMSBLong(image);
width=(size_t) (frame.bottom-frame.top);
height=(size_t) (frame.right-frame.left);
if (pixmap.bits_per_pixel <= 8)
length&=0x7fff;
if (pixmap.bits_per_pixel == 16)
width<<=1;
if (length == 0)
length=width;
if (length < 8)
{
for (i=0; i < (ssize_t) (length*height); i++)
if (ReadBlobByte(image) == EOF)
break;
}
else
for (i=0; i < (ssize_t) height; i++)
{
if (EOFBlob(image) != MagickFalse)
break;
if (length > 200)
{
for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++)
if (ReadBlobByte(image) == EOF)
break;
}
else
for (j=0; j < (ssize_t) ReadBlobByte(image); j++)
if (ReadBlobByte(image) == EOF)
break;
}
break;
}
case 0x1b:
{
/*
Initialize image background color.
*/
image->background_color.red=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
image->background_color.green=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
image->background_color.blue=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
break;
}
case 0x70:
case 0x71:
case 0x72:
case 0x73:
case 0x74:
case 0x75:
case 0x76:
case 0x77:
{
/*
Skip polygon or region.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
case 0x90:
case 0x91:
case 0x98:
case 0x99:
case 0x9a:
case 0x9b:
{
PICTRectangle
source,
destination;
register unsigned char
*p;
size_t
j;
ssize_t
bytes_per_line;
unsigned char
*pixels;
/*
Pixmap clipped by a rectangle.
*/
bytes_per_line=0;
if ((code != 0x9a) && (code != 0x9b))
bytes_per_line=(ssize_t) ReadBlobMSBShort(image);
else
{
(void) ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
}
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
/*
Initialize tile image.
*/
tile_image=CloneImage(image,(size_t) (frame.right-frame.left),
(size_t) (frame.bottom-frame.top),MagickTrue,exception);
if (tile_image == (Image *) NULL)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if ((code == 0x9a) || (code == 0x9b) ||
((bytes_per_line & 0x8000) != 0))
{
if (ReadPixmap(image,&pixmap) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
tile_image->depth=(size_t) pixmap.component_size;
tile_image->alpha_trait=pixmap.component_count == 4 ?
BlendPixelTrait : UndefinedPixelTrait;
tile_image->resolution.x=(double) pixmap.horizontal_resolution;
tile_image->resolution.y=(double) pixmap.vertical_resolution;
tile_image->units=PixelsPerInchResolution;
if (tile_image->alpha_trait != UndefinedPixelTrait)
(void) SetImageAlpha(tile_image,OpaqueAlpha,exception);
}
if ((code != 0x9a) && (code != 0x9b))
{
/*
Initialize colormap.
*/
tile_image->colors=2;
if ((bytes_per_line & 0x8000) != 0)
{
(void) ReadBlobMSBLong(image);
flags=(ssize_t) ReadBlobMSBShort(image);
tile_image->colors=1UL*ReadBlobMSBShort(image)+1;
}
status=AcquireImageColormap(tile_image,tile_image->colors,
exception);
if (status == MagickFalse)
ThrowPICTException(ResourceLimitError,
"MemoryAllocationFailed");
if ((bytes_per_line & 0x8000) != 0)
{
for (i=0; i < (ssize_t) tile_image->colors; i++)
{
j=ReadBlobMSBShort(image) % tile_image->colors;
if ((flags & 0x8000) != 0)
j=(size_t) i;
tile_image->colormap[j].red=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
tile_image->colormap[j].green=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
tile_image->colormap[j].blue=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
}
}
else
{
for (i=0; i < (ssize_t) tile_image->colors; i++)
{
tile_image->colormap[i].red=(Quantum) (QuantumRange-
tile_image->colormap[i].red);
tile_image->colormap[i].green=(Quantum) (QuantumRange-
tile_image->colormap[i].green);
tile_image->colormap[i].blue=(Quantum) (QuantumRange-
tile_image->colormap[i].blue);
}
}
}
if (EOFBlob(image) != MagickFalse)
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
if (ReadRectangle(image,&source) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if (ReadRectangle(image,&destination) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlobMSBShort(image);
if ((code == 0x91) || (code == 0x99) || (code == 0x9b))
{
/*
Skip region.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
}
if ((code != 0x9a) && (code != 0x9b) &&
(bytes_per_line & 0x8000) == 0)
pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line,1,
&extent);
else
pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line,
(unsigned int) pixmap.bits_per_pixel,&extent);
if (pixels == (unsigned char *) NULL)
ThrowPICTException(CorruptImageError,"UnableToUncompressImage");
/*
Convert PICT tile image to pixel packets.
*/
p=pixels;
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
if (p > (pixels+extent+image->columns))
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowPICTException(CorruptImageError,"NotEnoughPixelData");
}
q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
if (tile_image->storage_class == PseudoClass)
{
index=(Quantum) ConstrainColormapIndex(tile_image,(ssize_t)
*p,exception);
SetPixelIndex(tile_image,index,q);
SetPixelRed(tile_image,
tile_image->colormap[(ssize_t) index].red,q);
SetPixelGreen(tile_image,
tile_image->colormap[(ssize_t) index].green,q);
SetPixelBlue(tile_image,
tile_image->colormap[(ssize_t) index].blue,q);
}
else
{
if (pixmap.bits_per_pixel == 16)
{
i=(ssize_t) (*p++);
j=(size_t) (*p);
SetPixelRed(tile_image,ScaleCharToQuantum(
(unsigned char) ((i & 0x7c) << 1)),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
(unsigned char) (((i & 0x03) << 6) |
((j & 0xe0) >> 2))),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
(unsigned char) ((j & 0x1f) << 3)),q);
}
else
if (tile_image->alpha_trait == UndefinedPixelTrait)
{
if (p > (pixels+extent+2*image->columns))
ThrowPICTException(CorruptImageError,
"NotEnoughPixelData");
SetPixelRed(tile_image,ScaleCharToQuantum(*p),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
*(p+tile_image->columns)),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
*(p+2*tile_image->columns)),q);
}
else
{
if (p > (pixels+extent+3*image->columns))
ThrowPICTException(CorruptImageError,
"NotEnoughPixelData");
SetPixelAlpha(tile_image,ScaleCharToQuantum(*p),q);
SetPixelRed(tile_image,ScaleCharToQuantum(
*(p+tile_image->columns)),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
*(p+2*tile_image->columns)),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
*(p+3*tile_image->columns)),q);
}
}
p++;
q+=GetPixelChannels(tile_image);
}
if (SyncAuthenticPixels(tile_image,exception) == MagickFalse)
break;
if ((tile_image->storage_class == DirectClass) &&
(pixmap.bits_per_pixel != 16))
{
p+=(pixmap.component_count-1)*tile_image->columns;
if (p < pixels)
break;
}
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
tile_image->rows);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
if ((jpeg == MagickFalse) && (EOFBlob(image) == MagickFalse))
if ((code == 0x9a) || (code == 0x9b) ||
((bytes_per_line & 0x8000) != 0))
(void) CompositeImage(image,tile_image,CopyCompositeOp,
MagickTrue,(ssize_t) destination.left,(ssize_t)
destination.top,exception);
tile_image=DestroyImage(tile_image);
break;
}
case 0xa1:
{
unsigned char
*info;
size_t
type;
/*
Comment.
*/
type=ReadBlobMSBShort(image);
length=ReadBlobMSBShort(image);
if (length == 0)
break;
(void) ReadBlobMSBLong(image);
length-=MagickMin(length,4);
if (length == 0)
break;
info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info));
if (info == (unsigned char *) NULL)
break;
count=ReadBlob(image,length,info);
if (count != (ssize_t) length)
{
info=(unsigned char *) RelinquishMagickMemory(info);
ThrowPICTException(ResourceLimitError,"UnableToReadImageData");
}
switch (type)
{
case 0xe0:
{
profile=BlobToStringInfo((const void *) NULL,length);
SetStringInfoDatum(profile,info);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
info=(unsigned char *) RelinquishMagickMemory(info);
ThrowPICTException(ResourceLimitError,
"MemoryAllocationFailed");
}
break;
}
case 0x1f2:
{
profile=BlobToStringInfo((const void *) NULL,length);
SetStringInfoDatum(profile,info);
status=SetImageProfile(image,"iptc",profile,exception);
if (status == MagickFalse)
{
info=(unsigned char *) RelinquishMagickMemory(info);
ThrowPICTException(ResourceLimitError,
"MemoryAllocationFailed");
}
profile=DestroyStringInfo(profile);
break;
}
default:
break;
}
info=(unsigned char *) RelinquishMagickMemory(info);
break;
}
default:
{
/*
Skip to next op code.
*/
if (codes[code].length == -1)
(void) ReadBlobMSBShort(image);
else
for (i=0; i < (ssize_t) codes[code].length; i++)
if (ReadBlobByte(image) == EOF)
break;
}
}
}
if (code == 0xc00)
{
/*
Skip header.
*/
for (i=0; i < 24; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
if (((code >= 0xb0) && (code <= 0xcf)) ||
((code >= 0x8000) && (code <= 0x80ff)))
continue;
if (code == 0x8200)
{
char
filename[MaxTextExtent];
FILE
*file;
int
unique_file;
/*
Embedded JPEG.
*/
jpeg=MagickTrue;
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
(void) FormatLocaleString(read_info->filename,MaxTextExtent,"jpeg:%s",
filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
{
(void) RelinquishUniqueFileResource(read_info->filename);
(void) CopyMagickString(image->filename,read_info->filename,
MagickPathExtent);
ThrowPICTException(FileOpenError,"UnableToCreateTemporaryFile");
}
length=ReadBlobMSBLong(image);
if (length > 154)
{
for (i=0; i < 6; i++)
(void) ReadBlobMSBLong(image);
if (ReadRectangle(image,&frame) == MagickFalse)
{
(void) fclose(file);
(void) RelinquishUniqueFileResource(read_info->filename);
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
}
for (i=0; i < 122; i++)
if (ReadBlobByte(image) == EOF)
break;
for (i=0; i < (ssize_t) (length-154); i++)
{
c=ReadBlobByte(image);
if (c == EOF)
break;
if (fputc(c,file) != c)
break;
}
}
      (void) fclose(file);  /* also closes unique_file's descriptor */
tile_image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(filename);
read_info=DestroyImageInfo(read_info);
if (tile_image == (Image *) NULL)
continue;
(void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",
(double) MagickMax(image->columns,tile_image->columns),
(double) MagickMax(image->rows,tile_image->rows));
(void) SetImageExtent(image,
MagickMax(image->columns,tile_image->columns),
MagickMax(image->rows,tile_image->rows),exception);
(void) TransformImageColorspace(image,tile_image->colorspace,exception);
(void) CompositeImage(image,tile_image,CopyCompositeOp,MagickTrue,
(ssize_t) frame.left,(ssize_t) frame.right,exception);
image->compression=tile_image->compression;
tile_image=DestroyImage(tile_image);
continue;
}
if ((code == 0xff) || (code == 0xffff))
break;
if (((code >= 0xd0) && (code <= 0xfe)) ||
((code >= 0x8100) && (code <= 0xffff)))
{
/*
Skip reserved.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) length; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
if ((code >= 0x100) && (code <= 0x7fff))
{
/*
Skip reserved.
*/
length=(size_t) ((code >> 7) & 0xff);
for (i=0; i < (ssize_t) length; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPICTImage() adds attributes for the PICT image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPICTImage method is:
%
% size_t RegisterPICTImage(void)
%
*/
ModuleExport size_t RegisterPICTImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PICT","PCT","Apple Macintosh QuickDraw/PICT");
entry->decoder=(DecodeImageHandler *) ReadPICTImage;
entry->encoder=(EncodeImageHandler *) WritePICTImage;
  entry->flags^=CoderAdjoinFlag;  /* clear default adjoin: PICT stores a single frame */
entry->flags|=CoderEncoderSeekableStreamFlag;
entry->magick=(IsImageFormatHandler *) IsPICT;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PICT","PICT","Apple Macintosh QuickDraw/PICT");
entry->decoder=(DecodeImageHandler *) ReadPICTImage;
entry->encoder=(EncodeImageHandler *) WritePICTImage;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
entry->magick=(IsImageFormatHandler *) IsPICT;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPICTImage() removes format registrations made by the
% PICT module from the list of supported formats.
%
% The format of the UnregisterPICTImage method is:
%
% UnregisterPICTImage(void)
%
*/
ModuleExport void UnregisterPICTImage(void)
{
(void) UnregisterMagickInfo("PCT");
(void) UnregisterMagickInfo("PICT");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePICTImage() writes an image to a file in the Apple Macintosh
% QuickDraw/PICT image format.
%
% The format of the WritePICTImage method is:
%
% MagickBooleanType WritePICTImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
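/*
  Editorial sketch, not part of the original coder: WritePICTImage() is
  reached through the generic MagickCore write path once RegisterPICTImage()
  has run.  A hedged illustration -- "out.pict" is a placeholder filename
  and the snippet is kept under "#if 0":
*/
#if 0
  ImageInfo *info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename,"out.pict",MagickPathExtent);
  (void) WriteImage(info,image,exception);  /* dispatches to WritePICTImage */
  info=DestroyImageInfo(info);
#endif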
static MagickBooleanType WritePICTImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define MaxCount 128
#define PictCropRegionOp 0x01
#define PictEndOfPictureOp 0xff
#define PictJPEGOp 0x8200
#define PictInfoOp 0x0C00
#define PictInfoSize 512
#define PictPixmapOp 0x9A
#define PictPICTOp 0x98
#define PictVersion 0x11
const StringInfo
*profile;
double
x_resolution,
y_resolution;
MagickBooleanType
status;
MagickOffsetType
offset;
PICTPixmap
pixmap;
PICTRectangle
bounds,
crop_rectangle,
destination_rectangle,
frame_rectangle,
size_rectangle,
source_rectangle;
register const Quantum
*p;
register ssize_t
i,
x;
size_t
bytes_per_line,
count,
row_bytes,
storage_class;
ssize_t
y;
unsigned char
*buffer,
*packed_scanline,
*scanline;
unsigned short
base_address,
transfer_mode;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns > 65535L) || (image->rows > 65535L))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/*
Initialize image info.
*/
size_rectangle.top=0;
size_rectangle.left=0;
size_rectangle.bottom=(short) image->rows;
size_rectangle.right=(short) image->columns;
frame_rectangle=size_rectangle;
crop_rectangle=size_rectangle;
source_rectangle=size_rectangle;
destination_rectangle=size_rectangle;
base_address=0xff;
row_bytes=image->columns;
bounds.top=0;
bounds.left=0;
bounds.bottom=(short) image->rows;
bounds.right=(short) image->columns;
pixmap.version=0;
pixmap.pack_type=0;
pixmap.pack_size=0;
pixmap.pixel_type=0;
pixmap.bits_per_pixel=8;
pixmap.component_count=1;
pixmap.component_size=8;
pixmap.plane_bytes=0;
pixmap.table=0;
pixmap.reserved=0;
transfer_mode=0;
x_resolution=image->resolution.x != 0.0 ? image->resolution.x :
DefaultResolution;
y_resolution=image->resolution.y != 0.0 ? image->resolution.y :
DefaultResolution;
storage_class=image->storage_class;
if (image_info->compression == JPEGCompression)
storage_class=DirectClass;
if (storage_class == DirectClass)
{
pixmap.component_count=image->alpha_trait != UndefinedPixelTrait ? 4 : 3;
pixmap.pixel_type=16;
pixmap.bits_per_pixel=32;
pixmap.pack_type=0x04;
transfer_mode=0x40;
row_bytes=4*image->columns;
}
/*
Allocate memory.
*/
bytes_per_line=image->columns;
if (storage_class == DirectClass)
bytes_per_line*=image->alpha_trait != UndefinedPixelTrait ? 4 : 3;
buffer=(unsigned char *) AcquireQuantumMemory(PictInfoSize,sizeof(*buffer));
packed_scanline=(unsigned char *) AcquireQuantumMemory((size_t)
(row_bytes+MaxCount),sizeof(*packed_scanline));
scanline=(unsigned char *) AcquireQuantumMemory(row_bytes,sizeof(*scanline));
if ((buffer == (unsigned char *) NULL) ||
(packed_scanline == (unsigned char *) NULL) ||
(scanline == (unsigned char *) NULL))
{
if (scanline != (unsigned char *) NULL)
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
if (packed_scanline != (unsigned char *) NULL)
packed_scanline=(unsigned char *) RelinquishMagickMemory(
packed_scanline);
if (buffer != (unsigned char *) NULL)
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(scanline,0,row_bytes);
(void) memset(packed_scanline,0,(size_t) (row_bytes+MaxCount));
/*
Write header, header size, size bounding box, version, and reserved.
*/
(void) memset(buffer,0,PictInfoSize);
(void) WriteBlob(image,PictInfoSize,buffer);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.right);
(void) WriteBlobMSBShort(image,PictVersion);
(void) WriteBlobMSBShort(image,0x02ff); /* version #2 */
(void) WriteBlobMSBShort(image,PictInfoOp);
(void) WriteBlobMSBLong(image,0xFFFE0000U);
/*
Write full size of the file, resolution, frame bounding box, and reserved.
*/
(void) WriteBlobMSBShort(image,(unsigned short) x_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) y_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.right);
(void) WriteBlobMSBLong(image,0x00000000L);
profile=GetImageProfile(image,"iptc");
if (profile != (StringInfo *) NULL)
{
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0x1f2);
(void) WriteBlobMSBShort(image,(unsigned short)
(GetStringInfoLength(profile)+4));
(void) WriteBlobString(image,"8BIM");
(void) WriteBlob(image,GetStringInfoLength(profile),
GetStringInfoDatum(profile));
}
profile=GetImageProfile(image,"icc");
if (profile != (StringInfo *) NULL)
{
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0xe0);
(void) WriteBlobMSBShort(image,(unsigned short)
(GetStringInfoLength(profile)+4));
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlob(image,GetStringInfoLength(profile),
GetStringInfoDatum(profile));
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0xe0);
(void) WriteBlobMSBShort(image,4);
(void) WriteBlobMSBLong(image,0x00000002U);
}
/*
Write crop region opcode and crop bounding box.
*/
(void) WriteBlobMSBShort(image,PictCropRegionOp);
(void) WriteBlobMSBShort(image,0xa);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.right);
if (image_info->compression == JPEGCompression)
{
Image
*jpeg_image;
ImageInfo
*jpeg_info;
size_t
length;
unsigned char
*blob;
jpeg_image=CloneImage(image,0,0,MagickTrue,exception);
if (jpeg_image == (Image *) NULL)
{
(void) CloseBlob(image);
return(MagickFalse);
}
jpeg_info=CloneImageInfo(image_info);
(void) CopyMagickString(jpeg_info->magick,"JPEG",MagickPathExtent);
length=0;
blob=(unsigned char *) ImageToBlob(jpeg_info,jpeg_image,&length,
exception);
jpeg_info=DestroyImageInfo(jpeg_info);
if (blob == (unsigned char *) NULL)
return(MagickFalse);
jpeg_image=DestroyImage(jpeg_image);
(void) WriteBlobMSBShort(image,PictJPEGOp);
(void) WriteBlobMSBLong(image,(unsigned int) length+154);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBLong(image,0x00010000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00010000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x40000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00400000U);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) image->rows);
(void) WriteBlobMSBShort(image,(unsigned short) image->columns);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,768);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00566A70U);
(void) WriteBlobMSBLong(image,0x65670000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000001U);
(void) WriteBlobMSBLong(image,0x00016170U);
(void) WriteBlobMSBLong(image,0x706C0000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBShort(image,768);
(void) WriteBlobMSBShort(image,(unsigned short) image->columns);
(void) WriteBlobMSBShort(image,(unsigned short) image->rows);
(void) WriteBlobMSBShort(image,(unsigned short) x_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) y_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBLong(image,length);
(void) WriteBlobMSBShort(image,0x0001);
(void) WriteBlobMSBLong(image,0x0B466F74U);
(void) WriteBlobMSBLong(image,0x6F202D20U);
(void) WriteBlobMSBLong(image,0x4A504547U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x0018FFFFU);
(void) WriteBlob(image,length,blob);
if ((length & 0x01) != 0)
(void) WriteBlobByte(image,'\0');
blob=(unsigned char *) RelinquishMagickMemory(blob);
}
/*
    Write picture opcode, row bytes, picture bounding box, and version.
*/
if (storage_class == PseudoClass)
(void) WriteBlobMSBShort(image,PictPICTOp);
else
{
(void) WriteBlobMSBShort(image,PictPixmapOp);
(void) WriteBlobMSBLong(image,(unsigned int) base_address);
}
(void) WriteBlobMSBShort(image,(unsigned short) (row_bytes | 0x8000));
(void) WriteBlobMSBShort(image,(unsigned short) bounds.top);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.left);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.right);
/*
Write pack type, pack size, resolution, pixel type, and pixel size.
*/
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.version);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.pack_type);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.pack_size);
(void) WriteBlobMSBShort(image,(unsigned short) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.pixel_type);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.bits_per_pixel);
/*
Write component count, size, plane bytes, table size, and reserved.
*/
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.component_count);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.component_size);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.plane_bytes);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.table);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.reserved);
if (storage_class == PseudoClass)
{
/*
Write image colormap.
*/
(void) WriteBlobMSBLong(image,0x00000000L); /* color seed */
(void) WriteBlobMSBShort(image,0L); /* color flags */
(void) WriteBlobMSBShort(image,(unsigned short) (image->colors-1));
for (i=0; i < (ssize_t) image->colors; i++)
{
(void) WriteBlobMSBShort(image,(unsigned short) i);
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].red));
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].green));
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].blue));
}
}
/*
Write source and destination rectangle.
*/
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.right);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.right);
(void) WriteBlobMSBShort(image,(unsigned short) transfer_mode);
/*
Write picture data.
*/
count=0;
if (storage_class == PseudoClass)
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
scanline[x]=(unsigned char) GetPixelIndex(image,p);
p+=GetPixelChannels(image);
}
count+=EncodeImage(image,scanline,(size_t) (row_bytes & 0x7FFF),
packed_scanline);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
if (image_info->compression == JPEGCompression)
{
(void) memset(scanline,0,row_bytes);
for (y=0; y < (ssize_t) image->rows; y++)
count+=EncodeImage(image,scanline,(size_t) (row_bytes & 0x7FFF),
packed_scanline);
}
else
{
register unsigned char
*blue,
*green,
*opacity,
*red;
red=scanline;
green=scanline+image->columns;
blue=scanline+2*image->columns;
opacity=scanline+3*image->columns;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
red=scanline;
green=scanline+image->columns;
blue=scanline+2*image->columns;
if (image->alpha_trait != UndefinedPixelTrait)
{
opacity=scanline;
red=scanline+image->columns;
green=scanline+2*image->columns;
blue=scanline+3*image->columns;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
*red++=ScaleQuantumToChar(GetPixelRed(image,p));
*green++=ScaleQuantumToChar(GetPixelGreen(image,p));
*blue++=ScaleQuantumToChar(GetPixelBlue(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
*opacity++=ScaleQuantumToChar((Quantum) (GetPixelAlpha(image,p)));
p+=GetPixelChannels(image);
}
count+=EncodeImage(image,scanline,bytes_per_line,packed_scanline);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
if ((count & 0x01) != 0)
(void) WriteBlobByte(image,'\0');
(void) WriteBlobMSBShort(image,PictEndOfPictureOp);
offset=TellBlob(image);
offset=SeekBlob(image,512,SEEK_SET);
(void) WriteBlobMSBShort(image,(unsigned short) offset);
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
packed_scanline=(unsigned char *) RelinquishMagickMemory(packed_scanline);
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
(void) CloseBlob(image);
return(MagickTrue);
}
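/*
  Editorial sketch, not part of the original coder: EncodeImage(), defined
  earlier in this file, packs each scanline in Macintosh PackBits style
  before writing it.  A minimal standalone model of that run-length scheme
  follows; it assumes <string.h> and a 'dst' buffer of at least
  n+(n/128)+1 bytes, and is kept under "#if 0" as an illustration only.
*/
#if 0
static size_t PackBitsSketch(const unsigned char *src,const size_t n,
  unsigned char *dst)
{
  size_t
    i,
    j;

  i=0;
  j=0;
  while (i < n)
  {
    size_t
      run;

    run=1;
    while (((i+run) < n) && (run < 128) && (src[i+run] == src[i]))
      run++;
    if (run >= 3)
      {
        dst[j++]=(unsigned char) (257-run);  /* repeat next byte 'run' times */
        dst[j++]=src[i];
        i+=run;
        continue;
      }
    {
      size_t
        start;

      start=i;  /* literal span: stop before any run of three */
      while ((i < n) && ((i-start) < 128) &&
             (((i+2) >= n) || (src[i] != src[i+1]) || (src[i] != src[i+2])))
        i++;
      dst[j++]=(unsigned char) (i-start-1);  /* copy (i-start) bytes verbatim */
      (void) memcpy(dst+j,src+start,i-start);
      j+=(i-start);
    }
  }
  return(j);
}
#endif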
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_364_2 |
crossvul-cpp_data_bad_3038_0 | /*
* Linux IPv6 multicast routing support for BSD pim6sd
* Based on net/ipv4/ipmr.c.
*
* (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
* LSIIT Laboratory, Strasbourg, France
* (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
* 6WIND, Paris, France
* Copyright (C)2007,2008 USAGI/WIDE Project
* YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
struct mr6_table {
struct list_head list;
possible_net_t net;
u32 id;
struct sock *mroute6_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc6_unres_queue;
struct list_head mfc6_cache_array[MFC6_LINES];
struct mif_device vif6_table[MAXMIFS];
int maxvif;
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
bool mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
int mroute_reg_vif_num;
#endif
};
struct ip6mr_rule {
struct fib_rule common;
};
struct ip6mr_result {
struct mr6_table *mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are serialized via rtnl_lock.
*/
static DEFINE_RWLOCK(mrt_lock);
/*
* Multicast router control variables
*/
#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
entries is changed only in process context and protected
with weak lock mrt_lock. Queue of unresolved entries is protected
with strong spinlock mfc_unres_lock.
In this case data path is free of exclusive locks at all.
*/
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);
static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
int cmd);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
struct mr6_table *mrt;
ip6mr_for_each_table(mrt, net) {
if (mrt->id == id)
return mrt;
}
return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
struct mr6_table **mrt)
{
int err;
struct ip6mr_result res;
struct fib_lookup_arg arg = {
.result = &res,
.flags = FIB_LOOKUP_NOREF,
};
err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
flowi6_to_flowi(flp6), 0, &arg);
if (err < 0)
return err;
*mrt = res.mrt;
return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
int flags, struct fib_lookup_arg *arg)
{
struct ip6mr_result *res = arg->result;
struct mr6_table *mrt;
switch (rule->action) {
case FR_ACT_TO_TBL:
break;
case FR_ACT_UNREACHABLE:
return -ENETUNREACH;
case FR_ACT_PROHIBIT:
return -EACCES;
case FR_ACT_BLACKHOLE:
default:
return -EINVAL;
}
mrt = ip6mr_get_table(rule->fr_net, rule->table);
if (!mrt)
return -EAGAIN;
res->mrt = mrt;
return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
return 1;
}
static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
FRA_GENERIC_POLICY,
};
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh, struct nlattr **tb)
{
return 0;
}
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
struct nlattr **tb)
{
return 1;
}
static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh)
{
frh->dst_len = 0;
frh->src_len = 0;
frh->tos = 0;
return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
.family = RTNL_FAMILY_IP6MR,
.rule_size = sizeof(struct ip6mr_rule),
.addr_size = sizeof(struct in6_addr),
.action = ip6mr_rule_action,
.match = ip6mr_rule_match,
.configure = ip6mr_rule_configure,
.compare = ip6mr_rule_compare,
.fill = ip6mr_rule_fill,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = ip6mr_rule_policy,
.owner = THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
struct fib_rules_ops *ops;
struct mr6_table *mrt;
int err;
ops = fib_rules_register(&ip6mr_rules_ops_template, net);
if (IS_ERR(ops))
return PTR_ERR(ops);
INIT_LIST_HEAD(&net->ipv6.mr6_tables);
mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
if (!mrt) {
err = -ENOMEM;
goto err1;
}
err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
if (err < 0)
goto err2;
net->ipv6.mr6_rules_ops = ops;
return 0;
err2:
ip6mr_free_table(mrt);
err1:
fib_rules_unregister(ops);
return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
struct mr6_table *mrt, *next;
rtnl_lock();
list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
list_del(&mrt->list);
ip6mr_free_table(mrt);
}
fib_rules_unregister(net->ipv6.mr6_rules_ops);
rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
return net->ipv6.mrt6;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
struct mr6_table **mrt)
{
*mrt = net->ipv6.mrt6;
return 0;
}
static int __net_init ip6mr_rules_init(struct net *net)
{
net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
return net->ipv6.mrt6 ? 0 : -ENOMEM;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
rtnl_lock();
ip6mr_free_table(net->ipv6.mrt6);
net->ipv6.mrt6 = NULL;
rtnl_unlock();
}
#endif
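/* Editorial note: with CONFIG_IPV6_MROUTE_MULTIPLE_TABLES the mr6_table
 * instances are selected by fib-rules lookups keyed on the flow, so several
 * multicast routing daemons can coexist in one netns; in the single-table
 * build above, every lookup collapses to the per-netns net->ipv6.mrt6
 * created for RT6_TABLE_DFLT.
 */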
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
struct mr6_table *mrt;
unsigned int i;
mrt = ip6mr_get_table(net, id);
if (mrt)
return mrt;
mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
if (!mrt)
return NULL;
mrt->id = id;
write_pnet(&mrt->net, net);
/* Forwarding cache */
for (i = 0; i < MFC6_LINES; i++)
INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
(unsigned long)mrt);
#ifdef CONFIG_IPV6_PIMSM_V2
mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
return mrt;
}
static void ip6mr_free_table(struct mr6_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, true);
kfree(mrt);
}
#ifdef CONFIG_PROC_FS
struct ipmr_mfc_iter {
struct seq_net_private p;
struct mr6_table *mrt;
struct list_head *cache;
int ct;
};
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
struct ipmr_mfc_iter *it, loff_t pos)
{
struct mr6_table *mrt = it->mrt;
struct mfc6_cache *mfc;
read_lock(&mrt_lock);
for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
it->cache = &mrt->mfc6_cache_array[it->ct];
list_for_each_entry(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
}
read_unlock(&mrt_lock);
spin_lock_bh(&mfc_unres_lock);
it->cache = &mrt->mfc6_unres_queue;
list_for_each_entry(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
spin_unlock_bh(&mfc_unres_lock);
it->cache = NULL;
return NULL;
}
/*
* The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
*/
struct ipmr_vif_iter {
struct seq_net_private p;
struct mr6_table *mrt;
int ct;
};
static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
struct ipmr_vif_iter *iter,
loff_t pos)
{
struct mr6_table *mrt = iter->mrt;
for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
if (!MIF_EXISTS(mrt, iter->ct))
continue;
if (pos-- == 0)
return &mrt->vif6_table[iter->ct];
}
return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(mrt_lock)
{
struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
if (!mrt)
return ERR_PTR(-ENOENT);
iter->mrt = mrt;
read_lock(&mrt_lock);
return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
struct mr6_table *mrt = iter->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ip6mr_vif_seq_idx(net, iter, 0);
while (++iter->ct < mrt->maxvif) {
if (!MIF_EXISTS(mrt, iter->ct))
continue;
return &mrt->vif6_table[iter->ct];
}
return NULL;
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
__releases(mrt_lock)
{
read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
struct ipmr_vif_iter *iter = seq->private;
struct mr6_table *mrt = iter->mrt;
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
} else {
const struct mif_device *vif = v;
const char *name = vif->dev ? vif->dev->name : "none";
seq_printf(seq,
"%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
vif - mrt->vif6_table,
name, vif->bytes_in, vif->pkt_in,
vif->bytes_out, vif->pkt_out,
vif->flags);
}
return 0;
}
static const struct seq_operations ip6mr_vif_seq_ops = {
.start = ip6mr_vif_seq_start,
.next = ip6mr_vif_seq_next,
.stop = ip6mr_vif_seq_stop,
.show = ip6mr_vif_seq_show,
};
static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
sizeof(struct ipmr_vif_iter));
}
static const struct file_operations ip6mr_vif_fops = {
.owner = THIS_MODULE,
.open = ip6mr_vif_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
if (!mrt)
return ERR_PTR(-ENOENT);
it->mrt = mrt;
return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct mfc6_cache *mfc = v;
struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
struct mr6_table *mrt = it->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ipmr_mfc_seq_idx(net, seq->private, 0);
if (mfc->list.next != it->cache)
return list_entry(mfc->list.next, struct mfc6_cache, list);
if (it->cache == &mrt->mfc6_unres_queue)
goto end_of_list;
BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
while (++it->ct < MFC6_LINES) {
it->cache = &mrt->mfc6_cache_array[it->ct];
if (list_empty(it->cache))
continue;
return list_first_entry(it->cache, struct mfc6_cache, list);
}
/* exhausted cache_array, show unresolved */
read_unlock(&mrt_lock);
it->cache = &mrt->mfc6_unres_queue;
it->ct = 0;
spin_lock_bh(&mfc_unres_lock);
if (!list_empty(it->cache))
return list_first_entry(it->cache, struct mfc6_cache, list);
end_of_list:
spin_unlock_bh(&mfc_unres_lock);
it->cache = NULL;
return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
struct ipmr_mfc_iter *it = seq->private;
struct mr6_table *mrt = it->mrt;
if (it->cache == &mrt->mfc6_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
else if (it->cache == &mrt->mfc6_cache_array[it->ct])
read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
int n;
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"Group "
"Origin "
"Iif Pkts Bytes Wrong Oifs\n");
} else {
const struct mfc6_cache *mfc = v;
const struct ipmr_mfc_iter *it = seq->private;
struct mr6_table *mrt = it->mrt;
seq_printf(seq, "%pI6 %pI6 %-3hd",
&mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
mfc->mf6c_parent);
if (it->cache != &mrt->mfc6_unres_queue) {
seq_printf(seq, " %8lu %8lu %8lu",
mfc->mfc_un.res.pkt,
mfc->mfc_un.res.bytes,
mfc->mfc_un.res.wrong_if);
for (n = mfc->mfc_un.res.minvif;
n < mfc->mfc_un.res.maxvif; n++) {
if (MIF_EXISTS(mrt, n) &&
mfc->mfc_un.res.ttls[n] < 255)
seq_printf(seq,
" %2d:%-3d",
n, mfc->mfc_un.res.ttls[n]);
}
} else {
/* unresolved mfc_caches don't contain
* pkt, bytes and wrong_if values
*/
seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
}
seq_putc(seq, '\n');
}
return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
.start = ipmr_mfc_seq_start,
.next = ipmr_mfc_seq_next,
.stop = ipmr_mfc_seq_stop,
.show = ipmr_mfc_seq_show,
};
static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
sizeof(struct ipmr_mfc_iter));
}
static const struct file_operations ip6mr_mfc_fops = {
.owner = THIS_MODULE,
.open = ipmr_mfc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
#endif
#ifdef CONFIG_IPV6_PIMSM_V2
static int pim6_rcv(struct sk_buff *skb)
{
struct pimreghdr *pim;
struct ipv6hdr *encap;
struct net_device *reg_dev = NULL;
struct net *net = dev_net(skb->dev);
struct mr6_table *mrt;
struct flowi6 fl6 = {
.flowi6_iif = skb->dev->ifindex,
.flowi6_mark = skb->mark,
};
int reg_vif_num;
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
goto drop;
pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
(pim->flags & PIM_NULL_REGISTER) ||
(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
sizeof(*pim), IPPROTO_PIM,
csum_partial((void *)pim, sizeof(*pim), 0)) &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
/* check if the inner packet is destined to mcast group */
encap = (struct ipv6hdr *)(skb_transport_header(skb) +
sizeof(*pim));
if (!ipv6_addr_is_multicast(&encap->daddr) ||
encap->payload_len == 0 ||
ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
goto drop;
if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
goto drop;
reg_vif_num = mrt->mroute_reg_vif_num;
read_lock(&mrt_lock);
if (reg_vif_num >= 0)
reg_dev = mrt->vif6_table[reg_vif_num].dev;
if (reg_dev)
dev_hold(reg_dev);
read_unlock(&mrt_lock);
if (!reg_dev)
goto drop;
skb->mac_header = skb->network_header;
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_NONE;
skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
netif_rx(skb);
dev_put(reg_dev);
return 0;
drop:
kfree_skb(skb);
return 0;
}
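/* Editorial note: pim6_rcv() decapsulates PIM REGISTER messages -- it
 * validates the PIM header and checksum, strips the outer headers down to
 * the encapsulated IPv6 packet, and re-injects that packet through the
 * pim6reg device via skb_tunnel_rx()/netif_rx(), so it is routed as if it
 * had arrived on that interface.
 */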
static const struct inet6_protocol pim6_protocol = {
.handler = pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct net *net = dev_net(dev);
struct mr6_table *mrt;
struct flowi6 fl6 = {
.flowi6_oif = dev->ifindex,
.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
.flowi6_mark = skb->mark,
};
int err;
err = ip6mr_fib_lookup(net, &fl6, &mrt);
if (err < 0) {
kfree_skb(skb);
return err;
}
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
read_unlock(&mrt_lock);
kfree_skb(skb);
return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
return 0;
}
static const struct net_device_ops reg_vif_netdev_ops = {
.ndo_start_xmit = reg_vif_xmit,
.ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
{
dev->type = ARPHRD_PIMREG;
dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
dev->flags = IFF_NOARP;
	dev->netdev_ops = &reg_vif_netdev_ops;
dev->destructor = free_netdev;
dev->features |= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
struct net_device *dev;
char name[IFNAMSIZ];
if (mrt->id == RT6_TABLE_DFLT)
sprintf(name, "pim6reg");
else
sprintf(name, "pim6reg%u", mrt->id);
dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
if (!dev)
return NULL;
dev_net_set(dev, net);
if (register_netdevice(dev)) {
free_netdev(dev);
return NULL;
}
if (dev_open(dev))
goto failure;
dev_hold(dev);
return dev;
failure:
unregister_netdevice(dev);
return NULL;
}
#endif
/*
* Delete a VIF entry
*/
static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
struct mif_device *v;
struct net_device *dev;
struct inet6_dev *in6_dev;
if (vifi < 0 || vifi >= mrt->maxvif)
return -EADDRNOTAVAIL;
v = &mrt->vif6_table[vifi];
write_lock_bh(&mrt_lock);
dev = v->dev;
v->dev = NULL;
if (!dev) {
write_unlock_bh(&mrt_lock);
return -EADDRNOTAVAIL;
}
#ifdef CONFIG_IPV6_PIMSM_V2
if (vifi == mrt->mroute_reg_vif_num)
mrt->mroute_reg_vif_num = -1;
#endif
if (vifi + 1 == mrt->maxvif) {
int tmp;
for (tmp = vifi - 1; tmp >= 0; tmp--) {
if (MIF_EXISTS(mrt, tmp))
break;
}
mrt->maxvif = tmp + 1;
}
write_unlock_bh(&mrt_lock);
dev_set_allmulti(dev, -1);
in6_dev = __in6_dev_get(dev);
if (in6_dev) {
in6_dev->cnf.mc_forwarding--;
inet6_netconf_notify_devconf(dev_net(dev),
NETCONFA_MC_FORWARDING,
dev->ifindex, &in6_dev->cnf);
}
if (v->flags & MIFF_REGISTER)
unregister_netdevice_queue(dev, head);
dev_put(dev);
return 0;
}
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
kmem_cache_free(mrt_cachep, c);
}
/* Destroy an unresolved cache entry, killing queued skbs
and reporting error to netlink readers.
*/
static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
struct net *net = read_pnet(&mrt->net);
struct sk_buff *skb;
atomic_dec(&mrt->cache_resolve_queue_len);
while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
if (ipv6_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
nlh->nlmsg_type = NLMSG_ERROR;
nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
skb_trim(skb, nlh->nlmsg_len);
((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
} else
kfree_skb(skb);
}
ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */
static void ipmr_do_expire_process(struct mr6_table *mrt)
{
unsigned long now = jiffies;
unsigned long expires = 10 * HZ;
struct mfc6_cache *c, *next;
list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
if (time_after(c->mfc_un.unres.expires, now)) {
/* not yet... */
unsigned long interval = c->mfc_un.unres.expires - now;
if (interval < expires)
expires = interval;
continue;
}
list_del(&c->list);
mr6_netlink_event(mrt, c, RTM_DELROUTE);
ip6mr_destroy_unres(mrt, c);
}
if (!list_empty(&mrt->mfc6_unres_queue))
mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
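/* Editorial note: unresolved entries live for ~10s (set in
 * ip6mr_cache_alloc_unres()).  The walk above expires overdue entries and
 * re-arms ipmr_expire_timer for the earliest remaining deadline, so the
 * timer only runs while mfc6_unres_queue is non-empty.
 */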
static void ipmr_expire_process(unsigned long arg)
{
struct mr6_table *mrt = (struct mr6_table *)arg;
if (!spin_trylock(&mfc_unres_lock)) {
mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
return;
}
if (!list_empty(&mrt->mfc6_unres_queue))
ipmr_do_expire_process(mrt);
spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
unsigned char *ttls)
{
int vifi;
cache->mfc_un.res.minvif = MAXMIFS;
cache->mfc_un.res.maxvif = 0;
memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
for (vifi = 0; vifi < mrt->maxvif; vifi++) {
if (MIF_EXISTS(mrt, vifi) &&
ttls[vifi] && ttls[vifi] < 255) {
cache->mfc_un.res.ttls[vifi] = ttls[vifi];
if (cache->mfc_un.res.minvif > vifi)
cache->mfc_un.res.minvif = vifi;
if (cache->mfc_un.res.maxvif <= vifi)
cache->mfc_un.res.maxvif = vifi + 1;
}
}
cache->mfc_un.res.lastuse = jiffies;
}
static int mif6_add(struct net *net, struct mr6_table *mrt,
struct mif6ctl *vifc, int mrtsock)
{
int vifi = vifc->mif6c_mifi;
struct mif_device *v = &mrt->vif6_table[vifi];
struct net_device *dev;
struct inet6_dev *in6_dev;
int err;
/* Is vif busy ? */
if (MIF_EXISTS(mrt, vifi))
return -EADDRINUSE;
switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
case MIFF_REGISTER:
/*
* Special Purpose VIF in PIM
* All the packets will be sent to the daemon
*/
if (mrt->mroute_reg_vif_num >= 0)
return -EADDRINUSE;
dev = ip6mr_reg_vif(net, mrt);
if (!dev)
return -ENOBUFS;
err = dev_set_allmulti(dev, 1);
if (err) {
unregister_netdevice(dev);
dev_put(dev);
return err;
}
break;
#endif
case 0:
dev = dev_get_by_index(net, vifc->mif6c_pifi);
if (!dev)
return -EADDRNOTAVAIL;
err = dev_set_allmulti(dev, 1);
if (err) {
dev_put(dev);
return err;
}
break;
default:
return -EINVAL;
}
in6_dev = __in6_dev_get(dev);
if (in6_dev) {
in6_dev->cnf.mc_forwarding++;
inet6_netconf_notify_devconf(dev_net(dev),
NETCONFA_MC_FORWARDING,
dev->ifindex, &in6_dev->cnf);
}
/*
* Fill in the VIF structures
*/
v->rate_limit = vifc->vifc_rate_limit;
v->flags = vifc->mif6c_flags;
if (!mrtsock)
v->flags |= VIFF_STATIC;
v->threshold = vifc->vifc_threshold;
v->bytes_in = 0;
v->bytes_out = 0;
v->pkt_in = 0;
v->pkt_out = 0;
v->link = dev->ifindex;
if (v->flags & MIFF_REGISTER)
v->link = dev_get_iflink(dev);
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
if (v->flags & MIFF_REGISTER)
mrt->mroute_reg_vif_num = vifi;
#endif
if (vifi + 1 > mrt->maxvif)
mrt->maxvif = vifi + 1;
write_unlock_bh(&mrt_lock);
return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
const struct in6_addr *origin,
const struct in6_addr *mcastgrp)
{
int line = MFC6_HASH(mcastgrp, origin);
struct mfc6_cache *c;
list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
return c;
}
return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
mifi_t mifi)
{
int line = MFC6_HASH(&in6addr_any, &in6addr_any);
struct mfc6_cache *c;
list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
if (ipv6_addr_any(&c->mf6c_origin) &&
ipv6_addr_any(&c->mf6c_mcastgrp) &&
(c->mfc_un.res.ttls[mifi] < 255))
return c;
return NULL;
}
/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
struct in6_addr *mcastgrp,
mifi_t mifi)
{
int line = MFC6_HASH(mcastgrp, &in6addr_any);
struct mfc6_cache *c, *proxy;
if (ipv6_addr_any(mcastgrp))
goto skip;
list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
if (ipv6_addr_any(&c->mf6c_origin) &&
ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
if (c->mfc_un.res.ttls[mifi] < 255)
return c;
/* It's ok if the mifi is part of the static tree */
proxy = ip6mr_cache_find_any_parent(mrt,
c->mf6c_parent);
if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
return c;
}
skip:
return ip6mr_cache_find_any_parent(mrt, mifi);
}
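/* Editorial note on the wildcard lookups above: a (*,G) entry matches any
 * source for group G, and a (*,*) entry acts as a per-interface proxy.
 * A mif belongs to an entry's forwarding set when its ttl is < 255, the
 * "do not forward" sentinel installed by ip6mr_update_thresholds().
 */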
/*
* Allocate a multicast cache entry
*/
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (!c)
return NULL;
c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXMIFS;
return c;
}
static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
if (!c)
return NULL;
skb_queue_head_init(&c->mfc_un.unres.unresolved);
c->mfc_un.unres.expires = jiffies + 10 * HZ;
return c;
}
/*
* A cache entry has gone into a resolved state from queued
*/
static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
struct mfc6_cache *uc, struct mfc6_cache *c)
{
struct sk_buff *skb;
/*
* Play the pending entries through our router
*/
while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ipv6_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
} else {
nlh->nlmsg_type = NLMSG_ERROR;
nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
skb_trim(skb, nlh->nlmsg_len);
((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
}
rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
} else
ip6_mr_forward(net, mrt, skb, c);
}
}
/*
* Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
* expects the following bizarre scheme.
*
* Called under mrt_lock.
*/
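/* Editorial sketch (userspace side, not from this file): a pim6sd-style
 * daemon consumes these reports from the raw ICMPv6 socket it passed to
 * MRT6_INIT.  'fd' and install_mfc_entry() below are hypothetical names;
 * the struct layout matches the fields filled in just below.
 */
#if 0
	unsigned char buf[8192];
	ssize_t n = read(fd, buf, sizeof(buf));
	struct mrt6msg *msg = (struct mrt6msg *)buf;

	if (n >= (ssize_t)sizeof(*msg) && msg->im6_msgtype == MRT6MSG_NOCACHE)
		install_mfc_entry(&msg->im6_src, &msg->im6_dst);
#endif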
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert)
{
struct sk_buff *skb;
struct mrt6msg *msg;
int ret;
#ifdef CONFIG_IPV6_PIMSM_V2
if (assert == MRT6MSG_WHOLEPKT)
skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
+sizeof(*msg));
else
#endif
skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
if (!skb)
return -ENOBUFS;
/* I suppose that internal messages
* do not require checksums */
skb->ip_summed = CHECKSUM_UNNECESSARY;
#ifdef CONFIG_IPV6_PIMSM_V2
if (assert == MRT6MSG_WHOLEPKT) {
/* Ugly, but we have no choice with this interface.
Duplicate old header, fix length etc.
And all this only to mangle msg->im6_msgtype and
to set msg->im6_mbz to "mbz" :-)
*/
skb_push(skb, -skb_network_offset(pkt));
skb_push(skb, sizeof(*msg));
skb_reset_transport_header(skb);
msg = (struct mrt6msg *)skb_transport_header(skb);
msg->im6_mbz = 0;
msg->im6_msgtype = MRT6MSG_WHOLEPKT;
msg->im6_mif = mrt->mroute_reg_vif_num;
msg->im6_pad = 0;
msg->im6_src = ipv6_hdr(pkt)->saddr;
msg->im6_dst = ipv6_hdr(pkt)->daddr;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
#endif
{
/*
* Copy the IP header
*/
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
/*
* Add our header
*/
skb_put(skb, sizeof(*msg));
skb_reset_transport_header(skb);
msg = (struct mrt6msg *)skb_transport_header(skb);
msg->im6_mbz = 0;
msg->im6_msgtype = assert;
msg->im6_mif = mifi;
msg->im6_pad = 0;
msg->im6_src = ipv6_hdr(pkt)->saddr;
msg->im6_dst = ipv6_hdr(pkt)->daddr;
skb_dst_set(skb, dst_clone(skb_dst(pkt)));
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (!mrt->mroute6_sk) {
kfree_skb(skb);
return -EINVAL;
}
/*
* Deliver to user space multicast routing algorithms
*/
ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
if (ret < 0) {
net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
kfree_skb(skb);
}
return ret;
}
/*
 *	Queue a packet for resolution; the cache entry is looked up or
 *	created under mfc_unres_lock.
*/
static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
bool found = false;
int err;
struct mfc6_cache *c;
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
found = true;
break;
}
}
if (!found) {
/*
* Create a new entry if allowable
*/
if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
(c = ip6mr_cache_alloc_unres()) == NULL) {
spin_unlock_bh(&mfc_unres_lock);
kfree_skb(skb);
return -ENOBUFS;
}
/*
* Fill in the new cache entry
*/
c->mf6c_parent = -1;
c->mf6c_origin = ipv6_hdr(skb)->saddr;
c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
/*
* Reflect first query at pim6sd
*/
err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
if (err < 0) {
/* If the report failed throw the cache entry
out - Brad Parker
*/
spin_unlock_bh(&mfc_unres_lock);
ip6mr_cache_free(c);
kfree_skb(skb);
return err;
}
atomic_inc(&mrt->cache_resolve_queue_len);
list_add(&c->list, &mrt->mfc6_unres_queue);
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
ipmr_do_expire_process(mrt);
}
/*
* See if we can append the packet
*/
if (c->mfc_un.unres.unresolved.qlen > 3) {
kfree_skb(skb);
err = -ENOBUFS;
} else {
skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
err = 0;
}
spin_unlock_bh(&mfc_unres_lock);
return err;
}
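/* Editorial note: the unresolved path is deliberately bounded -- at most
 * ten pending entries per table (the cache_resolve_queue_len test above)
 * and at most four queued skbs per entry (the qlen > 3 drop), so a flood
 * of unroutable multicast cannot grow kernel memory without limit.
 */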
/*
* MFC6 cache manipulation by user space
*/
static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
int parent)
{
int line;
struct mfc6_cache *c, *next;
line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
ipv6_addr_equal(&c->mf6c_mcastgrp,
&mfc->mf6cc_mcastgrp.sin6_addr) &&
(parent == -1 || parent == c->mf6c_parent)) {
write_lock_bh(&mrt_lock);
list_del(&c->list);
write_unlock_bh(&mrt_lock);
mr6_netlink_event(mrt, c, RTM_DELROUTE);
ip6mr_cache_free(c);
return 0;
}
}
return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
struct mr6_table *mrt;
struct mif_device *v;
int ct;
LIST_HEAD(list);
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
ip6mr_for_each_table(mrt, net) {
v = &mrt->vif6_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
if (v->dev == dev)
mif6_delete(mrt, ct, &list);
}
}
unregister_netdevice_many(&list);
return NOTIFY_DONE;
}
static struct notifier_block ip6_mr_notifier = {
.notifier_call = ip6mr_device_event
};
/*
* Setup for IP multicast routing
*/
static int __net_init ip6mr_net_init(struct net *net)
{
int err;
err = ip6mr_rules_init(net);
if (err < 0)
goto fail;
#ifdef CONFIG_PROC_FS
err = -ENOMEM;
if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
goto proc_vif_fail;
if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
goto proc_cache_fail;
#endif
return 0;
#ifdef CONFIG_PROC_FS
proc_cache_fail:
remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
ip6mr_rules_exit(net);
#endif
fail:
return err;
}
static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
remove_proc_entry("ip6_mr_cache", net->proc_net);
remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
ip6mr_rules_exit(net);
}
static struct pernet_operations ip6mr_net_ops = {
.init = ip6mr_net_init,
.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
int err;
mrt_cachep = kmem_cache_create("ip6_mrt_cache",
sizeof(struct mfc6_cache),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!mrt_cachep)
return -ENOMEM;
err = register_pernet_subsys(&ip6mr_net_ops);
if (err)
goto reg_pernet_fail;
err = register_netdevice_notifier(&ip6_mr_notifier);
if (err)
goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
pr_err("%s: can't add PIM protocol\n", __func__);
err = -EAGAIN;
goto add_proto_fail;
}
#endif
rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
ip6mr_rtm_dumproute, NULL);
return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
kmem_cache_destroy(mrt_cachep);
return err;
}
void ip6_mr_cleanup(void)
{
rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
unregister_netdevice_notifier(&ip6_mr_notifier);
unregister_pernet_subsys(&ip6mr_net_ops);
kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
struct mf6cctl *mfc, int mrtsock, int parent)
{
bool found = false;
int line;
struct mfc6_cache *uc, *c;
unsigned char ttls[MAXMIFS];
int i;
if (mfc->mf6cc_parent >= MAXMIFS)
return -ENFILE;
memset(ttls, 255, MAXMIFS);
for (i = 0; i < MAXMIFS; i++) {
if (IF_ISSET(i, &mfc->mf6cc_ifset))
ttls[i] = 1;
}
line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
ipv6_addr_equal(&c->mf6c_mcastgrp,
&mfc->mf6cc_mcastgrp.sin6_addr) &&
(parent == -1 || parent == mfc->mf6cc_parent)) {
found = true;
break;
}
}
if (found) {
write_lock_bh(&mrt_lock);
c->mf6c_parent = mfc->mf6cc_parent;
ip6mr_update_thresholds(mrt, c, ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_unlock_bh(&mrt_lock);
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
return 0;
}
if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
return -EINVAL;
c = ip6mr_cache_alloc();
if (!c)
return -ENOMEM;
c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
c->mf6c_parent = mfc->mf6cc_parent;
ip6mr_update_thresholds(mrt, c, ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_lock_bh(&mrt_lock);
list_add(&c->list, &mrt->mfc6_cache_array[line]);
write_unlock_bh(&mrt_lock);
/*
* Check to see if we resolved a queued list. If so we
* need to send on the frames and tidy up.
*/
found = false;
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
list_del(&uc->list);
atomic_dec(&mrt->cache_resolve_queue_len);
found = true;
break;
}
}
if (list_empty(&mrt->mfc6_unres_queue))
del_timer(&mrt->ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
if (found) {
ip6mr_cache_resolve(net, mrt, uc, c);
ip6mr_cache_free(uc);
}
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
return 0;
}
/*
* Close the multicast socket, and clear the vif tables etc
*/
static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
int i;
LIST_HEAD(list);
struct mfc6_cache *c, *next;
/*
* Shut down all active vif entries
*/
for (i = 0; i < mrt->maxvif; i++) {
if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
continue;
mif6_delete(mrt, i, &list);
}
unregister_netdevice_many(&list);
/*
* Wipe the cache
*/
for (i = 0; i < MFC6_LINES; i++) {
list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
if (!all && (c->mfc_flags & MFC_STATIC))
continue;
write_lock_bh(&mrt_lock);
list_del(&c->list);
write_unlock_bh(&mrt_lock);
mr6_netlink_event(mrt, c, RTM_DELROUTE);
ip6mr_cache_free(c);
}
}
if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
list_del(&c->list);
mr6_netlink_event(mrt, c, RTM_DELROUTE);
ip6mr_destroy_unres(mrt, c);
}
spin_unlock_bh(&mfc_unres_lock);
}
}
static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
int err = 0;
struct net *net = sock_net(sk);
rtnl_lock();
write_lock_bh(&mrt_lock);
if (likely(mrt->mroute6_sk == NULL)) {
mrt->mroute6_sk = sk;
net->ipv6.devconf_all->mc_forwarding++;
} else {
err = -EADDRINUSE;
}
write_unlock_bh(&mrt_lock);
if (!err)
inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
NETCONFA_IFINDEX_ALL,
net->ipv6.devconf_all);
rtnl_unlock();
return err;
}
int ip6mr_sk_done(struct sock *sk)
{
int err = -EACCES;
struct net *net = sock_net(sk);
struct mr6_table *mrt;
rtnl_lock();
ip6mr_for_each_table(mrt, net) {
if (sk == mrt->mroute6_sk) {
write_lock_bh(&mrt_lock);
mrt->mroute6_sk = NULL;
net->ipv6.devconf_all->mc_forwarding--;
write_unlock_bh(&mrt_lock);
inet6_netconf_notify_devconf(net,
NETCONFA_MC_FORWARDING,
NETCONFA_IFINDEX_ALL,
net->ipv6.devconf_all);
mroute_clean_tables(mrt, false);
err = 0;
break;
}
}
rtnl_unlock();
return err;
}
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
struct mr6_table *mrt;
struct flowi6 fl6 = {
.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
.flowi6_oif = skb->dev->ifindex,
.flowi6_mark = skb->mark,
};
if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
return NULL;
return mrt->mroute6_sk;
}
/*
* Socket options and virtual interface manipulation. The whole
* virtual interface system is a complete heap, but unfortunately
* that's how BSD mrouted happens to think. Maybe one day with a proper
* MOSPF/PIM router set up we can clean this up.
*/
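/* Editorial sketch (userspace side, not from this file): the minimal
 * bring-up a routing daemon performs against this interface.  "eth0" is a
 * placeholder and error handling is elided; the MRT6_* options live at the
 * IPPROTO_IPV6 level, as handled below.
 */
#if 0
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int v = 1;
	struct mif6ctl mc = {
		.mif6c_mifi = 0,
		.mif6c_pifi = if_nametoindex("eth0"),
	};

	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &v, sizeof(v));      /* -> ip6mr_sk_init() */
	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc)); /* -> mif6_add() */
#endif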
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
int ret, parent = 0;
struct mif6ctl vif;
struct mf6cctl mfc;
mifi_t mifi;
struct net *net = sock_net(sk);
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
if (!mrt)
return -ENOENT;
if (optname != MRT6_INIT) {
if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EACCES;
}
switch (optname) {
case MRT6_INIT:
if (sk->sk_type != SOCK_RAW ||
inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
if (optlen < sizeof(int))
return -EINVAL;
return ip6mr_sk_init(mrt, sk);
case MRT6_DONE:
return ip6mr_sk_done(sk);
case MRT6_ADD_MIF:
if (optlen < sizeof(vif))
return -EINVAL;
if (copy_from_user(&vif, optval, sizeof(vif)))
return -EFAULT;
if (vif.mif6c_mifi >= MAXMIFS)
return -ENFILE;
rtnl_lock();
ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
rtnl_unlock();
return ret;
case MRT6_DEL_MIF:
if (optlen < sizeof(mifi_t))
return -EINVAL;
if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
return -EFAULT;
rtnl_lock();
ret = mif6_delete(mrt, mifi, NULL);
rtnl_unlock();
return ret;
/*
* Manipulate the forwarding caches. These live
* in a sort of kernel/user symbiosis.
*/
case MRT6_ADD_MFC:
case MRT6_DEL_MFC:
parent = -1;
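		/* fall through */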
case MRT6_ADD_MFC_PROXY:
case MRT6_DEL_MFC_PROXY:
if (optlen < sizeof(mfc))
return -EINVAL;
if (copy_from_user(&mfc, optval, sizeof(mfc)))
return -EFAULT;
if (parent == 0)
parent = mfc.mf6cc_parent;
rtnl_lock();
if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
ret = ip6mr_mfc_delete(mrt, &mfc, parent);
else
ret = ip6mr_mfc_add(net, mrt, &mfc,
sk == mrt->mroute6_sk, parent);
rtnl_unlock();
return ret;
/*
* Control PIM assert (to activate pim will activate assert)
*/
case MRT6_ASSERT:
{
int v;
if (optlen != sizeof(v))
return -EINVAL;
if (get_user(v, (int __user *)optval))
return -EFAULT;
mrt->mroute_do_assert = v;
return 0;
}
#ifdef CONFIG_IPV6_PIMSM_V2
case MRT6_PIM:
{
int v;
if (optlen != sizeof(v))
return -EINVAL;
if (get_user(v, (int __user *)optval))
return -EFAULT;
v = !!v;
rtnl_lock();
ret = 0;
if (v != mrt->mroute_do_pim) {
mrt->mroute_do_pim = v;
mrt->mroute_do_assert = v;
}
rtnl_unlock();
return ret;
}
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
case MRT6_TABLE:
{
u32 v;
if (optlen != sizeof(u32))
return -EINVAL;
if (get_user(v, (u32 __user *)optval))
return -EFAULT;
/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
if (v != RT_TABLE_DEFAULT && v >= 100000000)
return -EINVAL;
if (sk == mrt->mroute6_sk)
return -EBUSY;
rtnl_lock();
ret = 0;
if (!ip6mr_new_table(net, v))
ret = -ENOMEM;
raw6_sk(sk)->ip6mr_table = v;
rtnl_unlock();
return ret;
}
#endif
/*
* Spurious command, or MRT6_VERSION which you cannot
* set.
*/
default:
return -ENOPROTOOPT;
}
}
/*
* Getsockopt support for the multicast routing system.
*/
int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
int __user *optlen)
{
int olr;
int val;
struct net *net = sock_net(sk);
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
if (!mrt)
return -ENOENT;
switch (optname) {
case MRT6_VERSION:
val = 0x0305;
break;
#ifdef CONFIG_IPV6_PIMSM_V2
case MRT6_PIM:
val = mrt->mroute_do_pim;
break;
#endif
case MRT6_ASSERT:
val = mrt->mroute_do_assert;
break;
default:
return -ENOPROTOOPT;
}
if (get_user(olr, optlen))
return -EFAULT;
olr = min_t(int, olr, sizeof(int));
if (olr < 0)
return -EINVAL;
if (put_user(olr, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, olr))
return -EFAULT;
return 0;
}
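/*
 * Illustrative userspace sketch (not part of this file): querying the
 * multicast routing API version on a socket "s" as opened above.
 *
 *	int ver;
 *	socklen_t len = sizeof(ver);
 *	getsockopt(s, IPPROTO_IPV6, MRT6_VERSION, &ver, &len);
 *
 * With this implementation, ver comes back as 0x0305.
 */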
/*
* The IP multicast ioctl support routines.
*/
int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
struct sioc_sg_req6 sr;
struct sioc_mif_req6 vr;
struct mif_device *vif;
struct mfc6_cache *c;
struct net *net = sock_net(sk);
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
if (!mrt)
return -ENOENT;
switch (cmd) {
case SIOCGETMIFCNT_IN6:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
read_lock(&mrt_lock);
vif = &mrt->vif6_table[vr.mifi];
if (MIF_EXISTS(mrt, vr.mifi)) {
vr.icount = vif->pkt_in;
vr.ocount = vif->pkt_out;
vr.ibytes = vif->bytes_in;
vr.obytes = vif->bytes_out;
read_unlock(&mrt_lock);
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
read_unlock(&mrt_lock);
return -EADDRNOTAVAIL;
case SIOCGETSGCNT_IN6:
if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
read_lock(&mrt_lock);
c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
sr.wrong_if = c->mfc_un.res.wrong_if;
read_unlock(&mrt_lock);
if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
read_unlock(&mrt_lock);
return -EADDRNOTAVAIL;
default:
return -ENOIOCTLCMD;
}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
struct sockaddr_in6 src;
struct sockaddr_in6 grp;
compat_ulong_t pktcnt;
compat_ulong_t bytecnt;
compat_ulong_t wrong_if;
};
struct compat_sioc_mif_req6 {
mifi_t mifi;
compat_ulong_t icount;
compat_ulong_t ocount;
compat_ulong_t ibytes;
compat_ulong_t obytes;
};
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
struct compat_sioc_sg_req6 sr;
struct compat_sioc_mif_req6 vr;
struct mif_device *vif;
struct mfc6_cache *c;
struct net *net = sock_net(sk);
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
if (!mrt)
return -ENOENT;
switch (cmd) {
case SIOCGETMIFCNT_IN6:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
read_lock(&mrt_lock);
vif = &mrt->vif6_table[vr.mifi];
if (MIF_EXISTS(mrt, vr.mifi)) {
vr.icount = vif->pkt_in;
vr.ocount = vif->pkt_out;
vr.ibytes = vif->bytes_in;
vr.obytes = vif->bytes_out;
read_unlock(&mrt_lock);
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
read_unlock(&mrt_lock);
return -EADDRNOTAVAIL;
case SIOCGETSGCNT_IN6:
if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
read_lock(&mrt_lock);
c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
sr.wrong_if = c->mfc_un.res.wrong_if;
read_unlock(&mrt_lock);
if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
read_unlock(&mrt_lock);
return -EADDRNOTAVAIL;
default:
return -ENOIOCTLCMD;
}
}
#endif
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTFORWDATAGRAMS);
__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTOCTETS, skb->len);
return dst_output(net, sk, skb);
}
/*
* Processing handlers for ip6mr_forward
*/
static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
struct ipv6hdr *ipv6h;
struct mif_device *vif = &mrt->vif6_table[vifi];
struct net_device *dev;
struct dst_entry *dst;
struct flowi6 fl6;
if (!vif->dev)
goto out_free;
#ifdef CONFIG_IPV6_PIMSM_V2
if (vif->flags & MIFF_REGISTER) {
vif->pkt_out++;
vif->bytes_out += skb->len;
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
goto out_free;
}
#endif
ipv6h = ipv6_hdr(skb);
fl6 = (struct flowi6) {
.flowi6_oif = vif->link,
.daddr = ipv6h->daddr,
};
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
dst_release(dst);
goto out_free;
}
skb_dst_drop(skb);
skb_dst_set(skb, dst);
/*
 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
 * not only before forwarding, but also after forwarding on all output
 * interfaces. Clearly, if the mrouter runs a multicasting program, it
 * should receive packets regardless of which interface the program has
 * joined on.
 * If we did not do this, the program would have to join on all
 * interfaces. On the other hand, a multihomed host (or router, but
 * not an mrouter) cannot join on more than one interface - it would
 * result in receiving multiple packets.
 */
dev = vif->dev;
skb->dev = dev;
vif->pkt_out++;
vif->bytes_out += skb->len;
/* We are about to write */
/* XXX: extension headers? */
if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
goto out_free;
ipv6h = ipv6_hdr(skb);
ipv6h->hop_limit--;
IP6CB(skb)->flags |= IP6SKB_FORWARDED;
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
net, NULL, skb, skb->dev, dev,
ip6mr_forward2_finish);
out_free:
kfree_skb(skb);
return 0;
}
static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
int ct;
for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
if (mrt->vif6_table[ct].dev == dev)
break;
}
return ct;
}
static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
struct sk_buff *skb, struct mfc6_cache *cache)
{
int psend = -1;
int vif, ct;
int true_vifi = ip6mr_find_vif(mrt, skb->dev);
vif = cache->mf6c_parent;
cache->mfc_un.res.pkt++;
cache->mfc_un.res.bytes += skb->len;
cache->mfc_un.res.lastuse = jiffies;
if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
struct mfc6_cache *cache_proxy;
/* For an (*,G) entry, we only check that the incoming
* interface is part of the static tree.
*/
cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
if (cache_proxy &&
cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
goto forward;
}
/*
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
if (mrt->vif6_table[vif].dev != skb->dev) {
cache->mfc_un.res.wrong_if++;
if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* PIM-SM uses asserts when switching from RPT to SPT,
so we cannot check that the packet arrived on an oif.
That is bad, but otherwise we would need to move a
pretty large chunk of pimd into the kernel. Ough... --ANK
*/
(mrt->mroute_do_pim ||
cache->mfc_un.res.ttls[true_vifi] < 255) &&
time_after(jiffies,
cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
cache->mfc_un.res.last_assert = jiffies;
ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
}
goto dont_forward;
}
forward:
mrt->vif6_table[vif].pkt_in++;
mrt->vif6_table[vif].bytes_in += skb->len;
/*
* Forward the frame
*/
if (ipv6_addr_any(&cache->mf6c_origin) &&
ipv6_addr_any(&cache->mf6c_mcastgrp)) {
if (true_vifi >= 0 &&
true_vifi != cache->mf6c_parent &&
ipv6_hdr(skb)->hop_limit >
cache->mfc_un.res.ttls[cache->mf6c_parent]) {
/* It's an (*,*) entry and the packet is not coming from
* the upstream: forward the packet to the upstream
* only.
*/
psend = cache->mf6c_parent;
goto last_forward;
}
goto dont_forward;
}
for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
/* For (*,G) entry, don't forward to the incoming interface */
if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
ip6mr_forward2(net, mrt, skb2, cache, psend);
}
psend = ct;
}
}
last_forward:
if (psend != -1) {
ip6mr_forward2(net, mrt, skb, cache, psend);
return;
}
dont_forward:
kfree_skb(skb);
}
/*
* Multicast packets for forwarding arrive here
*/
int ip6_mr_input(struct sk_buff *skb)
{
struct mfc6_cache *cache;
struct net *net = dev_net(skb->dev);
struct mr6_table *mrt;
struct flowi6 fl6 = {
.flowi6_iif = skb->dev->ifindex,
.flowi6_mark = skb->mark,
};
int err;
err = ip6mr_fib_lookup(net, &fl6, &mrt);
if (err < 0) {
kfree_skb(skb);
return err;
}
read_lock(&mrt_lock);
cache = ip6mr_cache_find(mrt,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
if (!cache) {
int vif = ip6mr_find_vif(mrt, skb->dev);
if (vif >= 0)
cache = ip6mr_cache_find_any(mrt,
&ipv6_hdr(skb)->daddr,
vif);
}
/*
* No usable cache entry
*/
if (!cache) {
int vif;
vif = ip6mr_find_vif(mrt, skb->dev);
if (vif >= 0) {
int err = ip6mr_cache_unresolved(mrt, vif, skb);
read_unlock(&mrt_lock);
return err;
}
read_unlock(&mrt_lock);
kfree_skb(skb);
return -ENODEV;
}
ip6_mr_forward(net, mrt, skb, cache);
read_unlock(&mrt_lock);
return 0;
}
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
struct mfc6_cache *c, struct rtmsg *rtm)
{
struct rta_mfc_stats mfcs;
struct nlattr *mp_attr;
struct rtnexthop *nhp;
unsigned long lastuse;
int ct;
/* If cache is unresolved, don't try to parse IIF and OIF */
if (c->mf6c_parent >= MAXMIFS) {
rtm->rtm_flags |= RTNH_F_UNRESOLVED;
return -ENOENT;
}
if (MIF_EXISTS(mrt, c->mf6c_parent) &&
nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
return -EMSGSIZE;
mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
if (!mp_attr)
return -EMSGSIZE;
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
if (!nhp) {
nla_nest_cancel(skb, mp_attr);
return -EMSGSIZE;
}
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
nla_nest_end(skb, mp_attr);
lastuse = READ_ONCE(c->mfc_un.res.lastuse);
lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
mfcs.mfcs_packets = c->mfc_un.res.pkt;
mfcs.mfcs_bytes = c->mfc_un.res.bytes;
mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
RTA_PAD))
return -EMSGSIZE;
rtm->rtm_type = RTN_MULTICAST;
return 1;
}
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
u32 portid)
{
int err;
struct mr6_table *mrt;
struct mfc6_cache *cache;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
if (!mrt)
return -ENOENT;
read_lock(&mrt_lock);
cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
if (!cache && skb->dev) {
int vif = ip6mr_find_vif(mrt, skb->dev);
if (vif >= 0)
cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
vif);
}
if (!cache) {
struct sk_buff *skb2;
struct ipv6hdr *iph;
struct net_device *dev;
int vif;
dev = skb->dev;
if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
read_unlock(&mrt_lock);
return -ENODEV;
}
/* really correct? */
skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
if (!skb2) {
read_unlock(&mrt_lock);
return -ENOMEM;
}
NETLINK_CB(skb2).portid = portid;
skb_reset_transport_header(skb2);
skb_put(skb2, sizeof(struct ipv6hdr));
skb_reset_network_header(skb2);
iph = ipv6_hdr(skb2);
iph->version = 0;
iph->priority = 0;
iph->flow_lbl[0] = 0;
iph->flow_lbl[1] = 0;
iph->flow_lbl[2] = 0;
iph->payload_len = 0;
iph->nexthdr = IPPROTO_NONE;
iph->hop_limit = 0;
iph->saddr = rt->rt6i_src.addr;
iph->daddr = rt->rt6i_dst.addr;
err = ip6mr_cache_unresolved(mrt, vif, skb2);
read_unlock(&mrt_lock);
return err;
}
if (rtm->rtm_flags & RTM_F_NOTIFY)
cache->mfc_flags |= MFC_NOTIFY;
err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
read_unlock(&mrt_lock);
return err;
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *rtm;
int err;
nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
if (!nlh)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
rtm->rtm_family = RTNL_FAMILY_IP6MR;
rtm->rtm_dst_len = 128;
rtm->rtm_src_len = 128;
rtm->rtm_tos = 0;
rtm->rtm_table = mrt->id;
if (nla_put_u32(skb, RTA_TABLE, mrt->id))
goto nla_put_failure;
rtm->rtm_type = RTN_MULTICAST;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
if (c->mfc_flags & MFC_STATIC)
rtm->rtm_protocol = RTPROT_STATIC;
else
rtm->rtm_protocol = RTPROT_MROUTED;
rtm->rtm_flags = 0;
if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
goto nla_put_failure;
err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
/* do not break the dump if cache is unresolved */
if (err < 0 && err != -ENOENT)
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int mr6_msgsize(bool unresolved, int maxvif)
{
size_t len =
NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(4) /* RTA_TABLE */
+ nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
+ nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
;
if (!unresolved)
len = len
+ nla_total_size(4) /* RTA_IIF */
+ nla_total_size(0) /* RTA_MULTIPATH */
+ maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
/* RTA_MFC_STATS */
+ nla_total_size_64bit(sizeof(struct rta_mfc_stats))
;
return len;
}
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
int cmd)
{
struct net *net = read_pnet(&mrt->net);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
GFP_ATOMIC);
if (!skb)
goto errout;
err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
if (err < 0)
goto errout;
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
return;
errout:
kfree_skb(skb);
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct mr6_table *mrt;
struct mfc6_cache *mfc;
unsigned int t = 0, s_t;
unsigned int h = 0, s_h;
unsigned int e = 0, s_e;
s_t = cb->args[0];
s_h = cb->args[1];
s_e = cb->args[2];
read_lock(&mrt_lock);
ip6mr_for_each_table(mrt, net) {
if (t < s_t)
goto next_table;
if (t > s_t)
s_h = 0;
for (h = s_h; h < MFC6_LINES; h++) {
list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
if (e < s_e)
goto next_entry;
if (ip6mr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
mfc, RTM_NEWROUTE,
NLM_F_MULTI) < 0)
goto done;
next_entry:
e++;
}
e = s_e = 0;
}
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
if (e < s_e)
goto next_entry2;
if (ip6mr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
mfc, RTM_NEWROUTE,
NLM_F_MULTI) < 0) {
spin_unlock_bh(&mfc_unres_lock);
goto done;
}
next_entry2:
e++;
}
spin_unlock_bh(&mfc_unres_lock);
e = s_e = 0;
s_h = 0;
next_table:
t++;
}
done:
read_unlock(&mrt_lock);
cb->args[2] = e;
cb->args[1] = h;
cb->args[0] = t;
return skb->len;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_3038_0 |
crossvul-cpp_data_bad_1763_0 | /* Key garbage collector
*
* Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"
/*
* Delay between key revocation/expiry in seconds
*/
unsigned key_gc_delay = 5 * 60;
/*
* Reaper for unused keys.
*/
static void key_garbage_collector(struct work_struct *work);
DECLARE_WORK(key_gc_work, key_garbage_collector);
/*
* Reaper for links from keyrings to dead keys.
*/
static void key_gc_timer_func(unsigned long);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
static time_t key_gc_next_run = LONG_MAX;
static struct key_type *key_gc_dead_keytype;
static unsigned long key_gc_flags;
#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */
#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */
#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */
/*
* Any key whose type gets unregistered will be re-typed to this if it can't be
* immediately unlinked.
*/
struct key_type key_type_dead = {
.name = "dead",
};
/*
* Schedule a garbage collection run.
* - time precision isn't particularly important
*/
void key_schedule_gc(time_t gc_at)
{
unsigned long expires;
time_t now = current_kernel_time().tv_sec;
kenter("%ld", gc_at - now);
if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
kdebug("IMMEDIATE");
schedule_work(&key_gc_work);
} else if (gc_at < key_gc_next_run) {
kdebug("DEFERRED");
key_gc_next_run = gc_at;
expires = jiffies + (gc_at - now) * HZ;
mod_timer(&key_gc_timer, expires);
}
}
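/*
 * Worked example: a caller asking for collection 120 seconds from now
 * (gc_at == now + 120) takes the DEFERRED path above and arms the timer
 * for jiffies + 120 * HZ, provided no earlier run is already pending.
 */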
/*
* Schedule a dead links collection run.
*/
void key_schedule_gc_links(void)
{
set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
schedule_work(&key_gc_work);
}
/*
* A key's cleanup time was reached after it expired, so we need to get the
* reaper to go through a cycle finding expired keys.
*/
static void key_gc_timer_func(unsigned long data)
{
kenter("");
key_gc_next_run = LONG_MAX;
key_schedule_gc_links();
}
/*
* Reap keys of dead type.
*
* We use three flags to make sure we see three complete cycles of the garbage
* collector: the first to mark keys of that type as being dead, the second to
* collect dead links and the third to clean up the dead keys. We have to be
* careful as there may already be a cycle in progress.
*
* The caller must be holding key_types_sem.
*/
void key_gc_keytype(struct key_type *ktype)
{
kenter("%s", ktype->name);
key_gc_dead_keytype = ktype;
set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
smp_mb();
set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
kdebug("schedule");
schedule_work(&key_gc_work);
kdebug("sleep");
wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE,
TASK_UNINTERRUPTIBLE);
key_gc_dead_keytype = NULL;
kleave("");
}
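/*
 * Flag walk for the above: KEY_GC_REAP_KEYTYPE makes the next collector
 * pass run with KEY_GC_REAPING_DEAD_1 (mark keys of the dead type); the
 * state shift at the top of each pass then yields KEY_GC_REAPING_DEAD_2
 * (reap keyring links to those keys) and KEY_GC_REAPING_DEAD_3 (destroy
 * the keys and wake the waiter here).
 */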
/*
* Garbage collect a list of unreferenced, detached keys
*/
static noinline void key_gc_unused_keys(struct list_head *keys)
{
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
/* Throw away the key data */
if (key->type->destroy)
key->type->destroy(key);
security_key_free(key);
/* deal with the user's key tracking and quota */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
kfree(key->description);
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC_X;
#endif
kmem_cache_free(key_jar, key);
}
}
/*
* Garbage collector for unused keys.
*
* This is done in process context so that we don't have to disable interrupts
* all over the place. key_put() schedules this rather than trying to do the
* cleanup itself, which means key_put() doesn't have to sleep.
*/
static void key_garbage_collector(struct work_struct *work)
{
static LIST_HEAD(graveyard);
static u8 gc_state; /* Internal persistent state */
#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */
struct rb_node *cursor;
struct key *key;
time_t new_timer, limit;
kenter("[%lx,%x]", key_gc_flags, gc_state);
limit = current_kernel_time().tv_sec;
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
limit = key_gc_delay;
/* Work out what we're going to be doing in this pass */
gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
gc_state <<= 1;
if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
gc_state |= KEY_GC_REAPING_DEAD_1;
kdebug("new pass %x", gc_state);
new_timer = LONG_MAX;
/* As only this function is permitted to remove things from the key
* serial tree, if cursor is non-NULL then it will always point to a
* valid node in the tree - even if the lock got dropped.
*/
spin_lock(&key_serial_lock);
cursor = rb_first(&key_serial_tree);
continue_scanning:
while (cursor) {
key = rb_entry(cursor, struct key, serial_node);
cursor = rb_next(cursor);
if (atomic_read(&key->usage) == 0)
goto found_unreferenced_key;
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
if (key->type == key_gc_dead_keytype) {
gc_state |= KEY_GC_FOUND_DEAD_KEY;
set_bit(KEY_FLAG_DEAD, &key->flags);
key->perm = 0;
goto skip_dead_key;
}
}
if (gc_state & KEY_GC_SET_TIMER) {
if (key->expiry > limit && key->expiry < new_timer) {
kdebug("will expire %x in %ld",
key_serial(key), key->expiry - limit);
new_timer = key->expiry;
}
}
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
if (key->type == key_gc_dead_keytype)
gc_state |= KEY_GC_FOUND_DEAD_KEY;
if ((gc_state & KEY_GC_REAPING_LINKS) ||
unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
if (key->type == &key_type_keyring)
goto found_keyring;
}
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
if (key->type == key_gc_dead_keytype)
goto destroy_dead_key;
skip_dead_key:
if (spin_is_contended(&key_serial_lock) || need_resched())
goto contended;
}
contended:
spin_unlock(&key_serial_lock);
maybe_resched:
if (cursor) {
cond_resched();
spin_lock(&key_serial_lock);
goto continue_scanning;
}
/* We've completed the pass. Set the timer if we need to and queue a
* new cycle if necessary. We keep executing cycles until we find one
* where we didn't reap any keys.
*/
kdebug("pass complete");
if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
!list_empty(&graveyard)) {
/* Make sure that all pending keyring payload destructions are
* fulfilled and that people aren't now looking at dead or
* dying keys that they don't have a reference upon or a link
* to.
*/
kdebug("gc sync");
synchronize_rcu();
}
if (!list_empty(&graveyard)) {
kdebug("gc keys");
key_gc_unused_keys(&graveyard);
}
if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
KEY_GC_REAPING_DEAD_2))) {
if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
/* No remaining dead keys: short circuit the remaining
* keytype reap cycles.
*/
kdebug("dead short");
gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
gc_state |= KEY_GC_REAPING_DEAD_3;
} else {
gc_state |= KEY_GC_REAP_AGAIN;
}
}
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
kdebug("dead wake");
smp_mb();
clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
}
if (gc_state & KEY_GC_REAP_AGAIN)
schedule_work(&key_gc_work);
kleave(" [end %x]", gc_state);
return;
/* We found an unreferenced key - once we've removed it from the tree,
* we can safely drop the lock.
*/
found_unreferenced_key:
kdebug("unrefd key %d", key->serial);
rb_erase(&key->serial_node, &key_serial_tree);
spin_unlock(&key_serial_lock);
list_add_tail(&key->graveyard_link, &graveyard);
gc_state |= KEY_GC_REAP_AGAIN;
goto maybe_resched;
/* We found a keyring and we need to check the payload for links to
* dead or expired keys. We don't flag another reap immediately as we
* have to wait for the old payload to be destroyed by RCU before we
* can reap the keys to which it refers.
*/
found_keyring:
spin_unlock(&key_serial_lock);
keyring_gc(key, limit);
goto maybe_resched;
/* We found a dead key that is still referenced. Reset its type and
* destroy its payload with its semaphore held.
*/
destroy_dead_key:
spin_unlock(&key_serial_lock);
kdebug("destroy key %d", key->serial);
down_write(&key->sem);
key->type = &key_type_dead;
if (key_gc_dead_keytype->destroy)
key_gc_dead_keytype->destroy(key);
memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
up_write(&key->sem);
goto maybe_resched;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_1763_0 |
crossvul-cpp_data_good_5845_2 | /*
*
* Author Karsten Keil <kkeil@novell.com>
*
* Copyright 2008 by Karsten Keil <kkeil@novell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/mISDNif.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "core.h"
static u_int *debug;
static struct proto mISDN_proto = {
.name = "misdn",
.owner = THIS_MODULE,
.obj_size = sizeof(struct mISDN_sock)
};
#define _pms(sk) ((struct mISDN_sock *)sk)
static struct mISDN_sock_list data_sockets = {
.lock = __RW_LOCK_UNLOCKED(data_sockets.lock)
};
static struct mISDN_sock_list base_sockets = {
.lock = __RW_LOCK_UNLOCKED(base_sockets.lock)
};
#define L2_HEADER_LEN 4
static inline struct sk_buff *
_l2_alloc_skb(unsigned int len, gfp_t gfp_mask)
{
struct sk_buff *skb;
skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask);
if (likely(skb))
skb_reserve(skb, L2_HEADER_LEN);
return skb;
}
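/*
 * The skb_reserve() above leaves L2_HEADER_LEN bytes of headroom so that
 * the layer-2 code can later push its header in front of the payload
 * without reallocating the buffer.
 */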
static void
mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk_add_node(sk, &l->head);
write_unlock_bh(&l->lock);
}
static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk_del_node_init(sk);
write_unlock_bh(&l->lock);
}
static int
mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct mISDN_sock *msk;
int err;
msk = container_of(ch, struct mISDN_sock, ch);
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb);
if (msk->sk.sk_state == MISDN_CLOSED)
return -EUNATCH;
__net_timestamp(skb);
err = sock_queue_rcv_skb(&msk->sk, skb);
if (err)
printk(KERN_WARNING "%s: error %d\n", __func__, err);
return err;
}
static int
mISDN_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct mISDN_sock *msk;
msk = container_of(ch, struct mISDN_sock, ch);
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p, %x, %p)\n", __func__, ch, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
msk->sk.sk_state = MISDN_CLOSED;
break;
}
return 0;
}
static inline void
mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
struct timeval tv;
if (_pms(sk)->cmask & MISDN_TIME_STAMP) {
skb_get_timestamp(skb, &tv);
put_cmsg(msg, SOL_MISDN, MISDN_TIME_STAMP, sizeof(tv), &tv);
}
}
static int
mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
int copied, err;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n",
__func__, (int)len, flags, _pms(sk)->ch.nr,
sk->sk_protocol);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->sk_state == MISDN_CLOSED)
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (msg->msg_name) {
struct sockaddr_mISDN *maddr = msg->msg_name;
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
(sk->sk_protocol == ISDN_P_LAPD_NT)) {
maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff;
maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff;
maddr->sapi = mISDN_HEAD_ID(skb) & 0xff;
} else {
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xFF;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
}
msg->msg_namelen = sizeof(*maddr);
}
copied = skb->len + MISDN_HEADER_LEN;
if (len < copied) {
if (flags & MSG_PEEK)
atomic_dec(&skb->users);
else
skb_queue_head(&sk->sk_receive_queue, skb);
return -ENOSPC;
}
memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
MISDN_HEADER_LEN);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
mISDN_sock_cmsg(sk, msg, skb);
skb_free_datagram(sk, skb);
return err ? : copied;
}
static int
mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int err = -ENOMEM;
struct sockaddr_mISDN *maddr;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n",
__func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr,
sk->sk_protocol);
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE))
return -EINVAL;
if (len < MISDN_HEADER_LEN)
return -EINVAL;
if (sk->sk_state != MISDN_BOUND)
return -EBADFD;
lock_sock(sk);
skb = _l2_alloc_skb(len, GFP_KERNEL);
if (!skb)
goto done;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
goto done;
}
memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN);
skb_pull(skb, MISDN_HEADER_LEN);
if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
/* if we have an address, we use it */
maddr = (struct sockaddr_mISDN *)msg->msg_name;
mISDN_HEAD_ID(skb) = maddr->channel;
} else { /* use default for L2 messages */
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
(sk->sk_protocol == ISDN_P_LAPD_NT))
mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr;
}
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: ID:%x\n",
__func__, mISDN_HEAD_ID(skb));
err = -ENODEV;
if (!_pms(sk)->ch.peer)
goto done;
err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb);
if (err)
goto done;
skb = NULL;
err = len;
done:
if (skb)
kfree_skb(skb);
release_sock(sk);
return err;
}
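/*
 * Illustrative userspace sketch (not part of this file): every buffer
 * passed to sendmsg() must begin with a struct mISDNhead, which the code
 * above copies into the skb control block and then strips off. datalen
 * is a placeholder for the actual payload length.
 *
 *	struct {
 *		struct mISDNhead hh;
 *		unsigned char data[64];
 *	} buf = { .hh = { .prim = PH_DATA_REQ, .id = 0 } };
 *	send(s, &buf, sizeof(buf.hh) + datalen, 0);
 */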
static int
data_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
if (!sk)
return 0;
switch (sk->sk_protocol) {
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
if (sk->sk_state == MISDN_BOUND)
delete_channel(&_pms(sk)->ch);
else
mISDN_sock_unlink(&data_sockets, sk);
break;
case ISDN_P_LAPD_TE:
case ISDN_P_LAPD_NT:
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
case ISDN_P_B_X75SLP:
case ISDN_P_B_L2DTMF:
case ISDN_P_B_L2DSP:
case ISDN_P_B_L2DSPHDLC:
delete_channel(&_pms(sk)->ch);
mISDN_sock_unlink(&data_sockets, sk);
break;
}
lock_sock(sk);
sock_orphan(sk);
skb_queue_purge(&sk->sk_receive_queue);
release_sock(sk);
sock_put(sk);
return 0;
}
static int
data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p)
{
struct mISDN_ctrl_req cq;
int err = -EINVAL, val[2];
struct mISDNchannel *bchan, *next;
lock_sock(sk);
if (!_pms(sk)->dev) {
err = -ENODEV;
goto done;
}
switch (cmd) {
case IMCTRLREQ:
if (copy_from_user(&cq, p, sizeof(cq))) {
err = -EFAULT;
break;
}
if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) {
list_for_each_entry_safe(bchan, next,
&_pms(sk)->dev->bchannels, list) {
if (bchan->nr == cq.channel) {
err = bchan->ctrl(bchan,
CONTROL_CHANNEL, &cq);
break;
}
}
} else
err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D,
CONTROL_CHANNEL, &cq);
if (err)
break;
if (copy_to_user(p, &cq, sizeof(cq)))
err = -EFAULT;
break;
case IMCLEAR_L2:
if (sk->sk_protocol != ISDN_P_LAPD_NT) {
err = -EINVAL;
break;
}
val[0] = cmd;
if (get_user(val[1], (int __user *)p)) {
err = -EFAULT;
break;
}
err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
CONTROL_CHANNEL, val);
break;
case IMHOLD_L1:
if (sk->sk_protocol != ISDN_P_LAPD_NT
&& sk->sk_protocol != ISDN_P_LAPD_TE) {
err = -EINVAL;
break;
}
val[0] = cmd;
if (get_user(val[1], (int __user *)p)) {
err = -EFAULT;
break;
}
err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
CONTROL_CHANNEL, val);
break;
default:
err = -EINVAL;
break;
}
done:
release_sock(sk);
return err;
}
static int
data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int err = 0, id;
struct sock *sk = sock->sk;
struct mISDNdevice *dev;
struct mISDNversion ver;
switch (cmd) {
case IMGETVERSION:
ver.major = MISDN_MAJOR_VERSION;
ver.minor = MISDN_MINOR_VERSION;
ver.release = MISDN_RELEASE;
if (copy_to_user((void __user *)arg, &ver, sizeof(ver)))
err = -EFAULT;
break;
case IMGETCOUNT:
id = get_mdevice_count();
if (put_user(id, (int __user *)arg))
err = -EFAULT;
break;
case IMGETDEVINFO:
if (get_user(id, (int __user *)arg)) {
err = -EFAULT;
break;
}
dev = get_mdevice(id);
if (dev) {
struct mISDN_devinfo di;
memset(&di, 0, sizeof(di));
di.id = dev->id;
di.Dprotocols = dev->Dprotocols;
di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
di.protocol = dev->D.protocol;
memcpy(di.channelmap, dev->channelmap,
sizeof(di.channelmap));
di.nrbchan = dev->nrbchan;
strcpy(di.name, dev_name(&dev->dev));
if (copy_to_user((void __user *)arg, &di, sizeof(di)))
err = -EFAULT;
} else
err = -ENODEV;
break;
default:
if (sk->sk_state == MISDN_BOUND)
err = data_sock_ioctl_bound(sk, cmd,
(void __user *)arg);
else
err = -ENOTCONN;
}
return err;
}
static int data_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int len)
{
struct sock *sk = sock->sk;
int err = 0, opt = 0;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p, %d, %x, %p, %d)\n", __func__, sock,
level, optname, optval, len);
lock_sock(sk);
switch (optname) {
case MISDN_TIME_STAMP:
if (get_user(opt, (int __user *)optval)) {
err = -EFAULT;
break;
}
if (opt)
_pms(sk)->cmask |= MISDN_TIME_STAMP;
else
_pms(sk)->cmask &= ~MISDN_TIME_STAMP;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int data_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, opt;
if (get_user(len, optlen))
return -EFAULT;
if (len != sizeof(char))
return -EINVAL;
switch (optname) {
case MISDN_TIME_STAMP:
if (_pms(sk)->cmask & MISDN_TIME_STAMP)
opt = 1;
else
opt = 0;
if (put_user(opt, optval))
return -EFAULT;
break;
default:
return -ENOPROTOOPT;
}
return 0;
}
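/*
 * Illustrative userspace sketch (not part of this file): turning on
 * receive timestamps, which mISDN_sock_cmsg() above then delivers as a
 * struct timeval ancillary message on each recvmsg().
 *
 *	int on = 1;
 *	setsockopt(s, SOL_MISDN, MISDN_TIME_STAMP, &on, sizeof(on));
 */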
static int
data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
struct sock *csk;
int err = 0;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
if (addr_len != sizeof(struct sockaddr_mISDN))
return -EINVAL;
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
lock_sock(sk);
if (_pms(sk)->dev) {
err = -EALREADY;
goto done;
}
_pms(sk)->dev = get_mdevice(maddr->dev);
if (!_pms(sk)->dev) {
err = -ENODEV;
goto done;
}
if (sk->sk_protocol < ISDN_P_B_START) {
read_lock_bh(&data_sockets.lock);
sk_for_each(csk, &data_sockets.head) {
if (sk == csk)
continue;
if (_pms(csk)->dev != _pms(sk)->dev)
continue;
if (csk->sk_protocol >= ISDN_P_B_START)
continue;
if (IS_ISDN_P_TE(csk->sk_protocol)
== IS_ISDN_P_TE(sk->sk_protocol))
continue;
read_unlock_bh(&data_sockets.lock);
err = -EBUSY;
goto done;
}
read_unlock_bh(&data_sockets.lock);
}
_pms(sk)->ch.send = mISDN_send;
_pms(sk)->ch.ctrl = mISDN_ctrl;
switch (sk->sk_protocol) {
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
mISDN_sock_unlink(&data_sockets, sk);
err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch,
sk->sk_protocol, maddr);
if (err)
mISDN_sock_link(&data_sockets, sk);
break;
case ISDN_P_LAPD_TE:
case ISDN_P_LAPD_NT:
err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch,
sk->sk_protocol, maddr);
break;
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
case ISDN_P_B_X75SLP:
case ISDN_P_B_L2DTMF:
case ISDN_P_B_L2DSP:
case ISDN_P_B_L2DSPHDLC:
err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch,
sk->sk_protocol, maddr);
break;
default:
err = -EPROTONOSUPPORT;
}
if (err)
goto done;
sk->sk_state = MISDN_BOUND;
_pms(sk)->ch.protocol = sk->sk_protocol;
done:
release_sock(sk);
return err;
}
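/*
 * Illustrative userspace sketch (not part of this file): binding a
 * socket to the first mISDN device; the field values are placeholders.
 *
 *	struct sockaddr_mISDN a = {
 *		.family = AF_ISDN,
 *		.dev = 0,
 *	};
 *	bind(s, (struct sockaddr *)&a, sizeof(a));
 */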
static int
data_sock_getname(struct socket *sock, struct sockaddr *addr,
int *addr_len, int peer)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
if (!_pms(sk)->dev)
return -EBADFD;
lock_sock(sk);
*addr_len = sizeof(*maddr);
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xff;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff;
release_sock(sk);
return 0;
}
static const struct proto_ops data_sock_ops = {
.family = PF_ISDN,
.owner = THIS_MODULE,
.release = data_sock_release,
.ioctl = data_sock_ioctl,
.bind = data_sock_bind,
.getname = data_sock_getname,
.sendmsg = mISDN_sock_sendmsg,
.recvmsg = mISDN_sock_recvmsg,
.poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = data_sock_setsockopt,
.getsockopt = data_sock_getsockopt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.mmap = sock_no_mmap
};
static int
data_sock_create(struct net *net, struct socket *sock, int protocol)
{
struct sock *sk;
if (sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &data_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = MISDN_OPEN;
mISDN_sock_link(&data_sockets, sk);
return 0;
}
static int
base_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
if (!sk)
return 0;
mISDN_sock_unlink(&base_sockets, sk);
sock_orphan(sk);
sock_put(sk);
return 0;
}
static int
base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int err = 0, id;
struct mISDNdevice *dev;
struct mISDNversion ver;
switch (cmd) {
case IMGETVERSION:
ver.major = MISDN_MAJOR_VERSION;
ver.minor = MISDN_MINOR_VERSION;
ver.release = MISDN_RELEASE;
if (copy_to_user((void __user *)arg, &ver, sizeof(ver)))
err = -EFAULT;
break;
case IMGETCOUNT:
id = get_mdevice_count();
if (put_user(id, (int __user *)arg))
err = -EFAULT;
break;
case IMGETDEVINFO:
if (get_user(id, (int __user *)arg)) {
err = -EFAULT;
break;
}
dev = get_mdevice(id);
if (dev) {
struct mISDN_devinfo di;
memset(&di, 0, sizeof(di));
di.id = dev->id;
di.Dprotocols = dev->Dprotocols;
di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
di.protocol = dev->D.protocol;
memcpy(di.channelmap, dev->channelmap,
sizeof(di.channelmap));
di.nrbchan = dev->nrbchan;
strcpy(di.name, dev_name(&dev->dev));
if (copy_to_user((void __user *)arg, &di, sizeof(di)))
err = -EFAULT;
} else
err = -ENODEV;
break;
case IMSETDEVNAME:
{
struct mISDN_devrename dn;
if (copy_from_user(&dn, (void __user *)arg,
sizeof(dn))) {
err = -EFAULT;
break;
}
dev = get_mdevice(dn.id);
if (dev)
err = device_rename(&dev->dev, dn.name);
else
err = -ENODEV;
}
break;
default:
err = -EINVAL;
}
return err;
}
static int
base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
int err = 0;
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
lock_sock(sk);
if (_pms(sk)->dev) {
err = -EALREADY;
goto done;
}
_pms(sk)->dev = get_mdevice(maddr->dev);
if (!_pms(sk)->dev) {
err = -ENODEV;
goto done;
}
sk->sk_state = MISDN_BOUND;
done:
release_sock(sk);
return err;
}
static const struct proto_ops base_sock_ops = {
.family = PF_ISDN,
.owner = THIS_MODULE,
.release = base_sock_release,
.ioctl = base_sock_ioctl,
.bind = base_sock_bind,
.getname = sock_no_getname,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
.poll = sock_no_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.mmap = sock_no_mmap
};
static int
base_sock_create(struct net *net, struct socket *sock, int protocol)
{
struct sock *sk;
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &base_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = MISDN_OPEN;
mISDN_sock_link(&base_sockets, sk);
return 0;
}
static int
mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
{
int err = -EPROTONOSUPPORT;
switch (proto) {
case ISDN_P_BASE:
err = base_sock_create(net, sock, proto);
break;
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
case ISDN_P_LAPD_TE:
case ISDN_P_LAPD_NT:
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
case ISDN_P_B_X75SLP:
case ISDN_P_B_L2DTMF:
case ISDN_P_B_L2DSP:
case ISDN_P_B_L2DSPHDLC:
err = data_sock_create(net, sock, proto);
break;
default:
return err;
}
return err;
}
static const struct net_proto_family mISDN_sock_family_ops = {
.owner = THIS_MODULE,
.family = PF_ISDN,
.create = mISDN_sock_create,
};
int
misdn_sock_init(u_int *deb)
{
int err;
debug = deb;
err = sock_register(&mISDN_sock_family_ops);
if (err)
printk(KERN_ERR "%s: error(%d)\n", __func__, err);
return err;
}
void
misdn_sock_cleanup(void)
{
sock_unregister(PF_ISDN);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5845_2 |
crossvul-cpp_data_good_2117_0 | /*
* Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);
static struct ib_client cm_client = {
.name = "cm",
.add = cm_add_one,
.remove = cm_remove_one
};
static struct ib_cm {
spinlock_t lock;
struct list_head device_list;
rwlock_t device_lock;
struct rb_root listen_service_table;
u64 listen_service_id;
/* struct rb_root peer_service_table; todo: fix peer to peer */
struct rb_root remote_qp_table;
struct rb_root remote_id_table;
struct rb_root remote_sidr_table;
struct idr local_id_table;
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
} cm;
/* Counter indexes ordered by attribute ID */
enum {
CM_REQ_COUNTER,
CM_MRA_COUNTER,
CM_REJ_COUNTER,
CM_REP_COUNTER,
CM_RTU_COUNTER,
CM_DREQ_COUNTER,
CM_DREP_COUNTER,
CM_SIDR_REQ_COUNTER,
CM_SIDR_REP_COUNTER,
CM_LAP_COUNTER,
CM_APR_COUNTER,
CM_ATTR_COUNT,
CM_ATTR_ID_OFFSET = 0x0010,
};
enum {
CM_XMIT,
CM_XMIT_RETRIES,
CM_RECV,
CM_RECV_DUPLICATES,
CM_COUNTER_GROUPS
};
static char const counter_group_names[CM_COUNTER_GROUPS]
[sizeof("cm_rx_duplicates")] = {
"cm_tx_msgs", "cm_tx_retries",
"cm_rx_msgs", "cm_rx_duplicates"
};
struct cm_counter_group {
struct kobject obj;
atomic_long_t counter[CM_ATTR_COUNT];
};
struct cm_counter_attribute {
struct attribute attr;
int index;
};
#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
.attr = { .name = __stringify(_name), .mode = 0444 }, \
.index = _index \
}
static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
static struct attribute *cm_counter_default_attrs[] = {
&cm_req_counter_attr.attr,
&cm_mra_counter_attr.attr,
&cm_rej_counter_attr.attr,
&cm_rep_counter_attr.attr,
&cm_rtu_counter_attr.attr,
&cm_dreq_counter_attr.attr,
&cm_drep_counter_attr.attr,
&cm_sidr_req_counter_attr.attr,
&cm_sidr_rep_counter_attr.attr,
&cm_lap_counter_attr.attr,
&cm_apr_counter_attr.attr,
NULL
};
struct cm_port {
struct cm_device *cm_dev;
struct ib_mad_agent *mad_agent;
struct kobject port_obj;
u8 port_num;
struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};
struct cm_device {
struct list_head list;
struct ib_device *ib_device;
struct device *device;
u8 ack_delay;
struct cm_port *port[0];
};
struct cm_av {
struct cm_port *port;
union ib_gid dgid;
struct ib_ah_attr ah_attr;
u16 pkey_index;
u8 timeout;
u8 valid;
u8 smac[ETH_ALEN];
};
struct cm_work {
struct delayed_work work;
struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
__be32 local_id; /* Established / timewait */
__be32 remote_id;
struct ib_cm_event cm_event;
struct ib_sa_path_rec path[0];
};
struct cm_timewait_info {
struct cm_work work; /* Must be first. */
struct list_head list;
struct rb_node remote_qp_node;
struct rb_node remote_id_node;
__be64 remote_ca_guid;
__be32 remote_qpn;
u8 inserted_remote_qp;
u8 inserted_remote_id;
};
struct cm_id_private {
struct ib_cm_id id;
struct rb_node service_node;
struct rb_node sidr_id_node;
spinlock_t lock; /* Do not acquire inside cm.lock */
struct completion comp;
atomic_t refcount;
struct ib_mad_send_buf *msg;
struct cm_timewait_info *timewait_info;
/* todo: use alternate port on send failure */
struct cm_av av;
struct cm_av alt_av;
struct ib_cm_compare_data *compare_data;
void *private_data;
__be64 tid;
__be32 local_qpn;
__be32 remote_qpn;
enum ib_qp_type qp_type;
__be32 sq_psn;
__be32 rq_psn;
int timeout_ms;
enum ib_mtu path_mtu;
__be16 pkey;
u8 private_data_len;
u8 max_cm_retries;
u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
u8 retry_count;
u8 rnr_retry_count;
u8 service_timeout;
u8 target_ack_delay;
struct list_head work_list;
atomic_t work_count;
};
static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
if (atomic_dec_and_test(&cm_id_priv->refcount))
complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
struct ib_mad_send_buf **msg)
{
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
struct ib_ah *ah;
mad_agent = cm_id_priv->av.port->mad_agent;
ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
if (IS_ERR(ah))
return PTR_ERR(ah);
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
cm_id_priv->av.pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
}
/* Timeout set by caller if response is expected. */
m->ah = ah;
m->retries = cm_id_priv->max_cm_retries;
atomic_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
return 0;
}
static int cm_alloc_response_msg(struct cm_port *port,
struct ib_mad_recv_wc *mad_recv_wc,
struct ib_mad_send_buf **msg)
{
struct ib_mad_send_buf *m;
struct ib_ah *ah;
ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
mad_recv_wc->recv_buf.grh, port->port_num);
if (IS_ERR(ah))
return PTR_ERR(ah);
m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
}
m->ah = ah;
*msg = m;
return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
ib_destroy_ah(msg->ah);
if (msg->context[0])
cm_deref_id(msg->context[0]);
ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
u8 private_data_len)
{
void *data;
if (!private_data || !private_data_len)
return NULL;
data = kmemdup(private_data, private_data_len, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
return data;
}
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
void *private_data, u8 private_data_len)
{
if (cm_id_priv->private_data && cm_id_priv->private_data_len)
kfree(cm_id_priv->private_data);
cm_id_priv->private_data = private_data;
cm_id_priv->private_data_len = private_data_len;
}
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
struct ib_grh *grh, struct cm_av *av)
{
av->port = port;
av->pkey_index = wc->pkey_index;
ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
unsigned long flags;
int ret;
u8 p;
read_lock_irqsave(&cm.device_lock, flags);
list_for_each_entry(cm_dev, &cm.device_list, list) {
if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
&p, NULL)) {
port = cm_dev->port[p-1];
break;
}
}
read_unlock_irqrestore(&cm.device_lock, flags);
if (!port)
return -EINVAL;
ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
be16_to_cpu(path->pkey), &av->pkey_index);
if (ret)
return ret;
av->port = port;
ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
memcpy(av->smac, path->smac, sizeof(av->smac));
av->valid = 1;
return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
int id;
idr_preload(GFP_KERNEL);
spin_lock_irqsave(&cm.lock, flags);
id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);
spin_unlock_irqrestore(&cm.lock, flags);
idr_preload_end();
cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
return id < 0 ? id : 0;
}
static void cm_free_id(__be32 local_id)
{
spin_lock_irq(&cm.lock);
idr_remove(&cm.local_id_table,
(__force int) (local_id ^ cm.random_id_operand));
spin_unlock_irq(&cm.lock);
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = idr_find(&cm.local_id_table,
(__force int) (local_id ^ cm.random_id_operand));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
return cm_id_priv;
}
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
spin_lock_irq(&cm.lock);
cm_id_priv = cm_get_id(local_id, remote_id);
spin_unlock_irq(&cm.lock);
return cm_id_priv;
}
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
int i;
for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
((unsigned long *) mask)[i];
}
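/*
 * In other words, cm_mask_copy() leaves dst holding src ANDed word by
 * word with mask, so only the bits a listener selected in its mask take
 * part in the private-data comparisons below.
 */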
static int cm_compare_data(struct ib_cm_compare_data *src_data,
struct ib_cm_compare_data *dst_data)
{
u8 src[IB_CM_COMPARE_SIZE];
u8 dst[IB_CM_COMPARE_SIZE];
if (!src_data || !dst_data)
return 0;
cm_mask_copy(src, src_data->data, dst_data->mask);
cm_mask_copy(dst, dst_data->data, src_data->mask);
return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}
static int cm_compare_private_data(u8 *private_data,
struct ib_cm_compare_data *dst_data)
{
u8 src[IB_CM_COMPARE_SIZE];
if (!dst_data)
return 0;
cm_mask_copy(src, private_data, dst_data->mask);
return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
/*
* Trivial helpers to strip endian annotation and compare; the
* endianness doesn't actually matter since we just need a stable
* order for the RB tree.
*/
static int be32_lt(__be32 a, __be32 b)
{
return (__force u32) a < (__force u32) b;
}
static int be32_gt(__be32 a, __be32 b)
{
return (__force u32) a > (__force u32) b;
}
static int be64_lt(__be64 a, __be64 b)
{
return (__force u64) a < (__force u64) b;
}
static int be64_gt(__be64 a, __be64 b)
{
return (__force u64) a > (__force u64) b;
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
struct rb_node **link = &cm.listen_service_table.rb_node;
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
__be64 service_id = cm_id_priv->id.service_id;
__be64 service_mask = cm_id_priv->id.service_mask;
int data_cmp;
while (*link) {
parent = *link;
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
service_node);
data_cmp = cm_compare_data(cm_id_priv->compare_data,
cur_cm_id_priv->compare_data);
if ((cur_cm_id_priv->id.service_mask & service_id) ==
(service_mask & cur_cm_id_priv->id.service_id) &&
(cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
!data_cmp)
return cur_cm_id_priv;
if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
link = &(*link)->rb_left;
else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
link = &(*link)->rb_right;
else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
link = &(*link)->rb_left;
else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
link = &(*link)->rb_right;
else if (data_cmp < 0)
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
}
rb_link_node(&cm_id_priv->service_node, parent, link);
rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
return NULL;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
__be64 service_id,
u8 *private_data)
{
struct rb_node *node = cm.listen_service_table.rb_node;
struct cm_id_private *cm_id_priv;
int data_cmp;
while (node) {
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
data_cmp = cm_compare_private_data(private_data,
cm_id_priv->compare_data);
if ((cm_id_priv->id.service_mask & service_id) ==
cm_id_priv->id.service_id &&
(cm_id_priv->id.device == device) && !data_cmp)
return cm_id_priv;
if (device < cm_id_priv->id.device)
node = node->rb_left;
else if (device > cm_id_priv->id.device)
node = node->rb_right;
else if (be64_lt(service_id, cm_id_priv->id.service_id))
node = node->rb_left;
else if (be64_gt(service_id, cm_id_priv->id.service_id))
node = node->rb_right;
else if (data_cmp < 0)
node = node->rb_left;
else
node = node->rb_right;
}
return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
*timewait_info)
{
struct rb_node **link = &cm.remote_id_table.rb_node;
struct rb_node *parent = NULL;
struct cm_timewait_info *cur_timewait_info;
__be64 remote_ca_guid = timewait_info->remote_ca_guid;
__be32 remote_id = timewait_info->work.remote_id;
while (*link) {
parent = *link;
cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
remote_id_node);
if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
link = &(*link)->rb_left;
else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
link = &(*link)->rb_right;
else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
link = &(*link)->rb_left;
else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
link = &(*link)->rb_right;
else
return cur_timewait_info;
}
timewait_info->inserted_remote_id = 1;
rb_link_node(&timewait_info->remote_id_node, parent, link);
rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
__be32 remote_id)
{
struct rb_node *node = cm.remote_id_table.rb_node;
struct cm_timewait_info *timewait_info;
while (node) {
timewait_info = rb_entry(node, struct cm_timewait_info,
remote_id_node);
if (be32_lt(remote_id, timewait_info->work.remote_id))
node = node->rb_left;
else if (be32_gt(remote_id, timewait_info->work.remote_id))
node = node->rb_right;
else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
node = node->rb_left;
else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
node = node->rb_right;
else
return timewait_info;
}
return NULL;
}
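/*
 * Track the remote (CA GUID, QPN) pair; a collision here means a stale
 * connection still claims the remote QP.  Returns the existing entry
 * on collision, otherwise inserts and returns NULL.
 */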
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
*timewait_info)
{
struct rb_node **link = &cm.remote_qp_table.rb_node;
struct rb_node *parent = NULL;
struct cm_timewait_info *cur_timewait_info;
__be64 remote_ca_guid = timewait_info->remote_ca_guid;
__be32 remote_qpn = timewait_info->remote_qpn;
while (*link) {
parent = *link;
cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
remote_qp_node);
if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
link = &(*link)->rb_left;
else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
link = &(*link)->rb_right;
else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
link = &(*link)->rb_left;
else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
link = &(*link)->rb_right;
else
return cur_timewait_info;
}
timewait_info->inserted_remote_qp = 1;
rb_link_node(&timewait_info->remote_qp_node, parent, link);
rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
return NULL;
}
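/*
 * Track SIDR REQs by remote comm ID and destination GID so a repeated
 * SIDR request from the same peer resolves to the existing id.
 */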
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
*cm_id_priv)
{
struct rb_node **link = &cm.remote_sidr_table.rb_node;
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
union ib_gid *port_gid = &cm_id_priv->av.dgid;
__be32 remote_id = cm_id_priv->id.remote_id;
while (*link) {
parent = *link;
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
sidr_id_node);
if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
link = &(*link)->rb_left;
else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
link = &(*link)->rb_right;
else {
int cmp;
cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
sizeof *port_gid);
if (cmp < 0)
link = &(*link)->rb_left;
else if (cmp > 0)
link = &(*link)->rb_right;
else
return cur_cm_id_priv;
}
}
rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
enum ib_cm_sidr_status status)
{
struct ib_cm_sidr_rep_param param;
memset(&param, 0, sizeof param);
param.status = status;
ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
ib_cm_handler cm_handler,
void *context)
{
struct cm_id_private *cm_id_priv;
int ret;
cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
if (!cm_id_priv)
return ERR_PTR(-ENOMEM);
cm_id_priv->id.state = IB_CM_IDLE;
cm_id_priv->id.device = device;
cm_id_priv->id.cm_handler = cm_handler;
cm_id_priv->id.context = context;
cm_id_priv->id.remote_cm_qpn = 1;
ret = cm_alloc_id(cm_id_priv);
if (ret)
goto error;
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
atomic_set(&cm_id_priv->work_count, -1);
atomic_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
error:
kfree(cm_id_priv);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
struct cm_work *work;
if (list_empty(&cm_id_priv->work_list))
return NULL;
work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
list_del(&work->list);
return work;
}
static void cm_free_work(struct cm_work *work)
{
if (work->mad_recv_wc)
ib_free_recv_mad(work->mad_recv_wc);
kfree(work);
}
static inline int cm_convert_to_ms(int iba_time)
{
/* approximate conversion to ms from 4.096us x 2^iba_time */
return 1 << max(iba_time - 8, 0);
}
/*
* calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
* Because of how ack_timeout is stored, adding one doubles the timeout.
* To avoid large timeouts, select the max(ack_delay, life_time + 1), and
* increment it (round up) only if the other is within 50%.
*/
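/*
 * Example: ca_ack_delay = 15 and packet_life_time = 14 yield
 * ack_timeout = 16, since 2 x 4.096us x 2^14 + 4.096us x 2^15 is
 * exactly 4.096us x 2^16.
 */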
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
int ack_timeout = packet_life_time + 1;
if (ack_timeout >= ca_ack_delay)
ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
else
ack_timeout = ca_ack_delay +
(ack_timeout >= (ca_ack_delay - 1));
return min(31, ack_timeout);
}
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
if (timewait_info->inserted_remote_id) {
rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
timewait_info->inserted_remote_id = 0;
}
if (timewait_info->inserted_remote_qp) {
rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
timewait_info->inserted_remote_qp = 0;
}
}
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
struct cm_timewait_info *timewait_info;
timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
if (!timewait_info)
return ERR_PTR(-ENOMEM);
timewait_info->work.local_id = local_id;
INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
int wait_time;
unsigned long flags;
spin_lock_irqsave(&cm.lock, flags);
cm_cleanup_timewait(cm_id_priv->timewait_info);
list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
spin_unlock_irqrestore(&cm.lock, flags);
/*
* The cm_id could be destroyed by the user before we exit timewait.
* To protect against this, we search for the cm_id after exiting
* timewait before notifying the user that we've exited timewait.
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
cm_id_priv->timewait_info = NULL;
}
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
cm_id_priv->id.state = IB_CM_IDLE;
if (cm_id_priv->timewait_info) {
spin_lock_irqsave(&cm.lock, flags);
cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irqrestore(&cm.lock, flags);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
}
}
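/*
 * Tear down a cm_id from whatever state it is in: cancel or reject any
 * outstanding exchange, then drop the final reference and wait for all
 * handlers to complete before freeing.  An established connection
 * first sends a DREQ and re-enters the state machine via retest.
 */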
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id->state) {
case IB_CM_LISTEN:
cm_id->state = IB_CM_IDLE;
spin_unlock_irq(&cm_id_priv->lock);
spin_lock_irq(&cm.lock);
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
spin_unlock_irq(&cm.lock);
break;
case IB_CM_SIDR_REQ_SENT:
cm_id->state = IB_CM_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
break;
case IB_CM_SIDR_REQ_RCVD:
spin_unlock_irq(&cm_id_priv->lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
break;
case IB_CM_REQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
&cm_id_priv->id.device->node_guid,
sizeof cm_id_priv->id.device->node_guid,
NULL, 0);
break;
case IB_CM_REQ_RCVD:
if (err == -ENOMEM) {
/* Do not reject to allow future retries. */
cm_reset_to_idle(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
} else {
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
}
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* Fall through */
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
break;
case IB_CM_ESTABLISHED:
spin_unlock_irq(&cm_id_priv->lock);
if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
break;
ib_send_cm_dreq(cm_id, NULL, 0);
goto retest;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
break;
case IB_CM_DREQ_RCVD:
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_drep(cm_id, NULL, 0);
break;
default:
spin_unlock_irq(&cm_id_priv->lock);
break;
}
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
kfree(cm_id_priv->compare_data);
kfree(cm_id_priv->private_data);
kfree(cm_id_priv);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
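/*
 * Begin listening for connection requests on the given service ID.  A
 * zero service_mask is treated as all ones (exact match), and
 * IB_CM_ASSIGN_SERVICE_ID requests a locally assigned service ID.  An
 * optional compare_data filter further restricts matches by REQ
 * private data.  Returns -EBUSY if an equivalent listen already
 * exists.
 */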
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
struct ib_cm_compare_data *compare_data)
{
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
unsigned long flags;
int ret = 0;
service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
service_id &= service_mask;
if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
(service_id != IB_CM_ASSIGN_SERVICE_ID))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
if (cm_id->state != IB_CM_IDLE)
return -EINVAL;
if (compare_data) {
cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
GFP_KERNEL);
if (!cm_id_priv->compare_data)
return -ENOMEM;
cm_mask_copy(cm_id_priv->compare_data->data,
compare_data->data, compare_data->mask);
memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
IB_CM_COMPARE_SIZE);
}
cm_id->state = IB_CM_LISTEN;
spin_lock_irqsave(&cm.lock, flags);
if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
cm_id->service_mask = ~cpu_to_be64(0);
} else {
cm_id->service_id = service_id;
cm_id->service_mask = service_mask;
}
cur_cm_id_priv = cm_insert_listen(cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
if (cur_cm_id_priv) {
cm_id->state = IB_CM_IDLE;
kfree(cm_id_priv->compare_data);
cm_id_priv->compare_data = NULL;
ret = -EBUSY;
}
return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
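/*
 * Build a 64-bit MAD transaction ID: the MAD agent's hi_tid occupies
 * the upper 32 bits; the lower 32 bits combine the local comm ID with
 * the message sequence in bits 30-31.
 */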
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
enum cm_msg_sequence msg_seq)
{
u64 hi_tid, low_tid;
hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
(msg_seq << 30));
return cpu_to_be64(hi_tid | low_tid);
}
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
__be16 attr_id, __be64 tid)
{
hdr->base_version = IB_MGMT_BASE_VERSION;
hdr->mgmt_class = IB_MGMT_CLASS_CM;
hdr->class_version = IB_CM_CLASS_VERSION;
hdr->method = IB_MGMT_METHOD_SEND;
hdr->attr_id = attr_id;
hdr->tid = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
{
struct ib_sa_path_rec *pri_path = param->primary_path;
struct ib_sa_path_rec *alt_path = param->alternate_path;
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
req_msg->local_comm_id = cm_id_priv->id.local_id;
req_msg->service_id = param->service_id;
req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
cm_req_set_init_depth(req_msg, param->initiator_depth);
cm_req_set_remote_resp_timeout(req_msg,
param->remote_cm_response_timeout);
cm_req_set_qp_type(req_msg, param->qp_type);
cm_req_set_flow_ctrl(req_msg, param->flow_control);
cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
cm_req_set_local_resp_timeout(req_msg,
param->local_cm_response_timeout);
req_msg->pkey = param->primary_path->pkey;
cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
if (param->qp_type != IB_QPT_XRC_INI) {
cm_req_set_resp_res(req_msg, param->responder_resources);
cm_req_set_retry_count(req_msg, param->retry_count);
cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
cm_req_set_srq(req_msg, param->srq);
}
if (pri_path->hop_limit <= 1) {
req_msg->primary_local_lid = pri_path->slid;
req_msg->primary_remote_lid = pri_path->dlid;
} else {
/* Work-around until there's a way to obtain remote LID info */
req_msg->primary_local_lid = IB_LID_PERMISSIVE;
req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
}
req_msg->primary_local_gid = pri_path->sgid;
req_msg->primary_remote_gid = pri_path->dgid;
cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
req_msg->primary_traffic_class = pri_path->traffic_class;
req_msg->primary_hop_limit = pri_path->hop_limit;
cm_req_set_primary_sl(req_msg, pri_path->sl);
cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
cm_req_set_primary_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
pri_path->packet_life_time));
if (alt_path) {
if (alt_path->hop_limit <= 1) {
req_msg->alt_local_lid = alt_path->slid;
req_msg->alt_remote_lid = alt_path->dlid;
} else {
req_msg->alt_local_lid = IB_LID_PERMISSIVE;
req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
}
req_msg->alt_local_gid = alt_path->sgid;
req_msg->alt_remote_gid = alt_path->dgid;
cm_req_set_alt_flow_label(req_msg,
alt_path->flow_label);
cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
req_msg->alt_traffic_class = alt_path->traffic_class;
req_msg->alt_hop_limit = alt_path->hop_limit;
cm_req_set_alt_sl(req_msg, alt_path->sl);
cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
cm_req_set_alt_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alt_path->packet_life_time));
}
if (param->private_data && param->private_data_len)
memcpy(req_msg->private_data, param->private_data,
param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
/* peer-to-peer not supported */
if (param->peer_to_peer)
return -EINVAL;
if (!param->primary_path)
return -EINVAL;
if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
param->qp_type != IB_QPT_XRC_INI)
return -EINVAL;
if (param->private_data &&
param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
return -EINVAL;
if (param->alternate_path &&
(param->alternate_path->pkey != param->primary_path->pkey ||
param->alternate_path->mtu != param->primary_path->mtu))
return -EINVAL;
return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
struct ib_cm_req_param *param)
{
struct cm_id_private *cm_id_priv;
struct cm_req_msg *req_msg;
unsigned long flags;
int ret;
ret = cm_validate_req_param(param);
if (ret)
return ret;
/* Verify that we're not in timewait. */
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_IDLE) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = -EINVAL;
goto out;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
goto out;
}
ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
if (ret)
goto error1;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path,
&cm_id_priv->alt_av);
if (ret)
goto error1;
}
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = cm_convert_to_ms(
param->primary_path->packet_life_time) * 2 +
cm_convert_to_ms(
param->remote_cm_response_timeout);
cm_id_priv->max_cm_retries = param->max_cm_retries;
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->retry_count = param->retry_count;
cm_id_priv->path_mtu = param->primary_path->mtu;
cm_id_priv->pkey = param->primary_path->pkey;
cm_id_priv->qp_type = param->qp_type;
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
if (ret)
goto error1;
req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
spin_lock_irqsave(&cm_id_priv->lock, flags);
ret = ib_post_send_mad(cm_id_priv->msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
goto error2;
}
BUG_ON(cm_id->state != IB_CM_IDLE);
cm_id->state = IB_CM_REQ_SENT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out: return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
static int cm_issue_rej(struct cm_port *port,
struct ib_mad_recv_wc *mad_recv_wc,
enum ib_cm_rej_reason reason,
enum cm_msg_response msg_rejected,
void *ari, u8 ari_length)
{
struct ib_mad_send_buf *msg = NULL;
struct cm_rej_msg *rej_msg, *rcv_msg;
int ret;
ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
if (ret)
return ret;
/* We just need common CM header information. Cast to any message. */
rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
rej_msg = (struct cm_rej_msg *) msg->mad;
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
rej_msg->remote_comm_id = rcv_msg->local_comm_id;
rej_msg->local_comm_id = rcv_msg->remote_comm_id;
cm_rej_set_msg_rejected(rej_msg, msg_rejected);
rej_msg->reason = cpu_to_be16(reason);
if (ari && ari_length) {
cm_rej_set_reject_info_len(rej_msg, ari_length);
memcpy(rej_msg->ari, ari, ari_length);
}
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
__be32 local_qpn, __be32 remote_qpn)
{
return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
((local_ca_guid == remote_ca_guid) &&
(be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
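/*
 * Build path records for the passive side from a received REQ.  The
 * REQ describes the active side's view, so local and remote fields are
 * swapped: the sender's local GID/LID becomes our destination.
 */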
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
struct ib_sa_path_rec *primary_path,
struct ib_sa_path_rec *alt_path)
{
memset(primary_path, 0, sizeof *primary_path);
primary_path->dgid = req_msg->primary_local_gid;
primary_path->sgid = req_msg->primary_remote_gid;
primary_path->dlid = req_msg->primary_local_lid;
primary_path->slid = req_msg->primary_remote_lid;
primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
primary_path->hop_limit = req_msg->primary_hop_limit;
primary_path->traffic_class = req_msg->primary_traffic_class;
primary_path->reversible = 1;
primary_path->pkey = req_msg->pkey;
primary_path->sl = cm_req_get_primary_sl(req_msg);
primary_path->mtu_selector = IB_SA_EQ;
primary_path->mtu = cm_req_get_path_mtu(req_msg);
primary_path->rate_selector = IB_SA_EQ;
primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
primary_path->packet_life_time_selector = IB_SA_EQ;
primary_path->packet_life_time =
cm_req_get_primary_local_ack_timeout(req_msg);
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
if (req_msg->alt_local_lid) {
memset(alt_path, 0, sizeof *alt_path);
alt_path->dgid = req_msg->alt_local_gid;
alt_path->sgid = req_msg->alt_remote_gid;
alt_path->dlid = req_msg->alt_local_lid;
alt_path->slid = req_msg->alt_remote_lid;
alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
alt_path->hop_limit = req_msg->alt_hop_limit;
alt_path->traffic_class = req_msg->alt_traffic_class;
alt_path->reversible = 1;
alt_path->pkey = req_msg->pkey;
alt_path->sl = cm_req_get_alt_sl(req_msg);
alt_path->mtu_selector = IB_SA_EQ;
alt_path->mtu = cm_req_get_path_mtu(req_msg);
alt_path->rate_selector = IB_SA_EQ;
alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
alt_path->packet_life_time_selector = IB_SA_EQ;
alt_path->packet_life_time =
cm_req_get_alt_local_ack_timeout(req_msg);
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
}
}
static void cm_format_req_event(struct cm_work *work,
struct cm_id_private *cm_id_priv,
struct ib_cm_id *listen_id)
{
struct cm_req_msg *req_msg;
struct ib_cm_req_event_param *param;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.req_rcvd;
param->listen_id = listen_id;
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
if (req_msg->alt_local_lid)
param->alternate_path = &work->path[1];
else
param->alternate_path = NULL;
param->remote_ca_guid = req_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
param->qp_type = cm_req_get_qp_type(req_msg);
param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
param->responder_resources = cm_req_get_init_depth(req_msg);
param->initiator_depth = cm_req_get_resp_res(req_msg);
param->local_cm_response_timeout =
cm_req_get_remote_resp_timeout(req_msg);
param->flow_control = cm_req_get_flow_ctrl(req_msg);
param->remote_cm_response_timeout =
cm_req_get_local_resp_timeout(req_msg);
param->retry_count = cm_req_get_retry_count(req_msg);
param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
param->srq = cm_req_get_srq(req_msg);
work->cm_event.private_data = &req_msg->private_data;
}
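/*
 * Deliver an event to the consumer's handler, then drain any further
 * events queued while the handler ran.  A nonzero handler return value
 * destroys the cm_id.
 */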
static void cm_process_work(struct cm_id_private *cm_id_priv,
struct cm_work *work)
{
int ret;
/* We will typically only have the current event to report. */
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
cm_free_work(work);
while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
spin_lock_irq(&cm_id_priv->lock);
work = cm_dequeue_work(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
BUG_ON(!work);
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
&work->cm_event);
cm_free_work(work);
}
cm_deref_id(cm_id_priv);
if (ret)
cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
struct cm_id_private *cm_id_priv,
enum cm_msg_response msg_mraed, u8 service_timeout,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
cm_mra_set_msg_mraed(mra_msg, msg_mraed);
mra_msg->local_comm_id = cm_id_priv->id.local_id;
mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_mra_set_service_timeout(mra_msg, service_timeout);
if (private_data && private_data_len)
memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
struct cm_id_private *cm_id_priv,
enum ib_cm_rej_reason reason,
void *ari,
u8 ari_length,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
switch(cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
rej_msg->local_comm_id = 0;
cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
break;
case IB_CM_MRA_REQ_SENT:
rej_msg->local_comm_id = cm_id_priv->id.local_id;
cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
break;
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
rej_msg->local_comm_id = cm_id_priv->id.local_id;
cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
break;
default:
rej_msg->local_comm_id = cm_id_priv->id.local_id;
cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
break;
}
rej_msg->reason = cpu_to_be16(reason);
if (ari && ari_length) {
cm_rej_set_reject_info_len(rej_msg, ari_length);
memcpy(rej_msg->ari, ari, ari_length);
}
if (private_data && private_data_len)
memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
struct cm_id_private *cm_id_priv)
{
struct ib_mad_send_buf *msg = NULL;
int ret;
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_REQ_COUNTER]);
/* Quick state check to discard duplicate REQs. */
if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
return;
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
return;
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
break;
case IB_CM_TIMEWAIT:
cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
break;
default:
goto unlock;
}
spin_unlock_irq(&cm_id_priv->lock);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
return;
unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
}
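/*
 * Match an incoming REQ against existing state.  Duplicate REQs are
 * answered from the existing id, stale connections draw a REJ, and
 * otherwise the matching listener is returned with a reference held on
 * both the listener and the new id.
 */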
static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *cm_id_priv)
{
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
/* Check for possible duplicate REQ. */
spin_lock_irq(&cm.lock);
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
if (timewait_info) {
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock_irq(&cm.lock);
if (cur_cm_id_priv) {
cm_dup_req_handler(work, cur_cm_id_priv);
cm_deref_id(cur_cm_id_priv);
}
return NULL;
}
/* Check for stale connections. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
return NULL;
}
/* Find matching listen request. */
listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
req_msg->service_id,
req_msg->private_data);
if (!listen_cm_id_priv) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
goto out;
}
atomic_inc(&listen_cm_id_priv->refcount);
atomic_inc(&cm_id_priv->refcount);
cm_id_priv->id.state = IB_CM_REQ_RCVD;
atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
out:
return listen_cm_id_priv;
}
/*
* Work-around for inter-subnet connections. If the LIDs are permissive,
* we need to override the LID/SL data in the REQ with the LID information
* in the work completion.
*/
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
if (!cm_req_get_primary_subnet_local(req_msg)) {
if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
req_msg->primary_local_lid = cpu_to_be16(wc->slid);
cm_req_set_primary_sl(req_msg, wc->sl);
}
if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
}
if (!cm_req_get_alt_subnet_local(req_msg)) {
if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
req_msg->alt_local_lid = cpu_to_be16(wc->slid);
cm_req_set_alt_sl(req_msg, wc->sl);
}
if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
}
}
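/*
 * Top-level handler for a received REQ: create a new cm_id, match the
 * request to a listener, resolve the primary and alternate paths, and
 * deliver the request event to the listener's handler.
 */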
static int cm_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_req_msg *req_msg;
int ret;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
cm_id_priv->id.remote_id = req_msg->local_comm_id;
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
ret = -EINVAL;
kfree(cm_id_priv->timewait_info);
goto destroy;
}
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = listen_cm_id_priv->id.context;
cm_id_priv->id.service_id = req_msg->service_id;
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num, 0, &work->path[0].sgid);
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
&work->path[0].sgid, sizeof work->path[0].sgid,
NULL, 0);
goto rejected;
}
if (req_msg->alt_local_lid) {
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
sizeof work->path[0].sgid, NULL, 0);
goto rejected;
}
}
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->timeout_ms = cm_convert_to_ms(
cm_req_get_local_resp_timeout(req_msg));
cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
cm_id_priv->pkey = req_msg->pkey;
cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
cm_deref_id(listen_cm_id_priv);
return 0;
rejected:
atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
destroy:
ib_destroy_cm_id(cm_id);
return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_rep_param *param)
{
cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
rep_msg->local_comm_id = cm_id_priv->id.local_id;
rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
rep_msg->resp_resources = param->responder_resources;
cm_rep_set_target_ack_delay(rep_msg,
cm_id_priv->av.port->cm_dev->ack_delay);
cm_rep_set_failover(rep_msg, param->failover_accepted);
cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
rep_msg->initiator_depth = param->initiator_depth;
cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
cm_rep_set_srq(rep_msg, param->srq);
cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
} else {
cm_rep_set_srq(rep_msg, 1);
cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
}
if (param->private_data && param->private_data_len)
memcpy(rep_msg->private_data, param->private_data,
param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
struct ib_cm_rep_param *param)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
struct cm_rep_msg *rep_msg;
unsigned long flags;
int ret;
if (param->private_data &&
param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REQ_RCVD &&
cm_id->state != IB_CM_MRA_REQ_SENT) {
ret = -EINVAL;
goto out;
}
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
rep_msg = (struct cm_rep_msg *) msg->mad;
cm_format_rep(rep_msg, cm_id_priv, param);
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->state = IB_CM_REP_SENT;
cm_id_priv->msg = msg;
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
struct cm_id_private *cm_id_priv,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
rtu_msg->local_comm_id = cm_id_priv->id.local_id;
rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
if (private_data && private_data_len)
memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
void *data;
int ret;
if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
return -EINVAL;
data = cm_copy_private_data(private_data, private_data_len);
if (IS_ERR(data))
return PTR_ERR(data);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REP_RCVD &&
cm_id->state != IB_CM_MRA_REP_SENT) {
ret = -EINVAL;
goto error;
}
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto error;
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
kfree(data);
return ret;
}
cm_id->state = IB_CM_ESTABLISHED;
cm_set_private_data(cm_id_priv, data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
struct cm_rep_msg *rep_msg;
struct ib_cm_rep_event_param *param;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.rep_rcvd;
param->remote_ca_guid = rep_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
param->responder_resources = rep_msg->initiator_depth;
param->initiator_depth = rep_msg->resp_resources;
param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
param->failover_accepted = cm_rep_get_failover(rep_msg);
param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
param->srq = cm_rep_get_srq(rep_msg);
work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
struct ib_mad_send_buf *msg = NULL;
int ret;
rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
rep_msg->local_comm_id);
if (!cm_id_priv)
return;
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_REP_COUNTER]);
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
goto deref;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else
goto unlock;
spin_unlock_irq(&cm_id_priv->lock);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
goto deref;
unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
deref: cm_deref_id(cm_id_priv);
}
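/*
 * Handle a received REP on the active side: record the remote comm ID
 * and QPN for duplicate and stale-connection detection, absorb the
 * peer's connection parameters, and queue the event for the consumer.
 */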
static int cm_rep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
int ret;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
if (!cm_id_priv) {
cm_dup_rep_handler(work);
return -EINVAL;
}
cm_format_rep_event(work, cm_id_priv->qp_type);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
break;
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto error;
}
cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
spin_lock(&cm.lock);
/* Check for duplicate REP. */
if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto error;
}
/* Check for a stale connection. */
if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
rb_erase(&cm_id_priv->timewait_info->remote_id_node,
&cm.remote_id_table);
cm_id_priv->timewait_info->inserted_remote_id = 0;
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
goto error;
}
spin_unlock(&cm.lock);
cm_id_priv->id.state = IB_CM_REP_RCVD;
cm_id_priv->id.remote_id = rep_msg->local_comm_id;
cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
cm_id_priv->initiator_depth = rep_msg->resp_resources;
cm_id_priv->responder_resources = rep_msg->initiator_depth;
cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
cm_id_priv->av.timeout =
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->av.timeout - 1);
cm_id_priv->alt_av.timeout =
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->alt_av.timeout - 1);
/* todo: handle peer_to_peer */
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
error:
cm_deref_id(cm_id_priv);
return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
int ret;
/* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
if (!cm_id_priv)
return -EINVAL;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
int ret;
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
rtu_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL;
work->cm_event.private_data = &rtu_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_RTU_COUNTER]);
goto out;
}
cm_id_priv->id.state = IB_CM_ESTABLISHED;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
struct cm_id_private *cm_id_priv,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
dreq_msg->local_comm_id = cm_id_priv->id.local_id;
dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
if (private_data && private_data_len)
memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED) {
ret = -EINVAL;
goto out;
}
if (cm_id->lap_state == IB_CM_LAP_SENT ||
cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret) {
cm_enter_timewait(cm_id_priv);
goto out;
}
cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_enter_timewait(cm_id_priv);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->state = IB_CM_DREQ_SENT;
cm_id_priv->msg = msg;
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
struct cm_id_private *cm_id_priv,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
drep_msg->local_comm_id = cm_id_priv->id.local_id;
drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
if (private_data && private_data_len)
memcpy(drep_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
void *data;
int ret;
if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
return -EINVAL;
data = cm_copy_private_data(private_data, private_data_len);
if (IS_ERR(data))
return PTR_ERR(data);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_DREQ_RCVD) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
return -EINVAL;
}
cm_set_private_data(cm_id_priv, data, private_data_len);
cm_enter_timewait(cm_id_priv);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_issue_drep(struct cm_port *port,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_mad_send_buf *msg = NULL;
struct cm_dreq_msg *dreq_msg;
struct cm_drep_msg *drep_msg;
int ret;
ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
if (ret)
return ret;
dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
drep_msg = (struct cm_drep_msg *) msg->mad;
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
drep_msg->remote_comm_id = dreq_msg->local_comm_id;
drep_msg->local_comm_id = dreq_msg->remote_comm_id;
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
return ret;
}
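/*
 * Handle a received DREQ.  Unknown comm IDs get an unsolicited DREP
 * (the connection is already gone); duplicates in TIMEWAIT are
 * re-answered with a DREP; otherwise the id moves to DREQ_RCVD and the
 * event is queued for the consumer.
 */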
static int cm_dreq_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
struct ib_mad_send_buf *msg = NULL;
int ret;
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
dreq_msg->local_comm_id);
if (!cm_id_priv) {
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
return -EINVAL;
}
work->cm_event.private_data = &dreq_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
goto unlock;
switch (cm_id_priv->id.state) {
case IB_CM_REP_SENT:
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
break;
case IB_CM_ESTABLISHED:
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
break;
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
if (ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
cm_id_priv->tid = dreq_msg->hdr.tid;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
deref: cm_deref_id(cm_id_priv);
return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
drep_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL;
work->cm_event.private_data = &drep_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_enter_timewait(cm_id_priv);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
enum ib_cm_rej_reason reason,
void *ari,
u8 ari_length,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
(ari && ari_length > IB_CM_REJ_ARI_LENGTH))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id->state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
ret = cm_alloc_msg(cm_id_priv, &msg);
if (!ret)
cm_format_rej((struct cm_rej_msg *) msg->mad,
cm_id_priv, reason, ari, ari_length,
private_data, private_data_len);
cm_reset_to_idle(cm_id_priv);
break;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ret = cm_alloc_msg(cm_id_priv, &msg);
if (!ret)
cm_format_rej((struct cm_rej_msg *) msg->mad,
cm_id_priv, reason, ari, ari_length,
private_data, private_data_len);
cm_enter_timewait(cm_id_priv);
break;
default:
ret = -EINVAL;
goto out;
}
if (ret)
goto out;
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
static void cm_format_rej_event(struct cm_work *work)
{
struct cm_rej_msg *rej_msg;
struct ib_cm_rej_event_param *param;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.rej_rcvd;
param->ari = rej_msg->ari;
param->ari_length = cm_rej_get_reject_info_len(rej_msg);
param->reason = __be16_to_cpu(rej_msg->reason);
work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
__be32 remote_id;
remote_id = rej_msg->local_comm_id;
if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
spin_lock_irq(&cm.lock);
timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
remote_id);
if (!timewait_info) {
spin_unlock_irq(&cm.lock);
return NULL;
}
cm_id_priv = idr_find(&cm.local_id_table, (__force int)
(timewait_info->work.local_id ^
cm.random_id_operand));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
spin_unlock_irq(&cm.lock);
} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
else
cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rej_msg *rej_msg;
int ret;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_rejected_id(rej_msg);
if (!cm_id_priv)
return -EINVAL;
cm_format_rej_event(work);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* fall through */
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
cm_enter_timewait(cm_id_priv);
else
cm_reset_to_idle(cm_id_priv);
break;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* fall through */
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
cm_enter_timewait(cm_id_priv);
break;
case IB_CM_ESTABLISHED:
if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
ib_cancel_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
break;
}
/* fall through */
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto out;
}
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
int ib_send_cm_mra(struct ib_cm_id *cm_id,
u8 service_timeout,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
enum ib_cm_state cm_state;
enum ib_cm_lap_state lap_state;
enum cm_msg_response msg_response;
void *data;
unsigned long flags;
int ret;
if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
return -EINVAL;
data = cm_copy_private_data(private_data, private_data_len);
if (IS_ERR(data))
return PTR_ERR(data);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch(cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
msg_response = CM_MSG_RESPONSE_REQ;
break;
case IB_CM_REP_RCVD:
cm_state = IB_CM_MRA_REP_SENT;
lap_state = cm_id->lap_state;
msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
default:
ret = -EINVAL;
goto error1;
}
if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto error1;
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
msg_response, service_timeout,
private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto error2;
}
cm_id->state = cm_state;
cm_id->lap_state = lap_state;
cm_id_priv->service_timeout = service_timeout;
cm_set_private_data(cm_id_priv, data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
return ret;
error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
cm_free_msg(msg);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
switch (cm_mra_get_msg_mraed(mra_msg)) {
case CM_MSG_RESPONSE_REQ:
return cm_acquire_id(mra_msg->remote_comm_id, 0);
case CM_MSG_RESPONSE_REP:
case CM_MSG_RESPONSE_OTHER:
return cm_acquire_id(mra_msg->remote_comm_id,
mra_msg->local_comm_id);
default:
return NULL;
}
}
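/*
 * Handle a received MRA: verify it acknowledges the message we have
 * outstanding (REQ, REP, or LAP), extend that message's timeout by the
 * peer's advertised service timeout, and move to the matching
 * MRA-received state.
 */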
static int cm_mra_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_mra_msg *mra_msg;
int timeout, ret;
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_mraed_id(mra_msg);
if (!cm_id_priv)
return -EINVAL;
work->cm_event.private_data = &mra_msg->private_data;
work->cm_event.param.mra_rcvd.service_timeout =
cm_mra_get_service_timeout(mra_msg);
timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
cm_convert_to_ms(cm_id_priv->av.timeout);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
break;
case IB_CM_REP_SENT:
if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
break;
case IB_CM_ESTABLISHED:
if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout)) {
if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
atomic_long_inc(&work->port->
counter_group[CM_RECV_DUPLICATES].
counter[CM_MRA_COUNTER]);
goto out;
}
cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_MRA_REP_RCVD:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_MRA_COUNTER]);
/* fall through */
default:
goto out;
}
cm_id_priv->msg->context[1] = (void *) (unsigned long)
cm_id_priv->id.state;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
spin_unlock_irq(&cm_id_priv->lock);
cm_deref_id(cm_id_priv);
return -EINVAL;
}
static void cm_format_lap(struct cm_lap_msg *lap_msg,
struct cm_id_private *cm_id_priv,
struct ib_sa_path_rec *alternate_path,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
lap_msg->local_comm_id = cm_id_priv->id.local_id;
lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
/* todo: need remote CM response timeout */
cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
lap_msg->alt_local_lid = alternate_path->slid;
lap_msg->alt_remote_lid = alternate_path->dlid;
lap_msg->alt_local_gid = alternate_path->sgid;
lap_msg->alt_remote_gid = alternate_path->dgid;
cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
lap_msg->alt_hop_limit = alternate_path->hop_limit;
cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
cm_lap_set_sl(lap_msg, alternate_path->sl);
cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
cm_lap_set_local_ack_timeout(lap_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alternate_path->packet_life_time));
if (private_data && private_data_len)
memcpy(lap_msg->private_data, private_data, private_data_len);
}
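/*
 * ib_send_cm_lap() requests migration to an alternate path on an
 * established connection (lap_state must be LAP_UNINIT or LAP_IDLE).
 * A hedged sketch of a caller -- the path record is assumed to have been
 * obtained from an SA path query elsewhere:
 *
 *	struct ib_sa_path_rec alt_path;	 // filled in by an SA path query
 *
 *	if (ib_send_cm_lap(cm_id, &alt_path, NULL, 0))
 *		pr_err("LAP send failed\n");
 */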
int ib_send_cm_lap(struct ib_cm_id *cm_id,
struct ib_sa_path_rec *alternate_path,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED ||
(cm_id->lap_state != IB_CM_LAP_UNINIT &&
cm_id->lap_state != IB_CM_LAP_IDLE)) {
ret = -EINVAL;
goto out;
}
ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
if (ret)
goto out;
cm_id_priv->alt_av.timeout =
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->alt_av.timeout - 1);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
alternate_path, private_data, private_data_len);
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->lap_state = IB_CM_LAP_SENT;
cm_id_priv->msg = msg;
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
struct ib_sa_path_rec *path,
struct cm_lap_msg *lap_msg)
{
memset(path, 0, sizeof *path);
path->dgid = lap_msg->alt_local_gid;
path->sgid = lap_msg->alt_remote_gid;
path->dlid = lap_msg->alt_local_lid;
path->slid = lap_msg->alt_remote_lid;
path->flow_label = cm_lap_get_flow_label(lap_msg);
path->hop_limit = lap_msg->alt_hop_limit;
path->traffic_class = cm_lap_get_traffic_class(lap_msg);
path->reversible = 1;
path->pkey = cm_id_priv->pkey;
path->sl = cm_lap_get_sl(lap_msg);
path->mtu_selector = IB_SA_EQ;
path->mtu = cm_id_priv->path_mtu;
path->rate_selector = IB_SA_EQ;
path->rate = cm_lap_get_packet_rate(lap_msg);
path->packet_life_time_selector = IB_SA_EQ;
path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
path->packet_life_time -= (path->packet_life_time > 0);
}
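/*
 * Note that cm_format_path_from_lap() above rebuilds the alternate path
 * from the receiver's point of view: the sender's "local" LID/GID become
 * our destination and vice versa, and the packet lifetime is roughly
 * recovered by decrementing the advertised ack timeout when nonzero.
 */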
static int cm_lap_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_lap_msg *lap_msg;
struct ib_cm_lap_event_param *param;
struct ib_mad_send_buf *msg = NULL;
int ret;
/* todo: verify LAP request and send reject APR if invalid. */
lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
lap_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL;
param = &work->cm_event.param.lap_rcvd;
param->alternate_path = &work->path[0];
cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
work->cm_event.private_data = &lap_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
goto unlock;
switch (cm_id_priv->id.lap_state) {
case IB_CM_LAP_UNINIT:
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_LAP_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_OTHER,
cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
if (ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_LAP_COUNTER]);
goto unlock;
default:
goto unlock;
}
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
deref: cm_deref_id(cm_id_priv);
return -EINVAL;
}
static void cm_format_apr(struct cm_apr_msg *apr_msg,
struct cm_id_private *cm_id_priv,
enum ib_cm_apr_status status,
void *info,
u8 info_length,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
apr_msg->local_comm_id = cm_id_priv->id.local_id;
apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
apr_msg->ap_status = (u8) status;
if (info && info_length) {
apr_msg->info_length = info_length;
memcpy(apr_msg->info, info, info_length);
}
if (private_data && private_data_len)
memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
enum ib_cm_apr_status status,
void *info,
u8 info_length,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
(info && info_length > IB_CM_APR_INFO_LENGTH))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED ||
(cm_id->lap_state != IB_CM_LAP_RCVD &&
cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
ret = -EINVAL;
goto out;
}
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
info, info_length, private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->lap_state = IB_CM_LAP_IDLE;
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
int ret;
apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
apr_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
work->cm_event.private_data = &apr_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
(cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_id_priv->msg = NULL;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
static int cm_timewait_handler(struct cm_work *work)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
int ret;
timewait_info = (struct cm_timewait_info *)work;
spin_lock_irq(&cm.lock);
list_del(&timewait_info->list);
spin_unlock_irq(&cm.lock);
cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
if (!cm_id_priv)
return -EINVAL;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
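/*
 * Service ID resolution (SIDR) follows: a SIDR REQ asks the remote CM which
 * QP services a given service ID, and the SIDR REP answers with the QPN and
 * Q_Key.  No connection state machine is involved beyond this single
 * request/reply exchange.
 */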
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_req_param *param)
{
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
sidr_req_msg->request_id = cm_id_priv->id.local_id;
sidr_req_msg->pkey = param->path->pkey;
sidr_req_msg->service_id = param->service_id;
if (param->private_data && param->private_data_len)
memcpy(sidr_req_msg->private_data, param->private_data,
param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
struct ib_cm_sidr_req_param *param)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if (!param->path || (param->private_data &&
param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
if (ret)
goto out;
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = param->timeout_ms;
cm_id_priv->max_cm_retries = param->max_cm_retries;
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
param);
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state == IB_CM_IDLE)
ret = ib_post_send_mad(msg, NULL);
else
ret = -EINVAL;
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
goto out;
}
cm_id->state = IB_CM_SIDR_REQ_SENT;
cm_id_priv->msg = msg;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
struct ib_cm_id *listen_id)
{
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_cm_sidr_req_event_param *param;
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_req_rcvd;
param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
param->listen_id = listen_id;
param->port = work->port->port_num;
work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
/* Record SGID/SLID and request ID for lookup. */
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
wc = work->mad_recv_wc->wc;
cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
cm_id_priv->tid = sidr_req_msg->hdr.tid;
atomic_inc(&cm_id_priv->work_count);
spin_lock_irq(&cm.lock);
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
cur_cm_id_priv = cm_find_listen(cm_id->device,
sidr_req_msg->service_id,
sidr_req_msg->private_data);
if (!cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
goto out; /* No match. */
}
atomic_inc(&cur_cm_id_priv->refcount);
atomic_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = cur_cm_id_priv->id.context;
cm_id_priv->id.service_id = sidr_req_msg->service_id;
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
cm_deref_id(cur_cm_id_priv);
return 0;
out:
ib_destroy_cm_id(&cm_id_priv->id);
return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param)
{
cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
cm_id_priv->tid);
sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
sidr_rep_msg->status = param->status;
cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
sidr_rep_msg->service_id = cm_id_priv->id.service_id;
sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
if (param->info && param->info_length)
memcpy(sidr_rep_msg->info, param->info, param->info_length);
if (param->private_data && param->private_data_len)
memcpy(sidr_rep_msg->private_data, param->private_data,
param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
struct ib_cm_sidr_rep_param *param)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
(param->private_data &&
param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
ret = -EINVAL;
goto error;
}
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto error;
cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
param);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->state = IB_CM_IDLE;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
spin_lock_irqsave(&cm.lock, flags);
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
spin_unlock_irqrestore(&cm.lock, flags);
return 0;
error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work)
{
struct cm_sidr_rep_msg *sidr_rep_msg;
struct ib_cm_sidr_rep_event_param *param;
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_rep_rcvd;
param->status = sidr_rep_msg->status;
param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
param->info = &sidr_rep_msg->info;
param->info_len = sidr_rep_msg->info_length;
work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
struct cm_sidr_rep_msg *sidr_rep_msg;
struct cm_id_private *cm_id_priv;
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
cm_format_sidr_rep_event(work);
cm_process_work(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
static void cm_process_send_error(struct ib_mad_send_buf *msg,
enum ib_wc_status wc_status)
{
struct cm_id_private *cm_id_priv;
struct ib_cm_event cm_event;
enum ib_cm_state state;
int ret;
memset(&cm_event, 0, sizeof cm_event);
cm_id_priv = msg->context[0];
/* Discard old sends or ones without a response. */
spin_lock_irq(&cm_id_priv->lock);
state = (enum ib_cm_state) (unsigned long) msg->context[1];
if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
goto discard;
switch (state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
cm_reset_to_idle(cm_id_priv);
cm_event.event = IB_CM_REQ_ERROR;
break;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
cm_reset_to_idle(cm_id_priv);
cm_event.event = IB_CM_REP_ERROR;
break;
case IB_CM_DREQ_SENT:
cm_enter_timewait(cm_id_priv);
cm_event.event = IB_CM_DREQ_ERROR;
break;
case IB_CM_SIDR_REQ_SENT:
cm_id_priv->id.state = IB_CM_IDLE;
cm_event.event = IB_CM_SIDR_REQ_ERROR;
break;
default:
goto discard;
}
spin_unlock_irq(&cm_id_priv->lock);
cm_event.param.send_status = wc_status;
/* No other events can occur on the cm_id at this point. */
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
cm_free_msg(msg);
if (ret)
ib_destroy_cm_id(&cm_id_priv->id);
return;
discard:
spin_unlock_irq(&cm_id_priv->lock);
cm_free_msg(msg);
}
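/*
 * Completion handler for all CM MAD sends: account the transmission (and
 * any retries) against the per-port counters, free the message on success
 * or flush, and route genuine send errors that belong to a cm_id through
 * cm_process_send_error() so the state machine can back out.
 */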
static void cm_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc)
{
struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
struct cm_port *port;
u16 attr_index;
port = mad_agent->context;
attr_index = be16_to_cpu(((struct ib_mad_hdr *)
msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
/*
* If the send was in response to a received message (context[0] is not
* set to a cm_id), and is not a REJ, then it is a send that was
* manually retried.
*/
if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
msg->retries = 1;
atomic_long_add(1 + msg->retries,
&port->counter_group[CM_XMIT].counter[attr_index]);
if (msg->retries)
atomic_long_add(msg->retries,
&port->counter_group[CM_XMIT_RETRIES].
counter[attr_index]);
switch (mad_send_wc->status) {
case IB_WC_SUCCESS:
case IB_WC_WR_FLUSH_ERR:
cm_free_msg(msg);
break;
default:
if (msg->context[0] && msg->context[1])
cm_process_send_error(msg, mad_send_wc->status);
else
cm_free_msg(msg);
break;
}
}
static void cm_work_handler(struct work_struct *_work)
{
struct cm_work *work = container_of(_work, struct cm_work, work.work);
int ret;
switch (work->cm_event.event) {
case IB_CM_REQ_RECEIVED:
ret = cm_req_handler(work);
break;
case IB_CM_MRA_RECEIVED:
ret = cm_mra_handler(work);
break;
case IB_CM_REJ_RECEIVED:
ret = cm_rej_handler(work);
break;
case IB_CM_REP_RECEIVED:
ret = cm_rep_handler(work);
break;
case IB_CM_RTU_RECEIVED:
ret = cm_rtu_handler(work);
break;
case IB_CM_USER_ESTABLISHED:
ret = cm_establish_handler(work);
break;
case IB_CM_DREQ_RECEIVED:
ret = cm_dreq_handler(work);
break;
case IB_CM_DREP_RECEIVED:
ret = cm_drep_handler(work);
break;
case IB_CM_SIDR_REQ_RECEIVED:
ret = cm_sidr_req_handler(work);
break;
case IB_CM_SIDR_REP_RECEIVED:
ret = cm_sidr_rep_handler(work);
break;
case IB_CM_LAP_RECEIVED:
ret = cm_lap_handler(work);
break;
case IB_CM_APR_RECEIVED:
ret = cm_apr_handler(work);
break;
case IB_CM_TIMEWAIT_EXIT:
ret = cm_timewait_handler(work);
break;
default:
ret = -EINVAL;
break;
}
if (ret)
cm_free_work(work);
}
static int cm_establish(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
unsigned long flags;
int ret = 0;
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (!work)
return -ENOMEM;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id->state)
{
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
cm_id->state = IB_CM_ESTABLISHED;
break;
case IB_CM_ESTABLISHED:
ret = -EISCONN;
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
if (ret) {
kfree(work);
goto out;
}
/*
* The CM worker thread may try to destroy the cm_id before it
* can execute this work item. To prevent potential deadlock,
* we need to find the cm_id once we're in the context of the
* worker thread, rather than holding a reference on it.
*/
INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->local_id = cm_id->local_id;
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
queue_delayed_work(cm.wq, &work->work, 0);
out:
return ret;
}
static int cm_migrate(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
unsigned long flags;
int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state == IB_CM_ESTABLISHED &&
(cm_id->lap_state == IB_CM_LAP_UNINIT ||
cm_id->lap_state == IB_CM_LAP_IDLE)) {
cm_id->lap_state = IB_CM_LAP_IDLE;
cm_id_priv->av = cm_id_priv->alt_av;
} else
ret = -EINVAL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
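/*
 * ib_cm_notify() lets a consumer forward asynchronous QP events to the CM:
 * IB_EVENT_COMM_EST when data arrived before the RTU, IB_EVENT_PATH_MIG
 * when the HCA completed a path migration.  A hedged sketch of a QP event
 * handler (the handler and its context are assumed, not defined here):
 *
 *	static void qp_event_handler(struct ib_event *ev, void *ctx)
 *	{
 *		struct ib_cm_id *cm_id = ctx;
 *
 *		if (ev->event == IB_EVENT_COMM_EST ||
 *		    ev->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(cm_id, ev->event);
 *	}
 */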
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
int ret;
switch (event) {
case IB_EVENT_COMM_EST:
ret = cm_establish(cm_id);
break;
case IB_EVENT_PATH_MIG:
ret = cm_migrate(cm_id);
break;
default:
ret = -EINVAL;
}
return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct cm_port *port = mad_agent->context;
struct cm_work *work;
enum ib_cm_event_type event;
u16 attr_id;
int paths = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
alt_local_lid != 0);
event = IB_CM_REQ_RECEIVED;
break;
case CM_MRA_ATTR_ID:
event = IB_CM_MRA_RECEIVED;
break;
case CM_REJ_ATTR_ID:
event = IB_CM_REJ_RECEIVED;
break;
case CM_REP_ATTR_ID:
event = IB_CM_REP_RECEIVED;
break;
case CM_RTU_ATTR_ID:
event = IB_CM_RTU_RECEIVED;
break;
case CM_DREQ_ATTR_ID:
event = IB_CM_DREQ_RECEIVED;
break;
case CM_DREP_ATTR_ID:
event = IB_CM_DREP_RECEIVED;
break;
case CM_SIDR_REQ_ATTR_ID:
event = IB_CM_SIDR_REQ_RECEIVED;
break;
case CM_SIDR_REP_ATTR_ID:
event = IB_CM_SIDR_REP_RECEIVED;
break;
case CM_LAP_ATTR_ID:
paths = 1;
event = IB_CM_LAP_RECEIVED;
break;
case CM_APR_ATTR_ID:
event = IB_CM_APR_RECEIVED;
break;
default:
ib_free_recv_mad(mad_recv_wc);
return;
}
attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
atomic_long_inc(&port->counter_group[CM_RECV].
counter[attr_id - CM_ATTR_ID_OFFSET]);
work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
GFP_KERNEL);
if (!work) {
ib_free_recv_mad(mad_recv_wc);
return;
}
INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = port;
queue_delayed_work(cm.wq, &work->work, 0);
}
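/*
 * The three helpers below fill in ib_qp_attr for the INIT, RTR and RTS
 * transitions respectively, based on the negotiated connection state; they
 * are dispatched from ib_cm_init_qp_attr() further down.
 */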
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
case IB_CM_ESTABLISHED:
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX | IB_QP_PORT;
qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
if (cm_id_priv->responder_resources)
qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_ATOMIC;
qp_attr->pkey_index = cm_id_priv->av.pkey_index;
qp_attr->port_num = cm_id_priv->av.port->port_num;
ret = 0;
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
case IB_CM_ESTABLISHED:
*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
IB_QP_DEST_QPN | IB_QP_RQ_PSN;
qp_attr->ah_attr = cm_id_priv->av.ah_attr;
if (!cm_id_priv->av.valid) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return -EINVAL;
}
if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
*qp_attr_mask |= IB_QP_VID;
}
if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
memcpy(qp_attr->smac, cm_id_priv->av.smac,
sizeof(qp_attr->smac));
*qp_attr_mask |= IB_QP_SMAC;
}
if (cm_id_priv->alt_av.valid) {
if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
qp_attr->alt_vlan_id =
cm_id_priv->alt_av.ah_attr.vlan_id;
*qp_attr_mask |= IB_QP_ALT_VID;
}
if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
memcpy(qp_attr->alt_smac,
cm_id_priv->alt_av.smac,
sizeof(qp_attr->alt_smac));
*qp_attr_mask |= IB_QP_ALT_SMAC;
}
}
qp_attr->path_mtu = cm_id_priv->path_mtu;
qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
if (cm_id_priv->qp_type == IB_QPT_RC ||
cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
IB_QP_MIN_RNR_TIMER;
qp_attr->max_dest_rd_atomic =
cm_id_priv->responder_resources;
qp_attr->min_rnr_timer = 0;
}
if (cm_id_priv->alt_av.ah_attr.dlid) {
*qp_attr_mask |= IB_QP_ALT_PATH;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
}
ret = 0;
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) {
/* Allow transition to RTS before sending REP */
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
case IB_CM_ESTABLISHED:
if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
switch (cm_id_priv->qp_type) {
case IB_QPT_RC:
case IB_QPT_XRC_INI:
*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
IB_QP_MAX_QP_RD_ATOMIC;
qp_attr->retry_cnt = cm_id_priv->retry_count;
qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
/* fall through */
case IB_QPT_XRC_TGT:
*qp_attr_mask |= IB_QP_TIMEOUT;
qp_attr->timeout = cm_id_priv->av.timeout;
break;
default:
break;
}
if (cm_id_priv->alt_av.ah_attr.dlid) {
*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
qp_attr->path_mig_state = IB_MIG_REARM;
}
} else {
*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
qp_attr->path_mig_state = IB_MIG_REARM;
}
ret = 0;
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask)
{
struct cm_id_private *cm_id_priv;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
switch (qp_attr->qp_state) {
case IB_QPS_INIT:
ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
case IB_QPS_RTR:
ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
case IB_QPS_RTS:
ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
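/*
 * A hedged sketch of how a consumer typically drives its QP with the
 * helper above (error handling omitted; qp and cm_id are assumed to exist):
 *
 *	struct ib_qp_attr attr;
 *	int mask;
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	ib_cm_init_qp_attr(cm_id, &attr, &mask);
 *	ib_modify_qp(qp, &attr, mask);
 *	// ...then the same two calls with IB_QPS_RTR, and again with
 *	// IB_QPS_RTS, to bring the QP fully up.
 */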
static void cm_get_ack_delay(struct cm_device *cm_dev)
{
struct ib_device_attr attr;
if (ib_query_device(cm_dev->ib_device, &attr))
cm_dev->ack_delay = 0; /* acks will rely on packet life time */
else
cm_dev->ack_delay = attr.local_ca_ack_delay;
}
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
char *buf)
{
struct cm_counter_group *group;
struct cm_counter_attribute *cm_attr;
group = container_of(obj, struct cm_counter_group, obj);
cm_attr = container_of(attr, struct cm_counter_attribute, attr);
return sprintf(buf, "%ld\n",
atomic_long_read(&group->counter[cm_attr->index]));
}
static const struct sysfs_ops cm_counter_ops = {
.show = cm_show_counter
};
static struct kobj_type cm_counter_obj_type = {
.sysfs_ops = &cm_counter_ops,
.default_attrs = cm_counter_default_attrs
};
static void cm_release_port_obj(struct kobject *obj)
{
struct cm_port *cm_port;
cm_port = container_of(obj, struct cm_port, port_obj);
kfree(cm_port);
}
static struct kobj_type cm_port_obj_type = {
.release = cm_release_port_obj
};
static char *cm_devnode(struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
struct class cm_class = {
.owner = THIS_MODULE,
.name = "infiniband_cm",
.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);
static int cm_create_port_fs(struct cm_port *port)
{
int i, ret;
ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
&port->cm_dev->device->kobj,
"%d", port->port_num);
if (ret) {
kfree(port);
return ret;
}
for (i = 0; i < CM_COUNTER_GROUPS; i++) {
ret = kobject_init_and_add(&port->counter_group[i].obj,
&cm_counter_obj_type,
&port->port_obj,
"%s", counter_group_names[i]);
if (ret)
goto error;
}
return 0;
error:
while (i--)
kobject_put(&port->counter_group[i].obj);
kobject_put(&port->port_obj);
return ret;
}
static void cm_remove_port_fs(struct cm_port *port)
{
int i;
for (i = 0; i < CM_COUNTER_GROUPS; i++)
kobject_put(&port->counter_group[i].obj);
kobject_put(&port->port_obj);
}
static void cm_add_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
struct ib_mad_reg_req reg_req = {
.mgmt_class = IB_MGMT_CLASS_CM,
.mgmt_class_version = IB_CM_CLASS_VERSION
};
struct ib_port_modify port_modify = {
.set_port_cap_mask = IB_PORT_CM_SUP
};
unsigned long flags;
int ret;
u8 i;
if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
return;
cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
ib_device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
return;
cm_dev->ib_device = ib_device;
cm_get_ack_delay(cm_dev);
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
"%s", ib_device->name);
if (IS_ERR(cm_dev->device)) {
kfree(cm_dev);
return;
}
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
port = kzalloc(sizeof *port, GFP_KERNEL);
if (!port)
goto error1;
cm_dev->port[i-1] = port;
port->cm_dev = cm_dev;
port->port_num = i;
ret = cm_create_port_fs(port);
if (ret)
goto error1;
port->mad_agent = ib_register_mad_agent(ib_device, i,
IB_QPT_GSI,
®_req,
0,
cm_send_handler,
cm_recv_handler,
port);
if (IS_ERR(port->mad_agent))
goto error2;
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
goto error3;
}
ib_set_client_data(ib_device, &cm_client, cm_dev);
write_lock_irqsave(&cm.device_lock, flags);
list_add_tail(&cm_dev->list, &cm.device_list);
write_unlock_irqrestore(&cm.device_lock, flags);
return;
error3:
ib_unregister_mad_agent(port->mad_agent);
error2:
cm_remove_port_fs(port);
error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
while (--i) {
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_CM_SUP
};
unsigned long flags;
int i;
cm_dev = ib_get_client_data(ib_device, &cm_client);
if (!cm_dev)
return;
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
flush_workqueue(cm.wq);
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
kfree(cm_dev);
}
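/*
 * Module init below registers, in order, the device class, the "ib_cm"
 * workqueue and the IB client; ib_cm_cleanup() tears the same objects down
 * in roughly reverse order, after cancelling any pending timewait work.
 */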
static int __init ib_cm_init(void)
{
int ret;
memset(&cm, 0, sizeof cm);
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
cm.listen_service_table = RB_ROOT;
cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
cm.remote_id_table = RB_ROOT;
cm.remote_qp_table = RB_ROOT;
cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
INIT_LIST_HEAD(&cm.timewait_list);
ret = class_register(&cm_class);
if (ret) {
ret = -ENOMEM;
goto error1;
}
cm.wq = create_workqueue("ib_cm");
if (!cm.wq) {
ret = -ENOMEM;
goto error2;
}
ret = ib_register_client(&cm_client);
if (ret)
goto error3;
return 0;
error3:
destroy_workqueue(cm.wq);
error2:
class_unregister(&cm_class);
error1:
idr_destroy(&cm.local_id_table);
return ret;
}
static void __exit ib_cm_cleanup(void)
{
struct cm_timewait_info *timewait_info, *tmp;
spin_lock_irq(&cm.lock);
list_for_each_entry(timewait_info, &cm.timewait_list, list)
cancel_delayed_work(&timewait_info->work.work);
spin_unlock_irq(&cm.lock);
ib_unregister_client(&cm_client);
destroy_workqueue(cm.wq);
list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
list_del(&timewait_info->list);
kfree(timewait_info);
}
class_unregister(&cm_class);
idr_destroy(&cm.local_id_table);
}
module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2117_0 |
crossvul-cpp_data_good_5823_0 | /*
* JPEG 2000 image decoder
* Copyright (c) 2007 Kamil Nowosad
* Copyright (c) 2013 Nicolas Bertrand <nicoinattendu@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* JPEG 2000 image decoder
*/
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "thread.h"
#include "jpeg2000.h"
#define JP2_SIG_TYPE 0x6A502020
#define JP2_SIG_VALUE 0x0D0A870A
#define JP2_CODESTREAM 0x6A703263
#define JP2_HEADER 0x6A703268
#define HAD_COC 0x01
#define HAD_QCC 0x02
typedef struct Jpeg2000TilePart {
uint8_t tile_index; // index of the tile this tile-part belongs to
const uint8_t *tp_end;
GetByteContext tpg; // bit stream in tile-part
} Jpeg2000TilePart;
/* RMK: For JPEG2000 DCINEMA there are 3 tile-parts in a tile,
* one per component; the tile_part[] array below allows up to 4 */
typedef struct Jpeg2000Tile {
Jpeg2000Component *comp;
uint8_t properties[4];
Jpeg2000CodingStyle codsty[4];
Jpeg2000QuantStyle qntsty[4];
Jpeg2000TilePart tile_part[4];
uint16_t tp_idx; // Tile-part index
} Jpeg2000Tile;
typedef struct Jpeg2000DecoderContext {
AVClass *class;
AVCodecContext *avctx;
GetByteContext g;
int width, height;
int image_offset_x, image_offset_y;
int tile_offset_x, tile_offset_y;
uint8_t cbps[4]; // bits per sample in particular components
uint8_t sgnd[4]; // if a component is signed
uint8_t properties[4];
int cdx[4], cdy[4];
int precision;
int ncomponents;
int colour_space;
uint32_t palette[256];
int8_t pal8;
int cdef[4];
int tile_width, tile_height;
unsigned numXtiles, numYtiles;
int maxtilelen;
Jpeg2000CodingStyle codsty[4];
Jpeg2000QuantStyle qntsty[4];
int bit_index;
int curtileno;
Jpeg2000Tile *tile;
/*options parameters*/
int reduction_factor;
} Jpeg2000DecoderContext;
/* get_bits function for the JPEG2000 packet bitstream.
* It is a bit reader with a bit-unstuffing routine: if the value of a
* byte is 0xFF, the next byte includes an extra zero bit stuffed into the MSB.
* cf. ISO-15444-1:2002 / B.10.1 Bit-stuffing routine */
static int get_bits(Jpeg2000DecoderContext *s, int n)
{
int res = 0;
while (--n >= 0) {
res <<= 1;
if (s->bit_index == 0) {
s->bit_index = 7 + (bytestream2_get_byte(&s->g) != 0xFFu);
}
s->bit_index--;
res |= (bytestream2_peek_byte(&s->g) >> s->bit_index) & 1;
}
return res;
}
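/* A worked example of the stuffing rule implemented above: after all eight
 * bits of a 0xFF byte have been read, bit_index starts at 7 for the next
 * byte, skipping its stuffed MSB.  So the byte pair 0xFF 0x7F decodes to
 * fifteen one-bits: 11111111 followed by 1111111. */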
static void jpeg2000_flush(Jpeg2000DecoderContext *s)
{
if (bytestream2_get_byte(&s->g) == 0xff)
bytestream2_skip(&s->g, 1);
s->bit_index = 8;
}
/* decode the value stored in node */
static int tag_tree_decode(Jpeg2000DecoderContext *s, Jpeg2000TgtNode *node,
int threshold)
{
Jpeg2000TgtNode *stack[30];
int sp = -1, curval = 0;
if (!node)
return AVERROR_INVALIDDATA;
while (node && !node->vis) {
stack[++sp] = node;
node = node->parent;
}
if (node)
curval = node->val;
else
curval = stack[sp]->val;
while (curval < threshold && sp >= 0) {
if (curval < stack[sp]->val)
curval = stack[sp]->val;
while (curval < threshold) {
int ret;
if ((ret = get_bits(s, 1)) > 0) {
stack[sp]->vis++;
break;
} else if (!ret)
curval++;
else
return ret;
}
stack[sp]->val = curval;
sp--;
}
return curval;
}
static int pix_fmt_match(enum AVPixelFormat pix_fmt, int components,
int bpc, uint32_t log2_chroma_wh, int pal8)
{
int match = 1;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
if (desc->nb_components != components) {
return 0;
}
switch (components) {
case 4:
match = match && desc->comp[3].depth_minus1 + 1 >= bpc &&
(log2_chroma_wh >> 14 & 3) == 0 &&
(log2_chroma_wh >> 12 & 3) == 0;
case 3:
match = match && desc->comp[2].depth_minus1 + 1 >= bpc &&
(log2_chroma_wh >> 10 & 3) == desc->log2_chroma_w &&
(log2_chroma_wh >> 8 & 3) == desc->log2_chroma_h;
case 2:
match = match && desc->comp[1].depth_minus1 + 1 >= bpc &&
(log2_chroma_wh >> 6 & 3) == desc->log2_chroma_w &&
(log2_chroma_wh >> 4 & 3) == desc->log2_chroma_h;
case 1:
match = match && desc->comp[0].depth_minus1 + 1 >= bpc &&
(log2_chroma_wh >> 2 & 3) == 0 &&
(log2_chroma_wh & 3) == 0 &&
(desc->flags & AV_PIX_FMT_FLAG_PAL) == pal8 * AV_PIX_FMT_FLAG_PAL;
}
return match;
}
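/* pix_fmt_match() unpacks the log2_chroma_wh word built in get_siz() below:
 * each component i occupies four bits, with its vertical subsampling
 * (cdy >> 1) at bits 4*i..4*i+1 and the horizontal one (cdx >> 1) at bits
 * 4*i+2..4*i+3; for the allowed factors 1, 2 and 4, >> 1 equals log2. */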
// pix_fmts with lower bpp have to be listed before
// similar pix_fmts with higher bpp.
#define RGB_PIXEL_FORMATS AV_PIX_FMT_PAL8,AV_PIX_FMT_RGB24,AV_PIX_FMT_RGBA,AV_PIX_FMT_RGB48,AV_PIX_FMT_RGBA64
#define GRAY_PIXEL_FORMATS AV_PIX_FMT_GRAY8,AV_PIX_FMT_GRAY8A,AV_PIX_FMT_GRAY16
#define YUV_PIXEL_FORMATS AV_PIX_FMT_YUV410P,AV_PIX_FMT_YUV411P,AV_PIX_FMT_YUVA420P, \
AV_PIX_FMT_YUV420P,AV_PIX_FMT_YUV422P,AV_PIX_FMT_YUVA422P, \
AV_PIX_FMT_YUV440P,AV_PIX_FMT_YUV444P,AV_PIX_FMT_YUVA444P, \
AV_PIX_FMT_YUV420P9,AV_PIX_FMT_YUV422P9,AV_PIX_FMT_YUV444P9, \
AV_PIX_FMT_YUVA420P9,AV_PIX_FMT_YUVA422P9,AV_PIX_FMT_YUVA444P9, \
AV_PIX_FMT_YUV420P10,AV_PIX_FMT_YUV422P10,AV_PIX_FMT_YUV444P10, \
AV_PIX_FMT_YUVA420P10,AV_PIX_FMT_YUVA422P10,AV_PIX_FMT_YUVA444P10, \
AV_PIX_FMT_YUV420P12,AV_PIX_FMT_YUV422P12,AV_PIX_FMT_YUV444P12, \
AV_PIX_FMT_YUV420P14,AV_PIX_FMT_YUV422P14,AV_PIX_FMT_YUV444P14, \
AV_PIX_FMT_YUV420P16,AV_PIX_FMT_YUV422P16,AV_PIX_FMT_YUV444P16, \
AV_PIX_FMT_YUVA420P16,AV_PIX_FMT_YUVA422P16,AV_PIX_FMT_YUVA444P16
#define XYZ_PIXEL_FORMATS AV_PIX_FMT_XYZ12
static const enum AVPixelFormat rgb_pix_fmts[] = {RGB_PIXEL_FORMATS};
static const enum AVPixelFormat gray_pix_fmts[] = {GRAY_PIXEL_FORMATS};
static const enum AVPixelFormat yuv_pix_fmts[] = {YUV_PIXEL_FORMATS};
static const enum AVPixelFormat xyz_pix_fmts[] = {XYZ_PIXEL_FORMATS};
static const enum AVPixelFormat all_pix_fmts[] = {RGB_PIXEL_FORMATS,
GRAY_PIXEL_FORMATS,
YUV_PIXEL_FORMATS,
XYZ_PIXEL_FORMATS};
/* marker segments */
/* get sizes and offsets of image, tiles; number of components */
static int get_siz(Jpeg2000DecoderContext *s)
{
int i;
int ncomponents;
uint32_t log2_chroma_wh = 0;
const enum AVPixelFormat *possible_fmts = NULL;
int possible_fmts_nb = 0;
if (bytestream2_get_bytes_left(&s->g) < 36)
return AVERROR_INVALIDDATA;
s->avctx->profile = bytestream2_get_be16u(&s->g); // Rsiz
s->width = bytestream2_get_be32u(&s->g); // Width
s->height = bytestream2_get_be32u(&s->g); // Height
s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz
s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz
s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz
s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz
s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz
s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
ncomponents = bytestream2_get_be16u(&s->g); // CSiz
if (ncomponents <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
ncomponents);
return AVERROR_INVALIDDATA;
}
if (ncomponents > 4) {
avpriv_request_sample(s->avctx, "Support for %d components",
ncomponents);
return AVERROR_PATCHWELCOME;
}
s->ncomponents = ncomponents;
if (s->tile_width <= 0 || s->tile_height <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid tile dimension %dx%d.\n",
s->tile_width, s->tile_height);
return AVERROR_INVALIDDATA;
}
if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents)
return AVERROR_INVALIDDATA;
for (i = 0; i < s->ncomponents; i++) { // Ssiz_i XRsiz_i, YRsiz_i
uint8_t x = bytestream2_get_byteu(&s->g);
s->cbps[i] = (x & 0x7f) + 1;
s->precision = FFMAX(s->cbps[i], s->precision);
s->sgnd[i] = !!(x & 0x80);
s->cdx[i] = bytestream2_get_byteu(&s->g);
s->cdy[i] = bytestream2_get_byteu(&s->g);
if ( !s->cdx[i] || s->cdx[i] == 3 || s->cdx[i] > 4
|| !s->cdy[i] || s->cdy[i] == 3 || s->cdy[i] > 4) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid sample separation %d/%d\n", s->cdx[i], s->cdy[i]);
return AVERROR_INVALIDDATA;
}
log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2;
}
s->numXtiles = ff_jpeg2000_ceildiv(s->width - s->tile_offset_x, s->tile_width);
s->numYtiles = ff_jpeg2000_ceildiv(s->height - s->tile_offset_y, s->tile_height);
if (s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(*s->tile)) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(EINVAL);
}
s->tile = av_mallocz_array(s->numXtiles * s->numYtiles, sizeof(*s->tile));
if (!s->tile) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(ENOMEM);
}
for (i = 0; i < s->numXtiles * s->numYtiles; i++) {
Jpeg2000Tile *tile = s->tile + i;
tile->comp = av_mallocz(s->ncomponents * sizeof(*tile->comp));
if (!tile->comp)
return AVERROR(ENOMEM);
}
/* compute image size with reduction factor */
s->avctx->width = ff_jpeg2000_ceildivpow2(s->width - s->image_offset_x,
s->reduction_factor);
s->avctx->height = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y,
s->reduction_factor);
if (s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_2K ||
s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_4K) {
possible_fmts = xyz_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(xyz_pix_fmts);
} else {
switch (s->colour_space) {
case 16:
possible_fmts = rgb_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(rgb_pix_fmts);
break;
case 17:
possible_fmts = gray_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(gray_pix_fmts);
break;
case 18:
possible_fmts = yuv_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(yuv_pix_fmts);
break;
default:
possible_fmts = all_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(all_pix_fmts);
break;
}
}
for (i = 0; i < possible_fmts_nb; ++i) {
if (pix_fmt_match(possible_fmts[i], ncomponents, s->precision, log2_chroma_wh, s->pal8)) {
s->avctx->pix_fmt = possible_fmts[i];
break;
}
}
if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
av_log(s->avctx, AV_LOG_ERROR,
"Unknown pix_fmt, profile: %d, colour_space: %d, "
"components: %d, precision: %d, "
"cdx[1]: %d, cdy[1]: %d, cdx[2]: %d, cdy[2]: %d\n",
s->avctx->profile, s->colour_space, ncomponents, s->precision,
ncomponents > 2 ? s->cdx[1] : 0,
ncomponents > 2 ? s->cdy[1] : 0,
ncomponents > 2 ? s->cdx[2] : 0,
ncomponents > 2 ? s->cdy[2] : 0);
}
s->avctx->bits_per_raw_sample = s->precision;
return 0;
}
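/* Example of the reduction arithmetic above, assuming zero image offsets:
 * a 4096x2160 image decoded with reduction_factor 2 comes out as
 * ceil(4096 / 2^2) x ceil(2160 / 2^2) = 1024x540, since
 * ff_jpeg2000_ceildivpow2(a, b) computes ceil(a / 2^b). */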
/* get common part for COD and COC segments */
static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c)
{
uint8_t byte;
if (bytestream2_get_bytes_left(&s->g) < 5)
return AVERROR_INVALIDDATA;
/* nreslevels = number of resolution levels
= number of decomposition levels + 1 */
c->nreslevels = bytestream2_get_byteu(&s->g) + 1;
if (c->nreslevels >= JPEG2000_MAX_RESLEVELS) {
av_log(s->avctx, AV_LOG_ERROR, "nreslevels %d is invalid\n", c->nreslevels);
return AVERROR_INVALIDDATA;
}
if (c->nreslevels <= s->reduction_factor) {
/* we are forced to update reduction_factor as its requested value is
not compatible with this bitstream, and as we might have used it
already in setup earlier we have to fail this frame until
reinitialization is implemented */
av_log(s->avctx, AV_LOG_ERROR, "reduction_factor too large for this bitstream, max is %d\n", c->nreslevels - 1);
s->reduction_factor = c->nreslevels - 1;
return AVERROR(EINVAL);
}
/* compute number of resolution levels to decode */
c->nreslevels2decode = c->nreslevels - s->reduction_factor;
c->log2_cblk_width = (bytestream2_get_byteu(&s->g) & 15) + 2; // cblk width
c->log2_cblk_height = (bytestream2_get_byteu(&s->g) & 15) + 2; // cblk height
if (c->log2_cblk_width > 10 || c->log2_cblk_height > 10 ||
c->log2_cblk_width + c->log2_cblk_height > 12) {
av_log(s->avctx, AV_LOG_ERROR, "cblk size invalid\n");
return AVERROR_INVALIDDATA;
}
if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
avpriv_request_sample(s->avctx, "cblk size > 64");
return AVERROR_PATCHWELCOME;
}
c->cblk_style = bytestream2_get_byteu(&s->g);
if (c->cblk_style != 0) { // cblk style
av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);
}
c->transform = bytestream2_get_byteu(&s->g); // DWT transformation type
/* set integer 9/7 DWT in case of BITEXACT flag */
if ((s->avctx->flags & CODEC_FLAG_BITEXACT) && (c->transform == FF_DWT97))
c->transform = FF_DWT97_INT;
if (c->csty & JPEG2000_CSTY_PREC) {
int i;
for (i = 0; i < c->nreslevels; i++) {
byte = bytestream2_get_byte(&s->g);
c->log2_prec_widths[i] = byte & 0x0F; // precinct PPx
c->log2_prec_heights[i] = (byte >> 4) & 0x0F; // precinct PPy
}
} else {
memset(c->log2_prec_widths , 15, sizeof(c->log2_prec_widths ));
memset(c->log2_prec_heights, 15, sizeof(c->log2_prec_heights));
}
return 0;
}
/* get coding parameters for a particular tile or whole image */
static int get_cod(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c,
uint8_t *properties)
{
Jpeg2000CodingStyle tmp;
int compno, ret;
if (bytestream2_get_bytes_left(&s->g) < 5)
return AVERROR_INVALIDDATA;
tmp.csty = bytestream2_get_byteu(&s->g);
// get progression order
tmp.prog_order = bytestream2_get_byteu(&s->g);
tmp.nlayers = bytestream2_get_be16u(&s->g);
tmp.mct = bytestream2_get_byteu(&s->g); // multiple component transformation
if (tmp.mct && s->ncomponents < 3) {
av_log(s->avctx, AV_LOG_ERROR,
"MCT %d with too few components (%d)\n",
tmp.mct, s->ncomponents);
return AVERROR_INVALIDDATA;
}
if ((ret = get_cox(s, &tmp)) < 0)
return ret;
for (compno = 0; compno < s->ncomponents; compno++)
if (!(properties[compno] & HAD_COC))
memcpy(c + compno, &tmp, sizeof(tmp));
return 0;
}
/* Get coding parameters for a component in the whole image or a
* particular tile. */
static int get_coc(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c,
uint8_t *properties)
{
int compno, ret;
if (bytestream2_get_bytes_left(&s->g) < 2)
return AVERROR_INVALIDDATA;
compno = bytestream2_get_byteu(&s->g);
if (compno >= s->ncomponents) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid compno %d. There are %d components in the image.\n",
compno, s->ncomponents);
return AVERROR_INVALIDDATA;
}
c += compno;
c->csty = bytestream2_get_byteu(&s->g);
if ((ret = get_cox(s, c)) < 0)
return ret;
properties[compno] |= HAD_COC;
return 0;
}
/* Get common part for QCD and QCC segments. */
static int get_qcx(Jpeg2000DecoderContext *s, int n, Jpeg2000QuantStyle *q)
{
int i, x;
if (bytestream2_get_bytes_left(&s->g) < 1)
return AVERROR_INVALIDDATA;
x = bytestream2_get_byteu(&s->g); // Sqcd
q->nguardbits = x >> 5;
q->quantsty = x & 0x1f;
if (q->quantsty == JPEG2000_QSTY_NONE) {
n -= 3;
if (bytestream2_get_bytes_left(&s->g) < n ||
n > JPEG2000_MAX_DECLEVELS*3)
return AVERROR_INVALIDDATA;
for (i = 0; i < n; i++)
q->expn[i] = bytestream2_get_byteu(&s->g) >> 3;
} else if (q->quantsty == JPEG2000_QSTY_SI) {
if (bytestream2_get_bytes_left(&s->g) < 2)
return AVERROR_INVALIDDATA;
x = bytestream2_get_be16u(&s->g);
q->expn[0] = x >> 11;
q->mant[0] = x & 0x7ff;
for (i = 1; i < JPEG2000_MAX_DECLEVELS * 3; i++) {
int curexpn = FFMAX(0, q->expn[0] - (i - 1) / 3);
q->expn[i] = curexpn;
q->mant[i] = q->mant[0];
}
} else {
n = (n - 3) >> 1;
if (bytestream2_get_bytes_left(&s->g) < 2 * n ||
n > JPEG2000_MAX_DECLEVELS*3)
return AVERROR_INVALIDDATA;
for (i = 0; i < n; i++) {
x = bytestream2_get_be16u(&s->g);
q->expn[i] = x >> 11;
q->mant[i] = x & 0x7ff;
}
}
return 0;
}
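/* The three branches above correspond to the quantization styles of the
 * JPEG 2000 spec: "no quantization" stores one exponent per subband,
 * "scalar implicit" (QSTY_SI) stores a single exponent/mantissa pair from
 * which the remaining subbands are derived, and scalar expounded stores an
 * explicit exponent/mantissa pair per subband. */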
/* Get quantization parameters for a particular tile or a whole image. */
static int get_qcd(Jpeg2000DecoderContext *s, int n, Jpeg2000QuantStyle *q,
uint8_t *properties)
{
Jpeg2000QuantStyle tmp;
int compno, ret;
if ((ret = get_qcx(s, n, &tmp)) < 0)
return ret;
for (compno = 0; compno < s->ncomponents; compno++)
if (!(properties[compno] & HAD_QCC))
memcpy(q + compno, &tmp, sizeof(tmp));
return 0;
}
/* Get quantization parameters for a component in the whole image
* or in a particular tile. */
static int get_qcc(Jpeg2000DecoderContext *s, int n, Jpeg2000QuantStyle *q,
uint8_t *properties)
{
int compno;
if (bytestream2_get_bytes_left(&s->g) < 1)
return AVERROR_INVALIDDATA;
compno = bytestream2_get_byteu(&s->g);
if (compno >= s->ncomponents) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid compno %d. There are %d components in the image.\n",
compno, s->ncomponents);
return AVERROR_INVALIDDATA;
}
properties[compno] |= HAD_QCC;
return get_qcx(s, n - 1, q + compno);
}
/* Get start of tile segment. */
static int get_sot(Jpeg2000DecoderContext *s, int n)
{
Jpeg2000TilePart *tp;
uint16_t Isot;
uint32_t Psot;
uint8_t TPsot;
if (bytestream2_get_bytes_left(&s->g) < 8)
return AVERROR_INVALIDDATA;
s->curtileno = 0;
Isot = bytestream2_get_be16u(&s->g); // Isot
if (Isot >= s->numXtiles * s->numYtiles)
return AVERROR_INVALIDDATA;
s->curtileno = Isot;
Psot = bytestream2_get_be32u(&s->g); // Psot
TPsot = bytestream2_get_byteu(&s->g); // TPsot
/* TNsot is read but not used */
bytestream2_get_byteu(&s->g); // TNsot
if (Psot > bytestream2_get_bytes_left(&s->g) + n + 2) {
av_log(s->avctx, AV_LOG_ERROR, "Psot %d too big\n", Psot);
return AVERROR_INVALIDDATA;
}
if (TPsot >= FF_ARRAY_ELEMS(s->tile[Isot].tile_part)) {
avpriv_request_sample(s->avctx, "Support for %d components", TPsot);
return AVERROR_PATCHWELCOME;
}
s->tile[Isot].tp_idx = TPsot;
tp = s->tile[Isot].tile_part + TPsot;
tp->tile_index = Isot;
tp->tp_end = s->g.buffer + Psot - n - 2;
if (!TPsot) {
Jpeg2000Tile *tile = s->tile + s->curtileno;
/* copy defaults */
memcpy(tile->codsty, s->codsty, s->ncomponents * sizeof(Jpeg2000CodingStyle));
memcpy(tile->qntsty, s->qntsty, s->ncomponents * sizeof(Jpeg2000QuantStyle));
}
return 0;
}
/* Tile-part lengths: see ISO 15444-1:2002, section A.7.1
* Used to know the number of tile parts and lengths.
* There may be multiple TLMs in the header.
* TODO: The function is not yet used for tile-part management, nor anywhere
* else. It could be used to allocate memory for tile parts before the SOT
* markers are handled. Parsing the TLM segment is still required here to
* advance past it in the input header buffer.
* This marker is mandatory for DCI. */
static uint8_t get_tlm(Jpeg2000DecoderContext *s, int n)
{
uint8_t Stlm, ST, SP, tile_tlm, i;
bytestream2_get_byte(&s->g); /* Ztlm: skipped */
Stlm = bytestream2_get_byte(&s->g);
// too complex ? ST = ((Stlm >> 4) & 0x01) + ((Stlm >> 4) & 0x02);
ST = (Stlm >> 4) & 0x03;
// TODO: Manage case of ST = 0b11 --> raise error
SP = (Stlm >> 6) & 0x01;
tile_tlm = (n - 4) / ((SP + 1) * 2 + ST);
for (i = 0; i < tile_tlm; i++) {
switch (ST) {
case 0:
break;
case 1:
bytestream2_get_byte(&s->g);
break;
case 2:
bytestream2_get_be16(&s->g);
break;
case 3:
bytestream2_get_be32(&s->g);
break;
}
if (SP == 0) {
bytestream2_get_be16(&s->g);
} else {
bytestream2_get_be32(&s->g);
}
}
return 0;
}
static int init_tile(Jpeg2000DecoderContext *s, int tileno)
{
int compno;
int tilex = tileno % s->numXtiles;
int tiley = tileno / s->numXtiles;
Jpeg2000Tile *tile = s->tile + tileno;
if (!tile->comp)
return AVERROR(ENOMEM);
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
Jpeg2000QuantStyle *qntsty = tile->qntsty + compno;
int ret; // global bandno
comp->coord_o[0][0] = FFMAX(tilex * s->tile_width + s->tile_offset_x, s->image_offset_x);
comp->coord_o[0][1] = FFMIN((tilex + 1) * s->tile_width + s->tile_offset_x, s->width);
comp->coord_o[1][0] = FFMAX(tiley * s->tile_height + s->tile_offset_y, s->image_offset_y);
comp->coord_o[1][1] = FFMIN((tiley + 1) * s->tile_height + s->tile_offset_y, s->height);
comp->coord[0][0] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][0], s->reduction_factor);
comp->coord[0][1] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][1], s->reduction_factor);
comp->coord[1][0] = ff_jpeg2000_ceildivpow2(comp->coord_o[1][0], s->reduction_factor);
comp->coord[1][1] = ff_jpeg2000_ceildivpow2(comp->coord_o[1][1], s->reduction_factor);
if (ret = ff_jpeg2000_init_component(comp, codsty, qntsty,
s->cbps[compno], s->cdx[compno],
s->cdy[compno], s->avctx))
return ret;
}
return 0;
}
/* Read the number of coding passes. */
static int getnpasses(Jpeg2000DecoderContext *s)
{
int num;
if (!get_bits(s, 1))
return 1;
if (!get_bits(s, 1))
return 2;
if ((num = get_bits(s, 2)) != 3)
return num < 0 ? num : 3 + num;
if ((num = get_bits(s, 5)) != 31)
return num < 0 ? num : 6 + num;
num = get_bits(s, 7);
return num < 0 ? num : 37 + num;
}
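/* Codeword table implemented above (number of coding passes, per
 * ISO 15444-1):
 *   "0"                          -> 1 pass
 *   "10"                         -> 2 passes
 *   "11" + 2 bits (!= 11)        -> 3..5 passes
 *   "1111" + 5 bits (!= 11111)   -> 6..36 passes
 *   "111111111" + 7 bits         -> 37..164 passes
 */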
static int getlblockinc(Jpeg2000DecoderContext *s)
{
int res = 0, ret;
while (ret = get_bits(s, 1)) {
if (ret < 0)
return ret;
res++;
}
return res;
}
static int jpeg2000_decode_packet(Jpeg2000DecoderContext *s,
Jpeg2000CodingStyle *codsty,
Jpeg2000ResLevel *rlevel, int precno,
int layno, uint8_t *expn, int numgbits)
{
int bandno, cblkno, ret, nb_code_blocks;
if (!(ret = get_bits(s, 1))) {
jpeg2000_flush(s);
return 0;
} else if (ret < 0)
return ret;
for (bandno = 0; bandno < rlevel->nbands; bandno++) {
Jpeg2000Band *band = rlevel->band + bandno;
Jpeg2000Prec *prec = band->prec + precno;
if (band->coord[0][0] == band->coord[0][1] ||
band->coord[1][0] == band->coord[1][1])
continue;
nb_code_blocks = prec->nb_codeblocks_height *
prec->nb_codeblocks_width;
for (cblkno = 0; cblkno < nb_code_blocks; cblkno++) {
Jpeg2000Cblk *cblk = prec->cblk + cblkno;
int incl, newpasses, llen;
if (cblk->npasses)
incl = get_bits(s, 1);
else
incl = tag_tree_decode(s, prec->cblkincl + cblkno, layno + 1) == layno;
if (!incl)
continue;
else if (incl < 0)
return incl;
if (!cblk->npasses) {
int v = expn[bandno] + numgbits - 1 -
tag_tree_decode(s, prec->zerobits + cblkno, 100);
if (v < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"nonzerobits %d invalid\n", v);
return AVERROR_INVALIDDATA;
}
cblk->nonzerobits = v;
}
if ((newpasses = getnpasses(s)) < 0)
return newpasses;
if ((llen = getlblockinc(s)) < 0)
return llen;
cblk->lblock += llen;
if ((ret = get_bits(s, av_log2(newpasses) + cblk->lblock)) < 0)
return ret;
if (ret > sizeof(cblk->data)) {
avpriv_request_sample(s->avctx,
"Block with lengthinc greater than %zu",
sizeof(cblk->data));
return AVERROR_PATCHWELCOME;
}
cblk->lengthinc = ret;
cblk->npasses += newpasses;
}
}
jpeg2000_flush(s);
if (codsty->csty & JPEG2000_CSTY_EPH) {
if (bytestream2_peek_be16(&s->g) == JPEG2000_EPH)
bytestream2_skip(&s->g, 2);
else
av_log(s->avctx, AV_LOG_ERROR, "EPH marker not found.\n");
}
for (bandno = 0; bandno < rlevel->nbands; bandno++) {
Jpeg2000Band *band = rlevel->band + bandno;
Jpeg2000Prec *prec = band->prec + precno;
nb_code_blocks = prec->nb_codeblocks_height * prec->nb_codeblocks_width;
for (cblkno = 0; cblkno < nb_code_blocks; cblkno++) {
Jpeg2000Cblk *cblk = prec->cblk + cblkno;
if ( bytestream2_get_bytes_left(&s->g) < cblk->lengthinc
|| sizeof(cblk->data) < cblk->length + cblk->lengthinc + 2
) {
av_log(s->avctx, AV_LOG_ERROR,
"Block length %d or lengthinc %d is too large\n",
cblk->length, cblk->lengthinc);
return AVERROR_INVALIDDATA;
}
bytestream2_get_bufferu(&s->g, cblk->data + cblk->length, cblk->lengthinc);
cblk->length += cblk->lengthinc;
cblk->lengthinc = 0;
}
}
return 0;
}
static int jpeg2000_decode_packets(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile)
{
int ret = 0;
int layno, reslevelno, compno, precno, ok_reslevel;
int x, y;
s->bit_index = 8;
switch (tile->codsty[0].prog_order) {
case JPEG2000_PGOD_RLCP:
avpriv_request_sample(s->avctx, "Progression order RLCP");
case JPEG2000_PGOD_LRCP:
for (layno = 0; layno < tile->codsty[0].nlayers; layno++) {
ok_reslevel = 1;
for (reslevelno = 0; ok_reslevel; reslevelno++) {
ok_reslevel = 0;
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
Jpeg2000QuantStyle *qntsty = tile->qntsty + compno;
if (reslevelno < codsty->nreslevels) {
Jpeg2000ResLevel *rlevel = tile->comp[compno].reslevel +
reslevelno;
ok_reslevel = 1;
for (precno = 0; precno < rlevel->num_precincts_x * rlevel->num_precincts_y; precno++)
if ((ret = jpeg2000_decode_packet(s,
codsty, rlevel,
precno, layno,
qntsty->expn + (reslevelno ? 3 * (reslevelno - 1) + 1 : 0),
qntsty->nguardbits)) < 0)
return ret;
}
}
}
}
break;
case JPEG2000_PGOD_CPRL:
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
Jpeg2000QuantStyle *qntsty = tile->qntsty + compno;
/* Set bit stream buffer address according to tile-part.
* For DCinema one tile-part per component, so can be
* indexed by component. */
s->g = tile->tile_part[compno].tpg;
/* Position loop (y axis)
* TODO: Automate computing of step 256.
* Fixed here, but to be computed before entering here. */
for (y = 0; y < s->height; y += 256) {
/* Position loop (x axis)
 * TODO: automate computing of step 256.
 * Fixed here, but to be computed before entering here. */
for (x = 0; x < s->width; x += 256) {
for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++) {
uint16_t prcx, prcy;
uint8_t reducedresno = codsty->nreslevels - 1 - reslevelno; // ==> N_L - r
Jpeg2000ResLevel *rlevel = tile->comp[compno].reslevel + reslevelno;
if (!((y % (1 << (rlevel->log2_prec_height + reducedresno)) == 0) ||
(y == 0))) // TODO: 2nd condition simplified as try0 always =0 for dcinema
continue;
if (!((x % (1 << (rlevel->log2_prec_width + reducedresno)) == 0) ||
(x == 0))) // TODO: 2nd condition simplified as try0 always =0 for dcinema
continue;
// check if a precinct exists
prcx = ff_jpeg2000_ceildivpow2(x, reducedresno) >> rlevel->log2_prec_width;
prcy = ff_jpeg2000_ceildivpow2(y, reducedresno) >> rlevel->log2_prec_height;
precno = prcx + rlevel->num_precincts_x * prcy;
for (layno = 0; layno < tile->codsty[0].nlayers; layno++) {
if ((ret = jpeg2000_decode_packet(s, codsty, rlevel,
precno, layno,
qntsty->expn + (reslevelno ? 3 * (reslevelno - 1) + 1 : 0),
qntsty->nguardbits)) < 0)
return ret;
}
}
}
}
}
break;
case JPEG2000_PGOD_RPCL:
avpriv_request_sample(s->avctx, "Progression order RPCL");
ret = AVERROR_PATCHWELCOME;
break;
case JPEG2000_PGOD_PCRL:
avpriv_request_sample(s->avctx, "Progression order PCRL");
ret = AVERROR_PATCHWELCOME;
break;
default:
break;
}
/* EOC marker reached */
bytestream2_skip(&s->g, 2);
return ret;
}
/* TIER-1 routines */
static void decode_sigpass(Jpeg2000T1Context *t1, int width, int height,
int bpno, int bandno, int bpass_csty_symbol,
int vert_causal_ctx_csty_symbol)
{
int mask = 3 << (bpno - 1), y0, x, y;
for (y0 = 0; y0 < height; y0 += 4)
for (x = 0; x < width; x++)
for (y = y0; y < height && y < y0 + 4; y++) {
if ((t1->flags[y+1][x+1] & JPEG2000_T1_SIG_NB)
&& !(t1->flags[y+1][x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))) {
int flags_mask = -1;
if (vert_causal_ctx_csty_symbol && y == y0 + 3)
flags_mask &= ~(JPEG2000_T1_SIG_S | JPEG2000_T1_SIG_SW | JPEG2000_T1_SIG_SE);
if (ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ff_jpeg2000_getsigctxno(t1->flags[y+1][x+1] & flags_mask, bandno))) {
int xorbit, ctxno = ff_jpeg2000_getsgnctxno(t1->flags[y+1][x+1], &xorbit);
if (bpass_csty_symbol)
t1->data[y][x] = ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ctxno) ? -mask : mask;
else
t1->data[y][x] = (ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ctxno) ^ xorbit) ?
-mask : mask;
ff_jpeg2000_set_significance(t1, x, y,
t1->data[y][x] < 0);
}
t1->flags[y + 1][x + 1] |= JPEG2000_T1_VIS;
}
}
}
static void decode_refpass(Jpeg2000T1Context *t1, int width, int height,
int bpno)
{
int phalf, nhalf;
int y0, x, y;
phalf = 1 << (bpno - 1);
nhalf = -phalf;
for (y0 = 0; y0 < height; y0 += 4)
for (x = 0; x < width; x++)
for (y = y0; y < height && y < y0 + 4; y++)
if ((t1->flags[y + 1][x + 1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS)) == JPEG2000_T1_SIG) {
int ctxno = ff_jpeg2000_getrefctxno(t1->flags[y + 1][x + 1]);
int r = ff_mqc_decode(&t1->mqc,
t1->mqc.cx_states + ctxno)
? phalf : nhalf;
t1->data[y][x] += t1->data[y][x] < 0 ? -r : r;
t1->flags[y + 1][x + 1] |= JPEG2000_T1_REF;
}
}
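/* Editor's note (illustrative): in decode_refpass() the decoded MQ bit
 * selects r = +phalf or r = -phalf for the current bitplane, and the
 * sign-aware update
 *     data += (data < 0) ? -r : r;
 * adjusts only the magnitude of the coefficient, leaving its sign
 * untouched. */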
static void decode_clnpass(Jpeg2000DecoderContext *s, Jpeg2000T1Context *t1,
int width, int height, int bpno, int bandno,
int seg_symbols, int vert_causal_ctx_csty_symbol)
{
int mask = 3 << (bpno - 1), y0, x, y, runlen, dec;
for (y0 = 0; y0 < height; y0 += 4) {
for (x = 0; x < width; x++) {
if (y0 + 3 < height &&
!((t1->flags[y0 + 1][x + 1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
(t1->flags[y0 + 2][x + 1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
(t1->flags[y0 + 3][x + 1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
(t1->flags[y0 + 4][x + 1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)))) {
if (!ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + MQC_CX_RL))
continue;
runlen = ff_mqc_decode(&t1->mqc,
t1->mqc.cx_states + MQC_CX_UNI);
runlen = (runlen << 1) | ff_mqc_decode(&t1->mqc,
t1->mqc.cx_states +
MQC_CX_UNI);
dec = 1;
} else {
runlen = 0;
dec = 0;
}
for (y = y0 + runlen; y < y0 + 4 && y < height; y++) {
if (!dec) {
if (!(t1->flags[y+1][x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))) {
int flags_mask = -1;
if (vert_causal_ctx_csty_symbol && y == y0 + 3)
flags_mask &= ~(JPEG2000_T1_SIG_S | JPEG2000_T1_SIG_SW | JPEG2000_T1_SIG_SE);
dec = ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ff_jpeg2000_getsigctxno(t1->flags[y+1][x+1] & flags_mask,
bandno));
}
}
if (dec) {
int xorbit;
int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[y + 1][x + 1],
&xorbit);
t1->data[y][x] = (ff_mqc_decode(&t1->mqc,
t1->mqc.cx_states + ctxno) ^
xorbit)
? -mask : mask;
ff_jpeg2000_set_significance(t1, x, y, t1->data[y][x] < 0);
}
dec = 0;
t1->flags[y + 1][x + 1] &= ~JPEG2000_T1_VIS;
}
}
}
if (seg_symbols) {
int val;
val = ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI);
val = (val << 1) + ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI);
val = (val << 1) + ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI);
val = (val << 1) + ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI);
if (val != 0xa)
av_log(s->avctx, AV_LOG_ERROR,
"Segmentation symbol value incorrect\n");
}
}
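/* Editor's note (illustrative): the four MQC_CX_UNI decodes at the end
 * of decode_clnpass() assemble a 4-bit symbol MSB first; the JPEG 2000
 * segmentation marker is the fixed bit pattern 1,0,1,0, i.e.
 *     (((1 << 1 | 0) << 1 | 1) << 1 | 0) == 0xa,
 * so any other value indicates a corrupted or desynchronized
 * bitstream. */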
static int decode_cblk(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *codsty,
Jpeg2000T1Context *t1, Jpeg2000Cblk *cblk,
int width, int height, int bandpos)
{
int passno = cblk->npasses, pass_t = 2, bpno = cblk->nonzerobits - 1, y;
int clnpass_cnt = 0;
int bpass_csty_symbol = codsty->cblk_style & JPEG2000_CBLK_BYPASS;
int vert_causal_ctx_csty_symbol = codsty->cblk_style & JPEG2000_CBLK_VSC;
av_assert0(width <= JPEG2000_MAX_CBLKW);
av_assert0(height <= JPEG2000_MAX_CBLKH);
for (y = 0; y < height; y++)
memset(t1->data[y], 0, width * sizeof(**t1->data));
/* If code-block contains no compressed data: nothing to do. */
if (!cblk->length)
return 0;
for (y = 0; y < height + 2; y++)
memset(t1->flags[y], 0, (width + 2) * sizeof(**t1->flags));
cblk->data[cblk->length] = 0xff;
cblk->data[cblk->length+1] = 0xff;
ff_mqc_initdec(&t1->mqc, cblk->data);
while (passno--) {
switch(pass_t) {
case 0:
decode_sigpass(t1, width, height, bpno + 1, bandpos,
bpass_csty_symbol && (clnpass_cnt >= 4),
vert_causal_ctx_csty_symbol);
break;
case 1:
decode_refpass(t1, width, height, bpno + 1);
if (bpass_csty_symbol && clnpass_cnt >= 4)
ff_mqc_initdec(&t1->mqc, cblk->data);
break;
case 2:
decode_clnpass(s, t1, width, height, bpno + 1, bandpos,
codsty->cblk_style & JPEG2000_CBLK_SEGSYM,
vert_causal_ctx_csty_symbol);
clnpass_cnt = clnpass_cnt + 1;
if (bpass_csty_symbol && clnpass_cnt >= 4)
ff_mqc_initdec(&t1->mqc, cblk->data);
break;
}
pass_t++;
if (pass_t == 3) {
bpno--;
pass_t = 0;
}
}
return 0;
}
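/* Editor's note (illustrative): decode_cblk() starts with the cleanup
 * pass (pass_t == 2) of the most significant non-empty bitplane and
 * then cycles significance -> refinement -> cleanup for each lower
 * bitplane; with n = cblk->nonzerobits the pass sequence is
 *     CUP(n), SPP(n-1), MRP(n-1), CUP(n-1), SPP(n-2), ...
 * which is the usual EBCOT pass ordering. */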
/* TODO: Verify dequantization for lossless case
* comp->data can be float or int
* band->stepsize can be float or int
* depending on the type of DWT transformation.
* see ISO/IEC 15444-1:2002 A.6.1 */
/* Float dequantization of a codeblock.*/
static void dequantization_float(int x, int y, Jpeg2000Cblk *cblk,
Jpeg2000Component *comp,
Jpeg2000T1Context *t1, Jpeg2000Band *band)
{
int i, j;
int w = cblk->coord[0][1] - cblk->coord[0][0];
for (j = 0; j < (cblk->coord[1][1] - cblk->coord[1][0]); ++j) {
float *datap = &comp->f_data[(comp->coord[0][1] - comp->coord[0][0]) * (y + j) + x];
int *src = t1->data[j];
for (i = 0; i < w; ++i)
datap[i] = src[i] * band->f_stepsize;
}
}
/* Integer dequantization of a codeblock.*/
static void dequantization_int(int x, int y, Jpeg2000Cblk *cblk,
Jpeg2000Component *comp,
Jpeg2000T1Context *t1, Jpeg2000Band *band)
{
int i, j;
int w = cblk->coord[0][1] - cblk->coord[0][0];
for (j = 0; j < (cblk->coord[1][1] - cblk->coord[1][0]); ++j) {
int32_t *datap = &comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * (y + j) + x];
int *src = t1->data[j];
for (i = 0; i < w; ++i)
datap[i] = (src[i] * band->i_stepsize + (1 << 14)) >> 15;
}
}
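/* Editor's illustrative sketch: the rounding term in
 * dequantization_int() reads as Q15 fixed point (an editor's reading of
 * the code, not a documented contract): adding 1 << 14 is adding 0.5 in
 * Q15 before the truncating shift, so the loop computes
 * round(src * stepsize). Standalone restatement of the same
 * arithmetic: */
static inline int32_t dequant_q15_round_sketch(int32_t src, int32_t step_q15)
{
    return (src * step_q15 + (1 << 14)) >> 15;
}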
/* Inverse ICT parameters in float and integer.
* int value = (float value) * (1<<16) */
static const float f_ict_params[4] = {
1.402f,
0.34413f,
0.71414f,
1.772f
};
static const int i_ict_params[4] = {
91881,
22553,
46802,
116130
};
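/* Editor's illustrative sketch: i_ict_params[] is f_ict_params[] scaled
 * by 1 << 16 and rounded to nearest, e.g. 1.402 * 65536 ~= 91881 and
 * 0.34413 * 65536 ~= 22553. A self-contained regeneration (lrintf() is
 * already used elsewhere in this file): */
static inline int ict_float_to_fixed_sketch(float f)
{
    return (int)lrintf(f * (1 << 16));
}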
static void mct_decode(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile)
{
int i, csize = 1;
int32_t *src[3], i0, i1, i2;
float *srcf[3], i0f, i1f, i2f;
for (i = 1; i < 3; i++)
if (tile->codsty[0].transform != tile->codsty[i].transform) {
av_log(s->avctx, AV_LOG_ERROR, "Transforms mismatch, MCT not supported\n");
return;
}
for (i = 0; i < 3; i++)
if (tile->codsty[0].transform == FF_DWT97)
srcf[i] = tile->comp[i].f_data;
else
src [i] = tile->comp[i].i_data;
for (i = 0; i < 2; i++)
csize *= tile->comp[0].coord[i][1] - tile->comp[0].coord[i][0];
switch (tile->codsty[0].transform) {
case FF_DWT97:
for (i = 0; i < csize; i++) {
i0f = *srcf[0] + (f_ict_params[0] * *srcf[2]);
i1f = *srcf[0] - (f_ict_params[1] * *srcf[1])
- (f_ict_params[2] * *srcf[2]);
i2f = *srcf[0] + (f_ict_params[3] * *srcf[1]);
*srcf[0]++ = i0f;
*srcf[1]++ = i1f;
*srcf[2]++ = i2f;
}
break;
case FF_DWT97_INT:
for (i = 0; i < csize; i++) {
i0 = *src[0] + (((i_ict_params[0] * *src[2]) + (1 << 15)) >> 16);
i1 = *src[0] - (((i_ict_params[1] * *src[1]) + (1 << 15)) >> 16)
- (((i_ict_params[2] * *src[2]) + (1 << 15)) >> 16);
i2 = *src[0] + (((i_ict_params[3] * *src[1]) + (1 << 15)) >> 16);
*src[0]++ = i0;
*src[1]++ = i1;
*src[2]++ = i2;
}
break;
case FF_DWT53:
for (i = 0; i < csize; i++) {
i1 = *src[0] - ((*src[2] + *src[1]) >> 2);
i0 = i1 + *src[2];
i2 = i1 + *src[1];
*src[0]++ = i0;
*src[1]++ = i1;
*src[2]++ = i2;
}
break;
}
}
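/* Editor's illustrative sketch (editor's restatement, same arithmetic
 * as the FF_DWT53 branch above): that branch inverts the reversible
 * color transform of ISO/IEC 15444-1, whose forward direction is
 *     Y = (R + 2G + B) >> 2,  Cb = B - G,  Cr = R - G.
 * The inverse recovers G first, then R and B; src[1] holds Cb and
 * src[2] holds Cr: */
static inline void rct_inverse_sketch(int32_t y, int32_t cb, int32_t cr,
                                      int32_t *r, int32_t *g, int32_t *b)
{
    *g = y - ((cb + cr) >> 2); /* matches i1 in the loop above */
    *r = *g + cr;              /* matches i0                   */
    *b = *g + cb;              /* matches i2                   */
}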
static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
AVFrame *picture)
{
int compno, reslevelno, bandno;
int x, y;
uint8_t *line;
Jpeg2000T1Context t1;
/* Loop on tile components */
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
/* Loop on resolution levels */
for (reslevelno = 0; reslevelno < codsty->nreslevels2decode; reslevelno++) {
Jpeg2000ResLevel *rlevel = comp->reslevel + reslevelno;
/* Loop on bands */
for (bandno = 0; bandno < rlevel->nbands; bandno++) {
int nb_precincts, precno;
Jpeg2000Band *band = rlevel->band + bandno;
int cblkno = 0, bandpos;
bandpos = bandno + (reslevelno > 0);
if (band->coord[0][0] == band->coord[0][1] ||
band->coord[1][0] == band->coord[1][1])
continue;
nb_precincts = rlevel->num_precincts_x * rlevel->num_precincts_y;
/* Loop on precincts */
for (precno = 0; precno < nb_precincts; precno++) {
Jpeg2000Prec *prec = band->prec + precno;
/* Loop on codeblocks */
for (cblkno = 0; cblkno < prec->nb_codeblocks_width * prec->nb_codeblocks_height; cblkno++) {
int x, y;
Jpeg2000Cblk *cblk = prec->cblk + cblkno;
decode_cblk(s, codsty, &t1, cblk,
cblk->coord[0][1] - cblk->coord[0][0],
cblk->coord[1][1] - cblk->coord[1][0],
bandpos);
x = cblk->coord[0][0];
y = cblk->coord[1][0];
if (codsty->transform == FF_DWT97)
dequantization_float(x, y, cblk, comp, &t1, band);
else
dequantization_int(x, y, cblk, comp, &t1, band);
} /* end cblk */
} /*end prec */
} /* end band */
} /* end reslevel */
/* inverse DWT */
ff_dwt_decode(&comp->dwt, codsty->transform == FF_DWT97 ? (void*)comp->f_data : (void*)comp->i_data);
} /*end comp */
/* inverse MCT transformation */
if (tile->codsty[0].mct)
mct_decode(s, tile);
if (s->cdef[0] < 0) {
for (x = 0; x < s->ncomponents; x++)
s->cdef[x] = x + 1;
if ((s->ncomponents & 1) == 0)
s->cdef[s->ncomponents-1] = 0;
}
if (s->precision <= 8) {
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
float *datap = comp->f_data;
int32_t *i_datap = comp->i_data;
int cbps = s->cbps[compno];
int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
int planar = !!picture->data[2];
int pixelsize = planar ? 1 : s->ncomponents;
int plane = 0;
if (planar)
plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);
y = tile->comp[compno].coord[1][0] - s->image_offset_y;
line = picture->data[plane] + y / s->cdy[compno] * picture->linesize[plane];
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
uint8_t *dst;
x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = line + x / s->cdx[compno] * pixelsize + compno*!planar;
if (codsty->transform == FF_DWT97) {
for (; x < w; x += s->cdx[compno]) {
int val = lrintf(*datap) + (1 << (cbps - 1));
/* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
val = av_clip(val, 0, (1 << cbps) - 1);
*dst = val << (8 - cbps);
datap++;
dst += pixelsize;
}
} else {
for (; x < w; x += s->cdx[compno]) {
int val = *i_datap + (1 << (cbps - 1));
/* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
val = av_clip(val, 0, (1 << cbps) - 1);
*dst = val << (8 - cbps);
i_datap++;
dst += pixelsize;
}
}
line += picture->linesize[plane];
}
}
} else {
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
float *datap = comp->f_data;
int32_t *i_datap = comp->i_data;
uint16_t *linel;
int cbps = s->cbps[compno];
int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
int planar = !!picture->data[2];
int pixelsize = planar ? 1 : s->ncomponents;
int plane = 0;
if (planar)
plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);
y = tile->comp[compno].coord[1][0] - s->image_offset_y;
linel = (uint16_t *)picture->data[plane] + y / s->cdy[compno] * (picture->linesize[plane] >> 1);
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
uint16_t *dst;
x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = linel + (x / s->cdx[compno] * pixelsize + compno*!planar);
if (codsty->transform == FF_DWT97) {
for (; x < w; x += s->cdx[compno]) {
int val = lrintf(*datap) + (1 << (cbps - 1));
/* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
val = av_clip(val, 0, (1 << cbps) - 1);
/* align 12 bit values in little-endian mode */
*dst = val << (16 - cbps);
datap++;
dst += pixelsize;
}
} else {
for (; x < w; x += s->cdx[compno]) {
int val = *i_datap + (1 << (cbps - 1));
/* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
val = av_clip(val, 0, (1 << cbps) - 1);
/* align 12 bit values in little-endian mode */
*dst = val << (16 - cbps);
i_datap++;
dst += pixelsize;
}
}
linel += picture->linesize[plane] >> 1;
}
}
}
return 0;
}
static void jpeg2000_dec_cleanup(Jpeg2000DecoderContext *s)
{
int tileno, compno;
for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++) {
if (s->tile[tileno].comp) {
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = s->tile[tileno].comp + compno;
Jpeg2000CodingStyle *codsty = s->tile[tileno].codsty + compno;
ff_jpeg2000_cleanup(comp, codsty);
}
av_freep(&s->tile[tileno].comp);
}
}
av_freep(&s->tile);
memset(s->codsty, 0, sizeof(s->codsty));
memset(s->qntsty, 0, sizeof(s->qntsty));
s->numXtiles = s->numYtiles = 0;
}
static int jpeg2000_read_main_headers(Jpeg2000DecoderContext *s)
{
Jpeg2000CodingStyle *codsty = s->codsty;
Jpeg2000QuantStyle *qntsty = s->qntsty;
uint8_t *properties = s->properties;
for (;;) {
int len, ret = 0;
uint16_t marker;
int oldpos;
if (bytestream2_get_bytes_left(&s->g) < 2) {
av_log(s->avctx, AV_LOG_ERROR, "Missing EOC\n");
break;
}
marker = bytestream2_get_be16u(&s->g);
oldpos = bytestream2_tell(&s->g);
if (marker == JPEG2000_SOD) {
Jpeg2000Tile *tile;
Jpeg2000TilePart *tp;
if (!s->tile) {
av_log(s->avctx, AV_LOG_ERROR, "Missing SIZ\n");
return AVERROR_INVALIDDATA;
}
if (s->curtileno < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Missing SOT\n");
return AVERROR_INVALIDDATA;
}
tile = s->tile + s->curtileno;
tp = tile->tile_part + tile->tp_idx;
if (tp->tp_end < s->g.buffer) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid tpend\n");
return AVERROR_INVALIDDATA;
}
bytestream2_init(&tp->tpg, s->g.buffer, tp->tp_end - s->g.buffer);
bytestream2_skip(&s->g, tp->tp_end - s->g.buffer);
continue;
}
if (marker == JPEG2000_EOC)
break;
len = bytestream2_get_be16(&s->g);
if (len < 2 || bytestream2_get_bytes_left(&s->g) < len - 2)
return AVERROR_INVALIDDATA;
switch (marker) {
case JPEG2000_SIZ:
ret = get_siz(s);
if (!s->tile)
s->numXtiles = s->numYtiles = 0;
break;
case JPEG2000_COC:
ret = get_coc(s, codsty, properties);
break;
case JPEG2000_COD:
ret = get_cod(s, codsty, properties);
break;
case JPEG2000_QCC:
ret = get_qcc(s, len, qntsty, properties);
break;
case JPEG2000_QCD:
ret = get_qcd(s, len, qntsty, properties);
break;
case JPEG2000_SOT:
if (!(ret = get_sot(s, len))) {
av_assert1(s->curtileno >= 0);
codsty = s->tile[s->curtileno].codsty;
qntsty = s->tile[s->curtileno].qntsty;
properties = s->tile[s->curtileno].properties;
}
break;
case JPEG2000_COM:
// the comment is ignored
bytestream2_skip(&s->g, len - 2);
break;
case JPEG2000_TLM:
// Tile-part lengths
ret = get_tlm(s, len);
break;
default:
av_log(s->avctx, AV_LOG_ERROR,
"unsupported marker 0x%.4X at pos 0x%X\n",
marker, bytestream2_tell(&s->g) - 4);
bytestream2_skip(&s->g, len - 2);
break;
}
if (bytestream2_tell(&s->g) - oldpos != len || ret) {
av_log(s->avctx, AV_LOG_ERROR,
"error during processing marker segment %.4x\n", marker);
return ret ? ret : -1;
}
}
return 0;
}
/* Read bit stream packets --> T2 operation. */
static int jpeg2000_read_bitstream_packets(Jpeg2000DecoderContext *s)
{
int ret = 0;
int tileno;
for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++) {
Jpeg2000Tile *tile = s->tile + tileno;
if ((ret = init_tile(s, tileno)))
return ret;
s->g = tile->tile_part[0].tpg;
if ((ret = jpeg2000_decode_packets(s, tile)))
return ret;
}
return 0;
}
static int jp2_find_codestream(Jpeg2000DecoderContext *s)
{
uint32_t atom_size, atom, atom_end;
int search_range = 10;
while (search_range && bytestream2_get_bytes_left(&s->g) >= 8) {
atom_size = bytestream2_get_be32u(&s->g);
atom = bytestream2_get_be32u(&s->g);
atom_end = bytestream2_tell(&s->g) + atom_size - 8;
if (atom == JP2_CODESTREAM)
return 1;
if (bytestream2_get_bytes_left(&s->g) < atom_size || atom_end < atom_size)
return 0;
if (atom == JP2_HEADER &&
atom_size >= 16) {
uint32_t atom2_size, atom2, atom2_end;
do {
atom2_size = bytestream2_get_be32u(&s->g);
atom2 = bytestream2_get_be32u(&s->g);
atom2_end = bytestream2_tell(&s->g) + atom2_size - 8;
if (atom2_size < 8 || atom2_end > atom_end || atom2_end < atom2_size)
break;
if (atom2 == JP2_CODESTREAM) {
return 1;
} else if (atom2 == MKBETAG('c','o','l','r') && atom2_size >= 7) {
int method = bytestream2_get_byteu(&s->g);
bytestream2_skipu(&s->g, 2);
if (method == 1) {
s->colour_space = bytestream2_get_be32u(&s->g);
}
} else if (atom2 == MKBETAG('p','c','l','r') && atom2_size >= 6) {
int i, size, colour_count, colour_channels, colour_depth[3];
uint32_t r, g, b;
colour_count = bytestream2_get_be16u(&s->g);
colour_channels = bytestream2_get_byteu(&s->g);
// FIXME: Do not ignore channel_sign
colour_depth[0] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
colour_depth[1] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
colour_depth[2] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
size = ((colour_depth[0] + 7) >> 3) * colour_count +
((colour_depth[1] + 7) >> 3) * colour_count +
((colour_depth[2] + 7) >> 3) * colour_count;
if (colour_count > 256 ||
colour_channels != 3 ||
colour_depth[0] > 16 ||
colour_depth[1] > 16 ||
colour_depth[2] > 16 ||
atom2_size < size) {
avpriv_request_sample(s->avctx, "Unknown palette");
bytestream2_seek(&s->g, atom2_end, SEEK_SET);
continue;
}
s->pal8 = 1;
for (i = 0; i < colour_count; i++) {
if (colour_depth[0] <= 8) {
r = bytestream2_get_byteu(&s->g) << (8 - colour_depth[0]);
r |= r >> colour_depth[0];
} else {
r = bytestream2_get_be16u(&s->g) >> (colour_depth[0] - 8);
}
if (colour_depth[1] <= 8) {
g = bytestream2_get_byteu(&s->g) << (8 - colour_depth[1]);
g |= g >> colour_depth[1];
} else {
g = bytestream2_get_be16u(&s->g) >> (colour_depth[1] - 8);
}
if (colour_depth[2] <= 8) {
b = bytestream2_get_byteu(&s->g) << (8 - colour_depth[2]);
b |= b >> colour_depth[2];
} else {
b = bytestream2_get_be16u(&s->g) >> (colour_depth[2] - 8);
}
s->palette[i] = 0xffu << 24 | r << 16 | g << 8 | b;
}
} else if (atom2 == MKBETAG('c','d','e','f') && atom2_size >= 2) {
int n = bytestream2_get_be16u(&s->g);
for (; n>0; n--) {
int cn = bytestream2_get_be16(&s->g);
int av_unused typ = bytestream2_get_be16(&s->g);
int asoc = bytestream2_get_be16(&s->g);
if (cn < 4 || asoc < 4)
s->cdef[cn] = asoc;
}
}
bytestream2_seek(&s->g, atom2_end, SEEK_SET);
} while (atom_end - atom2_end >= 8);
} else {
search_range--;
}
bytestream2_seek(&s->g, atom_end, SEEK_SET);
}
return 0;
}
static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
Jpeg2000DecoderContext *s = avctx->priv_data;
ThreadFrame frame = { .f = data };
AVFrame *picture = data;
int tileno, ret;
s->avctx = avctx;
bytestream2_init(&s->g, avpkt->data, avpkt->size);
s->curtileno = -1;
memset(s->cdef, -1, sizeof(s->cdef));
if (bytestream2_get_bytes_left(&s->g) < 2) {
ret = AVERROR_INVALIDDATA;
goto end;
}
// check if the image is in jp2 format
if (bytestream2_get_bytes_left(&s->g) >= 12 &&
(bytestream2_get_be32u(&s->g) == 12) &&
(bytestream2_get_be32u(&s->g) == JP2_SIG_TYPE) &&
(bytestream2_get_be32u(&s->g) == JP2_SIG_VALUE)) {
if (!jp2_find_codestream(s)) {
av_log(avctx, AV_LOG_ERROR,
"Could not find Jpeg2000 codestream atom.\n");
ret = AVERROR_INVALIDDATA;
goto end;
}
} else {
bytestream2_seek(&s->g, 0, SEEK_SET);
}
while (bytestream2_get_bytes_left(&s->g) >= 3 && bytestream2_peek_be16(&s->g) != JPEG2000_SOC)
bytestream2_skip(&s->g, 1);
if (bytestream2_get_be16u(&s->g) != JPEG2000_SOC) {
av_log(avctx, AV_LOG_ERROR, "SOC marker not present\n");
ret = AVERROR_INVALIDDATA;
goto end;
}
if ((ret = jpeg2000_read_main_headers(s)))
goto end;
/* get picture buffer */
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
goto end;
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
if ((ret = jpeg2000_read_bitstream_packets(s)))
goto end;
for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++)
if ((ret = jpeg2000_decode_tile(s, s->tile + tileno, picture)))
goto end;
jpeg2000_dec_cleanup(s);
*got_frame = 1;
if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
memcpy(picture->data[1], s->palette, 256 * sizeof(uint32_t));
return bytestream2_tell(&s->g);
end:
jpeg2000_dec_cleanup(s);
return ret;
}
static void jpeg2000_init_static_data(AVCodec *codec)
{
ff_jpeg2000_init_tier1_luts();
ff_mqc_init_context_tables();
}
#define OFFSET(x) offsetof(Jpeg2000DecoderContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "lowres", "Lower the decoding resolution by a power of two",
OFFSET(reduction_factor), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, JPEG2000_MAX_RESLEVELS - 1, VD },
{ NULL },
};
static const AVProfile profiles[] = {
{ FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0, "JPEG 2000 codestream restriction 0" },
{ FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1, "JPEG 2000 codestream restriction 1" },
{ FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION, "JPEG 2000 no codestream restrictions" },
{ FF_PROFILE_JPEG2000_DCINEMA_2K, "JPEG 2000 digital cinema 2K" },
{ FF_PROFILE_JPEG2000_DCINEMA_4K, "JPEG 2000 digital cinema 4K" },
{ FF_PROFILE_UNKNOWN },
};
static const AVClass jpeg2000_class = {
.class_name = "jpeg2000",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVCodec ff_jpeg2000_decoder = {
.name = "jpeg2000",
.long_name = NULL_IF_CONFIG_SMALL("JPEG 2000"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_JPEG2000,
.capabilities = CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(Jpeg2000DecoderContext),
.init_static_data = jpeg2000_init_static_data,
.decode = jpeg2000_decode_frame,
.priv_class = &jpeg2000_class,
.max_lowres = 5,
.profiles = NULL_IF_CONFIG_SMALL(profiles)
};
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5823_0 |
crossvul-cpp_data_good_2109_0 | /*
* DCCP connection tracking protocol helper
*
* Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/dccp.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_log.h>
/* Timeouts are based on values from RFC 4340:
*
* - REQUEST:
*
* 8.1.2. Client Request
*
* A client MAY give up on its DCCP-Requests after some time
* (3 minutes, for example).
*
* - RESPOND:
*
* 8.1.3. Server Response
*
* It MAY also leave the RESPOND state for CLOSED after a timeout of
* not less than 4MSL (8 minutes);
*
* - PARTOPEN:
*
* 8.1.5. Handshake Completion
*
* If the client remains in PARTOPEN for more than 4MSL (8 minutes),
* it SHOULD reset the connection with Reset Code 2, "Aborted".
*
* - OPEN:
*
* The DCCP timestamp overflows after 11.9 hours. If the connection
* stays idle this long the sequence number won't be recognized
* as valid anymore.
*
* - CLOSEREQ/CLOSING:
*
* 8.3. Termination
*
* The retransmission timer should initially be set to go off in two
* round-trip times and should back off to not less than once every
* 64 seconds ...
*
* - TIMEWAIT:
*
* 4.3. States
*
* A server or client socket remains in this state for 2MSL (4 minutes)
* after the connection has been torn down, ...
*/
#define DCCP_MSL (2 * 60 * HZ)
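/* Editor's note (illustrative): with DCCP_MSL = 2 minutes, the defaults
 * installed in dccp_init_net() below follow from the RFC text quoted
 * above: 2 * DCCP_MSL = 4 minutes (2MSL) for REQUEST and TIMEWAIT,
 * 4 * DCCP_MSL = 8 minutes (4MSL) for RESPOND and PARTOPEN, and
 * 12 * 3600 * HZ for OPEN to stay below the ~11.9 hour timestamp
 * wraparound. */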
static const char * const dccp_state_names[] = {
[CT_DCCP_NONE] = "NONE",
[CT_DCCP_REQUEST] = "REQUEST",
[CT_DCCP_RESPOND] = "RESPOND",
[CT_DCCP_PARTOPEN] = "PARTOPEN",
[CT_DCCP_OPEN] = "OPEN",
[CT_DCCP_CLOSEREQ] = "CLOSEREQ",
[CT_DCCP_CLOSING] = "CLOSING",
[CT_DCCP_TIMEWAIT] = "TIMEWAIT",
[CT_DCCP_IGNORE] = "IGNORE",
[CT_DCCP_INVALID] = "INVALID",
};
#define sNO CT_DCCP_NONE
#define sRQ CT_DCCP_REQUEST
#define sRS CT_DCCP_RESPOND
#define sPO CT_DCCP_PARTOPEN
#define sOP CT_DCCP_OPEN
#define sCR CT_DCCP_CLOSEREQ
#define sCG CT_DCCP_CLOSING
#define sTW CT_DCCP_TIMEWAIT
#define sIG CT_DCCP_IGNORE
#define sIV CT_DCCP_INVALID
/*
* DCCP state transition table
*
* The assumption is the same as for TCP tracking:
*
* We are the man in the middle. All the packets go through us but might
* get lost in transit to the destination. It is assumed that the destination
* can't receive segments we haven't seen.
*
* The following states exist:
*
* NONE: Initial state, expecting Request
* REQUEST: Request seen, waiting for Response from server
* RESPOND: Response from server seen, waiting for Ack from client
* PARTOPEN: Ack after Response seen, waiting for packet other than Response,
* Reset or Sync from server
* OPEN: Packet other than Response, Reset or Sync seen
* CLOSEREQ: CloseReq from server seen, expecting Close from client
* CLOSING: Close seen, expecting Reset
* TIMEWAIT: Reset seen
* IGNORE: Not determinable whether packet is valid
*
* Some states exist only on one side of the connection: REQUEST, RESPOND,
* PARTOPEN, CLOSEREQ. For the other side these states are equivalent to
* the one it was in before.
*
* Packets are marked as ignored (sIG) if we don't know if they're valid
* (for example a reincarnation of a connection we didn't notice is dead
* already) and the server may send back a connection closing Reset or a
* Response. They're also used for Sync/SyncAck packets, which we don't
* care about.
*/
static const u_int8_t
dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = {
[CT_DCCP_ROLE_CLIENT] = {
[DCCP_PKT_REQUEST] = {
/*
* sNO -> sRQ Regular Request
* sRQ -> sRQ Retransmitted Request or reincarnation
* sRS -> sRS Retransmitted Request (apparently Response
* got lost after we saw it) or reincarnation
* sPO -> sIG Ignore, conntrack might be out of sync
* sOP -> sIG Ignore, conntrack might be out of sync
* sCR -> sIG Ignore, conntrack might be out of sync
* sCG -> sIG Ignore, conntrack might be out of sync
* sTW -> sRQ Reincarnation
*
* sNO, sRQ, sRS, sPO. sOP, sCR, sCG, sTW, */
sRQ, sRQ, sRS, sIG, sIG, sIG, sIG, sRQ,
},
[DCCP_PKT_RESPONSE] = {
/*
* sNO -> sIV Invalid
* sRQ -> sIG Ignore, might be response to ignored Request
* sRS -> sIG Ignore, might be response to ignored Request
* sPO -> sIG Ignore, might be response to ignored Request
* sOP -> sIG Ignore, might be response to ignored Request
* sCR -> sIG Ignore, might be response to ignored Request
* sCG -> sIG Ignore, might be response to ignored Request
* sTW -> sIV Invalid, reincarnation in reverse direction
* goes through sRQ
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIV,
},
[DCCP_PKT_ACK] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
* sPO -> sPO Retransmitted Ack for Response, remain in PARTOPEN
* sOP -> sOP Regular ACK, remain in OPEN
* sCR -> sCR Ack in CLOSEREQ MAY be processed (8.3.)
* sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
},
[DCCP_PKT_DATA] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sIV MUST use DataAck in PARTOPEN state (8.1.5.)
* sOP -> sOP Regular Data packet
* sCR -> sCR Data in CLOSEREQ MAY be processed (8.3.)
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sIV, sOP, sCR, sCG, sIV,
},
[DCCP_PKT_DATAACK] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
* sPO -> sPO Remain in PARTOPEN state
* sOP -> sOP Regular DataAck packet in OPEN state
* sCR -> sCR DataAck in CLOSEREQ MAY be processed (8.3.)
* sCG -> sCG DataAck in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
},
[DCCP_PKT_CLOSEREQ] = {
/*
* CLOSEREQ may only be sent by the server.
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV
},
[DCCP_PKT_CLOSE] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sCG Client-initiated close
* sOP -> sCG Client-initiated close
* sCR -> sCG Close in response to CloseReq (8.3.)
* sCG -> sCG Retransmit
* sTW -> sIV Late retransmit, already in TIME_WAIT
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sCG, sCG, sCG, sIV, sIV
},
[DCCP_PKT_RESET] = {
/*
* sNO -> sIV No connection
* sRQ -> sTW Sync received or timeout, SHOULD send Reset (8.1.1.)
* sRS -> sTW Response received without Request
* sPO -> sTW Timeout, SHOULD send Reset (8.1.5.)
* sOP -> sTW Connection reset
* sCR -> sTW Connection reset
* sCG -> sTW Connection reset
* sTW -> sIG Ignore (don't refresh timer)
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG
},
[DCCP_PKT_SYNC] = {
/*
* We currently ignore Sync packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
[DCCP_PKT_SYNCACK] = {
/*
* We currently ignore SyncAck packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
},
[CT_DCCP_ROLE_SERVER] = {
[DCCP_PKT_REQUEST] = {
/*
* sNO -> sIV Invalid
* sRQ -> sIG Ignore, conntrack might be out of sync
* sRS -> sIG Ignore, conntrack might be out of sync
* sPO -> sIG Ignore, conntrack might be out of sync
* sOP -> sIG Ignore, conntrack might be out of sync
* sCR -> sIG Ignore, conntrack might be out of sync
* sCG -> sIG Ignore, conntrack might be out of sync
* sTW -> sRQ Reincarnation, must reverse roles
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sRQ
},
[DCCP_PKT_RESPONSE] = {
/*
* sNO -> sIV Response without Request
* sRQ -> sRS Response to clients Request
* sRS -> sRS Retransmitted Response (8.1.3. SHOULD NOT)
* sPO -> sIG Response to an ignored Request or late retransmit
* sOP -> sIG Ignore, might be response to ignored Request
* sCR -> sIG Ignore, might be response to ignored Request
* sCG -> sIG Ignore, might be response to ignored Request
* sTW -> sIV Invalid, Request from client in sTW moves to sRQ
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sRS, sRS, sIG, sIG, sIG, sIG, sIV
},
[DCCP_PKT_ACK] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP Enter OPEN state (8.1.5.)
* sOP -> sOP Regular Ack in OPEN state
* sCR -> sIV Waiting for Close from client
* sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
},
[DCCP_PKT_DATA] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP Enter OPEN state (8.1.5.)
* sOP -> sOP Regular Data packet in OPEN state
* sCR -> sIV Waiting for Close from client
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
},
[DCCP_PKT_DATAACK] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP Enter OPEN state (8.1.5.)
* sOP -> sOP Regular DataAck in OPEN state
* sCR -> sIV Waiting for Close from client
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
* sTW -> sIV
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
},
[DCCP_PKT_CLOSEREQ] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP -> sCR Move directly to CLOSEREQ (8.1.5.)
* sOP -> sCR CloseReq in OPEN state
* sCR -> sCR Retransmit
* sCG -> sCR Simultaneous close, client sends another Close
* sTW -> sIV Already closed
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sCR, sCR, sCR, sCR, sIV
},
[DCCP_PKT_CLOSE] = {
/*
* sNO -> sIV No connection
* sRQ -> sIV No connection
* sRS -> sIV No connection
* sPO -> sOP -> sCG Move directly to CLOSING
* sOP -> sCG Move to CLOSING
* sCR -> sIV Close after CloseReq is invalid
* sCG -> sCG Retransmit
* sTW -> sIV Already closed
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sIV, sIV, sCG, sCG, sIV, sCG, sIV
},
[DCCP_PKT_RESET] = {
/*
* sNO -> sIV No connection
* sRQ -> sTW Reset in response to Request
* sRS -> sTW Timeout, SHOULD send Reset (8.1.3.)
* sPO -> sTW Timeout, SHOULD send Reset (8.1.3.)
* sOP -> sTW
* sCR -> sTW
* sCG -> sTW
* sTW -> sIG Ignore (don't refresh timer)
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG
},
[DCCP_PKT_SYNC] = {
/*
* We currently ignore Sync packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
[DCCP_PKT_SYNCACK] = {
/*
* We currently ignore SyncAck packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
},
};
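/* Editor's illustrative sketch (not part of the original tracker): the
 * table is indexed as dccp_state_table[role][packet type][old state].
 * For example, a client Request on a fresh flow,
 *     dccp_state_table[CT_DCCP_ROLE_CLIENT][DCCP_PKT_REQUEST][CT_DCCP_NONE],
 * yields CT_DCCP_REQUEST (sRQ), which is exactly the lookup performed
 * by dccp_new() and dccp_packet() below. */
static inline u_int8_t dccp_next_state_sketch(enum ct_dccp_roles role,
					      u_int8_t type, u_int8_t state)
{
	return dccp_state_table[role][type][state];
}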
/* this module per-net specifics */
static int dccp_net_id __read_mostly;
struct dccp_net {
struct nf_proto_net pn;
int dccp_loose;
unsigned int dccp_timeout[CT_DCCP_MAX + 1];
};
static inline struct dccp_net *dccp_pernet(struct net *net)
{
return net_generic(net, dccp_net_id);
}
static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
struct dccp_hdr _hdr, *dh;
dh = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (dh == NULL)
return false;
tuple->src.u.dccp.port = dh->dccph_sport;
tuple->dst.u.dccp.port = dh->dccph_dport;
return true;
}
static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv,
const struct nf_conntrack_tuple *tuple)
{
inv->src.u.dccp.port = tuple->dst.u.dccp.port;
inv->dst.u.dccp.port = tuple->src.u.dccp.port;
return true;
}
static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
struct dccp_hdr _dh, *dh;
const char *msg;
u_int8_t state;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
dn = dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "nf_ct_dccp: not picking up existing connection ";
goto out_invalid;
}
/* fall through */
case CT_DCCP_REQUEST:
break;
case CT_DCCP_INVALID:
msg = "nf_ct_dccp: invalid state transition ";
goto out_invalid;
}
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
NULL, "%s", msg);
return false;
}
static u64 dccp_ack_seq(const struct dccp_hdr *dh)
{
const struct dccp_hdr_ack_bits *dhack;
dhack = (void *)dh + __dccp_basic_hdr_len(dh);
return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) +
ntohl(dhack->dccph_ack_nr_low);
}
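/* Editor's illustrative sketch: DCCP acknowledgement numbers are 48
 * bits wide and carried as a 16-bit high half plus a 32-bit low half;
 * dccp_ack_seq() above reassembles them as (high << 32) + low after
 * byte swapping. Standalone restatement (editor's addition, both
 * halves assumed already in host byte order): */
static inline u64 dccp_ack48_sketch(u16 high, u32 low)
{
	return ((u64)high << 32) | low;
}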
static unsigned int *dccp_get_timeouts(struct net *net)
{
return dccp_pernet(net)->dccp_timeout;
}
static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, enum ip_conntrack_info ctinfo,
u_int8_t pf, unsigned int hooknum,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
type = dh->dccph_type;
if (type == DCCP_PKT_RESET &&
!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
/* Tear down connection immediately if only reply is a RESET */
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
spin_lock_bh(&ct->lock);
role = ct->proto.dccp.role[dir];
old_state = ct->proto.dccp.state;
new_state = dccp_state_table[role][type][old_state];
switch (new_state) {
case CT_DCCP_REQUEST:
if (old_state == CT_DCCP_TIMEWAIT &&
role == CT_DCCP_ROLE_SERVER) {
/* Reincarnation in the reverse direction: reopen and
* reverse client/server roles. */
ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER;
}
break;
case CT_DCCP_RESPOND:
if (old_state == CT_DCCP_REQUEST)
ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
break;
case CT_DCCP_PARTOPEN:
if (old_state == CT_DCCP_RESPOND &&
type == DCCP_PKT_ACK &&
dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq)
set_bit(IPS_ASSURED_BIT, &ct->status);
break;
case CT_DCCP_IGNORE:
/*
* Connection tracking might be out of sync, so we ignore
* packets that might establish a new connection and resync
* if the server responds with a valid Response.
*/
if (ct->proto.dccp.last_dir == !dir &&
ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST &&
type == DCCP_PKT_RESPONSE) {
ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
new_state = CT_DCCP_RESPOND;
break;
}
ct->proto.dccp.last_dir = dir;
ct->proto.dccp.last_pkt = type;
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_dccp: invalid packet ignored ");
return NF_ACCEPT;
case CT_DCCP_INVALID:
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_dccp: invalid state transition ");
return -NF_ACCEPT;
}
ct->proto.dccp.last_dir = dir;
ct->proto.dccp.last_pkt = type;
ct->proto.dccp.state = new_state;
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
}
static int dccp_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo,
u_int8_t pf, unsigned int hooknum)
{
struct dccp_hdr _dh, *dh;
unsigned int dccp_len = skb->len - dataoff;
unsigned int cscov;
const char *msg;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
if (dh == NULL) {
msg = "nf_ct_dccp: short packet ";
goto out_invalid;
}
if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
dh->dccph_doff * 4 > dccp_len) {
msg = "nf_ct_dccp: truncated/malformed packet ";
goto out_invalid;
}
cscov = dccp_len;
if (dh->dccph_cscov) {
cscov = (dh->dccph_cscov - 1) * 4;
if (cscov > dccp_len) {
msg = "nf_ct_dccp: bad checksum coverage ";
goto out_invalid;
}
}
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP,
pf)) {
msg = "nf_ct_dccp: bad checksum ";
goto out_invalid;
}
if (dh->dccph_type >= DCCP_PKT_INVALID) {
msg = "nf_ct_dccp: reserved packet type ";
goto out_invalid;
}
return NF_ACCEPT;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
return -NF_ACCEPT;
}
static int dccp_print_tuple(struct seq_file *s,
const struct nf_conntrack_tuple *tuple)
{
return seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.dccp.port),
ntohs(tuple->dst.u.dccp.port));
}
static int dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct)
{
struct nlattr *nest_parms;
spin_lock_bh(&ct->lock);
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
cpu_to_be64(ct->proto.dccp.handshake_seq)))
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
spin_unlock_bh(&ct->lock);
return 0;
nla_put_failure:
spin_unlock_bh(&ct->lock);
return -1;
}
static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
[CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 },
[CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 },
[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 },
};
static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *attr = cda[CTA_PROTOINFO_DCCP];
struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1];
int err;
if (!attr)
return 0;
err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr,
dccp_nla_policy);
if (err < 0)
return err;
if (!tb[CTA_PROTOINFO_DCCP_STATE] ||
!tb[CTA_PROTOINFO_DCCP_ROLE] ||
nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX ||
nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) {
return -EINVAL;
}
spin_lock_bh(&ct->lock);
ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) {
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
} else {
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT;
}
if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) {
ct->proto.dccp.handshake_seq =
be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]));
}
spin_unlock_bh(&ct->lock);
return 0;
}
static int dccp_nlattr_size(void)
{
return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
+ nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct dccp_net *dn = dccp_pernet(net);
unsigned int *timeouts = data;
int i;
/* set default DCCP timeouts. */
for (i = 0; i < CT_DCCP_MAX; i++)
timeouts[i] = dn->dccp_timeout[i];
/* there's a 1:1 mapping between attributes and protocol states. */
for (i = CTA_TIMEOUT_DCCP_UNSPEC + 1; i < CTA_TIMEOUT_DCCP_MAX + 1; i++) {
if (tb[i]) {
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
return 0;
}
static int
dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
int i;
for (i = CTA_TIMEOUT_DCCP_UNSPEC + 1; i < CTA_TIMEOUT_DCCP_MAX + 1; i++) {
if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
[CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
/* template, data assigned later */
static struct ctl_table dccp_sysctl_table[] = {
{
.procname = "nf_conntrack_dccp_timeout_request",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_dccp_timeout_respond",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_dccp_timeout_partopen",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_dccp_timeout_open",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_dccp_timeout_closereq",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_dccp_timeout_closing",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_dccp_timeout_timewait",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_dccp_loose",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
#endif /* CONFIG_SYSCTL */
static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
struct dccp_net *dn)
{
#ifdef CONFIG_SYSCTL
if (pn->ctl_table)
return 0;
pn->ctl_table = kmemdup(dccp_sysctl_table,
sizeof(dccp_sysctl_table),
GFP_KERNEL);
if (!pn->ctl_table)
return -ENOMEM;
pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
pn->ctl_table[7].data = &dn->dccp_loose;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
pn->ctl_table[0].procname = NULL;
#endif
return 0;
}
static int dccp_init_net(struct net *net, u_int16_t proto)
{
struct dccp_net *dn = dccp_pernet(net);
struct nf_proto_net *pn = &dn->pn;
if (!pn->users) {
/* default values */
dn->dccp_loose = 1;
dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
}
return dccp_kmemdup_sysctl_table(net, pn, dn);
}
static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.l3proto = AF_INET,
.l4proto = IPPROTO_DCCP,
.name = "dccp",
.pkt_to_tuple = dccp_pkt_to_tuple,
.invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
.get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = dccp_to_nlattr,
.nlattr_size = dccp_nlattr_size,
.from_nlattr = nlattr_to_dccp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_DCCP_MAX,
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
.net_id = &dccp_net_id,
.init_net = dccp_init_net,
};
static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.l3proto = AF_INET6,
.l4proto = IPPROTO_DCCP,
.name = "dccp",
.pkt_to_tuple = dccp_pkt_to_tuple,
.invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
.get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = dccp_to_nlattr,
.nlattr_size = dccp_nlattr_size,
.from_nlattr = nlattr_to_dccp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_DCCP_MAX,
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
.net_id = &dccp_net_id,
.init_net = dccp_init_net,
};
static __net_init int dccp_net_init(struct net *net)
{
int ret = 0;
ret = nf_ct_l4proto_pernet_register(net, &dccp_proto4);
if (ret < 0) {
pr_err("nf_conntrack_dccp4: pernet registration failed.\n");
goto out;
}
ret = nf_ct_l4proto_pernet_register(net, &dccp_proto6);
if (ret < 0) {
pr_err("nf_conntrack_dccp6: pernet registration failed.\n");
goto cleanup_dccp4;
}
return 0;
cleanup_dccp4:
nf_ct_l4proto_pernet_unregister(net, &dccp_proto4);
out:
return ret;
}
static __net_exit void dccp_net_exit(struct net *net)
{
nf_ct_l4proto_pernet_unregister(net, &dccp_proto6);
nf_ct_l4proto_pernet_unregister(net, &dccp_proto4);
}
static struct pernet_operations dccp_net_ops = {
.init = dccp_net_init,
.exit = dccp_net_exit,
.id = &dccp_net_id,
.size = sizeof(struct dccp_net),
};
static int __init nf_conntrack_proto_dccp_init(void)
{
int ret;
ret = register_pernet_subsys(&dccp_net_ops);
if (ret < 0)
goto out_pernet;
ret = nf_ct_l4proto_register(&dccp_proto4);
if (ret < 0)
goto out_dccp4;
ret = nf_ct_l4proto_register(&dccp_proto6);
if (ret < 0)
goto out_dccp6;
return 0;
out_dccp6:
nf_ct_l4proto_unregister(&dccp_proto4);
out_dccp4:
unregister_pernet_subsys(&dccp_net_ops);
out_pernet:
return ret;
}
static void __exit nf_conntrack_proto_dccp_fini(void)
{
nf_ct_l4proto_unregister(&dccp_proto6);
nf_ct_l4proto_unregister(&dccp_proto4);
unregister_pernet_subsys(&dccp_net_ops);
}
module_init(nf_conntrack_proto_dccp_init);
module_exit(nf_conntrack_proto_dccp_fini);
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("DCCP connection tracking protocol helper");
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2109_0 |
crossvul-cpp_data_good_721_1 | /* Copyright (C) 2012 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/**
* \ingroup decode
*
* @{
*/
/**
* \file
*
* \author Eric Leblond <eric@regit.org>
*
* Decode Teredo Tunneling protocol.
*
* This implementation is based upon RFC 4380: http://www.ietf.org/rfc/rfc4380.txt
*/
#include "suricata-common.h"
#include "decode.h"
#include "decode-ipv6.h"
#include "decode-teredo.h"
#include "util-debug.h"
#include "conf.h"
#define TEREDO_ORIG_INDICATION_LENGTH 8
static bool g_teredo_enabled = true;
void DecodeTeredoConfig(void)
{
int enabled = 0;
if (ConfGetBool("decoder.teredo.enabled", &enabled) == 1) {
if (enabled) {
g_teredo_enabled = true;
} else {
g_teredo_enabled = false;
}
}
}
/**
* \brief Function to decode Teredo packets
*
* \retval TM_ECODE_FAILED if packet is not a Teredo packet, TM_ECODE_OK if it is
*/
int DecodeTeredo(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt, uint16_t len, PacketQueue *pq)
{
if (!g_teredo_enabled)
return TM_ECODE_FAILED;
uint8_t *start = pkt;
/* Is this packet to short to contain an IPv6 packet ? */
if (len < IPV6_HEADER_LEN)
return TM_ECODE_FAILED;
/* Teredo encapsulate IPv6 in UDP and can add some custom message
* part before the IPv6 packet. In our case, we just want to get
* over an ORIGIN indication. So we just make one offset if needed. */
if (start[0] == 0x0) {
switch (start[1]) {
/* origin indication: compatible with tunnel */
case 0x0:
/* offset is coherent with len and presence of an IPv6 header */
if (len >= TEREDO_ORIG_INDICATION_LENGTH + IPV6_HEADER_LEN)
start += TEREDO_ORIG_INDICATION_LENGTH;
else
return TM_ECODE_FAILED;
break;
/* authentication: negotiation not real tunnel */
case 0x1:
return TM_ECODE_FAILED;
/* this case is not possible in Teredo: not that protocol */
default:
return TM_ECODE_FAILED;
}
}
/* There is no specific field that we can check to prove that the packet
* is a Teredo packet. We've zapped here all the possible Teredo header
* and we should have an IPv6 packet at the start pointer.
* We then can only do a few checks before sending the encapsulated packets
* to decoding:
* - The packet has a protocol version which is IPv6.
* - The IPv6 length of the packet matches what remains in buffer.
* - HLIM is 0. This would technically be valid, but still weird.
* - NH 0 (HOP) and not enough data.
*
* If all these conditions are met, the tunnel decoder will be called.
* If the packet gets an invalid event set, it will still be rejected.
*/
if (IP_GET_RAW_VER(start) == 6) {
IPV6Hdr *thdr = (IPV6Hdr *)start;
/* ignore hoplimit 0 packets, most likely an artifact of bad detection */
if (IPV6_GET_RAW_HLIM(thdr) == 0)
return TM_ECODE_FAILED;
/* if nh is 0 (HOP) with little data we have a bogus packet */
if (IPV6_GET_RAW_NH(thdr) == 0 && IPV6_GET_RAW_PLEN(thdr) < 8)
return TM_ECODE_FAILED;
if (len == IPV6_HEADER_LEN +
IPV6_GET_RAW_PLEN(thdr) + (start - pkt)) {
if (pq != NULL) {
int blen = len - (start - pkt);
/* spawn off tunnel packet */
Packet *tp = PacketTunnelPktSetup(tv, dtv, p, start, blen,
DECODE_TUNNEL_IPV6_TEREDO, pq);
if (tp != NULL) {
PKT_SET_SRC(tp, PKT_SRC_DECODER_TEREDO);
/* add the tp to the packet queue. */
PacketEnqueue(pq,tp);
StatsIncr(tv, dtv->counter_teredo);
return TM_ECODE_OK;
}
}
}
return TM_ECODE_FAILED;
}
return TM_ECODE_FAILED;
}
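/* Editor's illustrative sketch (hypothetical helper, not a Suricata
 * API): the sanity checks above reduce to a pure predicate over the
 * candidate inner IPv6 header, where `remaining` is the number of
 * bytes from the start of that header to the end of the UDP payload. */
static inline int TeredoInnerLooksLikeIPv6(uint8_t version, uint8_t hlim,
                                           uint8_t nh, uint16_t plen,
                                           int remaining)
{
    if (version != 6)
        return 0;                  /* wrong IP version            */
    if (hlim == 0)
        return 0;                  /* hop limit 0: likely bogus   */
    if (nh == 0 && plen < 8)
        return 0;                  /* HOP-by-HOP needs >= 8 bytes */
    return remaining == IPV6_HEADER_LEN + plen; /* sizes must agree */
}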
/**
* @}
*/
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_721_1 |
crossvul-cpp_data_good_1022_0 | /*
* Copyright (c) 2002 - 2003
* NetGroup, Politecnico di Torino (Italy)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Politecnico di Torino nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "ftmacros.h"
#include "varattrs.h"
#include <errno.h> // for the errno variable
#include <stdlib.h> // for malloc(), free(), ...
#include <string.h> // for strlen(), ...
#ifdef _WIN32
#include <process.h> // for threads
#else
#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/types.h> // for select() and such
#include <pwd.h> // for password management
#endif
#ifdef HAVE_GETSPNAM
#include <shadow.h> // for password management
#endif
#include <pcap.h> // for libpcap/WinPcap calls
#include "fmtutils.h"
#include "sockutils.h" // for socket calls
#include "portability.h"
#include "rpcap-protocol.h"
#include "daemon.h"
#include "log.h"
//
// Timeout, in seconds, when we're waiting for a client to send us an
// authentication request; if they don't send us a request within that
// interval, we drop the connection, so we don't stay stuck forever.
//
#define RPCAP_TIMEOUT_INIT 90
//
// Timeout, in seconds, when we're waiting for an authenticated client
// to send us a request, if a capture isn't in progress; if they don't
// send us a request within that interval, we drop the connection, so
// we don't stay stuck forever.
//
#define RPCAP_TIMEOUT_RUNTIME 180
//
// Time, in seconds, that we wait after a failed authentication attempt
// before processing the next request; this prevents a client from
// rapidly trying different accounts or passwords.
//
#define RPCAP_SUSPEND_WRONGAUTH 1
// Parameters for the service loop.
struct daemon_slpars
{
SOCKET sockctrl; //!< SOCKET ID of the control connection
int isactive; //!< Not null if the daemon has to run in active mode
int nullAuthAllowed; //!< '1' if we permit NULL authentication, '0' otherwise
};
//
// Data for a session managed by a thread.
// It includes both a Boolean indicating whether we *have* a thread,
// and a platform-dependent (UN*X vs. Windows) identifier for the
// thread; on Windows, we could use an invalid handle to indicate
// that we don't have a thread, but there *is* no portable "no thread"
// value for a pthread_t on UN*X.
//
struct session {
SOCKET sockctrl;
SOCKET sockdata;
uint8 protocol_version;
pcap_t *fp;
unsigned int TotCapt;
int have_thread;
#ifdef _WIN32
HANDLE thread;
#else
pthread_t thread;
#endif
};
// Locally defined functions
static int daemon_msg_err(SOCKET sockctrl, uint32 plen);
static int daemon_msg_auth_req(struct daemon_slpars *pars, uint32 plen);
static int daemon_AuthUserPwd(char *username, char *password, char *errbuf);
static int daemon_msg_findallif_req(uint8 ver, struct daemon_slpars *pars,
uint32 plen);
static int daemon_msg_open_req(uint8 ver, struct daemon_slpars *pars,
uint32 plen, char *source, size_t sourcelen);
static int daemon_msg_startcap_req(uint8 ver, struct daemon_slpars *pars,
uint32 plen, char *source, struct session **sessionp,
struct rpcap_sampling *samp_param);
static int daemon_msg_endcap_req(uint8 ver, struct daemon_slpars *pars,
struct session *session);
static int daemon_msg_updatefilter_req(uint8 ver, struct daemon_slpars *pars,
struct session *session, uint32 plen);
static int daemon_unpackapplyfilter(SOCKET sockctrl, struct session *session, uint32 *plenp, char *errbuf);
static int daemon_msg_stats_req(uint8 ver, struct daemon_slpars *pars,
struct session *session, uint32 plen, struct pcap_stat *stats,
unsigned int svrcapt);
static int daemon_msg_setsampling_req(uint8 ver, struct daemon_slpars *pars,
uint32 plen, struct rpcap_sampling *samp_param);
static void daemon_seraddr(struct sockaddr_storage *sockaddrin, struct rpcap_sockaddr *sockaddrout);
#ifdef _WIN32
static unsigned __stdcall daemon_thrdatamain(void *ptr);
#else
static void *daemon_thrdatamain(void *ptr);
static void noop_handler(int sign);
#endif
static int rpcapd_recv_msg_header(SOCKET sock, struct rpcap_header *headerp);
static int rpcapd_recv(SOCKET sock, char *buffer, size_t toread, uint32 *plen, char *errmsgbuf);
static int rpcapd_discard(SOCKET sock, uint32 len);
static void session_close(struct session *);
static int is_url(const char *source);
int
daemon_serviceloop(SOCKET sockctrl, int isactive, char *passiveClients,
int nullAuthAllowed)
{
struct daemon_slpars pars; // service loop parameters
char errbuf[PCAP_ERRBUF_SIZE + 1]; // keeps the error string, prior to be printed
char errmsgbuf[PCAP_ERRBUF_SIZE + 1]; // buffer for errors to send to the client
int host_port_check_status;
int nrecv;
struct rpcap_header header; // RPCAP message general header
uint32 plen; // payload length from header
int authenticated = 0; // 1 if the client has successfully authenticated
char source[PCAP_BUF_SIZE+1]; // keeps the string that contains the interface to open
int got_source = 0; // 1 if we've gotten the source from an open request
#ifndef _WIN32
struct sigaction action;
#endif
struct session *session = NULL; // struct session main variable
const char *msg_type_string; // string for message type
int client_told_us_to_close = 0; // 1 if the client told us to close the capture
// needed to save the values of the statistics
struct pcap_stat stats;
unsigned int svrcapt;
struct rpcap_sampling samp_param; // in case sampling has been requested
// Structures needed for the select() call
fd_set rfds; // set of socket descriptors we have to check
struct timeval tv; // maximum time the select() can block waiting for data
int retval; // select() return value
*errbuf = 0; // Initialize errbuf
// Set parameters structure
pars.sockctrl = sockctrl;
pars.isactive = isactive; // active mode
pars.nullAuthAllowed = nullAuthAllowed;
//
// We have a connection.
//
// If it's a passive mode connection, check whether the connecting
// host is among the ones allowed.
//
// In either case, we were handed a copy of the host list; free it
// as soon as we're done with it.
//
if (pars.isactive)
{
// Nothing to do.
free(passiveClients);
passiveClients = NULL;
}
else
{
struct sockaddr_storage from;
socklen_t fromlen;
//
// Get the address of the other end of the connection.
//
fromlen = sizeof(struct sockaddr_storage);
if (getpeername(pars.sockctrl, (struct sockaddr *)&from,
&fromlen) == -1)
{
sock_geterror("getpeername()", errmsgbuf, PCAP_ERRBUF_SIZE);
if (rpcap_senderror(pars.sockctrl, 0, PCAP_ERR_NETW, errmsgbuf, errbuf) == -1)
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
//
// Are they in the list of host/port combinations we allow?
//
host_port_check_status = sock_check_hostlist(passiveClients, RPCAP_HOSTLIST_SEP, &from, errmsgbuf, PCAP_ERRBUF_SIZE);
free(passiveClients);
passiveClients = NULL;
if (host_port_check_status < 0)
{
if (host_port_check_status == -2) {
//
// We got an error; log it.
//
rpcapd_log(LOGPRIO_ERROR, "%s", errmsgbuf);
}
//
// Sorry, we can't let you in.
//
if (rpcap_senderror(pars.sockctrl, 0, PCAP_ERR_HOSTNOAUTH, errmsgbuf, errbuf) == -1)
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
}
#ifndef _WIN32
//
// Catch SIGUSR1, but do nothing. We use it to interrupt the
// capture thread to break it out of a loop in which it's
// blocked waiting for packets to arrive.
//
// We don't want interrupted system calls to restart, so that
// the read routine for the pcap_t gets EINTR rather than
// restarting if it's blocked in a system call.
//
memset(&action, 0, sizeof (action));
action.sa_handler = noop_handler;
action.sa_flags = 0;
sigemptyset(&action.sa_mask);
sigaction(SIGUSR1, &action, NULL);
#endif
//
// The client must first authenticate; loop until they send us a
// message with a version we support and credentials we accept,
// they send us a close message indicating that they're giving up,
// or we get a network error or other fatal error.
//
while (!authenticated)
{
//
// If we're not in active mode, we use select(), with a
// timeout, to wait for an authentication request; if
// the timeout expires, we drop the connection, so that
// a client can't just connect to us and leave us
// waiting forever.
//
if (!pars.isactive)
{
FD_ZERO(&rfds);
// We do not have to block here
tv.tv_sec = RPCAP_TIMEOUT_INIT;
tv.tv_usec = 0;
FD_SET(pars.sockctrl, &rfds);
retval = select(pars.sockctrl + 1, &rfds, NULL, NULL, &tv);
if (retval == -1)
{
sock_geterror("select() failed", errmsgbuf, PCAP_ERRBUF_SIZE);
if (rpcap_senderror(pars.sockctrl, 0, PCAP_ERR_NETW, errmsgbuf, errbuf) == -1)
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// The timeout has expired without a request from the client,
// so drop the connection.
if (retval == 0)
{
if (rpcap_senderror(pars.sockctrl, 0, PCAP_ERR_INITTIMEOUT, "The RPCAP initial timeout has expired", errbuf) == -1)
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
}
//
// Read the message header from the client.
//
nrecv = rpcapd_recv_msg_header(pars.sockctrl, &header);
if (nrecv == -1)
{
// Fatal error.
goto end;
}
if (nrecv == -2)
{
// Client closed the connection.
goto end;
}
plen = header.plen;
//
// While we're in the authentication phase, all requests
// must use version 0.
//
if (header.ver != 0)
{
//
// Send it back to them with their version.
//
if (rpcap_senderror(pars.sockctrl, header.ver,
PCAP_ERR_WRONGVER,
"RPCAP version in requests in the authentication phase must be 0",
errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// Discard the rest of the message and drop the
// connection.
(void)rpcapd_discard(pars.sockctrl, plen);
goto end;
}
switch (header.type)
{
case RPCAP_MSG_AUTH_REQ:
retval = daemon_msg_auth_req(&pars, plen);
if (retval == -1)
{
// Fatal error; a message has
// been logged, so just give up.
goto end;
}
if (retval == -2)
{
// Non-fatal error; we sent back
// an error message, so let them
// try again.
continue;
}
// OK, we're authenticated; we sent back
// a reply, so start serving requests.
authenticated = 1;
break;
case RPCAP_MSG_CLOSE:
//
// The client is giving up.
// Discard the rest of the message, if
// there is anything more.
//
(void)rpcapd_discard(pars.sockctrl, plen);
// We're done with this client.
goto end;
case RPCAP_MSG_ERROR:
// Log this and close the connection?
// XXX - is this what happens in active
// mode, where *we* initiate the
// connection, and the client gives us
// an error message rather than a "let
// me log in" message, indicating that
// we're not allowed to connect to them?
(void)daemon_msg_err(pars.sockctrl, plen);
goto end;
case RPCAP_MSG_FINDALLIF_REQ:
case RPCAP_MSG_OPEN_REQ:
case RPCAP_MSG_STARTCAP_REQ:
case RPCAP_MSG_UPDATEFILTER_REQ:
case RPCAP_MSG_STATS_REQ:
case RPCAP_MSG_ENDCAP_REQ:
case RPCAP_MSG_SETSAMPLING_REQ:
//
// These requests can't be sent until
// the client is authenticated.
//
msg_type_string = rpcap_msg_type_string(header.type);
if (msg_type_string != NULL)
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "%s request sent before authentication was completed", msg_type_string);
}
else
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Message of type %u sent before authentication was completed", header.type);
}
if (rpcap_senderror(pars.sockctrl, header.ver,
PCAP_ERR_WRONGMSG, errmsgbuf, errbuf) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// Discard the rest of the message.
if (rpcapd_discard(pars.sockctrl, plen) == -1)
{
// Network error.
goto end;
}
break;
case RPCAP_MSG_PACKET:
case RPCAP_MSG_FINDALLIF_REPLY:
case RPCAP_MSG_OPEN_REPLY:
case RPCAP_MSG_STARTCAP_REPLY:
case RPCAP_MSG_UPDATEFILTER_REPLY:
case RPCAP_MSG_AUTH_REPLY:
case RPCAP_MSG_STATS_REPLY:
case RPCAP_MSG_ENDCAP_REPLY:
case RPCAP_MSG_SETSAMPLING_REPLY:
//
// These are server-to-client messages.
//
msg_type_string = rpcap_msg_type_string(header.type);
if (msg_type_string != NULL)
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Server-to-client message %s received from client", msg_type_string);
}
else
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Server-to-client message of type %u received from client", header.type);
}
if (rpcap_senderror(pars.sockctrl, header.ver,
PCAP_ERR_WRONGMSG, errmsgbuf, errbuf) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// Discard the rest of the message.
if (rpcapd_discard(pars.sockctrl, plen) == -1)
{
// Fatal error.
goto end;
}
break;
default:
//
// Unknown message type.
//
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Unknown message type %u", header.type);
if (rpcap_senderror(pars.sockctrl, header.ver,
PCAP_ERR_WRONGMSG, errmsgbuf, errbuf) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// Discard the rest of the message.
if (rpcapd_discard(pars.sockctrl, plen) == -1)
{
// Fatal error.
goto end;
}
break;
}
}
//
// OK, the client has authenticated itself, and we can start
// processing regular requests from it.
//
//
// We don't have any statistics yet.
//
stats.ps_ifdrop = 0;
stats.ps_recv = 0;
stats.ps_drop = 0;
svrcapt = 0;
//
// Service requests.
//
for (;;)
{
errbuf[0] = 0; // clear errbuf
// Avoid zombie connections; check whether the connection is open but no
// commands have been performed for more than RPCAP_TIMEOUT_RUNTIME.
// Conditions:
// - we have to be in normal mode (not active mode)
// - if the device is open, we must not be in the middle of a capture (session->sockdata)
// - if the device is closed, we always have to check whether a new command arrives
//
// Be careful: the capture may have been started, but an error occurred (so
// session != NULL, but sockdata is 0).
if ((!pars.isactive) && (session == NULL || session->sockdata == 0))
{
// Check for the runtime timeout
FD_ZERO(&rfds);
// We do not have to block here
tv.tv_sec = RPCAP_TIMEOUT_RUNTIME;
tv.tv_usec = 0;
FD_SET(pars.sockctrl, &rfds);
retval = select(pars.sockctrl + 1, &rfds, NULL, NULL, &tv);
if (retval == -1)
{
sock_geterror("select() failed", errmsgbuf, PCAP_ERRBUF_SIZE);
if (rpcap_senderror(pars.sockctrl, 0,
PCAP_ERR_NETW, errmsgbuf, errbuf) == -1)
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// The timeout has expired without a request from the client,
// so drop the connection.
if (retval == 0)
{
if (rpcap_senderror(pars.sockctrl, 0,
PCAP_ERR_INITTIMEOUT,
"The RPCAP initial timeout has expired",
errbuf) == -1)
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
}
//
// Read the message header from the client.
//
nrecv = rpcapd_recv_msg_header(pars.sockctrl, &header);
if (nrecv == -1)
{
// Fatal error.
goto end;
}
if (nrecv == -2)
{
// Client closed the connection.
goto end;
}
plen = header.plen;
//
// Did the client specify a protocol version that we
// support?
//
if (!RPCAP_VERSION_IS_SUPPORTED(header.ver))
{
//
// Tell them it's not a supported version.
// Send the error message with their version,
// so they don't reject it as having the wrong
// version.
//
if (rpcap_senderror(pars.sockctrl,
header.ver, PCAP_ERR_WRONGVER,
"RPCAP version in message isn't supported by the server",
errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// Discard the rest of the message.
(void)rpcapd_discard(pars.sockctrl, plen);
// Give up on them.
goto end;
}
switch (header.type)
{
case RPCAP_MSG_ERROR: // The other endpoint reported an error
{
(void)daemon_msg_err(pars.sockctrl, plen);
// Do nothing; just exit; the error message is already in errbuf
// XXX - actually exit....
break;
}
case RPCAP_MSG_FINDALLIF_REQ:
{
if (daemon_msg_findallif_req(header.ver, &pars, plen) == -1)
{
// Fatal error; a message has
// been logged, so just give up.
goto end;
}
break;
}
case RPCAP_MSG_OPEN_REQ:
{
//
// Process the open request, and keep
// the source from it, for use later
// when the capture is started.
//
// XXX - we don't care if the client sends
// us multiple open requests, the last
// one wins.
//
retval = daemon_msg_open_req(header.ver, &pars,
plen, source, sizeof(source));
if (retval == -1)
{
// Fatal error; a message has
// been logged, so just give up.
goto end;
}
got_source = 1;
break;
}
case RPCAP_MSG_STARTCAP_REQ:
{
if (!got_source)
{
// They never told us what device
// to capture on!
if (rpcap_senderror(pars.sockctrl,
header.ver,
PCAP_ERR_STARTCAPTURE,
"No capture device was specified",
errbuf) == -1)
{
// Fatal error; log an
// error and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
if (rpcapd_discard(pars.sockctrl, plen) == -1)
{
goto end;
}
break;
}
if (daemon_msg_startcap_req(header.ver, &pars,
plen, source, &session, &samp_param) == -1)
{
// Fatal error; a message has
// been logged, so just give up.
goto end;
}
break;
}
case RPCAP_MSG_UPDATEFILTER_REQ:
{
if (session)
{
if (daemon_msg_updatefilter_req(header.ver,
&pars, session, plen) == -1)
{
// Fatal error; a message has
// been logged, so just give up.
goto end;
}
}
else
{
if (rpcap_senderror(pars.sockctrl,
header.ver, PCAP_ERR_UPDATEFILTER,
"Device not opened. Cannot update filter",
errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
}
break;
}
case RPCAP_MSG_CLOSE: // The other endpoint closed the pcap session
{
//
// Indicate to our caller that the client
// closed the control connection.
// This is used only in case of active mode.
//
client_told_us_to_close = 1;
rpcapd_log(LOGPRIO_DEBUG, "The other end system asked to close the connection.");
goto end;
}
case RPCAP_MSG_STATS_REQ:
{
if (daemon_msg_stats_req(header.ver, &pars,
session, plen, &stats, svrcapt) == -1)
{
// Fatal error; a message has
// been logged, so just give up.
goto end;
}
break;
}
case RPCAP_MSG_ENDCAP_REQ: // The other endpoint closed the current capture session
{
if (session)
{
// Save statistics (we may need them in the future)
if (pcap_stats(session->fp, &stats))
{
svrcapt = session->TotCapt;
}
else
{
stats.ps_ifdrop = 0;
stats.ps_recv = 0;
stats.ps_drop = 0;
svrcapt = 0;
}
if (daemon_msg_endcap_req(header.ver,
&pars, session) == -1)
{
free(session);
session = NULL;
// Fatal error; a message has
// been logged, so just give up.
goto end;
}
free(session);
session = NULL;
}
else
{
rpcap_senderror(pars.sockctrl,
header.ver, PCAP_ERR_ENDCAPTURE,
"Device not opened. Cannot close the capture",
errbuf);
}
break;
}
case RPCAP_MSG_SETSAMPLING_REQ:
{
if (daemon_msg_setsampling_req(header.ver,
&pars, plen, &samp_param) == -1)
{
// Fatal error; a message has
// been logged, so just give up.
goto end;
}
break;
}
case RPCAP_MSG_AUTH_REQ:
{
//
// We're already authenticated; you don't
// get to reauthenticate.
//
rpcapd_log(LOGPRIO_INFO, "The client sent an RPCAP_MSG_AUTH_REQ message after authentication was completed");
if (rpcap_senderror(pars.sockctrl, header.ver,
PCAP_ERR_WRONGMSG,
"RPCAP_MSG_AUTH_REQ request sent after authentication was completed",
errbuf) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// Discard the rest of the message.
if (rpcapd_discard(pars.sockctrl, plen) == -1)
{
// Fatal error.
goto end;
}
goto end;
case RPCAP_MSG_PACKET:
case RPCAP_MSG_FINDALLIF_REPLY:
case RPCAP_MSG_OPEN_REPLY:
case RPCAP_MSG_STARTCAP_REPLY:
case RPCAP_MSG_UPDATEFILTER_REPLY:
case RPCAP_MSG_AUTH_REPLY:
case RPCAP_MSG_STATS_REPLY:
case RPCAP_MSG_ENDCAP_REPLY:
case RPCAP_MSG_SETSAMPLING_REPLY:
//
// These are server-to-client messages.
//
msg_type_string = rpcap_msg_type_string(header.type);
if (msg_type_string != NULL)
{
rpcapd_log(LOGPRIO_INFO, "The client sent a %s server-to-client message", msg_type_string);
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Server-to-client message %s received from client", msg_type_string);
}
else
{
rpcapd_log(LOGPRIO_INFO, "The client sent a server-to-client message of type %u", header.type);
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Server-to-client message of type %u received from client", header.type);
}
if (rpcap_senderror(pars.sockctrl, header.ver,
PCAP_ERR_WRONGMSG, errmsgbuf, errbuf) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// Discard the rest of the message.
if (rpcapd_discard(pars.sockctrl, plen) == -1)
{
// Fatal error.
goto end;
}
goto end;
default:
//
// Unknown message type.
//
rpcapd_log(LOGPRIO_INFO, "The client sent a message of type %u", header.type);
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Unknown message type %u", header.type);
if (rpcap_senderror(pars.sockctrl, header.ver,
PCAP_ERR_WRONGMSG, errmsgbuf, errbuf) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto end;
}
// Discard the rest of the message.
if (rpcapd_discard(pars.sockctrl, plen) == -1)
{
// Fatal error.
goto end;
}
goto end;
}
}
}
end:
// The service loop is finishing up.
// If we have a capture session running, close it.
if (session)
{
session_close(session);
free(session);
session = NULL;
}
sock_close(sockctrl, NULL, 0);
// Print message and return
rpcapd_log(LOGPRIO_DEBUG, "I'm exiting from the child loop");
return client_told_us_to_close;
}
/*
* This handles the RPCAP_MSG_ERR message.
*/
static int
daemon_msg_err(SOCKET sockctrl, uint32 plen)
{
char errbuf[PCAP_ERRBUF_SIZE];
char remote_errbuf[PCAP_ERRBUF_SIZE];
if (plen >= PCAP_ERRBUF_SIZE)
{
/*
* Message is too long; just read as much of it as we
* can into the buffer provided, and discard the rest.
*/
if (sock_recv(sockctrl, remote_errbuf, PCAP_ERRBUF_SIZE - 1,
SOCK_RECEIVEALL_YES|SOCK_EOF_IS_ERROR, errbuf,
PCAP_ERRBUF_SIZE) == -1)
{
// Network error.
rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf);
return -1;
}
if (rpcapd_discard(sockctrl, plen - (PCAP_ERRBUF_SIZE - 1)) == -1)
{
// Network error.
return -1;
}
/*
* Null-terminate it.
*/
remote_errbuf[PCAP_ERRBUF_SIZE - 1] = '\0';
}
else if (plen == 0)
{
/* Empty error string. */
remote_errbuf[0] = '\0';
}
else
{
if (sock_recv(sockctrl, remote_errbuf, plen,
SOCK_RECEIVEALL_YES|SOCK_EOF_IS_ERROR, errbuf,
PCAP_ERRBUF_SIZE) == -1)
{
// Network error.
rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf);
return -1;
}
/*
* Null-terminate it.
*/
remote_errbuf[plen] = '\0';
}
// Log the message
rpcapd_log(LOGPRIO_ERROR, "Error from client: %s", remote_errbuf);
return 0;
}
/*
* This handles the RPCAP_MSG_AUTH_REQ message.
* It checks if the authentication credentials supplied by the user are valid.
*
* This function is called if the daemon receives a RPCAP_MSG_AUTH_REQ
* message in its authentication loop. It reads the body of the
* authentication message from the network and checks whether the
* credentials are valid.
*
 * \param pars: the service loop parameters, including the socket for
 * the control connection and whether NULL authentication is allowed.
 *
 * \param plen: the payload length of the message, as given in its header.
 *
 * \return '0' if everything is fine, '-1' if an unrecoverable error occurred,
 * or '-2' if the authentication failed. On failure, an error reply has
 * already been sent to the client.
*/
static int
daemon_msg_auth_req(struct daemon_slpars *pars, uint32 plen)
{
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client
int status;
struct rpcap_auth auth; // RPCAP authentication header
char sendbuf[RPCAP_NETBUF_SIZE]; // temporary buffer in which data to be sent is buffered
int sendbufidx = 0; // index which keeps the number of bytes currently buffered
struct rpcap_authreply *authreply; // authentication reply message
status = rpcapd_recv(pars->sockctrl, (char *) &auth, sizeof(struct rpcap_auth), &plen, errmsgbuf);
if (status == -1)
{
return -1;
}
if (status == -2)
{
goto error;
}
switch (ntohs(auth.type))
{
case RPCAP_RMTAUTH_NULL:
{
if (!pars->nullAuthAllowed)
{
// Send the client an error reply.
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE,
"Authentication failed; NULL authentication not permitted.");
if (rpcap_senderror(pars->sockctrl, 0,
PCAP_ERR_AUTH_FAILED, errmsgbuf, errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
goto error_noreply;
}
break;
}
case RPCAP_RMTAUTH_PWD:
{
char *username, *passwd;
uint32 usernamelen, passwdlen;
usernamelen = ntohs(auth.slen1);
username = (char *) malloc (usernamelen + 1);
if (username == NULL)
{
pcap_fmt_errmsg_for_errno(errmsgbuf,
PCAP_ERRBUF_SIZE, errno, "malloc() failed");
goto error;
}
status = rpcapd_recv(pars->sockctrl, username, usernamelen, &plen, errmsgbuf);
if (status == -1)
{
free(username);
return -1;
}
if (status == -2)
{
free(username);
goto error;
}
username[usernamelen] = '\0';
passwdlen = ntohs(auth.slen2);
passwd = (char *) malloc (passwdlen + 1);
if (passwd == NULL)
{
pcap_fmt_errmsg_for_errno(errmsgbuf,
PCAP_ERRBUF_SIZE, errno, "malloc() failed");
free(username);
goto error;
}
status = rpcapd_recv(pars->sockctrl, passwd, passwdlen, &plen, errmsgbuf);
if (status == -1)
{
free(username);
free(passwd);
return -1;
}
if (status == -2)
{
free(username);
free(passwd);
goto error;
}
passwd[passwdlen] = '\0';
if (daemon_AuthUserPwd(username, passwd, errmsgbuf))
{
//
// Authentication failed. Let the client
// know.
//
free(username);
free(passwd);
if (rpcap_senderror(pars->sockctrl, 0,
PCAP_ERR_AUTH_FAILED, errmsgbuf, errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
//
// Suspend for 1 second, so that they can't
// hammer us with repeated tries with an
// attack such as a dictionary attack.
//
// WARNING: this delay is inserted only
// at this point; if the client closes the
// connection and reconnects, the suspension
// time does not have any effect.
//
sleep_secs(RPCAP_SUSPEND_WRONGAUTH);
goto error_noreply;
}
free(username);
free(passwd);
break;
}
default:
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE,
"Authentication type not recognized.");
if (rpcap_senderror(pars->sockctrl, 0,
PCAP_ERR_AUTH_TYPE_NOTSUP, errmsgbuf, errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
goto error_noreply;
}
// The authentication succeeded; let the client know.
if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL, &sendbufidx,
RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
rpcap_createhdr((struct rpcap_header *) sendbuf, 0,
RPCAP_MSG_AUTH_REPLY, 0, sizeof(struct rpcap_authreply));
authreply = (struct rpcap_authreply *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_authreply), NULL, &sendbufidx,
RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
//
// Indicate to our peer what versions we support.
//
memset(authreply, 0, sizeof(struct rpcap_authreply));
authreply->minvers = RPCAP_MIN_VERSION;
authreply->maxvers = RPCAP_MAX_VERSION;
// Send the reply.
if (sock_send(pars->sockctrl, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
// Check if all the data has been read; if not, discard the excess data
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
return -1;
}
return 0;
error:
if (rpcap_senderror(pars->sockctrl, 0, PCAP_ERR_AUTH, errmsgbuf,
errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
error_noreply:
// Check if all the data has been read; if not, discard the excess data
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
return -1;
}
return -2;
}
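/*
 * For illustration only: how a client might consume the minvers/maxvers
 * fields that daemon_msg_auth_req() sends in its authentication reply.
 * This sketch is not part of the daemon; it assumes the client mirrors
 * the RPCAP_MIN_VERSION/RPCAP_MAX_VERSION constants.
 */
#if 0
static int
rpcap_pick_version(uint8 server_min, uint8 server_max, uint8 *chosen)
{
	// No overlap between the two supported ranges: negotiation fails.
	if (server_min > RPCAP_MAX_VERSION || server_max < RPCAP_MIN_VERSION)
		return -1;
	// Use the highest version both sides support.
	*chosen = (server_max < RPCAP_MAX_VERSION) ? server_max : RPCAP_MAX_VERSION;
	return 0;
}
#endif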
static int
daemon_AuthUserPwd(char *username, char *password, char *errbuf)
{
#ifdef _WIN32
/*
* Warning: the user which launches the process must have the
* SE_TCB_NAME right.
* This corresponds to have the "Act as part of the Operating System"
* turned on (administrative tools, local security settings, local
* policies, user right assignment)
* However, it seems to me that if you run it as a service, this
* right should be provided by default.
*
* XXX - hopefully, this returns errors such as ERROR_LOGON_FAILURE,
* which merely indicates that the user name or password is
* incorrect, not whether it's the user name or the password
* that's incorrect, so a client that's trying to brute-force
* accounts doesn't know whether it's the user name or the
* password that's incorrect, so it doesn't know whether to
* stop trying to log in with a given user name and move on
* to another user name.
*/
DWORD error;
HANDLE Token;
char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to log
if (LogonUser(username, ".", password, LOGON32_LOGON_NETWORK, LOGON32_PROVIDER_DEFAULT, &Token) == 0)
{
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "Authentication failed");
error = GetLastError();
if (error != ERROR_LOGON_FAILURE)
{
// Some error other than an authentication error;
// log it.
pcap_fmt_errmsg_for_win32_err(errmsgbuf,
PCAP_ERRBUF_SIZE, error, "LogonUser() failed");
rpcapd_log(LOGPRIO_ERROR, "%s", errmsgbuf);
}
return -1;
}
// This call should change the current thread to the selected user.
// I didn't test it.
if (ImpersonateLoggedOnUser(Token) == 0)
{
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "Authentication failed");
pcap_fmt_errmsg_for_win32_err(errmsgbuf, PCAP_ERRBUF_SIZE,
GetLastError(), "ImpersonateLoggedOnUser() failed");
rpcapd_log(LOGPRIO_ERROR, "%s", errmsgbuf);
CloseHandle(Token);
return -1;
}
CloseHandle(Token);
return 0;
#else
/*
* See
*
* http://www.unixpapa.com/incnote/passwd.html
*
* We use the Solaris/Linux shadow password authentication if
* we have getspnam(), otherwise we just do traditional
* authentication, which, on some platforms, might work, even
* with shadow passwords, if we're running as root. Traditional
 * authentication won't work if we're not running as root, as
* I think these days all UN*Xes either won't return the password
* at all with getpwnam() or will only do so if you're root.
*
* XXX - perhaps what we *should* be using is PAM, if we have
* it. That might hide all the details of username/password
* authentication, whether it's done with a visible-to-root-
* only password database or some other authentication mechanism,
* behind its API.
*/
int error;
struct passwd *user;
char *user_password;
#ifdef HAVE_GETSPNAM
struct spwd *usersp;
#endif
char *crypt_password;
// This call is needed to get the uid
if ((user = getpwnam(username)) == NULL)
{
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "Authentication failed");
return -1;
}
#ifdef HAVE_GETSPNAM
// This call is needed to get the password; otherwise 'x' is returned
if ((usersp = getspnam(username)) == NULL)
{
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "Authentication failed");
return -1;
}
user_password = usersp->sp_pwdp;
#else
/*
* XXX - what about other platforms?
* The unixpapa.com page claims this Just Works on *BSD if you're
* running as root - it's from 2000, so it doesn't indicate whether
* macOS (which didn't come out until 2001, under the name Mac OS
* X) behaves like the *BSDs or not, and might also work on AIX.
* HP-UX does something else.
*
* Again, hopefully PAM hides all that.
*/
user_password = user->pw_passwd;
#endif
//
// The Single UNIX Specification says that if crypt() fails it
// sets errno, but some implementations that haven't been run
// through the SUS test suite might not do so.
//
errno = 0;
crypt_password = crypt(password, user_password);
if (crypt_password == NULL)
{
error = errno;
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "Authentication failed");
if (error == 0)
{
// It didn't set errno.
rpcapd_log(LOGPRIO_ERROR, "crypt() failed");
}
else
{
rpcapd_log(LOGPRIO_ERROR, "crypt() failed: %s",
strerror(error));
}
return -1;
}
if (strcmp(user_password, crypt_password) != 0)
{
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "Authentication failed");
return -1;
}
if (setuid(user->pw_uid))
{
error = errno;
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
error, "setuid");
rpcapd_log(LOGPRIO_ERROR, "setuid() failed: %s",
strerror(error));
return -1;
}
/* if (setgid(user->pw_gid))
{
error = errno;
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "setgid");
rpcapd_log(LOGPRIO_ERROR, "setgid() failed: %s",
strerror(error));
return -1;
}
*/
return 0;
#endif
}
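/*
 * A minimal sketch of the PAM-based alternative that the comment in
 * daemon_AuthUserPwd() suggests. Assumptions: <security/pam_appl.h>
 * is available and a PAM service named "rpcapd" is configured; this
 * is not built into the daemon.
 */
#if 0
#include <security/pam_appl.h>

struct pam_cred { const char *password; };

static int
pam_conv_cb(int num_msg, const struct pam_message **msg,
    struct pam_response **resp, void *appdata_ptr)
{
	struct pam_cred *cred = appdata_ptr;
	struct pam_response *replies = calloc(num_msg, sizeof(*replies));
	int i;

	if (replies == NULL)
		return PAM_CONV_ERR;
	for (i = 0; i < num_msg; i++)
	{
		// Answer hidden prompts (the password prompt) with our credential.
		if (msg[i]->msg_style == PAM_PROMPT_ECHO_OFF)
			replies[i].resp = strdup(cred->password);
	}
	*resp = replies;
	return PAM_SUCCESS;
}

static int
daemon_AuthUserPwd_pam(const char *username, const char *password)
{
	struct pam_cred cred = { password };
	struct pam_conv conv = { pam_conv_cb, &cred };
	pam_handle_t *pamh = NULL;
	int ret;

	if (pam_start("rpcapd", username, &conv, &pamh) != PAM_SUCCESS)
		return -1;
	ret = pam_authenticate(pamh, 0);
	if (ret == PAM_SUCCESS)
		ret = pam_acct_mgmt(pamh, 0);	// account not expired/locked?
	pam_end(pamh, ret);
	return (ret == PAM_SUCCESS) ? 0 : -1;
}
#endif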
static int
daemon_msg_findallif_req(uint8 ver, struct daemon_slpars *pars, uint32 plen)
{
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client
char sendbuf[RPCAP_NETBUF_SIZE]; // temporary buffer in which data to be sent is buffered
int sendbufidx = 0; // index which keeps the number of bytes currently buffered
pcap_if_t *alldevs = NULL; // pointer to the header of the interface chain
pcap_if_t *d; // temp pointer needed to scan the interface chain
struct pcap_addr *address; // pcap structure that keeps a network address of an interface
struct rpcap_findalldevs_if *findalldevs_if;// rpcap structure that packs all the data of an interface together
uint32 replylen; // length of reply payload
uint16 nif = 0; // counts the number of interfaces listed
// Discard the rest of the message; there shouldn't be any payload.
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
// Network error.
return -1;
}
// Retrieve the device list
if (pcap_findalldevs(&alldevs, errmsgbuf) == -1)
goto error;
if (alldevs == NULL)
{
if (rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_NOREMOTEIF,
"No interfaces found! Make sure libpcap/WinPcap is properly installed"
" and you have the right to access to the remote device.",
errbuf) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
return 0;
}
// This checks the number of interfaces and computes the total
// length of the payload.
replylen = 0;
for (d = alldevs; d != NULL; d = d->next)
{
nif++;
if (d->description)
replylen += strlen(d->description);
if (d->name)
replylen += strlen(d->name);
replylen += sizeof(struct rpcap_findalldevs_if);
for (address = d->addresses; address != NULL; address = address->next)
{
/*
* Send only IPv4 and IPv6 addresses over the wire.
*/
switch (address->addr->sa_family)
{
case AF_INET:
#ifdef AF_INET6
case AF_INET6:
#endif
replylen += (sizeof(struct rpcap_sockaddr) * 4);
break;
default:
break;
}
}
}
// RPCAP findalldevs command
if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf,
PCAP_ERRBUF_SIZE) == -1)
goto error;
rpcap_createhdr((struct rpcap_header *) sendbuf, ver,
RPCAP_MSG_FINDALLIF_REPLY, nif, replylen);
// send the interface list
for (d = alldevs; d != NULL; d = d->next)
{
uint16 lname, ldescr;
findalldevs_if = (struct rpcap_findalldevs_if *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_findalldevs_if), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
memset(findalldevs_if, 0, sizeof(struct rpcap_findalldevs_if));
if (d->description)
	ldescr = (uint16) strlen(d->description);
else
	ldescr = 0;
if (d->name)
	lname = (uint16) strlen(d->name);
else
	lname = 0;
findalldevs_if->desclen = htons(ldescr);
findalldevs_if->namelen = htons(lname);
findalldevs_if->flags = htonl(d->flags);
for (address = d->addresses; address != NULL; address = address->next)
{
/*
* Send only IPv4 and IPv6 addresses over the wire.
*/
switch (address->addr->sa_family)
{
case AF_INET:
#ifdef AF_INET6
case AF_INET6:
#endif
findalldevs_if->naddr++;
break;
default:
break;
}
}
findalldevs_if->naddr = htons(findalldevs_if->naddr);
if (sock_bufferize(d->name, lname, sendbuf, &sendbufidx,
RPCAP_NETBUF_SIZE, SOCKBUF_BUFFERIZE, errmsgbuf,
PCAP_ERRBUF_SIZE) == -1)
goto error;
if (sock_bufferize(d->description, ldescr, sendbuf, &sendbufidx,
RPCAP_NETBUF_SIZE, SOCKBUF_BUFFERIZE, errmsgbuf,
PCAP_ERRBUF_SIZE) == -1)
goto error;
// send all addresses
for (address = d->addresses; address != NULL; address = address->next)
{
struct rpcap_sockaddr *sockaddr;
/*
* Send only IPv4 and IPv6 addresses over the wire.
*/
switch (address->addr->sa_family)
{
case AF_INET:
#ifdef AF_INET6
case AF_INET6:
#endif
sockaddr = (struct rpcap_sockaddr *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_sockaddr), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
daemon_seraddr((struct sockaddr_storage *) address->addr, sockaddr);
sockaddr = (struct rpcap_sockaddr *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_sockaddr), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
daemon_seraddr((struct sockaddr_storage *) address->netmask, sockaddr);
sockaddr = (struct rpcap_sockaddr *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_sockaddr), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
daemon_seraddr((struct sockaddr_storage *) address->broadaddr, sockaddr);
sockaddr = (struct rpcap_sockaddr *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_sockaddr), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
daemon_seraddr((struct sockaddr_storage *) address->dstaddr, sockaddr);
break;
default:
break;
}
}
}
// We no longer need the device list. Free it.
pcap_freealldevs(alldevs);
// Now send out the buffered reply in a single send
if (sock_send(pars->sockctrl, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
return 0;
error:
if (alldevs)
pcap_freealldevs(alldevs);
if (rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_FINDALLIF,
errmsgbuf, errbuf) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
return 0;
}
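/*
 * For illustration only: the reply-building pattern used above, in
 * miniature and with error checks elided. SOCKBUF_CHECKONLY reserves
 * space (bounds-checking the index) for fields that are then filled
 * in place through a pointer into the buffer; SOCKBUF_BUFFERIZE also
 * copies the bytes; a single sock_send() then flushes the whole reply.
 * The four-byte payload here is a made-up example.
 */
#if 0
char buf[RPCAP_NETBUF_SIZE];
int idx = 0;
// Reserve room for the header; rpcap_createhdr() writes it in place.
sock_bufferize(NULL, sizeof(struct rpcap_header), NULL, &idx,
    RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE);
rpcap_createhdr((struct rpcap_header *) buf, ver,
    RPCAP_MSG_FINDALLIF_REPLY, 0, 4);
// Append four payload bytes by copying them into the buffer.
sock_bufferize("eth0", 4, buf, &idx, RPCAP_NETBUF_SIZE,
    SOCKBUF_BUFFERIZE, errmsgbuf, PCAP_ERRBUF_SIZE);
// One send flushes header and payload together.
sock_send(sockctrl, buf, idx, errbuf, PCAP_ERRBUF_SIZE);
#endif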
/*
\param plen: the length of the current message (needed in order to be able
to discard excess data in the message, if present)
*/
static int
daemon_msg_open_req(uint8 ver, struct daemon_slpars *pars, uint32 plen,
char *source, size_t sourcelen)
{
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client
pcap_t *fp; // pcap_t main variable
int nread;
char sendbuf[RPCAP_NETBUF_SIZE]; // temporary buffer in which data to be sent is buffered
int sendbufidx = 0; // index which keeps the number of bytes currently buffered
struct rpcap_openreply *openreply; // open reply message
if (plen > sourcelen - 1)
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Source string too long");
goto error;
}
nread = sock_recv(pars->sockctrl, source, plen,
SOCK_RECEIVEALL_YES|SOCK_EOF_IS_ERROR, errbuf, PCAP_ERRBUF_SIZE);
if (nread == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf);
return -1;
}
source[nread] = '\0';
plen -= nread;
// Is this a URL rather than a device?
// If so, reject it.
if (is_url(source))
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Source string refers to a remote device");
goto error;
}
// Open the selected device
// This is a fake open, since we do that only to get the needed parameters, then we close the device again
if ((fp = pcap_open_live(source,
1500 /* fake snaplen */,
0 /* no promiscuous mode */,
1000 /* fake timeout */,
errmsgbuf)) == NULL)
goto error;
// Now, I can send an RPCAP open reply message
if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL, &sendbufidx,
RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
rpcap_createhdr((struct rpcap_header *) sendbuf, ver,
RPCAP_MSG_OPEN_REPLY, 0, sizeof(struct rpcap_openreply));
openreply = (struct rpcap_openreply *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_openreply), NULL, &sendbufidx,
RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
memset(openreply, 0, sizeof(struct rpcap_openreply));
openreply->linktype = htonl(pcap_datalink(fp));
openreply->tzoff = 0; /* This is always 0 for live captures */
// We're done with the pcap_t.
pcap_close(fp);
// Send the reply.
if (sock_send(pars->sockctrl, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
return 0;
error:
if (rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_OPEN,
errmsgbuf, errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
// Check if all the data has been read; if not, discard the excess data
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
return -1;
}
return 0;
}
/*
\param plen: the length of the current message (needed in order to be able
to discard excess data in the message, if present)
*/
static int
daemon_msg_startcap_req(uint8 ver, struct daemon_slpars *pars, uint32 plen,
char *source, struct session **sessionp,
struct rpcap_sampling *samp_param _U_)
{
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client
char portdata[PCAP_BUF_SIZE]; // temp variable needed to derive the data port
char peerhost[PCAP_BUF_SIZE]; // temp variable needed to derive the host name of our peer
struct session *session = NULL; // saves state of session
int status;
char sendbuf[RPCAP_NETBUF_SIZE]; // temporary buffer in which data to be sent is buffered
int sendbufidx = 0; // index which keeps the number of bytes currently buffered
// socket-related variables
struct addrinfo hints; // temp, needed to open a socket connection
struct addrinfo *addrinfo; // temp, needed to open a socket connection
struct sockaddr_storage saddr; // temp, needed to retrieve the network data port chosen on the local machine
socklen_t saddrlen; // temp, needed to retrieve the network data port chosen on the local machine
int ret; // return value from functions
// RPCAP-related variables
struct rpcap_startcapreq startcapreq; // start capture request message
struct rpcap_startcapreply *startcapreply; // start capture reply message
int serveropen_dp; // keeps who is going to open the data connection
addrinfo = NULL;
status = rpcapd_recv(pars->sockctrl, (char *) &startcapreq,
sizeof(struct rpcap_startcapreq), &plen, errmsgbuf);
if (status == -1)
{
goto fatal_error;
}
if (status == -2)
{
goto error;
}
startcapreq.flags = ntohs(startcapreq.flags);
// Create a session structure
session = malloc(sizeof(struct session));
if (session == NULL)
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Can't allocate session structure");
goto error;
}
session->sockdata = INVALID_SOCKET;
// We don't have a thread yet.
session->have_thread = 0;
//
// We *shouldn't* have to initialize the thread indicator
// itself, because the compiler *should* realize that we
// only use this if have_thread isn't 0, but we *do* have
// to do it, because not all compilers *do* realize that.
//
// There is no "invalid thread handle" value for a UN*X
// pthread_t, so we just zero it out.
//
#ifdef _WIN32
session->thread = INVALID_HANDLE_VALUE;
#else
memset(&session->thread, 0, sizeof(session->thread));
#endif
// Open the selected device
if ((session->fp = pcap_open_live(source,
ntohl(startcapreq.snaplen),
(startcapreq.flags & RPCAP_STARTCAPREQ_FLAG_PROMISC) ? 1 : 0 /* local device, other flags not needed */,
ntohl(startcapreq.read_timeout),
errmsgbuf)) == NULL)
goto error;
#if 0
// Apply sampling parameters
fp->rmt_samp.method = samp_param->method;
fp->rmt_samp.value = samp_param->value;
#endif
/*
The server opens the data connection toward the client if:
- we're using TCP and the client asked the server to open it,
- we're using UDP, or
- we're running in active mode.
*/
serveropen_dp = (startcapreq.flags & RPCAP_STARTCAPREQ_FLAG_SERVEROPEN) || (startcapreq.flags & RPCAP_STARTCAPREQ_FLAG_DGRAM) || pars->isactive;
/*
Gets the sockaddr structure referred to the other peer in the ctrl connection
We need that because:
- if we're in passive mode, we need to know the address family we want to use
(the same used for the ctrl socket)
- if we're in active mode, we need to know the network address of the other host
we want to connect to
*/
saddrlen = sizeof(struct sockaddr_storage);
if (getpeername(pars->sockctrl, (struct sockaddr *) &saddr, &saddrlen) == -1)
{
sock_geterror("getpeername()", errmsgbuf, PCAP_ERRBUF_SIZE);
goto error;
}
memset(&hints, 0, sizeof(struct addrinfo));
hints.ai_socktype = (startcapreq.flags & RPCAP_STARTCAPREQ_FLAG_DGRAM) ? SOCK_DGRAM : SOCK_STREAM;
hints.ai_family = saddr.ss_family;
// Now we have to create a new socket to send packets
if (serveropen_dp) // Data connection is opened by the server toward the client
{
pcap_snprintf(portdata, sizeof portdata, "%d", ntohs(startcapreq.portdata));
// Get the name of the other peer (needed to connect to that specific network address)
if (getnameinfo((struct sockaddr *) &saddr, saddrlen, peerhost,
sizeof(peerhost), NULL, 0, NI_NUMERICHOST))
{
sock_geterror("getnameinfo()", errmsgbuf, PCAP_ERRBUF_SIZE);
goto error;
}
if (sock_initaddress(peerhost, portdata, &hints, &addrinfo, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
if ((session->sockdata = sock_open(addrinfo, SOCKOPEN_CLIENT, 0, errmsgbuf, PCAP_ERRBUF_SIZE)) == INVALID_SOCKET)
goto error;
}
else // Data connection is opened by the client toward the server
{
hints.ai_flags = AI_PASSIVE;
// Let the server socket pick a free network port for us
if (sock_initaddress(NULL, "0", &hints, &addrinfo, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
if ((session->sockdata = sock_open(addrinfo, SOCKOPEN_SERVER, 1 /* max 1 connection in queue */, errmsgbuf, PCAP_ERRBUF_SIZE)) == INVALID_SOCKET)
goto error;
// get the complete sockaddr structure used in the data connection
saddrlen = sizeof(struct sockaddr_storage);
if (getsockname(session->sockdata, (struct sockaddr *) &saddr, &saddrlen) == -1)
{
sock_geterror("getsockname()", errmsgbuf, PCAP_ERRBUF_SIZE);
goto error;
}
// Get the local port the system picked up
if (getnameinfo((struct sockaddr *) &saddr, saddrlen, NULL,
0, portdata, sizeof(portdata), NI_NUMERICSERV))
{
sock_geterror("getnameinfo()", errmsgbuf, PCAP_ERRBUF_SIZE);
goto error;
}
}
// addrinfo is no longer used
freeaddrinfo(addrinfo);
addrinfo = NULL;
// Needed to send an error on the ctrl connection
session->sockctrl = pars->sockctrl;
session->protocol_version = ver;
// Now I can set the filter
ret = daemon_unpackapplyfilter(pars->sockctrl, session, &plen, errmsgbuf);
if (ret == -1)
{
// Fatal error. A message has been logged; just give up.
goto fatal_error;
}
if (ret == -2)
{
// Non-fatal error. Send an error message to the client.
goto error;
}
// Now, I can send an RPCAP start capture reply message
if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL, &sendbufidx,
RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
rpcap_createhdr((struct rpcap_header *) sendbuf, ver,
RPCAP_MSG_STARTCAP_REPLY, 0, sizeof(struct rpcap_startcapreply));
startcapreply = (struct rpcap_startcapreply *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_startcapreply), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
memset(startcapreply, 0, sizeof(struct rpcap_startcapreply));
startcapreply->bufsize = htonl(pcap_bufsize(session->fp));
if (!serveropen_dp)
{
unsigned short port = (unsigned short)strtoul(portdata,NULL,10);
startcapreply->portdata = htons(port);
}
if (sock_send(pars->sockctrl, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
goto fatal_error;
}
if (!serveropen_dp)
{
SOCKET socktemp; // We need another socket, since we're going to accept() a connection
// Connection creation
saddrlen = sizeof(struct sockaddr_storage);
socktemp = accept(session->sockdata, (struct sockaddr *) &saddr, &saddrlen);
if (socktemp == INVALID_SOCKET)
{
sock_geterror("accept()", errbuf, PCAP_ERRBUF_SIZE);
rpcapd_log(LOGPRIO_ERROR, "Accept of data connection failed: %s",
errbuf);
goto error;
}
// Now that I accepted the connection, the server socket is no longer needed
sock_close(session->sockdata, NULL, 0);
session->sockdata = socktemp;
}
// Now we have to create a new thread to receive packets
#ifdef _WIN32
session->thread = (HANDLE)_beginthreadex(NULL, 0, daemon_thrdatamain,
(void *) session, 0, NULL);
if (session->thread == 0)
{
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "Error creating the data thread");
goto error;
}
#else
ret = pthread_create(&session->thread, NULL, daemon_thrdatamain,
(void *) session);
if (ret != 0)
{
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
ret, "Error creating the data thread");
goto error;
}
#endif
session->have_thread = 1;
// Check if all the data has been read; if not, discard the excess data
if (rpcapd_discard(pars->sockctrl, plen) == -1)
goto fatal_error;
*sessionp = session;
return 0;
error:
//
// Not a fatal error, so send the client an error message and
// keep serving client requests.
//
*sessionp = NULL;
if (addrinfo)
freeaddrinfo(addrinfo);
if (session)
{
session_close(session);
free(session);
}
if (rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_STARTCAPTURE,
errmsgbuf, errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
// Check if all the data has been read; if not, discard the excess data
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
// Network error.
return -1;
}
return 0;
fatal_error:
//
// Fatal network error, so don't try to communicate with
// the client, just give up.
//
*sessionp = NULL;
if (session)
{
session_close(session);
free(session);
}
return -1;
}
static int
daemon_msg_endcap_req(uint8 ver, struct daemon_slpars *pars,
struct session *session)
{
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
struct rpcap_header header;
session_close(session);
rpcap_createhdr(&header, ver, RPCAP_MSG_ENDCAP_REPLY, 0, 0);
if (sock_send(pars->sockctrl, (char *) &header, sizeof(struct rpcap_header), errbuf, PCAP_ERRBUF_SIZE) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
return 0;
}
static int
daemon_unpackapplyfilter(SOCKET sockctrl, struct session *session, uint32 *plenp, char *errmsgbuf)
{
int status;
struct rpcap_filter filter;
struct rpcap_filterbpf_insn insn;
struct bpf_insn *bf_insn;
struct bpf_program bf_prog;
unsigned int i;
status = rpcapd_recv(sockctrl, (char *) &filter,
sizeof(struct rpcap_filter), plenp, errmsgbuf);
if (status == -1)
{
return -1;
}
if (status == -2)
{
return -2;
}
bf_prog.bf_len = ntohl(filter.nitems);
if (ntohs(filter.filtertype) != RPCAP_UPDATEFILTER_BPF)
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Only BPF/NPF filters are currently supported");
return -2;
}
bf_insn = (struct bpf_insn *) malloc (sizeof(struct bpf_insn) * bf_prog.bf_len);
if (bf_insn == NULL)
{
pcap_fmt_errmsg_for_errno(errmsgbuf, PCAP_ERRBUF_SIZE,
errno, "malloc() failed");
return -2;
}
bf_prog.bf_insns = bf_insn;
for (i = 0; i < bf_prog.bf_len; i++)
{
status = rpcapd_recv(sockctrl, (char *) &insn,
sizeof(struct rpcap_filterbpf_insn), plenp, errmsgbuf);
if (status == -1)
{
	free(bf_prog.bf_insns);
	return -1;
}
if (status == -2)
{
	free(bf_prog.bf_insns);
	return -2;
}
bf_insn->code = ntohs(insn.code);
bf_insn->jf = insn.jf;
bf_insn->jt = insn.jt;
bf_insn->k = ntohl(insn.k);
bf_insn++;
}
if (bpf_validate(bf_prog.bf_insns, bf_prog.bf_len) == 0)
{
	pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "The filter contains bogus instructions");
	free(bf_prog.bf_insns);
	return -2;
}
if (pcap_setfilter(session->fp, &bf_prog))
{
	pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "RPCAP error: %s", pcap_geterr(session->fp));
	free(bf_prog.bf_insns);
	return -2;
}
// pcap_setfilter() copies the program into the pcap_t, so our copy
// can be freed now that it has been installed.
free(bf_prog.bf_insns);
return 0;
}
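/*
 * For illustration only: the client-side counterpart of
 * daemon_unpackapplyfilter(). A sketch of how one instruction of a
 * compiled BPF program could be flattened into the wire format that
 * the loop above unpacks (multi-byte fields in network byte order);
 * not part of the daemon.
 */
#if 0
static void
pack_bpf_insn(const struct bpf_insn *in, struct rpcap_filterbpf_insn *out)
{
	out->code = htons(in->code);	// opcode
	out->jt = in->jt;		// jump-if-true offset (single byte, no swap)
	out->jf = in->jf;		// jump-if-false offset (single byte, no swap)
	out->k = htonl(in->k);		// generic operand
}
#endif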
static int
daemon_msg_updatefilter_req(uint8 ver, struct daemon_slpars *pars,
struct session *session, uint32 plen)
{
char errbuf[PCAP_ERRBUF_SIZE];
char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client
int ret; // status of daemon_unpackapplyfilter()
struct rpcap_header header; // keeps the answer to the updatefilter command
ret = daemon_unpackapplyfilter(pars->sockctrl, session, &plen, errmsgbuf);
if (ret == -1)
{
// Fatal error. A message has been logged; just give up.
return -1;
}
if (ret == -2)
{
// Non-fatal error. Send an error reply to the client.
goto error;
}
// Check if all the data has been read; if not, discard the excess data
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
// Network error.
return -1;
}
// A response is needed, otherwise the other host does not know that everything went well
rpcap_createhdr(&header, ver, RPCAP_MSG_UPDATEFILTER_REPLY, 0, 0);
if (sock_send(pars->sockctrl, (char *) &header, sizeof (struct rpcap_header), errbuf, PCAP_ERRBUF_SIZE) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
return 0;
error:
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
return -1;
}
rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_UPDATEFILTER,
errmsgbuf, NULL);
return 0;
}
/*!
\brief Receives the sampling parameters from the remote host and stores them in the rpcap_sampling structure.
*/
static int
daemon_msg_setsampling_req(uint8 ver, struct daemon_slpars *pars, uint32 plen,
struct rpcap_sampling *samp_param)
{
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
char errmsgbuf[PCAP_ERRBUF_SIZE];
struct rpcap_header header;
struct rpcap_sampling rpcap_samp;
int status;
status = rpcapd_recv(pars->sockctrl, (char *) &rpcap_samp, sizeof(struct rpcap_sampling), &plen, errmsgbuf);
if (status == -1)
{
return -1;
}
if (status == -2)
{
goto error;
}
// Save these settings so they can be applied when the capture starts
samp_param->method = rpcap_samp.method;
samp_param->value = ntohl(rpcap_samp.value);
// A response is needed, otherwise the other host does not know that everything went well
rpcap_createhdr(&header, ver, RPCAP_MSG_SETSAMPLING_REPLY, 0, 0);
if (sock_send(pars->sockctrl, (char *) &header, sizeof (struct rpcap_header), errbuf, PCAP_ERRBUF_SIZE) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
return -1;
}
return 0;
error:
if (rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_SETSAMPLING,
errmsgbuf, errbuf) == -1)
{
// That failed; log a message and give up.
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
// Check if all the data has been read; if not, discard the excess data
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
return -1;
}
return 0;
}
static int
daemon_msg_stats_req(uint8 ver, struct daemon_slpars *pars,
struct session *session, uint32 plen, struct pcap_stat *stats,
unsigned int svrcapt)
{
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client
char sendbuf[RPCAP_NETBUF_SIZE]; // temporary buffer in which data to be sent is buffered
int sendbufidx = 0; // index which keeps the number of bytes currently buffered
struct rpcap_stats *netstats; // statistics sent on the network
// Check that the message does not contain other data; if it does, discard it
if (rpcapd_discard(pars->sockctrl, plen) == -1)
{
// Network error.
return -1;
}
if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
rpcap_createhdr((struct rpcap_header *) sendbuf, ver,
RPCAP_MSG_STATS_REPLY, 0, (uint16) sizeof(struct rpcap_stats));
netstats = (struct rpcap_stats *) &sendbuf[sendbufidx];
if (sock_bufferize(NULL, sizeof(struct rpcap_stats), NULL,
&sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1)
goto error;
if (session && session->fp)
{
if (pcap_stats(session->fp, stats) == -1)
{
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "%s", pcap_geterr(session->fp));
goto error;
}
netstats->ifdrop = htonl(stats->ps_ifdrop);
netstats->ifrecv = htonl(stats->ps_recv);
netstats->krnldrop = htonl(stats->ps_drop);
netstats->svrcapt = htonl(session->TotCapt);
}
else
{
// We have to keep compatibility with old applications,
// which ask for statistics also when the capture has
// already stopped.
netstats->ifdrop = htonl(stats->ps_ifdrop);
netstats->ifrecv = htonl(stats->ps_recv);
netstats->krnldrop = htonl(stats->ps_drop);
netstats->svrcapt = htonl(svrcapt);
}
// Send the packet
if (sock_send(pars->sockctrl, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE) == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf);
return -1;
}
return 0;
error:
rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_GETSTATS,
errmsgbuf, NULL);
return 0;
}
#ifdef _WIN32
static unsigned __stdcall
#else
static void *
#endif
daemon_thrdatamain(void *ptr)
{
char errbuf[PCAP_ERRBUF_SIZE + 1]; // error buffer
struct session *session; // pointer to the struct session for this session
int retval; // general variable used to keep the return value of other functions
struct rpcap_pkthdr *net_pkt_header;// header of the packet
struct pcap_pkthdr *pkt_header; // pointer to the buffer that contains the header of the current packet
u_char *pkt_data; // pointer to the buffer that contains the current packet
size_t sendbufsize; // size for the send buffer
char *sendbuf; // temporary buffer in which data to be sent is buffered
int sendbufidx; // index which keeps the number of bytes currently buffered
int status;
#ifndef _WIN32
sigset_t sigusr1; // signal set with just SIGUSR1
#endif
session = (struct session *) ptr;
session->TotCapt = 0; // counter which is incremented each time a packet is received
// Initialize errbuf
memset(errbuf, 0, sizeof(errbuf));
//
// We need a buffer large enough to hold a maximum-size packet for
// this pcap_t, plus the headers we prepend to it.
//
if (pcap_snapshot(session->fp) < 0)
{
//
// The snapshot length is negative.
// This "should not happen".
//
rpcapd_log(LOGPRIO_ERROR,
"Unable to allocate the buffer for this child thread: snapshot length of %d is negative",
pcap_snapshot(session->fp));
sendbuf = NULL; // we can't allocate a buffer, so nothing to free
goto error;
}
//
// size_t is unsigned, and the result of pcap_snapshot() is signed;
// on no platform that we support is int larger than size_t.
// This means that, unless the extra information we prepend to
// a maximum-sized packet is impossibly large, the sum of the
// snapshot length and the size of that extra information will
// fit in a size_t.
//
// So we don't need to check whether computing sendbufsize overflows.
//
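//
// Had we wanted an explicit guard anyway, a minimal sketch would look
// like this (SIZE_MAX is from <stdint.h>; the snapshot length is
// already known to be non-negative at this point):
//
//     size_t extra = sizeof(struct rpcap_header) + sizeof(struct rpcap_pkthdr);
//     if ((size_t)pcap_snapshot(session->fp) > SIZE_MAX - extra)
//         goto error; // cannot happen, per the reasoning above
//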
sendbufsize = sizeof(struct rpcap_header) + sizeof(struct rpcap_pkthdr) + pcap_snapshot(session->fp);
sendbuf = (char *) malloc (sendbufsize);
if (sendbuf == NULL)
{
rpcapd_log(LOGPRIO_ERROR,
"Unable to allocate the buffer for this child thread");
goto error;
}
#ifndef _WIN32
//
// Set the signal set to include just SIGUSR1, and block that
// signal; we only want it unblocked when we're reading
// packets - we don't want any other system calls, such as
// ones being used to send to the client or to log messages,
// to be interrupted.
//
sigemptyset(&sigusr1);
sigaddset(&sigusr1, SIGUSR1);
pthread_sigmask(SIG_BLOCK, &sigusr1, NULL);
#endif
// Retrieve the packets
for (;;)
{
#ifndef _WIN32
//
// Unblock SIGUSR1 while we might be waiting for packets.
//
pthread_sigmask(SIG_UNBLOCK, &sigusr1, NULL);
#endif
retval = pcap_next_ex(session->fp, &pkt_header, (const u_char **) &pkt_data); // cast to avoid a compiler warning
#ifndef _WIN32
//
// Now block it again.
//
pthread_sigmask(SIG_BLOCK, &sigusr1, NULL);
#endif
if (retval < 0)
break; // error
if (retval == 0) // Read timeout elapsed
continue;
sendbufidx = 0;
// Bufferize the general header
if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL,
&sendbufidx, sendbufsize, SOCKBUF_CHECKONLY, errbuf,
PCAP_ERRBUF_SIZE) == -1)
{
rpcapd_log(LOGPRIO_ERROR,
"sock_bufferize() error sending packet message: %s",
errbuf);
goto error;
}
rpcap_createhdr((struct rpcap_header *) sendbuf,
session->protocol_version, RPCAP_MSG_PACKET, 0,
(uint16) (sizeof(struct rpcap_pkthdr) + pkt_header->caplen));
net_pkt_header = (struct rpcap_pkthdr *) &sendbuf[sendbufidx];
// Bufferize the pkt header
if (sock_bufferize(NULL, sizeof(struct rpcap_pkthdr), NULL,
&sendbufidx, sendbufsize, SOCKBUF_CHECKONLY, errbuf,
PCAP_ERRBUF_SIZE) == -1)
{
rpcapd_log(LOGPRIO_ERROR,
"sock_bufferize() error sending packet message: %s",
errbuf);
goto error;
}
net_pkt_header->caplen = htonl(pkt_header->caplen);
net_pkt_header->len = htonl(pkt_header->len);
net_pkt_header->npkt = htonl(++(session->TotCapt));
net_pkt_header->timestamp_sec = htonl(pkt_header->ts.tv_sec);
net_pkt_header->timestamp_usec = htonl(pkt_header->ts.tv_usec);
// Bufferize the pkt data
if (sock_bufferize((char *) pkt_data, pkt_header->caplen,
sendbuf, &sendbufidx, sendbufsize, SOCKBUF_BUFFERIZE,
errbuf, PCAP_ERRBUF_SIZE) == -1)
{
rpcapd_log(LOGPRIO_ERROR,
"sock_bufferize() error sending packet message: %s",
errbuf);
goto error;
}
// Send the packet
// If the client dropped the connection, don't report an
// error, just quit.
status = sock_send(session->sockdata, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE);
if (status < 0)
{
if (status == -1)
{
//
// Error other than "client closed the
// connection out from under us"; report
// it.
//
rpcapd_log(LOGPRIO_ERROR,
"Send of packet to client failed: %s",
errbuf);
}
//
// Give up in either case.
//
goto error;
}
}
if (retval < 0 && retval != PCAP_ERROR_BREAK)
{
//
// Failed with an error other than "we were told to break
// out of the loop".
//
// The latter just means that the client told us to stop
// capturing, so there's no error to report.
//
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "Error reading the packets: %s", pcap_geterr(session->fp));
rpcap_senderror(session->sockctrl, session->protocol_version,
PCAP_ERR_READEX, errbuf, NULL);
}
error:
//
// The main thread will clean up the session structure.
//
free(sendbuf);
return 0;
}
#ifndef _WIN32
//
// Do-nothing handler for SIGUSR1; the sole purpose of SIGUSR1 is to
// interrupt the data thread if it's blocked in a system call waiting
// for packets to arrive.
//
static void noop_handler(int sign _U_)
{
}
#endif
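//
// This assumes the handler is installed elsewhere in the daemon,
// roughly along these lines (note the deliberate absence of
// SA_RESTART, so that a system call blocked in the kernel returns
// EINTR rather than being restarted):
//
//     struct sigaction action;
//     memset(&action, 0, sizeof(action));
//     action.sa_handler = noop_handler;
//     sigemptyset(&action.sa_mask);
//     action.sa_flags = 0; // no SA_RESTART
//     sigaction(SIGUSR1, &action, NULL);
//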
/*!
\brief Serializes a network address.
It accepts a 'sockaddr_storage' structure as input and converts it into a format
suitable for being sent on the network. Basically, it applies all the hton()
conversions required to the input variable.
\param sockaddrin a 'sockaddr_storage' pointer to the variable that has to be
serialized. This variable can be both a 'sockaddr_in' and 'sockaddr_in6'.
\param sockaddrout an 'rpcap_sockaddr' pointer to the variable that will contain
the serialized data. This variable has to be allocated by the user.
\warning This function supports only AF_INET and AF_INET6 address families.
*/
static void
daemon_seraddr(struct sockaddr_storage *sockaddrin, struct rpcap_sockaddr *sockaddrout)
{
memset(sockaddrout, 0, sizeof(struct sockaddr_storage));
// There can be cases in which sockaddrin is not available
if (sockaddrin == NULL) return;
// Warning: we support only AF_INET and AF_INET6
switch (sockaddrin->ss_family)
{
case AF_INET:
{
struct sockaddr_in *sockaddrin_ipv4;
struct rpcap_sockaddr_in *sockaddrout_ipv4;
sockaddrin_ipv4 = (struct sockaddr_in *) sockaddrin;
sockaddrout_ipv4 = (struct rpcap_sockaddr_in *) sockaddrout;
sockaddrout_ipv4->family = htons(RPCAP_AF_INET);
sockaddrout_ipv4->port = htons(sockaddrin_ipv4->sin_port);
memcpy(&sockaddrout_ipv4->addr, &sockaddrin_ipv4->sin_addr, sizeof(sockaddrout_ipv4->addr));
memset(sockaddrout_ipv4->zero, 0, sizeof(sockaddrout_ipv4->zero));
break;
}
#ifdef AF_INET6
case AF_INET6:
{
struct sockaddr_in6 *sockaddrin_ipv6;
struct rpcap_sockaddr_in6 *sockaddrout_ipv6;
sockaddrin_ipv6 = (struct sockaddr_in6 *) sockaddrin;
sockaddrout_ipv6 = (struct rpcap_sockaddr_in6 *) sockaddrout;
sockaddrout_ipv6->family = htons(RPCAP_AF_INET6);
sockaddrout_ipv6->port = htons(sockaddrin_ipv6->sin6_port);
sockaddrout_ipv6->flowinfo = htonl(sockaddrin_ipv6->sin6_flowinfo);
memcpy(&sockaddrout_ipv6->addr, &sockaddrin_ipv6->sin6_addr, sizeof(sockaddrout_ipv6->addr));
sockaddrout_ipv6->scope_id = htonl(sockaddrin_ipv6->sin6_scope_id);
break;
}
#endif
}
}
/*!
\brief Suspends a thread for secs seconds.
*/
void sleep_secs(int secs)
{
#ifdef _WIN32
Sleep(secs*1000);
#else
unsigned secs_remaining;
if (secs <= 0)
return;
secs_remaining = secs;
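// sleep() returns the number of unslept seconds if it was interrupted
// by a signal, so keep looping until no time remains.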
while (secs_remaining != 0)
secs_remaining = sleep(secs_remaining);
#endif
}
/*
* Read the header of a message.
*/
static int
rpcapd_recv_msg_header(SOCKET sock, struct rpcap_header *headerp)
{
int nread;
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
nread = sock_recv(sock, (char *) headerp, sizeof(struct rpcap_header),
SOCK_RECEIVEALL_YES|SOCK_EOF_ISNT_ERROR, errbuf, PCAP_ERRBUF_SIZE);
if (nread == -1)
{
// Network error.
rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf);
return -1;
}
if (nread == 0)
{
// Immediate EOF; that's treated like a close message.
return -2;
}
headerp->plen = ntohl(headerp->plen);
return 0;
}
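/*
 * For reference, the on-the-wire header decoded above is, as defined
 * in rpcap-protocol.h, essentially:
 *
 *     struct rpcap_header {
 *         uint8 ver;     // protocol version
 *         uint8 type;    // message type
 *         uint16 value;  // message-dependent value
 *         uint32 plen;   // payload length, network byte order
 *     };
 *
 * Only plen needs byte-order conversion here; ver and type are single
 * bytes, and value is converted where it is actually used.
 */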
/*
* Read data from a message.
 * If we're trying to read more data than remains, it puts an error
* message into errmsgbuf and returns -2. Otherwise, tries to read
* the data and, if that succeeds, subtracts the amount read from
* the number of bytes of data that remains.
* Returns 0 on success, logs a message and returns -1 on a network
* error.
*/
static int
rpcapd_recv(SOCKET sock, char *buffer, size_t toread, uint32 *plen, char *errmsgbuf)
{
int nread;
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
if (toread > *plen)
{
// Tell the client and continue.
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Message payload is too short");
return -2;
}
nread = sock_recv(sock, buffer, toread,
SOCK_RECEIVEALL_YES|SOCK_EOF_IS_ERROR, errbuf, PCAP_ERRBUF_SIZE);
if (nread == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf);
return -1;
}
*plen -= nread;
return 0;
}
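/*
 * The canonical calling pattern is the one used by
 * daemon_msg_setsampling_req() above:
 *
 *     status = rpcapd_recv(pars->sockctrl, (char *) &msg, sizeof(msg),
 *         &plen, errmsgbuf);
 *     if (status == -1)
 *         return -1;  // network error; already logged
 *     if (status == -2)
 *         goto error; // payload too short; send an error reply
 */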
/*
* Discard data from a connection.
* Mostly used to discard wrong-sized messages.
* Returns 0 on success, logs a message and returns -1 on a network
* error.
*/
static int
rpcapd_discard(SOCKET sock, uint32 len)
{
char errbuf[PCAP_ERRBUF_SIZE + 1]; // keeps the error string, prior to being printed
if (len != 0)
{
if (sock_discard(sock, len, errbuf, PCAP_ERRBUF_SIZE) == -1)
{
// Network error.
rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf);
return -1;
}
}
return 0;
}
//
// Shut down any packet-capture thread associated with the session,
// close the SSL handle for the data socket if we have one, close
// the data socket if we have one, and close the underlying packet
// capture handle if we have one.
//
// We do not, of course, touch the controlling socket that's also
// copied into the session, as the service loop might still use it.
//
static void session_close(struct session *session)
{
if (session->have_thread)
{
//
// Tell the data connection thread's main capture loop to
// break out of that loop.
//
// This may be sufficient to wake up a blocked thread,
// but it's not guaranteed to be sufficient.
//
pcap_breakloop(session->fp);
#ifdef _WIN32
//
// Set the event on which a read would block, so that,
// if it's currently blocked waiting for packets to
// arrive, it'll wake up, so it can see the "break
// out of the loop" indication. (pcap_breakloop()
// might do this, but older versions don't. Setting
// it twice should, at worst, cause an extra wakeup,
// which shouldn't be a problem.)
//
// XXX - what about modules other than NPF?
//
SetEvent(pcap_getevent(session->fp));
//
// Wait for the thread to exit, so we don't close
// sockets out from under it.
//
// XXX - have a timeout, so we don't wait forever?
//
WaitForSingleObject(session->thread, INFINITE);
//
// Release the thread handle, as we're done with
// it.
//
CloseHandle(session->thread);
session->have_thread = 0;
session->thread = INVALID_HANDLE_VALUE;
#else
//
// Send a SIGUSR1 signal to the thread, so that, if
// it's currently blocked waiting for packets to arrive,
// it'll wake up (we've turned off SA_RESTART for
// SIGUSR1, so that the system call in which it's blocked
// should return EINTR rather than restarting).
//
pthread_kill(session->thread, SIGUSR1);
//
// Wait for the thread to exit, so we don't close
// sockets out from under it.
//
// XXX - have a timeout, so we don't wait forever?
//
pthread_join(session->thread, NULL);
session->have_thread = 0;
memset(&session->thread, 0, sizeof(session->thread));
#endif
}
if (session->sockdata != INVALID_SOCKET)
{
sock_close(session->sockdata, NULL, 0);
session->sockdata = INVALID_SOCKET;
}
if (session->fp)
{
pcap_close(session->fp);
session->fp = NULL;
}
}
// Check whether a capture source string is a URL or not.
// This includes URLs that refer to a local device; a scheme, followed
// by ://, followed by *another* scheme and ://, is just silly, and
// anybody who supplies that will get an error.
//
static int
is_url(const char *source)
{
char *colonp;
/*
* RFC 3986 says:
*
* URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
*
* hier-part = "//" authority path-abempty
* / path-absolute
* / path-rootless
* / path-empty
*
* authority = [ userinfo "@" ] host [ ":" port ]
*
* userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
*
* Step 1: look for the ":" at the end of the scheme.
* A colon in the source is *NOT* sufficient to indicate that
* this is a URL, as interface names on some platforms might
* include colons (e.g., I think some Solaris interfaces
* might).
*/
colonp = strchr(source, ':');
if (colonp == NULL)
{
/*
* The source is the device to open. It's not a URL.
*/
return (0);
}
/*
* All schemes must have "//" after them, i.e. we only support
* hier-part = "//" authority path-abempty, not
* hier-part = path-absolute
* hier-part = path-rootless
* hier-part = path-empty
*
* We need that in order to distinguish between a local device
* name that happens to contain a colon and a URI.
*/
if (strncmp(colonp + 1, "//", 2) != 0)
{
/*
* The source is the device to open. It's not a URL.
*/
return (0);
}
/*
* It's a URL.
*/
return (1);
}
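/*
 * A quick sanity check of the rules above, on hypothetical inputs:
 *
 *     is_url("eth0")              -> 0  (no colon at all)
 *     is_url("dag:0")             -> 0  (colon, but not followed by "//")
 *     is_url("rpcap://host/eth0") -> 1
 */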
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_1022_0 |
crossvul-cpp_data_bad_4716_2 | /*
* verify.c:
*
* Author:
* Mono Project (http://www.mono-project.com)
*
* Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
* Copyright 2004-2009 Novell, Inc (http://www.novell.com)
*/
#include <config.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/verify.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/reflection.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/metadata.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/mono-basic-block.h>
#include <mono/utils/monobitset.h>
#include <string.h>
#include <signal.h>
#include <ctype.h>
static MiniVerifierMode verifier_mode = MONO_VERIFIER_MODE_OFF;
static gboolean verify_all = FALSE;
/*
 * Set the desired level of checks for the verifier.
*
*/
void
mono_verifier_set_mode (MiniVerifierMode mode)
{
verifier_mode = mode;
}
void
mono_verifier_enable_verify_all ()
{
verify_all = TRUE;
}
#ifndef DISABLE_VERIFIER
/*
* Pull the list of opcodes
*/
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
a = i,
enum {
#include "mono/cil/opcode.def"
LAST = 0xff
};
#undef OPDEF
#ifdef MONO_VERIFIER_DEBUG
#define VERIFIER_DEBUG(code) do { code } while (0)
#else
#define VERIFIER_DEBUG(code)
#endif
//////////////////////////////////////////////////////////////////
#define IS_STRICT_MODE(ctx) (((ctx)->level & MONO_VERIFY_NON_STRICT) == 0)
#define IS_FAIL_FAST_MODE(ctx) (((ctx)->level & MONO_VERIFY_FAIL_FAST) == MONO_VERIFY_FAIL_FAST)
#define IS_SKIP_VISIBILITY(ctx) (((ctx)->level & MONO_VERIFY_SKIP_VISIBILITY) == MONO_VERIFY_SKIP_VISIBILITY)
#define IS_REPORT_ALL_ERRORS(ctx) (((ctx)->level & MONO_VERIFY_REPORT_ALL_ERRORS) == MONO_VERIFY_REPORT_ALL_ERRORS)
#define CLEAR_PREFIX(ctx, prefix) do { (ctx)->prefix_set &= ~(prefix); } while (0)
#define ADD_VERIFY_INFO(__ctx, __msg, __status, __exception) \
do { \
MonoVerifyInfoExtended *vinfo = g_new (MonoVerifyInfoExtended, 1); \
vinfo->info.status = __status; \
vinfo->info.message = ( __msg ); \
vinfo->exception_type = (__exception); \
(__ctx)->list = g_slist_prepend ((__ctx)->list, vinfo); \
} while (0)
//TODO support MONO_VERIFY_REPORT_ALL_ERRORS
#define ADD_VERIFY_ERROR(__ctx, __msg) \
do { \
ADD_VERIFY_INFO(__ctx, __msg, MONO_VERIFY_ERROR, MONO_EXCEPTION_INVALID_PROGRAM); \
(__ctx)->valid = 0; \
} while (0)
#define CODE_NOT_VERIFIABLE(__ctx, __msg) \
do { \
if ((__ctx)->verifiable || IS_REPORT_ALL_ERRORS (__ctx)) { \
ADD_VERIFY_INFO(__ctx, __msg, MONO_VERIFY_NOT_VERIFIABLE, MONO_EXCEPTION_UNVERIFIABLE_IL); \
(__ctx)->verifiable = 0; \
if (IS_FAIL_FAST_MODE (__ctx)) \
(__ctx)->valid = 0; \
} \
} while (0)
#define ADD_VERIFY_ERROR2(__ctx, __msg, __exception) \
do { \
ADD_VERIFY_INFO(__ctx, __msg, MONO_VERIFY_ERROR, __exception); \
(__ctx)->valid = 0; \
} while (0)
#define CODE_NOT_VERIFIABLE2(__ctx, __msg, __exception) \
do { \
if ((__ctx)->verifiable || IS_REPORT_ALL_ERRORS (__ctx)) { \
ADD_VERIFY_INFO(__ctx, __msg, MONO_VERIFY_NOT_VERIFIABLE, __exception); \
(__ctx)->verifiable = 0; \
if (IS_FAIL_FAST_MODE (__ctx)) \
(__ctx)->valid = 0; \
} \
} while (0)
#define CHECK_ADD4_OVERFLOW_UN(a, b) ((guint32)(0xFFFFFFFFU) - (guint32)(b) < (guint32)(a))
#define CHECK_ADD8_OVERFLOW_UN(a, b) ((guint64)(0xFFFFFFFFFFFFFFFFUL) - (guint64)(b) < (guint64)(a))
#if SIZEOF_VOID_P == 4
#define CHECK_ADDP_OVERFLOW_UN(a,b) CHECK_ADD4_OVERFLOW_UN(a, b)
#else
#define CHECK_ADDP_OVERFLOW_UN(a,b) CHECK_ADD8_OVERFLOW_UN(a, b)
#endif
#define ADDP_IS_GREATER_OR_OVF(a, b, c) (((a) + (b) > (c)) || CHECK_ADDP_OVERFLOW_UN (a, b))
#define ADD_IS_GREATER_OR_OVF(a, b, c) (((a) + (b) > (c)) || CHECK_ADD4_OVERFLOW_UN (a, b))
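/*
 * A worked example of the 32-bit check: with a = 0xFFFFFFF0 and b = 0x20,
 * 0xFFFFFFFFU - b is 0xFFFFFFDF, which is less than a, so the macro
 * correctly reports that a + b wraps around 32 bits.
 */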
/*Flags to be used with ILCodeDesc::flags */
enum {
/*Instruction has not been processed.*/
IL_CODE_FLAG_NOT_PROCESSED = 0,
/*Instruction was decoded by mono_method_verify loop.*/
IL_CODE_FLAG_SEEN = 1,
/*Instruction was target of a branch or is at a protected block boundary.*/
IL_CODE_FLAG_WAS_TARGET = 2,
/*Used by stack_init to avoid double initialize each entry.*/
IL_CODE_FLAG_STACK_INITED = 4,
/*Used by merge_stacks to decide if it should just copy the eval stack.*/
IL_CODE_STACK_MERGED = 8,
/*This instruction is part of the delegate construction sequence, it cannot be target of a branch.*/
IL_CODE_DELEGATE_SEQUENCE = 0x10,
/*This is a delegate created from a ldftn to a non final virtual method*/
IL_CODE_LDFTN_DELEGATE_NONFINAL_VIRTUAL = 0x20,
/*This is a call to a non final virtual method*/
IL_CODE_CALL_NONFINAL_VIRTUAL = 0x40,
};
typedef enum {
RESULT_VALID,
RESULT_UNVERIFIABLE,
RESULT_INVALID
} verify_result_t;
typedef struct {
MonoType *type;
int stype;
MonoMethod *method;
} ILStackDesc;
typedef struct {
ILStackDesc *stack;
guint16 size;
guint16 flags;
} ILCodeDesc;
typedef struct {
int max_args;
int max_stack;
int verifiable;
int valid;
int level;
int code_size;
ILCodeDesc *code;
ILCodeDesc eval;
MonoType **params;
GSList *list;
/*Allocated fnptr MonoType that should be freed by us.*/
GSList *funptrs;
/*Type dup'ed exception types from catch blocks.*/
GSList *exception_types;
int num_locals;
MonoType **locals;
/*TODO get rid of target here, need_merge in mono_method_verify and hoist the merging code in the branching code*/
int target;
guint32 ip_offset;
MonoMethodSignature *signature;
MonoMethodHeader *header;
MonoGenericContext *generic_context;
MonoImage *image;
MonoMethod *method;
/*This flag helps solve a corner case of delegate verification: you cannot have a "starg 0"
*on a method that creates a delegate for a non-final virtual method using ldftn*/
gboolean has_this_store;
/*This flag is used to control whether the constructor of the parent class has been called.
*If the this pointer is pushed on the eval stack and it's a reference type constructor and
* super_ctor_called is false, the uninitialized flag is set on the pushed value.
*
* Popping an uninitialized this ptr from the eval stack is an unverifiable operation unless
* the safe variant is used. Only a few opcodes can use it: dup, pop, ldfld, stfld and call to a constructor.
*/
gboolean super_ctor_called;
guint32 prefix_set;
gboolean has_flags;
MonoType *constrained_type;
} VerifyContext;
static void
merge_stacks (VerifyContext *ctx, ILCodeDesc *from, ILCodeDesc *to, gboolean start, gboolean external);
static int
get_stack_type (MonoType *type);
static gboolean
mono_delegate_signature_equal (MonoMethodSignature *delegate_sig, MonoMethodSignature *method_sig, gboolean is_static_ldftn);
static gboolean
mono_class_is_valid_generic_instantiation (VerifyContext *ctx, MonoClass *klass);
static gboolean
mono_method_is_valid_generic_instantiation (VerifyContext *ctx, MonoMethod *method);
//////////////////////////////////////////////////////////////////
enum {
TYPE_INV = 0, /* leave at 0. */
TYPE_I4 = 1,
TYPE_I8 = 2,
TYPE_NATIVE_INT = 3,
TYPE_R8 = 4,
/* Used by operator tables to resolve pointer types (managed & unmanaged) and by unmanaged pointer types*/
TYPE_PTR = 5,
/* value types and classes */
TYPE_COMPLEX = 6,
/* Number of types, used to define the size of the tables*/
TYPE_MAX = 6,
/* Used by tables to signal that a result is not verifiable*/
NON_VERIFIABLE_RESULT = 0x80,
/*Mask used to extract just the type, excluding flags */
TYPE_MASK = 0x0F,
/* The stack type is a managed pointer, unmask the value to res */
POINTER_MASK = 0x100,
/*Stack type with the pointer mask*/
RAW_TYPE_MASK = 0x10F,
/* Controlled Mutability Manager Pointer */
CMMP_MASK = 0x200,
/* The stack type is a null literal*/
NULL_LITERAL_MASK = 0x400,
/**Used by ldarg.0 and family to let delegate verification happen.*/
THIS_POINTER_MASK = 0x800,
/**Signals that this is a boxed value type*/
BOXED_MASK = 0x1000,
/*This is an uninitialized this ref*/
UNINIT_THIS_MASK = 0x2000,
};
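/*
 * Example of how these bits compose: a managed pointer to an Int32 is
 * stored as TYPE_I4 | POINTER_MASK (0x101); masking with TYPE_MASK
 * recovers TYPE_I4, while RAW_TYPE_MASK preserves the pointer bit too.
 */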
static const char* const
type_names [TYPE_MAX + 1] = {
"Invalid",
"Int32",
"Int64",
"Native Int",
"Float64",
"Native Pointer",
"Complex"
};
enum {
PREFIX_UNALIGNED = 1,
PREFIX_VOLATILE = 2,
PREFIX_TAIL = 4,
PREFIX_CONSTRAINED = 8,
PREFIX_READONLY = 16
};
//////////////////////////////////////////////////////////////////
/*Token validation macros and functions */
#define IS_MEMBER_REF(token) (mono_metadata_token_table (token) == MONO_TABLE_MEMBERREF)
#define IS_METHOD_DEF(token) (mono_metadata_token_table (token) == MONO_TABLE_METHOD)
#define IS_METHOD_SPEC(token) (mono_metadata_token_table (token) == MONO_TABLE_METHODSPEC)
#define IS_FIELD_DEF(token) (mono_metadata_token_table (token) == MONO_TABLE_FIELD)
#define IS_TYPE_REF(token) (mono_metadata_token_table (token) == MONO_TABLE_TYPEREF)
#define IS_TYPE_DEF(token) (mono_metadata_token_table (token) == MONO_TABLE_TYPEDEF)
#define IS_TYPE_SPEC(token) (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC)
#define IS_METHOD_DEF_OR_REF_OR_SPEC(token) (IS_METHOD_DEF (token) || IS_MEMBER_REF (token) || IS_METHOD_SPEC (token))
#define IS_TYPE_DEF_OR_REF_OR_SPEC(token) (IS_TYPE_DEF (token) || IS_TYPE_REF (token) || IS_TYPE_SPEC (token))
#define IS_FIELD_DEF_OR_REF(token) (IS_FIELD_DEF (token) || IS_MEMBER_REF (token))
/*
 * Verify if @token refers to a valid row in its table.
*/
static gboolean
token_bounds_check (MonoImage *image, guint32 token)
{
if (image->dynamic)
return mono_reflection_is_valid_dynamic_token ((MonoDynamicImage*)image, token);
return image->tables [mono_metadata_token_table (token)].rows >= mono_metadata_token_index (token);
}
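/*
 * A metadata token packs the table id into its top byte and a 1-based
 * row index (RID) into its low 24 bits; mono_metadata_token_table ()
 * and mono_metadata_token_index () extract the two halves, so the
 * check above reduces to "RID <= number of rows in that table".
 */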
static MonoType *
mono_type_create_fnptr_from_mono_method (VerifyContext *ctx, MonoMethod *method)
{
MonoType *res = g_new0 (MonoType, 1);
//FIXME use mono_method_get_signature_full
res->data.method = mono_method_signature (method);
res->type = MONO_TYPE_FNPTR;
ctx->funptrs = g_slist_prepend (ctx->funptrs, res);
return res;
}
/*
* mono_type_is_enum_type:
*
* Returns TRUE if @type is an enum type.
*/
static gboolean
mono_type_is_enum_type (MonoType *type)
{
if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype)
return TRUE;
if (type->type == MONO_TYPE_GENERICINST && type->data.generic_class->container_class->enumtype)
return TRUE;
return FALSE;
}
/*
* mono_type_is_value_type:
*
 * Returns TRUE if @type is a value type named @namespace.@name.
*
*/
static gboolean
mono_type_is_value_type (MonoType *type, const char *namespace, const char *name)
{
return type->type == MONO_TYPE_VALUETYPE &&
!strcmp (namespace, type->data.klass->name_space) &&
!strcmp (name, type->data.klass->name);
}
/*
 * Returns TRUE if @type is VAR or MVAR.
*/
static gboolean
mono_type_is_generic_argument (MonoType *type)
{
return type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR;
}
/*
* mono_type_get_underlying_type_any:
*
 * This function is just like mono_type_get_underlying_type but it doesn't care if the type is byref.
*
* Returns the underlying type of @type regardless if it is byref or not.
*/
static MonoType*
mono_type_get_underlying_type_any (MonoType *type)
{
if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype)
return mono_class_enum_basetype (type->data.klass);
if (type->type == MONO_TYPE_GENERICINST && type->data.generic_class->container_class->enumtype)
return mono_class_enum_basetype (type->data.generic_class->container_class);
return type;
}
static const char*
mono_type_get_stack_name (MonoType *type)
{
return type_names [get_stack_type (type) & TYPE_MASK];
}
#define CTOR_REQUIRED_FLAGS (METHOD_ATTRIBUTE_SPECIAL_NAME | METHOD_ATTRIBUTE_RT_SPECIAL_NAME)
#define CTOR_INVALID_FLAGS (METHOD_ATTRIBUTE_STATIC)
static gboolean
mono_method_is_constructor (MonoMethod *method)
{
return ((method->flags & CTOR_REQUIRED_FLAGS) == CTOR_REQUIRED_FLAGS &&
!(method->flags & CTOR_INVALID_FLAGS) &&
!strcmp (".ctor", method->name));
}
static gboolean
mono_class_has_default_constructor (MonoClass *klass)
{
MonoMethod *method;
int i;
mono_class_setup_methods (klass);
if (klass->exception_type)
return FALSE;
for (i = 0; i < klass->method.count; ++i) {
method = klass->methods [i];
if (mono_method_is_constructor (method) &&
mono_method_signature (method) &&
mono_method_signature (method)->param_count == 0 &&
(method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC)
return TRUE;
}
return FALSE;
}
static gboolean
mono_class_interface_implements_interface (MonoClass *candidate, MonoClass *iface)
{
MonoError error;
int i;
do {
if (candidate == iface)
return TRUE;
mono_class_setup_interfaces (candidate, &error);
if (!mono_error_ok (&error)) {
mono_error_cleanup (&error);
return FALSE;
}
for (i = 0; i < candidate->interface_count; ++i) {
if (candidate->interfaces [i] == iface || mono_class_interface_implements_interface (candidate->interfaces [i], iface))
return TRUE;
}
candidate = candidate->parent;
} while (candidate);
return FALSE;
}
/*
* Verify if @type is valid for the given @ctx verification context.
 * This function checks for VAR and MVAR types that are invalid under the current verifier.
*/
static gboolean
mono_type_is_valid_type_in_context (MonoType *type, MonoGenericContext *context)
{
int i;
MonoGenericInst *inst;
switch (type->type) {
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
if (!context)
return FALSE;
inst = type->type == MONO_TYPE_VAR ? context->class_inst : context->method_inst;
if (!inst || mono_type_get_generic_param_num (type) >= inst->type_argc)
return FALSE;
break;
case MONO_TYPE_SZARRAY:
return mono_type_is_valid_type_in_context (&type->data.klass->byval_arg, context);
case MONO_TYPE_ARRAY:
return mono_type_is_valid_type_in_context (&type->data.array->eklass->byval_arg, context);
case MONO_TYPE_PTR:
return mono_type_is_valid_type_in_context (type->data.type, context);
case MONO_TYPE_GENERICINST:
inst = type->data.generic_class->context.class_inst;
if (!inst->is_open)
break;
for (i = 0; i < inst->type_argc; ++i)
if (!mono_type_is_valid_type_in_context (inst->type_argv [i], context))
return FALSE;
break;
case MONO_TYPE_CLASS:
case MONO_TYPE_VALUETYPE: {
MonoClass *klass = type->data.klass;
/*
 * It's possible to encode generic-ish types in such a way that they disguise themselves as class or valuetype.
 * Fixing the type decoding is really tricky since in some cases this behavior is needed, for example, to
 * have a 'class' type pointing to a 'genericinst' class.
 *
 * For the runtime these non-canonical (weird) encodings work fine; the worst they can cause is some
* reflection oddities which are harmless - to security at least.
*/
if (klass->byval_arg.type != type->type)
return mono_type_is_valid_type_in_context (&klass->byval_arg, context);
break;
}
}
return TRUE;
}
/*This function returns NULL if the type is not instantiatable*/
static MonoType*
verifier_inflate_type (VerifyContext *ctx, MonoType *type, MonoGenericContext *context)
{
MonoError error;
MonoType *result;
result = mono_class_inflate_generic_type_checked (type, context, &error);
if (!mono_error_ok (&error)) {
mono_error_cleanup (&error);
return NULL;
}
return result;
}
/*
* Test if @candidate is a subtype of @target using the minimal possible information
 * TODO move the code for non-finished TypeBuilders here.
*/
static gboolean
mono_class_is_constraint_compatible (MonoClass *candidate, MonoClass *target)
{
if (candidate == target)
return TRUE;
if (target == mono_defaults.object_class)
return TRUE;
//setup_supertypes doesn't mono_class_init anything
mono_class_setup_supertypes (candidate);
mono_class_setup_supertypes (target);
if (mono_class_has_parent (candidate, target))
return TRUE;
//if target is not a supertype it must be an interface
if (!MONO_CLASS_IS_INTERFACE (target))
return FALSE;
if (candidate->image->dynamic && !candidate->wastypebuilder) {
MonoReflectionTypeBuilder *tb = candidate->reflection_info;
int j;
if (tb->interfaces) {
for (j = mono_array_length (tb->interfaces) - 1; j >= 0; --j) {
MonoReflectionType *iface = mono_array_get (tb->interfaces, MonoReflectionType*, j);
MonoClass *ifaceClass = mono_class_from_mono_type (iface->type);
if (mono_class_is_constraint_compatible (ifaceClass, target)) {
return TRUE;
}
}
}
return FALSE;
}
return mono_class_interface_implements_interface (candidate, target);
}
static gboolean
is_valid_generic_instantiation (MonoGenericContainer *gc, MonoGenericContext *context, MonoGenericInst *ginst)
{
MonoError error;
int i;
if (ginst->type_argc != gc->type_argc)
return FALSE;
for (i = 0; i < gc->type_argc; ++i) {
MonoGenericParamInfo *param_info = mono_generic_container_get_param_info (gc, i);
MonoClass *paramClass;
MonoClass **constraints;
if (!param_info->constraints && !(param_info->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK))
continue;
if (mono_type_is_generic_argument (ginst->type_argv [i]))
continue; //it's not our job to validate type variables
paramClass = mono_class_from_mono_type (ginst->type_argv [i]);
if (paramClass->exception_type != MONO_EXCEPTION_NONE)
return FALSE;
/*it's not safe to call mono_class_init from here*/
if (paramClass->generic_class && !paramClass->inited) {
if (!mono_class_is_valid_generic_instantiation (NULL, paramClass))
return FALSE;
}
if ((param_info->flags & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) && (!paramClass->valuetype || mono_class_is_nullable (paramClass)))
return FALSE;
if ((param_info->flags & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) && paramClass->valuetype)
return FALSE;
if ((param_info->flags & GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT) && !paramClass->valuetype && !mono_class_has_default_constructor (paramClass))
return FALSE;
if (!param_info->constraints)
continue;
for (constraints = param_info->constraints; *constraints; ++constraints) {
MonoClass *ctr = *constraints;
MonoType *inflated;
inflated = mono_class_inflate_generic_type_checked (&ctr->byval_arg, context, &error);
if (!mono_error_ok (&error)) {
mono_error_cleanup (&error);
return FALSE;
}
ctr = mono_class_from_mono_type (inflated);
mono_metadata_free_type (inflated);
if (!mono_class_is_constraint_compatible (paramClass, ctr))
return FALSE;
}
}
return TRUE;
}
/*
* Return true if @candidate is constraint compatible with @target.
*
 * This means that @candidate's constraints are a superset of @target's constraints.
*/
static gboolean
mono_generic_param_is_constraint_compatible (VerifyContext *ctx, MonoGenericParam *target, MonoGenericParam *candidate, MonoClass *candidate_param_class, MonoGenericContext *context)
{
MonoGenericParamInfo *tinfo = mono_generic_param_info (target);
MonoGenericParamInfo *cinfo = mono_generic_param_info (candidate);
int tmask = tinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK;
int cmask = cinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK;
if ((tmask & cmask) != tmask)
return FALSE;
if (tinfo->constraints) {
MonoClass **target_class, **candidate_class;
for (target_class = tinfo->constraints; *target_class; ++target_class) {
MonoClass *tc;
MonoType *inflated = verifier_inflate_type (ctx, &(*target_class)->byval_arg, context);
if (!inflated)
return FALSE;
tc = mono_class_from_mono_type (inflated);
mono_metadata_free_type (inflated);
/*
 * A constraint from @target might inflate into @candidate itself, and in that case we don't need to
 * check its constraints, since it satisfies the constraint by itself.
*/
if (mono_metadata_type_equal (&tc->byval_arg, &candidate_param_class->byval_arg))
continue;
if (!cinfo->constraints)
return FALSE;
for (candidate_class = cinfo->constraints; *candidate_class; ++candidate_class) {
MonoClass *cc;
inflated = verifier_inflate_type (ctx, &(*candidate_class)->byval_arg, ctx->generic_context);
if (!inflated)
return FALSE;
cc = mono_class_from_mono_type (inflated);
mono_metadata_free_type (inflated);
if (mono_class_is_assignable_from (tc, cc))
break;
}
if (!*candidate_class)
return FALSE;
}
}
return TRUE;
}
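/*
 * As a concrete (hypothetical) illustration: a target parameter
 * constrained to IDisposable is satisfied by a candidate constrained
 * to both IDisposable and IComparable - a superset - but not by an
 * unconstrained candidate, which could be instantiated with a type
 * that implements neither.
 */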
static MonoGenericParam*
verifier_get_generic_param_from_type (VerifyContext *ctx, MonoType *type)
{
MonoGenericContainer *gc;
MonoMethod *method = ctx->method;
int num;
num = mono_type_get_generic_param_num (type);
if (type->type == MONO_TYPE_VAR) {
MonoClass *gtd = method->klass;
if (gtd->generic_class)
gtd = gtd->generic_class->container_class;
gc = gtd->generic_container;
} else { //MVAR
MonoMethod *gmd = method;
if (method->is_inflated)
gmd = ((MonoMethodInflated*)method)->declaring;
gc = mono_method_get_generic_container (gmd);
}
if (!gc)
return FALSE;
return mono_generic_container_get_param (gc, num);
}
/*
* Verify if @type is valid for the given @ctx verification context.
 * This function checks for VAR and MVAR types that are invalid under the current verifier.
*/
static gboolean
is_valid_type_in_context (VerifyContext *ctx, MonoType *type)
{
return mono_type_is_valid_type_in_context (type, ctx->generic_context);
}
static gboolean
is_valid_generic_instantiation_in_context (VerifyContext *ctx, MonoGenericInst *ginst)
{
int i;
for (i = 0; i < ginst->type_argc; ++i) {
MonoType *type = ginst->type_argv [i];
if (!is_valid_type_in_context (ctx, type))
return FALSE;
}
return TRUE;
}
static gboolean
generic_arguments_respect_constraints (VerifyContext *ctx, MonoGenericContainer *gc, MonoGenericContext *context, MonoGenericInst *ginst)
{
int i;
for (i = 0; i < ginst->type_argc; ++i) {
MonoType *type = ginst->type_argv [i];
MonoGenericParam *target = mono_generic_container_get_param (gc, i);
MonoGenericParam *candidate;
MonoClass *candidate_class;
if (!mono_type_is_generic_argument (type))
continue;
if (!is_valid_type_in_context (ctx, type))
return FALSE;
candidate = verifier_get_generic_param_from_type (ctx, type);
candidate_class = mono_class_from_mono_type (type);
if (!mono_generic_param_is_constraint_compatible (ctx, target, candidate, candidate_class, context))
return FALSE;
}
return TRUE;
}
static gboolean
mono_method_repect_method_constraints (VerifyContext *ctx, MonoMethod *method)
{
MonoMethodInflated *gmethod = (MonoMethodInflated *)method;
MonoGenericInst *ginst = gmethod->context.method_inst;
MonoGenericContainer *gc = mono_method_get_generic_container (gmethod->declaring);
return !gc || generic_arguments_respect_constraints (ctx, gc, &gmethod->context, ginst);
}
static gboolean
mono_class_repect_method_constraints (VerifyContext *ctx, MonoClass *klass)
{
MonoGenericClass *gklass = klass->generic_class;
MonoGenericInst *ginst = gklass->context.class_inst;
MonoGenericContainer *gc = gklass->container_class->generic_container;
return !gc || generic_arguments_respect_constraints (ctx, gc, &gklass->context, ginst);
}
static gboolean
mono_method_is_valid_generic_instantiation (VerifyContext *ctx, MonoMethod *method)
{
MonoMethodInflated *gmethod = (MonoMethodInflated *)method;
MonoGenericInst *ginst = gmethod->context.method_inst;
MonoGenericContainer *gc = mono_method_get_generic_container (gmethod->declaring);
if (!gc) /*non-generic inflated method - it's part of a generic type */
return TRUE;
if (ctx && !is_valid_generic_instantiation_in_context (ctx, ginst))
return FALSE;
return is_valid_generic_instantiation (gc, &gmethod->context, ginst);
}
static gboolean
mono_class_is_valid_generic_instantiation (VerifyContext *ctx, MonoClass *klass)
{
MonoGenericClass *gklass = klass->generic_class;
MonoGenericInst *ginst = gklass->context.class_inst;
MonoGenericContainer *gc = gklass->container_class->generic_container;
if (ctx && !is_valid_generic_instantiation_in_context (ctx, ginst))
return FALSE;
return is_valid_generic_instantiation (gc, &gklass->context, ginst);
}
static gboolean
mono_type_is_valid_in_context (VerifyContext *ctx, MonoType *type)
{
MonoClass *klass;
if (type == NULL) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid null type at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return FALSE;
}
if (!is_valid_type_in_context (ctx, type)) {
char *str = mono_type_full_name (type);
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic type (%s%s) (argument out of range or %s is not generic) at 0x%04x",
type->type == MONO_TYPE_VAR ? "!" : "!!",
str,
type->type == MONO_TYPE_VAR ? "class" : "method",
ctx->ip_offset),
MONO_EXCEPTION_BAD_IMAGE);
g_free (str);
return FALSE;
}
klass = mono_class_from_mono_type (type);
mono_class_init (klass);
if (mono_loader_get_last_error () || klass->exception_type != MONO_EXCEPTION_NONE) {
if (klass->generic_class && !mono_class_is_valid_generic_instantiation (NULL, klass))
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic instantiation of type %s.%s at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
else
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Could not load type %s.%s at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
return FALSE;
}
if (klass->generic_class && klass->generic_class->container_class->exception_type != MONO_EXCEPTION_NONE) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Could not load type %s.%s at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
return FALSE;
}
if (!klass->generic_class)
return TRUE;
if (!mono_class_is_valid_generic_instantiation (ctx, klass)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic type instantiation of type %s.%s at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
return FALSE;
}
if (!mono_class_repect_method_constraints (ctx, klass)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic type instantiation of type %s.%s (generic args don't respect target's constraints) at 0x%04x", klass->name_space, klass->name, ctx->ip_offset), MONO_EXCEPTION_TYPE_LOAD);
return FALSE;
}
return TRUE;
}
static verify_result_t
mono_method_is_valid_in_context (VerifyContext *ctx, MonoMethod *method)
{
if (!mono_type_is_valid_in_context (ctx, &method->klass->byval_arg))
return RESULT_INVALID;
if (!method->is_inflated)
return RESULT_VALID;
if (!mono_method_is_valid_generic_instantiation (ctx, method)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid generic method instantiation of method %s.%s::%s at 0x%04x", method->klass->name_space, method->klass->name, method->name, ctx->ip_offset), MONO_EXCEPTION_UNVERIFIABLE_IL);
return RESULT_INVALID;
}
if (!mono_method_repect_method_constraints (ctx, method)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid generic method instantiation of method %s.%s::%s (generic args don't respect target's constraints) at 0x%04x", method->klass->name_space, method->klass->name, method->name, ctx->ip_offset));
return RESULT_UNVERIFIABLE;
}
return RESULT_VALID;
}
static MonoClassField*
verifier_load_field (VerifyContext *ctx, int token, MonoClass **out_klass, const char *opcode) {
MonoClassField *field;
MonoClass *klass = NULL;
if (!IS_FIELD_DEF_OR_REF (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid field token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
field = mono_field_from_token (ctx->image, token, &klass, ctx->generic_context);
if (!field || !field->parent || !klass) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Cannot load field from token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
if (!mono_type_is_valid_in_context (ctx, &klass->byval_arg))
return NULL;
*out_klass = klass;
return field;
}
static MonoMethod*
verifier_load_method (VerifyContext *ctx, int token, const char *opcode) {
MonoMethod* method;
if (!IS_METHOD_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid method token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
method = mono_get_method_full (ctx->image, token, NULL, ctx->generic_context);
if (!method) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Cannot load method from token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
if (mono_method_is_valid_in_context (ctx, method) == RESULT_INVALID)
return NULL;
return method;
}
static MonoType*
verifier_load_type (VerifyContext *ctx, int token, const char *opcode) {
MonoType* type;
if (!IS_TYPE_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid type token 0x%08x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
type = mono_type_get_full (ctx->image, token, ctx->generic_context);
if (!type) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Cannot load type from token 0x%08x for %s at 0x%04x", token, opcode, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return NULL;
}
if (!mono_type_is_valid_in_context (ctx, type))
return NULL;
return type;
}
/* stack_slot_get_type:
*
* Returns the stack type of @value. This value includes POINTER_MASK.
*
 * Use this function for checks that must account for the value possibly being a managed pointer.
*/
static gint32
stack_slot_get_type (ILStackDesc *value)
{
return value->stype & RAW_TYPE_MASK;
}
/* stack_slot_get_underlying_type:
*
* Returns the stack type of @value. This value does not include POINTER_MASK.
*
 * Use this function in cases where the fact that the value could be a managed pointer is
 * irrelevant. For example, a field load doesn't care whether the type on the stack is a managed pointer.
*/
static gint32
stack_slot_get_underlying_type (ILStackDesc *value)
{
return value->stype & TYPE_MASK;
}
/* stack_slot_is_managed_pointer:
*
 * Returns TRUE if @value is a managed pointer.
*/
static gboolean
stack_slot_is_managed_pointer (ILStackDesc *value)
{
return (value->stype & POINTER_MASK) == POINTER_MASK;
}
/* stack_slot_is_managed_mutability_pointer:
*
 * Returns TRUE if @value is a managed mutability pointer.
*/
static G_GNUC_UNUSED gboolean
stack_slot_is_managed_mutability_pointer (ILStackDesc *value)
{
return (value->stype & CMMP_MASK) == CMMP_MASK;
}
/* stack_slot_is_null_literal:
*
 * Returns TRUE if @value is the null literal.
*/
static gboolean
stack_slot_is_null_literal (ILStackDesc *value)
{
return (value->stype & NULL_LITERAL_MASK) == NULL_LITERAL_MASK;
}
/* stack_slot_is_this_pointer:
*
 * Returns TRUE if @value is the this pointer literal.
*/
static gboolean
stack_slot_is_this_pointer (ILStackDesc *value)
{
return (value->stype & THIS_POINTER_MASK) == THIS_POINTER_MASK;
}
/* stack_slot_is_boxed_value:
*
 * Returns TRUE if @value is a boxed value.
*/
static gboolean
stack_slot_is_boxed_value (ILStackDesc *value)
{
return (value->stype & BOXED_MASK) == BOXED_MASK;
}
static const char *
stack_slot_get_name (ILStackDesc *value)
{
return type_names [value->stype & TYPE_MASK];
}
#define APPEND_WITH_PREDICATE(PRED,NAME) do {\
if (PRED (value)) { \
if (!first) \
g_string_append (str, ", "); \
g_string_append (str, NAME); \
first = FALSE; \
} } while (0)
static char*
stack_slot_stack_type_full_name (ILStackDesc *value)
{
GString *str = g_string_new ("");
char *result;
gboolean has_pred = FALSE, first = TRUE;
if ((value->stype & TYPE_MASK) != value->stype) {
g_string_append(str, "[");
APPEND_WITH_PREDICATE (stack_slot_is_this_pointer, "this");
APPEND_WITH_PREDICATE (stack_slot_is_boxed_value, "boxed");
APPEND_WITH_PREDICATE (stack_slot_is_null_literal, "null");
APPEND_WITH_PREDICATE (stack_slot_is_managed_mutability_pointer, "cmmp");
APPEND_WITH_PREDICATE (stack_slot_is_managed_pointer, "mp");
has_pred = TRUE;
}
if (mono_type_is_generic_argument (value->type) && !stack_slot_is_boxed_value (value)) {
if (!has_pred)
g_string_append(str, "[");
if (!first)
g_string_append (str, ", ");
g_string_append (str, "unboxed");
has_pred = TRUE;
}
if (has_pred)
g_string_append(str, "] ");
g_string_append (str, stack_slot_get_name (value));
result = str->str;
g_string_free (str, FALSE);
return result;
}
static char*
stack_slot_full_name (ILStackDesc *value)
{
char *type_name = mono_type_full_name (value->type);
char *stack_name = stack_slot_stack_type_full_name (value);
char *res = g_strdup_printf ("%s (%s)", type_name, stack_name);
g_free (type_name);
g_free (stack_name);
return res;
}
//////////////////////////////////////////////////////////////////
void
mono_free_verify_list (GSList *list)
{
MonoVerifyInfoExtended *info;
GSList *tmp;
for (tmp = list; tmp; tmp = tmp->next) {
info = tmp->data;
g_free (info->info.message);
g_free (info);
}
g_slist_free (list);
}
#define ADD_ERROR(list,msg) \
do { \
MonoVerifyInfoExtended *vinfo = g_new (MonoVerifyInfoExtended, 1); \
vinfo->info.status = MONO_VERIFY_ERROR; \
vinfo->info.message = (msg); \
(list) = g_slist_prepend ((list), vinfo); \
} while (0)
#define ADD_WARN(list,code,msg) \
do { \
MonoVerifyInfoExtended *vinfo = g_new (MonoVerifyInfoExtended, 1); \
vinfo->info.status = (code); \
vinfo->info.message = (msg); \
(list) = g_slist_prepend ((list), vinfo); \
} while (0)
static const char
valid_cultures[][9] = {
"ar-SA", "ar-IQ", "ar-EG", "ar-LY",
"ar-DZ", "ar-MA", "ar-TN", "ar-OM",
"ar-YE", "ar-SY", "ar-JO", "ar-LB",
"ar-KW", "ar-AE", "ar-BH", "ar-QA",
"bg-BG", "ca-ES", "zh-TW", "zh-CN",
"zh-HK", "zh-SG", "zh-MO", "cs-CZ",
"da-DK", "de-DE", "de-CH", "de-AT",
"de-LU", "de-LI", "el-GR", "en-US",
"en-GB", "en-AU", "en-CA", "en-NZ",
"en-IE", "en-ZA", "en-JM", "en-CB",
"en-BZ", "en-TT", "en-ZW", "en-PH",
"es-ES-Ts", "es-MX", "es-ES-Is", "es-GT",
"es-CR", "es-PA", "es-DO", "es-VE",
"es-CO", "es-PE", "es-AR", "es-EC",
"es-CL", "es-UY", "es-PY", "es-BO",
"es-SV", "es-HN", "es-NI", "es-PR",
"Fi-FI", "fr-FR", "fr-BE", "fr-CA",
"Fr-CH", "fr-LU", "fr-MC", "he-IL",
"hu-HU", "is-IS", "it-IT", "it-CH",
"Ja-JP", "ko-KR", "nl-NL", "nl-BE",
"nb-NO", "nn-NO", "pl-PL", "pt-BR",
"pt-PT", "ro-RO", "ru-RU", "hr-HR",
"Lt-sr-SP", "Cy-sr-SP", "sk-SK", "sq-AL",
"sv-SE", "sv-FI", "th-TH", "tr-TR",
"ur-PK", "id-ID", "uk-UA", "be-BY",
"sl-SI", "et-EE", "lv-LV", "lt-LT",
"fa-IR", "vi-VN", "hy-AM", "Lt-az-AZ",
"Cy-az-AZ",
"eu-ES", "mk-MK", "af-ZA",
"ka-GE", "fo-FO", "hi-IN", "ms-MY",
"ms-BN", "kk-KZ", "ky-KZ", "sw-KE",
"Lt-uz-UZ", "Cy-uz-UZ", "tt-TA", "pa-IN",
"gu-IN", "ta-IN", "te-IN", "kn-IN",
"mr-IN", "sa-IN", "mn-MN", "gl-ES",
"kok-IN", "syr-SY", "div-MV"
};
static int
is_valid_culture (const char *cname)
{
int i;
int found;
found = *cname == 0;
for (i = 0; i < G_N_ELEMENTS (valid_cultures); ++i) {
if (!g_ascii_strcasecmp (valid_cultures [i], cname)) { /* 0 means a match */
found = 1;
break;
}
}
return found;
}
static int
is_valid_assembly_flags (guint32 flags) {
/* Metadata: 22.1.2 */
flags &= ~(0x8000 | 0x4000); /* ignore reserved bits 0x0030? */
return ((flags == 1) || (flags == 0));
}
static int
is_valid_blob (MonoImage *image, guint32 blob_index, int notnull)
{
guint32 size;
const char *p, *blob_end;
if (blob_index >= image->heap_blob.size)
return 0;
p = mono_metadata_blob_heap (image, blob_index);
size = mono_metadata_decode_blob_size (p, &blob_end);
if (blob_index + size + (blob_end-p) > image->heap_blob.size)
return 0;
if (notnull && !size)
return 0;
return 1;
}
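/*
 * Blob heap entries are a compressed length followed by that many
 * bytes: lengths below 0x80 take one length byte, lengths below 0x4000
 * take two, and larger ones take four (ECMA-335 compressed integers).
 * mono_metadata_decode_blob_size () returns the decoded length and
 * leaves blob_end pointing just past the length prefix, which is why
 * both are added to blob_index in the bounds check above.
 */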
static const char*
is_valid_string (MonoImage *image, guint32 str_index, int notnull)
{
const char *p, *blob_end, *res;
if (str_index >= image->heap_strings.size)
return NULL;
res = p = mono_metadata_string_heap (image, str_index);
blob_end = mono_metadata_string_heap (image, image->heap_strings.size - 1);
if (notnull && !*p)
return 0;
/*
* FIXME: should check it's a valid utf8 string, too.
*/
while (p <= blob_end) {
if (!*p)
return res;
++p;
}
return *p? NULL: res;
}
static int
is_valid_cls_ident (const char *p)
{
/*
* FIXME: we need the full unicode glib support for this.
* Check: http://www.unicode.org/unicode/reports/tr15/Identifier.java
* We do the lame thing for now.
*/
if (!isalpha (*p))
return 0;
++p;
while (*p) {
if (!isalnum (*p) && *p != '_')
return 0;
++p;
}
return 1;
}
static int
is_valid_filename (const char *p)
{
if (!*p)
return 0;
return strpbrk (p, "\\//:")? 0: 1;
}
static GSList*
verify_assembly_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLY];
guint32 cols [MONO_ASSEMBLY_SIZE];
const char *p;
if (level & MONO_VERIFY_ERROR) {
if (t->rows > 1)
ADD_ERROR (list, g_strdup ("Assembly table may only have 0 or 1 rows"));
mono_metadata_decode_row (t, 0, cols, MONO_ASSEMBLY_SIZE);
switch (cols [MONO_ASSEMBLY_HASH_ALG]) {
case ASSEMBLY_HASH_NONE:
case ASSEMBLY_HASH_MD5:
case ASSEMBLY_HASH_SHA1:
break;
default:
ADD_ERROR (list, g_strdup_printf ("Hash algorithm 0x%x unknown", cols [MONO_ASSEMBLY_HASH_ALG]));
}
if (!is_valid_assembly_flags (cols [MONO_ASSEMBLY_FLAGS]))
ADD_ERROR (list, g_strdup_printf ("Invalid flags in assembly: 0x%x", cols [MONO_ASSEMBLY_FLAGS]));
if (!is_valid_blob (image, cols [MONO_ASSEMBLY_PUBLIC_KEY], FALSE))
ADD_ERROR (list, g_strdup ("Assembly public key is an invalid index"));
if (!(p = is_valid_string (image, cols [MONO_ASSEMBLY_NAME], TRUE))) {
ADD_ERROR (list, g_strdup ("Assembly name is invalid"));
} else {
if (strpbrk (p, ":\\/."))
ADD_ERROR (list, g_strdup_printf ("Assembly name `%s' contains invalid chars", p));
}
if (!(p = is_valid_string (image, cols [MONO_ASSEMBLY_CULTURE], FALSE))) {
ADD_ERROR (list, g_strdup ("Assembly culture is an invalid index"));
} else {
if (!is_valid_culture (p))
ADD_ERROR (list, g_strdup_printf ("Assembly culture `%s' is invalid", p));
}
}
return list;
}
static GSList*
verify_assemblyref_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLYREF];
guint32 cols [MONO_ASSEMBLYREF_SIZE];
const char *p;
int i;
if (level & MONO_VERIFY_ERROR) {
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_ASSEMBLYREF_SIZE);
if (!is_valid_assembly_flags (cols [MONO_ASSEMBLYREF_FLAGS]))
ADD_ERROR (list, g_strdup_printf ("Invalid flags in assemblyref row %d: 0x%x", i + 1, cols [MONO_ASSEMBLY_FLAGS]));
if (!is_valid_blob (image, cols [MONO_ASSEMBLYREF_PUBLIC_KEY], FALSE))
ADD_ERROR (list, g_strdup_printf ("AssemblyRef public key in row %d is an invalid index", i + 1));
if (!(p = is_valid_string (image, cols [MONO_ASSEMBLYREF_CULTURE], FALSE))) {
ADD_ERROR (list, g_strdup_printf ("AssemblyRef culture in row %d is invalid", i + 1));
} else {
if (!is_valid_culture (p))
ADD_ERROR (list, g_strdup_printf ("AssemblyRef culture `%s' in row %d is invalid", p, i + 1));
}
if (cols [MONO_ASSEMBLYREF_HASH_VALUE] && !is_valid_blob (image, cols [MONO_ASSEMBLYREF_HASH_VALUE], TRUE))
ADD_ERROR (list, g_strdup_printf ("AssemblyRef hash value in row %d is invalid or not null and empty", i + 1));
}
}
if (level & MONO_VERIFY_WARNING) {
/* check for duplicated rows */
for (i = 0; i < t->rows; ++i) {
}
}
return list;
}
static GSList*
verify_class_layout_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_CLASSLAYOUT];
MonoTableInfo *tdef = &image->tables [MONO_TABLE_TYPEDEF];
guint32 cols [MONO_CLASS_LAYOUT_SIZE];
guint32 value, i;
if (level & MONO_VERIFY_ERROR) {
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_CLASS_LAYOUT_SIZE);
if (cols [MONO_CLASS_LAYOUT_PARENT] > tdef->rows || !cols [MONO_CLASS_LAYOUT_PARENT]) {
ADD_ERROR (list, g_strdup_printf ("Parent in class layout is invalid in row %d", i + 1));
} else {
value = mono_metadata_decode_row_col (tdef, cols [MONO_CLASS_LAYOUT_PARENT] - 1, MONO_TYPEDEF_FLAGS);
if (value & TYPE_ATTRIBUTE_INTERFACE)
ADD_ERROR (list, g_strdup_printf ("Parent in class layout row %d is an interface", i + 1));
if (value & TYPE_ATTRIBUTE_AUTO_LAYOUT)
ADD_ERROR (list, g_strdup_printf ("Parent in class layout row %d is AutoLayout", i + 1));
if (value & TYPE_ATTRIBUTE_SEQUENTIAL_LAYOUT) {
switch (cols [MONO_CLASS_LAYOUT_PACKING_SIZE]) {
case 0: case 1: case 2: case 4: case 8: case 16:
case 32: case 64: case 128: break;
default:
ADD_ERROR (list, g_strdup_printf ("Packing size %d in class layout row %d is invalid", cols [MONO_CLASS_LAYOUT_PACKING_SIZE], i + 1));
}
} else if (value & TYPE_ATTRIBUTE_EXPLICIT_LAYOUT) {
/*
* FIXME: LAMESPEC: it claims it must be 0 (it's 1, instead).
if (cols [MONO_CLASS_LAYOUT_PACKING_SIZE])
ADD_ERROR (list, g_strdup_printf ("Packing size %d in class layout row %d is invalid with explicit layout", cols [MONO_CLASS_LAYOUT_PACKING_SIZE], i + 1));
*/
}
/*
* FIXME: we need to check that if class size != 0,
* it needs to be greater than the class calculated size.
* If parent is a valuetype it also needs to be smaller than
* 1 MByte (0x100000 bytes).
* To do both these checks we need to load the referenced
* assemblies, though (the spec claims we didn't have to, bah).
*/
/*
* We need to check that the parent types have the same layout
* type as well.
*/
}
}
}
return list;
}
static GSList*
verify_constant_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_CONSTANT];
guint32 cols [MONO_CONSTANT_SIZE];
guint32 value, i;
GHashTable *dups = g_hash_table_new (NULL, NULL);
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_CONSTANT_SIZE);
if (level & MONO_VERIFY_ERROR)
if (g_hash_table_lookup (dups, GUINT_TO_POINTER (cols [MONO_CONSTANT_PARENT])))
ADD_ERROR (list, g_strdup_printf ("Parent 0x%08x is duplicated in Constant row %d", cols [MONO_CONSTANT_PARENT], i + 1));
g_hash_table_insert (dups, GUINT_TO_POINTER (cols [MONO_CONSTANT_PARENT]),
GUINT_TO_POINTER (cols [MONO_CONSTANT_PARENT]));
switch (cols [MONO_CONSTANT_TYPE]) {
case MONO_TYPE_U1: /* LAMESPEC: it says I1...*/
case MONO_TYPE_U2:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
if (level & MONO_VERIFY_CLS)
ADD_WARN (list, MONO_VERIFY_CLS, g_strdup_printf ("Type 0x%x not CLS compliant in Constant row %d", cols [MONO_CONSTANT_TYPE], i + 1));
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_I1:
case MONO_TYPE_I2:
case MONO_TYPE_I4:
case MONO_TYPE_I8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_STRING:
case MONO_TYPE_CLASS:
break;
default:
if (level & MONO_VERIFY_ERROR)
ADD_ERROR (list, g_strdup_printf ("Type 0x%x is invalid in Constant row %d", cols [MONO_CONSTANT_TYPE], i + 1));
}
if (level & MONO_VERIFY_ERROR) {
value = cols [MONO_CONSTANT_PARENT] >> MONO_HASCONSTANT_BITS;
switch (cols [MONO_CONSTANT_PARENT] & MONO_HASCONSTANT_MASK) {
case MONO_HASCONSTANT_FIEDDEF:
if (value > image->tables [MONO_TABLE_FIELD].rows)
ADD_ERROR (list, g_strdup_printf ("Parent (field) is invalid in Constant row %d", i + 1));
break;
case MONO_HASCONSTANT_PARAM:
if (value > image->tables [MONO_TABLE_PARAM].rows)
ADD_ERROR (list, g_strdup_printf ("Parent (param) is invalid in Constant row %d", i + 1));
break;
case MONO_HASCONSTANT_PROPERTY:
if (value > image->tables [MONO_TABLE_PROPERTY].rows)
ADD_ERROR (list, g_strdup_printf ("Parent (property) is invalid in Constant row %d", i + 1));
break;
default:
ADD_ERROR (list, g_strdup_printf ("Parent is invalid in Constant row %d", i + 1));
break;
}
}
if (level & MONO_VERIFY_CLS) {
/*
* FIXME: verify the constant type is consistent with the enum's
* underlying type if the parent is an enum.
*/
}
}
g_hash_table_destroy (dups);
return list;
}
static GSList*
verify_event_map_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_EVENTMAP];
guint32 cols [MONO_EVENT_MAP_SIZE];
guint32 i, last_event;
GHashTable *dups = g_hash_table_new (NULL, NULL);
last_event = 0;
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_EVENT_MAP_SIZE);
if (level & MONO_VERIFY_ERROR)
if (g_hash_table_lookup (dups, GUINT_TO_POINTER (cols [MONO_EVENT_MAP_PARENT])))
ADD_ERROR (list, g_strdup_printf ("Parent 0x%08x is duplicated in Event Map row %d", cols [MONO_EVENT_MAP_PARENT], i + 1));
g_hash_table_insert (dups, GUINT_TO_POINTER (cols [MONO_EVENT_MAP_PARENT]),
GUINT_TO_POINTER (cols [MONO_EVENT_MAP_PARENT]));
if (level & MONO_VERIFY_ERROR) {
if (cols [MONO_EVENT_MAP_PARENT] > image->tables [MONO_TABLE_TYPEDEF].rows)
ADD_ERROR (list, g_strdup_printf ("Parent 0x%08x is invalid in Event Map row %d", cols [MONO_EVENT_MAP_PARENT], i + 1));
if (cols [MONO_EVENT_MAP_EVENTLIST] > image->tables [MONO_TABLE_EVENT].rows)
ADD_ERROR (list, g_strdup_printf ("EventList 0x%08x is invalid in Event Map row %d", cols [MONO_EVENT_MAP_EVENTLIST], i + 1));
if (cols [MONO_EVENT_MAP_EVENTLIST] <= last_event)
ADD_ERROR (list, g_strdup_printf ("EventList overlap in Event Map row %d", i + 1));
last_event = cols [MONO_EVENT_MAP_EVENTLIST];
}
}
g_hash_table_destroy (dups);
return list;
}
static GSList*
verify_event_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_EVENT];
guint32 cols [MONO_EVENT_SIZE];
const char *p;
guint32 value, i;
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_EVENT_SIZE);
if (cols [MONO_EVENT_FLAGS] & ~(EVENT_SPECIALNAME|EVENT_RTSPECIALNAME)) {
if (level & MONO_VERIFY_ERROR)
ADD_ERROR (list, g_strdup_printf ("Flags 0x%04x invalid in Event row %d", cols [MONO_EVENT_FLAGS], i + 1));
}
if (!(p = is_valid_string (image, cols [MONO_EVENT_NAME], TRUE))) {
if (level & MONO_VERIFY_ERROR)
ADD_ERROR (list, g_strdup_printf ("Invalid name in Event row %d", i + 1));
} else {
if (level & MONO_VERIFY_CLS) {
if (!is_valid_cls_ident (p))
ADD_WARN (list, MONO_VERIFY_CLS, g_strdup_printf ("Invalid CLS name '%s' in Event row %d", p, i + 1));
}
}
if (level & MONO_VERIFY_ERROR && cols [MONO_EVENT_TYPE]) {
value = cols [MONO_EVENT_TYPE] >> MONO_TYPEDEFORREF_BITS;
switch (cols [MONO_EVENT_TYPE] & MONO_TYPEDEFORREF_MASK) {
case MONO_TYPEDEFORREF_TYPEDEF:
if (!value || value > image->tables [MONO_TABLE_TYPEDEF].rows)
ADD_ERROR (list, g_strdup_printf ("Type invalid in Event row %d", i + 1));
break;
case MONO_TYPEDEFORREF_TYPEREF:
if (!value || value > image->tables [MONO_TABLE_TYPEREF].rows)
ADD_ERROR (list, g_strdup_printf ("Type invalid in Event row %d", i + 1));
break;
case MONO_TYPEDEFORREF_TYPESPEC:
if (!value || value > image->tables [MONO_TABLE_TYPESPEC].rows)
ADD_ERROR (list, g_strdup_printf ("Type invalid in Event row %d", i + 1));
break;
default:
ADD_ERROR (list, g_strdup_printf ("Type invalid in Event row %d", i + 1));
}
}
/*
* FIXME: check that there is exactly one add and one remove row in
* MethodSemantics, zero or one raise rows and zero or more other rows
* (maybe it's better to check for these while checking MethodSemantics).
* Check for duplicated names for the same type [ERROR].
* Check for CLS duplicate names for the same type [CLS].
*/
}
return list;
}
static GSList*
verify_field_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_FIELD];
guint32 cols [MONO_FIELD_SIZE];
const char *p;
guint32 i, flags;
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_FIELD_SIZE);
/*
* Check this field has only one owner and that the owner is not
* an interface (done in verify_typedef_table()).
*/
flags = cols [MONO_FIELD_FLAGS];
switch (flags & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) {
case FIELD_ATTRIBUTE_COMPILER_CONTROLLED:
case FIELD_ATTRIBUTE_PRIVATE:
case FIELD_ATTRIBUTE_FAM_AND_ASSEM:
case FIELD_ATTRIBUTE_ASSEMBLY:
case FIELD_ATTRIBUTE_FAMILY:
case FIELD_ATTRIBUTE_FAM_OR_ASSEM:
case FIELD_ATTRIBUTE_PUBLIC:
break;
default:
if (level & MONO_VERIFY_ERROR)
ADD_ERROR (list, g_strdup_printf ("Invalid access mask in Field row %d", i + 1));
break;
}
if (level & MONO_VERIFY_ERROR) {
if ((flags & FIELD_ATTRIBUTE_LITERAL) && (flags & FIELD_ATTRIBUTE_INIT_ONLY))
ADD_ERROR (list, g_strdup_printf ("Literal and InitOnly cannot both be set in Field row %d", i + 1));
if ((flags & FIELD_ATTRIBUTE_LITERAL) && !(flags & FIELD_ATTRIBUTE_STATIC))
ADD_ERROR (list, g_strdup_printf ("Literal also requires Static to be set in Field row %d", i + 1));
if ((flags & FIELD_ATTRIBUTE_RT_SPECIAL_NAME) && !(flags & FIELD_ATTRIBUTE_SPECIAL_NAME))
ADD_ERROR (list, g_strdup_printf ("RTSpecialName also requires SpecialName to be set in Field row %d", i + 1));
/*
* FIXME: check there is only one owner in the respective table.
* if (flags & FIELD_ATTRIBUTE_HAS_FIELD_MARSHAL)
* if (flags & FIELD_ATTRIBUTE_HAS_DEFAULT)
* if (flags & FIELD_ATTRIBUTE_HAS_FIELD_RVA)
*/
}
if (!(p = is_valid_string (image, cols [MONO_FIELD_NAME], TRUE))) {
if (level & MONO_VERIFY_ERROR)
ADD_ERROR (list, g_strdup_printf ("Invalid name in Field row %d", i + 1));
} else {
if (level & MONO_VERIFY_CLS) {
if (!is_valid_cls_ident (p))
ADD_WARN (list, MONO_VERIFY_CLS, g_strdup_printf ("Invalid CLS name '%s' in Field row %d", p, i + 1));
}
}
/*
* check the signature.
* If the owner is the module, the field needs to be static and the
* access mask needs to be CompilerControlled, Public or Private
* (not allowed in CLS mode).
* If the owner is an enum ...
*/
}
return list;
}
static GSList*
verify_file_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_FILE];
guint32 cols [MONO_FILE_SIZE];
const char *p;
guint32 i;
GHashTable *dups = g_hash_table_new (g_str_hash, g_str_equal);
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_FILE_SIZE);
if (level & MONO_VERIFY_ERROR) {
if (cols [MONO_FILE_FLAGS] != FILE_CONTAINS_METADATA && cols [MONO_FILE_FLAGS] != FILE_CONTAINS_NO_METADATA)
ADD_ERROR (list, g_strdup_printf ("Invalid flags in File row %d", i + 1));
if (!is_valid_blob (image, cols [MONO_FILE_HASH_VALUE], TRUE))
ADD_ERROR (list, g_strdup_printf ("File hash value in row %d is invalid or not null and empty", i + 1));
}
if (!(p = is_valid_string (image, cols [MONO_FILE_NAME], TRUE))) {
if (level & MONO_VERIFY_ERROR)
ADD_ERROR (list, g_strdup_printf ("Invalid name in File row %d", i + 1));
} else {
if (level & MONO_VERIFY_ERROR) {
if (!is_valid_filename (p))
ADD_ERROR (list, g_strdup_printf ("Invalid name '%s` in File row %d", p, i + 1));
else if (g_hash_table_lookup (dups, p)) {
ADD_ERROR (list, g_strdup_printf ("Duplicate name '%s` in File row %d", p, i + 1));
}
g_hash_table_insert (dups, (gpointer)p, (gpointer)p);
}
}
/*
* FIXME: I don't understand what this means:
* If this module contains a row in the Assembly table (that is, if this module "holds the manifest")
* then there shall not be any row in the File table for this module - i.e., no self-reference [ERROR]
*/
}
if (level & MONO_VERIFY_WARNING) {
if (!t->rows && image->tables [MONO_TABLE_EXPORTEDTYPE].rows)
ADD_WARN (list, MONO_VERIFY_WARNING, g_strdup ("ExportedType table should be empty if File table is empty"));
}
g_hash_table_destroy (dups);
return list;
}
static GSList*
verify_moduleref_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_MODULEREF];
MonoTableInfo *tfile = &image->tables [MONO_TABLE_FILE];
guint32 cols [MONO_MODULEREF_SIZE];
const char *p, *pf;
guint32 found, i, j, value;
GHashTable *dups = g_hash_table_new (g_str_hash, g_str_equal);
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_MODULEREF_SIZE);
if (!(p = is_valid_string (image, cols [MONO_MODULEREF_NAME], TRUE))) {
if (level & MONO_VERIFY_ERROR)
ADD_ERROR (list, g_strdup_printf ("Invalid name in ModuleRef row %d", i + 1));
} else {
if (level & MONO_VERIFY_ERROR) {
if (!is_valid_filename (p))
ADD_ERROR (list, g_strdup_printf ("Invalid name '%s` in ModuleRef row %d", p, i + 1));
else if (g_hash_table_lookup (dups, p)) {
ADD_WARN (list, MONO_VERIFY_WARNING, g_strdup_printf ("Duplicate name '%s` in ModuleRef row %d", p, i + 1));
g_hash_table_insert (dups, (gpointer)p, (gpointer)p);
found = 0;
for (j = 0; j < tfile->rows; ++j) {
value = mono_metadata_decode_row_col (tfile, j, MONO_FILE_NAME);
if ((pf = is_valid_string (image, value, TRUE)))
if (strcmp (p, pf) == 0) {
found = 1;
break;
}
}
if (!found)
ADD_ERROR (list, g_strdup_printf ("Name '%s` in ModuleRef row %d doesn't have a match in File table", p, i + 1));
}
}
}
}
g_hash_table_destroy (dups);
return list;
}
static GSList*
verify_standalonesig_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_STANDALONESIG];
guint32 cols [MONO_STAND_ALONE_SIGNATURE_SIZE];
const char *p;
guint32 i;
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_STAND_ALONE_SIGNATURE_SIZE);
if (level & MONO_VERIFY_ERROR) {
if (!is_valid_blob (image, cols [MONO_STAND_ALONE_SIGNATURE], TRUE)) {
ADD_ERROR (list, g_strdup_printf ("Signature is invalid in StandAloneSig row %d", i + 1));
} else {
p = mono_metadata_blob_heap (image, cols [MONO_STAND_ALONE_SIGNATURE]);
/* FIXME: check it's a valid locals or method sig.*/
}
}
}
return list;
}
GSList*
mono_image_verify_tables (MonoImage *image, int level)
{
GSList *error_list = NULL;
error_list = verify_assembly_table (image, error_list, level);
/*
* AssemblyOS, AssemblyProcessor, AssemblyRefOS and
* AssemblyRefProcessor should be ignored,
* though we may want to emit a warning, since it should not
* be present in a PE file.
*/
error_list = verify_assemblyref_table (image, error_list, level);
error_list = verify_class_layout_table (image, error_list, level);
error_list = verify_constant_table (image, error_list, level);
/*
* the CustomAttribute and DeclSecurity tables are not verified yet.
*/
error_list = verify_event_map_table (image, error_list, level);
error_list = verify_event_table (image, error_list, level);
error_list = verify_field_table (image, error_list, level);
error_list = verify_file_table (image, error_list, level);
error_list = verify_moduleref_table (image, error_list, level);
error_list = verify_standalonesig_table (image, error_list, level);
return g_slist_reverse (error_list);
}
#define ADD_INVALID(list,msg) \
do { \
MonoVerifyInfoExtended *vinfo = g_new (MonoVerifyInfoExtended, 1); \
vinfo->status = MONO_VERIFY_ERROR; \
vinfo->message = (msg); \
(list) = g_slist_prepend ((list), vinfo); \
/*G_BREAKPOINT ();*/ \
goto invalid_cil; \
} while (0)
#define CHECK_STACK_UNDERFLOW(num) \
do { \
if (cur_stack < (num)) \
ADD_INVALID (list, g_strdup_printf ("Stack underflow at 0x%04x (%d items instead of %d)", ip_offset, cur_stack, (num))); \
} while (0)
#define CHECK_STACK_OVERFLOW() \
do { \
if (cur_stack >= max_stack) \
ADD_INVALID (list, g_strdup_printf ("Maxstack exceeded at 0x%04x", ip_offset)); \
} while (0)
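/*
* in_any_block:
*
* Returns 1 if @offset falls inside the try, handler or filter range of
* any exception clause of @header, 0 otherwise.
*/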
static int
in_any_block (MonoMethodHeader *header, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return 1;
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return 1;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return 1;
}
return 0;
}
/*
* in_any_exception_block:
*
* Returns TRUE if @offset is part of any exception clause (filter, handler, catch, finally or fault).
*/
static gboolean
in_any_exception_block (MonoMethodHeader *header, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return TRUE;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return TRUE;
}
return FALSE;
}
/*
* is_valid_branch_instruction:
*
* Verify if it's valid to perform a branch from @offset to @target.
* This should be used with br and brtrue/false.
* It returns 0 if valid, 1 for unverifiable and 2 for invalid.
* The major difference from other similar functions is that branching into a
* finally/fault block is invalid instead of just unverifiable.
*/
static int
is_valid_branch_instruction (MonoMethodHeader *header, guint offset, guint target)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
/*branching into a finally block is invalid*/
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY || clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) &&
!MONO_OFFSET_IN_HANDLER (clause, offset) &&
MONO_OFFSET_IN_HANDLER (clause, target))
return 2;
if (clause->try_offset != target && (MONO_OFFSET_IN_CLAUSE (clause, offset) ^ MONO_OFFSET_IN_CLAUSE (clause, target)))
return 1;
if (MONO_OFFSET_IN_HANDLER (clause, offset) ^ MONO_OFFSET_IN_HANDLER (clause, target))
return 1;
if (MONO_OFFSET_IN_FILTER (clause, offset) ^ MONO_OFFSET_IN_FILTER (clause, target))
return 1;
}
return 0;
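/*
* Illustrative sketch (not from the spec text): a branch whose target
* lands inside a finally handler from outside of it is invalid (2):
*
* .try { ..., br TARGET, ... } finally { TARGET: ..., endfinally }
*
* Crossing a try boundary in either direction is merely unverifiable (1),
* unless the target is exactly the start of the try block.
*/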
}
/*
* is_valid_cmp_branch_instruction:
*
* Verify if it's valid to perform a branch from @offset to @target.
* This should be used with binary comparison branch instructions, like beq, bge and similar.
* It returns 0 if valid, 1 for unverifiable and 2 for invalid.
*
* The major differences from other similar functions are that most errors lead to invalid
* code and only branching out of finally, filter or fault clauses is unverifiable.
*/
static int
is_valid_cmp_branch_instruction (MonoMethodHeader *header, guint offset, guint target)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
/*branching out of a handler or finally*/
if (clause->flags != MONO_EXCEPTION_CLAUSE_NONE &&
MONO_OFFSET_IN_HANDLER (clause, offset) &&
!MONO_OFFSET_IN_HANDLER (clause, target))
return 1;
if (clause->try_offset != target && (MONO_OFFSET_IN_CLAUSE (clause, offset) ^ MONO_OFFSET_IN_CLAUSE (clause, target)))
return 2;
if (MONO_OFFSET_IN_HANDLER (clause, offset) ^ MONO_OFFSET_IN_HANDLER (clause, target))
return 2;
if (MONO_OFFSET_IN_FILTER (clause, offset) ^ MONO_OFFSET_IN_FILTER (clause, target))
return 2;
}
return 0;
}
/*
* A leave can't escape a finally block
*/
static int
is_correct_leave (MonoMethodHeader *header, guint offset, guint target)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY && MONO_OFFSET_IN_HANDLER (clause, offset) && !MONO_OFFSET_IN_HANDLER (clause, target))
return 0;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return 0;
}
return 1;
}
/*
* A rethrow can't happen outside of a catch handler.
*/
static int
is_correct_rethrow (MonoMethodHeader *header, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return 1;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return 1;
}
return 0;
}
/*
* An endfinally can't happen outside of a finally/fault handler.
*/
static int
is_correct_endfinally (MonoMethodHeader *header, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_HANDLER (clause, offset) && (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT || clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY))
return 1;
}
return 0;
}
/*
* An endfilter can only happen inside a filter clause.
* In non-strict mode endfilter is allowed inside the handler clause too.
*/
static MonoExceptionClause *
is_correct_endfilter (VerifyContext *ctx, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < ctx->header->num_clauses; ++i) {
clause = &ctx->header->clauses [i];
if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER)
continue;
if (MONO_OFFSET_IN_FILTER (clause, offset))
return clause;
if (!IS_STRICT_MODE (ctx) && MONO_OFFSET_IN_HANDLER (clause, offset))
return clause;
}
return NULL;
}
/*
* A non-strict endfilter can happen inside a try block or any handler block.
*/
static int
is_unverifiable_endfilter (VerifyContext *ctx, guint offset)
{
int i;
MonoExceptionClause *clause;
for (i = 0; i < ctx->header->num_clauses; ++i) {
clause = &ctx->header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return 1;
}
return 0;
}
static gboolean
is_valid_bool_arg (ILStackDesc *arg)
{
if (stack_slot_is_managed_pointer (arg) || stack_slot_is_boxed_value (arg) || stack_slot_is_null_literal (arg))
return TRUE;
switch (stack_slot_get_underlying_type (arg)) {
case TYPE_I4:
case TYPE_I8:
case TYPE_NATIVE_INT:
case TYPE_PTR:
return TRUE;
case TYPE_COMPLEX:
g_assert (arg->type);
switch (arg->type->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
return TRUE;
case MONO_TYPE_GENERICINST:
/*We need to check if the container class
* of the generic type is a valuetype, iow:
* is it a "class Foo<T>" or a "struct Foo<T>"?
*/
return !arg->type->data.generic_class->container_class->valuetype;
}
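/* fall through: other complex types are not valid boolean arguments */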
default:
return FALSE;
}
}
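/*
* Illustrative examples: brtrue/brfalse on an object reference, managed
* pointer or the null literal is a valid null/zero test; on a float64 or
* an unboxed valuetype the argument is rejected.
*/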
/*Type manipulation helper*/
/*Returns the byref version of the supplied MonoType*/
static MonoType*
mono_type_get_type_byref (MonoType *type)
{
if (type->byref)
return type;
return &mono_class_from_mono_type (type)->this_arg;
}
/*Returns the byval version of the supplied MonoType*/
static MonoType*
mono_type_get_type_byval (MonoType *type)
{
if (!type->byref)
return type;
return &mono_class_from_mono_type (type)->byval_arg;
}
static MonoType*
mono_type_from_stack_slot (ILStackDesc *slot)
{
if (stack_slot_is_managed_pointer (slot))
return mono_type_get_type_byref (slot->type);
return slot->type;
}
/*Stack manipulation code*/
static void
stack_init (VerifyContext *ctx, ILCodeDesc *state)
{
if (state->flags & IL_CODE_FLAG_STACK_INITED)
return;
state->size = 0;
state->flags |= IL_CODE_FLAG_STACK_INITED;
if (!state->stack)
state->stack = g_new0 (ILStackDesc, ctx->max_stack);
}
static void
stack_copy (ILCodeDesc *to, ILCodeDesc *from)
{
to->size = from->size;
memcpy (to->stack, from->stack, sizeof (ILStackDesc) * from->size);
}
static void
copy_stack_value (ILStackDesc *to, ILStackDesc *from)
{
to->stype = from->stype;
to->type = from->type;
to->method = from->method;
}
static int
check_underflow (VerifyContext *ctx, int size)
{
if (ctx->eval.size < size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Stack underflow, required %d, but have %d at 0x%04x", size, ctx->eval.size, ctx->ip_offset));
return 0;
}
return 1;
}
static int
check_overflow (VerifyContext *ctx)
{
if (ctx->eval.size >= ctx->max_stack) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method doesn't have stack-depth %d at 0x%04x", ctx->eval.size + 1, ctx->ip_offset));
return 0;
}
return 1;
}
/*This rejects unmanaged pointers (TYPE_PTR): PTR, FNPTR and TYPEDBYREF.*/
static gboolean
check_unmanaged_pointer (VerifyContext *ctx, ILStackDesc *value)
{
if (stack_slot_get_type (value) == TYPE_PTR) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Unmanaged pointer is not a verifiable type at 0x%04x", ctx->ip_offset));
return 0;
}
return 1;
}
/*TODO verify if MONO_TYPE_TYPEDBYREF is not allowed here as well.*/
static gboolean
check_unverifiable_type (VerifyContext *ctx, MonoType *type)
{
if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Unmanaged pointer is not a verifiable type at 0x%04x", ctx->ip_offset));
return 0;
}
return 1;
}
static ILStackDesc *
stack_push (VerifyContext *ctx)
{
g_assert (ctx->eval.size < ctx->max_stack);
return & ctx->eval.stack [ctx->eval.size++];
}
static ILStackDesc *
stack_push_val (VerifyContext *ctx, int stype, MonoType *type)
{
ILStackDesc *top = stack_push (ctx);
top->stype = stype;
top->type = type;
return top;
}
static ILStackDesc *
stack_pop (VerifyContext *ctx)
{
ILStackDesc *ret;
g_assert (ctx->eval.size > 0);
ret = ctx->eval.stack + --ctx->eval.size;
if ((ret->stype & UNINIT_THIS_MASK) == UNINIT_THIS_MASK)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Found use of uninitialized 'this ptr' ref at 0x%04x", ctx->ip_offset));
return ret;
}
/* This function allows safely popping an uninitialized this ptr from
* the eval stack without marking the method as unverifiable.
*/
static ILStackDesc *
stack_pop_safe (VerifyContext *ctx)
{
g_assert (ctx->eval.size > 0);
return ctx->eval.stack + --ctx->eval.size;
}
static ILStackDesc *
stack_push_stack_val (VerifyContext *ctx, ILStackDesc *value)
{
ILStackDesc *top = stack_push (ctx);
copy_stack_value (top, value);
return top;
}
/* Returns the MonoType associated with the token, or NULL if it is invalid.
*
* A boxable type can be either a reference or value type, but cannot be a byref type or an unmanaged pointer.
*/
static MonoType*
get_boxable_mono_type (VerifyContext* ctx, int token, const char *opcode)
{
MonoType *type;
MonoClass *class;
if (!(type = verifier_load_type (ctx, token, opcode)))
return NULL;
if (type->byref && type->type != MONO_TYPE_TYPEDBYREF) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid use of byref type for %s at 0x%04x", opcode, ctx->ip_offset));
return NULL;
}
if (type->type == MONO_TYPE_VOID) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid use of void type for %s at 0x%04x", opcode, ctx->ip_offset));
return NULL;
}
if (type->type == MONO_TYPE_TYPEDBYREF)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid use of typedbyref for %s at 0x%04x", opcode, ctx->ip_offset));
if (!(class = mono_class_from_mono_type (type)))
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Could not retrieve type token for %s at 0x%04x", opcode, ctx->ip_offset));
if (class->generic_container && type->type != MONO_TYPE_GENERICINST)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use the generic type definition in a boxable type position for %s at 0x%04x", opcode, ctx->ip_offset));
check_unverifiable_type (ctx, type);
return type;
}
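/*
* Example behavior (illustrative): for 'box', a token resolving to a
* byref type is an error, while System.TypedReference or an open generic
* type definition is accepted but makes the code unverifiable.
*/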
/*operation result tables */
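/*
* Layout note (assuming the TYPE_* ordering used by
* stack_slot_get_underlying_type): rows and columns are indexed by
* type - 1, in the order TYPE_I4, TYPE_I8, TYPE_NATIVE_INT, TYPE_R8,
* TYPE_PTR, TYPE_COMPLEX. See do_binop () for the lookup.
*/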
static const unsigned char bin_op_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_R8, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char add_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_R8, TYPE_INV, TYPE_INV},
{TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char sub_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_R8, TYPE_INV, TYPE_INV},
{TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_NATIVE_INT | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char int_bin_op_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char shift_op_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_I8, TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char cmp_br_op [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_I4, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char cmp_br_eq_op [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_I4, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_I4 | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_I4, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_I4 | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_I4, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_I4},
};
static const unsigned char add_ovf_un_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char sub_ovf_un_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_PTR | NON_VERIFIABLE_RESULT, TYPE_INV, TYPE_NATIVE_INT | NON_VERIFIABLE_RESULT, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
static const unsigned char bin_ovf_table [TYPE_MAX][TYPE_MAX] = {
{TYPE_I4, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_I8, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_NATIVE_INT, TYPE_INV, TYPE_NATIVE_INT, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
{TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV, TYPE_INV},
};
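/*
* Worked examples (illustrative, using the indexing above):
* add_table [TYPE_I4 - 1][TYPE_NATIVE_INT - 1] is TYPE_NATIVE_INT, so
* int32 + native int yields a native int, while
* add_table [TYPE_I4 - 1][TYPE_PTR - 1] is TYPE_PTR | NON_VERIFIABLE_RESULT,
* so pointer arithmetic is computed but flags the method unverifiable.
*/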
#ifdef MONO_VERIFIER_DEBUG
/*debug helpers */
static void
dump_stack_value (ILStackDesc *value)
{
printf ("[(%x)(%x)", value->type->type, value->stype);
if (stack_slot_is_this_pointer (value))
printf ("[this] ");
if (stack_slot_is_boxed_value (value))
printf ("[boxed] ");
if (stack_slot_is_null_literal (value))
printf ("[null] ");
if (stack_slot_is_managed_mutability_pointer (value))
printf ("Controled Mutability MP: ");
if (stack_slot_is_managed_pointer (value))
printf ("Managed Pointer to: ");
switch (stack_slot_get_underlying_type (value)) {
case TYPE_INV:
printf ("invalid type]");
return;
case TYPE_I4:
printf ("int32]");
return;
case TYPE_I8:
printf ("int64]");
return;
case TYPE_NATIVE_INT:
printf ("native int]");
return;
case TYPE_R8:
printf ("float64]");
return;
case TYPE_PTR:
printf ("unmanaged pointer]");
return;
case TYPE_COMPLEX:
switch (value->type->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_VALUETYPE:
printf ("complex] (%s)", value->type->data.klass->name);
return;
case MONO_TYPE_STRING:
printf ("complex] (string)");
return;
case MONO_TYPE_OBJECT:
printf ("complex] (object)");
return;
case MONO_TYPE_SZARRAY:
printf ("complex] (%s [])", value->type->data.klass->name);
return;
case MONO_TYPE_ARRAY:
printf ("complex] (%s [%d %d %d])",
value->type->data.array->eklass->name,
value->type->data.array->rank,
value->type->data.array->numsizes,
value->type->data.array->numlobounds);
return;
case MONO_TYPE_GENERICINST:
printf ("complex] (inst of %s )", value->type->data.generic_class->container_class->name);
return;
case MONO_TYPE_VAR:
printf ("complex] (type generic param !%d - %s) ", value->type->data.generic_param->num, mono_generic_param_info (value->type->data.generic_param)->name);
return;
case MONO_TYPE_MVAR:
printf ("complex] (method generic param !!%d - %s) ", value->type->data.generic_param->num, mono_generic_param_info (value->type->data.generic_param)->name);
return;
default: {
//should be a boxed value
char * name = mono_type_full_name (value->type);
printf ("complex] %s", name);
g_free (name);
return;
}
}
default:
printf ("unknown stack %x type]\n", value->stype);
g_assert_not_reached ();
}
}
static void
dump_stack_state (ILCodeDesc *state)
{
int i;
printf ("(%d) ", state->size);
for (i = 0; i < state->size; ++i)
dump_stack_value (state->stack + i);
printf ("\n");
}
#endif
/*Returns TRUE if candidate array type can be assigned to target.
*Both parameters MUST be of type MONO_TYPE_ARRAY (target->type == MONO_TYPE_ARRAY)
*/
static gboolean
is_array_type_compatible (MonoType *target, MonoType *candidate)
{
MonoArrayType *left = target->data.array;
MonoArrayType *right = candidate->data.array;
g_assert (target->type == MONO_TYPE_ARRAY);
g_assert (candidate->type == MONO_TYPE_ARRAY);
if (left->rank != right->rank)
return FALSE;
return mono_class_is_assignable_from (left->eklass, right->eklass);
}
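/*
* Illustrative example: a string[,] value is compatible with an
* object[,] target (equal rank, assignable element classes), but not the
* other way around. Single-dimensional MONO_TYPE_SZARRAY values never
* reach this function.
*/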
static int
get_stack_type (MonoType *type)
{
int mask = 0;
int type_kind = type->type;
if (type->byref)
mask = POINTER_MASK;
/*TODO handle CMMP_MASK */
handle_enum:
switch (type_kind) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return TYPE_I4 | mask;
case MONO_TYPE_I:
case MONO_TYPE_U:
return TYPE_NATIVE_INT | mask;
/* FIXME: the spec says that you cannot have a pointer to method pointer, do we need to check this here? */
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
case MONO_TYPE_TYPEDBYREF:
return TYPE_PTR | mask;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return TYPE_COMPLEX | mask;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return TYPE_I8 | mask;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
return TYPE_R8 | mask;
case MONO_TYPE_GENERICINST:
case MONO_TYPE_VALUETYPE:
if (mono_type_is_enum_type (type)) {
type = mono_type_get_underlying_type_any (type);
if (!type)
return FALSE;
type_kind = type->type;
goto handle_enum;
} else {
return TYPE_COMPLEX | mask;
}
default:
return TYPE_INV;
}
}
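/*
* Illustrative examples: an int16 or bool maps to TYPE_I4; a byref int32
* maps to TYPE_I4 | POINTER_MASK; an enum with an int64 underlying type
* takes the handle_enum path and maps to TYPE_I8.
*/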
/* convert MonoType to ILStackDesc format (stype) */
static gboolean
set_stack_value (VerifyContext *ctx, ILStackDesc *stack, MonoType *type, int take_addr)
{
int mask = 0;
int type_kind = type->type;
if (type->byref || take_addr)
mask = POINTER_MASK;
/* TODO handle CMMP_MASK */
handle_enum:
stack->type = type;
switch (type_kind) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
stack->stype = TYPE_I4 | mask;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
stack->stype = TYPE_NATIVE_INT | mask;
break;
/*FIXME: Do we need to check if it's a pointer to the method pointer? The spec says it's illegal to have that.*/
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
case MONO_TYPE_TYPEDBYREF:
stack->stype = TYPE_PTR | mask;
break;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
stack->stype = TYPE_COMPLEX | mask;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
stack->stype = TYPE_I8 | mask;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
stack->stype = TYPE_R8 | mask;
break;
case MONO_TYPE_GENERICINST:
case MONO_TYPE_VALUETYPE:
if (mono_type_is_enum_type (type)) {
MonoType *utype = mono_type_get_underlying_type_any (type);
if (!utype) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Could not resolve underlying type of %x at %d", type->type, ctx->ip_offset));
return FALSE;
}
type = utype;
type_kind = type->type;
goto handle_enum;
} else {
stack->stype = TYPE_COMPLEX | mask;
break;
}
default:
VERIFIER_DEBUG ( printf ("unknown type 0x%02x in eval stack type\n", type->type); );
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Illegal value set on stack 0x%02x at %d", type->type, ctx->ip_offset));
return FALSE;
}
return TRUE;
}
/*
* init_stack_with_value_at_exception_boundary:
*
* Initialize the stack and push a given type.
* The instruction is marked as being on the exception boundary.
*/
static void
init_stack_with_value_at_exception_boundary (VerifyContext *ctx, ILCodeDesc *code, MonoClass *klass)
{
MonoError error;
MonoType *type = mono_class_inflate_generic_type_checked (&klass->byval_arg, ctx->generic_context, &error);
if (!mono_error_ok (&error)) {
char *name = mono_type_get_full_name (klass);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid class %s used for exception", name));
g_free (name);
mono_error_cleanup (&error);
return;
}
if (!ctx->max_stack) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Stack overflow at 0x%04x", ctx->ip_offset));
return;
}
stack_init (ctx, code);
set_stack_value (ctx, code->stack, type, FALSE);
ctx->exception_types = g_slist_prepend (ctx->exception_types, type);
code->size = 1;
code->flags |= IL_CODE_FLAG_WAS_TARGET;
if (mono_type_is_generic_argument (type))
code->stack->stype |= BOXED_MASK;
}
/*Verify if type 'candidate' can be stored in type 'target'.
*
* If strict, compare the underlying types rather than the verification stack types.
*/
static gboolean
verify_type_compatibility_full (VerifyContext *ctx, MonoType *target, MonoType *candidate, gboolean strict)
{
#define IS_ONE_OF3(T, A, B, C) (T == A || T == B || T == C)
#define IS_ONE_OF2(T, A, B) (T == A || T == B)
MonoType *original_candidate = candidate;
VERIFIER_DEBUG ( printf ("checking type compatibility %s x %s strict %d\n", mono_type_full_name (target), mono_type_full_name (candidate), strict); );
/*only one is byref */
if (candidate->byref ^ target->byref) {
/* converting from native int to byref*/
if (get_stack_type (candidate) == TYPE_NATIVE_INT && target->byref) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("using byref native int at 0x%04x", ctx->ip_offset));
return TRUE;
}
return FALSE;
}
strict |= target->byref;
/*From now on we don't care about byref anymore, so it's ok to discard it here*/
candidate = mono_type_get_underlying_type_any (candidate);
handle_enum:
switch (target->type) {
case MONO_TYPE_VOID:
return candidate->type == MONO_TYPE_VOID;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
if (strict)
return IS_ONE_OF3 (candidate->type, MONO_TYPE_I1, MONO_TYPE_U1, MONO_TYPE_BOOLEAN);
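/* fall through when not strict */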
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
if (strict)
return IS_ONE_OF3 (candidate->type, MONO_TYPE_I2, MONO_TYPE_U2, MONO_TYPE_CHAR);
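/* fall through when not strict */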
case MONO_TYPE_I4:
case MONO_TYPE_U4: {
gboolean is_native_int = IS_ONE_OF2 (candidate->type, MONO_TYPE_I, MONO_TYPE_U);
gboolean is_int4 = IS_ONE_OF2 (candidate->type, MONO_TYPE_I4, MONO_TYPE_U4);
if (strict)
return is_native_int || is_int4;
return is_native_int || get_stack_type (candidate) == TYPE_I4;
}
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return IS_ONE_OF2 (candidate->type, MONO_TYPE_I8, MONO_TYPE_U8);
case MONO_TYPE_R4:
case MONO_TYPE_R8:
if (strict)
return candidate->type == target->type;
return IS_ONE_OF2 (candidate->type, MONO_TYPE_R4, MONO_TYPE_R8);
case MONO_TYPE_I:
case MONO_TYPE_U: {
gboolean is_native_int = IS_ONE_OF2 (candidate->type, MONO_TYPE_I, MONO_TYPE_U);
gboolean is_int4 = IS_ONE_OF2 (candidate->type, MONO_TYPE_I4, MONO_TYPE_U4);
if (strict)
return is_native_int || is_int4;
return is_native_int || get_stack_type (candidate) == TYPE_I4;
}
case MONO_TYPE_PTR:
if (candidate->type != MONO_TYPE_PTR)
return FALSE;
/* check the underlying type */
return verify_type_compatibility_full (ctx, target->data.type, candidate->data.type, TRUE);
case MONO_TYPE_FNPTR: {
MonoMethodSignature *left, *right;
if (candidate->type != MONO_TYPE_FNPTR)
return FALSE;
left = mono_type_get_signature (target);
right = mono_type_get_signature (candidate);
return mono_metadata_signature_equal (left, right) && left->call_convention == right->call_convention;
}
case MONO_TYPE_GENERICINST: {
MonoClass *target_klass;
MonoClass *candidate_klass;
if (mono_type_is_enum_type (target)) {
target = mono_type_get_underlying_type_any (target);
if (!target)
return FALSE;
goto handle_enum;
}
/*
* VAR / MVAR compatibility must be checked by verify_stack_type_compatibility
* to take boxing status into account.
*/
if (mono_type_is_generic_argument (original_candidate))
return FALSE;
target_klass = mono_class_from_mono_type (target);
candidate_klass = mono_class_from_mono_type (candidate);
if (mono_class_is_nullable (target_klass)) {
if (!mono_class_is_nullable (candidate_klass))
return FALSE;
return target_klass == candidate_klass;
}
return mono_class_is_assignable_from (target_klass, candidate_klass);
}
case MONO_TYPE_STRING:
return candidate->type == MONO_TYPE_STRING;
case MONO_TYPE_CLASS:
/*
* VAR / MVAR compatibility must be checked by verify_stack_type_compatibility
* to take boxing status into account.
*/
if (mono_type_is_generic_argument (original_candidate))
return FALSE;
if (candidate->type == MONO_TYPE_VALUETYPE)
return FALSE;
/* If candidate is an enum it should return true for System.Enum and supertypes.
* That's why here we use the original type and not the underlying type.
*/
return mono_class_is_assignable_from (target->data.klass, mono_class_from_mono_type (original_candidate));
case MONO_TYPE_OBJECT:
return MONO_TYPE_IS_REFERENCE (candidate);
case MONO_TYPE_SZARRAY: {
MonoClass *left;
MonoClass *right;
if (candidate->type != MONO_TYPE_SZARRAY)
return FALSE;
left = mono_class_from_mono_type (target)->element_class;
right = mono_class_from_mono_type (candidate)->element_class;
return mono_class_is_assignable_from (left, right);
}
case MONO_TYPE_ARRAY:
if (candidate->type != MONO_TYPE_ARRAY)
return FALSE;
return is_array_type_compatible (target, candidate);
case MONO_TYPE_TYPEDBYREF:
return candidate->type == MONO_TYPE_TYPEDBYREF;
case MONO_TYPE_VALUETYPE: {
MonoClass *target_klass;
MonoClass *candidate_klass;
if (candidate->type == MONO_TYPE_CLASS)
return FALSE;
target_klass = mono_class_from_mono_type (target);
candidate_klass = mono_class_from_mono_type (candidate);
if (target_klass == candidate_klass)
return TRUE;
if (mono_type_is_enum_type (target)) {
target = mono_type_get_underlying_type_any (target);
if (!target)
return FALSE;
goto handle_enum;
}
return FALSE;
}
case MONO_TYPE_VAR:
if (candidate->type != MONO_TYPE_VAR)
return FALSE;
return mono_type_get_generic_param_num (candidate) == mono_type_get_generic_param_num (target);
case MONO_TYPE_MVAR:
if (candidate->type != MONO_TYPE_MVAR)
return FALSE;
return mono_type_get_generic_param_num (candidate) == mono_type_get_generic_param_num (target);
default:
VERIFIER_DEBUG ( printf ("unknown store type %d\n", target->type); );
g_assert_not_reached ();
return FALSE;
}
return 1;
#undef IS_ONE_OF3
#undef IS_ONE_OF2
}
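/*
* Illustrative examples of the strict flag: an int32 stored into a
* native int target is accepted in both modes, but a value whose
* verification type is int32 (say, an int16 local) stored into an int32
* target passes only with strict == FALSE, since strict compares the
* underlying types.
*/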
static gboolean
verify_type_compatibility (VerifyContext *ctx, MonoType *target, MonoType *candidate)
{
return verify_type_compatibility_full (ctx, target, candidate, FALSE);
}
/*
* Returns the generic param bound in the context being verified.
*
*/
static MonoGenericParam*
get_generic_param (VerifyContext *ctx, MonoType *param)
{
guint16 param_num = mono_type_get_generic_param_num (param);
if (param->type == MONO_TYPE_VAR) {
if (!ctx->generic_context->class_inst || ctx->generic_context->class_inst->type_argc <= param_num) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid generic type argument %d", param_num));
return NULL;
}
return ctx->generic_context->class_inst->type_argv [param_num]->data.generic_param;
}
/*param must be a MVAR */
if (!ctx->generic_context->method_inst || ctx->generic_context->method_inst->type_argc <= param_num) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid generic method argument %d", param_num));
return NULL;
}
return ctx->generic_context->method_inst->type_argv [param_num]->data.generic_param;
}
static gboolean
recursive_boxed_constraint_type_check (VerifyContext *ctx, MonoType *type, MonoClass *constraint_class, int recursion_level)
{
MonoType *constraint_type = &constraint_class->byval_arg;
if (recursion_level <= 0)
return FALSE;
if (verify_type_compatibility_full (ctx, type, mono_type_get_type_byval (constraint_type), FALSE))
return TRUE;
if (mono_type_is_generic_argument (constraint_type)) {
MonoGenericParam *param = get_generic_param (ctx, constraint_type);
MonoClass **class;
if (!param)
return FALSE;
for (class = mono_generic_param_info (param)->constraints; class && *class; ++class) {
if (recursive_boxed_constraint_type_check (ctx, type, *class, recursion_level - 1))
return TRUE;
}
}
return FALSE;
}
/*
* is_compatible_boxed_valuetype:
*
* Returns TRUE if @candidate / @stack is a valid boxed valuetype.
*
* @type The source type. It is tested to be of the proper type.
* @candidate type of the boxed valuetype.
* @stack stack slot of the boxed valuetype, separate from @candidate since one could be changed before calling this function
* @strict if TRUE candidate must be boxed compatible to the target type
*
*/
static gboolean
is_compatible_boxed_valuetype (VerifyContext *ctx, MonoType *type, MonoType *candidate, ILStackDesc *stack, gboolean strict)
{
if (!stack_slot_is_boxed_value (stack))
return FALSE;
if (type->byref || candidate->byref)
return FALSE;
if (mono_type_is_generic_argument (candidate)) {
MonoGenericParam *param = get_generic_param (ctx, candidate);
MonoClass **class;
if (!param)
return FALSE;
for (class = mono_generic_param_info (param)->constraints; class && *class; ++class) {
/*256 should be enough since there can't be more than 255 generic arguments.*/
if (recursive_boxed_constraint_type_check (ctx, type, *class, 256))
return TRUE;
}
}
if (mono_type_is_generic_argument (type))
return FALSE;
if (!strict)
return TRUE;
return MONO_TYPE_IS_REFERENCE (type) && mono_class_is_assignable_from (mono_class_from_mono_type (type), mono_class_from_mono_type (candidate));
}
static int
verify_stack_type_compatibility_full (VerifyContext *ctx, MonoType *type, ILStackDesc *stack, gboolean drop_byref, gboolean valuetype_must_be_boxed)
{
MonoType *candidate = mono_type_from_stack_slot (stack);
if (MONO_TYPE_IS_REFERENCE (type) && !type->byref && stack_slot_is_null_literal (stack))
return TRUE;
if (is_compatible_boxed_valuetype (ctx, type, candidate, stack, TRUE))
return TRUE;
if (valuetype_must_be_boxed && !stack_slot_is_boxed_value (stack) && !MONO_TYPE_IS_REFERENCE (candidate))
return FALSE;
if (!valuetype_must_be_boxed && stack_slot_is_boxed_value (stack))
return FALSE;
if (drop_byref)
return verify_type_compatibility_full (ctx, type, mono_type_get_type_byval (candidate), FALSE);
return verify_type_compatibility_full (ctx, type, candidate, FALSE);
}
static int
verify_stack_type_compatibility (VerifyContext *ctx, MonoType *type, ILStackDesc *stack)
{
return verify_stack_type_compatibility_full (ctx, type, stack, FALSE, FALSE);
}
static gboolean
mono_delegate_type_equal (MonoType *target, MonoType *candidate)
{
if (candidate->byref ^ target->byref)
return FALSE;
switch (target->type) {
case MONO_TYPE_VOID:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_STRING:
case MONO_TYPE_TYPEDBYREF:
return candidate->type == target->type;
case MONO_TYPE_PTR:
return mono_delegate_type_equal (target->data.type, candidate->data.type);
case MONO_TYPE_FNPTR:
if (candidate->type != MONO_TYPE_FNPTR)
return FALSE;
return mono_delegate_signature_equal (mono_type_get_signature (target), mono_type_get_signature (candidate), FALSE);
case MONO_TYPE_GENERICINST: {
MonoClass *target_klass;
MonoClass *candidate_klass;
target_klass = mono_class_from_mono_type (target);
candidate_klass = mono_class_from_mono_type (candidate);
/*FIXME handle nullables and enum*/
return mono_class_is_assignable_from (target_klass, candidate_klass);
}
case MONO_TYPE_OBJECT:
return MONO_TYPE_IS_REFERENCE (candidate);
case MONO_TYPE_CLASS:
return mono_class_is_assignable_from(target->data.klass, mono_class_from_mono_type (candidate));
case MONO_TYPE_SZARRAY:
if (candidate->type != MONO_TYPE_SZARRAY)
return FALSE;
return mono_class_is_assignable_from (mono_class_from_mono_type (target)->element_class, mono_class_from_mono_type (candidate)->element_class);
case MONO_TYPE_ARRAY:
if (candidate->type != MONO_TYPE_ARRAY)
return FALSE;
return is_array_type_compatible (target, candidate);
case MONO_TYPE_VALUETYPE:
/*FIXME handle nullables and enum*/
return mono_class_from_mono_type (candidate) == mono_class_from_mono_type (target);
case MONO_TYPE_VAR:
return candidate->type == MONO_TYPE_VAR && mono_type_get_generic_param_num (target) == mono_type_get_generic_param_num (candidate);
case MONO_TYPE_MVAR:
return candidate->type == MONO_TYPE_MVAR && mono_type_get_generic_param_num (target) == mono_type_get_generic_param_num (candidate);
default:
VERIFIER_DEBUG ( printf ("Unknown type %d. Implement me!\n", target->type); );
g_assert_not_reached ();
return FALSE;
}
}
static gboolean
mono_delegate_param_equal (MonoType *delegate, MonoType *method)
{
if (mono_metadata_type_equal_full (delegate, method, TRUE))
return TRUE;
return mono_delegate_type_equal (method, delegate);
}
static gboolean
mono_delegate_ret_equal (MonoType *delegate, MonoType *method)
{
if (mono_metadata_type_equal_full (delegate, method, TRUE))
return TRUE;
return mono_delegate_type_equal (delegate, method);
}
/*
* mono_delegate_signature_equal:
*
* Compare two signatures in the way expected by delegates.
*
* This function exists only because it must ignore the 'has_this' part of the signature.
*
* FIXME can this function be eliminated and proper metadata functionality be used?
*/
static gboolean
mono_delegate_signature_equal (MonoMethodSignature *delegate_sig, MonoMethodSignature *method_sig, gboolean is_static_ldftn)
{
int i;
int method_offset = is_static_ldftn ? 1 : 0;
if (delegate_sig->param_count + method_offset != method_sig->param_count)
return FALSE;
if (delegate_sig->call_convention != method_sig->call_convention)
return FALSE;
for (i = 0; i < delegate_sig->param_count; i++) {
MonoType *p1 = delegate_sig->params [i];
MonoType *p2 = method_sig->params [i + method_offset];
if (!mono_delegate_param_equal (p1, p2))
return FALSE;
}
if (!mono_delegate_ret_equal (delegate_sig->ret, method_sig->ret))
return FALSE;
return TRUE;
}
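/*
* Illustrative example (hypothetical signatures): a static method
* 'void M (string s, int i)' matches a 'void D (int i)' delegate when
* is_static_ldftn is set: the delegate parameters are compared against
* the method parameters shifted by one, and the bound first argument is
* checked separately by the caller.
*/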
/*
* verify_ldftn_delegate:
*
* Verify properties of ldftn based delegates.
*/
static void
verify_ldftn_delegate (VerifyContext *ctx, MonoClass *delegate, ILStackDesc *value, ILStackDesc *funptr)
{
MonoMethod *method = funptr->method;
/*ldftn non-final virtuals are only allowed if the method is not static,
* the object is the this arg (comes from a ldarg.0), and there is no starg.0.
* This rule doesn't apply if the object on the stack is a boxed valuetype.
*/
if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !(method->flags & METHOD_ATTRIBUTE_FINAL) && !(method->klass->flags & TYPE_ATTRIBUTE_SEALED) && !stack_slot_is_boxed_value (value)) {
/*A starg.0 must not happen, we fail here only in fail fast mode to avoid double error reports*/
if (IS_FAIL_FAST_MODE (ctx) && ctx->has_this_store)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid ldftn with virtual function in method with starg.0 at 0x%04x", ctx->ip_offset));
/*current method must not be static*/
if (ctx->method->flags & METHOD_ATTRIBUTE_STATIC)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid ldftn with virtual function at 0x%04x", ctx->ip_offset));
/*value is the this pointer, loaded using ldarg.0 */
if (!stack_slot_is_this_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid object argument, it is not the this pointer, to ldftn with virtual method at 0x%04x", ctx->ip_offset));
ctx->code [ctx->ip_offset].flags |= IL_CODE_LDFTN_DELEGATE_NONFINAL_VIRTUAL;
}
}
/*
* verify_delegate_compatibility:
*
* Verify delegate creation sequence.
*
*/
static void
verify_delegate_compatibility (VerifyContext *ctx, MonoClass *delegate, ILStackDesc *value, ILStackDesc *funptr)
{
#define IS_VALID_OPCODE(offset, opcode) (ip [ip_offset - offset] == opcode && (ctx->code [ip_offset - offset].flags & IL_CODE_FLAG_SEEN))
#define IS_LOAD_FUN_PTR(kind) (IS_VALID_OPCODE (6, CEE_PREFIX1) && ip [ip_offset - 5] == kind)
MonoMethod *invoke, *method;
const guint8 *ip = ctx->header->code;
guint32 ip_offset = ctx->ip_offset;
gboolean is_static_ldftn = FALSE, is_first_arg_bound = FALSE;
if (stack_slot_get_type (funptr) != TYPE_PTR || !funptr->method) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid function pointer parameter for delegate constructor at 0x%04x", ctx->ip_offset));
return;
}
invoke = mono_get_delegate_invoke (delegate);
method = funptr->method;
if (!method || !mono_method_signature (method)) {
char *name = mono_type_get_full_name (delegate);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid method on stack to create delegate %s construction at 0x%04x", name, ctx->ip_offset));
g_free (name);
return;
}
if (!invoke || !mono_method_signature (invoke)) {
char *name = mono_type_get_full_name (delegate);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Delegate type %s with bad Invoke method at 0x%04x", name, ctx->ip_offset));
g_free (name);
return;
}
is_static_ldftn = (ip_offset > 5 && IS_LOAD_FUN_PTR (CEE_LDFTN)) && method->flags & METHOD_ATTRIBUTE_STATIC;
if (is_static_ldftn)
is_first_arg_bound = mono_method_signature (invoke)->param_count + 1 == mono_method_signature (method)->param_count;
if (!mono_delegate_signature_equal (mono_method_signature (invoke), mono_method_signature (method), is_first_arg_bound)) {
char *fun_sig = mono_signature_get_desc (mono_method_signature (method), FALSE);
char *invoke_sig = mono_signature_get_desc (mono_method_signature (invoke), FALSE);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Function pointer signature '%s' doesn't match delegate's signature '%s' at 0x%04x", fun_sig, invoke_sig, ctx->ip_offset));
g_free (fun_sig);
g_free (invoke_sig);
}
/*
* Delegate code sequences:
* [-6] ldftn token
* newobj ...
*
*
* [-7] dup
* [-6] ldvirtftn token
* newobj ...
*
* ldftn sequence:*/
if (ip_offset > 5 && IS_LOAD_FUN_PTR (CEE_LDFTN)) {
verify_ldftn_delegate (ctx, delegate, value, funptr);
} else if (ip_offset > 6 && IS_VALID_OPCODE (7, CEE_DUP) && IS_LOAD_FUN_PTR (CEE_LDVIRTFTN)) {
ctx->code [ip_offset - 6].flags |= IL_CODE_DELEGATE_SEQUENCE;
} else {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid code sequence for delegate creation at 0x%04x", ctx->ip_offset));
}
ctx->code [ip_offset].flags |= IL_CODE_DELEGATE_SEQUENCE;
//general tests
if (is_first_arg_bound) {
if (mono_method_signature (method)->param_count == 0 || !verify_stack_type_compatibility_full (ctx, mono_method_signature (method)->params [0], value, FALSE, TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("This object not compatible with function pointer for delegate creation at 0x%04x", ctx->ip_offset));
} else {
if (method->flags & METHOD_ATTRIBUTE_STATIC) {
if (!stack_slot_is_null_literal (value) && !is_first_arg_bound)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Non-null this args used with static function for delegate creation at 0x%04x", ctx->ip_offset));
} else {
if (!verify_stack_type_compatibility_full (ctx, &method->klass->byval_arg, value, FALSE, TRUE) && !stack_slot_is_null_literal (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("This object not compatible with function pointer for delegate creation at 0x%04x", ctx->ip_offset));
}
}
if (stack_slot_get_type (value) != TYPE_COMPLEX)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid first parameter for delegate creation at 0x%04x", ctx->ip_offset));
#undef IS_VALID_OPCODE
#undef IS_LOAD_FUN_PTR
}
/* implement the opcode checks*/
static void
push_arg (VerifyContext *ctx, unsigned int arg, int take_addr)
{
ILStackDesc *top;
if (arg >= ctx->max_args) {
if (take_addr)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method doesn't have argument %d", arg + 1));
else {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Method doesn't have argument %d", arg + 1));
if (check_overflow (ctx)) //FIXME: what sane value could we ever push?
stack_push_val (ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
}
} else if (check_overflow (ctx)) {
/*We must let the value be pushed, otherwise we would get an underflow error*/
check_unverifiable_type (ctx, ctx->params [arg]);
if (ctx->params [arg]->byref && take_addr)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("ByRef of ByRef at 0x%04x", ctx->ip_offset));
top = stack_push (ctx);
if (!set_stack_value (ctx, top, ctx->params [arg], take_addr))
return;
if (arg == 0 && !(ctx->method->flags & METHOD_ATTRIBUTE_STATIC)) {
if (take_addr)
ctx->has_this_store = TRUE;
else
top->stype |= THIS_POINTER_MASK;
if (mono_method_is_constructor (ctx->method) && !ctx->super_ctor_called && !ctx->method->klass->valuetype)
top->stype |= UNINIT_THIS_MASK;
}
}
}
static void
push_local (VerifyContext *ctx, guint32 arg, int take_addr)
{
if (arg >= ctx->num_locals) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method doesn't have local %d", arg + 1));
} else if (check_overflow (ctx)) {
/*We must let the value be pushed, otherwise we would get an underflow error*/
check_unverifiable_type (ctx, ctx->locals [arg]);
if (ctx->locals [arg]->byref && take_addr)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("ByRef of ByRef at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), ctx->locals [arg], take_addr);
}
}
static void
store_arg (VerifyContext *ctx, guint32 arg)
{
ILStackDesc *value;
if (arg >= ctx->max_args) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Method doesn't have argument %d at 0x%04x", arg + 1, ctx->ip_offset));
if (check_underflow (ctx, 1))
stack_pop (ctx);
return;
}
if (check_underflow (ctx, 1)) {
value = stack_pop (ctx);
if (!verify_stack_type_compatibility (ctx, ctx->params [arg], value)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible type %s in argument store at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
}
}
if (arg == 0 && !(ctx->method->flags & METHOD_ATTRIBUTE_STATIC))
ctx->has_this_store = 1;
}
static void
store_local (VerifyContext *ctx, guint32 arg)
{
ILStackDesc *value;
if (arg >= ctx->num_locals) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method doesn't have local var %d at 0x%04x", arg + 1, ctx->ip_offset));
return;
}
/*TODO verify definite assignment */
if (check_underflow (ctx, 1)) {
value = stack_pop(ctx);
if (!verify_stack_type_compatibility (ctx, ctx->locals [arg], value)) {
char *expected = mono_type_full_name (ctx->locals [arg]);
char *found = stack_slot_full_name (value);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible type '%s' on stack cannot be stored to local %d with type '%s' at 0x%04x",
found,
arg,
expected,
ctx->ip_offset));
g_free (expected);
g_free (found);
}
}
}
/*FIXME add and sub need special care here*/
static void
do_binop (VerifyContext *ctx, unsigned int opcode, const unsigned char table [TYPE_MAX][TYPE_MAX])
{
ILStackDesc *a, *b, *top;
int idxa, idxb, complexMerge = 0;
unsigned char res;
if (!check_underflow (ctx, 2))
return;
b = stack_pop (ctx);
a = stack_pop (ctx);
idxa = stack_slot_get_underlying_type (a);
if (stack_slot_is_managed_pointer (a)) {
idxa = TYPE_PTR;
complexMerge = 1;
}
idxb = stack_slot_get_underlying_type (b);
if (stack_slot_is_managed_pointer (b)) {
idxb = TYPE_PTR;
complexMerge = 2;
}
--idxa;
--idxb;
res = table [idxa][idxb];
VERIFIER_DEBUG ( printf ("binop res %d\n", res); );
VERIFIER_DEBUG ( printf ("idxa %d idxb %d\n", idxa, idxb); );
top = stack_push (ctx);
if (res == TYPE_INV) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Binary instruction applyed to ill formed stack (%s x %s)", stack_slot_get_name (a), stack_slot_get_name (b)));
copy_stack_value (top, a);
return;
}
if (res & NON_VERIFIABLE_RESULT) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Binary instruction is not verifiable (%s x %s)", stack_slot_get_name (a), stack_slot_get_name (b)));
res = res & ~NON_VERIFIABLE_RESULT;
}
if (complexMerge && res == TYPE_PTR) {
if (complexMerge == 1)
copy_stack_value (top, a);
else if (complexMerge == 2)
copy_stack_value (top, b);
/*
 * There is no need to merge the type of two pointers.
 * The only valid operation is subtraction, which returns a native
 * int as result and can be used with any 2 pointer kinds.
 * This is valid according to Partition III 1.1.4
 */
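/*
 * For illustration only (hypothetical IL, not taken from this file):
 *
 *     ldloca.s 0
 *     ldloca.s 1
 *     sub          // subtracting two managed pointers leaves a native int
 */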
} else
top->stype = res;
}
static void
do_boolean_branch_op (VerifyContext *ctx, int delta)
{
int target = ctx->ip_offset + delta;
ILStackDesc *top;
VERIFIER_DEBUG ( printf ("boolean branch offset %d delta %d target %d\n", ctx->ip_offset, delta, target); );
if (target < 0 || target >= ctx->code_size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Boolean branch target out of code at 0x%04x", ctx->ip_offset));
return;
}
switch (is_valid_branch_instruction (ctx->header, ctx->ip_offset, target)) {
case 1:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
break;
case 2:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
return;
}
ctx->target = target;
if (!check_underflow (ctx, 1))
return;
top = stack_pop (ctx);
if (!is_valid_bool_arg (top))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Argument type %s not valid for brtrue/brfalse at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
check_unmanaged_pointer (ctx, top);
}
static gboolean
stack_slot_is_complex_type_not_reference_type (ILStackDesc *slot)
{
return stack_slot_get_type (slot) == TYPE_COMPLEX && !MONO_TYPE_IS_REFERENCE (slot->type) && !stack_slot_is_boxed_value (slot);
}
static void
do_branch_op (VerifyContext *ctx, signed int delta, const unsigned char table [TYPE_MAX][TYPE_MAX])
{
ILStackDesc *a, *b;
int idxa, idxb;
unsigned char res;
int target = ctx->ip_offset + delta;
VERIFIER_DEBUG ( printf ("branch offset %d delta %d target %d\n", ctx->ip_offset, delta, target); );
if (target < 0 || target >= ctx->code_size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target out of code at 0x%04x", ctx->ip_offset));
return;
}
switch (is_valid_cmp_branch_instruction (ctx->header, ctx->ip_offset, target)) {
case 1: /*FIXME use constants and not magic numbers.*/
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
break;
case 2:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
return;
}
ctx->target = target;
if (!check_underflow (ctx, 2))
return;
b = stack_pop (ctx);
a = stack_pop (ctx);
idxa = stack_slot_get_underlying_type (a);
if (stack_slot_is_managed_pointer (a))
idxa = TYPE_PTR;
idxb = stack_slot_get_underlying_type (b);
if (stack_slot_is_managed_pointer (b))
idxb = TYPE_PTR;
if (stack_slot_is_complex_type_not_reference_type (a) || stack_slot_is_complex_type_not_reference_type (b)) {
res = TYPE_INV;
} else {
--idxa;
--idxb;
res = table [idxa][idxb];
}
VERIFIER_DEBUG ( printf ("branch res %d\n", res); );
VERIFIER_DEBUG ( printf ("idxa %d idxb %d\n", idxa, idxb); );
if (res == TYPE_INV) {
CODE_NOT_VERIFIABLE (ctx,
g_strdup_printf ("Compare and Branch instruction applyed to ill formed stack (%s x %s) at 0x%04x", stack_slot_get_name (a), stack_slot_get_name (b), ctx->ip_offset));
} else if (res & NON_VERIFIABLE_RESULT) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Compare and Branch instruction is not verifiable (%s x %s) at 0x%04x", stack_slot_get_name (a), stack_slot_get_name (b), ctx->ip_offset));
res = res & ~NON_VERIFIABLE_RESULT;
}
}
static void
do_cmp_op (VerifyContext *ctx, const unsigned char table [TYPE_MAX][TYPE_MAX], guint32 opcode)
{
ILStackDesc *a, *b;
int idxa, idxb;
unsigned char res;
if (!check_underflow (ctx, 2))
return;
b = stack_pop (ctx);
a = stack_pop (ctx);
if (opcode == CEE_CGT_UN) {
if (stack_slot_get_type (a) == TYPE_COMPLEX && stack_slot_get_type (b) == TYPE_COMPLEX) {
stack_push_val (ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
return;
}
}
idxa = stack_slot_get_underlying_type (a);
if (stack_slot_is_managed_pointer (a))
idxa = TYPE_PTR;
idxb = stack_slot_get_underlying_type (b);
if (stack_slot_is_managed_pointer (b))
idxb = TYPE_PTR;
if (stack_slot_is_complex_type_not_reference_type (a) || stack_slot_is_complex_type_not_reference_type (b)) {
res = TYPE_INV;
} else {
--idxa;
--idxb;
res = table [idxa][idxb];
}
if (res == TYPE_INV) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Compare instruction applied to ill-formed stack (%s x %s) at 0x%04x", stack_slot_get_name (a), stack_slot_get_name (b), ctx->ip_offset));
} else if (res & NON_VERIFIABLE_RESULT) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Compare instruction is not verifiable (%s x %s) at 0x%04x", stack_slot_get_name (a), stack_slot_get_name (b), ctx->ip_offset));
res = res & ~NON_VERIFIABLE_RESULT;
}
stack_push_val (ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
}
static void
do_ret (VerifyContext *ctx)
{
MonoType *ret = ctx->signature->ret;
VERIFIER_DEBUG ( printf ("checking ret\n"); );
if (ret->type != MONO_TYPE_VOID) {
ILStackDesc *top;
if (!check_underflow (ctx, 1))
return;
top = stack_pop(ctx);
if (!verify_stack_type_compatibility (ctx, ctx->signature->ret, top)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible return value on stack with method signature ret at 0x%04x", ctx->ip_offset));
return;
}
if (ret->byref || ret->type == MONO_TYPE_TYPEDBYREF || mono_type_is_value_type (ret, "System", "ArgIterator") || mono_type_is_value_type (ret, "System", "RuntimeArgumentHandle"))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Method returns byref, TypedReference, ArgIterator or RuntimeArgumentHandle at 0x%04x", ctx->ip_offset));
}
if (ctx->eval.size > 0) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Stack not empty (%d) after ret at 0x%04x", ctx->eval.size, ctx->ip_offset));
}
if (in_any_block (ctx->header, ctx->ip_offset))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("ret cannot escape exception blocks at 0x%04x", ctx->ip_offset));
}
/*
 * FIXME we need to handle the case of a non-virtual instance method defined in the parent but called using a token pointing to a subclass.
 * This is illegal, but mono_get_method_full decodes it.
 * TODO handle calling a .ctor outside a constructor, or calling the .ctor of a class other than this or super
*/
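/*
 * Overview of the checks below: resolve the method and its signature, pop
 * and type-check each argument right-to-left against sig->params, validate
 * the 'this' argument (constructor rules, constrained. prefix, boxed
 * valuetypes, visibility) and, for non-void signatures, push the return value.
 */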
static void
do_invoke_method (VerifyContext *ctx, int method_token, gboolean virtual)
{
int param_count, i;
MonoMethodSignature *sig;
ILStackDesc *value;
MonoMethod *method;
gboolean virt_check_this = FALSE;
gboolean constrained = ctx->prefix_set & PREFIX_CONSTRAINED;
if (!(method = verifier_load_method (ctx, method_token, virtual ? "callvirt" : "call")))
return;
if (virtual) {
CLEAR_PREFIX (ctx, PREFIX_CONSTRAINED);
if (method->klass->valuetype) // && !constrained ???
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use callvirtual with valuetype method at 0x%04x", ctx->ip_offset));
if ((method->flags & METHOD_ATTRIBUTE_STATIC))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use callvirtual with static method at 0x%04x", ctx->ip_offset));
} else {
if (method->flags & METHOD_ATTRIBUTE_ABSTRACT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use call with an abstract method at 0x%04x", ctx->ip_offset));
if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !(method->flags & METHOD_ATTRIBUTE_FINAL) && !(method->klass->flags & TYPE_ATTRIBUTE_SEALED)) {
virt_check_this = TRUE;
ctx->code [ctx->ip_offset].flags |= IL_CODE_CALL_NONFINAL_VIRTUAL;
}
}
if (!(sig = mono_method_get_signature_full (method, ctx->image, method_token, ctx->generic_context)))
sig = mono_method_get_signature (method, ctx->image, method_token);
if (!sig) {
char *name = mono_type_get_full_name (method->klass);
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Could not resolve signature of %s:%s at 0x%04x", name, method->name, ctx->ip_offset));
g_free (name);
return;
}
param_count = sig->param_count + sig->hasthis;
if (!check_underflow (ctx, param_count))
return;
for (i = sig->param_count - 1; i >= 0; --i) {
VERIFIER_DEBUG ( printf ("verifying argument %d\n", i); );
value = stack_pop (ctx);
if (!verify_stack_type_compatibility (ctx, sig->params[i], value)) {
char *stack_name = stack_slot_full_name (value);
char *sig_name = mono_type_full_name (sig->params [i]);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible parameter with function signature: Calling method with signature (%s) but for argument %d there is a (%s) on stack at 0x%04x", sig_name, i, stack_name, ctx->ip_offset));
g_free (stack_name);
g_free (sig_name);
}
if (stack_slot_is_managed_mutability_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer as argument of %s at 0x%04x", virtual ? "callvirt" : "call", ctx->ip_offset));
if ((ctx->prefix_set & PREFIX_TAIL) && stack_slot_is_managed_pointer (value)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Cannot pass a byref argument to a tail %s at 0x%04x", virtual ? "callvirt" : "call", ctx->ip_offset));
return;
}
}
if (sig->hasthis) {
MonoType *type = &method->klass->byval_arg;
ILStackDesc copy;
if (mono_method_is_constructor (method) && !method->klass->valuetype) {
if (!mono_method_is_constructor (ctx->method))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a constructor outside one at 0x%04x", ctx->ip_offset));
if (method->klass != ctx->method->klass->parent && method->klass != ctx->method->klass)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a constructor to a type diferent that this or super at 0x%04x", ctx->ip_offset));
ctx->super_ctor_called = TRUE;
value = stack_pop_safe (ctx);
if ((value->stype & THIS_POINTER_MASK) != THIS_POINTER_MASK)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid 'this ptr' argument for constructor at 0x%04x", ctx->ip_offset));
} else {
value = stack_pop (ctx);
}
copy_stack_value (&copy, value);
//TODO we should extract this to a 'drop_byref_argument' helper and use it everywhere
//Other parts of the code suffer from the same issue.
copy.type = mono_type_get_type_byval (copy.type);
copy.stype &= ~POINTER_MASK;
if (virt_check_this && !stack_slot_is_this_pointer (value) && !(method->klass->valuetype || stack_slot_is_boxed_value (value)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a non-final virtual method from an objet diferent thant the this pointer at 0x%04x", ctx->ip_offset));
if (constrained && virtual) {
if (!stack_slot_is_managed_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Object is not a managed pointer for a constrained call at 0x%04x", ctx->ip_offset));
if (!mono_metadata_type_equal_full (mono_type_get_type_byval (value->type), ctx->constrained_type, TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Object not compatible with constrained type at 0x%04x", ctx->ip_offset));
copy.stype |= BOXED_MASK;
} else {
if (stack_slot_is_managed_pointer (value) && !mono_class_from_mono_type (value->type)->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a reference type using a managed pointer to the this arg at 0x%04x", ctx->ip_offset));
if (!virtual && mono_class_from_mono_type (value->type)->valuetype && !method->klass->valuetype && !stack_slot_is_boxed_value (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot call a valuetype baseclass at 0x%04x", ctx->ip_offset));
if (virtual && mono_class_from_mono_type (value->type)->valuetype && !stack_slot_is_boxed_value (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a valuetype with callvirt at 0x%04x", ctx->ip_offset));
if (method->klass->valuetype && (stack_slot_is_boxed_value (value) || !stack_slot_is_managed_pointer (value)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a boxed or literal valuetype to call a valuetype method at 0x%04x", ctx->ip_offset));
}
if (!verify_stack_type_compatibility (ctx, type, &copy)) {
char *expected = mono_type_full_name (type);
char *effective = stack_slot_full_name (&copy);
char *method_name = mono_method_full_name (method, TRUE);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible this argument on stack with method signature expected '%s' but got '%s' for a call to '%s' at 0x%04x",
expected, effective, method_name, ctx->ip_offset));
g_free (method_name);
g_free (effective);
g_free (expected);
}
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_method_full (ctx->method, method, mono_class_from_mono_type (value->type))) {
char *name = mono_method_full_name (method, TRUE);
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Method %s is not accessible at 0x%04x", name, ctx->ip_offset), MONO_EXCEPTION_METHOD_ACCESS);
g_free (name);
}
} else if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_method_full (ctx->method, method, NULL)) {
char *name = mono_method_full_name (method, TRUE);
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Method %s is not accessible at 0x%04x", name, ctx->ip_offset), MONO_EXCEPTION_METHOD_ACCESS);
g_free (name);
}
if (sig->ret->type != MONO_TYPE_VOID) {
if (check_overflow (ctx)) {
value = stack_push (ctx);
set_stack_value (ctx, value, sig->ret, FALSE);
if ((ctx->prefix_set & PREFIX_READONLY) && method->klass->rank && !strcmp (method->name, "Address")) {
ctx->prefix_set &= ~PREFIX_READONLY;
value->stype |= CMMP_MASK;
}
}
}
if ((ctx->prefix_set & PREFIX_TAIL)) {
if (!mono_delegate_ret_equal (mono_method_signature (ctx->method)->ret, sig->ret))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Tail call with incompatible return type at 0x%04x", ctx->ip_offset));
if (ctx->header->code [ctx->ip_offset + 5] != CEE_RET)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Tail call not followed by ret at 0x%04x", ctx->ip_offset));
}
}
static void
do_push_static_field (VerifyContext *ctx, int token, gboolean take_addr)
{
MonoClassField *field;
MonoClass *klass;
if (!check_overflow (ctx))
return;
if (!take_addr)
CLEAR_PREFIX (ctx, PREFIX_VOLATILE);
if (!(field = verifier_load_field (ctx, token, &klass, take_addr ? "ldsflda" : "ldsfld")))
return;
if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Cannot load non static field at 0x%04x", ctx->ip_offset));
return;
}
/*taking the address of initonly field only works from the static constructor */
if (take_addr && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY) &&
!(field->parent == ctx->method->klass && (ctx->method->flags & (METHOD_ATTRIBUTE_SPECIAL_NAME | METHOD_ATTRIBUTE_STATIC)) && !strcmp (".cctor", ctx->method->name)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot take the address of a init-only field at 0x%04x", ctx->ip_offset));
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_field_full (ctx->method, field, NULL))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Type at stack is not accessible at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_FIELD_ACCESS);
set_stack_value (ctx, stack_push (ctx), field->type, take_addr);
}
static void
do_store_static_field (VerifyContext *ctx, int token) {
MonoClassField *field;
MonoClass *klass;
ILStackDesc *value;
CLEAR_PREFIX (ctx, PREFIX_VOLATILE);
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (!(field = verifier_load_field (ctx, token, &klass, "stsfld")))
return;
if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Cannot store non static field at 0x%04x", ctx->ip_offset));
return;
}
if (field->type->type == MONO_TYPE_TYPEDBYREF) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Typedbyref field is an unverfiable type in store static field at 0x%04x", ctx->ip_offset));
return;
}
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_field_full (ctx->method, field, NULL))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Type at stack is not accessible at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_FIELD_ACCESS);
if (!verify_stack_type_compatibility (ctx, field->type, value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible type %s in static field store at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
}
static gboolean
check_is_valid_type_for_field_ops (VerifyContext *ctx, int token, ILStackDesc *obj, MonoClassField **ret_field, const char *opcode)
{
MonoClassField *field;
MonoClass *klass;
gboolean is_pointer;
/*must be a reference type, a managed pointer, an unmanaged pointer, or a valuetype*/
if (!(field = verifier_load_field (ctx, token, &klass, opcode)))
return FALSE;
*ret_field = field;
//the value on stack is going to be used as a pointer
is_pointer = stack_slot_get_type (obj) == TYPE_PTR || (stack_slot_get_type (obj) == TYPE_NATIVE_INT && !get_stack_type (&field->parent->byval_arg));
if (field->type->type == MONO_TYPE_TYPEDBYREF) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Typedbyref field is an unverfiable type at 0x%04x", ctx->ip_offset));
return FALSE;
}
g_assert (obj->type);
/*The value on the stack must be a subclass of the defining type of the field*/
/* we need to check if we can load the field from the stack value*/
if (is_pointer) {
if (stack_slot_get_underlying_type (obj) == TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Native int is not a verifiable type to reference a field at 0x%04x", ctx->ip_offset));
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_field_full (ctx->method, field, NULL))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Type at stack is not accessible at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_FIELD_ACCESS);
} else {
if (!field->parent->valuetype && stack_slot_is_managed_pointer (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type at stack is a managed pointer to a reference type and is not compatible to reference the field at 0x%04x", ctx->ip_offset));
/*a value type can be loaded from a value or a managed pointer, but not a boxed object*/
if (field->parent->valuetype && stack_slot_is_boxed_value (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type at stack is a boxed valuetype and is not compatible to reference the field at 0x%04x", ctx->ip_offset));
if (!stack_slot_is_null_literal (obj) && !verify_stack_type_compatibility_full (ctx, &field->parent->byval_arg, obj, TRUE, FALSE)) {
char *found = stack_slot_full_name (obj);
char *expected = mono_type_full_name (&field->parent->byval_arg);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Expected type '%s' but found '%s' referencing the 'this' argument at 0x%04x", expected, found, ctx->ip_offset));
g_free (found);
g_free (expected);
}
if (!IS_SKIP_VISIBILITY (ctx) && !mono_method_can_access_field_full (ctx->method, field, mono_class_from_mono_type (obj->type)))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Type at stack is not accessible at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_FIELD_ACCESS);
}
check_unmanaged_pointer (ctx, obj);
return TRUE;
}
static void
do_push_field (VerifyContext *ctx, int token, gboolean take_addr)
{
ILStackDesc *obj;
MonoClassField *field;
if (!take_addr)
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (ctx, 1))
return;
obj = stack_pop_safe (ctx);
if (!check_is_valid_type_for_field_ops (ctx, token, obj, &field, take_addr ? "ldflda" : "ldfld"))
return;
if (take_addr && field->parent->valuetype && !stack_slot_is_managed_pointer (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot take the address of a temporary value-type at 0x%04x", ctx->ip_offset));
if (take_addr && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY) &&
!(field->parent == ctx->method->klass && mono_method_is_constructor (ctx->method)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot take the address of a init-only field at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), field->type, take_addr);
}
static void
do_store_field (VerifyContext *ctx, int token)
{
ILStackDesc *value, *obj;
MonoClassField *field;
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (ctx, 2))
return;
value = stack_pop (ctx);
obj = stack_pop_safe (ctx);
if (!check_is_valid_type_for_field_ops (ctx, token, obj, &field, "stfld"))
return;
if (!verify_stack_type_compatibility (ctx, field->type, value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible type %s in field store at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
}
/*TODO properly handle Nullable<T>*/
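/*
 * For illustration only (hypothetical IL): boxing a value type pushes a
 * boxed object reference (TYPE_COMPLEX | BOXED_MASK), while box on a
 * reference type is a no-op that merely flags the slot as boxed:
 *
 *     ldc.i4.0
 *     box [mscorlib]System.Int32   // stack now holds a boxed int32
 */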
static void
do_box_value (VerifyContext *ctx, int klass_token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, klass_token, "box");
MonoClass *klass;
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
/*box is a nop for reference types*/
if (stack_slot_get_underlying_type (value) == TYPE_COMPLEX && MONO_TYPE_IS_REFERENCE (value->type) && MONO_TYPE_IS_REFERENCE (type)) {
stack_push_stack_val (ctx, value)->stype |= BOXED_MASK;
return;
}
if (!verify_stack_type_compatibility (ctx, type, value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for boxing operation at 0x%04x", ctx->ip_offset));
klass = mono_class_from_mono_type (type);
if (mono_class_is_nullable (klass))
type = &mono_class_get_nullable_param (klass)->byval_arg;
stack_push_val (ctx, TYPE_COMPLEX | BOXED_MASK, type);
}
static void
do_unbox_value (VerifyContext *ctx, int klass_token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, klass_token, "unbox");
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
if (!mono_class_from_mono_type (type)->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid reference type for unbox at 0x%04x", ctx->ip_offset));
value = stack_pop (ctx);
/*Value should be: a boxed valuetype or a reference type*/
if (!(stack_slot_get_type (value) == TYPE_COMPLEX &&
(stack_slot_is_boxed_value (value) || !mono_class_from_mono_type (value->type)->valuetype)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type %s at stack for unbox operation at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
set_stack_value (ctx, value = stack_push (ctx), mono_type_get_type_byref (type), FALSE);
value->stype |= CMMP_MASK;
}
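/*
 * Note the contrast with do_unbox_value above: unbox pushes a
 * controlled-mutability managed pointer (CMMP_MASK) to the value, whereas
 * unbox.any below pushes the value itself by-value.
 */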
static void
do_unbox_any (VerifyContext *ctx, int klass_token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, klass_token, "unbox.any");
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
/*Value should be: a boxed valuetype or a reference type*/
if (!(stack_slot_get_type (value) == TYPE_COMPLEX &&
(stack_slot_is_boxed_value (value) || !mono_class_from_mono_type (value->type)->valuetype)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type %s at stack for unbox.any operation at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), type, FALSE);
}
static void
do_unary_math_op (VerifyContext *ctx, int op)
{
ILStackDesc *value;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
switch (stack_slot_get_type (value)) {
case TYPE_I4:
case TYPE_I8:
case TYPE_NATIVE_INT:
break;
case TYPE_R8:
if (op == CEE_NEG)
break;
/* fall through: floating point values are only valid for neg */
case TYPE_COMPLEX: /*only enums are ok*/
if (mono_type_is_enum_type (value->type))
break;
/* fall through */
default:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for unary math op at 0x%04x", ctx->ip_offset));
}
stack_push_stack_val (ctx, value);
}
static void
do_conversion (VerifyContext *ctx, int kind)
{
ILStackDesc *value;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
switch (stack_slot_get_type (value)) {
case TYPE_I4:
case TYPE_I8:
case TYPE_NATIVE_INT:
case TYPE_R8:
break;
default:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type (%s) at stack for conversion operation. Numeric type expected at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
}
switch (kind) {
case TYPE_I4:
stack_push_val (ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
break;
case TYPE_I8:
stack_push_val (ctx, TYPE_I8, &mono_defaults.int64_class->byval_arg);
break;
case TYPE_R8:
stack_push_val (ctx, TYPE_R8, &mono_defaults.double_class->byval_arg);
break;
case TYPE_NATIVE_INT:
stack_push_val (ctx, TYPE_NATIVE_INT, &mono_defaults.int_class->byval_arg);
break;
default:
g_error ("unknown type %02x in conversion", kind);
}
}
static void
do_load_token (VerifyContext *ctx, int token)
{
gpointer handle;
MonoClass *handle_class;
if (!check_overflow (ctx))
return;
switch (token & 0xff000000) {
case MONO_TOKEN_TYPE_DEF:
case MONO_TOKEN_TYPE_REF:
case MONO_TOKEN_TYPE_SPEC:
case MONO_TOKEN_FIELD_DEF:
case MONO_TOKEN_METHOD_DEF:
case MONO_TOKEN_METHOD_SPEC:
case MONO_TOKEN_MEMBER_REF:
if (!token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Table index out of range 0x%x for token %x for ldtoken at 0x%04x", mono_metadata_token_index (token), token, ctx->ip_offset));
return;
}
break;
default:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid table 0x%x for token 0x%x for ldtoken at 0x%04x", mono_metadata_token_table (token), token, ctx->ip_offset));
return;
}
handle = mono_ldtoken (ctx->image, token, &handle_class, ctx->generic_context);
if (!handle) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid token 0x%x for ldtoken at 0x%04x", token, ctx->ip_offset));
return;
}
if (handle_class == mono_defaults.typehandle_class) {
mono_type_is_valid_in_context (ctx, (MonoType*)handle);
} else if (handle_class == mono_defaults.methodhandle_class) {
mono_method_is_valid_in_context (ctx, (MonoMethod*)handle);
} else if (handle_class == mono_defaults.fieldhandle_class) {
mono_type_is_valid_in_context (ctx, &((MonoClassField*)handle)->parent->byval_arg);
} else {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid ldtoken type %x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
}
stack_push_val (ctx, TYPE_COMPLEX, mono_class_get_type (handle_class));
}
static void
do_ldobj_value (VerifyContext *ctx, int token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, token, "ldobj");
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (!stack_slot_is_managed_pointer (value)
&& stack_slot_get_type (value) != TYPE_NATIVE_INT
&& !(stack_slot_get_type (value) == TYPE_PTR && value->type->type != MONO_TYPE_FNPTR)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid argument %s to ldobj at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
return;
}
if (stack_slot_get_type (value) == TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Using native pointer to ldobj at 0x%04x", ctx->ip_offset));
/*We have a byval on the stack, but the comparison must be strict. */
if (!verify_type_compatibility_full (ctx, type, mono_type_get_type_byval (value->type), TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for ldojb operation at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), type, FALSE);
}
static void
do_stobj (VerifyContext *ctx, int token)
{
ILStackDesc *dest, *src;
MonoType *type = get_boxable_mono_type (ctx, token, "stobj");
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!type)
return;
if (!check_underflow (ctx, 2))
return;
src = stack_pop (ctx);
dest = stack_pop (ctx);
if (stack_slot_is_managed_mutability_pointer (dest))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with stobj at 0x%04x", ctx->ip_offset));
if (!stack_slot_is_managed_pointer (dest))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid destination of stobj operation at 0x%04x", ctx->ip_offset));
if (stack_slot_is_boxed_value (src) && !MONO_TYPE_IS_REFERENCE (src->type) && !MONO_TYPE_IS_REFERENCE (type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use stobj with a boxed source value that is not a reference type at 0x%04x", ctx->ip_offset));
if (!verify_stack_type_compatibility (ctx, type, src))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Token and source types of stobj don't match at 0x%04x", ctx->ip_offset));
if (!verify_type_compatibility (ctx, mono_type_get_type_byval (dest->type), type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Destination and token types of stobj don't match at 0x%04x", ctx->ip_offset));
}
static void
do_cpobj (VerifyContext *ctx, int token)
{
ILStackDesc *dest, *src;
MonoType *type = get_boxable_mono_type (ctx, token, "cpobj");
if (!type)
return;
if (!check_underflow (ctx, 2))
return;
src = stack_pop (ctx);
dest = stack_pop (ctx);
if (!stack_slot_is_managed_pointer (src))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid source of cpobj operation at 0x%04x", ctx->ip_offset));
if (!stack_slot_is_managed_pointer (dest))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid destination of cpobj operation at 0x%04x", ctx->ip_offset));
if (stack_slot_is_managed_mutability_pointer (dest))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with cpobj at 0x%04x", ctx->ip_offset));
if (!verify_type_compatibility (ctx, type, mono_type_get_type_byval (src->type)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Token and source types of cpobj don't match at 0x%04x", ctx->ip_offset));
if (!verify_type_compatibility (ctx, mono_type_get_type_byval (dest->type), type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Destination and token types of cpobj don't match at 0x%04x", ctx->ip_offset));
}
static void
do_initobj (VerifyContext *ctx, int token)
{
ILStackDesc *obj;
MonoType *stack, *type = get_boxable_mono_type (ctx, token, "initobj");
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
obj = stack_pop (ctx);
if (!stack_slot_is_managed_pointer (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid object address for initobj at 0x%04x", ctx->ip_offset));
if (stack_slot_is_managed_mutability_pointer (obj))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with initobj at 0x%04x", ctx->ip_offset));
stack = mono_type_get_type_byval (obj->type);
if (MONO_TYPE_IS_REFERENCE (stack)) {
if (!verify_type_compatibility (ctx, stack, type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type token of initobj not compatible with value on stack at 0x%04x", ctx->ip_offset));
else if (IS_STRICT_MODE (ctx) && !mono_metadata_type_equal (type, stack))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type token of initobj not compatible with value on stack at 0x%04x", ctx->ip_offset));
} else if (!verify_type_compatibility (ctx, stack, type)) {
char *expected_name = mono_type_full_name (type);
char *stack_name = mono_type_full_name (stack);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Initobj %s not compatible with value on stack %s at 0x%04x", expected_name, stack_name, ctx->ip_offset));
g_free (expected_name);
g_free (stack_name);
}
}
static void
do_newobj (VerifyContext *ctx, int token)
{
ILStackDesc *value;
int i;
MonoMethodSignature *sig;
MonoMethod *method;
gboolean is_delegate = FALSE;
if (!(method = verifier_load_method (ctx, token, "newobj")))
return;
if (!mono_method_is_constructor (method)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Method from token 0x%08x not a constructor at 0x%04x", token, ctx->ip_offset));
return;
}
if (method->klass->flags & (TYPE_ATTRIBUTE_ABSTRACT | TYPE_ATTRIBUTE_INTERFACE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Trying to instantiate an abstract or interface type at 0x%04x", ctx->ip_offset));
if (!mono_method_can_access_method_full (ctx->method, method, NULL)) {
char *from = mono_method_full_name (ctx->method, TRUE);
char *to = mono_method_full_name (method, TRUE);
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Constructor %s not visible from %s at 0x%04x", to, from, ctx->ip_offset), MONO_EXCEPTION_METHOD_ACCESS);
g_free (from);
g_free (to);
}
//FIXME use mono_method_get_signature_full
sig = mono_method_signature (method);
if (!sig) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid constructor signature to newobj at 0x%04x", ctx->ip_offset));
return;
}
if (!check_underflow (ctx, sig->param_count))
return;
is_delegate = method->klass->parent == mono_defaults.multicastdelegate_class;
if (is_delegate) {
ILStackDesc *funptr;
//first arg is object, second arg is fun ptr
if (sig->param_count != 2) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid delegate constructor at 0x%04x", ctx->ip_offset));
return;
}
funptr = stack_pop (ctx);
value = stack_pop (ctx);
verify_delegate_compatibility (ctx, method->klass, value, funptr);
} else {
for (i = sig->param_count - 1; i >= 0; --i) {
VERIFIER_DEBUG ( printf ("verifying constructor argument %d\n", i); );
value = stack_pop (ctx);
if (!verify_stack_type_compatibility (ctx, sig->params [i], value)) {
char *stack_name = stack_slot_full_name (value);
char *sig_name = mono_type_full_name (sig->params [i]);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Incompatible parameter value with constructor signature: %s X %s at 0x%04x", sig_name, stack_name, ctx->ip_offset));
g_free (stack_name);
g_free (sig_name);
}
if (stack_slot_is_managed_mutability_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer as argument of newobj at 0x%04x", ctx->ip_offset));
}
}
if (check_overflow (ctx))
set_stack_value (ctx, stack_push (ctx), &method->klass->byval_arg, FALSE);
}
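/*
 * Verify castclass/isinst. The result slot below is flagged as boxed when
 * the source was boxed or when the token is a value type or a generic
 * argument, since such a cast can only produce a boxed instance.
 */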
static void
do_cast (VerifyContext *ctx, int token, const char *opcode) {
ILStackDesc *value;
MonoType *type;
gboolean is_boxed;
gboolean do_box;
if (!check_underflow (ctx, 1))
return;
if (!(type = verifier_load_type (ctx, token, opcode)))
return;
if (type->byref) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid %s type at 0x%04x", opcode, ctx->ip_offset));
return;
}
value = stack_pop (ctx);
is_boxed = stack_slot_is_boxed_value (value);
if (stack_slot_is_managed_pointer (value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value for %s at 0x%04x", opcode, ctx->ip_offset));
else if (!MONO_TYPE_IS_REFERENCE (value->type) && !is_boxed) {
char *name = stack_slot_full_name (value);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Expected a reference type on stack for %s but found %s at 0x%04x", opcode, name, ctx->ip_offset));
g_free (name);
}
switch (value->type->type) {
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
case MONO_TYPE_TYPEDBYREF:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value for %s at 0x%04x", opcode, ctx->ip_offset));
}
do_box = is_boxed || mono_type_is_generic_argument(type) || mono_class_from_mono_type (type)->valuetype;
stack_push_val (ctx, TYPE_COMPLEX | (do_box ? BOXED_MASK : 0), type);
}
static MonoType *
mono_type_from_opcode (int opcode) {
switch (opcode) {
case CEE_LDIND_I1:
case CEE_LDIND_U1:
case CEE_STIND_I1:
case CEE_LDELEM_I1:
case CEE_LDELEM_U1:
case CEE_STELEM_I1:
return &mono_defaults.sbyte_class->byval_arg;
case CEE_LDIND_I2:
case CEE_LDIND_U2:
case CEE_STIND_I2:
case CEE_LDELEM_I2:
case CEE_LDELEM_U2:
case CEE_STELEM_I2:
return &mono_defaults.int16_class->byval_arg;
case CEE_LDIND_I4:
case CEE_LDIND_U4:
case CEE_STIND_I4:
case CEE_LDELEM_I4:
case CEE_LDELEM_U4:
case CEE_STELEM_I4:
return &mono_defaults.int32_class->byval_arg;
case CEE_LDIND_I8:
case CEE_STIND_I8:
case CEE_LDELEM_I8:
case CEE_STELEM_I8:
return &mono_defaults.int64_class->byval_arg;
case CEE_LDIND_R4:
case CEE_STIND_R4:
case CEE_LDELEM_R4:
case CEE_STELEM_R4:
return &mono_defaults.single_class->byval_arg;
case CEE_LDIND_R8:
case CEE_STIND_R8:
case CEE_LDELEM_R8:
case CEE_STELEM_R8:
return &mono_defaults.double_class->byval_arg;
case CEE_LDIND_I:
case CEE_STIND_I:
case CEE_LDELEM_I:
case CEE_STELEM_I:
return &mono_defaults.int_class->byval_arg;
case CEE_LDIND_REF:
case CEE_STIND_REF:
case CEE_LDELEM_REF:
case CEE_STELEM_REF:
return &mono_defaults.object_class->byval_arg;
default:
g_error ("unknown opcode %02x in mono_type_from_opcode ", opcode);
return NULL;
}
}
static void
do_load_indirect (VerifyContext *ctx, int opcode)
{
ILStackDesc *value;
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (!stack_slot_is_managed_pointer (value)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Load indirect not using a manager pointer at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), mono_type_from_opcode (opcode), FALSE);
return;
}
if (opcode == CEE_LDIND_REF) {
if (stack_slot_get_underlying_type (value) != TYPE_COMPLEX || mono_class_from_mono_type (value->type)->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for ldind_ref expected object byref operation at 0x%04x", ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), mono_type_get_type_byval (value->type), FALSE);
} else {
if (!verify_type_compatibility_full (ctx, mono_type_from_opcode (opcode), mono_type_get_type_byval (value->type), TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type at stack for ldind 0x%x operation at 0x%04x", opcode, ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), mono_type_from_opcode (opcode), FALSE);
}
}
static void
do_store_indirect (VerifyContext *ctx, int opcode)
{
ILStackDesc *addr, *val;
CLEAR_PREFIX (ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (ctx, 2))
return;
val = stack_pop (ctx);
addr = stack_pop (ctx);
check_unmanaged_pointer (ctx, addr);
if (!stack_slot_is_managed_pointer (addr) && stack_slot_get_type (addr) != TYPE_PTR) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid non-pointer argument to stind at 0x%04x", ctx->ip_offset));
return;
}
if (stack_slot_is_managed_mutability_pointer (addr)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with stind at 0x%04x", ctx->ip_offset));
return;
}
if (!verify_type_compatibility_full (ctx, mono_type_from_opcode (opcode), mono_type_get_type_byval (addr->type), TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid addr type at stack for stind 0x%x operation at 0x%04x", opcode, ctx->ip_offset));
if (!verify_stack_type_compatibility (ctx, mono_type_from_opcode (opcode), val))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value type at stack for stind 0x%x operation at 0x%04x", opcode, ctx->ip_offset));
}
static void
do_newarr (VerifyContext *ctx, int token)
{
ILStackDesc *value;
MonoType *type = get_boxable_mono_type (ctx, token, "newarr");
if (!type)
return;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (stack_slot_get_type (value) != TYPE_I4 && stack_slot_get_type (value) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Array size type on stack (%s) is not a verifiable type at 0x%04x", stack_slot_get_name (value), ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), mono_class_get_type (mono_array_class_get (mono_class_from_mono_type (type), 1)), FALSE);
}
/*FIXME handle arrays that are not 0-indexed*/
static void
do_ldlen (VerifyContext *ctx)
{
ILStackDesc *value;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (stack_slot_get_type (value) != TYPE_COMPLEX || value->type->type != MONO_TYPE_SZARRAY)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type for ldlen at 0x%04x", ctx->ip_offset));
stack_push_val (ctx, TYPE_NATIVE_INT, &mono_defaults.int_class->byval_arg);
}
/*FIXME handle arrays that are not 0-indexed*/
/*FIXME handle readonly prefix and CMMP*/
static void
do_ldelema (VerifyContext *ctx, int klass_token)
{
ILStackDesc *index, *array, *res;
MonoType *type = get_boxable_mono_type (ctx, klass_token, "ldelema");
gboolean valid;
if (!type)
return;
if (!check_underflow (ctx, 2))
return;
index = stack_pop (ctx);
array = stack_pop (ctx);
if (stack_slot_get_type (index) != TYPE_I4 && stack_slot_get_type (index) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Index type(%s) for ldelema is not an int or a native int at 0x%04x", stack_slot_get_name (index), ctx->ip_offset));
if (!stack_slot_is_null_literal (array)) {
if (stack_slot_get_type (array) != TYPE_COMPLEX || array->type->type != MONO_TYPE_SZARRAY)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type(%s) for ldelema at 0x%04x", stack_slot_get_name (array), ctx->ip_offset));
else {
if (get_stack_type (type) == TYPE_I4 || get_stack_type (type) == TYPE_NATIVE_INT) {
valid = verify_type_compatibility_full (ctx, type, &array->type->data.klass->byval_arg, TRUE);
} else {
valid = mono_metadata_type_equal (type, &array->type->data.klass->byval_arg);
}
if (!valid)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type on stack for ldelema at 0x%04x", ctx->ip_offset));
}
}
res = stack_push (ctx);
set_stack_value (ctx, res, type, TRUE);
if (ctx->prefix_set & PREFIX_READONLY) {
ctx->prefix_set &= ~PREFIX_READONLY;
res->stype |= CMMP_MASK;
}
}
/*
* FIXME handle arrays that are not 0-indexed
* FIXME handle readonly prefix and CMMP
*/
static void
do_ldelem (VerifyContext *ctx, int opcode, int token)
{
#define IS_ONE_OF2(T, A, B) ((T) == (A) || (T) == (B))
ILStackDesc *index, *array;
MonoType *type;
if (!check_underflow (ctx, 2))
return;
if (opcode == CEE_LDELEM) {
if (!(type = verifier_load_type (ctx, token, "ldelem.any"))) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Type (0x%08x) not found at 0x%04x", token, ctx->ip_offset));
return;
}
} else {
type = mono_type_from_opcode (opcode);
}
index = stack_pop (ctx);
array = stack_pop (ctx);
if (stack_slot_get_type (index) != TYPE_I4 && stack_slot_get_type (index) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Index type(%s) for ldelem.X is not an int or a native int at 0x%04x", stack_slot_get_name (index), ctx->ip_offset));
if (!stack_slot_is_null_literal (array)) {
if (stack_slot_get_type (array) != TYPE_COMPLEX || array->type->type != MONO_TYPE_SZARRAY)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type(%s) for ldelem.X at 0x%04x", stack_slot_get_name (array), ctx->ip_offset));
else {
if (opcode == CEE_LDELEM_REF) {
if (array->type->data.klass->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type is not a reference type for ldelem.ref 0x%04x", ctx->ip_offset));
type = &array->type->data.klass->byval_arg;
} else {
MonoType *candidate = &array->type->data.klass->byval_arg;
if (IS_STRICT_MODE (ctx)) {
MonoType *underlying_type = mono_type_get_underlying_type_any (type);
MonoType *underlying_candidate = mono_type_get_underlying_type_any (candidate);
if ((IS_ONE_OF2 (underlying_type->type, MONO_TYPE_I4, MONO_TYPE_U4) && IS_ONE_OF2 (underlying_candidate->type, MONO_TYPE_I, MONO_TYPE_U)) ||
(IS_ONE_OF2 (underlying_candidate->type, MONO_TYPE_I4, MONO_TYPE_U4) && IS_ONE_OF2 (underlying_type->type, MONO_TYPE_I, MONO_TYPE_U)))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type on stack for ldelem.X at 0x%04x", ctx->ip_offset));
}
if (!verify_type_compatibility_full (ctx, type, candidate, TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type on stack for ldelem.X at 0x%04x", ctx->ip_offset));
}
}
}
set_stack_value (ctx, stack_push (ctx), type, FALSE);
#undef IS_ONE_OF2
}
/*
* FIXME handle arrays that are not 0-indexed
*/
static void
do_stelem (VerifyContext *ctx, int opcode, int token)
{
ILStackDesc *index, *array, *value;
MonoType *type;
if (!check_underflow (ctx, 3))
return;
if (opcode == CEE_STELEM) {
if (!(type = verifier_load_type (ctx, token, "stelem.any"))) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Type (0x%08x) not found at 0x%04x", token, ctx->ip_offset));
return;
}
} else {
type = mono_type_from_opcode (opcode);
}
value = stack_pop (ctx);
index = stack_pop (ctx);
array = stack_pop (ctx);
if (stack_slot_get_type (index) != TYPE_I4 && stack_slot_get_type (index) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Index type(%s) for stdelem.X is not an int or a native int at 0x%04x", stack_slot_get_name (index), ctx->ip_offset));
if (!stack_slot_is_null_literal (array)) {
if (stack_slot_get_type (array) != TYPE_COMPLEX || array->type->type != MONO_TYPE_SZARRAY) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type(%s) for stelem.X at 0x%04x", stack_slot_get_name (array), ctx->ip_offset));
} else {
if (opcode == CEE_STELEM_REF) {
if (array->type->data.klass->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type is not a reference type for stelem.ref 0x%04x", ctx->ip_offset));
} else if (!verify_type_compatibility_full (ctx, &array->type->data.klass->byval_arg, type, TRUE)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid array type on stack for stdelem.X at 0x%04x", ctx->ip_offset));
}
}
}
if (opcode == CEE_STELEM_REF) {
if (!stack_slot_is_boxed_value (value) && mono_class_from_mono_type (value->type)->valuetype)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value is not a reference type for stelem.ref 0x%04x", ctx->ip_offset));
} else if (opcode != CEE_STELEM_REF) {
if (!verify_stack_type_compatibility (ctx, type, value))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid value on stack for stdelem.X at 0x%04x", ctx->ip_offset));
if (stack_slot_is_boxed_value (value) && !MONO_TYPE_IS_REFERENCE (value->type) && !MONO_TYPE_IS_REFERENCE (type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use stobj with a boxed source value that is not a reference type at 0x%04x", ctx->ip_offset));
}
}
static void
do_throw (VerifyContext *ctx)
{
ILStackDesc *exception;
if (!check_underflow (ctx, 1))
return;
exception = stack_pop (ctx);
if (!stack_slot_is_null_literal (exception) && !(stack_slot_get_type (exception) == TYPE_COMPLEX && !mono_class_from_mono_type (exception->type)->valuetype))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type on stack for throw, expected reference type at 0x%04x", ctx->ip_offset));
if (mono_type_is_generic_argument (exception->type) && !stack_slot_is_boxed_value (exception)) {
char *name = mono_type_full_name (exception->type);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid type on stack for throw, expected reference type but found unboxed %s at 0x%04x ", name, ctx->ip_offset));
g_free (name);
}
/*The stack is left empty after a throw*/
ctx->eval.size = 0;
}
static void
do_endfilter (VerifyContext *ctx)
{
MonoExceptionClause *clause;
if (IS_STRICT_MODE (ctx)) {
if (ctx->eval.size != 1)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Stack size must have one item for endfilter at 0x%04x", ctx->ip_offset));
if (ctx->eval.size >= 1 && stack_slot_get_type (stack_pop (ctx)) != TYPE_I4)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Stack item type is not an int32 for endfilter at 0x%04x", ctx->ip_offset));
}
if ((clause = is_correct_endfilter (ctx, ctx->ip_offset))) {
if (IS_STRICT_MODE (ctx)) {
if (ctx->ip_offset != clause->handler_offset - 2)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("endfilter is not the last instruction of the filter clause at 0x%04x", ctx->ip_offset));
} else {
if ((ctx->ip_offset != clause->handler_offset - 2) && !MONO_OFFSET_IN_HANDLER (clause, ctx->ip_offset))
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("endfilter is not the last instruction of the filter clause at 0x%04x", ctx->ip_offset));
}
} else {
if (IS_STRICT_MODE (ctx) && !is_unverifiable_endfilter (ctx, ctx->ip_offset))
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("endfilter outside filter clause at 0x%04x", ctx->ip_offset));
else
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("endfilter outside filter clause at 0x%04x", ctx->ip_offset));
}
ctx->eval.size = 0;
}
static void
do_leave (VerifyContext *ctx, int delta)
{
int target = ((gint32)ctx->ip_offset) + delta;
if (target >= ctx->code_size || target < 0)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target out of code at 0x%04x", ctx->ip_offset));
if (!is_correct_leave (ctx->header, ctx->ip_offset, target))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Leave not allowed in finally block at 0x%04x", ctx->ip_offset));
ctx->eval.size = 0;
ctx->target = target;
}
/*
* do_static_branch:
*
* Verify br and br.s opcodes.
*/
static void
do_static_branch (VerifyContext *ctx, int delta)
{
int target = ctx->ip_offset + delta;
if (target < 0 || target >= ctx->code_size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("branch target out of code at 0x%04x", ctx->ip_offset));
return;
}
switch (is_valid_branch_instruction (ctx->header, ctx->ip_offset, target)) {
case 1:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
break;
case 2:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Branch target escapes out of exception block at 0x%04x", ctx->ip_offset));
break;
}
ctx->target = target;
}
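/*
 * The switch opcode is one byte followed by a 4-byte target count and
 * 'count' 4-byte relative offsets, hence the fall-through address below is
 * ip_offset + 5 + count * 4; every target is encoded relative to that base.
 */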
static void
do_switch (VerifyContext *ctx, int count, const unsigned char *data)
{
int i, base = ctx->ip_offset + 5 + count * 4;
ILStackDesc *value;
if (!check_underflow (ctx, 1))
return;
value = stack_pop (ctx);
if (stack_slot_get_type (value) != TYPE_I4 && stack_slot_get_type (value) != TYPE_NATIVE_INT)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid argument to switch at 0x%04x", ctx->ip_offset));
for (i = 0; i < count; ++i) {
int target = base + read32 (data + i * 4);
if (target < 0 || target >= ctx->code_size) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Switch target %x out of code at 0x%04x", i, ctx->ip_offset));
return;
}
switch (is_valid_branch_instruction (ctx->header, ctx->ip_offset, target)) {
case 1:
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Switch target %x escapes out of exception block at 0x%04x", i, ctx->ip_offset));
break;
case 2:
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Switch target %x escapes out of exception block at 0x%04x", i, ctx->ip_offset));
return;
}
merge_stacks (ctx, &ctx->eval, &ctx->code [target], FALSE, TRUE);
}
}
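/*
 * Verify ldftn/ldvirtftn. For ldvirtftn an object reference is popped and
 * checked against the method's declaring class; the pushed TYPE_PTR slot
 * records the method so a following delegate constructor can be validated
 * against it (see verify_delegate_compatibility in do_newobj).
 */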
static void
do_load_function_ptr (VerifyContext *ctx, guint32 token, gboolean virtual)
{
ILStackDesc *top;
MonoMethod *method;
if (virtual && !check_underflow (ctx, 1))
return;
if (!virtual && !check_overflow (ctx))
return;
if (!IS_METHOD_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid token %x for ldftn at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return;
}
if (!(method = verifier_load_method (ctx, token, virtual ? "ldvirtftn" : "ldftn")))
return;
if (mono_method_is_constructor (method))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use ldftn with a constructor at 0x%04x", ctx->ip_offset));
if (virtual) {
top = stack_pop (ctx);
if (stack_slot_get_type (top) != TYPE_COMPLEX || top->type->type == MONO_TYPE_VALUETYPE)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Invalid argument to ldvirtftn at 0x%04x", ctx->ip_offset));
if (method->flags & METHOD_ATTRIBUTE_STATIC)
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use ldvirtftn with a constructor at 0x%04x", ctx->ip_offset));
if (!verify_stack_type_compatibility (ctx, &method->klass->byval_arg, top))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Unexpected object for ldvirtftn at 0x%04x", ctx->ip_offset));
}
if (!mono_method_can_access_method_full (ctx->method, method, NULL))
CODE_NOT_VERIFIABLE2 (ctx, g_strdup_printf ("Loaded method is not visible for ldftn/ldvirtftn at 0x%04x", ctx->ip_offset), MONO_EXCEPTION_METHOD_ACCESS);
top = stack_push_val(ctx, TYPE_PTR, mono_type_create_fnptr_from_mono_method (ctx, method));
top->method = method;
}
static void
do_sizeof (VerifyContext *ctx, int token)
{
MonoType *type;
if (!IS_TYPE_DEF_OR_REF_OR_SPEC (token) || !token_bounds_check (ctx->image, token)) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid type token %x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return;
}
if (!(type = verifier_load_type (ctx, token, "sizeof")))
return;
if (type->byref && type->type != MONO_TYPE_TYPEDBYREF) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid use of byref type at 0x%04x", ctx->ip_offset));
return;
}
if (type->type == MONO_TYPE_VOID) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Invalid use of void type at 0x%04x", ctx->ip_offset));
return;
}
if (check_overflow (ctx))
set_stack_value (ctx, stack_push (ctx), &mono_defaults.uint32_class->byval_arg, FALSE);
}
/* Stack top can be of any type; the runtime doesn't care and treats everything as an int. */
static void
do_localloc (VerifyContext *ctx)
{
ILStackDesc *top;
if (ctx->eval.size != 1) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Stack must have only size item in localloc at 0x%04x", ctx->ip_offset));
return;
}
if (in_any_exception_block (ctx->header, ctx->ip_offset)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Stack must have only size item in localloc at 0x%04x", ctx->ip_offset));
return;
}
/*TODO verify top type*/
top = stack_pop (ctx);
set_stack_value (ctx, stack_push (ctx), &mono_defaults.int_class->byval_arg, FALSE);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Instruction localloc in never verifiable at 0x%04x", ctx->ip_offset));
}
static void
do_ldstr (VerifyContext *ctx, guint32 token)
{
GSList *error = NULL;
if (mono_metadata_token_code (token) != MONO_TOKEN_STRING) {
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid string token %x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return;
}
if (!ctx->image->dynamic && !mono_verifier_verify_string_signature (ctx->image, mono_metadata_token_index (token), &error)) {
if (error)
ctx->list = g_slist_concat (ctx->list, error);
ADD_VERIFY_ERROR2 (ctx, g_strdup_printf ("Invalid string index %x at 0x%04x", token, ctx->ip_offset), MONO_EXCEPTION_BAD_IMAGE);
return;
}
if (check_overflow (ctx))
stack_push_val (ctx, TYPE_COMPLEX, &mono_defaults.string_class->byval_arg);
}
static void
do_refanyval (VerifyContext *ctx, int token)
{
ILStackDesc *top;
MonoType *type;
if (!check_underflow (ctx, 1))
return;
if (!(type = get_boxable_mono_type (ctx, token, "refanyval")))
return;
top = stack_pop (ctx);
if (top->stype != TYPE_PTR || top->type->type != MONO_TYPE_TYPEDBYREF)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Expected a typedref as argument for refanyval, but found %s at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), type, TRUE);
}
static void
do_refanytype (VerifyContext *ctx)
{
ILStackDesc *top;
if (!check_underflow (ctx, 1))
return;
top = stack_pop (ctx);
if (top->stype != TYPE_PTR || top->type->type != MONO_TYPE_TYPEDBYREF)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Expected a typedref as argument for refanytype, but found %s at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
set_stack_value (ctx, stack_push (ctx), &mono_defaults.typehandle_class->byval_arg, FALSE);
}
static void
do_mkrefany (VerifyContext *ctx, int token)
{
ILStackDesc *top;
MonoType *type;
if (!check_underflow (ctx, 1))
return;
if (!(type = get_boxable_mono_type (ctx, token, "refanyval")))
return;
top = stack_pop (ctx);
if (stack_slot_is_managed_mutability_pointer (top))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot use a readonly pointer with mkrefany at 0x%04x", ctx->ip_offset));
if (!stack_slot_is_managed_pointer (top)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Expected a managed pointer for mkrefany, but found %s at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
} else {
MonoType *stack_type = mono_type_get_type_byval (top->type);
if (MONO_TYPE_IS_REFERENCE (type) && !mono_metadata_type_equal (type, stack_type))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type not compatible for mkrefany at 0x%04x", ctx->ip_offset));
if (!MONO_TYPE_IS_REFERENCE (type) && !verify_type_compatibility_full (ctx, type, stack_type, TRUE))
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Type not compatible for mkrefany at 0x%04x", ctx->ip_offset));
}
set_stack_value (ctx, stack_push (ctx), &mono_defaults.typed_reference_class->byval_arg, FALSE);
}
static void
do_ckfinite (VerifyContext *ctx)
{
ILStackDesc *top;
if (!check_underflow (ctx, 1))
return;
top = stack_pop (ctx);
if (stack_slot_get_underlying_type (top) != TYPE_R8)
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Expected float32 or float64 on stack for ckfinit but found %s at 0x%04x", stack_slot_get_name (top), ctx->ip_offset));
stack_push_stack_val (ctx, top);
}
/*
* merge_stacks:
 * Merge the stacks and perform compat checks. The merge checks whether the types of @from are mergeable with the types of @to.
*
* @from holds new values for a given control path
* @to holds the current values of a given control path
*
* TODO we can eliminate the from argument as all callers pass &ctx->eval
*/
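/*
 * For illustration only (hypothetical types): merging a System.String slot
 * with a System.Object slot keeps System.Object; for two unrelated classes
 * the code below walks the supertype chains for the closest common ancestor,
 * then the interface lists, and finally falls back to System.Object.
 */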
static void
merge_stacks (VerifyContext *ctx, ILCodeDesc *from, ILCodeDesc *to, gboolean start, gboolean external)
{
MonoError error;
int i, j, k;
stack_init (ctx, to);
if (start) {
if (to->flags == IL_CODE_FLAG_NOT_PROCESSED)
from->size = 0;
else
stack_copy (&ctx->eval, to);
goto end_verify;
} else if (!(to->flags & IL_CODE_STACK_MERGED)) {
stack_copy (to, &ctx->eval);
goto end_verify;
}
VERIFIER_DEBUG ( printf ("performing stack merge %d x %d\n", from->size, to->size); );
if (from->size != to->size) {
VERIFIER_DEBUG ( printf ("different stack sizes %d x %d at 0x%04x\n", from->size, to->size, ctx->ip_offset); );
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Could not merge stacks, different sizes (%d x %d) at 0x%04x", from->size, to->size, ctx->ip_offset));
goto end_verify;
}
//FIXME we need to preserve CMMP attributes
//FIXME we must take null literals into consideration.
for (i = 0; i < from->size; ++i) {
ILStackDesc *new_slot = from->stack + i;
ILStackDesc *old_slot = to->stack + i;
MonoType *new_type = mono_type_from_stack_slot (new_slot);
MonoType *old_type = mono_type_from_stack_slot (old_slot);
MonoClass *old_class = mono_class_from_mono_type (old_type);
MonoClass *new_class = mono_class_from_mono_type (new_type);
MonoClass *match_class = NULL;
// S := T then U = S (new value is compatible with current value, keep current)
if (verify_stack_type_compatibility (ctx, old_type, new_slot)) {
copy_stack_value (new_slot, old_slot);
continue;
}
// T := S then U = T (old value is compatible with current value, use new)
if (verify_stack_type_compatibility (ctx, new_type, old_slot)) {
copy_stack_value (old_slot, new_slot);
continue;
}
if (mono_type_is_generic_argument (old_type) || mono_type_is_generic_argument (new_type)) {
char *old_name = stack_slot_full_name (old_slot);
char *new_name = stack_slot_full_name (new_slot);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Could not merge stack at depth %d, types not compatible: %s X %s at 0x%04x", i, old_name, new_name, ctx->ip_offset));
g_free (old_name);
g_free (new_name);
goto end_verify;
}
//both are reference types, use closest common super type
if (!mono_class_from_mono_type (old_type)->valuetype
&& !mono_class_from_mono_type (new_type)->valuetype
&& !stack_slot_is_managed_pointer (old_slot)
&& !stack_slot_is_managed_pointer (new_slot)) {
for (j = MIN (old_class->idepth, new_class->idepth) - 1; j > 0; --j) {
if (mono_metadata_type_equal (&old_class->supertypes [j]->byval_arg, &new_class->supertypes [j]->byval_arg)) {
match_class = old_class->supertypes [j];
goto match_found;
}
}
mono_class_setup_interfaces (old_class, &error);
if (!mono_error_ok (&error)) {
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Cannot merge stacks due to a TypeLoadException %s at 0x%04x", mono_error_get_message (&error), ctx->ip_offset));
mono_error_cleanup (&error);
goto end_verify;
}
for (j = 0; j < old_class->interface_count; ++j) {
for (k = 0; k < new_class->interface_count; ++k) {
if (mono_metadata_type_equal (&old_class->interfaces [j]->byval_arg, &new_class->interfaces [k]->byval_arg)) {
match_class = old_class->interfaces [j];
goto match_found;
}
}
}
//No decent super type found, use object
match_class = mono_defaults.object_class;
goto match_found;
} else if (is_compatible_boxed_valuetype (ctx, old_type, new_type, new_slot, FALSE) || is_compatible_boxed_valuetype (ctx, new_type, old_type, old_slot, FALSE)) {
match_class = mono_defaults.object_class;
goto match_found;
}
{
char *old_name = stack_slot_full_name (old_slot);
char *new_name = stack_slot_full_name (new_slot);
CODE_NOT_VERIFIABLE (ctx, g_strdup_printf ("Could not merge stack at depth %d, types not compatible: %s X %s at 0x%04x", i, old_name, new_name, ctx->ip_offset));
g_free (old_name);
g_free (new_name);
}
set_stack_value (ctx, old_slot, &new_class->byval_arg, stack_slot_is_managed_pointer (old_slot));
goto end_verify;
match_found:
g_assert (match_class);
set_stack_value (ctx, old_slot, &match_class->byval_arg, stack_slot_is_managed_pointer (old_slot));
set_stack_value (ctx, new_slot, &match_class->byval_arg, stack_slot_is_managed_pointer (old_slot));
continue;
}
end_verify:
if (external)
to->flags |= IL_CODE_FLAG_WAS_TARGET;
to->flags |= IL_CODE_STACK_MERGED;
}
#define HANDLER_START(clause) ((clause)->flags == MONO_EXCEPTION_CLAUSE_FILTER ? (clause)->data.filter_offset : (clause)->handler_offset)
#define IS_CATCH_OR_FILTER(clause) ((clause)->flags == MONO_EXCEPTION_CLAUSE_FILTER || (clause)->flags == MONO_EXCEPTION_CLAUSE_NONE)
/*
* is_clause_in_range :
*
* Returns TRUE if either the protected block or the handler of @clause is in the @start - @end range.
*/
static gboolean
is_clause_in_range (MonoExceptionClause *clause, guint32 start, guint32 end)
{
if (clause->try_offset >= start && clause->try_offset < end)
return TRUE;
if (HANDLER_START (clause) >= start && HANDLER_START (clause) < end)
return TRUE;
return FALSE;
}
/*
* is_clause_inside_range :
*
* Returns TRUE if @clause lies completely inside the @start - @end range.
*/
static gboolean
is_clause_inside_range (MonoExceptionClause *clause, guint32 start, guint32 end)
{
if (clause->try_offset < start || (clause->try_offset + clause->try_len) > end)
return FALSE;
if (HANDLER_START (clause) < start || (clause->handler_offset + clause->handler_len) > end)
return FALSE;
return TRUE;
}
/*
* is_clause_nested :
*
* Returns TRUE if @nested is nested in @clause.
*/
static gboolean
is_clause_nested (MonoExceptionClause *clause, MonoExceptionClause *nested)
{
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER && is_clause_inside_range (nested, clause->data.filter_offset, clause->handler_offset))
return TRUE;
return is_clause_inside_range (nested, clause->try_offset, clause->try_offset + clause->try_len) ||
is_clause_inside_range (nested, clause->handler_offset, clause->handler_offset + clause->handler_len);
}
/* Test the relationship between 2 exception clauses. Following Partition I, 12.4.2.7 of ECMA-335,
 * each pair of exception clauses must have one of the following properties:
 * - one is fully nested within the other (the outer must not be a filter clause) (the nested one must come earlier)
 * - completely disjoint (none of the 3 regions of each entry overlap with the other 3)
 * - mutual protection (the protected blocks are EXACTLY the same, the handlers are disjoint, and all handlers are catches or all handlers are filters)
 */
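/*
 * A hedged sketch of the legal shapes (the offsets are invented for
 * illustration only):
 *
 *   nested:             try [0x00-0x20) fully contains try [0x04-0x10) and its handler
 *   disjoint:           all 3 regions of one clause end before the other's begin
 *   mutual protection:  try [0x00-0x10) ... catch A / catch B (identical protected
 *                       block, disjoint handlers, all catch or all filter)
 *
 * Anything else (partially overlapping regions, a clause straddling a filter,
 * a nested clause listed after its enclosing clause) is rejected below.
 */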
static void
verify_clause_relationship (VerifyContext *ctx, MonoExceptionClause *clause, MonoExceptionClause *to_test)
{
/*clause is nested*/
if (to_test->flags == MONO_EXCEPTION_CLAUSE_FILTER && is_clause_inside_range (clause, to_test->data.filter_offset, to_test->handler_offset)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Exception clause inside filter"));
return;
}
/*wrong nesting order.*/
if (is_clause_nested (clause, to_test)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Nested exception clause appears after enclosing clause"));
return;
}
/*mutual protection*/
if (clause->try_offset == to_test->try_offset && clause->try_len == to_test->try_len) {
/*handlers are not disjoint*/
if (is_clause_in_range (to_test, HANDLER_START (clause), clause->handler_offset + clause->handler_len)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Exception handlers overlap"));
return;
}
/* handlers are neither catch nor filter */
if (!IS_CATCH_OR_FILTER (clause) || !IS_CATCH_OR_FILTER (to_test)) {
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Exception clauses with shared protected block are neither catch nor filter"));
return;
}
/*OK*/
return;
}
/*not completely disjoint*/
if ((is_clause_in_range (to_test, clause->try_offset, clause->try_offset + clause->try_len) ||
is_clause_in_range (to_test, HANDLER_START (clause), clause->handler_offset + clause->handler_len)) && !is_clause_nested (to_test, clause))
ADD_VERIFY_ERROR (ctx, g_strdup_printf ("Exception clauses overlap"));
}
#define code_bounds_check(size) \
if (ADDP_IS_GREATER_OR_OVF (ip, size, end)) {\
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Code overrun starting with 0x%x at 0x%04x", *ip, ctx.ip_offset)); \
break; \
}
static gboolean
mono_opcode_is_prefix (int op)
{
switch (op) {
case MONO_CEE_UNALIGNED_:
case MONO_CEE_VOLATILE_:
case MONO_CEE_TAIL_:
case MONO_CEE_CONSTRAINED_:
case MONO_CEE_READONLY_:
return TRUE;
}
return FALSE;
}
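/*
 * Prefix opcodes must be immediately followed by the instruction they modify;
 * a branch or EH block may not land between the two. A hedged IL sketch:
 *
 *   constrained. [token]    <- prefix
 *   callvirt     [method]   <- branching to the prefix is fine; branching to
 *                              the callvirt alone (between the prefix and its
 *                              instruction) is rejected by the checks below.
 */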
/*
* FIXME: need to distinguish between valid and verifiable.
* Need to keep track of types on the stack.
* Verify types for opcodes.
*/
GSList*
mono_method_verify (MonoMethod *method, int level)
{
MonoError error;
const unsigned char *ip, *code_start;
const unsigned char *end;
MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
int i, n, need_merge = 0, start = 0;
guint token, ip_offset = 0, prefix = 0;
MonoGenericContext *generic_context = NULL;
MonoImage *image;
VerifyContext ctx;
GSList *tmp;
VERIFIER_DEBUG ( printf ("Verify IL for method %s %s %s\n", method->klass->name_space, method->klass->name, method->name); );
if (method->iflags & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
(method->flags & (METHOD_ATTRIBUTE_PINVOKE_IMPL | METHOD_ATTRIBUTE_ABSTRACT))) {
return NULL;
}
memset (&ctx, 0, sizeof (VerifyContext));
//FIXME use mono_method_get_signature_full
ctx.signature = mono_method_signature (method);
if (!ctx.signature) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Could not decode method signature"));
return ctx.list;
}
ctx.header = mono_method_get_header (method);
if (!ctx.header) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Could not decode method header"));
return ctx.list;
}
ctx.method = method;
code_start = ip = ctx.header->code;
end = ip + ctx.header->code_size;
ctx.image = image = method->klass->image;
ctx.max_args = ctx.signature->param_count + ctx.signature->hasthis;
ctx.max_stack = ctx.header->max_stack;
ctx.verifiable = ctx.valid = 1;
ctx.level = level;
ctx.code = g_new (ILCodeDesc, ctx.header->code_size);
ctx.code_size = ctx.header->code_size;
memset(ctx.code, 0, sizeof (ILCodeDesc) * ctx.header->code_size);
ctx.num_locals = ctx.header->num_locals;
ctx.locals = g_memdup (ctx.header->locals, sizeof (MonoType*) * ctx.header->num_locals);
if (ctx.num_locals > 0 && !ctx.header->init_locals)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Method with locals variable but without init locals set"));
ctx.params = g_new (MonoType*, ctx.max_args);
if (ctx.signature->hasthis)
ctx.params [0] = method->klass->valuetype ? &method->klass->this_arg : &method->klass->byval_arg;
memcpy (ctx.params + ctx.signature->hasthis, ctx.signature->params, sizeof (MonoType *) * ctx.signature->param_count);
if (ctx.signature->is_inflated)
ctx.generic_context = generic_context = mono_method_get_context (method);
if (!generic_context && (method->klass->generic_container || method->is_generic)) {
if (method->is_generic)
ctx.generic_context = generic_context = &(mono_method_get_generic_container (method)->context);
else
ctx.generic_context = generic_context = &method->klass->generic_container->context;
}
for (i = 0; i < ctx.num_locals; ++i) {
MonoType *uninflated = ctx.locals [i];
ctx.locals [i] = mono_class_inflate_generic_type_checked (ctx.locals [i], ctx.generic_context, &error);
if (!mono_error_ok (&error)) {
char *name = mono_type_full_name (ctx.locals [i] ? ctx.locals [i] : uninflated);
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid local %d of type %s", i, name));
g_free (name);
mono_error_cleanup (&error);
/* we must not free (in cleanup) what was not yet allocated (but only copied) */
ctx.num_locals = i;
ctx.max_args = 0;
goto cleanup;
}
}
for (i = 0; i < ctx.max_args; ++i) {
MonoType *uninflated = ctx.params [i];
ctx.params [i] = mono_class_inflate_generic_type_checked (ctx.params [i], ctx.generic_context, &error);
if (!mono_error_ok (&error)) {
char *name = mono_type_full_name (ctx.params [i] ? ctx.params [i] : uninflated);
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid parameter %d of type %s", i, name));
g_free (name);
mono_error_cleanup (&error);
/* we must not free (in cleanup) what was not yet allocated (but only copied) */
ctx.max_args = i;
goto cleanup;
}
}
stack_init (&ctx, &ctx.eval);
for (i = 0; i < ctx.num_locals; ++i) {
if (!mono_type_is_valid_in_context (&ctx, ctx.locals [i]))
break;
if (get_stack_type (ctx.locals [i]) == TYPE_INV) {
char *name = mono_type_full_name (ctx.locals [i]);
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid local %i of type %s", i, name));
g_free (name);
break;
}
}
for (i = 0; i < ctx.max_args; ++i) {
if (!mono_type_is_valid_in_context (&ctx, ctx.params [i]))
break;
if (get_stack_type (ctx.params [i]) == TYPE_INV) {
char *name = mono_type_full_name (ctx.params [i]);
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid parameter %i of type %s", i, name));
g_free (name);
break;
}
}
if (!ctx.valid)
goto cleanup;
for (i = 0; i < ctx.header->num_clauses && ctx.valid; ++i) {
MonoExceptionClause *clause = ctx.header->clauses + i;
VERIFIER_DEBUG (printf ("clause try %x len %x filter at %x handler at %x len %x\n", clause->try_offset, clause->try_len, clause->data.filter_offset, clause->handler_offset, clause->handler_len); );
if (clause->try_offset > ctx.code_size || ADD_IS_GREATER_OR_OVF (clause->try_offset, clause->try_len, ctx.code_size))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("try clause out of bounds at 0x%04x", clause->try_offset));
if (clause->try_len <= 0)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("try clause len <= 0 at 0x%04x", clause->try_offset));
if (clause->handler_offset > ctx.code_size || ADD_IS_GREATER_OR_OVF (clause->handler_offset, clause->handler_len, ctx.code_size))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("handler clause out of bounds at 0x%04x", clause->try_offset));
if (clause->handler_len <= 0)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("handler clause len <= 0 at 0x%04x", clause->try_offset));
if (clause->try_offset < clause->handler_offset && ADD_IS_GREATER_OR_OVF (clause->try_offset, clause->try_len, HANDLER_START (clause)))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("try block (at 0x%04x) includes handler block (at 0x%04x)", clause->try_offset, clause->handler_offset));
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
if (clause->data.filter_offset > ctx.code_size)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("filter clause out of bounds at 0x%04x", clause->try_offset));
if (clause->data.filter_offset >= clause->handler_offset)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("filter clause must come before the handler clause at 0x%04x", clause->data.filter_offset));
}
for (n = i + 1; n < ctx.header->num_clauses && ctx.valid; ++n)
verify_clause_relationship (&ctx, clause, ctx.header->clauses + n);
if (!ctx.valid)
break;
ctx.code [clause->try_offset].flags |= IL_CODE_FLAG_WAS_TARGET;
if (clause->try_offset + clause->try_len < ctx.code_size)
ctx.code [clause->try_offset + clause->try_len].flags |= IL_CODE_FLAG_WAS_TARGET;
if (clause->handler_offset + clause->handler_len < ctx.code_size)
ctx.code [clause->handler_offset + clause->handler_len].flags |= IL_CODE_FLAG_WAS_TARGET;
if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) {
if (!clause->data.catch_class) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Catch clause %d with invalid type", i));
break;
}
init_stack_with_value_at_exception_boundary (&ctx, ctx.code + clause->handler_offset, clause->data.catch_class);
}
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
init_stack_with_value_at_exception_boundary (&ctx, ctx.code + clause->data.filter_offset, mono_defaults.exception_class);
init_stack_with_value_at_exception_boundary (&ctx, ctx.code + clause->handler_offset, mono_defaults.exception_class);
}
}
original_bb = bb = mono_basic_block_split (method, &error);
if (!mono_error_ok (&error)) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid branch target: %s", mono_error_get_message (&error)));
mono_error_cleanup (&error);
goto cleanup;
}
g_assert (bb);
while (ip < end && ctx.valid) {
int op_size;
ip_offset = ip - code_start;
{
const unsigned char *ip_copy = ip;
int op;
if (ip_offset > bb->end) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Branch or EH block at [0x%04x] targets middle instruction at 0x%04x", bb->end, ip_offset));
goto cleanup;
}
if (ip_offset == bb->end)
bb = bb->next;
op_size = mono_opcode_value_and_size (&ip_copy, end, &op);
if (op_size == -1) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction %x at 0x%04x", *ip, ip_offset));
goto cleanup;
}
if (ADD_IS_GREATER_OR_OVF (ip_offset, op_size, bb->end)) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Branch or EH block targets middle of instruction at 0x%04x", ip_offset));
goto cleanup;
}
/*Last Instruction*/
if (ip_offset + op_size == bb->end && mono_opcode_is_prefix (op)) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Branch or EH block targets between prefix '%s' and instruction at 0x%04x", mono_opcode_name (op), ip_offset));
goto cleanup;
}
}
ctx.ip_offset = ip_offset = ip - code_start;
/*We need to check against fallthrough in and out of protected blocks.
 * For fall-out we check, once a protected block ends, whether the start flag is set.
 * Likewise for fall-in, we check whether ip is the start of a protected block while start is not set.
 * TODO convert these checks to be done using flags and not this loop
 */
for (i = 0; i < ctx.header->num_clauses && ctx.valid; ++i) {
MonoExceptionClause *clause = ctx.header->clauses + i;
if ((clause->try_offset + clause->try_len == ip_offset) && start == 0) {
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("fallthru off try block at 0x%04x", ip_offset));
start = 1;
}
if ((clause->handler_offset + clause->handler_len == ip_offset) && start == 0) {
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("fallout of handler block at 0x%04x", ip_offset));
else
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("fallout of handler block at 0x%04x", ip_offset));
start = 1;
}
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER && clause->handler_offset == ip_offset && start == 0) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("fallout of filter block at 0x%04x", ip_offset));
start = 1;
}
if (clause->handler_offset == ip_offset && start == 0) {
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("fallthru handler block at 0x%04x", ip_offset));
start = 1;
}
if (clause->try_offset == ip_offset && ctx.eval.size > 0 && start == 0) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Try to enter try block with a non-empty stack at 0x%04x", ip_offset));
start = 1;
}
}
/*This must be done after fallthru detection otherwise it won't happen.*/
if (bb->dead) {
/*FIXME remove this once we move all bad branch checking code to use BB only*/
ctx.code [ip_offset].flags |= IL_CODE_FLAG_SEEN;
ip += op_size;
continue;
}
if (!ctx.valid)
break;
if (need_merge) {
VERIFIER_DEBUG ( printf ("extra merge needed! 0x%04x \n", ctx.target); );
merge_stacks (&ctx, &ctx.eval, &ctx.code [ctx.target], FALSE, TRUE);
need_merge = 0;
}
merge_stacks (&ctx, &ctx.eval, &ctx.code[ip_offset], start, FALSE);
start = 0;
/*TODO we can fast detect a forward branch or exception block targeting code after prefix, we should fail fast*/
#ifdef MONO_VERIFIER_DEBUG
{
char *discode;
discode = mono_disasm_code_one (NULL, method, ip, NULL);
discode [strlen (discode) - 1] = 0; /* no \n */
g_print ("[%d] %-29s (%d)\n", ip_offset, discode, ctx.eval.size);
g_free (discode);
}
dump_stack_state (&ctx.code [ip_offset]);
dump_stack_state (&ctx.eval);
#endif
switch (*ip) {
case CEE_NOP:
case CEE_BREAK:
++ip;
break;
case CEE_LDARG_0:
case CEE_LDARG_1:
case CEE_LDARG_2:
case CEE_LDARG_3:
push_arg (&ctx, *ip - CEE_LDARG_0, FALSE);
++ip;
break;
case CEE_LDARG_S:
case CEE_LDARGA_S:
code_bounds_check (2);
push_arg (&ctx, ip [1], *ip == CEE_LDARGA_S);
ip += 2;
break;
case CEE_ADD_OVF_UN:
do_binop (&ctx, *ip, add_ovf_un_table);
++ip;
break;
case CEE_SUB_OVF_UN:
do_binop (&ctx, *ip, sub_ovf_un_table);
++ip;
break;
case CEE_ADD_OVF:
case CEE_SUB_OVF:
case CEE_MUL_OVF:
case CEE_MUL_OVF_UN:
do_binop (&ctx, *ip, bin_ovf_table);
++ip;
break;
case CEE_ADD:
do_binop (&ctx, *ip, add_table);
++ip;
break;
case CEE_SUB:
do_binop (&ctx, *ip, sub_table);
++ip;
break;
case CEE_MUL:
case CEE_DIV:
case CEE_REM:
do_binop (&ctx, *ip, bin_op_table);
++ip;
break;
case CEE_AND:
case CEE_DIV_UN:
case CEE_OR:
case CEE_REM_UN:
case CEE_XOR:
do_binop (&ctx, *ip, int_bin_op_table);
++ip;
break;
case CEE_SHL:
case CEE_SHR:
case CEE_SHR_UN:
do_binop (&ctx, *ip, shift_op_table);
++ip;
break;
case CEE_POP:
if (!check_underflow (&ctx, 1))
break;
stack_pop_safe (&ctx);
++ip;
break;
case CEE_RET:
do_ret (&ctx);
++ip;
start = 1;
break;
case CEE_LDLOC_0:
case CEE_LDLOC_1:
case CEE_LDLOC_2:
case CEE_LDLOC_3:
/*TODO support definite assignment verification? */
push_local (&ctx, *ip - CEE_LDLOC_0, FALSE);
++ip;
break;
case CEE_STLOC_0:
case CEE_STLOC_1:
case CEE_STLOC_2:
case CEE_STLOC_3:
store_local (&ctx, *ip - CEE_STLOC_0);
++ip;
break;
case CEE_STLOC_S:
code_bounds_check (2);
store_local (&ctx, ip [1]);
ip += 2;
break;
case CEE_STARG_S:
code_bounds_check (2);
store_arg (&ctx, ip [1]);
ip += 2;
break;
case CEE_LDC_I4_M1:
case CEE_LDC_I4_0:
case CEE_LDC_I4_1:
case CEE_LDC_I4_2:
case CEE_LDC_I4_3:
case CEE_LDC_I4_4:
case CEE_LDC_I4_5:
case CEE_LDC_I4_6:
case CEE_LDC_I4_7:
case CEE_LDC_I4_8:
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
++ip;
break;
case CEE_LDC_I4_S:
code_bounds_check (2);
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_I4, &mono_defaults.int32_class->byval_arg);
ip += 2;
break;
case CEE_LDC_I4:
code_bounds_check (5);
if (check_overflow (&ctx))
stack_push_val (&ctx,TYPE_I4, &mono_defaults.int32_class->byval_arg);
ip += 5;
break;
case CEE_LDC_I8:
code_bounds_check (9);
if (check_overflow (&ctx))
stack_push_val (&ctx,TYPE_I8, &mono_defaults.int64_class->byval_arg);
ip += 9;
break;
case CEE_LDC_R4:
code_bounds_check (5);
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_R8, &mono_defaults.double_class->byval_arg);
ip += 5;
break;
case CEE_LDC_R8:
code_bounds_check (9);
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_R8, &mono_defaults.double_class->byval_arg);
ip += 9;
break;
case CEE_LDNULL:
if (check_overflow (&ctx))
stack_push_val (&ctx, TYPE_COMPLEX | NULL_LITERAL_MASK, &mono_defaults.object_class->byval_arg);
++ip;
break;
case CEE_BEQ_S:
case CEE_BNE_UN_S:
code_bounds_check (2);
do_branch_op (&ctx, (signed char)ip [1] + 2, cmp_br_eq_op);
ip += 2;
need_merge = 1;
break;
case CEE_BGE_S:
case CEE_BGT_S:
case CEE_BLE_S:
case CEE_BLT_S:
case CEE_BGE_UN_S:
case CEE_BGT_UN_S:
case CEE_BLE_UN_S:
case CEE_BLT_UN_S:
code_bounds_check (2);
do_branch_op (&ctx, (signed char)ip [1] + 2, cmp_br_op);
ip += 2;
need_merge = 1;
break;
case CEE_BEQ:
case CEE_BNE_UN:
code_bounds_check (5);
do_branch_op (&ctx, (gint32)read32 (ip + 1) + 5, cmp_br_eq_op);
ip += 5;
need_merge = 1;
break;
case CEE_BGE:
case CEE_BGT:
case CEE_BLE:
case CEE_BLT:
case CEE_BGE_UN:
case CEE_BGT_UN:
case CEE_BLE_UN:
case CEE_BLT_UN:
code_bounds_check (5);
do_branch_op (&ctx, (gint32)read32 (ip + 1) + 5, cmp_br_op);
ip += 5;
need_merge = 1;
break;
case CEE_LDLOC_S:
case CEE_LDLOCA_S:
code_bounds_check (2);
push_local (&ctx, ip[1], *ip == CEE_LDLOCA_S);
ip += 2;
break;
case CEE_UNUSED99:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Use of the `unused' opcode"));
++ip;
break;
case CEE_DUP: {
ILStackDesc * top;
if (!check_underflow (&ctx, 1))
break;
if (!check_overflow (&ctx))
break;
top = stack_pop_safe (&ctx);
copy_stack_value (stack_push (&ctx), top);
copy_stack_value (stack_push (&ctx), top);
++ip;
break;
}
case CEE_JMP:
code_bounds_check (5);
if (ctx.eval.size)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Eval stack must be empty in jmp at 0x%04x", ip_offset));
token = read32 (ip + 1);
if (in_any_block (ctx.header, ip_offset))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("jmp cannot escape exception blocks at 0x%04x", ip_offset));
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Intruction jmp is not verifiable at 0x%04x", ctx.ip_offset));
/*
* FIXME: check signature, retval, arguments etc.
*/
ip += 5;
break;
case CEE_CALL:
case CEE_CALLVIRT:
code_bounds_check (5);
do_invoke_method (&ctx, read32 (ip + 1), *ip == CEE_CALLVIRT);
ip += 5;
break;
case CEE_CALLI:
code_bounds_check (5);
token = read32 (ip + 1);
/*
* FIXME: check signature, retval, arguments etc.
* FIXME: check requirements for tail call
*/
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Intruction calli is not verifiable at 0x%04x", ctx.ip_offset));
ip += 5;
break;
case CEE_BR_S:
code_bounds_check (2);
do_static_branch (&ctx, (signed char)ip [1] + 2);
need_merge = 1;
ip += 2;
start = 1;
break;
case CEE_BRFALSE_S:
case CEE_BRTRUE_S:
code_bounds_check (2);
do_boolean_branch_op (&ctx, (signed char)ip [1] + 2);
ip += 2;
need_merge = 1;
break;
case CEE_BR:
code_bounds_check (5);
do_static_branch (&ctx, (gint32)read32 (ip + 1) + 5);
need_merge = 1;
ip += 5;
start = 1;
break;
case CEE_BRFALSE:
case CEE_BRTRUE:
code_bounds_check (5);
do_boolean_branch_op (&ctx, (gint32)read32 (ip + 1) + 5);
ip += 5;
need_merge = 1;
break;
case CEE_SWITCH: {
guint32 entries;
code_bounds_check (5);
entries = read32 (ip + 1);
if (entries > 0xFFFFFFFFU / sizeof (guint32))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Too many switch entries %x at 0x%04x", entries, ctx.ip_offset));
ip += 5;
code_bounds_check (sizeof (guint32) * entries);
do_switch (&ctx, entries, ip);
ip += sizeof (guint32) * entries;
break;
}
case CEE_LDIND_I1:
case CEE_LDIND_U1:
case CEE_LDIND_I2:
case CEE_LDIND_U2:
case CEE_LDIND_I4:
case CEE_LDIND_U4:
case CEE_LDIND_I8:
case CEE_LDIND_I:
case CEE_LDIND_R4:
case CEE_LDIND_R8:
case CEE_LDIND_REF:
do_load_indirect (&ctx, *ip);
++ip;
break;
case CEE_STIND_REF:
case CEE_STIND_I1:
case CEE_STIND_I2:
case CEE_STIND_I4:
case CEE_STIND_I8:
case CEE_STIND_R4:
case CEE_STIND_R8:
case CEE_STIND_I:
do_store_indirect (&ctx, *ip);
++ip;
break;
case CEE_NOT:
case CEE_NEG:
do_unary_math_op (&ctx, *ip);
++ip;
break;
case CEE_CONV_I1:
case CEE_CONV_I2:
case CEE_CONV_I4:
case CEE_CONV_U1:
case CEE_CONV_U2:
case CEE_CONV_U4:
do_conversion (&ctx, TYPE_I4);
++ip;
break;
case CEE_CONV_I8:
case CEE_CONV_U8:
do_conversion (&ctx, TYPE_I8);
++ip;
break;
case CEE_CONV_R4:
case CEE_CONV_R8:
case CEE_CONV_R_UN:
do_conversion (&ctx, TYPE_R8);
++ip;
break;
case CEE_CONV_I:
case CEE_CONV_U:
do_conversion (&ctx, TYPE_NATIVE_INT);
++ip;
break;
case CEE_CPOBJ:
code_bounds_check (5);
do_cpobj (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDOBJ:
code_bounds_check (5);
do_ldobj_value (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDSTR:
code_bounds_check (5);
do_ldstr (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_NEWOBJ:
code_bounds_check (5);
do_newobj (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CASTCLASS:
case CEE_ISINST:
code_bounds_check (5);
do_cast (&ctx, read32 (ip + 1), *ip == CEE_CASTCLASS ? "castclass" : "isinst");
ip += 5;
break;
case CEE_UNUSED58:
case CEE_UNUSED1:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Use of the `unused' opcode"));
++ip;
break;
case CEE_UNBOX:
code_bounds_check (5);
do_unbox_value (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_THROW:
do_throw (&ctx);
start = 1;
++ip;
break;
case CEE_LDFLD:
case CEE_LDFLDA:
code_bounds_check (5);
do_push_field (&ctx, read32 (ip + 1), *ip == CEE_LDFLDA);
ip += 5;
break;
case CEE_LDSFLD:
case CEE_LDSFLDA:
code_bounds_check (5);
do_push_static_field (&ctx, read32 (ip + 1), *ip == CEE_LDSFLDA);
ip += 5;
break;
case CEE_STFLD:
code_bounds_check (5);
do_store_field (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_STSFLD:
code_bounds_check (5);
do_store_static_field (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_STOBJ:
code_bounds_check (5);
do_stobj (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CONV_OVF_I1_UN:
case CEE_CONV_OVF_I2_UN:
case CEE_CONV_OVF_I4_UN:
case CEE_CONV_OVF_U1_UN:
case CEE_CONV_OVF_U2_UN:
case CEE_CONV_OVF_U4_UN:
do_conversion (&ctx, TYPE_I4);
++ip;
break;
case CEE_CONV_OVF_I8_UN:
case CEE_CONV_OVF_U8_UN:
do_conversion (&ctx, TYPE_I8);
++ip;
break;
case CEE_CONV_OVF_I_UN:
case CEE_CONV_OVF_U_UN:
do_conversion (&ctx, TYPE_NATIVE_INT);
++ip;
break;
case CEE_BOX:
code_bounds_check (5);
do_box_value (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_NEWARR:
code_bounds_check (5);
do_newarr (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDLEN:
do_ldlen (&ctx);
++ip;
break;
case CEE_LDELEMA:
code_bounds_check (5);
do_ldelema (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDELEM_I1:
case CEE_LDELEM_U1:
case CEE_LDELEM_I2:
case CEE_LDELEM_U2:
case CEE_LDELEM_I4:
case CEE_LDELEM_U4:
case CEE_LDELEM_I8:
case CEE_LDELEM_I:
case CEE_LDELEM_R4:
case CEE_LDELEM_R8:
case CEE_LDELEM_REF:
do_ldelem (&ctx, *ip, 0);
++ip;
break;
case CEE_STELEM_I:
case CEE_STELEM_I1:
case CEE_STELEM_I2:
case CEE_STELEM_I4:
case CEE_STELEM_I8:
case CEE_STELEM_R4:
case CEE_STELEM_R8:
case CEE_STELEM_REF:
do_stelem (&ctx, *ip, 0);
++ip;
break;
case CEE_LDELEM:
code_bounds_check (5);
do_ldelem (&ctx, *ip, read32 (ip + 1));
ip += 5;
break;
case CEE_STELEM:
code_bounds_check (5);
do_stelem (&ctx, *ip, read32 (ip + 1));
ip += 5;
break;
case CEE_UNBOX_ANY:
code_bounds_check (5);
do_unbox_any (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CONV_OVF_I1:
case CEE_CONV_OVF_U1:
case CEE_CONV_OVF_I2:
case CEE_CONV_OVF_U2:
case CEE_CONV_OVF_I4:
case CEE_CONV_OVF_U4:
do_conversion (&ctx, TYPE_I4);
++ip;
break;
case CEE_CONV_OVF_I8:
case CEE_CONV_OVF_U8:
do_conversion (&ctx, TYPE_I8);
++ip;
break;
case CEE_CONV_OVF_I:
case CEE_CONV_OVF_U:
do_conversion (&ctx, TYPE_NATIVE_INT);
++ip;
break;
case CEE_REFANYVAL:
code_bounds_check (5);
do_refanyval (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CKFINITE:
do_ckfinite (&ctx);
++ip;
break;
case CEE_MKREFANY:
code_bounds_check (5);
do_mkrefany (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_LDTOKEN:
code_bounds_check (5);
do_load_token (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_ENDFINALLY:
if (!is_correct_endfinally (ctx.header, ip_offset))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("endfinally must be used inside a finally/fault handler at 0x%04x", ctx.ip_offset));
ctx.eval.size = 0;
start = 1;
++ip;
break;
case CEE_LEAVE:
code_bounds_check (5);
do_leave (&ctx, read32 (ip + 1) + 5);
ip += 5;
start = 1;
need_merge = 1;
break;
case CEE_LEAVE_S:
code_bounds_check (2);
do_leave (&ctx, (signed char)ip [1] + 2);
ip += 2;
start = 1;
need_merge = 1;
break;
case CEE_PREFIX1:
code_bounds_check (2);
++ip;
switch (*ip) {
case CEE_STLOC:
code_bounds_check (3);
store_local (&ctx, read16 (ip + 1));
ip += 3;
break;
case CEE_CEQ:
do_cmp_op (&ctx, cmp_br_eq_op, *ip);
++ip;
break;
case CEE_CGT:
case CEE_CGT_UN:
case CEE_CLT:
case CEE_CLT_UN:
do_cmp_op (&ctx, cmp_br_op, *ip);
++ip;
break;
case CEE_STARG:
code_bounds_check (3);
store_arg (&ctx, read16 (ip + 1) );
ip += 3;
break;
case CEE_ARGLIST:
if (!check_overflow (&ctx))
break;
if (ctx.signature->call_convention != MONO_CALL_VARARG)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Cannot use arglist on method without VARGARG calling convention at 0x%04x", ctx.ip_offset));
set_stack_value (&ctx, stack_push (&ctx), &mono_defaults.argumenthandle_class->byval_arg, FALSE);
++ip;
break;
case CEE_LDFTN:
code_bounds_check (5);
do_load_function_ptr (&ctx, read32 (ip + 1), FALSE);
ip += 5;
break;
case CEE_LDVIRTFTN:
code_bounds_check (5);
do_load_function_ptr (&ctx, read32 (ip + 1), TRUE);
ip += 5;
break;
case CEE_LDARG:
case CEE_LDARGA:
code_bounds_check (3);
push_arg (&ctx, read16 (ip + 1), *ip == CEE_LDARGA);
ip += 3;
break;
case CEE_LDLOC:
case CEE_LDLOCA:
code_bounds_check (3);
push_local (&ctx, read16 (ip + 1), *ip == CEE_LDLOCA);
ip += 3;
break;
case CEE_LOCALLOC:
do_localloc (&ctx);
++ip;
break;
case CEE_UNUSED56:
case CEE_UNUSED57:
case CEE_UNUSED70:
case CEE_UNUSED:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Use of the `unused' opcode"));
++ip;
break;
case CEE_ENDFILTER:
do_endfilter (&ctx);
start = 1;
++ip;
break;
case CEE_UNALIGNED_:
code_bounds_check (2);
prefix |= PREFIX_UNALIGNED;
ip += 2;
break;
case CEE_VOLATILE_:
prefix |= PREFIX_VOLATILE;
++ip;
break;
case CEE_TAIL_:
prefix |= PREFIX_TAIL;
++ip;
if (ip < end && (*ip != CEE_CALL && *ip != CEE_CALLI && *ip != CEE_CALLVIRT))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("tail prefix must be used only with call opcodes at 0x%04x", ip_offset));
break;
case CEE_INITOBJ:
code_bounds_check (5);
do_initobj (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_CONSTRAINED_:
code_bounds_check (5);
ctx.constrained_type = get_boxable_mono_type (&ctx, read32 (ip + 1), "constrained.");
prefix |= PREFIX_CONSTRAINED;
ip += 5;
break;
case CEE_READONLY_:
prefix |= PREFIX_READONLY;
ip++;
break;
case CEE_CPBLK:
CLEAR_PREFIX (&ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (&ctx, 3))
break;
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Instruction cpblk is not verifiable at 0x%04x", ctx.ip_offset));
ip++;
break;
case CEE_INITBLK:
CLEAR_PREFIX (&ctx, PREFIX_UNALIGNED | PREFIX_VOLATILE);
if (!check_underflow (&ctx, 3))
break;
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Instruction initblk is not verifiable at 0x%04x", ctx.ip_offset));
ip++;
break;
case CEE_NO_:
ip += 2;
break;
case CEE_RETHROW:
if (!is_correct_rethrow (ctx.header, ip_offset))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("rethrow must be used inside a catch handler at 0x%04x", ctx.ip_offset));
ctx.eval.size = 0;
start = 1;
++ip;
break;
case CEE_SIZEOF:
code_bounds_check (5);
do_sizeof (&ctx, read32 (ip + 1));
ip += 5;
break;
case CEE_REFANYTYPE:
do_refanytype (&ctx);
++ip;
break;
default:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction FE %x at 0x%04x", *ip, ctx.ip_offset));
++ip;
}
break;
default:
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction %x at 0x%04x", *ip, ctx.ip_offset));
++ip;
}
/*TODO we can fast detect a forward branch or exception block targeting code after prefix, we should fail fast*/
if (prefix) {
if (!ctx.prefix_set) //first prefix
ctx.code [ctx.ip_offset].flags |= IL_CODE_FLAG_SEEN;
ctx.prefix_set |= prefix;
ctx.has_flags = TRUE;
prefix = 0;
} else {
if (!ctx.has_flags)
ctx.code [ctx.ip_offset].flags |= IL_CODE_FLAG_SEEN;
if (ctx.prefix_set & PREFIX_CONSTRAINED)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction after constrained prefix at 0x%04x", ctx.ip_offset));
if (ctx.prefix_set & PREFIX_READONLY)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction after readonly prefix at 0x%04x", ctx.ip_offset));
if (ctx.prefix_set & PREFIX_VOLATILE)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction after volatile prefix at 0x%04x", ctx.ip_offset));
if (ctx.prefix_set & PREFIX_UNALIGNED)
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Invalid instruction after unaligned prefix at 0x%04x", ctx.ip_offset));
ctx.prefix_set = prefix = 0;
ctx.has_flags = FALSE;
}
}
/*
 * If ip != end we overran the code buffer; if !start the last instruction
 * can fall off the end of the method. Mark either case as an error.
 */
if ((ip != end || !start) && ctx.verifiable && !ctx.list) {
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Run ahead of method code at 0x%04x", ip_offset));
}
/*We should guard against the last decoded opcode, otherwise we might add errors that don't make sense.*/
for (i = 0; i < ctx.code_size && i < ip_offset; ++i) {
if (ctx.code [i].flags & IL_CODE_FLAG_WAS_TARGET) {
if (!(ctx.code [i].flags & IL_CODE_FLAG_SEEN))
ADD_VERIFY_ERROR (&ctx, g_strdup_printf ("Branch or exception block target middle of intruction at 0x%04x", i));
if (ctx.code [i].flags & IL_CODE_DELEGATE_SEQUENCE)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Branch to delegate code sequence at 0x%04x", i));
}
if ((ctx.code [i].flags & IL_CODE_LDFTN_DELEGATE_NONFINAL_VIRTUAL) && ctx.has_this_store)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Invalid ldftn with virtual function in method with stdarg 0 at 0x%04x", i));
if ((ctx.code [i].flags & IL_CODE_CALL_NONFINAL_VIRTUAL) && ctx.has_this_store)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Invalid call to a non-final virtual function in method with stdarg.0 or ldarga.0 at 0x%04x", i));
}
if (mono_method_is_constructor (ctx.method) && !ctx.super_ctor_called && !ctx.method->klass->valuetype && ctx.method->klass != mono_defaults.object_class)
CODE_NOT_VERIFIABLE (&ctx, g_strdup_printf ("Constructor not calling super\n"));
cleanup:
if (ctx.code) {
for (i = 0; i < ctx.header->code_size; ++i) {
if (ctx.code [i].stack)
g_free (ctx.code [i].stack);
}
}
for (tmp = ctx.funptrs; tmp; tmp = tmp->next)
g_free (tmp->data);
g_slist_free (ctx.funptrs);
for (tmp = ctx.exception_types; tmp; tmp = tmp->next)
mono_metadata_free_type (tmp->data);
g_slist_free (ctx.exception_types);
for (i = 0; i < ctx.num_locals; ++i) {
if (ctx.locals [i])
mono_metadata_free_type (ctx.locals [i]);
}
for (i = 0; i < ctx.max_args; ++i) {
if (ctx.params [i])
mono_metadata_free_type (ctx.params [i]);
}
if (ctx.eval.stack)
g_free (ctx.eval.stack);
if (ctx.code)
g_free (ctx.code);
g_free (ctx.locals);
g_free (ctx.params);
mono_basic_block_free (original_bb);
return ctx.list;
}
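/*
 * A minimal usage sketch (assumes the caller links against the runtime and
 * obtained the method handle elsewhere; the exact layout of the list entries
 * is an internal detail, here we only rely on them being freeable):
 *
 *   GSList *errors = mono_method_verify (method, MONO_VERIFY_NON_STRICT);
 *   if (errors) {
 *       // the method is invalid or unverifiable; inspect/report the entries
 *       mono_free_verify_list (errors);
 *   }
 */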
char*
mono_verify_corlib ()
{
/* This is a public API function so cannot be removed */
return NULL;
}
/*
* Returns true if @method needs to be verified.
*
*/
gboolean
mono_verifier_is_enabled_for_method (MonoMethod *method)
{
return mono_verifier_is_enabled_for_class (method->klass) && method->wrapper_type == MONO_WRAPPER_NONE;
}
/*
* Returns true if @klass needs to be verified.
*
*/
gboolean
mono_verifier_is_enabled_for_class (MonoClass *klass)
{
return verify_all || (verifier_mode > MONO_VERIFIER_MODE_OFF && !klass->image->assembly->in_gac && klass->image != mono_defaults.corlib);
}
gboolean
mono_verifier_is_enabled_for_image (MonoImage *image)
{
return verify_all || verifier_mode > MONO_VERIFIER_MODE_OFF;
}
gboolean
mono_verifier_is_method_full_trust (MonoMethod *method)
{
return mono_verifier_is_class_full_trust (method->klass);
}
/*
 * Returns whether @klass is under full trust or not.
 *
 * TODO This code doesn't take CAS into account.
 *
 * Under verify_all, all user code must be verifiable if no security option was set.
 *
 */
gboolean
mono_verifier_is_class_full_trust (MonoClass *klass)
{
/* under CoreCLR code is trusted if it is part of the "platform" otherwise all code inside the GAC is trusted */
gboolean trusted_location = (mono_security_get_mode () != MONO_SECURITY_MODE_CORE_CLR) ?
klass->image->assembly->in_gac : mono_security_core_clr_is_platform_image (klass->image);
if (verify_all && verifier_mode == MONO_VERIFIER_MODE_OFF)
return trusted_location || klass->image == mono_defaults.corlib;
return verifier_mode < MONO_VERIFIER_MODE_VERIFIABLE || trusted_location || klass->image == mono_defaults.corlib;
}
GSList*
mono_method_verify_with_current_settings (MonoMethod *method, gboolean skip_visibility)
{
return mono_method_verify (method,
(verifier_mode != MONO_VERIFIER_MODE_STRICT ? MONO_VERIFY_NON_STRICT: 0)
| (!mono_verifier_is_method_full_trust (method) ? MONO_VERIFY_FAIL_FAST : 0)
| (skip_visibility ? MONO_VERIFY_SKIP_VISIBILITY : 0));
}
static int
get_field_end (MonoClassField *field)
{
int align;
int size = mono_type_size (field->type, &align);
if (size == 0)
size = 4; /*FIXME Is this a safe bet?*/
return size + field->offset;
}
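/*
 * A hedged C#-ish sketch of what the next routine rejects under explicit
 * layout: a non-reference field overlapping an object reference would let
 * unverifiable code forge a pointer, so:
 *
 *   [StructLayout (LayoutKind.Explicit)]
 *   class Evil {
 *       [FieldOffset (0)] object obj;
 *       [FieldOffset (0)] long val;   // overlaps the reference -> rejected
 *   }
 *
 * Two references sharing the exact same offset are tolerated, but only for
 * full-trust code (see the is_fulltrust check below).
 */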
static gboolean
verify_class_for_overlapping_reference_fields (MonoClass *class)
{
int i = 0, j;
gpointer iter = NULL;
MonoClassField *field;
gboolean is_fulltrust = mono_verifier_is_class_full_trust (class);
/*We can't skip types with !has_references since this is calculated after we have run.*/
if (!((class->flags & TYPE_ATTRIBUTE_LAYOUT_MASK) == TYPE_ATTRIBUTE_EXPLICIT_LAYOUT))
return TRUE;
/*We must check for anything overlapping reference fields.
The outer loop uses mono_class_get_fields to ensure that MonoClass:fields gets initialized.
*/
while ((field = mono_class_get_fields (class, &iter))) {
int fieldEnd = get_field_end (field);
gboolean is_valuetype = !MONO_TYPE_IS_REFERENCE (field->type);
++i;
if (mono_field_is_deleted (field) || (field->type->attrs & FIELD_ATTRIBUTE_STATIC))
continue;
for (j = i; j < class->field.count; ++j) {
MonoClassField *other = &class->fields [j];
int otherEnd = get_field_end (other);
if (mono_field_is_deleted (other) || (is_valuetype && !MONO_TYPE_IS_REFERENCE (other->type)) || (other->type->attrs & FIELD_ATTRIBUTE_STATIC))
continue;
if (!is_valuetype && MONO_TYPE_IS_REFERENCE (other->type) && field->offset == other->offset && is_fulltrust)
continue;
if ((otherEnd > field->offset && otherEnd <= fieldEnd) || (other->offset >= field->offset && other->offset < fieldEnd))
return FALSE;
}
}
return TRUE;
}
static guint
field_hash (gconstpointer key)
{
const MonoClassField *field = key;
return g_str_hash (field->name) ^ mono_metadata_type_hash (field->type);
}
static gboolean
field_equals (gconstpointer _a, gconstpointer _b)
{
const MonoClassField *a = _a;
const MonoClassField *b = _b;
return !strcmp (a->name, b->name) && mono_metadata_type_equal (a->type, b->type);
}
static gboolean
verify_class_fields (MonoClass *class)
{
gpointer iter = NULL;
MonoClassField *field;
MonoGenericContext *context = mono_class_get_context (class);
GHashTable *unique_fields = g_hash_table_new_full (&field_hash, &field_equals, NULL, NULL);
if (class->generic_container)
context = &class->generic_container->context;
while ((field = mono_class_get_fields (class, &iter)) != NULL) {
if (!mono_type_is_valid_type_in_context (field->type, context)) {
g_hash_table_destroy (unique_fields);
return FALSE;
}
if (g_hash_table_lookup (unique_fields, field)) {
g_hash_table_destroy (unique_fields);
return FALSE;
}
g_hash_table_insert (unique_fields, field, field);
}
g_hash_table_destroy (unique_fields);
return TRUE;
}
static gboolean
verify_interfaces (MonoClass *class)
{
int i;
for (i = 0; i < class->interface_count; ++i) {
MonoClass *iface = class->interfaces [i];
if (!(iface->flags & TYPE_ATTRIBUTE_INTERFACE))
return FALSE;
}
return TRUE;
}
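/*
 * The next two routines reject value types that (transitively) contain an
 * instance field of their own type, which would make their layout infinitely
 * recursive. A hedged sketch:
 *
 *   struct A { B b; }
 *   struct B { A a; }   // A -> B -> A cycle: both fail verification
 */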
static gboolean
verify_valuetype_layout_with_target (MonoClass *class, MonoClass *target_class)
{
int type;
gpointer iter = NULL;
MonoClassField *field;
MonoClass *field_class;
if (!class->valuetype)
return TRUE;
type = class->byval_arg.type;
/*primitive type fields are not properly decoded*/
if ((type >= MONO_TYPE_BOOLEAN && type <= MONO_TYPE_R8) || (type >= MONO_TYPE_I && type <= MONO_TYPE_U))
return TRUE;
while ((field = mono_class_get_fields (class, &iter)) != NULL) {
if (!field->type)
return FALSE;
if (field->type->attrs & (FIELD_ATTRIBUTE_STATIC | FIELD_ATTRIBUTE_HAS_FIELD_RVA))
continue;
field_class = mono_class_get_generic_type_definition (mono_class_from_mono_type (field->type));
if (field_class == target_class || class == field_class || !verify_valuetype_layout_with_target (field_class, target_class))
return FALSE;
}
return TRUE;
}
static gboolean
verify_valuetype_layout (MonoClass *class)
{
gboolean res;
res = verify_valuetype_layout_with_target (class, class);
return res;
}
static gboolean
recursive_mark_constraint_args (MonoBitSet *used_args, MonoGenericContainer *gc, MonoType *type)
{
int idx;
MonoClass **constraints;
MonoGenericParamInfo *param_info;
g_assert (mono_type_is_generic_argument (type));
idx = mono_type_get_generic_param_num (type);
if (mono_bitset_test_fast (used_args, idx))
return FALSE;
mono_bitset_set_fast (used_args, idx);
param_info = mono_generic_container_get_param_info (gc, idx);
if (!param_info->constraints)
return TRUE;
for (constraints = param_info->constraints; *constraints; ++constraints) {
MonoClass *ctr = *constraints;
MonoType *constraint_type = &ctr->byval_arg;
if (mono_type_is_generic_argument (constraint_type) && !recursive_mark_constraint_args (used_args, gc, constraint_type))
return FALSE;
}
return TRUE;
}
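/*
 * verify_generic_parameters rejects cyclic constraints between generic
 * parameters. A hedged C#-ish sketch:
 *
 *   class C<T, U> where T : U where U : T { }   // T -> U -> T: rejected
 *
 * The bitset tracks which parameters were already visited along a constraint
 * chain so the recursion above can detect the cycle.
 */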
static gboolean
verify_generic_parameters (MonoClass *class)
{
int i;
MonoGenericContainer *gc = class->generic_container;
MonoBitSet *used_args = mono_bitset_new (gc->type_argc, 0);
for (i = 0; i < gc->type_argc; ++i) {
MonoGenericParamInfo *param_info = mono_generic_container_get_param_info (gc, i);
MonoClass **constraints;
if (!param_info->constraints)
continue;
mono_bitset_clear_all (used_args);
mono_bitset_set_fast (used_args, i);
for (constraints = param_info->constraints; *constraints; ++constraints) {
MonoClass *ctr = *constraints;
MonoType *constraint_type = &ctr->byval_arg;
if (!mono_type_is_valid_type_in_context (constraint_type, &gc->context))
goto fail;
if (mono_type_is_generic_argument (constraint_type) && !recursive_mark_constraint_args (used_args, gc, constraint_type))
goto fail;
}
}
mono_bitset_free (used_args);
return TRUE;
fail:
mono_bitset_free (used_args);
return FALSE;
}
/*
 * Check if the class is verifiable.
 *
 * Right now there are no conditions that make a class valid but not verifiable. Both overlapping reference
 * fields and invalid generic instantiations are fatal errors.
 *
 * This method must be safe to call from mono_class_init and all code must be careful about that.
 *
 */
gboolean
mono_verifier_verify_class (MonoClass *class)
{
/*Neither <Module>, object or ifaces have parent.*/
if (!class->parent &&
class != mono_defaults.object_class &&
!MONO_CLASS_IS_INTERFACE (class) &&
(!class->image->dynamic && class->type_token != 0x2000001)) /*<Module> is the first type in the assembly*/
return FALSE;
if (class->parent && MONO_CLASS_IS_INTERFACE (class->parent))
return FALSE;
if (class->generic_container && (class->flags & TYPE_ATTRIBUTE_LAYOUT_MASK) == TYPE_ATTRIBUTE_EXPLICIT_LAYOUT)
return FALSE;
if (class->generic_container && !verify_generic_parameters (class))
return FALSE;
if (!verify_class_for_overlapping_reference_fields (class))
return FALSE;
if (class->generic_class && !mono_class_is_valid_generic_instantiation (NULL, class))
return FALSE;
if (class->generic_class == NULL && !verify_class_fields (class))
return FALSE;
if (class->valuetype && !verify_valuetype_layout (class))
return FALSE;
if (!verify_interfaces (class))
return FALSE;
return TRUE;
}
gboolean
mono_verifier_class_is_valid_generic_instantiation (MonoClass *class)
{
return mono_class_is_valid_generic_instantiation (NULL, class);
}
#else
gboolean
mono_verifier_verify_class (MonoClass *class)
{
/* The verifier was disabled at compile time */
return TRUE;
}
GSList*
mono_method_verify_with_current_settings (MonoMethod *method, gboolean skip_visibility)
{
/* The verifier was disabled at compile time */
return NULL;
}
gboolean
mono_verifier_is_class_full_trust (MonoClass *klass)
{
/* The verifier was disabled at compile time */
return TRUE;
}
gboolean
mono_verifier_is_method_full_trust (MonoMethod *method)
{
/* The verifier was disabled at compile time */
return TRUE;
}
gboolean
mono_verifier_is_enabled_for_image (MonoImage *image)
{
/* The verifier was disabled at compile time */
return FALSE;
}
gboolean
mono_verifier_is_enabled_for_class (MonoClass *klass)
{
/* The verifier was disabled at compile time */
return FALSE;
}
gboolean
mono_verifier_is_enabled_for_method (MonoMethod *method)
{
/* The verifier was disabled at compile time */
return FALSE;
}
GSList*
mono_method_verify (MonoMethod *method, int level)
{
/* The verifier was disabled at compile time */
return NULL;
}
void
mono_free_verify_list (GSList *list)
{
/* The verifier was disabled at compile time */
/* will always be null if verifier is disabled */
}
GSList*
mono_image_verify_tables (MonoImage *image, int level)
{
/* The verifier was disabled at compile time */
return NULL;
}
gboolean
mono_verifier_class_is_valid_generic_instantiation (MonoClass *class)
{
return TRUE;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_4716_2 |
crossvul-cpp_data_good_2802_0 | /*
* nautilus-directory-async.c: Nautilus directory model state machine.
*
* Copyright (C) 1999, 2000, 2001 Eazel, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: Darin Adler <darin@bentspoon.com>
*/
#include <config.h>
#include "nautilus-directory-notify.h"
#include "nautilus-directory-private.h"
#include "nautilus-file-attributes.h"
#include "nautilus-file-private.h"
#include "nautilus-file-utilities.h"
#include "nautilus-signaller.h"
#include "nautilus-global-preferences.h"
#include "nautilus-link.h"
#include "nautilus-profile.h"
#include "nautilus-metadata.h"
#include <eel/eel-glib-extensions.h>
#include <gtk/gtk.h>
#include <libxml/parser.h>
#include <stdio.h>
#include <stdlib.h>
/* turn this on to see messages about each load_directory call: */
#if 0
#define DEBUG_LOAD_DIRECTORY
#endif
/* turn this on to check if async. job calls are balanced */
#if 0
#define DEBUG_ASYNC_JOBS
#endif
/* turn this on to log things starting and stopping */
#if 0
#define DEBUG_START_STOP
#endif
#define DIRECTORY_LOAD_ITEMS_PER_CALLBACK 100
/* Keep async. jobs down to this number for all directories. */
#define MAX_ASYNC_JOBS 10
struct TopLeftTextReadState
{
NautilusDirectory *directory;
NautilusFile *file;
gboolean large;
GCancellable *cancellable;
};
struct LinkInfoReadState
{
NautilusDirectory *directory;
GCancellable *cancellable;
NautilusFile *file;
};
struct ThumbnailState
{
NautilusDirectory *directory;
GCancellable *cancellable;
NautilusFile *file;
gboolean trying_original;
gboolean tried_original;
};
struct MountState
{
NautilusDirectory *directory;
GCancellable *cancellable;
NautilusFile *file;
};
struct FilesystemInfoState
{
NautilusDirectory *directory;
GCancellable *cancellable;
NautilusFile *file;
};
struct DirectoryLoadState
{
NautilusDirectory *directory;
GCancellable *cancellable;
GFileEnumerator *enumerator;
GHashTable *load_mime_list_hash;
NautilusFile *load_directory_file;
int load_file_count;
};
struct MimeListState
{
NautilusDirectory *directory;
NautilusFile *mime_list_file;
GCancellable *cancellable;
GFileEnumerator *enumerator;
GHashTable *mime_list_hash;
};
struct GetInfoState
{
NautilusDirectory *directory;
GCancellable *cancellable;
};
struct NewFilesState
{
NautilusDirectory *directory;
GCancellable *cancellable;
int count;
};
struct DirectoryCountState
{
NautilusDirectory *directory;
NautilusFile *count_file;
GCancellable *cancellable;
GFileEnumerator *enumerator;
int file_count;
};
struct DeepCountState
{
NautilusDirectory *directory;
GCancellable *cancellable;
GFileEnumerator *enumerator;
GFile *deep_count_location;
GList *deep_count_subdirectories;
GArray *seen_deep_count_inodes;
char *fs_id;
};
typedef struct
{
NautilusFile *file; /* Which file, NULL means all. */
union
{
NautilusDirectoryCallback directory;
NautilusFileCallback file;
} callback;
gpointer callback_data;
Request request;
gboolean active; /* Set to FALSE when the callback is triggered and
* scheduled to be called at idle; it's still kept
* in the list so we can kill it when the file
* goes away.
*/
} ReadyCallback;
typedef struct
{
NautilusFile *file; /* Which file, NULL means all. */
gboolean monitor_hidden_files; /* defines whether "all" includes hidden files */
gconstpointer client;
Request request;
} Monitor;
typedef struct
{
NautilusDirectory *directory;
NautilusInfoProvider *provider;
NautilusOperationHandle *handle;
NautilusOperationResult result;
} InfoProviderResponse;
typedef gboolean (*RequestCheck) (Request);
typedef gboolean (*FileCheck) (NautilusFile *);
/* Current number of async. jobs. */
static int async_job_count;
static GHashTable *waiting_directories;
#ifdef DEBUG_ASYNC_JOBS
static GHashTable *async_jobs;
#endif
/* Forward declarations for functions that need them. */
static void deep_count_load (DeepCountState *state,
GFile *location);
static gboolean request_is_satisfied (NautilusDirectory *directory,
NautilusFile *file,
Request request);
static void cancel_loading_attributes (NautilusDirectory *directory,
NautilusFileAttributes file_attributes);
static void add_all_files_to_work_queue (NautilusDirectory *directory);
static void link_info_done (NautilusDirectory *directory,
NautilusFile *file,
const char *uri,
const char *name,
GIcon *icon,
gboolean is_launcher,
gboolean is_foreign);
static void move_file_to_low_priority_queue (NautilusDirectory *directory,
NautilusFile *file);
static void move_file_to_extension_queue (NautilusDirectory *directory,
NautilusFile *file);
static void nautilus_directory_invalidate_file_attributes (NautilusDirectory *directory,
NautilusFileAttributes file_attributes);
/* Some helpers for case-insensitive strings.
* Move to nautilus-glib-extensions?
*/
static gboolean
istr_equal (gconstpointer v,
gconstpointer v2)
{
return g_ascii_strcasecmp (v, v2) == 0;
}
static guint
istr_hash (gconstpointer key)
{
const char *p;
guint h;
h = 0;
for (p = key; *p != '\0'; p++)
{
h = (h << 5) - h + g_ascii_tolower (*p);
}
return h;
}
static GHashTable *
istr_set_new (void)
{
return g_hash_table_new_full (istr_hash, istr_equal, g_free, NULL);
}
static void
istr_set_insert (GHashTable *table,
const char *istr)
{
char *key;
key = g_strdup (istr);
g_hash_table_replace (table, key, key);
}
static void
add_istr_to_list (gpointer key,
gpointer value,
gpointer callback_data)
{
GList **list;
list = callback_data;
*list = g_list_prepend (*list, g_strdup (key));
}
static GList *
istr_set_get_as_list (GHashTable *table)
{
GList *list;
list = NULL;
g_hash_table_foreach (table, add_istr_to_list, &list);
return list;
}
static void
istr_set_destroy (GHashTable *table)
{
g_hash_table_destroy (table);
}
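/*
 * Usage sketch for the istr set above (a set keyed case-insensitively, so
 * "Text/Plain" and "text/plain" collapse into one entry):
 *
 *   GHashTable *set = istr_set_new ();
 *   istr_set_insert (set, "text/plain");
 *   istr_set_insert (set, "Text/Plain");              // no new entry
 *   GList *mime_types = istr_set_get_as_list (set);   // strdup'd copies
 *   istr_set_destroy (set);
 *   ...
 *   g_list_free_full (mime_types, g_free);
 */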
static void
request_counter_add_request (RequestCounter counter,
Request request)
{
guint i;
for (i = 0; i < REQUEST_TYPE_LAST; i++)
{
if (REQUEST_WANTS_TYPE (request, i))
{
counter[i]++;
}
}
}
static void
request_counter_remove_request (RequestCounter counter,
Request request)
{
guint i;
for (i = 0; i < REQUEST_TYPE_LAST; i++)
{
if (REQUEST_WANTS_TYPE (request, i))
{
counter[i]--;
}
}
}
#if 0
static void
nautilus_directory_verify_request_counts (NautilusDirectory *directory)
{
GList *l;
RequestCounter counters;
int i;
gboolean fail;
fail = FALSE;
for (i = 0; i < REQUEST_TYPE_LAST; i++)
{
counters[i] = 0;
}
for (l = directory->details->monitor_list; l != NULL; l = l->next)
{
Monitor *monitor = l->data;
request_counter_add_request (counters, monitor->request);
}
for (i = 0; i < REQUEST_TYPE_LAST; i++)
{
if (counters[i] != directory->details->monitor_counters[i])
{
g_warning ("monitor counter for %i is wrong, expecting %d but found %d",
i, counters[i], directory->details->monitor_counters[i]);
fail = TRUE;
}
}
for (i = 0; i < REQUEST_TYPE_LAST; i++)
{
counters[i] = 0;
}
for (l = directory->details->call_when_ready_list; l != NULL; l = l->next)
{
ReadyCallback *callback = l->data;
request_counter_add_request (counters, callback->request);
}
for (i = 0; i < REQUEST_TYPE_LAST; i++)
{
if (counters[i] != directory->details->call_when_ready_counters[i])
{
g_warning ("call when ready counter for %i is wrong, expecting %d but found %d",
i, counters[i], directory->details->call_when_ready_counters[i]);
fail = TRUE;
}
}
g_assert (!fail);
}
#endif
/* Start a job. This is really just a way of limiting the number of
* async. requests that we issue at any given time. Without this, the
* number of requests is unbounded.
*/
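/* A sketch of the typical call pattern: every async operation brackets its
 * work with a start/end pair under a matching job name, e.g.:
 *
 *   if (!async_job_start (directory, "deep count"))
 *       return;                       // queued; retried on the next wake-up
 *   ...issue the async I/O...
 *   // and in the completion/cancellation path:
 *   async_job_end (directory, "deep count");
 *   async_job_wake_up ();
 */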
static gboolean
async_job_start (NautilusDirectory *directory,
const char *job)
{
#ifdef DEBUG_ASYNC_JOBS
char *key;
#endif
#ifdef DEBUG_START_STOP
g_message ("starting %s in %p", job, directory->details->location);
#endif
g_assert (async_job_count >= 0);
g_assert (async_job_count <= MAX_ASYNC_JOBS);
if (async_job_count >= MAX_ASYNC_JOBS)
{
if (waiting_directories == NULL)
{
waiting_directories = g_hash_table_new (NULL, NULL);
}
g_hash_table_insert (waiting_directories,
directory,
directory);
return FALSE;
}
#ifdef DEBUG_ASYNC_JOBS
{
char *uri;
if (async_jobs == NULL)
{
async_jobs = g_hash_table_new (g_str_hash, g_str_equal);
}
uri = nautilus_directory_get_uri (directory);
key = g_strconcat (uri, ": ", job, NULL);
if (g_hash_table_lookup (async_jobs, key) != NULL)
{
g_warning ("same job twice: %s in %s",
job, uri);
}
g_free (uri);
g_hash_table_insert (async_jobs, key, directory);
}
#endif
async_job_count += 1;
return TRUE;
}
/* End a job. */
static void
async_job_end (NautilusDirectory *directory,
const char *job)
{
#ifdef DEBUG_ASYNC_JOBS
char *key;
gpointer table_key, value;
#endif
#ifdef DEBUG_START_STOP
g_message ("stopping %s in %p", job, directory->details->location);
#endif
g_assert (async_job_count > 0);
#ifdef DEBUG_ASYNC_JOBS
{
char *uri;
uri = nautilus_directory_get_uri (directory);
g_assert (async_jobs != NULL);
key = g_strconcat (uri, ": ", job, NULL);
if (!g_hash_table_lookup_extended (async_jobs, key, &table_key, &value))
{
g_warning ("ending job we didn't start: %s in %s",
job, uri);
}
else
{
g_hash_table_remove (async_jobs, key);
g_free (table_key);
}
g_free (uri);
g_free (key);
}
#endif
async_job_count -= 1;
}
/* Helper to get one value from a hash table. */
static void
get_one_value_callback (gpointer key,
gpointer value,
gpointer callback_data)
{
gpointer *returned_value;
returned_value = callback_data;
*returned_value = value;
}
/* Return a single value from a hash table. */
static gpointer
get_one_value (GHashTable *table)
{
gpointer value;
value = NULL;
if (table != NULL)
{
g_hash_table_foreach (table, get_one_value_callback, &value);
}
return value;
}
/* Wake up directories that are "blocked" as long as there are job
* slots available.
*/
static void
async_job_wake_up (void)
{
static gboolean already_waking_up = FALSE;
gpointer value;
g_assert (async_job_count >= 0);
g_assert (async_job_count <= MAX_ASYNC_JOBS);
if (already_waking_up)
{
return;
}
already_waking_up = TRUE;
while (async_job_count < MAX_ASYNC_JOBS)
{
value = get_one_value (waiting_directories);
if (value == NULL)
{
break;
}
g_hash_table_remove (waiting_directories, value);
nautilus_directory_async_state_changed
(NAUTILUS_DIRECTORY (value));
}
already_waking_up = FALSE;
}
static void
directory_count_cancel (NautilusDirectory *directory)
{
if (directory->details->count_in_progress != NULL)
{
g_cancellable_cancel (directory->details->count_in_progress->cancellable);
directory->details->count_in_progress = NULL;
}
}
static void
deep_count_cancel (NautilusDirectory *directory)
{
if (directory->details->deep_count_in_progress != NULL)
{
g_assert (NAUTILUS_IS_FILE (directory->details->deep_count_file));
g_cancellable_cancel (directory->details->deep_count_in_progress->cancellable);
directory->details->deep_count_file->details->deep_counts_status = NAUTILUS_REQUEST_NOT_STARTED;
directory->details->deep_count_in_progress->directory = NULL;
directory->details->deep_count_in_progress = NULL;
directory->details->deep_count_file = NULL;
async_job_end (directory, "deep count");
}
}
static void
mime_list_cancel (NautilusDirectory *directory)
{
if (directory->details->mime_list_in_progress != NULL)
{
g_cancellable_cancel (directory->details->mime_list_in_progress->cancellable);
}
}
static void
link_info_cancel (NautilusDirectory *directory)
{
if (directory->details->link_info_read_state != NULL)
{
g_cancellable_cancel (directory->details->link_info_read_state->cancellable);
directory->details->link_info_read_state->directory = NULL;
directory->details->link_info_read_state = NULL;
async_job_end (directory, "link info");
}
}
static void
thumbnail_cancel (NautilusDirectory *directory)
{
if (directory->details->thumbnail_state != NULL)
{
g_cancellable_cancel (directory->details->thumbnail_state->cancellable);
directory->details->thumbnail_state->directory = NULL;
directory->details->thumbnail_state = NULL;
async_job_end (directory, "thumbnail");
}
}
static void
mount_cancel (NautilusDirectory *directory)
{
if (directory->details->mount_state != NULL)
{
g_cancellable_cancel (directory->details->mount_state->cancellable);
directory->details->mount_state->directory = NULL;
directory->details->mount_state = NULL;
async_job_end (directory, "mount");
}
}
static void
file_info_cancel (NautilusDirectory *directory)
{
if (directory->details->get_info_in_progress != NULL)
{
g_cancellable_cancel (directory->details->get_info_in_progress->cancellable);
directory->details->get_info_in_progress->directory = NULL;
directory->details->get_info_in_progress = NULL;
directory->details->get_info_file = NULL;
async_job_end (directory, "file info");
}
}
static void
new_files_cancel (NautilusDirectory *directory)
{
GList *l;
NewFilesState *state;
if (directory->details->new_files_in_progress != NULL)
{
for (l = directory->details->new_files_in_progress; l != NULL; l = l->next)
{
state = l->data;
g_cancellable_cancel (state->cancellable);
state->directory = NULL;
}
g_list_free (directory->details->new_files_in_progress);
directory->details->new_files_in_progress = NULL;
}
}
static int
monitor_key_compare (gconstpointer a,
gconstpointer data)
{
const Monitor *monitor;
const Monitor *compare_monitor;
monitor = a;
compare_monitor = data;
if (monitor->client < compare_monitor->client)
{
return -1;
}
if (monitor->client > compare_monitor->client)
{
return +1;
}
if (monitor->file < compare_monitor->file)
{
return -1;
}
if (monitor->file > compare_monitor->file)
{
return +1;
}
return 0;
}
static GList *
find_monitor (NautilusDirectory *directory,
NautilusFile *file,
gconstpointer client)
{
Monitor monitor;
monitor.client = client;
monitor.file = file;
return g_list_find_custom (directory->details->monitor_list,
&monitor,
monitor_key_compare);
}
static void
remove_monitor_link (NautilusDirectory *directory,
GList *link)
{
Monitor *monitor;
if (link != NULL)
{
monitor = link->data;
request_counter_remove_request (directory->details->monitor_counters,
monitor->request);
directory->details->monitor_list =
g_list_remove_link (directory->details->monitor_list, link);
g_free (monitor);
g_list_free_1 (link);
}
}
static void
remove_monitor (NautilusDirectory *directory,
NautilusFile *file,
gconstpointer client)
{
remove_monitor_link (directory, find_monitor (directory, file, client));
}
Request
nautilus_directory_set_up_request (NautilusFileAttributes file_attributes)
{
Request request;
request = 0;
if ((file_attributes & NAUTILUS_FILE_ATTRIBUTE_DIRECTORY_ITEM_COUNT) != 0)
{
REQUEST_SET_TYPE (request, REQUEST_DIRECTORY_COUNT);
}
if ((file_attributes & NAUTILUS_FILE_ATTRIBUTE_DEEP_COUNTS) != 0)
{
REQUEST_SET_TYPE (request, REQUEST_DEEP_COUNT);
}
if ((file_attributes & NAUTILUS_FILE_ATTRIBUTE_DIRECTORY_ITEM_MIME_TYPES) != 0)
{
REQUEST_SET_TYPE (request, REQUEST_MIME_LIST);
}
if ((file_attributes & NAUTILUS_FILE_ATTRIBUTE_INFO) != 0)
{
REQUEST_SET_TYPE (request, REQUEST_FILE_INFO);
}
if (file_attributes & NAUTILUS_FILE_ATTRIBUTE_LINK_INFO)
{
REQUEST_SET_TYPE (request, REQUEST_FILE_INFO);
REQUEST_SET_TYPE (request, REQUEST_LINK_INFO);
}
if ((file_attributes & NAUTILUS_FILE_ATTRIBUTE_EXTENSION_INFO) != 0)
{
REQUEST_SET_TYPE (request, REQUEST_EXTENSION_INFO);
}
if (file_attributes & NAUTILUS_FILE_ATTRIBUTE_THUMBNAIL)
{
REQUEST_SET_TYPE (request, REQUEST_THUMBNAIL);
REQUEST_SET_TYPE (request, REQUEST_FILE_INFO);
}
if (file_attributes & NAUTILUS_FILE_ATTRIBUTE_MOUNT)
{
REQUEST_SET_TYPE (request, REQUEST_MOUNT);
REQUEST_SET_TYPE (request, REQUEST_FILE_INFO);
}
if (file_attributes & NAUTILUS_FILE_ATTRIBUTE_FILESYSTEM_INFO)
{
REQUEST_SET_TYPE (request, REQUEST_FILESYSTEM_INFO);
}
return request;
}
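/* Illustrative sketch, not part of this file: requesting a thumbnail also
 * sets the file-info bit, because the mapping above makes
 * NAUTILUS_FILE_ATTRIBUTE_THUMBNAIL imply REQUEST_FILE_INFO.
 */
G_GNUC_UNUSED static void
example_set_up_request (void)
{
    Request request;

    request = nautilus_directory_set_up_request (NAUTILUS_FILE_ATTRIBUTE_THUMBNAIL);
    g_assert (REQUEST_WANTS_TYPE (request, REQUEST_THUMBNAIL));
    g_assert (REQUEST_WANTS_TYPE (request, REQUEST_FILE_INFO));
}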
static void
mime_db_changed_callback (GObject *ignore,
NautilusDirectory *dir)
{
NautilusFileAttributes attrs;
g_assert (dir != NULL);
g_assert (dir->details != NULL);
attrs = NAUTILUS_FILE_ATTRIBUTE_INFO |
NAUTILUS_FILE_ATTRIBUTE_LINK_INFO |
NAUTILUS_FILE_ATTRIBUTE_DIRECTORY_ITEM_MIME_TYPES;
nautilus_directory_force_reload_internal (dir, attrs);
}
void
nautilus_directory_monitor_add_internal (NautilusDirectory *directory,
NautilusFile *file,
gconstpointer client,
gboolean monitor_hidden_files,
NautilusFileAttributes file_attributes,
NautilusDirectoryCallback callback,
gpointer callback_data)
{
Monitor *monitor;
GList *file_list;
char *file_uri = NULL;
char *dir_uri = NULL;
g_assert (NAUTILUS_IS_DIRECTORY (directory));
if (file != NULL)
{
file_uri = nautilus_file_get_uri (file);
}
if (directory != NULL)
{
dir_uri = nautilus_directory_get_uri (directory);
}
nautilus_profile_start ("uri %s file-uri %s client %p", dir_uri, file_uri, client);
g_free (dir_uri);
g_free (file_uri);
/* Replace any current monitor for this client/file pair. */
remove_monitor (directory, file, client);
/* Add the new monitor. */
monitor = g_new (Monitor, 1);
monitor->file = file;
monitor->monitor_hidden_files = monitor_hidden_files;
monitor->client = client;
monitor->request = nautilus_directory_set_up_request (file_attributes);
if (file == NULL)
{
REQUEST_SET_TYPE (monitor->request, REQUEST_FILE_LIST);
}
directory->details->monitor_list =
g_list_prepend (directory->details->monitor_list, monitor);
request_counter_add_request (directory->details->monitor_counters,
monitor->request);
if (callback != NULL)
{
file_list = nautilus_directory_get_file_list (directory);
(*callback)(directory, file_list, callback_data);
nautilus_file_list_free (file_list);
}
/* Start the "real" monitoring (FAM or whatever). */
/* We always monitor the whole directory since in practice
* nautilus almost always shows the whole directory anyway, and
* it allows us to avoid one file monitor per file in a directory.
*/
if (directory->details->monitor == NULL)
{
directory->details->monitor = nautilus_monitor_directory (directory->details->location);
}
if (REQUEST_WANTS_TYPE (monitor->request, REQUEST_FILE_INFO) &&
directory->details->mime_db_monitor == 0)
{
directory->details->mime_db_monitor =
g_signal_connect_object (nautilus_signaller_get_current (),
"mime-data-changed",
G_CALLBACK (mime_db_changed_callback), directory, 0);
}
/* Put the monitor file or all the files on the work queue. */
if (file != NULL)
{
nautilus_directory_add_file_to_work_queue (directory, file);
}
else
{
add_all_files_to_work_queue (directory);
}
/* Kick off I/O. */
nautilus_directory_async_state_changed (directory);
nautilus_profile_end (NULL);
}
static void
set_file_unconfirmed (NautilusFile *file,
gboolean unconfirmed)
{
NautilusDirectory *directory;
g_assert (NAUTILUS_IS_FILE (file));
g_assert (unconfirmed == FALSE || unconfirmed == TRUE);
if (file->details->unconfirmed == unconfirmed)
{
return;
}
file->details->unconfirmed = unconfirmed;
directory = file->details->directory;
if (unconfirmed)
{
directory->details->confirmed_file_count--;
}
else
{
directory->details->confirmed_file_count++;
}
}
static gboolean show_hidden_files = TRUE;
static void
show_hidden_files_changed_callback (gpointer callback_data)
{
show_hidden_files = g_settings_get_boolean (gtk_filechooser_preferences, NAUTILUS_PREFERENCES_SHOW_HIDDEN_FILES);
}
static gboolean
should_skip_file (NautilusDirectory *directory,
GFileInfo *info)
{
static gboolean show_hidden_files_changed_callback_installed = FALSE;
/* Add the callback once for the life of our process */
if (!show_hidden_files_changed_callback_installed)
{
g_signal_connect_swapped (gtk_filechooser_preferences,
"changed::" NAUTILUS_PREFERENCES_SHOW_HIDDEN_FILES,
G_CALLBACK (show_hidden_files_changed_callback),
NULL);
show_hidden_files_changed_callback_installed = TRUE;
/* Read the initial value */
show_hidden_files_changed_callback (NULL);
}
if (!show_hidden_files &&
(g_file_info_get_is_hidden (info) ||
g_file_info_get_is_backup (info)))
{
return TRUE;
}
return FALSE;
}
static gboolean
dequeue_pending_idle_callback (gpointer callback_data)
{
NautilusDirectory *directory;
GList *pending_file_info;
GList *node, *next;
NautilusFile *file;
GList *changed_files, *added_files;
GFileInfo *file_info;
const char *mimetype, *name;
DirectoryLoadState *dir_load_state;
directory = NAUTILUS_DIRECTORY (callback_data);
nautilus_directory_ref (directory);
nautilus_profile_start ("nitems %d", g_list_length (directory->details->pending_file_info));
directory->details->dequeue_pending_idle_id = 0;
/* Handle the files in the order we saw them. */
pending_file_info = g_list_reverse (directory->details->pending_file_info);
directory->details->pending_file_info = NULL;
/* If we are no longer monitoring, then throw away these. */
if (!nautilus_directory_is_file_list_monitored (directory))
{
nautilus_directory_async_state_changed (directory);
goto drain;
}
added_files = NULL;
changed_files = NULL;
dir_load_state = directory->details->directory_load_in_progress;
/* Build a list of NautilusFile objects. */
for (node = pending_file_info; node != NULL; node = node->next)
{
file_info = node->data;
name = g_file_info_get_name (file_info);
/* Update the file count. */
/* FIXME bugzilla.gnome.org 45063: This could count a
* file twice if we get it from both load_directory
* and from new_files_callback. Not too hard to fix by
* moving this into the actual callback instead of
* waiting for the idle function.
*/
if (dir_load_state &&
!should_skip_file (directory, file_info))
{
dir_load_state->load_file_count += 1;
/* Add the MIME type to the set. */
mimetype = g_file_info_get_content_type (file_info);
if (mimetype != NULL)
{
istr_set_insert (dir_load_state->load_mime_list_hash,
mimetype);
}
}
/* check if the file already exists */
file = nautilus_directory_find_file_by_name (directory, name);
if (file != NULL)
{
/* file already exists in dir, check if we still need to
* emit file_added or if it changed */
set_file_unconfirmed (file, FALSE);
if (!file->details->is_added)
{
/* We consider this newly added even if it's in the list.
 * This can happen if someone called nautilus_file_get_by_uri()
 * on a file in the folder before the add signal was
 * emitted */
nautilus_file_ref (file);
file->details->is_added = TRUE;
added_files = g_list_prepend (added_files, file);
}
else if (nautilus_file_update_info (file, file_info))
{
/* File changed, notify about the change. */
nautilus_file_ref (file);
changed_files = g_list_prepend (changed_files, file);
}
}
else
{
/* new file, create a nautilus file object and add it to the list */
file = nautilus_file_new_from_info (directory, file_info);
nautilus_directory_add_file (directory, file);
file->details->is_added = TRUE;
added_files = g_list_prepend (added_files, file);
}
}
/* If we are done loading, then we assume that any unconfirmed
* files are gone.
*/
if (directory->details->directory_loaded)
{
for (node = directory->details->file_list;
node != NULL; node = next)
{
file = NAUTILUS_FILE (node->data);
next = node->next;
if (file->details->unconfirmed)
{
nautilus_file_ref (file);
changed_files = g_list_prepend (changed_files, file);
nautilus_file_mark_gone (file);
}
}
}
/* Send the changed and added signals. */
nautilus_directory_emit_change_signals (directory, changed_files);
nautilus_file_list_free (changed_files);
nautilus_directory_emit_files_added (directory, added_files);
nautilus_file_list_free (added_files);
if (directory->details->directory_loaded &&
!directory->details->directory_loaded_sent_notification)
{
/* Send the done_loading signal. */
nautilus_directory_emit_done_loading (directory);
if (dir_load_state)
{
file = dir_load_state->load_directory_file;
file->details->directory_count = dir_load_state->load_file_count;
file->details->directory_count_is_up_to_date = TRUE;
file->details->got_directory_count = TRUE;
file->details->got_mime_list = TRUE;
file->details->mime_list_is_up_to_date = TRUE;
g_list_free_full (file->details->mime_list, g_free);
file->details->mime_list = istr_set_get_as_list
(dir_load_state->load_mime_list_hash);
nautilus_file_changed (file);
}
nautilus_directory_async_state_changed (directory);
directory->details->directory_loaded_sent_notification = TRUE;
}
drain:
g_list_free_full (pending_file_info, g_object_unref);
/* Get the state machine running again. */
nautilus_directory_async_state_changed (directory);
nautilus_profile_end (NULL);
nautilus_directory_unref (directory);
return FALSE;
}
void
nautilus_directory_schedule_dequeue_pending (NautilusDirectory *directory)
{
if (directory->details->dequeue_pending_idle_id == 0)
{
directory->details->dequeue_pending_idle_id
= g_idle_add (dequeue_pending_idle_callback, directory);
}
}
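/* Note (illustrative, inferred from the code above): the zero check makes
 * scheduling idempotent, so directory_load_one() can call this once per
 * queued GFileInfo and still get a single dequeue_pending_idle_callback()
 * dispatch that drains the whole pending list. The id is cleared both in
 * the callback itself and wherever the source is removed, as in
 * file_list_cancel() below.
 */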
static void
directory_load_one (NautilusDirectory *directory,
GFileInfo *info)
{
if (info == NULL)
{
return;
}
if (g_file_info_get_name (info) == NULL)
{
char *uri;
uri = nautilus_directory_get_uri (directory);
g_warning ("Got GFileInfo with NULL name in %s, ignoring. This shouldn't happen unless the gvfs backend is broken.\n", uri);
g_free (uri);
return;
}
/* Arrange for the "loading" part of the work. */
g_object_ref (info);
directory->details->pending_file_info
= g_list_prepend (directory->details->pending_file_info, info);
nautilus_directory_schedule_dequeue_pending (directory);
}
static void
directory_load_cancel (NautilusDirectory *directory)
{
NautilusFile *file;
DirectoryLoadState *state;
state = directory->details->directory_load_in_progress;
if (state != NULL)
{
file = state->load_directory_file;
file->details->loading_directory = FALSE;
if (file->details->directory != directory)
{
nautilus_directory_async_state_changed (file->details->directory);
}
g_cancellable_cancel (state->cancellable);
state->directory = NULL;
directory->details->directory_load_in_progress = NULL;
async_job_end (directory, "file list");
}
}
static void
file_list_cancel (NautilusDirectory *directory)
{
directory_load_cancel (directory);
if (directory->details->dequeue_pending_idle_id != 0)
{
g_source_remove (directory->details->dequeue_pending_idle_id);
directory->details->dequeue_pending_idle_id = 0;
}
if (directory->details->pending_file_info != NULL)
{
g_list_free_full (directory->details->pending_file_info, g_object_unref);
directory->details->pending_file_info = NULL;
}
}
static void
directory_load_done (NautilusDirectory *directory,
GError *error)
{
GList *node;
nautilus_profile_start (NULL);
g_object_ref (directory);
directory->details->directory_loaded = TRUE;
directory->details->directory_loaded_sent_notification = FALSE;
if (error != NULL)
{
/* The load did not complete successfully. This means
* we don't know the status of the files in this directory.
* We clear the unconfirmed bit on each file here so that
* they won't be marked "gone" later -- we don't know enough
* about them to know whether they are really gone.
*/
for (node = directory->details->file_list;
node != NULL; node = node->next)
{
set_file_unconfirmed (NAUTILUS_FILE (node->data), FALSE);
}
nautilus_directory_emit_load_error (directory, error);
}
/* Call the idle function right away. */
if (directory->details->dequeue_pending_idle_id != 0)
{
g_source_remove (directory->details->dequeue_pending_idle_id);
}
dequeue_pending_idle_callback (directory);
directory_load_cancel (directory);
g_object_unref (directory);
nautilus_profile_end (NULL);
}
void
nautilus_directory_monitor_remove_internal (NautilusDirectory *directory,
NautilusFile *file,
gconstpointer client)
{
g_assert (NAUTILUS_IS_DIRECTORY (directory));
g_assert (file == NULL || NAUTILUS_IS_FILE (file));
g_assert (client != NULL);
remove_monitor (directory, file, client);
if (directory->details->monitor != NULL
&& directory->details->monitor_list == NULL)
{
nautilus_monitor_cancel (directory->details->monitor);
directory->details->monitor = NULL;
}
/* XXX - do we need to remove anything from the work queue? */
nautilus_directory_async_state_changed (directory);
}
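/* Illustrative sketch, hypothetical client code: monitors are keyed by the
 * (client, file) pair compared in monitor_key_compare(), so a client adds a
 * monitor with a unique pointer (typically its own object) and removes it
 * with the same pointer. Passing file == NULL monitors the whole directory.
 */
G_GNUC_UNUSED static void
example_monitor_usage (NautilusDirectory *directory,
                       gconstpointer client)
{
    nautilus_directory_monitor_add_internal (directory,
                                             NULL, /* whole directory */
                                             client,
                                             TRUE, /* monitor hidden files */
                                             NAUTILUS_FILE_ATTRIBUTE_INFO,
                                             NULL, NULL);
    /* ... later, when the client goes away: */
    nautilus_directory_monitor_remove_internal (directory, NULL, client);
}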
FileMonitors *
nautilus_directory_remove_file_monitors (NautilusDirectory *directory,
NautilusFile *file)
{
GList *result, **list, *node, *next;
Monitor *monitor;
g_assert (NAUTILUS_IS_DIRECTORY (directory));
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
result = NULL;
list = &directory->details->monitor_list;
for (node = directory->details->monitor_list; node != NULL; node = next)
{
next = node->next;
monitor = node->data;
if (monitor->file == file)
{
*list = g_list_remove_link (*list, node);
result = g_list_concat (node, result);
request_counter_remove_request (directory->details->monitor_counters,
monitor->request);
}
}
/* XXX - do we need to remove anything from the work queue? */
nautilus_directory_async_state_changed (directory);
return (FileMonitors *) result;
}
void
nautilus_directory_add_file_monitors (NautilusDirectory *directory,
NautilusFile *file,
FileMonitors *monitors)
{
GList **list;
GList *l;
Monitor *monitor;
g_assert (NAUTILUS_IS_DIRECTORY (directory));
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (monitors == NULL)
{
return;
}
for (l = (GList *) monitors; l != NULL; l = l->next)
{
monitor = l->data;
request_counter_add_request (directory->details->monitor_counters,
monitor->request);
}
list = &directory->details->monitor_list;
*list = g_list_concat (*list, (GList *) monitors);
nautilus_directory_add_file_to_work_queue (directory, file);
nautilus_directory_async_state_changed (directory);
}
static int
ready_callback_key_compare (gconstpointer a,
gconstpointer b)
{
const ReadyCallback *callback_a, *callback_b;
callback_a = a;
callback_b = b;
if (callback_a->file < callback_b->file)
{
return -1;
}
if (callback_a->file > callback_b->file)
{
return 1;
}
if (callback_a->file == NULL)
{
/* ANSI C doesn't allow ordered comparison of function pointers, so we cast
 * them to plain data pointers to make some overly pedantic compilers
 * (*cough* HP-UX *cough*) compile this. Of course, on any compiler where
 * ordered function pointer comparison actually breaks, this probably won't
 * work, but at least it will compile on the platforms where it does work.
 */
if ((void *) callback_a->callback.directory < (void *) callback_b->callback.directory)
{
return -1;
}
if ((void *) callback_a->callback.directory > (void *) callback_b->callback.directory)
{
return 1;
}
}
else
{
if ((void *) callback_a->callback.file < (void *) callback_b->callback.file)
{
return -1;
}
if ((void *) callback_a->callback.file > (void *) callback_b->callback.file)
{
return 1;
}
}
if (callback_a->callback_data < callback_b->callback_data)
{
return -1;
}
if (callback_a->callback_data > callback_b->callback_data)
{
return 1;
}
return 0;
}
static int
ready_callback_key_compare_only_active (gconstpointer a,
gconstpointer b)
{
const ReadyCallback *callback_a;
callback_a = a;
/* Non active callbacks never match */
if (!callback_a->active)
{
return -1;
}
return ready_callback_key_compare (a, b);
}
static void
ready_callback_call (NautilusDirectory *directory,
const ReadyCallback *callback)
{
GList *file_list;
/* Call the callback. */
if (callback->file != NULL)
{
if (callback->callback.file)
{
(*callback->callback.file)(callback->file,
callback->callback_data);
}
}
else if (callback->callback.directory != NULL)
{
if (directory == NULL ||
!REQUEST_WANTS_TYPE (callback->request, REQUEST_FILE_LIST))
{
file_list = NULL;
}
else
{
file_list = nautilus_directory_get_file_list (directory);
}
/* Pass back the file list if the user was waiting for it. */
(*callback->callback.directory)(directory,
file_list,
callback->callback_data);
nautilus_file_list_free (file_list);
}
}
void
nautilus_directory_call_when_ready_internal (NautilusDirectory *directory,
NautilusFile *file,
NautilusFileAttributes file_attributes,
gboolean wait_for_file_list,
NautilusDirectoryCallback directory_callback,
NautilusFileCallback file_callback,
gpointer callback_data)
{
ReadyCallback callback;
g_assert (directory == NULL || NAUTILUS_IS_DIRECTORY (directory));
g_assert (file == NULL || NAUTILUS_IS_FILE (file));
g_assert (file != NULL || directory_callback != NULL);
/* Construct a callback object. */
callback.active = TRUE;
callback.file = file;
if (file == NULL)
{
callback.callback.directory = directory_callback;
}
else
{
callback.callback.file = file_callback;
}
callback.callback_data = callback_data;
callback.request = nautilus_directory_set_up_request (file_attributes);
if (wait_for_file_list)
{
REQUEST_SET_TYPE (callback.request, REQUEST_FILE_LIST);
}
/* Handle the NULL case. */
if (directory == NULL)
{
ready_callback_call (NULL, &callback);
return;
}
/* Check if the callback is already there. */
if (g_list_find_custom (directory->details->call_when_ready_list,
&callback,
ready_callback_key_compare_only_active) != NULL)
{
if (file_callback != NULL && directory_callback != NULL)
{
g_warning ("tried to add a new callback while an old one was pending");
}
/* A NULL callback means just read it. Conflicts are OK. */
return;
}
/* Add the new callback to the list. */
directory->details->call_when_ready_list = g_list_prepend
(directory->details->call_when_ready_list,
g_memdup (&callback, sizeof (callback)));
request_counter_add_request (directory->details->call_when_ready_counters,
callback.request);
/* Put the callback file or all the files on the work queue. */
if (file != NULL)
{
nautilus_directory_add_file_to_work_queue (directory, file);
}
else
{
add_all_files_to_work_queue (directory);
}
nautilus_directory_async_state_changed (directory);
}
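/* Illustrative sketch, hypothetical caller: a one-shot request that fires
 * example_files_ready() once the file info and file list are ready. Until
 * it fires, the request can be withdrawn again with
 * nautilus_directory_cancel_callback_internal() (defined below) using the
 * same (file, callback, callback_data) triple.
 */
G_GNUC_UNUSED static void
example_files_ready (NautilusDirectory *directory,
                     GList *files,
                     gpointer callback_data)
{
    /* Use the file list here; ready_callback_call() frees it afterwards. */
}

G_GNUC_UNUSED static void
example_call_when_ready (NautilusDirectory *directory)
{
    nautilus_directory_call_when_ready_internal (directory,
                                                 NULL, /* whole directory */
                                                 NAUTILUS_FILE_ATTRIBUTE_INFO,
                                                 TRUE, /* wait for file list */
                                                 example_files_ready,
                                                 NULL, NULL);
}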
gboolean
nautilus_directory_check_if_ready_internal (NautilusDirectory *directory,
NautilusFile *file,
NautilusFileAttributes file_attributes)
{
Request request;
g_assert (NAUTILUS_IS_DIRECTORY (directory));
request = nautilus_directory_set_up_request (file_attributes);
return request_is_satisfied (directory, file, request);
}
static void
remove_callback_link_keep_data (NautilusDirectory *directory,
GList *link)
{
ReadyCallback *callback;
callback = link->data;
directory->details->call_when_ready_list = g_list_remove_link
(directory->details->call_when_ready_list, link);
request_counter_remove_request (directory->details->call_when_ready_counters,
callback->request);
g_list_free_1 (link);
}
static void
remove_callback_link (NautilusDirectory *directory,
GList *link)
{
ReadyCallback *callback;
callback = link->data;
remove_callback_link_keep_data (directory, link);
g_free (callback);
}
void
nautilus_directory_cancel_callback_internal (NautilusDirectory *directory,
NautilusFile *file,
NautilusDirectoryCallback directory_callback,
NautilusFileCallback file_callback,
gpointer callback_data)
{
ReadyCallback callback;
GList *node;
if (directory == NULL)
{
return;
}
g_assert (NAUTILUS_IS_DIRECTORY (directory));
g_assert (file == NULL || NAUTILUS_IS_FILE (file));
g_assert (file != NULL || directory_callback != NULL);
g_assert (file == NULL || file_callback != NULL);
/* Construct a callback object. */
callback.file = file;
if (file == NULL)
{
callback.callback.directory = directory_callback;
}
else
{
callback.callback.file = file_callback;
}
callback.callback_data = callback_data;
/* Remove all queued callbacks from the list (including non-active ones). */
do
{
node = g_list_find_custom (directory->details->call_when_ready_list,
&callback,
ready_callback_key_compare);
if (node != NULL)
{
remove_callback_link (directory, node);
nautilus_directory_async_state_changed (directory);
}
}
while (node != NULL);
}
static void
new_files_state_unref (NewFilesState *state)
{
state->count--;
if (state->count == 0)
{
if (state->directory)
{
state->directory->details->new_files_in_progress =
g_list_remove (state->directory->details->new_files_in_progress,
state);
}
g_object_unref (state->cancellable);
g_free (state);
}
}
static void
new_files_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
NautilusDirectory *directory;
GFileInfo *info;
NewFilesState *state;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
new_files_state_unref (state);
return;
}
directory = nautilus_directory_ref (state->directory);
/* Queue up the new file. */
info = g_file_query_info_finish (G_FILE (source_object), res, NULL);
if (info != NULL)
{
directory_load_one (directory, info);
g_object_unref (info);
}
new_files_state_unref (state);
nautilus_directory_unref (directory);
}
void
nautilus_directory_get_info_for_new_files (NautilusDirectory *directory,
GList *location_list)
{
NewFilesState *state;
GFile *location;
GList *l;
if (location_list == NULL)
{
return;
}
state = g_new (NewFilesState, 1);
state->directory = directory;
state->cancellable = g_cancellable_new ();
state->count = 0;
for (l = location_list; l != NULL; l = l->next)
{
location = l->data;
state->count++;
g_file_query_info_async (location,
NAUTILUS_FILE_DEFAULT_ATTRIBUTES,
0,
G_PRIORITY_DEFAULT,
state->cancellable,
new_files_callback, state);
}
directory->details->new_files_in_progress
= g_list_prepend (directory->details->new_files_in_progress,
state);
}
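/* Note (illustrative, inferred from the code above): state->count works as a
 * simple reference count, one per outstanding g_file_query_info_async()
 * call, and every new_files_callback() invocation drops exactly one
 * reference via new_files_state_unref(), so the shared NewFilesState is
 * freed only after the last query completes or is cancelled.
 */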
void
nautilus_async_destroying_file (NautilusFile *file)
{
NautilusDirectory *directory;
gboolean changed;
GList *node, *next;
ReadyCallback *callback;
Monitor *monitor;
directory = file->details->directory;
changed = FALSE;
/* Check for callbacks. */
for (node = directory->details->call_when_ready_list; node != NULL; node = next)
{
next = node->next;
callback = node->data;
if (callback->file == file)
{
/* Client should have cancelled callback. */
if (callback->active)
{
g_warning ("destroyed file has call_when_ready pending");
}
remove_callback_link (directory, node);
changed = TRUE;
}
}
/* Check for monitors. */
for (node = directory->details->monitor_list; node != NULL; node = next)
{
next = node->next;
monitor = node->data;
if (monitor->file == file)
{
/* Client should have removed monitor earlier. */
g_warning ("destroyed file still being monitored");
remove_monitor_link (directory, node);
changed = TRUE;
}
}
/* Check if it's a file that's currently being worked on.
 * If so, set that field to NULL so the operation gets cancelled right away.
 */
if (directory->details->count_in_progress != NULL &&
directory->details->count_in_progress->count_file == file)
{
directory->details->count_in_progress->count_file = NULL;
changed = TRUE;
}
if (directory->details->deep_count_file == file)
{
directory->details->deep_count_file = NULL;
changed = TRUE;
}
if (directory->details->mime_list_in_progress != NULL &&
directory->details->mime_list_in_progress->mime_list_file == file)
{
directory->details->mime_list_in_progress->mime_list_file = NULL;
changed = TRUE;
}
if (directory->details->get_info_file == file)
{
directory->details->get_info_file = NULL;
changed = TRUE;
}
if (directory->details->link_info_read_state != NULL &&
directory->details->link_info_read_state->file == file)
{
directory->details->link_info_read_state->file = NULL;
changed = TRUE;
}
if (directory->details->extension_info_file == file)
{
directory->details->extension_info_file = NULL;
changed = TRUE;
}
if (directory->details->thumbnail_state != NULL &&
directory->details->thumbnail_state->file == file)
{
directory->details->thumbnail_state->file = NULL;
changed = TRUE;
}
if (directory->details->mount_state != NULL &&
directory->details->mount_state->file == file)
{
directory->details->mount_state->file = NULL;
changed = TRUE;
}
if (directory->details->filesystem_info_state != NULL &&
directory->details->filesystem_info_state->file == file)
{
directory->details->filesystem_info_state->file = NULL;
changed = TRUE;
}
/* Let the directory take care of the rest. */
if (changed)
{
nautilus_directory_async_state_changed (directory);
}
}
static gboolean
lacks_directory_count (NautilusFile *file)
{
return !file->details->directory_count_is_up_to_date
&& nautilus_file_should_show_directory_item_count (file);
}
static gboolean
should_get_directory_count_now (NautilusFile *file)
{
return lacks_directory_count (file)
&& !file->details->loading_directory;
}
static gboolean
lacks_info (NautilusFile *file)
{
return !file->details->file_info_is_up_to_date
&& !file->details->is_gone;
}
static gboolean
lacks_filesystem_info (NautilusFile *file)
{
return !file->details->filesystem_info_is_up_to_date;
}
static gboolean
lacks_deep_count (NautilusFile *file)
{
return file->details->deep_counts_status != NAUTILUS_REQUEST_DONE;
}
static gboolean
lacks_mime_list (NautilusFile *file)
{
return !file->details->mime_list_is_up_to_date;
}
static gboolean
should_get_mime_list (NautilusFile *file)
{
return lacks_mime_list (file)
&& !file->details->loading_directory;
}
static gboolean
lacks_link_info (NautilusFile *file)
{
if (file->details->file_info_is_up_to_date &&
!file->details->link_info_is_up_to_date)
{
if (nautilus_file_is_nautilus_link (file))
{
return TRUE;
}
else
{
link_info_done (file->details->directory, file, NULL, NULL, NULL, FALSE, FALSE);
return FALSE;
}
}
else
{
return FALSE;
}
}
static gboolean
lacks_extension_info (NautilusFile *file)
{
return file->details->pending_info_providers != NULL;
}
static gboolean
lacks_thumbnail (NautilusFile *file)
{
return nautilus_file_should_show_thumbnail (file) &&
file->details->thumbnail_path != NULL &&
!file->details->thumbnail_is_up_to_date;
}
static gboolean
lacks_mount (NautilusFile *file)
{
return (!file->details->mount_is_up_to_date &&
(
/* Unix mountpoint, could be a GMount */
file->details->is_mountpoint ||
/* The toplevel directory of something */
(file->details->type == G_FILE_TYPE_DIRECTORY &&
nautilus_file_is_self_owned (file)) ||
/* Mountable, could be a mountpoint */
(file->details->type == G_FILE_TYPE_MOUNTABLE)
)
);
}
static gboolean
has_problem (NautilusDirectory *directory,
NautilusFile *file,
FileCheck problem)
{
GList *node;
if (file != NULL)
{
return (*problem)(file);
}
for (node = directory->details->file_list; node != NULL; node = node->next)
{
if ((*problem)(node->data))
{
return TRUE;
}
}
return FALSE;
}
static gboolean
request_is_satisfied (NautilusDirectory *directory,
NautilusFile *file,
Request request)
{
if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_LIST) &&
!(directory->details->directory_loaded &&
directory->details->directory_loaded_sent_notification))
{
return FALSE;
}
if (REQUEST_WANTS_TYPE (request, REQUEST_DIRECTORY_COUNT))
{
if (has_problem (directory, file, lacks_directory_count))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_INFO))
{
if (has_problem (directory, file, lacks_info))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILESYSTEM_INFO))
{
if (has_problem (directory, file, lacks_filesystem_info))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_DEEP_COUNT))
{
if (has_problem (directory, file, lacks_deep_count))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_THUMBNAIL))
{
if (has_problem (directory, file, lacks_thumbnail))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MOUNT))
{
if (has_problem (directory, file, lacks_mount))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MIME_LIST))
{
if (has_problem (directory, file, lacks_mime_list))
{
return FALSE;
}
}
if (REQUEST_WANTS_TYPE (request, REQUEST_LINK_INFO))
{
if (has_problem (directory, file, lacks_link_info))
{
return FALSE;
}
}
return TRUE;
}
static gboolean
call_ready_callbacks_at_idle (gpointer callback_data)
{
NautilusDirectory *directory;
GList *node, *next;
ReadyCallback *callback;
directory = NAUTILUS_DIRECTORY (callback_data);
directory->details->call_ready_idle_id = 0;
nautilus_directory_ref (directory);
callback = NULL;
while (1)
{
/* Check if any callbacks are non-active and call them if they are. */
for (node = directory->details->call_when_ready_list;
node != NULL; node = next)
{
next = node->next;
callback = node->data;
if (!callback->active)
{
/* Non-active, remove and call */
break;
}
}
if (node == NULL)
{
break;
}
/* Callbacks are one-shot, so remove this one now. */
remove_callback_link_keep_data (directory, node);
/* Call the callback. */
ready_callback_call (directory, callback);
g_free (callback);
}
nautilus_directory_async_state_changed (directory);
nautilus_directory_unref (directory);
return FALSE;
}
static void
schedule_call_ready_callbacks (NautilusDirectory *directory)
{
if (directory->details->call_ready_idle_id == 0)
{
directory->details->call_ready_idle_id
= g_idle_add (call_ready_callbacks_at_idle, directory);
}
}
/* Marks all callbacks that are ready as non-active and
* calls them at idle time, unless they are removed
* before then */
static gboolean
call_ready_callbacks (NautilusDirectory *directory)
{
gboolean found_any;
GList *node, *next;
ReadyCallback *callback;
found_any = FALSE;
/* Check if any callbacks are satisfied and mark them to be called if they are. */
for (node = directory->details->call_when_ready_list;
node != NULL; node = next)
{
next = node->next;
callback = node->data;
if (callback->active &&
request_is_satisfied (directory, callback->file, callback->request))
{
callback->active = FALSE;
found_any = TRUE;
}
}
if (found_any)
{
schedule_call_ready_callbacks (directory);
}
return found_any;
}
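/* Note (illustrative summary, not upstream documentation): dispatch is
 * deliberately two-phase. call_ready_callbacks() only flips satisfied
 * callbacks to non-active while the state machine runs; the client code is
 * invoked later from call_ready_callbacks_at_idle(), so callbacks can add
 * or cancel requests without re-entering the list walk that found them.
 */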
gboolean
nautilus_directory_has_active_request_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
GList *node;
ReadyCallback *callback;
Monitor *monitor;
for (node = directory->details->call_when_ready_list;
node != NULL; node = node->next)
{
callback = node->data;
if (callback->file == file ||
callback->file == NULL)
{
return TRUE;
}
}
for (node = directory->details->monitor_list;
node != NULL; node = node->next)
{
monitor = node->data;
if (monitor->file == file ||
monitor->file == NULL)
{
return TRUE;
}
}
return FALSE;
}
/* This checks if there's a request for monitoring the file list. */
gboolean
nautilus_directory_is_anyone_monitoring_file_list (NautilusDirectory *directory)
{
if (directory->details->call_when_ready_counters[REQUEST_FILE_LIST] > 0)
{
return TRUE;
}
if (directory->details->monitor_counters[REQUEST_FILE_LIST] > 0)
{
return TRUE;
}
return FALSE;
}
/* This checks if the file list is being monitored. */
gboolean
nautilus_directory_is_file_list_monitored (NautilusDirectory *directory)
{
return directory->details->file_list_monitored;
}
static void
mark_all_files_unconfirmed (NautilusDirectory *directory)
{
GList *node;
NautilusFile *file;
for (node = directory->details->file_list; node != NULL; node = node->next)
{
file = node->data;
set_file_unconfirmed (file, TRUE);
}
}
static void
directory_load_state_free (DirectoryLoadState *state)
{
if (state->enumerator)
{
if (!g_file_enumerator_is_closed (state->enumerator))
{
g_file_enumerator_close_async (state->enumerator,
0, NULL, NULL, NULL);
}
g_object_unref (state->enumerator);
}
if (state->load_mime_list_hash != NULL)
{
istr_set_destroy (state->load_mime_list_hash);
}
nautilus_file_unref (state->load_directory_file);
g_object_unref (state->cancellable);
g_free (state);
}
static void
more_files_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
DirectoryLoadState *state;
NautilusDirectory *directory;
GError *error;
GList *files, *l;
GFileInfo *info;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
directory_load_state_free (state);
return;
}
directory = nautilus_directory_ref (state->directory);
g_assert (directory->details->directory_load_in_progress != NULL);
g_assert (directory->details->directory_load_in_progress == state);
error = NULL;
files = g_file_enumerator_next_files_finish (state->enumerator,
res, &error);
for (l = files; l != NULL; l = l->next)
{
info = l->data;
directory_load_one (directory, info);
g_object_unref (info);
}
if (files == NULL)
{
directory_load_done (directory, error);
directory_load_state_free (state);
}
else
{
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_DEFAULT,
state->cancellable,
more_files_callback,
state);
}
nautilus_directory_unref (directory);
if (error)
{
g_error_free (error);
}
g_list_free (files);
}
static void
enumerate_children_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
DirectoryLoadState *state;
GFileEnumerator *enumerator;
GError *error;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
directory_load_state_free (state);
return;
}
error = NULL;
enumerator = g_file_enumerate_children_finish (G_FILE (source_object),
res, &error);
if (enumerator == NULL)
{
directory_load_done (state->directory, error);
g_error_free (error);
directory_load_state_free (state);
return;
}
else
{
state->enumerator = enumerator;
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_DEFAULT,
state->cancellable,
more_files_callback,
state);
}
}
/* Start monitoring the file list if it isn't already. */
static void
start_monitoring_file_list (NautilusDirectory *directory)
{
DirectoryLoadState *state;
if (!directory->details->file_list_monitored)
{
g_assert (!directory->details->directory_load_in_progress);
directory->details->file_list_monitored = TRUE;
nautilus_file_list_ref (directory->details->file_list);
}
if (directory->details->directory_loaded ||
directory->details->directory_load_in_progress != NULL)
{
return;
}
if (!async_job_start (directory, "file list"))
{
return;
}
mark_all_files_unconfirmed (directory);
state = g_new0 (DirectoryLoadState, 1);
state->directory = directory;
state->cancellable = g_cancellable_new ();
state->load_mime_list_hash = istr_set_new ();
state->load_file_count = 0;
g_assert (directory->details->location != NULL);
state->load_directory_file =
nautilus_directory_get_corresponding_file (directory);
state->load_directory_file->details->loading_directory = TRUE;
#ifdef DEBUG_LOAD_DIRECTORY
g_message ("load_directory called to monitor file list of %p", directory->details->location);
#endif
directory->details->directory_load_in_progress = state;
g_file_enumerate_children_async (directory->details->location,
NAUTILUS_FILE_DEFAULT_ATTRIBUTES,
0, /* flags */
G_PRIORITY_DEFAULT, /* prio */
state->cancellable,
enumerate_children_callback,
state);
}
/* Stop monitoring the file list if it is being monitored. */
void
nautilus_directory_stop_monitoring_file_list (NautilusDirectory *directory)
{
if (!directory->details->file_list_monitored)
{
g_assert (directory->details->directory_load_in_progress == NULL);
return;
}
directory->details->file_list_monitored = FALSE;
file_list_cancel (directory);
nautilus_file_list_unref (directory->details->file_list);
directory->details->directory_loaded = FALSE;
}
static void
file_list_start_or_stop (NautilusDirectory *directory)
{
if (nautilus_directory_is_anyone_monitoring_file_list (directory))
{
start_monitoring_file_list (directory);
}
else
{
nautilus_directory_stop_monitoring_file_list (directory);
}
}
void
nautilus_file_invalidate_count_and_mime_list (NautilusFile *file)
{
NautilusFileAttributes attributes;
attributes = NAUTILUS_FILE_ATTRIBUTE_DIRECTORY_ITEM_COUNT |
NAUTILUS_FILE_ATTRIBUTE_DIRECTORY_ITEM_MIME_TYPES;
nautilus_file_invalidate_attributes (file, attributes);
}
/* Reset count and mime list. Invalidating deep counts is handled by
* itself elsewhere because it's a relatively heavyweight and
* special-purpose operation (see bug 5863). Also, the shallow count
* needs to be refreshed when filtering changes, but the deep count
* deliberately does not take filtering into account.
*/
void
nautilus_directory_invalidate_count_and_mime_list (NautilusDirectory *directory)
{
NautilusFile *file;
file = nautilus_directory_get_existing_corresponding_file (directory);
if (file != NULL)
{
nautilus_file_invalidate_count_and_mime_list (file);
}
nautilus_file_unref (file);
}
static void
nautilus_directory_invalidate_file_attributes (NautilusDirectory *directory,
NautilusFileAttributes file_attributes)
{
GList *node;
cancel_loading_attributes (directory, file_attributes);
for (node = directory->details->file_list; node != NULL; node = node->next)
{
nautilus_file_invalidate_attributes_internal (NAUTILUS_FILE (node->data),
file_attributes);
}
if (directory->details->as_file != NULL)
{
nautilus_file_invalidate_attributes_internal (directory->details->as_file,
file_attributes);
}
}
void
nautilus_directory_force_reload_internal (NautilusDirectory *directory,
NautilusFileAttributes file_attributes)
{
nautilus_profile_start (NULL);
/* invalidate attributes that are getting reloaded for all files */
nautilus_directory_invalidate_file_attributes (directory, file_attributes);
/* Start a new directory load. */
file_list_cancel (directory);
directory->details->directory_loaded = FALSE;
/* Start a new directory count. */
nautilus_directory_invalidate_count_and_mime_list (directory);
add_all_files_to_work_queue (directory);
nautilus_directory_async_state_changed (directory);
nautilus_profile_end (NULL);
}
static gboolean
monitor_includes_file (const Monitor *monitor,
NautilusFile *file)
{
if (monitor->file == file)
{
return TRUE;
}
if (monitor->file != NULL)
{
return FALSE;
}
if (file == file->details->directory->details->as_file)
{
return FALSE;
}
return nautilus_file_should_show (file,
monitor->monitor_hidden_files,
TRUE);
}
static gboolean
is_needy (NautilusFile *file,
FileCheck check_missing,
RequestType request_type_wanted)
{
NautilusDirectory *directory;
GList *node;
ReadyCallback *callback;
Monitor *monitor;
if (!(*check_missing)(file))
{
return FALSE;
}
directory = file->details->directory;
if (directory->details->call_when_ready_counters[request_type_wanted] > 0)
{
for (node = directory->details->call_when_ready_list;
node != NULL; node = node->next)
{
callback = node->data;
if (callback->active &&
REQUEST_WANTS_TYPE (callback->request, request_type_wanted))
{
if (callback->file == file)
{
return TRUE;
}
if (callback->file == NULL
&& file != directory->details->as_file)
{
return TRUE;
}
}
}
}
if (directory->details->monitor_counters[request_type_wanted] > 0)
{
for (node = directory->details->monitor_list;
node != NULL; node = node->next)
{
monitor = node->data;
if (REQUEST_WANTS_TYPE (monitor->request, request_type_wanted))
{
if (monitor_includes_file (monitor, file))
{
return TRUE;
}
}
}
}
return FALSE;
}
static void
directory_count_stop (NautilusDirectory *directory)
{
NautilusFile *file;
if (directory->details->count_in_progress != NULL)
{
file = directory->details->count_in_progress->count_file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file,
should_get_directory_count_now,
REQUEST_DIRECTORY_COUNT))
{
return;
}
}
/* The count is not wanted, so stop it. */
directory_count_cancel (directory);
}
}
static guint
count_non_skipped_files (GList *list)
{
guint count;
GList *node;
GFileInfo *info;
count = 0;
for (node = list; node != NULL; node = node->next)
{
info = node->data;
if (!should_skip_file (NULL, info))
{
count += 1;
}
}
return count;
}
static void
count_children_done (NautilusDirectory *directory,
NautilusFile *count_file,
gboolean succeeded,
int count)
{
g_assert (NAUTILUS_IS_FILE (count_file));
count_file->details->directory_count_is_up_to_date = TRUE;
/* Record either a failure or success. */
if (!succeeded)
{
count_file->details->directory_count_failed = TRUE;
count_file->details->got_directory_count = FALSE;
count_file->details->directory_count = 0;
}
else
{
count_file->details->directory_count_failed = FALSE;
count_file->details->got_directory_count = TRUE;
count_file->details->directory_count = count;
}
directory->details->count_in_progress = NULL;
/* Send file-changed even if count failed, so interested parties can
* distinguish between unknowable and not-yet-known cases.
*/
nautilus_file_changed (count_file);
/* Start up the next one. */
async_job_end (directory, "directory count");
nautilus_directory_async_state_changed (directory);
}
static void
directory_count_state_free (DirectoryCountState *state)
{
if (state->enumerator)
{
if (!g_file_enumerator_is_closed (state->enumerator))
{
g_file_enumerator_close_async (state->enumerator,
0, NULL, NULL, NULL);
}
g_object_unref (state->enumerator);
}
g_object_unref (state->cancellable);
nautilus_directory_unref (state->directory);
g_free (state);
}
static void
count_more_files_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
DirectoryCountState *state;
NautilusDirectory *directory;
GError *error;
GList *files;
state = user_data;
directory = state->directory;
if (g_cancellable_is_cancelled (state->cancellable))
{
/* Operation was cancelled. Bail out */
async_job_end (directory, "directory count");
nautilus_directory_async_state_changed (directory);
directory_count_state_free (state);
return;
}
g_assert (directory->details->count_in_progress != NULL);
g_assert (directory->details->count_in_progress == state);
error = NULL;
files = g_file_enumerator_next_files_finish (state->enumerator,
res, &error);
state->file_count += count_non_skipped_files (files);
if (files == NULL)
{
count_children_done (directory, state->count_file,
TRUE, state->file_count);
directory_count_state_free (state);
}
else
{
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_DEFAULT,
state->cancellable,
count_more_files_callback,
state);
}
g_list_free_full (files, g_object_unref);
if (error)
{
g_error_free (error);
}
}
static void
count_children_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
DirectoryCountState *state;
GFileEnumerator *enumerator;
NautilusDirectory *directory;
GError *error;
state = user_data;
if (g_cancellable_is_cancelled (state->cancellable))
{
/* Operation was cancelled. Bail out */
directory = state->directory;
async_job_end (directory, "directory count");
nautilus_directory_async_state_changed (directory);
directory_count_state_free (state);
return;
}
error = NULL;
enumerator = g_file_enumerate_children_finish (G_FILE (source_object),
res, &error);
if (enumerator == NULL)
{
count_children_done (state->directory,
state->count_file,
FALSE, 0);
g_error_free (error);
directory_count_state_free (state);
return;
}
else
{
state->enumerator = enumerator;
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_DEFAULT,
state->cancellable,
count_more_files_callback,
state);
}
}
static void
directory_count_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
DirectoryCountState *state;
GFile *location;
if (directory->details->count_in_progress != NULL)
{
*doing_io = TRUE;
return;
}
if (!is_needy (file,
should_get_directory_count_now,
REQUEST_DIRECTORY_COUNT))
{
return;
}
*doing_io = TRUE;
if (!nautilus_file_is_directory (file))
{
file->details->directory_count_is_up_to_date = TRUE;
file->details->directory_count_failed = FALSE;
file->details->got_directory_count = FALSE;
nautilus_directory_async_state_changed (directory);
return;
}
if (!async_job_start (directory, "directory count"))
{
return;
}
/* Start counting. */
state = g_new0 (DirectoryCountState, 1);
state->count_file = file;
state->directory = nautilus_directory_ref (directory);
state->cancellable = g_cancellable_new ();
directory->details->count_in_progress = state;
location = nautilus_file_get_location (file);
#ifdef DEBUG_LOAD_DIRECTORY
{
char *uri;
uri = g_file_get_uri (location);
g_message ("load_directory called to get shallow file count for %s", uri);
g_free (uri);
}
#endif
g_file_enumerate_children_async (location,
G_FILE_ATTRIBUTE_STANDARD_NAME ","
G_FILE_ATTRIBUTE_STANDARD_IS_HIDDEN ","
G_FILE_ATTRIBUTE_STANDARD_IS_BACKUP,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS, /* flags */
G_PRIORITY_DEFAULT, /* prio */
state->cancellable,
count_children_callback,
state);
g_object_unref (location);
}
static inline gboolean
seen_inode (DeepCountState *state,
GFileInfo *info)
{
guint64 inode, inode2;
guint i;
inode = g_file_info_get_attribute_uint64 (info, G_FILE_ATTRIBUTE_UNIX_INODE);
if (inode != 0)
{
for (i = 0; i < state->seen_deep_count_inodes->len; i++)
{
inode2 = g_array_index (state->seen_deep_count_inodes, guint64, i);
if (inode == inode2)
{
return TRUE;
}
}
}
return FALSE;
}
static inline void
mark_inode_as_seen (DeepCountState *state,
GFileInfo *info)
{
guint64 inode;
inode = g_file_info_get_attribute_uint64 (info, G_FILE_ATTRIBUTE_UNIX_INODE);
if (inode != 0)
{
g_array_append_val (state->seen_deep_count_inodes, inode);
}
}
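/* Note (illustrative, inferred from the code above): the two inline helpers
 * deduplicate hard links during a deep count, so a file reachable under two
 * names on the same filesystem contributes its size only once. An inode of
 * 0 means "unknown" and is never recorded, so files lacking
 * G_FILE_ATTRIBUTE_UNIX_INODE are each counted individually.
 */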
static void
deep_count_one (DeepCountState *state,
GFileInfo *info)
{
NautilusFile *file;
GFile *subdir;
gboolean is_seen_inode;
const char *fs_id;
if (should_skip_file (NULL, info))
{
return;
}
is_seen_inode = seen_inode (state, info);
if (!is_seen_inode)
{
mark_inode_as_seen (state, info);
}
file = state->directory->details->deep_count_file;
if (g_file_info_get_file_type (info) == G_FILE_TYPE_DIRECTORY)
{
/* Count the directory. */
file->details->deep_directory_count += 1;
/* Record the fact that we have to descend into this directory. */
fs_id = g_file_info_get_attribute_string (info, G_FILE_ATTRIBUTE_ID_FILESYSTEM);
if (g_strcmp0 (fs_id, state->fs_id) == 0)
{
/* only if it is on the same filesystem */
subdir = g_file_get_child (state->deep_count_location, g_file_info_get_name (info));
state->deep_count_subdirectories = g_list_prepend
(state->deep_count_subdirectories, subdir);
}
}
else
{
/* Even non-regular files count as files. */
file->details->deep_file_count += 1;
}
/* Count the size. */
if (!is_seen_inode && g_file_info_has_attribute (info, G_FILE_ATTRIBUTE_STANDARD_SIZE))
{
file->details->deep_size += g_file_info_get_size (info);
}
}
static void
deep_count_state_free (DeepCountState *state)
{
if (state->enumerator)
{
if (!g_file_enumerator_is_closed (state->enumerator))
{
g_file_enumerator_close_async (state->enumerator,
0, NULL, NULL, NULL);
}
g_object_unref (state->enumerator);
}
g_object_unref (state->cancellable);
if (state->deep_count_location)
{
g_object_unref (state->deep_count_location);
}
g_list_free_full (state->deep_count_subdirectories, g_object_unref);
g_array_free (state->seen_deep_count_inodes, TRUE);
g_free (state->fs_id);
g_free (state);
}
static void
deep_count_next_dir (DeepCountState *state)
{
GFile *location;
NautilusFile *file;
NautilusDirectory *directory;
gboolean done;
directory = state->directory;
g_object_unref (state->deep_count_location);
state->deep_count_location = NULL;
done = FALSE;
file = directory->details->deep_count_file;
if (state->deep_count_subdirectories != NULL)
{
/* Work on a new directory. */
location = state->deep_count_subdirectories->data;
state->deep_count_subdirectories = g_list_remove
(state->deep_count_subdirectories, location);
deep_count_load (state, location);
g_object_unref (location);
}
else
{
file->details->deep_counts_status = NAUTILUS_REQUEST_DONE;
directory->details->deep_count_file = NULL;
directory->details->deep_count_in_progress = NULL;
deep_count_state_free (state);
done = TRUE;
}
nautilus_file_updated_deep_count_in_progress (file);
if (done)
{
nautilus_file_changed (file);
async_job_end (directory, "deep count");
nautilus_directory_async_state_changed (directory);
}
}
static void
deep_count_more_files_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
DeepCountState *state;
NautilusDirectory *directory;
GList *files, *l;
GFileInfo *info;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
deep_count_state_free (state);
return;
}
directory = nautilus_directory_ref (state->directory);
g_assert (directory->details->deep_count_in_progress != NULL);
g_assert (directory->details->deep_count_in_progress == state);
files = g_file_enumerator_next_files_finish (state->enumerator,
res, NULL);
for (l = files; l != NULL; l = l->next)
{
info = l->data;
deep_count_one (state, info);
g_object_unref (info);
}
if (files == NULL)
{
g_file_enumerator_close_async (state->enumerator, 0, NULL, NULL, NULL);
g_object_unref (state->enumerator);
state->enumerator = NULL;
deep_count_next_dir (state);
}
else
{
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_LOW,
state->cancellable,
deep_count_more_files_callback,
state);
}
g_list_free (files);
nautilus_directory_unref (directory);
}
static void
deep_count_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
DeepCountState *state;
GFileEnumerator *enumerator;
NautilusFile *file;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
deep_count_state_free (state);
return;
}
file = state->directory->details->deep_count_file;
enumerator = g_file_enumerate_children_finish (G_FILE (source_object), res, NULL);
if (enumerator == NULL)
{
file->details->deep_unreadable_count += 1;
deep_count_next_dir (state);
}
else
{
state->enumerator = enumerator;
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_LOW,
state->cancellable,
deep_count_more_files_callback,
state);
}
}
static void
deep_count_load (DeepCountState *state,
GFile *location)
{
state->deep_count_location = g_object_ref (location);
#ifdef DEBUG_LOAD_DIRECTORY
g_message ("load_directory called to get deep file count for %p", location);
#endif
g_file_enumerate_children_async (state->deep_count_location,
G_FILE_ATTRIBUTE_STANDARD_NAME ","
G_FILE_ATTRIBUTE_STANDARD_TYPE ","
G_FILE_ATTRIBUTE_STANDARD_SIZE ","
G_FILE_ATTRIBUTE_STANDARD_IS_HIDDEN ","
G_FILE_ATTRIBUTE_STANDARD_IS_BACKUP ","
G_FILE_ATTRIBUTE_ID_FILESYSTEM ","
G_FILE_ATTRIBUTE_UNIX_INODE,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS, /* flags */
G_PRIORITY_LOW, /* prio */
state->cancellable,
deep_count_callback,
state);
}
static void
deep_count_stop (NautilusDirectory *directory)
{
NautilusFile *file;
if (directory->details->deep_count_in_progress != NULL)
{
file = directory->details->deep_count_file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file,
lacks_deep_count,
REQUEST_DEEP_COUNT))
{
return;
}
}
/* The count is not wanted, so stop it. */
deep_count_cancel (directory);
}
}
static void
deep_count_got_info (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
GFileInfo *info;
const char *id;
GFile *file = (GFile *) source_object;
DeepCountState *state = (DeepCountState *) user_data;
info = g_file_query_info_finish (file, res, NULL);
if (info != NULL)
{
id = g_file_info_get_attribute_string (info, G_FILE_ATTRIBUTE_ID_FILESYSTEM);
state->fs_id = g_strdup (id);
g_object_unref (info);
}
deep_count_load (state, file);
}
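/* Note (illustrative, inferred from the code above): the filesystem id is
 * queried once for the root before enumeration starts, so deep_count_one()
 * can compare each child's G_FILE_ATTRIBUTE_ID_FILESYSTEM against
 * state->fs_id and avoid descending across mount points. If the query
 * fails, fs_id stays NULL and g_strcmp0() only matches children that also
 * report no filesystem id.
 */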
static void
deep_count_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
GFile *location;
DeepCountState *state;
if (directory->details->deep_count_in_progress != NULL)
{
*doing_io = TRUE;
return;
}
if (!is_needy (file,
lacks_deep_count,
REQUEST_DEEP_COUNT))
{
return;
}
*doing_io = TRUE;
if (!nautilus_file_is_directory (file))
{
file->details->deep_counts_status = NAUTILUS_REQUEST_DONE;
nautilus_directory_async_state_changed (directory);
return;
}
if (!async_job_start (directory, "deep count"))
{
return;
}
/* Start counting. */
file->details->deep_counts_status = NAUTILUS_REQUEST_IN_PROGRESS;
file->details->deep_directory_count = 0;
file->details->deep_file_count = 0;
file->details->deep_unreadable_count = 0;
file->details->deep_size = 0;
directory->details->deep_count_file = file;
state = g_new0 (DeepCountState, 1);
state->directory = directory;
state->cancellable = g_cancellable_new ();
state->seen_deep_count_inodes = g_array_new (FALSE, TRUE, sizeof (guint64));
state->fs_id = NULL;
directory->details->deep_count_in_progress = state;
location = nautilus_file_get_location (file);
g_file_query_info_async (location,
G_FILE_ATTRIBUTE_ID_FILESYSTEM,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
G_PRIORITY_DEFAULT,
NULL,
deep_count_got_info,
state);
g_object_unref (location);
}
static void
mime_list_stop (NautilusDirectory *directory)
{
NautilusFile *file;
if (directory->details->mime_list_in_progress != NULL)
{
file = directory->details->mime_list_in_progress->mime_list_file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file,
should_get_mime_list,
REQUEST_MIME_LIST))
{
return;
}
}
/* The count is not wanted, so stop it. */
mime_list_cancel (directory);
}
}
static void
mime_list_state_free (MimeListState *state)
{
if (state->enumerator)
{
if (!g_file_enumerator_is_closed (state->enumerator))
{
g_file_enumerator_close_async (state->enumerator,
0, NULL, NULL, NULL);
}
g_object_unref (state->enumerator);
}
g_object_unref (state->cancellable);
istr_set_destroy (state->mime_list_hash);
nautilus_directory_unref (state->directory);
g_free (state);
}
static void
mime_list_done (MimeListState *state,
gboolean success)
{
NautilusFile *file;
NautilusDirectory *directory;
directory = state->directory;
g_assert (directory != NULL);
file = state->mime_list_file;
file->details->mime_list_is_up_to_date = TRUE;
g_list_free_full (file->details->mime_list, g_free);
    if (!success)
{
file->details->mime_list_failed = TRUE;
file->details->mime_list = NULL;
}
else
{
file->details->got_mime_list = TRUE;
file->details->mime_list = istr_set_get_as_list (state->mime_list_hash);
}
directory->details->mime_list_in_progress = NULL;
/* Send file-changed even if getting the item type list
* failed, so interested parties can distinguish between
* unknowable and not-yet-known cases.
*/
nautilus_file_changed (file);
/* Start up the next one. */
async_job_end (directory, "MIME list");
nautilus_directory_async_state_changed (directory);
}
static void
mime_list_one (MimeListState *state,
GFileInfo *info)
{
const char *mime_type;
if (should_skip_file (NULL, info))
{
g_object_unref (info);
return;
}
mime_type = g_file_info_get_content_type (info);
if (mime_type != NULL)
{
istr_set_insert (state->mime_list_hash, mime_type);
}
}
static void
mime_list_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
MimeListState *state;
NautilusDirectory *directory;
GError *error;
GList *files, *l;
GFileInfo *info;
state = user_data;
directory = state->directory;
if (g_cancellable_is_cancelled (state->cancellable))
{
/* Operation was cancelled. Bail out */
directory->details->mime_list_in_progress = NULL;
async_job_end (directory, "MIME list");
nautilus_directory_async_state_changed (directory);
mime_list_state_free (state);
return;
}
g_assert (directory->details->mime_list_in_progress != NULL);
g_assert (directory->details->mime_list_in_progress == state);
error = NULL;
files = g_file_enumerator_next_files_finish (state->enumerator,
res, &error);
for (l = files; l != NULL; l = l->next)
{
info = l->data;
mime_list_one (state, info);
g_object_unref (info);
}
if (files == NULL)
{
        mime_list_done (state, error == NULL);
mime_list_state_free (state);
}
else
{
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_DEFAULT,
state->cancellable,
mime_list_callback,
state);
}
g_list_free (files);
if (error)
{
g_error_free (error);
}
}
static void
list_mime_enum_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
MimeListState *state;
GFileEnumerator *enumerator;
NautilusDirectory *directory;
GError *error;
state = user_data;
if (g_cancellable_is_cancelled (state->cancellable))
{
/* Operation was cancelled. Bail out */
directory = state->directory;
directory->details->mime_list_in_progress = NULL;
async_job_end (directory, "MIME list");
nautilus_directory_async_state_changed (directory);
mime_list_state_free (state);
return;
}
error = NULL;
enumerator = g_file_enumerate_children_finish (G_FILE (source_object),
res, &error);
if (enumerator == NULL)
{
mime_list_done (state, FALSE);
g_error_free (error);
mime_list_state_free (state);
return;
}
else
{
state->enumerator = enumerator;
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_DEFAULT,
state->cancellable,
mime_list_callback,
state);
}
}
static void
mime_list_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
MimeListState *state;
GFile *location;
mime_list_stop (directory);
if (directory->details->mime_list_in_progress != NULL)
{
*doing_io = TRUE;
return;
}
/* Figure out which file to get a mime list for. */
if (!is_needy (file,
should_get_mime_list,
REQUEST_MIME_LIST))
{
return;
}
*doing_io = TRUE;
if (!nautilus_file_is_directory (file))
{
g_list_free (file->details->mime_list);
file->details->mime_list_failed = FALSE;
file->details->got_mime_list = FALSE;
file->details->mime_list_is_up_to_date = TRUE;
nautilus_directory_async_state_changed (directory);
return;
}
if (!async_job_start (directory, "MIME list"))
{
return;
}
state = g_new0 (MimeListState, 1);
state->mime_list_file = file;
state->directory = nautilus_directory_ref (directory);
state->cancellable = g_cancellable_new ();
state->mime_list_hash = istr_set_new ();
directory->details->mime_list_in_progress = state;
location = nautilus_file_get_location (file);
#ifdef DEBUG_LOAD_DIRECTORY
{
char *uri;
uri = g_file_get_uri (location);
g_message ("load_directory called to get MIME list of %s", uri);
g_free (uri);
}
#endif
g_file_enumerate_children_async (location,
G_FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE,
0, /* flags */
G_PRIORITY_LOW, /* prio */
state->cancellable,
list_mime_enum_callback,
state);
g_object_unref (location);
}
static void
get_info_state_free (GetInfoState *state)
{
g_object_unref (state->cancellable);
g_free (state);
}
static void
query_info_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
NautilusDirectory *directory;
NautilusFile *get_info_file;
GFileInfo *info;
GetInfoState *state;
GError *error;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
get_info_state_free (state);
return;
}
directory = nautilus_directory_ref (state->directory);
get_info_file = directory->details->get_info_file;
g_assert (NAUTILUS_IS_FILE (get_info_file));
directory->details->get_info_file = NULL;
directory->details->get_info_in_progress = NULL;
/* ref here because we might be removing the last ref when we
* mark the file gone below, but we need to keep a ref at
* least long enough to send the change notification.
*/
nautilus_file_ref (get_info_file);
error = NULL;
info = g_file_query_info_finish (G_FILE (source_object), res, &error);
if (info == NULL)
{
if (error->domain == G_IO_ERROR && error->code == G_IO_ERROR_NOT_FOUND)
{
/* mark file as gone */
nautilus_file_mark_gone (get_info_file);
}
get_info_file->details->file_info_is_up_to_date = TRUE;
nautilus_file_clear_info (get_info_file);
get_info_file->details->get_info_failed = TRUE;
get_info_file->details->get_info_error = error;
}
else
{
nautilus_file_update_info (get_info_file, info);
g_object_unref (info);
}
nautilus_file_changed (get_info_file);
nautilus_file_unref (get_info_file);
async_job_end (directory, "file info");
nautilus_directory_async_state_changed (directory);
nautilus_directory_unref (directory);
get_info_state_free (state);
}
static void
file_info_stop (NautilusDirectory *directory)
{
NautilusFile *file;
if (directory->details->get_info_in_progress != NULL)
{
file = directory->details->get_info_file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file, lacks_info, REQUEST_FILE_INFO))
{
return;
}
}
/* The info is not wanted, so stop it. */
file_info_cancel (directory);
}
}
static void
file_info_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
GFile *location;
GetInfoState *state;
file_info_stop (directory);
if (directory->details->get_info_in_progress != NULL)
{
*doing_io = TRUE;
return;
}
if (!is_needy (file, lacks_info, REQUEST_FILE_INFO))
{
return;
}
*doing_io = TRUE;
if (!async_job_start (directory, "file info"))
{
return;
}
directory->details->get_info_file = file;
file->details->get_info_failed = FALSE;
if (file->details->get_info_error)
{
g_error_free (file->details->get_info_error);
file->details->get_info_error = NULL;
}
state = g_new (GetInfoState, 1);
state->directory = directory;
state->cancellable = g_cancellable_new ();
directory->details->get_info_in_progress = state;
location = nautilus_file_get_location (file);
g_file_query_info_async (location,
NAUTILUS_FILE_DEFAULT_ATTRIBUTES,
0,
G_PRIORITY_DEFAULT,
state->cancellable, query_info_callback, state);
g_object_unref (location);
}
static gboolean
is_link_trusted (NautilusFile *file,
gboolean is_launcher)
{
GFile *location;
gboolean res;
    g_autofree gchar *trusted = NULL;
if (!is_launcher)
{
return TRUE;
}
trusted = nautilus_file_get_metadata (file,
NAUTILUS_METADATA_KEY_DESKTOP_FILE_TRUSTED,
NULL);
if (nautilus_file_can_execute (file) && trusted != NULL)
{
return TRUE;
}
res = FALSE;
if (nautilus_file_is_local (file))
{
location = nautilus_file_get_location (file);
res = nautilus_is_in_system_dir (location);
g_object_unref (location);
}
return res;
}
static void
link_info_done (NautilusDirectory *directory,
NautilusFile *file,
const char *uri,
const char *name,
GIcon *icon,
gboolean is_launcher,
gboolean is_foreign)
{
gboolean is_trusted;
file->details->link_info_is_up_to_date = TRUE;
is_trusted = is_link_trusted (file, is_launcher);
if (is_trusted)
{
nautilus_file_set_display_name (file, name, name, TRUE);
}
else
{
nautilus_file_set_display_name (file, NULL, NULL, TRUE);
}
file->details->got_link_info = TRUE;
g_clear_object (&file->details->custom_icon);
if (uri)
{
g_free (file->details->activation_uri);
file->details->activation_uri = NULL;
file->details->got_custom_activation_uri = TRUE;
file->details->activation_uri = g_strdup (uri);
}
if (is_trusted && (icon != NULL))
{
file->details->custom_icon = g_object_ref (icon);
}
file->details->is_launcher = is_launcher;
file->details->is_foreign_link = is_foreign;
file->details->is_trusted_link = is_trusted;
nautilus_directory_async_state_changed (directory);
}
static void
link_info_stop (NautilusDirectory *directory)
{
NautilusFile *file;
if (directory->details->link_info_read_state != NULL)
{
file = directory->details->link_info_read_state->file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file,
lacks_link_info,
REQUEST_LINK_INFO))
{
return;
}
}
/* The link info is not wanted, so stop it. */
link_info_cancel (directory);
}
}
static void
link_info_got_data (NautilusDirectory *directory,
NautilusFile *file,
gboolean result,
goffset bytes_read,
char *file_contents)
{
char *link_uri, *uri, *name;
GIcon *icon;
gboolean is_launcher;
gboolean is_foreign;
nautilus_directory_ref (directory);
uri = NULL;
name = NULL;
icon = NULL;
is_launcher = FALSE;
is_foreign = FALSE;
/* Handle the case where we read the Nautilus link. */
if (result)
{
link_uri = nautilus_file_get_uri (file);
nautilus_link_get_link_info_given_file_contents (file_contents, bytes_read, link_uri,
&uri, &name, &icon, &is_launcher, &is_foreign);
g_free (link_uri);
}
else
{
/* FIXME bugzilla.gnome.org 42433: We should report this error to the user. */
}
nautilus_file_ref (file);
link_info_done (directory, file, uri, name, icon, is_launcher, is_foreign);
nautilus_file_changed (file);
nautilus_file_unref (file);
g_free (uri);
g_free (name);
if (icon != NULL)
{
g_object_unref (icon);
}
nautilus_directory_unref (directory);
}
static void
link_info_read_state_free (LinkInfoReadState *state)
{
g_object_unref (state->cancellable);
g_free (state);
}
static void
link_info_nautilus_link_read_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
LinkInfoReadState *state;
gsize file_size;
char *file_contents;
gboolean result;
NautilusDirectory *directory;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
link_info_read_state_free (state);
return;
}
directory = nautilus_directory_ref (state->directory);
result = g_file_load_contents_finish (G_FILE (source_object),
res,
&file_contents, &file_size,
NULL, NULL);
state->directory->details->link_info_read_state = NULL;
async_job_end (state->directory, "link info");
link_info_got_data (state->directory, state->file, result, file_size, file_contents);
if (result)
{
g_free (file_contents);
}
link_info_read_state_free (state);
nautilus_directory_unref (directory);
}
static void
link_info_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
GFile *location;
gboolean nautilus_style_link;
LinkInfoReadState *state;
if (directory->details->link_info_read_state != NULL)
{
*doing_io = TRUE;
return;
}
if (!is_needy (file,
lacks_link_info,
REQUEST_LINK_INFO))
{
return;
}
*doing_io = TRUE;
/* Figure out if it is a link. */
nautilus_style_link = nautilus_file_is_nautilus_link (file);
location = nautilus_file_get_location (file);
/* If it's not a link we are done. If it is, we need to read it. */
if (!nautilus_style_link)
{
link_info_done (directory, file, NULL, NULL, NULL, FALSE, FALSE);
}
else
{
if (!async_job_start (directory, "link info"))
{
g_object_unref (location);
return;
}
state = g_new0 (LinkInfoReadState, 1);
state->directory = directory;
state->file = file;
state->cancellable = g_cancellable_new ();
directory->details->link_info_read_state = state;
g_file_load_contents_async (location,
state->cancellable,
link_info_nautilus_link_read_callback,
state);
}
g_object_unref (location);
}
static void
thumbnail_done (NautilusDirectory *directory,
NautilusFile *file,
GdkPixbuf *pixbuf,
gboolean tried_original)
{
const char *thumb_mtime_str;
time_t thumb_mtime = 0;
file->details->thumbnail_is_up_to_date = TRUE;
file->details->thumbnail_tried_original = tried_original;
if (file->details->thumbnail)
{
g_object_unref (file->details->thumbnail);
file->details->thumbnail = NULL;
}
if (file->details->scaled_thumbnail)
{
g_object_unref (file->details->scaled_thumbnail);
file->details->scaled_thumbnail = NULL;
}
if (pixbuf)
{
if (tried_original)
{
thumb_mtime = file->details->mtime;
}
else
{
thumb_mtime_str = gdk_pixbuf_get_option (pixbuf, "tEXt::Thumb::MTime");
if (thumb_mtime_str)
{
thumb_mtime = atol (thumb_mtime_str);
}
}
if (thumb_mtime == 0 ||
thumb_mtime == file->details->mtime)
{
file->details->thumbnail = g_object_ref (pixbuf);
file->details->thumbnail_mtime = thumb_mtime;
}
else
{
g_free (file->details->thumbnail_path);
file->details->thumbnail_path = NULL;
}
}
nautilus_directory_async_state_changed (directory);
}
static void
thumbnail_stop (NautilusDirectory *directory)
{
NautilusFile *file;
if (directory->details->thumbnail_state != NULL)
{
file = directory->details->thumbnail_state->file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file,
lacks_thumbnail,
REQUEST_THUMBNAIL))
{
return;
}
}
/* The link info is not wanted, so stop it. */
thumbnail_cancel (directory);
}
}
static void
thumbnail_got_pixbuf (NautilusDirectory *directory,
NautilusFile *file,
GdkPixbuf *pixbuf,
gboolean tried_original)
{
nautilus_directory_ref (directory);
nautilus_file_ref (file);
thumbnail_done (directory, file, pixbuf, tried_original);
nautilus_file_changed (file);
nautilus_file_unref (file);
if (pixbuf)
{
g_object_unref (pixbuf);
}
nautilus_directory_unref (directory);
}
static void
thumbnail_state_free (ThumbnailState *state)
{
g_object_unref (state->cancellable);
g_free (state);
}
extern int cached_thumbnail_size;
/* scale very large images down to the max. size we need */
static void
thumbnail_loader_size_prepared (GdkPixbufLoader *loader,
int width,
int height,
gpointer user_data)
{
int max_thumbnail_size;
double aspect_ratio;
aspect_ratio = ((double) width) / height;
/* cf. nautilus_file_get_icon() */
max_thumbnail_size = NAUTILUS_CANVAS_ICON_SIZE_LARGER * cached_thumbnail_size / NAUTILUS_CANVAS_ICON_SIZE_SMALL;
if (MAX (width, height) > max_thumbnail_size)
{
if (width > height)
{
width = max_thumbnail_size;
height = width / aspect_ratio;
}
else
{
height = max_thumbnail_size;
width = height * aspect_ratio;
}
gdk_pixbuf_loader_set_size (loader, width, height);
}
}
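/*
 * Worked example (numbers assumed for illustration): if the computed
 * max_thumbnail_size is 512, a 4096x3072 image (aspect 4:3, wider than
 * tall) is decoded at 512 x (512 / 1.3333) = 512x384, capping decode
 * memory while preserving the aspect ratio.
 */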
static GdkPixbuf *
get_pixbuf_for_content (goffset file_len,
char *file_contents)
{
gboolean res;
GdkPixbuf *pixbuf, *pixbuf2;
GdkPixbufLoader *loader;
gsize chunk_len;
pixbuf = NULL;
loader = gdk_pixbuf_loader_new ();
g_signal_connect (loader, "size-prepared",
G_CALLBACK (thumbnail_loader_size_prepared),
NULL);
/* For some reason we have to write in chunks, or gdk-pixbuf fails */
res = TRUE;
while (res && file_len > 0)
{
chunk_len = file_len;
res = gdk_pixbuf_loader_write (loader, (guchar *) file_contents, chunk_len, NULL);
file_contents += chunk_len;
file_len -= chunk_len;
}
if (res)
{
res = gdk_pixbuf_loader_close (loader, NULL);
}
if (res)
{
pixbuf = g_object_ref (gdk_pixbuf_loader_get_pixbuf (loader));
}
g_object_unref (G_OBJECT (loader));
if (pixbuf)
{
pixbuf2 = gdk_pixbuf_apply_embedded_orientation (pixbuf);
g_object_unref (pixbuf);
pixbuf = pixbuf2;
}
return pixbuf;
}
static void
thumbnail_read_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
ThumbnailState *state;
gsize file_size;
char *file_contents;
gboolean result;
NautilusDirectory *directory;
GdkPixbuf *pixbuf;
GFile *location;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
thumbnail_state_free (state);
return;
}
directory = nautilus_directory_ref (state->directory);
result = g_file_load_contents_finish (G_FILE (source_object),
res,
&file_contents, &file_size,
NULL, NULL);
pixbuf = NULL;
if (result)
{
pixbuf = get_pixbuf_for_content (file_size, file_contents);
g_free (file_contents);
}
if (pixbuf == NULL && state->trying_original)
{
state->trying_original = FALSE;
location = g_file_new_for_path (state->file->details->thumbnail_path);
g_file_load_contents_async (location,
state->cancellable,
thumbnail_read_callback,
state);
g_object_unref (location);
}
else
{
state->directory->details->thumbnail_state = NULL;
async_job_end (state->directory, "thumbnail");
thumbnail_got_pixbuf (state->directory, state->file, pixbuf, state->tried_original);
thumbnail_state_free (state);
}
nautilus_directory_unref (directory);
}
static void
thumbnail_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
GFile *location;
ThumbnailState *state;
if (directory->details->thumbnail_state != NULL)
{
*doing_io = TRUE;
return;
}
if (!is_needy (file,
lacks_thumbnail,
REQUEST_THUMBNAIL))
{
return;
}
*doing_io = TRUE;
if (!async_job_start (directory, "thumbnail"))
{
return;
}
state = g_new0 (ThumbnailState, 1);
state->directory = directory;
state->file = file;
state->cancellable = g_cancellable_new ();
if (file->details->thumbnail_wants_original)
{
state->tried_original = TRUE;
state->trying_original = TRUE;
location = nautilus_file_get_location (file);
}
else
{
location = g_file_new_for_path (file->details->thumbnail_path);
}
directory->details->thumbnail_state = state;
g_file_load_contents_async (location,
state->cancellable,
thumbnail_read_callback,
state);
g_object_unref (location);
}
static void
mount_stop (NautilusDirectory *directory)
{
NautilusFile *file;
if (directory->details->mount_state != NULL)
{
file = directory->details->mount_state->file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file,
lacks_mount,
REQUEST_MOUNT))
{
return;
}
}
/* The link info is not wanted, so stop it. */
mount_cancel (directory);
}
}
static void
mount_state_free (MountState *state)
{
g_object_unref (state->cancellable);
g_free (state);
}
static void
got_mount (MountState *state,
GMount *mount)
{
NautilusDirectory *directory;
NautilusFile *file;
directory = nautilus_directory_ref (state->directory);
state->directory->details->mount_state = NULL;
async_job_end (state->directory, "mount");
file = nautilus_file_ref (state->file);
file->details->mount_is_up_to_date = TRUE;
nautilus_file_set_mount (file, mount);
nautilus_directory_async_state_changed (directory);
nautilus_file_changed (file);
nautilus_file_unref (file);
nautilus_directory_unref (directory);
mount_state_free (state);
}
static void
find_enclosing_mount_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
GMount *mount;
MountState *state;
GFile *location, *root;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
mount_state_free (state);
return;
}
mount = g_file_find_enclosing_mount_finish (G_FILE (source_object),
res, NULL);
if (mount)
{
root = g_mount_get_root (mount);
location = nautilus_file_get_location (state->file);
if (!g_file_equal (location, root))
{
g_object_unref (mount);
mount = NULL;
}
g_object_unref (root);
g_object_unref (location);
}
got_mount (state, mount);
if (mount)
{
g_object_unref (mount);
}
}
static GMount *
get_mount_at (GFile *target)
{
GVolumeMonitor *monitor;
GFile *root;
GList *mounts, *l;
GMount *found;
monitor = g_volume_monitor_get ();
mounts = g_volume_monitor_get_mounts (monitor);
found = NULL;
for (l = mounts; l != NULL; l = l->next)
{
GMount *mount = G_MOUNT (l->data);
if (g_mount_is_shadowed (mount))
{
continue;
}
root = g_mount_get_root (mount);
if (g_file_equal (target, root))
{
found = g_object_ref (mount);
break;
}
g_object_unref (root);
}
g_list_free_full (mounts, g_object_unref);
g_object_unref (monitor);
return found;
}
static void
mount_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
GFile *location;
MountState *state;
if (directory->details->mount_state != NULL)
{
*doing_io = TRUE;
return;
}
if (!is_needy (file,
lacks_mount,
REQUEST_MOUNT))
{
return;
}
*doing_io = TRUE;
if (!async_job_start (directory, "mount"))
{
return;
}
state = g_new0 (MountState, 1);
state->directory = directory;
state->file = file;
state->cancellable = g_cancellable_new ();
location = nautilus_file_get_location (file);
directory->details->mount_state = state;
if (file->details->type == G_FILE_TYPE_MOUNTABLE)
{
GFile *target;
GMount *mount;
mount = NULL;
target = nautilus_file_get_activation_location (file);
if (target != NULL)
{
mount = get_mount_at (target);
g_object_unref (target);
}
got_mount (state, mount);
if (mount)
{
g_object_unref (mount);
}
}
else
{
g_file_find_enclosing_mount_async (location,
G_PRIORITY_DEFAULT,
state->cancellable,
find_enclosing_mount_callback,
state);
}
g_object_unref (location);
}
static void
filesystem_info_cancel (NautilusDirectory *directory)
{
if (directory->details->filesystem_info_state != NULL)
{
g_cancellable_cancel (directory->details->filesystem_info_state->cancellable);
directory->details->filesystem_info_state->directory = NULL;
directory->details->filesystem_info_state = NULL;
async_job_end (directory, "filesystem info");
}
}
static void
filesystem_info_stop (NautilusDirectory *directory)
{
NautilusFile *file;
if (directory->details->filesystem_info_state != NULL)
{
file = directory->details->filesystem_info_state->file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file,
lacks_filesystem_info,
REQUEST_FILESYSTEM_INFO))
{
return;
}
}
/* The filesystem info is not wanted, so stop it. */
filesystem_info_cancel (directory);
}
}
static void
filesystem_info_state_free (FilesystemInfoState *state)
{
g_object_unref (state->cancellable);
g_free (state);
}
static void
got_filesystem_info (FilesystemInfoState *state,
GFileInfo *info)
{
NautilusDirectory *directory;
NautilusFile *file;
const char *filesystem_type;
/* careful here, info may be NULL */
directory = nautilus_directory_ref (state->directory);
state->directory->details->filesystem_info_state = NULL;
async_job_end (state->directory, "filesystem info");
file = nautilus_file_ref (state->file);
file->details->filesystem_info_is_up_to_date = TRUE;
if (info != NULL)
{
file->details->filesystem_use_preview =
g_file_info_get_attribute_uint32 (info, G_FILE_ATTRIBUTE_FILESYSTEM_USE_PREVIEW);
file->details->filesystem_readonly =
g_file_info_get_attribute_boolean (info, G_FILE_ATTRIBUTE_FILESYSTEM_READONLY);
filesystem_type = g_file_info_get_attribute_string (info, G_FILE_ATTRIBUTE_FILESYSTEM_TYPE);
if (g_strcmp0 (eel_ref_str_peek (file->details->filesystem_type), filesystem_type) != 0)
{
eel_ref_str_unref (file->details->filesystem_type);
file->details->filesystem_type = eel_ref_str_get_unique (filesystem_type);
}
}
nautilus_directory_async_state_changed (directory);
nautilus_file_changed (file);
nautilus_file_unref (file);
nautilus_directory_unref (directory);
filesystem_info_state_free (state);
}
static void
query_filesystem_info_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
GFileInfo *info;
FilesystemInfoState *state;
state = user_data;
if (state->directory == NULL)
{
/* Operation was cancelled. Bail out */
filesystem_info_state_free (state);
return;
}
info = g_file_query_filesystem_info_finish (G_FILE (source_object), res, NULL);
got_filesystem_info (state, info);
if (info != NULL)
{
g_object_unref (info);
}
}
static void
filesystem_info_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
GFile *location;
FilesystemInfoState *state;
if (directory->details->filesystem_info_state != NULL)
{
*doing_io = TRUE;
return;
}
if (!is_needy (file,
lacks_filesystem_info,
REQUEST_FILESYSTEM_INFO))
{
return;
}
*doing_io = TRUE;
if (!async_job_start (directory, "filesystem info"))
{
return;
}
state = g_new0 (FilesystemInfoState, 1);
state->directory = directory;
state->file = file;
state->cancellable = g_cancellable_new ();
location = nautilus_file_get_location (file);
directory->details->filesystem_info_state = state;
g_file_query_filesystem_info_async (location,
G_FILE_ATTRIBUTE_FILESYSTEM_READONLY ","
G_FILE_ATTRIBUTE_FILESYSTEM_USE_PREVIEW ","
G_FILE_ATTRIBUTE_FILESYSTEM_TYPE,
G_PRIORITY_DEFAULT,
state->cancellable,
query_filesystem_info_callback,
state);
g_object_unref (location);
}
static void
extension_info_cancel (NautilusDirectory *directory)
{
if (directory->details->extension_info_in_progress != NULL)
{
if (directory->details->extension_info_idle)
{
g_source_remove (directory->details->extension_info_idle);
}
else
{
nautilus_info_provider_cancel_update
(directory->details->extension_info_provider,
directory->details->extension_info_in_progress);
}
directory->details->extension_info_in_progress = NULL;
directory->details->extension_info_file = NULL;
directory->details->extension_info_provider = NULL;
directory->details->extension_info_idle = 0;
async_job_end (directory, "extension info");
}
}
static void
extension_info_stop (NautilusDirectory *directory)
{
if (directory->details->extension_info_in_progress != NULL)
{
NautilusFile *file;
file = directory->details->extension_info_file;
if (file != NULL)
{
g_assert (NAUTILUS_IS_FILE (file));
g_assert (file->details->directory == directory);
if (is_needy (file, lacks_extension_info, REQUEST_EXTENSION_INFO))
{
return;
}
}
/* The info is not wanted, so stop it. */
extension_info_cancel (directory);
}
}
static void
finish_info_provider (NautilusDirectory *directory,
NautilusFile *file,
NautilusInfoProvider *provider)
{
file->details->pending_info_providers =
g_list_remove (file->details->pending_info_providers,
provider);
g_object_unref (provider);
nautilus_directory_async_state_changed (directory);
if (file->details->pending_info_providers == NULL)
{
nautilus_file_info_providers_done (file);
}
}
static gboolean
info_provider_idle_callback (gpointer user_data)
{
InfoProviderResponse *response;
NautilusDirectory *directory;
response = user_data;
directory = response->directory;
if (response->handle != directory->details->extension_info_in_progress
|| response->provider != directory->details->extension_info_provider)
{
g_warning ("Unexpected plugin response. This probably indicates a bug in a Nautilus extension: handle=%p", response->handle);
}
else
{
NautilusFile *file;
async_job_end (directory, "extension info");
file = directory->details->extension_info_file;
directory->details->extension_info_file = NULL;
directory->details->extension_info_provider = NULL;
directory->details->extension_info_in_progress = NULL;
directory->details->extension_info_idle = 0;
finish_info_provider (directory, file, response->provider);
}
return FALSE;
}
static void
info_provider_callback (NautilusInfoProvider *provider,
NautilusOperationHandle *handle,
NautilusOperationResult result,
gpointer user_data)
{
InfoProviderResponse *response;
response = g_new0 (InfoProviderResponse, 1);
response->provider = provider;
response->handle = handle;
response->result = result;
response->directory = NAUTILUS_DIRECTORY (user_data);
response->directory->details->extension_info_idle =
g_idle_add_full (G_PRIORITY_DEFAULT_IDLE,
info_provider_idle_callback, response,
g_free);
}
static void
extension_info_start (NautilusDirectory *directory,
NautilusFile *file,
gboolean *doing_io)
{
NautilusInfoProvider *provider;
NautilusOperationResult result;
NautilusOperationHandle *handle;
GClosure *update_complete;
if (directory->details->extension_info_in_progress != NULL)
{
*doing_io = TRUE;
return;
}
if (!is_needy (file, lacks_extension_info, REQUEST_EXTENSION_INFO))
{
return;
}
*doing_io = TRUE;
if (!async_job_start (directory, "extension info"))
{
return;
}
provider = file->details->pending_info_providers->data;
update_complete = g_cclosure_new (G_CALLBACK (info_provider_callback),
directory,
NULL);
g_closure_set_marshal (update_complete,
g_cclosure_marshal_generic);
result = nautilus_info_provider_update_file_info
(provider,
NAUTILUS_FILE_INFO (file),
update_complete,
&handle);
g_closure_unref (update_complete);
if (result == NAUTILUS_OPERATION_COMPLETE ||
result == NAUTILUS_OPERATION_FAILED)
{
finish_info_provider (directory, file, provider);
async_job_end (directory, "extension info");
}
else
{
directory->details->extension_info_in_progress = handle;
directory->details->extension_info_provider = provider;
directory->details->extension_info_file = file;
}
}
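/*
 * Illustrative counterpart (assumption, not from this file): the shape of
 * an extension's update_file_info method that the handle/idle plumbing
 * above waits on. On completion the extension invokes the closure via
 * nautilus_info_provider_update_complete_invoke (), which lands in
 * info_provider_callback () above. "Example" names are hypothetical.
 */
typedef struct
{
    NautilusFileInfo *file;
} ExampleTask;

static NautilusOperationResult
example_update_file_info (NautilusInfoProvider *provider,
                          NautilusFileInfo *file,
                          GClosure *update_complete,
                          NautilusOperationHandle **handle)
{
    ExampleTask *task = g_new0 (ExampleTask, 1);
    task->file = file;
    *handle = (NautilusOperationHandle *) task; /* opaque token */
    /* ... schedule async work keyed by *handle; invoke update_complete
     * with NAUTILUS_OPERATION_COMPLETE when it finishes ... */
    return NAUTILUS_OPERATION_IN_PROGRESS;
}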
static void
start_or_stop_io (NautilusDirectory *directory)
{
NautilusFile *file;
gboolean doing_io;
/* Start or stop reading files. */
file_list_start_or_stop (directory);
/* Stop any no longer wanted attribute fetches. */
file_info_stop (directory);
directory_count_stop (directory);
deep_count_stop (directory);
mime_list_stop (directory);
link_info_stop (directory);
extension_info_stop (directory);
mount_stop (directory);
thumbnail_stop (directory);
filesystem_info_stop (directory);
doing_io = FALSE;
/* Take files that are all done off the queue. */
while (!nautilus_file_queue_is_empty (directory->details->high_priority_queue))
{
file = nautilus_file_queue_head (directory->details->high_priority_queue);
/* Start getting attributes if possible */
file_info_start (directory, file, &doing_io);
link_info_start (directory, file, &doing_io);
if (doing_io)
{
return;
}
move_file_to_low_priority_queue (directory, file);
}
/* High priority queue must be empty */
while (!nautilus_file_queue_is_empty (directory->details->low_priority_queue))
{
file = nautilus_file_queue_head (directory->details->low_priority_queue);
/* Start getting attributes if possible */
mount_start (directory, file, &doing_io);
directory_count_start (directory, file, &doing_io);
deep_count_start (directory, file, &doing_io);
mime_list_start (directory, file, &doing_io);
thumbnail_start (directory, file, &doing_io);
filesystem_info_start (directory, file, &doing_io);
if (doing_io)
{
return;
}
move_file_to_extension_queue (directory, file);
}
/* Low priority queue must be empty */
while (!nautilus_file_queue_is_empty (directory->details->extension_queue))
{
file = nautilus_file_queue_head (directory->details->extension_queue);
/* Start getting attributes if possible */
extension_info_start (directory, file, &doing_io);
if (doing_io)
{
return;
}
nautilus_directory_remove_file_from_work_queue (directory, file);
}
}
/* Call this when the monitor or call when ready list changes,
* or when some I/O is completed.
*/
void
nautilus_directory_async_state_changed (NautilusDirectory *directory)
{
/* Check if any callbacks are satisfied and call them if they
* are. Do this last so that any changes done in start or stop
* I/O functions immediately (not in callbacks) are taken into
* consideration. If any callbacks are called, consider the
* I/O state again so that we can release or cancel I/O that
 * is no longer needed once the callbacks are satisfied.
*/
if (directory->details->in_async_service_loop)
{
directory->details->state_changed = TRUE;
return;
}
directory->details->in_async_service_loop = TRUE;
nautilus_directory_ref (directory);
do
{
directory->details->state_changed = FALSE;
start_or_stop_io (directory);
if (call_ready_callbacks (directory))
{
directory->details->state_changed = TRUE;
}
}
while (directory->details->state_changed);
directory->details->in_async_service_loop = FALSE;
nautilus_directory_unref (directory);
/* Check if any directories should wake up. */
async_job_wake_up ();
}
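/*
 * Illustrative note (not from the original): the in_async_service_loop /
 * state_changed pair above is a standard settle-loop re-entrancy guard.
 * A minimal sketch of the same idiom, with assumed names:
 */
typedef struct
{
    gboolean in_loop;
    gboolean dirty;
} ExampleSettle;

static void
example_settle (ExampleSettle *s,
                void (*step) (ExampleSettle *))
{
    if (s->in_loop)
    {
        /* re-entered from step (): just note that more work arrived */
        s->dirty = TRUE;
        return;
    }
    s->in_loop = TRUE;
    do
    {
        s->dirty = FALSE;
        step (s); /* may re-enter example_settle () */
    }
    while (s->dirty);
    s->in_loop = FALSE;
}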
void
nautilus_directory_cancel (NautilusDirectory *directory)
{
    /* Arbitrary order (roughly alphabetical; later additions appended). */
deep_count_cancel (directory);
directory_count_cancel (directory);
file_info_cancel (directory);
file_list_cancel (directory);
link_info_cancel (directory);
mime_list_cancel (directory);
new_files_cancel (directory);
extension_info_cancel (directory);
thumbnail_cancel (directory);
mount_cancel (directory);
filesystem_info_cancel (directory);
/* We aren't waiting for anything any more. */
if (waiting_directories != NULL)
{
g_hash_table_remove (waiting_directories, directory);
}
/* Check if any directories should wake up. */
async_job_wake_up ();
}
static void
cancel_directory_count_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
if (directory->details->count_in_progress != NULL &&
directory->details->count_in_progress->count_file == file)
{
directory_count_cancel (directory);
}
}
static void
cancel_deep_counts_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
if (directory->details->deep_count_file == file)
{
deep_count_cancel (directory);
}
}
static void
cancel_mime_list_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
if (directory->details->mime_list_in_progress != NULL &&
directory->details->mime_list_in_progress->mime_list_file == file)
{
mime_list_cancel (directory);
}
}
static void
cancel_file_info_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
if (directory->details->get_info_file == file)
{
file_info_cancel (directory);
}
}
static void
cancel_thumbnail_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
if (directory->details->thumbnail_state != NULL &&
directory->details->thumbnail_state->file == file)
{
thumbnail_cancel (directory);
}
}
static void
cancel_mount_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
if (directory->details->mount_state != NULL &&
directory->details->mount_state->file == file)
{
mount_cancel (directory);
}
}
static void
cancel_filesystem_info_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
if (directory->details->filesystem_info_state != NULL &&
directory->details->filesystem_info_state->file == file)
{
filesystem_info_cancel (directory);
}
}
static void
cancel_link_info_for_file (NautilusDirectory *directory,
NautilusFile *file)
{
if (directory->details->link_info_read_state != NULL &&
directory->details->link_info_read_state->file == file)
{
link_info_cancel (directory);
}
}
static void
cancel_loading_attributes (NautilusDirectory *directory,
NautilusFileAttributes file_attributes)
{
Request request;
request = nautilus_directory_set_up_request (file_attributes);
if (REQUEST_WANTS_TYPE (request, REQUEST_DIRECTORY_COUNT))
{
directory_count_cancel (directory);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_DEEP_COUNT))
{
deep_count_cancel (directory);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MIME_LIST))
{
mime_list_cancel (directory);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_INFO))
{
file_info_cancel (directory);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILESYSTEM_INFO))
{
filesystem_info_cancel (directory);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_LINK_INFO))
{
link_info_cancel (directory);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_EXTENSION_INFO))
{
extension_info_cancel (directory);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_THUMBNAIL))
{
thumbnail_cancel (directory);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MOUNT))
{
mount_cancel (directory);
}
nautilus_directory_async_state_changed (directory);
}
void
nautilus_directory_cancel_loading_file_attributes (NautilusDirectory *directory,
NautilusFile *file,
NautilusFileAttributes file_attributes)
{
Request request;
nautilus_directory_remove_file_from_work_queue (directory, file);
request = nautilus_directory_set_up_request (file_attributes);
if (REQUEST_WANTS_TYPE (request, REQUEST_DIRECTORY_COUNT))
{
cancel_directory_count_for_file (directory, file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_DEEP_COUNT))
{
cancel_deep_counts_for_file (directory, file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MIME_LIST))
{
cancel_mime_list_for_file (directory, file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_INFO))
{
cancel_file_info_for_file (directory, file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILESYSTEM_INFO))
{
cancel_filesystem_info_for_file (directory, file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_LINK_INFO))
{
cancel_link_info_for_file (directory, file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_THUMBNAIL))
{
cancel_thumbnail_for_file (directory, file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MOUNT))
{
cancel_mount_for_file (directory, file);
}
nautilus_directory_async_state_changed (directory);
}
void
nautilus_directory_add_file_to_work_queue (NautilusDirectory *directory,
NautilusFile *file)
{
g_return_if_fail (file->details->directory == directory);
nautilus_file_queue_enqueue (directory->details->high_priority_queue,
file);
}
static void
add_all_files_to_work_queue (NautilusDirectory *directory)
{
GList *node;
NautilusFile *file;
for (node = directory->details->file_list; node != NULL; node = node->next)
{
file = NAUTILUS_FILE (node->data);
nautilus_directory_add_file_to_work_queue (directory, file);
}
}
void
nautilus_directory_remove_file_from_work_queue (NautilusDirectory *directory,
NautilusFile *file)
{
nautilus_file_queue_remove (directory->details->high_priority_queue,
file);
nautilus_file_queue_remove (directory->details->low_priority_queue,
file);
nautilus_file_queue_remove (directory->details->extension_queue,
file);
}
static void
move_file_to_low_priority_queue (NautilusDirectory *directory,
NautilusFile *file)
{
/* Must add before removing to avoid ref underflow */
nautilus_file_queue_enqueue (directory->details->low_priority_queue,
file);
nautilus_file_queue_remove (directory->details->high_priority_queue,
file);
}
static void
move_file_to_extension_queue (NautilusDirectory *directory,
NautilusFile *file)
{
/* Must add before removing to avoid ref underflow */
nautilus_file_queue_enqueue (directory->details->extension_queue,
file);
nautilus_file_queue_remove (directory->details->low_priority_queue,
file);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2802_0 |
crossvul-cpp_data_good_2891_5 | /* Basic authentication token and access key management
*
* Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"
struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);
unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */
unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
unsigned int key_quota_maxkeys = 200; /* general key count quota */
unsigned int key_quota_maxbytes = 20000; /* general key space quota */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);
/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
printk("__key_check: key %p {%08x} should be {%08x}\n",
key, key->magic, KEY_DEBUG_MAGIC);
BUG();
}
#endif
/*
* Get the key quota record for a user, allocating a new record if one doesn't
* already exist.
*/
struct key_user *key_user_lookup(kuid_t uid)
{
struct key_user *candidate = NULL, *user;
struct rb_node *parent, **p;
try_again:
parent = NULL;
p = &key_user_tree.rb_node;
spin_lock(&key_user_lock);
/* search the tree for a user record with a matching UID */
while (*p) {
parent = *p;
user = rb_entry(parent, struct key_user, node);
if (uid_lt(uid, user->uid))
p = &(*p)->rb_left;
else if (uid_gt(uid, user->uid))
p = &(*p)->rb_right;
else
goto found;
}
/* if we get here, we failed to find a match in the tree */
if (!candidate) {
/* allocate a candidate user record if we don't already have
* one */
spin_unlock(&key_user_lock);
user = NULL;
candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
if (unlikely(!candidate))
goto out;
/* the allocation may have scheduled, so we need to repeat the
* search lest someone else added the record whilst we were
* asleep */
goto try_again;
}
/* if we get here, then the user record still hadn't appeared on the
* second pass - so we use the candidate record */
refcount_set(&candidate->usage, 1);
atomic_set(&candidate->nkeys, 0);
atomic_set(&candidate->nikeys, 0);
candidate->uid = uid;
candidate->qnkeys = 0;
candidate->qnbytes = 0;
spin_lock_init(&candidate->lock);
mutex_init(&candidate->cons_lock);
rb_link_node(&candidate->node, parent, p);
rb_insert_color(&candidate->node, &key_user_tree);
spin_unlock(&key_user_lock);
user = candidate;
goto out;
/* okay - we found a user record for this UID */
found:
refcount_inc(&user->usage);
spin_unlock(&key_user_lock);
kfree(candidate);
out:
return user;
}
/*
* Dispose of a user structure
*/
void key_user_put(struct key_user *user)
{
if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
rb_erase(&user->node, &key_user_tree);
spin_unlock(&key_user_lock);
kfree(user);
}
}
/*
* Allocate a serial number for a key. These are assigned randomly to avoid
* security issues through covert channel problems.
*/
static inline void key_alloc_serial(struct key *key)
{
struct rb_node *parent, **p;
struct key *xkey;
/* propose a random serial number and look for a hole for it in the
* serial number tree */
do {
get_random_bytes(&key->serial, sizeof(key->serial));
key->serial >>= 1; /* negative numbers are not permitted */
} while (key->serial < 3);
spin_lock(&key_serial_lock);
attempt_insertion:
parent = NULL;
p = &key_serial_tree.rb_node;
while (*p) {
parent = *p;
xkey = rb_entry(parent, struct key, serial_node);
if (key->serial < xkey->serial)
p = &(*p)->rb_left;
else if (key->serial > xkey->serial)
p = &(*p)->rb_right;
else
goto serial_exists;
}
/* we've found a suitable hole - arrange for this key to occupy it */
rb_link_node(&key->serial_node, parent, p);
rb_insert_color(&key->serial_node, &key_serial_tree);
spin_unlock(&key_serial_lock);
return;
/* we found a key with the proposed serial number - walk the tree from
* that point looking for the next unused serial number */
serial_exists:
for (;;) {
key->serial++;
if (key->serial < 3) {
key->serial = 3;
goto attempt_insertion;
}
parent = rb_next(parent);
if (!parent)
goto attempt_insertion;
xkey = rb_entry(parent, struct key, serial_node);
if (key->serial < xkey->serial)
goto attempt_insertion;
}
}
/**
* key_alloc - Allocate a key of the specified type.
* @type: The type of key to allocate.
* @desc: The key description to allow the key to be searched out.
* @uid: The owner of the new key.
* @gid: The group ID for the new key's group permissions.
* @cred: The credentials specifying UID namespace.
* @perm: The permissions mask of the new key.
* @flags: Flags specifying quota properties.
* @restrict_link: Optional link restriction for new keyrings.
*
* Allocate a key of the specified type with the attributes given. The key is
* returned in an uninstantiated state and the caller needs to instantiate the
* key before returning.
*
* The restrict_link structure (if not NULL) will be freed when the
* keyring is destroyed, so it must be dynamically allocated.
*
* The user's key count quota is updated to reflect the creation of the key and
* the user's key data quota has the default for the key type reserved. The
* instantiation function should amend this as necessary. If insufficient
* quota is available, -EDQUOT will be returned.
*
* The LSM security modules can prevent a key being created, in which case
* -EACCES will be returned.
*
* Returns a pointer to the new key if successful and an error code otherwise.
*
* Note that the caller needs to ensure the key type isn't uninstantiated.
* Internally this can be done by locking key_types_sem. Externally, this can
* be done by either never unregistering the key type, or making sure
* key_alloc() calls don't race with module unloading.
*/
struct key *key_alloc(struct key_type *type, const char *desc,
kuid_t uid, kgid_t gid, const struct cred *cred,
key_perm_t perm, unsigned long flags,
struct key_restriction *restrict_link)
{
struct key_user *user = NULL;
struct key *key;
size_t desclen, quotalen;
int ret;
key = ERR_PTR(-EINVAL);
if (!desc || !*desc)
goto error;
if (type->vet_description) {
ret = type->vet_description(desc);
if (ret < 0) {
key = ERR_PTR(ret);
goto error;
}
}
desclen = strlen(desc);
quotalen = desclen + 1 + type->def_datalen;
/* get hold of the key tracking for this user */
user = key_user_lookup(uid);
if (!user)
goto no_memory_1;
/* check that the user's quota permits allocation of another key and
* its description */
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&user->lock);
if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
if (user->qnkeys + 1 >= maxkeys ||
user->qnbytes + quotalen >= maxbytes ||
user->qnbytes + quotalen < user->qnbytes)
goto no_quota;
}
user->qnkeys++;
user->qnbytes += quotalen;
spin_unlock(&user->lock);
}
/* allocate and initialise the key and its description */
key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
if (!key)
goto no_memory_2;
key->index_key.desc_len = desclen;
key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
if (!key->index_key.description)
goto no_memory_3;
refcount_set(&key->usage, 1);
init_rwsem(&key->sem);
lockdep_set_class(&key->sem, &type->lock_class);
key->index_key.type = type;
key->user = user;
key->quotalen = quotalen;
key->datalen = type->def_datalen;
key->uid = uid;
key->gid = gid;
key->perm = perm;
key->restrict_link = restrict_link;
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
if (flags & KEY_ALLOC_BUILT_IN)
key->flags |= 1 << KEY_FLAG_BUILTIN;
if (flags & KEY_ALLOC_UID_KEYRING)
key->flags |= 1 << KEY_FLAG_UID_KEYRING;
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
#endif
/* let the security module know about the key */
ret = security_key_alloc(key, cred, flags);
if (ret < 0)
goto security_error;
/* publish the key by giving it a serial number */
atomic_inc(&user->nkeys);
key_alloc_serial(key);
error:
return key;
security_error:
kfree(key->description);
kmem_cache_free(key_jar, key);
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
spin_lock(&user->lock);
user->qnkeys--;
user->qnbytes -= quotalen;
spin_unlock(&user->lock);
}
key_user_put(user);
key = ERR_PTR(ret);
goto error;
no_memory_3:
kmem_cache_free(key_jar, key);
no_memory_2:
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
spin_lock(&user->lock);
user->qnkeys--;
user->qnbytes -= quotalen;
spin_unlock(&user->lock);
}
key_user_put(user);
no_memory_1:
key = ERR_PTR(-ENOMEM);
goto error;
no_quota:
spin_unlock(&user->lock);
key_user_put(user);
key = ERR_PTR(-EDQUOT);
goto error;
}
EXPORT_SYMBOL(key_alloc);
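/*
 * Illustrative sketch (not part of the original file): a minimal caller
 * pairing key_alloc() with key_instantiate_and_link(). The "user" key
 * type (from <keys/user-type.h>), description and permission mask are
 * assumptions for the example.
 */
static struct key *example_alloc_user_key(const struct cred *cred,
					  const void *data, size_t datalen)
{
	struct key *key;
	int ret;

	key = key_alloc(&key_type_user, "example:demo",
			cred->fsuid, cred->fsgid, cred,
			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			KEY_ALLOC_NOT_IN_QUOTA, NULL);
	if (IS_ERR(key))
		return key;

	/* Attach the payload; on failure the key stays uninstantiated
	 * and we must drop the reference key_alloc() gave us. */
	ret = key_instantiate_and_link(key, data, datalen, NULL, NULL);
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}
	return key;
}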
/**
* key_payload_reserve - Adjust data quota reservation for the key's payload
* @key: The key to make the reservation for.
* @datalen: The amount of data payload the caller now wants.
*
* Adjust the amount of the owning user's key data quota that a key reserves.
* If the amount is increased, then -EDQUOT may be returned if there isn't
* enough free quota available.
*
* If successful, 0 is returned.
*/
int key_payload_reserve(struct key *key, size_t datalen)
{
int delta = (int)datalen - key->datalen;
int ret = 0;
key_check(key);
/* contemplate the quota adjustment */
if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&key->user->lock);
if (delta > 0 &&
(key->user->qnbytes + delta >= maxbytes ||
key->user->qnbytes + delta < key->user->qnbytes)) {
ret = -EDQUOT;
}
else {
key->user->qnbytes += delta;
key->quotalen += delta;
}
spin_unlock(&key->user->lock);
}
/* change the recorded data length if that didn't generate an error */
if (ret == 0)
key->datalen = datalen;
return ret;
}
EXPORT_SYMBOL(key_payload_reserve);
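/*
 * Illustrative sketch (assumption, not from this file): a key type's
 * ->update() op re-reserving quota for the incoming payload before
 * committing it. The payload swap itself is elided.
 */
static int example_update(struct key *key, struct key_preparsed_payload *prep)
{
	int ret;

	/* Fails with -EDQUOT if the owner's byte quota cannot absorb the
	 * difference between the old and new payload sizes. */
	ret = key_payload_reserve(key, prep->datalen);
	if (ret < 0)
		return ret;

	/* ... swap in the prepared payload under key->sem here ... */
	return 0;
}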
/*
* Change the key state to being instantiated.
*/
static void mark_key_instantiated(struct key *key, int reject_error)
{
/* Commit the payload before setting the state; barrier versus
* key_read_state().
*/
smp_store_release(&key->state,
(reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
}
/*
* Instantiate a key and link it into the target keyring atomically. Must be
* called with the target keyring's semaphore writelocked. The target key's
* semaphore need not be locked as instantiation is serialised by
* key_construction_mutex.
*/
static int __key_instantiate_and_link(struct key *key,
struct key_preparsed_payload *prep,
struct key *keyring,
struct key *authkey,
struct assoc_array_edit **_edit)
{
int ret, awaken;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (key->state == KEY_IS_UNINSTANTIATED) {
/* instantiate the key */
ret = key->type->instantiate(key, prep);
if (ret == 0) {
/* mark the key as being instantiated */
atomic_inc(&key->user->nikeys);
mark_key_instantiated(key, 0);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
/* and link it into the destination keyring */
if (keyring) {
if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
set_bit(KEY_FLAG_KEEP, &key->flags);
__key_link(key, _edit);
}
/* disable the authorisation key */
if (authkey)
key_revoke(authkey);
if (prep->expiry != TIME_T_MAX) {
key->expiry = prep->expiry;
key_schedule_gc(prep->expiry + key_gc_delay);
}
}
}
mutex_unlock(&key_construction_mutex);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret;
}
/**
* key_instantiate_and_link - Instantiate a key and link it into the keyring.
* @key: The key to instantiate.
* @data: The data to use to instantiate the keyring.
* @datalen: The length of @data.
* @keyring: Keyring to create a link in on success (or NULL).
* @authkey: The authorisation token permitting instantiation.
*
* Instantiate a key that's in the uninstantiated state using the provided data
* and, if successful, link it in to the destination keyring if one is
* supplied.
*
* If successful, 0 is returned, the authorisation token is revoked and anyone
* waiting for the key is woken up. If the key was already instantiated,
* -EBUSY will be returned.
*/
int key_instantiate_and_link(struct key *key,
const void *data,
size_t datalen,
struct key *keyring,
struct key *authkey)
{
struct key_preparsed_payload prep;
struct assoc_array_edit *edit;
int ret;
memset(&prep, 0, sizeof(prep));
prep.data = data;
prep.datalen = datalen;
prep.quotalen = key->type->def_datalen;
prep.expiry = TIME_T_MAX;
if (key->type->preparse) {
ret = key->type->preparse(&prep);
if (ret < 0)
goto error;
}
if (keyring) {
ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret < 0)
goto error;
if (keyring->restrict_link && keyring->restrict_link->check) {
struct key_restriction *keyres = keyring->restrict_link;
ret = keyres->check(keyring, key->type, &prep.payload,
keyres->key);
if (ret < 0)
goto error_link_end;
}
}
ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
error_link_end:
if (keyring)
__key_link_end(keyring, &key->index_key, edit);
error:
if (key->type->preparse)
key->type->free_preparse(&prep);
return ret;
}
EXPORT_SYMBOL(key_instantiate_and_link);
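/*
 * Illustrative sketch (assumption): finishing a request_key() upcall by
 * instantiating the under-construction key into a destination keyring.
 * Success also revokes the authorisation token and wakes any waiters.
 */
static int example_finish_upcall(struct key *key, struct key *dest_keyring,
				 struct key *authkey,
				 const void *payload, size_t plen)
{
	/* Returns -EBUSY if a racing constructor got there first. */
	return key_instantiate_and_link(key, payload, plen,
					dest_keyring, authkey);
}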
/**
* key_reject_and_link - Negatively instantiate a key and link it into the keyring.
* @key: The key to instantiate.
* @timeout: The timeout on the negative key.
* @error: The error to return when the key is hit.
* @keyring: Keyring to create a link in on success (or NULL).
* @authkey: The authorisation token permitting instantiation.
*
* Negatively instantiate a key that's in the uninstantiated state and, if
* successful, set its timeout and stored error and link it in to the
* destination keyring if one is supplied. The key and any links to the key
* will be automatically garbage collected after the timeout expires.
*
* Negative keys are used to rate limit repeated request_key() calls by causing
* them to return the stored error code (typically ENOKEY) until the negative
* key expires.
*
* If successful, 0 is returned, the authorisation token is revoked and anyone
* waiting for the key is woken up. If the key was already instantiated,
* -EBUSY will be returned.
*/
int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
struct key *authkey)
{
struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
if (keyring) {
if (keyring->restrict_link)
return -EPERM;
link_ret = __key_link_begin(keyring, &key->index_key, &edit);
}
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (key->state == KEY_IS_UNINSTANTIATED) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
mark_key_instantiated(key, -error);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
ret = 0;
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
__key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
key_revoke(authkey);
}
mutex_unlock(&key_construction_mutex);
if (keyring && link_ret == 0)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);
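/*
 * Illustrative sketch (assumption): negatively instantiating a key so
 * that lookups fail fast with -ENOKEY for the next 30 seconds instead
 * of triggering repeated upcalls.
 */
static int example_reject_upcall(struct key *key, struct key *dest_keyring,
				 struct key *authkey)
{
	return key_reject_and_link(key, 30 /* seconds */, ENOKEY,
				   dest_keyring, authkey);
}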
/**
* key_put - Discard a reference to a key.
* @key: The key to discard a reference from.
*
* Discard a reference to a key, and when all the references are gone, we
* schedule the cleanup task to come and pull it out of the tree in process
* context at some later time.
*/
void key_put(struct key *key)
{
if (key) {
key_check(key);
if (refcount_dec_and_test(&key->usage))
schedule_work(&key_gc_work);
}
}
EXPORT_SYMBOL(key_put);
/*
* Find a key by its serial number.
*/
struct key *key_lookup(key_serial_t id)
{
struct rb_node *n;
struct key *key;
spin_lock(&key_serial_lock);
/* search the tree for the specified key */
n = key_serial_tree.rb_node;
while (n) {
key = rb_entry(n, struct key, serial_node);
if (id < key->serial)
n = n->rb_left;
else if (id > key->serial)
n = n->rb_right;
else
goto found;
}
not_found:
key = ERR_PTR(-ENOKEY);
goto error;
found:
/* A key is allowed to be looked up only if someone still owns a
* reference to it - otherwise it's awaiting the gc.
*/
if (!refcount_inc_not_zero(&key->usage))
goto not_found;
error:
spin_unlock(&key_serial_lock);
return key;
}
/*
* Find and lock the specified key type against removal.
*
* We return with the sem read-locked if successful. If the type wasn't
* available -ENOKEY is returned instead.
*/
struct key_type *key_type_lookup(const char *type)
{
struct key_type *ktype;
down_read(&key_types_sem);
/* look up the key type to see if it's one of the registered kernel
* types */
list_for_each_entry(ktype, &key_types_list, link) {
if (strcmp(ktype->name, type) == 0)
goto found_kernel_type;
}
up_read(&key_types_sem);
ktype = ERR_PTR(-ENOKEY);
found_kernel_type:
return ktype;
}
void key_set_timeout(struct key *key, unsigned timeout)
{
struct timespec now;
time_t expiry = 0;
/* make the changes with the locks held to prevent races */
down_write(&key->sem);
if (timeout > 0) {
now = current_kernel_time();
expiry = now.tv_sec + timeout;
}
key->expiry = expiry;
key_schedule_gc(key->expiry + key_gc_delay);
up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);
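/*
* Usage sketch (illustrative): give a key a five-minute lifetime, or
* clear an existing expiry by passing a timeout of zero.
*
*	key_set_timeout(key, 5 * 60);	(expires in five minutes)
*	key_set_timeout(key, 0);	(no expiry)
*/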
/*
* Unlock a key type locked by key_type_lookup().
*/
void key_type_put(struct key_type *ktype)
{
up_read(&key_types_sem);
}
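/*
* Usage sketch (illustrative): key_type_lookup() returns with
* key_types_sem read-locked, so a successful lookup must be paired with
* key_type_put() to release the semaphore and unpin the type.
*
*	struct key_type *ktype = key_type_lookup("user");
*	if (IS_ERR(ktype))
*		return PTR_ERR(ktype);
*	...
*	key_type_put(ktype);
*/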
/*
* Attempt to update an existing key.
*
* The key is given to us with an incremented refcount that we need to discard
* if we get an error.
*/
static inline key_ref_t __key_update(key_ref_t key_ref,
struct key_preparsed_payload *prep)
{
struct key *key = key_ref_to_ptr(key_ref);
int ret;
/* need write permission on the key to update it */
ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
goto error;
ret = -EEXIST;
if (!key->type->update)
goto error;
down_write(&key->sem);
ret = key->type->update(key, prep);
if (ret == 0)
/* Updating a negative key positively instantiates it */
mark_key_instantiated(key, 0);
up_write(&key->sem);
if (ret < 0)
goto error;
out:
return key_ref;
error:
key_put(key);
key_ref = ERR_PTR(ret);
goto out;
}
/**
* key_create_or_update - Update or create and instantiate a key.
* @keyring_ref: A pointer to the destination keyring with possession flag.
* @type: The type of key.
* @description: The searchable description for the key.
* @payload: The data to use to instantiate or update the key.
* @plen: The length of @payload.
* @perm: The permissions mask for a new key.
* @flags: The quota flags for a new key.
*
* Search the destination keyring for a key of the same description and if one
* is found, update it, otherwise create and instantiate a new one and create a
* link to it from that keyring.
*
* If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
* concocted.
*
* Returns a pointer to the new key if successful, -ENODEV if the key type
* wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
* caller isn't permitted to modify the keyring or the LSM did not permit
* creation of the key.
*
* On success, the possession flag from the keyring ref will be tacked on to
* the key ref before it is returned.
*/
key_ref_t key_create_or_update(key_ref_t keyring_ref,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags)
{
struct keyring_index_key index_key = {
.description = description,
};
struct key_preparsed_payload prep;
struct assoc_array_edit *edit;
const struct cred *cred = current_cred();
struct key *keyring, *key = NULL;
key_ref_t key_ref;
int ret;
struct key_restriction *restrict_link = NULL;
/* look up the key type to see if it's one of the registered kernel
* types */
index_key.type = key_type_lookup(type);
if (IS_ERR(index_key.type)) {
key_ref = ERR_PTR(-ENODEV);
goto error;
}
key_ref = ERR_PTR(-EINVAL);
if (!index_key.type->instantiate ||
(!index_key.description && !index_key.type->preparse))
goto error_put_type;
keyring = key_ref_to_ptr(keyring_ref);
key_check(keyring);
key_ref = ERR_PTR(-EPERM);
if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
restrict_link = keyring->restrict_link;
key_ref = ERR_PTR(-ENOTDIR);
if (keyring->type != &key_type_keyring)
goto error_put_type;
memset(&prep, 0, sizeof(prep));
prep.data = payload;
prep.datalen = plen;
prep.quotalen = index_key.type->def_datalen;
prep.expiry = TIME_T_MAX;
if (index_key.type->preparse) {
ret = index_key.type->preparse(&prep);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
if (!index_key.description)
index_key.description = prep.description;
key_ref = ERR_PTR(-EINVAL);
if (!index_key.description)
goto error_free_prep;
}
index_key.desc_len = strlen(index_key.description);
ret = __key_link_begin(keyring, &index_key, &edit);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
if (restrict_link && restrict_link->check) {
ret = restrict_link->check(keyring, index_key.type,
&prep.payload, restrict_link->key);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_link_end;
}
}
/* if we're going to allocate a new key, we're going to have
* to modify the keyring */
ret = key_permission(keyring_ref, KEY_NEED_WRITE);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_link_end;
}
/* if it's possible to update this type of key, search for an existing
* key of the same type and description in the destination keyring and
* update that instead if possible
*/
if (index_key.type->update) {
key_ref = find_key_to_update(keyring_ref, &index_key);
if (key_ref)
goto found_matching_key;
}
/* if the client doesn't provide, decide on the permissions we want */
if (perm == KEY_PERM_UNDEF) {
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
perm |= KEY_USR_VIEW;
if (index_key.type->read)
perm |= KEY_POS_READ;
if (index_key.type == &key_type_keyring ||
index_key.type->update)
perm |= KEY_POS_WRITE;
}
/* allocate a new key */
key = key_alloc(index_key.type, index_key.description,
cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error_link_end;
}
/* instantiate it and link it into the target keyring */
ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
goto error_link_end;
}
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_link_end:
__key_link_end(keyring, &index_key, edit);
error_free_prep:
if (index_key.type->preparse)
index_key.type->free_preparse(&prep);
error_put_type:
key_type_put(index_key.type);
error:
return key_ref;
found_matching_key:
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
__key_link_end(keyring, &index_key, edit);
key_ref = __key_update(key_ref, &prep);
goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);
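/*
* Usage sketch (illustrative): add or refresh a "user" key in a
* destination keyring, letting the kernel concoct default permissions.
* "keyring_ref", "payload" and "plen" are hypothetical caller state;
* the returned reference must be dropped with key_ref_put().
*
*	key_ref_t kref = key_create_or_update(keyring_ref, "user",
*					      "example:desc", payload, plen,
*					      KEY_PERM_UNDEF,
*					      KEY_ALLOC_IN_QUOTA);
*	if (IS_ERR(kref))
*		return PTR_ERR(kref);
*	key_ref_put(kref);
*/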
/**
* key_update - Update a key's contents.
* @key_ref: The pointer (plus possession flag) to the key.
* @payload: The data to be used to update the key.
* @plen: The length of @payload.
*
* Attempt to update the contents of a key with the given payload data. The
* caller must be granted Write permission on the key. Negative keys can be
* instantiated by this method.
*
* Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
* type does not support updating. The key type may return other errors.
*/
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
struct key_preparsed_payload prep;
struct key *key = key_ref_to_ptr(key_ref);
int ret;
key_check(key);
/* the key must be writable */
ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
return ret;
/* attempt to update it if supported */
if (!key->type->update)
return -EOPNOTSUPP;
memset(&prep, 0, sizeof(prep));
prep.data = payload;
prep.datalen = plen;
prep.quotalen = key->type->def_datalen;
prep.expiry = TIME_T_MAX;
if (key->type->preparse) {
ret = key->type->preparse(&prep);
if (ret < 0)
goto error;
}
down_write(&key->sem);
ret = key->type->update(key, &prep);
if (ret == 0)
/* Updating a negative key positively instantiates it */
mark_key_instantiated(key, 0);
up_write(&key->sem);
error:
if (key->type->preparse)
key->type->free_preparse(&prep);
return ret;
}
EXPORT_SYMBOL(key_update);
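/*
* Usage sketch (illustrative): replace the payload of a key the caller
* already holds a reference to. "key_ref", "new_payload" and "new_plen"
* are hypothetical.
*
*	ret = key_update(key_ref, new_payload, new_plen);
*	if (ret == -EOPNOTSUPP)
*		pr_debug("key type does not support update\n");
*/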
/**
* key_revoke - Revoke a key.
* @key: The key to be revoked.
*
* Mark a key as being revoked and ask the type to free up its resources. The
* revocation timeout is set and the key and all its links will be
* automatically garbage collected after key_gc_delay amount of time if they
* are not manually dealt with first.
*/
void key_revoke(struct key *key)
{
struct timespec now;
time_t time;
key_check(key);
/* make sure no one's trying to change or use the key when we mark it
* - we tell lockdep that we might nest because we might be revoking an
* authorisation key whilst holding the sem on a key we've just
* instantiated
*/
down_write_nested(&key->sem, 1);
if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
key->type->revoke)
key->type->revoke(key);
/* set the death time to no more than the expiry time */
now = current_kernel_time();
time = now.tv_sec;
if (key->revoked_at == 0 || key->revoked_at > time) {
key->revoked_at = time;
key_schedule_gc(key->revoked_at + key_gc_delay);
}
up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);
/**
* key_invalidate - Invalidate a key.
* @key: The key to be invalidated.
*
* Mark a key as being invalidated and have it cleaned up immediately. The key
* is ignored by all searches and other operations from this point.
*/
void key_invalidate(struct key *key)
{
kenter("%d", key_serial(key));
key_check(key);
if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
down_write_nested(&key->sem, 1);
if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
key_schedule_gc_links();
up_write(&key->sem);
}
}
EXPORT_SYMBOL(key_invalidate);
/**
* generic_key_instantiate - Simple instantiation of a key from preparsed data
* @key: The key to be instantiated
* @prep: The preparsed data to load.
*
* Instantiate a key from preparsed data. We assume we can just copy the data
* in directly and clear the old pointers.
*
* This can be pointed to directly by the key type instantiate op pointer.
*/
int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
int ret;
pr_devel("==>%s()\n", __func__);
ret = key_payload_reserve(key, prep->quotalen);
if (ret == 0) {
rcu_assign_keypointer(key, prep->payload.data[0]);
key->payload.data[1] = prep->payload.data[1];
key->payload.data[2] = prep->payload.data[2];
key->payload.data[3] = prep->payload.data[3];
prep->payload.data[0] = NULL;
prep->payload.data[1] = NULL;
prep->payload.data[2] = NULL;
prep->payload.data[3] = NULL;
}
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL(generic_key_instantiate);
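/*
* Usage sketch (illustrative): a hypothetical key type whose preparse op
* fills in prep->payload can point its instantiate op straight at
* generic_key_instantiate(); the example_* ops are assumed to exist.
*
*	static struct key_type key_type_example = {
*		.name		= "example",
*		.preparse	= example_preparse,
*		.free_preparse	= example_free_preparse,
*		.instantiate	= generic_key_instantiate,
*		.destroy	= example_destroy,
*	};
*/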
/**
* register_key_type - Register a type of key.
* @ktype: The new key type.
*
* Register a new key type.
*
* Returns 0 on success or -EEXIST if a type of this name already exists.
*/
int register_key_type(struct key_type *ktype)
{
struct key_type *p;
int ret;
memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
ret = -EEXIST;
down_write(&key_types_sem);
/* disallow key types with the same name */
list_for_each_entry(p, &key_types_list, link) {
if (strcmp(p->name, ktype->name) == 0)
goto out;
}
/* store the type */
list_add(&ktype->link, &key_types_list);
pr_notice("Key type %s registered\n", ktype->name);
ret = 0;
out:
up_write(&key_types_sem);
return ret;
}
EXPORT_SYMBOL(register_key_type);
/**
* unregister_key_type - Unregister a type of key.
* @ktype: The key type.
*
* Unregister a key type and mark all the extant keys of this type as dead.
* Those keys of this type are then destroyed to get rid of their payloads and
* they and their links will be garbage collected as soon as possible.
*/
void unregister_key_type(struct key_type *ktype)
{
down_write(&key_types_sem);
list_del_init(&ktype->link);
downgrade_write(&key_types_sem);
key_gc_keytype(ktype);
pr_notice("Key type %s unregistered\n", ktype->name);
up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
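/*
* Usage sketch (illustrative): a module typically registers its key type
* on load and unregisters it on unload, reusing the hypothetical
* key_type_example from the sketch above.
*
*	static int __init example_key_init(void)
*	{
*		return register_key_type(&key_type_example);
*	}
*
*	static void __exit example_key_exit(void)
*	{
*		unregister_key_type(&key_type_example);
*	}
*
*	module_init(example_key_init);
*	module_exit(example_key_exit);
*/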
/*
* Initialise the key management state.
*/
void __init key_init(void)
{
/* allocate a slab in which we can store keys */
key_jar = kmem_cache_create("key_jar", sizeof(struct key),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
/* add the special key types */
list_add_tail(&key_type_keyring.link, &key_types_list);
list_add_tail(&key_type_dead.link, &key_types_list);
list_add_tail(&key_type_user.link, &key_types_list);
list_add_tail(&key_type_logon.link, &key_types_list);
/* record the root user tracking */
rb_link_node(&root_key_user.node,
NULL,
&key_user_tree.rb_node);
rb_insert_color(&root_key_user.node,
&key_user_tree);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2891_5 |
crossvul-cpp_data_good_5107_0 | /* packet-umts_fp.c
* Routines for UMTS FP disassembly
*
* Martin Mathieson
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <epan/packet.h>
#include <epan/expert.h>
#include <epan/prefs.h>
#include <epan/uat.h>
#include <epan/conversation.h>
#include <epan/addr_resolv.h>
#include <epan/proto_data.h>
#include <wsutil/crc7.h> /* For FP data header and control frame CRC. */
#include <wsutil/crc16-plain.h> /* For FP Payload CRC. */
#include <wsutil/crc11.h> /* For FP EDCH header CRC. */
#include "packet-umts_fp.h"
#include "packet-nbap.h"
#include "packet-rrc.h"
/* The Frame Protocol (FP) is described in:
* 3GPP TS 25.427 (for dedicated channels)
* 3GPP TS 25.435 (for common/shared channels)
*
* TODO:
* - IUR interface-specific formats
* - do CRC verification before further parsing
* - set the logical channel properly for non-multiplexed channels
*   (channels that don't have the C/T flag); this should be based
*   on the RRC message RadioBearerSetup.
*/
void proto_register_fp(void);
void proto_reg_handoff_fp(void);
#define UMTS_FP_IPV4 1
#define UMTS_FP_IPV6 2
/* Initialize the protocol and registered fields. */
int proto_fp = -1;
extern int proto_umts_mac;
extern int proto_rlc;
static int hf_fp_release = -1;
static int hf_fp_release_version = -1;
static int hf_fp_release_year = -1;
static int hf_fp_release_month = -1;
static int hf_fp_channel_type = -1;
static int hf_fp_division = -1;
static int hf_fp_direction = -1;
static int hf_fp_ddi_config = -1;
static int hf_fp_ddi_config_ddi = -1;
static int hf_fp_ddi_config_macd_pdu_size = -1;
static int hf_fp_header_crc = -1;
static int hf_fp_ft = -1;
static int hf_fp_cfn = -1;
static int hf_fp_pch_cfn = -1;
static int hf_fp_pch_toa = -1;
static int hf_fp_cfn_control = -1;
static int hf_fp_toa = -1;
static int hf_fp_tfi = -1;
static int hf_fp_usch_tfi = -1;
static int hf_fp_cpch_tfi = -1;
static int hf_fp_propagation_delay = -1;
static int hf_fp_tb = -1;
static int hf_fp_chan_zero_tbs = -1;
static int hf_fp_received_sync_ul_timing_deviation = -1;
static int hf_fp_pch_pi = -1;
static int hf_fp_pch_tfi = -1;
static int hf_fp_fach_tfi = -1;
static int hf_fp_transmit_power_level = -1;
static int hf_fp_paging_indication_bitmap = -1;
static int hf_fp_pdsch_set_id = -1;
static int hf_fp_rx_timing_deviation = -1;
static int hf_fp_dch_e_rucch_flag = -1;
static int hf_fp_dch_control_frame_type = -1;
static int hf_fp_dch_rx_timing_deviation = -1;
static int hf_fp_quality_estimate = -1;
static int hf_fp_payload_crc = -1;
static int hf_fp_edch_header_crc = -1;
static int hf_fp_edch_fsn = -1;
static int hf_fp_edch_subframe = -1;
static int hf_fp_edch_number_of_subframes = -1;
static int hf_fp_edch_harq_retransmissions = -1;
static int hf_fp_edch_subframe_number = -1;
static int hf_fp_edch_number_of_mac_es_pdus = -1;
static int hf_fp_edch_ddi = -1;
static int hf_fp_edch_subframe_header = -1;
static int hf_fp_edch_number_of_mac_d_pdus = -1;
static int hf_fp_edch_pdu_padding = -1;
static int hf_fp_edch_tsn = -1;
static int hf_fp_edch_mac_es_pdu = -1;
static int hf_fp_edch_user_buffer_size = -1;
static int hf_fp_edch_no_macid_sdus = -1;
static int hf_fp_edch_number_of_mac_is_pdus = -1;
static int hf_fp_edch_mac_is_pdu = -1;
static int hf_fp_edch_e_rnti = -1;
static int hf_fp_edch_macis_descriptors = -1;
static int hf_fp_edch_macis_lchid = -1;
static int hf_fp_edch_macis_length = -1;
static int hf_fp_edch_macis_flag = -1;
static int hf_fp_frame_seq_nr = -1;
static int hf_fp_hsdsch_pdu_block_header = -1;
/* static int hf_fp_hsdsch_pdu_block = -1; */
static int hf_fp_flush = -1;
static int hf_fp_fsn_drt_reset = -1;
static int hf_fp_drt_indicator = -1;
static int hf_fp_fach_indicator = -1;
static int hf_fp_total_pdu_blocks = -1;
static int hf_fp_drt = -1;
static int hf_fp_hrnti = -1;
static int hf_fp_rach_measurement_result = -1;
static int hf_fp_lchid = -1;
static int hf_fp_pdu_length_in_block = -1;
static int hf_fp_pdus_in_block = -1;
static int hf_fp_cmch_pi = -1;
static int hf_fp_user_buffer_size = -1;
static int hf_fp_hsdsch_credits = -1;
static int hf_fp_hsdsch_max_macd_pdu_len = -1;
static int hf_fp_hsdsch_max_macdc_pdu_len = -1;
static int hf_fp_hsdsch_interval = -1;
static int hf_fp_hsdsch_calculated_rate = -1;
static int hf_fp_hsdsch_unlimited_rate = -1;
static int hf_fp_hsdsch_repetition_period = -1;
static int hf_fp_hsdsch_data_padding = -1;
static int hf_fp_hsdsch_new_ie_flags = -1;
static int hf_fp_hsdsch_new_ie_flag[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int hf_fp_hsdsch_drt = -1;
static int hf_fp_hsdsch_entity = -1;
static int hf_fp_hsdsch_physical_layer_category = -1;
static int hf_fp_timing_advance = -1;
static int hf_fp_num_of_pdu = -1;
static int hf_fp_mac_d_pdu_len = -1;
static int hf_fp_mac_d_pdu = -1;
static int hf_fp_data = -1;
static int hf_fp_crcis = -1;
static int hf_fp_crci[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int hf_fp_common_control_frame_type = -1;
static int hf_fp_t1 = -1;
static int hf_fp_t2 = -1;
static int hf_fp_t3 = -1;
static int hf_fp_ul_sir_target = -1;
static int hf_fp_pusch_set_id = -1;
static int hf_fp_activation_cfn = -1;
static int hf_fp_duration = -1;
static int hf_fp_power_offset = -1;
static int hf_fp_code_number = -1;
static int hf_fp_spreading_factor = -1;
static int hf_fp_mc_info = -1;
static int hf_fp_rach_new_ie_flags = -1;
static int hf_fp_rach_new_ie_flag_unused[7] = {-1, -1, -1, -1, -1, -1, -1 };
static int hf_fp_rach_ext_propagation_delay_present = -1;
static int hf_fp_rach_cell_portion_id_present = -1;
static int hf_fp_rach_angle_of_arrival_present = -1;
static int hf_fp_rach_ext_rx_sync_ul_timing_deviation_present = -1;
static int hf_fp_rach_ext_rx_timing_deviation_present = -1;
static int hf_fp_cell_portion_id = -1;
static int hf_fp_ext_propagation_delay = -1;
static int hf_fp_angle_of_arrival = -1;
static int hf_fp_ext_received_sync_ul_timing_deviation = -1;
static int hf_fp_radio_interface_parameter_update_flag[5] = {-1, -1, -1, -1, -1};
static int hf_fp_dpc_mode = -1;
static int hf_fp_tpc_po = -1;
static int hf_fp_multiple_rl_set_indicator = -1;
static int hf_fp_max_ue_tx_pow = -1;
static int hf_fp_congestion_status = -1;
static int hf_fp_e_rucch_present = -1;
static int hf_fp_extended_bits_present = -1;
static int hf_fp_extended_bits = -1;
static int hf_fp_spare_extension = -1;
static int hf_fp_ul_setup_frame = -1;
static int hf_fp_dl_setup_frame = -1;
/* Subtrees. */
static int ett_fp = -1;
static int ett_fp_release = -1;
static int ett_fp_data = -1;
static int ett_fp_crcis = -1;
static int ett_fp_ddi_config = -1;
static int ett_fp_edch_subframe_header = -1;
static int ett_fp_edch_subframe = -1;
static int ett_fp_edch_maces = -1;
static int ett_fp_edch_macis_descriptors = -1;
static int ett_fp_hsdsch_new_ie_flags = -1;
static int ett_fp_rach_new_ie_flags = -1;
static int ett_fp_hsdsch_pdu_block_header = -1;
static expert_field ei_fp_hsdsch_common_experimental_support = EI_INIT;
static expert_field ei_fp_hsdsch_common_t3_not_implemented = EI_INIT;
static expert_field ei_fp_channel_type_unknown = EI_INIT;
static expert_field ei_fp_ddi_not_defined = EI_INIT;
static expert_field ei_fp_stop_hsdpa_transmission = EI_INIT;
static expert_field ei_fp_hsdsch_entity_not_specified = EI_INIT;
static expert_field ei_fp_expecting_tdd = EI_INIT;
static expert_field ei_fp_bad_payload_checksum = EI_INIT;
static expert_field ei_fp_e_rnti_t2_edch_frames = EI_INIT;
static expert_field ei_fp_crci_no_subdissector = EI_INIT;
static expert_field ei_fp_timing_adjustmentment_reported = EI_INIT;
static expert_field ei_fp_mac_is_sdus_miscount = EI_INIT;
static expert_field ei_fp_maybe_srb = EI_INIT;
static expert_field ei_fp_transport_channel_type_unknown = EI_INIT;
static expert_field ei_fp_unable_to_locate_ddi_entry = EI_INIT;
static expert_field ei_fp_e_rnti_first_entry = EI_INIT;
static expert_field ei_fp_bad_header_checksum = EI_INIT;
static expert_field ei_fp_crci_error_bit_set_for_tb = EI_INIT;
static expert_field ei_fp_spare_extension = EI_INIT;
static expert_field ei_fp_no_per_frame_info = EI_INIT;
static dissector_handle_t rlc_bcch_handle;
static dissector_handle_t mac_fdd_dch_handle;
static dissector_handle_t mac_fdd_rach_handle;
static dissector_handle_t mac_fdd_fach_handle;
static dissector_handle_t mac_fdd_pch_handle;
static dissector_handle_t mac_fdd_edch_handle;
static dissector_handle_t mac_fdd_edch_type2_handle;
static dissector_handle_t mac_fdd_hsdsch_handle;
static dissector_handle_t fp_handle;
static proto_tree *top_level_tree = NULL;
/* Variables used for preferences */
static gboolean preferences_call_mac_dissectors = TRUE;
static gboolean preferences_show_release_info = TRUE;
static gboolean preferences_payload_checksum = TRUE;
static gboolean preferences_header_checksum = TRUE;
#define UMTS_FP_USE_UAT 1
#ifdef UMTS_FP_USE_UAT
/* UAT entry structure. */
typedef struct {
guint8 protocol;
gchar *srcIP;
guint16 src_port;
gchar *dstIP;
guint16 dst_port;
guint8 interface_type;
guint8 division;
guint8 rlc_mode;
guint8 channel_type;
} uat_umts_fp_ep_and_ch_record_t;
static uat_umts_fp_ep_and_ch_record_t *uat_umts_fp_ep_and_ch_records = NULL;
static uat_t *umts_fp_uat = NULL;
static guint num_umts_fp_ep_and_ch_items = 0;
#endif /* UMTS_FP_USE_UAT */
/* E-DCH (T1) channel header information */
struct edch_t1_subframe_info
{
guint8 subframe_number;
guint8 number_of_mac_es_pdus;
guint8 ddi[64];
guint16 number_of_mac_d_pdus[64];
};
/* E-DCH (T2) channel header information */
struct edch_t2_subframe_info
{
guint8 subframe_number;
guint8 number_of_mac_is_pdus;
guint8 number_of_mac_is_sdus[16];
guint8 mac_is_lchid[16][16];
guint16 mac_is_length[16][16];
};
static const value_string channel_type_vals[] =
{
{ CHANNEL_RACH_FDD, "RACH_FDD" },
{ CHANNEL_RACH_TDD, "RACH_TDD" },
{ CHANNEL_FACH_FDD, "FACH_FDD" },
{ CHANNEL_FACH_TDD, "FACH_TDD" },
{ CHANNEL_DSCH_FDD, "DSCH_FDD" },
{ CHANNEL_DSCH_TDD, "DSCH_TDD" },
{ CHANNEL_USCH_TDD_384, "USCH_TDD_384" },
{ CHANNEL_USCH_TDD_128, "USCH_TDD_128" },
{ CHANNEL_PCH, "PCH" },
{ CHANNEL_CPCH, "CPCH" },
{ CHANNEL_BCH, "BCH" },
{ CHANNEL_DCH, "DCH" },
{ CHANNEL_HSDSCH, "HSDSCH" },
{ CHANNEL_IUR_CPCHF, "IUR CPCHF" },
{ CHANNEL_IUR_FACH, "IUR FACH" },
{ CHANNEL_IUR_DSCH, "IUR DSCH" },
{ CHANNEL_EDCH, "EDCH" },
{ CHANNEL_RACH_TDD_128, "RACH_TDD_128" },
{ CHANNEL_HSDSCH_COMMON, "HSDSCH-COMMON" },
{ CHANNEL_HSDSCH_COMMON_T3, "HSDSCH-COMMON-T3" },
{ CHANNEL_EDCH_COMMON, "EDCH-COMMON"},
{ 0, NULL }
};
static const value_string division_vals[] =
{
{ Division_FDD, "FDD"},
{ Division_TDD_384, "TDD-384"},
{ Division_TDD_128, "TDD-128"},
{ Division_TDD_768, "TDD-768"},
{ 0, NULL }
};
static const value_string data_control_vals[] = {
{ 0, "Data" },
{ 1, "Control" },
{ 0, NULL }
};
static const value_string direction_vals[] = {
{ 0, "Downlink" },
{ 1, "Uplink" },
{ 0, NULL }
};
static const value_string crci_vals[] = {
{ 0, "Correct" },
{ 1, "Not correct" },
{ 0, NULL }
};
static const value_string paging_indication_vals[] = {
{ 0, "no PI-bitmap in payload" },
{ 1, "PI-bitmap in payload" },
{ 0, NULL }
};
static const value_string spreading_factor_vals[] = {
{ 0, "4"},
{ 1, "8"},
{ 2, "16"},
{ 3, "32"},
{ 4, "64"},
{ 5, "128"},
{ 6, "256"},
{ 0, NULL }
};
static const value_string congestion_status_vals[] = {
{ 0, "No TNL congestion"},
{ 1, "Reserved for future use"},
{ 2, "TNL congestion - detected by delay build-up"},
{ 3, "TNL congestion - detected by frame loss"},
{ 0, NULL }
};
static const value_string e_rucch_flag_vals[] = {
{ 0, "Conventional E-RUCCH reception" },
{ 1, "TA Request reception" },
{ 0, NULL }
};
static const value_string hsdshc_mac_entity_vals[] = {
{ entity_not_specified, "Unspecified (assume MAC-hs)" },
{ hs, "MAC-hs" },
{ ehs, "MAC-ehs" },
{ 0, NULL }
};
/* TODO: add and use */
#if 0
static const value_string segmentation_status_vals[] = {
{ 0, "" },
{ 1, "" },
{ 2, "" },
{ 3, "" },
{ 0, NULL }
};
#endif
static const value_string lchid_vals[] = {
{ 0, "Logical Channel 1" },
{ 1, "Logical Channel 2" },
{ 2, "Logical Channel 3" },
{ 3, "Logical Channel 4" },
{ 4, "Logical Channel 5" },
{ 5, "Logical Channel 6" },
{ 6, "Logical Channel 7" },
{ 7, "Logical Channel 8" },
{ 8, "Logical Channel 9" },
{ 9, "Logical Channel 10" },
{ 10, "Logical Channel 11" },
{ 11, "Logical Channel 12" },
{ 12, "Logical Channel 13" },
{ 13, "Logical Channel 14" },
{ 14, "CCCH (SRB0)" },
{ 15, "E-RNTI being included (FDD only)" },
{ 0, NULL }
};
/* Dedicated control types */
#define DCH_OUTER_LOOP_POWER_CONTROL 1
#define DCH_TIMING_ADJUSTMENT 2
#define DCH_DL_SYNCHRONISATION 3
#define DCH_UL_SYNCHRONISATION 4
#define DCH_DL_NODE_SYNCHRONISATION 6
#define DCH_UL_NODE_SYNCHRONISATION 7
#define DCH_RX_TIMING_DEVIATION 8
#define DCH_RADIO_INTERFACE_PARAMETER_UPDATE 9
#define DCH_TIMING_ADVANCE 10
#define DCH_TNL_CONGESTION_INDICATION 11
static const value_string dch_control_frame_type_vals[] = {
{ DCH_OUTER_LOOP_POWER_CONTROL, "OUTER LOOP POWER CONTROL" },
{ DCH_TIMING_ADJUSTMENT, "TIMING ADJUSTMENT" },
{ DCH_DL_SYNCHRONISATION, "DL SYNCHRONISATION" },
{ DCH_UL_SYNCHRONISATION, "UL SYNCHRONISATION" },
{ 5, "Reserved Value" },
{ DCH_DL_NODE_SYNCHRONISATION, "DL NODE SYNCHRONISATION" },
{ DCH_UL_NODE_SYNCHRONISATION, "UL NODE SYNCHRONISATION" },
{ DCH_RX_TIMING_DEVIATION, "RX TIMING DEVIATION" },
{ DCH_RADIO_INTERFACE_PARAMETER_UPDATE, "RADIO INTERFACE PARAMETER UPDATE" },
{ DCH_TIMING_ADVANCE, "TIMING ADVANCE" },
{ DCH_TNL_CONGESTION_INDICATION, "TNL CONGESTION INDICATION" },
{ 0, NULL }
};
/* Common channel control types */
#define COMMON_OUTER_LOOP_POWER_CONTROL 1
#define COMMON_TIMING_ADJUSTMENT 2
#define COMMON_DL_SYNCHRONISATION 3
#define COMMON_UL_SYNCHRONISATION 4
#define COMMON_DL_NODE_SYNCHRONISATION 6
#define COMMON_UL_NODE_SYNCHRONISATION 7
#define COMMON_DYNAMIC_PUSCH_ASSIGNMENT 8
#define COMMON_TIMING_ADVANCE 9
#define COMMON_HS_DSCH_Capacity_Request 10
#define COMMON_HS_DSCH_Capacity_Allocation 11
#define COMMON_HS_DSCH_Capacity_Allocation_Type_2 12
static const value_string common_control_frame_type_vals[] = {
{ COMMON_OUTER_LOOP_POWER_CONTROL, "OUTER LOOP POWER CONTROL" },
{ COMMON_TIMING_ADJUSTMENT, "TIMING ADJUSTMENT" },
{ COMMON_DL_SYNCHRONISATION, "DL SYNCHRONISATION" },
{ COMMON_UL_SYNCHRONISATION, "UL SYNCHRONISATION" },
{ 5, "Reserved Value" },
{ COMMON_DL_NODE_SYNCHRONISATION, "DL NODE SYNCHRONISATION" },
{ COMMON_UL_NODE_SYNCHRONISATION, "UL NODE SYNCHRONISATION" },
{ COMMON_DYNAMIC_PUSCH_ASSIGNMENT, "DYNAMIC PUSCH ASSIGNMENT" },
{ COMMON_TIMING_ADVANCE, "TIMING ADVANCE" },
{ COMMON_HS_DSCH_Capacity_Request, "HS-DSCH Capacity Request" },
{ COMMON_HS_DSCH_Capacity_Allocation, "HS-DSCH Capacity Allocation" },
{ COMMON_HS_DSCH_Capacity_Allocation_Type_2, "HS-DSCH Capacity Allocation Type 2" },
{ 0, NULL }
};
/* 0 to 7*/
static const guint8 hsdsch_macdflow_id_rlc_map[] = {
RLC_UM, /*0 SRB */
RLC_AM, /*1 Interactive PS*/
RLC_AM, /*2 Interactive PS*/
RLC_UNKNOWN_MODE, /*3 ???*/
RLC_AM, /*4 Streaming PS*/
RLC_UNKNOWN_MODE,
RLC_UNKNOWN_MODE,
RLC_UNKNOWN_MODE
};
/* Mapping of HS-DSCH MACd-Flow ID to MAC_CONTENT type; flow id 1 (array index 0) => SRB */
/* 1 to 8*/
static const guint8 hsdsch_macdflow_id_mac_content_map[] = {
MAC_CONTENT_DCCH, /*1 SRB */
MAC_CONTENT_PS_DTCH, /*2 Interactive PS*/
MAC_CONTENT_PS_DTCH, /*3 Interactive PS*/
RLC_UNKNOWN_MODE, /*4 ???*/
MAC_CONTENT_PS_DTCH, /*5 Streaming PS*/
RLC_UNKNOWN_MODE,
RLC_UNKNOWN_MODE,
RLC_UNKNOWN_MODE
};
/* Make fake logical channel ids based on MACd-Flow IDs.
* XXX: Bug 12121 expanded the number of entries to 8 (+2);
* it is not clear what the proper fill value should be (0xff?).
*/
static const guint8 fake_lchid_macd_flow[] = {1,9,14,11,0,12,0,0};
/* Dissect message parts */
static int dissect_tb_data(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
dissector_handle_t *data_handle,
void *data);
static int dissect_macd_pdu_data(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, guint16 length, guint16 number_of_pdus, struct fp_info *p_fp_info,
void *data);
static int dissect_macd_pdu_data_type_2(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, guint16 length, guint16 number_of_pdus, struct fp_info * fpi,
void *data);
static int dissect_crci_bits(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
fp_info *p_fp_info, int offset);
static void dissect_spare_extension_and_crc(tvbuff_t *tvb, packet_info *pinfo,
proto_tree *tree, guint8 dch_crc_present,
int offset, guint header_length);
/* Dissect common control messages */
static int dissect_common_outer_loop_power_control(packet_info *pinfo, proto_tree *tree, tvbuff_t *tvb,
int offset, struct fp_info *p_fp_info);
static int dissect_common_timing_adjustment(packet_info *pinfo, proto_tree *tree, tvbuff_t *tvb,
int offset, struct fp_info *p_fp_info);
static int dissect_common_dl_node_synchronisation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset);
static int dissect_common_ul_node_synchronisation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset);
static int dissect_common_dl_synchronisation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset,
struct fp_info *p_fp_info);
static int dissect_common_ul_synchronisation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset,
struct fp_info *p_fp_info);
static int dissect_common_timing_advance(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset);
static int dissect_hsdpa_capacity_request(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset);
static int dissect_hsdpa_capacity_allocation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset,
struct fp_info *p_fp_info);
static int dissect_hsdpa_capacity_allocation_type_2(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset);
static void dissect_common_control(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info);
static int dissect_common_dynamic_pusch_assignment(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset);
/* Dissect common channel types */
static void dissect_rach_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
void *data);
static void dissect_fach_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
void *data);
static void dissect_dsch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info);
static void dissect_usch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info);
static void dissect_pch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
void *data);
static void dissect_cpch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info);
static void dissect_bch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info);
static void dissect_iur_dsch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info);
static void dissect_hsdsch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
void *data);
static void dissect_hsdsch_type_2_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
void *data);
static void dissect_hsdsch_common_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset,
struct fp_info *p_fp_info,
void *data);
/* Dissect DCH control messages */
static int dissect_dch_timing_adjustment(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset);
static int dissect_dch_rx_timing_deviation(packet_info *pinfo, proto_tree *tree, tvbuff_t *tvb, int offset,
struct fp_info *p_fp_info);
static int dissect_dch_dl_synchronisation(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset);
static int dissect_dch_ul_synchronisation(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset);
static int dissect_dch_outer_loop_power_control(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset);
static int dissect_dch_dl_node_synchronisation(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset);
static int dissect_dch_ul_node_synchronisation(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset);
static int dissect_dch_radio_interface_parameter_update(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset);
static int dissect_dch_timing_advance(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset, struct fp_info *p_fp_info);
static int dissect_dch_tnl_congestion_indication(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset);
static void dissect_dch_control_frame(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb,
int offset, struct fp_info *p_fp_info);
/* Dissect a DCH channel */
static void dissect_dch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
void *data);
/* Dissect dedicated channels */
static void dissect_e_dch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
gboolean is_common, rlc_info *rlcinf,
void *data);
static void dissect_e_dch_t2_or_common_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
int number_of_subframes,
gboolean is_common,
guint16 header_crc,
proto_item * header_crc_pi,
void *data);
/* Main dissection function */
static int dissect_fp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data);
/*
* CRNC sends data downlink on uplink parameters.
*/
void
set_umts_fp_conv_data(conversation_t *conversation, umts_fp_conversation_info_t *umts_fp_conversation_info)
{
if (conversation == NULL) {
return;
}
conversation_add_proto_data(conversation, proto_fp, umts_fp_conversation_info);
}
static int
get_tb_count(struct fp_info *p_fp_info)
{
int chan, tb_count = 0;
for (chan = 0; chan < p_fp_info->num_chans; chan++) {
tb_count += p_fp_info->chan_num_tbs[chan];
}
return tb_count;
}
static gboolean verify_control_frame_crc(tvbuff_t * tvb, packet_info * pinfo, proto_item * pi, guint16 frame_crc)
{
guint8 crc = 0;
guint8 * data = NULL;
/* Get data. */
data = (guint8 *)tvb_memdup(wmem_packet_scope(), tvb, 0, tvb_reported_length(tvb));
/* Mask out the 7 CRC bits of the first byte so only the FT flag bit enters the CRC calculation. */
data[0] = data[0] & 1;
/* Calculate crc7 sum. */
crc = crc7update(0, data, tvb_reported_length(tvb));
crc = crc7finalize(crc); /* finalize crc */
if (frame_crc == crc) {
proto_item_append_text(pi, " [correct]");
return TRUE;
} else {
proto_item_append_text(pi, " [incorrect, should be 0x%x]", crc);
expert_add_info(pinfo, pi, &ei_fp_bad_header_checksum);
return FALSE;
}
}
static gboolean verify_header_crc(tvbuff_t * tvb, packet_info * pinfo, proto_item * pi, guint16 header_crc, guint header_length)
{
guint8 crc = 0;
guint8 * data = NULL;
/* Get data of header with first byte removed. */
data = (guint8 *)tvb_memdup(wmem_packet_scope(), tvb, 1, header_length-1);
/* Calculate crc7 sum. */
crc = crc7update(0, data, header_length-1);
crc = crc7finalize(crc); /* finalize crc */
if (header_crc == crc) {
proto_item_append_text(pi, " [correct]");
return TRUE;
} else {
proto_item_append_text(pi, " [incorrect, should be 0x%x]", crc);
expert_add_info(pinfo, pi, &ei_fp_bad_header_checksum);
return FALSE;
}
}
static gboolean verify_header_crc_edch(tvbuff_t * tvb, packet_info * pinfo, proto_item * pi, guint16 header_crc, guint header_length)
{
guint16 crc = 0;
guint8 * data = NULL;
/* First create new subset of header with first byte removed. */
tvbuff_t * headtvb = tvb_new_subset_length(tvb, 1, header_length-1);
/* Get data of header with first byte removed. */
data = (guint8 *)tvb_memdup(wmem_packet_scope(), headtvb, 0, header_length-1);
/* Remove first 4 bits of the remaining data which are Header CRC cont. */
data[0] = data[0] & 0x0f;
crc = crc11_307_noreflect_noxor(data, header_length-1);
if (header_crc == crc) {
proto_item_append_text(pi, " [correct]");
return TRUE;
} else {
proto_item_append_text(pi, " [incorrect, should be 0x%x]", crc);
expert_add_info(pinfo, pi, &ei_fp_bad_header_checksum);
return FALSE;
}
}
/* Dissect the TBs of a UL data frame*/
static int
dissect_tb_data(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
dissector_handle_t *data_handle, void *data)
{
int chan, num_tbs = 0;
int bit_offset = 0;
int crci_bit_offset = (offset+1)<<3; /* Current offset + Quality estimate of 1 byte at the end*/
guint data_bits = 0;
guint8 crci_bit = 0;
proto_item *tree_ti = NULL;
proto_tree *data_tree = NULL;
gboolean dissected = FALSE;
if (tree) {
/* Add data subtree */
tree_ti = proto_tree_add_item(tree, hf_fp_data, tvb, offset, -1, ENC_NA);
proto_item_set_text(tree_ti, "TB data for %u chans", p_fp_info->num_chans);
data_tree = proto_item_add_subtree(tree_ti, ett_fp_data);
}
/* Calculate offset to CRCI bits */
if (p_fp_info->is_uplink) {
for (chan=0; chan < p_fp_info->num_chans; chan++) {
int n;
for (n=0; n < p_fp_info->chan_num_tbs[chan]; n++) {
/* Advance bit offset */
crci_bit_offset += p_fp_info->chan_tf_size[chan];
/* Pad out to next byte */
if (crci_bit_offset % 8) {
crci_bit_offset += (8 - (crci_bit_offset % 8));
}
}
}
}
/* Now for the TB data */
for (chan=0; chan < p_fp_info->num_chans; chan++) {
int n;
p_fp_info->cur_chan = chan; /*Set current channel?*/
/* Clearly show channels with no TBs */
if (p_fp_info->chan_num_tbs[chan] == 0) {
proto_item *no_tb_ti = proto_tree_add_uint(data_tree, hf_fp_chan_zero_tbs, tvb,
offset+(bit_offset/8),
0, chan+1);
proto_item_append_text(no_tb_ti, " (of size %d)",
p_fp_info->chan_tf_size[chan]);
PROTO_ITEM_SET_GENERATED(no_tb_ti);
}
/* Show TBs from non-empty channels */
pinfo->fd->subnum = chan; /* set subframe number to current TB */
for (n=0; n < p_fp_info->chan_num_tbs[chan]; n++) {
proto_item *ti;
p_fp_info->cur_tb = chan; /*Set current transport block?*/
if (data_tree) {
ti = proto_tree_add_item(data_tree, hf_fp_tb, tvb,
offset + (bit_offset/8),
((bit_offset % 8) + p_fp_info->chan_tf_size[chan] + 7) / 8,
ENC_NA);
proto_item_set_text(ti, "TB (chan %u, tb %u, %u bits)",
chan+1, n+1, p_fp_info->chan_tf_size[chan]);
}
if (preferences_call_mac_dissectors /*&& !rlc_is_ciphered(pinfo)*/ && data_handle &&
(p_fp_info->chan_tf_size[chan] > 0)) {
tvbuff_t *next_tvb;
proto_item *item;
/* If this is DL we should not care about CRCI bits (since they don't exist) */
if (p_fp_info->is_uplink) {
if ( p_fp_info->channel == CHANNEL_RACH_FDD) { /*In RACH we don't have any QE field, hence go back 8 bits.*/
crci_bit = tvb_get_bits8(tvb, crci_bit_offset+n-8, 1);
item = proto_tree_add_item(data_tree, hf_fp_crci[n%8], tvb, (crci_bit_offset+n-8)/8, 1, ENC_BIG_ENDIAN);
PROTO_ITEM_SET_GENERATED(item);
} else {
crci_bit = tvb_get_bits8(tvb, crci_bit_offset+n, 1);
item = proto_tree_add_item(data_tree, hf_fp_crci[n%8], tvb, (crci_bit_offset+n)/8, 1, ENC_BIG_ENDIAN);
PROTO_ITEM_SET_GENERATED(item);
}
}
if (crci_bit == 0 || !p_fp_info->is_uplink) {
next_tvb = tvb_new_subset(tvb, offset + bit_offset/8,
((bit_offset % 8) + p_fp_info->chan_tf_size[chan] + 7) / 8, -1);
/****************/
/* TODO: maybe this decision can be based only on info available in fp_info */
call_dissector_with_data(*data_handle, next_tvb, pinfo, top_level_tree, data);
dissected = TRUE;
} else {
proto_tree_add_expert(tree, pinfo, &ei_fp_crci_no_subdissector, tvb, offset + bit_offset/8,
((bit_offset % 8) + p_fp_info->chan_tf_size[chan] + 7) / 8);
}
}
num_tbs++;
/* Advance bit offset */
bit_offset += p_fp_info->chan_tf_size[chan];
data_bits += p_fp_info->chan_tf_size[chan];
/* Pad out to next byte */
if (bit_offset % 8) {
bit_offset += (8 - (bit_offset % 8));
}
}
}
if (dissected == FALSE) {
col_append_fstr(pinfo->cinfo, COL_INFO, "(%u bits in %u tbs)",
data_bits, num_tbs);
}
/* Data tree should cover entire length */
if (data_tree) {
proto_item_set_len(tree_ti, bit_offset/8);
proto_item_append_text(tree_ti, " (%u bits in %u tbs)", data_bits, num_tbs);
}
/* Move offset past TBs (we know it's already padded out to next byte) */
offset += (bit_offset / 8);
return offset;
}
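/*
* Worked example of the byte-padding rule above (illustrative): a
* transport block of 260 bits advances bit_offset by 260 and is then
* rounded up to the next byte boundary, so it occupies ceil(260/8) = 33
* octets and the next TB starts 264 bits further on. The CRCI bit
* offset computed for uplink frames follows the same per-TB rounding.
*/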
/* Dissect the MAC-d PDUs of an HS-DSCH (type 1) frame.
Length is in bits, and payload is offset by 4 bits of padding */
static int
dissect_macd_pdu_data(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, guint16 length, guint16 number_of_pdus,
struct fp_info *p_fp_info, void *data)
{
int pdu;
int bit_offset = 0;
proto_item *pdus_ti = NULL;
proto_tree *data_tree = NULL;
gboolean dissected = FALSE;
/* Add data subtree */
if (tree) {
pdus_ti = proto_tree_add_item(tree, hf_fp_data, tvb, offset, -1, ENC_NA);
proto_item_set_text(pdus_ti, "%u MAC-d PDUs of %u bits", number_of_pdus, length);
data_tree = proto_item_add_subtree(pdus_ti, ett_fp_data);
}
/* Now for the PDUs */
for (pdu=0; pdu < number_of_pdus; pdu++) {
proto_item *pdu_ti;
if (data_tree) {
/* Show 4 bits padding at start of PDU */
proto_tree_add_item(data_tree, hf_fp_hsdsch_data_padding, tvb, offset+(bit_offset/8), 1, ENC_BIG_ENDIAN);
}
bit_offset += 4;
/* Data bytes! */
if (data_tree) {
pinfo->fd->subnum = pdu; /* set subframe number to current TB */
p_fp_info->cur_tb = pdu; /*Set TB (PDU) index correctly*/
pdu_ti = proto_tree_add_item(data_tree, hf_fp_mac_d_pdu, tvb,
offset + (bit_offset/8),
((bit_offset % 8) + length + 7) / 8,
ENC_NA);
proto_item_set_text(pdu_ti, "MAC-d PDU (PDU %u)", pdu+1);
}
if (preferences_call_mac_dissectors /*&& !rlc_is_ciphered(pinfo)*/) {
tvbuff_t *next_tvb;
next_tvb = tvb_new_subset(tvb, offset + bit_offset/8,
((bit_offset % 8) + length + 7)/8, -1);
call_dissector_with_data(mac_fdd_hsdsch_handle, next_tvb, pinfo, top_level_tree, data);
dissected = TRUE;
}
/* Advance bit offset */
bit_offset += length;
/* Pad out to next byte */
if (bit_offset % 8) {
bit_offset += (8 - (bit_offset % 8));
}
}
/* Data tree should cover entire length */
proto_item_set_len(pdus_ti, bit_offset/8);
/* Move offset past PDUs (we know it's already padded out to next byte) */
offset += (bit_offset / 8);
/* Show summary in info column */
if (dissected == FALSE) {
col_append_fstr(pinfo->cinfo, COL_INFO, " %u PDUs of %u bits",
number_of_pdus, length);
}
return offset;
}
/* Dissect the MAC-d PDUs of an HS-DSCH (type 2) frame.
Length is in bytes, and payload is byte-aligned (no padding) */
static int
dissect_macd_pdu_data_type_2(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, guint16 length, guint16 number_of_pdus,
struct fp_info *fpi, void *data)
{
int pdu;
proto_item *pdus_ti = NULL;
proto_tree *data_tree = NULL;
int first_offset = offset;
gboolean dissected = FALSE;
/* Add data subtree */
if (tree) {
pdus_ti = proto_tree_add_item(tree, hf_fp_data, tvb, offset, -1, ENC_NA);
proto_item_set_text(pdus_ti, "%u MAC-d PDUs of %u bytes", number_of_pdus, length);
data_tree = proto_item_add_subtree(pdus_ti, ett_fp_data);
}
/* Now for the PDUs */
for (pdu=0; pdu < number_of_pdus; pdu++) {
proto_item *pdu_ti;
/* Data bytes! */
if (data_tree) {
pdu_ti = proto_tree_add_item(data_tree, hf_fp_mac_d_pdu, tvb,
offset, length, ENC_NA);
proto_item_set_text(pdu_ti, "MAC-d PDU (PDU %u)", pdu+1);
}
if (preferences_call_mac_dissectors /*&& !rlc_is_ciphered(pinfo)*/) {
tvbuff_t *next_tvb = tvb_new_subset(tvb, offset, length, -1);
fpi->cur_tb = pdu; /*Set proper pdu index for MAC and higher layers*/
call_dissector_with_data(mac_fdd_hsdsch_handle, next_tvb, pinfo, top_level_tree, data);
dissected = TRUE;
}
/* Advance offset */
offset += length;
}
/* Data tree should cover entire length */
proto_item_set_len(pdus_ti, offset-first_offset);
/* Show summary in info column */
if (!dissected) {
col_append_fstr(pinfo->cinfo, COL_INFO, " %u PDUs of %u bits",
number_of_pdus, length*8);
}
return offset;
}
/* Dissect CRCI bits (uplink) */
static int
dissect_crci_bits(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
fp_info *p_fp_info, int offset)
{
int n, num_tbs;
proto_item *ti = NULL;
proto_tree *crcis_tree = NULL;
guint errors = 0;
num_tbs = get_tb_count(p_fp_info);
/* Add CRCIs subtree */
if (tree) {
ti = proto_tree_add_item(tree, hf_fp_crcis, tvb, offset, (num_tbs+7)/8, ENC_NA);
proto_item_set_text(ti, "CRCI bits for %u tbs", num_tbs);
crcis_tree = proto_item_add_subtree(ti, ett_fp_crcis);
}
/* CRCIs */
for (n=0; n < num_tbs; n++) {
int bit = (tvb_get_guint8(tvb, offset+(n/8)) >> (7-(n%8))) & 0x01;
proto_tree_add_item(crcis_tree, hf_fp_crci[n%8], tvb, offset+(n/8),
1, ENC_BIG_ENDIAN);
if (bit == 1) {
errors++;
expert_add_info(pinfo, ti, &ei_fp_crci_error_bit_set_for_tb);
}
}
if (tree) {
/* Highlight range of bytes covered by indicator bits */
proto_item_set_len(ti, (num_tbs+7) / 8);
/* Show error count in root text */
proto_item_append_text(ti, " (%u errors)", errors);
}
offset += ((num_tbs+7) / 8);
return offset;
}
static void
dissect_spare_extension_and_crc(tvbuff_t *tvb, packet_info *pinfo,
proto_tree *tree, guint8 dch_crc_present,
int offset, guint header_length)
{
int crc_size = 0;
int remain = tvb_captured_length_remaining(tvb, offset);
/* Payload CRC (optional) */
if ((dch_crc_present == 1) || ((dch_crc_present == 2) && (remain >= 2))) {
crc_size = 2;
}
if (remain > crc_size) {
proto_item *ti;
ti = proto_tree_add_item(tree, hf_fp_spare_extension, tvb,
offset, remain-crc_size, ENC_NA);
proto_item_append_text(ti, " (%u octets)", remain-crc_size);
expert_add_info_format(pinfo, ti, &ei_fp_spare_extension, "Spare Extension present (%u bytes)", remain-crc_size);
offset += remain-crc_size;
}
if (crc_size) {
proto_item * pi = proto_tree_add_item(tree, hf_fp_payload_crc, tvb, offset, crc_size,
ENC_BIG_ENDIAN);
if (preferences_payload_checksum) {
guint16 calc_crc, read_crc;
guint8 * data = (guint8 *)tvb_memdup(wmem_packet_scope(), tvb, header_length, offset-header_length);
calc_crc = crc16_8005_noreflect_noxor(data, offset-header_length);
read_crc = tvb_get_bits16(tvb, offset*8, 16, ENC_BIG_ENDIAN);
if (calc_crc == read_crc) {
proto_item_append_text(pi, " [correct]");
} else {
proto_item_append_text(pi, " [incorrect, should be 0x%x]", calc_crc);
expert_add_info(pinfo, pi, &ei_fp_bad_payload_checksum);
}
}
}
}
/***********************************************************/
/* Common control message types */
static int
dissect_common_outer_loop_power_control(packet_info *pinfo, proto_tree *tree, tvbuff_t *tvb,
int offset, struct fp_info *p_fp_info _U_)
{
return dissect_dch_outer_loop_power_control(tree, pinfo, tvb, offset);
}
static int
dissect_common_timing_adjustment(packet_info *pinfo, proto_tree *tree, tvbuff_t *tvb,
int offset, struct fp_info *p_fp_info)
{
if (p_fp_info->channel != CHANNEL_PCH) {
guint8 cfn;
gint16 toa;
/* CFN control */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* ToA */
toa = tvb_get_ntohs(tvb, offset);
proto_tree_add_item(tree, hf_fp_toa, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, " CFN=%u, ToA=%d", cfn, toa);
}
else {
guint16 cfn;
gint32 toa;
/* PCH CFN is 12 bits */
cfn = (tvb_get_ntohs(tvb, offset) >> 4);
proto_tree_add_item(tree, hf_fp_pch_cfn, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
/* 4 bits of padding follow... */
/* 20 bits of ToA (followed by 4 padding bits) */
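/* The 24 fetched bits are the 20-bit ToA followed by 4 padding bits.
* Shifting left by 8 parks the ToA in the top of a signed 32-bit int,
* and the division by 4096 (2^12) brings it back down with the sign
* bit propagated, yielding a properly signed value. */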
toa = ((int)(tvb_get_ntoh24(tvb, offset) << 8)) / 4096;
proto_tree_add_int(tree, hf_fp_pch_toa, tvb, offset, 3, toa);
offset += 3;
col_append_fstr(pinfo->cinfo, COL_INFO, " CFN=%u, ToA=%d", cfn, toa);
}
return offset;
}
static int
dissect_common_dl_node_synchronisation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset)
{
/* T1 (3 bytes) */
guint32 t1 = tvb_get_ntoh24(tvb, offset);
proto_tree_add_item(tree, hf_fp_t1, tvb, offset, 3, ENC_BIG_ENDIAN);
offset += 3;
col_append_fstr(pinfo->cinfo, COL_INFO, " T1=%u", t1);
return offset;
}
static int
dissect_common_ul_node_synchronisation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset)
{
guint32 t1, t2, t3;
/* T1 (3 bytes) */
t1 = tvb_get_ntoh24(tvb, offset);
proto_tree_add_item(tree, hf_fp_t1, tvb, offset, 3, ENC_BIG_ENDIAN);
offset += 3;
/* T2 (3 bytes) */
t2 = tvb_get_ntoh24(tvb, offset);
proto_tree_add_item(tree, hf_fp_t2, tvb, offset, 3, ENC_BIG_ENDIAN);
offset += 3;
/* T3 (3 bytes) */
t3 = tvb_get_ntoh24(tvb, offset);
proto_tree_add_item(tree, hf_fp_t3, tvb, offset, 3, ENC_BIG_ENDIAN);
offset += 3;
col_append_fstr(pinfo->cinfo, COL_INFO, " T1=%u T2=%u, T3=%u",
t1, t2, t3);
return offset;
}
static int
dissect_common_dl_synchronisation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset, struct fp_info *p_fp_info)
{
guint16 cfn;
if (p_fp_info->channel != CHANNEL_PCH) {
/* CFN control */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
}
else {
/* PCH CFN is 12 bits */
cfn = (tvb_get_ntohs(tvb, offset) >> 4);
proto_tree_add_item(tree, hf_fp_pch_cfn, tvb, offset, 2, ENC_BIG_ENDIAN);
/* 4 bits of padding follow... */
offset += 2;
}
col_append_fstr(pinfo->cinfo, COL_INFO, " CFN=%u", cfn);
return offset;
}
static int
dissect_common_ul_synchronisation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset, struct fp_info *p_fp_info)
{
return dissect_common_timing_adjustment(pinfo, tree, tvb, offset, p_fp_info);
}
static int
dissect_common_timing_advance(packet_info *pinfo, proto_tree *tree, tvbuff_t *tvb, int offset)
{
guint8 cfn;
guint16 timing_advance;
/* CFN control */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Timing Advance */
timing_advance = (tvb_get_guint8(tvb, offset) & 0x3f) * 4;
proto_tree_add_uint(tree, hf_fp_timing_advance, tvb, offset, 1, timing_advance);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, " CFN = %u, TA = %u",
cfn, timing_advance);
return offset;
}
static int
dissect_hsdpa_capacity_request(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset)
{
guint8 priority;
guint16 user_buffer_size;
/* CmCH-PI */
priority = (tvb_get_guint8(tvb, offset) & 0x0f);
proto_tree_add_item(tree, hf_fp_cmch_pi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* User buffer size */
user_buffer_size = tvb_get_ntohs(tvb, offset);
proto_tree_add_item(tree, hf_fp_user_buffer_size, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, " CmCH-PI=%u User-Buffer-Size=%u",
priority, user_buffer_size);
return offset;
}
static int
dissect_hsdpa_capacity_allocation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset,
struct fp_info *p_fp_info)
{
proto_item *ti;
proto_item *rate_ti;
guint16 max_pdu_length;
guint8 repetition_period;
guint8 interval;
guint64 credits;
/* Congestion status (introduced sometime during R6...) */
if ((p_fp_info->release == 6) || (p_fp_info->release == 7)) {
proto_tree_add_bits_item(tree, hf_fp_congestion_status, tvb,
offset*8 + 2, 2, ENC_BIG_ENDIAN);
}
/* CmCH-PI */
proto_tree_add_item(tree, hf_fp_cmch_pi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Max MAC-d PDU length (13 bits) */
max_pdu_length = tvb_get_ntohs(tvb, offset) >> 3;
proto_tree_add_item(tree, hf_fp_hsdsch_max_macd_pdu_len, tvb, offset, 2, ENC_BIG_ENDIAN);
offset++;
/* HS-DSCH credits (11 bits) */
ti = proto_tree_add_bits_ret_val(tree, hf_fp_hsdsch_credits, tvb,
offset*8 + 5, 11, &credits, ENC_BIG_ENDIAN);
offset += 2;
/* Interesting values */
if (credits == 0) {
proto_item_append_text(ti, " (stop transmission)");
expert_add_info(pinfo, ti, &ei_fp_stop_hsdpa_transmission);
}
if (credits == 2047) {
proto_item_append_text(ti, " (unlimited)");
}
/* HS-DSCH Interval */
interval = tvb_get_guint8(tvb, offset);
ti = proto_tree_add_uint(tree, hf_fp_hsdsch_interval, tvb, offset, 1, interval*10);
offset++;
if (interval == 0) {
proto_item_append_text(ti, " (none of the credits shall be used)");
}
/* HS-DSCH Repetition period */
repetition_period = tvb_get_guint8(tvb, offset);
ti = proto_tree_add_item(tree, hf_fp_hsdsch_repetition_period, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
if (repetition_period == 0) {
proto_item_append_text(ti, " (unlimited repetition period)");
}
/* Calculate and show the effective rate, if enabled */
if (credits == 2047) {
rate_ti = proto_tree_add_item(tree, hf_fp_hsdsch_unlimited_rate, tvb, 0, 0, ENC_NA);
PROTO_ITEM_SET_GENERATED(rate_ti);
}
else {
if (interval != 0) {
/* Cast on credits is safe, since we know it won't exceed 2^11 */
rate_ti = proto_tree_add_uint(tree, hf_fp_hsdsch_calculated_rate, tvb, 0, 0,
(guint16)credits * max_pdu_length * (1000 / (interval*10)));
PROTO_ITEM_SET_GENERATED(rate_ti);
}
}
col_append_fstr(pinfo->cinfo, COL_INFO,
" Max-PDU-len=%u Credits=%u Interval=%u Rep-Period=%u",
max_pdu_length, (guint16)credits, interval, repetition_period);
return offset;
}
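/*
* Worked example of the rate calculation above (illustrative): with
* credits = 10, a Max MAC-d PDU length of 336 bits and an interval
* field of 2 (i.e. 20 ms), the granted rate is
* 10 * 336 * (1000 / 20) = 168000 bits/s.
*/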
static int
dissect_hsdpa_capacity_allocation_type_2(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset)
{
proto_item *ti;
proto_item *rate_ti;
guint16 max_pdu_length;
guint8 repetition_period;
guint8 interval;
guint16 credits;
/* Congestion status */
proto_tree_add_bits_item(tree, hf_fp_congestion_status, tvb,
offset*8 + 2, 2, ENC_BIG_ENDIAN);
/* CmCH-PI */
proto_tree_add_item(tree, hf_fp_cmch_pi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* 5 spare bits follow here */
/* Max MAC-d/c PDU length (11 bits) */
max_pdu_length = tvb_get_ntohs(tvb, offset) & 0x7ff;
proto_tree_add_item(tree, hf_fp_hsdsch_max_macdc_pdu_len, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
/* HS-DSCH credits (16 bits) */
credits = (tvb_get_ntohs(tvb, offset));
ti = proto_tree_add_uint(tree, hf_fp_hsdsch_credits, tvb,
offset, 2, credits);
offset += 2;
/* Interesting values */
if (credits == 0) {
proto_item_append_text(ti, " (stop transmission)");
expert_add_info(pinfo, ti, &ei_fp_stop_hsdpa_transmission);
}
if (credits == 65535) {
proto_item_append_text(ti, " (unlimited)");
}
/* HS-DSCH Interval */
interval = tvb_get_guint8(tvb, offset);
ti = proto_tree_add_uint(tree, hf_fp_hsdsch_interval, tvb, offset, 1, interval*10);
offset++;
if (interval == 0) {
proto_item_append_text(ti, " (none of the credits shall be used)");
}
/* HS-DSCH Repetition period */
repetition_period = tvb_get_guint8(tvb, offset);
ti = proto_tree_add_item(tree, hf_fp_hsdsch_repetition_period, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
if (repetition_period == 0) {
proto_item_append_text(ti, " (unlimited repetition period)");
}
/* Calculate and show the effective rate, if enabled */
if (credits == 65535) {
rate_ti = proto_tree_add_item(tree, hf_fp_hsdsch_unlimited_rate, tvb, 0, 0, ENC_NA);
PROTO_ITEM_SET_GENERATED(rate_ti);
}
else {
if (interval != 0) {
rate_ti = proto_tree_add_uint(tree, hf_fp_hsdsch_calculated_rate, tvb, 0, 0,
credits * max_pdu_length * (1000 / (interval*10)));
PROTO_ITEM_SET_GENERATED(rate_ti);
}
}
col_append_fstr(pinfo->cinfo, COL_INFO,
" Max-PDU-len=%u Credits=%u Interval=%u Rep-Period=%u",
max_pdu_length, credits, interval, repetition_period);
return offset;
}
static int
dissect_common_dynamic_pusch_assignment(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset)
{
guint8 pusch_set_id;
guint8 activation_cfn;
guint8 duration;
/* PUSCH Set Id */
pusch_set_id = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_pusch_set_id, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Activation CFN */
activation_cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_activation_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Duration */
duration = tvb_get_guint8(tvb, offset) * 10;
proto_tree_add_uint(tree, hf_fp_duration, tvb, offset, 1, duration);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO,
" PUSCH Set Id=%u Activation CFN=%u Duration=%u",
pusch_set_id, activation_cfn, duration);
return offset;
}
/* Dissect the control part of a common channel message */
static void
dissect_common_control(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info)
{
/* Common control frame type */
guint8 control_frame_type = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_common_control_frame_type, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO,
val_to_str_const(control_frame_type, common_control_frame_type_vals, "Unknown"));
/* Frame-type specific dissection */
switch (control_frame_type) {
case COMMON_OUTER_LOOP_POWER_CONTROL:
/*offset =*/ dissect_common_outer_loop_power_control(pinfo, tree, tvb, offset, p_fp_info);
break;
case COMMON_TIMING_ADJUSTMENT:
/*offset =*/ dissect_common_timing_adjustment(pinfo, tree, tvb, offset, p_fp_info);
break;
case COMMON_DL_SYNCHRONISATION:
/*offset =*/ dissect_common_dl_synchronisation(pinfo, tree, tvb, offset, p_fp_info);
break;
case COMMON_UL_SYNCHRONISATION:
/*offset =*/ dissect_common_ul_synchronisation(pinfo, tree, tvb, offset, p_fp_info);
break;
case COMMON_DL_NODE_SYNCHRONISATION:
/*offset =*/ dissect_common_dl_node_synchronisation(pinfo, tree, tvb, offset);
break;
case COMMON_UL_NODE_SYNCHRONISATION:
/*offset =*/ dissect_common_ul_node_synchronisation(pinfo, tree, tvb, offset);
break;
case COMMON_DYNAMIC_PUSCH_ASSIGNMENT:
/*offset =*/ dissect_common_dynamic_pusch_assignment(pinfo, tree, tvb, offset);
break;
case COMMON_TIMING_ADVANCE:
/*offset =*/ dissect_common_timing_advance(pinfo, tree, tvb, offset);
break;
case COMMON_HS_DSCH_Capacity_Request:
/*offset =*/ dissect_hsdpa_capacity_request(pinfo, tree, tvb, offset);
break;
case COMMON_HS_DSCH_Capacity_Allocation:
/*offset =*/ dissect_hsdpa_capacity_allocation(pinfo, tree, tvb, offset, p_fp_info);
break;
case COMMON_HS_DSCH_Capacity_Allocation_Type_2:
/*offset =*/ dissect_hsdpa_capacity_allocation_type_2(pinfo, tree, tvb, offset);
break;
default:
break;
}
/* Common control frames appear to carry no Spare Extension or payload
 * CRC, so the usual trailer dissection is skipped here */
/* dissect_spare_extension_and_crc(tvb, pinfo, tree, 0, offset);
*/
}
/**************************/
/* Dissect a RACH channel */
static void
dissect_rach_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info, void *data)
{
gboolean is_control_frame;
guint16 header_crc = 0;
proto_item * header_crc_pi = NULL;
guint header_length = 0;
/* Header CRC */
header_crc = tvb_get_bits8(tvb, 0, 7);
header_crc_pi = proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
/* For control frame the header CRC is actually frame CRC covering all
* bytes except the first */
if (preferences_header_checksum) {
verify_control_frame_crc(tvb, pinfo, header_crc_pi, header_crc);
}
}
else {
guint8 cfn;
guint32 propagation_delay = 0;
proto_item *propagation_delay_ti = NULL;
guint32 received_sync_ul_timing_deviation = 0;
proto_item *received_sync_ul_timing_deviation_ti = NULL;
proto_item *rx_timing_deviation_ti = NULL;
guint16 rx_timing_deviation = 0;
/* DATA */
/* CFN */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, "CFN=%03u ", cfn);
/* TFI */
proto_tree_add_item(tree, hf_fp_tfi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
if (p_fp_info->channel == CHANNEL_RACH_FDD) {
/* Propagation delay */
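/* The field is in steps of 3 chips, hence the *3 when displaying */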
propagation_delay = tvb_get_guint8(tvb, offset);
propagation_delay_ti = proto_tree_add_uint(tree, hf_fp_propagation_delay, tvb, offset, 1,
propagation_delay*3);
offset++;
}
/* Should be TDD 3.84 or 7.68 */
if (p_fp_info->channel == CHANNEL_RACH_TDD) {
/* Rx Timing Deviation */
rx_timing_deviation = tvb_get_guint8(tvb, offset);
rx_timing_deviation_ti = proto_tree_add_item(tree, hf_fp_rx_timing_deviation, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
}
if (p_fp_info->channel == CHANNEL_RACH_TDD_128) {
/* Received SYNC UL Timing Deviation */
received_sync_ul_timing_deviation = tvb_get_guint8(tvb, offset);
received_sync_ul_timing_deviation_ti =
proto_tree_add_item(tree, hf_fp_received_sync_ul_timing_deviation, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
}
header_length = offset;
/* TB data */
offset = dissect_tb_data(tvb, pinfo, tree, offset, p_fp_info, &mac_fdd_rach_handle, data);
/* CRCIs */
offset = dissect_crci_bits(tvb, pinfo, tree, p_fp_info, offset);
/* Info introduced in R6 */
/* only check if it looks as if they are present */
if (((p_fp_info->release == 6) || (p_fp_info->release == 7)) &&
(tvb_reported_length_remaining(tvb, offset) > 2))
{
int n;
guint8 flags;
/* guint8 flag_bytes = 0; */
gboolean cell_portion_id_present = FALSE;
gboolean ext_propagation_delay_present = FALSE;
gboolean angle_of_arrival_present = FALSE;
gboolean ext_rx_sync_ul_timing_deviation_present = FALSE;
gboolean ext_rx_timing_deviation_present = FALSE;
/* New IE flags (assume mandatory for now) */
do {
proto_item *new_ie_flags_ti;
proto_tree *new_ie_flags_tree;
guint ies_found = 0;
/* Add new IE flags subtree */
new_ie_flags_ti = proto_tree_add_string_format(tree, hf_fp_rach_new_ie_flags, tvb, offset, 1,
"", "New IE flags");
new_ie_flags_tree = proto_item_add_subtree(new_ie_flags_ti, ett_fp_rach_new_ie_flags);
/* Read next byte */
flags = tvb_get_guint8(tvb, offset);
/* flag_bytes++ */
/* Dissect individual bits */
for (n=0; n < 8; n++) {
switch (n) {
case 6:
switch (p_fp_info->division) {
case Division_FDD:
/* Ext propagation delay */
ext_propagation_delay_present = TRUE;
proto_tree_add_item(new_ie_flags_tree, hf_fp_rach_ext_propagation_delay_present,
tvb, offset, 1, ENC_BIG_ENDIAN);
break;
case Division_TDD_128:
/* Ext Rx Sync UL Timing */
ext_rx_sync_ul_timing_deviation_present = TRUE;
proto_tree_add_item(new_ie_flags_tree, hf_fp_rach_ext_rx_sync_ul_timing_deviation_present,
tvb, offset, 1, ENC_BIG_ENDIAN);
break;
default:
/* Not defined */
proto_tree_add_item(new_ie_flags_tree, hf_fp_rach_new_ie_flag_unused[6],
tvb, offset, 1, ENC_BIG_ENDIAN);
break;
}
break;
case 7:
switch (p_fp_info->division) {
case Division_FDD:
/* Cell Portion ID */
cell_portion_id_present = TRUE;
proto_tree_add_item(new_ie_flags_tree, hf_fp_rach_cell_portion_id_present,
tvb, offset, 1, ENC_BIG_ENDIAN);
break;
case Division_TDD_128:
/* AOA */
angle_of_arrival_present = TRUE;
proto_tree_add_item(new_ie_flags_tree, hf_fp_rach_angle_of_arrival_present,
tvb, offset, 1, ENC_BIG_ENDIAN);
break;
case Division_TDD_384:
case Division_TDD_768:
/* Extended Rx Timing Deviation */
ext_rx_timing_deviation_present = TRUE;
proto_tree_add_item(new_ie_flags_tree, hf_fp_rach_ext_rx_timing_deviation_present,
tvb, offset, 1, ENC_BIG_ENDIAN);
break;
}
break;
default:
/* No defined meanings */
/* Visual Studio Code Analyzer wrongly thinks n can be 7 here. It can't */
proto_tree_add_item(new_ie_flags_tree, hf_fp_rach_new_ie_flag_unused[n],
tvb, offset, 1, ENC_BIG_ENDIAN);
break;
}
if ((flags >> (7-n)) & 0x01) {
ies_found++;
}
}
offset++;
proto_item_append_text(new_ie_flags_ti, " (%u IEs found)", ies_found);
/* Last bit set will indicate another flags byte follows... */
} while (0); /*((flags & 0x01) && (flag_bytes < 31));*/
/* Cell Portion ID */
if (cell_portion_id_present) {
proto_tree_add_item(tree, hf_fp_cell_portion_id, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
}
/* Ext Rx Timing Deviation */
if (ext_rx_timing_deviation_present) {
guint8 extra_bits;
guint bits_to_extend;
switch (p_fp_info->division) {
case Division_TDD_384:
bits_to_extend = 1;
break;
case Division_TDD_768:
bits_to_extend = 2;
break;
default:
/* TODO: report unexpected division type */
bits_to_extend = 1;
break;
}
extra_bits = tvb_get_guint8(tvb, offset) &
((bits_to_extend == 1) ? 0x01 : 0x03);
rx_timing_deviation = (extra_bits << 8) | (rx_timing_deviation);
proto_item_append_text(rx_timing_deviation_ti,
" (extended to 0x%x)",
rx_timing_deviation);
proto_tree_add_bits_item(tree, hf_fp_extended_bits, tvb,
offset*8 + (8-bits_to_extend), bits_to_extend, ENC_BIG_ENDIAN);
offset++;
}
/* Ext propagation delay. */
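/* The 10 extension bits become the high-order bits above the original
 * 8-bit field; the combined value is again in 3-chip steps, e.g.
 * extra=0x001, base=0x10 => ((1<<8)|0x10)*3 = 816 */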
if (ext_propagation_delay_present) {
guint16 extra_bits = tvb_get_ntohs(tvb, offset) & 0x03ff;
proto_tree_add_item(tree, hf_fp_ext_propagation_delay, tvb, offset, 2, ENC_BIG_ENDIAN);
/* Adding 10 bits to original 8 */
proto_item_append_text(propagation_delay_ti, " (extended to %u)",
((extra_bits << 8) | propagation_delay) * 3);
offset += 2;
}
/* Angle of Arrival (AOA) */
if (angle_of_arrival_present) {
proto_tree_add_item(tree, hf_fp_angle_of_arrival, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}
/* Ext. Rx Sync UL Timing Deviation */
if (ext_rx_sync_ul_timing_deviation_present) {
guint16 extra_bits;
/* Ext received Sync UL Timing Deviation */
extra_bits = tvb_get_ntohs(tvb, offset) & 0x1fff;
proto_tree_add_item(tree, hf_fp_ext_received_sync_ul_timing_deviation, tvb, offset, 2, ENC_BIG_ENDIAN);
/* Adding 13 bits to original 8 */
proto_item_append_text(received_sync_ul_timing_deviation_ti, " (extended to %u)",
(extra_bits << 8) | received_sync_ul_timing_deviation);
offset += 2;
}
}
if (preferences_header_checksum) {
verify_header_crc(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
/**************************/
/* Dissect a FACH channel */
static void
dissect_fach_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info, void *data)
{
gboolean is_control_frame;
guint16 header_crc = 0;
proto_item * header_crc_pi = NULL;
guint header_length = 0;
/* Header CRC */
header_crc = tvb_get_bits8(tvb, 0, 7);
header_crc_pi = proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
/* For control frame the header CRC is actually frame CRC covering all
* bytes except the first */
if (preferences_header_checksum) {
verify_control_frame_crc(tvb, pinfo, header_crc_pi, header_crc);
}
}
else {
guint8 cfn;
/* DATA */
/* CFN */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, "CFN=%03u ", cfn);
/* TFI */
proto_tree_add_item(tree, hf_fp_fach_tfi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Transmit power level */
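/* Signalled in steps of 0.1 dB, hence the /10 (e.g. raw 123 => 12.3) */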
proto_tree_add_float(tree, hf_fp_transmit_power_level, tvb, offset, 1,
(float)(int)(tvb_get_guint8(tvb, offset)) / 10);
offset++;
header_length = offset;
/* TB data */
offset = dissect_tb_data(tvb, pinfo, tree, offset, p_fp_info, &mac_fdd_fach_handle, data);
/* New IE flags (if it looks as though they are present) */
if ((p_fp_info->release == 7) &&
(tvb_reported_length_remaining(tvb, offset) > 2)) {
guint8 flags = tvb_get_guint8(tvb, offset);
guint8 aoa_present = flags & 0x01;
offset++;
if (aoa_present) {
proto_tree_add_item(tree, hf_fp_angle_of_arrival, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}
}
if (preferences_header_checksum) {
verify_header_crc(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
/**************************/
/* Dissect a DSCH channel */
static void
dissect_dsch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info)
{
gboolean is_control_frame;
/* Header CRC */
proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
}
else {
guint8 cfn;
guint header_length = 0;
/* DATA */
/* CFN */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, "CFN=%03u ", cfn);
/* TFI */
proto_tree_add_item(tree, hf_fp_tfi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Other fields depend upon release & FDD/TDD settings */
if (((p_fp_info->release == 99) || (p_fp_info->release == 4)) &&
(p_fp_info->channel == CHANNEL_DSCH_FDD)) {
/* Power offset */
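/* 0.25 dB steps starting at -32 dB: raw 0..255 maps to -32.0 .. +31.75 */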
proto_tree_add_float(tree, hf_fp_power_offset, tvb, offset, 1,
(float)(-32.0) +
((float)(int)(tvb_get_guint8(tvb, offset)) * (float)(0.25)));
offset++;
/* Code number */
proto_tree_add_item(tree, hf_fp_code_number, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Spreading Factor (3 bits) */
proto_tree_add_item(tree, hf_fp_spreading_factor, tvb, offset, 1, ENC_BIG_ENDIAN);
/* MC info (4 bits)*/
proto_tree_add_item(tree, hf_fp_mc_info, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Last bit of this byte is spare */
offset++;
}
else {
/* Normal case */
/* PDSCH Set Id */
proto_tree_add_item(tree, hf_fp_pdsch_set_id, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Transmit power level */
proto_tree_add_float(tree, hf_fp_transmit_power_level, tvb, offset, 1,
(float)(int)(tvb_get_guint8(tvb, offset)) / 10);
offset++;
}
header_length = offset;
/* TB data */
offset = dissect_tb_data(tvb, pinfo, tree, offset, p_fp_info, NULL, NULL);
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
/**************************/
/* Dissect a USCH channel */
static void
dissect_usch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info)
{
gboolean is_control_frame;
/* Header CRC */
proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
}
else {
guint cfn;
guint16 rx_timing_deviation;
proto_item *rx_timing_deviation_ti;
guint header_length = 0;
/* DATA */
/* CFN */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, "CFN=%03u ", cfn);
/* TFI */
proto_tree_add_item(tree, hf_fp_usch_tfi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Rx Timing Deviation */
rx_timing_deviation = tvb_get_guint8(tvb, offset);
rx_timing_deviation_ti = proto_tree_add_item(tree, hf_fp_rx_timing_deviation,
tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
header_length = offset;
/* TB data */
offset = dissect_tb_data(tvb, pinfo, tree, offset, p_fp_info, NULL, NULL);
/* QE */
proto_tree_add_item(tree, hf_fp_quality_estimate, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* CRCIs */
offset = dissect_crci_bits(tvb, pinfo, tree, p_fp_info, offset);
/* New IEs */
if ((p_fp_info->release == 7) &&
(tvb_reported_length_remaining(tvb, offset) > 2)) {
guint8 flags = tvb_get_guint8(tvb, offset);
guint8 bits_extended = flags & 0x01;
offset++;
if (bits_extended) {
guint8 extra_bits = tvb_get_guint8(tvb, offset) & 0x03;
proto_item_append_text(rx_timing_deviation_ti,
" (extended to %u)",
(rx_timing_deviation << 2) | extra_bits);
}
offset++;
}
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
/**************************/
/* Dissect a PCH channel */
static void
dissect_pch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info, void *data)
{
gboolean is_control_frame;
guint16 pch_cfn;
gboolean paging_indication;
guint16 header_crc = 0;
proto_item * header_crc_pi = NULL;
/* Header CRC */
header_crc = tvb_get_bits8(tvb, 0, 7);
header_crc_pi = proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
/* For control frame the header CRC is actually frame CRC covering all
* bytes except the first */
if (preferences_header_checksum) {
verify_control_frame_crc(tvb, pinfo, header_crc_pi, header_crc);
}
}
else {
guint header_length = 0;
/* DATA */
/* 12-bit CFN value */
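/* CFN occupies the top 12 bits of these two bytes, hence the mask/shift
 * and the single-byte advance (the low nibble belongs to the fields
 * that follow) */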
proto_tree_add_item(tree, hf_fp_pch_cfn, tvb, offset, 2, ENC_BIG_ENDIAN);
pch_cfn = (tvb_get_ntohs(tvb, offset) & 0xfff0) >> 4;
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, "CFN=%04u ", pch_cfn);
/* Paging indication */
proto_tree_add_item(tree, hf_fp_pch_pi, tvb, offset, 1, ENC_BIG_ENDIAN);
paging_indication = tvb_get_guint8(tvb, offset) & 0x01;
offset++;
/* 5-bit TFI */
proto_tree_add_item(tree, hf_fp_pch_tfi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
header_length = offset;
/* Optional paging indications */
if (paging_indication) {
proto_item *ti;
ti = proto_tree_add_item(tree, hf_fp_paging_indication_bitmap, tvb,
offset,
(p_fp_info->paging_indications+7) / 8,
ENC_NA);
proto_item_append_text(ti, " (%u bits)", p_fp_info->paging_indications);
offset += ((p_fp_info->paging_indications+7) / 8);
}
/* TB data */
offset = dissect_tb_data(tvb, pinfo, tree, offset, p_fp_info, &mac_fdd_pch_handle, data);
if (preferences_header_checksum) {
verify_header_crc(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
/**************************/
/* Dissect a CPCH channel */
static void
dissect_cpch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info)
{
gboolean is_control_frame;
/* Header CRC */
proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
}
else {
guint cfn;
guint header_length = 0;
/* DATA */
/* CFN */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, "CFN=%03u ", cfn);
/* TFI */
proto_tree_add_item(tree, hf_fp_cpch_tfi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Propagation delay */
proto_tree_add_uint(tree, hf_fp_propagation_delay, tvb, offset, 1,
tvb_get_guint8(tvb, offset) * 3);
offset++;
header_length = offset; /* XXX this might be wrong */
/* TB data */
offset = dissect_tb_data(tvb, pinfo, tree, offset, p_fp_info, NULL, NULL);
/* CRCIs */
offset = dissect_crci_bits(tvb, pinfo, tree, p_fp_info, offset);
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
/**************************/
/* Dissect a BCH channel */
static void
dissect_bch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info)
{
gboolean is_control_frame;
/* Header CRC */
proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
}
}
/********************************/
/* Dissect an IUR DSCH channel */
static void
dissect_iur_dsch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info)
{
gboolean is_control_frame;
/* Header CRC */
proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
}
else {
/* TODO: DATA */
}
}
/************************/
/* DCH control messages */
static int
dissect_dch_timing_adjustment(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, int offset)
{
guint8 control_cfn;
gint16 toa;
proto_item *toa_ti;
/* CFN control */
control_cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* ToA */
toa = tvb_get_ntohs(tvb, offset);
toa_ti = proto_tree_add_item(tree, hf_fp_toa, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
/* ToA is in units of 1/8 ms; divide as float so the fraction is kept
 * (toa/8 was integer division and truncated it) */
expert_add_info_format(pinfo, toa_ti, &ei_fp_timing_adjustmentment_reported, "Timing adjustment reported (%f ms)", ((float)toa) / 8);
col_append_fstr(pinfo->cinfo, COL_INFO,
" CFN = %u, ToA = %d", control_cfn, toa);
return offset;
}
static int
dissect_dch_rx_timing_deviation(packet_info *pinfo, proto_tree *tree,
tvbuff_t *tvb, int offset,
struct fp_info *p_fp_info)
{
guint16 timing_deviation;
gint timing_deviation_chips;
proto_item *timing_deviation_ti;
/* CFN control */
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Rx Timing Deviation */
timing_deviation = tvb_get_guint8(tvb, offset);
timing_deviation_ti = proto_tree_add_item(tree, hf_fp_dch_rx_timing_deviation, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* May be extended in R7, but in this case there are at least 2 bytes remaining */
if ((p_fp_info->release == 7) &&
(tvb_reported_length_remaining(tvb, offset) >= 2)) {
/* New IE flags */
guint64 extended_bits_present;
guint64 e_rucch_present;
/* Read flags */
proto_tree_add_bits_ret_val(tree, hf_fp_e_rucch_present, tvb,
offset*8 + 6, 1, &e_rucch_present, ENC_BIG_ENDIAN);
proto_tree_add_bits_ret_val(tree, hf_fp_extended_bits_present, tvb,
offset*8 + 7, 1, &extended_bits_present, ENC_BIG_ENDIAN);
offset++;
/* Optional E-RUCCH */
if (e_rucch_present) {
/* Value of bit_offset depends upon division type */
int bit_offset;
switch (p_fp_info->division) {
case Division_TDD_384:
bit_offset = 6;
break;
case Division_TDD_768:
bit_offset = 5;
break;
default:
{
proto_tree_add_expert(tree, pinfo, &ei_fp_expecting_tdd, tvb, 0, 0);
bit_offset = 6;
}
}
proto_tree_add_item(tree, hf_fp_dch_e_rucch_flag, tvb, offset, 1, ENC_BIG_ENDIAN);
proto_tree_add_bits_item(tree, hf_fp_dch_e_rucch_flag, tvb,
offset*8 + bit_offset, 1, ENC_BIG_ENDIAN);
}
/* Timing deviation may be extended by another:
- 1 bits (3.84 TDD) OR
- 2 bits (7.68 TDD)
*/
if (extended_bits_present) {
guint8 extra_bits;
guint bits_to_extend;
switch (p_fp_info->division) {
case Division_TDD_384:
bits_to_extend = 1;
break;
case Division_TDD_768:
bits_to_extend = 2;
break;
default:
/* TODO: report unexpected division type */
bits_to_extend = 1;
break;
}
extra_bits = tvb_get_guint8(tvb, offset) &
((bits_to_extend == 1) ? 0x01 : 0x03);
timing_deviation = (extra_bits << 8) | (timing_deviation);
proto_item_append_text(timing_deviation_ti,
" (extended to 0x%x)",
timing_deviation);
proto_tree_add_bits_item(tree, hf_fp_extended_bits, tvb,
offset*8 + (8-bits_to_extend), bits_to_extend, ENC_BIG_ENDIAN);
offset++;
}
}
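/* Raw deviation is in steps of 4 chips with a -1024 chip offset,
 * so e.g. a raw value of 256 corresponds to 0 chips */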
timing_deviation_chips = (timing_deviation*4) - 1024;
proto_item_append_text(timing_deviation_ti, " (%d chips)",
timing_deviation_chips);
col_append_fstr(pinfo->cinfo, COL_INFO, " deviation = %u (%d chips)",
timing_deviation, timing_deviation_chips);
return offset;
}
static int
dissect_dch_dl_synchronisation(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, int offset)
{
/* CFN control */
guint cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, " CFN = %u", cfn);
return offset;
}
static int
dissect_dch_ul_synchronisation(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, int offset)
{
guint8 cfn;
gint16 toa;
/* CFN control */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* ToA */
toa = tvb_get_ntohs(tvb, offset);
proto_tree_add_item(tree, hf_fp_toa, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, " CFN = %u, ToA = %d",
cfn, toa);
return offset;
}
static int
dissect_dch_outer_loop_power_control(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, int offset)
{
/* UL SIR target */
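/* Encoded in 0.1 dB steps from -8.2 dB: raw 0..255 => -8.2 .. +17.3 dB */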
float target = (float)-8.2 + ((float)0.1 * (float)(int)(tvb_get_guint8(tvb, offset)));
proto_tree_add_float(tree, hf_fp_ul_sir_target, tvb, offset, 1, target);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, " UL SIR Target = %f", target);
return offset;
}
static int
dissect_dch_dl_node_synchronisation(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, int offset)
{
return dissect_common_dl_node_synchronisation(pinfo, tree, tvb, offset);
}
static int
dissect_dch_ul_node_synchronisation(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, int offset)
{
return dissect_common_ul_node_synchronisation(pinfo, tree, tvb, offset);
}
static int
dissect_dch_radio_interface_parameter_update(proto_tree *tree, packet_info *pinfo _U_, tvbuff_t *tvb, int offset)
{
int n;
guint8 value;
/* Show defined flags in these 2 bytes */
for (n=4; n >= 0; n--) {
proto_tree_add_item(tree, hf_fp_radio_interface_parameter_update_flag[n], tvb, offset, 2, ENC_BIG_ENDIAN);
}
offset += 2;
/* CFN */
proto_tree_add_item(tree, hf_fp_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* DPC mode */
proto_tree_add_item(tree, hf_fp_dpc_mode, tvb, offset, 1, ENC_BIG_ENDIAN);
/* TPC PO */
proto_tree_add_item(tree, hf_fp_tpc_po, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Multiple RL sets indicator */
proto_tree_add_item(tree, hf_fp_multiple_rl_set_indicator, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 2;
/* MAX_UE_TX_POW */
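/* 7-bit value with a -55 offset: raw 0..127 => -55 .. +72 dBm */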
value = (tvb_get_guint8(tvb, offset) & 0x7f);
proto_tree_add_int(tree, hf_fp_max_ue_tx_pow, tvb, offset, 1, -55 + value);
offset++;
return offset;
}
static int
dissect_dch_timing_advance(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset, struct fp_info *p_fp_info)
{
guint8 cfn;
guint16 timing_advance;
proto_item *timing_advance_ti;
/* CFN control */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Timing Advance */
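/* 6-bit value in steps of 4 chips: raw 0..63 => 0..252 chips */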
timing_advance = (tvb_get_guint8(tvb, offset) & 0x3f) * 4;
timing_advance_ti = proto_tree_add_uint(tree, hf_fp_timing_advance, tvb, offset, 1, timing_advance);
offset++;
if ((p_fp_info->release == 7) &&
(tvb_reported_length_remaining(tvb, offset) > 0)) {
/* New IE flags */
guint8 flags = tvb_get_guint8(tvb, offset);
guint8 extended_bits = flags & 0x01;
offset++;
if (extended_bits) {
guint8 extra_bit = tvb_get_guint8(tvb, offset) & 0x01;
proto_item_append_text(timing_advance_ti, " (extended to %u)",
(timing_advance << 1) | extra_bit);
}
offset++;
}
col_append_fstr(pinfo->cinfo, COL_INFO, " CFN = %u, TA = %u",
cfn, timing_advance);
return offset;
}
static int
dissect_dch_tnl_congestion_indication(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, int offset)
{
guint64 status;
/* Congestion status */
proto_tree_add_bits_ret_val(tree, hf_fp_congestion_status, tvb,
offset*8 + 6, 2, &status, ENC_BIG_ENDIAN);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, " status = %s",
val_to_str_const((guint16)status, congestion_status_vals, "unknown"));
return offset;
}
/* DCH control frame */
static void
dissect_dch_control_frame(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb,
int offset, struct fp_info *p_fp_info)
{
/* Control frame type */
guint8 control_frame_type = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_dch_control_frame_type, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO,
val_to_str_const(control_frame_type,
dch_control_frame_type_vals, "Unknown"));
switch (control_frame_type) {
case DCH_TIMING_ADJUSTMENT:
/*offset =*/ dissect_dch_timing_adjustment(tree, pinfo, tvb, offset);
break;
case DCH_RX_TIMING_DEVIATION:
/*offset =*/ dissect_dch_rx_timing_deviation(pinfo, tree, tvb, offset, p_fp_info);
break;
case DCH_DL_SYNCHRONISATION:
/*offset =*/ dissect_dch_dl_synchronisation(tree, pinfo, tvb, offset);
break;
case DCH_UL_SYNCHRONISATION:
/*offset =*/ dissect_dch_ul_synchronisation(tree, pinfo, tvb, offset);
break;
case DCH_OUTER_LOOP_POWER_CONTROL:
/*offset =*/ dissect_dch_outer_loop_power_control(tree, pinfo, tvb, offset);
break;
case DCH_DL_NODE_SYNCHRONISATION:
/*offset =*/ dissect_dch_dl_node_synchronisation(tree, pinfo, tvb, offset);
break;
case DCH_UL_NODE_SYNCHRONISATION:
/*offset =*/ dissect_dch_ul_node_synchronisation(tree, pinfo, tvb, offset);
break;
case DCH_RADIO_INTERFACE_PARAMETER_UPDATE:
/*offset =*/ dissect_dch_radio_interface_parameter_update(tree, pinfo, tvb, offset);
break;
case DCH_TIMING_ADVANCE:
/*offset =*/ dissect_dch_timing_advance(tree, pinfo, tvb, offset, p_fp_info);
break;
case DCH_TNL_CONGESTION_INDICATION:
/*offset =*/ dissect_dch_tnl_congestion_indication(tree, pinfo, tvb, offset);
break;
}
/* Spare Extension */
/* dissect_spare_extension_and_crc(tvb, pinfo, tree, 0, offset);
*/
}
/*******************************/
/* Dissect a DCH channel */
static void
dissect_dch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info, void *data)
{
gboolean is_control_frame;
guint8 cfn;
guint header_length = 0;
guint16 header_crc = 0;
proto_item * header_crc_pi = NULL;
/* Header CRC */
header_crc = tvb_get_bits8(tvb, 0, 7);
header_crc_pi = proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO,
is_control_frame ? " [Control] " :
((p_fp_info->is_uplink) ? " [ULData] " :
" [DLData] " ));
if (is_control_frame) {
/* DCH control frame */
dissect_dch_control_frame(tree, pinfo, tvb, offset, p_fp_info);
/* For control frame the header CRC is actually frame CRC covering all
* bytes except the first */
if (preferences_header_checksum) {
verify_control_frame_crc(tvb, pinfo, header_crc_pi, header_crc);
}
} else {
/************************/
/* DCH data here */
int chan;
/* CFN */
proto_tree_add_item(tree, hf_fp_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
cfn = tvb_get_guint8(tvb, offset);
offset++;
col_append_fstr(pinfo->cinfo, COL_INFO, "CFN=%03u ", cfn);
/* One TFI for each channel */
for (chan=0; chan < p_fp_info->num_chans; chan++) {
proto_tree_add_item(tree, hf_fp_tfi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
}
header_length = offset;
/* Dissect TB data */
offset = dissect_tb_data(tvb, pinfo, tree, offset, p_fp_info, &mac_fdd_dch_handle, data);
/* QE (uplink only) */
if (p_fp_info->is_uplink) {
proto_tree_add_item(tree, hf_fp_quality_estimate, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
}
/* CRCI bits (uplink only) */
if (p_fp_info->is_uplink) {
offset = dissect_crci_bits(tvb, pinfo, tree, p_fp_info, offset);
}
if (preferences_header_checksum) {
verify_header_crc(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare extension and payload CRC (optional) */
dissect_spare_extension_and_crc(tvb, pinfo, tree,
p_fp_info->dch_crc_present, offset, header_length);
}
}
/**********************************/
/* Dissect an E-DCH channel */
static void
dissect_e_dch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
gboolean is_common, rlc_info *rlcinf,
void *data)
{
gboolean is_control_frame;
guint8 number_of_subframes;
guint8 cfn;
int n;
struct edch_t1_subframe_info subframes[16];
guint16 header_crc = 0;
proto_item * header_crc_pi = NULL;
guint header_length = 0;
if (p_fp_info->edch_type == 1) {
col_append_str(pinfo->cinfo, COL_INFO, " (T2)");
}
/* Header CRC */
/* The 11-bit header CRC straddles a byte boundary, which a simple
 * field bitmask can't express, so assemble it manually */
header_crc = (tvb_get_bits8(tvb, offset*8, 7) << 4) + tvb_get_bits8(tvb, offset*8+8, 4);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
/* DCH control frame */
/* For control frame the header CRC is actually frame CRC covering all
* bytes except the first */
header_crc = tvb_get_bits8(tvb, 0, 7);
header_crc_pi = proto_tree_add_item(tree, hf_fp_header_crc, tvb, 0, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(tree, hf_fp_ft, tvb, 0, 1, ENC_BIG_ENDIAN);
offset++;
if (preferences_header_checksum) {
verify_control_frame_crc(tvb, pinfo, header_crc_pi, header_crc);
}
dissect_dch_control_frame(tree, pinfo, tvb, offset, p_fp_info);
}
else {
/********************************/
/* E-DCH data here */
guint bit_offset = 0;
guint total_pdus = 0;
guint total_bits = 0;
gboolean dissected = FALSE;
header_crc_pi = proto_tree_add_uint_format(tree, hf_fp_edch_header_crc, tvb,
offset, 2, header_crc,
"%u%u%u%u%u%u%u.%u%u%u%u.... = E-DCH Header CRC: 0x%x",
(header_crc >> 10) & 1,
(header_crc >> 9) & 1,
(header_crc >> 8) & 1,
(header_crc >> 7) & 1,
(header_crc >> 6) & 1,
(header_crc >> 5) & 1,
(header_crc >> 4) & 1,
(header_crc >> 3) & 1,
(header_crc >> 2) & 1,
(header_crc >> 1) & 1,
(header_crc >> 0) & 1, header_crc);
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* FSN */
proto_tree_add_item(tree, hf_fp_edch_fsn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Number of subframes.
This was 3 bits in early releases; it is 4 bits, offset by 1, in later releases */
if ((p_fp_info->release >= 6) &&
((p_fp_info->release_year > 2005) ||
((p_fp_info->release_year == 2005) && (p_fp_info->release_month >= 9)))) {
/* Use 4 bits plus offset of 1 */
number_of_subframes = (tvb_get_guint8(tvb, offset) & 0x0f) + 1;
}
else {
/* Use 3 bits only */
number_of_subframes = (tvb_get_guint8(tvb, offset) & 0x07);
}
proto_tree_add_uint(tree, hf_fp_edch_number_of_subframes, tvb, offset, 1,
number_of_subframes);
offset++;
/* CFN */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Remainder of T2 or common data frames differ here... */
if (p_fp_info->edch_type == 1) {
dissect_e_dch_t2_or_common_channel_info(tvb, pinfo, tree, offset, p_fp_info,
number_of_subframes,
is_common, header_crc,
header_crc_pi, data);
return;
}
/* EDCH subframe header list */
for (n=0; n < number_of_subframes; n++) {
int i;
int start_offset = offset;
proto_item *subframe_header_ti;
proto_tree *subframe_header_tree;
/* Add subframe header subtree */
subframe_header_ti = proto_tree_add_string_format(tree, hf_fp_edch_subframe_header, tvb, offset, 0,
"", "Subframe");
subframe_header_tree = proto_item_add_subtree(subframe_header_ti, ett_fp_edch_subframe_header);
/* Number of HARQ Retransmissions */
proto_tree_add_item(subframe_header_tree, hf_fp_edch_harq_retransmissions, tvb,
offset, 1, ENC_BIG_ENDIAN);
/* Subframe number */
subframes[n].subframe_number = (tvb_get_guint8(tvb, offset) & 0x07);
proto_tree_add_bits_item(subframe_header_tree, hf_fp_edch_subframe_number, tvb,
offset*8+5, 1, ENC_BIG_ENDIAN);
offset++;
/* Number of MAC-es PDUs */
subframes[n].number_of_mac_es_pdus = (tvb_get_guint8(tvb, offset) & 0xf0) >> 4;
proto_tree_add_item(subframe_header_tree, hf_fp_edch_number_of_mac_es_pdus,
tvb, offset, 1, ENC_BIG_ENDIAN);
bit_offset = 4;
proto_item_append_text(subframe_header_ti, " %u header (%u MAC-es PDUs)",
subframes[n].subframe_number,
subframes[n].number_of_mac_es_pdus);
/* Details of each MAC-es PDU */
for (i=0; i < subframes[n].number_of_mac_es_pdus; i++) {
guint64 ddi;
guint64 n_pdus; /* Number of MAC-d PDUs, not their size */
proto_item *ddi_ti;
gint ddi_size = -1;
int p;
/* DDI (6 bits) */
ddi_ti = proto_tree_add_bits_ret_val(subframe_header_tree, hf_fp_edch_ddi, tvb,
offset*8 + bit_offset, 6, &ddi, ENC_BIG_ENDIAN);
if (rlcinf) {
rlcinf->rbid[i] = (guint8)ddi;
}
/********************************/
/* Look up data in higher layers*/
/* Look up the size from this DDI value */
for (p=0; p < p_fp_info->no_ddi_entries; p++) {
if (ddi == p_fp_info->edch_ddi[p]) {
ddi_size = p_fp_info->edch_macd_pdu_size[p];
break;
}
}
if (ddi_size == -1) {
expert_add_info_format(pinfo, ddi_ti, &ei_fp_ddi_not_defined, "DDI %u not defined for this UE!", (guint)ddi);
return;
}
else {
proto_item_append_text(ddi_ti, " (%d bits)", ddi_size);
}
subframes[n].ddi[i] = (guint8)ddi;
bit_offset += 6;
/* Number of MAC-d PDUs (6 bits) */
proto_tree_add_bits_ret_val(subframe_header_tree, hf_fp_edch_number_of_mac_d_pdus, tvb,
offset*8 + bit_offset, 6, &n_pdus, ENC_BIG_ENDIAN);
subframes[n].number_of_mac_d_pdus[i] = (guint8)n_pdus;
bit_offset += 6;
}
offset += ((bit_offset+7)/8);
/* Tree should cover entire subframe header */
proto_item_set_len(subframe_header_ti, offset - start_offset);
}
header_length = offset;
/* EDCH subframes */
for (n=0; n < number_of_subframes; n++) {
int i;
proto_item *subframe_ti;
proto_tree *subframe_tree;
guint bits_in_subframe = 0;
guint mac_d_pdus_in_subframe = 0;
guint lchid=0; /* Logical channel id */
umts_mac_info *macinf;
bit_offset = 0;
macinf = (umts_mac_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0);
/* Add subframe subtree */
subframe_ti = proto_tree_add_string_format(tree, hf_fp_edch_subframe, tvb, offset, 0,
"", "Subframe %u data", subframes[n].subframe_number);
subframe_tree = proto_item_add_subtree(subframe_ti, ett_fp_edch_subframe);
for (i=0; i < subframes[n].number_of_mac_es_pdus; i++) {
int m;
guint16 size = 0;
/* guint8 tsn; */
guint send_size;
proto_item *ti;
int macd_idx;
proto_tree *maces_tree = NULL;
/** TODO: Merge these two loops? **/
/* Look up mac-d pdu size for this ddi */
for (m=0; m < p_fp_info->no_ddi_entries; m++) {
if (subframes[n].ddi[i] == p_fp_info->edch_ddi[m]) {
size = p_fp_info->edch_macd_pdu_size[m];
break;
}
}
/* Look up logicalchannel id for this DDI value */
for (m=0; m < p_fp_info->no_ddi_entries; m++) {
if (subframes[n].ddi[i] == p_fp_info->edch_ddi[m]) {
lchid = p_fp_info->edch_lchId[m];
break;
}
}
if (m == p_fp_info->no_ddi_entries) {
/* Not found. Oops */
expert_add_info(pinfo, NULL, &ei_fp_unable_to_locate_ddi_entry);
return;
}
/* Send the MAC-d PDUs together as one MAC-es PDU */
send_size = size * subframes[n].number_of_mac_d_pdus[i];
/* 2 bits spare */
proto_tree_add_item(subframe_tree, hf_fp_edch_pdu_padding, tvb,
offset + (bit_offset/8),
1, ENC_BIG_ENDIAN);
bit_offset += 2;
/* TSN */
/* tsn = (tvb_get_guint8(tvb, offset + (bit_offset/8)) & 0x3f); */
proto_tree_add_item(subframe_tree, hf_fp_edch_tsn, tvb,
offset + (bit_offset/8),
1, ENC_BIG_ENDIAN);
bit_offset += 6;
/* PDU */
if (subframe_tree) {
ti = proto_tree_add_item(subframe_tree, hf_fp_edch_mac_es_pdu, tvb,
offset + (bit_offset/8),
((bit_offset % 8) + send_size + 7) / 8,
ENC_NA);
proto_item_append_text(ti, " (%u * %u = %u bits, PDU %d)",
size, subframes[n].number_of_mac_d_pdus[i],
send_size, n);
maces_tree = proto_item_add_subtree(ti, ett_fp_edch_maces);
}
for (macd_idx = 0; macd_idx < subframes[n].number_of_mac_d_pdus[i]; macd_idx++) {
/* Only hand off to MAC if the per-frame MAC/RLC info is actually
 * present; both structs are dereferenced unconditionally below */
if (preferences_call_mac_dissectors && macinf && rlcinf /*&& !rlc_is_ciphered(pinfo)*/) {
tvbuff_t *next_tvb;
pinfo->fd->subnum = macd_idx; /* set subframe number to current TB */
/* create new TVB and pass further on */
next_tvb = tvb_new_subset(tvb, offset + bit_offset/8,
((bit_offset % 8) + size + 7) / 8, -1);
/*This was all previously stored in [0] rather than [macd_idx] and cur_tb wasn't updated!*/
/*Set up information needed for MAC and lower layers*/
macinf->content[macd_idx] = lchId_type_table[lchid]; /*Set the proper Content type for the mac layer.*/
macinf->lchid[macd_idx] = lchid;
rlcinf->mode[macd_idx] = lchId_rlc_map[lchid]; /* Set RLC mode by lchid to RLC_MODE map in nbap.h */
/* Set U-RNTI to the Communication Context ID signalled from NBAP */
rlcinf->urnti[macd_idx] = p_fp_info->com_context_id;
rlcinf->rbid[macd_idx] = lchid; /*subframes[n].ddi[i];*/ /*Save the DDI value for RLC*/
/*g_warning("========Setting RBID:%d for lchid:%d", subframes[n].ddi[i], lchid);*/
/* rlcinf->mode[0] = RLC_AM;*/
rlcinf->li_size[macd_idx] = RLC_LI_7BITS;
#if 0
/*If this entry exists, SECRUITY_MODE is completed*/
if ( rrc_ciph_inf && g_tree_lookup(rrc_ciph_inf, GINT_TO_POINTER((gint)p_fp_info->com_context_id)) ) {
rlcinf->ciphered[macd_idx] = TRUE;
} else {
rlcinf->ciphered[macd_idx] = FALSE;
}
#endif
rlcinf->ciphered[macd_idx] = FALSE;
rlcinf->deciphered[macd_idx] = FALSE;
p_fp_info->cur_tb = macd_idx; /*Set the transport block index (NOTE: This and not subnum is used in MAC dissector!)*/
/* TODO: use maces_tree? */
call_dissector_with_data(mac_fdd_edch_handle, next_tvb, pinfo, top_level_tree, data);
dissected = TRUE;
}
else {
/* Just add as a MAC-d PDU */
proto_tree_add_item(maces_tree, hf_fp_mac_d_pdu, tvb,
offset + (bit_offset/8),
((bit_offset % 8) + size + 7) / 8,
ENC_NA);
}
bit_offset += size;
}
bits_in_subframe += send_size;
mac_d_pdus_in_subframe += subframes[n].number_of_mac_d_pdus[i];
/* Pad out to next byte */
if (bit_offset % 8) {
bit_offset += (8 - (bit_offset % 8));
}
}
if (tree) {
/* Tree should cover entire subframe */
proto_item_set_len(subframe_ti, bit_offset/8);
/* Append summary info to subframe label */
proto_item_append_text(subframe_ti, " (%u bits in %u MAC-d PDUs)",
bits_in_subframe, mac_d_pdus_in_subframe);
}
total_pdus += mac_d_pdus_in_subframe;
total_bits += bits_in_subframe;
offset += (bit_offset/8);
}
/* Report number of subframes in info column
* do this only if no other dissector was called */
if (dissected == FALSE) {
col_append_fstr(pinfo->cinfo, COL_INFO,
" CFN = %03u (%u bits in %u pdus in %u subframes)",
cfn, total_bits, total_pdus, number_of_subframes);
}
/* Add data summary to info column */
/*col_append_fstr(pinfo->cinfo, COL_INFO, " (%u bytes in %u SDUs in %u MAC-is PDUs in %u subframes)",
total_bytes, macis_sdus_found, macis_pdus, number_of_subframes);*/
if (preferences_header_checksum) {
verify_header_crc_edch(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare extension and payload CRC (optional) */
dissect_spare_extension_and_crc(tvb, pinfo, tree,
p_fp_info->dch_crc_present, offset, header_length);
}
}
/* Dissect the remainder of the T2 or common frame that differs from T1 */
static void
dissect_e_dch_t2_or_common_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
int number_of_subframes,
gboolean is_common,
guint16 header_crc,
proto_item * header_crc_pi,
void *data)
{
int n;
int pdu_no;
guint64 total_macis_sdus;
guint16 macis_sdus_found = 0;
guint16 macis_pdus = 0;
gboolean F = TRUE; /* Continue the loop if we get an E-RNTI indication... */
gint bit_offset;
proto_item *subframe_macis_descriptors_ti = NULL;
static struct edch_t2_subframe_info subframes[16];
guint header_length = 0;
/* User Buffer size */
proto_tree_add_bits_item(tree, hf_fp_edch_user_buffer_size, tvb, offset*8,
18, ENC_BIG_ENDIAN);
offset += 2;
/* Spare is in-between... */
/* Total number of MAC-is SDUs */
proto_tree_add_bits_ret_val(tree, hf_fp_edch_no_macid_sdus, tvb, offset*8+4,
12, &total_macis_sdus, ENC_BIG_ENDIAN);
offset += 2;
if (is_common) {
/* E-RNTI */
proto_tree_add_item(tree, hf_fp_edch_e_rnti, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}
bit_offset = offset*8;
/* EDCH subframe header list */
for (n=0; n < number_of_subframes; n++) {
guint64 subframe_number;
guint64 no_of_macis_pdus;
proto_item *subframe_header_ti;
proto_tree *subframe_header_tree;
/* Add subframe header subtree */
subframe_header_ti = proto_tree_add_string_format(tree, hf_fp_edch_subframe_header, tvb, offset, 0,
"", "Subframe");
subframe_header_tree = proto_item_add_subtree(subframe_header_ti, ett_fp_edch_subframe_header);
/* Spare bit */
bit_offset++;
if (!is_common) {
/* Number of HARQ Retransmissions */
proto_tree_add_item(subframe_header_tree, hf_fp_edch_harq_retransmissions, tvb,
bit_offset/8, 1, ENC_BIG_ENDIAN);
bit_offset += 4;
}
/* Subframe number */
proto_tree_add_bits_ret_val(subframe_header_tree, hf_fp_edch_subframe_number, tvb,
bit_offset, 3, &subframe_number, ENC_BIG_ENDIAN);
subframes[n].subframe_number = (guint8)subframe_number;
bit_offset += 3;
/* Number of MAC-is PDUs */
proto_tree_add_bits_ret_val(subframe_header_tree, hf_fp_edch_number_of_mac_is_pdus, tvb,
bit_offset, 4, &no_of_macis_pdus, ENC_BIG_ENDIAN);
bit_offset += 4;
subframes[n].number_of_mac_is_pdus = (guint8)no_of_macis_pdus;
macis_pdus += subframes[n].number_of_mac_is_pdus;
/* Next 4 bits are spare for T2*/
if (!is_common) {
bit_offset += 4;
}
/* Show summary in root */
proto_item_append_text(subframe_header_ti, " (SFN %u, %u MAC-is PDUs)",
subframes[n].subframe_number, subframes[n].number_of_mac_is_pdus);
proto_item_set_len(subframe_header_ti, is_common ? 1 : 2);
}
offset = bit_offset / 8;
/* MAC-is PDU descriptors for each subframe follow */
for (n=0; n < number_of_subframes; n++) {
proto_tree *subframe_macis_descriptors_tree;
/* Add subframe header subtree */
subframe_macis_descriptors_ti = proto_tree_add_string_format(tree, hf_fp_edch_macis_descriptors, tvb, offset, 0,
"", "MAC-is descriptors (SFN %u)", subframes[n].subframe_number);
proto_item_set_len(subframe_macis_descriptors_ti, subframes[n].number_of_mac_is_pdus*2);
subframe_macis_descriptors_tree = proto_item_add_subtree(subframe_macis_descriptors_ti,
ett_fp_edch_macis_descriptors);
/* Find a sequence of descriptors for each MAC-is PDU in this subframe */
for (pdu_no=0; pdu_no < subframes[n].number_of_mac_is_pdus; pdu_no++) {
proto_item *f_ti = NULL;
subframes[n].number_of_mac_is_sdus[pdu_no] = 0;
do {
/* Check we haven't gone past the limit */
if (macis_sdus_found++ > total_macis_sdus) {
expert_add_info_format(pinfo, f_ti, &ei_fp_mac_is_sdus_miscount, "Found too many (%u) MAC-is SDUs - header said there were %u", macis_sdus_found, (guint16)total_macis_sdus);
}
/* Also stop before the per-PDU descriptor arrays (which the assert in
 * the PDU loop below implies hold MAX_MAC_FRAMES entries) can be
 * overrun by a malformed frame */
if (subframes[n].number_of_mac_is_sdus[pdu_no] >= MAX_MAC_FRAMES) {
return;
}
/* LCH-ID */
subframes[n].mac_is_lchid[pdu_no][subframes[n].number_of_mac_is_sdus[pdu_no]] = (tvb_get_guint8(tvb, offset) & 0xf0) >> 4;
proto_tree_add_item(subframe_macis_descriptors_tree, hf_fp_edch_macis_lchid, tvb, offset, 1, ENC_BIG_ENDIAN);
if (subframes[n].mac_is_lchid[pdu_no][subframes[n].number_of_mac_is_sdus[pdu_no]] == 15) {
proto_item *ti;
/* 4 bits of spare */
offset++;
/* E-RNTI */
ti = proto_tree_add_item(tree, hf_fp_edch_e_rnti, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
/* This is only allowed if:
- it's the common case AND
- it's the first descriptor */
if (!is_common) {
expert_add_info(pinfo, ti, &ei_fp_e_rnti_t2_edch_frames);
}
if (subframes[n].number_of_mac_is_sdus[pdu_no] > 0) {
expert_add_info(pinfo, ti, &ei_fp_e_rnti_first_entry);
}
continue;
}
/* Length */
subframes[n].mac_is_length[pdu_no][subframes[n].number_of_mac_is_sdus[pdu_no]] = (tvb_get_ntohs(tvb, offset) & 0x0ffe) >> 1;
proto_tree_add_item(subframe_macis_descriptors_tree, hf_fp_edch_macis_length, tvb, offset, 2, ENC_BIG_ENDIAN);
offset++;
/* Flag */
F = tvb_get_guint8(tvb, offset) & 0x01;
f_ti = proto_tree_add_item(subframe_macis_descriptors_tree, hf_fp_edch_macis_flag, tvb, offset, 1, ENC_BIG_ENDIAN);
subframes[n].number_of_mac_is_sdus[pdu_no]++;
offset++;
} while (F == 0);
}
}
/* Check overall count of MAC-is SDUs */
if (macis_sdus_found != total_macis_sdus) {
expert_add_info_format(pinfo, subframe_macis_descriptors_ti, &ei_fp_mac_is_sdus_miscount, "Frame contains %u MAC-is SDUs - header said there would be %u!", macis_sdus_found, (guint16)total_macis_sdus);
}
header_length = offset;
/* Now PDUs */
for (n=0; n < number_of_subframes; n++) {
/* MAC-is PDU */
for (pdu_no=0; pdu_no < subframes[n].number_of_mac_is_pdus; pdu_no++) {
int i;
guint length = 0;
umts_mac_is_info * mac_is_info = wmem_new(wmem_file_scope(), umts_mac_is_info);
mac_is_info->number_of_mac_is_sdus = subframes[n].number_of_mac_is_sdus[pdu_no];
DISSECTOR_ASSERT(subframes[n].number_of_mac_is_sdus[pdu_no] <= MAX_MAC_FRAMES);
for (i = 0; i < subframes[n].number_of_mac_is_sdus[pdu_no]; i++) {
mac_is_info->sdulength[i] = subframes[n].mac_is_length[pdu_no][i];
mac_is_info->lchid[i] = subframes[n].mac_is_lchid[pdu_no][i];
length += subframes[n].mac_is_length[pdu_no][i];
}
/* Call MAC for this PDU if configured to */
if (preferences_call_mac_dissectors) {
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, mac_is_info);
call_dissector_with_data(mac_fdd_edch_type2_handle, tvb_new_subset_remaining(tvb, offset), pinfo, top_level_tree, data);
}
else {
/* Still show data if not decoding as MAC PDU */
proto_tree_add_item(tree, hf_fp_edch_mac_is_pdu, tvb, offset, length, ENC_NA);
}
/* get_mac_tsn_size in packet-umts_mac.h, gets the global_mac_tsn_size preference in umts_mac.c */
if (get_mac_tsn_size() == MAC_TSN_14BITS) {
offset += length + 2; /* Plus 2 bytes for TSN 14 bits and SS 2 bit. */
} else {
offset += length + 1; /* Plus 1 byte for TSN 6 bits and SS 2 bit. */
}
}
}
if (preferences_header_checksum) {
verify_header_crc_edch(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare extension and payload CRC (optional) */
dissect_spare_extension_and_crc(tvb, pinfo, tree,
p_fp_info->dch_crc_present, offset, header_length);
}
/**********************************************************/
/* Dissect an HSDSCH channel */
/* The data format corresponds to the format */
/* described in R5 and R6, and frame type 1 in Release 7. */
static void
dissect_hsdsch_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info, void *data)
{
gboolean is_control_frame;
guint header_length = 0;
guint16 header_crc = 0;
proto_item * header_crc_pi = NULL;
/* Header CRC */
header_crc = tvb_get_bits8(tvb, 0, 7);
header_crc_pi = proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
/* For control frame the header CRC is actually frame CRC covering all
* bytes except the first */
if (preferences_header_checksum) {
verify_control_frame_crc(tvb, pinfo, header_crc_pi, header_crc);
}
}
else {
guint8 number_of_pdus;
guint16 pdu_length;
guint16 user_buffer_size;
int i;
umts_mac_info *macinf;
rlc_info *rlcinf;
rlcinf = (rlc_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0);
macinf = (umts_mac_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0);
/* Both structs are dereferenced unconditionally below, so bail out if
 * the expected per-frame info wasn't attached (e.g. the setup
 * signalling was never seen) */
if (!macinf || !rlcinf) {
return;
}
/**************************************/
/* HS-DCH data here (type 1 in R7) */
/* Frame Seq Nr */
if ((p_fp_info->release == 6) ||
(p_fp_info->release == 7)) {
guint8 frame_seq_no = (tvb_get_guint8(tvb, offset) & 0xf0) >> 4;
proto_tree_add_item(tree, hf_fp_frame_seq_nr, tvb, offset, 1, ENC_BIG_ENDIAN);
col_append_fstr(pinfo->cinfo, COL_INFO, " seqno=%u", frame_seq_no);
}
/* CmCH-PI */
proto_tree_add_item(tree, hf_fp_cmch_pi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* MAC-d PDU Length (13 bits) */
pdu_length = (tvb_get_ntohs(tvb, offset) >> 3);
proto_tree_add_item(tree, hf_fp_mac_d_pdu_len, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
macinf->pdu_len = pdu_length;
if ((p_fp_info->release == 6) ||
(p_fp_info->release == 7)) {
/* Flush bit */
proto_tree_add_item(tree, hf_fp_flush, tvb, offset-1, 1, ENC_BIG_ENDIAN);
/* FSN/DRT reset bit */
proto_tree_add_item(tree, hf_fp_fsn_drt_reset, tvb, offset-1, 1, ENC_BIG_ENDIAN);
}
/* Num of PDUs */
number_of_pdus = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_num_of_pdu, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* User buffer size */
user_buffer_size = tvb_get_ntohs(tvb, offset);
proto_tree_add_item(tree, hf_fp_user_buffer_size, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
header_length = offset;
/************************/
/*Configure the pdus*/
for (i=0;i<number_of_pdus && i<MIN(MAX_MAC_FRAMES, MAX_RLC_CHANS); i++) {
macinf->content[i] = hsdsch_macdflow_id_mac_content_map[p_fp_info->hsdsch_macflowd_id]; /* e.g. MAC_CONTENT_PS_DTCH */
macinf->lchid[i] = fake_lchid_macd_flow[p_fp_info->hsdsch_macflowd_id]; /* Faked logical channel id; 255 marks one that doesn't exist */
macinf->fake_chid[i] = TRUE;
macinf->macdflow_id[i] = p_fp_info->hsdsch_macflowd_id; /* Save the (zero-indexed) MACd-flow ID */
/*Figure out RLC_MODE based on MACd-flow-ID, basically MACd-flow-ID = 0 then it's SRB0 == UM else AM*/
rlcinf->mode[i] = hsdsch_macdflow_id_rlc_map[p_fp_info->hsdsch_macflowd_id];
/*Check if this is multiplexed (signaled by RRC)*/
if ( /*!rlc_is_ciphered(pinfo) &&*/ p_fp_info->hsdhsch_macfdlow_is_mux[p_fp_info->hsdsch_macflowd_id] ) {
macinf->ctmux[i] = TRUE;
} else if (p_fp_info->hsdsch_macflowd_id == 0) { /*MACd-flow = 0 is often SRB */
expert_add_info(pinfo, NULL, &ei_fp_maybe_srb);
} else {
macinf->ctmux[i] = FALSE; /* Either it's multiplexed but not signalled, or it's simply not multiplexed */
}
rlcinf->urnti[i] = p_fp_info->com_context_id;
rlcinf->li_size[i] = RLC_LI_7BITS;
rlcinf->deciphered[i] = FALSE;
rlcinf->ciphered[i] = FALSE;
rlcinf->rbid[i] = macinf->lchid[i];
#if 0
/*When a flow has been reconfigured rlc needs to be reset.
* This needs more work though since we must figure out when the re-configuration becomes
* active based on the CFN value
* */
/*Indicate we need to reset stream*/
if (p_fp_info->reset_frag) {
rlc_reset_channel(rlcinf->mode[i], macinf->lchid[i], p_fp_info->is_uplink, rlcinf->urnti[i] );
p_fp_info->reset_frag = FALSE;
}
#endif
}
/* MAC-d PDUs */
offset = dissect_macd_pdu_data(tvb, pinfo, tree, offset, pdu_length,
number_of_pdus, p_fp_info, data);
col_append_fstr(pinfo->cinfo, COL_INFO, " %ux%u-bit PDUs User-Buffer-Size=%u",
number_of_pdus, pdu_length, user_buffer_size);
/* Extra IEs (if there is room for them) */
if (((p_fp_info->release == 6) ||
(p_fp_info->release == 7)) &&
(tvb_reported_length_remaining(tvb, offset) > 2)) {
int n;
guint8 flags;
/* guint8 flag_bytes = 0; */
/* New IE flags */
do {
proto_item *new_ie_flags_ti;
proto_tree *new_ie_flags_tree;
guint ies_found = 0;
/* Add new IE flags subtree */
new_ie_flags_ti = proto_tree_add_string_format(tree, hf_fp_hsdsch_new_ie_flags, tvb, offset, 1,
"", "New IE flags");
new_ie_flags_tree = proto_item_add_subtree(new_ie_flags_ti, ett_fp_hsdsch_new_ie_flags);
/* Read next byte */
flags = tvb_get_guint8(tvb, offset);
/* flag_bytes++; */
/* Dissect individual bits */
for (n=0; n < 8; n++) {
proto_tree_add_item(new_ie_flags_tree, hf_fp_hsdsch_new_ie_flag[n], tvb, offset, 1, ENC_BIG_ENDIAN);
if ((flags >> (7-n)) & 0x01) {
ies_found++;
}
}
offset++;
proto_item_append_text(new_ie_flags_ti, " (%u IEs found)", ies_found);
/* Last bit set will indicate another flags byte follows... */
} while (0); /*((flags & 0x01) && (flag_bytes < 31));*/
if (1) /*(flags & 0x8) */ {
/* DRT is shown as mandatory in the diagram (3GPP TS 25.435 V6.3.0),
but the description below it states that
it should depend upon the first bit. The detailed description of
New IE flags doesn't agree, so treat as mandatory for now... */
proto_tree_add_item(tree, hf_fp_hsdsch_drt, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}
}
if (preferences_header_checksum) {
verify_header_crc(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
/******************************************/
/* Dissect an HSDSCH type 2 channel */
/* (introduced in Release 7) */
/* N.B. there is currently no support for */
/* frame type 3 (IuR only?) */
static void
dissect_hsdsch_type_2_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
void *data)
{
gboolean is_control_frame;
guint16 header_crc = 0;
proto_item * header_crc_pi = NULL;
guint16 header_length = 0;
/* Header CRC */
header_crc = tvb_get_bits8(tvb, 0, 7);
header_crc_pi = proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
/* For control frame the header CRC is actually frame CRC covering all
* bytes except the first */
if (preferences_header_checksum) {
verify_control_frame_crc(tvb, pinfo, header_crc_pi, header_crc);
}
}
else {
guint8 number_of_pdu_blocks;
gboolean drt_present = FALSE;
gboolean fach_present = FALSE;
guint16 user_buffer_size;
int n;
guint j;
#define MAX_PDU_BLOCKS 31
guint64 lchid[MAX_PDU_BLOCKS];
guint64 pdu_length[MAX_PDU_BLOCKS];
guint64 no_of_pdus[MAX_PDU_BLOCKS];
umts_mac_info *macinf;
rlc_info *rlcinf;
rlcinf = (rlc_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0);
macinf = (umts_mac_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0);
/********************************/
/* HS-DCH type 2 data here */
col_append_str(pinfo->cinfo, COL_INFO, "(ehs)");
/* Frame Seq Nr (4 bits) */
if ((p_fp_info->release == 6) ||
(p_fp_info->release == 7)) {
guint8 frame_seq_no = (tvb_get_guint8(tvb, offset) & 0xf0) >> 4;
proto_tree_add_item(tree, hf_fp_frame_seq_nr, tvb, offset, 1, ENC_BIG_ENDIAN);
col_append_fstr(pinfo->cinfo, COL_INFO, " seqno=%u", frame_seq_no);
}
/* CmCH-PI (4 bits) */
proto_tree_add_item(tree, hf_fp_cmch_pi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Total number of PDU blocks (5 bits) */
number_of_pdu_blocks = (tvb_get_guint8(tvb, offset) >> 3);
proto_tree_add_item(tree, hf_fp_total_pdu_blocks, tvb, offset, 1, ENC_BIG_ENDIAN);
if (p_fp_info->release == 7) {
/* Flush bit */
proto_tree_add_item(tree, hf_fp_flush, tvb, offset, 1, ENC_BIG_ENDIAN);
/* FSN/DRT reset bit */
proto_tree_add_item(tree, hf_fp_fsn_drt_reset, tvb, offset, 1, ENC_BIG_ENDIAN);
/* DRT Indicator */
drt_present = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_drt_indicator, tvb, offset, 1, ENC_BIG_ENDIAN);
}
offset++;
/* FACH Indicator flag */
fach_present = (tvb_get_guint8(tvb, offset) & 0x80) >> 7;
proto_tree_add_item(tree, hf_fp_fach_indicator, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* User buffer size */
user_buffer_size = tvb_get_ntohs(tvb, offset);
proto_tree_add_item(tree, hf_fp_user_buffer_size, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, " User-Buffer-Size=%u", user_buffer_size);
/********************************************************************/
/* Now read number_of_pdu_blocks header entries */
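/*
 * Each PDU block header carries 20 bits on the wire: an 11-bit MAC-d/c PDU
 * length, one bit this dissector skips (taken here to be spare), a 4-bit
 * PDU count and a 4-bit logical channel ID. Two consecutive headers thus
 * pack into exactly 5 bytes, which is why the reads below alternate between
 * byte-aligned (even n) and nibble-aligned (odd n) bit offsets:
 *
 *   byte 0: LLLLLLLL   L = length of header n (even)
 *   byte 1: LLL.PPPP   P = no. of PDUs of header n, '.' = skipped bit
 *   byte 2: CCCCllll   C = LCH-ID of header n, l = length of header n+1
 *   byte 3: lllllll.
 *   byte 4: ppppcccc   p = no. of PDUs, c = LCH-ID of header n+1
 */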
for (n=0; n < number_of_pdu_blocks; n++) {
proto_item *pdu_block_header_ti;
proto_tree *pdu_block_header_tree;
int block_header_start_offset = offset;
/* Add PDU block header subtree */
pdu_block_header_ti = proto_tree_add_string_format(tree, hf_fp_hsdsch_pdu_block_header,
tvb, offset, 0,
"",
"PDU Block Header");
pdu_block_header_tree = proto_item_add_subtree(pdu_block_header_ti,
ett_fp_hsdsch_pdu_block_header);
/* MAC-d/c PDU length in this block (11 bits) */
proto_tree_add_bits_ret_val(pdu_block_header_tree, hf_fp_pdu_length_in_block, tvb,
(offset*8) + ((n % 2) ? 4 : 0), 11,
&pdu_length[n], ENC_BIG_ENDIAN);
if ((n % 2) == 0)
offset++;
else
offset += 2;
/* # PDUs in this block (4 bits) */
proto_tree_add_bits_ret_val(pdu_block_header_tree, hf_fp_pdus_in_block, tvb,
(offset*8) + ((n % 2) ? 0 : 4), 4,
&no_of_pdus[n], ENC_BIG_ENDIAN);
if ((n % 2) == 0) {
offset++;
}
/* Logical channel ID in block (4 bits) */
proto_tree_add_bits_ret_val(pdu_block_header_tree, hf_fp_lchid, tvb,
(offset*8) + ((n % 2) ? 4 : 0), 4,
&lchid[n], ENC_BIG_ENDIAN);
if ((n % 2) == 1) {
offset++;
}
else {
if (n == (number_of_pdu_blocks-1)) {
/* Byte is padded out for last block */
offset++;
}
}
/* Append summary to header tree root */
proto_item_append_text(pdu_block_header_ti,
" (lch:%u, %u pdus of %u bytes)",
(guint16)lchid[n],
(guint16)no_of_pdus[n],
(guint16)pdu_length[n]);
/* Set length of header tree item */
if (((n % 2) == 0) && (n < (number_of_pdu_blocks-1))) {
proto_item_set_len(pdu_block_header_ti,
offset - block_header_start_offset+1);
}
else {
proto_item_set_len(pdu_block_header_ti,
offset - block_header_start_offset);
}
}
if (header_length == 0) {
header_length = offset;
}
/**********************************************/
/* Optional fields indicated by earlier flags */
if (drt_present) {
/* DRT */
proto_tree_add_item(tree, hf_fp_drt, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}
if (fach_present) {
/* H-RNTI: */
proto_tree_add_item(tree, hf_fp_hrnti, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
/* RACH Measurement Result */
proto_tree_add_item(tree, hf_fp_rach_measurement_result, tvb, offset, 2, ENC_BIG_ENDIAN);
offset++;
}
/********************************************************************/
/* Now read the MAC-d/c PDUs for each block using info from headers */
for (n=0; n < number_of_pdu_blocks; n++) {
for (j=0;j<no_of_pdus[n];j++) {
/*Configure (signal to lower layers) the PDU!*/
macinf->content[j] = lchId_type_table[lchid[n]+1];/*hsdsch_macdflow_id_mac_content_map[p_fp_info->hsdsch_macflowd_id];*/ /*MAC_CONTENT_PS_DTCH;*/
macinf->lchid[j] = (guint8)lchid[n]+1; /*Add 1 since C/T is zero indexed? ie C/T =0 => L-CHID = 1*/
macinf->macdflow_id[j] = p_fp_info->hsdsch_macflowd_id;
/*Figure out RLC_MODE based on MACd-flow-ID, basically MACd-flow-ID = 0 then it's SRB0 == UM else AM*/
rlcinf->mode[j] = lchId_rlc_map[lchid[n]+1];/*hsdsch_macdflow_id_rlc_map[p_fp_info->hsdsch_macflowd_id];*/
macinf->ctmux[j] = FALSE; /* per-PDU flag, consistent with the other per-PDU fields */
rlcinf->li_size[j] = RLC_LI_7BITS;
/** Configure ciphering **/
#if 0
/*If this entry exists, SECURITY_MODE is complete*/
if ( rrc_ciph_inf && g_tree_lookup(rrc_ciph_inf, GINT_TO_POINTER((gint)p_fp_info->com_context_id)) ) {
rlcinf->ciphered[j] = TRUE;
} else {
rlcinf->ciphered[j] = FALSE;
}
#endif
rlcinf->ciphered[j] = FALSE;
rlcinf->deciphered[j] = FALSE;
rlcinf->rbid[j] = (guint8)lchid[n]+1;
rlcinf->urnti[j] = p_fp_info->com_context_id; /*Set URNTI to communication context id*/
}
/* Add PDU block header subtree */
offset = dissect_macd_pdu_data_type_2(tvb, pinfo, tree, offset,
(guint16)pdu_length[n],
(guint16)no_of_pdus[n],
p_fp_info, data);
}
if (preferences_header_checksum) {
verify_header_crc(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
/**
* Dissect and CONFIGURE hsdsch_common channel.
*
* This will dissect HS-DSCH common channels of type 2, so it is
* very similar to the regular type 2 (ehs) dissection; the difference is how
* the configuration is done. NOTE: VERY EXPERIMENTAL.
*
* @param tvb the tv buffer of the current data
* @param pinfo the packet info of the current data
* @param tree the tree to append this item to
* @param offset the offset in the tvb
* @param p_fp_info FP-packet information
* @param data dissection context passed down the dissector chain
*/
static
void dissect_hsdsch_common_channel_info(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, struct fp_info *p_fp_info,
void *data)
{
gboolean is_control_frame;
guint16 header_crc = 0;
proto_item * header_crc_pi = NULL;
guint header_length = 0;
/* Header CRC */
header_crc = tvb_get_bits8(tvb, 0, 7);
header_crc_pi = proto_tree_add_item(tree, hf_fp_header_crc, tvb, offset, 1, ENC_BIG_ENDIAN);
/* Frame Type */
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_ft, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
col_append_str(pinfo->cinfo, COL_INFO, is_control_frame ? " [Control] " : " [Data] ");
if (is_control_frame) {
dissect_common_control(tvb, pinfo, tree, offset, p_fp_info);
/* For control frame the header CRC is actually frame CRC covering all
* bytes except the first */
if (preferences_header_checksum) {
verify_control_frame_crc(tvb, pinfo, header_crc_pi, header_crc);
}
}
else {
guint8 number_of_pdu_blocks;
gboolean drt_present = FALSE;
gboolean fach_present = FALSE;
guint16 user_buffer_size;
int n;
guint j;
#define MAX_PDU_BLOCKS 31
guint64 lchid[MAX_PDU_BLOCKS];
guint64 pdu_length[MAX_PDU_BLOCKS];
guint64 no_of_pdus[MAX_PDU_BLOCKS];
guint8 newieflags = 0;
umts_mac_info *macinf;
rlc_info *rlcinf;
rlcinf = (rlc_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0);
macinf = (umts_mac_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0);
/********************************/
/* HS-DCH type 2 data here */
col_append_str(pinfo->cinfo, COL_INFO, "(ehs)");
/* Frame Seq Nr (4 bits) */
if ((p_fp_info->release == 6) ||
(p_fp_info->release == 7)) {
guint8 frame_seq_no = (tvb_get_guint8(tvb, offset) & 0xf0) >> 4;
proto_tree_add_item(tree, hf_fp_frame_seq_nr, tvb, offset, 1, ENC_BIG_ENDIAN);
col_append_fstr(pinfo->cinfo, COL_INFO, " seqno=%u", frame_seq_no);
}
/* CmCH-PI (4 bits) */
proto_tree_add_item(tree, hf_fp_cmch_pi, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Total number of PDU blocks (5 bits) */
number_of_pdu_blocks = (tvb_get_guint8(tvb, offset) >> 3);
proto_tree_add_item(tree, hf_fp_total_pdu_blocks, tvb, offset, 1, ENC_BIG_ENDIAN);
if (p_fp_info->release == 7) {
/* Flush bit */
proto_tree_add_item(tree, hf_fp_flush, tvb, offset, 1, ENC_BIG_ENDIAN);
/* FSN/DRT reset bit */
proto_tree_add_item(tree, hf_fp_fsn_drt_reset, tvb, offset, 1, ENC_BIG_ENDIAN);
/* DRT Indicator */
drt_present = tvb_get_guint8(tvb, offset) & 0x01;
proto_tree_add_item(tree, hf_fp_drt_indicator, tvb, offset, 1, ENC_BIG_ENDIAN);
}
offset++;
/* FACH Indicator flag */
fach_present = (tvb_get_guint8(tvb, offset) & 0x80) >> 7;
proto_tree_add_item(tree, hf_fp_fach_indicator, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* User buffer size */
user_buffer_size = tvb_get_ntohs(tvb, offset);
proto_tree_add_item(tree, hf_fp_user_buffer_size, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, " User-Buffer-Size=%u", user_buffer_size);
/********************************************************************/
/* Now read number_of_pdu_blocks header entries */
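/* The headers use the same 20-bit packed layout documented in
 * dissect_hsdsch_type_2_channel_info() above. */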
for (n=0; n < number_of_pdu_blocks; n++) {
proto_item *pdu_block_header_ti;
proto_tree *pdu_block_header_tree;
int block_header_start_offset = offset;
/* Add PDU block header subtree */
pdu_block_header_ti = proto_tree_add_string_format(tree, hf_fp_hsdsch_pdu_block_header,
tvb, offset, 0,
"",
"PDU Block Header");
pdu_block_header_tree = proto_item_add_subtree(pdu_block_header_ti,
ett_fp_hsdsch_pdu_block_header);
/* MAC-d/c PDU length in this block (11 bits) */
proto_tree_add_bits_ret_val(pdu_block_header_tree, hf_fp_pdu_length_in_block, tvb,
(offset*8) + ((n % 2) ? 4 : 0), 11,
&pdu_length[n], ENC_BIG_ENDIAN);
if ((n % 2) == 0)
offset++;
else
offset += 2;
/* # PDUs in this block (4 bits) */
proto_tree_add_bits_ret_val(pdu_block_header_tree, hf_fp_pdus_in_block, tvb,
(offset*8) + ((n % 2) ? 0 : 4), 4,
&no_of_pdus[n], ENC_BIG_ENDIAN);
if ((n % 2) == 0) {
offset++;
}
/* Logical channel ID in block (4 bits) */
proto_tree_add_bits_ret_val(pdu_block_header_tree, hf_fp_lchid, tvb,
(offset*8) + ((n % 2) ? 4 : 0), 4,
&lchid[n], ENC_BIG_ENDIAN);
if ((n % 2) == 1) {
offset++;
}
else {
if (n == (number_of_pdu_blocks-1)) {
/* Byte is padded out for last block */
offset++;
}
}
/* Append summary to header tree root */
proto_item_append_text(pdu_block_header_ti,
" (lch:%u, %u pdus of %u bytes)",
(guint16)lchid[n],
(guint16)no_of_pdus[n],
(guint16)pdu_length[n]);
/* Set length of header tree item */
if (((n % 2) == 0) && (n < (number_of_pdu_blocks-1))) {
proto_item_set_len(pdu_block_header_ti,
offset - block_header_start_offset+1);
}
else {
proto_item_set_len(pdu_block_header_ti,
offset - block_header_start_offset);
}
}
if (header_length == 0) {
header_length = offset;
}
/**********************************************/
/* Optional fields indicated by earlier flags */
if (drt_present) {
/* DRT */
proto_tree_add_item(tree, hf_fp_drt, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}
if (fach_present) {
/* H-RNTI: */
proto_tree_add_item(tree, hf_fp_hrnti, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
/* RACH Measurement Result */
proto_tree_add_item(tree, hf_fp_rach_measurement_result, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
}
/********************************************************************/
/* Now read the MAC-d/c PDUs for each block using info from headers */
for (n=0; n < number_of_pdu_blocks; n++) {
tvbuff_t *next_tvb;
for (j=0; j<no_of_pdus[n]; j++) {
/* If all bits are set, then this is BCCH or PCCH according to 25.435, paragraph 6.2.7.31 */
if (lchid[n] == 0xF) {
/* In the very few test cases I've seen, this seems to be
* BCCH with transparent MAC layer. Therefore skip right to
* rlc_bcch and hope for the best. */
next_tvb = tvb_new_subset_length(tvb, offset, (gint)pdu_length[n]);
call_dissector_with_data(rlc_bcch_handle, next_tvb, pinfo, top_level_tree, data);
offset += (gint)pdu_length[n];
} else { /* Else go for CCCH UM, this seems to work. */
p_fp_info->hsdsch_entity = ehs; /* HSDSCH type 2 */
/* TODO: use cur_tb or subnum everywhere. */
p_fp_info->cur_tb = j; /* set cur_tb for MAC */
pinfo->fd->subnum = j; /* set subframe number for RRC */
macinf->content[j] = MAC_CONTENT_CCCH;
macinf->lchid[j] = (guint8)lchid[n]+1; /*Add 1 since it is zero indexed? */
macinf->macdflow_id[j] = p_fp_info->hsdsch_macflowd_id;
macinf->ctmux[j] = FALSE;
rlcinf->li_size[j] = RLC_LI_7BITS;
rlcinf->ciphered[j] = FALSE;
rlcinf->deciphered[j] = FALSE;
rlcinf->rbid[j] = (guint8)lchid[n]+1;
rlcinf->urnti[j] = p_fp_info->channel; /*We need to fake urnti*/
next_tvb = tvb_new_subset_length(tvb, offset, (gint)pdu_length[n]);
call_dissector_with_data(mac_fdd_hsdsch_handle, next_tvb, pinfo, top_level_tree, data);
offset += (gint)pdu_length[n];
}
}
}
/* New IE Flags */
newieflags = tvb_get_guint8(tvb, offset);
/* If newieflags == 0000 0010 then this indicates that there is a
* HS-DSCH physical layer category and no other New IE flags. */
if (newieflags == 2) {
/* HS-DSCH physical layer category presence bit. */
proto_tree_add_uint(tree, hf_fp_hsdsch_new_ie_flag[6], tvb, offset, 1, newieflags);
offset++;
/* HS-DSCH physical layer category. */
proto_tree_add_bits_item(tree, hf_fp_hsdsch_physical_layer_category, tvb, offset*8, 6, ENC_BIG_ENDIAN);
offset++;
}
if (preferences_header_checksum) {
verify_header_crc(tvb, pinfo, header_crc_pi, header_crc, header_length);
}
/* Spare Extension and Payload CRC */
dissect_spare_extension_and_crc(tvb, pinfo, tree, 1, offset, header_length);
}
}
static gboolean
heur_dissect_fp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
{
struct fp_info *p_fp_info;
p_fp_info = (fp_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_fp, 0);
/* if no FP info is present, this might be FP in a pcap(ng) file */
if (!p_fp_info) {
/* We only know the header length of control frames, so check that bit first */
int offset = 0, length;
guint8 oct, calc_crc = 0, crc;
unsigned char *buf;
oct = tvb_get_guint8(tvb, offset);
crc = oct & 0xfe;
if ((oct & 0x01) == 1) {
/*
* 6.3.2.1 Frame CRC
* Description: It is the result of the CRC applied to the remaining part of the frame,
* i.e. from bit 0 of the first byte of the header (the FT IE) to bit 0 of the last byte of the payload,
* with the corresponding generator polynomial: G(D) = D7+D6+D2+1. See subclause 7.2.
*/
length = tvb_reported_length(tvb);
buf = (unsigned char *)tvb_memdup(wmem_packet_scope(), tvb, 0, length);
buf[0] = 0x01; /* zero out the CRC bits, keeping only the FT bit */
calc_crc = crc7update(calc_crc, buf, length);
if (calc_crc == crc) {
/* assume this is FP, set conversation dissector to catch the data frames too */
conversation_set_dissector(find_or_create_conversation(pinfo), fp_handle);
dissect_fp(tvb, pinfo, tree, data);
return TRUE;
}
}
return FALSE;
}
/* if FP info is present, check that it really is an ethernet link */
if (p_fp_info->link_type != FP_Link_Ethernet) {
return FALSE;
}
/* discriminate 'lower' UDP layer from 'user data' UDP layer
* (i.e. if an FP over UDP packet contains a user UDP packet) */
if (p_fp_info->srcport != pinfo->srcport ||
p_fp_info->destport != pinfo->destport)
return FALSE;
/* assume this is FP */
dissect_fp(tvb, pinfo, tree, data);
return TRUE;
}
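/*
 * For reference: the frame CRC checked in heur_dissect_fp() is a CRC-7 with
 * generator polynomial G(D) = D^7 + D^6 + D^2 + 1 (taps 0x45 below the top
 * term). The disabled sketch below is a minimal bit-by-bit implementation of
 * that polynomial, for illustration only; whether its bit ordering and
 * alignment match crc7update() exactly has not been verified, so the
 * dissector keeps using the library routine.
 */
#if 0
static guint8
crc7_reference(const guint8 *buf, int len)
{
    guint8 crc = 0;                                /* 7-bit shift register */
    int    i, bit;
    for (i = 0; i < len; i++) {
        for (bit = 7; bit >= 0; bit--) {           /* MSB-first message bits */
            guint8 in = (buf[i] >> bit) & 0x01;
            guint8 fb = ((crc >> 6) & 0x01) ^ in;  /* feedback = reg MSB ^ input */
            crc = (guint8)(((crc << 1) & 0x7f) ^ (fb ? 0x45 : 0));
        }
    }
    return crc;
}
#endif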
static guint8 fakes =5; /*[] ={1,5,8};*/
static guint8 fake_map[256];
/*
 * TODO: This needs to be fixed!
 * Ideally the actual RRC messages, which sooner or later map transport
 * channel IDs to logical channel IDs or RAB IDs, would be used to set the
 * proper logical channel/RAB ID; for now we make synthetic ones.
 * */
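/*
 * Example: with 'fakes' starting at 5, the first unseen transport channel ID
 * is assigned logical channel ID 5, the next unseen one 6, and so on, while
 * repeated lookups for an already-seen transport channel ID return the value
 * assigned to it earlier.
 */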
static guint8
make_fake_lchid(packet_info *pinfo _U_, gint trchld)
{
if ( fake_map[trchld] == 0) {
fake_map[trchld] = fakes;
fakes++;
}
return fake_map[trchld];
}
/*
 * July 2012:
 * A lot of configuration has been moved into the actual dissecting functions,
 * since most of the configuration/signaling has to be set per TB (PDU) rather
 * than per channel!
 */
static fp_info *
fp_set_per_packet_inf_from_conv(umts_fp_conversation_info_t *p_conv_data,
tvbuff_t *tvb, packet_info *pinfo,
proto_tree *tree _U_)
{
fp_info *fpi;
guint8 tfi, c_t;
int offset = 0, i=0, j=0, num_tbs, chan, tb_size, tb_bit_off;
gboolean is_control_frame;
umts_mac_info *macinf;
rlc_info *rlcinf;
guint8 fake_lchid=0;
gint *cur_val=NULL;
fpi = wmem_new0(wmem_file_scope(), fp_info);
p_add_proto_data(wmem_file_scope(), pinfo, proto_fp, 0, fpi);
fpi->iface_type = p_conv_data->iface_type;
fpi->division = p_conv_data->division;
fpi->release = 7; /* Set values greater than the checks performed */
fpi->release_year = 2006;
fpi->release_month = 12;
fpi->channel = p_conv_data->channel;
fpi->dch_crc_present = p_conv_data->dch_crc_present;
/*fpi->paging_indications;*/
fpi->link_type = FP_Link_Ethernet;
#if 0
/*Only do this the first run, signals that we need to reset the RLC fragtable*/
if (!pinfo->fd->flags.visited && p_conv_data->reset_frag ) {
fpi->reset_frag = p_conv_data->reset_frag;
p_conv_data->reset_frag = FALSE;
}
#endif
/* remember 'lower' UDP layer port information so we can later
* differentiate 'lower' UDP layer from 'user data' UDP layer */
fpi->srcport = pinfo->srcport;
fpi->destport = pinfo->destport;
fpi->com_context_id = p_conv_data->com_context_id;
if (pinfo->link_dir == P2P_DIR_UL) {
fpi->is_uplink = TRUE;
} else {
fpi->is_uplink = FALSE;
}
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
switch (fpi->channel) {
case CHANNEL_HSDSCH: /* HS-DSCH - High Speed Downlink Shared Channel */
fpi->hsdsch_entity = p_conv_data->hsdsch_entity;
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
fpi->hsdsch_macflowd_id = p_conv_data->hsdsch_macdflow_id;
macinf->content[0] = hsdsch_macdflow_id_mac_content_map[p_conv_data->hsdsch_macdflow_id]; /*MAC_CONTENT_PS_DTCH;*/
macinf->lchid[0] = p_conv_data->hsdsch_macdflow_id;
/*macinf->content[0] = lchId_type_table[p_conv_data->edch_lchId[0]];*/
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
/*Figure out RLC_MODE based on MACd-flow-ID, basically MACd-flow-ID = 0 then it's SRB0 == UM else AM*/
rlcinf->mode[0] = hsdsch_macdflow_id_rlc_map[p_conv_data->hsdsch_macdflow_id];
if (fpi->hsdsch_entity == hs /*&& !rlc_is_ciphered(pinfo)*/) {
for (i=0; i<MAX_NUM_HSDHSCH_MACDFLOW; i++) {
/*Figure out if this channel is multiplexed (signaled from RRC)*/
if ((cur_val=(gint *)g_tree_lookup(hsdsch_muxed_flows, GINT_TO_POINTER((gint)p_conv_data->hrnti))) != NULL) {
j = 1 << i;
fpi->hsdhsch_macfdlow_is_mux[i] = j & *cur_val;
} else {
fpi->hsdhsch_macfdlow_is_mux[i] = FALSE;
}
}
}
/* Make configurable ?(available in NBAP?) */
/* urnti[MAX_RLC_CHANS] */
/*
switch (p_conv_data->rlc_mode) {
case FP_RLC_TM:
rlcinf->mode[0] = RLC_TM;
break;
case FP_RLC_UM:
rlcinf->mode[0] = RLC_UM;
break;
case FP_RLC_AM:
rlcinf->mode[0] = RLC_AM;
break;
case FP_RLC_MODE_UNKNOWN:
default:
rlcinf->mode[0] = RLC_UNKNOWN_MODE;
break;
}*/
/* rbid[MAX_RLC_CHANS] */
/* For RLC re-assembly to work we need the URNTI signaled from NBAP */
rlcinf->urnti[0] = fpi->com_context_id;
rlcinf->li_size[0] = RLC_LI_7BITS;
rlcinf->ciphered[0] = FALSE;
rlcinf->deciphered[0] = FALSE;
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
return fpi;
case CHANNEL_EDCH:
/*Most configuration is now done in the actual dissecting function*/
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
fpi->no_ddi_entries = p_conv_data->no_ddi_entries;
for (i=0; i<fpi->no_ddi_entries; i++) {
fpi->edch_ddi[i] = p_conv_data->edch_ddi[i]; /*Set the DDI value*/
fpi->edch_macd_pdu_size[i] = p_conv_data->edch_macd_pdu_size[i]; /*Set the size*/
fpi->edch_lchId[i] = p_conv_data->edch_lchId[i]; /*Set the channel id for this entry*/
/*macinf->content[i] = lchId_type_table[p_conv_data->edch_lchId[i]]; */ /*Set the proper Content type for the mac layer.*/
/* rlcinf->mode[i] = lchId_rlc_map[p_conv_data->edch_lchId[i]];*/ /* Set RLC mode by lchid to RLC_MODE map in nbap.h */
}
fpi->edch_type = p_conv_data->edch_type;
/* macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
macinf->content[0] = MAC_CONTENT_PS_DTCH;*/
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
/* For RLC re-assembly to work we need a urnti signaled from NBAP */
rlcinf->urnti[0] = fpi->com_context_id;
/* rlcinf->mode[0] = RLC_AM;*/
rlcinf->li_size[0] = RLC_LI_7BITS;
rlcinf->ciphered[0] = FALSE;
rlcinf->deciphered[0] = FALSE;
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
return fpi;
case CHANNEL_PCH:
fpi->paging_indications = p_conv_data->paging_indications;
fpi->num_chans = p_conv_data->num_dch_in_flow;
/* Set offset to point to first TFI
*/
if (is_control_frame) {
/* control frame, we're done */
return fpi;
}
/* Set offset to TFI */
offset = 3;
break;
case CHANNEL_DCH:
fpi->num_chans = p_conv_data->num_dch_in_flow;
if (is_control_frame) {
/* control frame, we're done */
return fpi;
}
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
offset = 2; /*To correctly read the tfi*/
fakes = 5; /* Reset fake counter. */
for (chan=0; chan < fpi->num_chans; chan++) { /*Iterate over the channels*/
/*Iterate over the transport blocks*/
/*tfi = tvb_get_guint8(tvb, offset);*/
/*TFI is 5 bits according to 3GPP TS 25.321, paragraph 6.2.4.4*/
tfi = tvb_get_bits8(tvb, 3+offset*8, 5);
/*Figure out the number of tbs and size*/
num_tbs = (fpi->is_uplink) ? p_conv_data->fp_dch_channel_info[chan].ul_chan_num_tbs[tfi] : p_conv_data->fp_dch_channel_info[chan].dl_chan_num_tbs[tfi];
tb_size= (fpi->is_uplink) ? p_conv_data->fp_dch_channel_info[chan].ul_chan_tf_size[tfi] : p_conv_data->fp_dch_channel_info[chan].dl_chan_tf_size[tfi];
/*TODO: This stuff has to be reworked!*/
/*Generates a fake logical channel id for non multiplexed channel*/
if ( p_conv_data->dchs_in_flow_list[chan] != 31 && (p_conv_data->dchs_in_flow_list[chan] == 24 &&
tb_size != 340) ) {
fake_lchid = make_fake_lchid(pinfo, p_conv_data->dchs_in_flow_list[chan]);
}
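/* A DCH data frame starts with 1 byte of header CRC/FT plus 1 byte of CFN,
 * followed by one TFI byte per DCH in the flow, so the first TB (and its
 * C/T field, when multiplexed) starts at bit (2 + num_dch_in_flow) * 8. */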
tb_bit_off = (2+p_conv_data->num_dch_in_flow)*8; /*Point to the C/T of first TB*/
/*Set configuration for individual blocks*/
for (j=0; j < num_tbs && j+chan < MAX_MAC_FRAMES; j++) {
/*Set transport channel id (useful for debugging)*/
macinf->trchid[j+chan] = p_conv_data->dchs_in_flow_list[chan];
/*Transport channels 31 and 24 might be multiplexed!*/
if ( p_conv_data->dchs_in_flow_list[chan] == 31 || p_conv_data->dchs_in_flow_list[chan] == 24) {
/****** MUST FIGURE OUT IF THIS IS REALLY MULTIPLEXED OR NOT*******/
/*If Trchid == 31 and only one TB, we have no multiplexing*/
if (0/*p_conv_data->dchs_in_flow_list[chan] == 31 && num_tbs == 1*/) {
macinf->ctmux[j+chan] = FALSE;/*Set TRUE if this channel is multiplexed (ie. C/T flag exists)*/
macinf->lchid[j+chan] = 1;
macinf->content[j+chan] = lchId_type_table[1]; /*Base MAC content on logical channel id (Table is in packet-nbap.h)*/
rlcinf->mode[j+chan] = lchId_rlc_map[1]; /*Based RLC mode on logical channel id*/
}
/*Indicate we don't have multiplexing.*/
else if (p_conv_data->dchs_in_flow_list[chan] == 24 && tb_size != 340) {
macinf->ctmux[j+chan] = FALSE;/*Set TRUE if this channel is multiplexed (ie. C/T flag exists)*/
/*g_warning("settin this for %d", pinfo->num);*/
macinf->lchid[j+chan] = fake_lchid;
macinf->fake_chid[j+chan] = TRUE;
macinf->content[j+chan] = MAC_CONTENT_PS_DTCH; /*lchId_type_table[fake_lchid];*/ /*Base MAC content on logical channel id (Table is in packet-nbap.h)*/
rlcinf->mode[j+chan] = RLC_AM;/*lchId_rlc_map[fake_lchid];*/ /*Based RLC mode on logical channel id*/
}
/*We have multiplexing*/
else {
macinf->ctmux[j+chan] = TRUE;/*Set TRUE if this channel is multiplexed (ie. C/T flag exists)*/
/* Peek at C/T, different RLC params for different logical channels */
/*C/T is 4 bits according to 3GPP TS 25.321, paragraph 9.2.1, from MAC header (not FP)*/
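/* A C/T value of 0 denotes logical channel 1, hence the "+ 1"; the "% 0xf"
 * appears to wrap the reserved raw value 15 back to 0 so it cannot index
 * past the lchId mapping tables (an interpretation, not spec-confirmed). */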
c_t = (tvb_get_bits8(tvb, tb_bit_off/*(2+p_conv_data->num_dch_in_flow)*8*/, 4) + 1) % 0xf; /* c_t = tvb_get_guint8(tvb, offset);*/
macinf->lchid[j+chan] = c_t;
macinf->content[j+chan] = lchId_type_table[c_t]; /*Base MAC content on logical channel id (Table is in packet-nbap.h)*/
rlcinf->mode[j+chan] = lchId_rlc_map[c_t]; /*Based RLC mode on logical channel id*/
}
} else {
fake_lchid = make_fake_lchid(pinfo, p_conv_data->dchs_in_flow_list[chan]);
macinf->ctmux[j+chan] = FALSE;/*Set TRUE if this channel is multiplexed (ie. C/T flag exists)*/
/*macinf->content[j+chan] = MAC_CONTENT_CS_DTCH;*/
macinf->content[j+chan] = lchId_type_table[fake_lchid];
rlcinf->mode[j+chan] = lchId_rlc_map[fake_lchid];
/*Generate virtual logical channel id*/
/************************/
/*TODO: Once proper lchid is always set, this has to be removed*/
macinf->fake_chid[j+chan] = TRUE;
macinf->lchid[j+chan] = fake_lchid; /*make_fake_lchid(pinfo, p_conv_data->dchs_in_flow_list[chan]);*/
/************************/
}
/*** Set rlc info ***/
rlcinf->urnti[j+chan] = p_conv_data->com_context_id;
rlcinf->li_size[j+chan] = RLC_LI_7BITS;
#if 0
/*If this entry exists, SECURITY_MODE is complete (signaled by RRC)*/
if ( rrc_ciph_inf && g_tree_lookup(rrc_ciph_inf, GINT_TO_POINTER((gint)p_conv_data->com_context_id)) != NULL ) {
rlcinf->ciphered[j+chan] = TRUE;
} else {
rlcinf->ciphered[j+chan] = FALSE;
}
#endif
rlcinf->ciphered[j+chan] = FALSE;
rlcinf->deciphered[j+chan] = FALSE;
rlcinf->rbid[j+chan] = macinf->lchid[j+chan];
/*Step over this TB and its C/T flag.*/
tb_bit_off += tb_size+4;
}
offset++;
}
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
/* Set offset to point to first TFI
* the Number of TFI's = number of DCH's in the flow
*/
offset = 2;
break;
case CHANNEL_FACH_FDD:
fpi->num_chans = p_conv_data->num_dch_in_flow;
if (is_control_frame) {
/* control frame, we're done */
return fpi;
}
/* Set offset to point to first TFI
* the Number of TFI's = number of DCH's in the flow
*/
offset = 2;
/* Set MAC data */
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
macinf->ctmux[0] = 1;
macinf->content[0] = MAC_CONTENT_DCCH;
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
/* Set RLC data */
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
/* Make configurable? (available in NBAP?) */
/* For RLC re-assembly to work we need to fake urnti */
rlcinf->urnti[0] = fpi->channel;
rlcinf->mode[0] = RLC_AM;
/* rbid[MAX_RLC_CHANS] */
rlcinf->li_size[0] = RLC_LI_7BITS;
rlcinf->ciphered[0] = FALSE;
rlcinf->deciphered[0] = FALSE;
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
break;
case CHANNEL_RACH_FDD:
fpi->num_chans = p_conv_data->num_dch_in_flow;
if (is_control_frame) {
/* control frame, we're done */
return fpi;
}
/* Set offset to point to first TFI
* the Number of TFI's = number of DCH's in the flow
*/
offset = 2;
/* set MAC data */
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
for ( chan = 0; chan < fpi->num_chans; chan++ ) {
macinf->ctmux[chan] = 1;
macinf->content[chan] = MAC_CONTENT_DCCH;
rlcinf->urnti[chan] = fpi->com_context_id; /*Note that MAC probably will change this*/
}
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
break;
case CHANNEL_HSDSCH_COMMON:
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
break;
default:
expert_add_info(pinfo, NULL, &ei_fp_transport_channel_type_unknown);
return NULL;
}
/* Peek at the packet as the per packet info seems not to take the tfi into account */
for (i=0; i<fpi->num_chans; i++) {
tfi = tvb_get_guint8(tvb, offset);
/*TFI is 5 bits according to 3GPP TS 25.321, paragraph 6.2.4.4*/
/*tfi = tvb_get_bits8(tvb, offset*8, 5);*/
if (pinfo->link_dir == P2P_DIR_UL) {
fpi->chan_tf_size[i] = p_conv_data->fp_dch_channel_info[i].ul_chan_tf_size[tfi];
fpi->chan_num_tbs[i] = p_conv_data->fp_dch_channel_info[i].ul_chan_num_tbs[tfi];
} else {
fpi->chan_tf_size[i] = p_conv_data->fp_dch_channel_info[i].dl_chan_tf_size[tfi];
fpi->chan_num_tbs[i] = p_conv_data->fp_dch_channel_info[i].dl_chan_num_tbs[tfi];
}
offset++;
}
return fpi;
}
/*****************************/
/* Main dissection function. */
static int
dissect_fp_common(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
{
proto_tree *fp_tree;
proto_item *ti;
gint offset = 0;
struct fp_info *p_fp_info;
rlc_info *rlcinf;
conversation_t *p_conv;
umts_fp_conversation_info_t *p_conv_data = NULL;
/* Append this protocol name rather than replace. */
col_set_str(pinfo->cinfo, COL_PROTOCOL, "FP");
/* Create fp tree. */
ti = proto_tree_add_item(tree, proto_fp, tvb, offset, -1, ENC_NA);
fp_tree = proto_item_add_subtree(ti, ett_fp);
top_level_tree = tree;
/* Look for packet info! */
p_fp_info = (struct fp_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_fp, 0);
/* Check if we have conversation info */
p_conv = (conversation_t *)find_conversation(pinfo->num, &pinfo->net_dst, &pinfo->net_src,
pinfo->ptype,
pinfo->destport, pinfo->srcport, NO_ADDR_B);
if (p_conv) {
/*Find correct conversation, basically find the one that's closest to this frame*/
#if 0
while (p_conv->next != NULL && p_conv->next->setup_frame < pinfo->num) {
p_conv = p_conv->next;
}
#endif
p_conv_data = (umts_fp_conversation_info_t *)conversation_get_proto_data(p_conv, proto_fp);
if (p_conv_data) {
/*Figure out the direction of the link*/
if (addresses_equal(&(pinfo->net_dst), (&p_conv_data->crnc_address))) {
proto_item *item= proto_tree_add_uint(fp_tree, hf_fp_ul_setup_frame,
tvb, 0, 0, p_conv_data->ul_frame_number);
PROTO_ITEM_SET_GENERATED(item);
/* CRNC -> Node B */
pinfo->link_dir=P2P_DIR_UL;
if (p_fp_info == NULL) {
p_fp_info = fp_set_per_packet_inf_from_conv(p_conv_data, tvb, pinfo, fp_tree);
}
}
else {
/* Maybe the frame number should already be stored in the proper location (ul_frame_number) in nbap? */
proto_item *item= proto_tree_add_uint(fp_tree, hf_fp_dl_setup_frame,
tvb, 0, 0, p_conv_data->ul_frame_number);
PROTO_ITEM_SET_GENERATED(item);
pinfo->link_dir=P2P_DIR_DL;
if (p_fp_info == NULL) {
p_fp_info = fp_set_per_packet_inf_from_conv(p_conv_data, tvb, pinfo, fp_tree);
}
}
}
}
if (pinfo->p2p_dir == P2P_DIR_UNKNOWN) {
if (pinfo->link_dir == P2P_DIR_UL) {
pinfo->p2p_dir = P2P_DIR_RECV;
} else {
pinfo->p2p_dir = P2P_DIR_SENT;
}
}
/* Can't dissect anything without it... */
if (p_fp_info == NULL) {
proto_tree_add_expert(fp_tree, pinfo, &ei_fp_no_per_frame_info, tvb, offset, -1);
return 1;
}
rlcinf = (rlc_info *)p_get_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0);
/* Show release information */
if (preferences_show_release_info) {
proto_item *release_ti;
proto_tree *release_tree;
proto_item *temp_ti;
release_ti = proto_tree_add_item(fp_tree, hf_fp_release, tvb, 0, 0, ENC_NA);
PROTO_ITEM_SET_GENERATED(release_ti);
proto_item_append_text(release_ti, " R%u (%d/%d)",
p_fp_info->release, p_fp_info->release_year, p_fp_info->release_month);
release_tree = proto_item_add_subtree(release_ti, ett_fp_release);
temp_ti = proto_tree_add_uint(release_tree, hf_fp_release_version, tvb, 0, 0, p_fp_info->release);
PROTO_ITEM_SET_GENERATED(temp_ti);
temp_ti = proto_tree_add_uint(release_tree, hf_fp_release_year, tvb, 0, 0, p_fp_info->release_year);
PROTO_ITEM_SET_GENERATED(temp_ti);
temp_ti = proto_tree_add_uint(release_tree, hf_fp_release_month, tvb, 0, 0, p_fp_info->release_month);
PROTO_ITEM_SET_GENERATED(temp_ti);
}
/* Show channel type in info column, tree */
col_set_str(pinfo->cinfo, COL_INFO,
val_to_str_const(p_fp_info->channel,
channel_type_vals,
"Unknown channel type"));
if (p_conv_data) {
int i;
col_append_fstr(pinfo->cinfo, COL_INFO, "(%u", p_conv_data->dchs_in_flow_list[0]);
for (i=1; i < p_conv_data->num_dch_in_flow; i++) {
col_append_fstr(pinfo->cinfo, COL_INFO, ",%u", p_conv_data->dchs_in_flow_list[i]);
}
col_append_fstr(pinfo->cinfo, COL_INFO, ") ");
}
proto_item_append_text(ti, " (%s)",
val_to_str_const(p_fp_info->channel,
channel_type_vals,
"Unknown channel type"));
/* Add channel type as a generated field */
ti = proto_tree_add_uint(fp_tree, hf_fp_channel_type, tvb, 0, 0, p_fp_info->channel);
PROTO_ITEM_SET_GENERATED(ti);
/* Add division type as a generated field */
if (p_fp_info->release == 7) {
ti = proto_tree_add_uint(fp_tree, hf_fp_division, tvb, 0, 0, p_fp_info->division);
PROTO_ITEM_SET_GENERATED(ti);
}
/* Add link direction as a generated field */
ti = proto_tree_add_uint(fp_tree, hf_fp_direction, tvb, 0, 0, p_fp_info->is_uplink);
PROTO_ITEM_SET_GENERATED(ti);
/* Don't currently handle IuR-specific formats, but it's useful to even see
the channel type and direction */
if (p_fp_info->iface_type == IuR_Interface) {
return 1;
}
/* Show DDI config info */
if (p_fp_info->no_ddi_entries > 0) {
int n;
proto_item *ddi_config_ti;
proto_tree *ddi_config_tree;
ddi_config_ti = proto_tree_add_string_format(fp_tree, hf_fp_ddi_config, tvb, offset, 0,
"", "DDI Config (");
PROTO_ITEM_SET_GENERATED(ddi_config_ti);
ddi_config_tree = proto_item_add_subtree(ddi_config_ti, ett_fp_ddi_config);
/* Add each entry */
for (n=0; n < p_fp_info->no_ddi_entries; n++) {
proto_item_append_text(ddi_config_ti, "%s%u->%ubits",
(n == 0) ? "" : " ",
p_fp_info->edch_ddi[n], p_fp_info->edch_macd_pdu_size[n]);
ti = proto_tree_add_uint(ddi_config_tree, hf_fp_ddi_config_ddi, tvb, 0, 0,
p_fp_info->edch_ddi[n]);
PROTO_ITEM_SET_GENERATED(ti);
ti = proto_tree_add_uint(ddi_config_tree, hf_fp_ddi_config_macd_pdu_size, tvb, 0, 0,
p_fp_info->edch_macd_pdu_size[n]);
PROTO_ITEM_SET_GENERATED(ti);
}
proto_item_append_text(ddi_config_ti, ")");
}
/*************************************/
/* Dissect according to channel type */
switch (p_fp_info->channel) {
case CHANNEL_RACH_TDD:
case CHANNEL_RACH_TDD_128:
case CHANNEL_RACH_FDD:
dissect_rach_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info,
data);
break;
case CHANNEL_DCH:
dissect_dch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info,
data);
break;
case CHANNEL_FACH_FDD:
case CHANNEL_FACH_TDD:
dissect_fach_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info,
data);
break;
case CHANNEL_DSCH_FDD:
case CHANNEL_DSCH_TDD:
dissect_dsch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info);
break;
case CHANNEL_USCH_TDD_128:
case CHANNEL_USCH_TDD_384:
dissect_usch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info);
break;
case CHANNEL_PCH:
dissect_pch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info,
data);
break;
case CHANNEL_CPCH:
dissect_cpch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info);
break;
case CHANNEL_BCH:
dissect_bch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info);
break;
case CHANNEL_HSDSCH:
/* Show configured MAC HS-DSCH entity in use */
if (fp_tree)
{
proto_item *entity_ti;
entity_ti = proto_tree_add_uint(fp_tree, hf_fp_hsdsch_entity,
tvb, 0, 0,
p_fp_info->hsdsch_entity);
PROTO_ITEM_SET_GENERATED(entity_ti);
}
switch (p_fp_info->hsdsch_entity) {
case entity_not_specified:
case hs:
/* This is the pre-R7 default */
dissect_hsdsch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info, data);
break;
case ehs:
dissect_hsdsch_type_2_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info, data);
break;
default:
/* Report Error */
expert_add_info(pinfo, NULL, &ei_fp_hsdsch_entity_not_specified);
break;
}
break;
case CHANNEL_HSDSCH_COMMON:
expert_add_info(pinfo, NULL, &ei_fp_hsdsch_common_experimental_support);
/*if (FALSE)*/
dissect_hsdsch_common_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info, data);
break;
case CHANNEL_HSDSCH_COMMON_T3:
expert_add_info(pinfo, NULL, &ei_fp_hsdsch_common_t3_not_implemented);
/* TODO: */
break;
case CHANNEL_IUR_CPCHF:
/* TODO: */
break;
case CHANNEL_IUR_FACH:
/* TODO: */
break;
case CHANNEL_IUR_DSCH:
dissect_iur_dsch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info);
break;
case CHANNEL_EDCH:
case CHANNEL_EDCH_COMMON:
dissect_e_dch_channel_info(tvb, pinfo, fp_tree, offset, p_fp_info,
p_fp_info->channel == CHANNEL_EDCH_COMMON,
rlcinf, data);
break;
default:
expert_add_info(pinfo, NULL, &ei_fp_channel_type_unknown);
break;
}
return tvb_captured_length(tvb);
}
static int
dissect_fp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
{
return dissect_fp_common(tvb, pinfo, tree, NULL);
}
static int
dissect_fp_aal2(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
{
return dissect_fp_common(tvb, pinfo, tree, data);
}
#ifdef UMTS_FP_USE_UAT
UAT_VS_DEF(uat_umts_fp_ep_and_ch_records, protocol, uat_umts_fp_ep_and_ch_record_t, guint8, UMTS_FP_IPV4, "IPv4")
UAT_CSTRING_CB_DEF(uat_umts_fp_ep_and_ch_records, srcIP, uat_umts_fp_ep_and_ch_record_t)
UAT_DEC_CB_DEF(uat_umts_fp_ep_and_ch_records, src_port, uat_umts_fp_ep_and_ch_record_t)
UAT_CSTRING_CB_DEF(uat_umts_fp_ep_and_ch_records, dstIP, uat_umts_fp_ep_and_ch_record_t)
UAT_DEC_CB_DEF(uat_umts_fp_ep_and_ch_records, dst_port, uat_umts_fp_ep_and_ch_record_t)
UAT_VS_DEF(uat_umts_fp_ep_and_ch_records, interface_type, uat_umts_fp_ep_and_ch_record_t, guint8, IuB_Interface, "IuB Interface")
UAT_VS_DEF(uat_umts_fp_ep_and_ch_records, division, uat_umts_fp_ep_and_ch_record_t, guint8, Division_FDD, "Division FDD")
UAT_VS_DEF(uat_umts_fp_ep_and_ch_records, rlc_mode, uat_umts_fp_ep_and_ch_record_t, guint8, FP_RLC_TM, "RLC mode")
UAT_VS_DEF(uat_umts_fp_ep_and_ch_records, channel_type, uat_umts_fp_ep_and_ch_record_t, guint8, CHANNEL_RACH_FDD, "RACH FDD")
static void *uat_umts_fp_record_copy_cb(void *n, const void *o, size_t siz _U_) {
uat_umts_fp_ep_and_ch_record_t *new_rec = (uat_umts_fp_ep_and_ch_record_t *)n;
const uat_umts_fp_ep_and_ch_record_t *old_rec = (const uat_umts_fp_ep_and_ch_record_t *)o;
new_rec->srcIP = (old_rec->srcIP) ? g_strdup(old_rec->srcIP) : NULL;
new_rec->dstIP = (old_rec->dstIP) ? g_strdup(old_rec->dstIP) : NULL;
return new_rec;
}
static void uat_umts_fp_record_free_cb(void*r) {
uat_umts_fp_ep_and_ch_record_t *rec = (uat_umts_fp_ep_and_ch_record_t *)r;
g_free(rec->srcIP);
g_free(rec->dstIP);
}
/*
* Set up UAT predefined conversations for specified channels
* typedef struct {
* guint8 protocol;
* gchar *srcIP;
* guint16 src_port;
* gchar *dstIP;
* guint16 dst_port;
* guint8 interface_type;
* guint8 division;
* guint8 rlc_mode;
* guint8 channel_type;
* } uat_umts_fp_ep_and_ch_record_t;
*/
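/*
 * A purely hypothetical example row (not a shipped default): the record
 * { UMTS_FP_IPV4, "192.168.0.1", 5001, "192.168.0.2", 5001,
 *   IuB_Interface, Division_FDD, FP_RLC_AM, CHANNEL_RACH_FDD }
 * would pre-register an FP conversation for RACH FDD frames sent from
 * 192.168.0.1:5001 towards 192.168.0.2.
 */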
static void
umts_fp_init_protocol(void)
{
guint32 hosta_addr[4];
guint32 hostb_addr[4];
address src_addr, dst_addr;
conversation_t *conversation;
umts_fp_conversation_info_t *umts_fp_conversation_info;
guint i, j, num_tf;
for (i=0; i<num_umts_fp_ep_and_ch_items; i++) {
/* check if we have a conversation already */
/* Convert the strings to ADDR */
if (uat_umts_fp_ep_and_ch_records[i].protocol == UMTS_FP_IPV4) {
if ((uat_umts_fp_ep_and_ch_records[i].srcIP) && (!str_to_ip(uat_umts_fp_ep_and_ch_records[i].srcIP, &hosta_addr))) {
continue; /* parsing failed, skip this entry */
}
if ((uat_umts_fp_ep_and_ch_records[i].dstIP) && (!str_to_ip(uat_umts_fp_ep_and_ch_records[i].dstIP, &hostb_addr))) {
continue; /* parsing failed, skip this entry */
}
set_address(&src_addr, AT_IPv4, 4, &hosta_addr);
set_address(&dst_addr, AT_IPv4, 4, &hostb_addr);
} else {
continue; /* Not implemented yet */
}
conversation = find_conversation(1, &src_addr, &dst_addr, PT_UDP, uat_umts_fp_ep_and_ch_records[i].src_port, 0, NO_ADDR2|NO_PORT2);
if (conversation == NULL) {
/* It's not part of any conversation - create a new one. */
conversation = conversation_new(1, &src_addr, &dst_addr, PT_UDP, uat_umts_fp_ep_and_ch_records[i].src_port, 0, NO_ADDR2|NO_PORT2);
if (conversation == NULL)
continue;
conversation_set_dissector(conversation, fp_handle);
switch (uat_umts_fp_ep_and_ch_records[i].channel_type) {
case CHANNEL_RACH_FDD:
/* set up conversation info for RACH FDD channels */
umts_fp_conversation_info = wmem_new0(wmem_file_scope(), umts_fp_conversation_info_t);
/* Fill in the data */
umts_fp_conversation_info->iface_type = (enum fp_interface_type)uat_umts_fp_ep_and_ch_records[i].interface_type;
umts_fp_conversation_info->division = (enum division_type) uat_umts_fp_ep_and_ch_records[i].division;
umts_fp_conversation_info->channel = uat_umts_fp_ep_and_ch_records[i].channel_type;
umts_fp_conversation_info->dl_frame_number = 0;
umts_fp_conversation_info->ul_frame_number = 1;
copy_address_wmem(wmem_file_scope(), &(umts_fp_conversation_info->crnc_address), &src_addr);
umts_fp_conversation_info->crnc_port = uat_umts_fp_ep_and_ch_records[i].src_port;
umts_fp_conversation_info->rlc_mode = (enum fp_rlc_mode) uat_umts_fp_ep_and_ch_records[i].rlc_mode;
/*Save unique UE-identifier */
umts_fp_conversation_info->com_context_id = 1;
/* DCH's in this flow */
umts_fp_conversation_info->dch_crc_present = 2;
/* Set data for First or single channel */
umts_fp_conversation_info->fp_dch_channel_info[0].num_ul_chans = num_tf = 1;
for (j = 0; j < num_tf; j++) {
umts_fp_conversation_info->fp_dch_channel_info[0].ul_chan_tf_size[j] = 168;
umts_fp_conversation_info->fp_dch_channel_info[0].ul_chan_num_tbs[j] = 1;
}
umts_fp_conversation_info->dchs_in_flow_list[0] = 1;
umts_fp_conversation_info->num_dch_in_flow=1;
set_umts_fp_conv_data(conversation, umts_fp_conversation_info);
break;
default:
break;
}
}
}
}
#endif /* UMTS_FP_USE_UAT */
void proto_register_fp(void)
{
static hf_register_info hf[] =
{
{ &hf_fp_release,
{ "Release",
"fp.release", FT_NONE, BASE_NONE, NULL, 0x0,
"Release information", HFILL
}
},
{ &hf_fp_release_version,
{ "Release Version",
"fp.release.version", FT_UINT8, BASE_DEC, NULL, 0x0,
"3GPP Release number", HFILL
}
},
{ &hf_fp_release_year,
{ "Release year",
"fp.release.year", FT_UINT16, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_release_month,
{ "Release month",
"fp.release.month", FT_UINT8, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_channel_type,
{ "Channel Type",
"fp.channel-type", FT_UINT8, BASE_HEX, VALS(channel_type_vals), 0x0,
NULL, HFILL
}
},
{ &hf_fp_division,
{ "Division",
"fp.division", FT_UINT8, BASE_HEX, VALS(division_vals), 0x0,
"Radio division type", HFILL
}
},
{ &hf_fp_direction,
{ "Direction",
"fp.direction", FT_UINT8, BASE_HEX, VALS(direction_vals), 0x0,
"Link direction", HFILL
}
},
{ &hf_fp_ddi_config,
{ "DDI Config",
"fp.ddi-config", FT_STRING, BASE_NONE, NULL, 0x0,
"DDI Config (for E-DCH)", HFILL
}
},
{ &hf_fp_ddi_config_ddi,
{ "DDI",
"fp.ddi-config.ddi", FT_UINT8, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_ddi_config_macd_pdu_size,
{ "MACd PDU Size",
"fp.ddi-config.macd-pdu-size", FT_UINT16, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_header_crc,
{ "Header CRC",
"fp.header-crc", FT_UINT8, BASE_HEX, NULL, 0xfe,
NULL, HFILL
}
},
{ &hf_fp_ft,
{ "Frame Type",
"fp.ft", FT_UINT8, BASE_HEX, VALS(data_control_vals), 0x01,
NULL, HFILL
}
},
{ &hf_fp_cfn,
{ "CFN",
"fp.cfn", FT_UINT8, BASE_DEC, NULL, 0x0,
"Connection Frame Number", HFILL
}
},
{ &hf_fp_pch_cfn,
{ "CFN (PCH)",
"fp.pch.cfn", FT_UINT16, BASE_DEC, NULL, 0xfff0,
"PCH Connection Frame Number", HFILL
}
},
{ &hf_fp_pch_toa,
{ "ToA (PCH)",
"fp.pch.toa", FT_INT24, BASE_DEC, NULL, 0x0,
"PCH Time of Arrival", HFILL
}
},
{ &hf_fp_cfn_control,
{ "CFN control",
"fp.cfn-control", FT_UINT8, BASE_DEC, NULL, 0x0,
"Connection Frame Number Control", HFILL
}
},
{ &hf_fp_toa,
{ "ToA",
"fp.toa", FT_INT16, BASE_DEC, NULL, 0x0,
"Time of arrival (units are 125 microseconds)", HFILL
}
},
{ &hf_fp_tb,
{ "TB",
"fp.tb", FT_BYTES, BASE_NONE, NULL, 0x0,
"Transport Block", HFILL
}
},
{ &hf_fp_chan_zero_tbs,
{ "No TBs for channel",
"fp.channel-with-zero-tbs", FT_UINT32, BASE_DEC, NULL, 0x0,
"Channel with 0 TBs", HFILL
}
},
{ &hf_fp_tfi,
{ "TFI",
"fp.tfi", FT_UINT8, BASE_DEC, NULL, 0x0,
"Transport Format Indicator", HFILL
}
},
{ &hf_fp_usch_tfi,
{ "TFI",
"fp.usch.tfi", FT_UINT8, BASE_DEC, NULL, 0x1f,
"USCH Transport Format Indicator", HFILL
}
},
{ &hf_fp_cpch_tfi,
{ "TFI",
"fp.cpch.tfi", FT_UINT8, BASE_DEC, NULL, 0x1f,
"CPCH Transport Format Indicator", HFILL
}
},
{ &hf_fp_propagation_delay,
{ "Propagation Delay",
"fp.propagation-delay", FT_UINT8, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_dch_control_frame_type,
{ "Control Frame Type",
"fp.dch.control.frame-type", FT_UINT8, BASE_HEX, VALS(dch_control_frame_type_vals), 0x0,
"DCH Control Frame Type", HFILL
}
},
{ &hf_fp_dch_rx_timing_deviation,
{ "Rx Timing Deviation",
"fp.dch.control.rx-timing-deviation", FT_UINT8, BASE_DEC, 0, 0x0,
"DCH Rx Timing Deviation", HFILL
}
},
{ &hf_fp_quality_estimate,
{ "Quality Estimate",
"fp.dch.quality-estimate", FT_UINT8, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_payload_crc,
{ "Payload CRC",
"fp.payload-crc", FT_UINT16, BASE_HEX, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_common_control_frame_type,
{ "Control Frame Type",
"fp.common.control.frame-type", FT_UINT8, BASE_HEX, VALS(common_control_frame_type_vals), 0x0,
"Common Control Frame Type", HFILL
}
},
{ &hf_fp_crci[0],
{ "CRCI",
"fp.crci", FT_UINT8, BASE_HEX, VALS(crci_vals), 0x80,
"CRC correctness indicator", HFILL
}
},
{ &hf_fp_crci[1],
{ "CRCI",
"fp.crci", FT_UINT8, BASE_HEX, VALS(crci_vals), 0x40,
"CRC correctness indicator", HFILL
}
},
{ &hf_fp_crci[2],
{ "CRCI",
"fp.crci", FT_UINT8, BASE_HEX, VALS(crci_vals), 0x20,
"CRC correctness indicator", HFILL
}
},
{ &hf_fp_crci[3],
{ "CRCI",
"fp.crci", FT_UINT8, BASE_HEX, VALS(crci_vals), 0x10,
"CRC correctness indicator", HFILL
}
},
{ &hf_fp_crci[4],
{ "CRCI",
"fp.crci", FT_UINT8, BASE_HEX, VALS(crci_vals), 0x08,
"CRC correctness indicator", HFILL
}
},
{ &hf_fp_crci[5],
{ "CRCI",
"fp.crci", FT_UINT8, BASE_HEX, VALS(crci_vals), 0x04,
"CRC correctness indicator", HFILL
}
},
{ &hf_fp_crci[6],
{ "CRCI",
"fp.crci", FT_UINT8, BASE_HEX, VALS(crci_vals), 0x02,
"CRC correctness indicator", HFILL
}
},
{ &hf_fp_crci[7],
{ "CRCI",
"fp.crci", FT_UINT8, BASE_HEX, VALS(crci_vals), 0x01,
"CRC correctness indicator", HFILL
}
},
{ &hf_fp_received_sync_ul_timing_deviation,
{ "Received SYNC UL Timing Deviation",
"fp.rx-sync-ul-timing-deviation", FT_UINT8, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_pch_pi,
{ "Paging Indication",
"fp.pch.pi", FT_UINT8, BASE_DEC, VALS(paging_indication_vals), 0x01,
"Indicates if the PI Bitmap is present", HFILL
}
},
{ &hf_fp_pch_tfi,
{ "TFI",
"fp.pch.tfi", FT_UINT8, BASE_DEC, 0, 0x1f,
"PCH Transport Format Indicator", HFILL
}
},
{ &hf_fp_fach_tfi,
{ "TFI",
"fp.fach.tfi", FT_UINT8, BASE_DEC, 0, 0x1f,
"FACH Transport Format Indicator", HFILL
}
},
{ &hf_fp_transmit_power_level,
{ "Transmit Power Level",
"fp.transmit-power-level", FT_FLOAT, BASE_NONE, 0, 0x0,
"Transmit Power Level (dB)", HFILL
}
},
{ &hf_fp_pdsch_set_id,
{ "PDSCH Set Id",
"fp.pdsch-set-id", FT_UINT8, BASE_DEC, 0, 0x0,
"A pointer to the PDSCH Set which shall be used to transmit", HFILL
}
},
{ &hf_fp_paging_indication_bitmap,
{ "Paging Indications bitmap",
"fp.pch.pi-bitmap", FT_NONE, BASE_NONE, NULL, 0x0,
"Paging Indication bitmap", HFILL
}
},
{ &hf_fp_rx_timing_deviation,
{ "Rx Timing Deviation",
"fp.common.control.rx-timing-deviation", FT_UINT8, BASE_DEC, 0, 0x0,
"Common Rx Timing Deviation", HFILL
}
},
{ &hf_fp_dch_e_rucch_flag,
{ "E-RUCCH Flag",
"fp.common.control.e-rucch-flag", FT_UINT8, BASE_DEC, VALS(e_rucch_flag_vals), 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_header_crc,
{ "E-DCH Header CRC",
"fp.edch.header-crc", FT_UINT16, BASE_HEX, 0, 0,
NULL, HFILL
}
},
{ &hf_fp_edch_fsn,
{ "FSN",
"fp.edch.fsn", FT_UINT8, BASE_DEC, 0, 0x0f,
"E-DCH Frame Sequence Number", HFILL
}
},
{ &hf_fp_edch_number_of_subframes,
{ "No of subframes",
"fp.edch.no-of-subframes", FT_UINT8, BASE_DEC, 0, 0x0f,
"E-DCH Number of subframes", HFILL
}
},
{ &hf_fp_edch_harq_retransmissions,
{ "No of HARQ Retransmissions",
"fp.edch.no-of-harq-retransmissions", FT_UINT8, BASE_DEC, 0, 0x78,
"E-DCH Number of HARQ retransmissions", HFILL
}
},
{ &hf_fp_edch_subframe_number,
{ "Subframe number",
"fp.edch.subframe-number", FT_UINT8, BASE_DEC, 0, 0x0,
"E-DCH Subframe number", HFILL
}
},
{ &hf_fp_edch_number_of_mac_es_pdus,
{ "Number of Mac-es PDUs",
"fp.edch.number-of-mac-es-pdus", FT_UINT8, BASE_DEC, 0, 0xf0,
NULL, HFILL
}
},
{ &hf_fp_edch_ddi,
{ "DDI",
"fp.edch.ddi", FT_UINT8, BASE_DEC, 0, 0x0,
"E-DCH Data Description Indicator", HFILL
}
},
{ &hf_fp_edch_subframe,
{ "Subframe",
"fp.edch.subframe", FT_STRING, BASE_NONE, NULL, 0x0,
"EDCH Subframe", HFILL
}
},
{ &hf_fp_edch_subframe_header,
{ "Subframe header",
"fp.edch.subframe-header", FT_STRING, BASE_NONE, NULL, 0x0,
"EDCH Subframe header", HFILL
}
},
{ &hf_fp_edch_number_of_mac_d_pdus,
{ "Number of Mac-d PDUs",
"fp.edch.number-of-mac-d-pdus", FT_UINT8, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_pdu_padding,
{ "Padding",
"fp.edch-data-padding", FT_UINT8, BASE_DEC, 0, 0xc0,
"E-DCH padding before PDU", HFILL
}
},
{ &hf_fp_edch_tsn,
{ "TSN",
"fp.edch-tsn", FT_UINT8, BASE_DEC, 0, 0x3f,
"E-DCH Transmission Sequence Number", HFILL
}
},
{ &hf_fp_edch_mac_es_pdu,
{ "MAC-es PDU",
"fp.edch.mac-es-pdu", FT_NONE, BASE_NONE, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_user_buffer_size,
{ "User Buffer Size",
"fp.edch.user-buffer-size", FT_UINT24, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_no_macid_sdus,
{ "No of MAC-is SDUs",
"fp.edch.no-macis-sdus", FT_UINT16, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_number_of_mac_is_pdus,
{ "Number of Mac-is PDUs",
"fp.edch.number-of-mac-is-pdus", FT_UINT8, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_mac_is_pdu,
{ "Mac-is PDU",
"fp.edch.mac-is-pdu", FT_BYTES, BASE_NONE, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_e_rnti,
{ "E-RNTI",
"fp.edch.e-rnti", FT_UINT16, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_macis_descriptors,
{ "MAC-is Descriptors",
"fp.edch.mac-is.descriptors", FT_STRING, BASE_NONE, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_edch_macis_lchid,
{ "LCH-ID",
"fp.edch.mac-is.lchid", FT_UINT8, BASE_HEX, VALS(lchid_vals), 0xf0,
NULL, HFILL
}
},
{ &hf_fp_edch_macis_length,
{ "Length",
"fp.edch.mac-is.length", FT_UINT16, BASE_DEC, 0, 0x0ffe,
NULL, HFILL
}
},
{ &hf_fp_edch_macis_flag,
{ "Flag",
"fp.edch.mac-is.lchid", FT_UINT8, BASE_HEX, 0, 0x01,
"Indicates if another entry follows", HFILL
}
},
{ &hf_fp_frame_seq_nr,
{ "Frame Seq Nr",
"fp.frame-seq-nr", FT_UINT8, BASE_DEC, 0, 0xf0,
"Frame Sequence Number", HFILL
}
},
{ &hf_fp_hsdsch_pdu_block_header,
{ "PDU block header",
"fp.hsdsch.pdu-block-header", FT_STRING, BASE_NONE, NULL, 0x0,
"HS-DSCH type 2 PDU block header", HFILL
}
},
#if 0
{ &hf_fp_hsdsch_pdu_block,
{ "PDU block",
"fp.hsdsch.pdu-block", FT_STRING, BASE_NONE, NULL, 0x0,
"HS-DSCH type 2 PDU block data", HFILL
}
},
#endif
{ &hf_fp_flush,
{ "Flush",
"fp.flush", FT_UINT8, BASE_DEC, 0, 0x04,
"Whether all PDUs for this priority queue should be removed", HFILL
}
},
{ &hf_fp_fsn_drt_reset,
{ "FSN-DRT reset",
"fp.fsn-drt-reset", FT_UINT8, BASE_DEC, 0, 0x02,
"FSN/DRT Reset Flag", HFILL
}
},
{ &hf_fp_drt_indicator,
{ "DRT Indicator",
"fp.drt-indicator", FT_UINT8, BASE_DEC, 0, 0x01,
NULL, HFILL
}
},
{ &hf_fp_fach_indicator,
{ "FACH Indicator",
"fp.fach-indicator", FT_UINT8, BASE_DEC, 0, 0x80,
NULL, HFILL
}
},
{ &hf_fp_total_pdu_blocks,
{ "PDU Blocks",
"fp.pdu_blocks", FT_UINT8, BASE_DEC, 0, 0xf8,
"Total number of PDU blocks", HFILL
}
},
{ &hf_fp_drt,
{ "DelayRefTime",
"fp.drt", FT_UINT16, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_hrnti,
{ "HRNTI",
"fp.hrnti", FT_UINT16, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_rach_measurement_result,
{ "RACH Measurement Result",
"fp.rach-measurement-result", FT_UINT16, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_lchid,
{ "Logical Channel ID",
"fp.lchid", FT_UINT8, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_pdu_length_in_block,
{ "PDU length in block",
"fp.pdu-length-in-block", FT_UINT8, BASE_DEC, 0, 0x0,
"Length of each PDU in this block in bytes", HFILL
}
},
{ &hf_fp_pdus_in_block,
{ "PDUs in block",
"fp.no-pdus-in-block", FT_UINT8, BASE_DEC, 0, 0x0,
"Number of PDUs in block", HFILL
}
},
{ &hf_fp_cmch_pi,
{ "CmCH-PI",
"fp.cmch-pi", FT_UINT8, BASE_DEC, 0, 0x0f,
"Common Transport Channel Priority Indicator", HFILL
}
},
{ &hf_fp_user_buffer_size,
{ "User buffer size",
"fp.user-buffer-size", FT_UINT16, BASE_DEC, 0, 0x0,
"User buffer size in octets", HFILL
}
},
{ &hf_fp_hsdsch_credits,
{ "HS-DSCH Credits",
"fp.hsdsch-credits", FT_UINT16, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_max_macd_pdu_len,
{ "Max MAC-d PDU Length",
"fp.hsdsch.max-macd-pdu-len", FT_UINT16, BASE_DEC, 0, 0xfff8,
"Maximum MAC-d PDU Length in bits", HFILL
}
},
{ &hf_fp_hsdsch_max_macdc_pdu_len,
{ "Max MAC-d/c PDU Length",
"fp.hsdsch.max-macdc-pdu-len", FT_UINT16, BASE_DEC, 0, 0x07ff,
"Maximum MAC-d/c PDU Length in bits", HFILL
}
},
{ &hf_fp_hsdsch_interval,
{ "HS-DSCH Interval in milliseconds",
"fp.hsdsch-interval", FT_UINT8, BASE_DEC, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_calculated_rate,
{ "Calculated rate allocation (bps)",
"fp.hsdsch-calculated-rate", FT_UINT32, BASE_DEC, 0, 0x0,
"Calculated rate RNC is allowed to send in bps", HFILL
}
},
{ &hf_fp_hsdsch_unlimited_rate,
{ "Unlimited rate",
"fp.hsdsch-unlimited-rate", FT_NONE, BASE_NONE, 0, 0x0,
"No restriction on rate at which date may be sent", HFILL
}
},
{ &hf_fp_hsdsch_repetition_period,
{ "HS-DSCH Repetition Period",
"fp.hsdsch-repetition-period", FT_UINT8, BASE_DEC, 0, 0x0,
"HS-DSCH Repetition Period in milliseconds", HFILL
}
},
{ &hf_fp_hsdsch_data_padding,
{ "Padding",
"fp.hsdsch-data-padding", FT_UINT8, BASE_DEC, 0, 0xf0,
"HS-DSCH Repetition Period in milliseconds", HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flags,
{ "New IEs flags",
"fp.hsdsch.new-ie-flags", FT_STRING, BASE_NONE, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flag[0],
{ "DRT IE present",
"fp.hsdsch.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x80,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flag[1],
{ "New IE present",
"fp.hsdsch.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x40,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flag[2],
{ "New IE present",
"fp.hsdsch.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x20,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flag[3],
{ "New IE present",
"fp.hsdsch.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x10,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flag[4],
{ "New IE present",
"fp.hsdsch.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x08,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flag[5],
{ "New IE present",
"fp.hsdsch.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x04,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flag[6],
{ "HS-DSCH physical layer category present",
"fp.hsdsch.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x02,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_new_ie_flag[7],
{ "Another new IE flags byte",
"fp.hsdsch.new-ie-flags-byte", FT_UINT8, BASE_DEC, 0, 0x01,
"Another new IE flagsbyte", HFILL
}
},
{ &hf_fp_hsdsch_drt,
{ "DRT",
"fp.hsdsch.drt", FT_UINT8, BASE_DEC, 0, 0xf0,
"Delay Reference Time", HFILL
}
},
{ &hf_fp_hsdsch_entity,
{ "HS-DSCH Entity",
"fp.hsdsch.entity", FT_UINT8, BASE_DEC, VALS(hsdshc_mac_entity_vals), 0x0,
"Type of MAC entity for this HS-DSCH channel", HFILL
}
},
{ &hf_fp_timing_advance,
{ "Timing advance",
"fp.timing-advance", FT_UINT8, BASE_DEC, 0, 0x3f,
"Timing advance in chips", HFILL
}
},
{ &hf_fp_num_of_pdu,
{ "Number of PDUs",
"fp.hsdsch.num-of-pdu", FT_UINT8, BASE_DEC, 0, 0x0,
"Number of PDUs in the payload", HFILL
}
},
{ &hf_fp_mac_d_pdu_len,
{ "MAC-d PDU Length",
"fp.hsdsch.mac-d-pdu-len", FT_UINT16, BASE_DEC, 0, 0xfff8,
"MAC-d PDU Length in bits", HFILL
}
},
{ &hf_fp_mac_d_pdu,
{ "MAC-d PDU",
"fp.mac-d-pdu", FT_BYTES, BASE_NONE, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_data,
{ "Data",
"fp.data", FT_BYTES, BASE_NONE, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_crcis,
{ "CRCIs",
"fp.crcis", FT_BYTES, BASE_NONE, NULL, 0x0,
"CRC Indicators for uplink TBs", HFILL
}
},
{ &hf_fp_t1,
{ "T1",
"fp.t1", FT_UINT24, BASE_DEC, NULL, 0x0,
"RNC frame number indicating time it sends frame", HFILL
}
},
{ &hf_fp_t2,
{ "T2",
"fp.t2", FT_UINT24, BASE_DEC, NULL, 0x0,
"NodeB frame number indicating time it received DL Sync", HFILL
}
},
{ &hf_fp_t3,
{ "T3",
"fp.t3", FT_UINT24, BASE_DEC, NULL, 0x0,
"NodeB frame number indicating time it sends frame", HFILL
}
},
{ &hf_fp_ul_sir_target,
{ "UL_SIR_TARGET",
"fp.ul-sir-target", FT_FLOAT, BASE_NONE, 0, 0x0,
"Value (in dB) of the SIR target to be used by the UL inner loop power control", HFILL
}
},
{ &hf_fp_pusch_set_id,
{ "PUSCH Set Id",
"fp.pusch-set-id", FT_UINT8, BASE_DEC, NULL, 0x0,
"Identifies PUSCH Set from those configured in NodeB", HFILL
}
},
{ &hf_fp_activation_cfn,
{ "Activation CFN",
"fp.activation-cfn", FT_UINT8, BASE_DEC, NULL, 0x0,
"Activation Connection Frame Number", HFILL
}
},
{ &hf_fp_duration,
{ "Duration (ms)",
"fp.pusch-set-id", FT_UINT8, BASE_DEC, NULL, 0x0,
"Duration of the activation period of the PUSCH Set", HFILL
}
},
{ &hf_fp_power_offset,
{ "Power offset",
"fp.power-offset", FT_FLOAT, BASE_NONE, NULL, 0x0,
"Power offset (in dB)", HFILL
}
},
{ &hf_fp_code_number,
{ "Code number",
"fp.code-number", FT_UINT8, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_spreading_factor,
{ "Spreading factor",
"fp.spreading-factor", FT_UINT8, BASE_DEC, VALS(spreading_factor_vals), 0xf0,
NULL, HFILL
}
},
{ &hf_fp_mc_info,
{ "MC info",
"fp.mc-info", FT_UINT8, BASE_DEC, NULL, 0x0e,
NULL, HFILL
}
},
{ &hf_fp_rach_new_ie_flags,
{ "New IEs flags",
"fp.rach.new-ie-flags", FT_STRING, BASE_NONE, 0, 0x0,
NULL, HFILL
}
},
{ &hf_fp_rach_new_ie_flag_unused[0],
{ "New IE present",
"fp.rach.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x80,
NULL, HFILL
}
},
{ &hf_fp_rach_new_ie_flag_unused[1],
{ "New IE present",
"fp.rach.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x40,
NULL, HFILL
}
},
{ &hf_fp_rach_new_ie_flag_unused[2],
{ "New IE present",
"fp.rach.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x20,
"New IE present (unused)", HFILL
}
},
{ &hf_fp_rach_new_ie_flag_unused[3],
{ "New IE present",
"fp.rach.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x10,
"New IE present (unused)", HFILL
}
},
{ &hf_fp_rach_new_ie_flag_unused[4],
{ "New IE present",
"fp.rach.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x08,
"New IE present (unused)", HFILL
}
},
{ &hf_fp_rach_new_ie_flag_unused[5],
{ "New IE present",
"fp.rach.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x04,
"New IE present (unused)", HFILL
}
},
{ &hf_fp_rach_new_ie_flag_unused[6],
{ "New IE present",
"fp.rach.new-ie-flag", FT_UINT8, BASE_DEC, 0, 0x02,
"New IE present (unused)", HFILL
}
},
{ &hf_fp_rach_cell_portion_id_present,
{ "Cell portion ID present",
"fp.rach.cell-portion-id-present", FT_UINT8, BASE_DEC, 0, 0x01,
NULL, HFILL
}
},
{ &hf_fp_rach_angle_of_arrival_present,
{ "Angle of arrival present",
"fp.rach.angle-of-arrival-present", FT_UINT8, BASE_DEC, 0, 0x01,
NULL, HFILL
}
},
{ &hf_fp_rach_ext_propagation_delay_present,
{ "Ext Propagation Delay Present",
"fp.rach.ext-propagation-delay-present", FT_UINT8, BASE_DEC, 0, 0x02,
NULL, HFILL
}
},
{ &hf_fp_rach_ext_rx_sync_ul_timing_deviation_present,
{ "Ext Received Sync UL Timing Deviation present",
"fp.rach.ext-rx-sync-ul-timing-deviation-present", FT_UINT8, BASE_DEC, 0, 0x02,
NULL, HFILL
}
},
{ &hf_fp_rach_ext_rx_timing_deviation_present,
{ "Ext Rx Timing Deviation present",
"fp.rach.ext-rx-timing-deviation-present", FT_UINT8, BASE_DEC, 0, 0x01,
NULL, HFILL
}
},
{ &hf_fp_cell_portion_id,
{ "Cell Portion ID",
"fp.cell-portion-id", FT_UINT8, BASE_DEC, NULL, 0x3f,
NULL, HFILL
}
},
{ &hf_fp_ext_propagation_delay,
{ "Ext Propagation Delay",
"fp.ext-propagation-delay", FT_UINT16, BASE_DEC, NULL, 0x03ff,
NULL, HFILL
}
},
{ &hf_fp_angle_of_arrival,
{ "Angle of Arrival",
"fp.angle-of-arrival", FT_UINT16, BASE_DEC, NULL, 0x03ff,
NULL, HFILL
}
},
{ &hf_fp_ext_received_sync_ul_timing_deviation,
{ "Ext Received SYNC UL Timing Deviation",
"fp.ext-received-sync-ul-timing-deviation", FT_UINT16, BASE_DEC, NULL, 0x1fff,
NULL, HFILL
}
},
{ &hf_fp_radio_interface_parameter_update_flag[0],
{ "CFN valid",
"fp.radio-interface-param.cfn-valid", FT_UINT16, BASE_DEC, 0, 0x0001,
NULL, HFILL
}
},
{ &hf_fp_radio_interface_parameter_update_flag[1],
{ "TPC PO valid",
"fp.radio-interface-param.tpc-po-valid", FT_UINT16, BASE_DEC, 0, 0x0002,
NULL, HFILL
}
},
{ &hf_fp_radio_interface_parameter_update_flag[2],
{ "DPC mode valid",
"fp.radio-interface-param.dpc-mode-valid", FT_UINT16, BASE_DEC, 0, 0x0004,
NULL, HFILL
}
},
{ &hf_fp_radio_interface_parameter_update_flag[3],
{ "RL sets indicator valid",
"fp.radio-interface_param.rl-sets-indicator-valid", FT_UINT16, BASE_DEC, 0, 0x0020,
NULL, HFILL
}
},
{ &hf_fp_radio_interface_parameter_update_flag[4],
{ "MAX_UE_TX_POW valid",
"fp.radio-interface-param.max-ue-tx-pow-valid", FT_UINT16, BASE_DEC, 0, 0x0040,
"MAX UE TX POW valid", HFILL
}
},
{ &hf_fp_dpc_mode,
{ "DPC Mode",
"fp.dpc-mode", FT_UINT8, BASE_DEC, NULL, 0x20,
"DPC Mode to be applied in the uplink", HFILL
}
},
{ &hf_fp_tpc_po,
{ "TPC PO",
"fp.tpc-po", FT_UINT8, BASE_DEC, NULL, 0x1f,
NULL, HFILL
}
},
{ &hf_fp_multiple_rl_set_indicator,
{ "Multiple RL sets indicator",
"fp.multiple-rl-sets-indicator", FT_UINT8, BASE_DEC, NULL, 0x80,
NULL, HFILL
}
},
{ &hf_fp_max_ue_tx_pow,
{ "MAX_UE_TX_POW",
"fp.max-ue-tx-pow", FT_INT8, BASE_DEC, NULL, 0x0,
"Max UE TX POW (dBm)", HFILL
}
},
{ &hf_fp_congestion_status,
{ "Congestion Status",
"fp.congestion-status", FT_UINT8, BASE_DEC, VALS(congestion_status_vals), 0x0,
NULL, HFILL
}
},
{ &hf_fp_e_rucch_present,
{ "E-RUCCH Present",
"fp.erucch-present", FT_UINT8, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_extended_bits_present,
{ "Extended Bits Present",
"fp.extended-bits-present", FT_UINT8, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_extended_bits,
{ "Extended Bits",
"fp.extended-bits", FT_UINT8, BASE_HEX, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_spare_extension,
{ "Spare Extension",
"fp.spare-extension", FT_NONE, BASE_NONE, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_ul_setup_frame,
{ "UL setup frame",
"fp.ul.setup_frame", FT_FRAMENUM, BASE_NONE, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_dl_setup_frame,
{ "DL setup frame",
"fp.dl.setup_frame", FT_FRAMENUM, BASE_NONE, NULL, 0x0,
NULL, HFILL
}
},
{ &hf_fp_hsdsch_physical_layer_category,
{ "HS-DSCH physical layer category",
"fp.hsdsch.physical_layer_category", FT_UINT8, BASE_DEC, NULL, 0x0,
NULL, HFILL
}
}
};
static gint *ett[] =
{
&ett_fp,
&ett_fp_data,
&ett_fp_crcis,
&ett_fp_ddi_config,
&ett_fp_edch_subframe_header,
&ett_fp_edch_subframe,
&ett_fp_edch_maces,
&ett_fp_edch_macis_descriptors,
&ett_fp_hsdsch_new_ie_flags,
&ett_fp_rach_new_ie_flags,
&ett_fp_hsdsch_pdu_block_header,
&ett_fp_release
};
static ei_register_info ei[] = {
{ &ei_fp_bad_header_checksum, { "fp.header.bad_checksum.", PI_CHECKSUM, PI_WARN, "Bad header checksum.", EXPFILL }},
{ &ei_fp_crci_no_subdissector, { "fp.crci.no_subdissector", PI_UNDECODED, PI_NOTE, "Not sent to subdissectors as CRCI is set", EXPFILL }},
{ &ei_fp_crci_error_bit_set_for_tb, { "fp.crci.error_bit_set_for_tb", PI_CHECKSUM, PI_WARN, "CRCI error bit set for TB", EXPFILL }},
{ &ei_fp_spare_extension, { "fp.spare-extension.expert", PI_UNDECODED, PI_WARN, "Spare Extension present (%u bytes)", EXPFILL }},
{ &ei_fp_bad_payload_checksum, { "fp.payload-crc.bad.", PI_CHECKSUM, PI_WARN, "Bad payload checksum.", EXPFILL }},
{ &ei_fp_stop_hsdpa_transmission, { "fp.stop_hsdpa_transmission", PI_RESPONSE_CODE, PI_NOTE, "Stop HSDPA transmission", EXPFILL }},
{ &ei_fp_timing_adjustmentment_reported, { "fp.timing_adjustmentment_reported", PI_SEQUENCE, PI_WARN, "Timing adjustmentment reported (%f ms)", EXPFILL }},
{ &ei_fp_expecting_tdd, { "fp.expecting_tdd", PI_MALFORMED, PI_NOTE, "Error: expecting TDD-384 or TDD-768", EXPFILL }},
{ &ei_fp_ddi_not_defined, { "fp.ddi_not_defined", PI_MALFORMED, PI_ERROR, "DDI %u not defined for this UE!", EXPFILL }},
{ &ei_fp_unable_to_locate_ddi_entry, { "fp.unable_to_locate_ddi_entry", PI_UNDECODED, PI_ERROR, "Unable to locate DDI entry.", EXPFILL }},
{ &ei_fp_mac_is_sdus_miscount, { "fp.mac_is_sdus.miscount", PI_MALFORMED, PI_ERROR, "Found too many (%u) MAC-is SDUs - header said there were %u", EXPFILL }},
{ &ei_fp_e_rnti_t2_edch_frames, { "fp.e_rnti.t2_edch_frames", PI_MALFORMED, PI_ERROR, "E-RNTI not supposed to appear for T2 EDCH frames", EXPFILL }},
{ &ei_fp_e_rnti_first_entry, { "fp.e_rnti.first_entry", PI_MALFORMED, PI_ERROR, "E-RNTI must be first entry among descriptors", EXPFILL }},
{ &ei_fp_maybe_srb, { "fp.maybe_srb", PI_PROTOCOL, PI_NOTE, "Found MACd-Flow = 0 and not MUX detected. (This might be SRB)", EXPFILL }},
{ &ei_fp_transport_channel_type_unknown, { "fp.transport_channel_type.unknown", PI_UNDECODED, PI_WARN, "Unknown transport channel type", EXPFILL }},
{ &ei_fp_hsdsch_entity_not_specified, { "fp.hsdsch_entity_not_specified", PI_MALFORMED, PI_ERROR, "HSDSCH Entity not specified", EXPFILL }},
{ &ei_fp_hsdsch_common_experimental_support, { "fp.hsdsch_common.experimental_support", PI_DEBUG, PI_WARN, "HSDSCH COMMON - Experimental support!", EXPFILL }},
{ &ei_fp_hsdsch_common_t3_not_implemented, { "fp.hsdsch_common_t3.not_implemented", PI_DEBUG, PI_ERROR, "HSDSCH COMMON T3 - Not implemeneted!", EXPFILL }},
{ &ei_fp_channel_type_unknown, { "fp.channel_type.unknown", PI_MALFORMED, PI_ERROR, "Unknown channel type", EXPFILL }},
{ &ei_fp_no_per_frame_info, { "fp.no_per_frame_info", PI_UNDECODED, PI_ERROR, "Can't dissect FP frame because no per-frame info was attached!", EXPFILL }},
};
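/*
 * Several of the summaries above ("Spare Extension present (%u bytes)",
 * "Timing adjustment reported (%f ms)") are printf-style templates. They
 * are filled in at dissection time, roughly like the following sketch
 * ("remain" is a hypothetical local holding the leftover byte count):
 *
 *   expert_add_info_format(pinfo, ti, &ei_fp_spare_extension,
 *                          "Spare Extension present (%u bytes)", remain);
 */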
module_t *fp_module;
expert_module_t *expert_fp;
#ifdef UMTS_FP_USE_UAT
/* Define a UAT to set channel configuration data */
static const value_string umts_fp_proto_type_vals[] = {
{ UMTS_FP_IPV4, "IPv4" },
{ UMTS_FP_IPV6, "IPv6" },
{ 0x00, NULL }
};
static const value_string umts_fp_uat_channel_type_vals[] = {
{ CHANNEL_RACH_FDD, "RACH FDD" },
{ 0x00, NULL }
};
static const value_string umts_fp_uat_interface_type_vals[] = {
{ IuB_Interface, "IuB Interface" },
{ 0x00, NULL }
};
static const value_string umts_fp_uat_division_type_vals[] = {
{ Division_FDD, "Division FDD" },
{ 0x00, NULL }
};
static const value_string umts_fp_uat_rlc_mode_vals[] = {
{ FP_RLC_TM, "FP RLC TM" },
{ 0x00, NULL }
};
static uat_field_t umts_fp_uat_flds[] = {
UAT_FLD_VS(uat_umts_fp_ep_and_ch_records, protocol, "IP address type", umts_fp_proto_type_vals, "IPv4 or IPv6"),
UAT_FLD_CSTRING(uat_umts_fp_ep_and_ch_records, srcIP, "RNC IP Address", "Source Address"),
UAT_FLD_DEC(uat_umts_fp_ep_and_ch_records, src_port, "RNC port for this channel", "Source port"),
UAT_FLD_CSTRING(uat_umts_fp_ep_and_ch_records, dstIP, "NodeB IP Address", "Destination Address"),
UAT_FLD_DEC(uat_umts_fp_ep_and_ch_records, dst_port, "NodeB port for this channel", "Destination port"),
UAT_FLD_VS(uat_umts_fp_ep_and_ch_records, interface_type, "Interface type", umts_fp_uat_interface_type_vals, "Interface type used"),
UAT_FLD_VS(uat_umts_fp_ep_and_ch_records, division, "division", umts_fp_uat_division_type_vals, "Division type used"),
UAT_FLD_VS(uat_umts_fp_ep_and_ch_records, channel_type, "Channel type", umts_fp_uat_channel_type_vals, "Channel type used"),
UAT_FLD_VS(uat_umts_fp_ep_and_ch_records, rlc_mode, "RLC mode", umts_fp_uat_rlc_mode_vals, "RLC mode used"),
UAT_END_FIELDS
};
#endif /* UMTS_FP_USE_UAT */
/* Register protocol. */
proto_fp = proto_register_protocol("FP", "FP", "fp");
proto_register_field_array(proto_fp, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
expert_fp = expert_register_protocol(proto_fp);
expert_register_field_array(expert_fp, ei, array_length(ei));
/* Allow other dissectors to find this one by name. */
fp_handle = register_dissector("fp", dissect_fp, proto_fp);
/* Preferences */
fp_module = prefs_register_protocol(proto_fp, NULL);
/* Determines whether release information should be displayed */
prefs_register_bool_preference(fp_module, "show_release_info",
"Show reported release info",
"Show reported release info",
&preferences_show_release_info);
/* Determines whether MAC dissector should be called for payloads */
prefs_register_bool_preference(fp_module, "call_mac",
"Call MAC dissector for payloads",
"Call MAC dissector for payloads",
&preferences_call_mac_dissectors);
/* Determines whether or not to validate FP payload checksums */
prefs_register_bool_preference(fp_module, "payload_checksum",
"Validate FP payload checksums",
"Validate FP payload checksums",
&preferences_payload_checksum);
/* Determines whether or not to validate FP header checksums */
prefs_register_bool_preference(fp_module, "header_checksum",
"Validate FP header checksums",
"Validate FP header checksums",
&preferences_header_checksum);
/* Determines whether or not to validate FP header checksums */
prefs_register_obsolete_preference(fp_module, "udp_heur");
#ifdef UMTS_FP_USE_UAT
umts_fp_uat = uat_new("Endpoint and Channel Configuration",
sizeof(uat_umts_fp_ep_and_ch_record_t), /* record size */
"umts_fp_ep_and_channel_cnf", /* filename */
TRUE, /* from_profile */
&uat_umts_fp_ep_and_ch_records, /* data_ptr */
&num_umts_fp_ep_and_ch_items, /* numitems_ptr */
UAT_AFFECTS_DISSECTION, /* affects dissection of packets, but not set of named fields */
NULL, /* help */
uat_umts_fp_record_copy_cb, /* copy callback */
NULL, /* update callback */
uat_umts_fp_record_free_cb, /* free callback */
NULL, /* post update callback */
umts_fp_uat_flds); /* UAT field definitions */
prefs_register_uat_preference(fp_module,
"epandchannelconfigurationtable",
"Endpoints and Radio Channels configuration",
"Preconfigured endpoint and Channels data",
umts_fp_uat);
register_init_routine(&umts_fp_init_protocol);
#endif
}
void proto_reg_handoff_fp(void)
{
dissector_handle_t fp_aal2_handle;
rlc_bcch_handle = find_dissector_add_dependency("rlc.bcch", proto_fp);
mac_fdd_rach_handle = find_dissector_add_dependency("mac.fdd.rach", proto_fp);
mac_fdd_fach_handle = find_dissector_add_dependency("mac.fdd.fach", proto_fp);
mac_fdd_pch_handle = find_dissector_add_dependency("mac.fdd.pch", proto_fp);
mac_fdd_dch_handle = find_dissector_add_dependency("mac.fdd.dch", proto_fp);
mac_fdd_edch_handle = find_dissector_add_dependency("mac.fdd.edch", proto_fp);
mac_fdd_edch_type2_handle = find_dissector_add_dependency("mac.fdd.edch.type2", proto_fp);
mac_fdd_hsdsch_handle = find_dissector_add_dependency("mac.fdd.hsdsch", proto_fp);
heur_dissector_add("udp", heur_dissect_fp, "FP over UDP", "fp_udp", proto_fp, HEURISTIC_DISABLE);
fp_aal2_handle = create_dissector_handle(dissect_fp_aal2, proto_fp);
dissector_add_uint("atm.aal2.type", TRAF_UMTS_FP, fp_aal2_handle);
}
/*
* Editor modelines - http://www.wireshark.org/tools/modelines.html
*
* Local variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* vi: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5107_0 |
crossvul-cpp_data_good_1575_2 | /* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* http_filter.c --- HTTP routines which either filter or deal with filters.
*/
#include "apr.h"
#include "apr_strings.h"
#include "apr_buckets.h"
#include "apr_lib.h"
#include "apr_signal.h"
#define APR_WANT_STDIO /* for sscanf */
#define APR_WANT_STRFUNC
#define APR_WANT_MEMFUNC
#include "apr_want.h"
#include "util_filter.h"
#include "ap_config.h"
#include "httpd.h"
#include "http_config.h"
#include "http_core.h"
#include "http_protocol.h"
#include "http_main.h"
#include "http_request.h"
#include "http_vhost.h"
#include "http_connection.h"
#include "http_log.h" /* For errors detected in basic auth common
* support code... */
#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
#include "util_charset.h"
#include "util_ebcdic.h"
#include "util_time.h"
#include "mod_core.h"
#if APR_HAVE_STDARG_H
#include <stdarg.h>
#endif
#if APR_HAVE_UNISTD_H
#include <unistd.h>
#endif
APLOG_USE_MODULE(http);
typedef struct http_filter_ctx
{
apr_off_t remaining;
apr_off_t limit;
apr_off_t limit_used;
apr_int32_t chunk_used;
apr_int32_t chunkbits;
enum
{
BODY_NONE, /* streamed data */
BODY_LENGTH, /* data constrained by content length */
BODY_CHUNK, /* chunk expected */
BODY_CHUNK_PART, /* chunk digits */
BODY_CHUNK_EXT, /* chunk extension */
BODY_CHUNK_LF, /* got CR, expect LF after digits/extension */
BODY_CHUNK_DATA, /* data constrained by chunked encoding */
BODY_CHUNK_END, /* chunked data terminating CRLF */
BODY_CHUNK_END_LF, /* got CR, expect LF after data */
BODY_CHUNK_TRAILER /* trailers */
} state;
unsigned int eos_sent :1;
} http_ctx_t;
/**
* Parse a chunk line with optional extension, detect overflow.
* There are three error cases:
* 1) If the chunk-size line is malformed, APR_EINVAL is returned.
* 2) If the conversion would require too many bits, APR_ENOSPC is returned.
* 3) If the conversion used the correct number of bits, but an overflow
* caused only the sign bit to flip, then APR_ENOSPC is returned.
* In general, any negative number can be considered an overflow error.
*/
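/*
 * Illustrative walk-through (no additional behavior): for the chunk-size
 * line "1A;x=y\r\n" the parser starts in BODY_CHUNK, moves to
 * BODY_CHUNK_PART on the first hex digit, and accumulates
 *
 *   ctx->remaining = (0 << 4) | 0x1 = 1, then (1 << 4) | 0xA = 26.
 *
 * The ';' switches to BODY_CHUNK_EXT (extension bytes are skipped), CR
 * moves to BODY_CHUNK_LF, and the final LF lands in BODY_CHUNK_DATA with
 * 26 bytes of chunk data expected. Every accepted hex digit spends 4 of
 * the sizeof(apr_off_t)*8 chunkbits, so a size with more digits than
 * apr_off_t can hold either drives chunkbits negative or flips
 * ctx->remaining's sign, failing with APR_ENOSPC either way.
 */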
static apr_status_t parse_chunk_size(http_ctx_t *ctx, const char *buffer,
apr_size_t len, int linelimit)
{
apr_size_t i = 0;
while (i < len) {
char c = buffer[i];
ap_xlate_proto_from_ascii(&c, 1);
/* handle CRLF after the chunk */
if (ctx->state == BODY_CHUNK_END
|| ctx->state == BODY_CHUNK_END_LF) {
if (c == LF) {
ctx->state = BODY_CHUNK;
}
else if (c == CR && ctx->state == BODY_CHUNK_END) {
ctx->state = BODY_CHUNK_END_LF;
}
else {
/*
* LF expected.
*/
return APR_EINVAL;
}
i++;
continue;
}
/* handle start of the chunk */
if (ctx->state == BODY_CHUNK) {
if (!apr_isxdigit(c)) {
/*
* Detect invalid character at beginning. This also works for
* empty chunk size lines.
*/
return APR_EINVAL;
}
else {
ctx->state = BODY_CHUNK_PART;
}
ctx->remaining = 0;
ctx->chunkbits = sizeof(apr_off_t) * 8;
ctx->chunk_used = 0;
}
if (c == LF) {
if (ctx->remaining) {
ctx->state = BODY_CHUNK_DATA;
}
else {
ctx->state = BODY_CHUNK_TRAILER;
}
}
else if (ctx->state == BODY_CHUNK_LF) {
/*
* LF expected.
*/
return APR_EINVAL;
}
else if (c == CR) {
ctx->state = BODY_CHUNK_LF;
}
else if (c == ';') {
ctx->state = BODY_CHUNK_EXT;
}
else if (ctx->state == BODY_CHUNK_EXT) {
/*
* Control chars (except tabs) are invalid.
*/
if (c != '\t' && apr_iscntrl(c)) {
return APR_EINVAL;
}
}
else if (ctx->state == BODY_CHUNK_PART) {
int xvalue;
/* ignore leading zeros */
if (!ctx->remaining && c == '0') {
i++;
continue;
}
ctx->chunkbits -= 4;
if (ctx->chunkbits < 0) {
/* overflow */
return APR_ENOSPC;
}
if (c >= '0' && c <= '9') {
xvalue = c - '0';
}
else if (c >= 'A' && c <= 'F') {
xvalue = c - 'A' + 0xa;
}
else if (c >= 'a' && c <= 'f') {
xvalue = c - 'a' + 0xa;
}
else {
/* bogus character */
return APR_EINVAL;
}
ctx->remaining = (ctx->remaining << 4) | xvalue;
if (ctx->remaining < 0) {
/* overflow */
return APR_ENOSPC;
}
}
else {
/* Should not happen */
return APR_EGENERAL;
}
i++;
}
/* sanity check */
ctx->chunk_used += len;
if (ctx->chunk_used < 0 || ctx->chunk_used > linelimit) {
return APR_ENOSPC;
}
return APR_SUCCESS;
}
static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f,
apr_bucket_brigade *b, int merge)
{
int rv;
apr_bucket *e;
request_rec *r = f->r;
apr_table_t *saved_headers_in = r->headers_in;
int saved_status = r->status;
r->status = HTTP_OK;
r->headers_in = r->trailers_in;
apr_table_clear(r->headers_in);
ap_get_mime_headers(r);
if(r->status == HTTP_OK) {
r->status = saved_status;
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
ctx->eos_sent = 1;
rv = APR_SUCCESS;
}
else {
const char *error_notes = apr_table_get(r->notes,
"error-notes");
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02656)
"Error while reading HTTP trailer: %i%s%s",
r->status, error_notes ? ": " : "",
error_notes ? error_notes : "");
rv = APR_EINVAL;
}
if(!merge) {
r->headers_in = saved_headers_in;
}
else {
r->headers_in = apr_table_overlay(r->pool, saved_headers_in,
r->trailers_in);
}
return rv;
}
/* This is the HTTP_INPUT filter for HTTP requests and responses from
* proxied servers (mod_proxy). It handles chunked and content-length
* bodies. This can only be inserted/used after the headers
* are successfully parsed.
*/
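/*
 * For orientation, a chunked body looks like this on the wire (example
 * made up for illustration; states refer to http_ctx_t above):
 *
 *   5\r\n               chunk-size line    BODY_CHUNK..BODY_CHUNK_LF
 *   hello               5 bytes of data    BODY_CHUNK_DATA
 *   \r\n                terminating CRLF   BODY_CHUNK_END(_LF)
 *   0\r\n               last-chunk
 *   X-Trailer: v\r\n
 *   \r\n                trailers           BODY_CHUNK_TRAILER
 */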
apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
ap_input_mode_t mode, apr_read_type_e block,
apr_off_t readbytes)
{
core_server_config *conf;
apr_bucket *e;
http_ctx_t *ctx = f->ctx;
apr_status_t rv;
apr_off_t totalread;
int again;
conf = (core_server_config *)
ap_get_module_config(f->r->server->module_config, &core_module);
/* just get out of the way of things we don't want. */
if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
return ap_get_brigade(f->next, b, mode, block, readbytes);
}
if (!ctx) {
const char *tenc, *lenp;
f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
ctx->state = BODY_NONE;
/* LimitRequestBody does not apply to proxied responses.
* Consider implementing this check in its own filter.
* Would adding a directive to limit the size of proxied
* responses be useful?
*/
if (!f->r->proxyreq) {
ctx->limit = ap_get_limit_req_body(f->r);
}
else {
ctx->limit = 0;
}
tenc = apr_table_get(f->r->headers_in, "Transfer-Encoding");
lenp = apr_table_get(f->r->headers_in, "Content-Length");
if (tenc) {
if (strcasecmp(tenc, "chunked") == 0 /* fast path */
|| ap_find_last_token(f->r->pool, tenc, "chunked")) {
ctx->state = BODY_CHUNK;
}
else if (f->r->proxyreq == PROXYREQ_RESPONSE) {
/* http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-23
* Section 3.3.3.3: "If a Transfer-Encoding header field is
* present in a response and the chunked transfer coding is not
* the final encoding, the message body length is determined by
* reading the connection until it is closed by the server."
*/
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02555)
"Unknown Transfer-Encoding: %s; "
"using read-until-close", tenc);
tenc = NULL;
}
else {
/* An unrecognized transfer encoding on a request, unless some future
* HTTP edition defines new transfer codings, is unsupported.
*/
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01585)
"Unknown Transfer-Encoding: %s", tenc);
return APR_EGENERAL;
}
lenp = NULL;
}
if (lenp) {
char *endstr;
ctx->state = BODY_LENGTH;
/* Protects against over/underflow, non-digit chars in the
* string (excluding leading space) (the endstr checks)
* and a negative number. */
if (apr_strtoff(&ctx->remaining, lenp, &endstr, 10)
|| endstr == lenp || *endstr || ctx->remaining < 0) {
ctx->remaining = 0;
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01587)
"Invalid Content-Length");
return APR_EINVAL;
}
/* If we have a limit in effect and we know the C-L ahead of
* time, stop it here if it is invalid.
*/
if (ctx->limit && ctx->limit < ctx->remaining) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01588)
"Requested content-length of %" APR_OFF_T_FMT
" is larger than the configured limit"
" of %" APR_OFF_T_FMT, ctx->remaining, ctx->limit);
return APR_ENOSPC;
}
}
/* If we don't have a request entity indicated by the headers, EOS.
* (BODY_NONE is a valid intermediate state due to trailers,
* but it isn't a valid starting state.)
*
* RFC 2616 Section 4.4 note 5 states that connection-close
* is invalid for a request entity - request bodies must be
* denoted by C-L or T-E: chunked.
*
* Note that since the proxy uses this filter to handle the
* proxied *response*, proxy responses MUST be exempt.
*/
if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
ctx->eos_sent = 1;
return APR_SUCCESS;
}
/* Since we're about to read data, send 100-Continue if needed.
* Only valid on chunked and C-L bodies where the C-L is > 0. */
if ((ctx->state == BODY_CHUNK
|| (ctx->state == BODY_LENGTH && ctx->remaining > 0))
&& f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)
&& !(f->r->eos_sent || f->r->bytes_sent)) {
if (!ap_is_HTTP_SUCCESS(f->r->status)) {
ctx->state = BODY_NONE;
ctx->eos_sent = 1;
}
else {
char *tmp;
int len;
apr_bucket_brigade *bb;
bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
/* if we send an interim response, we're no longer
* in a state of expecting one.
*/
f->r->expecting_100 = 0;
tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL " ",
ap_get_status_line(HTTP_CONTINUE), CRLF CRLF, NULL);
len = strlen(tmp);
ap_xlate_proto_to_ascii(tmp, len);
e = apr_bucket_pool_create(tmp, len, f->r->pool,
f->c->bucket_alloc);
APR_BRIGADE_INSERT_HEAD(bb, e);
e = apr_bucket_flush_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, e);
rv = ap_pass_brigade(f->c->output_filters, bb);
apr_brigade_cleanup(bb);
if (rv != APR_SUCCESS) {
return AP_FILTER_ERROR;
}
}
}
}
/* sanity check in case we're read twice */
if (ctx->eos_sent) {
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
do {
apr_brigade_cleanup(b);
again = 0; /* until further notice */
/* read and handle the brigade */
switch (ctx->state) {
case BODY_CHUNK:
case BODY_CHUNK_PART:
case BODY_CHUNK_EXT:
case BODY_CHUNK_LF:
case BODY_CHUNK_END:
case BODY_CHUNK_END_LF: {
rv = ap_get_brigade(f->next, b, AP_MODE_GETLINE, block, 0);
/* for timeout */
if (block == APR_NONBLOCK_READ
&& ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
|| (APR_STATUS_IS_EAGAIN(rv)))) {
return APR_EAGAIN;
}
if (rv == APR_EOF) {
return APR_INCOMPLETE;
}
if (rv != APR_SUCCESS) {
return rv;
}
e = APR_BRIGADE_FIRST(b);
while (e != APR_BRIGADE_SENTINEL(b)) {
const char *buffer;
apr_size_t len;
if (!APR_BUCKET_IS_METADATA(e)) {
rv = apr_bucket_read(e, &buffer, &len, APR_BLOCK_READ);
if (rv == APR_SUCCESS) {
rv = parse_chunk_size(ctx, buffer, len,
f->r->server->limit_req_fieldsize);
}
if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01590)
"Error reading chunk %s ",
(APR_ENOSPC == rv) ? "(overflow)" : "");
return rv;
}
}
apr_bucket_delete(e);
e = APR_BRIGADE_FIRST(b);
}
again = 1; /* come around again */
if (ctx->state == BODY_CHUNK_TRAILER) {
/* Treat UNSET as DISABLE - trailers aren't merged by default */
return read_chunked_trailers(ctx, f, b,
conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE);
}
break;
}
case BODY_NONE:
case BODY_LENGTH:
case BODY_CHUNK_DATA: {
/* Ensure that the caller can not go over our boundary point. */
if (ctx->state != BODY_NONE && ctx->remaining < readbytes) {
readbytes = ctx->remaining;
}
if (readbytes > 0) {
rv = ap_get_brigade(f->next, b, mode, block, readbytes);
/* for timeout */
if (block == APR_NONBLOCK_READ
&& ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
|| (APR_STATUS_IS_EAGAIN(rv)))) {
return APR_EAGAIN;
}
if (rv == APR_EOF && ctx->state != BODY_NONE
&& ctx->remaining > 0) {
return APR_INCOMPLETE;
}
if (rv != APR_SUCCESS) {
return rv;
}
/* How many bytes did we just read? */
apr_brigade_length(b, 0, &totalread);
/* If this happens, we have a bucket of unknown length. Die because
* it means our assumptions have changed. */
AP_DEBUG_ASSERT(totalread >= 0);
if (ctx->state != BODY_NONE) {
ctx->remaining -= totalread;
if (ctx->remaining > 0) {
e = APR_BRIGADE_LAST(b);
if (APR_BUCKET_IS_EOS(e)) {
apr_bucket_delete(e);
return APR_INCOMPLETE;
}
}
else if (ctx->state == BODY_CHUNK_DATA) {
/* next chunk please */
ctx->state = BODY_CHUNK_END;
ctx->chunk_used = 0;
}
}
}
/* If we have no more bytes remaining on a C-L request,
* save the caller a round trip to discover EOS.
*/
if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
ctx->eos_sent = 1;
}
/* We have a limit in effect. */
if (ctx->limit) {
/* FIXME: Note that we might get slightly confused on chunked inputs
* as we'd need to compensate for the chunk lengths which may not
* really count. This seems to be up for interpretation. */
ctx->limit_used += totalread;
if (ctx->limit < ctx->limit_used) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01591)
"Read content-length of %" APR_OFF_T_FMT
" is larger than the configured limit"
" of %" APR_OFF_T_FMT, ctx->limit_used, ctx->limit);
return APR_ENOSPC;
}
}
break;
}
case BODY_CHUNK_TRAILER: {
rv = ap_get_brigade(f->next, b, mode, block, readbytes);
/* for timeout */
if (block == APR_NONBLOCK_READ
&& ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
|| (APR_STATUS_IS_EAGAIN(rv)))) {
return APR_EAGAIN;
}
if (rv != APR_SUCCESS) {
return rv;
}
break;
}
default: {
/* Should not happen */
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(02901)
"Unexpected body state (%i)", (int)ctx->state);
return APR_EGENERAL;
}
}
} while (again);
return APR_SUCCESS;
}
struct check_header_ctx {
request_rec *r;
int error;
};
/* check a single header, to be used with apr_table_do() */
static int check_header(void *arg, const char *name, const char *val)
{
struct check_header_ctx *ctx = arg;
if (name[0] == '\0') {
ctx->error = 1;
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02428)
"Empty response header name, aborting request");
return 0;
}
if (ap_has_cntrl(name)) {
ctx->error = 1;
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02429)
"Response header name '%s' contains control "
"characters, aborting request",
name);
return 0;
}
if (ap_has_cntrl(val)) {
ctx->error = 1;
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02430)
"Response header '%s' contains control characters, "
"aborting request: %s",
name, val);
return 0;
}
return 1;
}
/**
* Check headers for HTTP conformance
* @return 1 if ok, 0 if bad
*/
static APR_INLINE int check_headers(request_rec *r)
{
const char *loc;
struct check_header_ctx ctx = { 0, 0 };
ctx.r = r;
apr_table_do(check_header, &ctx, r->headers_out, NULL);
if (ctx.error)
return 0; /* problem has been logged by check_header() */
if ((loc = apr_table_get(r->headers_out, "Location")) != NULL) {
const char *scheme_end = ap_strchr_c(loc, ':');
/*
* Check that the URI has a valid scheme and is absolute
* XXX Should we do a full uri parse here?
*/
if (!ap_is_url(loc))
goto bad;
if (scheme_end[1] != '/' || scheme_end[2] != '/')
goto bad;
}
return 1;
bad:
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02431)
"Bad Location header in response: '%s', aborting request",
loc);
return 0;
}
typedef struct header_struct {
apr_pool_t *pool;
apr_bucket_brigade *bb;
} header_struct;
/* Send a single HTTP header field to the client. Note that this function
* is used in calls to apr_table_do(), so don't change its interface.
* It returns true unless there was a write error of some kind.
*/
static int form_header_field(header_struct *h,
const char *fieldname, const char *fieldval)
{
#if APR_CHARSET_EBCDIC
char *headfield;
apr_size_t len;
headfield = apr_pstrcat(h->pool, fieldname, ": ", fieldval, CRLF, NULL);
len = strlen(headfield);
ap_xlate_proto_to_ascii(headfield, len);
apr_brigade_write(h->bb, NULL, NULL, headfield, len);
#else
struct iovec vec[4];
struct iovec *v = vec;
v->iov_base = (void *)fieldname;
v->iov_len = strlen(fieldname);
v++;
v->iov_base = ": ";
v->iov_len = sizeof(": ") - 1;
v++;
v->iov_base = (void *)fieldval;
v->iov_len = strlen(fieldval);
v++;
v->iov_base = CRLF;
v->iov_len = sizeof(CRLF) - 1;
apr_brigade_writev(h->bb, NULL, NULL, vec, 4);
#endif /* !APR_CHARSET_EBCDIC */
return 1;
}
/* This routine is called by apr_table_do and merges all instances of
* the passed field values into a single array that will be further
* processed by some later routine. Originally intended to help split
* and recombine multiple Vary fields, though it is generic to any field
* consisting of comma/space-separated tokens.
*/
static int uniq_field_values(void *d, const char *key, const char *val)
{
apr_array_header_t *values;
char *start;
char *e;
char **strpp;
int i;
values = (apr_array_header_t *)d;
e = apr_pstrdup(values->pool, val);
do {
/* Find a non-empty fieldname */
while (*e == ',' || apr_isspace(*e)) {
++e;
}
if (*e == '\0') {
break;
}
start = e;
while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
++e;
}
if (*e != '\0') {
*e++ = '\0';
}
/* Now add it to values if it isn't already represented.
* Could be replaced by a ap_array_strcasecmp() if we had one.
*/
for (i = 0, strpp = (char **) values->elts; i < values->nelts;
++i, ++strpp) {
if (*strpp && strcasecmp(*strpp, start) == 0) {
break;
}
}
if (i == values->nelts) { /* if not found */
*(char **)apr_array_push(values) = start;
}
} while (*e != '\0');
return 1;
}
/*
* Since some clients choke violently on multiple Vary fields, or
* Vary fields with duplicate tokens, combine any multiples and remove
* any duplicates.
*/
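/*
 * Example (illustrative): the two response fields
 *
 *   Vary: Accept-Encoding, Cookie
 *   Vary: accept-encoding
 *
 * collapse into the single field "Vary: Accept-Encoding,Cookie". The
 * first-seen spelling of a token wins, and tokens are re-joined with a
 * bare comma.
 */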
static void fixup_vary(request_rec *r)
{
apr_array_header_t *varies;
varies = apr_array_make(r->pool, 5, sizeof(char *));
/* Extract all Vary fields from the headers_out, separate each into
* its comma-separated fieldname values, and then add them to varies
* if not already present in the array.
*/
apr_table_do((int (*)(void *, const char *, const char *))uniq_field_values,
(void *) varies, r->headers_out, "Vary", NULL);
/* If we found any, replace old Vary fields with unique-ified value */
if (varies->nelts > 0) {
apr_table_setn(r->headers_out, "Vary",
apr_array_pstrcat(r->pool, varies, ','));
}
}
/* Send a request's HTTP response headers to the client.
*/
static apr_status_t send_all_header_fields(header_struct *h,
const request_rec *r)
{
const apr_array_header_t *elts;
const apr_table_entry_t *t_elt;
const apr_table_entry_t *t_end;
struct iovec *vec;
struct iovec *vec_next;
elts = apr_table_elts(r->headers_out);
if (elts->nelts == 0) {
return APR_SUCCESS;
}
t_elt = (const apr_table_entry_t *)(elts->elts);
t_end = t_elt + elts->nelts;
vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
sizeof(struct iovec));
vec_next = vec;
/* For each field, generate
* name ": " value CRLF
*/
do {
vec_next->iov_base = (void*)(t_elt->key);
vec_next->iov_len = strlen(t_elt->key);
vec_next++;
vec_next->iov_base = ": ";
vec_next->iov_len = sizeof(": ") - 1;
vec_next++;
vec_next->iov_base = (void*)(t_elt->val);
vec_next->iov_len = strlen(t_elt->val);
vec_next++;
vec_next->iov_base = CRLF;
vec_next->iov_len = sizeof(CRLF) - 1;
vec_next++;
t_elt++;
} while (t_elt < t_end);
if (APLOGrtrace4(r)) {
t_elt = (const apr_table_entry_t *)(elts->elts);
do {
ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
ap_escape_logitem(r->pool, t_elt->key),
ap_escape_logitem(r->pool, t_elt->val));
t_elt++;
} while (t_elt < t_end);
}
#if APR_CHARSET_EBCDIC
{
apr_size_t len;
char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
ap_xlate_proto_to_ascii(tmp, len);
return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
}
#else
return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
#endif
}
/* Confirm that the status line is well-formed and matches r->status.
* If they don't match, a filter may have negated the status line set by a
* handler.
* Zap r->status_line if bad.
*/
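/*
 * Roughly: with r->status == 404, "404 Not Found" passes; a bare "404"
 * gets the mandated trailing space appended (the caller may then swap in
 * its canonical reason phrase); "200 OK" or any malformed line is zapped
 * so the caller regenerates it.
 */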
static apr_status_t validate_status_line(request_rec *r)
{
char *end;
if (r->status_line) {
int len = strlen(r->status_line);
if (len < 3
|| apr_strtoi64(r->status_line, &end, 10) != r->status
|| (end - 3) != r->status_line
|| (len >= 4 && ! apr_isspace(r->status_line[3]))) {
r->status_line = NULL;
return APR_EGENERAL;
}
/* Since we passed the above check, we know that length three
* is equivalent to only a 3 digit numeric http status.
* RFC2616 mandates a trailing space, let's add it.
*/
if (len == 3) {
r->status_line = apr_pstrcat(r->pool, r->status_line, " ", NULL);
return APR_EGENERAL;
}
return APR_SUCCESS;
}
return APR_EGENERAL;
}
/*
* Determine the protocol to use for the response. Potentially downgrade
* to HTTP/1.0 in some situations and/or turn off keepalives.
*
* also prepare r->status_line.
*/
static void basic_http_header_check(request_rec *r,
const char **protocol)
{
apr_status_t rv;
if (r->assbackwards) {
/* no such thing as a response protocol */
return;
}
rv = validate_status_line(r);
if (!r->status_line) {
r->status_line = ap_get_status_line(r->status);
} else if (rv != APR_SUCCESS) {
/* Status line is OK but our own reason phrase
* would be preferred if defined
*/
const char *tmp = ap_get_status_line(r->status);
if (!strncmp(tmp, r->status_line, 3)) {
r->status_line = tmp;
}
}
/* Note that we must downgrade before checking for force responses. */
if (r->proto_num > HTTP_VERSION(1,0)
&& apr_table_get(r->subprocess_env, "downgrade-1.0")) {
r->proto_num = HTTP_VERSION(1,0);
}
/* kludge around broken browsers when indicated by force-response-1.0
*/
if (r->proto_num == HTTP_VERSION(1,0)
&& apr_table_get(r->subprocess_env, "force-response-1.0")) {
*protocol = "HTTP/1.0";
r->connection->keepalive = AP_CONN_CLOSE;
}
else {
*protocol = AP_SERVER_PROTOCOL;
}
}
/* fill "bb" with a barebones/initial HTTP response header */
static void basic_http_header(request_rec *r, apr_bucket_brigade *bb,
const char *protocol)
{
char *date = NULL;
const char *proxy_date = NULL;
const char *server = NULL;
const char *us = ap_get_server_banner();
header_struct h;
struct iovec vec[4];
if (r->assbackwards) {
/* there are no headers to send */
return;
}
/* Output the HTTP/1.x Status-Line and the Date and Server fields */
vec[0].iov_base = (void *)protocol;
vec[0].iov_len = strlen(protocol);
vec[1].iov_base = (void *)" ";
vec[1].iov_len = sizeof(" ") - 1;
vec[2].iov_base = (void *)(r->status_line);
vec[2].iov_len = strlen(r->status_line);
vec[3].iov_base = (void *)CRLF;
vec[3].iov_len = sizeof(CRLF) - 1;
#if APR_CHARSET_EBCDIC
{
char *tmp;
apr_size_t len;
tmp = apr_pstrcatv(r->pool, vec, 4, &len);
ap_xlate_proto_to_ascii(tmp, len);
apr_brigade_write(bb, NULL, NULL, tmp, len);
}
#else
apr_brigade_writev(bb, NULL, NULL, vec, 4);
#endif
h.pool = r->pool;
h.bb = bb;
/*
* keep the set-by-proxy server and date headers, otherwise
* generate a new server header / date header
*/
if (r->proxyreq != PROXYREQ_NONE) {
proxy_date = apr_table_get(r->headers_out, "Date");
if (!proxy_date) {
/*
* proxy_date needs to be const. So use date for the creation of
* our own Date header and pass it over to proxy_date later to
* avoid a compiler warning.
*/
date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, r->request_time);
}
server = apr_table_get(r->headers_out, "Server");
}
else {
date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, r->request_time);
}
form_header_field(&h, "Date", proxy_date ? proxy_date : date );
if (!server && *us)
server = us;
if (server)
form_header_field(&h, "Server", server);
if (APLOGrtrace3(r)) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
"Response sent with status %d%s",
r->status,
APLOGrtrace4(r) ? ", headers:" : "");
/*
* Date and Server are less interesting, use TRACE5 for them while
* using TRACE4 for the other headers.
*/
ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, " Date: %s",
proxy_date ? proxy_date : date );
if (server)
ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, " Server: %s",
server);
}
/* unset so we don't send them again */
apr_table_unset(r->headers_out, "Date"); /* Avoid bogosity */
if (server) {
apr_table_unset(r->headers_out, "Server");
}
}
AP_DECLARE(void) ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb)
{
const char *protocol = NULL;
basic_http_header_check(r, &protocol);
basic_http_header(r, bb, protocol);
}
static void terminate_header(apr_bucket_brigade *bb)
{
char crlf[] = CRLF;
apr_size_t buflen;
buflen = strlen(crlf);
ap_xlate_proto_to_ascii(crlf, buflen);
apr_brigade_write(bb, NULL, NULL, crlf, buflen);
}
AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r)
{
core_server_config *conf;
int rv;
apr_bucket_brigade *bb;
header_struct h;
apr_bucket *b;
int body;
char *bodyread = NULL, *bodyoff;
apr_size_t bodylen = 0;
apr_size_t bodybuf;
long res = -1; /* init to avoid gcc -Wall warning */
if (r->method_number != M_TRACE) {
return DECLINED;
}
/* Get the original request */
while (r->prev) {
r = r->prev;
}
conf = ap_get_core_module_config(r->server->module_config);
if (conf->trace_enable == AP_TRACE_DISABLE) {
apr_table_setn(r->notes, "error-notes",
"TRACE denied by server configuration");
return HTTP_METHOD_NOT_ALLOWED;
}
if (conf->trace_enable == AP_TRACE_EXTENDED)
/* XXX: should be = REQUEST_CHUNKED_PASS */
body = REQUEST_CHUNKED_DECHUNK;
else
body = REQUEST_NO_BODY;
if ((rv = ap_setup_client_block(r, body))) {
if (rv == HTTP_REQUEST_ENTITY_TOO_LARGE)
apr_table_setn(r->notes, "error-notes",
"TRACE with a request body is not allowed");
return rv;
}
if (ap_should_client_block(r)) {
if (r->remaining > 0) {
if (r->remaining > 65536) {
apr_table_setn(r->notes, "error-notes",
"Extended TRACE request bodies cannot exceed 64k\n");
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
/* always 32 extra bytes to catch chunk header exceptions */
bodybuf = (apr_size_t)r->remaining + 32;
}
else {
/* Add an extra 8192 for chunk headers */
bodybuf = 73730;
}
bodyoff = bodyread = apr_palloc(r->pool, bodybuf);
/* only while we have enough for a chunked header */
while ((!bodylen || bodybuf >= 32) &&
(res = ap_get_client_block(r, bodyoff, bodybuf)) > 0) {
bodylen += res;
bodybuf -= res;
bodyoff += res;
}
if (res > 0 && bodybuf < 32) {
/* discard_rest_of_request_body into our buffer */
while (ap_get_client_block(r, bodyread, bodylen) > 0)
;
apr_table_setn(r->notes, "error-notes",
"Extended TRACE request bodies cannot exceed 64k\n");
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
if (res < 0) {
return HTTP_BAD_REQUEST;
}
}
ap_set_content_type(r, "message/http");
/* Now we recreate the request, and echo it back */
bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
#if APR_CHARSET_EBCDIC
{
char *tmp;
apr_size_t len;
len = strlen(r->the_request);
tmp = apr_pmemdup(r->pool, r->the_request, len);
ap_xlate_proto_to_ascii(tmp, len);
apr_brigade_putstrs(bb, NULL, NULL, tmp, CRLF_ASCII, NULL);
}
#else
apr_brigade_putstrs(bb, NULL, NULL, r->the_request, CRLF, NULL);
#endif
h.pool = r->pool;
h.bb = bb;
apr_table_do((int (*) (void *, const char *, const char *))
form_header_field, (void *) &h, r->headers_in, NULL);
apr_brigade_puts(bb, NULL, NULL, CRLF_ASCII);
/* If configured to accept a body, echo the body */
if (bodylen) {
b = apr_bucket_pool_create(bodyread, bodylen,
r->pool, bb->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, b);
}
ap_pass_brigade(r->output_filters, bb);
return DONE;
}
typedef struct header_filter_ctx {
int headers_sent;
} header_filter_ctx;
AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
apr_bucket_brigade *b)
{
request_rec *r = f->r;
conn_rec *c = r->connection;
const char *protocol = NULL;
apr_bucket *e;
apr_bucket_brigade *b2;
header_struct h;
header_filter_ctx *ctx = f->ctx;
const char *ctype;
ap_bucket_error *eb = NULL;
core_server_config *conf;
AP_DEBUG_ASSERT(!r->main);
if (r->header_only) {
if (!ctx) {
ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx));
}
else if (ctx->headers_sent) {
apr_brigade_cleanup(b);
return OK;
}
}
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
if (AP_BUCKET_IS_ERROR(e) && !eb) {
eb = e->data;
continue;
}
/*
* If we see an EOC bucket it is a signal that we should get out
* of the way doing nothing.
*/
if (AP_BUCKET_IS_EOC(e)) {
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, b);
}
}
if (eb) {
int status;
status = eb->status;
apr_brigade_cleanup(b);
ap_die(status, r);
return AP_FILTER_ERROR;
}
if (r->assbackwards) {
r->sent_bodyct = 1;
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, b);
}
/*
* Now that we are ready to send a response, we need to combine the two
* header field tables into a single table. If we don't do this, our
* later attempts to set or unset a given fieldname might be bypassed.
*/
if (!apr_is_empty_table(r->err_headers_out)) {
r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
r->headers_out);
}
conf = ap_get_core_module_config(r->server->module_config);
if (conf->http_conformance & AP_HTTP_CONFORMANCE_STRICT) {
int ok = check_headers(r);
if (!ok && !(conf->http_conformance & AP_HTTP_CONFORMANCE_LOGONLY)) {
ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
return AP_FILTER_ERROR;
}
}
/*
* Remove the 'Vary' header field if the client can't handle it.
* Since this will have nasty effects on HTTP/1.1 caches, force
* the response into HTTP/1.0 mode.
*
* Note: the force-response-1.0 should come before the call to
* basic_http_header_check()
*/
if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
apr_table_unset(r->headers_out, "Vary");
r->proto_num = HTTP_VERSION(1,0);
apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
}
else {
fixup_vary(r);
}
/*
* Now remove any ETag response header field if earlier processing
* says so (such as a 'FileETag None' directive).
*/
if (apr_table_get(r->notes, "no-etag") != NULL) {
apr_table_unset(r->headers_out, "ETag");
}
/* determine the protocol and whether we should use keepalives. */
basic_http_header_check(r, &protocol);
ap_set_keepalive(r);
if (r->chunked) {
apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
apr_table_unset(r->headers_out, "Content-Length");
}
ctype = ap_make_content_type(r, r->content_type);
if (ctype) {
apr_table_setn(r->headers_out, "Content-Type", ctype);
}
if (r->content_encoding) {
apr_table_setn(r->headers_out, "Content-Encoding",
r->content_encoding);
}
if (!apr_is_empty_array(r->content_languages)) {
int i;
char *token;
char **languages = (char **)(r->content_languages->elts);
const char *field = apr_table_get(r->headers_out, "Content-Language");
while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
for (i = 0; i < r->content_languages->nelts; ++i) {
if (!strcasecmp(token, languages[i]))
break;
}
if (i == r->content_languages->nelts) {
*((char **) apr_array_push(r->content_languages)) = token;
}
}
field = apr_array_pstrcat(r->pool, r->content_languages, ',');
apr_table_setn(r->headers_out, "Content-Language", field);
}
/*
* Control cacheability for non-cacheable responses if not already set by
* some other part of the server configuration.
*/
if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, r->request_time);
apr_table_addn(r->headers_out, "Expires", date);
}
b2 = apr_brigade_create(r->pool, c->bucket_alloc);
basic_http_header(r, b2, protocol);
h.pool = r->pool;
h.bb = b2;
if (r->status == HTTP_NOT_MODIFIED) {
apr_table_do((int (*)(void *, const char *, const char *)) form_header_field,
(void *) &h, r->headers_out,
"Connection",
"Keep-Alive",
"ETag",
"Content-Location",
"Expires",
"Cache-Control",
"Vary",
"Warning",
"WWW-Authenticate",
"Proxy-Authenticate",
"Set-Cookie",
"Set-Cookie2",
NULL);
}
else {
send_all_header_fields(&h, r);
}
terminate_header(b2);
ap_pass_brigade(f->next, b2);
if (r->header_only) {
apr_brigade_cleanup(b);
ctx->headers_sent = 1;
return OK;
}
r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
if (r->chunked) {
/* We can't add this filter until we have already sent the headers.
* If we add it before this point, then the headers will be chunked
* as well, and that is just wrong.
*/
ap_add_output_filter("CHUNK", NULL, r, r->connection);
}
/* Don't remove this filter until after we have added the CHUNK filter.
* Otherwise, f->next won't be the CHUNK filter and thus the first
* brigade won't be chunked properly.
*/
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, b);
}
/*
* Map specific APR codes returned by the filter stack to HTTP error
* codes, or the default status code provided. Use it as follows:
*
* return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
*
* If the filter has already handled the error, AP_FILTER_ERROR will
* be returned, which is cleanly passed through.
*
* These mappings imply that the filter stack is reading from the
* downstream client, the proxy will map these codes differently.
*/
AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status)
{
switch (rv) {
case AP_FILTER_ERROR: {
return AP_FILTER_ERROR;
}
case APR_EGENERAL: {
return HTTP_BAD_REQUEST;
}
case APR_ENOSPC: {
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
case APR_ENOTIMPL: {
return HTTP_NOT_IMPLEMENTED;
}
case APR_ETIMEDOUT: {
return HTTP_REQUEST_TIME_OUT;
}
default: {
return status;
}
}
}
/* In HTTP/1.1, any method can have a body. However, most GET handlers
* wouldn't know what to do with a request body if they received one.
* This helper routine tests for and reads any message body in the request,
* simply discarding whatever it receives. We need to do this because
* failing to read the request body would cause it to be interpreted
* as the next request on a persistent connection.
*
* Since we return an error status if the request is malformed, this
* routine should be called at the beginning of a no-body handler, e.g.,
*
* if ((retval = ap_discard_request_body(r)) != OK) {
* return retval;
* }
*/
AP_DECLARE(int) ap_discard_request_body(request_rec *r)
{
apr_bucket_brigade *bb;
int seen_eos;
apr_status_t rv;
/* Sometimes we'll get in a state where the input handling has
* detected an error where we want to drop the connection, so if
* that's the case, don't read the data as that is what we're trying
* to avoid.
*
* This function is also a no-op on a subrequest.
*/
if (r->main || r->connection->keepalive == AP_CONN_CLOSE ||
ap_status_drops_connection(r->status)) {
return OK;
}
bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
seen_eos = 0;
do {
apr_bucket *bucket;
rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
APR_BLOCK_READ, HUGE_STRING_LEN);
if (rv != APR_SUCCESS) {
apr_brigade_destroy(bb);
return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
}
for (bucket = APR_BRIGADE_FIRST(bb);
bucket != APR_BRIGADE_SENTINEL(bb);
bucket = APR_BUCKET_NEXT(bucket))
{
const char *data;
apr_size_t len;
if (APR_BUCKET_IS_EOS(bucket)) {
seen_eos = 1;
break;
}
/* These are metadata buckets. */
if (bucket->length == 0) {
continue;
}
/* We MUST read because in case we have an unknown-length
* bucket or one that morphs, we want to exhaust it.
*/
rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
if (rv != APR_SUCCESS) {
apr_brigade_destroy(bb);
return HTTP_BAD_REQUEST;
}
}
apr_brigade_cleanup(bb);
} while (!seen_eos);
return OK;
}
/* Here we deal with getting the request message body from the client.
* Whether or not the request contains a body is signaled by the presence
* of a non-zero Content-Length or by a Transfer-Encoding: chunked.
*
* Note that this is more complicated than it was in Apache 1.1 and prior
* versions, because chunked support means that the module does less.
*
* The proper procedure is this:
*
* 1. Call ap_setup_client_block() near the beginning of the request
* handler. This will set up all the necessary properties, and will
* return either OK, or an error code. If the latter, the module should
* return that error code. The second parameter selects the policy to
* apply if the request message indicates a body, and how a chunked
* transfer-coding should be interpreted. Choose one of
*
* REQUEST_NO_BODY Send 413 error if message has any body
* REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
* REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
* REQUEST_CHUNKED_PASS If chunked, pass the chunk headers with body.
*
* In order to use the last two options, the caller MUST provide a buffer
* large enough to hold a chunk-size line, including any extensions.
*
* 2. When you are ready to read a body (if any), call ap_should_client_block().
* This will tell the module whether or not to read input. If it is 0,
* the module should assume that there is no message body to read.
*
* 3. Finally, call ap_get_client_block in a loop. Pass it a buffer and its size.
* It will put data into the buffer (not necessarily a full buffer), and
* return the length of the input block. When it is done reading, it will
* return 0 if EOF, or -1 if there was an error.
* If an error occurs on input, we force an end to keepalive.
*
* This step also sends a 100 Continue response to HTTP/1.1 clients if appropriate.
*/
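/*
 * A minimal sketch of the procedure above (hypothetical handler body,
 * error handling trimmed):
 *
 *   if ((rv = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)) != OK)
 *       return rv;
 *   if (ap_should_client_block(r)) {
 *       char buf[HUGE_STRING_LEN];
 *       long len;
 *       while ((len = ap_get_client_block(r, buf, sizeof(buf))) > 0) {
 *           ... consume len bytes of request body ...
 *       }
 *       if (len < 0)
 *           return HTTP_BAD_REQUEST;
 *   }
 */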
AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
{
const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
const char *lenp = apr_table_get(r->headers_in, "Content-Length");
r->read_body = read_policy;
r->read_chunked = 0;
r->remaining = 0;
if (tenc) {
if (strcasecmp(tenc, "chunked")) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01592)
"Unknown Transfer-Encoding %s", tenc);
return HTTP_NOT_IMPLEMENTED;
}
if (r->read_body == REQUEST_CHUNKED_ERROR) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01593)
"chunked Transfer-Encoding forbidden: %s", r->uri);
return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED;
}
r->read_chunked = 1;
}
else if (lenp) {
char *endstr;
if (apr_strtoff(&r->remaining, lenp, &endstr, 10)
|| *endstr || r->remaining < 0) {
r->remaining = 0;
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01594)
"Invalid Content-Length");
return HTTP_BAD_REQUEST;
}
}
if ((r->read_body == REQUEST_NO_BODY)
&& (r->read_chunked || (r->remaining > 0))) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01595)
"%s with body is not allowed for %s", r->method, r->uri);
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
#ifdef AP_DEBUG
{
/* Make sure ap_getline() didn't leave any droppings. */
core_request_config *req_cfg =
(core_request_config *)ap_get_core_module_config(r->request_config);
AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(req_cfg->bb));
}
#endif
return OK;
}
AP_DECLARE(int) ap_should_client_block(request_rec *r)
{
/* First check if we have already read the request body */
if (r->read_length || (!r->read_chunked && (r->remaining <= 0))) {
return 0;
}
return 1;
}
/* get_client_block is called in a loop to get the request message body.
* This is quite simple if the client includes a content-length
* (the normal case), but gets messy if the body is chunked. Note that
* r->remaining is used to maintain state across calls and that
* r->read_length is the total number of bytes given to the caller
* across all invocations. It is messy because we have to be careful not
* to read past the data provided by the client, since these reads block.
* Returns 0 on End-of-body, -1 on error or premature chunk end.
*
*/
AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer,
apr_size_t bufsiz)
{
apr_status_t rv;
apr_bucket_brigade *bb;
if (r->remaining < 0 || (!r->read_chunked && r->remaining == 0)) {
return 0;
}
bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
if (bb == NULL) {
r->connection->keepalive = AP_CONN_CLOSE;
return -1;
}
rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
APR_BLOCK_READ, bufsiz);
/* We lose the failure code here. This is why ap_get_client_block should
* not be used.
*/
if (rv == AP_FILTER_ERROR) {
/* AP_FILTER_ERROR means a filter has responded already,
* we are DONE.
*/
apr_brigade_destroy(bb);
return -1;
}
if (rv != APR_SUCCESS) {
apr_bucket *e;
/* work around our silent swallowing of error messages by mapping
* error codes at this point, and sending an error bucket back
* upstream.
*/
apr_brigade_cleanup(bb);
e = ap_bucket_error_create(
ap_map_http_request_error(rv, HTTP_BAD_REQUEST), NULL, r->pool,
r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, e);
e = apr_bucket_eos_create(r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, e);
rv = ap_pass_brigade(r->output_filters, bb);
if (APR_SUCCESS != rv) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, r, APLOGNO(02484)
"Error while writing error response");
}
/* if we actually fail here, we want to just return and
* stop trying to read data from the client.
*/
r->connection->keepalive = AP_CONN_CLOSE;
apr_brigade_destroy(bb);
return -1;
}
/* If this fails, it means that a filter is written incorrectly and that
* it needs to learn how to properly handle APR_BLOCK_READ requests by
* returning data when requested.
*/
AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(bb));
/* Check to see if EOS in the brigade.
*
* If so, we have to leave a nugget for the *next* ap_get_client_block
* call to return 0.
*/
if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
if (r->read_chunked) {
r->remaining = -1;
}
else {
r->remaining = 0;
}
}
rv = apr_brigade_flatten(bb, buffer, &bufsiz);
if (rv != APR_SUCCESS) {
apr_brigade_destroy(bb);
return -1;
}
/* XXX yank me? */
r->read_length += bufsiz;
apr_brigade_destroy(bb);
return bufsiz;
}
/* Context struct for ap_http_outerror_filter */
typedef struct {
int seen_eoc;
} outerror_filter_ctx_t;
/* Filter to handle any error buckets on output */
apr_status_t ap_http_outerror_filter(ap_filter_t *f,
apr_bucket_brigade *b)
{
request_rec *r = f->r;
outerror_filter_ctx_t *ctx = (outerror_filter_ctx_t *)(f->ctx);
apr_bucket *e;
/* Create context if none is present */
if (!ctx) {
ctx = apr_pcalloc(r->pool, sizeof(outerror_filter_ctx_t));
f->ctx = ctx;
}
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
if (AP_BUCKET_IS_ERROR(e)) {
/*
* Start of error handling state tree. Just one condition
* right now :)
*/
if (((ap_bucket_error *)(e->data))->status == HTTP_BAD_GATEWAY ||
((ap_bucket_error *)(e->data))->status == HTTP_GATEWAY_TIME_OUT) {
/* stream aborted and we have not ended it yet */
r->connection->keepalive = AP_CONN_CLOSE;
}
continue;
}
/* Detect EOC buckets and memorize this in the context. */
if (AP_BUCKET_IS_EOC(e)) {
ctx->seen_eoc = 1;
}
}
/*
* Remove all data buckets that are in a brigade after an EOC bucket
* was seen, as an EOC bucket tells us that no (further) resource
* and protocol data should go out to the client. OTOH meta buckets
* are still welcome as they might trigger needed actions down in
* the chain (e.g. in network filters like SSL).
* Remark 1: We need to dump ALL data buckets in the brigade,
* since a filter in between might have inserted data
* buckets BEFORE the EOC bucket sent by the original
* sender, and we do NOT want this data to be sent.
* Remark 2: Dumping all data buckets here does not necessarily mean
* that no further data is sent to the client, as:
* 1. Network filters like SSL can still be triggered via
* meta buckets to talk with the client e.g. for a
* clean shutdown.
* 2. There could be still data that was buffered before
* down in the chain that gets flushed by a FLUSH or an
* EOS bucket.
*/
if (ctx->seen_eoc) {
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
if (!APR_BUCKET_IS_METADATA(e)) {
APR_BUCKET_REMOVE(e);
}
}
}
return ap_pass_brigade(f->next, b);
}
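/* Hedged registration sketch, not part of this file: protocol output
 * filters like ap_http_outerror_filter are registered once at startup.
 * The filter name used here is an assumption for illustration.
 */
#if 0
static void register_outerror_filter(void)
{
    ap_register_output_filter("HTTP_OUTERROR", ap_http_outerror_filter,
                              NULL, AP_FTYPE_PROTOCOL);
}
#endif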
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_1575_2 |
crossvul-cpp_data_bad_5420_0 | /* $Id: file.c,v 1.266 2012/05/22 09:45:56 inu Exp $ */
#include "fm.h"
#include <sys/types.h>
#include "myctype.h"
#include <signal.h>
#include <setjmp.h>
#if defined(HAVE_WAITPID) || defined(HAVE_WAIT3)
#include <sys/wait.h>
#endif
#include <stdio.h>
#include <time.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <utime.h>
/* foo */
#include "html.h"
#include "parsetagx.h"
#include "local.h"
#include "regex.h"
#ifndef max
#define max(a,b) ((a) > (b) ? (a) : (b))
#endif /* not max */
#ifndef min
#define min(a,b) ((a) > (b) ? (b) : (a))
#endif /* not min */
#define MAX_INPUT_SIZE 80 /* TODO - max should be screen line length */
static int frame_source = 0;
static char *guess_filename(char *file);
static int _MoveFile(char *path1, char *path2);
static void uncompress_stream(URLFile *uf, char **src);
static FILE *lessopen_stream(char *path);
static Buffer *loadcmdout(char *cmd,
Buffer *(*loadproc) (URLFile *, Buffer *),
Buffer *defaultbuf);
#ifndef USE_ANSI_COLOR
#define addnewline(a,b,c,d,e,f,g) _addnewline(a,b,c,e,f,g)
#endif
static void addnewline(Buffer *buf, char *line, Lineprop *prop,
Linecolor *color, int pos, int width, int nlines);
static void addLink(Buffer *buf, struct parsed_tag *tag);
static JMP_BUF AbortLoading;
static struct table *tables[MAX_TABLE];
static struct table_mode table_mode[MAX_TABLE];
#if defined(USE_M17N) || defined(USE_IMAGE)
static ParsedURL *cur_baseURL = NULL;
#endif
#ifdef USE_M17N
static wc_ces cur_document_charset = 0;
#endif
static Str cur_title;
static Str cur_select;
static Str select_str;
static int select_is_multiple;
static int n_selectitem;
static Str cur_option;
static Str cur_option_value;
static Str cur_option_label;
static int cur_option_selected;
static int cur_status;
#ifdef MENU_SELECT
/* menu based <select> */
FormSelectOption *select_option;
int max_select = MAX_SELECT;
static int n_select;
static int cur_option_maxwidth;
#endif /* MENU_SELECT */
static Str cur_textarea;
Str *textarea_str;
static int cur_textarea_size;
static int cur_textarea_rows;
static int cur_textarea_readonly;
static int n_textarea;
static int ignore_nl_textarea;
int max_textarea = MAX_TEXTAREA;
static int http_response_code;
#ifdef USE_M17N
static wc_ces content_charset = 0;
static wc_ces meta_charset = 0;
static char *check_charset(char *p);
static char *check_accept_charset(char *p);
#endif
#define set_prevchar(x,y,n) Strcopy_charp_n((x),(y),(n))
#define set_space_to_prevchar(x) Strcopy_charp_n((x)," ",1)
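/* Tags (e.g. <a>, <img_alt>, <b>) opened on the output line currently being
 * built; has_hidden_link() and passthrough() below consult this stack to
 * re-balance tags when a line is flushed in the middle of an element. */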
struct link_stack {
int cmd;
short offset;
short pos;
struct link_stack *next;
};
static struct link_stack *link_stack = NULL;
#define FORMSTACK_SIZE 10
#define FRAMESTACK_SIZE 10
#ifdef USE_NNTP
#define Str_news_endline(s) ((s)->ptr[0]=='.'&&((s)->ptr[1]=='\n'||(s)->ptr[1]=='\r'||(s)->ptr[1]=='\0'))
#endif /* USE_NNTP */
#define INITIAL_FORM_SIZE 10
static FormList **forms;
static int *form_stack;
static int form_max = -1;
static int forms_size = 0;
#define cur_form_id ((form_sp >= 0)? form_stack[form_sp] : -1)
static int form_sp = 0;
static clen_t current_content_length;
static int cur_hseq;
#ifdef USE_IMAGE
static int cur_iseq;
#endif
#define MAX_UL_LEVEL 9
#define UL_SYMBOL(x) (N_GRAPH_SYMBOL + (x))
#define UL_SYMBOL_DISC UL_SYMBOL(9)
#define UL_SYMBOL_CIRCLE UL_SYMBOL(10)
#define UL_SYMBOL_SQUARE UL_SYMBOL(11)
#define IMG_SYMBOL UL_SYMBOL(12)
#define HR_SYMBOL 26
#ifdef USE_COOKIE
/* This array should be somewhere else */
/* FIXME: gettextize? */
char *violations[COO_EMAX] = {
"internal error",
"tail match failed",
"wrong number of dots",
"RFC 2109 4.3.2 rule 1",
"RFC 2109 4.3.2 rule 2.1",
"RFC 2109 4.3.2 rule 2.2",
"RFC 2109 4.3.2 rule 3",
"RFC 2109 4.3.2 rule 4",
"RFC XXXX 4.3.2 rule 5"
};
#endif
/* *INDENT-OFF* */
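/* Maps file extensions and Content-Encoding tokens to the external
 * decompression commands w3m shells out to; scanned linearly by
 * check_compression(), acceptableEncoding() and the Content-Encoding
 * handling in readHeader(). */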
static struct compression_decoder {
int type;
char *ext;
char *mime_type;
int auxbin_p;
char *cmd;
char *name;
char *encoding;
char *encodings[4];
} compression_decoders[] = {
{ CMP_COMPRESS, ".gz", "application/x-gzip",
0, GUNZIP_CMDNAME, GUNZIP_NAME, "gzip",
{"gzip", "x-gzip", NULL} },
{ CMP_COMPRESS, ".Z", "application/x-compress",
0, GUNZIP_CMDNAME, GUNZIP_NAME, "compress",
{"compress", "x-compress", NULL} },
{ CMP_BZIP2, ".bz2", "application/x-bzip",
0, BUNZIP2_CMDNAME, BUNZIP2_NAME, "bzip, bzip2",
{"x-bzip", "bzip", "bzip2", NULL} },
{ CMP_DEFLATE, ".deflate", "application/x-deflate",
1, INFLATE_CMDNAME, INFLATE_NAME, "deflate",
{"deflate", "x-deflate", NULL} },
{ CMP_NOCOMPRESS, NULL, NULL, 0, NULL, NULL, NULL, {NULL}},
};
/* *INDENT-ON* */
#define SAVE_BUF_SIZE 1536
static MySignalHandler
KeyAbort(SIGNAL_ARG)
{
LONGJMP(AbortLoading, 1);
SIGNAL_RETURN;
}
static void
UFhalfclose(URLFile *f)
{
switch (f->scheme) {
case SCM_FTP:
closeFTP();
break;
#ifdef USE_NNTP
case SCM_NEWS:
case SCM_NNTP:
closeNews();
break;
#endif
default:
UFclose(f);
break;
}
}
int
currentLn(Buffer *buf)
{
if (buf->currentLine)
/* return buf->currentLine->real_linenumber + 1; */
return buf->currentLine->linenumber + 1;
else
return 1;
}
static Buffer *
loadSomething(URLFile *f,
Buffer *(*loadproc) (URLFile *, Buffer *), Buffer *defaultbuf)
{
Buffer *buf;
if ((buf = loadproc(f, defaultbuf)) == NULL)
return NULL;
if (buf->buffername == NULL || buf->buffername[0] == '\0') {
buf->buffername = checkHeader(buf, "Subject:");
if (buf->buffername == NULL && buf->filename != NULL)
buf->buffername = conv_from_system(lastFileName(buf->filename));
}
if (buf->currentURL.scheme == SCM_UNKNOWN)
buf->currentURL.scheme = f->scheme;
if (f->scheme == SCM_LOCAL && buf->sourcefile == NULL)
buf->sourcefile = buf->filename;
if (loadproc == loadHTMLBuffer
#ifdef USE_IMAGE
|| loadproc == loadImageBuffer
#endif
)
buf->type = "text/html";
else
buf->type = "text/plain";
return buf;
}
int
dir_exist(char *path)
{
struct stat stbuf;
if (path == NULL || *path == '\0')
return 0;
if (stat(path, &stbuf) == -1)
return 0;
return IS_DIRECTORY(stbuf.st_mode);
}
static int
is_dump_text_type(char *type)
{
struct mailcap *mcap;
return (type && (mcap = searchExtViewer(type)) &&
(mcap->flags & (MAILCAP_HTMLOUTPUT | MAILCAP_COPIOUSOUTPUT)));
}
static int
is_text_type(char *type)
{
return (type == NULL || type[0] == '\0' ||
strncasecmp(type, "text/", 5) == 0 ||
(strncasecmp(type, "application/", 12) == 0 &&
strstr(type, "xhtml") != NULL) ||
strncasecmp(type, "message/", sizeof("message/") - 1) == 0);
}
static int
is_plain_text_type(char *type)
{
return ((type && strcasecmp(type, "text/plain") == 0) ||
(is_text_type(type) && !is_dump_text_type(type)));
}
int
is_html_type(char *type)
{
return (type && (strcasecmp(type, "text/html") == 0 ||
strcasecmp(type, "application/xhtml+xml") == 0));
}
static void
check_compression(char *path, URLFile *uf)
{
int len;
struct compression_decoder *d;
if (path == NULL)
return;
len = strlen(path);
uf->compression = CMP_NOCOMPRESS;
for (d = compression_decoders; d->type != CMP_NOCOMPRESS; d++) {
int elen;
if (d->ext == NULL)
continue;
elen = strlen(d->ext);
if (len > elen && strcasecmp(&path[len - elen], d->ext) == 0) {
uf->compression = d->type;
uf->guess_type = d->mime_type;
break;
}
}
}
static char *
compress_application_type(int compression)
{
struct compression_decoder *d;
for (d = compression_decoders; d->type != CMP_NOCOMPRESS; d++) {
if (d->type == compression)
return d->mime_type;
}
return NULL;
}
static char *
uncompressed_file_type(char *path, char **ext)
{
int len, slen;
Str fn;
char *t0;
struct compression_decoder *d;
if (path == NULL)
return NULL;
slen = 0;
len = strlen(path);
for (d = compression_decoders; d->type != CMP_NOCOMPRESS; d++) {
if (d->ext == NULL)
continue;
slen = strlen(d->ext);
if (len > slen && strcasecmp(&path[len - slen], d->ext) == 0)
break;
}
if (d->type == CMP_NOCOMPRESS)
return NULL;
fn = Strnew_charp(path);
Strshrink(fn, slen);
if (ext)
*ext = filename_extension(fn->ptr, 0);
t0 = guessContentType(fn->ptr);
if (t0 == NULL)
t0 = "text/plain";
return t0;
}
static int
setModtime(char *path, time_t modtime)
{
struct utimbuf t;
struct stat st;
if (stat(path, &st) == 0)
t.actime = st.st_atime;
else
t.actime = time(NULL);
t.modtime = modtime;
return utime(path, &t);
}
void
examineFile(char *path, URLFile *uf)
{
struct stat stbuf;
uf->guess_type = NULL;
if (path == NULL || *path == '\0' ||
stat(path, &stbuf) == -1 || NOT_REGULAR(stbuf.st_mode)) {
uf->stream = NULL;
return;
}
uf->stream = openIS(path);
if (!do_download) {
if (use_lessopen && getenv("LESSOPEN") != NULL) {
FILE *fp;
uf->guess_type = guessContentType(path);
if (uf->guess_type == NULL)
uf->guess_type = "text/plain";
if (is_html_type(uf->guess_type))
return;
if ((fp = lessopen_stream(path))) {
UFclose(uf);
uf->stream = newFileStream(fp, (void (*)())pclose);
uf->guess_type = "text/plain";
return;
}
}
check_compression(path, uf);
if (uf->compression != CMP_NOCOMPRESS) {
char *ext = uf->ext;
char *t0 = uncompressed_file_type(path, &ext);
uf->guess_type = t0;
uf->ext = ext;
uncompress_stream(uf, NULL);
return;
}
}
}
#define S_IXANY (S_IXUSR|S_IXGRP|S_IXOTH)
int
check_command(char *cmd, int auxbin_p)
{
static char *path = NULL;
Str dirs;
char *p, *np;
Str pathname;
struct stat st;
if (path == NULL)
path = getenv("PATH");
if (auxbin_p)
dirs = Strnew_charp(w3m_auxbin_dir());
else
dirs = Strnew_charp(path);
for (p = dirs->ptr; p != NULL; p = np) {
np = strchr(p, PATH_SEPARATOR);
if (np)
*np++ = '\0';
pathname = Strnew();
Strcat_charp(pathname, p);
Strcat_char(pathname, '/');
Strcat_charp(pathname, cmd);
if (stat(pathname->ptr, &st) == 0 && S_ISREG(st.st_mode)
&& (st.st_mode & S_IXANY) != 0)
return 1;
}
return 0;
}
char *
acceptableEncoding()
{
static Str encodings = NULL;
struct compression_decoder *d;
TextList *l;
char *p;
if (encodings != NULL)
return encodings->ptr;
l = newTextList();
for (d = compression_decoders; d->type != CMP_NOCOMPRESS; d++) {
if (check_command(d->cmd, d->auxbin_p)) {
pushText(l, d->encoding);
}
}
encodings = Strnew();
while ((p = popText(l)) != NULL) {
if (encodings->length)
Strcat_charp(encodings, ", ");
Strcat_charp(encodings, p);
}
return encodings->ptr;
}
/*
* convert line
*/
#ifdef USE_M17N
Str
convertLine(URLFile *uf, Str line, int mode, wc_ces * charset,
wc_ces doc_charset)
#else
Str
convertLine0(URLFile *uf, Str line, int mode)
#endif
{
#ifdef USE_M17N
line = wc_Str_conv_with_detect(line, charset, doc_charset, InnerCharset);
#endif
if (mode != RAW_MODE)
cleanup_line(line, mode);
#ifdef USE_NNTP
if (uf && uf->scheme == SCM_NEWS)
Strchop(line);
#endif /* USE_NNTP */
return line;
}
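/* matchattr: test whether p starts with "attr" (optionally followed by
 * "=value"); on a match with a value, store the value in *value with
 * surrounding quotes removed and trailing blanks trimmed, and return 1. */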
int
matchattr(char *p, char *attr, int len, Str *value)
{
int quoted;
char *q = NULL;
if (strncasecmp(p, attr, len) == 0) {
p += len;
SKIP_BLANKS(p);
if (value) {
*value = Strnew();
if (*p == '=') {
p++;
SKIP_BLANKS(p);
quoted = 0;
while (!IS_ENDL(*p) && (quoted || *p != ';')) {
if (!IS_SPACE(*p))
q = p;
if (*p == '"')
quoted = (quoted) ? 0 : 1;
else
Strcat_char(*value, *p);
p++;
}
if (q)
Strshrink(*value, p - q - 1);
}
return 1;
}
else {
if (IS_ENDT(*p)) {
return 1;
}
}
}
return 0;
}
#ifdef USE_IMAGE
#ifdef USE_XFACE
static char *
xface2xpm(char *xface)
{
Image image;
ImageCache *cache;
FILE *f;
struct stat st;
SKIP_BLANKS(xface);
image.url = xface;
image.ext = ".xpm";
image.width = 48;
image.height = 48;
image.cache = NULL;
cache = getImage(&image, NULL, IMG_FLAG_AUTO);
if (cache->loaded & IMG_FLAG_LOADED && !stat(cache->file, &st))
return cache->file;
cache->loaded = IMG_FLAG_ERROR;
f = popen(Sprintf("%s > %s", shell_quote(auxbinFile(XFACE2XPM)),
shell_quote(cache->file))->ptr, "w");
if (!f)
return NULL;
fputs(xface, f);
pclose(f);
if (stat(cache->file, &st) || !st.st_size)
return NULL;
cache->loaded = IMG_FLAG_LOADED | IMG_FLAG_DONT_REMOVE;
cache->index = 0;
return cache->file;
}
#endif
#endif
void
readHeader(URLFile *uf, Buffer *newBuf, int thru, ParsedURL *pu)
{
char *p, *q;
#ifdef USE_COOKIE
char *emsg;
#endif
char c;
Str lineBuf2 = NULL;
Str tmp;
TextList *headerlist;
#ifdef USE_M17N
wc_ces charset = WC_CES_US_ASCII, mime_charset;
#endif
char *tmpf;
FILE *src = NULL;
Lineprop *propBuffer = NULL; /* may reach the final addnewline() unset */
headerlist = newBuf->document_header = newTextList();
if (uf->scheme == SCM_HTTP
#ifdef USE_SSL
|| uf->scheme == SCM_HTTPS
#endif /* USE_SSL */
)
http_response_code = -1;
else
http_response_code = 0;
if (thru && !newBuf->header_source
#ifdef USE_IMAGE
&& !image_source
#endif
) {
tmpf = tmpfname(TMPF_DFL, NULL)->ptr;
src = fopen(tmpf, "w");
if (src)
newBuf->header_source = tmpf;
}
while ((tmp = StrmyUFgets(uf))->length) {
#ifdef USE_NNTP
if (uf->scheme == SCM_NEWS && tmp->ptr[0] == '.')
Strshrinkfirst(tmp, 1);
#endif
if (w3m_reqlog) {
FILE *ff = fopen(w3m_reqlog, "a");
if (ff) {
Strfputs(tmp, ff);
fclose(ff);
}
}
if (src)
Strfputs(tmp, src);
cleanup_line(tmp, HEADER_MODE);
if (tmp->ptr[0] == '\n' || tmp->ptr[0] == '\r' || tmp->ptr[0] == '\0') {
if (!lineBuf2)
/* there is no header */
break;
/* last header */
}
else if (!(w3m_dump & DUMP_HEAD)) {
if (lineBuf2) {
Strcat(lineBuf2, tmp);
}
else {
lineBuf2 = tmp;
}
c = UFgetc(uf);
UFundogetc(uf);
if (c == ' ' || c == '\t')
/* header line is continued */
continue;
lineBuf2 = decodeMIME(lineBuf2, &mime_charset);
lineBuf2 = convertLine(NULL, lineBuf2, RAW_MODE,
mime_charset ? &mime_charset : &charset,
mime_charset ? mime_charset
: DocumentCharset);
/* separated with line and stored */
tmp = Strnew_size(lineBuf2->length);
for (p = lineBuf2->ptr; *p; p = q) {
for (q = p; *q && *q != '\r' && *q != '\n'; q++) ;
lineBuf2 = checkType(Strnew_charp_n(p, q - p), &propBuffer,
NULL);
Strcat(tmp, lineBuf2);
if (thru)
addnewline(newBuf, lineBuf2->ptr, propBuffer, NULL,
lineBuf2->length, FOLD_BUFFER_WIDTH, -1);
for (; *q && (*q == '\r' || *q == '\n'); q++) ;
}
#ifdef USE_IMAGE
if (thru && activeImage && displayImage) {
Str src = NULL;
if (!strncasecmp(tmp->ptr, "X-Image-URL:", 12)) {
tmpf = &tmp->ptr[12];
SKIP_BLANKS(tmpf);
src = Strnew_m_charp("<img src=\"", html_quote(tmpf),
"\" alt=\"X-Image-URL\">", NULL);
}
#ifdef USE_XFACE
else if (!strncasecmp(tmp->ptr, "X-Face:", 7)) {
tmpf = xface2xpm(&tmp->ptr[7]);
if (tmpf)
src = Strnew_m_charp("<img src=\"file:",
html_quote(tmpf),
"\" alt=\"X-Face\"",
" width=48 height=48>", NULL);
}
#endif
if (src) {
URLFile f;
Line *l;
#ifdef USE_M17N
wc_ces old_charset = newBuf->document_charset;
#endif
init_stream(&f, SCM_LOCAL, newStrStream(src));
loadHTMLstream(&f, newBuf, NULL, TRUE);
UFclose(&f);
for (l = newBuf->lastLine; l && l->real_linenumber;
l = l->prev)
l->real_linenumber = 0;
#ifdef USE_M17N
newBuf->document_charset = old_charset;
#endif
}
}
#endif
lineBuf2 = tmp;
}
else {
lineBuf2 = tmp;
}
if ((uf->scheme == SCM_HTTP
#ifdef USE_SSL
|| uf->scheme == SCM_HTTPS
#endif /* USE_SSL */
) && http_response_code == -1) {
p = lineBuf2->ptr;
while (*p && !IS_SPACE(*p))
p++;
while (*p && IS_SPACE(*p))
p++;
http_response_code = atoi(p);
if (fmInitialized) {
message(lineBuf2->ptr, 0, 0);
refresh();
}
}
if (!strncasecmp(lineBuf2->ptr, "content-transfer-encoding:", 26)) {
p = lineBuf2->ptr + 26;
while (IS_SPACE(*p))
p++;
if (!strncasecmp(p, "base64", 6))
uf->encoding = ENC_BASE64;
else if (!strncasecmp(p, "quoted-printable", 16))
uf->encoding = ENC_QUOTE;
else if (!strncasecmp(p, "uuencode", 8) ||
!strncasecmp(p, "x-uuencode", 10))
uf->encoding = ENC_UUENCODE;
else
uf->encoding = ENC_7BIT;
}
else if (!strncasecmp(lineBuf2->ptr, "content-encoding:", 17)) {
struct compression_decoder *d;
p = lineBuf2->ptr + 17;
while (IS_SPACE(*p))
p++;
uf->compression = CMP_NOCOMPRESS;
for (d = compression_decoders; d->type != CMP_NOCOMPRESS; d++) {
char **e;
for (e = d->encodings; *e != NULL; e++) {
if (strncasecmp(p, *e, strlen(*e)) == 0) {
uf->compression = d->type;
break;
}
}
if (uf->compression != CMP_NOCOMPRESS)
break;
}
uf->content_encoding = uf->compression;
}
#ifdef USE_COOKIE
else if (use_cookie && accept_cookie &&
pu && check_cookie_accept_domain(pu->host) &&
(!strncasecmp(lineBuf2->ptr, "Set-Cookie:", 11) ||
!strncasecmp(lineBuf2->ptr, "Set-Cookie2:", 12))) {
Str name = Strnew(), value = Strnew(), domain = NULL, path = NULL,
comment = NULL, commentURL = NULL, port = NULL, tmp2;
int version, quoted, flag = 0;
time_t expires = (time_t) - 1;
q = NULL;
if (lineBuf2->ptr[10] == '2') {
p = lineBuf2->ptr + 12;
version = 1;
}
else {
p = lineBuf2->ptr + 11;
version = 0;
}
#ifdef DEBUG
fprintf(stderr, "Set-Cookie: [%s]\n", p);
#endif /* DEBUG */
SKIP_BLANKS(p);
while (*p != '=' && !IS_ENDT(*p))
Strcat_char(name, *(p++));
Strremovetrailingspaces(name);
if (*p == '=') {
p++;
SKIP_BLANKS(p);
quoted = 0;
while (!IS_ENDL(*p) && (quoted || *p != ';')) {
if (!IS_SPACE(*p))
q = p;
if (*p == '"')
quoted = (quoted) ? 0 : 1;
Strcat_char(value, *(p++));
}
if (q)
Strshrink(value, p - q - 1);
}
while (*p == ';') {
p++;
SKIP_BLANKS(p);
if (matchattr(p, "expires", 7, &tmp2)) {
/* version 0 */
expires = mymktime(tmp2->ptr);
}
else if (matchattr(p, "max-age", 7, &tmp2)) {
/* XXX Is there any problem with max-age=0? (RFC 2109 ss. 4.2.1, 4.2.2) */
expires = time(NULL) + atol(tmp2->ptr);
}
else if (matchattr(p, "domain", 6, &tmp2)) {
domain = tmp2;
}
else if (matchattr(p, "path", 4, &tmp2)) {
path = tmp2;
}
else if (matchattr(p, "secure", 6, NULL)) {
flag |= COO_SECURE;
}
else if (matchattr(p, "comment", 7, &tmp2)) {
comment = tmp2;
}
else if (matchattr(p, "version", 7, &tmp2)) {
version = atoi(tmp2->ptr);
}
else if (matchattr(p, "port", 4, &tmp2)) {
/* version 1, Set-Cookie2 */
port = tmp2;
}
else if (matchattr(p, "commentURL", 10, &tmp2)) {
/* version 1, Set-Cookie2 */
commentURL = tmp2;
}
else if (matchattr(p, "discard", 7, NULL)) {
/* version 1, Set-Cookie2 */
flag |= COO_DISCARD;
}
quoted = 0;
while (!IS_ENDL(*p) && (quoted || *p != ';')) {
if (*p == '"')
quoted = (quoted) ? 0 : 1;
p++;
}
}
if (pu && name->length > 0) {
int err;
if (show_cookie) {
if (flag & COO_SECURE)
disp_message_nsec("Received a secured cookie", FALSE, 1,
TRUE, FALSE);
else
disp_message_nsec(Sprintf("Received cookie: %s=%s",
name->ptr, value->ptr)->ptr,
FALSE, 1, TRUE, FALSE);
}
err =
add_cookie(pu, name, value, expires, domain, path, flag,
comment, version, port, commentURL);
if (err) {
char *ans = (accept_bad_cookie == ACCEPT_BAD_COOKIE_ACCEPT)
? "y" : NULL;
if (fmInitialized && (err & COO_OVERRIDE_OK) &&
accept_bad_cookie == ACCEPT_BAD_COOKIE_ASK) {
Str msg = Sprintf("Accept bad cookie from %s for %s?",
pu->host,
((domain && domain->ptr)
? domain->ptr : "<localdomain>"));
if (msg->length > COLS - 10)
Strshrink(msg, msg->length - (COLS - 10));
Strcat_charp(msg, " (y/n)");
ans = inputAnswer(msg->ptr);
}
if (ans == NULL || TOLOWER(*ans) != 'y' ||
(err =
add_cookie(pu, name, value, expires, domain, path,
flag | COO_OVERRIDE, comment, version,
port, commentURL))) {
err = (err & ~COO_OVERRIDE_OK) - 1;
if (err >= 0 && err < COO_EMAX)
emsg = Sprintf("This cookie was rejected "
"to prevent security violation. [%s]",
violations[err])->ptr;
else
emsg =
"This cookie was rejected to prevent security violation.";
record_err_message(emsg);
if (show_cookie)
disp_message_nsec(emsg, FALSE, 1, TRUE, FALSE);
}
else
if (show_cookie)
disp_message_nsec(Sprintf
("Accepting invalid cookie: %s=%s",
name->ptr, value->ptr)->ptr, FALSE,
1, TRUE, FALSE);
}
}
}
#endif /* USE_COOKIE */
else if (!strncasecmp(lineBuf2->ptr, "w3m-control:", 12) &&
uf->scheme == SCM_LOCAL_CGI) {
Str funcname = Strnew();
int f;
p = lineBuf2->ptr + 12;
SKIP_BLANKS(p);
while (*p && !IS_SPACE(*p))
Strcat_char(funcname, *(p++));
SKIP_BLANKS(p);
f = getFuncList(funcname->ptr);
if (f >= 0) {
tmp = Strnew_charp(p);
Strchop(tmp);
pushEvent(f, tmp->ptr);
}
}
if (headerlist)
pushText(headerlist, lineBuf2->ptr);
Strfree(lineBuf2);
lineBuf2 = NULL;
}
if (thru)
addnewline(newBuf, "", propBuffer, NULL, 0, -1, -1);
if (src)
fclose(src);
}
char *
checkHeader(Buffer *buf, char *field)
{
int len;
TextListItem *i;
char *p;
if (buf == NULL || field == NULL || buf->document_header == NULL)
return NULL;
len = strlen(field);
for (i = buf->document_header->first; i != NULL; i = i->next) {
if (!strncasecmp(i->ptr, field, len)) {
p = i->ptr + len;
return remove_space(p);
}
}
return NULL;
}
char *
checkContentType(Buffer *buf)
{
char *p;
Str r;
p = checkHeader(buf, "Content-Type:");
if (p == NULL)
return NULL;
r = Strnew();
while (*p && *p != ';' && !IS_SPACE(*p))
Strcat_char(r, *p++);
#ifdef USE_M17N
if ((p = strcasestr(p, "charset")) != NULL) {
p += 7;
SKIP_BLANKS(p);
if (*p == '=') {
p++;
SKIP_BLANKS(p);
if (*p == '"')
p++;
content_charset = wc_guess_charset(p, 0);
}
}
#endif
return r->ptr;
}
struct auth_param {
char *name;
Str val;
};
struct http_auth {
int pri;
char *scheme;
struct auth_param *param;
Str (*cred) (struct http_auth * ha, Str uname, Str pw, ParsedURL *pu,
HRequest *hr, FormList *request);
};
enum {
AUTHCHR_NUL,
AUTHCHR_SEP,
AUTHCHR_TOKEN,
};
static int
skip_auth_token(char **pp)
{
char *p;
int first = AUTHCHR_NUL, typ;
for (p = *pp ;; ++p) {
switch (*p) {
case '\0':
goto endoftoken;
default:
if ((unsigned char)*p > 037) {
typ = AUTHCHR_TOKEN;
break;
}
/* fall through */
case '\177':
case '[':
case ']':
case '(':
case ')':
case '<':
case '>':
case '@':
case ';':
case ':':
case '\\':
case '"':
case '/':
case '?':
case '=':
case ' ':
case '\t':
case ',':
typ = AUTHCHR_SEP;
break;
}
if (!first)
first = typ;
else if (first != typ)
break;
}
endoftoken:
*pp = p;
return first;
}
static Str
extract_auth_val(char **q)
{
unsigned char *qq = *(unsigned char **)q;
int quoted = 0;
Str val = Strnew();
SKIP_BLANKS(qq);
if (*qq == '"') {
quoted = TRUE;
Strcat_char(val, *qq++);
}
while (*qq != '\0') {
if (quoted && *qq == '"') {
Strcat_char(val, *qq++);
break;
}
if (!quoted) {
switch (*qq) {
case '[':
case ']':
case '(':
case ')':
case '<':
case '>':
case '@':
case ';':
case ':':
case '\\':
case '"':
case '/':
case '?':
case '=':
case ' ':
case '\t':
qq++;
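/* fall through */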
case ',':
goto end_token;
default:
if (*qq <= 037 || *qq == 0177) {
qq++;
goto end_token;
}
}
}
else if (quoted && *qq == '\\')
Strcat_char(val, *qq++);
Strcat_char(val, *qq++);
}
end_token:
*q = (char *)qq;
return val;
}
static Str
qstr_unquote(Str s)
{
char *p;
if (s == NULL)
return NULL;
p = s->ptr;
if (*p == '"') {
Str tmp = Strnew();
for (p++; *p != '\0'; p++) {
if (*p == '\\')
p++;
Strcat_char(tmp, *p);
}
if (Strlastchar(tmp) == '"')
Strshrink(tmp, 1);
return tmp;
}
else
return s;
}
static char *
extract_auth_param(char *q, struct auth_param *auth)
{
struct auth_param *ap;
char *p;
for (ap = auth; ap->name != NULL; ap++) {
ap->val = NULL;
}
while (*q != '\0') {
SKIP_BLANKS(q);
for (ap = auth; ap->name != NULL; ap++) {
size_t len;
len = strlen(ap->name);
if (strncasecmp(q, ap->name, len) == 0 &&
(IS_SPACE(q[len]) || q[len] == '=')) {
p = q + len;
SKIP_BLANKS(p);
if (*p != '=')
return q;
q = p + 1;
ap->val = extract_auth_val(&q);
break;
}
}
if (ap->name == NULL) {
/* skip unknown param */
int token_type;
p = q;
if ((token_type = skip_auth_token(&q)) == AUTHCHR_TOKEN &&
(IS_SPACE(*q) || *q == '=')) {
SKIP_BLANKS(q);
if (*q != '=')
return p;
q++;
extract_auth_val(&q);
}
else
return p;
}
if (*q != '\0') {
SKIP_BLANKS(q);
if (*q == ',')
q++;
else
break;
}
}
return q;
}
static Str
get_auth_param(struct auth_param *auth, char *name)
{
struct auth_param *ap;
for (ap = auth; ap->name != NULL; ap++) {
if (strcasecmp(name, ap->name) == 0)
return ap->val;
}
return NULL;
}
static Str
AuthBasicCred(struct http_auth *ha, Str uname, Str pw, ParsedURL *pu,
HRequest *hr, FormList *request)
{
Str s = Strdup(uname);
Strcat_char(s, ':');
Strcat(s, pw);
return Strnew_m_charp("Basic ", encodeB(s->ptr)->ptr, NULL);
}
#ifdef USE_DIGEST_AUTH
#include <openssl/md5.h>
/* RFC2617: 3.2.2 The Authorization Request Header
*
* credentials = "Digest" digest-response
* digest-response = 1#( username | realm | nonce | digest-uri
* | response | [ algorithm ] | [cnonce] |
* [opaque] | [message-qop] |
* [nonce-count] | [auth-param] )
*
* username = "username" "=" username-value
* username-value = quoted-string
* digest-uri = "uri" "=" digest-uri-value
* digest-uri-value = request-uri ; As specified by HTTP/1.1
* message-qop = "qop" "=" qop-value
* cnonce = "cnonce" "=" cnonce-value
* cnonce-value = nonce-value
* nonce-count = "nc" "=" nc-value
* nc-value = 8LHEX
* response = "response" "=" request-digest
* request-digest = <"> 32LHEX <">
* LHEX = "0" | "1" | "2" | "3" |
* "4" | "5" | "6" | "7" |
* "8" | "9" | "a" | "b" |
* "c" | "d" | "e" | "f"
*/
static Str
digest_hex(unsigned char *p)
{
char *h = "0123456789abcdef";
Str tmp = Strnew_size(MD5_DIGEST_LENGTH * 2 + 1);
int i;
for (i = 0; i < MD5_DIGEST_LENGTH; i++, p++) {
Strcat_char(tmp, h[(*p >> 4) & 0x0f]);
Strcat_char(tmp, h[*p & 0x0f]);
}
return tmp;
}
enum {
QOP_NONE,
QOP_AUTH,
QOP_AUTH_INT,
};
static Str
AuthDigestCred(struct http_auth *ha, Str uname, Str pw, ParsedURL *pu,
HRequest *hr, FormList *request)
{
Str tmp, a1buf, a2buf, rd, s;
unsigned char md5[MD5_DIGEST_LENGTH + 1];
Str uri = HTTPrequestURI(pu, hr);
char nc[] = "00000001";
FILE *fp;
Str algorithm = qstr_unquote(get_auth_param(ha->param, "algorithm"));
Str nonce = qstr_unquote(get_auth_param(ha->param, "nonce"));
Str cnonce /* = qstr_unquote(get_auth_param(ha->param, "cnonce")) */;
/* cnonce is what client should generate. */
Str qop = qstr_unquote(get_auth_param(ha->param, "qop"));
static union {
int r[4];
unsigned char s[sizeof(int) * 4];
} cnonce_seed;
int qop_i = QOP_NONE;
cnonce_seed.r[0] = rand();
cnonce_seed.r[1] = rand();
cnonce_seed.r[2] = rand();
MD5(cnonce_seed.s, sizeof(cnonce_seed.s), md5);
cnonce = digest_hex(md5);
cnonce_seed.r[3]++;
if (qop) {
char *p;
size_t i;
p = qop->ptr;
SKIP_BLANKS(p);
for (;;) {
if ((i = strcspn(p, " \t,")) > 0) {
if (i == sizeof("auth-int") - sizeof("") && !strncasecmp(p, "auth-int", i)) {
if (qop_i < QOP_AUTH_INT)
qop_i = QOP_AUTH_INT;
}
else if (i == sizeof("auth") - sizeof("") && !strncasecmp(p, "auth", i)) {
if (qop_i < QOP_AUTH)
qop_i = QOP_AUTH;
}
}
if (p[i]) {
p += i + 1;
SKIP_BLANKS(p);
}
else
break;
}
}
/* A1 = unq(username-value) ":" unq(realm-value) ":" passwd */
tmp = Strnew_m_charp(uname->ptr, ":",
qstr_unquote(get_auth_param(ha->param, "realm"))->ptr,
":", pw->ptr, NULL);
MD5(tmp->ptr, strlen(tmp->ptr), md5);
a1buf = digest_hex(md5);
if (algorithm) {
if (strcasecmp(algorithm->ptr, "MD5-sess") == 0) {
/* A1 = H(unq(username-value) ":" unq(realm-value) ":" passwd)
* ":" unq(nonce-value) ":" unq(cnonce-value)
*/
if (nonce == NULL)
return NULL;
tmp = Strnew_m_charp(a1buf->ptr, ":",
qstr_unquote(nonce)->ptr,
":", qstr_unquote(cnonce)->ptr, NULL);
MD5(tmp->ptr, strlen(tmp->ptr), md5);
a1buf = digest_hex(md5);
}
else if (strcasecmp(algorithm->ptr, "MD5") == 0)
/* ok default */
;
else
/* unknown algorithm */
return NULL;
}
/* A2 = Method ":" digest-uri-value */
tmp = Strnew_m_charp(HTTPrequestMethod(hr)->ptr, ":", uri->ptr, NULL);
if (qop_i == QOP_AUTH_INT) {
/* A2 = Method ":" digest-uri-value ":" H(entity-body) */
if (request && request->body) {
if (request->method == FORM_METHOD_POST && request->enctype == FORM_ENCTYPE_MULTIPART) {
fp = fopen(request->body, "r");
if (fp != NULL) {
Str ebody;
ebody = Strfgetall(fp);
fclose(fp);
MD5(ebody->ptr, strlen(ebody->ptr), md5);
}
else {
MD5("", 0, md5);
}
}
else {
MD5(request->body, request->length, md5);
}
}
else {
MD5("", 0, md5);
}
Strcat_char(tmp, ':');
Strcat(tmp, digest_hex(md5));
}
MD5(tmp->ptr, strlen(tmp->ptr), md5);
a2buf = digest_hex(md5);
if (qop_i >= QOP_AUTH) {
/* request-digest = <"> < KD ( H(A1), unq(nonce-value)
* ":" nc-value
* ":" unq(cnonce-value)
* ":" unq(qop-value)
* ":" H(A2)
* ) <">
*/
if (nonce == NULL)
return NULL;
tmp = Strnew_m_charp(a1buf->ptr, ":", qstr_unquote(nonce)->ptr,
":", nc,
":", qstr_unquote(cnonce)->ptr,
":", qop_i == QOP_AUTH ? "auth" : "auth-int",
":", a2buf->ptr, NULL);
MD5(tmp->ptr, strlen(tmp->ptr), md5);
rd = digest_hex(md5);
}
else {
/* compatibility with RFC 2069
* request_digest = KD(H(A1), unq(nonce), H(A2))
*/
if (nonce == NULL)
return NULL;
tmp = Strnew_m_charp(a1buf->ptr, ":", nonce->ptr,
":", a2buf->ptr, NULL);
MD5(tmp->ptr, strlen(tmp->ptr), md5);
rd = digest_hex(md5);
}
/*
* digest-response = 1#( username | realm | nonce | digest-uri
* | response | [ algorithm ] | [cnonce] |
* [opaque] | [message-qop] |
* [nonce-count] | [auth-param] )
*/
tmp = Strnew_m_charp("Digest username=\"", uname->ptr, "\"", NULL);
Strcat_m_charp(tmp, ", realm=",
get_auth_param(ha->param, "realm")->ptr, NULL);
Strcat_m_charp(tmp, ", nonce=",
get_auth_param(ha->param, "nonce")->ptr, NULL);
Strcat_m_charp(tmp, ", uri=\"", uri->ptr, "\"", NULL);
Strcat_m_charp(tmp, ", response=\"", rd->ptr, "\"", NULL);
if (algorithm)
Strcat_m_charp(tmp, ", algorithm=",
get_auth_param(ha->param, "algorithm")->ptr, NULL);
if (cnonce)
Strcat_m_charp(tmp, ", cnonce=\"", cnonce->ptr, "\"", NULL);
if ((s = get_auth_param(ha->param, "opaque")) != NULL)
Strcat_m_charp(tmp, ", opaque=", s->ptr, NULL);
if (qop_i >= QOP_AUTH) {
Strcat_m_charp(tmp, ", qop=",
qop_i == QOP_AUTH ? "auth" : "auth-int",
NULL);
/* XXX how to count? */
/* Since the nonce is unique to each *-Authenticate and w3m does not re-use
*-Authenticate: headers, nonce-count should always be "00000001". */
Strcat_m_charp(tmp, ", nc=", nc, NULL);
}
return tmp;
}
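/* A worked check of the qop=auth computation above, using the sample values
 * from RFC 2617 section 3.5. A hedged, self-contained sketch, compiled out;
 * the helper below is for illustration only and is not used elsewhere.
 */
#if 0
static void md5_hex_example(const char *s, char out[MD5_DIGEST_LENGTH * 2 + 1])
{
    unsigned char d[MD5_DIGEST_LENGTH];
    int i;
    MD5((const unsigned char *)s, strlen(s), d);
    for (i = 0; i < MD5_DIGEST_LENGTH; i++)
        sprintf(out + i * 2, "%02x", d[i]);
}

static void digest_rfc2617_example(void)
{
    char a1[33], a2[33], rd[33], kd[256];
    /* A1 = unq(username) ":" unq(realm) ":" passwd */
    md5_hex_example("Mufasa:testrealm@host.com:Circle Of Life", a1);
    /* A2 = Method ":" digest-uri-value */
    md5_hex_example("GET:/dir/index.html", a2);
    /* request-digest = H( H(A1) ":" nonce ":" nc ":" cnonce ":" qop ":" H(A2) ) */
    snprintf(kd, sizeof(kd), "%s:%s:%s:%s:%s:%s", a1,
             "dcd98b7102dd2f0e8b11d0f600bfb0c093", "00000001", "0a4f113b",
             "auth", a2);
    md5_hex_example(kd, rd);
    /* RFC 2617 lists 6629fae49393a05397450978507c4ef1 for these inputs */
    printf("response=%s\n", rd);
}
#endif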
#endif
/* *INDENT-OFF* */
struct auth_param none_auth_param[] = {
{NULL, NULL}
};
struct auth_param basic_auth_param[] = {
{"realm", NULL},
{NULL, NULL}
};
#ifdef USE_DIGEST_AUTH
/* RFC2617: 3.2.1 The WWW-Authenticate Response Header
* challenge = "Digest" digest-challenge
*
* digest-challenge = 1#( realm | [ domain ] | nonce |
* [ opaque ] |[ stale ] | [ algorithm ] |
* [ qop-options ] | [auth-param] )
*
* domain = "domain" "=" <"> URI ( 1*SP URI ) <">
* URI = absoluteURI | abs_path
* nonce = "nonce" "=" nonce-value
* nonce-value = quoted-string
* opaque = "opaque" "=" quoted-string
* stale = "stale" "=" ( "true" | "false" )
* algorithm = "algorithm" "=" ( "MD5" | "MD5-sess" |
* token )
* qop-options = "qop" "=" <"> 1#qop-value <">
* qop-value = "auth" | "auth-int" | token
*/
struct auth_param digest_auth_param[] = {
{"realm", NULL},
{"domain", NULL},
{"nonce", NULL},
{"opaque", NULL},
{"stale", NULL},
{"algorithm", NULL},
{"qop", NULL},
{NULL, NULL}
};
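/* Hedged usage sketch, compiled out: how the challenge grammar above maps
 * onto extract_auth_param(). The header text is the RFC 2617 sample
 * challenge; values stay quoted until qstr_unquote() strips them.
 */
#if 0
static void demo_parse_challenge(void)
{
    char *q = "realm=\"testrealm@host.com\", "
        "qop=\"auth,auth-int\", "
        "nonce=\"dcd98b7102dd2f0e8b11d0f600bfb0c093\", "
        "opaque=\"5ccc069c403ebaf9f0171e9517f40e41\"";
    extract_auth_param(q, digest_auth_param);
    /* get_auth_param(digest_auth_param, "realm") now holds the quoted
     * realm string. */
}
#endif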
#endif
/* for RFC2617: HTTP Authentication */
struct http_auth www_auth[] = {
{ 1, "Basic ", basic_auth_param, AuthBasicCred },
#ifdef USE_DIGEST_AUTH
{ 10, "Digest ", digest_auth_param, AuthDigestCred },
#endif
{ 0, NULL, NULL, NULL,}
};
/* *INDENT-ON* */
static struct http_auth *
findAuthentication(struct http_auth *hauth, Buffer *buf, char *auth_field)
{
struct http_auth *ha;
int len = strlen(auth_field), slen;
TextListItem *i;
char *p0, *p;
bzero(hauth, sizeof(struct http_auth));
for (i = buf->document_header->first; i != NULL; i = i->next) {
if (strncasecmp(i->ptr, auth_field, len) == 0) {
for (p = i->ptr + len; p != NULL && *p != '\0';) {
SKIP_BLANKS(p);
p0 = p;
for (ha = &www_auth[0]; ha->scheme != NULL; ha++) {
slen = strlen(ha->scheme);
if (strncasecmp(p, ha->scheme, slen) == 0) {
p += slen;
SKIP_BLANKS(p);
if (hauth->pri < ha->pri) {
*hauth = *ha;
p = extract_auth_param(p, hauth->param);
break;
}
else {
/* weak auth */
p = extract_auth_param(p, none_auth_param);
}
}
}
if (p0 == p) {
/* all unknown auth failed */
int token_type;
if ((token_type = skip_auth_token(&p)) == AUTHCHR_TOKEN && IS_SPACE(*p)) {
SKIP_BLANKS(p);
p = extract_auth_param(p, none_auth_param);
}
else
break;
}
}
}
}
return hauth->scheme ? hauth : NULL;
}
static void
getAuthCookie(struct http_auth *hauth, char *auth_header,
TextList *extra_header, ParsedURL *pu, HRequest *hr,
FormList *request,
volatile Str *uname, volatile Str *pwd)
{
Str ss = NULL;
Str tmp;
TextListItem *i;
int a_found;
int auth_header_len = strlen(auth_header);
char *realm = NULL;
int proxy;
if (hauth) {
Str srealm = qstr_unquote(get_auth_param(hauth->param, "realm"));
/* the challenge may lack a realm; avoid dereferencing NULL */
if (srealm)
realm = srealm->ptr;
}
if (!realm)
return;
a_found = FALSE;
for (i = extra_header->first; i != NULL; i = i->next) {
if (!strncasecmp(i->ptr, auth_header, auth_header_len)) {
a_found = TRUE;
break;
}
}
proxy = !strncasecmp("Proxy-Authorization:", auth_header,
auth_header_len);
if (a_found) {
/* This means that a *-Authenticate: header was received after an
* Authorization: header had already been sent to the server.
*/
if (fmInitialized) {
message("Wrong username or password", 0, 0);
refresh();
}
else
fprintf(stderr, "Wrong username or password\n");
sleep(1);
/* delete Authenticate: header from extra_header */
delText(extra_header, i);
invalidate_auth_user_passwd(pu, realm, *uname, *pwd, proxy);
}
*uname = NULL;
*pwd = NULL;
if (!a_found && find_auth_user_passwd(pu, realm, (Str*)uname, (Str*)pwd,
proxy)) {
/* found username & password in passwd file */ ;
}
else {
if (QuietMessage)
return;
/* input username and password */
sleep(2);
if (fmInitialized) {
char *pp;
term_raw();
/* FIXME: gettextize? */
if ((pp = inputStr(Sprintf("Username for %s: ", realm)->ptr,
NULL)) == NULL)
return;
*uname = Str_conv_to_system(Strnew_charp(pp));
if ((pp = inputLine(Sprintf("Password for %s: ", realm)->ptr, NULL,
IN_PASSWORD)) == NULL) {
*uname = NULL;
return;
}
*pwd = Str_conv_to_system(Strnew_charp(pp));
term_cbreak();
}
else {
/*
* If the post file is specified as '-', stdin is closed at this
* point, so w3m cannot read the username from stdin; exit with an
* error message instead. (This is the same behavior as lwp-request.)
*/
if (feof(stdin) || ferror(stdin)) {
/* FIXME: gettextize? */
fprintf(stderr, "w3m: Authorization required for %s\n",
realm);
exit(1);
}
/* FIXME: gettextize? */
printf(proxy ? "Proxy Username for %s: " : "Username for %s: ",
realm);
fflush(stdout);
*uname = Strfgets(stdin);
Strchop(*uname);
#ifdef HAVE_GETPASSPHRASE
*pwd = Strnew_charp((char *)
getpassphrase(proxy ? "Proxy Password: " :
"Password: "));
#else
#ifndef __MINGW32_VERSION
*pwd = Strnew_charp((char *)
getpass(proxy ? "Proxy Password: " :
"Password: "));
#else
term_raw();
*pwd = Strnew_charp((char *)
inputLine(proxy ? "Proxy Password: " :
"Password: ", NULL, IN_PASSWORD));
term_cbreak();
#endif /* __MINGW32_VERSION */
#endif
}
}
ss = hauth->cred(hauth, *uname, *pwd, pu, hr, request);
if (ss) {
tmp = Strnew_charp(auth_header);
Strcat_m_charp(tmp, " ", ss->ptr, "\r\n", NULL);
pushText(extra_header, tmp->ptr);
}
else {
*uname = NULL;
*pwd = NULL;
}
return;
}
static int
same_url_p(ParsedURL *pu1, ParsedURL *pu2)
{
return (pu1->scheme == pu2->scheme && pu1->port == pu2->port &&
(pu1->host ? pu2->host ? !strcasecmp(pu1->host, pu2->host) : 0 : 1) &&
(pu1->file ? pu2->file ? !strcmp(pu1->file, pu2->file) : 0 : 1));
}
static int
checkRedirection(ParsedURL *pu)
{
static ParsedURL *puv = NULL;
static int nredir = 0;
static int nredir_size = 0;
Str tmp;
if (pu == NULL) {
nredir = 0;
nredir_size = 0;
puv = NULL;
return TRUE;
}
if (nredir >= FollowRedirection) {
/* FIXME: gettextize? */
tmp = Sprintf("Number of redirections exceeded %d at %s",
FollowRedirection, parsedURL2Str(pu)->ptr);
disp_err_message(tmp->ptr, FALSE);
return FALSE;
}
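/* Loop detection: compare with the immediately preceding URL and, on every
* other step, with the URL half-way back in the ring buffer puv[] (a
* tortoise-and-hare style check). */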
else if (nredir_size > 0 &&
(same_url_p(pu, &puv[(nredir - 1) % nredir_size]) ||
(!(nredir % 2)
&& same_url_p(pu, &puv[(nredir / 2) % nredir_size])))) {
/* FIXME: gettextize? */
tmp = Sprintf("Redirection loop detected (%s)",
parsedURL2Str(pu)->ptr);
disp_err_message(tmp->ptr, FALSE);
return FALSE;
}
if (!puv) {
nredir_size = FollowRedirection / 2 + 1;
puv = New_N(ParsedURL, nredir_size);
memset(puv, 0, sizeof(ParsedURL) * nredir_size);
}
copyParsedURL(&puv[nredir % nredir_size], pu);
nredir++;
return TRUE;
}
Str
getLinkNumberStr(int correction)
{
return Sprintf("[%d]", cur_hseq + correction);
}
/*
* loadGeneralFile: load file to buffer
*/
#define DO_EXTERNAL ((Buffer *(*)(URLFile *, Buffer *))doExternal)
Buffer *
loadGeneralFile(char *path, ParsedURL *volatile current, char *referer,
int flag, FormList *volatile request)
{
URLFile f, *volatile of = NULL;
ParsedURL pu;
Buffer *b = NULL;
Buffer *(*volatile proc)(URLFile *, Buffer *) = loadBuffer;
char *volatile tpath;
char *volatile t = "text/plain", *p, *volatile real_type = NULL;
Buffer *volatile t_buf = NULL;
int volatile searchHeader = SearchHeader;
int volatile searchHeader_through = TRUE;
MySignalHandler(*volatile prevtrap) (SIGNAL_ARG) = NULL;
TextList *extra_header = newTextList();
volatile Str uname = NULL;
volatile Str pwd = NULL;
volatile Str realm = NULL;
int volatile add_auth_cookie_flag;
unsigned char status = HTST_NORMAL;
URLOption url_option;
Str tmp;
Str volatile page = NULL;
#ifdef USE_M17N
wc_ces charset = WC_CES_US_ASCII;
#endif
HRequest hr;
ParsedURL *volatile auth_pu;
tpath = path;
prevtrap = NULL;
add_auth_cookie_flag = 0;
checkRedirection(NULL);
load_doc:
{
const char *sc_redirect;
parseURL2(tpath, &pu, current);
sc_redirect = query_SCONF_SUBSTITUTE_URL(&pu);
if (sc_redirect && *sc_redirect && checkRedirection(&pu)) {
tpath = (char *)sc_redirect;
request = NULL;
add_auth_cookie_flag = 0;
current = New(ParsedURL);
*current = pu;
status = HTST_NORMAL;
goto load_doc;
}
}
TRAP_OFF;
url_option.referer = referer;
url_option.flag = flag;
f = openURL(tpath, &pu, current, &url_option, request, extra_header, of,
&hr, &status);
of = NULL;
#ifdef USE_M17N
content_charset = 0;
#endif
if (f.stream == NULL) {
switch (f.scheme) {
case SCM_LOCAL:
{
struct stat st;
if (stat(pu.real_file, &st) < 0)
return NULL;
if (S_ISDIR(st.st_mode)) {
if (UseExternalDirBuffer) {
Str cmd = Sprintf("%s?dir=%s#current",
DirBufferCommand, pu.file);
b = loadGeneralFile(cmd->ptr, NULL, NO_REFERER, 0,
NULL);
if (b != NULL && b != NO_BUFFER) {
copyParsedURL(&b->currentURL, &pu);
b->filename = b->currentURL.real_file;
}
return b;
}
else {
page = loadLocalDir(pu.real_file);
t = "local:directory";
#ifdef USE_M17N
charset = SystemCharset;
#endif
}
}
}
break;
case SCM_FTPDIR:
page = loadFTPDir(&pu, &charset);
t = "ftp:directory";
break;
#ifdef USE_NNTP
case SCM_NEWS_GROUP:
page = loadNewsgroup(&pu, &charset);
t = "news:group";
break;
#endif
case SCM_UNKNOWN:
#ifdef USE_EXTERNAL_URI_LOADER
tmp = searchURIMethods(&pu);
if (tmp != NULL) {
b = loadGeneralFile(tmp->ptr, current, referer, flag, request);
if (b != NULL && b != NO_BUFFER)
copyParsedURL(&b->currentURL, &pu);
return b;
}
#endif
/* FIXME: gettextize? */
disp_err_message(Sprintf("Unknown URI: %s",
parsedURL2Str(&pu)->ptr)->ptr, FALSE);
break;
}
if (page && page->length > 0)
goto page_loaded;
return NULL;
}
if (status == HTST_MISSING) {
TRAP_OFF;
UFclose(&f);
return NULL;
}
/* openURL() succeeded */
if (SETJMP(AbortLoading) != 0) {
/* transfer interrupted */
TRAP_OFF;
if (b)
discardBuffer(b);
UFclose(&f);
return NULL;
}
b = NULL;
if (f.is_cgi) {
/* local CGI */
searchHeader = TRUE;
searchHeader_through = FALSE;
}
if (header_string)
header_string = NULL;
TRAP_ON;
if (pu.scheme == SCM_HTTP ||
#ifdef USE_SSL
pu.scheme == SCM_HTTPS ||
#endif /* USE_SSL */
((
#ifdef USE_GOPHER
(pu.scheme == SCM_GOPHER && non_null(GOPHER_proxy)) ||
#endif /* USE_GOPHER */
(pu.scheme == SCM_FTP && non_null(FTP_proxy))
) && !Do_not_use_proxy && !check_no_proxy(pu.host))) {
if (fmInitialized) {
term_cbreak();
/* FIXME: gettextize? */
message(Sprintf("%s contacted. Waiting for reply...", pu.host)->
ptr, 0, 0);
refresh();
}
if (t_buf == NULL)
t_buf = newBuffer(INIT_BUFFER_WIDTH);
#if 0 /* USE_SSL */
if (IStype(f.stream) == IST_SSL) {
Str s = ssl_get_certificate(f.stream, pu.host);
if (s == NULL)
return NULL;
else
t_buf->ssl_certificate = s->ptr;
}
#endif
readHeader(&f, t_buf, FALSE, &pu);
if (((http_response_code >= 301 && http_response_code <= 303)
|| http_response_code == 307)
&& (p = checkHeader(t_buf, "Location:")) != NULL
&& checkRedirection(&pu)) {
/* document moved */
/* 301: Moved Permanently */
/* 302: Found */
/* 303: See Other */
/* 307: Temporary Redirect (HTTP/1.1) */
tpath = url_encode(p, NULL, 0);
request = NULL;
UFclose(&f);
current = New(ParsedURL);
copyParsedURL(current, &pu);
t_buf = newBuffer(INIT_BUFFER_WIDTH);
t_buf->bufferprop |= BP_REDIRECTED;
status = HTST_NORMAL;
goto load_doc;
}
t = checkContentType(t_buf);
if (t == NULL && pu.file != NULL) {
if (!((http_response_code >= 400 && http_response_code <= 407) ||
(http_response_code >= 500 && http_response_code <= 505)))
t = guessContentType(pu.file);
}
if (t == NULL)
t = "text/plain";
if (add_auth_cookie_flag && realm && uname && pwd) {
/* If authorization is required and passed */
add_auth_user_passwd(&pu, qstr_unquote(realm)->ptr, uname, pwd,
0);
add_auth_cookie_flag = 0;
}
if ((p = checkHeader(t_buf, "WWW-Authenticate:")) != NULL &&
http_response_code == 401) {
/* Authentication needed */
struct http_auth hauth;
if (findAuthentication(&hauth, t_buf, "WWW-Authenticate:") != NULL
&& (realm = get_auth_param(hauth.param, "realm")) != NULL) {
auth_pu = &pu;
getAuthCookie(&hauth, "Authorization:", extra_header,
auth_pu, &hr, request, &uname, &pwd);
if (uname == NULL) {
/* abort */
TRAP_OFF;
goto page_loaded;
}
UFclose(&f);
add_auth_cookie_flag = 1;
status = HTST_NORMAL;
goto load_doc;
}
}
if ((p = checkHeader(t_buf, "Proxy-Authenticate:")) != NULL &&
http_response_code == 407) {
/* Authentication needed */
struct http_auth hauth;
if (findAuthentication(&hauth, t_buf, "Proxy-Authenticate:")
!= NULL
&& (realm = get_auth_param(hauth.param, "realm")) != NULL) {
auth_pu = schemeToProxy(pu.scheme);
getAuthCookie(&hauth, "Proxy-Authorization:",
extra_header, auth_pu, &hr, request,
&uname, &pwd);
if (uname == NULL) {
/* abort */
TRAP_OFF;
goto page_loaded;
}
UFclose(&f);
add_auth_cookie_flag = 1;
status = HTST_NORMAL;
add_auth_user_passwd(auth_pu, qstr_unquote(realm)->ptr, uname, pwd, 1);
goto load_doc;
}
}
/* XXX: RFC2617 3.2.3 Authentication-Info: ? */
if (status == HTST_CONNECT) {
of = &f;
goto load_doc;
}
f.modtime = mymktime(checkHeader(t_buf, "Last-Modified:"));
}
#ifdef USE_NNTP
else if (pu.scheme == SCM_NEWS || pu.scheme == SCM_NNTP) {
if (t_buf == NULL)
t_buf = newBuffer(INIT_BUFFER_WIDTH);
readHeader(&f, t_buf, TRUE, &pu);
t = checkContentType(t_buf);
if (t == NULL)
t = "text/plain";
}
#endif /* USE_NNTP */
#ifdef USE_GOPHER
else if (pu.scheme == SCM_GOPHER) {
switch (*pu.file) {
case '0':
t = "text/plain";
break;
case '1':
case 'm':
page = loadGopherDir(&f, &pu, &charset);
t = "gopher:directory";
TRAP_OFF;
goto page_loaded;
case 's':
t = "audio/basic";
break;
case 'g':
t = "image/gif";
break;
case 'h':
t = "text/html";
break;
}
}
#endif /* USE_GOPHER */
else if (pu.scheme == SCM_FTP) {
check_compression(path, &f);
if (f.compression != CMP_NOCOMPRESS) {
char *t1 = uncompressed_file_type(pu.file, NULL);
real_type = f.guess_type;
#if 0
if (t1 && strncasecmp(t1, "application/", 12) == 0) {
f.compression = CMP_NOCOMPRESS;
t = real_type;
}
else
#endif
if (t1)
t = t1;
else
t = real_type;
}
else {
real_type = guessContentType(pu.file);
if (real_type == NULL)
real_type = "text/plain";
t = real_type;
}
#if 0
if (!strncasecmp(t, "application/", 12)) {
char *tmpf = tmpfname(TMPF_DFL, NULL)->ptr;
current_content_length = 0;
if (save2tmp(f, tmpf) < 0)
UFclose(&f);
else {
UFclose(&f);
TRAP_OFF;
doFileMove(tmpf, guess_save_name(t_buf, pu.file));
}
return NO_BUFFER;
}
#endif
}
else if (pu.scheme == SCM_DATA) {
t = f.guess_type;
}
else if (searchHeader) {
searchHeader = SearchHeader = FALSE;
if (t_buf == NULL)
t_buf = newBuffer(INIT_BUFFER_WIDTH);
readHeader(&f, t_buf, searchHeader_through, &pu);
if (f.is_cgi && (p = checkHeader(t_buf, "Location:")) != NULL &&
checkRedirection(&pu)) {
/* document moved */
tpath = url_encode(remove_space(p), NULL, 0);
request = NULL;
UFclose(&f);
add_auth_cookie_flag = 0;
current = New(ParsedURL);
copyParsedURL(current, &pu);
t_buf = newBuffer(INIT_BUFFER_WIDTH);
t_buf->bufferprop |= BP_REDIRECTED;
status = HTST_NORMAL;
goto load_doc;
}
#ifdef AUTH_DEBUG
if ((p = checkHeader(t_buf, "WWW-Authenticate:")) != NULL) {
/* Authentication needed */
struct http_auth hauth;
if (findAuthentication(&hauth, t_buf, "WWW-Authenticate:") != NULL
&& (realm = get_auth_param(hauth.param, "realm")) != NULL) {
auth_pu = &pu;
getAuthCookie(&hauth, "Authorization:", extra_header,
auth_pu, &hr, request, &uname, &pwd);
if (uname == NULL) {
/* abort */
TRAP_OFF;
goto page_loaded;
}
UFclose(&f);
add_auth_cookie_flag = 1;
status = HTST_NORMAL;
goto load_doc;
}
}
#endif /* defined(AUTH_DEBUG) */
t = checkContentType(t_buf);
if (t == NULL)
t = "text/plain";
}
else if (DefaultType) {
t = DefaultType;
DefaultType = NULL;
}
else {
t = guessContentType(pu.file);
if (t == NULL)
t = "text/plain";
real_type = t;
if (f.guess_type)
t = f.guess_type;
}
/* XXX: can we use guess_type to give the type to loadHTMLstream
* to support default utf8 encoding for XHTML here? */
f.guess_type = t;
page_loaded:
if (page) {
FILE *src;
#ifdef USE_IMAGE
if (image_source)
return NULL;
#endif
tmp = tmpfname(TMPF_SRC, ".html");
src = fopen(tmp->ptr, "w");
if (src) {
Str s;
s = wc_Str_conv_strict(page, InnerCharset, charset);
Strfputs(s, src);
fclose(src);
}
if (do_download) {
char *file;
if (!src)
return NULL;
file = guess_filename(pu.file);
#ifdef USE_GOPHER
if (f.scheme == SCM_GOPHER)
file = Sprintf("%s.html", file)->ptr;
#endif
#ifdef USE_NNTP
if (f.scheme == SCM_NEWS_GROUP)
file = Sprintf("%s.html", file)->ptr;
#endif
doFileMove(tmp->ptr, file);
return NO_BUFFER;
}
b = loadHTMLString(page);
if (b) {
copyParsedURL(&b->currentURL, &pu);
b->real_scheme = pu.scheme;
b->real_type = t;
if (src)
b->sourcefile = tmp->ptr;
#ifdef USE_M17N
b->document_charset = charset;
#endif
}
return b;
}
if (real_type == NULL)
real_type = t;
proc = loadBuffer;
current_content_length = 0;
if ((p = checkHeader(t_buf, "Content-Length:")) != NULL)
current_content_length = strtoclen(p);
if (do_download) {
/* download only */
char *file;
TRAP_OFF;
if (DecodeCTE && IStype(f.stream) != IST_ENCODED)
f.stream = newEncodedStream(f.stream, f.encoding);
if (pu.scheme == SCM_LOCAL) {
struct stat st;
if (PreserveTimestamp && !stat(pu.real_file, &st))
f.modtime = st.st_mtime;
file = conv_from_system(guess_save_name(NULL, pu.real_file));
}
else
file = guess_save_name(t_buf, pu.file);
if (doFileSave(f, file) == 0)
UFhalfclose(&f);
else
UFclose(&f);
return NO_BUFFER;
}
if ((f.content_encoding != CMP_NOCOMPRESS) && AutoUncompress
&& !(w3m_dump & DUMP_EXTRA)) {
uncompress_stream(&f, &pu.real_file);
}
else if (f.compression != CMP_NOCOMPRESS) {
if (!(w3m_dump & DUMP_SOURCE) &&
(w3m_dump & ~DUMP_FRAME || is_text_type(t)
|| searchExtViewer(t))) {
if (t_buf == NULL)
t_buf = newBuffer(INIT_BUFFER_WIDTH);
uncompress_stream(&f, &t_buf->sourcefile);
uncompressed_file_type(pu.file, &f.ext);
}
else {
t = compress_application_type(f.compression);
f.compression = CMP_NOCOMPRESS;
}
}
#ifdef USE_IMAGE
if (image_source) {
Buffer *b = NULL;
if (IStype(f.stream) != IST_ENCODED)
f.stream = newEncodedStream(f.stream, f.encoding);
if (save2tmp(f, image_source) == 0) {
b = newBuffer(INIT_BUFFER_WIDTH);
b->sourcefile = image_source;
b->real_type = t;
}
UFclose(&f);
TRAP_OFF;
return b;
}
#endif
if (is_html_type(t))
proc = loadHTMLBuffer;
else if (is_plain_text_type(t))
proc = loadBuffer;
#ifdef USE_IMAGE
else if (activeImage && displayImage && !useExtImageViewer &&
!(w3m_dump & ~DUMP_FRAME) && !strncasecmp(t, "image/", 6))
proc = loadImageBuffer;
#endif
else if (w3m_backend) ;
else if (!(w3m_dump & ~DUMP_FRAME) || is_dump_text_type(t)) {
if (!do_download && searchExtViewer(t) != NULL) {
proc = DO_EXTERNAL;
}
else {
TRAP_OFF;
if (pu.scheme == SCM_LOCAL) {
UFclose(&f);
_doFileCopy(pu.real_file,
conv_from_system(guess_save_name
(NULL, pu.real_file)), TRUE);
}
else {
if (DecodeCTE && IStype(f.stream) != IST_ENCODED)
f.stream = newEncodedStream(f.stream, f.encoding);
if (doFileSave(f, guess_save_name(t_buf, pu.file)) == 0)
UFhalfclose(&f);
else
UFclose(&f);
}
return NO_BUFFER;
}
}
else if (w3m_dump & DUMP_FRAME)
return NULL;
if (t_buf == NULL)
t_buf = newBuffer(INIT_BUFFER_WIDTH);
copyParsedURL(&t_buf->currentURL, &pu);
t_buf->filename = pu.real_file ? pu.real_file :
pu.file ? conv_to_system(pu.file) : NULL;
if (flag & RG_FRAME) {
t_buf->bufferprop |= BP_FRAME;
}
#ifdef USE_SSL
t_buf->ssl_certificate = f.ssl_certificate;
#endif
frame_source = flag & RG_FRAME_SRC;
if (proc == DO_EXTERNAL) {
b = doExternal(f, t, t_buf);
} else {
b = loadSomething(&f, proc, t_buf);
}
UFclose(&f);
frame_source = 0;
if (b && b != NO_BUFFER) {
b->real_scheme = f.scheme;
b->real_type = real_type;
if (w3m_backend)
b->type = allocStr(t, -1);
if (pu.label) {
if (proc == loadHTMLBuffer) {
Anchor *a;
a = searchURLLabel(b, pu.label);
if (a != NULL) {
gotoLine(b, a->start.line);
if (label_topline)
b->topLine = lineSkip(b, b->topLine,
b->currentLine->linenumber
- b->topLine->linenumber, FALSE);
b->pos = a->start.pos;
arrangeCursor(b);
}
}
else { /* plain text */
int l = atoi(pu.label);
gotoRealLine(b, l);
b->pos = 0;
arrangeCursor(b);
}
}
}
if (header_string)
header_string = NULL;
#ifdef USE_NNTP
if (b && b != NO_BUFFER && (f.scheme == SCM_NNTP || f.scheme == SCM_NEWS))
reAnchorNewsheader(b);
#endif
if (b && b != NO_BUFFER)
preFormUpdateBuffer(b);
TRAP_OFF;
return b;
}
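/* Hedged call sketch, compiled out: loadGeneralFile() is invoked the same
 * way as in the DirBufferCommand branch above; the URL is illustrative.
 */
#if 0
static Buffer *demo_load_url(void)
{
    Buffer *b = loadGeneralFile("http://example.com/", NULL, NO_REFERER,
                                0, NULL);
    return (b == NULL || b == NO_BUFFER) ? NULL : b;
}
#endif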
#define TAG_IS(s,tag,len)\
(strncasecmp(s,tag,len)==0&&(s[len] == '>' || IS_SPACE((int)s[len])))
static char *
has_hidden_link(struct readbuffer *obuf, int cmd)
{
Str line = obuf->line;
struct link_stack *p;
if (Strlastchar(line) != '>')
return NULL;
for (p = link_stack; p; p = p->next)
if (p->cmd == cmd)
break;
if (!p)
return NULL;
if (obuf->pos == p->pos)
return line->ptr + p->offset;
return NULL;
}
static void
push_link(int cmd, int offset, int pos)
{
struct link_stack *p;
p = New(struct link_stack);
p->cmd = cmd;
p->offset = offset;
p->pos = pos;
p->next = link_stack;
link_stack = p;
}
static int
is_period_char(unsigned char *ch)
{
switch (*ch) {
case ',':
case '.':
case ':':
case ';':
case '?':
case '!':
case ')':
case ']':
case '}':
case '>':
return 1;
default:
return 0;
}
}
static int
is_beginning_char(unsigned char *ch)
{
switch (*ch) {
case '(':
case '[':
case '{':
case '`':
case '<':
return 1;
default:
return 0;
}
}
static int
is_word_char(unsigned char *ch)
{
Lineprop ctype = get_mctype(ch);
#ifdef USE_M17N
if (ctype & (PC_CTRL | PC_KANJI | PC_UNKNOWN))
return 0;
if (ctype & (PC_WCHAR1 | PC_WCHAR2))
return 1;
#else
if (ctype == PC_CTRL)
return 0;
#endif
if (IS_ALNUM(*ch))
return 1;
switch (*ch) {
case ',':
case '.':
case ':':
case '\"': /* " */
case '\'':
case '$':
case '%':
case '*':
case '+':
case '-':
case '@':
case '~':
case '_':
return 1;
}
#ifdef USE_M17N
if (*ch == NBSP_CODE)
return 1;
#else
if (*ch == TIMES_CODE || *ch == DIVIDE_CODE || *ch == ANSP_CODE)
return 0;
if (*ch >= AGRAVE_CODE || *ch == NBSP_CODE)
return 1;
#endif
return 0;
}
#ifdef USE_M17N
static int
is_combining_char(unsigned char *ch)
{
Lineprop ctype = get_mctype(ch);
if (ctype & PC_WCHAR2)
return 1;
return 0;
}
#endif
int
is_boundary(unsigned char *ch1, unsigned char *ch2)
{
if (!*ch1 || !*ch2)
return 1;
if (*ch1 == ' ' && *ch2 == ' ')
return 0;
if (*ch1 != ' ' && is_period_char(ch2))
return 0;
if (*ch2 != ' ' && is_beginning_char(ch1))
return 0;
#ifdef USE_M17N
if (is_combining_char(ch2))
return 0;
#endif
if (is_word_char(ch1) && is_word_char(ch2))
return 0;
return 1;
}
static void
set_breakpoint(struct readbuffer *obuf, int tag_length)
{
obuf->bp.len = obuf->line->length;
obuf->bp.pos = obuf->pos;
obuf->bp.tlen = tag_length;
obuf->bp.flag = obuf->flag;
#ifdef FORMAT_NICE
obuf->bp.flag &= ~RB_FILL;
#endif /* FORMAT_NICE */
obuf->bp.top_margin = obuf->top_margin;
obuf->bp.bottom_margin = obuf->bottom_margin;
if (!obuf->bp.init_flag)
return;
bcopy((void *)&obuf->anchor, (void *)&obuf->bp.anchor,
sizeof(obuf->anchor));
obuf->bp.img_alt = obuf->img_alt;
obuf->bp.input_alt = obuf->input_alt;
obuf->bp.in_bold = obuf->in_bold;
obuf->bp.in_italic = obuf->in_italic;
obuf->bp.in_under = obuf->in_under;
obuf->bp.in_strike = obuf->in_strike;
obuf->bp.in_ins = obuf->in_ins;
obuf->bp.nobr_level = obuf->nobr_level;
obuf->bp.prev_ctype = obuf->prev_ctype;
obuf->bp.init_flag = 0;
}
static void
back_to_breakpoint(struct readbuffer *obuf)
{
obuf->flag = obuf->bp.flag;
bcopy((void *)&obuf->bp.anchor, (void *)&obuf->anchor,
sizeof(obuf->anchor));
obuf->img_alt = obuf->bp.img_alt;
obuf->input_alt = obuf->bp.input_alt;
obuf->in_bold = obuf->bp.in_bold;
obuf->in_italic = obuf->bp.in_italic;
obuf->in_under = obuf->bp.in_under;
obuf->in_strike = obuf->bp.in_strike;
obuf->in_ins = obuf->bp.in_ins;
obuf->prev_ctype = obuf->bp.prev_ctype;
obuf->pos = obuf->bp.pos;
obuf->top_margin = obuf->bp.top_margin;
obuf->bottom_margin = obuf->bp.bottom_margin;
if (obuf->flag & RB_NOBR)
obuf->nobr_level = obuf->bp.nobr_level;
}
static void
append_tags(struct readbuffer *obuf)
{
int i;
int len = obuf->line->length;
int set_bp = 0;
for (i = 0; i < obuf->tag_sp; i++) {
switch (obuf->tag_stack[i]->cmd) {
case HTML_A:
case HTML_IMG_ALT:
case HTML_B:
case HTML_U:
case HTML_I:
case HTML_S:
push_link(obuf->tag_stack[i]->cmd, obuf->line->length, obuf->pos);
break;
}
Strcat_charp(obuf->line, obuf->tag_stack[i]->cmdname);
switch (obuf->tag_stack[i]->cmd) {
case HTML_NOBR:
if (obuf->nobr_level > 1)
break;
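/* fall through */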
case HTML_WBR:
set_bp = 1;
break;
}
}
obuf->tag_sp = 0;
if (set_bp)
set_breakpoint(obuf, obuf->line->length - len);
}
static void
push_tag(struct readbuffer *obuf, char *cmdname, int cmd)
{
obuf->tag_stack[obuf->tag_sp] = New(struct cmdtable);
obuf->tag_stack[obuf->tag_sp]->cmdname = allocStr(cmdname, -1);
obuf->tag_stack[obuf->tag_sp]->cmd = cmd;
obuf->tag_sp++;
if (obuf->tag_sp >= TAG_STACK_SIZE || obuf->flag & (RB_SPECIAL & ~RB_NOBR))
append_tags(obuf);
}
static void
push_nchars(struct readbuffer *obuf, int width,
char *str, int len, Lineprop mode)
{
append_tags(obuf);
Strcat_charp_n(obuf->line, str, len);
obuf->pos += width;
if (width > 0) {
set_prevchar(obuf->prevchar, str, len);
obuf->prev_ctype = mode;
}
obuf->flag |= RB_NFLUSHED;
}
#define push_charp(obuf, width, str, mode)\
push_nchars(obuf, width, str, strlen(str), mode)
#define push_str(obuf, width, str, mode)\
push_nchars(obuf, width, str->ptr, str->length, mode)
static void
check_breakpoint(struct readbuffer *obuf, int pre_mode, char *ch)
{
int tlen, len = obuf->line->length;
append_tags(obuf);
if (pre_mode)
return;
tlen = obuf->line->length - len;
if (tlen > 0
|| is_boundary((unsigned char *)obuf->prevchar->ptr,
(unsigned char *)ch))
set_breakpoint(obuf, tlen);
}
static void
push_char(struct readbuffer *obuf, int pre_mode, char ch)
{
check_breakpoint(obuf, pre_mode, &ch);
Strcat_char(obuf->line, ch);
obuf->pos++;
set_prevchar(obuf->prevchar, &ch, 1);
if (ch != ' ')
obuf->prev_ctype = PC_ASCII;
obuf->flag |= RB_NFLUSHED;
}
#define PUSH(c) push_char(obuf, obuf->flag & RB_SPECIAL, c)
static void
push_spaces(struct readbuffer *obuf, int pre_mode, int width)
{
int i;
if (width <= 0)
return;
check_breakpoint(obuf, pre_mode, " ");
for (i = 0; i < width; i++)
Strcat_char(obuf->line, ' ');
obuf->pos += width;
set_space_to_prevchar(obuf->prevchar);
obuf->flag |= RB_NFLUSHED;
}
static void
proc_mchar(struct readbuffer *obuf, int pre_mode,
int width, char **str, Lineprop mode)
{
check_breakpoint(obuf, pre_mode, *str);
obuf->pos += width;
Strcat_charp_n(obuf->line, *str, get_mclen(*str));
if (width > 0) {
set_prevchar(obuf->prevchar, *str, 1);
if (**str != ' ')
obuf->prev_ctype = mode;
}
(*str) += get_mclen(*str);
obuf->flag |= RB_NFLUSHED;
}
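/* push_render_image: center str (of display width `width') within `limit'
 * columns, then flush the line. */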
void
push_render_image(Str str, int width, int limit,
struct html_feed_environ *h_env)
{
struct readbuffer *obuf = h_env->obuf;
int indent = h_env->envs[h_env->envc].indent;
push_spaces(obuf, 1, (limit - width) / 2);
push_str(obuf, width, str, PC_ASCII);
push_spaces(obuf, 1, (limit - width + 1) / 2);
if (width > 0)
flushline(h_env, obuf, indent, 0, h_env->limit);
}
static int
sloppy_parse_line(char **str)
{
if (**str == '<') {
while (**str && **str != '>')
(*str)++;
if (**str == '>')
(*str)++;
return 1;
}
else {
while (**str && **str != '<')
(*str)++;
return 0;
}
}
static void
passthrough(struct readbuffer *obuf, char *str, int back)
{
int cmd;
Str tok = Strnew();
char *str_bak;
if (back) {
Str str_save = Strnew_charp(str);
Strshrink(obuf->line, obuf->line->ptr + obuf->line->length - str);
str = str_save->ptr;
}
while (*str) {
str_bak = str;
if (sloppy_parse_line(&str)) {
char *q = str_bak;
cmd = gethtmlcmd(&q);
if (back) {
struct link_stack *p;
for (p = link_stack; p; p = p->next) {
if (p->cmd == cmd) {
link_stack = p->next;
break;
}
}
back = 0;
}
else {
Strcat_charp_n(tok, str_bak, str - str_bak);
push_tag(obuf, tok->ptr, cmd);
Strclear(tok);
}
}
else {
push_nchars(obuf, 0, str_bak, str - str_bak, obuf->prev_ctype);
}
}
}
#if 0
int
is_blank_line(char *line, int indent)
{
int i, is_blank = 0;
for (i = 0; i < indent; i++) {
if (line[i] == '\0') {
is_blank = 1;
}
else if (line[i] != ' ') {
break;
}
}
if (i == indent && line[i] == '\0')
is_blank = 1;
return is_blank;
}
#endif
void
fillline(struct readbuffer *obuf, int indent)
{
push_spaces(obuf, 1, indent - obuf->pos);
obuf->flag &= ~RB_NFLUSHED;
}
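/*
 * Flush the current line: emit it into h_env->buf (or h_env->f),
 * applying alignment and, with FORMAT_NICE, optional fill-justification,
 * then reset obuf for the next line. Closing tags are appended for every
 * effect (anchor, img_alt, input_alt, b/i/u/s/ins) still open at the end
 * of the line, and matching opening tags are re-emitted on the fresh
 * line. Effects whose opening tag has produced no visible text yet
 * ("hidden" links) are instead cut off the line and re-injected verbatim
 * via passthrough(). Positive top_margin/bottom_margin produce padding
 * lines before/after the flushed line; force==1 flushes even an
 * unmodified line and force==2 emits pending tags immediately.
 */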
void
flushline(struct html_feed_environ *h_env, struct readbuffer *obuf, int indent,
int force, int width)
{
TextLineList *buf = h_env->buf;
FILE *f = h_env->f;
Str line = obuf->line, pass = NULL;
char *hidden_anchor = NULL, *hidden_img = NULL, *hidden_bold = NULL,
*hidden_under = NULL, *hidden_italic = NULL, *hidden_strike = NULL,
*hidden_ins = NULL, *hidden_input = NULL, *hidden = NULL;
#ifdef DEBUG
if (w3m_debug) {
FILE *df = fopen("zzzproc1", "a");
fprintf(df, "flushline(%s,%d,%d,%d)\n", obuf->line->ptr, indent, force,
width);
if (buf) {
TextLineListItem *p;
for (p = buf->first; p; p = p->next) {
fprintf(df, "buf=\"%s\"\n", p->ptr->line->ptr);
}
}
fclose(df);
}
#endif
if (!(obuf->flag & (RB_SPECIAL & ~RB_NOBR)) && Strlastchar(line) == ' ') {
Strshrink(line, 1);
obuf->pos--;
}
append_tags(obuf);
if (obuf->anchor.url)
hidden = hidden_anchor = has_hidden_link(obuf, HTML_A);
if (obuf->img_alt) {
if ((hidden_img = has_hidden_link(obuf, HTML_IMG_ALT)) != NULL) {
if (!hidden || hidden_img < hidden)
hidden = hidden_img;
}
}
if (obuf->input_alt.in) {
if ((hidden_input = has_hidden_link(obuf, HTML_INPUT_ALT)) != NULL) {
if (!hidden || hidden_input < hidden)
hidden = hidden_input;
}
}
if (obuf->in_bold) {
if ((hidden_bold = has_hidden_link(obuf, HTML_B)) != NULL) {
if (!hidden || hidden_bold < hidden)
hidden = hidden_bold;
}
}
if (obuf->in_italic) {
if ((hidden_italic = has_hidden_link(obuf, HTML_I)) != NULL) {
if (!hidden || hidden_italic < hidden)
hidden = hidden_italic;
}
}
if (obuf->in_under) {
if ((hidden_under = has_hidden_link(obuf, HTML_U)) != NULL) {
if (!hidden || hidden_under < hidden)
hidden = hidden_under;
}
}
if (obuf->in_strike) {
if ((hidden_strike = has_hidden_link(obuf, HTML_S)) != NULL) {
if (!hidden || hidden_strike < hidden)
hidden = hidden_strike;
}
}
if (obuf->in_ins) {
if ((hidden_ins = has_hidden_link(obuf, HTML_INS)) != NULL) {
if (!hidden || hidden_ins < hidden)
hidden = hidden_ins;
}
}
if (hidden) {
pass = Strnew_charp(hidden);
Strshrink(line, line->ptr + line->length - hidden);
}
if (!(obuf->flag & (RB_SPECIAL & ~RB_NOBR)) && obuf->pos > width) {
char *tp = &line->ptr[obuf->bp.len - obuf->bp.tlen];
char *ep = &line->ptr[line->length];
if (obuf->bp.pos == obuf->pos && tp <= ep &&
tp > line->ptr && tp[-1] == ' ') {
bcopy(tp, tp - 1, ep - tp + 1);
line->length--;
obuf->pos--;
}
}
if (obuf->anchor.url && !hidden_anchor)
Strcat_charp(line, "</a>");
if (obuf->img_alt && !hidden_img)
Strcat_charp(line, "</img_alt>");
if (obuf->input_alt.in && !hidden_input)
Strcat_charp(line, "</input_alt>");
if (obuf->in_bold && !hidden_bold)
Strcat_charp(line, "</b>");
if (obuf->in_italic && !hidden_italic)
Strcat_charp(line, "</i>");
if (obuf->in_under && !hidden_under)
Strcat_charp(line, "</u>");
if (obuf->in_strike && !hidden_strike)
Strcat_charp(line, "</s>");
if (obuf->in_ins && !hidden_ins)
Strcat_charp(line, "</ins>");
if (obuf->top_margin > 0) {
int i;
struct html_feed_environ h;
struct readbuffer o;
struct environment e[1];
init_henv(&h, &o, e, 1, NULL, width, indent);
o.line = Strnew_size(width + 20);
o.pos = obuf->pos;
o.flag = obuf->flag;
o.top_margin = -1;
o.bottom_margin = -1;
Strcat_charp(o.line, "<pre_int>");
for (i = 0; i < o.pos; i++)
Strcat_char(o.line, ' ');
Strcat_charp(o.line, "</pre_int>");
for (i = 0; i < obuf->top_margin; i++)
flushline(h_env, &o, indent, force, width);
}
if (force == 1 || obuf->flag & RB_NFLUSHED) {
TextLine *lbuf = newTextLine(line, obuf->pos);
if (RB_GET_ALIGN(obuf) == RB_CENTER) {
align(lbuf, width, ALIGN_CENTER);
}
else if (RB_GET_ALIGN(obuf) == RB_RIGHT) {
align(lbuf, width, ALIGN_RIGHT);
}
else if (RB_GET_ALIGN(obuf) == RB_LEFT && obuf->flag & RB_INTABLE) {
align(lbuf, width, ALIGN_LEFT);
}
#ifdef FORMAT_NICE
else if (obuf->flag & RB_FILL) {
char *p;
int rest, rrest;
int nspace, d, i;
rest = width - get_Str_strwidth(line);
if (rest > 1) {
nspace = 0;
for (p = line->ptr + indent; *p; p++) {
if (*p == ' ')
nspace++;
}
if (nspace > 0) {
int indent_here = 0;
d = rest / nspace;
p = line->ptr;
while (IS_SPACE(*p)) {
p++;
indent_here++;
}
rrest = rest - d * nspace;
line = Strnew_size(width + 1);
for (i = 0; i < indent_here; i++)
Strcat_char(line, ' ');
for (; *p; p++) {
Strcat_char(line, *p);
if (*p == ' ') {
for (i = 0; i < d; i++)
Strcat_char(line, ' ');
if (rrest > 0) {
Strcat_char(line, ' ');
rrest--;
}
}
}
lbuf = newTextLine(line, width);
}
}
}
#endif /* FORMAT_NICE */
#ifdef TABLE_DEBUG
if (w3m_debug) {
FILE *f = fopen("zzzproc1", "a");
fprintf(f, "pos=%d,%d, maxlimit=%d\n",
visible_length(lbuf->line->ptr), lbuf->pos,
h_env->maxlimit);
fclose(f);
}
#endif
if (lbuf->pos > h_env->maxlimit)
h_env->maxlimit = lbuf->pos;
if (buf)
pushTextLine(buf, lbuf);
else if (f) {
Strfputs(Str_conv_to_halfdump(lbuf->line), f);
fputc('\n', f);
}
if (obuf->flag & RB_SPECIAL || obuf->flag & RB_NFLUSHED)
h_env->blank_lines = 0;
else
h_env->blank_lines++;
}
else {
char *p = line->ptr, *q;
Str tmp = Strnew(), tmp2 = Strnew();
#define APPEND(str) \
if (buf) \
appendTextLine(buf,(str),0); \
else if (f) \
Strfputs((str),f)
while (*p) {
q = p;
if (sloppy_parse_line(&p)) {
Strcat_charp_n(tmp, q, p - q);
if (force == 2) {
APPEND(tmp);
}
else
Strcat(tmp2, tmp);
Strclear(tmp);
}
}
if (force == 2) {
if (pass) {
APPEND(pass);
}
pass = NULL;
}
else {
if (pass)
Strcat(tmp2, pass);
pass = tmp2;
}
}
if (obuf->bottom_margin > 0) {
int i;
struct html_feed_environ h;
struct readbuffer o;
struct environment e[1];
init_henv(&h, &o, e, 1, NULL, width, indent);
o.line = Strnew_size(width + 20);
o.pos = obuf->pos;
o.flag = obuf->flag;
o.top_margin = -1;
o.bottom_margin = -1;
Strcat_charp(o.line, "<pre_int>");
for (i = 0; i < o.pos; i++)
Strcat_char(o.line, ' ');
Strcat_charp(o.line, "</pre_int>");
for (i = 0; i < obuf->bottom_margin; i++)
flushline(h_env, &o, indent, force, width);
}
if (obuf->top_margin < 0 || obuf->bottom_margin < 0)
return;
obuf->line = Strnew_size(256);
obuf->pos = 0;
obuf->top_margin = 0;
obuf->bottom_margin = 0;
set_space_to_prevchar(obuf->prevchar);
obuf->bp.init_flag = 1;
obuf->flag &= ~RB_NFLUSHED;
set_breakpoint(obuf, 0);
obuf->prev_ctype = PC_ASCII;
link_stack = NULL;
fillline(obuf, indent);
if (pass)
passthrough(obuf, pass->ptr, 0);
if (!hidden_anchor && obuf->anchor.url) {
Str tmp;
if (obuf->anchor.hseq > 0)
obuf->anchor.hseq = -obuf->anchor.hseq;
tmp = Sprintf("<A HSEQ=\"%d\" HREF=\"", obuf->anchor.hseq);
Strcat_charp(tmp, html_quote(obuf->anchor.url));
if (obuf->anchor.target) {
Strcat_charp(tmp, "\" TARGET=\"");
Strcat_charp(tmp, html_quote(obuf->anchor.target));
}
if (obuf->anchor.referer) {
Strcat_charp(tmp, "\" REFERER=\"");
Strcat_charp(tmp, html_quote(obuf->anchor.referer));
}
if (obuf->anchor.title) {
Strcat_charp(tmp, "\" TITLE=\"");
Strcat_charp(tmp, html_quote(obuf->anchor.title));
}
if (obuf->anchor.accesskey) {
char *c = html_quote_char(obuf->anchor.accesskey);
Strcat_charp(tmp, "\" ACCESSKEY=\"");
if (c)
Strcat_charp(tmp, c);
else
Strcat_char(tmp, obuf->anchor.accesskey);
}
Strcat_charp(tmp, "\">");
push_tag(obuf, tmp->ptr, HTML_A);
}
if (!hidden_img && obuf->img_alt) {
Str tmp = Strnew_charp("<IMG_ALT SRC=\"");
Strcat_charp(tmp, html_quote(obuf->img_alt->ptr));
Strcat_charp(tmp, "\">");
push_tag(obuf, tmp->ptr, HTML_IMG_ALT);
}
if (!hidden_input && obuf->input_alt.in) {
Str tmp;
if (obuf->input_alt.hseq > 0)
obuf->input_alt.hseq = - obuf->input_alt.hseq;
tmp = Sprintf("<INPUT_ALT hseq=\"%d\" fid=\"%d\" name=\"%s\" type=\"%s\" value=\"%s\">",
obuf->input_alt.hseq,
obuf->input_alt.fid,
obuf->input_alt.name ? obuf->input_alt.name->ptr : "",
obuf->input_alt.type ? obuf->input_alt.type->ptr : "",
obuf->input_alt.value ? obuf->input_alt.value->ptr : "");
push_tag(obuf, tmp->ptr, HTML_INPUT_ALT);
}
if (!hidden_bold && obuf->in_bold)
push_tag(obuf, "<B>", HTML_B);
if (!hidden_italic && obuf->in_italic)
push_tag(obuf, "<I>", HTML_I);
if (!hidden_under && obuf->in_under)
push_tag(obuf, "<U>", HTML_U);
if (!hidden_strike && obuf->in_strike)
push_tag(obuf, "<S>", HTML_S);
if (!hidden_ins && obuf->in_ins)
push_tag(obuf, "<INS>", HTML_INS);
}
void
do_blankline(struct html_feed_environ *h_env, struct readbuffer *obuf,
int indent, int indent_incr, int width)
{
if (h_env->blank_lines == 0)
flushline(h_env, obuf, indent, 1, width);
}
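/*
 * Drop the most recent blank line from the output list, re-appending
 * only the tags it contained as a zero-width line.
 */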
void
purgeline(struct html_feed_environ *h_env)
{
char *p, *q;
Str tmp;
if (h_env->buf == NULL || h_env->blank_lines == 0)
return;
p = rpopTextLine(h_env->buf)->line->ptr;
tmp = Strnew();
while (*p) {
q = p;
if (sloppy_parse_line(&p)) {
Strcat_charp_n(tmp, q, p - q);
}
}
appendTextLine(h_env->buf, tmp, 0);
h_env->blank_lines--;
}
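/*
 * Close one occurrence of effect cmd: remove it from the pending-tag
 * stack if it has not been emitted yet, or re-play a hidden opening tag
 * via passthrough(). Returns 1 if something was closed, 0 otherwise.
 */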
static int
close_effect0(struct readbuffer *obuf, int cmd)
{
int i;
char *p;
for (i = obuf->tag_sp - 1; i >= 0; i--) {
if (obuf->tag_stack[i]->cmd == cmd)
break;
}
if (i >= 0) {
obuf->tag_sp--;
bcopy(&obuf->tag_stack[i + 1], &obuf->tag_stack[i],
(obuf->tag_sp - i) * sizeof(struct cmdtable *));
return 1;
}
else if ((p = has_hidden_link(obuf, cmd)) != NULL) {
passthrough(obuf, p, 1);
return 1;
}
return 0;
}
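/*
 * Close the currently open anchor, if any. A numbered anchor (hseq > 0)
 * that would otherwise contain no visible text gets an ANSP placeholder
 * so it remains selectable; an unnumbered empty anchor is discarded.
 * Otherwise a </a> tag is pushed.
 */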
static void
close_anchor(struct html_feed_environ *h_env, struct readbuffer *obuf)
{
if (obuf->anchor.url) {
int i;
char *p = NULL;
int is_erased = 0;
for (i = obuf->tag_sp - 1; i >= 0; i--) {
if (obuf->tag_stack[i]->cmd == HTML_A)
break;
}
if (i < 0 && obuf->anchor.hseq > 0 && Strlastchar(obuf->line) == ' ') {
Strshrink(obuf->line, 1);
obuf->pos--;
is_erased = 1;
}
if (i >= 0 || (p = has_hidden_link(obuf, HTML_A))) {
if (obuf->anchor.hseq > 0) {
HTMLlineproc1(ANSP, h_env);
set_space_to_prevchar(obuf->prevchar);
}
else {
if (i >= 0) {
obuf->tag_sp--;
bcopy(&obuf->tag_stack[i + 1], &obuf->tag_stack[i],
(obuf->tag_sp - i) * sizeof(struct cmdtable *));
}
else {
passthrough(obuf, p, 1);
}
bzero((void *)&obuf->anchor, sizeof(obuf->anchor));
return;
}
is_erased = 0;
}
if (is_erased) {
Strcat_char(obuf->line, ' ');
obuf->pos++;
}
push_tag(obuf, "</a>", HTML_N_A);
}
bzero((void *)&obuf->anchor, sizeof(obuf->anchor));
}
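/*
 * Save the current font effects (b/i/u/s/ins) on a stack, capped at
 * FONT_STACK_SIZE, and emit the matching closing tags so that a nested
 * construct starts from a clean state; restore_fonteffect() undoes this.
 */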
void
save_fonteffect(struct html_feed_environ *h_env, struct readbuffer *obuf)
{
if (obuf->fontstat_sp < FONT_STACK_SIZE)
bcopy(obuf->fontstat, obuf->fontstat_stack[obuf->fontstat_sp],
FONTSTAT_SIZE);
obuf->fontstat_sp++;
if (obuf->in_bold)
push_tag(obuf, "</b>", HTML_N_B);
if (obuf->in_italic)
push_tag(obuf, "</i>", HTML_N_I);
if (obuf->in_under)
push_tag(obuf, "</u>", HTML_N_U);
if (obuf->in_strike)
push_tag(obuf, "</s>", HTML_N_S);
if (obuf->in_ins)
push_tag(obuf, "</ins>", HTML_N_INS);
bzero(obuf->fontstat, FONTSTAT_SIZE);
}
void
restore_fonteffect(struct html_feed_environ *h_env, struct readbuffer *obuf)
{
if (obuf->fontstat_sp > 0)
obuf->fontstat_sp--;
if (obuf->fontstat_sp < FONT_STACK_SIZE)
bcopy(obuf->fontstat_stack[obuf->fontstat_sp], obuf->fontstat,
FONTSTAT_SIZE);
if (obuf->in_bold)
push_tag(obuf, "<b>", HTML_B);
if (obuf->in_italic)
push_tag(obuf, "<i>", HTML_I);
if (obuf->in_under)
push_tag(obuf, "<u>", HTML_U);
if (obuf->in_strike)
push_tag(obuf, "<s>", HTML_S);
if (obuf->in_ins)
push_tag(obuf, "<ins>", HTML_INS);
}
static Str
process_title(struct parsed_tag *tag)
{
cur_title = Strnew();
return NULL;
}
static Str
process_n_title(struct parsed_tag *tag)
{
Str tmp;
if (!cur_title)
return NULL;
Strremovefirstspaces(cur_title);
Strremovetrailingspaces(cur_title);
tmp = Strnew_m_charp("<title_alt title=\"",
html_quote(cur_title->ptr), "\">", NULL);
cur_title = NULL;
return tmp;
}
static void
feed_title(char *str)
{
if (!cur_title)
return;
while (*str) {
if (*str == '&')
Strcat_charp(cur_title, getescapecmd(&str));
else if (*str == '\n' || *str == '\r') {
Strcat_char(cur_title, ' ');
str++;
}
else
Strcat_char(cur_title, *(str++));
}
}
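/*
 * Translate an <img> tag into the internal <img_alt> representation.
 * The ALT text is used when available; otherwise the image is guessed
 * to be an icon, spacer or horizontal rule from its pixel dimensions,
 * or rendered as "[basename]". With USE_IMAGE, width and height are
 * converted into character-cell counts plus offsets and margins for
 * inline image display.
 */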
Str
process_img(struct parsed_tag *tag, int width)
{
char *p, *q, *r, *r2 = NULL, *s, *t;
#ifdef USE_IMAGE
int w, i, nw, ni = 1, n, w0 = -1, i0 = -1;
int align, xoffset, yoffset, top, bottom, ismap = 0;
int use_image = activeImage && displayImage;
#else
int w, i, nw, n;
#endif
int pre_int = FALSE, ext_pre_int = FALSE;
Str tmp = Strnew();
if (!parsedtag_get_value(tag, ATTR_SRC, &p))
return tmp;
p = url_encode(remove_space(p), cur_baseURL, cur_document_charset);
q = NULL;
parsedtag_get_value(tag, ATTR_ALT, &q);
if (!pseudoInlines && (q == NULL || (*q == '\0' && ignore_null_img_alt)))
return tmp;
t = q;
parsedtag_get_value(tag, ATTR_TITLE, &t);
w = -1;
if (parsedtag_get_value(tag, ATTR_WIDTH, &w)) {
if (w < 0) {
if (width > 0)
w = (int)(-width * pixel_per_char * w / 100 + 0.5);
else
w = -1;
}
#ifdef USE_IMAGE
if (use_image) {
if (w > 0) {
w = (int)(w * image_scale / 100 + 0.5);
if (w == 0)
w = 1;
else if (w > MAX_IMAGE_SIZE)
w = MAX_IMAGE_SIZE;
}
}
#endif
}
#ifdef USE_IMAGE
if (use_image) {
i = -1;
if (parsedtag_get_value(tag, ATTR_HEIGHT, &i)) {
if (i > 0) {
i = (int)(i * image_scale / 100 + 0.5);
if (i == 0)
i = 1;
else if (i > MAX_IMAGE_SIZE)
i = MAX_IMAGE_SIZE;
}
else {
i = -1;
}
}
align = -1;
parsedtag_get_value(tag, ATTR_ALIGN, &align);
ismap = 0;
if (parsedtag_exists(tag, ATTR_ISMAP))
ismap = 1;
}
else
#endif
{
/* Height is only used by the icon / horizontal-rule heuristics
* below; make sure it has a defined value even when no HEIGHT
* attribute is present. */
i = -1;
parsedtag_get_value(tag, ATTR_HEIGHT, &i);
}
r = NULL;
parsedtag_get_value(tag, ATTR_USEMAP, &r);
if (parsedtag_exists(tag, ATTR_PRE_INT))
ext_pre_int = TRUE;
tmp = Strnew_size(128);
#ifdef USE_IMAGE
if (use_image) {
switch (align) {
case ALIGN_LEFT:
Strcat_charp(tmp, "<div_int align=left>");
break;
case ALIGN_CENTER:
Strcat_charp(tmp, "<div_int align=center>");
break;
case ALIGN_RIGHT:
Strcat_charp(tmp, "<div_int align=right>");
break;
}
}
#endif
if (r) {
Str tmp2;
r2 = strchr(r, '#');
s = "<form_int method=internal action=map>";
tmp2 = process_form(parse_tag(&s, TRUE));
if (tmp2)
Strcat(tmp, tmp2);
Strcat(tmp, Sprintf("<input_alt fid=\"%d\" "
"type=hidden name=link value=\"", cur_form_id));
Strcat_charp(tmp, html_quote((r2) ? r2 + 1 : r));
Strcat(tmp, Sprintf("\"><input_alt hseq=\"%d\" fid=\"%d\" "
"type=submit no_effect=true>",
cur_hseq++, cur_form_id));
}
#ifdef USE_IMAGE
if (use_image) {
w0 = w;
i0 = i;
if (w < 0 || i < 0) {
Image image;
ParsedURL u;
parseURL2(p, &u, cur_baseURL);
image.url = parsedURL2Str(&u)->ptr;
if (!uncompressed_file_type(u.file, &image.ext))
image.ext = filename_extension(u.file, TRUE);
image.cache = NULL;
image.width = w;
image.height = i;
image.cache = getImage(&image, cur_baseURL, IMG_FLAG_SKIP);
if (image.cache && image.cache->width > 0 &&
image.cache->height > 0) {
w = w0 = image.cache->width;
i = i0 = image.cache->height;
}
if (w < 0)
w = 8 * pixel_per_char;
if (i < 0)
i = pixel_per_line;
}
if (enable_inline_image) {
nw = (w > 1) ? ((w - 1) / pixel_per_char_i + 1) : 1 ;
ni = (i > 1) ? ((i - 1) / pixel_per_line_i + 1) : 1 ;
}
else {
nw = (w > 3) ? (int)((w - 3) / pixel_per_char + 1) : 1;
ni = (i > 3) ? (int)((i - 3) / pixel_per_line + 1) : 1;
}
Strcat(tmp,
Sprintf("<pre_int><img_alt hseq=\"%d\" src=\"", cur_iseq++));
pre_int = TRUE;
}
else
#endif
{
if (w < 0)
w = 12 * pixel_per_char;
nw = w ? (int)((w - 1) / pixel_per_char + 1) : 1;
if (r) {
Strcat_charp(tmp, "<pre_int>");
pre_int = TRUE;
}
Strcat_charp(tmp, "<img_alt src=\"");
}
Strcat_charp(tmp, html_quote(p));
Strcat_charp(tmp, "\"");
if (t) {
Strcat_charp(tmp, " title=\"");
Strcat_charp(tmp, html_quote(t));
Strcat_charp(tmp, "\"");
}
#ifdef USE_IMAGE
if (use_image) {
if (w0 >= 0)
Strcat(tmp, Sprintf(" width=%d", w0));
if (i0 >= 0)
Strcat(tmp, Sprintf(" height=%d", i0));
switch (align) {
case ALIGN_MIDDLE:
if (!enable_inline_image) {
top = ni / 2;
bottom = top;
if (top * 2 == ni)
yoffset = (int)(((ni + 1) * pixel_per_line - i) / 2);
else
yoffset = (int)((ni * pixel_per_line - i) / 2);
break;
}
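/* fall through when inline images are enabled */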
case ALIGN_TOP:
top = 0;
bottom = ni - 1;
yoffset = 0;
break;
case ALIGN_BOTTOM:
top = ni - 1;
bottom = 0;
yoffset = (int)(ni * pixel_per_line - i);
break;
default:
top = ni - 1;
bottom = 0;
if (ni == 1 && ni * pixel_per_line > i)
yoffset = 0;
else {
yoffset = (int)(ni * pixel_per_line - i);
if (yoffset <= -2)
yoffset++;
}
break;
}
if (enable_inline_image)
xoffset = 0;
else
xoffset = (int)((nw * pixel_per_char - w) / 2);
if (xoffset)
Strcat(tmp, Sprintf(" xoffset=%d", xoffset));
if (yoffset)
Strcat(tmp, Sprintf(" yoffset=%d", yoffset));
if (top)
Strcat(tmp, Sprintf(" top_margin=%d", top));
if (bottom)
Strcat(tmp, Sprintf(" bottom_margin=%d", bottom));
if (r) {
Strcat_charp(tmp, " usemap=\"");
Strcat_charp(tmp, html_quote((r2) ? r2 + 1 : r));
Strcat_charp(tmp, "\"");
}
if (ismap)
Strcat_charp(tmp, " ismap");
}
#endif
Strcat_charp(tmp, ">");
if (q != NULL && *q == '\0' && ignore_null_img_alt)
q = NULL;
if (q != NULL) {
n = get_strwidth(q);
#ifdef USE_IMAGE
if (use_image) {
if (n > nw) {
char *r;
for (r = q, n = 0; r; r += get_mclen(r), n += get_mcwidth(r)) {
if (n + get_mcwidth(r) > nw)
break;
}
Strcat_charp(tmp, html_quote(Strnew_charp_n(q, r - q)->ptr));
}
else
Strcat_charp(tmp, html_quote(q));
}
else
#endif
Strcat_charp(tmp, html_quote(q));
goto img_end;
}
if (w > 0 && i > 0) {
/* guess what the image is! */
if (w < 32 && i < 48) {
/* must be an icon or space */
n = 1;
if (strcasestr(p, "space") || strcasestr(p, "blank"))
Strcat_charp(tmp, "_");
else {
if (w * i < 8 * 16)
Strcat_charp(tmp, "*");
else {
if (!pre_int) {
Strcat_charp(tmp, "<pre_int>");
pre_int = TRUE;
}
push_symbol(tmp, IMG_SYMBOL, symbol_width, 1);
n = symbol_width;
}
}
goto img_end;
}
if (w > 200 && i < 13) {
/* must be a horizontal line */
if (!pre_int) {
Strcat_charp(tmp, "<pre_int>");
pre_int = TRUE;
}
w = w / pixel_per_char / symbol_width;
if (w <= 0)
w = 1;
push_symbol(tmp, HR_SYMBOL, symbol_width, w);
n = w * symbol_width;
goto img_end;
}
}
for (q = p; *q; q++) ;
while (q > p && *q != '/')
q--;
if (*q == '/')
q++;
Strcat_char(tmp, '[');
n = 1;
p = q;
for (; *q; q++) {
if (!IS_ALNUM(*q) && *q != '_' && *q != '-') {
break;
}
Strcat_char(tmp, *q);
n++;
if (n + 1 >= nw)
break;
}
Strcat_char(tmp, ']');
n++;
img_end:
#ifdef USE_IMAGE
if (use_image) {
for (; n < nw; n++)
Strcat_char(tmp, ' ');
}
#endif
Strcat_charp(tmp, "</img_alt>");
if (pre_int && !ext_pre_int)
Strcat_charp(tmp, "</pre_int>");
if (r) {
Strcat_charp(tmp, "</input_alt>");
process_n_form();
}
#ifdef USE_IMAGE
if (use_image) {
switch (align) {
case ALIGN_RIGHT:
case ALIGN_CENTER:
case ALIGN_LEFT:
Strcat_charp(tmp, "</div_int>");
break;
}
}
#endif
return tmp;
}
Str
process_anchor(struct parsed_tag *tag, char *tagbuf)
{
if (parsedtag_need_reconstruct(tag)) {
parsedtag_set_value(tag, ATTR_HSEQ, Sprintf("%d", cur_hseq++)->ptr);
return parsedtag2str(tag);
}
else {
Str tmp = Sprintf("<a hseq=\"%d\"", cur_hseq++);
Strcat_charp(tmp, tagbuf + 2);
return tmp;
}
}
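/*
 * Translate an <input> tag into an <input_alt> construct with a textual
 * widget: "[ ]" around text/password/file/checkbox fields, "( )" around
 * radio buttons, "[label]" for submit/reset/button/image. Password
 * characters are masked with '*'; a checked checkbox or radio button
 * shows '*'. An enclosing form is synthesized if none is open.
 */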
Str
process_input(struct parsed_tag *tag)
{
int i = 20, v, x, y, z, iw, ih, size = 20;
char *q, *p, *r, *p2, *s;
Str tmp = NULL;
char *qq = "";
int qlen = 0;
if (cur_form_id < 0) {
char *s = "<form_int method=internal action=none>";
tmp = process_form(parse_tag(&s, TRUE));
}
if (tmp == NULL)
tmp = Strnew();
p = "text";
parsedtag_get_value(tag, ATTR_TYPE, &p);
q = NULL;
parsedtag_get_value(tag, ATTR_VALUE, &q);
r = "";
parsedtag_get_value(tag, ATTR_NAME, &r);
parsedtag_get_value(tag, ATTR_SIZE, &size);
if (size > MAX_INPUT_SIZE)
size = MAX_INPUT_SIZE;
parsedtag_get_value(tag, ATTR_MAXLENGTH, &i);
p2 = NULL;
parsedtag_get_value(tag, ATTR_ALT, &p2);
x = parsedtag_exists(tag, ATTR_CHECKED);
y = parsedtag_exists(tag, ATTR_ACCEPT);
z = parsedtag_exists(tag, ATTR_READONLY);
v = formtype(p);
if (v == FORM_UNKNOWN)
return NULL;
if (!q) {
switch (v) {
case FORM_INPUT_IMAGE:
case FORM_INPUT_SUBMIT:
case FORM_INPUT_BUTTON:
q = "SUBMIT";
break;
case FORM_INPUT_RESET:
q = "RESET";
break;
/* If no VALUE attribute is specified in an
* <INPUT TYPE=CHECKBOX> tag, the value "on" is used as the
* default. This is not part of the HTML 4.0 specification,
* but imitates Netscape's behaviour.
*/
case FORM_INPUT_CHECKBOX:
q = "on";
}
}
/* VALUE attribute is not allowed in <INPUT TYPE=FILE> tag. */
if (v == FORM_INPUT_FILE)
q = NULL;
if (q) {
qq = html_quote(q);
qlen = get_strwidth(q);
}
Strcat_charp(tmp, "<pre_int>");
switch (v) {
case FORM_INPUT_PASSWORD:
case FORM_INPUT_TEXT:
case FORM_INPUT_FILE:
case FORM_INPUT_CHECKBOX:
if (displayLinkNumber)
Strcat(tmp, getLinkNumberStr(0));
Strcat_char(tmp, '[');
break;
case FORM_INPUT_RADIO:
if (displayLinkNumber)
Strcat(tmp, getLinkNumberStr(0));
Strcat_char(tmp, '(');
}
Strcat(tmp, Sprintf("<input_alt hseq=\"%d\" fid=\"%d\" type=\"%s\" "
"name=\"%s\" width=%d maxlength=%d value=\"%s\"",
cur_hseq++, cur_form_id, html_quote(p),
html_quote(r), size, i, qq));
if (x)
Strcat_charp(tmp, " checked");
if (y)
Strcat_charp(tmp, " accept");
if (z)
Strcat_charp(tmp, " readonly");
Strcat_char(tmp, '>');
if (v == FORM_INPUT_HIDDEN)
Strcat_charp(tmp, "</input_alt></pre_int>");
else {
switch (v) {
case FORM_INPUT_PASSWORD:
case FORM_INPUT_TEXT:
case FORM_INPUT_FILE:
Strcat_charp(tmp, "<u>");
break;
case FORM_INPUT_IMAGE:
s = NULL;
parsedtag_get_value(tag, ATTR_SRC, &s);
if (s) {
Strcat(tmp, Sprintf("<img src=\"%s\"", html_quote(s)));
if (p2)
Strcat(tmp, Sprintf(" alt=\"%s\"", html_quote(p2)));
if (parsedtag_get_value(tag, ATTR_WIDTH, &iw))
Strcat(tmp, Sprintf(" width=\"%d\"", iw));
if (parsedtag_get_value(tag, ATTR_HEIGHT, &ih))
Strcat(tmp, Sprintf(" height=\"%d\"", ih));
Strcat_charp(tmp, " pre_int>");
Strcat_charp(tmp, "</input_alt></pre_int>");
return tmp;
}
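/* no SRC attribute: fall through and render like a button */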
case FORM_INPUT_SUBMIT:
case FORM_INPUT_BUTTON:
case FORM_INPUT_RESET:
if (displayLinkNumber)
Strcat(tmp, getLinkNumberStr(-1));
Strcat_charp(tmp, "[");
break;
}
switch (v) {
case FORM_INPUT_PASSWORD:
i = 0;
if (q) {
for (; i < qlen && i < size; i++)
Strcat_char(tmp, '*');
}
for (; i < size; i++)
Strcat_char(tmp, ' ');
break;
case FORM_INPUT_TEXT:
case FORM_INPUT_FILE:
if (q)
Strcat(tmp, textfieldrep(Strnew_charp(q), size));
else {
for (i = 0; i < size; i++)
Strcat_char(tmp, ' ');
}
break;
case FORM_INPUT_SUBMIT:
case FORM_INPUT_BUTTON:
if (p2)
Strcat_charp(tmp, html_quote(p2));
else
Strcat_charp(tmp, qq);
break;
case FORM_INPUT_RESET:
Strcat_charp(tmp, qq);
break;
case FORM_INPUT_RADIO:
case FORM_INPUT_CHECKBOX:
if (x)
Strcat_char(tmp, '*');
else
Strcat_char(tmp, ' ');
break;
}
switch (v) {
case FORM_INPUT_PASSWORD:
case FORM_INPUT_TEXT:
case FORM_INPUT_FILE:
Strcat_charp(tmp, "</u>");
break;
case FORM_INPUT_IMAGE:
case FORM_INPUT_SUBMIT:
case FORM_INPUT_BUTTON:
case FORM_INPUT_RESET:
Strcat_charp(tmp, "]");
}
Strcat_charp(tmp, "</input_alt>");
switch (v) {
case FORM_INPUT_PASSWORD:
case FORM_INPUT_TEXT:
case FORM_INPUT_FILE:
case FORM_INPUT_CHECKBOX:
Strcat_char(tmp, ']');
break;
case FORM_INPUT_RADIO:
Strcat_char(tmp, ')');
}
Strcat_charp(tmp, "</pre_int>");
}
return tmp;
}
Str
process_button(struct parsed_tag *tag)
{
Str tmp = NULL;
char *p, *q, *r, *qq = "";
int qlen, v;
if (cur_form_id < 0) {
char *s = "<form_int method=internal action=none>";
tmp = process_form(parse_tag(&s, TRUE));
}
if (tmp == NULL)
tmp = Strnew();
p = "submit";
parsedtag_get_value(tag, ATTR_TYPE, &p);
q = NULL;
parsedtag_get_value(tag, ATTR_VALUE, &q);
r = "";
parsedtag_get_value(tag, ATTR_NAME, &r);
v = formtype(p);
if (v == FORM_UNKNOWN)
return NULL;
switch (v) {
case FORM_INPUT_SUBMIT:
case FORM_INPUT_BUTTON:
case FORM_INPUT_RESET:
break;
default:
p = "submit";
v = FORM_INPUT_SUBMIT;
break;
}
if (!q) {
switch (v) {
case FORM_INPUT_SUBMIT:
case FORM_INPUT_BUTTON:
q = "SUBMIT";
break;
case FORM_INPUT_RESET:
q = "RESET";
break;
}
}
if (q) {
qq = html_quote(q);
qlen = strlen(q);
}
/* Strcat_charp(tmp, "<pre_int>"); */
Strcat(tmp, Sprintf("<input_alt hseq=\"%d\" fid=\"%d\" type=\"%s\" "
"name=\"%s\" value=\"%s\">",
cur_hseq++, cur_form_id, html_quote(p),
html_quote(r), qq));
return tmp;
}
Str
process_n_button(void)
{
Str tmp = Strnew();
Strcat_charp(tmp, "</input_alt>");
/* Strcat_charp(tmp, "</pre_int>"); */
return tmp;
}
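/*
 * Begin a <select> element. With MENU_SELECT, a single-choice select is
 * rendered as a single menu-style widget whose options are collected in
 * select_option[]; otherwise each option is rendered by process_option()
 * as a checkbox (multiple) or radio button (single). A form is
 * synthesized if none is open.
 */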
Str
process_select(struct parsed_tag *tag)
{
Str tmp = NULL;
char *p;
if (cur_form_id < 0) {
char *s = "<form_int method=internal action=none>";
tmp = process_form(parse_tag(&s, TRUE));
}
p = "";
parsedtag_get_value(tag, ATTR_NAME, &p);
cur_select = Strnew_charp(p);
select_is_multiple = parsedtag_exists(tag, ATTR_MULTIPLE);
#ifdef MENU_SELECT
if (!select_is_multiple) {
select_str = Strnew_charp("<pre_int>");
if (displayLinkNumber)
Strcat(select_str, getLinkNumberStr(0));
Strcat(select_str, Sprintf("[<input_alt hseq=\"%d\" "
"fid=\"%d\" type=select name=\"%s\" selectnumber=%d",
cur_hseq++, cur_form_id, html_quote(p), n_select));
Strcat_charp(select_str, ">");
if (n_select == max_select) {
max_select *= 2;
select_option =
New_Reuse(FormSelectOption, select_option, max_select);
}
select_option[n_select].first = NULL;
select_option[n_select].last = NULL;
cur_option_maxwidth = 0;
}
else
#endif /* MENU_SELECT */
select_str = Strnew();
cur_option = NULL;
cur_status = R_ST_NORMAL;
n_selectitem = 0;
return tmp;
}
Str
process_n_select(void)
{
if (cur_select == NULL)
return NULL;
process_option();
#ifdef MENU_SELECT
if (!select_is_multiple) {
if (select_option[n_select].first) {
FormItemList sitem;
chooseSelectOption(&sitem, select_option[n_select].first);
Strcat(select_str, textfieldrep(sitem.label, cur_option_maxwidth));
}
Strcat_charp(select_str, "</input_alt>]</pre_int>");
n_select++;
}
else
#endif /* MENU_SELECT */
Strcat_charp(select_str, "<br>");
cur_select = NULL;
n_selectitem = 0;
return select_str;
}
void
feed_select(char *str)
{
Str tmp = Strnew();
int prev_status = cur_status;
static int prev_spaces = -1;
char *p;
if (cur_select == NULL)
return;
while (read_token(tmp, &str, &cur_status, 0, 0)) {
if (cur_status != R_ST_NORMAL || prev_status != R_ST_NORMAL)
continue;
p = tmp->ptr;
if (tmp->ptr[0] == '<' && Strlastchar(tmp) == '>') {
struct parsed_tag *tag;
char *q;
if (!(tag = parse_tag(&p, FALSE)))
continue;
switch (tag->tagid) {
case HTML_OPTION:
process_option();
cur_option = Strnew();
if (parsedtag_get_value(tag, ATTR_VALUE, &q))
cur_option_value = Strnew_charp(q);
else
cur_option_value = NULL;
if (parsedtag_get_value(tag, ATTR_LABEL, &q))
cur_option_label = Strnew_charp(q);
else
cur_option_label = NULL;
cur_option_selected = parsedtag_exists(tag, ATTR_SELECTED);
prev_spaces = -1;
break;
case HTML_N_OPTION:
/* do nothing */
break;
default:
/* should never happen */
break;
}
}
else if (cur_option) {
while (*p) {
if (IS_SPACE(*p) && prev_spaces != 0) {
p++;
if (prev_spaces > 0)
prev_spaces++;
}
else {
if (IS_SPACE(*p))
prev_spaces = 1;
else
prev_spaces = 0;
if (*p == '&')
Strcat_charp(cur_option, getescapecmd(&p));
else
Strcat_char(cur_option, *(p++));
}
}
}
}
}
void
process_option(void)
{
char begin_char = '[', end_char = ']';
int len;
if (cur_select == NULL || cur_option == NULL)
return;
while (cur_option->length > 0 && IS_SPACE(Strlastchar(cur_option)))
Strshrink(cur_option, 1);
if (cur_option_value == NULL)
cur_option_value = cur_option;
if (cur_option_label == NULL)
cur_option_label = cur_option;
#ifdef MENU_SELECT
if (!select_is_multiple) {
len = get_Str_strwidth(cur_option_label);
if (len > cur_option_maxwidth)
cur_option_maxwidth = len;
addSelectOption(&select_option[n_select],
cur_option_value,
cur_option_label, cur_option_selected);
return;
}
#endif /* MENU_SELECT */
if (!select_is_multiple) {
begin_char = '(';
end_char = ')';
}
Strcat(select_str, Sprintf("<br><pre_int>%c<input_alt hseq=\"%d\" "
"fid=\"%d\" type=%s name=\"%s\" value=\"%s\"",
begin_char, cur_hseq++, cur_form_id,
select_is_multiple ? "checkbox" : "radio",
html_quote(cur_select->ptr),
html_quote(cur_option_value->ptr)));
if (cur_option_selected)
Strcat_charp(select_str, " checked>*</input_alt>");
else
Strcat_charp(select_str, "> </input_alt>");
Strcat_char(select_str, end_char);
Strcat_charp(select_str, html_quote(cur_option_label->ptr));
Strcat_charp(select_str, "</pre_int>");
n_selectitem++;
}
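/*
 * Begin a <textarea>. COLS and ROWS are clamped to sane maxima; the body
 * text is accumulated by feed_textarea() into textarea_str[n_textarea]
 * until process_n_textarea() emits the <input_alt type=textarea> widget.
 * A form is synthesized if none is open.
 */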
Str
process_textarea(struct parsed_tag *tag, int width)
{
Str tmp = NULL;
char *p;
#define TEXTAREA_ATTR_COL_MAX 4096
#define TEXTAREA_ATTR_ROWS_MAX 4096
if (cur_form_id < 0) {
char *s = "<form_int method=internal action=none>";
tmp = process_form(parse_tag(&s, TRUE));
}
p = "";
parsedtag_get_value(tag, ATTR_NAME, &p);
cur_textarea = Strnew_charp(p);
cur_textarea_size = 20;
if (parsedtag_get_value(tag, ATTR_COLS, &p)) {
cur_textarea_size = atoi(p);
if (p[strlen(p) - 1] == '%')
cur_textarea_size = width * cur_textarea_size / 100 - 2;
if (cur_textarea_size <= 0) {
cur_textarea_size = 20;
} else if (cur_textarea_size > TEXTAREA_ATTR_COL_MAX) {
cur_textarea_size = TEXTAREA_ATTR_COL_MAX;
}
}
cur_textarea_rows = 1;
if (parsedtag_get_value(tag, ATTR_ROWS, &p)) {
cur_textarea_rows = atoi(p);
if (cur_textarea_rows <= 0) {
cur_textarea_rows = 1;
} else if (cur_textarea_rows > TEXTAREA_ATTR_ROWS_MAX) {
cur_textarea_rows = TEXTAREA_ATTR_ROWS_MAX;
}
}
cur_textarea_readonly = parsedtag_exists(tag, ATTR_READONLY);
if (n_textarea >= max_textarea) {
max_textarea *= 2;
textarea_str = New_Reuse(Str, textarea_str, max_textarea);
}
textarea_str[n_textarea] = Strnew();
ignore_nl_textarea = TRUE;
return tmp;
}
Str
process_n_textarea(void)
{
Str tmp;
int i;
if (cur_textarea == NULL)
return NULL;
tmp = Strnew();
Strcat(tmp, Sprintf("<pre_int>[<input_alt hseq=\"%d\" fid=\"%d\" "
"type=textarea name=\"%s\" size=%d rows=%d "
"top_margin=%d textareanumber=%d",
cur_hseq, cur_form_id,
html_quote(cur_textarea->ptr),
cur_textarea_size, cur_textarea_rows,
cur_textarea_rows - 1, n_textarea));
if (cur_textarea_readonly)
Strcat_charp(tmp, " readonly");
Strcat_charp(tmp, "><u>");
for (i = 0; i < cur_textarea_size; i++)
Strcat_char(tmp, ' ');
Strcat_charp(tmp, "</u></input_alt>]</pre_int>\n");
cur_hseq++;
n_textarea++;
cur_textarea = NULL;
return tmp;
}
void
feed_textarea(char *str)
{
if (cur_textarea == NULL)
return;
if (ignore_nl_textarea) {
if (*str == '\r')
str++;
if (*str == '\n')
str++;
}
ignore_nl_textarea = FALSE;
while (*str) {
if (*str == '&')
Strcat_charp(textarea_str[n_textarea], getescapecmd(&str));
else if (*str == '\n') {
Strcat_charp(textarea_str[n_textarea], "\r\n");
str++;
}
else if (*str != '\r')
Strcat_char(textarea_str[n_textarea], *(str++));
}
}
Str
process_hr(struct parsed_tag *tag, int width, int indent_width)
{
Str tmp = Strnew_charp("<nobr>");
int w = 0;
int x = ALIGN_CENTER;
#define HR_ATTR_WIDTH_MAX 65535
if (width > indent_width)
width -= indent_width;
if (parsedtag_get_value(tag, ATTR_WIDTH, &w)) {
if (w > HR_ATTR_WIDTH_MAX) {
w = HR_ATTR_WIDTH_MAX;
}
w = REAL_WIDTH(w, width);
} else {
w = width;
}
parsedtag_get_value(tag, ATTR_ALIGN, &x);
switch (x) {
case ALIGN_CENTER:
Strcat_charp(tmp, "<div_int align=center>");
break;
case ALIGN_RIGHT:
Strcat_charp(tmp, "<div_int align=right>");
break;
case ALIGN_LEFT:
Strcat_charp(tmp, "<div_int align=left>");
break;
}
w /= symbol_width;
if (w <= 0)
w = 1;
push_symbol(tmp, HR_SYMBOL, symbol_width, w);
Strcat_charp(tmp, "</div_int></nobr>");
return tmp;
}
#ifdef USE_M17N
static char *
check_charset(char *p)
{
return wc_guess_charset(p, 0) ? p : NULL;
}
static char *
check_accept_charset(char *ac)
{
char *s = ac, *e;
while (*s) {
while (*s && (IS_SPACE(*s) || *s == ','))
s++;
if (!*s)
break;
e = s;
while (*e && !(IS_SPACE(*e) || *e == ','))
e++;
if (wc_guess_charset(Strnew_charp_n(s, e - s)->ptr, 0))
return ac;
s = e;
}
return NULL;
}
#endif
static Str
process_form_int(struct parsed_tag *tag, int fid)
{
char *p, *q, *r, *s, *tg, *n;
p = "get";
parsedtag_get_value(tag, ATTR_METHOD, &p);
q = "!CURRENT_URL!";
parsedtag_get_value(tag, ATTR_ACTION, &q);
q = url_encode(remove_space(q), cur_baseURL, cur_document_charset);
r = NULL;
#ifdef USE_M17N
if (parsedtag_get_value(tag, ATTR_ACCEPT_CHARSET, &r))
r = check_accept_charset(r);
if (!r && parsedtag_get_value(tag, ATTR_CHARSET, &r))
r = check_charset(r);
#endif
s = NULL;
parsedtag_get_value(tag, ATTR_ENCTYPE, &s);
tg = NULL;
parsedtag_get_value(tag, ATTR_TARGET, &tg);
n = NULL;
parsedtag_get_value(tag, ATTR_NAME, &n);
if (fid < 0) {
form_max++;
form_sp++;
fid = form_max;
}
else { /* <form_int> */
if (form_max < fid)
form_max = fid;
form_sp = fid;
}
if (forms_size == 0) {
forms_size = INITIAL_FORM_SIZE;
forms = New_N(FormList *, forms_size);
form_stack = NewAtom_N(int, forms_size);
}
if (forms_size <= form_max) {
forms_size += form_max;
forms = New_Reuse(FormList *, forms, forms_size);
form_stack = New_Reuse(int, form_stack, forms_size);
}
form_stack[form_sp] = fid;
if (w3m_halfdump) {
Str tmp = Sprintf("<form_int fid=\"%d\" action=\"%s\" method=\"%s\"",
fid, html_quote(q), html_quote(p));
if (s)
Strcat(tmp, Sprintf(" enctype=\"%s\"", html_quote(s)));
if (tg)
Strcat(tmp, Sprintf(" target=\"%s\"", html_quote(tg)));
if (n)
Strcat(tmp, Sprintf(" name=\"%s\"", html_quote(n)));
#ifdef USE_M17N
if (r)
Strcat(tmp, Sprintf(" accept-charset=\"%s\"", html_quote(r)));
#endif
Strcat_charp(tmp, ">");
return tmp;
}
forms[fid] = newFormList(q, p, r, s, tg, n, NULL);
return NULL;
}
Str
process_form(struct parsed_tag *tag)
{
return process_form_int(tag, -1);
}
Str
process_n_form(void)
{
if (form_sp >= 0)
form_sp--;
return NULL;
}
static void
clear_ignore_p_flag(int cmd, struct readbuffer *obuf)
{
static int clear_flag_cmd[] = {
HTML_HR, HTML_UNKNOWN
};
int i;
for (i = 0; clear_flag_cmd[i] != HTML_UNKNOWN; i++) {
if (cmd == clear_flag_cmd[i]) {
obuf->flag &= ~RB_IGNORE_P;
return;
}
}
}
static void
set_alignment(struct readbuffer *obuf, struct parsed_tag *tag)
{
long flag = -1;
int align;
if (parsedtag_get_value(tag, ATTR_ALIGN, &align)) {
switch (align) {
case ALIGN_CENTER:
flag = RB_CENTER;
break;
case ALIGN_RIGHT:
flag = RB_RIGHT;
break;
case ALIGN_LEFT:
flag = RB_LEFT;
}
}
RB_SAVE_FLAG(obuf);
if (flag != -1) {
RB_SET_ALIGN(obuf, flag);
}
}
#ifdef ID_EXT
static void
process_idattr(struct readbuffer *obuf, int cmd, struct parsed_tag *tag)
{
char *id = NULL, *framename = NULL;
Str idtag = NULL;
/*
* HTML_TABLE is handled separately by the table-rendering code.
*/
if (cmd == HTML_TABLE)
return;
parsedtag_get_value(tag, ATTR_ID, &id);
parsedtag_get_value(tag, ATTR_FRAMENAME, &framename);
if (id == NULL)
return;
if (framename)
idtag = Sprintf("<_id id=\"%s\" framename=\"%s\">",
html_quote(id), html_quote(framename));
else
idtag = Sprintf("<_id id=\"%s\">", html_quote(id));
push_tag(obuf, idtag->ptr, HTML_NOP);
}
#endif /* ID_EXT */
#define CLOSE_P if (obuf->flag & RB_P) { \
flushline(h_env, obuf, envs[h_env->envc].indent,0,h_env->limit);\
RB_RESTORE_FLAG(obuf);\
obuf->flag &= ~RB_P;\
}
#define CLOSE_A \
CLOSE_P; \
close_anchor(h_env, obuf);
#define CLOSE_DT \
if (obuf->flag & RB_IN_DT) { \
obuf->flag &= ~RB_IN_DT; \
HTMLlineproc1("</b>", h_env); \
}
#define PUSH_ENV(cmd) \
if (++h_env->envc_real < h_env->nenv) { \
++h_env->envc; \
envs[h_env->envc].env = cmd; \
envs[h_env->envc].count = 0; \
if (h_env->envc <= MAX_INDENT_LEVEL) \
envs[h_env->envc].indent = envs[h_env->envc - 1].indent + INDENT_INCR; \
else \
envs[h_env->envc].indent = envs[h_env->envc - 1].indent; \
}
#define POP_ENV \
if (h_env->envc_real-- < h_env->nenv) \
h_env->envc--;
static int
ul_type(struct parsed_tag *tag, int default_type)
{
char *p;
if (parsedtag_get_value(tag, ATTR_TYPE, &p)) {
if (!strcasecmp(p, "disc"))
return (int)'d';
else if (!strcasecmp(p, "circle"))
return (int)'c';
else if (!strcasecmp(p, "square"))
return (int)'s';
}
return default_type;
}
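/*
 * Parse the CONTENT value of <meta http-equiv="refresh">, e.g.
 * "5; url=http://example.com/". Returns the refresh interval in seconds
 * (0 on error) and stores the target URL, if any, in *refresh_uri.
 */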
int
getMetaRefreshParam(char *q, Str *refresh_uri)
{
int refresh_interval;
char *r;
Str s_tmp = NULL;
if (q == NULL || refresh_uri == NULL)
return 0;
refresh_interval = atoi(q);
if (refresh_interval < 0)
return 0;
while (*q) {
if (!strncasecmp(q, "url=", 4)) {
q += 4;
if (*q == '\"' || *q == '\'') /* " or ' */
q++;
r = q;
while (*r && !IS_SPACE(*r) && *r != ';')
r++;
s_tmp = Strnew_charp_n(q, r - q);
if (s_tmp->ptr[s_tmp->length - 1] == '\"' /* " */
|| s_tmp->ptr[s_tmp->length - 1] == '\'') { /* ' */
s_tmp->length--;
s_tmp->ptr[s_tmp->length] = '\0';
}
q = r;
}
while (*q && *q != ';')
q++;
if (*q == ';')
q++;
while (*q && *q == ' ')
q++;
}
*refresh_uri = s_tmp;
return refresh_interval;
}
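/*
 * Process one parsed HTML tag, updating the output buffer state.
 * Returns 1 if the tag has been fully consumed here; 0 if the caller
 * should still emit the (possibly rewritten) tag itself.
 */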
int
HTMLtagproc1(struct parsed_tag *tag, struct html_feed_environ *h_env)
{
char *p, *q, *r;
int i, w, x, y, z, count, width;
struct readbuffer *obuf = h_env->obuf;
struct environment *envs = h_env->envs;
Str tmp;
int hseq;
int cmd;
#ifdef ID_EXT
char *id = NULL;
#endif /* ID_EXT */
cmd = tag->tagid;
if (obuf->flag & RB_PRE) {
switch (cmd) {
case HTML_NOBR:
case HTML_N_NOBR:
case HTML_PRE_INT:
case HTML_N_PRE_INT:
return 1;
}
}
switch (cmd) {
case HTML_B:
obuf->in_bold++;
if (obuf->in_bold > 1)
return 1;
return 0;
case HTML_N_B:
if (obuf->in_bold == 1 && close_effect0(obuf, HTML_B))
obuf->in_bold = 0;
if (obuf->in_bold > 0) {
obuf->in_bold--;
if (obuf->in_bold == 0)
return 0;
}
return 1;
case HTML_I:
obuf->in_italic++;
if (obuf->in_italic > 1)
return 1;
return 0;
case HTML_N_I:
if (obuf->in_italic == 1 && close_effect0(obuf, HTML_I))
obuf->in_italic = 0;
if (obuf->in_italic > 0) {
obuf->in_italic--;
if (obuf->in_italic == 0)
return 0;
}
return 1;
case HTML_U:
obuf->in_under++;
if (obuf->in_under > 1)
return 1;
return 0;
case HTML_N_U:
if (obuf->in_under == 1 && close_effect0(obuf, HTML_U))
obuf->in_under = 0;
if (obuf->in_under > 0) {
obuf->in_under--;
if (obuf->in_under == 0)
return 0;
}
return 1;
case HTML_EM:
HTMLlineproc1("<i>", h_env);
return 1;
case HTML_N_EM:
HTMLlineproc1("</i>", h_env);
return 1;
case HTML_STRONG:
HTMLlineproc1("<b>", h_env);
return 1;
case HTML_N_STRONG:
HTMLlineproc1("</b>", h_env);
return 1;
case HTML_Q:
HTMLlineproc1("`", h_env);
return 1;
case HTML_N_Q:
HTMLlineproc1("'", h_env);
return 1;
case HTML_P:
case HTML_N_P:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 1, h_env->limit);
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
}
obuf->flag |= RB_IGNORE_P;
if (cmd == HTML_P) {
set_alignment(obuf, tag);
obuf->flag |= RB_P;
}
return 1;
case HTML_BR:
flushline(h_env, obuf, envs[h_env->envc].indent, 1, h_env->limit);
h_env->blank_lines = 0;
return 1;
case HTML_H:
if (!(obuf->flag & (RB_PREMODE | RB_IGNORE_P))) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
}
HTMLlineproc1("<b>", h_env);
set_alignment(obuf, tag);
return 1;
case HTML_N_H:
HTMLlineproc1("</b>", h_env);
if (!(obuf->flag & RB_PREMODE)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
}
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
RB_RESTORE_FLAG(obuf);
close_anchor(h_env, obuf);
obuf->flag |= RB_IGNORE_P;
return 1;
case HTML_UL:
case HTML_OL:
case HTML_BLQ:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
if (!(obuf->flag & RB_PREMODE) &&
(h_env->envc == 0 || cmd == HTML_BLQ))
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
}
PUSH_ENV(cmd);
if (cmd == HTML_UL || cmd == HTML_OL) {
if (parsedtag_get_value(tag, ATTR_START, &count)) {
envs[h_env->envc].count = count - 1;
}
}
if (cmd == HTML_OL) {
envs[h_env->envc].type = '1';
if (parsedtag_get_value(tag, ATTR_TYPE, &p)) {
envs[h_env->envc].type = (int)*p;
}
}
if (cmd == HTML_UL)
envs[h_env->envc].type = ul_type(tag, 0);
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
return 1;
case HTML_N_UL:
case HTML_N_OL:
case HTML_N_DL:
case HTML_N_BLQ:
CLOSE_DT;
CLOSE_A;
if (h_env->envc > 0) {
flushline(h_env, obuf, envs[h_env->envc - 1].indent, 0,
h_env->limit);
POP_ENV;
if (!(obuf->flag & RB_PREMODE) &&
(h_env->envc == 0 || cmd == HTML_N_DL || cmd == HTML_N_BLQ)) {
do_blankline(h_env, obuf,
envs[h_env->envc].indent,
INDENT_INCR, h_env->limit);
obuf->flag |= RB_IGNORE_P;
}
}
close_anchor(h_env, obuf);
return 1;
case HTML_DL:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
if (!(obuf->flag & RB_PREMODE))
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
}
PUSH_ENV(cmd);
if (parsedtag_exists(tag, ATTR_COMPACT))
envs[h_env->envc].env = HTML_DL_COMPACT;
obuf->flag |= RB_IGNORE_P;
return 1;
case HTML_LI:
CLOSE_A;
CLOSE_DT;
if (h_env->envc > 0) {
Str num;
flushline(h_env, obuf,
envs[h_env->envc - 1].indent, 0, h_env->limit);
envs[h_env->envc].count++;
if (parsedtag_get_value(tag, ATTR_VALUE, &p)) {
count = atoi(p);
if (count > 0)
envs[h_env->envc].count = count;
else
envs[h_env->envc].count = 0;
}
switch (envs[h_env->envc].env) {
case HTML_UL:
envs[h_env->envc].type = ul_type(tag, envs[h_env->envc].type);
for (i = 0; i < INDENT_INCR - 3; i++)
push_charp(obuf, 1, NBSP, PC_ASCII);
tmp = Strnew();
switch (envs[h_env->envc].type) {
case 'd':
push_symbol(tmp, UL_SYMBOL_DISC, symbol_width, 1);
break;
case 'c':
push_symbol(tmp, UL_SYMBOL_CIRCLE, symbol_width, 1);
break;
case 's':
push_symbol(tmp, UL_SYMBOL_SQUARE, symbol_width, 1);
break;
default:
push_symbol(tmp,
UL_SYMBOL((h_env->envc_real -
1) % MAX_UL_LEVEL), symbol_width,
1);
break;
}
if (symbol_width == 1)
push_charp(obuf, 1, NBSP, PC_ASCII);
push_str(obuf, symbol_width, tmp, PC_ASCII);
push_charp(obuf, 1, NBSP, PC_ASCII);
set_space_to_prevchar(obuf->prevchar);
break;
case HTML_OL:
if (parsedtag_get_value(tag, ATTR_TYPE, &p))
envs[h_env->envc].type = (int)*p;
switch ((envs[h_env->envc].count > 0)? envs[h_env->envc].type: '1') {
case 'i':
num = romanNumeral(envs[h_env->envc].count);
break;
case 'I':
num = romanNumeral(envs[h_env->envc].count);
Strupper(num);
break;
case 'a':
num = romanAlphabet(envs[h_env->envc].count);
break;
case 'A':
num = romanAlphabet(envs[h_env->envc].count);
Strupper(num);
break;
default:
num = Sprintf("%d", envs[h_env->envc].count);
break;
}
if (INDENT_INCR >= 4)
Strcat_charp(num, ". ");
else
Strcat_char(num, '.');
push_spaces(obuf, 1, INDENT_INCR - num->length);
push_str(obuf, num->length, num, PC_ASCII);
if (INDENT_INCR >= 4)
set_space_to_prevchar(obuf->prevchar);
break;
default:
push_spaces(obuf, 1, INDENT_INCR);
break;
}
}
else {
flushline(h_env, obuf, 0, 0, h_env->limit);
}
obuf->flag |= RB_IGNORE_P;
return 1;
case HTML_DT:
CLOSE_A;
if (h_env->envc == 0 ||
(h_env->envc_real < h_env->nenv &&
envs[h_env->envc].env != HTML_DL &&
envs[h_env->envc].env != HTML_DL_COMPACT)) {
PUSH_ENV(HTML_DL);
}
if (h_env->envc > 0) {
flushline(h_env, obuf,
envs[h_env->envc - 1].indent, 0, h_env->limit);
}
if (!(obuf->flag & RB_IN_DT)) {
HTMLlineproc1("<b>", h_env);
obuf->flag |= RB_IN_DT;
}
obuf->flag |= RB_IGNORE_P;
return 1;
case HTML_DD:
CLOSE_A;
CLOSE_DT;
if (envs[h_env->envc].env == HTML_DL_COMPACT) {
if (obuf->pos > envs[h_env->envc].indent)
flushline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
else
push_spaces(obuf, 1, envs[h_env->envc].indent - obuf->pos);
}
else
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
/* obuf->flag |= RB_IGNORE_P; */
return 1;
case HTML_TITLE:
close_anchor(h_env, obuf);
process_title(tag);
obuf->flag |= RB_TITLE;
obuf->end_tag = HTML_N_TITLE;
return 1;
case HTML_N_TITLE:
if (!(obuf->flag & RB_TITLE))
return 1;
obuf->flag &= ~RB_TITLE;
obuf->end_tag = 0;
tmp = process_n_title(tag);
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_TITLE_ALT:
if (parsedtag_get_value(tag, ATTR_TITLE, &p))
h_env->title = html_unquote(p);
return 0;
case HTML_FRAMESET:
PUSH_ENV(cmd);
push_charp(obuf, 9, "--FRAME--", PC_ASCII);
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
return 0;
case HTML_N_FRAMESET:
if (h_env->envc > 0) {
POP_ENV;
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
}
return 0;
case HTML_NOFRAMES:
CLOSE_A;
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
obuf->flag |= (RB_NOFRAMES | RB_IGNORE_P);
/* istr = str; */
return 1;
case HTML_N_NOFRAMES:
CLOSE_A;
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
obuf->flag &= ~RB_NOFRAMES;
return 1;
case HTML_FRAME:
q = r = NULL;
parsedtag_get_value(tag, ATTR_SRC, &q);
parsedtag_get_value(tag, ATTR_NAME, &r);
if (q) {
q = html_quote(q);
push_tag(obuf, Sprintf("<a hseq=\"%d\" href=\"%s\">",
cur_hseq++, q)->ptr, HTML_A);
if (r)
q = html_quote(r);
push_charp(obuf, get_strwidth(q), q, PC_ASCII);
push_tag(obuf, "</a>", HTML_N_A);
}
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
return 0;
case HTML_HR:
close_anchor(h_env, obuf);
tmp = process_hr(tag, h_env->limit, envs[h_env->envc].indent);
HTMLlineproc1(tmp->ptr, h_env);
set_space_to_prevchar(obuf->prevchar);
return 1;
case HTML_PRE:
x = parsedtag_exists(tag, ATTR_FOR_TABLE);
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
if (!x)
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
}
else
fillline(obuf, envs[h_env->envc].indent);
obuf->flag |= (RB_PRE | RB_IGNORE_P);
/* istr = str; */
return 1;
case HTML_N_PRE:
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
if (!(obuf->flag & RB_IGNORE_P)) {
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
obuf->flag |= RB_IGNORE_P;
h_env->blank_lines++;
}
obuf->flag &= ~RB_PRE;
close_anchor(h_env, obuf);
return 1;
case HTML_PRE_INT:
i = obuf->line->length;
append_tags(obuf);
if (!(obuf->flag & RB_SPECIAL)) {
set_breakpoint(obuf, obuf->line->length - i);
}
obuf->flag |= RB_PRE_INT;
return 0;
case HTML_N_PRE_INT:
push_tag(obuf, "</pre_int>", HTML_N_PRE_INT);
obuf->flag &= ~RB_PRE_INT;
if (!(obuf->flag & RB_SPECIAL) && obuf->pos > obuf->bp.pos) {
set_prevchar(obuf->prevchar, "", 0);
obuf->prev_ctype = PC_CTRL;
}
return 1;
case HTML_NOBR:
obuf->flag |= RB_NOBR;
obuf->nobr_level++;
return 0;
case HTML_N_NOBR:
if (obuf->nobr_level > 0)
obuf->nobr_level--;
if (obuf->nobr_level == 0)
obuf->flag &= ~RB_NOBR;
return 0;
case HTML_PRE_PLAIN:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
}
obuf->flag |= (RB_PRE | RB_IGNORE_P);
return 1;
case HTML_N_PRE_PLAIN:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
obuf->flag |= RB_IGNORE_P;
}
obuf->flag &= ~RB_PRE;
return 1;
case HTML_LISTING:
case HTML_XMP:
case HTML_PLAINTEXT:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
}
obuf->flag |= (RB_PLAIN | RB_IGNORE_P);
switch (cmd) {
case HTML_LISTING:
obuf->end_tag = HTML_N_LISTING;
break;
case HTML_XMP:
obuf->end_tag = HTML_N_XMP;
break;
case HTML_PLAINTEXT:
obuf->end_tag = MAX_HTMLTAG;
break;
}
return 1;
case HTML_N_LISTING:
case HTML_N_XMP:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P)) {
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
obuf->flag |= RB_IGNORE_P;
}
obuf->flag &= ~RB_PLAIN;
obuf->end_tag = 0;
return 1;
case HTML_SCRIPT:
obuf->flag |= RB_SCRIPT;
obuf->end_tag = HTML_N_SCRIPT;
return 1;
case HTML_STYLE:
obuf->flag |= RB_STYLE;
obuf->end_tag = HTML_N_STYLE;
return 1;
case HTML_N_SCRIPT:
obuf->flag &= ~RB_SCRIPT;
obuf->end_tag = 0;
return 1;
case HTML_N_STYLE:
obuf->flag &= ~RB_STYLE;
obuf->end_tag = 0;
return 1;
case HTML_A:
if (obuf->anchor.url)
close_anchor(h_env, obuf);
hseq = 0;
if (parsedtag_get_value(tag, ATTR_HREF, &p))
obuf->anchor.url = Strnew_charp(p)->ptr;
if (parsedtag_get_value(tag, ATTR_TARGET, &p))
obuf->anchor.target = Strnew_charp(p)->ptr;
if (parsedtag_get_value(tag, ATTR_REFERER, &p))
obuf->anchor.referer = Strnew_charp(p)->ptr;
if (parsedtag_get_value(tag, ATTR_TITLE, &p))
obuf->anchor.title = Strnew_charp(p)->ptr;
if (parsedtag_get_value(tag, ATTR_ACCESSKEY, &p))
obuf->anchor.accesskey = (unsigned char)*p;
if (parsedtag_get_value(tag, ATTR_HSEQ, &hseq))
obuf->anchor.hseq = hseq;
if (hseq == 0 && obuf->anchor.url) {
obuf->anchor.hseq = cur_hseq;
tmp = process_anchor(tag, h_env->tagbuf->ptr);
push_tag(obuf, tmp->ptr, HTML_A);
if (displayLinkNumber)
HTMLlineproc1(getLinkNumberStr(-1)->ptr, h_env);
return 1;
}
return 0;
case HTML_N_A:
close_anchor(h_env, obuf);
return 1;
case HTML_IMG:
tmp = process_img(tag, h_env->limit);
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_IMG_ALT:
if (parsedtag_get_value(tag, ATTR_SRC, &p))
obuf->img_alt = Strnew_charp(p);
#ifdef USE_IMAGE
i = 0;
if (parsedtag_get_value(tag, ATTR_TOP_MARGIN, &i)) {
if (i > obuf->top_margin)
obuf->top_margin = i;
}
i = 0;
if (parsedtag_get_value(tag, ATTR_BOTTOM_MARGIN, &i)) {
if (i > obuf->bottom_margin)
obuf->bottom_margin = i;
}
#endif
return 0;
case HTML_N_IMG_ALT:
if (obuf->img_alt) {
if (!close_effect0(obuf, HTML_IMG_ALT))
push_tag(obuf, "</img_alt>", HTML_N_IMG_ALT);
obuf->img_alt = NULL;
}
return 1;
case HTML_INPUT_ALT:
i = 0;
if (parsedtag_get_value(tag, ATTR_TOP_MARGIN, &i)) {
if (i > obuf->top_margin)
obuf->top_margin = i;
}
i = 0;
if (parsedtag_get_value(tag, ATTR_BOTTOM_MARGIN, &i)) {
if (i > obuf->bottom_margin)
obuf->bottom_margin = i;
}
if (parsedtag_get_value(tag, ATTR_HSEQ, &hseq)) {
obuf->input_alt.hseq = hseq;
}
if (parsedtag_get_value(tag, ATTR_FID, &i)) {
obuf->input_alt.fid = i;
}
if (parsedtag_get_value(tag, ATTR_TYPE, &p)) {
obuf->input_alt.type = Strnew_charp(p);
}
if (parsedtag_get_value(tag, ATTR_VALUE, &p)) {
obuf->input_alt.value = Strnew_charp(p);
}
if (parsedtag_get_value(tag, ATTR_NAME, &p)) {
obuf->input_alt.name = Strnew_charp(p);
}
obuf->input_alt.in = 1;
return 0;
case HTML_N_INPUT_ALT:
if (obuf->input_alt.in) {
if (!close_effect0(obuf, HTML_INPUT_ALT))
push_tag(obuf, "</input_alt>", HTML_N_INPUT_ALT);
obuf->input_alt.hseq = 0;
obuf->input_alt.fid = -1;
obuf->input_alt.in = 0;
obuf->input_alt.type = NULL;
obuf->input_alt.name = NULL;
obuf->input_alt.value = NULL;
}
return 1;
case HTML_TABLE:
close_anchor(h_env, obuf);
obuf->table_level++;
if (obuf->table_level >= MAX_TABLE)
break;
w = BORDER_NONE;
/* x: cellspacing, y: cellpadding */
x = 2;
y = 1;
z = 0;
width = 0;
if (parsedtag_exists(tag, ATTR_BORDER)) {
if (parsedtag_get_value(tag, ATTR_BORDER, &w)) {
if (w > 2)
w = BORDER_THICK;
else if (w < 0) { /* weird */
w = BORDER_THIN;
}
}
else
w = BORDER_THIN;
}
if (DisplayBorders && w == BORDER_NONE)
w = BORDER_THIN;
if (parsedtag_get_value(tag, ATTR_WIDTH, &i)) {
if (obuf->table_level == 0)
width = REAL_WIDTH(i, h_env->limit - envs[h_env->envc].indent);
else
width = RELATIVE_WIDTH(i);
}
if (parsedtag_exists(tag, ATTR_HBORDER))
w = BORDER_NOWIN;
#define MAX_CELLSPACING 1000
#define MAX_CELLPADDING 1000
#define MAX_VSPACE 1000
parsedtag_get_value(tag, ATTR_CELLSPACING, &x);
parsedtag_get_value(tag, ATTR_CELLPADDING, &y);
parsedtag_get_value(tag, ATTR_VSPACE, &z);
if (x > MAX_CELLSPACING)
x = MAX_CELLSPACING;
if (y > MAX_CELLPADDING)
y = MAX_CELLPADDING;
if (z > MAX_VSPACE)
z = MAX_VSPACE;
#ifdef ID_EXT
parsedtag_get_value(tag, ATTR_ID, &id);
#endif /* ID_EXT */
tables[obuf->table_level] = begin_table(w, x, y, z);
#ifdef ID_EXT
if (id != NULL)
tables[obuf->table_level]->id = Strnew_charp(id);
#endif /* ID_EXT */
table_mode[obuf->table_level].pre_mode = 0;
table_mode[obuf->table_level].indent_level = 0;
table_mode[obuf->table_level].nobr_level = 0;
table_mode[obuf->table_level].caption = 0;
table_mode[obuf->table_level].end_tag = 0; /* HTML_UNKNOWN */
#ifndef TABLE_EXPAND
tables[obuf->table_level]->total_width = width;
#else
tables[obuf->table_level]->real_width = width;
tables[obuf->table_level]->total_width = 0;
#endif
return 1;
case HTML_N_TABLE:
/* should be processed in HTMLlineproc() */
return 1;
case HTML_CENTER:
CLOSE_A;
if (!(obuf->flag & (RB_PREMODE | RB_IGNORE_P)))
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
RB_SAVE_FLAG(obuf);
RB_SET_ALIGN(obuf, RB_CENTER);
return 1;
case HTML_N_CENTER:
CLOSE_A;
if (!(obuf->flag & RB_PREMODE))
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
RB_RESTORE_FLAG(obuf);
return 1;
case HTML_DIV:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P))
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
set_alignment(obuf, tag);
return 1;
case HTML_N_DIV:
CLOSE_A;
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
RB_RESTORE_FLAG(obuf);
return 1;
case HTML_DIV_INT:
CLOSE_P;
if (!(obuf->flag & RB_IGNORE_P))
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
set_alignment(obuf, tag);
return 1;
case HTML_N_DIV_INT:
CLOSE_P;
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
RB_RESTORE_FLAG(obuf);
return 1;
case HTML_FORM:
CLOSE_A;
if (!(obuf->flag & RB_IGNORE_P))
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
tmp = process_form(tag);
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_N_FORM:
CLOSE_A;
flushline(h_env, obuf, envs[h_env->envc].indent, 0, h_env->limit);
obuf->flag |= RB_IGNORE_P;
process_n_form();
return 1;
case HTML_INPUT:
close_anchor(h_env, obuf);
tmp = process_input(tag);
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_BUTTON:
tmp = process_button(tag);
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_N_BUTTON:
tmp = process_n_button();
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_SELECT:
close_anchor(h_env, obuf);
tmp = process_select(tag);
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
obuf->flag |= RB_INSELECT;
obuf->end_tag = HTML_N_SELECT;
return 1;
case HTML_N_SELECT:
obuf->flag &= ~RB_INSELECT;
obuf->end_tag = 0;
tmp = process_n_select();
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_OPTION:
/* nothing */
return 1;
case HTML_TEXTAREA:
close_anchor(h_env, obuf);
tmp = process_textarea(tag, h_env->limit);
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
obuf->flag |= RB_INTXTA;
obuf->end_tag = HTML_N_TEXTAREA;
return 1;
case HTML_N_TEXTAREA:
obuf->flag &= ~RB_INTXTA;
obuf->end_tag = 0;
tmp = process_n_textarea();
if (tmp)
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_ISINDEX:
p = "";
q = "!CURRENT_URL!";
parsedtag_get_value(tag, ATTR_PROMPT, &p);
parsedtag_get_value(tag, ATTR_ACTION, &q);
tmp = Strnew_m_charp("<form method=get action=\"",
html_quote(q),
"\">",
html_quote(p),
"<input type=text name=\"\" accept></form>",
NULL);
HTMLlineproc1(tmp->ptr, h_env);
return 1;
case HTML_META:
p = q = r = NULL;
parsedtag_get_value(tag, ATTR_HTTP_EQUIV, &p);
parsedtag_get_value(tag, ATTR_CONTENT, &q);
#ifdef USE_M17N
parsedtag_get_value(tag, ATTR_CHARSET, &r);
if (r) {
/* <meta charset=""> */
SKIP_BLANKS(r);
meta_charset = wc_guess_charset(r, 0);
}
else
if (p && q && !strcasecmp(p, "Content-Type") &&
(q = strcasestr(q, "charset")) != NULL) {
q += 7;
SKIP_BLANKS(q);
if (*q == '=') {
q++;
SKIP_BLANKS(q);
meta_charset = wc_guess_charset(q, 0);
}
}
else
#endif
if (p && q && !strcasecmp(p, "refresh")) {
int refresh_interval;
tmp = NULL;
refresh_interval = getMetaRefreshParam(q, &tmp);
if (tmp) {
q = html_quote(tmp->ptr);
tmp = Sprintf("Refresh (%d sec) <a href=\"%s\">%s</a>",
refresh_interval, q, q);
}
else if (refresh_interval > 0)
tmp = Sprintf("Refresh (%d sec)", refresh_interval);
if (tmp) {
HTMLlineproc1(tmp->ptr, h_env);
do_blankline(h_env, obuf, envs[h_env->envc].indent, 0,
h_env->limit);
if (!is_redisplay &&
!((obuf->flag & RB_NOFRAMES) && RenderFrame)) {
tag->need_reconstruct = TRUE;
return 0;
}
}
}
return 1;
case HTML_BASE:
#if defined(USE_M17N) || defined(USE_IMAGE)
p = NULL;
if (parsedtag_get_value(tag, ATTR_HREF, &p)) {
cur_baseURL = New(ParsedURL);
parseURL(p, cur_baseURL, NULL);
}
#endif
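/* fall through */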
case HTML_MAP:
case HTML_N_MAP:
case HTML_AREA:
return 0;
case HTML_DEL:
switch (displayInsDel) {
case DISPLAY_INS_DEL_SIMPLE:
obuf->flag |= RB_DEL;
break;
case DISPLAY_INS_DEL_NORMAL:
HTMLlineproc1("<U>[DEL:</U>", h_env);
break;
case DISPLAY_INS_DEL_FONTIFY:
obuf->in_strike++;
if (obuf->in_strike == 1) {
push_tag(obuf, "<s>", HTML_S);
}
break;
}
return 1;
case HTML_N_DEL:
switch (displayInsDel) {
case DISPLAY_INS_DEL_SIMPLE:
obuf->flag &= ~RB_DEL;
break;
case DISPLAY_INS_DEL_NORMAL:
HTMLlineproc1("<U>:DEL]</U>", h_env);
break;
case DISPLAY_INS_DEL_FONTIFY:
if (obuf->in_strike == 0)
return 1;
if (obuf->in_strike == 1 && close_effect0(obuf, HTML_S))
obuf->in_strike = 0;
if (obuf->in_strike > 0) {
obuf->in_strike--;
if (obuf->in_strike == 0) {
push_tag(obuf, "</s>", HTML_N_S);
}
}
break;
}
return 1;
case HTML_S:
switch (displayInsDel) {
case DISPLAY_INS_DEL_SIMPLE:
obuf->flag |= RB_S;
break;
case DISPLAY_INS_DEL_NORMAL:
HTMLlineproc1("<U>[S:</U>", h_env);
break;
case DISPLAY_INS_DEL_FONTIFY:
obuf->in_strike++;
if (obuf->in_strike == 1) {
push_tag(obuf, "<s>", HTML_S);
}
break;
}
return 1;
case HTML_N_S:
switch (displayInsDel) {
case DISPLAY_INS_DEL_SIMPLE:
obuf->flag &= ~RB_S;
break;
case DISPLAY_INS_DEL_NORMAL:
HTMLlineproc1("<U>:S]</U>", h_env);
break;
case DISPLAY_INS_DEL_FONTIFY:
if (obuf->in_strike == 0)
return 1;
if (obuf->in_strike == 1 && close_effect0(obuf, HTML_S))
obuf->in_strike = 0;
if (obuf->in_strike > 0) {
obuf->in_strike--;
if (obuf->in_strike == 0) {
push_tag(obuf, "</s>", HTML_N_S);
}
}
}
return 1;
case HTML_INS:
switch (displayInsDel) {
case DISPLAY_INS_DEL_SIMPLE:
break;
case DISPLAY_INS_DEL_NORMAL:
HTMLlineproc1("<U>[INS:</U>", h_env);
break;
case DISPLAY_INS_DEL_FONTIFY:
obuf->in_ins++;
if (obuf->in_ins == 1) {
push_tag(obuf, "<ins>", HTML_INS);
}
break;
}
return 1;
case HTML_N_INS:
switch (displayInsDel) {
case DISPLAY_INS_DEL_SIMPLE:
break;
case DISPLAY_INS_DEL_NORMAL:
HTMLlineproc1("<U>:INS]</U>", h_env);
break;
case DISPLAY_INS_DEL_FONTIFY:
if (obuf->in_ins == 0)
return 1;
if (obuf->in_ins == 1 && close_effect0(obuf, HTML_INS))
obuf->in_ins = 0;
if (obuf->in_ins > 0) {
obuf->in_ins--;
if (obuf->in_ins == 0) {
push_tag(obuf, "</ins>", HTML_N_INS);
}
}
break;
}
return 1;
case HTML_SUP:
if (!(obuf->flag & (RB_DEL | RB_S)))
HTMLlineproc1("^", h_env);
return 1;
case HTML_N_SUP:
return 1;
case HTML_SUB:
if (!(obuf->flag & (RB_DEL | RB_S)))
HTMLlineproc1("[", h_env);
return 1;
case HTML_N_SUB:
if (!(obuf->flag & (RB_DEL | RB_S)))
HTMLlineproc1("]", h_env);
return 1;
case HTML_FONT:
case HTML_N_FONT:
case HTML_NOP:
return 1;
case HTML_BGSOUND:
if (view_unseenobject) {
if (parsedtag_get_value(tag, ATTR_SRC, &p)) {
Str s;
q = html_quote(p);
s = Sprintf("<A HREF=\"%s\">bgsound(%s)</A>", q, q);
HTMLlineproc1(s->ptr, h_env);
}
}
return 1;
case HTML_EMBED:
if (view_unseenobject) {
if (parsedtag_get_value(tag, ATTR_SRC, &p)) {
Str s;
q = html_quote(p);
s = Sprintf("<A HREF=\"%s\">embed(%s)</A>", q, q);
HTMLlineproc1(s->ptr, h_env);
}
}
return 1;
case HTML_APPLET:
if (view_unseenobject) {
if (parsedtag_get_value(tag, ATTR_ARCHIVE, &p)) {
Str s;
q = html_quote(p);
s = Sprintf("<A HREF=\"%s\">applet archive(%s)</A>", q, q);
HTMLlineproc1(s->ptr, h_env);
}
}
return 1;
case HTML_BODY:
if (view_unseenobject) {
if (parsedtag_get_value(tag, ATTR_BACKGROUND, &p)) {
Str s;
q = html_quote(p);
s = Sprintf("<IMG SRC=\"%s\" ALT=\"bg image(%s)\"><BR>", q, q);
HTMLlineproc1(s->ptr, h_env);
}
}
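/* fall through */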
case HTML_N_HEAD:
if (obuf->flag & RB_TITLE)
HTMLlineproc1("</title>", h_env);
case HTML_HEAD:
case HTML_N_BODY:
return 1;
default:
/* obuf->prevchar = '\0'; */
return 0;
}
/* not reached */
return 0;
}
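/*
 * PPUSH appends one character and its Lineprop to the output arrays
 * used by HTMLlineproc2body().  PSIZE grows both arrays once fewer
 * than two free slots remain, so callers must invoke PSIZE before
 * each PPUSH, as in:
 *
 *     PSIZE;
 *     PPUSH(mode | effect, *str++);
 */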
#define PPUSH(p,c) {outp[pos]=(p);outc[pos]=(c);pos++;}
#define PSIZE \
if (out_size <= pos + 1) { \
out_size = pos * 3 / 2; \
outc = New_Reuse(char, outc, out_size); \
outp = New_Reuse(Lineprop, outp, out_size); \
}
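/*
 * textlist_feed: feed function for HTMLlineproc2().  Yields successive
 * lines from the TextLineList at _tl_lp2, or NULL when exhausted.
 */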
static TextLineListItem *_tl_lp2;
static Str
textlist_feed()
{
TextLine *p;
if (_tl_lp2 != NULL) {
p = _tl_lp2->ptr;
_tl_lp2 = _tl_lp2->next;
return p->line;
}
return NULL;
}
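/*
 * ex_efct: translate extended effect state bits (PE_EX_*) into the
 * corresponding display-effect bits (PE_EX_*_E).
 */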
static int
ex_efct(int ex)
{
int effect = 0;
if (! ex)
return 0;
if (ex & PE_EX_ITALIC)
effect |= PE_EX_ITALIC_E;
if (ex & PE_EX_INSERT)
effect |= PE_EX_INSERT_E;
if (ex & PE_EX_STRIKE)
effect |= PE_EX_STRIKE_E;
return effect;
}
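/*
 * HTMLlineproc2body: rendering pass 2.  Reads intermediate (halfdump)
 * lines from feed(), expands symbols and character references into
 * character/Lineprop arrays, interprets the remaining tags to register
 * anchors, images, forms, frames and links on the buffer, and appends
 * each completed line with addnewline().  llimit < 0 means no limit.
 */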
static void
HTMLlineproc2body(Buffer *buf, Str (*feed) (), int llimit)
{
static char *outc = NULL;
static Lineprop *outp = NULL;
static int out_size = 0;
Anchor *a_href = NULL, *a_img = NULL, *a_form = NULL;
char *p, *q, *r, *s, *t, *str;
Lineprop mode, effect, ex_effect;
int pos;
int nlines;
#ifdef DEBUG
FILE *debug = NULL;
#endif
struct frameset *frameset_s[FRAMESTACK_SIZE];
int frameset_sp = -1;
union frameset_element *idFrame = NULL;
char *id = NULL;
int hseq, form_id;
Str line;
char *endp;
char symbol = '\0';
int internal = 0;
Anchor **a_textarea = NULL;
#ifdef MENU_SELECT
Anchor **a_select = NULL;
#endif
#if defined(USE_M17N) || defined(USE_IMAGE)
ParsedURL *base = baseURL(buf);
#endif
#ifdef USE_M17N
wc_ces name_charset = url_to_charset(NULL, &buf->currentURL,
buf->document_charset);
#endif
if (out_size == 0) {
out_size = LINELEN;
outc = NewAtom_N(char, out_size);
outp = NewAtom_N(Lineprop, out_size);
}
n_textarea = -1;
if (!max_textarea) { /* halfload */
max_textarea = MAX_TEXTAREA;
textarea_str = New_N(Str, max_textarea);
a_textarea = New_N(Anchor *, max_textarea);
}
#ifdef MENU_SELECT
n_select = -1;
if (!max_select) { /* halfload */
max_select = MAX_SELECT;
select_option = New_N(FormSelectOption, max_select);
a_select = New_N(Anchor *, max_select);
}
#endif
#ifdef DEBUG
if (w3m_debug)
debug = fopen("zzzerr", "a");
#endif
effect = 0;
ex_effect = 0;
nlines = 0;
while ((line = feed()) != NULL) {
#ifdef DEBUG
if (w3m_debug) {
Strfputs(line, debug);
fputc('\n', debug);
}
#endif
if (n_textarea >= 0 && *(line->ptr) != '<') { /* halfload */
Strcat(textarea_str[n_textarea], line);
continue;
}
proc_again:
if (++nlines == llimit)
break;
pos = 0;
#ifdef ENABLE_REMOVE_TRAILINGSPACES
Strremovetrailingspaces(line);
#endif
str = line->ptr;
endp = str + line->length;
while (str < endp) {
PSIZE;
mode = get_mctype(str);
if ((effect | ex_efct(ex_effect)) & PC_SYMBOL && *str != '<') {
#ifdef USE_M17N
char **buf = set_symbol(symbol_width0);
int len;
p = buf[(int)symbol];
len = get_mclen(p);
mode = get_mctype(p);
PPUSH(mode | effect | ex_efct(ex_effect), *(p++));
if (--len) {
mode = (mode & ~PC_WCHAR1) | PC_WCHAR2;
while (len--) {
PSIZE;
PPUSH(mode | effect | ex_efct(ex_effect), *(p++));
}
}
#else
PPUSH(PC_ASCII | effect | ex_efct(ex_effect), SYMBOL_BASE + symbol);
#endif
str += symbol_width;
}
#ifdef USE_M17N
else if (mode == PC_CTRL || mode == PC_UNDEF) {
#else
else if (mode == PC_CTRL || IS_INTSPACE(*str)) {
#endif
PPUSH(PC_ASCII | effect | ex_efct(ex_effect), ' ');
str++;
}
#ifdef USE_M17N
else if (mode & PC_UNKNOWN) {
PPUSH(PC_ASCII | effect | ex_efct(ex_effect), ' ');
str += get_mclen(str);
}
#endif
else if (*str != '<' && *str != '&') {
#ifdef USE_M17N
int len = get_mclen(str);
#endif
PPUSH(mode | effect | ex_efct(ex_effect), *(str++));
#ifdef USE_M17N
if (--len) {
mode = (mode & ~PC_WCHAR1) | PC_WCHAR2;
while (len--) {
PSIZE;
PPUSH(mode | effect | ex_efct(ex_effect), *(str++));
}
}
#endif
}
else if (*str == '&') {
/*
* & escape processing
*/
p = getescapecmd(&str);
while (*p) {
PSIZE;
mode = get_mctype((unsigned char *)p);
#ifdef USE_M17N
if (mode == PC_CTRL || mode == PC_UNDEF) {
#else
if (mode == PC_CTRL || IS_INTSPACE(*str)) {
#endif
PPUSH(PC_ASCII | effect | ex_efct(ex_effect), ' ');
p++;
}
#ifdef USE_M17N
else if (mode & PC_UNKNOWN) {
PPUSH(PC_ASCII | effect | ex_efct(ex_effect), ' ');
p += get_mclen(p);
}
#endif
else {
#ifdef USE_M17N
int len = get_mclen(p);
#endif
PPUSH(mode | effect | ex_efct(ex_effect), *(p++));
#ifdef USE_M17N
if (--len) {
mode = (mode & ~PC_WCHAR1) | PC_WCHAR2;
while (len--) {
PSIZE;
PPUSH(mode | effect | ex_efct(ex_effect), *(p++));
}
}
#endif
}
}
}
else {
/* tag processing */
struct parsed_tag *tag;
if (!(tag = parse_tag(&str, TRUE)))
continue;
switch (tag->tagid) {
case HTML_B:
effect |= PE_BOLD;
break;
case HTML_N_B:
effect &= ~PE_BOLD;
break;
case HTML_I:
ex_effect |= PE_EX_ITALIC;
break;
case HTML_N_I:
ex_effect &= ~PE_EX_ITALIC;
break;
case HTML_INS:
ex_effect |= PE_EX_INSERT;
break;
case HTML_N_INS:
ex_effect &= ~PE_EX_INSERT;
break;
case HTML_U:
effect |= PE_UNDER;
break;
case HTML_N_U:
effect &= ~PE_UNDER;
break;
case HTML_S:
ex_effect |= PE_EX_STRIKE;
break;
case HTML_N_S:
ex_effect &= ~PE_EX_STRIKE;
break;
case HTML_A:
if (renderFrameSet &&
parsedtag_get_value(tag, ATTR_FRAMENAME, &p)) {
p = url_quote_conv(p, buf->document_charset);
if (!idFrame || strcmp(idFrame->body->name, p)) {
idFrame = search_frame(renderFrameSet, p);
if (idFrame && idFrame->body->attr != F_BODY)
idFrame = NULL;
}
}
p = r = s = NULL;
q = buf->baseTarget;
t = "";
hseq = 0;
id = NULL;
if (parsedtag_get_value(tag, ATTR_NAME, &id)) {
id = url_quote_conv(id, name_charset);
registerName(buf, id, currentLn(buf), pos);
}
if (parsedtag_get_value(tag, ATTR_HREF, &p))
p = url_encode(remove_space(p), base,
buf->document_charset);
if (parsedtag_get_value(tag, ATTR_TARGET, &q))
q = url_quote_conv(q, buf->document_charset);
if (parsedtag_get_value(tag, ATTR_REFERER, &r))
r = url_encode(r, base,
buf->document_charset);
parsedtag_get_value(tag, ATTR_TITLE, &s);
parsedtag_get_value(tag, ATTR_ACCESSKEY, &t);
parsedtag_get_value(tag, ATTR_HSEQ, &hseq);
if (hseq > 0)
buf->hmarklist =
putHmarker(buf->hmarklist, currentLn(buf),
pos, hseq - 1);
else if (hseq < 0) {
int h = -hseq - 1;
if (buf->hmarklist &&
h < buf->hmarklist->nmark &&
buf->hmarklist->marks[h].invalid) {
buf->hmarklist->marks[h].pos = pos;
buf->hmarklist->marks[h].line = currentLn(buf);
buf->hmarklist->marks[h].invalid = 0;
hseq = -hseq;
}
}
if (id && idFrame)
idFrame->body->nameList =
putAnchor(idFrame->body->nameList, id, NULL,
(Anchor **)NULL, NULL, NULL, '\0',
currentLn(buf), pos);
if (p) {
effect |= PE_ANCHOR;
a_href = registerHref(buf, p, q, r, s,
*t, currentLn(buf), pos);
a_href->hseq = ((hseq > 0) ? hseq : -hseq) - 1;
a_href->slave = (hseq > 0) ? FALSE : TRUE;
}
break;
case HTML_N_A:
effect &= ~PE_ANCHOR;
if (a_href) {
a_href->end.line = currentLn(buf);
a_href->end.pos = pos;
if (a_href->start.line == a_href->end.line &&
a_href->start.pos == a_href->end.pos) {
if (buf->hmarklist &&
a_href->hseq < buf->hmarklist->nmark)
buf->hmarklist->marks[a_href->hseq].invalid = 1;
a_href->hseq = -1;
}
a_href = NULL;
}
break;
case HTML_LINK:
addLink(buf, tag);
break;
case HTML_IMG_ALT:
if (parsedtag_get_value(tag, ATTR_SRC, &p)) {
#ifdef USE_IMAGE
int w = -1, h = -1, iseq = 0, ismap = 0;
int xoffset = 0, yoffset = 0, top = 0, bottom = 0;
parsedtag_get_value(tag, ATTR_HSEQ, &iseq);
parsedtag_get_value(tag, ATTR_WIDTH, &w);
parsedtag_get_value(tag, ATTR_HEIGHT, &h);
parsedtag_get_value(tag, ATTR_XOFFSET, &xoffset);
parsedtag_get_value(tag, ATTR_YOFFSET, &yoffset);
parsedtag_get_value(tag, ATTR_TOP_MARGIN, &top);
parsedtag_get_value(tag, ATTR_BOTTOM_MARGIN, &bottom);
if (parsedtag_exists(tag, ATTR_ISMAP))
ismap = 1;
q = NULL;
parsedtag_get_value(tag, ATTR_USEMAP, &q);
if (iseq > 0) {
buf->imarklist = putHmarker(buf->imarklist,
currentLn(buf), pos,
iseq - 1);
}
#endif
s = NULL;
parsedtag_get_value(tag, ATTR_TITLE, &s);
p = url_quote_conv(remove_space(p),
buf->document_charset);
a_img = registerImg(buf, p, s, currentLn(buf), pos);
#ifdef USE_IMAGE
a_img->hseq = iseq;
a_img->image = NULL;
if (iseq > 0) {
ParsedURL u;
Image *image;
parseURL2(a_img->url, &u, base);
a_img->image = image = New(Image);
image->url = parsedURL2Str(&u)->ptr;
if (!uncompressed_file_type(u.file, &image->ext))
image->ext = filename_extension(u.file, TRUE);
image->cache = NULL;
image->width =
(w > MAX_IMAGE_SIZE) ? MAX_IMAGE_SIZE : w;
image->height =
(h > MAX_IMAGE_SIZE) ? MAX_IMAGE_SIZE : h;
image->xoffset = xoffset;
image->yoffset = yoffset;
image->y = currentLn(buf) - top;
if (image->xoffset < 0 && pos == 0)
image->xoffset = 0;
if (image->yoffset < 0 && image->y == 1)
image->yoffset = 0;
image->rows = 1 + top + bottom;
image->map = q;
image->ismap = ismap;
image->touch = 0;
image->cache = getImage(image, base,
IMG_FLAG_SKIP);
}
else if (iseq < 0) {
BufferPoint *po = buf->imarklist->marks - iseq - 1;
Anchor *a = retrieveAnchor(buf->img,
po->line, po->pos);
if (a) {
a_img->url = a->url;
a_img->image = a->image;
}
}
#endif
}
effect |= PE_IMAGE;
break;
case HTML_N_IMG_ALT:
effect &= ~PE_IMAGE;
if (a_img) {
a_img->end.line = currentLn(buf);
a_img->end.pos = pos;
}
a_img = NULL;
break;
case HTML_INPUT_ALT:
{
FormList *form;
int top = 0, bottom = 0;
int textareanumber = -1;
#ifdef MENU_SELECT
int selectnumber = -1;
#endif
hseq = 0;
form_id = -1;
parsedtag_get_value(tag, ATTR_HSEQ, &hseq);
parsedtag_get_value(tag, ATTR_FID, &form_id);
parsedtag_get_value(tag, ATTR_TOP_MARGIN, &top);
parsedtag_get_value(tag, ATTR_BOTTOM_MARGIN, &bottom);
if (form_id < 0 || form_id > form_max ||
forms == NULL || forms[form_id] == NULL)
break; /* outside of <form>..</form> */
form = forms[form_id];
if (hseq > 0) {
int hpos = pos;
if (*str == '[')
hpos++;
buf->hmarklist =
putHmarker(buf->hmarklist, currentLn(buf),
hpos, hseq - 1);
}
else if (hseq < 0) {
int h = -hseq - 1;
int hpos = pos;
if (*str == '[')
hpos++;
if (buf->hmarklist &&
h < buf->hmarklist->nmark &&
buf->hmarklist->marks[h].invalid) {
buf->hmarklist->marks[h].pos = hpos;
buf->hmarklist->marks[h].line = currentLn(buf);
buf->hmarklist->marks[h].invalid = 0;
hseq = -hseq;
}
}
if (!form->target)
form->target = buf->baseTarget;
if (a_textarea &&
parsedtag_get_value(tag, ATTR_TEXTAREANUMBER,
&textareanumber)) {
if (textareanumber >= max_textarea) {
max_textarea = 2 * textareanumber;
textarea_str = New_Reuse(Str, textarea_str,
max_textarea);
a_textarea = New_Reuse(Anchor *, a_textarea,
max_textarea);
}
}
#ifdef MENU_SELECT
if (a_select &&
parsedtag_get_value(tag, ATTR_SELECTNUMBER,
&selectnumber)) {
if (selectnumber >= max_select) {
max_select = 2 * selectnumber;
select_option = New_Reuse(FormSelectOption,
select_option,
max_select);
a_select = New_Reuse(Anchor *, a_select,
max_select);
}
}
#endif
a_form =
registerForm(buf, form, tag, currentLn(buf), pos);
if (a_textarea && textareanumber >= 0)
a_textarea[textareanumber] = a_form;
#ifdef MENU_SELECT
if (a_select && selectnumber >= 0)
a_select[selectnumber] = a_form;
#endif
if (a_form) {
a_form->hseq = hseq - 1;
a_form->y = currentLn(buf) - top;
a_form->rows = 1 + top + bottom;
if (!parsedtag_exists(tag, ATTR_NO_EFFECT))
effect |= PE_FORM;
break;
}
}
case HTML_N_INPUT_ALT:
effect &= ~PE_FORM;
if (a_form) {
a_form->end.line = currentLn(buf);
a_form->end.pos = pos;
if (a_form->start.line == a_form->end.line &&
a_form->start.pos == a_form->end.pos)
a_form->hseq = -1;
}
a_form = NULL;
break;
case HTML_MAP:
if (parsedtag_get_value(tag, ATTR_NAME, &p)) {
MapList *m = New(MapList);
m->name = Strnew_charp(p);
m->area = newGeneralList();
m->next = buf->maplist;
buf->maplist = m;
}
break;
case HTML_N_MAP:
/* nothing to do */
break;
case HTML_AREA:
if (buf->maplist == NULL) /* outside of <map>..</map> */
break;
if (parsedtag_get_value(tag, ATTR_HREF, &p)) {
MapArea *a;
p = url_encode(remove_space(p), base,
buf->document_charset);
t = NULL;
parsedtag_get_value(tag, ATTR_TARGET, &t);
q = "";
parsedtag_get_value(tag, ATTR_ALT, &q);
r = NULL;
s = NULL;
#ifdef USE_IMAGE
parsedtag_get_value(tag, ATTR_SHAPE, &r);
parsedtag_get_value(tag, ATTR_COORDS, &s);
#endif
a = newMapArea(p, t, q, r, s);
pushValue(buf->maplist->area, (void *)a);
}
break;
case HTML_FRAMESET:
frameset_sp++;
if (frameset_sp >= FRAMESTACK_SIZE)
break;
frameset_s[frameset_sp] = newFrameSet(tag);
if (frameset_s[frameset_sp] == NULL)
break;
if (frameset_sp == 0) {
if (buf->frameset == NULL) {
buf->frameset = frameset_s[frameset_sp];
}
else
pushFrameTree(&(buf->frameQ),
frameset_s[frameset_sp], NULL);
}
else
addFrameSetElement(frameset_s[frameset_sp - 1],
*(union frameset_element *)
&frameset_s[frameset_sp]);
break;
case HTML_N_FRAMESET:
if (frameset_sp >= 0)
frameset_sp--;
break;
case HTML_FRAME:
if (frameset_sp >= 0 && frameset_sp < FRAMESTACK_SIZE) {
union frameset_element element;
element.body = newFrame(tag, buf);
addFrameSetElement(frameset_s[frameset_sp], element);
}
break;
case HTML_BASE:
if (parsedtag_get_value(tag, ATTR_HREF, &p)) {
p = url_encode(remove_space(p), NULL,
buf->document_charset);
if (!buf->baseURL)
buf->baseURL = New(ParsedURL);
parseURL(p, buf->baseURL, NULL);
#if defined(USE_M17N) || defined(USE_IMAGE)
base = buf->baseURL;
#endif
}
if (parsedtag_get_value(tag, ATTR_TARGET, &p))
buf->baseTarget =
url_quote_conv(p, buf->document_charset);
break;
case HTML_META:
p = q = NULL;
parsedtag_get_value(tag, ATTR_HTTP_EQUIV, &p);
parsedtag_get_value(tag, ATTR_CONTENT, &q);
if (p && q && !strcasecmp(p, "refresh") && MetaRefresh) {
Str tmp = NULL;
int refresh_interval = getMetaRefreshParam(q, &tmp);
#ifdef USE_ALARM
if (tmp) {
p = url_encode(remove_space(tmp->ptr), base,
buf->document_charset);
buf->event = setAlarmEvent(buf->event,
refresh_interval,
AL_IMPLICIT_ONCE,
FUNCNAME_gorURL, p);
}
else if (refresh_interval > 0)
buf->event = setAlarmEvent(buf->event,
refresh_interval,
AL_IMPLICIT,
FUNCNAME_reload, NULL);
#else
if (tmp && refresh_interval == 0) {
p = url_encode(remove_space(tmp->ptr), base,
buf->document_charset);
pushEvent(FUNCNAME_gorURL, p);
}
#endif
}
break;
case HTML_INTERNAL:
internal = HTML_INTERNAL;
break;
case HTML_N_INTERNAL:
internal = HTML_N_INTERNAL;
break;
case HTML_FORM_INT:
if (parsedtag_get_value(tag, ATTR_FID, &form_id))
process_form_int(tag, form_id);
break;
case HTML_TEXTAREA_INT:
if (parsedtag_get_value(tag, ATTR_TEXTAREANUMBER,
&n_textarea)
&& n_textarea >= 0 && n_textarea < max_textarea) {
textarea_str[n_textarea] = Strnew();
}
else
n_textarea = -1;
break;
case HTML_N_TEXTAREA_INT:
/* the entry may never have been registered via <input_alt> */
if (n_textarea >= 0 && a_textarea && a_textarea[n_textarea]) {
FormItemList *item =
(FormItemList *)a_textarea[n_textarea]->url;
item->init_value = item->value =
textarea_str[n_textarea];
}
break;
#ifdef MENU_SELECT
case HTML_SELECT_INT:
if (parsedtag_get_value(tag, ATTR_SELECTNUMBER, &n_select)
&& n_select >= 0 && n_select < max_select) {
select_option[n_select].first = NULL;
select_option[n_select].last = NULL;
}
else
n_select = -1;
break;
case HTML_N_SELECT_INT:
/* the entry may never have been registered via <input_alt> */
if (n_select >= 0 && a_select && a_select[n_select]) {
FormItemList *item =
(FormItemList *)a_select[n_select]->url;
item->select_option = select_option[n_select].first;
chooseSelectOption(item, item->select_option);
item->init_selected = item->selected;
item->init_value = item->value;
item->init_label = item->label;
}
break;
case HTML_OPTION_INT:
if (n_select >= 0) {
int selected;
q = "";
parsedtag_get_value(tag, ATTR_LABEL, &q);
p = q;
parsedtag_get_value(tag, ATTR_VALUE, &p);
selected = parsedtag_exists(tag, ATTR_SELECTED);
addSelectOption(&select_option[n_select],
Strnew_charp(p), Strnew_charp(q),
selected);
}
break;
#endif
case HTML_TITLE_ALT:
if (parsedtag_get_value(tag, ATTR_TITLE, &p))
buf->buffername = html_unquote(p);
break;
case HTML_SYMBOL:
effect |= PC_SYMBOL;
if (parsedtag_get_value(tag, ATTR_TYPE, &p))
symbol = (char)atoi(p);
break;
case HTML_N_SYMBOL:
effect &= ~PC_SYMBOL;
break;
}
#ifdef ID_EXT
id = NULL;
if (parsedtag_get_value(tag, ATTR_ID, &id)) {
id = url_quote_conv(id, name_charset);
registerName(buf, id, currentLn(buf), pos);
}
if (renderFrameSet &&
parsedtag_get_value(tag, ATTR_FRAMENAME, &p)) {
p = url_quote_conv(p, buf->document_charset);
if (!idFrame || strcmp(idFrame->body->name, p)) {
idFrame = search_frame(renderFrameSet, p);
if (idFrame && idFrame->body->attr != F_BODY)
idFrame = NULL;
}
}
if (id && idFrame)
idFrame->body->nameList =
putAnchor(idFrame->body->nameList, id, NULL,
(Anchor **)NULL, NULL, NULL, '\0',
currentLn(buf), pos);
#endif /* ID_EXT */
}
}
/* end of processing for one line */
if (!internal)
addnewline(buf, outc, outp, NULL, pos, -1, nlines);
if (internal == HTML_N_INTERNAL)
internal = 0;
if (str != endp) {
line = Strsubstr(line, str - line->ptr, endp - str);
goto proc_again;
}
}
#ifdef DEBUG
if (w3m_debug)
fclose(debug);
#endif
for (form_id = 1; form_id <= form_max; form_id++)
if (forms[form_id])
forms[form_id]->next = forms[form_id - 1];
buf->formlist = (form_max >= 0) ? forms[form_max] : NULL;
if (n_textarea)
addMultirowsForm(buf, buf->formitem);
#ifdef USE_IMAGE
addMultirowsImg(buf, buf->img);
#endif
}
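/*
 * addLink: record a <link> element.  The href is resolved against the
 * buffer's base URL; rel= marks a forward link, rev= a reverse one,
 * and the entry is appended to the tail of buf->linklist.
 */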
static void
addLink(Buffer *buf, struct parsed_tag *tag)
{
char *href = NULL, *title = NULL, *ctype = NULL, *rel = NULL, *rev = NULL;
char type = LINK_TYPE_NONE;
LinkList *l;
parsedtag_get_value(tag, ATTR_HREF, &href);
if (href)
href = url_encode(remove_space(href), baseURL(buf),
buf->document_charset);
parsedtag_get_value(tag, ATTR_TITLE, &title);
parsedtag_get_value(tag, ATTR_TYPE, &ctype);
parsedtag_get_value(tag, ATTR_REL, &rel);
if (rel != NULL) {
/* forward link type */
type = LINK_TYPE_REL;
if (title == NULL)
title = rel;
}
parsedtag_get_value(tag, ATTR_REV, &rev);
if (rev != NULL) {
/* reverse link type */
type = LINK_TYPE_REV;
if (title == NULL)
title = rev;
}
l = New(LinkList);
l->url = href;
l->title = title;
l->ctype = ctype;
l->type = type;
l->next = NULL;
if (buf->linklist) {
LinkList *i;
for (i = buf->linklist; i->next; i = i->next) ;
i->next = l;
}
else
buf->linklist = l;
}
void
HTMLlineproc2(Buffer *buf, TextLineList *tl)
{
_tl_lp2 = tl->first;
HTMLlineproc2body(buf, textlist_feed, -1);
}
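/*
 * file_feed: feed function for HTMLlineproc3().  Reads one line from
 * the stream in _file_lp2; closes the stream and returns NULL at EOF.
 */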
static InputStream _file_lp2;
static Str
file_feed()
{
Str s;
s = StrISgets(_file_lp2);
if (s->length == 0) {
ISclose(_file_lp2);
return NULL;
}
return s;
}
void
HTMLlineproc3(Buffer *buf, InputStream stream)
{
_file_lp2 = stream;
HTMLlineproc2body(buf, file_feed, -1);
}
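/*
 * proc_escape: decode one character reference at *str_return and push
 * its expansion into obuf.  An unparsable reference is re-processed as
 * an ordinary character.
 */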
static void
proc_escape(struct readbuffer *obuf, char **str_return)
{
char *str = *str_return, *estr;
int ech = getescapechar(str_return);
int width, n_add = *str_return - str;
Lineprop mode = PC_ASCII;
if (ech < 0) {
*str_return = str;
proc_mchar(obuf, obuf->flag & RB_SPECIAL, 1, str_return, PC_ASCII);
return;
}
mode = IS_CNTRL(ech) ? PC_CTRL : PC_ASCII;
estr = conv_entity(ech);
check_breakpoint(obuf, obuf->flag & RB_SPECIAL, estr);
width = get_strwidth(estr);
if (width == 1 && ech == (unsigned char)*estr &&
ech != '&' && ech != '<' && ech != '>') {
if (IS_CNTRL(ech))
mode = PC_CTRL;
push_charp(obuf, width, estr, mode);
}
else
push_nchars(obuf, width, str, n_add, mode);
set_prevchar(obuf->prevchar, estr, strlen(estr));
obuf->prev_ctype = mode;
}
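/*
 * need_flushline: nonzero when the current output line has grown past
 * h_env->limit and should be flushed.  Outside RB_PRE_INT, a line that
 * currently ends in a space is not flushed.
 */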
static int
need_flushline(struct html_feed_environ *h_env, struct readbuffer *obuf,
Lineprop mode)
{
char ch;
if (obuf->flag & RB_PRE_INT) {
if (obuf->pos > h_env->limit)
return 1;
else
return 0;
}
ch = Strlastchar(obuf->line);
/* if (ch == ' ' && obuf->tag_sp > 0) */
if (ch == ' ')
return 0;
if (obuf->pos > h_env->limit)
return 1;
return 0;
}
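/*
 * table_width: width available for rendering the table at the given
 * nesting level; a top-level table with no explicit width gets the
 * remaining line width after indentation.
 */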
static int
table_width(struct html_feed_environ *h_env, int table_level)
{
int width;
if (table_level < 0)
return 0;
width = tables[table_level]->total_width;
if (table_level > 0 || width > 0)
return width;
return h_env->limit - h_env->envs[h_env->envc].indent;
}
/*
 * HTMLlineproc0: HTML processing first pass.  Tokenizes the input
 * line, routes table content to the table renderer, handles tags via
 * HTMLtagproc1(), and accumulates text with line breaking applied.
 */
void
HTMLlineproc0(char *line, struct html_feed_environ *h_env, int internal)
{
Lineprop mode;
int cmd;
struct readbuffer *obuf = h_env->obuf;
int indent, delta;
struct parsed_tag *tag;
Str tokbuf;
struct table *tbl = NULL;
struct table_mode *tbl_mode = NULL;
int tbl_width = 0;
#ifdef USE_M17N
int is_hangul, prev_is_hangul = 0;
#endif
#ifdef DEBUG
if (w3m_debug) {
FILE *f = fopen("zzzproc1", "a");
fprintf(f, "%c%c%c%c",
(obuf->flag & RB_PREMODE) ? 'P' : ' ',
(obuf->table_level >= 0) ? 'T' : ' ',
(obuf->flag & RB_INTXTA) ? 'X' : ' ',
(obuf->flag & (RB_SCRIPT | RB_STYLE)) ? 'S' : ' ');
fprintf(f, "HTMLlineproc1(\"%s\",%d,%lx)\n", line, h_env->limit,
(unsigned long)h_env);
fclose(f);
}
#endif
tokbuf = Strnew();
table_start:
if (obuf->table_level >= 0) {
int level = min(obuf->table_level, MAX_TABLE - 1);
tbl = tables[level];
tbl_mode = &table_mode[level];
tbl_width = table_width(h_env, level);
}
while (*line != '\0') {
char *str, *p;
int is_tag = FALSE;
int pre_mode = (obuf->table_level >= 0) ? tbl_mode->pre_mode :
obuf->flag;
int end_tag = (obuf->table_level >= 0) ? tbl_mode->end_tag :
obuf->end_tag;
if (*line == '<' || obuf->status != R_ST_NORMAL) {
/*
* Tag processing
*/
if (obuf->status == R_ST_EOL)
obuf->status = R_ST_NORMAL;
else {
read_token(h_env->tagbuf, &line, &obuf->status,
pre_mode & RB_PREMODE, obuf->status != R_ST_NORMAL);
if (obuf->status != R_ST_NORMAL)
return;
}
if (h_env->tagbuf->length == 0)
continue;
str = h_env->tagbuf->ptr;
if (*str == '<') {
if (str[1] && REALLY_THE_BEGINNING_OF_A_TAG(str))
is_tag = TRUE;
else if (!(pre_mode & (RB_PLAIN | RB_INTXTA | RB_INSELECT |
RB_SCRIPT | RB_STYLE | RB_TITLE))) {
line = Strnew_m_charp(str + 1, line, NULL)->ptr;
str = "<";
}
}
}
else {
read_token(tokbuf, &line, &obuf->status, pre_mode & RB_PREMODE, 0);
if (obuf->status != R_ST_NORMAL) /* R_ST_AMP ? */
obuf->status = R_ST_NORMAL;
str = tokbuf->ptr;
}
if (pre_mode & (RB_PLAIN | RB_INTXTA | RB_INSELECT | RB_SCRIPT |
RB_STYLE | RB_TITLE)) {
if (is_tag) {
p = str;
if ((tag = parse_tag(&p, internal))) {
if (tag->tagid == end_tag ||
(pre_mode & RB_INSELECT && tag->tagid == HTML_N_FORM)
|| (pre_mode & RB_TITLE
&& (tag->tagid == HTML_N_HEAD
|| tag->tagid == HTML_BODY)))
goto proc_normal;
}
}
/* title */
if (pre_mode & RB_TITLE) {
feed_title(str);
continue;
}
/* select */
if (pre_mode & RB_INSELECT) {
if (obuf->table_level >= 0)
goto proc_normal;
feed_select(str);
continue;
}
if (is_tag) {
if (strncmp(str, "<!--", 4) && (p = strchr(str + 1, '<'))) {
str = Strnew_charp_n(str, p - str)->ptr;
line = Strnew_m_charp(p, line, NULL)->ptr;
}
is_tag = FALSE;
}
if (obuf->table_level >= 0)
goto proc_normal;
/* textarea */
if (pre_mode & RB_INTXTA) {
feed_textarea(str);
continue;
}
/* script */
if (pre_mode & RB_SCRIPT)
continue;
/* style */
if (pre_mode & RB_STYLE)
continue;
}
proc_normal:
if (obuf->table_level >= 0) {
/*
 * Within a table (<table>..</table>), all input tokens are fed
 * to the table renderer; when the table ends, the renderer
 * emits HTML output for the completed table.
 */
switch (feed_table(tbl, str, tbl_mode, tbl_width, internal)) {
case 0:
/* </table> tag */
obuf->table_level--;
if (obuf->table_level >= MAX_TABLE - 1)
continue;
end_table(tbl);
if (obuf->table_level >= 0) {
struct table *tbl0 = tables[obuf->table_level];
str = Sprintf("<table_alt tid=%d>", tbl0->ntable)->ptr;
pushTable(tbl0, tbl);
tbl = tbl0;
tbl_mode = &table_mode[obuf->table_level];
tbl_width = table_width(h_env, obuf->table_level);
feed_table(tbl, str, tbl_mode, tbl_width, TRUE);
continue;
/* continue to the next */
}
if (obuf->flag & RB_DEL)
continue;
/* all tables have been read */
if (tbl->vspace > 0 && !(obuf->flag & RB_IGNORE_P)) {
int indent = h_env->envs[h_env->envc].indent;
flushline(h_env, obuf, indent, 0, h_env->limit);
do_blankline(h_env, obuf, indent, 0, h_env->limit);
}
save_fonteffect(h_env, obuf);
renderTable(tbl, tbl_width, h_env);
restore_fonteffect(h_env, obuf);
obuf->flag &= ~RB_IGNORE_P;
if (tbl->vspace > 0) {
int indent = h_env->envs[h_env->envc].indent;
do_blankline(h_env, obuf, indent, 0, h_env->limit);
obuf->flag |= RB_IGNORE_P;
}
set_space_to_prevchar(obuf->prevchar);
continue;
case 1:
/* <table> tag */
break;
default:
continue;
}
}
if (is_tag) {
/*** Beginning of a new tag ***/
if ((tag = parse_tag(&str, internal)))
cmd = tag->tagid;
else
continue;
/* process tags */
if (HTMLtagproc1(tag, h_env) == 0) {
/* preserve the tag for second-stage processing */
if (parsedtag_need_reconstruct(tag))
h_env->tagbuf = parsedtag2str(tag);
push_tag(obuf, h_env->tagbuf->ptr, cmd);
}
#ifdef ID_EXT
else {
process_idattr(obuf, cmd, tag);
}
#endif /* ID_EXT */
obuf->bp.init_flag = 1;
clear_ignore_p_flag(cmd, obuf);
if (cmd == HTML_TABLE)
goto table_start;
else
continue;
}
if (obuf->flag & (RB_DEL | RB_S))
continue;
while (*str) {
mode = get_mctype(str);
delta = get_mcwidth(str);
if (obuf->flag & (RB_SPECIAL & ~RB_NOBR)) {
char ch = *str;
if (!(obuf->flag & RB_PLAIN) && (*str == '&')) {
char *p = str;
int ech = getescapechar(&p);
if (ech == '\n' || ech == '\r') {
ch = '\n';
str = p - 1;
}
else if (ech == '\t') {
ch = '\t';
str = p - 1;
}
}
if (ch != '\n')
obuf->flag &= ~RB_IGNORE_P;
if (ch == '\n') {
str++;
if (obuf->flag & RB_IGNORE_P) {
obuf->flag &= ~RB_IGNORE_P;
continue;
}
if (obuf->flag & RB_PRE_INT)
PUSH(' ');
else
flushline(h_env, obuf, h_env->envs[h_env->envc].indent,
1, h_env->limit);
}
else if (ch == '\t') {
do {
PUSH(' ');
} while ((h_env->envs[h_env->envc].indent + obuf->pos)
% Tabstop != 0);
str++;
}
else if (obuf->flag & RB_PLAIN) {
char *p = html_quote_char(*str);
if (p) {
push_charp(obuf, 1, p, PC_ASCII);
str++;
}
else {
proc_mchar(obuf, 1, delta, &str, mode);
}
}
else {
if (*str == '&')
proc_escape(obuf, &str);
else
proc_mchar(obuf, 1, delta, &str, mode);
}
if (obuf->flag & (RB_SPECIAL & ~RB_PRE_INT))
continue;
}
else {
if (!IS_SPACE(*str))
obuf->flag &= ~RB_IGNORE_P;
if ((mode == PC_ASCII || mode == PC_CTRL) && IS_SPACE(*str)) {
if (*obuf->prevchar->ptr != ' ') {
PUSH(' ');
}
str++;
}
else {
#ifdef USE_M17N
if (mode == PC_KANJI1)
is_hangul = wtf_is_hangul((wc_uchar *) str);
else
is_hangul = 0;
if (!SimplePreserveSpace && mode == PC_KANJI1 &&
!is_hangul && !prev_is_hangul &&
obuf->pos > h_env->envs[h_env->envc].indent &&
Strlastchar(obuf->line) == ' ') {
while (obuf->line->length >= 2 &&
!strncmp(obuf->line->ptr + obuf->line->length -
2, " ", 2)
&& obuf->pos >= h_env->envs[h_env->envc].indent) {
Strshrink(obuf->line, 1);
obuf->pos--;
}
if (obuf->line->length >= 3 &&
obuf->prev_ctype == PC_KANJI1 &&
Strlastchar(obuf->line) == ' ' &&
obuf->pos >= h_env->envs[h_env->envc].indent) {
Strshrink(obuf->line, 1);
obuf->pos--;
}
}
prev_is_hangul = is_hangul;
#endif
if (*str == '&')
proc_escape(obuf, &str);
else
proc_mchar(obuf, obuf->flag & RB_SPECIAL, delta, &str,
mode);
}
}
if (need_flushline(h_env, obuf, mode)) {
char *bp = obuf->line->ptr + obuf->bp.len;
char *tp = bp - obuf->bp.tlen;
int i = 0;
if (tp > obuf->line->ptr && tp[-1] == ' ')
i = 1;
indent = h_env->envs[h_env->envc].indent;
if (obuf->bp.pos - i > indent) {
Str line;
append_tags(obuf);
line = Strnew_charp(bp);
Strshrink(obuf->line, obuf->line->length - obuf->bp.len);
#ifdef FORMAT_NICE
if (obuf->pos - i > h_env->limit)
obuf->flag |= RB_FILL;
#endif /* FORMAT_NICE */
back_to_breakpoint(obuf);
flushline(h_env, obuf, indent, 0, h_env->limit);
#ifdef FORMAT_NICE
obuf->flag &= ~RB_FILL;
#endif /* FORMAT_NICE */
HTMLlineproc1(line->ptr, h_env);
}
}
}
}
if (!(obuf->flag & (RB_SPECIAL | RB_INTXTA | RB_INSELECT))) {
char *tp;
int i = 0;
if (obuf->bp.pos == obuf->pos) {
tp = &obuf->line->ptr[obuf->bp.len - obuf->bp.tlen];
}
else {
tp = &obuf->line->ptr[obuf->line->length];
}
if (tp > obuf->line->ptr && tp[-1] == ' ')
i = 1;
indent = h_env->envs[h_env->envc].indent;
if (obuf->pos - i > h_env->limit) {
#ifdef FORMAT_NICE
obuf->flag |= RB_FILL;
#endif /* FORMAT_NICE */
flushline(h_env, obuf, indent, 0, h_env->limit);
#ifdef FORMAT_NICE
obuf->flag &= ~RB_FILL;
#endif /* FORMAT_NICE */
}
}
}
extern char *NullLine;
extern Lineprop NullProp[];
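/*
 * Without ANSI color support the Linecolor argument is compiled away:
 * the macro renames both the definition and every call below to a
 * five-argument _addnewline2(), textually dropping the color slot.
 */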
#ifndef USE_ANSI_COLOR
#define addnewline2(a,b,c,d,e,f) _addnewline2(a,b,c,e,f)
#endif
static void
addnewline2(Buffer *buf, char *line, Lineprop *prop, Linecolor *color, int pos,
int nlines)
{
Line *l;
l = New(Line);
l->next = NULL;
l->lineBuf = line;
l->propBuf = prop;
#ifdef USE_ANSI_COLOR
l->colorBuf = color;
#endif
l->len = pos;
l->width = -1;
l->size = pos;
l->bpos = 0;
l->bwidth = 0;
l->prev = buf->currentLine;
if (buf->currentLine) {
l->next = buf->currentLine->next;
buf->currentLine->next = l;
}
else
l->next = NULL;
if (buf->lastLine == NULL || buf->lastLine == buf->currentLine)
buf->lastLine = l;
buf->currentLine = l;
if (buf->firstLine == NULL)
buf->firstLine = l;
l->linenumber = ++buf->allLine;
if (nlines < 0) {
/* l->real_linenumber = l->linenumber; */
l->real_linenumber = 0;
}
else {
l->real_linenumber = nlines;
}
l = NULL;
}
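/*
 * addnewline: copy one rendered line into the buffer, then fold it at
 * `width' columns.  Continuation pieces are linked through their
 * bpos/bwidth fields so the logical line can be reassembled.
 */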
static void
addnewline(Buffer *buf, char *line, Lineprop *prop, Linecolor *color, int pos,
int width, int nlines)
{
char *s;
Lineprop *p;
#ifdef USE_ANSI_COLOR
Linecolor *c;
#endif
Line *l;
int i, bpos, bwidth;
if (pos > 0) {
s = allocStr(line, pos);
p = NewAtom_N(Lineprop, pos);
bcopy((void *)prop, (void *)p, pos * sizeof(Lineprop));
}
else {
s = NullLine;
p = NullProp;
}
#ifdef USE_ANSI_COLOR
if (pos > 0 && color) {
c = NewAtom_N(Linecolor, pos);
bcopy((void *)color, (void *)c, pos * sizeof(Linecolor));
}
else {
c = NULL;
}
#endif
addnewline2(buf, s, p, c, pos, nlines);
if (pos <= 0 || width <= 0)
return;
bpos = 0;
bwidth = 0;
while (1) {
l = buf->currentLine;
l->bpos = bpos;
l->bwidth = bwidth;
i = columnLen(l, width);
if (i == 0) {
i++;
#ifdef USE_M17N
while (i < l->len && p[i] & PC_WCHAR2)
i++;
#endif
}
l->len = i;
l->width = COLPOS(l, l->len);
if (pos <= i)
return;
bpos += l->len;
bwidth += l->width;
s += i;
p += i;
#ifdef USE_ANSI_COLOR
if (c)
c += i;
#endif
pos -= i;
addnewline2(buf, s, p, c, pos, nlines);
}
}
/*
* loadHTMLBuffer: read file and make new buffer
*/
Buffer *
loadHTMLBuffer(URLFile *f, Buffer *newBuf)
{
FILE *src = NULL;
Str tmp;
if (newBuf == NULL)
newBuf = newBuffer(INIT_BUFFER_WIDTH);
if (newBuf->sourcefile == NULL &&
(f->scheme != SCM_LOCAL || newBuf->mailcap)) {
tmp = tmpfname(TMPF_SRC, ".html");
src = fopen(tmp->ptr, "w");
if (src)
newBuf->sourcefile = tmp->ptr;
}
loadHTMLstream(f, newBuf, src, newBuf->bufferprop & BP_FRAME);
newBuf->topLine = newBuf->firstLine;
newBuf->lastLine = newBuf->currentLine;
newBuf->currentLine = newBuf->firstLine;
if (n_textarea)
formResetBuffer(newBuf, newBuf->formitem);
if (src)
fclose(src);
return newBuf;
}
static char *_size_unit[] = { "b", "kb", "Mb", "Gb", "Tb",
"Pb", "Eb", "Zb", "Bb", "Yb", NULL
};
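/*
 * convert_size: format a byte count with a 1024-based unit suffix;
 * usefloat selects three significant digits (%.3g) instead of a whole
 * number (%.0f).  For example, convert_size(123456, 1) should yield
 * "121kb".
 */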
char *
convert_size(clen_t size, int usefloat)
{
float csize;
int sizepos = 0;
char **sizes = _size_unit;
csize = (float)size;
while (csize >= 999.495 && sizes[sizepos + 1]) {
csize = csize / 1024.0;
sizepos++;
}
return Sprintf(usefloat ? "%.3g%s" : "%.0f%s",
floor(csize * 100.0 + 0.5) / 100.0, sizes[sizepos])->ptr;
}
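/*
 * convert_size2: like convert_size(), but formats "size1/size2" with a
 * single unit chosen from the larger of the two values.
 */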
char *
convert_size2(clen_t size1, clen_t size2, int usefloat)
{
char **sizes = _size_unit;
float csize, factor = 1;
int sizepos = 0;
csize = (float)((size1 > size2) ? size1 : size2);
while (csize / factor >= 999.495 && sizes[sizepos + 1]) {
factor *= 1024.0;
sizepos++;
}
return Sprintf(usefloat ? "%.3g/%.3g%s" : "%.0f/%.0f%s",
floor(size1 / factor * 100.0 + 0.5) / 100.0,
floor(size2 / factor * 100.0 + 0.5) / 100.0,
sizes[sizepos])->ptr;
}
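/*
 * showProgress: update the transfer status on the last screen line.
 * With a known content length it prints size/total, percentage, rate
 * and ETA, plus a progress bar starting at a fixed column; otherwise
 * only the amount loaded and the rate.
 */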
void
showProgress(clen_t * linelen, clen_t * trbyte)
{
int i, j, rate, duration, eta, pos;
static time_t last_time, start_time;
time_t cur_time;
Str messages;
char *fmtrbyte, *fmrate;
if (!fmInitialized)
return;
if (*linelen < 1024)
return;
if (current_content_length > 0) {
double ratio;
cur_time = time(0);
if (*trbyte == 0) {
move(LASTLINE, 0);
clrtoeolx();
start_time = cur_time;
}
*trbyte += *linelen;
*linelen = 0;
if (cur_time == last_time)
return;
last_time = cur_time;
move(LASTLINE, 0);
ratio = 100.0 * (*trbyte) / current_content_length;
fmtrbyte = convert_size2(*trbyte, current_content_length, 1);
duration = cur_time - start_time;
if (duration) {
rate = *trbyte / duration;
fmrate = convert_size(rate, 1);
eta = rate ? (current_content_length - *trbyte) / rate : -1;
messages = Sprintf("%11s %3.0f%% "
"%7s/s "
"eta %02d:%02d:%02d ",
fmtrbyte, ratio,
fmrate,
eta / (60 * 60), (eta / 60) % 60, eta % 60);
}
else {
messages = Sprintf("%11s %3.0f%% ",
fmtrbyte, ratio);
}
addstr(messages->ptr);
pos = 42;
i = pos + (COLS - pos - 1) * (*trbyte) / current_content_length;
move(LASTLINE, pos);
standout();
addch(' ');
for (j = pos + 1; j <= i; j++)
addch('|');
standend();
/* no_clrtoeol(); */
refresh();
}
else {
cur_time = time(0);
if (*trbyte == 0) {
move(LASTLINE, 0);
clrtoeolx();
start_time = cur_time;
}
*trbyte += *linelen;
*linelen = 0;
if (cur_time == last_time)
return;
last_time = cur_time;
move(LASTLINE, 0);
fmtrbyte = convert_size(*trbyte, 1);
duration = cur_time - start_time;
if (duration) {
fmrate = convert_size(*trbyte / duration, 1);
messages = Sprintf("%7s loaded %7s/s", fmtrbyte, fmrate);
}
else {
messages = Sprintf("%7s loaded", fmtrbyte);
}
message(messages->ptr, 0, 0);
refresh();
}
}
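/* init_henv: reset an html_feed_environ/readbuffer pair for a fresh parse */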
void
init_henv(struct html_feed_environ *h_env, struct readbuffer *obuf,
struct environment *envs, int nenv, TextLineList *buf,
int limit, int indent)
{
envs[0].indent = indent;
obuf->line = Strnew();
obuf->cprop = 0;
obuf->pos = 0;
obuf->prevchar = Strnew_size(8);
set_space_to_prevchar(obuf->prevchar);
obuf->flag = RB_IGNORE_P;
obuf->flag_sp = 0;
obuf->status = R_ST_NORMAL;
obuf->table_level = -1;
obuf->nobr_level = 0;
bzero((void *)&obuf->anchor, sizeof(obuf->anchor));
obuf->img_alt = 0;
obuf->input_alt.hseq = 0;
obuf->input_alt.fid = -1;
obuf->input_alt.in = 0;
obuf->input_alt.type = NULL;
obuf->input_alt.name = NULL;
obuf->input_alt.value = NULL;
obuf->in_bold = 0;
obuf->in_italic = 0;
obuf->in_under = 0;
obuf->in_strike = 0;
obuf->in_ins = 0;
obuf->prev_ctype = PC_ASCII;
obuf->tag_sp = 0;
obuf->fontstat_sp = 0;
obuf->top_margin = 0;
obuf->bottom_margin = 0;
obuf->bp.init_flag = 1;
set_breakpoint(obuf, 0);
h_env->buf = buf;
h_env->f = NULL;
h_env->obuf = obuf;
h_env->tagbuf = Strnew();
h_env->limit = limit;
h_env->maxlimit = 0;
h_env->envs = envs;
h_env->nenv = nenv;
h_env->envc = 0;
h_env->envc_real = 0;
h_env->title = NULL;
h_env->blank_lines = 0;
}
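/*
 * completeHTMLstream: close any anchors, font effects, textarea,
 * select, title and table contexts still open at end of input, so the
 * generated stream is balanced for the second pass.
 */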
void
completeHTMLstream(struct html_feed_environ *h_env, struct readbuffer *obuf)
{
close_anchor(h_env, obuf);
if (obuf->img_alt) {
push_tag(obuf, "</img_alt>", HTML_N_IMG_ALT);
obuf->img_alt = NULL;
}
if (obuf->input_alt.in) {
push_tag(obuf, "</input_alt>", HTML_N_INPUT_ALT);
obuf->input_alt.hseq = 0;
obuf->input_alt.fid = -1;
obuf->input_alt.in = 0;
obuf->input_alt.type = NULL;
obuf->input_alt.name = NULL;
obuf->input_alt.value = NULL;
}
if (obuf->in_bold) {
push_tag(obuf, "</b>", HTML_N_B);
obuf->in_bold = 0;
}
if (obuf->in_italic) {
push_tag(obuf, "</i>", HTML_N_I);
obuf->in_italic = 0;
}
if (obuf->in_under) {
push_tag(obuf, "</u>", HTML_N_U);
obuf->in_under = 0;
}
if (obuf->in_strike) {
push_tag(obuf, "</s>", HTML_N_S);
obuf->in_strike = 0;
}
if (obuf->in_ins) {
push_tag(obuf, "</ins>", HTML_N_INS);
obuf->in_ins = 0;
}
if (obuf->flag & RB_INTXTA)
HTMLlineproc1("</textarea>", h_env);
/* for unbalanced select tag */
if (obuf->flag & RB_INSELECT)
HTMLlineproc1("</select>", h_env);
if (obuf->flag & RB_TITLE)
HTMLlineproc1("</title>", h_env);
/* for unbalanced table tag */
if (obuf->table_level >= MAX_TABLE)
obuf->table_level = MAX_TABLE - 1;
while (obuf->table_level >= 0) {
table_mode[obuf->table_level].pre_mode
&= ~(TBLM_SCRIPT | TBLM_STYLE | TBLM_PLAIN);
HTMLlineproc1("</table>", h_env);
}
}
static void
print_internal_information(struct html_feed_environ *henv)
{
int i;
Str s;
TextLineList *tl = newTextLineList();
s = Strnew_charp("<internal>");
pushTextLine(tl, newTextLine(s, 0));
if (henv->title) {
s = Strnew_m_charp("<title_alt title=\"",
html_quote(henv->title), "\">", NULL);
pushTextLine(tl, newTextLine(s, 0));
}
#if 0
if (form_max >= 0) {
FormList *fp;
for (i = 0; i <= form_max; i++) {
fp = forms[i];
s = Sprintf("<form_int fid=\"%d\" action=\"%s\" method=\"%s\"",
i, html_quote(fp->action->ptr),
(fp->method == FORM_METHOD_POST) ? "post"
: ((fp->method ==
FORM_METHOD_INTERNAL) ? "internal" : "get"));
if (fp->target)
Strcat(s, Sprintf(" target=\"%s\"", html_quote(fp->target)));
if (fp->enctype == FORM_ENCTYPE_MULTIPART)
Strcat_charp(s, " enctype=\"multipart/form-data\"");
#ifdef USE_M17N
if (fp->charset)
Strcat(s, Sprintf(" accept-charset=\"%s\"",
html_quote(fp->charset)));
#endif
Strcat_charp(s, ">");
pushTextLine(tl, newTextLine(s, 0));
}
}
#endif
#ifdef MENU_SELECT
if (n_select > 0) {
FormSelectOptionItem *ip;
for (i = 0; i < n_select; i++) {
s = Sprintf("<select_int selectnumber=%d>", i);
pushTextLine(tl, newTextLine(s, 0));
for (ip = select_option[i].first; ip; ip = ip->next) {
s = Sprintf("<option_int value=\"%s\" label=\"%s\"%s>",
html_quote(ip->value ? ip->value->ptr :
ip->label->ptr),
html_quote(ip->label->ptr),
ip->checked ? " selected" : "");
pushTextLine(tl, newTextLine(s, 0));
}
s = Strnew_charp("</select_int>");
pushTextLine(tl, newTextLine(s, 0));
}
}
#endif /* MENU_SELECT */
if (n_textarea > 0) {
for (i = 0; i < n_textarea; i++) {
s = Sprintf("<textarea_int textareanumber=%d>", i);
pushTextLine(tl, newTextLine(s, 0));
s = Strnew_charp(html_quote(textarea_str[i]->ptr));
Strcat_charp(s, "</textarea_int>");
pushTextLine(tl, newTextLine(s, 0));
}
}
s = Strnew_charp("</internal>");
pushTextLine(tl, newTextLine(s, 0));
if (henv->buf)
appendTextLineList(henv->buf, tl);
else if (henv->f) {
TextLineListItem *p;
for (p = tl->first; p; p = p->next)
fprintf(henv->f, "%s\n", Str_conv_to_halfdump(p->ptr->line)->ptr);
}
}
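/*
 * loadHTMLstream: drive both rendering passes.  Each input line is
 * converted and fed to HTMLlineproc0() (pass 1); the intermediate
 * lines are then handed to HTMLlineproc2() (pass 2), unless w3m is
 * dumping the intermediate form itself.
 */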
void
loadHTMLstream(URLFile *f, Buffer *newBuf, FILE * src, int internal)
{
struct environment envs[MAX_ENV_LEVEL];
clen_t linelen = 0;
clen_t trbyte = 0;
Str lineBuf2 = Strnew();
#ifdef USE_M17N
wc_ces charset = WC_CES_US_ASCII;
wc_ces volatile doc_charset = DocumentCharset;
#endif
struct html_feed_environ htmlenv1;
struct readbuffer obuf;
#ifdef USE_IMAGE
int volatile image_flag;
#endif
MySignalHandler(*volatile prevtrap) (SIGNAL_ARG) = NULL;
#ifdef USE_M17N
if (fmInitialized && graph_ok()) {
symbol_width = symbol_width0 = 1;
}
else {
symbol_width0 = 0;
get_symbol(DisplayCharset, &symbol_width0);
symbol_width = WcOption.use_wide ? symbol_width0 : 1;
}
#else
symbol_width = symbol_width0 = 1;
#endif
cur_title = NULL;
n_textarea = 0;
cur_textarea = NULL;
max_textarea = MAX_TEXTAREA;
textarea_str = New_N(Str, max_textarea);
#ifdef MENU_SELECT
n_select = 0;
max_select = MAX_SELECT;
select_option = New_N(FormSelectOption, max_select);
#endif /* MENU_SELECT */
cur_select = NULL;
form_sp = -1;
form_max = -1;
forms_size = 0;
forms = NULL;
cur_hseq = 1;
#ifdef USE_IMAGE
cur_iseq = 1;
if (newBuf->image_flag)
image_flag = newBuf->image_flag;
else if (activeImage && displayImage && autoImage)
image_flag = IMG_FLAG_AUTO;
else
image_flag = IMG_FLAG_SKIP;
#endif
if (w3m_halfload) {
newBuf->buffername = "---";
#ifdef USE_M17N
newBuf->document_charset = InnerCharset;
#endif
max_textarea = 0;
#ifdef MENU_SELECT
max_select = 0;
#endif
HTMLlineproc3(newBuf, f->stream);
w3m_halfload = FALSE;
return;
}
init_henv(&htmlenv1, &obuf, envs, MAX_ENV_LEVEL, NULL, newBuf->width, 0);
if (w3m_halfdump)
htmlenv1.f = stdout;
else
htmlenv1.buf = newTextLineList();
#if defined(USE_M17N) || defined(USE_IMAGE)
cur_baseURL = baseURL(newBuf);
#endif
if (SETJMP(AbortLoading) != 0) {
HTMLlineproc1("<br>Transfer Interrupted!<br>", &htmlenv1);
goto phase2;
}
TRAP_ON;
#ifdef USE_M17N
if (newBuf != NULL) {
if (newBuf->bufferprop & BP_FRAME)
charset = InnerCharset;
else if (newBuf->document_charset)
charset = doc_charset = newBuf->document_charset;
}
if (content_charset && UseContentCharset)
doc_charset = content_charset;
else if (f->guess_type && !strcasecmp(f->guess_type, "application/xhtml+xml"))
doc_charset = WC_CES_UTF_8;
meta_charset = 0;
#endif
#if 0
do_blankline(&htmlenv1, &obuf, 0, 0, htmlenv1.limit);
obuf.flag = RB_IGNORE_P;
#endif
if (IStype(f->stream) != IST_ENCODED)
f->stream = newEncodedStream(f->stream, f->encoding);
while ((lineBuf2 = StrmyUFgets(f))->length) {
#ifdef USE_NNTP
if (f->scheme == SCM_NEWS && lineBuf2->ptr[0] == '.') {
Strshrinkfirst(lineBuf2, 1);
if (lineBuf2->ptr[0] == '\n' || lineBuf2->ptr[0] == '\r' ||
lineBuf2->ptr[0] == '\0') {
/*
* iseos(f->stream) = TRUE;
*/
break;
}
}
#endif /* USE_NNTP */
if (src)
Strfputs(lineBuf2, src);
linelen += lineBuf2->length;
if (w3m_dump & DUMP_EXTRA)
printf("W3m-in-progress: %s\n", convert_size2(linelen, current_content_length, TRUE));
if (w3m_dump & DUMP_SOURCE)
continue;
showProgress(&linelen, &trbyte);
/*
* if (frame_source)
* continue;
*/
#ifdef USE_M17N
if (meta_charset) { /* <META> */
if (content_charset == 0 && UseContentCharset) {
doc_charset = meta_charset;
charset = WC_CES_US_ASCII;
}
meta_charset = 0;
}
#endif
lineBuf2 = convertLine(f, lineBuf2, HTML_MODE, &charset, doc_charset);
#ifdef USE_M17N
cur_document_charset = charset;
#endif
HTMLlineproc0(lineBuf2->ptr, &htmlenv1, internal);
}
if (obuf.status != R_ST_NORMAL) {
HTMLlineproc0("\n", &htmlenv1, internal);
}
obuf.status = R_ST_NORMAL;
completeHTMLstream(&htmlenv1, &obuf);
flushline(&htmlenv1, &obuf, 0, 2, htmlenv1.limit);
#if defined(USE_M17N) || defined(USE_IMAGE)
cur_baseURL = NULL;
#endif
#ifdef USE_M17N
cur_document_charset = 0;
#endif
if (htmlenv1.title)
newBuf->buffername = htmlenv1.title;
if (w3m_halfdump) {
TRAP_OFF;
print_internal_information(&htmlenv1);
return;
}
if (w3m_backend) {
TRAP_OFF;
print_internal_information(&htmlenv1);
backend_halfdump_buf = htmlenv1.buf;
return;
}
phase2:
newBuf->trbyte = trbyte + linelen;
TRAP_OFF;
#ifdef USE_M17N
if (!(newBuf->bufferprop & BP_FRAME))
newBuf->document_charset = charset;
#endif
#ifdef USE_IMAGE
newBuf->image_flag = image_flag;
#endif
HTMLlineproc2(newBuf, htmlenv1.buf);
}
/*
* loadHTMLString: read string and make new buffer
*/
Buffer *
loadHTMLString(Str page)
{
URLFile f;
MySignalHandler(*volatile prevtrap) (SIGNAL_ARG) = NULL;
Buffer *newBuf;
init_stream(&f, SCM_LOCAL, newStrStream(page));
newBuf = newBuffer(INIT_BUFFER_WIDTH);
if (SETJMP(AbortLoading) != 0) {
TRAP_OFF;
discardBuffer(newBuf);
UFclose(&f);
return NULL;
}
TRAP_ON;
#ifdef USE_M17N
newBuf->document_charset = InnerCharset;
#endif
loadHTMLstream(&f, newBuf, NULL, TRUE);
#ifdef USE_M17N
newBuf->document_charset = WC_CES_US_ASCII;
#endif
TRAP_OFF;
UFclose(&f);
newBuf->topLine = newBuf->firstLine;
newBuf->lastLine = newBuf->currentLine;
newBuf->currentLine = newBuf->firstLine;
newBuf->type = "text/html";
newBuf->real_type = newBuf->type;
if (n_textarea)
formResetBuffer(newBuf, newBuf->formitem);
return newBuf;
}
#ifdef USE_GOPHER
/*
* loadGopherDir: get gopher directory
*/
Str
loadGopherDir(URLFile *uf, ParsedURL *pu, wc_ces * charset)
{
Str volatile tmp;
Str lbuf, name, file, host, port;
char *volatile p, *volatile q;
MySignalHandler(*volatile prevtrap) (SIGNAL_ARG) = NULL;
#ifdef USE_M17N
wc_ces doc_charset = DocumentCharset;
#endif
tmp = parsedURL2Str(pu);
p = html_quote(tmp->ptr);
tmp =
convertLine(NULL, Strnew_charp(file_unquote(tmp->ptr)), RAW_MODE,
charset, doc_charset);
q = html_quote(tmp->ptr);
tmp = Strnew_m_charp("<html>\n<head>\n<base href=\"", p, "\">\n<title>", q,
"</title>\n</head>\n<body>\n<h1>Index of ", q,
"</h1>\n<table>\n", NULL);
if (SETJMP(AbortLoading) != 0)
goto gopher_end;
TRAP_ON;
while (1) {
if (lbuf = StrUFgets(uf), lbuf->length == 0)
break;
if (lbuf->ptr[0] == '.' &&
(lbuf->ptr[1] == '\n' || lbuf->ptr[1] == '\r'))
break;
lbuf = convertLine(uf, lbuf, HTML_MODE, charset, doc_charset);
p = lbuf->ptr;
for (q = p; *q && *q != '\t'; q++) ;
name = Strnew_charp_n(p, q - p);
if (!*q)
continue;
p = q + 1;
for (q = p; *q && *q != '\t'; q++) ;
file = Strnew_charp_n(p, q - p);
if (!*q)
continue;
p = q + 1;
for (q = p; *q && *q != '\t'; q++) ;
host = Strnew_charp_n(p, q - p);
if (!*q)
continue;
p = q + 1;
for (q = p; *q && *q != '\t' && *q != '\r' && *q != '\n'; q++) ;
port = Strnew_charp_n(p, q - p);
switch (name->ptr[0]) {
case '0':
p = "[text file]";
break;
case '1':
p = "[directory]";
break;
case 'm':
p = "[message]";
break;
case 's':
p = "[sound]";
break;
case 'g':
p = "[gif]";
break;
case 'h':
p = "[HTML]";
break;
default:
p = "[unsupported]";
break;
}
q = Strnew_m_charp("gopher://", host->ptr, ":", port->ptr,
"/", file->ptr, NULL)->ptr;
Strcat_m_charp(tmp, "<a href=\"",
html_quote(url_encode(q, NULL, *charset)),
"\">", p, html_quote(name->ptr + 1), "</a>\n", NULL);
}
gopher_end:
TRAP_OFF;
Strcat_charp(tmp, "</table>\n</body>\n</html>\n");
return tmp;
}
#endif /* USE_GOPHER */
/*
* loadBuffer: read file and make new buffer
*/
Buffer *
loadBuffer(URLFile *uf, Buffer *volatile newBuf)
{
FILE *volatile src = NULL;
#ifdef USE_M17N
wc_ces charset = WC_CES_US_ASCII;
wc_ces volatile doc_charset = DocumentCharset;
#endif
Str lineBuf2;
volatile char pre_lbuf = '\0';
int nlines;
Str tmpf;
clen_t linelen = 0, trbyte = 0;
Lineprop *propBuffer = NULL;
#ifdef USE_ANSI_COLOR
Linecolor *colorBuffer = NULL;
#endif
MySignalHandler(*volatile prevtrap) (SIGNAL_ARG) = NULL;
if (newBuf == NULL)
newBuf = newBuffer(INIT_BUFFER_WIDTH);
if (SETJMP(AbortLoading) != 0) {
goto _end;
}
TRAP_ON;
if (newBuf->sourcefile == NULL &&
(uf->scheme != SCM_LOCAL || newBuf->mailcap)) {
tmpf = tmpfname(TMPF_SRC, NULL);
src = fopen(tmpf->ptr, "w");
if (src)
newBuf->sourcefile = tmpf->ptr;
}
#ifdef USE_M17N
if (newBuf->document_charset)
charset = doc_charset = newBuf->document_charset;
if (content_charset && UseContentCharset)
doc_charset = content_charset;
#endif
nlines = 0;
if (IStype(uf->stream) != IST_ENCODED)
uf->stream = newEncodedStream(uf->stream, uf->encoding);
while ((lineBuf2 = StrmyISgets(uf->stream))->length) {
#ifdef USE_NNTP
if (uf->scheme == SCM_NEWS && lineBuf2->ptr[0] == '.') {
Strshrinkfirst(lineBuf2, 1);
if (lineBuf2->ptr[0] == '\n' || lineBuf2->ptr[0] == '\r' ||
lineBuf2->ptr[0] == '\0') {
/*
* iseos(uf->stream) = TRUE;
*/
break;
}
}
#endif /* USE_NNTP */
if (src)
Strfputs(lineBuf2, src);
linelen += lineBuf2->length;
if (w3m_dump & DUMP_EXTRA)
printf("W3m-in-progress: %s\n", convert_size2(linelen, current_content_length, TRUE));
if (w3m_dump & DUMP_SOURCE)
continue;
showProgress(&linelen, &trbyte);
if (frame_source)
continue;
lineBuf2 =
convertLine(uf, lineBuf2, PAGER_MODE, &charset, doc_charset);
if (squeezeBlankLine) {
if (lineBuf2->ptr[0] == '\n' && pre_lbuf == '\n') {
++nlines;
continue;
}
pre_lbuf = lineBuf2->ptr[0];
}
++nlines;
Strchop(lineBuf2);
lineBuf2 = checkType(lineBuf2, &propBuffer, NULL);
addnewline(newBuf, lineBuf2->ptr, propBuffer, colorBuffer,
lineBuf2->length, FOLD_BUFFER_WIDTH, nlines);
}
_end:
TRAP_OFF;
newBuf->topLine = newBuf->firstLine;
newBuf->lastLine = newBuf->currentLine;
newBuf->currentLine = newBuf->firstLine;
newBuf->trbyte = trbyte + linelen;
#ifdef USE_M17N
newBuf->document_charset = charset;
#endif
if (src)
fclose(src);
return newBuf;
}
#ifdef USE_IMAGE
Buffer *
loadImageBuffer(URLFile *uf, Buffer *newBuf)
{
Image image;
ImageCache *cache;
Str tmp, tmpf;
FILE *src = NULL;
URLFile f;
MySignalHandler(*volatile prevtrap) (SIGNAL_ARG) = NULL;
struct stat st;
const ParsedURL *pu = newBuf ? &newBuf->currentURL : NULL;
loadImage(newBuf, IMG_FLAG_STOP);
image.url = uf->url;
image.ext = uf->ext;
image.width = -1;
image.height = -1;
image.cache = NULL;
cache = getImage(&image, (ParsedURL *)pu, IMG_FLAG_AUTO);
if (!(pu && pu->is_nocache) && cache->loaded & IMG_FLAG_LOADED &&
!stat(cache->file, &st))
goto image_buffer;
if (IStype(uf->stream) != IST_ENCODED)
uf->stream = newEncodedStream(uf->stream, uf->encoding);
TRAP_ON;
if (save2tmp(*uf, cache->file) < 0) {
TRAP_OFF;
return NULL;
}
TRAP_OFF;
cache->loaded = IMG_FLAG_LOADED;
cache->index = 0;
image_buffer:
if (newBuf == NULL)
newBuf = newBuffer(INIT_BUFFER_WIDTH);
cache->loaded |= IMG_FLAG_DONT_REMOVE;
if (newBuf->sourcefile == NULL && uf->scheme != SCM_LOCAL)
newBuf->sourcefile = cache->file;
tmp = Sprintf("<img src=\"%s\"><br><br>", html_quote(image.url));
tmpf = tmpfname(TMPF_SRC, ".html");
src = fopen(tmpf->ptr, "w");
newBuf->mailcap_source = tmpf->ptr;
init_stream(&f, SCM_LOCAL, newStrStream(tmp));
loadHTMLstream(&f, newBuf, src, TRUE);
UFclose(&f);
if (src)
fclose(src);
newBuf->topLine = newBuf->firstLine;
newBuf->lastLine = newBuf->currentLine;
newBuf->currentLine = newBuf->firstLine;
newBuf->image_flag = IMG_FLAG_AUTO;
return newBuf;
}
#endif
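/*
 * conv_symbol: replace internal symbol characters in a line with their
 * printable representations; returns a copy of the original text when
 * no symbol is present.
 */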
static Str
conv_symbol(Line *l)
{
Str tmp = NULL;
char *p = l->lineBuf, *ep = p + l->len;
Lineprop *pr = l->propBuf;
#ifdef USE_M17N
int w;
char **symbol = NULL;
#else
char **symbol = get_symbol();
#endif
for (; p < ep; p++, pr++) {
if (*pr & PC_SYMBOL) {
#ifdef USE_M17N
char c = ((char)wtf_get_code((wc_uchar *) p) & 0x7f) - SYMBOL_BASE;
int len = get_mclen(p);
#else
char c = *p - SYMBOL_BASE;
#endif
if (tmp == NULL) {
tmp = Strnew_size(l->len);
Strcopy_charp_n(tmp, l->lineBuf, p - l->lineBuf);
#ifdef USE_M17N
w = (*pr & PC_KANJI) ? 2 : 1;
symbol = get_symbol(DisplayCharset, &w);
#endif
}
Strcat_charp(tmp, symbol[(int)c]);
#ifdef USE_M17N
p += len - 1;
pr += len - 1;
#endif
}
else if (tmp != NULL)
Strcat_char(tmp, *p);
}
if (tmp)
return tmp;
else
return Strnew_charp_n(l->lineBuf, l->len);
}
/*
* saveBuffer: write buffer to file
*/
static void
_saveBuffer(Buffer *buf, Line *l, FILE * f, int cont)
{
Str tmp;
int is_html = FALSE;
#ifdef USE_M17N
int set_charset = !DisplayCharset;
wc_ces charset = DisplayCharset ? DisplayCharset : WC_CES_US_ASCII;
#endif
is_html = is_html_type(buf->type);
pager_next:
for (; l != NULL; l = l->next) {
if (is_html)
tmp = conv_symbol(l);
else
tmp = Strnew_charp_n(l->lineBuf, l->len);
tmp = wc_Str_conv(tmp, InnerCharset, charset);
Strfputs(tmp, f);
if (Strlastchar(tmp) != '\n' && !(cont && l->next && l->next->bpos))
putc('\n', f);
}
if (buf->pagerSource && !(buf->bufferprop & BP_CLOSE)) {
l = getNextPage(buf, PagerMax);
#ifdef USE_M17N
if (set_charset)
charset = buf->document_charset;
#endif
goto pager_next;
}
}
void
saveBuffer(Buffer *buf, FILE * f, int cont)
{
_saveBuffer(buf, buf->firstLine, f, cont);
}
void
saveBufferBody(Buffer *buf, FILE * f, int cont)
{
Line *l = buf->firstLine;
while (l != NULL && l->real_linenumber == 0)
l = l->next;
_saveBuffer(buf, l, f, cont);
}
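/*
 * loadcmdout: run a shell command with popen() and load its output
 * through the given loadproc (e.g. loadBuffer or loadHTMLBuffer).
 */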
static Buffer *
loadcmdout(char *cmd,
Buffer *(*loadproc) (URLFile *, Buffer *), Buffer *defaultbuf)
{
FILE *f, *popen(const char *, const char *);
Buffer *buf;
URLFile uf;
if (cmd == NULL || *cmd == '\0')
return NULL;
f = popen(cmd, "r");
if (f == NULL)
return NULL;
init_stream(&uf, SCM_UNKNOWN, newFileStream(f, (void (*)())pclose));
buf = loadproc(&uf, defaultbuf);
UFclose(&uf);
return buf;
}
/*
* getshell: execute shell command and get the result into a buffer
*/
Buffer *
getshell(char *cmd)
{
Buffer *buf;
buf = loadcmdout(cmd, loadBuffer, NULL);
if (buf == NULL)
return NULL;
buf->filename = cmd;
buf->buffername = Sprintf("%s %s", SHELLBUFFERNAME,
conv_from_system(cmd))->ptr;
return buf;
}
/*
* getpipe: execute shell command and connect pipe to the buffer
*/
Buffer *
getpipe(char *cmd)
{
FILE *f, *popen(const char *, const char *);
Buffer *buf;
if (cmd == NULL || *cmd == '\0')
return NULL;
f = popen(cmd, "r");
if (f == NULL)
return NULL;
buf = newBuffer(INIT_BUFFER_WIDTH);
buf->pagerSource = newFileStream(f, (void (*)())pclose);
buf->filename = cmd;
buf->buffername = Sprintf("%s %s", PIPEBUFFERNAME,
conv_from_system(cmd))->ptr;
buf->bufferprop |= BP_PIPE;
#ifdef USE_M17N
buf->document_charset = WC_CES_US_ASCII;
#endif
return buf;
}
/*
* Open pager buffer
*/
Buffer *
openPagerBuffer(InputStream stream, Buffer *buf)
{
if (buf == NULL)
buf = newBuffer(INIT_BUFFER_WIDTH);
buf->pagerSource = stream;
buf->buffername = getenv("MAN_PN");
if (buf->buffername == NULL)
buf->buffername = PIPEBUFFERNAME;
else
buf->buffername = conv_from_system(buf->buffername);
buf->bufferprop |= BP_PIPE;
#ifdef USE_M17N
if (content_charset && UseContentCharset)
buf->document_charset = content_charset;
else
buf->document_charset = WC_CES_US_ASCII;
#endif
buf->currentLine = buf->firstLine;
return buf;
}
Buffer *
openGeneralPagerBuffer(InputStream stream)
{
Buffer *buf;
char *t = "text/plain";
Buffer *t_buf = NULL;
URLFile uf;
init_stream(&uf, SCM_UNKNOWN, stream);
#ifdef USE_M17N
content_charset = 0;
#endif
t_buf = newBuffer(INIT_BUFFER_WIDTH);
copyParsedURL(&t_buf->currentURL, NULL);
t_buf->currentURL.scheme = SCM_LOCAL;
t_buf->currentURL.file = "-";
if (SearchHeader) {
readHeader(&uf, t_buf, TRUE, NULL);
t = checkContentType(t_buf);
if (t == NULL)
t = "text/plain";
if (t_buf) {
t_buf->topLine = t_buf->firstLine;
t_buf->currentLine = t_buf->lastLine;
}
SearchHeader = FALSE;
}
else if (DefaultType) {
t = DefaultType;
DefaultType = NULL;
}
if (is_html_type(t)) {
buf = loadHTMLBuffer(&uf, t_buf);
buf->type = "text/html";
}
else if (is_plain_text_type(t)) {
if (IStype(stream) != IST_ENCODED)
stream = newEncodedStream(stream, uf.encoding);
buf = openPagerBuffer(stream, t_buf);
buf->type = "text/plain";
}
#ifdef USE_IMAGE
else if (activeImage && displayImage && !useExtImageViewer &&
!(w3m_dump & ~DUMP_FRAME) && !strncasecmp(t, "image/", 6)) {
buf = loadImageBuffer(&uf, t_buf);
buf->type = "text/html";
}
#endif
else {
if (searchExtViewer(t)) {
buf = doExternal(uf, t, t_buf);
UFclose(&uf);
if (buf == NULL || buf == NO_BUFFER)
return buf;
}
else { /* unknown type is regarded as text/plain */
if (IStype(stream) != IST_ENCODED)
stream = newEncodedStream(stream, uf.encoding);
buf = openPagerBuffer(stream, t_buf);
buf->type = "text/plain";
}
}
buf->real_type = t;
return buf;
}
Line *
getNextPage(Buffer *buf, int plen)
{
Line *volatile top = buf->topLine, *volatile last = buf->lastLine,
*volatile cur = buf->currentLine;
int i;
int volatile nlines = 0;
clen_t linelen = 0, trbyte = buf->trbyte;
Str lineBuf2;
char volatile pre_lbuf = '\0';
URLFile uf;
#ifdef USE_M17N
wc_ces charset;
wc_ces volatile doc_charset = DocumentCharset;
wc_uint8 old_auto_detect = WcOption.auto_detect;
#endif
int volatile squeeze_flag = FALSE;
Lineprop *propBuffer = NULL;
#ifdef USE_ANSI_COLOR
Linecolor *colorBuffer = NULL;
#endif
MySignalHandler(*volatile prevtrap) (SIGNAL_ARG) = NULL;
if (buf->pagerSource == NULL)
return NULL;
if (last != NULL) {
nlines = last->real_linenumber;
pre_lbuf = *(last->lineBuf);
if (pre_lbuf == '\0')
pre_lbuf = '\n';
buf->currentLine = last;
}
#ifdef USE_M17N
charset = buf->document_charset;
if (buf->document_charset != WC_CES_US_ASCII)
doc_charset = buf->document_charset;
else if (UseContentCharset) {
content_charset = 0;
checkContentType(buf);
if (content_charset)
doc_charset = content_charset;
}
WcOption.auto_detect = buf->auto_detect;
#endif
if (SETJMP(AbortLoading) != 0) {
goto pager_end;
}
TRAP_ON;
init_stream(&uf, SCM_UNKNOWN, NULL);
for (i = 0; i < plen; i++) {
lineBuf2 = StrmyISgets(buf->pagerSource);
if (lineBuf2->length == 0) {
/* Assume that `cmd == buf->filename' */
if (buf->filename)
buf->buffername = Sprintf("%s %s",
CPIPEBUFFERNAME,
conv_from_system(buf->filename))->
ptr;
else if (getenv("MAN_PN") == NULL)
buf->buffername = CPIPEBUFFERNAME;
buf->bufferprop |= BP_CLOSE;
break;
}
linelen += lineBuf2->length;
showProgress(&linelen, &trbyte);
lineBuf2 =
convertLine(&uf, lineBuf2, PAGER_MODE, &charset, doc_charset);
if (squeezeBlankLine) {
squeeze_flag = FALSE;
if (lineBuf2->ptr[0] == '\n' && pre_lbuf == '\n') {
++nlines;
--i;
squeeze_flag = TRUE;
continue;
}
pre_lbuf = lineBuf2->ptr[0];
}
++nlines;
Strchop(lineBuf2);
lineBuf2 = checkType(lineBuf2, &propBuffer, &colorBuffer);
addnewline(buf, lineBuf2->ptr, propBuffer, colorBuffer,
lineBuf2->length, FOLD_BUFFER_WIDTH, nlines);
if (!top) {
top = buf->firstLine;
cur = top;
}
if (buf->lastLine->real_linenumber - buf->firstLine->real_linenumber
>= PagerMax) {
Line *l = buf->firstLine;
do {
if (top == l)
top = l->next;
if (cur == l)
cur = l->next;
if (last == l)
last = NULL;
l = l->next;
} while (l && l->bpos);
buf->firstLine = l;
buf->firstLine->prev = NULL;
}
}
pager_end:
TRAP_OFF;
buf->trbyte = trbyte + linelen;
#ifdef USE_M17N
buf->document_charset = charset;
WcOption.auto_detect = old_auto_detect;
#endif
buf->topLine = top;
buf->currentLine = cur;
if (!last)
last = buf->firstLine;
else if (last && (last->next || !squeeze_flag))
last = last->next;
return last;
}
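/*
 * Copy the entire body of uf into the local file tmpf. News articles are
 * copied character by character until the ".\r\n" terminator line;
 * everything else is copied in SAVE_BUF_SIZE chunks. Returns 0 on
 * success, -1 if tmpf cannot be opened, -2 on a short write.
 */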
int
save2tmp(URLFile uf, char *tmpf)
{
FILE *ff;
int check;
clen_t linelen = 0, trbyte = 0;
MySignalHandler(*volatile prevtrap) (SIGNAL_ARG) = NULL;
static JMP_BUF env_bak;
volatile int retval = 0;
char *volatile buf = NULL;
ff = fopen(tmpf, "wb");
if (ff == NULL) {
/* fclose(f); */
return -1;
}
bcopy(AbortLoading, env_bak, sizeof(JMP_BUF));
if (SETJMP(AbortLoading) != 0) {
goto _end;
}
TRAP_ON;
check = 0;
#ifdef USE_NNTP
if (uf.scheme == SCM_NEWS) {
char c;
while (c = UFgetc(&uf), !iseos(uf.stream)) {
if (c == '\n') {
if (check == 0)
check++;
else if (check == 3)
break;
}
else if (c == '.' && check == 1)
check++;
else if (c == '\r' && check == 2)
check++;
else
check = 0;
putc(c, ff);
linelen += sizeof(c);
showProgress(&linelen, &trbyte);
}
}
else
#endif /* USE_NNTP */
{
int count;
buf = NewWithoutGC_N(char, SAVE_BUF_SIZE);
while ((count = ISread_n(uf.stream, buf, SAVE_BUF_SIZE)) > 0) {
if (fwrite(buf, 1, count, ff) != count) {
retval = -2;
goto _end;
}
linelen += count;
showProgress(&linelen, &trbyte);
}
}
_end:
bcopy(env_bak, AbortLoading, sizeof(JMP_BUF));
TRAP_OFF;
xfree(buf);
fclose(ff);
current_content_length = 0;
return retval;
}
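/*
 * View the stream with an external program looked up in the mailcap
 * database, e.g. an entry of the form
 *
 * image/png; xv %s
 *
 * (the entry above is only an illustration). The body is first saved to
 * a temporary file; viewers flagged htmloutput or copiousoutput have
 * their output loaded back into a buffer, while ordinary viewers run in
 * the foreground or background depending on needsterminal and
 * BackgroundExtViewer.
 */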
Buffer *
doExternal(URLFile uf, char *type, Buffer *defaultbuf)
{
Str tmpf, command;
struct mailcap *mcap;
int mc_stat;
Buffer *buf = NULL;
char *header, *src = NULL, *ext = uf.ext;
if (!(mcap = searchExtViewer(type)))
return NULL;
if (mcap->nametemplate) {
tmpf = unquote_mailcap(mcap->nametemplate, NULL, "", NULL, NULL);
if (tmpf->ptr[0] == '.')
ext = tmpf->ptr;
}
tmpf = tmpfname(TMPF_DFL, (ext && *ext) ? ext : NULL);
if (IStype(uf.stream) != IST_ENCODED)
uf.stream = newEncodedStream(uf.stream, uf.encoding);
header = checkHeader(defaultbuf, "Content-Type:");
if (header)
header = conv_to_system(header);
command = unquote_mailcap(mcap->viewer, type, tmpf->ptr, header, &mc_stat);
#ifndef __EMX__
if (!(mc_stat & MCSTAT_REPNAME)) {
Str tmp = Sprintf("(%s) < %s", command->ptr, shell_quote(tmpf->ptr));
command = tmp;
}
#endif
#ifdef HAVE_SETPGRP
if (!(mcap->flags & (MAILCAP_HTMLOUTPUT | MAILCAP_COPIOUSOUTPUT)) &&
!(mcap->flags & MAILCAP_NEEDSTERMINAL) && BackgroundExtViewer) {
flush_tty();
if (!fork()) {
setup_child(FALSE, 0, UFfileno(&uf));
if (save2tmp(uf, tmpf->ptr) < 0)
exit(1);
UFclose(&uf);
myExec(command->ptr);
}
return NO_BUFFER;
}
else
#endif
{
if (save2tmp(uf, tmpf->ptr) < 0) {
return NULL;
}
}
if (mcap->flags & (MAILCAP_HTMLOUTPUT | MAILCAP_COPIOUSOUTPUT)) {
if (defaultbuf == NULL)
defaultbuf = newBuffer(INIT_BUFFER_WIDTH);
if (defaultbuf->sourcefile)
src = defaultbuf->sourcefile;
else
src = tmpf->ptr;
defaultbuf->sourcefile = NULL;
defaultbuf->mailcap = mcap;
}
if (mcap->flags & MAILCAP_HTMLOUTPUT) {
buf = loadcmdout(command->ptr, loadHTMLBuffer, defaultbuf);
if (buf && buf != NO_BUFFER) {
buf->type = "text/html";
buf->mailcap_source = buf->sourcefile;
buf->sourcefile = src;
}
}
else if (mcap->flags & MAILCAP_COPIOUSOUTPUT) {
buf = loadcmdout(command->ptr, loadBuffer, defaultbuf);
if (buf && buf != NO_BUFFER) {
buf->type = "text/plain";
buf->mailcap_source = buf->sourcefile;
buf->sourcefile = src;
}
}
else {
if (mcap->flags & MAILCAP_NEEDSTERMINAL || !BackgroundExtViewer) {
fmTerm();
mySystem(command->ptr, 0);
fmInit();
if (CurrentTab && Currentbuf)
displayBuffer(Currentbuf, B_FORCE_REDRAW);
}
else {
mySystem(command->ptr, 1);
}
buf = NO_BUFFER;
}
if (buf && buf != NO_BUFFER) {
if ((buf->buffername == NULL || buf->buffername[0] == '\0') &&
buf->filename)
buf->buffername = conv_from_system(lastFileName(buf->filename));
buf->edit = mcap->edit;
buf->mailcap = mcap;
}
return buf;
}
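/*
 * Copy the local file path1 to path2. If path2 begins with '|' and
 * PermitSaveToPipe is enabled, the remainder is run as a shell command
 * and the data is piped into it instead. Returns 0 on success, -1 if
 * either end cannot be opened.
 */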
static int
_MoveFile(char *path1, char *path2)
{
InputStream f1;
FILE *f2;
int is_pipe;
clen_t linelen = 0, trbyte = 0;
char *buf = NULL;
int count;
f1 = openIS(path1);
if (f1 == NULL)
return -1;
if (*path2 == '|' && PermitSaveToPipe) {
is_pipe = TRUE;
f2 = popen(path2 + 1, "w");
}
else {
is_pipe = FALSE;
f2 = fopen(path2, "wb");
}
if (f2 == NULL) {
ISclose(f1);
return -1;
}
current_content_length = 0;
buf = NewWithoutGC_N(char, SAVE_BUF_SIZE);
while ((count = ISread_n(f1, buf, SAVE_BUF_SIZE)) > 0) {
fwrite(buf, 1, count, f2);
linelen += count;
showProgress(&linelen, &trbyte);
}
xfree(buf);
ISclose(f1);
if (is_pipe)
pclose(f2);
else
fclose(f2);
return 0;
}
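/*
 * Copy the already-downloaded file tmpf to a destination asked from the
 * user (via inputLineHist() under the full-screen interface, from stdin
 * otherwise). With download set, the copy is performed in a forked child
 * and registered in the download list together with a lock file that
 * marks its completion.
 */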
int
_doFileCopy(char *tmpf, char *defstr, int download)
{
#ifndef __MINGW32_VERSION
Str msg;
Str filen;
char *p, *q = NULL;
pid_t pid;
char *lock;
#if !(defined(HAVE_SYMLINK) && defined(HAVE_LSTAT))
FILE *f;
#endif
struct stat st;
clen_t size = 0;
int is_pipe = FALSE;
if (fmInitialized) {
p = searchKeyData();
if (p == NULL || *p == '\0') {
/* FIXME: gettextize? */
q = inputLineHist("(Download)Save file to: ",
defstr, IN_COMMAND, SaveHist);
if (q == NULL || *q == '\0')
return FALSE;
p = conv_to_system(q);
}
if (*p == '|' && PermitSaveToPipe)
is_pipe = TRUE;
else {
if (q) {
p = unescape_spaces(Strnew_charp(q))->ptr;
p = conv_to_system(p);
}
p = expandPath(p);
if (checkOverWrite(p) < 0)
return -1;
}
if (checkCopyFile(tmpf, p) < 0) {
/* FIXME: gettextize? */
msg = Sprintf("Can't copy. %s and %s are identical.",
conv_from_system(tmpf), conv_from_system(p));
disp_err_message(msg->ptr, FALSE);
return -1;
}
if (!download) {
if (_MoveFile(tmpf, p) < 0) {
/* FIXME: gettextize? */
msg = Sprintf("Can't save to %s", conv_from_system(p));
disp_err_message(msg->ptr, FALSE);
}
return -1;
}
lock = tmpfname(TMPF_DFL, ".lock")->ptr;
#if defined(HAVE_SYMLINK) && defined(HAVE_LSTAT)
symlink(p, lock);
#else
f = fopen(lock, "w");
if (f)
fclose(f);
#endif
flush_tty();
pid = fork();
if (!pid) {
setup_child(FALSE, 0, -1);
if (!_MoveFile(tmpf, p) && PreserveTimestamp && !is_pipe &&
!stat(tmpf, &st))
setModtime(p, st.st_mtime);
unlink(lock);
exit(0);
}
if (!stat(tmpf, &st))
size = st.st_size;
addDownloadList(pid, conv_from_system(tmpf), p, lock, size);
}
else {
q = searchKeyData();
if (q == NULL || *q == '\0') {
/* FIXME: gettextize? */
printf("(Download)Save file to: ");
fflush(stdout);
filen = Strfgets(stdin);
if (filen->length == 0)
return -1;
q = filen->ptr;
}
for (p = q + strlen(q) - 1; p >= q && IS_SPACE(*p); p--) ;
*(p + 1) = '\0';
if (*q == '\0')
return -1;
p = q;
if (*p == '|' && PermitSaveToPipe)
is_pipe = TRUE;
else {
p = expandPath(p);
if (checkOverWrite(p) < 0)
return -1;
}
if (checkCopyFile(tmpf, p) < 0) {
/* FIXME: gettextize? */
printf("Can't copy. %s and %s are identical.", tmpf, p);
return -1;
}
if (_MoveFile(tmpf, p) < 0) {
/* FIXME: gettextize? */
printf("Can't save to %s\n", p);
return -1;
}
if (PreserveTimestamp && !is_pipe && !stat(tmpf, &st))
setModtime(p, st.st_mtime);
}
#endif /* __MINGW32_VERSION */
return 0;
}
int
doFileMove(char *tmpf, char *defstr)
{
int ret = doFileCopy(tmpf, defstr);
unlink(tmpf);
return ret;
}
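/*
 * Save the still-unread stream uf under a file name asked from the user,
 * uncompressing it first when AutoUncompress is set. As in _doFileCopy(),
 * the interactive variant forks a child so the transfer continues in the
 * background and is tracked through the download list.
 */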
int
doFileSave(URLFile uf, char *defstr)
{
#ifndef __MINGW32_VERSION
Str msg;
Str filen;
char *p, *q;
pid_t pid;
char *lock;
char *tmpf = NULL;
#if !(defined(HAVE_SYMLINK) && defined(HAVE_LSTAT))
FILE *f;
#endif
if (fmInitialized) {
p = searchKeyData();
if (p == NULL || *p == '\0') {
/* FIXME: gettextize? */
p = inputLineHist("(Download)Save file to: ",
defstr, IN_FILENAME, SaveHist);
if (p == NULL || *p == '\0')
return -1;
p = conv_to_system(p);
}
if (checkOverWrite(p) < 0)
return -1;
if (checkSaveFile(uf.stream, p) < 0) {
/* FIXME: gettextize? */
msg = Sprintf("Can't save. Load file and %s are identical.",
conv_from_system(p));
disp_err_message(msg->ptr, FALSE);
return -1;
}
/*
* if (save2tmp(uf, p) < 0) {
* msg = Sprintf("Can't save to %s", conv_from_system(p));
* disp_err_message(msg->ptr, FALSE);
* }
*/
lock = tmpfname(TMPF_DFL, ".lock")->ptr;
#if defined(HAVE_SYMLINK) && defined(HAVE_LSTAT)
symlink(p, lock);
#else
f = fopen(lock, "w");
if (f)
fclose(f);
#endif
flush_tty();
pid = fork();
if (!pid) {
int err;
if ((uf.content_encoding != CMP_NOCOMPRESS) && AutoUncompress) {
uncompress_stream(&uf, &tmpf);
if (tmpf)
unlink(tmpf);
}
setup_child(FALSE, 0, UFfileno(&uf));
err = save2tmp(uf, p);
if (err == 0 && PreserveTimestamp && uf.modtime != -1)
setModtime(p, uf.modtime);
UFclose(&uf);
unlink(lock);
if (err != 0)
exit(-err);
exit(0);
}
addDownloadList(pid, uf.url, p, lock, current_content_length);
}
else {
q = searchKeyData();
if (q == NULL || *q == '\0') {
/* FIXME: gettextize? */
printf("(Download)Save file to: ");
fflush(stdout);
filen = Strfgets(stdin);
if (filen->length == 0)
return -1;
q = filen->ptr;
}
for (p = q + strlen(q) - 1; p >= q && IS_SPACE(*p); p--) ;
*(p + 1) = '\0';
if (*q == '\0')
return -1;
p = expandPath(q);
if (checkOverWrite(p) < 0)
return -1;
if (checkSaveFile(uf.stream, p) < 0) {
/* FIXME: gettextize? */
printf("Can't save. Load file and %s are identical.", p);
return -1;
}
if (uf.content_encoding != CMP_NOCOMPRESS && AutoUncompress) {
uncompress_stream(&uf, &tmpf);
if (tmpf)
unlink(tmpf);
}
if (save2tmp(uf, p) < 0) {
/* FIXME: gettextize? */
printf("Can't save to %s\n", p);
return -1;
}
if (PreserveTimestamp && uf.modtime != -1)
setModtime(p, uf.modtime);
}
#endif /* __MINGW32_VERSION */
return 0;
}
int
checkCopyFile(char *path1, char *path2)
{
struct stat st1, st2;
if (*path2 == '|' && PermitSaveToPipe)
return 0;
if ((stat(path1, &st1) == 0) && (stat(path2, &st2) == 0))
if (st1.st_ino == st2.st_ino)
return -1;
return 0;
}
int
checkSaveFile(InputStream stream, char *path2)
{
struct stat st1, st2;
int des = ISfileno(stream);
if (des < 0)
return 0;
if (*path2 == '|' && PermitSaveToPipe)
return 0;
if ((fstat(des, &st1) == 0) && (stat(path2, &st2) == 0))
if (st1.st_ino == st2.st_ino)
return -1;
return 0;
}
int
checkOverWrite(char *path)
{
struct stat st;
char *ans;
if (stat(path, &st) < 0)
return 0;
/* FIXME: gettextize? */
ans = inputAnswer("File exists. Overwrite? (y/n)");
if (ans && TOLOWER(*ans) == 'y')
return 0;
else
return -1;
}
char *
inputAnswer(char *prompt)
{
char *ans;
if (QuietMessage)
return "n";
if (fmInitialized) {
term_raw();
ans = inputChar(prompt);
}
else {
printf("%s", prompt);
fflush(stdout);
ans = Strfgets(stdin)->ptr;
}
return ans;
}
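/*
 * Replace the stream in uf by the decompressed output of the matching
 * decoder (gunzip by default). Two children are forked: the inner one
 * copies the raw data to the decoder and, for non-local sources, into a
 * temporary file whose name is reported through *src; the outer one
 * execs the decoder itself, and its stdout becomes the new uf->stream.
 */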
static void
uncompress_stream(URLFile *uf, char **src)
{
#ifndef __MINGW32_VERSION
pid_t pid1;
FILE *f1;
char *expand_cmd = GUNZIP_CMDNAME;
char *expand_name = GUNZIP_NAME;
char *tmpf = NULL;
char *ext = NULL;
struct compression_decoder *d;
if (IStype(uf->stream) != IST_ENCODED) {
uf->stream = newEncodedStream(uf->stream, uf->encoding);
uf->encoding = ENC_7BIT;
}
for (d = compression_decoders; d->type != CMP_NOCOMPRESS; d++) {
if (uf->compression == d->type) {
if (d->auxbin_p)
expand_cmd = auxbinFile(d->cmd);
else
expand_cmd = d->cmd;
expand_name = d->name;
ext = d->ext;
break;
}
}
uf->compression = CMP_NOCOMPRESS;
if (uf->scheme != SCM_LOCAL
#ifdef USE_IMAGE
&& !image_source
#endif
) {
tmpf = tmpfname(TMPF_DFL, ext)->ptr;
}
/* child1 -- stdout|f1=uf -> parent */
pid1 = open_pipe_rw(&f1, NULL);
if (pid1 < 0) {
UFclose(uf);
return;
}
if (pid1 == 0) {
/* child */
pid_t pid2;
FILE *f2 = stdin;
/* uf -> child2 -- stdout|stdin -> child1 */
pid2 = open_pipe_rw(&f2, NULL);
if (pid2 < 0) {
UFclose(uf);
exit(1);
}
if (pid2 == 0) {
/* child2 */
char *buf = NewWithoutGC_N(char, SAVE_BUF_SIZE);
int count;
FILE *f = NULL;
setup_child(TRUE, 2, UFfileno(uf));
if (tmpf)
f = fopen(tmpf, "wb");
while ((count = ISread_n(uf->stream, buf, SAVE_BUF_SIZE)) > 0) {
if (fwrite(buf, 1, count, stdout) != count)
break;
if (f && fwrite(buf, 1, count, f) != count)
break;
}
UFclose(uf);
if (f)
fclose(f);
xfree(buf);
exit(0);
}
/* child1 */
dup2(1, 2); /* stderr>&stdout */
setup_child(TRUE, -1, -1);
execlp(expand_cmd, expand_name, (char *)NULL);
exit(1);
}
if (tmpf) {
if (src)
*src = tmpf;
else
uf->scheme = SCM_LOCAL;
}
UFhalfclose(uf);
uf->stream = newFileStream(f1, (void (*)())fclose);
#endif /* __MINGW32_VERSION */
}
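/*
 * Open path through the filter named in the LESSOPEN environment
 * variable, following the less(1) convention. Only the pipe form is
 * supported, e.g.
 *
 * LESSOPEN="|lesspipe %s"
 *
 * (shown for illustration). The first byte of the filter output is
 * peeked at to verify that it produced anything; if not, NULL is
 * returned so the caller can fall back to reading the file itself.
 */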
static FILE *
lessopen_stream(char *path)
{
char *lessopen;
FILE *fp;
lessopen = getenv("LESSOPEN");
if (lessopen == NULL) {
return NULL;
}
if (lessopen[0] == '\0') {
return NULL;
}
if (lessopen[0] == '|') {
/* pipe mode */
Str tmpf;
int c;
++lessopen;
tmpf = Sprintf(lessopen, shell_quote(path));
fp = popen(tmpf->ptr, "r");
if (fp == NULL) {
return NULL;
}
c = getc(fp);
if (c == EOF) {
pclose(fp);
return NULL;
}
ungetc(c, fp);
}
else {
/* filename mode */
/* not supported m(__)m */
fp = NULL;
}
return fp;
}
#if 0
void
reloadBuffer(Buffer *buf)
{
URLFile uf;
if (buf->sourcefile == NULL || buf->pagerSource != NULL)
return;
init_stream(&uf, SCM_UNKNOWN, NULL);
examineFile(buf->mailcap_source ? buf->mailcap_source : buf->sourcefile,
&uf);
if (uf.stream == NULL)
return;
is_redisplay = TRUE;
buf->allLine = 0;
buf->href = NULL;
buf->name = NULL;
buf->img = NULL;
buf->formitem = NULL;
buf->linklist = NULL;
buf->maplist = NULL;
if (buf->hmarklist)
buf->hmarklist->nmark = 0;
if (buf->imarklist)
buf->imarklist->nmark = 0;
if (is_html_type(buf->type))
loadHTMLBuffer(&uf, buf);
else
loadBuffer(&uf, buf);
UFclose(&uf);
is_redisplay = FALSE;
}
#endif
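/*
 * Derive a default file name for saving: guess_filename() takes the
 * basename of the given path and truncates it at a '#' fragment or '?'
 * query, and guess_save_name() first honours a filename= parameter in
 * the Content-Disposition: header (or a name= in Content-Type:) when
 * the buffer carries response headers.
 */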
static char *
guess_filename(char *file)
{
char *p = NULL, *s;
if (file != NULL)
p = mybasename(file);
if (p == NULL || *p == '\0')
return DEF_SAVE_FILE;
s = p;
if (*p == '#')
p++;
while (*p != '\0') {
if ((*p == '#' && *(p + 1) != '\0') || *p == '?') {
*p = '\0';
break;
}
p++;
}
return s;
}
char *
guess_save_name(Buffer *buf, char *path)
{
if (buf && buf->document_header) {
Str name = NULL;
char *p, *q;
if ((p = checkHeader(buf, "Content-Disposition:")) != NULL &&
(q = strcasestr(p, "filename")) != NULL &&
(q == p || IS_SPACE(*(q - 1)) || *(q - 1) == ';') &&
matchattr(q, "filename", 8, &name))
path = name->ptr;
else if ((p = checkHeader(buf, "Content-Type:")) != NULL &&
(q = strcasestr(p, "name")) != NULL &&
(q == p || IS_SPACE(*(q - 1)) || *(q - 1) == ';') &&
matchattr(q, "name", 4, &name))
path = name->ptr;
}
return guess_filename(path);
}
/* Local Variables: */
/* c-basic-offset: 4 */
/* tab-width: 8 */
/* End: */
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5420_0 |
crossvul-cpp_data_bad_5845_10 | /*
RFCOMM implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* RFCOMM sockets.
*/
#include <linux/export.h>
#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/rfcomm.h>
static const struct proto_ops rfcomm_sock_ops;
static struct bt_sock_list rfcomm_sk_list = {
.lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock)
};
static void rfcomm_sock_close(struct sock *sk);
static void rfcomm_sock_kill(struct sock *sk);
/* ---- DLC callbacks ----
*
* called under rfcomm_dlc_lock()
*/
static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
{
struct sock *sk = d->owner;
if (!sk)
return;
atomic_add(skb->len, &sk->sk_rmem_alloc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
rfcomm_dlc_throttle(d);
}
static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
{
struct sock *sk = d->owner, *parent;
unsigned long flags;
if (!sk)
return;
BT_DBG("dlc %p state %ld err %d", d, d->state, err);
local_irq_save(flags);
bh_lock_sock(sk);
if (err)
sk->sk_err = err;
sk->sk_state = d->state;
parent = bt_sk(sk)->parent;
if (parent) {
if (d->state == BT_CLOSED) {
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
}
parent->sk_data_ready(parent, 0);
} else {
if (d->state == BT_CONNECTED)
rfcomm_session_getaddr(d->session,
&rfcomm_pi(sk)->src, NULL);
sk->sk_state_change(sk);
}
bh_unlock_sock(sk);
local_irq_restore(flags);
if (parent && sock_flag(sk, SOCK_ZAPPED)) {
/* We have to drop DLC lock here, otherwise
* rfcomm_sock_destruct() will deadlock. */
rfcomm_dlc_unlock(d);
rfcomm_sock_kill(sk);
rfcomm_dlc_lock(d);
}
}
/* ---- Socket functions ---- */
static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL;
sk_for_each(sk, &rfcomm_sk_list.head) {
if (rfcomm_pi(sk)->channel == channel &&
!bacmp(&rfcomm_pi(sk)->src, src))
break;
}
return sk;
}
/* Find socket with channel and source bdaddr.
* Returns closest match.
*/
static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, &rfcomm_sk_list.head) {
if (state && sk->sk_state != state)
continue;
if (rfcomm_pi(sk)->channel == channel) {
/* Exact match. */
if (!bacmp(&rfcomm_pi(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
read_unlock(&rfcomm_sk_list.lock);
return sk ? sk : sk1;
}
static void rfcomm_sock_destruct(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
BT_DBG("sk %p dlc %p", sk, d);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
rfcomm_dlc_lock(d);
rfcomm_pi(sk)->dlc = NULL;
/* Detach DLC if it's owned by this socket */
if (d->owner == sk)
d->owner = NULL;
rfcomm_dlc_unlock(d);
rfcomm_dlc_put(d);
}
static void rfcomm_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close DLCs that have not yet been accepted */
while ((sk = bt_accept_dequeue(parent, NULL))) {
rfcomm_sock_close(sk);
rfcomm_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void rfcomm_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __rfcomm_sock_close(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
rfcomm_sock_cleanup_listen(sk);
break;
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
case BT_CONNECTED:
rfcomm_dlc_close(d, 0);
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Close socket.
* Must be called on unlocked socket.
*/
static void rfcomm_sock_close(struct sock *sk)
{
lock_sock(sk);
__rfcomm_sock_close(sk);
release_sock(sk);
}
static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
{
struct rfcomm_pinfo *pi = rfcomm_pi(sk);
BT_DBG("sk %p", sk);
if (parent) {
sk->sk_type = parent->sk_type;
pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
&bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
}
pi->dlc->sec_level = pi->sec_level;
pi->dlc->role_switch = pi->role_switch;
}
static struct proto rfcomm_proto = {
.name = "RFCOMM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rfcomm_pinfo)
};
static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct rfcomm_dlc *d;
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
d = rfcomm_dlc_alloc(prio);
if (!d) {
sk_free(sk);
return NULL;
}
d->data_ready = rfcomm_sk_data_ready;
d->state_change = rfcomm_sk_state_change;
rfcomm_pi(sk)->dlc = d;
d->owner = sk;
sk->sk_destruct = rfcomm_sock_destruct;
sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;
sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
bt_sock_link(&rfcomm_sk_list, sk);
BT_DBG("sk %p", sk);
return sk;
}
static int rfcomm_sock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sock->ops = &rfcomm_sock_ops;
sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
rfcomm_sock_init(sk, NULL);
return 0;
}
static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
write_lock(&rfcomm_sk_list.lock);
if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
sk->sk_state = BT_BOUND;
}
write_unlock(&rfcomm_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int err = 0;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_rc) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
sk->sk_state = BT_CONNECT;
bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
d->sec_level = rfcomm_pi(sk)->sec_level;
d->role_switch = rfcomm_pi(sk)->role_switch;
err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
sa->rc_channel);
if (!err)
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
if (!rfcomm_pi(sk)->channel) {
bdaddr_t *src = &rfcomm_pi(sk)->src;
u8 channel;
err = -EINVAL;
write_lock(&rfcomm_sk_list.lock);
for (channel = 1; channel < 31; channel++)
if (!__rfcomm_get_sock_by_addr(channel, src)) {
rfcomm_pi(sk)->channel = channel;
err = 0;
break;
}
write_unlock(&rfcomm_sk_list.lock);
if (err < 0)
goto done;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
nsk = bt_accept_dequeue(sk, newsock);
if (nsk)
break;
if (!timeo) {
err = -EAGAIN;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
memset(sa, 0, sizeof(*sa));
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst);
else
bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
}
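/*
 * Queue data on a connected DLC. The payload is split into MTU-sized
 * sk_buffs with RFCOMM_SKB_RESERVE headroom for the protocol headers;
 * if an error occurs after some data has been queued, the byte count
 * sent so far is returned instead of the error.
 */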
static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
struct sk_buff *skb;
int sent;
if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
return -ENOTCONN;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
BT_DBG("sock %p, sk %p", sock, sk);
lock_sock(sk);
sent = bt_sock_wait_ready(sk, msg->msg_flags);
if (sent)
goto done;
while (len) {
size_t size = min_t(size_t, len, d->mtu);
int err;
skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
if (sent == 0)
sent = err;
break;
}
skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
skb->priority = sk->sk_priority;
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
sent += size;
len -= size;
}
done:
release_sock(sk);
return sent;
}
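/*
 * Receive via the generic stream recvmsg helper. A socket with a
 * deferred setup pending is accepted on the first read instead of
 * returning data. Consumed bytes are subtracted from sk_rmem_alloc, and
 * the DLC is unthrottled once the receive allocation falls to a quarter
 * of sk_rcvbuf.
 */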
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int len;
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
msg->msg_namelen = 0;
return 0;
}
len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
lock_sock(sk);
if (!(flags & MSG_PEEK) && len > 0)
atomic_sub(len, &sk->sk_rmem_alloc);
if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
release_sock(sk);
return len;
}
static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt & RFCOMM_LM_AUTH)
rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & RFCOMM_LM_ENCRYPT)
rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & RFCOMM_LM_SECURE)
rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;
rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct bt_security sec;
int err = 0;
size_t len;
u32 opt;
BT_DBG("sk %p", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
break;
}
sec.level = BT_SECURITY_LOW;
len = min_t(unsigned int, sizeof(sec), optlen);
if (copy_from_user((char *) &sec, optval, len)) {
err = -EFAULT;
break;
}
if (sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
break;
}
rfcomm_pi(sk)->sec_level = sec.level;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct rfcomm_conninfo cinfo;
struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
switch (rfcomm_pi(sk)->sec_level) {
case BT_SECURITY_LOW:
opt = RFCOMM_LM_AUTH;
break;
case BT_SECURITY_MEDIUM:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT;
break;
case BT_SECURITY_HIGH:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
RFCOMM_LM_SECURE;
break;
default:
opt = 0;
break;
}
if (rfcomm_pi(sk)->role_switch)
opt |= RFCOMM_LM_MASTER;
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
case RFCOMM_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!rfcomm_pi(sk)->dlc->defer_setup) {
err = -ENOTCONN;
break;
}
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = conn->hcon->handle;
memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct bt_security sec;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
break;
}
sec.level = rfcomm_pi(sk)->sec_level;
sec.key_size = 0;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
err = -EFAULT;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk __maybe_unused = sock->sk;
int err;
BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
err = bt_sock_ioctl(sock, cmd, arg);
if (err == -ENOIOCTLCMD) {
#ifdef CONFIG_BT_RFCOMM_TTY
lock_sock(sk);
err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
release_sock(sk);
#else
err = -EOPNOTSUPP;
#endif
}
return err;
}
static int rfcomm_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
__rfcomm_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
}
release_sock(sk);
return err;
}
static int rfcomm_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
err = rfcomm_sock_shutdown(sock, 2);
sock_orphan(sk);
rfcomm_sock_kill(sk);
return err;
}
/* ---- RFCOMM core layer callbacks ----
*
* called under rfcomm_lock()
*/
int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d)
{
struct sock *sk, *parent;
bdaddr_t src, dst;
int result = 0;
BT_DBG("session %p channel %d", s, channel);
rfcomm_session_getaddr(s, &src, &dst);
/* Check if we have socket listening on channel */
parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src);
if (!parent)
return 0;
bh_lock_sock(parent);
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
goto done;
}
sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
if (!sk)
goto done;
bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
rfcomm_sock_init(sk, parent);
bacpy(&rfcomm_pi(sk)->src, &src);
bacpy(&rfcomm_pi(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
sk->sk_state = BT_CONFIG;
bt_accept_enqueue(parent, sk);
/* Accept connection and return socket DLC */
*d = rfcomm_pi(sk)->dlc;
result = 1;
done:
bh_unlock_sock(parent);
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
}
static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, &rfcomm_sk_list.head) {
seq_printf(f, "%pMR %pMR %d %d\n",
&rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,
sk->sk_state, rfcomm_pi(sk)->channel);
}
read_unlock(&rfcomm_sk_list.lock);
return 0;
}
static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
}
static const struct file_operations rfcomm_sock_debugfs_fops = {
.open = rfcomm_sock_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *rfcomm_sock_debugfs;
static const struct proto_ops rfcomm_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = rfcomm_sock_release,
.bind = rfcomm_sock_bind,
.connect = rfcomm_sock_connect,
.listen = rfcomm_sock_listen,
.accept = rfcomm_sock_accept,
.getname = rfcomm_sock_getname,
.sendmsg = rfcomm_sock_sendmsg,
.recvmsg = rfcomm_sock_recvmsg,
.shutdown = rfcomm_sock_shutdown,
.setsockopt = rfcomm_sock_setsockopt,
.getsockopt = rfcomm_sock_getsockopt,
.ioctl = rfcomm_sock_ioctl,
.poll = bt_sock_poll,
.socketpair = sock_no_socketpair,
.mmap = sock_no_mmap
};
static const struct net_proto_family rfcomm_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = rfcomm_sock_create
};
int __init rfcomm_init_sockets(void)
{
int err;
err = proto_register(&rfcomm_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
if (err < 0) {
BT_ERR("RFCOMM socket layer registration failed");
goto error;
}
err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL);
if (err < 0) {
BT_ERR("Failed to create RFCOMM proc file");
bt_sock_unregister(BTPROTO_RFCOMM);
goto error;
}
BT_INFO("RFCOMM socket layer initialized");
if (IS_ERR_OR_NULL(bt_debugfs))
return 0;
rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
bt_debugfs, NULL,
&rfcomm_sock_debugfs_fops);
return 0;
error:
proto_unregister(&rfcomm_proto);
return err;
}
void __exit rfcomm_cleanup_sockets(void)
{
bt_procfs_cleanup(&init_net, "rfcomm");
debugfs_remove(rfcomm_sock_debugfs);
bt_sock_unregister(BTPROTO_RFCOMM);
proto_unregister(&rfcomm_proto);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_10 |
crossvul-cpp_data_good_3989_0 | /*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "checkout.h"
#include "git2/repository.h"
#include "git2/refs.h"
#include "git2/tree.h"
#include "git2/blob.h"
#include "git2/config.h"
#include "git2/diff.h"
#include "git2/submodule.h"
#include "git2/sys/index.h"
#include "git2/sys/filter.h"
#include "git2/merge.h"
#include "refs.h"
#include "repository.h"
#include "index.h"
#include "filter.h"
#include "blob.h"
#include "diff.h"
#include "diff_generate.h"
#include "pathspec.h"
#include "buf_text.h"
#include "diff_xdiff.h"
#include "path.h"
#include "attr.h"
#include "pool.h"
#include "strmap.h"
/* See docs/checkout-internals.md for more information */
enum {
CHECKOUT_ACTION__NONE = 0,
CHECKOUT_ACTION__REMOVE = 1,
CHECKOUT_ACTION__UPDATE_BLOB = 2,
CHECKOUT_ACTION__UPDATE_SUBMODULE = 4,
CHECKOUT_ACTION__CONFLICT = 8,
CHECKOUT_ACTION__REMOVE_CONFLICT = 16,
CHECKOUT_ACTION__UPDATE_CONFLICT = 32,
CHECKOUT_ACTION__MAX = 32,
CHECKOUT_ACTION__DEFER_REMOVE = 64,
CHECKOUT_ACTION__REMOVE_AND_UPDATE =
(CHECKOUT_ACTION__UPDATE_BLOB | CHECKOUT_ACTION__REMOVE),
};
typedef struct {
git_repository *repo;
git_iterator *target;
git_diff *diff;
git_checkout_options opts;
bool opts_free_baseline;
char *pfx;
git_index *index;
git_pool pool;
git_vector removes;
git_vector remove_conflicts;
git_vector update_conflicts;
git_vector *update_reuc;
git_vector *update_names;
git_buf target_path;
size_t target_len;
git_buf tmp;
unsigned int strategy;
int can_symlink;
int respect_filemode;
bool reload_submodules;
size_t total_steps;
size_t completed_steps;
git_checkout_perfdata perfdata;
git_strmap *mkdir_map;
git_attr_session attr_session;
} checkout_data;
typedef struct {
const git_index_entry *ancestor;
const git_index_entry *ours;
const git_index_entry *theirs;
int name_collision:1,
directoryfile:1,
one_to_two:1,
binary:1,
submodule:1;
} checkout_conflictdata;
static int checkout_notify(
checkout_data *data,
git_checkout_notify_t why,
const git_diff_delta *delta,
const git_index_entry *wditem)
{
git_diff_file wdfile;
const git_diff_file *baseline = NULL, *target = NULL, *workdir = NULL;
const char *path = NULL;
if (!data->opts.notify_cb ||
(why & data->opts.notify_flags) == 0)
return 0;
if (wditem) {
memset(&wdfile, 0, sizeof(wdfile));
git_oid_cpy(&wdfile.id, &wditem->id);
wdfile.path = wditem->path;
wdfile.size = wditem->file_size;
wdfile.flags = GIT_DIFF_FLAG_VALID_ID;
wdfile.mode = wditem->mode;
workdir = &wdfile;
path = wditem->path;
}
if (delta) {
switch (delta->status) {
case GIT_DELTA_UNMODIFIED:
case GIT_DELTA_MODIFIED:
case GIT_DELTA_TYPECHANGE:
default:
baseline = &delta->old_file;
target = &delta->new_file;
break;
case GIT_DELTA_ADDED:
case GIT_DELTA_IGNORED:
case GIT_DELTA_UNTRACKED:
case GIT_DELTA_UNREADABLE:
target = &delta->new_file;
break;
case GIT_DELTA_DELETED:
baseline = &delta->old_file;
break;
}
path = delta->old_file.path;
}
{
int error = data->opts.notify_cb(
why, path, baseline, target, workdir, data->opts.notify_payload);
return git_error_set_after_callback_function(
error, "git_checkout notification");
}
}
GIT_INLINE(bool) is_workdir_base_or_new(
const git_oid *workdir_id,
const git_diff_file *baseitem,
const git_diff_file *newitem)
{
return (git_oid__cmp(&baseitem->id, workdir_id) == 0 ||
git_oid__cmp(&newitem->id, workdir_id) == 0);
}
GIT_INLINE(bool) is_filemode_changed(git_filemode_t a, git_filemode_t b, int respect_filemode)
{
/* If core.filemode = false, ignore links in the repository and executable bit changes */
if (!respect_filemode) {
if (a == S_IFLNK)
a = GIT_FILEMODE_BLOB;
if (b == S_IFLNK)
b = GIT_FILEMODE_BLOB;
a &= ~0111;
b &= ~0111;
}
return (a != b);
}
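/*
 * Decide whether the working directory item differs from the baseline.
 * Submodules are compared through their checked-out commit id; regular
 * files are checked against the index cache first (avoiding a re-hash
 * of the file) and only as a last resort by recomputing the content
 * oid. A workdir file whose contents already match either the baseline
 * or the checkout target counts as unmodified.
 */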
static bool checkout_is_workdir_modified(
checkout_data *data,
const git_diff_file *baseitem,
const git_diff_file *newitem,
const git_index_entry *wditem)
{
git_oid oid;
const git_index_entry *ie;
/* handle "modified" submodule */
if (wditem->mode == GIT_FILEMODE_COMMIT) {
git_submodule *sm;
unsigned int sm_status = 0;
const git_oid *sm_oid = NULL;
bool rval = false;
if (git_submodule_lookup(&sm, data->repo, wditem->path) < 0) {
git_error_clear();
return true;
}
if (git_submodule_status(&sm_status, data->repo, wditem->path, GIT_SUBMODULE_IGNORE_UNSPECIFIED) < 0 ||
GIT_SUBMODULE_STATUS_IS_WD_DIRTY(sm_status))
rval = true;
else if ((sm_oid = git_submodule_wd_id(sm)) == NULL)
rval = false;
else
rval = (git_oid__cmp(&baseitem->id, sm_oid) != 0);
git_submodule_free(sm);
return rval;
}
/*
* Look at the cache to decide if the workdir is modified: if the
* cache contents match the workdir contents, then we do not need
* to examine the working directory directly, instead we can
* examine the cache to see if _it_ has been modified. This allows
* us to avoid touching the disk.
*/
ie = git_index_get_bypath(data->index, wditem->path, 0);
if (ie != NULL &&
git_index_time_eq(&wditem->mtime, &ie->mtime) &&
wditem->file_size == ie->file_size &&
!is_filemode_changed(wditem->mode, ie->mode, data->respect_filemode)) {
/* The workdir is modified iff the index entry is modified */
return !is_workdir_base_or_new(&ie->id, baseitem, newitem) ||
is_filemode_changed(baseitem->mode, ie->mode, data->respect_filemode);
}
/* depending on where base is coming from, we may or may not know
* the actual size of the data, so we can't rely on this shortcut.
*/
if (baseitem->size && wditem->file_size != baseitem->size)
return true;
/* if the workdir item is a directory, it cannot be a modified file */
if (S_ISDIR(wditem->mode))
return false;
if (is_filemode_changed(baseitem->mode, wditem->mode, data->respect_filemode))
return true;
if (git_diff__oid_for_entry(&oid, data->diff, wditem, wditem->mode, NULL) < 0)
return false;
/* Allow the checkout if the workdir is not modified *or* if the checkout
* target's contents are already in the working directory.
*/
return !is_workdir_base_or_new(&oid, baseitem, newitem);
}
#define CHECKOUT_ACTION_IF(FLAG,YES,NO) \
((data->strategy & GIT_CHECKOUT_##FLAG) ? CHECKOUT_ACTION__##YES : CHECKOUT_ACTION__##NO)
static int checkout_action_common(
int *action,
checkout_data *data,
const git_diff_delta *delta,
const git_index_entry *wd)
{
git_checkout_notify_t notify = GIT_CHECKOUT_NOTIFY_NONE;
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0)
*action = (*action & ~CHECKOUT_ACTION__REMOVE);
if ((*action & CHECKOUT_ACTION__UPDATE_BLOB) != 0) {
if (S_ISGITLINK(delta->new_file.mode))
*action = (*action & ~CHECKOUT_ACTION__UPDATE_BLOB) |
CHECKOUT_ACTION__UPDATE_SUBMODULE;
/* to "update" a symlink, we must remove the old one first */
if (delta->new_file.mode == GIT_FILEMODE_LINK && wd != NULL)
*action |= CHECKOUT_ACTION__REMOVE;
/* if the file is on disk and doesn't match our mode, force update */
if (wd &&
GIT_PERMS_IS_EXEC(wd->mode) !=
GIT_PERMS_IS_EXEC(delta->new_file.mode))
*action |= CHECKOUT_ACTION__REMOVE;
notify = GIT_CHECKOUT_NOTIFY_UPDATED;
}
if ((*action & CHECKOUT_ACTION__CONFLICT) != 0)
notify = GIT_CHECKOUT_NOTIFY_CONFLICT;
return checkout_notify(data, notify, delta, wd);
}
static int checkout_action_no_wd(
int *action,
checkout_data *data,
const git_diff_delta *delta)
{
int error = 0;
*action = CHECKOUT_ACTION__NONE;
switch (delta->status) {
case GIT_DELTA_UNMODIFIED: /* case 12 */
error = checkout_notify(data, GIT_CHECKOUT_NOTIFY_DIRTY, delta, NULL);
if (error)
return error;
*action = CHECKOUT_ACTION_IF(RECREATE_MISSING, UPDATE_BLOB, NONE);
break;
case GIT_DELTA_ADDED: /* case 2 or 28 (and 5 but not really) */
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
break;
case GIT_DELTA_MODIFIED: /* case 13 (and 35 but not really) */
*action = CHECKOUT_ACTION_IF(RECREATE_MISSING, UPDATE_BLOB, CONFLICT);
break;
case GIT_DELTA_TYPECHANGE: /* case 21 (B->T) and 28 (T->B)*/
if (delta->new_file.mode == GIT_FILEMODE_TREE)
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
break;
case GIT_DELTA_DELETED: /* case 8 or 25 */
*action = CHECKOUT_ACTION_IF(SAFE, REMOVE, NONE);
break;
default: /* impossible */
break;
}
return checkout_action_common(action, data, delta, NULL);
}
static int checkout_target_fullpath(
git_buf **out, checkout_data *data, const char *path)
{
git_buf_truncate(&data->target_path, data->target_len);
if (path && git_buf_puts(&data->target_path, path) < 0)
return -1;
*out = &data->target_path;
return 0;
}
static bool wd_item_is_removable(
checkout_data *data, const git_index_entry *wd)
{
git_buf *full;
if (wd->mode != GIT_FILEMODE_TREE)
return true;
if (checkout_target_fullpath(&full, data, wd->path) < 0)
return false;
return !full || !git_path_contains(full, DOT_GIT);
}
static int checkout_queue_remove(checkout_data *data, const char *path)
{
char *copy = git_pool_strdup(&data->pool, path);
GIT_ERROR_CHECK_ALLOC(copy);
return git_vector_insert(&data->removes, copy);
}
/* note that this advances the iterator over the wd item */
static int checkout_action_wd_only(
checkout_data *data,
git_iterator *workdir,
const git_index_entry **wditem,
git_vector *pathspec)
{
int error = 0;
bool remove = false;
git_checkout_notify_t notify = GIT_CHECKOUT_NOTIFY_NONE;
const git_index_entry *wd = *wditem;
if (!git_pathspec__match(
pathspec, wd->path,
(data->strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH) != 0,
git_iterator_ignore_case(workdir), NULL, NULL))
return git_iterator_advance(wditem, workdir);
/* check if item is tracked in the index but not in the checkout diff */
if (data->index != NULL) {
size_t pos;
error = git_index__find_pos(
&pos, data->index, wd->path, 0, GIT_INDEX_STAGE_ANY);
if (wd->mode != GIT_FILEMODE_TREE) {
if (!error) { /* found by git_index__find_pos call */
notify = GIT_CHECKOUT_NOTIFY_DIRTY;
remove = ((data->strategy & GIT_CHECKOUT_FORCE) != 0);
} else if (error != GIT_ENOTFOUND)
return error;
else
error = 0; /* git_index__find_pos does not set error msg */
} else {
/* for tree entries, we have to see if there are any index
* entries that are contained inside that tree
*/
const git_index_entry *e = git_index_get_byindex(data->index, pos);
if (e != NULL && data->diff->pfxcomp(e->path, wd->path) == 0)
return git_iterator_advance_into(wditem, workdir);
}
}
if (notify != GIT_CHECKOUT_NOTIFY_NONE) {
/* if we found something in the index, notify and advance */
if ((error = checkout_notify(data, notify, NULL, wd)) != 0)
return error;
if (remove && wd_item_is_removable(data, wd))
error = checkout_queue_remove(data, wd->path);
if (!error)
error = git_iterator_advance(wditem, workdir);
} else {
/* untracked or ignored - can't know which until we advance through */
bool over = false, removable = wd_item_is_removable(data, wd);
git_iterator_status_t untracked_state;
/* copy the entry for issuing notification callback later */
git_index_entry saved_wd = *wd;
git_buf_sets(&data->tmp, wd->path);
saved_wd.path = data->tmp.ptr;
error = git_iterator_advance_over(
wditem, &untracked_state, workdir);
if (error == GIT_ITEROVER)
over = true;
else if (error < 0)
return error;
if (untracked_state == GIT_ITERATOR_STATUS_IGNORED) {
notify = GIT_CHECKOUT_NOTIFY_IGNORED;
remove = ((data->strategy & GIT_CHECKOUT_REMOVE_IGNORED) != 0);
} else {
notify = GIT_CHECKOUT_NOTIFY_UNTRACKED;
remove = ((data->strategy & GIT_CHECKOUT_REMOVE_UNTRACKED) != 0);
}
if ((error = checkout_notify(data, notify, NULL, &saved_wd)) != 0)
return error;
if (remove && removable)
error = checkout_queue_remove(data, saved_wd.path);
if (!error && over) /* restore ITEROVER if needed */
error = GIT_ITEROVER;
}
return error;
}
static bool submodule_is_config_only(
checkout_data *data,
const char *path)
{
git_submodule *sm = NULL;
unsigned int sm_loc = 0;
bool rval = false;
if (git_submodule_lookup(&sm, data->repo, path) < 0)
return true;
if (git_submodule_location(&sm_loc, sm) < 0 ||
sm_loc == GIT_SUBMODULE_STATUS_IN_CONFIG)
rval = true;
git_submodule_free(sm);
return rval;
}
static bool checkout_is_empty_dir(checkout_data *data, const char *path)
{
git_buf *fullpath;
if (checkout_target_fullpath(&fullpath, data, path) < 0)
return false;
return git_path_is_empty_dir(fullpath->ptr);
}
static int checkout_action_with_wd(
int *action,
checkout_data *data,
const git_diff_delta *delta,
git_iterator *workdir,
const git_index_entry *wd)
{
*action = CHECKOUT_ACTION__NONE;
switch (delta->status) {
case GIT_DELTA_UNMODIFIED: /* case 14/15 or 33 */
if (checkout_is_workdir_modified(data, &delta->old_file, &delta->new_file, wd)) {
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_DIRTY, delta, wd) );
*action = CHECKOUT_ACTION_IF(FORCE, UPDATE_BLOB, NONE);
}
break;
case GIT_DELTA_ADDED: /* case 3, 4 or 6 */
if (git_iterator_current_is_ignored(workdir))
*action = CHECKOUT_ACTION_IF(DONT_OVERWRITE_IGNORED, CONFLICT, UPDATE_BLOB);
else
*action = CHECKOUT_ACTION_IF(FORCE, UPDATE_BLOB, CONFLICT);
break;
case GIT_DELTA_DELETED: /* case 9 or 10 (or 26 but not really) */
if (checkout_is_workdir_modified(data, &delta->old_file, &delta->new_file, wd))
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE, CONFLICT);
else
*action = CHECKOUT_ACTION_IF(SAFE, REMOVE, NONE);
break;
case GIT_DELTA_MODIFIED: /* case 16, 17, 18 (or 36 but not really) */
if (wd->mode != GIT_FILEMODE_COMMIT &&
checkout_is_workdir_modified(data, &delta->old_file, &delta->new_file, wd))
*action = CHECKOUT_ACTION_IF(FORCE, UPDATE_BLOB, CONFLICT);
else
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
break;
case GIT_DELTA_TYPECHANGE: /* case 22, 23, 29, 30 */
if (delta->old_file.mode == GIT_FILEMODE_TREE) {
if (wd->mode == GIT_FILEMODE_TREE)
/* either deleting items in old tree will delete the wd dir,
* or we'll get a conflict when we attempt blob update...
*/
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
else if (wd->mode == GIT_FILEMODE_COMMIT) {
/* workdir is possibly a "phantom" submodule - treat as a
* tree if the only submodule info came from the config
*/
if (submodule_is_config_only(data, wd->path))
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
else
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
} else
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE, CONFLICT);
}
else if (checkout_is_workdir_modified(data, &delta->old_file, &delta->new_file, wd))
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
else
*action = CHECKOUT_ACTION_IF(SAFE, REMOVE_AND_UPDATE, NONE);
/* don't update if the typechange is to a tree */
if (delta->new_file.mode == GIT_FILEMODE_TREE)
*action = (*action & ~CHECKOUT_ACTION__UPDATE_BLOB);
break;
default: /* impossible */
break;
}
return checkout_action_common(action, data, delta, wd);
}
static int checkout_action_with_wd_blocker(
int *action,
checkout_data *data,
const git_diff_delta *delta,
const git_index_entry *wd)
{
*action = CHECKOUT_ACTION__NONE;
switch (delta->status) {
case GIT_DELTA_UNMODIFIED:
/* should show delta as dirty / deleted */
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_DIRTY, delta, wd) );
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, NONE);
break;
case GIT_DELTA_ADDED:
case GIT_DELTA_MODIFIED:
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
break;
case GIT_DELTA_DELETED:
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE, CONFLICT);
break;
case GIT_DELTA_TYPECHANGE:
/* not 100% certain about this... */
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
break;
default: /* impossible */
break;
}
return checkout_action_common(action, data, delta, wd);
}
static int checkout_action_with_wd_dir(
int *action,
checkout_data *data,
const git_diff_delta *delta,
git_iterator *workdir,
const git_index_entry *wd)
{
*action = CHECKOUT_ACTION__NONE;
switch (delta->status) {
case GIT_DELTA_UNMODIFIED: /* case 19 or 24 (or 34 but not really) */
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_DIRTY, delta, NULL));
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_UNTRACKED, NULL, wd));
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, NONE);
break;
case GIT_DELTA_ADDED:/* case 4 (and 7 for dir) */
case GIT_DELTA_MODIFIED: /* case 20 (or 37 but not really) */
if (delta->old_file.mode == GIT_FILEMODE_COMMIT)
/* expected submodule (and maybe found one) */;
else if (delta->new_file.mode != GIT_FILEMODE_TREE)
*action = git_iterator_current_is_ignored(workdir) ?
CHECKOUT_ACTION_IF(DONT_OVERWRITE_IGNORED, CONFLICT, REMOVE_AND_UPDATE) :
CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
break;
case GIT_DELTA_DELETED: /* case 11 (and 27 for dir) */
if (delta->old_file.mode != GIT_FILEMODE_TREE)
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_UNTRACKED, NULL, wd));
break;
case GIT_DELTA_TYPECHANGE: /* case 24 or 31 */
if (delta->old_file.mode == GIT_FILEMODE_TREE) {
/* For typechange from dir, remove dir and add blob, but it is
* not safe to remove dir if it contains modified files.
* However, safely removing child files will remove the parent
* directory if it is left empty, so we can defer removing the
* dir and it will succeed if no children are left.
*/
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
}
else if (delta->new_file.mode != GIT_FILEMODE_TREE)
/* For typechange to dir, dir is already created so no action */
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
break;
default: /* impossible */
break;
}
return checkout_action_common(action, data, delta, wd);
}
static int checkout_action_with_wd_dir_empty(
int *action,
checkout_data *data,
const git_diff_delta *delta)
{
int error = checkout_action_no_wd(action, data, delta);
/* We can always safely remove an empty directory. */
if (error == 0 && *action != CHECKOUT_ACTION__NONE)
*action |= CHECKOUT_ACTION__REMOVE;
return error;
}
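/*
 * Compute the action for a single diff delta while advancing the workdir
 * iterator alongside it. The six possible orderings of the workdir item
 * relative to the delta path are enumerated in the comment below; each
 * case dispatches to the matching checkout_action_* helper above.
 */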
static int checkout_action(
int *action,
checkout_data *data,
git_diff_delta *delta,
git_iterator *workdir,
const git_index_entry **wditem,
git_vector *pathspec)
{
int cmp = -1, error;
int (*strcomp)(const char *, const char *) = data->diff->strcomp;
int (*pfxcomp)(const char *str, const char *pfx) = data->diff->pfxcomp;
int (*advance)(const git_index_entry **, git_iterator *) = NULL;
/* move workdir iterator to follow along with deltas */
while (1) {
const git_index_entry *wd = *wditem;
if (!wd)
return checkout_action_no_wd(action, data, delta);
cmp = strcomp(wd->path, delta->old_file.path);
/* 1. wd before delta ("a/a" before "a/b")
* 2. wd prefixes delta & should expand ("a/" before "a/b")
* 3. wd prefixes delta & cannot expand ("a/b" before "a/b/c")
* 4. wd equals delta ("a/b" and "a/b")
* 5. wd after delta & delta prefixes wd ("a/b/c" after "a/b/" or "a/b")
* 6. wd after delta ("a/c" after "a/b")
*/
if (cmp < 0) {
cmp = pfxcomp(delta->old_file.path, wd->path);
if (cmp == 0) {
if (wd->mode == GIT_FILEMODE_TREE) {
/* case 2 - entry prefixed by workdir tree */
error = git_iterator_advance_into(wditem, workdir);
if (error < 0 && error != GIT_ITEROVER)
goto done;
continue;
}
/* case 3 maybe - wd contains non-dir where dir expected */
if (delta->old_file.path[strlen(wd->path)] == '/') {
error = checkout_action_with_wd_blocker(
action, data, delta, wd);
advance = git_iterator_advance;
goto done;
}
}
/* case 1 - handle wd item (if it matches pathspec) */
error = checkout_action_wd_only(data, workdir, wditem, pathspec);
if (error && error != GIT_ITEROVER)
goto done;
continue;
}
if (cmp == 0) {
/* case 4 */
error = checkout_action_with_wd(action, data, delta, workdir, wd);
advance = git_iterator_advance;
goto done;
}
cmp = pfxcomp(wd->path, delta->old_file.path);
if (cmp == 0) { /* case 5 */
if (wd->path[strlen(delta->old_file.path)] != '/')
return checkout_action_no_wd(action, data, delta);
if (delta->status == GIT_DELTA_TYPECHANGE) {
if (delta->old_file.mode == GIT_FILEMODE_TREE) {
error = checkout_action_with_wd(action, data, delta, workdir, wd);
advance = git_iterator_advance_into;
goto done;
}
if (delta->new_file.mode == GIT_FILEMODE_TREE ||
delta->new_file.mode == GIT_FILEMODE_COMMIT ||
delta->old_file.mode == GIT_FILEMODE_COMMIT)
{
error = checkout_action_with_wd(action, data, delta, workdir, wd);
advance = git_iterator_advance;
goto done;
}
}
return checkout_is_empty_dir(data, wd->path) ?
checkout_action_with_wd_dir_empty(action, data, delta) :
checkout_action_with_wd_dir(action, data, delta, workdir, wd);
}
/* case 6 - wd is after delta */
return checkout_action_no_wd(action, data, delta);
}
done:
if (!error && advance != NULL &&
(error = advance(wditem, workdir)) < 0) {
*wditem = NULL;
if (error == GIT_ITEROVER)
error = 0;
}
return error;
}
static int checkout_remaining_wd_items(
checkout_data *data,
git_iterator *workdir,
const git_index_entry *wd,
git_vector *spec)
{
int error = 0;
while (wd && !error)
error = checkout_action_wd_only(data, workdir, &wd, spec);
if (error == GIT_ITEROVER)
error = 0;
return error;
}
GIT_INLINE(int) checkout_idxentry_cmp(
const git_index_entry *a,
const git_index_entry *b)
{
if (!a && !b)
return 0;
else if (!a && b)
return -1;
else if (a && !b)
return 1;
else
return strcmp(a->path, b->path);
}
static int checkout_conflictdata_cmp(const void *a, const void *b)
{
const checkout_conflictdata *ca = a;
const checkout_conflictdata *cb = b;
int diff;
if ((diff = checkout_idxentry_cmp(ca->ancestor, cb->ancestor)) == 0 &&
(diff = checkout_idxentry_cmp(ca->ours, cb->ours)) == 0)
diff = checkout_idxentry_cmp(ca->theirs, cb->theirs);
return diff;
}
int checkout_conflictdata_empty(
const git_vector *conflicts, size_t idx, void *payload)
{
checkout_conflictdata *conflict;
GIT_UNUSED(payload);
if ((conflict = git_vector_get(conflicts, idx)) == NULL)
return -1;
if (conflict->ancestor || conflict->ours || conflict->theirs)
return 0;
git__free(conflict);
return 1;
}
GIT_INLINE(bool) conflict_pathspec_match(
checkout_data *data,
git_iterator *workdir,
git_vector *pathspec,
const git_index_entry *ancestor,
const git_index_entry *ours,
const git_index_entry *theirs)
{
/* if the pathspec matches ours *or* theirs, proceed */
if (ours && git_pathspec__match(pathspec, ours->path,
(data->strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH) != 0,
git_iterator_ignore_case(workdir), NULL, NULL))
return true;
if (theirs && git_pathspec__match(pathspec, theirs->path,
(data->strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH) != 0,
git_iterator_ignore_case(workdir), NULL, NULL))
return true;
if (ancestor && git_pathspec__match(pathspec, ancestor->path,
(data->strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH) != 0,
git_iterator_ignore_case(workdir), NULL, NULL))
return true;
return false;
}
GIT_INLINE(int) checkout_conflict_detect_submodule(checkout_conflictdata *conflict)
{
conflict->submodule = ((conflict->ancestor && S_ISGITLINK(conflict->ancestor->mode)) ||
(conflict->ours && S_ISGITLINK(conflict->ours->mode)) ||
(conflict->theirs && S_ISGITLINK(conflict->theirs->mode)));
return 0;
}
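/* A conflict is considered binary if any of its three sides is a binary
* blob; the lookups stop at the first side that tests binary.
*/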
GIT_INLINE(int) checkout_conflict_detect_binary(git_repository *repo, checkout_conflictdata *conflict)
{
git_blob *ancestor_blob = NULL, *our_blob = NULL, *their_blob = NULL;
int error = 0;
if (conflict->submodule)
return 0;
if (conflict->ancestor) {
if ((error = git_blob_lookup(&ancestor_blob, repo, &conflict->ancestor->id)) < 0)
goto done;
conflict->binary = git_blob_is_binary(ancestor_blob);
}
if (!conflict->binary && conflict->ours) {
if ((error = git_blob_lookup(&our_blob, repo, &conflict->ours->id)) < 0)
goto done;
conflict->binary = git_blob_is_binary(our_blob);
}
if (!conflict->binary && conflict->theirs) {
if ((error = git_blob_lookup(&their_blob, repo, &conflict->theirs->id)) < 0)
goto done;
conflict->binary = git_blob_is_binary(their_blob);
}
done:
git_blob_free(ancestor_blob);
git_blob_free(our_blob);
git_blob_free(their_blob);
return error;
}
static int checkout_conflict_append_update(
const git_index_entry *ancestor,
const git_index_entry *ours,
const git_index_entry *theirs,
void *payload)
{
checkout_data *data = payload;
checkout_conflictdata *conflict;
int error;
conflict = git__calloc(1, sizeof(checkout_conflictdata));
GIT_ERROR_CHECK_ALLOC(conflict);
conflict->ancestor = ancestor;
conflict->ours = ours;
conflict->theirs = theirs;
if ((error = checkout_conflict_detect_submodule(conflict)) < 0 ||
(error = checkout_conflict_detect_binary(data->repo, conflict)) < 0)
{
git__free(conflict);
return error;
}
if (git_vector_insert(&data->update_conflicts, conflict))
return -1;
return 0;
}
static int checkout_conflicts_foreach(
checkout_data *data,
git_index *index,
git_iterator *workdir,
git_vector *pathspec,
int (*cb)(const git_index_entry *, const git_index_entry *, const git_index_entry *, void *),
void *payload)
{
git_index_conflict_iterator *iterator = NULL;
const git_index_entry *ancestor, *ours, *theirs;
int error = 0;
if ((error = git_index_conflict_iterator_new(&iterator, index)) < 0)
goto done;
/* Collect the conflicts */
while ((error = git_index_conflict_next(&ancestor, &ours, &theirs, iterator)) == 0) {
if (!conflict_pathspec_match(data, workdir, pathspec, ancestor, ours, theirs))
continue;
if ((error = cb(ancestor, ours, theirs, payload)) < 0)
goto done;
}
if (error == GIT_ITEROVER)
error = 0;
done:
git_index_conflict_iterator_free(iterator);
return error;
}
static int checkout_conflicts_load(checkout_data *data, git_iterator *workdir, git_vector *pathspec)
{
git_index *index;
/* Only write conflicts from sources that have them: indexes. */
if ((index = git_iterator_index(data->target)) == NULL)
return 0;
data->update_conflicts._cmp = checkout_conflictdata_cmp;
if (checkout_conflicts_foreach(data, index, workdir, pathspec, checkout_conflict_append_update, data) < 0)
return -1;
/* Collect the REUC and NAME entries */
data->update_reuc = &index->reuc;
data->update_names = &index->names;
return 0;
}
GIT_INLINE(int) checkout_conflicts_cmp_entry(
const char *path,
const git_index_entry *entry)
{
return strcmp(path, entry->path);
}
static int checkout_conflicts_cmp_ancestor(const void *p, const void *c)
{
const char *path = p;
const checkout_conflictdata *conflict = c;
if (!conflict->ancestor)
return 1;
return checkout_conflicts_cmp_entry(path, conflict->ancestor);
}
static checkout_conflictdata *checkout_conflicts_search_ancestor(
checkout_data *data,
const char *path)
{
size_t pos;
if (git_vector_bsearch2(&pos, &data->update_conflicts, checkout_conflicts_cmp_ancestor, path) < 0)
return NULL;
return git_vector_get(&data->update_conflicts, pos);
}
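/* Conflicts with no ancestor entry sort to the front of the vector (NULL
* compares low in checkout_idxentry_cmp), so the scan can stop at the
* first conflict that has one.
*/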
static checkout_conflictdata *checkout_conflicts_search_branch(
checkout_data *data,
const char *path)
{
checkout_conflictdata *conflict;
size_t i;
git_vector_foreach(&data->update_conflicts, i, conflict) {
int cmp = -1;
if (conflict->ancestor)
break;
if (conflict->ours)
cmp = checkout_conflicts_cmp_entry(path, conflict->ours);
else if (conflict->theirs)
cmp = checkout_conflicts_cmp_entry(path, conflict->theirs);
if (cmp == 0)
return conflict;
}
return NULL;
}
static int checkout_conflicts_load_byname_entry(
checkout_conflictdata **ancestor_out,
checkout_conflictdata **ours_out,
checkout_conflictdata **theirs_out,
checkout_data *data,
const git_index_name_entry *name_entry)
{
checkout_conflictdata *ancestor, *ours = NULL, *theirs = NULL;
int error = 0;
*ancestor_out = NULL;
*ours_out = NULL;
*theirs_out = NULL;
if (!name_entry->ancestor) {
git_error_set(GIT_ERROR_INDEX, "a NAME entry exists without an ancestor");
error = -1;
goto done;
}
if (!name_entry->ours && !name_entry->theirs) {
git_error_set(GIT_ERROR_INDEX, "a NAME entry exists without an ours or theirs");
error = -1;
goto done;
}
if ((ancestor = checkout_conflicts_search_ancestor(data,
name_entry->ancestor)) == NULL) {
git_error_set(GIT_ERROR_INDEX,
"a NAME entry referenced ancestor entry '%s' which does not exist in the main index",
name_entry->ancestor);
error = -1;
goto done;
}
if (name_entry->ours) {
if (strcmp(name_entry->ancestor, name_entry->ours) == 0)
ours = ancestor;
else if ((ours = checkout_conflicts_search_branch(data, name_entry->ours)) == NULL ||
ours->ours == NULL) {
git_error_set(GIT_ERROR_INDEX,
"a NAME entry referenced our entry '%s' which does not exist in the main index",
name_entry->ours);
error = -1;
goto done;
}
}
if (name_entry->theirs) {
if (strcmp(name_entry->ancestor, name_entry->theirs) == 0)
theirs = ancestor;
else if (name_entry->ours && strcmp(name_entry->ours, name_entry->theirs) == 0)
theirs = ours;
else if ((theirs = checkout_conflicts_search_branch(data, name_entry->theirs)) == NULL ||
theirs->theirs == NULL) {
git_error_set(GIT_ERROR_INDEX,
"a NAME entry referenced their entry '%s' which does not exist in the main index",
name_entry->theirs);
error = -1;
goto done;
}
}
*ancestor_out = ancestor;
*ours_out = ours;
*theirs_out = theirs;
done:
return error;
}
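/* Use the index NAME (rename) entries to fold the our/their sides of
* renamed paths back into the ancestor's conflict record; records emptied
* by the folding are freed and dropped from the vector afterwards.
*/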
static int checkout_conflicts_coalesce_renames(
checkout_data *data)
{
git_index *index;
const git_index_name_entry *name_entry;
checkout_conflictdata *ancestor_conflict, *our_conflict, *their_conflict;
size_t i, names;
int error = 0;
if ((index = git_iterator_index(data->target)) == NULL)
return 0;
/* Juggle entries based on renames */
names = git_index_name_entrycount(index);
for (i = 0; i < names; i++) {
name_entry = git_index_name_get_byindex(index, i);
if ((error = checkout_conflicts_load_byname_entry(
&ancestor_conflict, &our_conflict, &their_conflict,
data, name_entry)) < 0)
goto done;
if (our_conflict && our_conflict != ancestor_conflict) {
ancestor_conflict->ours = our_conflict->ours;
our_conflict->ours = NULL;
if (our_conflict->theirs)
our_conflict->name_collision = 1;
if (our_conflict->name_collision)
ancestor_conflict->name_collision = 1;
}
if (their_conflict && their_conflict != ancestor_conflict) {
ancestor_conflict->theirs = their_conflict->theirs;
their_conflict->theirs = NULL;
if (their_conflict->ours)
their_conflict->name_collision = 1;
if (their_conflict->name_collision)
ancestor_conflict->name_collision = 1;
}
if (our_conflict && our_conflict != ancestor_conflict &&
their_conflict && their_conflict != ancestor_conflict)
ancestor_conflict->one_to_two = 1;
}
git_vector_remove_matching(
&data->update_conflicts, checkout_conflictdata_empty, NULL);
done:
return error;
}
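/* Flag directory/file conflicts: a conflict with exactly one side set
* whose path is a prefix of some other index entry's path.
*/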
static int checkout_conflicts_mark_directoryfile(
checkout_data *data)
{
git_index *index;
checkout_conflictdata *conflict;
const git_index_entry *entry;
size_t i, j, len;
const char *path;
int prefixed, error = 0;
if ((index = git_iterator_index(data->target)) == NULL)
return 0;
len = git_index_entrycount(index);
/* Find d/f conflicts */
git_vector_foreach(&data->update_conflicts, i, conflict) {
if ((conflict->ours && conflict->theirs) ||
(!conflict->ours && !conflict->theirs))
continue;
path = conflict->ours ?
conflict->ours->path : conflict->theirs->path;
if ((error = git_index_find(&j, index, path)) < 0) {
if (error == GIT_ENOTFOUND)
git_error_set(GIT_ERROR_INDEX,
"index inconsistency, could not find entry for expected conflict '%s'", path);
goto done;
}
for (; j < len; j++) {
if ((entry = git_index_get_byindex(index, j)) == NULL) {
git_error_set(GIT_ERROR_INDEX,
"index inconsistency, truncated index while loading expected conflict '%s'", path);
error = -1;
goto done;
}
prefixed = git_path_equal_or_prefixed(path, entry->path, NULL);
if (prefixed == GIT_PATH_EQUAL)
continue;
if (prefixed == GIT_PATH_PREFIX)
conflict->directoryfile = 1;
break;
}
}
done:
return error;
}
static int checkout_get_update_conflicts(
checkout_data *data,
git_iterator *workdir,
git_vector *pathspec)
{
int error = 0;
if (data->strategy & GIT_CHECKOUT_SKIP_UNMERGED)
return 0;
if ((error = checkout_conflicts_load(data, workdir, pathspec)) < 0 ||
(error = checkout_conflicts_coalesce_renames(data)) < 0 ||
(error = checkout_conflicts_mark_directoryfile(data)) < 0)
goto done;
done:
return error;
}
static int checkout_conflict_append_remove(
const git_index_entry *ancestor,
const git_index_entry *ours,
const git_index_entry *theirs,
void *payload)
{
checkout_data *data = payload;
const char *name;
assert(ancestor || ours || theirs);
if (ancestor)
name = git__strdup(ancestor->path);
else if (ours)
name = git__strdup(ours->path);
else if (theirs)
name = git__strdup(theirs->path);
else
abort();
GIT_ERROR_CHECK_ALLOC(name);
return git_vector_insert(&data->remove_conflicts, (char *)name);
}
static int checkout_get_remove_conflicts(
checkout_data *data,
git_iterator *workdir,
git_vector *pathspec)
{
if ((data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) != 0)
return 0;
return checkout_conflicts_foreach(data, data->index, workdir, pathspec, checkout_conflict_append_remove, data);
}
static int checkout_verify_paths(
git_repository *repo,
int action,
git_diff_delta *delta)
{
unsigned int flags = GIT_PATH_REJECT_WORKDIR_DEFAULTS | GIT_PATH_REJECT_DOT_GIT_NTFS;
if (action & CHECKOUT_ACTION__REMOVE) {
if (!git_path_isvalid(repo, delta->old_file.path, delta->old_file.mode, flags)) {
git_error_set(GIT_ERROR_CHECKOUT, "cannot remove invalid path '%s'", delta->old_file.path);
return -1;
}
}
if (action & ~CHECKOUT_ACTION__REMOVE) {
if (!git_path_isvalid(repo, delta->new_file.path, delta->new_file.mode, flags)) {
git_error_set(GIT_ERROR_CHECKOUT, "cannot checkout to invalid path '%s'", delta->new_file.path);
return -1;
}
}
return 0;
}
static int checkout_get_actions(
uint32_t **actions_ptr,
size_t **counts_ptr,
checkout_data *data,
git_iterator *workdir)
{
int error = 0, act;
const git_index_entry *wditem;
git_vector pathspec = GIT_VECTOR_INIT, *deltas;
git_pool pathpool;
git_diff_delta *delta;
size_t i, *counts = NULL;
uint32_t *actions = NULL;
git_pool_init(&pathpool, 1);
if (data->opts.paths.count > 0 &&
git_pathspec__vinit(&pathspec, &data->opts.paths, &pathpool) < 0)
return -1;
if ((error = git_iterator_current(&wditem, workdir)) < 0 &&
error != GIT_ITEROVER)
goto fail;
deltas = &data->diff->deltas;
*counts_ptr = counts = git__calloc(CHECKOUT_ACTION__MAX+1, sizeof(size_t));
*actions_ptr = actions = git__calloc(
deltas->length ? deltas->length : 1, sizeof(uint32_t));
if (!counts || !actions) {
error = -1;
goto fail;
}
git_vector_foreach(deltas, i, delta) {
if ((error = checkout_action(&act, data, delta, workdir, &wditem, &pathspec)) == 0)
error = checkout_verify_paths(data->repo, act, delta);
if (error != 0)
goto fail;
actions[i] = act;
if (act & CHECKOUT_ACTION__REMOVE)
counts[CHECKOUT_ACTION__REMOVE]++;
if (act & CHECKOUT_ACTION__UPDATE_BLOB)
counts[CHECKOUT_ACTION__UPDATE_BLOB]++;
if (act & CHECKOUT_ACTION__UPDATE_SUBMODULE)
counts[CHECKOUT_ACTION__UPDATE_SUBMODULE]++;
if (act & CHECKOUT_ACTION__CONFLICT)
counts[CHECKOUT_ACTION__CONFLICT]++;
}
error = checkout_remaining_wd_items(data, workdir, wditem, &pathspec);
if (error)
goto fail;
counts[CHECKOUT_ACTION__REMOVE] += data->removes.length;
if (counts[CHECKOUT_ACTION__CONFLICT] > 0 &&
(data->strategy & GIT_CHECKOUT_ALLOW_CONFLICTS) == 0)
{
git_error_set(GIT_ERROR_CHECKOUT, "%"PRIuZ" %s checkout",
counts[CHECKOUT_ACTION__CONFLICT],
counts[CHECKOUT_ACTION__CONFLICT] == 1 ?
"conflict prevents" : "conflicts prevent");
error = GIT_ECONFLICT;
goto fail;
}
if ((error = checkout_get_remove_conflicts(data, workdir, &pathspec)) < 0 ||
(error = checkout_get_update_conflicts(data, workdir, &pathspec)) < 0)
goto fail;
counts[CHECKOUT_ACTION__REMOVE_CONFLICT] = git_vector_length(&data->remove_conflicts);
counts[CHECKOUT_ACTION__UPDATE_CONFLICT] = git_vector_length(&data->update_conflicts);
git_pathspec__vfree(&pathspec);
git_pool_clear(&pathpool);
return 0;
fail:
*counts_ptr = NULL;
git__free(counts);
*actions_ptr = NULL;
git__free(actions);
git_pathspec__vfree(&pathspec);
git_pool_clear(&pathpool);
return error;
}
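/* On a case-insensitive filesystem, an existing file whose name differs
* only by case must be removed before checkout writes the new one, unless
* GIT_CHECKOUT_DONT_REMOVE_EXISTING asks us not to. If the config lookup
* fails, assume a case-sensitive filesystem.
*/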
static bool should_remove_existing(checkout_data *data)
{
int ignorecase;
if (git_repository__configmap_lookup(&ignorecase, data->repo, GIT_CONFIGMAP_IGNORECASE) < 0) {
ignorecase = 0;
}
return (ignorecase &&
(data->strategy & GIT_CHECKOUT_DONT_REMOVE_EXISTING) == 0);
}
#define MKDIR_NORMAL \
GIT_MKDIR_PATH | GIT_MKDIR_VERIFY_DIR
#define MKDIR_REMOVE_EXISTING \
MKDIR_NORMAL | GIT_MKDIR_REMOVE_FILES | GIT_MKDIR_REMOVE_SYMLINKS
static int checkout_mkdir(
checkout_data *data,
const char *path,
const char *base,
mode_t mode,
unsigned int flags)
{
struct git_futils_mkdir_options mkdir_opts = {0};
int error;
mkdir_opts.dir_map = data->mkdir_map;
mkdir_opts.pool = &data->pool;
error = git_futils_mkdir_relative(
path, base, mode, flags, &mkdir_opts);
data->perfdata.mkdir_calls += mkdir_opts.perfdata.mkdir_calls;
data->perfdata.stat_calls += mkdir_opts.perfdata.stat_calls;
data->perfdata.chmod_calls += mkdir_opts.perfdata.chmod_calls;
return error;
}
static int mkpath2file(
checkout_data *data, const char *path, unsigned int mode)
{
struct stat st;
bool remove_existing = should_remove_existing(data);
unsigned int flags =
(remove_existing ? MKDIR_REMOVE_EXISTING : MKDIR_NORMAL) |
GIT_MKDIR_SKIP_LAST;
int error;
if ((error = checkout_mkdir(
data, path, data->opts.target_directory, mode, flags)) < 0)
return error;
if (remove_existing) {
data->perfdata.stat_calls++;
if (p_lstat(path, &st) == 0) {
/* Some file, symlink or folder already exists at this name.
* We would have removed it in remove_the_old unless we're on
* a case insensitive filesystem (or the user has asked us not
* to). Remove the similarly named file so the new one can be
* written.
*/
error = git_futils_rmdir_r(path, NULL, GIT_RMDIR_REMOVE_FILES);
} else if (errno != ENOENT) {
git_error_set(GIT_ERROR_OS, "failed to stat '%s'", path);
return GIT_EEXISTS;
} else {
git_error_clear();
}
}
return error;
}
struct checkout_stream {
git_writestream base;
const char *path;
int fd;
int open;
};
static int checkout_stream_write(
git_writestream *s, const char *buffer, size_t len)
{
struct checkout_stream *stream = (struct checkout_stream *)s;
int ret;
if ((ret = p_write(stream->fd, buffer, len)) < 0)
git_error_set(GIT_ERROR_OS, "could not write to '%s'", stream->path);
return ret;
}
static int checkout_stream_close(git_writestream *s)
{
struct checkout_stream *stream = (struct checkout_stream *)s;
assert(stream && stream->open);
stream->open = 0;
return p_close(stream->fd);
}
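/* No-op free: the stream is embedded on the caller's stack in
* blob_content_to_file, so there is nothing to release here.
*/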
static void checkout_stream_free(git_writestream *s)
{
GIT_UNUSED(s);
}
static int blob_content_to_file(
checkout_data *data,
struct stat *st,
git_blob *blob,
const char *path,
const char *hint_path,
mode_t entry_filemode)
{
int flags = data->opts.file_open_flags;
mode_t file_mode = data->opts.file_mode ?
data->opts.file_mode : entry_filemode;
git_filter_options filter_opts = GIT_FILTER_OPTIONS_INIT;
struct checkout_stream writer;
mode_t mode;
git_filter_list *fl = NULL;
int fd;
int error = 0;
if (hint_path == NULL)
hint_path = path;
if ((error = mkpath2file(data, path, data->opts.dir_mode)) < 0)
return error;
if (flags <= 0)
flags = O_CREAT | O_TRUNC | O_WRONLY;
if (!(mode = file_mode))
mode = GIT_FILEMODE_BLOB;
if ((fd = p_open(path, flags, mode)) < 0) {
git_error_set(GIT_ERROR_OS, "could not open '%s' for writing", path);
return fd;
}
filter_opts.attr_session = &data->attr_session;
filter_opts.temp_buf = &data->tmp;
if (!data->opts.disable_filters &&
(error = git_filter_list__load_ext(
&fl, data->repo, blob, hint_path,
GIT_FILTER_TO_WORKTREE, &filter_opts))) {
p_close(fd);
return error;
}
/* setup the writer */
memset(&writer, 0, sizeof(struct checkout_stream));
writer.base.write = checkout_stream_write;
writer.base.close = checkout_stream_close;
writer.base.free = checkout_stream_free;
writer.path = path;
writer.fd = fd;
writer.open = 1;
error = git_filter_list_stream_blob(fl, blob, &writer.base);
assert(writer.open == 0);
git_filter_list_free(fl);
if (error < 0)
return error;
if (st) {
data->perfdata.stat_calls++;
if ((error = p_stat(path, st)) < 0) {
git_error_set(GIT_ERROR_OS, "failed to stat '%s'", path);
return error;
}
st->st_mode = entry_filemode;
}
return 0;
}
static int blob_content_to_link(
checkout_data *data,
struct stat *st,
git_blob *blob,
const char *path)
{
git_buf linktarget = GIT_BUF_INIT;
int error;
if ((error = mkpath2file(data, path, data->opts.dir_mode)) < 0)
return error;
if ((error = git_blob__getbuf(&linktarget, blob)) < 0)
return error;
if (data->can_symlink) {
if ((error = p_symlink(git_buf_cstr(&linktarget), path)) < 0)
git_error_set(GIT_ERROR_OS, "could not create symlink %s", path);
} else {
error = git_futils_fake_symlink(git_buf_cstr(&linktarget), path);
}
if (!error) {
data->perfdata.stat_calls++;
if ((error = p_lstat(path, st)) < 0)
git_error_set(GIT_ERROR_CHECKOUT, "could not stat symlink %s", path);
st->st_mode = GIT_FILEMODE_LINK;
}
git_buf_dispose(&linktarget);
return error;
}
static int checkout_update_index(
checkout_data *data,
const git_diff_file *file,
struct stat *st)
{
git_index_entry entry;
if (!data->index)
return 0;
memset(&entry, 0, sizeof(entry));
entry.path = (char *)file->path; /* cast to prevent warning */
git_index_entry__init_from_stat(&entry, st, true);
git_oid_cpy(&entry.id, &file->id);
return git_index_add(data->index, &entry);
}
static int checkout_submodule_update_index(
checkout_data *data,
const git_diff_file *file)
{
git_buf *fullpath;
struct stat st;
/* update the index unless prevented */
if ((data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) != 0)
return 0;
if (checkout_target_fullpath(&fullpath, data, file->path) < 0)
return -1;
data->perfdata.stat_calls++;
if (p_stat(fullpath->ptr, &st) < 0) {
git_error_set(
GIT_ERROR_CHECKOUT, "could not stat submodule %s", file->path);
return GIT_ENOTFOUND;
}
st.st_mode = GIT_FILEMODE_COMMIT;
return checkout_update_index(data, file, &st);
}
static int checkout_submodule(
checkout_data *data,
const git_diff_file *file)
{
bool remove_existing = should_remove_existing(data);
int error = 0;
/* Until submodules are supported, UPDATE_ONLY means do nothing here */
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0)
return 0;
if ((error = checkout_mkdir(
data,
file->path, data->opts.target_directory, data->opts.dir_mode,
remove_existing ? MKDIR_REMOVE_EXISTING : MKDIR_NORMAL)) < 0)
return error;
if ((error = git_submodule_lookup(NULL, data->repo, file->path)) < 0) {
/* I've observed repos with submodules in the tree that do not
* have a .gitmodules - core Git just makes an empty directory
*/
if (error == GIT_ENOTFOUND) {
git_error_clear();
return checkout_submodule_update_index(data, file);
}
return error;
}
/* TODO: Support checkout_strategy options. Two circumstances:
* 1 - submodule already checked out, but we need to move the HEAD
* to the new OID, or
* 2 - submodule not checked out and we should recursively check it out
*
* Checkout will not execute a pull on the submodule, but a clone
* command should probably be able to. Do we need a submodule callback?
*/
return checkout_submodule_update_index(data, file);
}
static void report_progress(
checkout_data *data,
const char *path)
{
if (data->opts.progress_cb)
data->opts.progress_cb(
path, data->completed_steps, data->total_steps,
data->opts.progress_payload);
}
static int checkout_safe_for_update_only(
checkout_data *data, const char *path, mode_t expected_mode)
{
struct stat st;
data->perfdata.stat_calls++;
if (p_lstat(path, &st) < 0) {
/* if it doesn't exist, then no error and no update */
if (errno == ENOENT || errno == ENOTDIR)
return 0;
/* otherwise, stat error and no update */
git_error_set(GIT_ERROR_OS, "failed to stat '%s'", path);
return -1;
}
/* only safe for update if this is the same type of file */
if ((st.st_mode & ~0777) == (expected_mode & ~0777))
return 1;
return 0;
}
static int checkout_write_content(
checkout_data *data,
const git_oid *oid,
const char *full_path,
const char *hint_path,
unsigned int mode,
struct stat *st)
{
int error = 0;
git_blob *blob;
if ((error = git_blob_lookup(&blob, data->repo, oid)) < 0)
return error;
if (S_ISLNK(mode))
error = blob_content_to_link(data, st, blob, full_path);
else
error = blob_content_to_file(data, st, blob, full_path, hint_path, mode);
git_blob_free(blob);
/* if we try to create the blob and an existing directory blocks it from
* being written, then there must have been a typechange conflict in a
* parent directory - suppress the error and try to continue.
*/
if ((data->strategy & GIT_CHECKOUT_ALLOW_CONFLICTS) != 0 &&
(error == GIT_ENOTFOUND || error == GIT_EEXISTS))
{
git_error_clear();
error = 0;
}
return error;
}
static int checkout_blob(
checkout_data *data,
const git_diff_file *file)
{
git_buf *fullpath;
struct stat st;
int error = 0;
if (checkout_target_fullpath(&fullpath, data, file->path) < 0)
return -1;
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0) {
int rval = checkout_safe_for_update_only(
data, fullpath->ptr, file->mode);
if (rval <= 0)
return rval;
}
error = checkout_write_content(
data, &file->id, fullpath->ptr, NULL, file->mode, &st);
/* update the index unless prevented */
if (!error && (data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0)
error = checkout_update_index(data, file, &st);
/* update the submodule data if this was a new .gitmodules file */
if (!error && strcmp(file->path, ".gitmodules") == 0)
data->reload_submodules = true;
return error;
}
static int checkout_remove_the_old(
unsigned int *actions,
checkout_data *data)
{
int error = 0;
git_diff_delta *delta;
const char *str;
size_t i;
git_buf *fullpath;
uint32_t flg = GIT_RMDIR_EMPTY_PARENTS |
GIT_RMDIR_REMOVE_FILES | GIT_RMDIR_REMOVE_BLOCKERS;
if (data->opts.checkout_strategy & GIT_CHECKOUT_SKIP_LOCKED_DIRECTORIES)
flg |= GIT_RMDIR_SKIP_NONEMPTY;
if (checkout_target_fullpath(&fullpath, data, NULL) < 0)
return -1;
git_vector_foreach(&data->diff->deltas, i, delta) {
if (actions[i] & CHECKOUT_ACTION__REMOVE) {
error = git_futils_rmdir_r(
delta->old_file.path, fullpath->ptr, flg);
if (error < 0)
return error;
data->completed_steps++;
report_progress(data, delta->old_file.path);
if ((actions[i] & CHECKOUT_ACTION__UPDATE_BLOB) == 0 &&
(data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0 &&
data->index != NULL)
{
(void)git_index_remove(data->index, delta->old_file.path, 0);
}
}
}
git_vector_foreach(&data->removes, i, str) {
error = git_futils_rmdir_r(str, fullpath->ptr, flg);
if (error < 0)
return error;
data->completed_steps++;
report_progress(data, str);
if ((data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0 &&
data->index != NULL)
{
if (str[strlen(str) - 1] == '/')
(void)git_index_remove_directory(data->index, str, 0);
else
(void)git_index_remove(data->index, str, 0);
}
}
return 0;
}
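/* Deferred removal is currently stubbed out: the #if 0 block preserves the
* intended implementation (note it refers to a `data` variable that is not
* in scope here), while the live branch asserts it is never reached.
*/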
static int checkout_deferred_remove(git_repository *repo, const char *path)
{
#if 0
int error = git_futils_rmdir_r(
path, data->opts.target_directory, GIT_RMDIR_EMPTY_PARENTS);
if (error == GIT_ENOTFOUND) {
error = 0;
git_error_clear();
}
return error;
#else
GIT_UNUSED(repo);
GIT_UNUSED(path);
assert(false);
return 0;
#endif
}
static int checkout_create_the_new(
unsigned int *actions,
checkout_data *data)
{
int error = 0;
git_diff_delta *delta;
size_t i;
git_vector_foreach(&data->diff->deltas, i, delta) {
if (actions[i] & CHECKOUT_ACTION__DEFER_REMOVE) {
/* this had a blocking directory that should be removed only if
* all of its contents were safely removed
*/
if ((error = checkout_deferred_remove(
data->repo, delta->old_file.path)) < 0)
return error;
}
if (actions[i] & CHECKOUT_ACTION__UPDATE_BLOB && !S_ISLNK(delta->new_file.mode)) {
if ((error = checkout_blob(data, &delta->new_file)) < 0)
return error;
data->completed_steps++;
report_progress(data, delta->new_file.path);
}
}
git_vector_foreach(&data->diff->deltas, i, delta) {
if (actions[i] & CHECKOUT_ACTION__UPDATE_BLOB && S_ISLNK(delta->new_file.mode)) {
if ((error = checkout_blob(data, &delta->new_file)) < 0)
return error;
data->completed_steps++;
report_progress(data, delta->new_file.path);
}
}
return 0;
}
static int checkout_create_submodules(
unsigned int *actions,
checkout_data *data)
{
int error = 0;
git_diff_delta *delta;
size_t i;
git_vector_foreach(&data->diff->deltas, i, delta) {
if (actions[i] & CHECKOUT_ACTION__DEFER_REMOVE) {
/* this has a blocking directory that should be removed only if
* all of its contents were safely removed
*/
if ((error = checkout_deferred_remove(
data->repo, delta->old_file.path)) < 0)
return error;
}
if (actions[i] & CHECKOUT_ACTION__UPDATE_SUBMODULE) {
error = checkout_submodule(data, &delta->new_file);
if (error < 0)
return error;
data->completed_steps++;
report_progress(data, delta->new_file.path);
}
}
return 0;
}
static int checkout_lookup_head_tree(git_tree **out, git_repository *repo)
{
int error = 0;
git_reference *ref = NULL;
git_object *head;
if (!(error = git_repository_head(&ref, repo)) &&
!(error = git_reference_peel(&head, ref, GIT_OBJECT_TREE)))
*out = (git_tree *)head;
git_reference_free(ref);
return error;
}
static int conflict_entry_name(
git_buf *out,
const char *side_name,
const char *filename)
{
if (git_buf_puts(out, side_name) < 0 ||
git_buf_putc(out, ':') < 0 ||
git_buf_puts(out, filename) < 0)
return -1;
return 0;
}
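/* Append "~<suffix>" to a path, then "_0", "_1", ... while a file with the
* candidate name already exists; e.g. "file.txt" with suffix "ours" yields
* "file.txt~ours", then "file.txt~ours_0" on collision.
*/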
static int checkout_path_suffixed(git_buf *path, const char *suffix)
{
size_t path_len;
int i = 0, error = 0;
if ((error = git_buf_putc(path, '~')) < 0 || (error = git_buf_puts(path, suffix)) < 0)
return -1;
path_len = git_buf_len(path);
while (git_path_exists(git_buf_cstr(path)) && i < INT_MAX) {
git_buf_truncate(path, path_len);
if ((error = git_buf_putc(path, '_')) < 0 ||
(error = git_buf_printf(path, "%d", i)) < 0)
return error;
i++;
}
if (i == INT_MAX) {
git_buf_truncate(path, path_len);
git_error_set(GIT_ERROR_CHECKOUT, "could not write '%s': working directory file exists", path->ptr);
return GIT_EEXISTS;
}
return 0;
}
static int checkout_write_entry(
checkout_data *data,
checkout_conflictdata *conflict,
const git_index_entry *side)
{
const char *hint_path = NULL, *suffix;
git_buf *fullpath;
struct stat st;
int error;
assert (side == conflict->ours || side == conflict->theirs);
if (checkout_target_fullpath(&fullpath, data, side->path) < 0)
return -1;
if ((conflict->name_collision || conflict->directoryfile) &&
(data->strategy & GIT_CHECKOUT_USE_OURS) == 0 &&
(data->strategy & GIT_CHECKOUT_USE_THEIRS) == 0) {
if (side == conflict->ours)
suffix = data->opts.our_label ? data->opts.our_label :
"ours";
else
suffix = data->opts.their_label ? data->opts.their_label :
"theirs";
if (checkout_path_suffixed(fullpath, suffix) < 0)
return -1;
hint_path = side->path;
}
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0 &&
(error = checkout_safe_for_update_only(data, fullpath->ptr, side->mode)) <= 0)
return error;
if (!S_ISGITLINK(side->mode))
return checkout_write_content(data,
&side->id, fullpath->ptr, hint_path, side->mode, &st);
return 0;
}
static int checkout_write_entries(
checkout_data *data,
checkout_conflictdata *conflict)
{
int error = 0;
if ((error = checkout_write_entry(data, conflict, conflict->ours)) >= 0)
error = checkout_write_entry(data, conflict, conflict->theirs);
return error;
}
static int checkout_merge_path(
git_buf *out,
checkout_data *data,
checkout_conflictdata *conflict,
git_merge_file_result *result)
{
const char *our_label_raw, *their_label_raw, *suffix;
int error = 0;
if ((error = git_buf_joinpath(out, git_repository_workdir(data->repo), result->path)) < 0)
return error;
/* Most conflicts simply use the filename in the index */
if (!conflict->name_collision)
return 0;
/* Rename 2->1 conflicts need the branch name appended */
our_label_raw = data->opts.our_label ? data->opts.our_label : "ours";
their_label_raw = data->opts.their_label ? data->opts.their_label : "theirs";
suffix = strcmp(result->path, conflict->ours->path) == 0 ? our_label_raw : their_label_raw;
if ((error = checkout_path_suffixed(out, suffix)) < 0)
return error;
return 0;
}
static int checkout_write_merge(
checkout_data *data,
checkout_conflictdata *conflict)
{
git_buf our_label = GIT_BUF_INIT, their_label = GIT_BUF_INIT,
path_suffixed = GIT_BUF_INIT, path_workdir = GIT_BUF_INIT,
in_data = GIT_BUF_INIT, out_data = GIT_BUF_INIT;
git_merge_file_options opts = GIT_MERGE_FILE_OPTIONS_INIT;
git_merge_file_result result = {0};
git_filebuf output = GIT_FILEBUF_INIT;
git_filter_list *fl = NULL;
git_filter_options filter_opts = GIT_FILTER_OPTIONS_INIT;
int error = 0;
if (data->opts.checkout_strategy & GIT_CHECKOUT_CONFLICT_STYLE_DIFF3)
opts.flags |= GIT_MERGE_FILE_STYLE_DIFF3;
opts.ancestor_label = data->opts.ancestor_label ?
data->opts.ancestor_label : "ancestor";
opts.our_label = data->opts.our_label ?
data->opts.our_label : "ours";
opts.their_label = data->opts.their_label ?
data->opts.their_label : "theirs";
/* If all the paths are identical, decorate the diff3 file with the branch
* names. Otherwise, append branch_name:path.
*/
if (conflict->ours && conflict->theirs &&
strcmp(conflict->ours->path, conflict->theirs->path) != 0) {
if ((error = conflict_entry_name(
&our_label, opts.our_label, conflict->ours->path)) < 0 ||
(error = conflict_entry_name(
&their_label, opts.their_label, conflict->theirs->path)) < 0)
goto done;
opts.our_label = git_buf_cstr(&our_label);
opts.their_label = git_buf_cstr(&their_label);
}
if ((error = git_merge_file_from_index(&result, data->repo,
conflict->ancestor, conflict->ours, conflict->theirs, &opts)) < 0)
goto done;
if (result.path == NULL || result.mode == 0) {
git_error_set(GIT_ERROR_CHECKOUT, "could not merge contents of file");
error = GIT_ECONFLICT;
goto done;
}
if ((error = checkout_merge_path(&path_workdir, data, conflict, &result)) < 0)
goto done;
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0 &&
(error = checkout_safe_for_update_only(data, git_buf_cstr(&path_workdir), result.mode)) <= 0)
goto done;
if (!data->opts.disable_filters) {
in_data.ptr = (char *)result.ptr;
in_data.size = result.len;
filter_opts.attr_session = &data->attr_session;
filter_opts.temp_buf = &data->tmp;
if ((error = git_filter_list__load_ext(
&fl, data->repo, NULL, git_buf_cstr(&path_workdir),
GIT_FILTER_TO_WORKTREE, &filter_opts)) < 0 ||
(error = git_filter_list_apply_to_data(&out_data, fl, &in_data)) < 0)
goto done;
} else {
out_data.ptr = (char *)result.ptr;
out_data.size = result.len;
}
if ((error = mkpath2file(data, path_workdir.ptr, data->opts.dir_mode)) < 0 ||
(error = git_filebuf_open(&output, git_buf_cstr(&path_workdir), GIT_FILEBUF_DO_NOT_BUFFER, result.mode)) < 0 ||
(error = git_filebuf_write(&output, out_data.ptr, out_data.size)) < 0 ||
(error = git_filebuf_commit(&output)) < 0)
goto done;
done:
git_filter_list_free(fl);
git_buf_dispose(&out_data);
git_buf_dispose(&our_label);
git_buf_dispose(&their_label);
git_merge_file_result_free(&result);
git_buf_dispose(&path_workdir);
git_buf_dispose(&path_suffixed);
return error;
}
static int checkout_conflict_add(
checkout_data *data,
const git_index_entry *conflict)
{
int error = git_index_remove(data->index, conflict->path, 0);
if (error == GIT_ENOTFOUND)
git_error_clear();
else if (error < 0)
return error;
return git_index_add(data->index, conflict);
}
static int checkout_conflict_update_index(
checkout_data *data,
checkout_conflictdata *conflict)
{
int error = 0;
if (conflict->ancestor)
error = checkout_conflict_add(data, conflict->ancestor);
if (!error && conflict->ours)
error = checkout_conflict_add(data, conflict->ours);
if (!error && conflict->theirs)
error = checkout_conflict_add(data, conflict->theirs);
return error;
}
static int checkout_create_conflicts(checkout_data *data)
{
checkout_conflictdata *conflict;
size_t i;
int error = 0;
git_vector_foreach(&data->update_conflicts, i, conflict) {
/* Both deleted: nothing to do */
if (conflict->ours == NULL && conflict->theirs == NULL)
error = 0;
else if ((data->strategy & GIT_CHECKOUT_USE_OURS) &&
conflict->ours)
error = checkout_write_entry(data, conflict, conflict->ours);
else if ((data->strategy & GIT_CHECKOUT_USE_THEIRS) &&
conflict->theirs)
error = checkout_write_entry(data, conflict, conflict->theirs);
/* Ignore the other side of name collisions. */
else if ((data->strategy & GIT_CHECKOUT_USE_OURS) &&
!conflict->ours && conflict->name_collision)
error = 0;
else if ((data->strategy & GIT_CHECKOUT_USE_THEIRS) &&
!conflict->theirs && conflict->name_collision)
error = 0;
/* For modify/delete, name collisions and d/f conflicts, write
* the file (potentially with the name mangled).
*/
else if (conflict->ours != NULL && conflict->theirs == NULL)
error = checkout_write_entry(data, conflict, conflict->ours);
else if (conflict->ours == NULL && conflict->theirs != NULL)
error = checkout_write_entry(data, conflict, conflict->theirs);
/* Add/add conflicts and rename 1->2 conflicts, write the
* ours/theirs sides (potentially name mangled).
*/
else if (conflict->one_to_two)
error = checkout_write_entries(data, conflict);
/* If all sides are links, write the ours side */
else if (S_ISLNK(conflict->ours->mode) &&
S_ISLNK(conflict->theirs->mode))
error = checkout_write_entry(data, conflict, conflict->ours);
/* Link/file conflicts, write the file side */
else if (S_ISLNK(conflict->ours->mode))
error = checkout_write_entry(data, conflict, conflict->theirs);
else if (S_ISLNK(conflict->theirs->mode))
error = checkout_write_entry(data, conflict, conflict->ours);
/* If any side is a gitlink, do nothing. */
else if (conflict->submodule)
error = 0;
/* If any side is binary, write the ours side */
else if (conflict->binary)
error = checkout_write_entry(data, conflict, conflict->ours);
else if (!error)
error = checkout_write_merge(data, conflict);
/* Update the index extensions (REUC and NAME) if we're checking
* out a different index. (Otherwise just leave them there.)
*/
if (!error && (data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0)
error = checkout_conflict_update_index(data, conflict);
if (error)
break;
data->completed_steps++;
report_progress(data,
conflict->ours ? conflict->ours->path :
(conflict->theirs ? conflict->theirs->path : conflict->ancestor->path));
}
return error;
}
static int checkout_remove_conflicts(checkout_data *data)
{
const char *conflict;
size_t i;
git_vector_foreach(&data->remove_conflicts, i, conflict) {
if (git_index_conflict_remove(data->index, conflict) < 0)
return -1;
data->completed_steps++;
}
return 0;
}
static int checkout_extensions_update_index(checkout_data *data)
{
const git_index_reuc_entry *reuc_entry;
const git_index_name_entry *name_entry;
size_t i;
int error = 0;
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0)
return 0;
if (data->update_reuc) {
git_vector_foreach(data->update_reuc, i, reuc_entry) {
if ((error = git_index_reuc_add(data->index, reuc_entry->path,
reuc_entry->mode[0], &reuc_entry->oid[0],
reuc_entry->mode[1], &reuc_entry->oid[1],
reuc_entry->mode[2], &reuc_entry->oid[2])) < 0)
goto done;
}
}
if (data->update_names) {
git_vector_foreach(data->update_names, i, name_entry) {
if ((error = git_index_name_add(data->index, name_entry->ancestor,
name_entry->ours, name_entry->theirs)) < 0)
goto done;
}
}
done:
return error;
}
static void checkout_data_clear(checkout_data *data)
{
if (data->opts_free_baseline) {
git_tree_free(data->opts.baseline);
data->opts.baseline = NULL;
}
git_vector_free(&data->removes);
git_pool_clear(&data->pool);
git_vector_free_deep(&data->remove_conflicts);
git_vector_free_deep(&data->update_conflicts);
git__free(data->pfx);
data->pfx = NULL;
git_buf_dispose(&data->target_path);
git_buf_dispose(&data->tmp);
git_index_free(data->index);
data->index = NULL;
git_strmap_free(data->mkdir_map);
data->mkdir_map = NULL;
git_attr_session__free(&data->attr_session);
}
static int checkout_data_init(
checkout_data *data,
git_iterator *target,
const git_checkout_options *proposed)
{
int error = 0;
git_repository *repo = git_iterator_owner(target);
memset(data, 0, sizeof(*data));
if (!repo) {
git_error_set(GIT_ERROR_CHECKOUT, "cannot checkout nothing");
return -1;
}
if ((!proposed || !proposed->target_directory) &&
(error = git_repository__ensure_not_bare(repo, "checkout")) < 0)
return error;
data->repo = repo;
data->target = target;
GIT_ERROR_CHECK_VERSION(
proposed, GIT_CHECKOUT_OPTIONS_VERSION, "git_checkout_options");
if (!proposed)
GIT_INIT_STRUCTURE(&data->opts, GIT_CHECKOUT_OPTIONS_VERSION);
else
memmove(&data->opts, proposed, sizeof(git_checkout_options));
if (!data->opts.target_directory)
data->opts.target_directory = git_repository_workdir(repo);
else if (!git_path_isdir(data->opts.target_directory) &&
(error = checkout_mkdir(data,
data->opts.target_directory, NULL,
GIT_DIR_MODE, GIT_MKDIR_VERIFY_DIR)) < 0)
goto cleanup;
if ((error = git_repository_index(&data->index, data->repo)) < 0)
goto cleanup;
/* refresh config and index content unless NO_REFRESH is given */
if ((data->opts.checkout_strategy & GIT_CHECKOUT_NO_REFRESH) == 0) {
git_config *cfg;
if ((error = git_repository_config__weakptr(&cfg, repo)) < 0)
goto cleanup;
/* Reload the repository index (unless we're checking out the
* index; then it has the changes we're trying to check out
* and those should not be overwritten.)
*/
if (data->index != git_iterator_index(target)) {
if (data->opts.checkout_strategy & GIT_CHECKOUT_FORCE) {
/* When forcing, we can blindly re-read the index */
if ((error = git_index_read(data->index, false)) < 0)
goto cleanup;
} else {
/*
* When not being forced, we need to check for unresolved
* conflicts and unsaved changes in the index before
* proceeding.
*/
if (git_index_has_conflicts(data->index)) {
error = GIT_ECONFLICT;
git_error_set(GIT_ERROR_CHECKOUT,
"unresolved conflicts exist in the index");
goto cleanup;
}
if ((error = git_index_read_safely(data->index)) < 0)
goto cleanup;
}
/* clean conflict data in the current index */
git_index_name_clear(data->index);
git_index_reuc_clear(data->index);
}
}
/* if you are forcing, allow all safe updates, plus recreate missing */
if ((data->opts.checkout_strategy & GIT_CHECKOUT_FORCE) != 0)
data->opts.checkout_strategy |= GIT_CHECKOUT_SAFE |
GIT_CHECKOUT_RECREATE_MISSING;
/* if the repository does not actually have an index file, then this
* is an initial checkout (perhaps from clone), so we allow safe updates
*/
if (!data->index->on_disk &&
(data->opts.checkout_strategy & GIT_CHECKOUT_SAFE) != 0)
data->opts.checkout_strategy |= GIT_CHECKOUT_RECREATE_MISSING;
data->strategy = data->opts.checkout_strategy;
/* opts->disable_filters is false by default */
if (!data->opts.dir_mode)
data->opts.dir_mode = GIT_DIR_MODE;
if (!data->opts.file_open_flags)
data->opts.file_open_flags = O_CREAT | O_TRUNC | O_WRONLY;
data->pfx = git_pathspec_prefix(&data->opts.paths);
if ((error = git_repository__configmap_lookup(
&data->can_symlink, repo, GIT_CONFIGMAP_SYMLINKS)) < 0)
goto cleanup;
if ((error = git_repository__configmap_lookup(
&data->respect_filemode, repo, GIT_CONFIGMAP_FILEMODE)) < 0)
goto cleanup;
if (!data->opts.baseline && !data->opts.baseline_index) {
data->opts_free_baseline = true;
error = 0;
/* if we don't have an index, this is an initial checkout and
* should be against an empty baseline
*/
if (data->index->on_disk)
error = checkout_lookup_head_tree(&data->opts.baseline, repo);
if (error == GIT_EUNBORNBRANCH) {
error = 0;
git_error_clear();
}
if (error < 0)
goto cleanup;
}
if ((data->opts.checkout_strategy &
(GIT_CHECKOUT_CONFLICT_STYLE_MERGE | GIT_CHECKOUT_CONFLICT_STYLE_DIFF3)) == 0) {
git_config_entry *conflict_style = NULL;
git_config *cfg = NULL;
if ((error = git_repository_config__weakptr(&cfg, repo)) < 0 ||
(error = git_config_get_entry(&conflict_style, cfg, "merge.conflictstyle")) < 0 ||
error == GIT_ENOTFOUND)
;
else if (error)
goto cleanup;
else if (strcmp(conflict_style->value, "merge") == 0)
data->opts.checkout_strategy |= GIT_CHECKOUT_CONFLICT_STYLE_MERGE;
else if (strcmp(conflict_style->value, "diff3") == 0)
data->opts.checkout_strategy |= GIT_CHECKOUT_CONFLICT_STYLE_DIFF3;
else {
git_error_set(GIT_ERROR_CHECKOUT, "unknown style '%s' given for 'merge.conflictstyle'",
conflict_style->value);
error = -1;
git_config_entry_free(conflict_style);
goto cleanup;
}
git_config_entry_free(conflict_style);
}
git_pool_init(&data->pool, 1);
if ((error = git_vector_init(&data->removes, 0, git__strcmp_cb)) < 0 ||
(error = git_vector_init(&data->remove_conflicts, 0, NULL)) < 0 ||
(error = git_vector_init(&data->update_conflicts, 0, NULL)) < 0 ||
(error = git_buf_puts(&data->target_path, data->opts.target_directory)) < 0 ||
(error = git_path_to_dir(&data->target_path)) < 0 ||
(error = git_strmap_new(&data->mkdir_map)) < 0)
goto cleanup;
data->target_len = git_buf_len(&data->target_path);
git_attr_session__init(&data->attr_session, data->repo);
cleanup:
if (error < 0)
checkout_data_clear(data);
return error;
}
#define CHECKOUT_INDEX_DONT_WRITE_MASK \
(GIT_CHECKOUT_DONT_UPDATE_INDEX | GIT_CHECKOUT_DONT_WRITE_INDEX)
int git_checkout_iterator(
git_iterator *target,
git_index *index,
const git_checkout_options *opts)
{
int error = 0;
git_iterator *baseline = NULL, *workdir = NULL;
git_iterator_options baseline_opts = GIT_ITERATOR_OPTIONS_INIT,
workdir_opts = GIT_ITERATOR_OPTIONS_INIT;
checkout_data data = {0};
git_diff_options diff_opts = GIT_DIFF_OPTIONS_INIT;
uint32_t *actions = NULL;
size_t *counts = NULL;
/* initialize structures and options */
error = checkout_data_init(&data, target, opts);
if (error < 0)
return error;
diff_opts.flags =
GIT_DIFF_INCLUDE_UNMODIFIED |
GIT_DIFF_INCLUDE_UNREADABLE |
GIT_DIFF_INCLUDE_UNTRACKED |
GIT_DIFF_RECURSE_UNTRACKED_DIRS | /* needed to match baseline */
GIT_DIFF_INCLUDE_IGNORED |
GIT_DIFF_INCLUDE_TYPECHANGE |
GIT_DIFF_INCLUDE_TYPECHANGE_TREES |
GIT_DIFF_SKIP_BINARY_CHECK |
GIT_DIFF_INCLUDE_CASECHANGE;
if (data.opts.checkout_strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH)
diff_opts.flags |= GIT_DIFF_DISABLE_PATHSPEC_MATCH;
if (data.opts.paths.count > 0)
diff_opts.pathspec = data.opts.paths;
/* set up iterators */
workdir_opts.flags = git_iterator_ignore_case(target) ?
GIT_ITERATOR_IGNORE_CASE : GIT_ITERATOR_DONT_IGNORE_CASE;
workdir_opts.flags |= GIT_ITERATOR_DONT_AUTOEXPAND;
workdir_opts.start = data.pfx;
workdir_opts.end = data.pfx;
if ((error = git_iterator_reset_range(target, data.pfx, data.pfx)) < 0 ||
(error = git_iterator_for_workdir_ext(
&workdir, data.repo, data.opts.target_directory, index, NULL,
&workdir_opts)) < 0)
goto cleanup;
baseline_opts.flags = git_iterator_ignore_case(target) ?
GIT_ITERATOR_IGNORE_CASE : GIT_ITERATOR_DONT_IGNORE_CASE;
baseline_opts.start = data.pfx;
baseline_opts.end = data.pfx;
if (opts && (opts->checkout_strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH)) {
baseline_opts.pathlist.count = opts->paths.count;
baseline_opts.pathlist.strings = opts->paths.strings;
}
if (data.opts.baseline_index) {
if ((error = git_iterator_for_index(
&baseline, git_index_owner(data.opts.baseline_index),
data.opts.baseline_index, &baseline_opts)) < 0)
goto cleanup;
} else {
if ((error = git_iterator_for_tree(
&baseline, data.opts.baseline, &baseline_opts)) < 0)
goto cleanup;
}
/* Should not have case insensitivity mismatch */
assert(git_iterator_ignore_case(workdir) == git_iterator_ignore_case(baseline));
/* Generate baseline-to-target diff which will include an entry for
* every possible update that might need to be made.
*/
if ((error = git_diff__from_iterators(
&data.diff, data.repo, baseline, target, &diff_opts)) < 0)
goto cleanup;
/* Loop through diff (and working directory iterator) building a list of
* actions to be taken, plus look for conflicts and send notifications,
* then loop through conflicts.
*/
if ((error = checkout_get_actions(&actions, &counts, &data, workdir)) != 0)
goto cleanup;
data.total_steps = counts[CHECKOUT_ACTION__REMOVE] +
counts[CHECKOUT_ACTION__REMOVE_CONFLICT] +
counts[CHECKOUT_ACTION__UPDATE_BLOB] +
counts[CHECKOUT_ACTION__UPDATE_SUBMODULE] +
counts[CHECKOUT_ACTION__UPDATE_CONFLICT];
report_progress(&data, NULL); /* establish 0 baseline */
/* To deal with some order dependencies, perform remaining checkout
* in three passes: removes, then update blobs, then update submodules.
*/
if (counts[CHECKOUT_ACTION__REMOVE] > 0 &&
(error = checkout_remove_the_old(actions, &data)) < 0)
goto cleanup;
if (counts[CHECKOUT_ACTION__REMOVE_CONFLICT] > 0 &&
(error = checkout_remove_conflicts(&data)) < 0)
goto cleanup;
if (counts[CHECKOUT_ACTION__UPDATE_BLOB] > 0 &&
(error = checkout_create_the_new(actions, &data)) < 0)
goto cleanup;
if (counts[CHECKOUT_ACTION__UPDATE_SUBMODULE] > 0 &&
(error = checkout_create_submodules(actions, &data)) < 0)
goto cleanup;
if (counts[CHECKOUT_ACTION__UPDATE_CONFLICT] > 0 &&
(error = checkout_create_conflicts(&data)) < 0)
goto cleanup;
if (data.index != git_iterator_index(target) &&
(error = checkout_extensions_update_index(&data)) < 0)
goto cleanup;
assert(data.completed_steps == data.total_steps);
if (data.opts.perfdata_cb)
data.opts.perfdata_cb(&data.perfdata, data.opts.perfdata_payload);
cleanup:
if (!error && data.index != NULL &&
(data.strategy & CHECKOUT_INDEX_DONT_WRITE_MASK) == 0)
error = git_index_write(data.index);
git_diff_free(data.diff);
git_iterator_free(workdir);
git_iterator_free(baseline);
git__free(actions);
git__free(counts);
checkout_data_clear(&data);
return error;
}
int git_checkout_index(
git_repository *repo,
git_index *index,
const git_checkout_options *opts)
{
int error, owned = 0;
git_iterator *index_i;
if (!index && !repo) {
git_error_set(GIT_ERROR_CHECKOUT,
"must provide either repository or index to checkout");
return -1;
}
if (index && repo &&
git_index_owner(index) &&
git_index_owner(index) != repo) {
git_error_set(GIT_ERROR_CHECKOUT,
"index to checkout does not match repository");
return -1;
} else if (index && repo && !git_index_owner(index)) {
GIT_REFCOUNT_OWN(index, repo);
owned = 1;
}
if (!repo)
repo = git_index_owner(index);
if (!index && (error = git_repository_index__weakptr(&index, repo)) < 0)
return error;
GIT_REFCOUNT_INC(index);
if (!(error = git_iterator_for_index(&index_i, repo, index, NULL)))
error = git_checkout_iterator(index_i, index, opts);
if (owned)
GIT_REFCOUNT_OWN(index, NULL);
git_iterator_free(index_i);
git_index_free(index);
return error;
}
int git_checkout_tree(
git_repository *repo,
const git_object *treeish,
const git_checkout_options *opts)
{
int error;
git_index *index;
git_tree *tree = NULL;
git_iterator *tree_i = NULL;
git_iterator_options iter_opts = GIT_ITERATOR_OPTIONS_INIT;
if (!treeish && !repo) {
git_error_set(GIT_ERROR_CHECKOUT,
"must provide either repository or tree to checkout");
return -1;
}
if (treeish && repo && git_object_owner(treeish) != repo) {
git_error_set(GIT_ERROR_CHECKOUT,
"object to checkout does not match repository");
return -1;
}
if (!repo)
repo = git_object_owner(treeish);
if (treeish) {
if (git_object_peel((git_object **)&tree, treeish, GIT_OBJECT_TREE) < 0) {
git_error_set(
GIT_ERROR_CHECKOUT, "provided object cannot be peeled to a tree");
return -1;
}
}
else {
if ((error = checkout_lookup_head_tree(&tree, repo)) < 0) {
if (error != GIT_EUNBORNBRANCH)
git_error_set(
GIT_ERROR_CHECKOUT,
"HEAD could not be peeled to a tree and no treeish given");
return error;
}
}
if ((error = git_repository_index(&index, repo)) < 0)
return error;
if (opts && (opts->checkout_strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH)) {
iter_opts.pathlist.count = opts->paths.count;
iter_opts.pathlist.strings = opts->paths.strings;
}
if (!(error = git_iterator_for_tree(&tree_i, tree, &iter_opts)))
error = git_checkout_iterator(tree_i, index, opts);
git_iterator_free(tree_i);
git_index_free(index);
git_tree_free(tree);
return error;
}
int git_checkout_head(
git_repository *repo,
const git_checkout_options *opts)
{
assert(repo);
return git_checkout_tree(repo, NULL, opts);
}
int git_checkout_options_init(git_checkout_options *opts, unsigned int version)
{
GIT_INIT_STRUCTURE_FROM_TEMPLATE(
opts, version, git_checkout_options, GIT_CHECKOUT_OPTIONS_INIT);
return 0;
}
int git_checkout_init_options(git_checkout_options *opts, unsigned int version)
{
return git_checkout_options_init(opts, version);
}
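/* Illustrative caller-side sketch (assumes the public checkout API from
* git2/checkout.h; not part of this file):
*
* git_checkout_options opts = GIT_CHECKOUT_OPTIONS_INIT;
* opts.checkout_strategy = GIT_CHECKOUT_SAFE;
* int error = git_checkout_head(repo, &opts);
*/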
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_3989_0 |
crossvul-cpp_data_bad_2891_8 | /* procfs files for key database enumeration
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/errno.h>
#include "internal.h"
static int proc_keys_open(struct inode *inode, struct file *file);
static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
static void proc_keys_stop(struct seq_file *p, void *v);
static int proc_keys_show(struct seq_file *m, void *v);
static const struct seq_operations proc_keys_ops = {
.start = proc_keys_start,
.next = proc_keys_next,
.stop = proc_keys_stop,
.show = proc_keys_show,
};
static const struct file_operations proc_keys_fops = {
.open = proc_keys_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int proc_key_users_open(struct inode *inode, struct file *file);
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos);
static void proc_key_users_stop(struct seq_file *p, void *v);
static int proc_key_users_show(struct seq_file *m, void *v);
static const struct seq_operations proc_key_users_ops = {
.start = proc_key_users_start,
.next = proc_key_users_next,
.stop = proc_key_users_stop,
.show = proc_key_users_show,
};
static const struct file_operations proc_key_users_fops = {
.open = proc_key_users_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* Declare the /proc files.
*/
static int __init key_proc_init(void)
{
struct proc_dir_entry *p;
p = proc_create("keys", 0, NULL, &proc_keys_fops);
if (!p)
panic("Cannot create /proc/keys\n");
p = proc_create("key-users", 0, NULL, &proc_key_users_fops);
if (!p)
panic("Cannot create /proc/key-users\n");
return 0;
}
__initcall(key_proc_init);
/*
* Implement "/proc/keys" to provide a list of the keys on the system that
* grant View permission to the caller.
*/
static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
{
struct user_namespace *user_ns = seq_user_ns(p);
n = rb_next(n);
while (n) {
struct key *key = rb_entry(n, struct key, serial_node);
if (kuid_has_mapping(user_ns, key->user->uid))
break;
n = rb_next(n);
}
return n;
}
static int proc_keys_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_keys_ops);
}
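/* Find the key with the smallest serial number >= id whose owner is
* visible in the caller's user namespace.
*/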
static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
{
struct user_namespace *user_ns = seq_user_ns(p);
struct rb_node *n = key_serial_tree.rb_node;
struct key *minkey = NULL;
while (n) {
struct key *key = rb_entry(n, struct key, serial_node);
if (id < key->serial) {
if (!minkey || minkey->serial > key->serial)
minkey = key;
n = n->rb_left;
} else if (id > key->serial) {
n = n->rb_right;
} else {
minkey = key;
break;
}
key = NULL;
}
if (!minkey)
return NULL;
for (;;) {
if (kuid_has_mapping(user_ns, minkey->user->uid))
return minkey;
n = rb_next(&minkey->serial_node);
if (!n)
return NULL;
minkey = rb_entry(n, struct key, serial_node);
}
}
static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
__acquires(key_serial_lock)
{
key_serial_t pos = *_pos;
struct key *key;
spin_lock(&key_serial_lock);
if (*_pos > INT_MAX)
return NULL;
key = find_ge_key(p, pos);
if (!key)
return NULL;
*_pos = key->serial;
return &key->serial_node;
}
static inline key_serial_t key_node_serial(struct rb_node *n)
{
struct key *key = rb_entry(n, struct key, serial_node);
return key->serial;
}
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
{
struct rb_node *n;
n = key_serial_next(p, v);
if (n)
*_pos = key_node_serial(n);
return n;
}
static void proc_keys_stop(struct seq_file *p, void *v)
__releases(key_serial_lock)
{
spin_unlock(&key_serial_lock);
}
static int proc_keys_show(struct seq_file *m, void *v)
{
struct rb_node *_p = v;
struct key *key = rb_entry(_p, struct key, serial_node);
struct timespec now;
unsigned long timo;
key_ref_t key_ref, skey_ref;
char xbuf[16];
int rc;
struct keyring_search_context ctx = {
.index_key.type = key->type,
.index_key.description = key->description,
.cred = m->file->f_cred,
.match_data.cmp = lookup_user_key_possessed,
.match_data.raw_data = key,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_NO_STATE_CHECK,
};
key_ref = make_key_ref(key, 0);
/* determine if the key is possessed by this process (a test we can
* skip if the key does not indicate the possessor can view it)
*/
if (key->perm & KEY_POS_VIEW) {
skey_ref = search_my_process_keyrings(&ctx);
if (!IS_ERR(skey_ref)) {
key_ref_put(skey_ref);
key_ref = make_key_ref(key, 1);
}
}
/* check whether the current task is allowed to view the key */
rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
if (rc < 0)
return 0;
now = current_kernel_time();
rcu_read_lock();
/* come up with a suitable timeout value */
if (key->expiry == 0) {
memcpy(xbuf, "perm", 5);
} else if (now.tv_sec >= key->expiry) {
memcpy(xbuf, "expd", 5);
} else {
timo = key->expiry - now.tv_sec;
if (timo < 60)
sprintf(xbuf, "%lus", timo);
else if (timo < 60*60)
sprintf(xbuf, "%lum", timo / 60);
else if (timo < 60*60*24)
sprintf(xbuf, "%luh", timo / (60*60));
else if (timo < 60*60*24*7)
sprintf(xbuf, "%lud", timo / (60*60*24));
else
sprintf(xbuf, "%luw", timo / (60*60*24*7));
}
#define showflag(KEY, LETTER, FLAG) \
(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
key->serial,
showflag(key, 'I', KEY_FLAG_INSTANTIATED),
showflag(key, 'R', KEY_FLAG_REVOKED),
showflag(key, 'D', KEY_FLAG_DEAD),
showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
showflag(key, 'N', KEY_FLAG_NEGATIVE),
showflag(key, 'i', KEY_FLAG_INVALIDATED),
refcount_read(&key->usage),
xbuf,
key->perm,
from_kuid_munged(seq_user_ns(m), key->uid),
from_kgid_munged(seq_user_ns(m), key->gid),
key->type->name);
#undef showflag
if (key->type->describe)
key->type->describe(key, m);
seq_putc(m, '\n');
rcu_read_unlock();
return 0;
}
static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
while (n) {
struct key_user *user = rb_entry(n, struct key_user, node);
if (kuid_has_mapping(user_ns, user->uid))
break;
n = rb_next(n);
}
return n;
}
static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
return __key_user_next(user_ns, rb_next(n));
}
static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
{
struct rb_node *n = rb_first(r);
return __key_user_next(user_ns, n);
}
/*
* Implement "/proc/key-users" to provides a list of the key users and their
* quotas.
*/
static int proc_key_users_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_key_users_ops);
}
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
__acquires(key_user_lock)
{
struct rb_node *_p;
loff_t pos = *_pos;
spin_lock(&key_user_lock);
_p = key_user_first(seq_user_ns(p), &key_user_tree);
while (pos > 0 && _p) {
pos--;
_p = key_user_next(seq_user_ns(p), _p);
}
return _p;
}
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
(*_pos)++;
return key_user_next(seq_user_ns(p), (struct rb_node *)v);
}
static void proc_key_users_stop(struct seq_file *p, void *v)
__releases(key_user_lock)
{
spin_unlock(&key_user_lock);
}
static int proc_key_users_show(struct seq_file *m, void *v)
{
struct rb_node *_p = v;
struct key_user *user = rb_entry(_p, struct key_user, node);
unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
from_kuid_munged(seq_user_ns(m), user->uid),
refcount_read(&user->usage),
atomic_read(&user->nkeys),
atomic_read(&user->nikeys),
user->qnkeys,
maxkeys,
user->qnbytes,
maxbytes);
return 0;
}
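/*
* Illustrative /proc/key-users line (made-up values): for uid 1000
* holding 12 keys, 11 of them instantiated, the seq_printf() above
* emits something like
*
* 1000: 3 12/11 12/200 4660/20000
*
* i.e. uid, refcount, nkeys/nikeys, qnkeys/maxkeys, qnbytes/maxbytes.
*/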
/*
* linux/fs/ext4/namei.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
* Directory entry file type support and forward compatibility hooks
* for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
* Hash Tree Directory indexing (c)
* Daniel Phillips, 2001
* Hash Tree Directory indexing porting
* Christopher Li, 2002
* Hash Tree Directory indexing cleanup
* Theodore Ts'o, 2002
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/jbd2.h>
#include <linux/time.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/ext4.h>
/*
* define how far ahead to read directories while searching them.
*/
#define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
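/*
* With the values above, each readahead refill issues at most
* NAMEI_RA_SIZE == 2 * 4 == 8 block reads, so ext4_find_entry() below
* walks a directory in batches of eight buffers.
*/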
static struct buffer_head *ext4_append(handle_t *handle,
struct inode *inode,
ext4_lblk_t *block, int *err)
{
struct buffer_head *bh;
if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
((inode->i_size >> 10) >=
EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) {
*err = -ENOSPC;
return NULL;
}
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
bh = ext4_bread(handle, inode, *block, 1, err);
if (bh) {
inode->i_size += inode->i_sb->s_blocksize;
EXT4_I(inode)->i_disksize = inode->i_size;
*err = ext4_journal_get_write_access(handle, bh);
if (*err) {
brelse(bh);
bh = NULL;
}
}
return bh;
}
#ifndef assert
#define assert(test) J_ASSERT(test)
#endif
#ifdef DX_DEBUG
#define dxtrace(command) command
#else
#define dxtrace(command)
#endif
struct fake_dirent
{
__le32 inode;
__le16 rec_len;
u8 name_len;
u8 file_type;
};
struct dx_countlimit
{
__le16 limit;
__le16 count;
};
struct dx_entry
{
__le32 hash;
__le32 block;
};
/*
* dx_root_info is laid out so that if it should somehow get overlaid by a
* dirent the two low bits of the hash version will be zero. Therefore, the
* hash version mod 4 should never be 0. Sincerely, the paranoia department.
*/
struct dx_root
{
struct fake_dirent dot;
char dot_name[4];
struct fake_dirent dotdot;
char dotdot_name[4];
struct dx_root_info
{
__le32 reserved_zero;
u8 hash_version;
u8 info_length; /* 8 */
u8 indirect_levels;
u8 unused_flags;
}
info;
struct dx_entry entries[0];
};
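/*
* On-disk layout sketch of the root block (assuming the usual 12-byte
* fake dirents for "." and ".."):
*
* offset 0: fake dirent for "." (8-byte header + 4-byte name)
* offset 12: fake dirent for ".." (8-byte header + 4-byte name)
* offset 24: dx_root_info (8 bytes, so info_length == 8)
* offset 32: start of the dx_entry array (8 bytes per entry)
*
* This is why get_dx_countlimit() below uses count_offset == 32 for
* root blocks.
*/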
struct dx_node
{
struct fake_dirent fake;
struct dx_entry entries[0];
};
struct dx_frame
{
struct buffer_head *bh;
struct dx_entry *entries;
struct dx_entry *at;
};
struct dx_map_entry
{
u32 hash;
u16 offs;
u16 size;
};
/*
* This goes at the end of each htree block.
*/
struct dx_tail {
u32 dt_reserved;
__le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */
};
static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
static inline unsigned dx_get_hash(struct dx_entry *entry);
static void dx_set_hash(struct dx_entry *entry, unsigned value);
static unsigned dx_get_count(struct dx_entry *entries);
static unsigned dx_get_limit(struct dx_entry *entries);
static void dx_set_count(struct dx_entry *entries, unsigned value);
static void dx_set_limit(struct dx_entry *entries, unsigned value);
static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
static unsigned dx_node_limit(struct inode *dir);
static struct dx_frame *dx_probe(const struct qstr *d_name,
struct inode *dir,
struct dx_hash_info *hinfo,
struct dx_frame *frame,
int *err);
static void dx_release(struct dx_frame *frames);
static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
struct dx_hash_info *hinfo, struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
struct dx_map_entry *offsets, int count, unsigned blocksize);
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize);
static void dx_insert_block(struct dx_frame *frame,
u32 hash, ext4_lblk_t block);
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
struct dx_frame *frame,
struct dx_frame *frames,
__u32 *start_hash);
static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
const struct qstr *d_name,
struct ext4_dir_entry_2 **res_dir,
int *err);
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode);
/* checksumming functions */
#define EXT4_DIRENT_TAIL(block, blocksize) \
((struct ext4_dir_entry_tail *)(((void *)(block)) + \
((blocksize) - \
sizeof(struct ext4_dir_entry_tail))))
static void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
unsigned int blocksize)
{
memset(t, 0, sizeof(struct ext4_dir_entry_tail));
t->det_rec_len = ext4_rec_len_to_disk(
sizeof(struct ext4_dir_entry_tail), blocksize);
t->det_reserved_ft = EXT4_FT_DIR_CSUM;
}
/* Walk through a dirent block to find a checksum "dirent" at the tail */
static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
struct ext4_dir_entry *de)
{
struct ext4_dir_entry_tail *t;
#ifdef PARANOID
struct ext4_dir_entry *d, *top;
d = de;
top = (struct ext4_dir_entry *)(((void *)de) +
(EXT4_BLOCK_SIZE(inode->i_sb) -
sizeof(struct ext4_dir_entry_tail)));
while (d < top && d->rec_len)
d = (struct ext4_dir_entry *)(((void *)d) +
le16_to_cpu(d->rec_len));
if (d != top)
return NULL;
t = (struct ext4_dir_entry_tail *)d;
#else
t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
#endif
if (t->det_reserved_zero1 ||
le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
t->det_reserved_zero2 ||
t->det_reserved_ft != EXT4_FT_DIR_CSUM)
return NULL;
return t;
}
static __le32 ext4_dirent_csum(struct inode *inode,
struct ext4_dir_entry *dirent, int size)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum;
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
return cpu_to_le32(csum);
}
int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
{
struct ext4_dir_entry_tail *t;
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
return 1;
t = get_dirent_tail(inode, dirent);
if (!t) {
EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
"leaf for checksum. Please run e2fsck -D.");
return 0;
}
if (t->det_checksum != ext4_dirent_csum(inode, dirent,
(void *)t - (void *)dirent))
return 0;
return 1;
}
static void ext4_dirent_csum_set(struct inode *inode,
struct ext4_dir_entry *dirent)
{
struct ext4_dir_entry_tail *t;
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
return;
t = get_dirent_tail(inode, dirent);
if (!t) {
EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
"leaf for checksum. Please run e2fsck -D.");
return;
}
t->det_checksum = ext4_dirent_csum(inode, dirent,
(void *)t - (void *)dirent);
}
static inline int ext4_handle_dirty_dirent_node(handle_t *handle,
struct inode *inode,
struct buffer_head *bh)
{
ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
return ext4_handle_dirty_metadata(handle, inode, bh);
}
static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
struct ext4_dir_entry *dirent,
int *offset)
{
struct ext4_dir_entry *dp;
struct dx_root_info *root;
int count_offset;
if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
count_offset = 8;
else if (le16_to_cpu(dirent->rec_len) == 12) {
dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
if (le16_to_cpu(dp->rec_len) !=
EXT4_BLOCK_SIZE(inode->i_sb) - 12)
return NULL;
root = (struct dx_root_info *)(((void *)dp + 12));
if (root->reserved_zero ||
root->info_length != sizeof(struct dx_root_info))
return NULL;
count_offset = 32;
} else
return NULL;
if (offset)
*offset = count_offset;
return (struct dx_countlimit *)(((void *)dirent) + count_offset);
}
static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
int count_offset, int count, struct dx_tail *t)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum, old_csum;
int size;
size = count_offset + (count * sizeof(struct dx_entry));
old_csum = t->dt_checksum;
t->dt_checksum = 0;
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
t->dt_checksum = old_csum;
return cpu_to_le32(csum);
}
static int ext4_dx_csum_verify(struct inode *inode,
struct ext4_dir_entry *dirent)
{
struct dx_countlimit *c;
struct dx_tail *t;
int count_offset, limit, count;
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
return 1;
c = get_dx_countlimit(inode, dirent, &count_offset);
if (!c) {
EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D.");
return 1;
}
limit = le16_to_cpu(c->limit);
count = le16_to_cpu(c->count);
if (count_offset + (limit * sizeof(struct dx_entry)) >
EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
"tree checksum found. Run e2fsck -D.");
return 1;
}
t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
count, t))
return 0;
return 1;
}
static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
{
struct dx_countlimit *c;
struct dx_tail *t;
int count_offset, limit, count;
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
return;
c = get_dx_countlimit(inode, dirent, &count_offset);
if (!c) {
EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D.");
return;
}
limit = le16_to_cpu(c->limit);
count = le16_to_cpu(c->count);
if (count_offset + (limit * sizeof(struct dx_entry)) >
EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
"tree checksum. Run e2fsck -D.");
return;
}
t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
}
static inline int ext4_handle_dirty_dx_node(handle_t *handle,
struct inode *inode,
struct buffer_head *bh)
{
ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
return ext4_handle_dirty_metadata(handle, inode, bh);
}
/*
* p is at least 6 bytes before the end of page
*/
static inline struct ext4_dir_entry_2 *
ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
{
return (struct ext4_dir_entry_2 *)((char *)p +
ext4_rec_len_from_disk(p->rec_len, blocksize));
}
/*
* Future: use high four bits of block for coalesce-on-delete flags
* Mask them off for now.
*/
static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
{
return le32_to_cpu(entry->block) & 0x00ffffff;
}
static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
{
entry->block = cpu_to_le32(value);
}
static inline unsigned dx_get_hash(struct dx_entry *entry)
{
return le32_to_cpu(entry->hash);
}
static inline void dx_set_hash(struct dx_entry *entry, unsigned value)
{
entry->hash = cpu_to_le32(value);
}
static inline unsigned dx_get_count(struct dx_entry *entries)
{
return le16_to_cpu(((struct dx_countlimit *) entries)->count);
}
static inline unsigned dx_get_limit(struct dx_entry *entries)
{
return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
}
static inline void dx_set_count(struct dx_entry *entries, unsigned value)
{
((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
}
static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
{
((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}
static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
{
unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
EXT4_DIR_REC_LEN(2) - infosize;
if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
entry_space -= sizeof(struct dx_tail);
return entry_space / sizeof(struct dx_entry);
}
static inline unsigned dx_node_limit(struct inode *dir)
{
unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
entry_space -= sizeof(struct dx_tail);
return entry_space / sizeof(struct dx_entry);
}
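/*
* Capacity sketch, assuming a 4096-byte block: EXT4_DIR_REC_LEN(1) and
* EXT4_DIR_REC_LEN(2) are both 12 and the info block is 8 bytes, so a
* root holds (4096 - 12 - 12 - 8) / 8 == 508 dx_entries, while an
* interior node holds (4096 - 8) / 8 == 511. With metadata_csum
* enabled, each loses one entry to the 8-byte struct dx_tail.
*/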
/*
* Debug
*/
#ifdef DX_DEBUG
static void dx_show_index(char * label, struct dx_entry *entries)
{
int i, n = dx_get_count (entries);
printk(KERN_DEBUG "%s index ", label);
for (i = 0; i < n; i++) {
printk("%x->%lu ", i ? dx_get_hash(entries + i) :
0, (unsigned long)dx_get_block(entries + i));
}
printk("\n");
}
struct stats
{
unsigned names;
unsigned space;
unsigned bcount;
};
static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
int size, int show_names)
{
unsigned names = 0, space = 0;
char *base = (char *) de;
struct dx_hash_info h = *hinfo;
printk("names: ");
while ((char *) de < base + size)
{
if (de->inode)
{
if (show_names)
{
int len = de->name_len;
char *name = de->name;
while (len--) printk("%c", *name++);
ext4fs_dirhash(de->name, de->name_len, &h);
printk(":%x.%u ", h.hash,
(unsigned) ((char *) de - base));
}
space += EXT4_DIR_REC_LEN(de->name_len);
names++;
}
de = ext4_next_entry(de, size);
}
printk("(%i)\n", names);
return (struct stats) { names, space, 1 };
}
struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
struct dx_entry *entries, int levels)
{
unsigned blocksize = dir->i_sb->s_blocksize;
unsigned count = dx_get_count(entries), names = 0, space = 0, i;
unsigned bcount = 0;
struct buffer_head *bh;
int err;
printk("%i indexed blocks...\n", count);
for (i = 0; i < count; i++, entries++)
{
ext4_lblk_t block = dx_get_block(entries);
ext4_lblk_t hash = i ? dx_get_hash(entries): 0;
u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
struct stats stats;
printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue;
stats = levels?
dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
names += stats.names;
space += stats.space;
bcount += stats.bcount;
brelse(bh);
}
if (bcount)
printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n",
levels ? "" : " ", names, space/bcount,
(space/bcount)*100/blocksize);
return (struct stats) { names, space, bcount};
}
#endif /* DX_DEBUG */
/*
* Probe for a directory leaf block to search.
*
* dx_probe can return ERR_BAD_DX_DIR, which means there was a format
* error in the directory index, and the caller should fall back to
* searching the directory normally. The callers of dx_probe **MUST**
* check for this error code, and make sure it never gets reflected
* back to userspace.
*/
static struct dx_frame *
dx_probe(const struct qstr *d_name, struct inode *dir,
struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
{
unsigned count, indirect;
struct dx_entry *at, *entries, *p, *q, *m;
struct dx_root *root;
struct buffer_head *bh;
struct dx_frame *frame = frame_in;
u32 hash;
frame->bh = NULL;
if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
goto fail;
root = (struct dx_root *) bh->b_data;
if (root->info.hash_version != DX_HASH_TEA &&
root->info.hash_version != DX_HASH_HALF_MD4 &&
root->info.hash_version != DX_HASH_LEGACY) {
ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
root->info.hash_version);
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
hinfo->hash_version = root->info.hash_version;
if (hinfo->hash_version <= DX_HASH_TEA)
hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
if (d_name)
ext4fs_dirhash(d_name->name, d_name->len, hinfo);
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
root->info.unused_flags);
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
if ((indirect = root->info.indirect_levels) > 1) {
ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
root->info.indirect_levels);
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
if (!buffer_verified(bh) &&
!ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) {
ext4_warning(dir->i_sb, "Root failed checksum");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
set_buffer_verified(bh);
entries = (struct dx_entry *) (((char *)&root->info) +
root->info.info_length);
if (dx_get_limit(entries) != dx_root_limit(dir,
root->info.info_length)) {
ext4_warning(dir->i_sb, "dx entry: limit != root limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
dxtrace(printk("Look up %x", hash));
while (1)
{
count = dx_get_count(entries);
if (!count || count > dx_get_limit(entries)) {
ext4_warning(dir->i_sb,
"dx entry: no count or count > limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail2;
}
p = entries + 1;
q = entries + count - 1;
while (p <= q)
{
m = p + (q - p)/2;
dxtrace(printk("."));
if (dx_get_hash(m) > hash)
q = m - 1;
else
p = m + 1;
}
if (0) // linear search cross check
{
unsigned n = count - 1;
at = entries;
while (n--)
{
dxtrace(printk(","));
if (dx_get_hash(++at) > hash)
{
at--;
break;
}
}
assert (at == p - 1);
}
at = p - 1;
dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
frame->bh = bh;
frame->entries = entries;
frame->at = at;
if (!indirect--) return frame;
if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
goto fail2;
at = entries = ((struct dx_node *) bh->b_data)->entries;
if (!buffer_verified(bh) &&
!ext4_dx_csum_verify(dir,
(struct ext4_dir_entry *)bh->b_data)) {
ext4_warning(dir->i_sb, "Node failed checksum");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
set_buffer_verified(bh);
if (dx_get_limit(entries) != dx_node_limit (dir)) {
ext4_warning(dir->i_sb,
"dx entry: limit != node limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail2;
}
frame++;
frame->bh = NULL;
}
fail2:
while (frame >= frame_in) {
brelse(frame->bh);
frame--;
}
fail:
if (*err == ERR_BAD_DX_DIR)
ext4_warning(dir->i_sb,
"Corrupt dir inode %lu, running e2fsck is "
"recommended.", dir->i_ino);
return NULL;
}
static void dx_release (struct dx_frame *frames)
{
if (frames[0].bh == NULL)
return;
if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
brelse(frames[1].bh);
brelse(frames[0].bh);
}
/*
* This function increments the frame pointer to search the next leaf
* block, and reads in the necessary intervening nodes if the search
* needs to continue. Whether or not the search continues is
* controlled by the hash parameter. If the hash value is even, then
* the search is only continued if the next block starts with that
* hash value. This is used if we are searching for a specific file.
*
* If the hash value is HASH_NB_ALWAYS, then always go to the next block.
*
* This function returns 1 if the caller should continue to search,
* or 0 if it should not. If there is an error reading one of the
* index blocks, it will return a negative error code.
*
* If start_hash is non-null, it will be filled in with the starting
* hash of the next page.
*/
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
struct dx_frame *frame,
struct dx_frame *frames,
__u32 *start_hash)
{
struct dx_frame *p;
struct buffer_head *bh;
int err, num_frames = 0;
__u32 bhash;
p = frame;
/*
* Find the next leaf page by incrementing the frame pointer.
* If we run out of entries in the interior node, loop around and
* increment pointer in the parent node. When we break out of
* this loop, num_frames indicates the number of interior
* nodes that need to be read.
*/
while (1) {
if (++(p->at) < p->entries + dx_get_count(p->entries))
break;
if (p == frames)
return 0;
num_frames++;
p--;
}
/*
* If the hash is 1, then continue only if the next page has a
* continuation hash of any value. This is used for readdir
* handling. Otherwise, check to see if the hash matches the
* desired continuation hash. If it doesn't, return since
* there's no point in reading the successive index pages.
*/
bhash = dx_get_hash(p->at);
if (start_hash)
*start_hash = bhash;
if ((hash & 1) == 0) {
if ((bhash & ~1) != hash)
return 0;
}
/*
* If the hash is HASH_NB_ALWAYS, we always go to the next
* block so no check is necessary
*/
while (num_frames--) {
if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
0, &err)))
return err; /* Failure */
if (!buffer_verified(bh) &&
!ext4_dx_csum_verify(dir,
(struct ext4_dir_entry *)bh->b_data)) {
ext4_warning(dir->i_sb, "Node failed checksum");
return -EIO;
}
set_buffer_verified(bh);
p++;
brelse(p->bh);
p->bh = bh;
p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
}
return 1;
}
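/*
* Editor's sketch of the continuation convention used by the check
* above (inferred from do_split(), which inserts a continuation block
* as hash2 + 1): major hashes are stored with the low bit clear, and a
* set low bit marks a leaf that continues its predecessor's hash chain.
* So for a lookup of a specific (even) hash h, the walk only proceeds
* when the next leaf's index hash, masked with ~1, still equals h;
* anything else proves no further entries for h can follow.
*/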
/*
* This function fills a red-black tree with information from a
* directory block. It returns the number of directory entries loaded
* into the tree, or a negative error code on failure.
*/
static int htree_dirblock_to_tree(struct file *dir_file,
struct inode *dir, ext4_lblk_t block,
struct dx_hash_info *hinfo,
__u32 start_hash, __u32 start_minor_hash)
{
struct buffer_head *bh;
struct ext4_dir_entry_2 *de, *top;
int err = 0, count = 0;
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
return err;
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
return -EIO;
set_buffer_verified(bh);
de = (struct ext4_dir_entry_2 *) bh->b_data;
top = (struct ext4_dir_entry_2 *) ((char *) de +
dir->i_sb->s_blocksize -
EXT4_DIR_REC_LEN(0));
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ ((char *)de - bh->b_data))) {
/* On error, skip the f_pos to the next block. */
dir_file->f_pos = (dir_file->f_pos |
(dir->i_sb->s_blocksize - 1)) + 1;
brelse(bh);
return count;
}
ext4fs_dirhash(de->name, de->name_len, hinfo);
if ((hinfo->hash < start_hash) ||
((hinfo->hash == start_hash) &&
(hinfo->minor_hash < start_minor_hash)))
continue;
if (de->inode == 0)
continue;
if ((err = ext4_htree_store_dirent(dir_file,
hinfo->hash, hinfo->minor_hash, de)) != 0) {
brelse(bh);
return err;
}
count++;
}
brelse(bh);
return count;
}
/*
* This function fills a red-black tree with information from a
* directory. We start scanning the directory in hash order, starting
* at start_hash and start_minor_hash.
*
* This function returns the number of entries inserted into the tree,
* or a negative error code.
*/
int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
__u32 start_minor_hash, __u32 *next_hash)
{
struct dx_hash_info hinfo;
struct ext4_dir_entry_2 *de;
struct dx_frame frames[2], *frame;
struct inode *dir;
ext4_lblk_t block;
int count = 0;
int ret, err;
__u32 hashval;
dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
start_hash, start_minor_hash));
dir = dir_file->f_path.dentry->d_inode;
if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version +=
EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
start_hash, start_minor_hash);
*next_hash = ~0;
return count;
}
hinfo.hash = start_hash;
hinfo.minor_hash = 0;
frame = dx_probe(NULL, dir, &hinfo, frames, &err);
if (!frame)
return err;
/* Add '.' and '..' from the htree header */
if (!start_hash && !start_minor_hash) {
de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
goto errout;
count++;
}
if (start_hash < 2 || (start_hash == 2 && start_minor_hash == 0)) {
de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
de = ext4_next_entry(de, dir->i_sb->s_blocksize);
if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
goto errout;
count++;
}
while (1) {
block = dx_get_block(frame->at);
ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
start_hash, start_minor_hash);
if (ret < 0) {
err = ret;
goto errout;
}
count += ret;
hashval = ~0;
ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
frame, frames, &hashval);
*next_hash = hashval;
if (ret < 0) {
err = ret;
goto errout;
}
/*
* Stop if: (a) there are no more entries, or
* (b) we have inserted at least one entry and the
* next hash value is not a continuation
*/
if ((ret == 0) ||
(count && ((hashval & 1) == 0)))
break;
}
dx_release(frames);
dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, "
"next hash: %x\n", count, *next_hash));
return count;
errout:
dx_release(frames);
return (err);
}
/*
* Directory block splitting, compacting
*/
/*
* Create map of hash values, offsets, and sizes, stored at end of block.
* Returns number of entries mapped.
*/
static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
struct dx_hash_info *hinfo,
struct dx_map_entry *map_tail)
{
int count = 0;
char *base = (char *) de;
struct dx_hash_info h = *hinfo;
while ((char *) de < base + blocksize) {
if (de->name_len && de->inode) {
ext4fs_dirhash(de->name, de->name_len, &h);
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = ((char *) de - base)>>2;
map_tail->size = le16_to_cpu(de->rec_len);
count++;
cond_resched();
}
/* XXX: do we need to check rec_len == 0 case? -Chris */
de = ext4_next_entry(de, blocksize);
}
return count;
}
/* Sort map by hash value */
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
{
struct dx_map_entry *p, *q, *top = map + count - 1;
int more;
/* Combsort until bubble sort doesn't suck */
while (count > 2) {
count = count*10/13;
if (count - 9 < 2) /* 9, 10 -> 11 */
count = 11;
for (p = top, q = p - count; q >= map; p--, q--)
if (p->hash < q->hash)
swap(*p, *q);
}
/* Garden variety bubble sort */
do {
more = 0;
q = top;
while (q-- > map) {
if (q[1].hash >= q[0].hash)
continue;
swap(*(q+1), *q);
more = 1;
}
} while(more);
}
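/*
* Note on the "9, 10 -> 11" clamp above: count is unsigned, so for
* count < 9 the expression count - 9 wraps to a huge value and the test
* is false; only the known-bad comb sort gaps 9 and 10 get bumped to
* 11. Worked gap sequence for count == 100 (a sketch): 76, 58, 44, 33,
* 25, 19, 14, 11 (10 bumped), 8, 6, 4, 3, 2, then the final bubble
* sort pass cleans up.
*/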
static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
{
struct dx_entry *entries = frame->entries;
struct dx_entry *old = frame->at, *new = old + 1;
int count = dx_get_count(entries);
assert(count < dx_get_limit(entries));
assert(old < entries + count);
memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
dx_set_hash(new, hash);
dx_set_block(new, block);
dx_set_count(entries, count + 1);
}
static void ext4_update_dx_flag(struct inode *inode)
{
if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_COMPAT_DIR_INDEX))
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
}
/*
* NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
*
* `len <= EXT4_NAME_LEN' is guaranteed by caller.
* `de != NULL' is guaranteed by caller.
*/
static inline int ext4_match (int len, const char * const name,
struct ext4_dir_entry_2 * de)
{
if (len != de->name_len)
return 0;
if (!de->inode)
return 0;
return !memcmp(name, de->name, len);
}
/*
* Returns 0 if not found, -1 on failure, and 1 on success
*/
static inline int search_dirblock(struct buffer_head *bh,
struct inode *dir,
const struct qstr *d_name,
unsigned int offset,
struct ext4_dir_entry_2 ** res_dir)
{
struct ext4_dir_entry_2 * de;
char * dlimit;
int de_len;
const char *name = d_name->name;
int namelen = d_name->len;
de = (struct ext4_dir_entry_2 *) bh->b_data;
dlimit = bh->b_data + dir->i_sb->s_blocksize;
while ((char *) de < dlimit) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
if ((char *) de + namelen <= dlimit &&
ext4_match (namelen, name, de)) {
/* found a match - just to be sure, do a full check */
if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
return -1;
*res_dir = de;
return 1;
}
/* prevent looping on a bad block */
de_len = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
if (de_len <= 0)
return -1;
offset += de_len;
de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
}
return 0;
}
/*
* ext4_find_entry()
*
* finds an entry in the specified directory with the wanted name. It
* returns the cache buffer in which the entry was found, and the entry
* itself (as a parameter - res_dir). It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to.
*
* The returned buffer_head has ->b_count elevated. The caller is expected
* to brelse() it when appropriate.
*/
static struct buffer_head * ext4_find_entry (struct inode *dir,
const struct qstr *d_name,
struct ext4_dir_entry_2 ** res_dir)
{
struct super_block *sb;
struct buffer_head *bh_use[NAMEI_RA_SIZE];
struct buffer_head *bh, *ret = NULL;
ext4_lblk_t start, block, b;
const u8 *name = d_name->name;
int ra_max = 0; /* Number of bh's in the readahead
buffer, bh_use[] */
int ra_ptr = 0; /* Current index into readahead
buffer */
int num = 0;
ext4_lblk_t nblocks;
int i, err;
int namelen;
*res_dir = NULL;
sb = dir->i_sb;
namelen = d_name->len;
if (namelen > EXT4_NAME_LEN)
return NULL;
if ((namelen <= 2) && (name[0] == '.') &&
(name[1] == '.' || name[1] == '\0')) {
/*
* "." or ".." will only be in the first block
* NFS may look up ".."; "." should be handled by the VFS
*/
block = start = 0;
nblocks = 1;
goto restart;
}
if (is_dx(dir)) {
bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
/*
* On success, or if the error was file not found,
* return. Otherwise, fall back to doing a search the
* old fashioned way.
*/
if (bh || (err != ERR_BAD_DX_DIR))
return bh;
dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
"falling back\n"));
}
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
start = EXT4_I(dir)->i_dir_start_lookup;
if (start >= nblocks)
start = 0;
block = start;
restart:
do {
/*
* We deal with the read-ahead logic here.
*/
if (ra_ptr >= ra_max) {
/* Refill the readahead buffer */
ra_ptr = 0;
b = block;
for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
/*
* Terminate if we reach the end of the
* directory and must wrap, or if our
* search has finished at this block.
*/
if (b >= nblocks || (num && block == start)) {
bh_use[ra_max] = NULL;
break;
}
num++;
bh = ext4_getblk(NULL, dir, b++, 0, &err);
bh_use[ra_max] = bh;
if (bh)
ll_rw_block(READ | REQ_META | REQ_PRIO,
1, &bh);
}
}
if ((bh = bh_use[ra_ptr++]) == NULL)
goto next;
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* read error, skip block & hope for the best */
EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
(unsigned long) block);
brelse(bh);
goto next;
}
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir,
(struct ext4_dir_entry *)bh->b_data)) {
EXT4_ERROR_INODE(dir, "checksumming directory "
"block %lu", (unsigned long)block);
brelse(bh);
goto next;
}
set_buffer_verified(bh);
i = search_dirblock(bh, dir, d_name,
block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
if (i == 1) {
EXT4_I(dir)->i_dir_start_lookup = block;
ret = bh;
goto cleanup_and_exit;
} else {
brelse(bh);
if (i < 0)
goto cleanup_and_exit;
}
next:
if (++block >= nblocks)
block = 0;
} while (block != start);
/*
* If the directory has grown while we were searching, then
* search the last part of the directory before giving up.
*/
block = nblocks;
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
if (block < nblocks) {
start = 0;
goto restart;
}
cleanup_and_exit:
/* Clean up the read-ahead blocks */
for (; ra_ptr < ra_max; ra_ptr++)
brelse(bh_use[ra_ptr]);
return ret;
}
static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
struct ext4_dir_entry_2 **res_dir, int *err)
{
struct super_block * sb = dir->i_sb;
struct dx_hash_info hinfo;
struct dx_frame frames[2], *frame;
struct buffer_head *bh;
ext4_lblk_t block;
int retval;
if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
return NULL;
do {
block = dx_get_block(frame->at);
if (!(bh = ext4_bread(NULL, dir, block, 0, err)))
goto errout;
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir,
(struct ext4_dir_entry *)bh->b_data)) {
EXT4_ERROR_INODE(dir, "checksumming directory "
"block %lu", (unsigned long)block);
brelse(bh);
*err = -EIO;
goto errout;
}
set_buffer_verified(bh);
retval = search_dirblock(bh, dir, d_name,
block << EXT4_BLOCK_SIZE_BITS(sb),
res_dir);
if (retval == 1) { /* Success! */
dx_release(frames);
return bh;
}
brelse(bh);
if (retval == -1) {
*err = ERR_BAD_DX_DIR;
goto errout;
}
/* Check to see if we should continue to search */
retval = ext4_htree_next_block(dir, hinfo.hash, frame,
frames, NULL);
if (retval < 0) {
ext4_warning(sb,
"error reading index page in directory #%lu",
dir->i_ino);
*err = retval;
goto errout;
}
} while (retval == 1);
*err = -ENOENT;
errout:
dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
dx_release (frames);
return NULL;
}
static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
struct ext4_dir_entry_2 *de;
struct buffer_head *bh;
if (dentry->d_name.len > EXT4_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
bh = ext4_find_entry(dir, &dentry->d_name, &de);
inode = NULL;
if (bh) {
__u32 ino = le32_to_cpu(de->inode);
brelse(bh);
if (!ext4_valid_inum(dir->i_sb, ino)) {
EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
return ERR_PTR(-EIO);
}
if (unlikely(ino == dir->i_ino)) {
EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
dentry->d_name.len,
dentry->d_name.name);
return ERR_PTR(-EIO);
}
inode = ext4_iget(dir->i_sb, ino);
if (inode == ERR_PTR(-ESTALE)) {
EXT4_ERROR_INODE(dir,
"deleted inode referenced: %u",
ino);
return ERR_PTR(-EIO);
}
}
return d_splice_alias(inode, dentry);
}
struct dentry *ext4_get_parent(struct dentry *child)
{
__u32 ino;
static const struct qstr dotdot = QSTR_INIT("..", 2);
struct ext4_dir_entry_2 * de;
struct buffer_head *bh;
bh = ext4_find_entry(child->d_inode, &dotdot, &de);
if (!bh)
return ERR_PTR(-ENOENT);
ino = le32_to_cpu(de->inode);
brelse(bh);
if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
EXT4_ERROR_INODE(child->d_inode,
"bad parent inode number: %u", ino);
return ERR_PTR(-EIO);
}
return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
}
#define S_SHIFT 12
static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = EXT4_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = EXT4_FT_DIR,
[S_IFCHR >> S_SHIFT] = EXT4_FT_CHRDEV,
[S_IFBLK >> S_SHIFT] = EXT4_FT_BLKDEV,
[S_IFIFO >> S_SHIFT] = EXT4_FT_FIFO,
[S_IFSOCK >> S_SHIFT] = EXT4_FT_SOCK,
[S_IFLNK >> S_SHIFT] = EXT4_FT_SYMLINK,
};
static inline void ext4_set_de_type(struct super_block *sb,
struct ext4_dir_entry_2 *de,
umode_t mode) {
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE))
de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
/*
* Move count entries from end of map between two memory locations.
* Returns pointer to last entry moved.
*/
static struct ext4_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
unsigned blocksize)
{
unsigned rec_len = 0;
while (count--) {
struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
(from + (map->offs<<2));
rec_len = EXT4_DIR_REC_LEN(de->name_len);
memcpy (to, de, rec_len);
((struct ext4_dir_entry_2 *) to)->rec_len =
ext4_rec_len_to_disk(rec_len, blocksize);
de->inode = 0;
map++;
to += rec_len;
}
return (struct ext4_dir_entry_2 *) (to - rec_len);
}
/*
* Compact each dir entry in the range to the minimal rec_len.
* Returns pointer to last entry in range.
*/
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
{
struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
unsigned rec_len = 0;
prev = to = de;
while ((char*)de < base + blocksize) {
next = ext4_next_entry(de, blocksize);
if (de->inode && de->name_len) {
rec_len = EXT4_DIR_REC_LEN(de->name_len);
if (de > to)
memmove(to, de, rec_len);
to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
prev = to;
to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
}
de = next;
}
return prev;
}
/*
* Split a full leaf block to make room for a new dir entry.
* Allocate a new block, and move entries so that they are approx. equally full.
* Returns pointer to de in block into which the new entry will be inserted.
*/
static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
struct buffer_head **bh,struct dx_frame *frame,
struct dx_hash_info *hinfo, int *error)
{
unsigned blocksize = dir->i_sb->s_blocksize;
unsigned count, continued;
struct buffer_head *bh2;
ext4_lblk_t newblock;
u32 hash2;
struct dx_map_entry *map;
char *data1 = (*bh)->b_data, *data2;
unsigned split, move, size;
struct ext4_dir_entry_2 *de = NULL, *de2;
struct ext4_dir_entry_tail *t;
int csum_size = 0;
int err = 0, i;
if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
bh2 = ext4_append (handle, dir, &newblock, &err);
if (!(bh2)) {
brelse(*bh);
*bh = NULL;
goto errout;
}
BUFFER_TRACE(*bh, "get_write_access");
err = ext4_journal_get_write_access(handle, *bh);
if (err)
goto journal_error;
BUFFER_TRACE(frame->bh, "get_write_access");
err = ext4_journal_get_write_access(handle, frame->bh);
if (err)
goto journal_error;
data2 = bh2->b_data;
/* create map in the end of data2 block */
map = (struct dx_map_entry *) (data2 + blocksize);
count = dx_make_map((struct ext4_dir_entry_2 *) data1,
blocksize, hinfo, map);
map -= count;
dx_sort_map(map, count);
/* Split the existing block in the middle, size-wise */
size = 0;
move = 0;
for (i = count-1; i >= 0; i--) {
/* is more than half of this entry in 2nd half of the block? */
if (size + map[i].size/2 > blocksize/2)
break;
size += map[i].size;
move++;
}
/* map index at which we will split */
split = count - move;
hash2 = map[split].hash;
continued = hash2 == map[split - 1].hash;
dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
(unsigned long)dx_get_block(frame->at),
hash2, split, count-split));
/* Fancy dance to stay within two buffers */
de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
de = dx_pack_dirents(data1, blocksize);
de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
(char *) de,
blocksize);
de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
(char *) de2,
blocksize);
if (csum_size) {
t = EXT4_DIRENT_TAIL(data2, blocksize);
initialize_dirent_tail(t, blocksize);
t = EXT4_DIRENT_TAIL(data1, blocksize);
initialize_dirent_tail(t, blocksize);
}
dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
/* Which block gets the new entry? */
if (hinfo->hash >= hash2)
{
swap(*bh, bh2);
de = de2;
}
dx_insert_block(frame, hash2 + continued, newblock);
err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
if (err)
goto journal_error;
err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
if (err)
goto journal_error;
brelse(bh2);
dxtrace(dx_show_index("frame", frame->entries));
return de;
journal_error:
brelse(*bh);
brelse(bh2);
*bh = NULL;
ext4_std_error(dir->i_sb, err);
errout:
*error = err;
return NULL;
}
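/*
* Split-point sketch for the size/move loop above: the hash-sorted map
* is walked from the highest hash downwards, accumulating rec_len bytes
* until roughly blocksize/2 have been claimed for the new block. Since
* the rec_lens of a full leaf sum to the block size, the two resulting
* leaves end up about equally full. When the hash at the split point
* equals its predecessor ("continued" != 0), the new index entry is
* inserted as hash2 + 1, so lookups for that hash visit both leaves.
*/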
/*
* Add a new entry into a directory (leaf) block. If de is non-NULL,
* it points to a directory entry which is guaranteed to be large
* enough for the new directory entry. If de is NULL, then
* add_dirent_to_buf will attempt to search the directory block for
* space. It will return -ENOSPC if no space is available, -EEXIST if
* the directory entry already exists, and -EIO on a corrupted block.
*/
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct ext4_dir_entry_2 *de,
struct buffer_head *bh)
{
struct inode *dir = dentry->d_parent->d_inode;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned int offset = 0;
unsigned int blocksize = dir->i_sb->s_blocksize;
unsigned short reclen;
int nlen, rlen, err;
char *top;
int csum_size = 0;
if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
reclen = EXT4_DIR_REC_LEN(namelen);
if (!de) {
de = (struct ext4_dir_entry_2 *)bh->b_data;
top = bh->b_data + (blocksize - csum_size) - reclen;
while ((char *) de <= top) {
if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
return -EIO;
if (ext4_match(namelen, name, de))
return -EEXIST;
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
if ((de->inode? rlen - nlen: rlen) >= reclen)
break;
de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
offset += rlen;
}
if ((char *) de > top)
return -ENOSPC;
}
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
if (err) {
ext4_std_error(dir->i_sb, err);
return err;
}
/* By now the buffer is marked for journaling */
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
if (de->inode) {
struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, blocksize);
de->rec_len = ext4_rec_len_to_disk(nlen, blocksize);
de = de1;
}
de->file_type = EXT4_FT_UNKNOWN;
de->inode = cpu_to_le32(inode->i_ino);
ext4_set_de_type(dir->i_sb, de, inode->i_mode);
de->name_len = namelen;
memcpy(de->name, name, namelen);
/*
* XXX shouldn't update any times until successful
* completion of syscall, but too many callers depend
* on this.
*
* XXX similarly, too many callers depend on
* ext4_new_inode() setting the times, but error
* recovery deletes the inode, so the worst that can
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
ext4_update_dx_flag(dir);
dir->i_version++;
ext4_mark_inode_dirty(handle, dir);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_dirent_node(handle, dir, bh);
if (err)
ext4_std_error(dir->i_sb, err);
return 0;
}
/*
* This converts a one block unindexed directory to a 3 block indexed
* directory, and adds the dentry to the indexed directory.
*/
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct buffer_head *bh)
{
struct inode *dir = dentry->d_parent->d_inode;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct buffer_head *bh2;
struct dx_root *root;
struct dx_frame frames[2], *frame;
struct dx_entry *entries;
struct ext4_dir_entry_2 *de, *de2;
struct ext4_dir_entry_tail *t;
char *data1, *top;
unsigned len;
int retval;
unsigned blocksize;
struct dx_hash_info hinfo;
ext4_lblk_t block;
struct fake_dirent *fde;
int csum_size = 0;
if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
blocksize = dir->i_sb->s_blocksize;
dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
retval = ext4_journal_get_write_access(handle, bh);
if (retval) {
ext4_std_error(dir->i_sb, retval);
brelse(bh);
return retval;
}
root = (struct dx_root *) bh->b_data;
/* The 0th block becomes the root, move the dirents out */
fde = &root->dotdot;
de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len, blocksize));
if ((char *) de >= (((char *) root) + blocksize)) {
EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
brelse(bh);
return -EIO;
}
len = ((char *) root) + (blocksize - csum_size) - (char *) de;
/* Allocate new block for the 0th block's dirents */
bh2 = ext4_append(handle, dir, &block, &retval);
if (!(bh2)) {
brelse(bh);
return retval;
}
ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
data1 = bh2->b_data;
memcpy (data1, de, len);
de = (struct ext4_dir_entry_2 *) data1;
top = data1 + len;
while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
de = de2;
de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
(char *) de,
blocksize);
if (csum_size) {
t = EXT4_DIRENT_TAIL(data1, blocksize);
initialize_dirent_tail(t, blocksize);
}
/* Initialize the root; the dot dirents already exist */
de = (struct ext4_dir_entry_2 *) (&root->dotdot);
de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
blocksize);
memset (&root->info, 0, sizeof(root->info));
root->info.info_length = sizeof(root->info);
root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
entries = root->entries;
dx_set_block(entries, 1);
dx_set_count(entries, 1);
dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
/* Initialize as for dx_probe */
hinfo.hash_version = root->info.hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
ext4fs_dirhash(name, namelen, &hinfo);
frame = frames;
frame->entries = entries;
frame->at = entries;
frame->bh = bh;
bh = bh2;
ext4_handle_dirty_dx_node(handle, dir, frame->bh);
ext4_handle_dirty_dirent_node(handle, dir, bh);
de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
if (!de) {
/*
* Even if the block split failed, we have to properly write
* out all the changes we did so far. Otherwise we can end up
* with corrupted filesystem.
*/
ext4_mark_inode_dirty(handle, dir);
dx_release(frames);
return retval;
}
dx_release(frames);
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
brelse(bh);
return retval;
}
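/*
* Rough sequence of the conversion above (editorial sketch): block 0 is
* stripped down to "." and ".." and becomes the dx_root, with a single
* dx_entry pointing at block 1; the remaining dirents are copied into
* freshly appended block 1; do_split() then allocates block 2 and
* redistributes the entries by hash -- one block becomes three.
*/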
/*
* ext4_add_entry()
*
* adds a file entry to the specified directory, using the same
* semantics as ext4_find_entry(). It returns a negative error code on failure.
*
* NOTE!! The inode part of 'de' is left at 0 - which means you
* may not sleep between calling this and putting something into
* the entry, as someone else might have used it while you slept.
*/
static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de;
struct ext4_dir_entry_tail *t;
struct super_block *sb;
int retval;
int dx_fallback=0;
unsigned blocksize;
ext4_lblk_t block, blocks;
int csum_size = 0;
if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
sb = dir->i_sb;
blocksize = sb->s_blocksize;
if (!dentry->d_name.len)
return -EINVAL;
if (is_dx(dir)) {
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
return retval;
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
}
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0; block < blocks; block++) {
bh = ext4_bread(handle, dir, block, 0, &retval);
if(!bh)
return retval;
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir,
(struct ext4_dir_entry *)bh->b_data))
return -EIO;
set_buffer_verified(bh);
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
if (retval != -ENOSPC) {
brelse(bh);
return retval;
}
if (blocks == 1 && !dx_fallback &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
return make_indexed_dir(handle, dentry, inode, bh);
brelse(bh);
}
bh = ext4_append(handle, dir, &block, &retval);
if (!bh)
return retval;
de = (struct ext4_dir_entry_2 *) bh->b_data;
de->inode = 0;
de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
if (csum_size) {
t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
initialize_dirent_tail(t, blocksize);
}
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
brelse(bh);
if (retval == 0)
ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
return retval;
}
/*
* Returns 0 for success, or a negative error value
*/
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
struct dx_frame frames[2], *frame;
struct dx_entry *entries, *at;
struct dx_hash_info hinfo;
struct buffer_head *bh;
struct inode *dir = dentry->d_parent->d_inode;
struct super_block *sb = dir->i_sb;
struct ext4_dir_entry_2 *de;
int err;
frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
if (!frame)
return err;
entries = frame->entries;
at = frame->at;
if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
goto cleanup;
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
goto journal_error;
set_buffer_verified(bh);
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
if (err)
goto journal_error;
err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
if (err != -ENOSPC)
goto cleanup;
/* Block full, should compress but for now just split */
dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
dx_get_count(entries), dx_get_limit(entries)));
/* Need to split index? */
if (dx_get_count(entries) == dx_get_limit(entries)) {
ext4_lblk_t newblock;
unsigned icount = dx_get_count(entries);
int levels = frame - frames;
struct dx_entry *entries2;
struct dx_node *node2;
struct buffer_head *bh2;
if (levels && (dx_get_count(frames->entries) ==
dx_get_limit(frames->entries))) {
ext4_warning(sb, "Directory index full!");
err = -ENOSPC;
goto cleanup;
}
bh2 = ext4_append (handle, dir, &newblock, &err);
if (!(bh2))
goto cleanup;
node2 = (struct dx_node *)(bh2->b_data);
entries2 = node2->entries;
memset(&node2->fake, 0, sizeof(struct fake_dirent));
node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize,
sb->s_blocksize);
BUFFER_TRACE(frame->bh, "get_write_access");
err = ext4_journal_get_write_access(handle, frame->bh);
if (err)
goto journal_error;
if (levels) {
unsigned icount1 = icount/2, icount2 = icount - icount1;
unsigned hash2 = dx_get_hash(entries + icount1);
dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
icount1, icount2));
BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
err = ext4_journal_get_write_access(handle,
frames[0].bh);
if (err)
goto journal_error;
memcpy((char *) entries2, (char *) (entries + icount1),
icount2 * sizeof(struct dx_entry));
dx_set_count(entries, icount1);
dx_set_count(entries2, icount2);
dx_set_limit(entries2, dx_node_limit(dir));
/* Which index block gets the new entry? */
if (at - entries >= icount1) {
frame->at = at = at - entries - icount1 + entries2;
frame->entries = entries = entries2;
swap(frame->bh, bh2);
}
dx_insert_block(frames + 0, hash2, newblock);
dxtrace(dx_show_index("node", frames[1].entries));
dxtrace(dx_show_index("node",
((struct dx_node *) bh2->b_data)->entries));
err = ext4_handle_dirty_dx_node(handle, dir, bh2);
if (err)
goto journal_error;
brelse (bh2);
} else {
dxtrace(printk(KERN_DEBUG
"Creating second level index...\n"));
memcpy((char *) entries2, (char *) entries,
icount * sizeof(struct dx_entry));
dx_set_limit(entries2, dx_node_limit(dir));
/* Set up root */
dx_set_count(entries, 1);
dx_set_block(entries + 0, newblock);
((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
/* Add new access path frame */
frame = frames + 1;
frame->at = at = at - entries + entries2;
frame->entries = entries = entries2;
frame->bh = bh2;
err = ext4_journal_get_write_access(handle,
frame->bh);
if (err)
goto journal_error;
}
err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
if (err) {
ext4_std_error(inode->i_sb, err);
goto cleanup;
}
}
de = do_split(handle, dir, &bh, frame, &hinfo, &err);
if (!de)
goto cleanup;
err = add_dirent_to_buf(handle, dentry, inode, de, bh);
goto cleanup;
journal_error:
ext4_std_error(dir->i_sb, err);
cleanup:
if (bh)
brelse(bh);
dx_release(frames);
return err;
}
/*
* ext4_delete_entry deletes a directory entry by merging it with the
* previous entry
*/
static int ext4_delete_entry(handle_t *handle,
struct inode *dir,
struct ext4_dir_entry_2 *de_del,
struct buffer_head *bh)
{
struct ext4_dir_entry_2 *de, *pde;
unsigned int blocksize = dir->i_sb->s_blocksize;
int csum_size = 0;
int i, err;
if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
i = 0;
pde = NULL;
de = (struct ext4_dir_entry_2 *) bh->b_data;
while (i < bh->b_size - csum_size) {
if (ext4_check_dir_entry(dir, NULL, de, bh, i))
return -EIO;
if (de == de_del) {
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
if (unlikely(err)) {
ext4_std_error(dir->i_sb, err);
return err;
}
if (pde)
pde->rec_len = ext4_rec_len_to_disk(
ext4_rec_len_from_disk(pde->rec_len,
blocksize) +
ext4_rec_len_from_disk(de->rec_len,
blocksize),
blocksize);
else
de->inode = 0;
dir->i_version++;
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_dirent_node(handle, dir, bh);
if (unlikely(err)) {
ext4_std_error(dir->i_sb, err);
return err;
}
return 0;
}
i += ext4_rec_len_from_disk(de->rec_len, blocksize);
pde = de;
de = ext4_next_entry(de, blocksize);
}
return -ENOENT;
}
/*
* DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
* since this indicates that nlinks count was previously 1.
*/
static void ext4_inc_count(handle_t *handle, struct inode *inode)
{
inc_nlink(inode);
if (is_dx(inode) && inode->i_nlink > 1) {
/* limit is 16-bit i_links_count */
if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
set_nlink(inode, 1);
EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
}
}
}
/*
* If a directory had nlink == 1, then we should let it be 1. This indicates
* the directory has >EXT4_LINK_MAX subdirs.
*/
static void ext4_dec_count(handle_t *handle, struct inode *inode)
{
if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
drop_nlink(inode);
}
static int ext4_add_nondir(handle_t *handle,
struct dentry *dentry, struct inode *inode)
{
int err = ext4_add_entry(handle, dentry, inode);
if (!err) {
ext4_mark_inode_dirty(handle, inode);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
return 0;
}
drop_nlink(inode);
unlock_new_inode(inode);
iput(inode);
return err;
}
/*
* By the time this is called, we already have created
* the directory cache entry for the new file, but it
* is so far negative - it has no inode.
*
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
handle_t *handle;
struct inode *inode;
int err, retries = 0;
dquot_initialize(dir);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
err = ext4_add_nondir(handle, dentry, inode);
}
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
static int ext4_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
handle_t *handle;
struct inode *inode;
int err, retries = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
dquot_initialize(dir);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
#ifdef CONFIG_EXT4_FS_XATTR
inode->i_op = &ext4_special_inode_operations;
#endif
err = ext4_add_nondir(handle, dentry, inode);
}
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
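/*
* Create a directory: allocate the inode, build its first block with
* the "." and ".." entries (plus a checksum tail when metadata_csum
* is enabled), and only then link it into the parent.
*/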
static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
struct buffer_head *dir_block = NULL;
struct ext4_dir_entry_2 *de;
struct ext4_dir_entry_tail *t;
unsigned int blocksize = dir->i_sb->s_blocksize;
int csum_size = 0;
int err, retries = 0;
if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
if (EXT4_DIR_LINK_MAX(dir))
return -EMLINK;
dquot_initialize(dir);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
&dentry->d_name, 0, NULL);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_stop;
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
dir_block = ext4_bread(handle, inode, 0, 1, &err);
if (!dir_block)
goto out_clear_inode;
BUFFER_TRACE(dir_block, "get_write_access");
err = ext4_journal_get_write_access(handle, dir_block);
if (err)
goto out_clear_inode;
de = (struct ext4_dir_entry_2 *) dir_block->b_data;
de->inode = cpu_to_le32(inode->i_ino);
de->name_len = 1;
de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
blocksize);
strcpy(de->name, ".");
ext4_set_de_type(dir->i_sb, de, S_IFDIR);
de = ext4_next_entry(de, blocksize);
de->inode = cpu_to_le32(dir->i_ino);
de->rec_len = ext4_rec_len_to_disk(blocksize -
(csum_size + EXT4_DIR_REC_LEN(1)),
blocksize);
de->name_len = 2;
strcpy(de->name, "..");
ext4_set_de_type(dir->i_sb, de, S_IFDIR);
set_nlink(inode, 2);
if (csum_size) {
t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
initialize_dirent_tail(t, blocksize);
}
BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
if (err)
goto out_clear_inode;
set_buffer_verified(dir_block);
err = ext4_mark_inode_dirty(handle, inode);
if (!err)
err = ext4_add_entry(handle, dentry, inode);
if (err) {
out_clear_inode:
clear_nlink(inode);
unlock_new_inode(inode);
ext4_mark_inode_dirty(handle, inode);
iput(inode);
goto out_stop;
}
ext4_inc_count(handle, dir);
ext4_update_dx_flag(dir);
err = ext4_mark_inode_dirty(handle, dir);
if (err)
goto out_clear_inode;
unlock_new_inode(inode);
d_instantiate(dentry, inode);
out_stop:
brelse(dir_block);
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
/*
* routine to check that the specified directory is empty (for rmdir)
*/
static int empty_dir(struct inode *inode)
{
unsigned int offset;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de, *de1;
struct super_block *sb;
int err = 0;
sb = inode->i_sb;
if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
!(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
if (err)
EXT4_ERROR_INODE(inode,
"error %d reading directory lblock 0", err);
else
ext4_warning(inode->i_sb,
"bad directory (dir #%lu) - no data block",
inode->i_ino);
return 1;
}
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(inode,
(struct ext4_dir_entry *)bh->b_data)) {
EXT4_ERROR_INODE(inode, "checksum error reading directory "
"lblock 0");
return -EIO;
}
set_buffer_verified(bh);
de = (struct ext4_dir_entry_2 *) bh->b_data;
de1 = ext4_next_entry(de, sb->s_blocksize);
if (le32_to_cpu(de->inode) != inode->i_ino ||
!le32_to_cpu(de1->inode) ||
strcmp(".", de->name) ||
strcmp("..", de1->name)) {
ext4_warning(inode->i_sb,
"bad directory (dir #%lu) - no `.' or `..'",
inode->i_ino);
brelse(bh);
return 1;
}
offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
de = ext4_next_entry(de1, sb->s_blocksize);
while (offset < inode->i_size) {
if (!bh ||
(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
unsigned int lblock;
err = 0;
brelse(bh);
lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
bh = ext4_bread(NULL, inode, lblock, 0, &err);
if (!bh) {
if (err)
EXT4_ERROR_INODE(inode,
"error %d reading directory "
"lblock %u", err, lblock);
offset += sb->s_blocksize;
continue;
}
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(inode,
(struct ext4_dir_entry *)bh->b_data)) {
EXT4_ERROR_INODE(inode, "checksum error "
"reading directory lblock 0");
return -EIO;
}
set_buffer_verified(bh);
de = (struct ext4_dir_entry_2 *) bh->b_data;
}
if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
de = (struct ext4_dir_entry_2 *)(bh->b_data +
sb->s_blocksize);
offset = (offset | (sb->s_blocksize - 1)) + 1;
continue;
}
if (le32_to_cpu(de->inode)) {
brelse(bh);
return 0;
}
offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
de = ext4_next_entry(de, sb->s_blocksize);
}
brelse(bh);
return 1;
}
/* ext4_orphan_add() links an unlinked or truncated inode into a list of
* such inodes, starting at the superblock, in case we crash before the
* file is closed/deleted, or in case the inode truncate spans multiple
* transactions and the last transaction is not recovered after a crash.
*
* At filesystem recovery time, we walk this list deleting unlinked
* inodes and truncating linked inodes in ext4_orphan_cleanup().
*/
int ext4_orphan_add(handle_t *handle, struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct ext4_iloc iloc;
int err = 0, rc;
if (!ext4_handle_valid(handle))
return 0;
mutex_lock(&EXT4_SB(sb)->s_orphan_lock);
if (!list_empty(&EXT4_I(inode)->i_orphan))
goto out_unlock;
/*
* Orphan handling is only valid for files with data blocks
* being truncated, or files being unlinked. Note that we either
* hold i_mutex, or the inode can not be referenced from outside,
* so i_nlink should not be bumped due to race
*/
J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
if (err)
goto out_unlock;
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_unlock;
/*
* Due to previous errors inode may be already a part of on-disk
* orphan list. If so skip on-disk list modification.
*/
if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
(le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
goto mem_insert;
/* Insert this inode at the head of the on-disk orphan list... */
NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
err = ext4_handle_dirty_super(handle, sb);
rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
if (!err)
err = rc;
/* Only add to the head of the in-memory list if all the
* previous operations succeeded. If the orphan_add is going to
* fail (possibly taking the journal offline), we can't risk
* leaving the inode on the orphan list: stray orphan-list
* entries can cause panics at unmount time.
*
* This is safe: on error we're going to ignore the orphan list
* anyway on the next recovery. */
mem_insert:
if (!err)
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
jbd_debug(4, "orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
mutex_unlock(&EXT4_SB(sb)->s_orphan_lock);
ext4_std_error(inode->i_sb, err);
return err;
}
/*
* ext4_orphan_del() removes an unlinked or truncated inode from the list
* of such inodes stored on disk, because it is finally being cleaned up.
*/
int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
struct list_head *prev;
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi;
__u32 ino_next;
struct ext4_iloc iloc;
int err = 0;
/* ext4_handle_valid() assumes a valid handle_t pointer */
if (handle && !ext4_handle_valid(handle))
return 0;
mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
if (list_empty(&ei->i_orphan))
goto out;
ino_next = NEXT_ORPHAN(inode);
prev = ei->i_orphan.prev;
sbi = EXT4_SB(inode->i_sb);
jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
list_del_init(&ei->i_orphan);
/* If we're on an error path, we may not have a valid
* transaction handle with which to update the orphan list on
* disk, but we still need to remove the inode from the linked
* list in memory. */
if (sbi->s_journal && !handle)
goto out;
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_err;
if (prev == &sbi->s_orphan) {
jbd_debug(4, "superblock will point to %u\n", ino_next);
BUFFER_TRACE(sbi->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
err = ext4_handle_dirty_super(handle, inode->i_sb);
} else {
struct ext4_iloc iloc2;
struct inode *i_prev =
&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
jbd_debug(4, "orphan inode %lu will point to %u\n",
i_prev->i_ino, ino_next);
err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
if (err)
goto out_brelse;
NEXT_ORPHAN(i_prev) = ino_next;
err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
}
if (err)
goto out_brelse;
NEXT_ORPHAN(inode) = 0;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
out_err:
ext4_std_error(inode->i_sb, err);
out:
mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
return err;
out_brelse:
brelse(iloc.bh);
goto out_err;
}
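/*
* rmdir: verify the victim is empty, remove its directory entry and
* put the now link-less inode on the orphan list so recovery can
* finish the job after a crash.
*/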
static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
{
int retval;
struct inode *inode;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de;
handle_t *handle;
/* Initialize quotas before so that eventual writes go in
* separate transaction */
dquot_initialize(dir);
dquot_initialize(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de);
if (!bh)
goto end_rmdir;
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
inode = dentry->d_inode;
retval = -EIO;
if (le32_to_cpu(de->inode) != inode->i_ino)
goto end_rmdir;
retval = -ENOTEMPTY;
if (!empty_dir(inode))
goto end_rmdir;
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_rmdir;
if (!EXT4_DIR_LINK_EMPTY(inode))
ext4_warning(inode->i_sb,
"empty directory has too many links (%d)",
inode->i_nlink);
inode->i_version++;
clear_nlink(inode);
/* There's no need to set i_disksize: the fact that i_nlink is
* zero will ensure that the right thing happens during any
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_dec_count(handle, dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
end_rmdir:
ext4_journal_stop(handle);
brelse(bh);
return retval;
}
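/*
* unlink: remove the directory entry and drop one link; if that was
* the last link, the inode goes on the orphan list until the final
* iput() actually frees it.
*/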
static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
int retval;
struct inode *inode;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de;
handle_t *handle;
trace_ext4_unlink_enter(dir, dentry);
/* Initialize quotas before so that eventual writes go
* in separate transaction */
dquot_initialize(dir);
dquot_initialize(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de);
if (!bh)
goto end_unlink;
inode = dentry->d_inode;
retval = -EIO;
if (le32_to_cpu(de->inode) != inode->i_ino)
goto end_unlink;
if (!inode->i_nlink) {
ext4_warning(inode->i_sb,
"Deleting nonexistent file (%lu), %d",
inode->i_ino, inode->i_nlink);
set_nlink(inode, 1);
}
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
drop_nlink(inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
retval = 0;
end_unlink:
ext4_journal_stop(handle);
brelse(bh);
trace_ext4_unlink_exit(dentry, retval);
return retval;
}
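/*
* Symlink targets that fit in the inode's i_data (up to
* EXT4_N_BLOCKS * 4 bytes) are stored inline as "fast" symlinks;
* longer targets get a data block written via __page_symlink(), which
* needs the two-transaction dance below.
*/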
static int ext4_symlink(struct inode *dir,
struct dentry *dentry, const char *symname)
{
handle_t *handle;
struct inode *inode;
int l, err, retries = 0;
int credits;
l = strlen(symname)+1;
if (l > dir->i_sb->s_blocksize)
return -ENAMETOOLONG;
dquot_initialize(dir);
if (l > EXT4_N_BLOCKS * 4) {
/*
* For non-fast symlinks, we just allocate inode and put it on
* orphan list in the first transaction => we need bitmap,
* group descriptor, sb, inode block, quota blocks, and
* possibly selinux xattr blocks.
*/
credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
EXT4_XATTR_TRANS_BLOCKS;
} else {
/*
* Fast symlink. We have to add entry to directory
* (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS),
* allocate new inode (bitmap, group descriptor, inode block,
* quota blocks, sb is already counted in previous macros).
*/
credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
}
retry:
handle = ext4_journal_start(dir, credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO,
&dentry->d_name, 0, NULL);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_stop;
if (l > EXT4_N_BLOCKS * 4) {
inode->i_op = &ext4_symlink_inode_operations;
ext4_set_aops(inode);
/*
* We cannot call page_symlink() with the transaction started
* because it calls into ext4_write_begin(), which can wait
* for transaction commit if we are running out of space
* and thus we deadlock. So we have to stop the transaction now
* and restart it once the symlink contents have been written.
*
* To keep the fs consistent in case of a crash, we put the inode
* on the orphan list in the meantime.
*/
drop_nlink(inode);
err = ext4_orphan_add(handle, inode);
ext4_journal_stop(handle);
if (err)
goto err_drop_inode;
err = __page_symlink(inode, symname, l, 1);
if (err)
goto err_drop_inode;
/*
* Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS
* + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
*/
handle = ext4_journal_start(dir,
EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto err_drop_inode;
}
set_nlink(inode, 1);
err = ext4_orphan_del(handle, inode);
if (err) {
ext4_journal_stop(handle);
clear_nlink(inode);
goto err_drop_inode;
}
} else {
/* clear the extent format for fast symlink */
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
inode->i_op = &ext4_fast_symlink_inode_operations;
memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
inode->i_size = l-1;
}
EXT4_I(inode)->i_disksize = inode->i_size;
err = ext4_add_nondir(handle, dentry, inode);
out_stop:
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
err_drop_inode:
unlock_new_inode(inode);
iput(inode);
return err;
}
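/*
* Hard link: bump the link count and add a new directory entry
* pointing at the existing inode.
*/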
static int ext4_link(struct dentry *old_dentry,
struct inode *dir, struct dentry *dentry)
{
handle_t *handle;
struct inode *inode = old_dentry->d_inode;
int err, retries = 0;
if (inode->i_nlink >= EXT4_LINK_MAX)
return -EMLINK;
dquot_initialize(dir);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS);
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
inode->i_ctime = ext4_current_time(inode);
ext4_inc_count(handle, inode);
ihold(inode);
err = ext4_add_entry(handle, dentry, inode);
if (!err) {
ext4_mark_inode_dirty(handle, inode);
d_instantiate(dentry, inode);
} else {
drop_nlink(inode);
iput(inode);
}
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
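/*
* ".." is always the second entry in a directory's first block, right
* after "."; stepping over the first entry yields the parent's inode
* field.
*/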
#define PARENT_INO(buffer, size) \
(ext4_next_entry((struct ext4_dir_entry_2 *)(buffer), size)->inode)
/*
* Anybody can rename anything with this: the permission checks are left to the
* higher-level routines.
*/
static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
handle_t *handle;
struct inode *old_inode, *new_inode;
struct buffer_head *old_bh, *new_bh, *dir_bh;
struct ext4_dir_entry_2 *old_de, *new_de;
int retval, force_da_alloc = 0;
dquot_initialize(old_dir);
dquot_initialize(new_dir);
old_bh = new_bh = dir_bh = NULL;
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
dquot_initialize(new_dentry->d_inode);
handle = ext4_journal_start(old_dir, 2 *
EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
ext4_handle_sync(handle);
old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
/*
* The inode number check below is _not_ about possible I/O errors:
* we might rmdir the source, keep it as the pwd of some process,
* and merrily kill the link to whatever was created under the
* same name. Goodbye sticky bit ;-<
*/
old_inode = old_dentry->d_inode;
retval = -ENOENT;
if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
goto end_rename;
new_inode = new_dentry->d_inode;
new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de);
if (new_bh) {
if (!new_inode) {
brelse(new_bh);
new_bh = NULL;
}
}
if (S_ISDIR(old_inode->i_mode)) {
if (new_inode) {
retval = -ENOTEMPTY;
if (!empty_dir(new_inode))
goto end_rename;
}
retval = -EIO;
dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval);
if (!dir_bh)
goto end_rename;
if (!buffer_verified(dir_bh) &&
!ext4_dirent_csum_verify(old_inode,
(struct ext4_dir_entry *)dir_bh->b_data))
goto end_rename;
set_buffer_verified(dir_bh);
if (le32_to_cpu(PARENT_INO(dir_bh->b_data,
old_dir->i_sb->s_blocksize)) != old_dir->i_ino)
goto end_rename;
retval = -EMLINK;
if (!new_inode && new_dir != old_dir &&
EXT4_DIR_LINK_MAX(new_dir))
goto end_rename;
BUFFER_TRACE(dir_bh, "get_write_access");
retval = ext4_journal_get_write_access(handle, dir_bh);
if (retval)
goto end_rename;
}
if (!new_bh) {
retval = ext4_add_entry(handle, new_dentry, old_inode);
if (retval)
goto end_rename;
} else {
BUFFER_TRACE(new_bh, "get write access");
retval = ext4_journal_get_write_access(handle, new_bh);
if (retval)
goto end_rename;
new_de->inode = cpu_to_le32(old_inode->i_ino);
if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
EXT4_FEATURE_INCOMPAT_FILETYPE))
new_de->file_type = old_de->file_type;
new_dir->i_version++;
new_dir->i_ctime = new_dir->i_mtime =
ext4_current_time(new_dir);
ext4_mark_inode_dirty(handle, new_dir);
BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh);
if (unlikely(retval)) {
ext4_std_error(new_dir->i_sb, retval);
goto end_rename;
}
brelse(new_bh);
new_bh = NULL;
}
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
old_inode->i_ctime = ext4_current_time(old_inode);
ext4_mark_inode_dirty(handle, old_inode);
/*
* ok, that's it
*/
if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
old_de->name_len != old_dentry->d_name.len ||
strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
(retval = ext4_delete_entry(handle, old_dir,
old_de, old_bh)) == -ENOENT) {
/* old_de could have moved from under us during htree split, so
* make sure that we are deleting the right entry. We might
* also be pointing to a stale entry in the unused part of
* old_bh so just checking inum and the name isn't enough. */
struct buffer_head *old_bh2;
struct ext4_dir_entry_2 *old_de2;
old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2);
if (old_bh2) {
retval = ext4_delete_entry(handle, old_dir,
old_de2, old_bh2);
brelse(old_bh2);
}
}
if (retval) {
ext4_warning(old_dir->i_sb,
"Deleting old file (%lu), %d, error=%d",
old_dir->i_ino, old_dir->i_nlink, retval);
}
if (new_inode) {
ext4_dec_count(handle, new_inode);
new_inode->i_ctime = ext4_current_time(new_inode);
}
old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
ext4_update_dx_flag(old_dir);
if (dir_bh) {
PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
if (is_dx(old_inode)) {
retval = ext4_handle_dirty_dx_node(handle,
old_inode,
dir_bh);
} else {
retval = ext4_handle_dirty_dirent_node(handle,
old_inode,
dir_bh);
}
if (retval) {
ext4_std_error(old_dir->i_sb, retval);
goto end_rename;
}
ext4_dec_count(handle, old_dir);
if (new_inode) {
/* checked empty_dir above, can't have another parent,
* ext4_dec_count() won't work for many-linked dirs */
clear_nlink(new_inode);
} else {
ext4_inc_count(handle, new_dir);
ext4_update_dx_flag(new_dir);
ext4_mark_inode_dirty(handle, new_dir);
}
}
ext4_mark_inode_dirty(handle, old_dir);
if (new_inode) {
ext4_mark_inode_dirty(handle, new_inode);
if (!new_inode->i_nlink)
ext4_orphan_add(handle, new_inode);
if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
force_da_alloc = 1;
}
retval = 0;
end_rename:
brelse(dir_bh);
brelse(old_bh);
brelse(new_bh);
ext4_journal_stop(handle);
if (retval == 0 && force_da_alloc)
ext4_alloc_da_blocks(old_inode);
return retval;
}
/*
* directories can handle most operations...
*/
const struct inode_operations ext4_dir_inode_operations = {
.create = ext4_create,
.lookup = ext4_lookup,
.link = ext4_link,
.unlink = ext4_unlink,
.symlink = ext4_symlink,
.mkdir = ext4_mkdir,
.rmdir = ext4_rmdir,
.mknod = ext4_mknod,
.rename = ext4_rename,
.setattr = ext4_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
#endif
.get_acl = ext4_get_acl,
.fiemap = ext4_fiemap,
};
const struct inode_operations ext4_special_inode_operations = {
.setattr = ext4_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
#endif
.get_acl = ext4_get_acl,
};
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_1745_0 |
crossvul-cpp_data_good_5544_1 | /*
* Network-device interface management.
*
* Copyright (c) 2004-2005, Keir Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "common.h"
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <xen/events.h>
#include <asm/xen/hypercall.h>
#define XENVIF_QUEUE_LENGTH 32
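/*
* Interface lifetime is reference counted: xenvif_disconnect() waits
* on waiting_to_free until the last xenvif_put() drops the count.
*/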
void xenvif_get(struct xenvif *vif)
{
atomic_inc(&vif->refcnt);
}
void xenvif_put(struct xenvif *vif)
{
if (atomic_dec_and_test(&vif->refcnt))
wake_up(&vif->waiting_to_free);
}
int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}
static int xenvif_rx_schedulable(struct xenvif *vif)
{
return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
}
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
if (vif->netbk == NULL)
return IRQ_NONE;
xen_netbk_schedule_xenvif(vif);
if (xenvif_rx_schedulable(vif))
netif_wake_queue(vif->dev);
return IRQ_HANDLED;
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
BUG_ON(skb->dev != dev);
if (vif->netbk == NULL)
goto drop;
/* Drop the packet if the target domain has no receive buffers. */
if (!xenvif_rx_schedulable(vif))
goto drop;
/* Reserve ring slots for the worst-case number of fragments. */
vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
xenvif_get(vif);
if (vif->can_queue && xen_netbk_must_stop_queue(vif))
netif_stop_queue(dev);
xen_netbk_queue_tx_skb(vif, skb);
return NETDEV_TX_OK;
drop:
vif->dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
{
netif_rx_ni(skb);
}
void xenvif_notify_tx_completion(struct xenvif *vif)
{
if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
netif_wake_queue(vif->dev);
}
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
return &vif->dev->stats;
}
static void xenvif_up(struct xenvif *vif)
{
xen_netbk_add_xenvif(vif);
enable_irq(vif->irq);
xen_netbk_check_rx_xenvif(vif);
}
static void xenvif_down(struct xenvif *vif)
{
disable_irq(vif->irq);
xen_netbk_deschedule_xenvif(vif);
xen_netbk_remove_xenvif(vif);
}
static int xenvif_open(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
if (netif_carrier_ok(dev))
xenvif_up(vif);
netif_start_queue(dev);
return 0;
}
static int xenvif_close(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
if (netif_carrier_ok(dev))
xenvif_down(vif);
netif_stop_queue(dev);
return 0;
}
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
struct xenvif *vif = netdev_priv(dev);
int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;
if (mtu > max)
return -EINVAL;
dev->mtu = mtu;
return 0;
}
static netdev_features_t xenvif_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct xenvif *vif = netdev_priv(dev);
if (!vif->can_sg)
features &= ~NETIF_F_SG;
if (!vif->gso && !vif->gso_prefix)
features &= ~NETIF_F_TSO;
if (!vif->csum)
features &= ~NETIF_F_IP_CSUM;
return features;
}
static const struct xenvif_stat {
char name[ETH_GSTRING_LEN];
u16 offset;
} xenvif_stats[] = {
{
"rx_gso_checksum_fixup",
offsetof(struct xenvif, rx_gso_checksum_fixup)
},
};
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
case ETH_SS_STATS:
return ARRAY_SIZE(xenvif_stats);
default:
return -EINVAL;
}
}
static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
void *vif = netdev_priv(dev);
int i;
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
memcpy(data + i * ETH_GSTRING_LEN,
xenvif_stats[i].name, ETH_GSTRING_LEN);
break;
}
}
static const struct ethtool_ops xenvif_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_sset_count = xenvif_get_sset_count,
.get_ethtool_stats = xenvif_get_ethtool_stats,
.get_strings = xenvif_get_strings,
};
static const struct net_device_ops xenvif_netdev_ops = {
.ndo_start_xmit = xenvif_start_xmit,
.ndo_get_stats = xenvif_get_stats,
.ndo_open = xenvif_open,
.ndo_stop = xenvif_close,
.ndo_change_mtu = xenvif_change_mtu,
.ndo_fix_features = xenvif_fix_features,
};
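/*
* Allocate and register the netdev backing one frontend vif. The
* device starts carrier-off; xenvif_connect() brings it up once the
* shared rings and event channel exist.
*/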
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
unsigned int handle)
{
int err;
struct net_device *dev;
struct xenvif *vif;
char name[IFNAMSIZ] = {};
snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
if (dev == NULL) {
pr_warn("Could not allocate netdev\n");
return ERR_PTR(-ENOMEM);
}
SET_NETDEV_DEV(dev, parent);
vif = netdev_priv(dev);
vif->domid = domid;
vif->handle = handle;
vif->netbk = NULL;
vif->can_sg = 1;
vif->csum = 1;
atomic_set(&vif->refcnt, 1);
init_waitqueue_head(&vif->waiting_to_free);
vif->dev = dev;
INIT_LIST_HEAD(&vif->schedule_list);
INIT_LIST_HEAD(&vif->notify_list);
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
init_timer(&vif->credit_timeout);
/* Initialize 'expires' now: it's used to track the credit window. */
vif->credit_timeout.expires = jiffies;
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->features = dev->hw_features;
SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
/*
* Initialise a dummy MAC address. We choose the numerically
* largest non-broadcast address to prevent the address getting
* stolen by an Ethernet bridge for STP purposes.
* (FE:FF:FF:FF:FF:FF)
*/
memset(dev->dev_addr, 0xFF, ETH_ALEN);
dev->dev_addr[0] &= ~0x01;
netif_carrier_off(dev);
err = register_netdev(dev);
if (err) {
netdev_warn(dev, "Could not register device: err=%d\n", err);
free_netdev(dev);
return ERR_PTR(err);
}
netdev_dbg(dev, "Successfully created xenvif\n");
return vif;
}
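/*
* Map the frontend's TX/RX rings and bind the interdomain event
* channel, then mark the carrier on. Safe to call twice: an already
* bound irq makes this a no-op.
*/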
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int evtchn)
{
int err = -ENOMEM;
/* Already connected? Then there is nothing to do. */
if (vif->irq)
return 0;
err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
err = bind_interdomain_evtchn_to_irqhandler(
vif->domid, evtchn, xenvif_interrupt, 0,
vif->dev->name, vif);
if (err < 0)
goto err_unmap;
vif->irq = err;
disable_irq(vif->irq);
xenvif_get(vif);
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
netif_carrier_on(vif->dev);
if (netif_running(vif->dev))
xenvif_up(vif);
rtnl_unlock();
return 0;
err_unmap:
xen_netbk_unmap_frontend_rings(vif);
err:
return err;
}
void xenvif_carrier_off(struct xenvif *vif)
{
struct net_device *dev = vif->dev;
rtnl_lock();
netif_carrier_off(dev); /* discard queued packets */
if (netif_running(dev))
xenvif_down(vif);
rtnl_unlock();
xenvif_put(vif);
}
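/*
* Tear-down path: drop the carrier, wait for all outstanding
* references, then unbind the irq and free the netdev.
*/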
void xenvif_disconnect(struct xenvif *vif)
{
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
atomic_dec(&vif->refcnt);
wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
del_timer_sync(&vif->credit_timeout);
if (vif->irq)
unbind_from_irqhandler(vif->irq, vif);
unregister_netdev(vif->dev);
xen_netbk_unmap_frontend_rings(vif);
free_netdev(vif->dev);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5544_1 |
crossvul-cpp_data_bad_132_0 | /*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
static struct pmu etm_pmu;
static bool etm_perf_up;
/**
* struct etm_event_data - Coresight specifics associated to an event
* @work: Handle to free allocated memory outside IRQ context.
* @mask: Hold the CPU(s) this event was set for.
* @snk_config: The sink configuration.
* @path: An array of path, each slot for one CPU.
*/
struct etm_event_data {
struct work_struct work;
cpumask_t mask;
void *snk_config;
struct list_head **path;
};
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_timestamp.attr,
NULL,
};
static struct attribute_group etm_pmu_format_group = {
.name = "format",
.attrs = etm_config_formats_attr,
};
static const struct attribute_group *etm_pmu_attr_groups[] = {
&etm_pmu_format_group,
NULL,
};
static void etm_event_read(struct perf_event *event) {}
static int etm_addr_filters_alloc(struct perf_event *event)
{
struct etm_filters *filters;
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
if (!filters)
return -ENOMEM;
if (event->parent)
memcpy(filters, event->parent->hw.addr_filters,
sizeof(*filters));
event->hw.addr_filters = filters;
return 0;
}
static void etm_event_destroy(struct perf_event *event)
{
kfree(event->hw.addr_filters);
event->hw.addr_filters = NULL;
}
static int etm_event_init(struct perf_event *event)
{
int ret = 0;
if (event->attr.type != etm_pmu.type) {
ret = -ENOENT;
goto out;
}
ret = etm_addr_filters_alloc(event);
if (ret)
goto out;
event->destroy = etm_event_destroy;
out:
return ret;
}
static void free_event_data(struct work_struct *work)
{
int cpu;
cpumask_t *mask;
struct etm_event_data *event_data;
struct coresight_device *sink;
event_data = container_of(work, struct etm_event_data, work);
mask = &event_data->mask;
/*
* First deal with the sink configuration. See comment in
* etm_setup_aux() about why we take the first available path.
*/
if (event_data->snk_config) {
cpu = cpumask_first(mask);
sink = coresight_get_sink(event_data->path[cpu]);
if (sink_ops(sink)->free_buffer)
sink_ops(sink)->free_buffer(event_data->snk_config);
}
for_each_cpu(cpu, mask) {
if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
coresight_release_path(event_data->path[cpu]);
}
kfree(event_data->path);
kfree(event_data);
}
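/*
* Allocate the per-session bookkeeping. The path array is sized for
* every online CPU even for single-CPU sessions; see the comment in
* the function body for the rationale.
*/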
static void *alloc_event_data(int cpu)
{
int size;
cpumask_t *mask;
struct etm_event_data *event_data;
/* First get memory for the session's data */
event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
if (!event_data)
return NULL;
/* Make sure nothing disappears under us */
get_online_cpus();
size = num_online_cpus();
mask = &event_data->mask;
if (cpu != -1)
cpumask_set_cpu(cpu, mask);
else
cpumask_copy(mask, cpu_online_mask);
put_online_cpus();
/*
* Each CPU has a single path between source and destination. As such
* allocate an array using CPU numbers as indexes. That way a path
* for any CPU can easily be accessed at any given time. We proceed
* the same way for sessions involving a single CPU. The cost of
* unused memory when dealing with single CPU trace scenarios is small
* compared to the cost of searching through an optimized array.
*/
event_data->path = kcalloc(size,
sizeof(struct list_head *), GFP_KERNEL);
if (!event_data->path) {
kfree(event_data);
return NULL;
}
return event_data;
}
static void etm_free_aux(void *data)
{
struct etm_event_data *event_data = data;
schedule_work(&event_data->work);
}
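/*
* Build a source-to-sink path for every CPU in the session and let
* the (single, first-enabled) sink allocate the AUX buffer.
*/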
static void *etm_setup_aux(int event_cpu, void **pages,
int nr_pages, bool overwrite)
{
int cpu;
cpumask_t *mask;
struct coresight_device *sink;
struct etm_event_data *event_data = NULL;
event_data = alloc_event_data(event_cpu);
if (!event_data)
return NULL;
/*
* In theory nothing prevents tracers in a trace session from being
* associated with different sinks, nor having a sink per tracer. But
* until we have HW with this kind of topology we need to assume tracers
* in a trace session are using the same sink. Therefore go through
* the coresight bus and pick the first enabled sink.
*
* When operated from sysFS users are responsible to enable the sink
* while from perf, the perf tools will do it based on the choice made
* on the cmd line. As such the "enable_sink" flag in sysFS is reset.
*/
sink = coresight_get_enabled_sink(true);
if (!sink)
goto err;
INIT_WORK(&event_data->work, free_event_data);
mask = &event_data->mask;
/* Setup the path for each CPU in a trace session */
for_each_cpu(cpu, mask) {
struct coresight_device *csdev;
csdev = per_cpu(csdev_src, cpu);
if (!csdev)
goto err;
/*
* Building a path doesn't enable it, it simply builds a
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
*/
event_data->path[cpu] = coresight_build_path(csdev, sink);
if (IS_ERR(event_data->path[cpu]))
goto err;
}
if (!sink_ops(sink)->alloc_buffer)
goto err;
/*
* After the for_each_cpu() loop above, 'cpu' is one past the last CPU
* in the mask; reset it so the sink allocates its buffer against a
* valid CPU (and hence a valid NUMA node).
*/
cpu = cpumask_first(mask);
/* Get the AUX specific data from the sink buffer */
event_data->snk_config =
sink_ops(sink)->alloc_buffer(sink, cpu, pages,
nr_pages, overwrite);
if (!event_data->snk_config)
goto err;
out:
return event_data;
err:
etm_free_aux(event_data);
event_data = NULL;
goto out;
}
static void etm_event_start(struct perf_event *event, int flags)
{
int cpu = smp_processor_id();
struct etm_event_data *event_data;
struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
if (!csdev)
goto fail;
/*
* Deal with the ring buffer API and get a handle on the
* session's information.
*/
event_data = perf_aux_output_begin(handle, event);
if (!event_data)
goto fail;
/* We need a sink, no need to continue without one */
sink = coresight_get_sink(event_data->path[cpu]);
if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
goto fail_end_stop;
/* Configure the sink */
if (sink_ops(sink)->set_buffer(sink, handle,
event_data->snk_config))
goto fail_end_stop;
/* Nothing will happen without a path */
if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
goto fail_end_stop;
/* Tell the perf core the event is alive */
event->hw.state = 0;
/* Finally enable the tracer */
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
goto fail_end_stop;
out:
return;
fail_end_stop:
perf_aux_output_end(handle, 0, true);
fail:
event->hw.state = PERF_HES_STOPPED;
goto out;
}
static void etm_event_stop(struct perf_event *event, int mode)
{
bool lost;
int cpu = smp_processor_id();
unsigned long size;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
struct etm_event_data *event_data = perf_get_aux(handle);
if (event->hw.state == PERF_HES_STOPPED)
return;
if (!csdev)
return;
sink = coresight_get_sink(event_data->path[cpu]);
if (!sink)
return;
/* stop tracer */
source_ops(csdev)->disable(csdev, event);
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
if (mode & PERF_EF_UPDATE) {
if (WARN_ON_ONCE(handle->event != event))
return;
/* update trace information */
if (!sink_ops(sink)->update_buffer)
return;
sink_ops(sink)->update_buffer(sink, handle,
event_data->snk_config);
if (!sink_ops(sink)->reset_buffer)
return;
size = sink_ops(sink)->reset_buffer(sink, handle,
event_data->snk_config,
&lost);
perf_aux_output_end(handle, size, lost);
}
/* Disabling the path make its elements available to other sessions */
coresight_disable_path(event_data->path[cpu]);
}
static int etm_event_add(struct perf_event *event, int mode)
{
int ret = 0;
struct hw_perf_event *hwc = &event->hw;
if (mode & PERF_EF_START) {
etm_event_start(event, 0);
if (hwc->state & PERF_HES_STOPPED)
ret = -EINVAL;
} else {
hwc->state = PERF_HES_STOPPED;
}
return ret;
}
static void etm_event_del(struct perf_event *event, int mode)
{
etm_event_stop(event, PERF_EF_UPDATE);
}
static int etm_addr_filters_validate(struct list_head *filters)
{
bool range = false, address = false;
int index = 0;
struct perf_addr_filter *filter;
list_for_each_entry(filter, filters, entry) {
/*
* No need to go further if there's no more
* room for filters.
*/
if (++index > ETM_ADDR_CMP_MAX)
return -EOPNOTSUPP;
/*
* As taken from the struct perf_addr_filter documentation:
* @range: 1: range, 0: address
*
* At this time we don't allow range and start/stop filtering
* to cohabitate, they have to be mutually exclusive.
*/
if ((filter->range == 1) && address)
return -EOPNOTSUPP;
if ((filter->range == 0) && range)
return -EOPNOTSUPP;
/*
* For range filtering, the second address in the address
* range comparator needs to be higher than the first.
* Invalid otherwise.
*/
if (filter->range && filter->size == 0)
return -EINVAL;
/*
* Everything checks out with this filter, record what we've
* received before moving on to the next one.
*/
if (filter->range)
range = true;
else
address = true;
}
return 0;
}
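/*
* Translate the perf core's address filters into the ETM's start/stop
* and range comparator programming, folding in the per-filter section
* offsets computed by the core.
*/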
static void etm_addr_filters_sync(struct perf_event *event)
{
struct perf_addr_filters_head *head = perf_event_addr_filters(event);
unsigned long start, stop, *offs = event->addr_filters_offs;
struct etm_filters *filters = event->hw.addr_filters;
struct etm_filter *etm_filter;
struct perf_addr_filter *filter;
int i = 0;
list_for_each_entry(filter, &head->list, entry) {
start = filter->offset + offs[i];
stop = start + filter->size;
etm_filter = &filters->etm_filter[i];
if (filter->range == 1) {
etm_filter->start_addr = start;
etm_filter->stop_addr = stop;
etm_filter->type = ETM_ADDR_TYPE_RANGE;
} else {
if (filter->filter == 1) {
etm_filter->start_addr = start;
etm_filter->type = ETM_ADDR_TYPE_START;
} else {
etm_filter->stop_addr = stop;
etm_filter->type = ETM_ADDR_TYPE_STOP;
}
}
i++;
}
filters->nr_filters = i;
}
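/*
* Expose (or remove) a "cpuN" sysfs link from the PMU to the tracer
* and record the per-cpu source so the event callbacks can find it.
*/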
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
char entry[sizeof("cpu9999999")];
int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
struct device *pmu_dev = etm_pmu.dev;
struct device *cs_dev = &csdev->dev;
sprintf(entry, "cpu%d", cpu);
if (!etm_perf_up)
return -EPROBE_DEFER;
if (link) {
ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
if (ret)
return ret;
per_cpu(csdev_src, cpu) = csdev;
} else {
sysfs_remove_link(&pmu_dev->kobj, entry);
per_cpu(csdev_src, cpu) = NULL;
}
return 0;
}
static int __init etm_perf_init(void)
{
int ret;
etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;
etm_pmu.attr_groups = etm_pmu_attr_groups;
etm_pmu.task_ctx_nr = perf_sw_context;
etm_pmu.read = etm_event_read;
etm_pmu.event_init = etm_event_init;
etm_pmu.setup_aux = etm_setup_aux;
etm_pmu.free_aux = etm_free_aux;
etm_pmu.start = etm_event_start;
etm_pmu.stop = etm_event_stop;
etm_pmu.add = etm_event_add;
etm_pmu.del = etm_event_del;
etm_pmu.addr_filters_sync = etm_addr_filters_sync;
etm_pmu.addr_filters_validate = etm_addr_filters_validate;
etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;
ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
if (ret == 0)
etm_perf_up = true;
return ret;
}
device_initcall(etm_perf_init);
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_132_0 |
crossvul-cpp_data_bad_5799_0 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* commctrl.c
*
* Abstract: Contains all routines for control of the AFA comm layer
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
/**
* ioctl_send_fib - send a FIB from userspace
* @dev: adapter being processed
* @arg: arguments to the ioctl call
*
* This routine sends a fib to the adapter on behalf of a user level
* program.
*/
# define AAC_DEBUG_PREAMBLE KERN_INFO
# define AAC_DEBUG_POSTAMBLE
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
struct hw_fib * kfib;
struct fib *fibptr;
struct hw_fib * hw_fib = (struct hw_fib *)0;
dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
unsigned size;
int retval;
if (dev->in_reset) {
return -EBUSY;
}
fibptr = aac_fib_alloc(dev);
if(fibptr == NULL) {
return -ENOMEM;
}
kfib = fibptr->hw_fib_va;
/*
* First copy in the header so that we can check the size field.
*/
if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
aac_fib_free(fibptr);
return -EFAULT;
}
/*
* Since we copy based on the fib header size, make sure that we
* will not overrun the buffer when we copy the memory. Return
* an error if we would.
*/
size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
dma_addr_t daddr;
if (size > 2048) {
retval = -EINVAL;
goto cleanup;
}
kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
if (!kfib) {
retval = -ENOMEM;
goto cleanup;
}
/* Hijack the hw_fib */
hw_fib = fibptr->hw_fib_va;
hw_fib_pa = fibptr->hw_fib_pa;
fibptr->hw_fib_va = kfib;
fibptr->hw_fib_pa = daddr;
memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
memcpy(kfib, hw_fib, dev->max_fib_size);
}
if (copy_from_user(kfib, arg, size)) {
retval = -EFAULT;
goto cleanup;
}
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
* Since we didn't really send a fib, zero out the state to allow
* cleanup code not to assert.
*/
kfib->header.XferState = 0;
} else {
retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
le16_to_cpu(kfib->header.Size) , FsaNormal,
1, 1, NULL, NULL);
if (retval) {
goto cleanup;
}
if (aac_fib_complete(fibptr) != 0) {
retval = -EINVAL;
goto cleanup;
}
}
/*
* Make sure that the size returned by the adapter (which includes
* the header) is less than or equal to the size of a fib, so we
* don't corrupt application data. Then copy that size to the user
* buffer. (Don't try to add the header information again, since it
* was already included by the adapter.)
*/
retval = 0;
if (copy_to_user(arg, (void *)kfib, size))
retval = -EFAULT;
cleanup:
if (hw_fib) {
pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
fibptr->hw_fib_pa = hw_fib_pa;
fibptr->hw_fib_va = hw_fib;
}
if (retval != -ERESTARTSYS)
aac_fib_free(fibptr);
return retval;
}
/**
* open_getadapter_fib - Get the next fib
*
* This routine will get the next Fib, if available, from the AdapterFibContext
* passed in from the user.
*/
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
struct aac_fib_context * fibctx;
int status;
fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
if (fibctx == NULL) {
status = -ENOMEM;
} else {
unsigned long flags;
struct list_head * entry;
struct aac_fib_context * context;
fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
fibctx->size = sizeof(struct aac_fib_context);
/*
* Yes yes, I know this could be an index, but we have a
* better guarantee of uniqueness for the locked loop below.
* Without the aid of a persistent history, this also helps
* reduce the chance that the opaque context would be reused.
*/
fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
/*
* Initialize the mutex used to wait for the next AIF.
*/
sema_init(&fibctx->wait_sem, 0);
fibctx->wait = 0;
/*
* Initialize the fibs and set the count of fibs on
* the list to 0.
*/
fibctx->count = 0;
INIT_LIST_HEAD(&fibctx->fib_list);
fibctx->jiffies = jiffies/HZ;
/*
* Now add this context onto the adapter's
* AdapterFibContext list.
*/
spin_lock_irqsave(&dev->fib_lock, flags);
/* Ensure that we have a unique identifier */
entry = dev->fib_list.next;
while (entry != &dev->fib_list) {
context = list_entry(entry, struct aac_fib_context, next);
if (context->unique == fibctx->unique) {
/* Not unique (32 bits) */
fibctx->unique++;
entry = dev->fib_list.next;
} else {
entry = entry->next;
}
}
list_add_tail(&fibctx->next, &dev->fib_list);
spin_unlock_irqrestore(&dev->fib_lock, flags);
if (copy_to_user(arg, &fibctx->unique,
sizeof(fibctx->unique))) {
status = -EFAULT;
} else {
status = 0;
}
}
return status;
}
/**
* next_getadapter_fib - get the next fib
* @dev: adapter to use
* @arg: ioctl argument
*
* This routine will get the next Fib, if available, from the AdapterFibContext
* passed in from the user.
*/
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
struct fib_ioctl f;
struct fib *fib;
struct aac_fib_context *fibctx;
int status;
struct list_head * entry;
unsigned long flags;
if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
return -EFAULT;
/*
* Verify that the HANDLE passed in was a valid AdapterFibContext
*
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
spin_lock_irqsave(&dev->fib_lock, flags);
entry = dev->fib_list.next;
fibctx = NULL;
while (entry != &dev->fib_list) {
fibctx = list_entry(entry, struct aac_fib_context, next);
/*
* Extract the AdapterFibContext from the Input parameters.
*/
if (fibctx->unique == f.fibctx) { /* We found a winner */
break;
}
entry = entry->next;
fibctx = NULL;
}
if (!fibctx) {
spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context not found\n"));
return -EINVAL;
}
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context))) {
spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context corrupt?\n"));
return -EINVAL;
}
status = 0;
/*
* If there are no fibs to send back, then either wait or return
* -EAGAIN
*/
return_fib:
if (!list_empty(&fibctx->fib_list)) {
/*
* Pull the next fib from the fibs
*/
entry = fibctx->fib_list.next;
list_del(entry);
fib = list_entry(entry, struct fib, fiblink);
fibctx->count--;
spin_unlock_irqrestore(&dev->fib_lock, flags);
if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
kfree(fib->hw_fib_va);
kfree(fib);
return -EFAULT;
}
/*
* Free the space occupied by this copy of the fib.
*/
kfree(fib->hw_fib_va);
kfree(fib);
status = 0;
} else {
spin_unlock_irqrestore(&dev->fib_lock, flags);
/* If someone killed the AIF aacraid thread, restart it */
status = !dev->aif_thread;
if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
/* Be paranoid, be very paranoid! */
kthread_stop(dev->thread);
ssleep(1);
dev->aif_thread = 0;
dev->thread = kthread_run(aac_command_thread, dev,
"%s", dev->name);
ssleep(1);
}
if (f.wait) {
if(down_interruptible(&fibctx->wait_sem) < 0) {
status = -ERESTARTSYS;
} else {
/* Lock again and retry */
spin_lock_irqsave(&dev->fib_lock, flags);
goto return_fib;
}
} else {
status = -EAGAIN;
}
}
fibctx->jiffies = jiffies/HZ;
return status;
}
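/**
* aac_close_fib_context - tear down an AdapterFibContext
* @dev: adapter
* @fibctx: context to destroy
*
* Frees any queued fibs, unlinks the context from the adapter's list
* and releases it. The caller must hold dev->fib_lock.
*/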
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
struct fib *fib;
/*
* First free any FIBs that have not been consumed.
*/
while (!list_empty(&fibctx->fib_list)) {
struct list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = fibctx->fib_list.next;
list_del(entry);
fib = list_entry(entry, struct fib, fiblink);
fibctx->count--;
/*
* Free the space occupied by this copy of the fib.
*/
kfree(fib->hw_fib_va);
kfree(fib);
}
/*
* Remove the Context from the AdapterFibContext List
*/
list_del(&fibctx->next);
/*
* Invalidate context
*/
fibctx->type = 0;
/*
* Free the space occupied by the Context
*/
kfree(fibctx);
return 0;
}
/**
* close_getadapter_fib - close down user fib context
* @dev: adapter
* @arg: ioctl arguments
*
* This routine will close down the fibctx passed in from the user.
*/
static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
struct aac_fib_context *fibctx;
int status;
unsigned long flags;
struct list_head * entry;
/*
* Verify that the HANDLE passed in was a valid AdapterFibContext
*
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
entry = dev->fib_list.next;
fibctx = NULL;
while(entry != &dev->fib_list) {
fibctx = list_entry(entry, struct aac_fib_context, next);
/*
* Extract the fibctx from the input parameters
*/
if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
break;
entry = entry->next;
fibctx = NULL;
}
if (!fibctx)
return 0; /* Already gone */
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context)))
return -EINVAL;
spin_lock_irqsave(&dev->fib_lock, flags);
status = aac_close_fib_context(dev, fibctx);
spin_unlock_irqrestore(&dev->fib_lock, flags);
return status;
}
/**
* check_revision - report the driver version to userspace
* @dev: adapter
* @arg: ioctl arguments
*
* This routine returns the driver version.
* Under Linux, there have been no version incompatibilities, so this is
* simple!
*/
static int check_revision(struct aac_dev *dev, void __user *arg)
{
struct revision response;
char *driver_version = aac_driver_version;
u32 version;
response.compat = 1;
version = (simple_strtol(driver_version,
&driver_version, 10) << 24) | 0x00000400;
version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
version += simple_strtol(driver_version + 1, NULL, 10);
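/*
* Worked example for a hypothetical driver_version of "1.2.1":
* (1 << 24) | 0x00000400 = 0x01000400
* + (2 << 16) = 0x01020400
* + 1 = 0x01020401
*/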
response.version = cpu_to_le32(version);
# ifdef AAC_DRIVER_BUILD
response.build = cpu_to_le32(AAC_DRIVER_BUILD);
# else
response.build = cpu_to_le32(9999);
# endif
if (copy_to_user(arg, &response, sizeof(response)))
return -EFAULT;
return 0;
}
/**
* aac_send_raw_srb - send a raw SRB from userspace
* @dev: adapter
* @arg: ioctl arguments
*
* Passes a user-supplied SCSI Request Block straight through to the
* firmware, mapping any scatter/gather buffers on the way.
*/
static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
struct fib* srbfib;
int status;
struct aac_srb *srbcmd = NULL;
struct user_aac_srb *user_srbcmd = NULL;
struct user_aac_srb __user *user_srb = arg;
struct aac_srb_reply __user *user_reply;
struct aac_srb_reply* reply;
u32 fibsize = 0;
u32 flags = 0;
s32 rcode = 0;
u32 data_dir;
void __user *sg_user[32];
void *sg_list[32];
u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
if (dev->in_reset) {
dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
return -EBUSY;
}
if (!capable(CAP_SYS_ADMIN)){
dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
return -EPERM;
}
/*
* Allocate and initialize a Fib then setup a SRB command
*/
if (!(srbfib = aac_fib_alloc(dev))) {
return -ENOMEM;
}
aac_fib_init(srbfib);
/* raw_srb FIB is not FastResponseCapable */
srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
srbcmd = (struct aac_srb*) fib_data(srbfib);
memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
rcode = -EFAULT;
goto cleanup;
}
if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
rcode = -EINVAL;
goto cleanup;
}
user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
if (!user_srbcmd) {
dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
rcode = -ENOMEM;
goto cleanup;
}
if(copy_from_user(user_srbcmd, user_srb,fibsize)){
dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
rcode = -EFAULT;
goto cleanup;
}
user_reply = arg+fibsize;
flags = user_srbcmd->flags; /* from user in cpu order */
// Fix up srb for endian and force some values
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->id = cpu_to_le32(user_srbcmd->id);
srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
srbcmd->flags = cpu_to_le32(flags);
srbcmd->retry_limit = 0; // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
switch (flags & (SRB_DataIn | SRB_DataOut)) {
case SRB_DataOut:
data_dir = DMA_TO_DEVICE;
break;
case (SRB_DataIn | SRB_DataOut):
data_dir = DMA_BIDIRECTIONAL;
break;
case SRB_DataIn:
data_dir = DMA_FROM_DEVICE;
break;
default:
data_dir = DMA_NONE;
}
if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
le32_to_cpu(srbcmd->sg.count)));
rcode = -EINVAL;
goto cleanup;
}
actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
(sizeof(struct sgentry64) - sizeof(struct sgentry));
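	/*
	 * Exactly two layouts are legal for the user buffer: one 32-bit
	 * sgentry per element (actual_fibsize) or one larger sgentry64
	 * per element (actual_fibsize64); any other size is malformed.
	 */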
/* User made a mistake - should not continue */
if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
"Raw SRB command calculated fibsize=%lu;%lu "
"user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
"issued fibsize=%d\n",
actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
sizeof(struct aac_srb), sizeof(struct sgentry),
sizeof(struct sgentry64), fibsize));
rcode = -EINVAL;
goto cleanup;
}
if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
rcode = -EINVAL;
goto cleanup;
}
byte_count = 0;
if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
/*
* This should also catch if user used the 32 bit sgmap
*/
if (actual_fibsize64 == fibsize) {
actual_fibsize = actual_fibsize64;
for (i = 0; i < upsg->count; i++) {
u64 addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
upsg->sg[i].count,i,upsg->count));
rcode = -ENOMEM;
goto cleanup;
}
addr = (u64)upsg->sg[i].addr[0];
addr += ((u64)upsg->sg[i].addr[1]) << 32;
sg_user[i] = (void __user *)(uintptr_t)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
byte_count += upsg->sg[i].count;
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
}
} else {
struct user_sgmap* usg;
usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
+ sizeof(struct sgmap), GFP_KERNEL);
if (!usg) {
dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
rcode = -ENOMEM;
goto cleanup;
}
memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
+ sizeof(struct sgmap));
actual_fibsize = actual_fibsize64;
for (i = 0; i < usg->count; i++) {
u64 addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
kfree(usg);
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
kfree(usg);
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
				/* copy the size p was allocated for, taken
				 * from the 32-bit usg view rather than the
				 * 64-bit upsg one */
				if(copy_from_user(p, sg_user[i], usg->sg[i].count)){
kfree (usg);
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
byte_count += usg->sg[i].count;
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
}
kfree (usg);
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
} else {
struct user_sgmap* upsg = &user_srbcmd->sg;
struct sgmap* psg = &srbcmd->sg;
if (actual_fibsize64 == fibsize) {
struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
for (i = 0; i < upsg->count; i++) {
uintptr_t addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
rcode = -ENOMEM;
goto cleanup;
}
addr = (u64)usg->sg[i].addr[0];
addr += ((u64)usg->sg[i].addr[1]) << 32;
sg_user[i] = (void __user *)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
byte_count += usg->sg[i].count;
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
}
} else {
for (i = 0; i < upsg->count; i++) {
dma_addr_t addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
upsg->sg[i].count, i, upsg->count));
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p, sg_user[i],
upsg->sg[i].count)) {
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p,
upsg->sg[i].count, data_dir);
psg->sg[i].addr = cpu_to_le32(addr);
byte_count += upsg->sg[i].count;
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
}
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
}
if (status == -ERESTARTSYS) {
rcode = -ERESTARTSYS;
goto cleanup;
}
if (status != 0){
dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
rcode = -ENXIO;
goto cleanup;
}
if (flags & SRB_DataIn) {
for(i = 0 ; i <= sg_indx; i++){
byte_count = le32_to_cpu(
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
: srbcmd->sg.sg[i].count);
if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
}
reply = (struct aac_srb_reply *) fib_data(srbfib);
if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
rcode = -EFAULT;
goto cleanup;
}
cleanup:
kfree(user_srbcmd);
for(i=0; i <= sg_indx; i++){
kfree(sg_list[i]);
}
if (rcode != -ERESTARTSYS) {
aac_fib_complete(srbfib);
aac_fib_free(srbfib);
}
return rcode;
}
struct aac_pci_info {
u32 bus;
u32 slot;
};
static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
struct aac_pci_info pci_info;
pci_info.bus = dev->pdev->bus->number;
pci_info.slot = PCI_SLOT(dev->pdev->devfn);
if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
return -EFAULT;
}
return 0;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
int status;
/*
* HBA gets first crack
*/
status = aac_dev_ioctl(dev, cmd, arg);
if (status != -ENOTTY)
return status;
switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
status = check_revision(dev, arg);
break;
case FSACTL_SEND_LARGE_FIB:
case FSACTL_SENDFIB:
status = ioctl_send_fib(dev, arg);
break;
case FSACTL_OPEN_GET_ADAPTER_FIB:
status = open_getadapter_fib(dev, arg);
break;
case FSACTL_GET_NEXT_ADAPTER_FIB:
status = next_getadapter_fib(dev, arg);
break;
case FSACTL_CLOSE_GET_ADAPTER_FIB:
status = close_getadapter_fib(dev, arg);
break;
case FSACTL_SEND_RAW_SRB:
status = aac_send_raw_srb(dev,arg);
break;
case FSACTL_GET_PCI_INFO:
status = aac_get_pci_info(dev,arg);
break;
default:
status = -ENOTTY;
break;
}
return status;
}
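/*
 * Illustrative user-space invocation of the dispatcher above (a sketch
 * only; the device node name is an assumption and varies by setup):
 *
 *	struct revision rev;
 *	int fd = open("/dev/aac0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, FSACTL_MINIPORT_REV_CHECK, &rev) == 0)
 *		printf("aacraid driver version 0x%x\n", rev.version);
 */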
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5799_0 |
crossvul-cpp_data_good_5845_21 | /*
* NETLINK Kernel-user communication protocol.
*
* Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
* Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
* added netlink_proto_exit
* Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
* use nlk_sk, as sk->protinfo is on a diet 8)
* Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
* - inc module use count of module that owns
* the kernel socket in case userspace opens
* socket of same protocol
* - remove all module support, since netlink is
* mandatory if CONFIG_NET=y these days
*/
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <asm/cacheflush.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#include "af_netlink.h"
struct listeners {
struct rcu_head rcu;
unsigned long masks[0];
};
/* state bits */
#define NETLINK_CONGESTED 0x0
/* flags */
#define NETLINK_KERNEL_SOCKET 0x1
#define NETLINK_RECV_PKTINFO 0x2
#define NETLINK_BROADCAST_SEND_ERROR 0x4
#define NETLINK_RECV_NO_ENOBUFS 0x8
static inline int netlink_is_kernel(struct sock *sk)
{
return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}
struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;
static inline u32 netlink_group_mask(u32 group)
{
return group ? 1 << (group - 1) : 0;
}
static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
{
return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
}
int netlink_add_tap(struct netlink_tap *nt)
{
if (unlikely(nt->dev->type != ARPHRD_NETLINK))
return -EINVAL;
spin_lock(&netlink_tap_lock);
list_add_rcu(&nt->list, &netlink_tap_all);
spin_unlock(&netlink_tap_lock);
if (nt->module)
__module_get(nt->module);
return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);
int __netlink_remove_tap(struct netlink_tap *nt)
{
bool found = false;
struct netlink_tap *tmp;
spin_lock(&netlink_tap_lock);
list_for_each_entry(tmp, &netlink_tap_all, list) {
if (nt == tmp) {
list_del_rcu(&nt->list);
found = true;
goto out;
}
}
pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
spin_unlock(&netlink_tap_lock);
if (found && nt->module)
module_put(nt->module);
return found ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(__netlink_remove_tap);
int netlink_remove_tap(struct netlink_tap *nt)
{
int ret;
ret = __netlink_remove_tap(nt);
synchronize_net();
return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
static bool netlink_filter_tap(const struct sk_buff *skb)
{
struct sock *sk = skb->sk;
bool pass = false;
/* We take the more conservative approach and
* whitelist socket protocols that may pass.
*/
switch (sk->sk_protocol) {
case NETLINK_ROUTE:
case NETLINK_USERSOCK:
case NETLINK_SOCK_DIAG:
case NETLINK_NFLOG:
case NETLINK_XFRM:
case NETLINK_FIB_LOOKUP:
case NETLINK_NETFILTER:
case NETLINK_GENERIC:
pass = true;
break;
}
return pass;
}
static int __netlink_deliver_tap_skb(struct sk_buff *skb,
struct net_device *dev)
{
struct sk_buff *nskb;
struct sock *sk = skb->sk;
int ret = -ENOMEM;
dev_hold(dev);
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
nskb->dev = dev;
nskb->protocol = htons((u16) sk->sk_protocol);
ret = dev_queue_xmit(nskb);
if (unlikely(ret > 0))
ret = net_xmit_errno(ret);
}
dev_put(dev);
return ret;
}
static void __netlink_deliver_tap(struct sk_buff *skb)
{
int ret;
struct netlink_tap *tmp;
if (!netlink_filter_tap(skb))
return;
list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
ret = __netlink_deliver_tap_skb(skb, tmp->dev);
if (unlikely(ret))
break;
}
}
static void netlink_deliver_tap(struct sk_buff *skb)
{
rcu_read_lock();
if (unlikely(!list_empty(&netlink_tap_all)))
__netlink_deliver_tap(skb);
rcu_read_unlock();
}
static void netlink_overrun(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
sk->sk_err = ENOBUFS;
sk->sk_error_report(sk);
}
}
atomic_inc(&sk->sk_drops);
}
static void netlink_rcv_wake(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (skb_queue_empty(&sk->sk_receive_queue))
clear_bit(NETLINK_CONGESTED, &nlk->state);
if (!test_bit(NETLINK_CONGESTED, &nlk->state))
wake_up_interruptible(&nlk->wait);
}
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}
static bool netlink_rx_is_mmaped(struct sock *sk)
{
return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}
static bool netlink_tx_is_mmaped(struct sock *sk)
{
return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}
static __pure struct page *pgvec_to_page(const void *addr)
{
if (is_vmalloc_addr(addr))
return vmalloc_to_page(addr);
else
return virt_to_page(addr);
}
static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; i++) {
if (pg_vec[i] != NULL) {
if (is_vmalloc_addr(pg_vec[i]))
vfree(pg_vec[i]);
else
free_pages((unsigned long)pg_vec[i], order);
}
}
kfree(pg_vec);
}
static void *alloc_one_pg_vec_page(unsigned long order)
{
void *buffer;
gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
__GFP_NOWARN | __GFP_NORETRY;
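	/* Try physically contiguous pages first without retrying or
	 * warning, fall back to vmalloc, and finally retry the page
	 * allocator with __GFP_NORETRY cleared. */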
buffer = (void *)__get_free_pages(gfp_flags, order);
if (buffer != NULL)
return buffer;
buffer = vzalloc((1 << order) * PAGE_SIZE);
if (buffer != NULL)
return buffer;
gfp_flags &= ~__GFP_NORETRY;
return (void *)__get_free_pages(gfp_flags, order);
}
static void **alloc_pg_vec(struct netlink_sock *nlk,
struct nl_mmap_req *req, unsigned int order)
{
unsigned int block_nr = req->nm_block_nr;
unsigned int i;
void **pg_vec;
pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
if (pg_vec == NULL)
return NULL;
for (i = 0; i < block_nr; i++) {
pg_vec[i] = alloc_one_pg_vec_page(order);
if (pg_vec[i] == NULL)
goto err1;
}
return pg_vec;
err1:
free_pg_vec(pg_vec, order, block_nr);
return NULL;
}
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
bool closing, bool tx_ring)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ring *ring;
struct sk_buff_head *queue;
void **pg_vec = NULL;
unsigned int order = 0;
int err;
ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
if (!closing) {
if (atomic_read(&nlk->mapped))
return -EBUSY;
if (atomic_read(&ring->pending))
return -EBUSY;
}
if (req->nm_block_nr) {
if (ring->pg_vec != NULL)
return -EBUSY;
if ((int)req->nm_block_size <= 0)
return -EINVAL;
if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
return -EINVAL;
if (req->nm_frame_size < NL_MMAP_HDRLEN)
return -EINVAL;
if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
return -EINVAL;
ring->frames_per_block = req->nm_block_size /
req->nm_frame_size;
if (ring->frames_per_block == 0)
return -EINVAL;
if (ring->frames_per_block * req->nm_block_nr !=
req->nm_frame_nr)
return -EINVAL;
order = get_order(req->nm_block_size);
pg_vec = alloc_pg_vec(nlk, req, order);
if (pg_vec == NULL)
return -ENOMEM;
} else {
if (req->nm_frame_nr)
return -EINVAL;
}
err = -EBUSY;
mutex_lock(&nlk->pg_vec_lock);
if (closing || atomic_read(&nlk->mapped) == 0) {
err = 0;
spin_lock_bh(&queue->lock);
ring->frame_max = req->nm_frame_nr - 1;
ring->head = 0;
ring->frame_size = req->nm_frame_size;
ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
swap(ring->pg_vec_len, req->nm_block_nr);
swap(ring->pg_vec_order, order);
swap(ring->pg_vec, pg_vec);
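			/* the old ring, if any, now sits in the local
			 * variables and is freed once the locks are dropped */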
__skb_queue_purge(queue);
spin_unlock_bh(&queue->lock);
WARN_ON(atomic_read(&nlk->mapped));
}
mutex_unlock(&nlk->pg_vec_lock);
if (pg_vec)
free_pg_vec(pg_vec, order, req->nm_block_nr);
return err;
}
static void netlink_mm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
atomic_inc(&nlk_sk(sk)->mapped);
}
static void netlink_mm_close(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
atomic_dec(&nlk_sk(sk)->mapped);
}
static const struct vm_operations_struct netlink_mmap_ops = {
.open = netlink_mm_open,
.close = netlink_mm_close,
};
static int netlink_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ring *ring;
unsigned long start, size, expected;
unsigned int i;
int err = -EINVAL;
if (vma->vm_pgoff)
return -EINVAL;
mutex_lock(&nlk->pg_vec_lock);
expected = 0;
for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
if (ring->pg_vec == NULL)
continue;
expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
}
if (expected == 0)
goto out;
size = vma->vm_end - vma->vm_start;
if (size != expected)
goto out;
start = vma->vm_start;
for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
if (ring->pg_vec == NULL)
continue;
for (i = 0; i < ring->pg_vec_len; i++) {
struct page *page;
void *kaddr = ring->pg_vec[i];
unsigned int pg_num;
for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
page = pgvec_to_page(kaddr);
err = vm_insert_page(vma, start, page);
if (err < 0)
goto out;
start += PAGE_SIZE;
kaddr += PAGE_SIZE;
}
}
}
atomic_inc(&nlk->mapped);
vma->vm_ops = &netlink_mmap_ops;
err = 0;
out:
mutex_unlock(&nlk->pg_vec_lock);
return err;
}
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
struct page *p_start, *p_end;
/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
while (p_start <= p_end) {
flush_dcache_page(p_start);
p_start++;
}
#endif
}
static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
smp_rmb();
flush_dcache_page(pgvec_to_page(hdr));
return hdr->nm_status;
}
static void netlink_set_status(struct nl_mmap_hdr *hdr,
enum nl_mmap_status status)
{
hdr->nm_status = status;
flush_dcache_page(pgvec_to_page(hdr));
smp_wmb();
}
static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
unsigned int pg_vec_pos, frame_off;
pg_vec_pos = pos / ring->frames_per_block;
frame_off = pos % ring->frames_per_block;
return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}
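/*
 * Worked example (illustrative): with frames_per_block = 4, frame pos 6
 * lives in block 6 / 4 = 1, at frame offset 6 % 4 = 2, i.e. at byte
 * offset 2 * frame_size into pg_vec[1].
 */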
static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
enum nl_mmap_status status)
{
struct nl_mmap_hdr *hdr;
hdr = __netlink_lookup_frame(ring, pos);
if (netlink_get_status(hdr) != status)
return NULL;
return hdr;
}
static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
enum nl_mmap_status status)
{
return netlink_lookup_frame(ring, ring->head, status);
}
static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
enum nl_mmap_status status)
{
unsigned int prev;
prev = ring->head ? ring->head - 1 : ring->frame_max;
return netlink_lookup_frame(ring, prev, status);
}
static void netlink_increment_head(struct netlink_ring *ring)
{
ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}
static void netlink_forward_ring(struct netlink_ring *ring)
{
unsigned int head = ring->head, pos = head;
const struct nl_mmap_hdr *hdr;
do {
hdr = __netlink_lookup_frame(ring, pos);
if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
break;
if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
break;
netlink_increment_head(ring);
} while (ring->head != head);
}
static bool netlink_dump_space(struct netlink_sock *nlk)
{
struct netlink_ring *ring = &nlk->rx_ring;
struct nl_mmap_hdr *hdr;
unsigned int n;
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
if (hdr == NULL)
return false;
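	/* Also probe the frame half a ring ahead of head: a dump may only
	 * continue while at least half of the RX ring is still unused. */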
n = ring->head + ring->frame_max / 2;
if (n > ring->frame_max)
n -= ring->frame_max;
hdr = __netlink_lookup_frame(ring, n);
return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}
static unsigned int netlink_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
unsigned int mask;
int err;
if (nlk->rx_ring.pg_vec != NULL) {
/* Memory mapped sockets don't call recvmsg(), so flow control
* for dumps is performed here. A dump is allowed to continue
* if at least half the ring is unused.
*/
while (nlk->cb_running && netlink_dump_space(nlk)) {
err = netlink_dump(sk);
if (err < 0) {
sk->sk_err = err;
sk->sk_error_report(sk);
break;
}
}
netlink_rcv_wake(sk);
}
mask = datagram_poll(file, sock, wait);
spin_lock_bh(&sk->sk_receive_queue.lock);
if (nlk->rx_ring.pg_vec) {
netlink_forward_ring(&nlk->rx_ring);
if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
spin_lock_bh(&sk->sk_write_queue.lock);
if (nlk->tx_ring.pg_vec) {
if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
mask |= POLLOUT | POLLWRNORM;
}
spin_unlock_bh(&sk->sk_write_queue.lock);
return mask;
}
static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}
static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
struct netlink_ring *ring,
struct nl_mmap_hdr *hdr)
{
unsigned int size;
void *data;
size = ring->frame_size - NL_MMAP_HDRLEN;
data = (void *)hdr + NL_MMAP_HDRLEN;
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
skb->end = skb->tail + size;
skb->len = 0;
skb->destructor = netlink_skb_destructor;
NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
NETLINK_CB(skb).sk = sk;
}
static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
u32 dst_portid, u32 dst_group,
struct sock_iocb *siocb)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ring *ring;
struct nl_mmap_hdr *hdr;
struct sk_buff *skb;
unsigned int maxlen;
bool excl = true;
int err = 0, len = 0;
/* Netlink messages are validated by the receiver before processing.
* In order to avoid userspace changing the contents of the message
* after validation, the socket and the ring may only be used by a
* single process, otherwise we fall back to copying.
*/
if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
atomic_read(&nlk->mapped) > 1)
excl = false;
mutex_lock(&nlk->pg_vec_lock);
ring = &nlk->tx_ring;
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
do {
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
if (hdr == NULL) {
if (!(msg->msg_flags & MSG_DONTWAIT) &&
atomic_read(&nlk->tx_ring.pending))
schedule();
continue;
}
if (hdr->nm_len > maxlen) {
err = -EINVAL;
goto out;
}
netlink_frame_flush_dcache(hdr);
if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
skb = alloc_skb_head(GFP_KERNEL);
if (skb == NULL) {
err = -ENOBUFS;
goto out;
}
sock_hold(sk);
netlink_ring_setup_skb(skb, sk, ring, hdr);
NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
__skb_put(skb, hdr->nm_len);
netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
atomic_inc(&ring->pending);
} else {
skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
if (skb == NULL) {
err = -ENOBUFS;
goto out;
}
__skb_put(skb, hdr->nm_len);
memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
}
netlink_increment_head(ring);
NETLINK_CB(skb).portid = nlk->portid;
NETLINK_CB(skb).dst_group = dst_group;
NETLINK_CB(skb).creds = siocb->scm->creds;
err = security_netlink_send(sk, skb);
if (err) {
kfree_skb(skb);
goto out;
}
if (unlikely(dst_group)) {
atomic_inc(&skb->users);
netlink_broadcast(sk, skb, dst_portid, dst_group,
GFP_KERNEL);
}
err = netlink_unicast(sk, skb, dst_portid,
msg->msg_flags & MSG_DONTWAIT);
if (err < 0)
goto out;
len += err;
} while (hdr != NULL ||
(!(msg->msg_flags & MSG_DONTWAIT) &&
atomic_read(&nlk->tx_ring.pending)));
if (len > 0)
err = len;
out:
mutex_unlock(&nlk->pg_vec_lock);
return err;
}
static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
struct nl_mmap_hdr *hdr;
hdr = netlink_mmap_hdr(skb);
hdr->nm_len = skb->len;
hdr->nm_group = NETLINK_CB(skb).dst_group;
hdr->nm_pid = NETLINK_CB(skb).creds.pid;
hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
netlink_frame_flush_dcache(hdr);
netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
kfree_skb(skb);
}
static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ring *ring = &nlk->rx_ring;
struct nl_mmap_hdr *hdr;
spin_lock_bh(&sk->sk_receive_queue.lock);
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
if (hdr == NULL) {
spin_unlock_bh(&sk->sk_receive_queue.lock);
kfree_skb(skb);
netlink_overrun(sk);
return;
}
netlink_increment_head(ring);
__skb_queue_tail(&sk->sk_receive_queue, skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
hdr->nm_len = skb->len;
hdr->nm_group = NETLINK_CB(skb).dst_group;
hdr->nm_pid = NETLINK_CB(skb).creds.pid;
hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}
#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb) false
#define netlink_rx_is_mmaped(sk) false
#define netlink_tx_is_mmaped(sk) false
#define netlink_mmap sock_no_mmap
#define netlink_poll datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
#endif /* CONFIG_NETLINK_MMAP */
static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
struct nl_mmap_hdr *hdr;
struct netlink_ring *ring;
struct sock *sk;
/* If a packet from the kernel to userspace was freed because of an
* error without being delivered to userspace, the kernel must reset
* the status. In the direction userspace to kernel, the status is
* always reset here after the packet was processed and freed.
*/
if (netlink_skb_is_mmaped(skb)) {
hdr = netlink_mmap_hdr(skb);
sk = NETLINK_CB(skb).sk;
if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
ring = &nlk_sk(sk)->tx_ring;
} else {
if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
hdr->nm_len = 0;
netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
}
ring = &nlk_sk(sk)->rx_ring;
}
WARN_ON(atomic_read(&ring->pending) == 0);
atomic_dec(&ring->pending);
sock_put(sk);
skb->head = NULL;
}
#endif
if (is_vmalloc_addr(skb->head)) {
if (!skb->cloned ||
!atomic_dec_return(&(skb_shinfo(skb)->dataref)))
vfree(skb->head);
skb->head = NULL;
}
if (skb->sk != NULL)
sock_rfree(skb);
}
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
WARN_ON(skb->sk != NULL);
skb->sk = sk;
skb->destructor = netlink_skb_destructor;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_charge(sk, skb->truesize);
}
static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (nlk->cb_running) {
if (nlk->cb.done)
nlk->cb.done(&nlk->cb);
module_put(nlk->cb.module);
kfree_skb(nlk->cb.skb);
}
skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
if (1) {
struct nl_mmap_req req;
memset(&req, 0, sizeof(req));
if (nlk->rx_ring.pg_vec)
netlink_set_ring(sk, &req, true, false);
memset(&req, 0, sizeof(req));
if (nlk->tx_ring.pg_vec)
netlink_set_ring(sk, &req, true, true);
}
#endif /* CONFIG_NETLINK_MMAP */
if (!sock_flag(sk, SOCK_DEAD)) {
printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(nlk_sk(sk)->groups);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
* SMP. Look, when several writers sleep and reader wakes them up, all but one
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
* this, _but_ remember, it adds useless work on UP machines.
*/
void netlink_table_grab(void)
__acquires(nl_table_lock)
{
might_sleep();
write_lock_irq(&nl_table_lock);
if (atomic_read(&nl_table_users)) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&nl_table_wait, &wait);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&nl_table_users) == 0)
break;
write_unlock_irq(&nl_table_lock);
schedule();
write_lock_irq(&nl_table_lock);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nl_table_wait, &wait);
}
}
void netlink_table_ungrab(void)
__releases(nl_table_lock)
{
write_unlock_irq(&nl_table_lock);
wake_up(&nl_table_wait);
}
static inline void
netlink_lock_table(void)
{
/* read_lock() synchronizes us to netlink_table_grab */
read_lock(&nl_table_lock);
atomic_inc(&nl_table_users);
read_unlock(&nl_table_lock);
}
static inline void
netlink_unlock_table(void)
{
if (atomic_dec_and_test(&nl_table_users))
wake_up(&nl_table_wait);
}
static bool netlink_compare(struct net *net, struct sock *sk)
{
return net_eq(sock_net(sk), net);
}
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
struct netlink_table *table = &nl_table[protocol];
struct nl_portid_hash *hash = &table->hash;
struct hlist_head *head;
struct sock *sk;
read_lock(&nl_table_lock);
head = nl_portid_hashfn(hash, portid);
sk_for_each(sk, head) {
if (table->compare(net, sk) &&
(nlk_sk(sk)->portid == portid)) {
sock_hold(sk);
goto found;
}
}
sk = NULL;
found:
read_unlock(&nl_table_lock);
return sk;
}
static struct hlist_head *nl_portid_hash_zalloc(size_t size)
{
if (size <= PAGE_SIZE)
return kzalloc(size, GFP_ATOMIC);
else
return (struct hlist_head *)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
}
static void nl_portid_hash_free(struct hlist_head *table, size_t size)
{
if (size <= PAGE_SIZE)
kfree(table);
else
free_pages((unsigned long)table, get_order(size));
}
static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
{
unsigned int omask, mask, shift;
size_t osize, size;
struct hlist_head *otable, *table;
int i;
omask = mask = hash->mask;
osize = size = (mask + 1) * sizeof(*table);
shift = hash->shift;
if (grow) {
if (++shift > hash->max_shift)
return 0;
mask = mask * 2 + 1;
size *= 2;
}
table = nl_portid_hash_zalloc(size);
if (!table)
return 0;
otable = hash->table;
hash->table = table;
hash->mask = mask;
hash->shift = shift;
get_random_bytes(&hash->rnd, sizeof(hash->rnd));
for (i = 0; i <= omask; i++) {
struct sock *sk;
struct hlist_node *tmp;
sk_for_each_safe(sk, tmp, &otable[i])
__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
}
nl_portid_hash_free(otable, osize);
hash->rehash_time = jiffies + 10 * 60 * HZ;
return 1;
}
static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
{
int avg = hash->entries >> hash->shift;
if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
return 1;
if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
nl_portid_hash_rehash(hash, 0);
return 1;
}
return 0;
}
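/*
 * Illustrative trigger for the grow path above: with shift = 4 the hash
 * has 16 buckets, so avg = entries >> 4 first exceeds 1 when the 32nd
 * entry is inserted, and the table is doubled to 32 buckets.
 */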
static const struct proto_ops netlink_ops;
static void
netlink_update_listeners(struct sock *sk)
{
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
unsigned long mask;
unsigned int i;
struct listeners *listeners;
listeners = nl_deref_protected(tbl->listeners);
if (!listeners)
return;
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
sk_for_each_bound(sk, &tbl->mc_list) {
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
struct netlink_table *table = &nl_table[sk->sk_protocol];
struct nl_portid_hash *hash = &table->hash;
struct hlist_head *head;
int err = -EADDRINUSE;
struct sock *osk;
int len;
netlink_table_grab();
head = nl_portid_hashfn(hash, portid);
len = 0;
sk_for_each(osk, head) {
if (table->compare(net, osk) &&
(nlk_sk(osk)->portid == portid))
break;
len++;
}
if (osk)
goto err;
err = -EBUSY;
if (nlk_sk(sk)->portid)
goto err;
err = -ENOMEM;
if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
goto err;
if (len && nl_portid_hash_dilute(hash, len))
head = nl_portid_hashfn(hash, portid);
hash->entries++;
nlk_sk(sk)->portid = portid;
sk_add_node(sk, head);
err = 0;
err:
netlink_table_ungrab();
return err;
}
static void netlink_remove(struct sock *sk)
{
netlink_table_grab();
if (sk_del_node_init(sk))
nl_table[sk->sk_protocol].hash.entries--;
if (nlk_sk(sk)->subscriptions)
__sk_del_bind_node(sk);
netlink_table_ungrab();
}
static struct proto netlink_proto = {
.name = "NETLINK",
.owner = THIS_MODULE,
.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
struct mutex *cb_mutex, int protocol)
{
struct sock *sk;
struct netlink_sock *nlk;
sock->ops = &netlink_ops;
sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
nlk = nlk_sk(sk);
if (cb_mutex) {
nlk->cb_mutex = cb_mutex;
} else {
nlk->cb_mutex = &nlk->cb_def_mutex;
mutex_init(nlk->cb_mutex);
}
init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
mutex_init(&nlk->pg_vec_lock);
#endif
sk->sk_destruct = netlink_sock_destruct;
sk->sk_protocol = protocol;
return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
void (*bind)(int group);
int err = 0;
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
if (protocol < 0 || protocol >= MAX_LINKS)
return -EPROTONOSUPPORT;
netlink_lock_table();
#ifdef CONFIG_MODULES
if (!nl_table[protocol].registered) {
netlink_unlock_table();
request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
netlink_lock_table();
}
#endif
if (nl_table[protocol].registered &&
try_module_get(nl_table[protocol].module))
module = nl_table[protocol].module;
else
err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
bind = nl_table[protocol].bind;
netlink_unlock_table();
if (err < 0)
goto out;
err = __netlink_create(net, sock, cb_mutex, protocol);
if (err < 0)
goto out_module;
local_bh_disable();
sock_prot_inuse_add(net, &netlink_proto, 1);
local_bh_enable();
nlk = nlk_sk(sock->sk);
nlk->module = module;
nlk->netlink_bind = bind;
out:
return err;
out_module:
module_put(module);
goto out;
}
static int netlink_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk;
if (!sk)
return 0;
netlink_remove(sk);
sock_orphan(sk);
nlk = nlk_sk(sk);
/*
* OK. Socket is unlinked, any packets that arrive now
* will be purged.
*/
sock->sk = NULL;
wake_up_interruptible_all(&nlk->wait);
skb_queue_purge(&sk->sk_write_queue);
if (nlk->portid) {
struct netlink_notify n = {
.net = sock_net(sk),
.protocol = sk->sk_protocol,
.portid = nlk->portid,
};
atomic_notifier_call_chain(&netlink_chain,
NETLINK_URELEASE, &n);
}
module_put(nlk->module);
netlink_table_grab();
if (netlink_is_kernel(sk)) {
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
struct listeners *old;
old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
kfree_rcu(old, rcu);
nl_table[sk->sk_protocol].module = NULL;
nl_table[sk->sk_protocol].bind = NULL;
nl_table[sk->sk_protocol].flags = 0;
nl_table[sk->sk_protocol].registered = 0;
}
} else if (nlk->subscriptions) {
netlink_update_listeners(sk);
}
netlink_table_ungrab();
kfree(nlk->groups);
nlk->groups = NULL;
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
local_bh_enable();
sock_put(sk);
return 0;
}
static int netlink_autobind(struct socket *sock)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct netlink_table *table = &nl_table[sk->sk_protocol];
struct nl_portid_hash *hash = &table->hash;
struct hlist_head *head;
struct sock *osk;
s32 portid = task_tgid_vnr(current);
int err;
static s32 rover = -4097;
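	/* First try the caller's thread group id as the portid; on a
	 * collision, hand out negative values from the static rover. */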
retry:
cond_resched();
netlink_table_grab();
head = nl_portid_hashfn(hash, portid);
sk_for_each(osk, head) {
if (!table->compare(net, osk))
continue;
if (nlk_sk(osk)->portid == portid) {
/* Bind collision, search negative portid values. */
portid = rover--;
if (rover > -4097)
rover = -4097;
netlink_table_ungrab();
goto retry;
}
}
netlink_table_ungrab();
err = netlink_insert(sk, net, portid);
if (err == -EADDRINUSE)
goto retry;
/* If 2 threads race to autobind, that is fine. */
if (err == -EBUSY)
err = 0;
return err;
}
static inline int netlink_capable(const struct socket *sock, unsigned int flag)
{
return (nl_table[sock->sk->sk_protocol].flags & flag) ||
ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}
static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (nlk->subscriptions && !subscriptions)
__sk_del_bind_node(sk);
else if (!nlk->subscriptions && subscriptions)
sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
unsigned int groups;
unsigned long *new_groups;
int err = 0;
netlink_table_grab();
groups = nl_table[sk->sk_protocol].groups;
if (!nl_table[sk->sk_protocol].registered) {
err = -ENOENT;
goto out_unlock;
}
if (nlk->ngroups >= groups)
goto out_unlock;
new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
if (new_groups == NULL) {
err = -ENOMEM;
goto out_unlock;
}
memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
nlk->groups = new_groups;
nlk->ngroups = groups;
out_unlock:
netlink_table_ungrab();
return err;
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
int err;
if (addr_len < sizeof(struct sockaddr_nl))
return -EINVAL;
if (nladdr->nl_family != AF_NETLINK)
return -EINVAL;
/* Only superuser is allowed to listen multicasts */
if (nladdr->nl_groups) {
if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
return -EPERM;
err = netlink_realloc_groups(sk);
if (err)
return err;
}
if (nlk->portid) {
if (nladdr->nl_pid != nlk->portid)
return -EINVAL;
} else {
err = nladdr->nl_pid ?
netlink_insert(sk, net, nladdr->nl_pid) :
netlink_autobind(sock);
if (err)
return err;
}
if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
return 0;
netlink_table_grab();
netlink_update_subscriptions(sk, nlk->subscriptions +
hweight32(nladdr->nl_groups) -
hweight32(nlk->groups[0]));
nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
netlink_update_listeners(sk);
netlink_table_ungrab();
if (nlk->netlink_bind && nlk->groups[0]) {
int i;
for (i=0; i<nlk->ngroups; i++) {
if (test_bit(i, nlk->groups))
nlk->netlink_bind(i);
}
}
return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
int err = 0;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
if (alen < sizeof(addr->sa_family))
return -EINVAL;
if (addr->sa_family == AF_UNSPEC) {
sk->sk_state = NETLINK_UNCONNECTED;
nlk->dst_portid = 0;
nlk->dst_group = 0;
return 0;
}
if (addr->sa_family != AF_NETLINK)
return -EINVAL;
/* Only superuser is allowed to send multicasts */
if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
if (!nlk->portid)
err = netlink_autobind(sock);
if (err == 0) {
sk->sk_state = NETLINK_CONNECTED;
nlk->dst_portid = nladdr->nl_pid;
nlk->dst_group = ffs(nladdr->nl_groups);
}
return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
int *addr_len, int peer)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
nladdr->nl_family = AF_NETLINK;
nladdr->nl_pad = 0;
*addr_len = sizeof(*nladdr);
if (peer) {
nladdr->nl_pid = nlk->dst_portid;
nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
} else {
nladdr->nl_pid = nlk->portid;
nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
}
return 0;
}
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
struct sock *sock;
struct netlink_sock *nlk;
sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
if (!sock)
return ERR_PTR(-ECONNREFUSED);
/* Don't bother queuing skb if kernel socket has no input function */
nlk = nlk_sk(sock);
if (sock->sk_state == NETLINK_CONNECTED &&
nlk->dst_portid != nlk_sk(ssk)->portid) {
sock_put(sock);
return ERR_PTR(-ECONNREFUSED);
}
return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct sock *sock;
if (!S_ISSOCK(inode->i_mode))
return ERR_PTR(-ENOTSOCK);
sock = SOCKET_I(inode)->sk;
if (sock->sk_family != AF_NETLINK)
return ERR_PTR(-EINVAL);
sock_hold(sock);
return sock;
}
static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
int broadcast)
{
struct sk_buff *skb;
void *data;
if (size <= NLMSG_GOODSIZE || broadcast)
return alloc_skb(size, GFP_KERNEL);
size = SKB_DATA_ALIGN(size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = vmalloc(size);
if (data == NULL)
return NULL;
skb = build_skb(data, size);
if (skb == NULL)
vfree(data);
else {
skb->head_frag = 0;
skb->destructor = netlink_skb_destructor;
}
return skb;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk)
{
struct netlink_sock *nlk;
nlk = nlk_sk(sk);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(NETLINK_CONGESTED, &nlk->state)) &&
!netlink_skb_is_mmaped(skb)) {
DECLARE_WAITQUEUE(wait, current);
if (!*timeo) {
if (!ssk || netlink_is_kernel(ssk))
netlink_overrun(sk);
sock_put(sk);
kfree_skb(skb);
return -EAGAIN;
}
__set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&nlk->wait, &wait);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(NETLINK_CONGESTED, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
*timeo = schedule_timeout(*timeo);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nlk->wait, &wait);
sock_put(sk);
if (signal_pending(current)) {
kfree_skb(skb);
return sock_intr_errno(*timeo);
}
return 1;
}
netlink_skb_set_owner_r(skb, sk);
return 0;
}
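/*
 * Sketch of the expected caller pattern (netlink_unicast() below is the
 * canonical in-tree user):
 *
 *	retry:
 *		sk = netlink_getsockbyportid(ssk, portid);
 *		...
 *		err = netlink_attachskb(sk, skb, &timeo, ssk);
 *		if (err == 1)
 *			goto retry;
 *		if (err)
 *			return err;
 *		return netlink_sendskb(sk, skb);
 */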
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
int len = skb->len;
netlink_deliver_tap(skb);
#ifdef CONFIG_NETLINK_MMAP
if (netlink_skb_is_mmaped(skb))
netlink_queue_mmaped_skb(sk, skb);
else if (netlink_rx_is_mmaped(sk))
netlink_ring_set_copied(sk, skb);
else
#endif /* CONFIG_NETLINK_MMAP */
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, len);
return len;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
int len = __netlink_sendskb(sk, skb);
sock_put(sk);
return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
kfree_skb(skb);
sock_put(sk);
}
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
int delta;
WARN_ON(skb->sk != NULL);
if (netlink_skb_is_mmaped(skb))
return skb;
delta = skb->end - skb->tail;
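	/* Only reallocate when the unused tail room amounts to at least
	 * half of the skb's truesize (and the head isn't vmalloc'ed). */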
if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
return skb;
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, allocation);
if (!nskb)
return skb;
consume_skb(skb);
skb = nskb;
}
if (!pskb_expand_head(skb, 0, -delta, allocation))
skb->truesize -= delta;
return skb;
}
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
struct sock *ssk)
{
int ret;
struct netlink_sock *nlk = nlk_sk(sk);
ret = -ECONNREFUSED;
if (nlk->netlink_rcv != NULL) {
/* We could do a netlink_deliver_tap(skb) here as well
* but since this is intended for the kernel only, we
* should rather let it stay under the hood.
*/
ret = skb->len;
netlink_skb_set_owner_r(skb, sk);
NETLINK_CB(skb).sk = ssk;
nlk->netlink_rcv(skb);
consume_skb(skb);
} else {
kfree_skb(skb);
}
sock_put(sk);
return ret;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
u32 portid, int nonblock)
{
struct sock *sk;
int err;
long timeo;
skb = netlink_trim(skb, gfp_any());
timeo = sock_sndtimeo(ssk, nonblock);
retry:
sk = netlink_getsockbyportid(ssk, portid);
if (IS_ERR(sk)) {
kfree_skb(skb);
return PTR_ERR(sk);
}
if (netlink_is_kernel(sk))
return netlink_unicast_kernel(sk, skb, ssk);
if (sk_filter(sk, skb)) {
err = skb->len;
kfree_skb(skb);
sock_put(sk);
return err;
}
err = netlink_attachskb(sk, skb, &timeo, ssk);
if (err == 1)
goto retry;
if (err)
return err;
return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
struct sock *sk = NULL;
struct sk_buff *skb;
struct netlink_ring *ring;
struct nl_mmap_hdr *hdr;
unsigned int maxlen;
sk = netlink_getsockbyportid(ssk, dst_portid);
if (IS_ERR(sk))
goto out;
ring = &nlk_sk(sk)->rx_ring;
/* fast-path without atomic ops for common case: non-mmaped receiver */
if (ring->pg_vec == NULL)
goto out_put;
skb = alloc_skb_head(gfp_mask);
if (skb == NULL)
goto err1;
spin_lock_bh(&sk->sk_receive_queue.lock);
/* check again under lock */
if (ring->pg_vec == NULL)
goto out_free;
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
if (maxlen < size)
goto out_free;
netlink_forward_ring(ring);
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
if (hdr == NULL)
goto err2;
netlink_ring_setup_skb(skb, sk, ring, hdr);
netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
atomic_inc(&ring->pending);
netlink_increment_head(ring);
spin_unlock_bh(&sk->sk_receive_queue.lock);
return skb;
err2:
kfree_skb(skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
netlink_overrun(sk);
err1:
sock_put(sk);
return NULL;
out_free:
kfree_skb(skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
sock_put(sk);
out:
#endif
return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
int res = 0;
struct listeners *listeners;
BUG_ON(!netlink_is_kernel(sk));
rcu_read_lock();
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!test_bit(NETLINK_CONGESTED, &nlk->state)) {
netlink_skb_set_owner_r(skb, sk);
__netlink_sendskb(sk, skb);
return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
}
return -1;
}
struct netlink_broadcast_data {
struct sock *exclude_sk;
struct net *net;
u32 portid;
u32 group;
int failure;
int delivery_failure;
int congested;
int delivered;
gfp_t allocation;
struct sk_buff *skb, *skb2;
int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
void *tx_data;
};
static int do_one_broadcast(struct sock *sk,
struct netlink_broadcast_data *p)
{
struct netlink_sock *nlk = nlk_sk(sk);
int val;
if (p->exclude_sk == sk)
goto out;
if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
!test_bit(p->group - 1, nlk->groups))
goto out;
if (!net_eq(sock_net(sk), p->net))
goto out;
if (p->failure) {
netlink_overrun(sk);
goto out;
}
sock_hold(sk);
if (p->skb2 == NULL) {
if (skb_shared(p->skb)) {
p->skb2 = skb_clone(p->skb, p->allocation);
} else {
p->skb2 = skb_get(p->skb);
/*
* skb ownership may have been set when
* delivered to a previous socket.
*/
skb_orphan(p->skb2);
}
}
if (p->skb2 == NULL) {
netlink_overrun(sk);
/* Clone failed. Notify ALL listeners. */
p->failure = 1;
if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
p->delivery_failure = 1;
} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
} else if (sk_filter(sk, p->skb2)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
netlink_overrun(sk);
if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
p->delivery_failure = 1;
} else {
p->congested |= val;
p->delivered = 1;
p->skb2 = NULL;
}
sock_put(sk);
out:
return 0;
}
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
u32 group, gfp_t allocation,
int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
void *filter_data)
{
struct net *net = sock_net(ssk);
struct netlink_broadcast_data info;
struct sock *sk;
skb = netlink_trim(skb, allocation);
info.exclude_sk = ssk;
info.net = net;
info.portid = portid;
info.group = group;
info.failure = 0;
info.delivery_failure = 0;
info.congested = 0;
info.delivered = 0;
info.allocation = allocation;
info.skb = skb;
info.skb2 = NULL;
info.tx_filter = filter;
info.tx_data = filter_data;
	/* While we sleep in clone, do not allow the socket list to change */
netlink_lock_table();
sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
do_one_broadcast(sk, &info);
consume_skb(skb);
netlink_unlock_table();
if (info.delivery_failure) {
kfree_skb(info.skb2);
return -ENOBUFS;
}
consume_skb(info.skb2);
if (info.delivered) {
if (info.congested && (allocation & __GFP_WAIT))
yield();
return 0;
}
return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
u32 group, gfp_t allocation)
{
return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
struct sock *exclude_sk;
u32 portid;
u32 group;
int code;
};
static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
struct netlink_sock *nlk = nlk_sk(sk);
int ret = 0;
if (sk == p->exclude_sk)
goto out;
if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
goto out;
if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
!test_bit(p->group - 1, nlk->groups))
goto out;
if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
ret = 1;
goto out;
}
sk->sk_err = p->code;
sk->sk_error_report(sk);
out:
return ret;
}
/**
* netlink_set_err - report error to broadcast listeners
* @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
* @portid: the PORTID of a process that we want to skip (if any)
* @group: the broadcast group that will notice the error
* @code: error code, must be negative (as usual in kernelspace)
*
* This function returns the number of broadcast listeners that have set the
* NETLINK_RECV_NO_ENOBUFS socket option.
*/
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
struct netlink_set_err_data info;
struct sock *sk;
int ret = 0;
info.exclude_sk = ssk;
info.portid = portid;
info.group = group;
/* sk->sk_err wants a positive error value */
info.code = -code;
read_lock(&nl_table_lock);
sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
ret += do_one_set_err(sk, &info);
read_unlock(&nl_table_lock);
return ret;
}
EXPORT_SYMBOL(netlink_set_err);
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
unsigned int group,
int is_new)
{
int old, new = !!is_new, subscriptions;
old = test_bit(group - 1, nlk->groups);
subscriptions = nlk->subscriptions - old + new;
if (new)
__set_bit(group - 1, nlk->groups);
else
__clear_bit(group - 1, nlk->groups);
netlink_update_subscriptions(&nlk->sk, subscriptions);
netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
unsigned int val = 0;
int err;
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
optlen >= sizeof(int) &&
get_user(val, (unsigned int __user *)optval))
return -EFAULT;
switch (optname) {
case NETLINK_PKTINFO:
if (val)
nlk->flags |= NETLINK_RECV_PKTINFO;
else
nlk->flags &= ~NETLINK_RECV_PKTINFO;
err = 0;
break;
case NETLINK_ADD_MEMBERSHIP:
case NETLINK_DROP_MEMBERSHIP: {
if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
return -EPERM;
err = netlink_realloc_groups(sk);
if (err)
return err;
if (!val || val - 1 >= nlk->ngroups)
return -EINVAL;
netlink_table_grab();
netlink_update_socket_mc(nlk, val,
optname == NETLINK_ADD_MEMBERSHIP);
netlink_table_ungrab();
if (nlk->netlink_bind)
nlk->netlink_bind(val);
err = 0;
break;
}
case NETLINK_BROADCAST_ERROR:
if (val)
nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
else
nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
err = 0;
break;
case NETLINK_NO_ENOBUFS:
if (val) {
nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
clear_bit(NETLINK_CONGESTED, &nlk->state);
wake_up_interruptible(&nlk->wait);
} else {
nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
}
err = 0;
break;
#ifdef CONFIG_NETLINK_MMAP
case NETLINK_RX_RING:
case NETLINK_TX_RING: {
struct nl_mmap_req req;
/* Rings might consume more memory than queue limits, require
* CAP_NET_ADMIN.
*/
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (optlen < sizeof(req))
return -EINVAL;
if (copy_from_user(&req, optval, sizeof(req)))
return -EFAULT;
err = netlink_set_ring(sk, &req, false,
optname == NETLINK_TX_RING);
break;
}
#endif /* CONFIG_NETLINK_MMAP */
default:
err = -ENOPROTOOPT;
}
return err;
}
static int netlink_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
int len, val, err;
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case NETLINK_PKTINFO:
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
if (put_user(len, optlen) ||
put_user(val, optval))
return -EFAULT;
err = 0;
break;
case NETLINK_BROADCAST_ERROR:
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
if (put_user(len, optlen) ||
put_user(val, optval))
return -EFAULT;
err = 0;
break;
case NETLINK_NO_ENOBUFS:
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
if (put_user(len, optlen) ||
put_user(val, optval))
return -EFAULT;
err = 0;
break;
default:
err = -ENOPROTOOPT;
}
return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
struct nl_pktinfo info;
info.group = NETLINK_CB(skb).dst_group;
put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *addr = msg->msg_name;
u32 dst_portid;
u32 dst_group;
struct sk_buff *skb;
int err;
struct scm_cookie scm;
if (msg->msg_flags&MSG_OOB)
return -EOPNOTSUPP;
if (NULL == siocb->scm)
siocb->scm = &scm;
err = scm_send(sock, msg, siocb->scm, true);
if (err < 0)
return err;
if (msg->msg_namelen) {
err = -EINVAL;
if (addr->nl_family != AF_NETLINK)
goto out;
dst_portid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
if ((dst_group || dst_portid) &&
!netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
goto out;
} else {
dst_portid = nlk->dst_portid;
dst_group = nlk->dst_group;
}
if (!nlk->portid) {
err = netlink_autobind(sock);
if (err)
goto out;
}
if (netlink_tx_is_mmaped(sk) &&
msg->msg_iov->iov_base == NULL) {
err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
siocb);
goto out;
}
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
skb = netlink_alloc_large_skb(len, dst_group);
if (skb == NULL)
goto out;
NETLINK_CB(skb).portid = nlk->portid;
NETLINK_CB(skb).dst_group = dst_group;
NETLINK_CB(skb).creds = siocb->scm->creds;
err = -EFAULT;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
goto out;
}
err = security_netlink_send(sk, skb);
if (err) {
kfree_skb(skb);
goto out;
}
if (dst_group) {
atomic_inc(&skb->users);
netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
}
err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
out:
scm_destroy(siocb->scm);
return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len,
int flags)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
int noblock = flags&MSG_DONTWAIT;
size_t copied;
struct sk_buff *skb, *data_skb;
int err, ret;
if (flags&MSG_OOB)
return -EOPNOTSUPP;
copied = 0;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (skb == NULL)
goto out;
data_skb = skb;
#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
if (unlikely(skb_shinfo(skb)->frag_list)) {
/*
* If this skb has a frag_list, then we will have to use the
* frag_list skb's data for compat tasks and the regular skb's
* data for normal (non-compat) tasks.
*
* If we need to send the compat skb, assign it to the
* 'data_skb' variable so that it will be used below for data
* copying. We keep 'skb' for everything else, including
* freeing both later.
*/
if (flags & MSG_CMSG_COMPAT)
data_skb = skb_shinfo(skb)->frag_list;
}
#endif
copied = data_skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(data_skb);
err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
if (msg->msg_name) {
struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
addr->nl_family = AF_NETLINK;
addr->nl_pad = 0;
addr->nl_pid = NETLINK_CB(skb).portid;
addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
msg->msg_namelen = sizeof(*addr);
}
if (nlk->flags & NETLINK_RECV_PKTINFO)
netlink_cmsg_recv_pktinfo(msg, skb);
if (NULL == siocb->scm) {
memset(&scm, 0, sizeof(scm));
siocb->scm = &scm;
}
siocb->scm->creds = *NETLINK_CREDS(skb);
if (flags & MSG_TRUNC)
copied = data_skb->len;
skb_free_datagram(sk, skb);
if (nlk->cb_running &&
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
sk->sk_err = ret;
sk->sk_error_report(sk);
}
}
scm_recv(sock, msg, siocb->scm, flags);
out:
netlink_rcv_wake(sk);
return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
BUG();
}
/*
* We export these functions to other modules. They provide a
* complete set of kernel non-blocking support for message
* queueing.
*/
struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
struct netlink_kernel_cfg *cfg)
{
struct socket *sock;
struct sock *sk;
struct netlink_sock *nlk;
struct listeners *listeners = NULL;
struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
unsigned int groups;
BUG_ON(!nl_table);
if (unit < 0 || unit >= MAX_LINKS)
return NULL;
if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
return NULL;
/*
* We only need a reference on the net from sk; we must not get_net
* it, and we cannot get and then put the net here either.  So we
* create the socket inside init_net and then move it to net.
*/
if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
goto out_sock_release_nosk;
sk = sock->sk;
sk_change_net(sk, net);
if (!cfg || cfg->groups < 32)
groups = 32;
else
groups = cfg->groups;
listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
goto out_sock_release;
sk->sk_data_ready = netlink_data_ready;
if (cfg && cfg->input)
nlk_sk(sk)->netlink_rcv = cfg->input;
if (netlink_insert(sk, net, 0))
goto out_sock_release;
nlk = nlk_sk(sk);
nlk->flags |= NETLINK_KERNEL_SOCKET;
netlink_table_grab();
if (!nl_table[unit].registered) {
nl_table[unit].groups = groups;
rcu_assign_pointer(nl_table[unit].listeners, listeners);
nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
if (cfg) {
nl_table[unit].bind = cfg->bind;
nl_table[unit].flags = cfg->flags;
if (cfg->compare)
nl_table[unit].compare = cfg->compare;
}
nl_table[unit].registered = 1;
} else {
kfree(listeners);
nl_table[unit].registered++;
}
netlink_table_ungrab();
return sk;
out_sock_release:
kfree(listeners);
netlink_kernel_release(sk);
return NULL;
out_sock_release_nosk:
sock_release(sock);
return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);
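/*
 * Illustrative sketch (hypothetical names, not from this file): most
 * users reach this through the netlink_kernel_create() wrapper, e.g.:
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		...
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.input	= my_input,
 *		.groups	= 32,
 *	};
 *	struct sock *sk = netlink_kernel_create(&init_net,
 *						MY_NETLINK_UNIT, &cfg);
 *
 * where MY_NETLINK_UNIT stands in for one of the NETLINK_* protocol
 * numbers below MAX_LINKS.
 */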
void
netlink_kernel_release(struct sock *sk)
{
sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
struct listeners *new, *old;
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
if (groups < 32)
groups = 32;
if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
if (!new)
return -ENOMEM;
old = nl_deref_protected(tbl->listeners);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);
kfree_rcu(old, rcu);
}
tbl->groups = groups;
return 0;
}
/**
* netlink_change_ngroups - change number of multicast groups
*
* This changes the number of multicast groups that are available
* on a certain netlink family. Note that it is not possible to
* change the number of groups to below 32. Also note that it does
* not implicitly call netlink_clear_multicast_users() when the
* number of groups is reduced.
*
* @sk: The kernel netlink socket, as returned by netlink_kernel_create().
* @groups: The new number of groups.
*/
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
int err;
netlink_table_grab();
err = __netlink_change_ngroups(sk, groups);
netlink_table_ungrab();
return err;
}
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
struct sock *sk;
struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
sk_for_each_bound(sk, &tbl->mc_list)
netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
/**
* netlink_clear_multicast_users - kick off multicast listeners
*
* This function removes all listeners from the given group.
* @ksk: The kernel netlink socket, as returned by
* netlink_kernel_create().
* @group: The multicast group to clear.
*/
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
netlink_table_grab();
__netlink_clear_multicast_users(ksk, group);
netlink_table_ungrab();
}
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
struct nlmsghdr *nlh;
int size = nlmsg_msg_size(len);
nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = portid;
nlh->nlmsg_seq = seq;
if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
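/*
 * Illustrative sketch (an assumption about typical callers, not from
 * this file): messages are normally built through the nlmsg_put()
 * wrapper, which checks skb tailroom before delegating to
 * __nlmsg_put():
 *
 *	struct nlmsghdr *nlh;
 *
 *	nlh = nlmsg_put(skb, portid, seq, RTM_NEWLINK,
 *			sizeof(struct ifinfomsg), 0);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *
 * The payload length passed in excludes the header; nlmsg_msg_size()
 * above adds NLMSG_HDRLEN back before the skb_put().
 */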
/*
* It looks a bit ugly.
* It would be better to create a kernel thread.
*/
static int netlink_dump(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_callback *cb;
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
int len, err = -ENOBUFS;
int alloc_size;
mutex_lock(nlk->cb_mutex);
if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
}
cb = &nlk->cb;
alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
if (!netlink_rx_is_mmaped(sk) &&
atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto errout_skb;
skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
if (!skb)
goto errout_skb;
netlink_skb_set_owner_r(skb, sk);
len = cb->dump(skb, cb);
if (len > 0) {
mutex_unlock(nlk->cb_mutex);
if (sk_filter(sk, skb))
kfree_skb(skb);
else
__netlink_sendskb(sk, skb);
return 0;
}
nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
if (!nlh)
goto errout_skb;
nl_dump_check_consistent(cb, nlh);
memcpy(nlmsg_data(nlh), &len, sizeof(len));
if (sk_filter(sk, skb))
kfree_skb(skb);
else
__netlink_sendskb(sk, skb);
if (cb->done)
cb->done(cb);
nlk->cb_running = false;
mutex_unlock(nlk->cb_mutex);
module_put(cb->module);
consume_skb(cb->skb);
return 0;
errout_skb:
mutex_unlock(nlk->cb_mutex);
kfree_skb(skb);
return err;
}
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control)
{
struct netlink_callback *cb;
struct sock *sk;
struct netlink_sock *nlk;
int ret;
/* Memory mapped dump requests need to be copied to avoid looping
* on the pending state in netlink_mmap_sendmsg() while the CB holds
* a reference to the skb.
*/
if (netlink_skb_is_mmaped(skb)) {
skb = skb_copy(skb, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
} else
atomic_inc(&skb->users);
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
if (sk == NULL) {
ret = -ECONNREFUSED;
goto error_free;
}
nlk = nlk_sk(sk);
mutex_lock(nlk->cb_mutex);
/* A dump is in progress... */
if (nlk->cb_running) {
ret = -EBUSY;
goto error_unlock;
}
/* take a reference on the module that cb->dump belongs to */
if (!try_module_get(control->module)) {
ret = -EPROTONOSUPPORT;
goto error_unlock;
}
cb = &nlk->cb;
memset(cb, 0, sizeof(*cb));
cb->dump = control->dump;
cb->done = control->done;
cb->nlh = nlh;
cb->data = control->data;
cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
cb->skb = skb;
nlk->cb_running = true;
mutex_unlock(nlk->cb_mutex);
ret = netlink_dump(sk);
sock_put(sk);
if (ret)
return ret;
/* We successfully started a dump; by returning -EINTR we
* signal not to send an ACK even if one was requested.
*/
return -EINTR;
error_unlock:
sock_put(sk);
mutex_unlock(nlk->cb_mutex);
error_free:
kfree_skb(skb);
return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
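/*
 * Illustrative sketch (hypothetical callbacks, not from this file):
 * a request handler typically starts a dump through the
 * netlink_dump_start() wrapper, which fills in control->module:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *			.done = my_done,
 *		};
 *		return netlink_dump_start(my_nl_sk, skb, nlh, &c);
 *	}
 *
 * The -EINTR return on success propagates to netlink_rcv_skb(),
 * which then skips the ACK for the request.
 */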
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
struct sk_buff *skb;
struct nlmsghdr *rep;
struct nlmsgerr *errmsg;
size_t payload = sizeof(*errmsg);
/* error messages get the original request appended */
if (err)
payload += nlmsg_len(nlh);
skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
NETLINK_CB(in_skb).portid, GFP_KERNEL);
if (!skb) {
struct sock *sk;
sk = netlink_lookup(sock_net(in_skb->sk),
in_skb->sk->sk_protocol,
NETLINK_CB(in_skb).portid);
if (sk) {
sk->sk_err = ENOBUFS;
sk->sk_error_report(sk);
sock_put(sk);
}
return;
}
rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
NLMSG_ERROR, payload, 0);
errmsg = nlmsg_data(rep);
errmsg->error = err;
memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
struct nlmsghdr *))
{
struct nlmsghdr *nlh;
int err;
while (skb->len >= nlmsg_total_size(0)) {
int msglen;
nlh = nlmsg_hdr(skb);
err = 0;
if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
return 0;
/* Only requests are handled by the kernel */
if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
goto ack;
/* Skip control messages */
if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
goto ack;
err = cb(skb, nlh);
if (err == -EINTR)
goto skip;
ack:
if (nlh->nlmsg_flags & NLM_F_ACK || err)
netlink_ack(skb, nlh, err);
skip:
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
}
return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
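/*
 * Illustrative sketch (hypothetical names, not taken from this file):
 * a protocol's input callback usually feeds every skb through
 * netlink_rcv_skb() with a per-message handler that dispatches on
 * nlh->nlmsg_type and returns 0 or a negative errno:
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_netlink_rcv(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *	}
 *
 * netlink_rcv_skb() then takes care of NLM_F_ACK and error replies.
 */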
/**
* nlmsg_notify - send a notification netlink message
* @sk: netlink socket to use
* @skb: notification message
* @portid: destination netlink portid for reports or 0
* @group: destination multicast group or 0
* @report: 1 to report back, 0 to disable
* @flags: allocation flags
*/
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
unsigned int group, int report, gfp_t flags)
{
int err = 0;
if (group) {
int exclude_portid = 0;
if (report) {
atomic_inc(&skb->users);
exclude_portid = portid;
}
/* errors are reported via the destination sk->sk_err; delivery
* errors are propagated when NETLINK_BROADCAST_ERROR is set */
err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
}
if (report) {
int err2;
err2 = nlmsg_unicast(sk, skb, portid);
if (!err || err == -ESRCH)
err = err2;
}
return err;
}
EXPORT_SYMBOL(nlmsg_notify);
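/*
 * Illustrative sketch (an assumption, not from this file): an
 * rtnetlink-style notification, echoing back to the requester when
 * NLM_F_ECHO was set on the triggering request:
 *
 *	int report = nlh->nlmsg_flags & NLM_F_ECHO;
 *
 *	err = nlmsg_notify(sk, skb, portid, RTNLGRP_LINK,
 *			   report, GFP_KERNEL);
 */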
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
struct seq_net_private p;
int link;
int hash_idx;
};
static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
struct nl_seq_iter *iter = seq->private;
int i, j;
struct sock *s;
loff_t off = 0;
for (i = 0; i < MAX_LINKS; i++) {
struct nl_portid_hash *hash = &nl_table[i].hash;
for (j = 0; j <= hash->mask; j++) {
sk_for_each(s, &hash->table[j]) {
if (sock_net(s) != seq_file_net(seq))
continue;
if (off == pos) {
iter->link = i;
iter->hash_idx = j;
return s;
}
++off;
}
}
}
return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(nl_table_lock)
{
read_lock(&nl_table_lock);
return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock *s;
struct nl_seq_iter *iter;
struct net *net;
int i, j;
++*pos;
if (v == SEQ_START_TOKEN)
return netlink_seq_socket_idx(seq, 0);
net = seq_file_net(seq);
iter = seq->private;
s = v;
do {
s = sk_next(s);
} while (s && !nl_table[s->sk_protocol].compare(net, s));
if (s)
return s;
i = iter->link;
j = iter->hash_idx + 1;
do {
struct nl_portid_hash *hash = &nl_table[i].hash;
for (; j <= hash->mask; j++) {
s = sk_head(&hash->table[j]);
while (s && !nl_table[s->sk_protocol].compare(net, s))
s = sk_next(s);
if (s) {
iter->link = i;
iter->hash_idx = j;
return s;
}
}
j = 0;
} while (++i < MAX_LINKS);
return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
__releases(nl_table_lock)
{
read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"sk Eth Pid Groups "
"Rmem Wmem Dump Locks Drops Inode\n");
} else {
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
s,
s->sk_protocol,
nlk->portid,
nlk->groups ? (u32)nlk->groups[0] : 0,
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
nlk->cb_running,
atomic_read(&s->sk_refcnt),
atomic_read(&s->sk_drops),
sock_i_ino(s)
);
}
return 0;
}
static const struct seq_operations netlink_seq_ops = {
.start = netlink_seq_start,
.next = netlink_seq_next,
.stop = netlink_seq_stop,
.show = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &netlink_seq_ops,
sizeof(struct nl_seq_iter));
}
static const struct file_operations netlink_seq_fops = {
.owner = THIS_MODULE,
.open = netlink_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
#endif
int netlink_register_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);
int netlink_unregister_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
static const struct proto_ops netlink_ops = {
.family = PF_NETLINK,
.owner = THIS_MODULE,
.release = netlink_release,
.bind = netlink_bind,
.connect = netlink_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = netlink_getname,
.poll = netlink_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = netlink_setsockopt,
.getsockopt = netlink_getsockopt,
.sendmsg = netlink_sendmsg,
.recvmsg = netlink_recvmsg,
.mmap = netlink_mmap,
.sendpage = sock_no_sendpage,
};
static const struct net_proto_family netlink_family_ops = {
.family = PF_NETLINK,
.create = netlink_create,
.owner = THIS_MODULE, /* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
return -ENOMEM;
#endif
return 0;
}
static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
remove_proc_entry("netlink", net->proc_net);
#endif
}
static void __init netlink_add_usersock_entry(void)
{
struct listeners *listeners;
int groups = 32;
listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
netlink_table_grab();
nl_table[NETLINK_USERSOCK].groups = groups;
rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
netlink_table_ungrab();
}
static struct pernet_operations __net_initdata netlink_net_ops = {
.init = netlink_net_init,
.exit = netlink_net_exit,
};
static int __init netlink_proto_init(void)
{
int i;
unsigned long limit;
unsigned int order;
int err = proto_register(&netlink_proto, 0);
if (err != 0)
goto out;
BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
if (!nl_table)
goto panic;
if (totalram_pages >= (128 * 1024))
limit = totalram_pages >> (21 - PAGE_SHIFT);
else
limit = totalram_pages >> (23 - PAGE_SHIFT);
order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
limit = (1UL << order) / sizeof(struct hlist_head);
order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
for (i = 0; i < MAX_LINKS; i++) {
struct nl_portid_hash *hash = &nl_table[i].hash;
hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
if (!hash->table) {
while (i-- > 0)
nl_portid_hash_free(nl_table[i].hash.table,
1 * sizeof(*hash->table));
kfree(nl_table);
goto panic;
}
hash->max_shift = order;
hash->shift = 0;
hash->mask = 0;
hash->rehash_time = jiffies;
nl_table[i].compare = netlink_compare;
}
INIT_LIST_HEAD(&netlink_tap_all);
netlink_add_usersock_entry();
sock_register(&netlink_family_ops);
register_pernet_subsys(&netlink_net_ops);
/* The netlink device handler may be needed early. */
rtnetlink_init();
out:
return err;
panic:
panic("netlink_init: Cannot allocate nl_table\n");
}
core_initcall(netlink_proto_init);
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5845_21 |
crossvul-cpp_data_good_5737_0 | /* -*- linux-c -*-
* sysctl_net.c: sysctl interface to net subsystem.
*
* Begun April 1, 1996, Mike Shaver.
* Added /proc/sys/net directories for each protocol family. [MS]
*
* Revision 1.2 1996/05/08 20:24:40 shaver
* Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and
* NET_IPV4_IP_FORWARD.
*
*
*/
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#ifdef CONFIG_INET
#include <net/ip.h>
#endif
#ifdef CONFIG_NET
#include <linux/if_ether.h>
#endif
static struct ctl_table_set *
net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces)
{
return &namespaces->net_ns->sysctls;
}
static int is_seen(struct ctl_table_set *set)
{
return &current->nsproxy->net_ns->sysctls == set;
}
/* Return standard mode bits for table entry. */
static int net_ctl_permissions(struct ctl_table_header *head,
struct ctl_table *table)
{
struct net *net = container_of(head->set, struct net, sysctls);
kuid_t root_uid = make_kuid(net->user_ns, 0);
kgid_t root_gid = make_kgid(net->user_ns, 0);
/* Allow network administrator to have same access as root. */
if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
uid_eq(root_uid, current_euid())) {
int mode = (table->mode >> 6) & 7;
return (mode << 6) | (mode << 3) | mode;
}
/* Allow netns root group to have the same access as the root group */
if (in_egroup_p(root_gid)) {
int mode = (table->mode >> 3) & 7;
return (mode << 3) | mode;
}
return table->mode;
}
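/*
 * Worked example (added for illustration): for a table entry with
 * mode 0644, a CAP_NET_ADMIN caller gets (6 << 6) | (6 << 3) | 6 =
 * 0666, i.e. the owner bits replicated to group and other, while a
 * caller in the netns root group gets (4 << 3) | 4 = 044.
 */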
static struct ctl_table_root net_sysctl_root = {
.lookup = net_ctl_header_lookup,
.permissions = net_ctl_permissions,
};
static int __net_init sysctl_net_init(struct net *net)
{
setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
return 0;
}
static void __net_exit sysctl_net_exit(struct net *net)
{
retire_sysctl_set(&net->sysctls);
}
static struct pernet_operations sysctl_pernet_ops = {
.init = sysctl_net_init,
.exit = sysctl_net_exit,
};
static struct ctl_table_header *net_header;
__init int net_sysctl_init(void)
{
static struct ctl_table empty[1];
int ret = -ENOMEM;
/* Avoid limitations in the sysctl implementation by
* registering "/proc/sys/net" as an empty directory not in a
* network namespace.
*/
net_header = register_sysctl("net", empty);
if (!net_header)
goto out;
ret = register_pernet_subsys(&sysctl_pernet_ops);
if (ret)
goto out;
register_sysctl_root(&net_sysctl_root);
out:
return ret;
}
struct ctl_table_header *register_net_sysctl(struct net *net,
const char *path, struct ctl_table *table)
{
return __register_sysctl_table(&net->sysctls, path, table);
}
EXPORT_SYMBOL_GPL(register_net_sysctl);
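/*
 * Illustrative sketch (hypothetical table, not from this file): a
 * per-netns subsystem registers its knobs like so, typically from its
 * pernet init hook:
 *
 *	static struct ctl_table my_table[] = {
 *		{
 *			.procname	= "my_knob",
 *			.data		= &my_value,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *		{ }
 *	};
 *
 *	hdr = register_net_sysctl(net, "net/mysub", my_table);
 *	if (!hdr)
 *		return -ENOMEM;
 */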
void unregister_net_sysctl_table(struct ctl_table_header *header)
{
unregister_sysctl_table(header);
}
EXPORT_SYMBOL_GPL(unregister_net_sysctl_table);
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5737_0 |
crossvul-cpp_data_good_366_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP IIIII CCCC TTTTT %
% P P I C T %
% PPPP I C T %
% P I C T %
% P IIIII CCCC T %
% %
% %
% Read/Write Apple Macintosh QuickDraw/PICT Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/resource_.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
ImageMagick Macintosh PICT Methods.
*/
typedef struct _PICTCode
{
const char
*name;
ssize_t
length;
const char
*description;
} PICTCode;
typedef struct _PICTPixmap
{
short
version,
pack_type;
size_t
pack_size,
horizontal_resolution,
vertical_resolution;
short
pixel_type,
bits_per_pixel,
component_count,
component_size;
size_t
plane_bytes,
table,
reserved;
} PICTPixmap;
typedef struct _PICTRectangle
{
short
top,
left,
bottom,
right;
} PICTRectangle;
static const PICTCode
codes[] =
{
/* 0x00 */ { "NOP", 0, "nop" },
/* 0x01 */ { "Clip", 0, "clip" },
/* 0x02 */ { "BkPat", 8, "background pattern" },
/* 0x03 */ { "TxFont", 2, "text font (word)" },
/* 0x04 */ { "TxFace", 1, "text face (byte)" },
/* 0x05 */ { "TxMode", 2, "text mode (word)" },
/* 0x06 */ { "SpExtra", 4, "space extra (fixed point)" },
/* 0x07 */ { "PnSize", 4, "pen size (point)" },
/* 0x08 */ { "PnMode", 2, "pen mode (word)" },
/* 0x09 */ { "PnPat", 8, "pen pattern" },
/* 0x0a */ { "FillPat", 8, "fill pattern" },
/* 0x0b */ { "OvSize", 4, "oval size (point)" },
/* 0x0c */ { "Origin", 4, "dh, dv (word)" },
/* 0x0d */ { "TxSize", 2, "text size (word)" },
/* 0x0e */ { "FgColor", 4, "foreground color (longword)" },
/* 0x0f */ { "BkColor", 4, "background color (longword)" },
/* 0x10 */ { "TxRatio", 8, "numerator (point), denominator (point)" },
/* 0x11 */ { "Version", 1, "version (byte)" },
/* 0x12 */ { "BkPixPat", 0, "color background pattern" },
/* 0x13 */ { "PnPixPat", 0, "color pen pattern" },
/* 0x14 */ { "FillPixPat", 0, "color fill pattern" },
/* 0x15 */ { "PnLocHFrac", 2, "fractional pen position" },
/* 0x16 */ { "ChExtra", 2, "extra for each character" },
/* 0x17 */ { "reserved", 0, "reserved for Apple use" },
/* 0x18 */ { "reserved", 0, "reserved for Apple use" },
/* 0x19 */ { "reserved", 0, "reserved for Apple use" },
/* 0x1a */ { "RGBFgCol", 6, "RGB foreColor" },
/* 0x1b */ { "RGBBkCol", 6, "RGB backColor" },
/* 0x1c */ { "HiliteMode", 0, "hilite mode flag" },
/* 0x1d */ { "HiliteColor", 6, "RGB hilite color" },
/* 0x1e */ { "DefHilite", 0, "Use default hilite color" },
/* 0x1f */ { "OpColor", 6, "RGB OpColor for arithmetic modes" },
/* 0x20 */ { "Line", 8, "pnLoc (point), newPt (point)" },
/* 0x21 */ { "LineFrom", 4, "newPt (point)" },
/* 0x22 */ { "ShortLine", 6, "pnLoc (point, dh, dv (-128 .. 127))" },
/* 0x23 */ { "ShortLineFrom", 2, "dh, dv (-128 .. 127)" },
/* 0x24 */ { "reserved", -1, "reserved for Apple use" },
/* 0x25 */ { "reserved", -1, "reserved for Apple use" },
/* 0x26 */ { "reserved", -1, "reserved for Apple use" },
/* 0x27 */ { "reserved", -1, "reserved for Apple use" },
/* 0x28 */ { "LongText", 0, "txLoc (point), count (0..255), text" },
/* 0x29 */ { "DHText", 0, "dh (0..255), count (0..255), text" },
/* 0x2a */ { "DVText", 0, "dv (0..255), count (0..255), text" },
/* 0x2b */ { "DHDVText", 0, "dh, dv (0..255), count (0..255), text" },
/* 0x2c */ { "reserved", -1, "reserved for Apple use" },
/* 0x2d */ { "reserved", -1, "reserved for Apple use" },
/* 0x2e */ { "reserved", -1, "reserved for Apple use" },
/* 0x2f */ { "reserved", -1, "reserved for Apple use" },
/* 0x30 */ { "frameRect", 8, "rect" },
/* 0x31 */ { "paintRect", 8, "rect" },
/* 0x32 */ { "eraseRect", 8, "rect" },
/* 0x33 */ { "invertRect", 8, "rect" },
/* 0x34 */ { "fillRect", 8, "rect" },
/* 0x35 */ { "reserved", 8, "reserved for Apple use" },
/* 0x36 */ { "reserved", 8, "reserved for Apple use" },
/* 0x37 */ { "reserved", 8, "reserved for Apple use" },
/* 0x38 */ { "frameSameRect", 0, "rect" },
/* 0x39 */ { "paintSameRect", 0, "rect" },
/* 0x3a */ { "eraseSameRect", 0, "rect" },
/* 0x3b */ { "invertSameRect", 0, "rect" },
/* 0x3c */ { "fillSameRect", 0, "rect" },
/* 0x3d */ { "reserved", 0, "reserved for Apple use" },
/* 0x3e */ { "reserved", 0, "reserved for Apple use" },
/* 0x3f */ { "reserved", 0, "reserved for Apple use" },
/* 0x40 */ { "frameRRect", 8, "rect" },
/* 0x41 */ { "paintRRect", 8, "rect" },
/* 0x42 */ { "eraseRRect", 8, "rect" },
/* 0x43 */ { "invertRRect", 8, "rect" },
/* 0x44 */ { "fillRRrect", 8, "rect" },
/* 0x45 */ { "reserved", 8, "reserved for Apple use" },
/* 0x46 */ { "reserved", 8, "reserved for Apple use" },
/* 0x47 */ { "reserved", 8, "reserved for Apple use" },
/* 0x48 */ { "frameSameRRect", 0, "rect" },
/* 0x49 */ { "paintSameRRect", 0, "rect" },
/* 0x4a */ { "eraseSameRRect", 0, "rect" },
/* 0x4b */ { "invertSameRRect", 0, "rect" },
/* 0x4c */ { "fillSameRRect", 0, "rect" },
/* 0x4d */ { "reserved", 0, "reserved for Apple use" },
/* 0x4e */ { "reserved", 0, "reserved for Apple use" },
/* 0x4f */ { "reserved", 0, "reserved for Apple use" },
/* 0x50 */ { "frameOval", 8, "rect" },
/* 0x51 */ { "paintOval", 8, "rect" },
/* 0x52 */ { "eraseOval", 8, "rect" },
/* 0x53 */ { "invertOval", 8, "rect" },
/* 0x54 */ { "fillOval", 8, "rect" },
/* 0x55 */ { "reserved", 8, "reserved for Apple use" },
/* 0x56 */ { "reserved", 8, "reserved for Apple use" },
/* 0x57 */ { "reserved", 8, "reserved for Apple use" },
/* 0x58 */ { "frameSameOval", 0, "rect" },
/* 0x59 */ { "paintSameOval", 0, "rect" },
/* 0x5a */ { "eraseSameOval", 0, "rect" },
/* 0x5b */ { "invertSameOval", 0, "rect" },
/* 0x5c */ { "fillSameOval", 0, "rect" },
/* 0x5d */ { "reserved", 0, "reserved for Apple use" },
/* 0x5e */ { "reserved", 0, "reserved for Apple use" },
/* 0x5f */ { "reserved", 0, "reserved for Apple use" },
/* 0x60 */ { "frameArc", 12, "rect, startAngle, arcAngle" },
/* 0x61 */ { "paintArc", 12, "rect, startAngle, arcAngle" },
/* 0x62 */ { "eraseArc", 12, "rect, startAngle, arcAngle" },
/* 0x63 */ { "invertArc", 12, "rect, startAngle, arcAngle" },
/* 0x64 */ { "fillArc", 12, "rect, startAngle, arcAngle" },
/* 0x65 */ { "reserved", 12, "reserved for Apple use" },
/* 0x66 */ { "reserved", 12, "reserved for Apple use" },
/* 0x67 */ { "reserved", 12, "reserved for Apple use" },
/* 0x68 */ { "frameSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x69 */ { "paintSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6a */ { "eraseSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6b */ { "invertSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6c */ { "fillSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6d */ { "reserved", 4, "reserved for Apple use" },
/* 0x6e */ { "reserved", 4, "reserved for Apple use" },
/* 0x6f */ { "reserved", 4, "reserved for Apple use" },
/* 0x70 */ { "framePoly", 0, "poly" },
/* 0x71 */ { "paintPoly", 0, "poly" },
/* 0x72 */ { "erasePoly", 0, "poly" },
/* 0x73 */ { "invertPoly", 0, "poly" },
/* 0x74 */ { "fillPoly", 0, "poly" },
/* 0x75 */ { "reserved", 0, "reserved for Apple use" },
/* 0x76 */ { "reserved", 0, "reserved for Apple use" },
/* 0x77 */ { "reserved", 0, "reserved for Apple use" },
/* 0x78 */ { "frameSamePoly", 0, "poly (NYI)" },
/* 0x79 */ { "paintSamePoly", 0, "poly (NYI)" },
/* 0x7a */ { "eraseSamePoly", 0, "poly (NYI)" },
/* 0x7b */ { "invertSamePoly", 0, "poly (NYI)" },
/* 0x7c */ { "fillSamePoly", 0, "poly (NYI)" },
/* 0x7d */ { "reserved", 0, "reserved for Apple use" },
/* 0x7e */ { "reserved", 0, "reserved for Apple use" },
/* 0x7f */ { "reserved", 0, "reserved for Apple use" },
/* 0x80 */ { "frameRgn", 0, "region" },
/* 0x81 */ { "paintRgn", 0, "region" },
/* 0x82 */ { "eraseRgn", 0, "region" },
/* 0x83 */ { "invertRgn", 0, "region" },
/* 0x84 */ { "fillRgn", 0, "region" },
/* 0x85 */ { "reserved", 0, "reserved for Apple use" },
/* 0x86 */ { "reserved", 0, "reserved for Apple use" },
/* 0x87 */ { "reserved", 0, "reserved for Apple use" },
/* 0x88 */ { "frameSameRgn", 0, "region (NYI)" },
/* 0x89 */ { "paintSameRgn", 0, "region (NYI)" },
/* 0x8a */ { "eraseSameRgn", 0, "region (NYI)" },
/* 0x8b */ { "invertSameRgn", 0, "region (NYI)" },
/* 0x8c */ { "fillSameRgn", 0, "region (NYI)" },
/* 0x8d */ { "reserved", 0, "reserved for Apple use" },
/* 0x8e */ { "reserved", 0, "reserved for Apple use" },
/* 0x8f */ { "reserved", 0, "reserved for Apple use" },
/* 0x90 */ { "BitsRect", 0, "copybits, rect clipped" },
/* 0x91 */ { "BitsRgn", 0, "copybits, rgn clipped" },
/* 0x92 */ { "reserved", -1, "reserved for Apple use" },
/* 0x93 */ { "reserved", -1, "reserved for Apple use" },
/* 0x94 */ { "reserved", -1, "reserved for Apple use" },
/* 0x95 */ { "reserved", -1, "reserved for Apple use" },
/* 0x96 */ { "reserved", -1, "reserved for Apple use" },
/* 0x97 */ { "reserved", -1, "reserved for Apple use" },
/* 0x98 */ { "PackBitsRect", 0, "packed copybits, rect clipped" },
/* 0x99 */ { "PackBitsRgn", 0, "packed copybits, rgn clipped" },
/* 0x9a */ { "DirectBitsRect", 0, "PixMap, srcRect, dstRect, mode, PixData" },
/* 0x9b */ { "DirectBitsRgn", 0, "PixMap, srcRect, dstRect, mode, maskRgn, PixData" },
/* 0x9c */ { "reserved", -1, "reserved for Apple use" },
/* 0x9d */ { "reserved", -1, "reserved for Apple use" },
/* 0x9e */ { "reserved", -1, "reserved for Apple use" },
/* 0x9f */ { "reserved", -1, "reserved for Apple use" },
/* 0xa0 */ { "ShortComment", 2, "kind (word)" },
/* 0xa1 */ { "LongComment", 0, "kind (word), size (word), data" }
};
/*
Forward declarations.
*/
static MagickBooleanType
WritePICTImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DecodeImage decompresses an image via Macintosh pack bits decoding for
% Macintosh PICT images.
%
% The format of the DecodeImage method is:
%
% unsigned char *DecodeImage(Image *blob,Image *image,
% size_t bytes_per_line,const unsigned int bits_per_pixel,
% size_t *extent)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob,image: the address of a structure of type Image.
%
% o bytes_per_line: This integer identifies the number of bytes in a
% scanline.
%
% o bits_per_pixel: the number of bits in a pixel.
%
% o extent: returns the size of the allocated pixel buffer in bytes.
%
*/
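/*
  Worked example (added for illustration): with bits_per_pixel 4,
  ExpandBuffer() below turns the source byte 0xA7 into the two pixel
  bytes 0x0A and 0x07.  In the RLE path of DecodeImage(), a control
  byte with the high bit clear, e.g. 0x03, copies the next 0x03+1 = 4
  literal pixel groups, while a control byte with the high bit set,
  e.g. 0xFD, replays the following pixel group (0xFD ^ 0xff)+2 = 4
  times.
*/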
static unsigned char *ExpandBuffer(unsigned char *pixels,
MagickSizeType *bytes_per_line,const unsigned int bits_per_pixel)
{
register ssize_t
i;
register unsigned char
*p,
*q;
static unsigned char
scanline[8*256];
p=pixels;
q=scanline;
switch (bits_per_pixel)
{
case 8:
case 16:
case 32:
return(pixels);
case 4:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 4) & 0xff;
*q++=(*p & 15);
p++;
}
*bytes_per_line*=2;
break;
}
case 2:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 6) & 0x03;
*q++=(*p >> 4) & 0x03;
*q++=(*p >> 2) & 0x03;
*q++=(*p & 3);
p++;
}
*bytes_per_line*=4;
break;
}
case 1:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 7) & 0x01;
*q++=(*p >> 6) & 0x01;
*q++=(*p >> 5) & 0x01;
*q++=(*p >> 4) & 0x01;
*q++=(*p >> 3) & 0x01;
*q++=(*p >> 2) & 0x01;
*q++=(*p >> 1) & 0x01;
*q++=(*p & 0x01);
p++;
}
*bytes_per_line*=8;
break;
}
default:
break;
}
return(scanline);
}
static unsigned char *DecodeImage(Image *blob,Image *image,
size_t bytes_per_line,const unsigned int bits_per_pixel,size_t *extent)
{
MagickBooleanType
status;
MagickSizeType
number_pixels;
register ssize_t
i;
register unsigned char
*p,
*q;
size_t
bytes_per_pixel,
length,
row_bytes,
scanline_length,
width;
ssize_t
count,
j,
y;
unsigned char
*pixels,
*scanline;
/*
Determine pixel buffer size.
*/
if (bits_per_pixel <= 8)
bytes_per_line&=0x7fff;
width=image->columns;
bytes_per_pixel=1;
if (bits_per_pixel == 16)
{
bytes_per_pixel=2;
width*=2;
}
else
if (bits_per_pixel == 32)
width*=image->alpha_trait ? 4 : 3;
if (bytes_per_line == 0)
bytes_per_line=width;
row_bytes=(size_t) (image->columns | 0x8000);
if (image->storage_class == DirectClass)
row_bytes=(size_t) ((4*image->columns) | 0x8000);
/*
Allocate pixel and scanline buffer.
*/
pixels=(unsigned char *) AcquireQuantumMemory(image->rows,row_bytes*
sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
return((unsigned char *) NULL);
*extent=row_bytes*image->rows*sizeof(*pixels);
(void) memset(pixels,0,*extent);
scanline=(unsigned char *) AcquireQuantumMemory(row_bytes,2*
sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return((unsigned char *) NULL);
}
(void) memset(scanline,0,2*row_bytes*sizeof(*scanline));
status=MagickTrue;
if (bytes_per_line < 8)
{
/*
Pixels are already uncompressed.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=pixels+y*width*GetPixelChannels(image);
number_pixels=bytes_per_line;
count=ReadBlob(blob,(size_t) number_pixels,scanline);
if (count != (ssize_t) number_pixels)
{
status=MagickFalse;
break;
}
p=ExpandBuffer(scanline,&number_pixels,bits_per_pixel);
if ((q+number_pixels) > (pixels+(*extent)))
{
status=MagickFalse;
break;
}
(void) memcpy(q,p,(size_t) number_pixels);
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
if (status == MagickFalse)
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(pixels);
}
/*
Uncompress RLE pixels into uncompressed pixel buffer.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=pixels+y*width;
if (bytes_per_line > 200)
scanline_length=ReadBlobMSBShort(blob);
else
scanline_length=(size_t) ReadBlobByte(blob);
if ((scanline_length >= row_bytes) || (scanline_length == 0))
{
status=MagickFalse;
break;
}
count=ReadBlob(blob,scanline_length,scanline);
if (count != (ssize_t) scanline_length)
{
status=MagickFalse;
break;
}
for (j=0; j < (ssize_t) scanline_length; )
if ((scanline[j] & 0x80) == 0)
{
length=(size_t) ((scanline[j] & 0xff)+1);
number_pixels=length*bytes_per_pixel;
p=ExpandBuffer(scanline+j+1,&number_pixels,bits_per_pixel);
if ((q-pixels+number_pixels) <= *extent)
(void) memcpy(q,p,(size_t) number_pixels);
q+=number_pixels;
j+=(ssize_t) (length*bytes_per_pixel+1);
}
else
{
length=(size_t) (((scanline[j] ^ 0xff) & 0xff)+2);
number_pixels=bytes_per_pixel;
p=ExpandBuffer(scanline+j+1,&number_pixels,bits_per_pixel);
for (i=0; i < (ssize_t) length; i++)
{
if ((q-pixels+number_pixels) <= *extent)
(void) memcpy(q,p,(size_t) number_pixels);
q+=number_pixels;
}
j+=(ssize_t) bytes_per_pixel+1;
}
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
if (status == MagickFalse)
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeImage compresses an image via Macintosh pack bits encoding
% for Macintosh PICT images.
%
% The format of the EncodeImage method is:
%
% size_t EncodeImage(Image *image,const unsigned char *scanline,
% const size_t bytes_per_line,unsigned char *pixels)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o scanline: A pointer to an array of characters to pack.
%
% o bytes_per_line: the number of bytes in a scanline.
%
% o pixels: A pointer to an array of characters where the packed
% characters are stored.
%
*/
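/*
  Worked example (added for illustration): the scanline 0x41 0x41 0x41
  0x41 0x42 packs as 0xFD 0x41 0x00 0x42 -- the control byte
  257-4 = 0xFD replays 0x41 four times, and the control byte 0x00
  introduces a single literal byte.  EncodeImage() below prefixes the
  packed scanline with its length, written as a byte when
  bytes_per_line is 200 or less and as an MSB short otherwise.
*/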
static size_t EncodeImage(Image *image,const unsigned char *scanline,
const size_t bytes_per_line,unsigned char *pixels)
{
#define MaxCount 128
#define MaxPackbitsRunlength 128
register const unsigned char
*p;
register ssize_t
i;
register unsigned char
*q;
size_t
length;
ssize_t
count,
repeat_count,
runlength;
unsigned char
index;
/*
Pack scanline.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(scanline != (unsigned char *) NULL);
assert(pixels != (unsigned char *) NULL);
count=0;
runlength=0;
p=scanline+(bytes_per_line-1);
q=pixels;
index=(*p);
for (i=(ssize_t) bytes_per_line-1; i >= 0; i--)
{
if (index == *p)
runlength++;
else
{
if (runlength < 3)
while (runlength > 0)
{
*q++=(unsigned char) index;
runlength--;
count++;
if (count == MaxCount)
{
*q++=(unsigned char) (MaxCount-1);
count-=MaxCount;
}
}
else
{
if (count > 0)
*q++=(unsigned char) (count-1);
count=0;
while (runlength > 0)
{
repeat_count=runlength;
if (repeat_count > MaxPackbitsRunlength)
repeat_count=MaxPackbitsRunlength;
*q++=(unsigned char) index;
*q++=(unsigned char) (257-repeat_count);
runlength-=repeat_count;
}
}
runlength=1;
}
index=(*p);
p--;
}
if (runlength < 3)
while (runlength > 0)
{
*q++=(unsigned char) index;
runlength--;
count++;
if (count == MaxCount)
{
*q++=(unsigned char) (MaxCount-1);
count-=MaxCount;
}
}
else
{
if (count > 0)
*q++=(unsigned char) (count-1);
count=0;
while (runlength > 0)
{
repeat_count=runlength;
if (repeat_count > MaxPackbitsRunlength)
repeat_count=MaxPackbitsRunlength;
*q++=(unsigned char) index;
*q++=(unsigned char) (257-repeat_count);
runlength-=repeat_count;
}
}
if (count > 0)
*q++=(unsigned char) (count-1);
/*
Write the packed length followed by the packed scanline.
*/
length=(size_t) (q-pixels);
if (bytes_per_line > 200)
{
(void) WriteBlobMSBShort(image,(unsigned short) length);
length+=2;
}
else
{
(void) WriteBlobByte(image,(unsigned char) length);
length++;
}
while (q != pixels)
{
q--;
(void) WriteBlobByte(image,*q);
}
return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P I C T %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPICT() returns MagickTrue if the image format type, identified by the
% magick string, is PICT.
%
% The format of the IsPICT method is:
%
% MagickBooleanType IsPICT(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPICT(const unsigned char *magick,const size_t length)
{
if (length < 12)
return(MagickFalse);
/*
Embedded OLE2 Macintosh files have "PICT" instead of the 512-byte platform header.
*/
if (memcmp(magick,"PICT",4) == 0)
return(MagickTrue);
if (length < 528)
return(MagickFalse);
if (memcmp(magick+522,"\000\021\002\377\014\000",6) == 0)
return(MagickTrue);
return(MagickFalse);
}
#if !defined(macintosh)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPICTImage() reads an Apple Macintosh QuickDraw/PICT image file
% and returns it. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the ReadPICTImage method is:
%
% Image *ReadPICTImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixmap(Image *image,PICTPixmap *pixmap)
{
pixmap->version=(short) ReadBlobMSBShort(image);
pixmap->pack_type=(short) ReadBlobMSBShort(image);
pixmap->pack_size=ReadBlobMSBLong(image);
pixmap->horizontal_resolution=1UL*ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
pixmap->vertical_resolution=1UL*ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
pixmap->pixel_type=(short) ReadBlobMSBShort(image);
pixmap->bits_per_pixel=(short) ReadBlobMSBShort(image);
pixmap->component_count=(short) ReadBlobMSBShort(image);
pixmap->component_size=(short) ReadBlobMSBShort(image);
pixmap->plane_bytes=ReadBlobMSBLong(image);
pixmap->table=ReadBlobMSBLong(image);
pixmap->reserved=ReadBlobMSBLong(image);
if ((EOFBlob(image) != MagickFalse) || (pixmap->bits_per_pixel <= 0) ||
(pixmap->bits_per_pixel > 32) || (pixmap->component_count <= 0) ||
(pixmap->component_count > 4) || (pixmap->component_size <= 0))
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType ReadRectangle(Image *image,PICTRectangle *rectangle)
{
rectangle->top=(short) ReadBlobMSBShort(image);
rectangle->left=(short) ReadBlobMSBShort(image);
rectangle->bottom=(short) ReadBlobMSBShort(image);
rectangle->right=(short) ReadBlobMSBShort(image);
if ((EOFBlob(image) != MagickFalse) ||
((rectangle->bottom-rectangle->top) <= 0) ||
((rectangle->right-rectangle->left) <= 0))
return(MagickFalse);
return(MagickTrue);
}
static Image *ReadPICTImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define ThrowPICTException(exception,message) \
{ \
if (tile_image != (Image *) NULL) \
tile_image=DestroyImage(tile_image); \
if (read_info != (ImageInfo *) NULL) \
read_info=DestroyImageInfo(read_info); \
ThrowReaderException((exception),(message)); \
}
char
geometry[MagickPathExtent],
header_ole[4];
Image
*image,
*tile_image;
ImageInfo
*read_info;
int
c,
code;
MagickBooleanType
jpeg,
status;
PICTRectangle
frame;
PICTPixmap
pixmap;
Quantum
index;
register Quantum
*q;
register ssize_t
i,
x;
size_t
extent,
length;
ssize_t
count,
flags,
j,
version,
y;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read PICT header.
*/
read_info=(ImageInfo *) NULL;
tile_image=(Image *) NULL;
pixmap.bits_per_pixel=0;
pixmap.component_count=0;
/*
Skip header: 512 bytes for standard PICT, or 4 bytes ("PICT") for OLE2.
*/
header_ole[0]=ReadBlobByte(image);
header_ole[1]=ReadBlobByte(image);
header_ole[2]=ReadBlobByte(image);
header_ole[3]=ReadBlobByte(image);
if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) &&
(header_ole[2] == 0x43) && (header_ole[3] == 0x54 )))
for (i=0; i < 508; i++)
if (ReadBlobByte(image) == EOF)
break;
(void) ReadBlobMSBShort(image); /* skip picture size */
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
while ((c=ReadBlobByte(image)) == 0) ;
if (c != 0x11)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
version=(ssize_t) ReadBlobByte(image);
if (version == 2)
{
c=ReadBlobByte(image);
if (c != 0xff)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
}
else
if (version != 1)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) ||
(frame.bottom < 0) || (frame.left >= frame.right) ||
(frame.top >= frame.bottom))
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
/*
Create black canvas.
*/
flags=0;
image->depth=8;
image->columns=(size_t) (frame.right-frame.left);
image->rows=(size_t) (frame.bottom-frame.top);
image->resolution.x=DefaultResolution;
image->resolution.y=DefaultResolution;
image->units=UndefinedResolution;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status != MagickFalse)
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Interpret PICT opcodes.
*/
jpeg=MagickFalse;
for (code=0; EOFBlob(image) == MagickFalse; )
{
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((version == 1) || ((TellBlob(image) % 2) != 0))
code=ReadBlobByte(image);
if (version == 2)
code=ReadBlobMSBSignedShort(image);
if (code < 0)
break;
if (code == 0)
continue;
if (code > 0xa1)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code);
}
else
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" %04X %s: %s",code,codes[code].name,codes[code].description);
switch (code)
{
case 0x01:
{
/*
Clipping rectangle.
*/
length=ReadBlobMSBShort(image);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
if (length != 0x000a)
{
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0))
break;
image->columns=(size_t) (frame.right-frame.left);
image->rows=(size_t) (frame.bottom-frame.top);
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status != MagickFalse)
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
break;
}
case 0x12:
case 0x13:
case 0x14:
{
ssize_t
pattern;
size_t
height,
width;
/*
Skip pattern definition.
*/
pattern=(ssize_t) ReadBlobMSBShort(image);
for (i=0; i < 8; i++)
if (ReadBlobByte(image) == EOF)
break;
if (pattern == 2)
{
for (i=0; i < 5; i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
if (pattern != 1)
ThrowPICTException(CorruptImageError,"UnknownPatternType");
length=ReadBlobMSBShort(image);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if (ReadPixmap(image,&pixmap) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
image->depth=(size_t) pixmap.component_size;
image->resolution.x=1.0*pixmap.horizontal_resolution;
image->resolution.y=1.0*pixmap.vertical_resolution;
image->units=PixelsPerInchResolution;
(void) ReadBlobMSBLong(image);
flags=(ssize_t) ReadBlobMSBShort(image);
length=ReadBlobMSBShort(image);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
for (i=0; i <= (ssize_t) length; i++)
(void) ReadBlobMSBLong(image);
width=(size_t) (frame.bottom-frame.top);
height=(size_t) (frame.right-frame.left);
if (pixmap.bits_per_pixel <= 8)
length&=0x7fff;
if (pixmap.bits_per_pixel == 16)
width<<=1;
if (length == 0)
length=width;
if (length < 8)
{
for (i=0; i < (ssize_t) (length*height); i++)
if (ReadBlobByte(image) == EOF)
break;
}
else
for (i=0; i < (ssize_t) height; i++)
{
if (EOFBlob(image) != MagickFalse)
break;
if (length > 200)
{
for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++)
if (ReadBlobByte(image) == EOF)
break;
}
else
for (j=0; j < (ssize_t) ReadBlobByte(image); j++)
if (ReadBlobByte(image) == EOF)
break;
}
break;
}
case 0x1b:
{
/*
Initialize image background color.
*/
image->background_color.red=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
image->background_color.green=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
image->background_color.blue=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
break;
}
case 0x70:
case 0x71:
case 0x72:
case 0x73:
case 0x74:
case 0x75:
case 0x76:
case 0x77:
{
/*
Skip polygon or region.
*/
length=ReadBlobMSBShort(image);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
case 0x90:
case 0x91:
case 0x98:
case 0x99:
case 0x9a:
case 0x9b:
{
PICTRectangle
source,
destination;
register unsigned char
*p;
size_t
j;
ssize_t
bytes_per_line;
unsigned char
*pixels;
/*
Pixmap clipped by a rectangle.
*/
bytes_per_line=0;
if ((code != 0x9a) && (code != 0x9b))
bytes_per_line=(ssize_t) ReadBlobMSBShort(image);
else
{
(void) ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
}
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
/*
Initialize tile image.
*/
tile_image=CloneImage(image,(size_t) (frame.right-frame.left),
(size_t) (frame.bottom-frame.top),MagickTrue,exception);
if (tile_image == (Image *) NULL)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if ((code == 0x9a) || (code == 0x9b) ||
((bytes_per_line & 0x8000) != 0))
{
if (ReadPixmap(image,&pixmap) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
tile_image->depth=(size_t) pixmap.component_size;
tile_image->alpha_trait=pixmap.component_count == 4 ?
BlendPixelTrait : UndefinedPixelTrait;
tile_image->resolution.x=(double) pixmap.horizontal_resolution;
tile_image->resolution.y=(double) pixmap.vertical_resolution;
tile_image->units=PixelsPerInchResolution;
if (tile_image->alpha_trait != UndefinedPixelTrait)
(void) SetImageAlpha(tile_image,OpaqueAlpha,exception);
}
if ((code != 0x9a) && (code != 0x9b))
{
/*
Initialize colormap.
*/
tile_image->colors=2;
if ((bytes_per_line & 0x8000) != 0)
{
(void) ReadBlobMSBLong(image);
flags=(ssize_t) ReadBlobMSBShort(image);
tile_image->colors=1UL*ReadBlobMSBShort(image)+1;
}
status=AcquireImageColormap(tile_image,tile_image->colors,
exception);
if (status == MagickFalse)
ThrowPICTException(ResourceLimitError,
"MemoryAllocationFailed");
if ((bytes_per_line & 0x8000) != 0)
{
for (i=0; i < (ssize_t) tile_image->colors; i++)
{
j=ReadBlobMSBShort(image) % tile_image->colors;
if ((flags & 0x8000) != 0)
j=(size_t) i;
tile_image->colormap[j].red=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
tile_image->colormap[j].green=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
tile_image->colormap[j].blue=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
}
}
else
{
for (i=0; i < (ssize_t) tile_image->colors; i++)
{
tile_image->colormap[i].red=(Quantum) (QuantumRange-
tile_image->colormap[i].red);
tile_image->colormap[i].green=(Quantum) (QuantumRange-
tile_image->colormap[i].green);
tile_image->colormap[i].blue=(Quantum) (QuantumRange-
tile_image->colormap[i].blue);
}
}
}
if (EOFBlob(image) != MagickFalse)
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
if (ReadRectangle(image,&source) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
if (ReadRectangle(image,&destination) == MagickFalse)
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlobMSBShort(image);
if ((code == 0x91) || (code == 0x99) || (code == 0x9b))
{
/*
Skip region.
*/
length=ReadBlobMSBShort(image);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
}
if ((code != 0x9a) && (code != 0x9b) &&
(bytes_per_line & 0x8000) == 0)
pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line,1,
&extent);
else
pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line,
(unsigned int) pixmap.bits_per_pixel,&extent);
if (pixels == (unsigned char *) NULL)
ThrowPICTException(CorruptImageError,"UnableToUncompressImage");
/*
Convert PICT tile image to pixel packets.
*/
p=pixels;
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
if (p > (pixels+extent+image->columns))
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowPICTException(CorruptImageError,"NotEnoughPixelData");
}
q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
if (tile_image->storage_class == PseudoClass)
{
index=(Quantum) ConstrainColormapIndex(tile_image,(ssize_t)
*p,exception);
SetPixelIndex(tile_image,index,q);
SetPixelRed(tile_image,
tile_image->colormap[(ssize_t) index].red,q);
SetPixelGreen(tile_image,
tile_image->colormap[(ssize_t) index].green,q);
SetPixelBlue(tile_image,
tile_image->colormap[(ssize_t) index].blue,q);
}
else
{
if (pixmap.bits_per_pixel == 16)
{
i=(ssize_t) (*p++);
j=(size_t) (*p);
SetPixelRed(tile_image,ScaleCharToQuantum(
(unsigned char) ((i & 0x7c) << 1)),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
(unsigned char) (((i & 0x03) << 6) |
((j & 0xe0) >> 2))),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
(unsigned char) ((j & 0x1f) << 3)),q);
}
else
if (tile_image->alpha_trait == UndefinedPixelTrait)
{
if (p > (pixels+extent+2*image->columns))
ThrowPICTException(CorruptImageError,
"NotEnoughPixelData");
SetPixelRed(tile_image,ScaleCharToQuantum(*p),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
*(p+tile_image->columns)),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
*(p+2*tile_image->columns)),q);
}
else
{
if (p > (pixels+extent+3*image->columns))
ThrowPICTException(CorruptImageError,
"NotEnoughPixelData");
SetPixelAlpha(tile_image,ScaleCharToQuantum(*p),q);
SetPixelRed(tile_image,ScaleCharToQuantum(
*(p+tile_image->columns)),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
*(p+2*tile_image->columns)),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
*(p+3*tile_image->columns)),q);
}
}
p++;
q+=GetPixelChannels(tile_image);
}
if (SyncAuthenticPixels(tile_image,exception) == MagickFalse)
break;
if ((tile_image->storage_class == DirectClass) &&
(pixmap.bits_per_pixel != 16))
{
p+=(pixmap.component_count-1)*tile_image->columns;
if (p < pixels)
break;
}
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
tile_image->rows);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
if ((jpeg == MagickFalse) && (EOFBlob(image) == MagickFalse))
if ((code == 0x9a) || (code == 0x9b) ||
((bytes_per_line & 0x8000) != 0))
(void) CompositeImage(image,tile_image,CopyCompositeOp,
MagickTrue,(ssize_t) destination.left,(ssize_t)
destination.top,exception);
tile_image=DestroyImage(tile_image);
break;
}
case 0xa1:
{
unsigned char
*info;
size_t
type;
/*
Comment.
*/
type=ReadBlobMSBShort(image);
length=ReadBlobMSBShort(image);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
if (length == 0)
break;
(void) ReadBlobMSBLong(image);
length-=MagickMin(length,4);
if (length == 0)
break;
info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info));
if (info == (unsigned char *) NULL)
break;
count=ReadBlob(image,length,info);
if (count != (ssize_t) length)
{
info=(unsigned char *) RelinquishMagickMemory(info);
ThrowPICTException(ResourceLimitError,"UnableToReadImageData");
}
switch (type)
{
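            /*
              Comment type 0xe0 carries an ICC profile and type 0x1f2
              an IPTC profile; other comment types are ignored.
            */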
case 0xe0:
{
            profile=BlobToStringInfo((const void *) NULL,length);
            if (profile == (StringInfo *) NULL)
              {
                info=(unsigned char *) RelinquishMagickMemory(info);
                ThrowPICTException(ResourceLimitError,
                  "MemoryAllocationFailed");
              }
            SetStringInfoDatum(profile,info);
            status=SetImageProfile(image,"icc",profile,exception);
            profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
info=(unsigned char *) RelinquishMagickMemory(info);
ThrowPICTException(ResourceLimitError,
"MemoryAllocationFailed");
}
break;
}
case 0x1f2:
{
            profile=BlobToStringInfo((const void *) NULL,length);
            if (profile == (StringInfo *) NULL)
              {
                info=(unsigned char *) RelinquishMagickMemory(info);
                ThrowPICTException(ResourceLimitError,
                  "MemoryAllocationFailed");
              }
            SetStringInfoDatum(profile,info);
            status=SetImageProfile(image,"iptc",profile,exception);
if (status == MagickFalse)
{
info=(unsigned char *) RelinquishMagickMemory(info);
ThrowPICTException(ResourceLimitError,
"MemoryAllocationFailed");
}
profile=DestroyStringInfo(profile);
break;
}
default:
break;
}
info=(unsigned char *) RelinquishMagickMemory(info);
break;
}
default:
{
/*
Skip to next op code.
*/
if (codes[code].length == -1)
(void) ReadBlobMSBShort(image);
else
for (i=0; i < (ssize_t) codes[code].length; i++)
if (ReadBlobByte(image) == EOF)
break;
}
}
}
if (code == 0xc00)
{
/*
Skip header.
*/
for (i=0; i < 24; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
if (((code >= 0xb0) && (code <= 0xcf)) ||
((code >= 0x8000) && (code <= 0x80ff)))
continue;
if (code == 0x8200)
{
char
filename[MaxTextExtent];
FILE
*file;
int
unique_file;
/*
Embedded JPEG.
*/
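          /*
            The embedded JPEG stream is copied to a temporary file and
            decoded by handing a "jpeg:" filename to ReadImage().
          */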
jpeg=MagickTrue;
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
(void) FormatLocaleString(read_info->filename,MaxTextExtent,"jpeg:%s",
filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
{
(void) RelinquishUniqueFileResource(read_info->filename);
(void) CopyMagickString(image->filename,read_info->filename,
MagickPathExtent);
ThrowPICTException(FileOpenError,"UnableToCreateTemporaryFile");
}
length=ReadBlobMSBLong(image);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
if (length > 154)
{
for (i=0; i < 6; i++)
(void) ReadBlobMSBLong(image);
if (ReadRectangle(image,&frame) == MagickFalse)
{
(void) fclose(file);
(void) RelinquishUniqueFileResource(read_info->filename);
ThrowPICTException(CorruptImageError,"ImproperImageHeader");
}
for (i=0; i < 122; i++)
if (ReadBlobByte(image) == EOF)
break;
for (i=0; i < (ssize_t) (length-154); i++)
{
c=ReadBlobByte(image);
if (c == EOF)
break;
if (fputc(c,file) != c)
break;
}
}
        (void) fclose(file);  /* also closes the descriptor from fdopen() */
tile_image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(filename);
read_info=DestroyImageInfo(read_info);
if (tile_image == (Image *) NULL)
continue;
(void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",
(double) MagickMax(image->columns,tile_image->columns),
(double) MagickMax(image->rows,tile_image->rows));
(void) SetImageExtent(image,
MagickMax(image->columns,tile_image->columns),
MagickMax(image->rows,tile_image->rows),exception);
(void) TransformImageColorspace(image,tile_image->colorspace,exception);
      /*
        Composite at the frame's top-left corner (the y offset is
        frame.top, not frame.right).
      */
      (void) CompositeImage(image,tile_image,CopyCompositeOp,MagickTrue,
        (ssize_t) frame.left,(ssize_t) frame.top,exception);
image->compression=tile_image->compression;
tile_image=DestroyImage(tile_image);
continue;
}
if ((code == 0xff) || (code == 0xffff))
break;
if (((code >= 0xd0) && (code <= 0xfe)) ||
((code >= 0x8100) && (code <= 0xffff)))
{
/*
Skip reserved.
*/
length=ReadBlobMSBShort(image);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
for (i=0; i < (ssize_t) length; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
if ((code >= 0x100) && (code <= 0x7fff))
{
/*
Skip reserved.
*/
length=(size_t) ((code >> 7) & 0xff);
if (length > GetBlobSize(image))
ThrowPICTException(CorruptImageError,
"InsufficientImageDataInFile");
for (i=0; i < (ssize_t) length; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPICTImage() adds attributes for the PICT image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPICTImage method is:
%
% size_t RegisterPICTImage(void)
%
*/
ModuleExport size_t RegisterPICTImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PICT","PCT","Apple Macintosh QuickDraw/PICT");
entry->decoder=(DecodeImageHandler *) ReadPICTImage;
entry->encoder=(EncodeImageHandler *) WritePICTImage;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
entry->magick=(IsImageFormatHandler *) IsPICT;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PICT","PICT","Apple Macintosh QuickDraw/PICT");
entry->decoder=(DecodeImageHandler *) ReadPICTImage;
entry->encoder=(EncodeImageHandler *) WritePICTImage;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
entry->magick=(IsImageFormatHandler *) IsPICT;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPICTImage() removes format registrations made by the
% PICT module from the list of supported formats.
%
% The format of the UnregisterPICTImage method is:
%
% UnregisterPICTImage(void)
%
*/
ModuleExport void UnregisterPICTImage(void)
{
(void) UnregisterMagickInfo("PCT");
(void) UnregisterMagickInfo("PICT");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePICTImage() writes an image to a file in the Apple Macintosh
% QuickDraw/PICT image format.
%
% The format of the WritePICTImage method is:
%
% MagickBooleanType WritePICTImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePICTImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define MaxCount 128
#define PictCropRegionOp 0x01
#define PictEndOfPictureOp 0xff
#define PictJPEGOp 0x8200
#define PictInfoOp 0x0C00
#define PictInfoSize 512
#define PictPixmapOp 0x9A
#define PictPICTOp 0x98
#define PictVersion 0x11
const StringInfo
*profile;
double
x_resolution,
y_resolution;
MagickBooleanType
status;
MagickOffsetType
offset;
PICTPixmap
pixmap;
PICTRectangle
bounds,
crop_rectangle,
destination_rectangle,
frame_rectangle,
size_rectangle,
source_rectangle;
register const Quantum
*p;
register ssize_t
i,
x;
size_t
bytes_per_line,
count,
row_bytes,
storage_class;
ssize_t
y;
unsigned char
*buffer,
*packed_scanline,
*scanline;
unsigned short
base_address,
transfer_mode;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns > 65535L) || (image->rows > 65535L))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/*
Initialize image info.
*/
size_rectangle.top=0;
size_rectangle.left=0;
size_rectangle.bottom=(short) image->rows;
size_rectangle.right=(short) image->columns;
frame_rectangle=size_rectangle;
crop_rectangle=size_rectangle;
source_rectangle=size_rectangle;
destination_rectangle=size_rectangle;
base_address=0xff;
row_bytes=image->columns;
bounds.top=0;
bounds.left=0;
bounds.bottom=(short) image->rows;
bounds.right=(short) image->columns;
pixmap.version=0;
pixmap.pack_type=0;
pixmap.pack_size=0;
pixmap.pixel_type=0;
pixmap.bits_per_pixel=8;
pixmap.component_count=1;
pixmap.component_size=8;
pixmap.plane_bytes=0;
pixmap.table=0;
pixmap.reserved=0;
transfer_mode=0;
x_resolution=image->resolution.x != 0.0 ? image->resolution.x :
DefaultResolution;
y_resolution=image->resolution.y != 0.0 ? image->resolution.y :
DefaultResolution;
storage_class=image->storage_class;
if (image_info->compression == JPEGCompression)
storage_class=DirectClass;
if (storage_class == DirectClass)
{
pixmap.component_count=image->alpha_trait != UndefinedPixelTrait ? 4 : 3;
pixmap.pixel_type=16;
pixmap.bits_per_pixel=32;
pixmap.pack_type=0x04;
transfer_mode=0x40;
row_bytes=4*image->columns;
}
/*
Allocate memory.
*/
bytes_per_line=image->columns;
if (storage_class == DirectClass)
bytes_per_line*=image->alpha_trait != UndefinedPixelTrait ? 4 : 3;
buffer=(unsigned char *) AcquireQuantumMemory(PictInfoSize,sizeof(*buffer));
packed_scanline=(unsigned char *) AcquireQuantumMemory((size_t)
(row_bytes+MaxCount),sizeof(*packed_scanline));
scanline=(unsigned char *) AcquireQuantumMemory(row_bytes,sizeof(*scanline));
if ((buffer == (unsigned char *) NULL) ||
(packed_scanline == (unsigned char *) NULL) ||
(scanline == (unsigned char *) NULL))
{
if (scanline != (unsigned char *) NULL)
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
if (packed_scanline != (unsigned char *) NULL)
packed_scanline=(unsigned char *) RelinquishMagickMemory(
packed_scanline);
if (buffer != (unsigned char *) NULL)
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(scanline,0,row_bytes);
(void) memset(packed_scanline,0,(size_t) (row_bytes+MaxCount));
/*
Write header, header size, size bounding box, version, and reserved.
*/
(void) memset(buffer,0,PictInfoSize);
(void) WriteBlob(image,PictInfoSize,buffer);
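  /*
    A PICT file begins with a 512-byte application-reserved header; the
    16-bit picture size written next is patched with the final file
    length once writing completes.
  */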
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.right);
(void) WriteBlobMSBShort(image,PictVersion);
(void) WriteBlobMSBShort(image,0x02ff); /* version #2 */
(void) WriteBlobMSBShort(image,PictInfoOp);
(void) WriteBlobMSBLong(image,0xFFFE0000U);
/*
Write full size of the file, resolution, frame bounding box, and reserved.
*/
(void) WriteBlobMSBShort(image,(unsigned short) x_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) y_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.right);
(void) WriteBlobMSBLong(image,0x00000000L);
profile=GetImageProfile(image,"iptc");
if (profile != (StringInfo *) NULL)
{
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0x1f2);
(void) WriteBlobMSBShort(image,(unsigned short)
(GetStringInfoLength(profile)+4));
(void) WriteBlobString(image,"8BIM");
(void) WriteBlob(image,GetStringInfoLength(profile),
GetStringInfoDatum(profile));
}
profile=GetImageProfile(image,"icc");
if (profile != (StringInfo *) NULL)
{
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0xe0);
(void) WriteBlobMSBShort(image,(unsigned short)
(GetStringInfoLength(profile)+4));
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlob(image,GetStringInfoLength(profile),
GetStringInfoDatum(profile));
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0xe0);
(void) WriteBlobMSBShort(image,4);
(void) WriteBlobMSBLong(image,0x00000002U);
}
/*
Write crop region opcode and crop bounding box.
*/
(void) WriteBlobMSBShort(image,PictCropRegionOp);
(void) WriteBlobMSBShort(image,0xa);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.right);
if (image_info->compression == JPEGCompression)
{
Image
*jpeg_image;
ImageInfo
*jpeg_info;
size_t
length;
unsigned char
*blob;
jpeg_image=CloneImage(image,0,0,MagickTrue,exception);
if (jpeg_image == (Image *) NULL)
{
(void) CloseBlob(image);
return(MagickFalse);
}
jpeg_info=CloneImageInfo(image_info);
(void) CopyMagickString(jpeg_info->magick,"JPEG",MagickPathExtent);
length=0;
blob=(unsigned char *) ImageToBlob(jpeg_info,jpeg_image,&length,
exception);
jpeg_info=DestroyImageInfo(jpeg_info);
if (blob == (unsigned char *) NULL)
return(MagickFalse);
jpeg_image=DestroyImage(jpeg_image);
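      /*
        What follows is the PICT JPEG opcode plus a fixed QuickTime
        image description (matrix, geometry, and "Foto - JPEG" codec
        fields) preceding the JPEG blob; the constants below reproduce
        that header verbatim.
      */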
(void) WriteBlobMSBShort(image,PictJPEGOp);
(void) WriteBlobMSBLong(image,(unsigned int) length+154);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBLong(image,0x00010000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00010000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x40000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00400000U);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) image->rows);
(void) WriteBlobMSBShort(image,(unsigned short) image->columns);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,768);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00566A70U);
(void) WriteBlobMSBLong(image,0x65670000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000001U);
(void) WriteBlobMSBLong(image,0x00016170U);
(void) WriteBlobMSBLong(image,0x706C0000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBShort(image,768);
(void) WriteBlobMSBShort(image,(unsigned short) image->columns);
(void) WriteBlobMSBShort(image,(unsigned short) image->rows);
(void) WriteBlobMSBShort(image,(unsigned short) x_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) y_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBLong(image,length);
(void) WriteBlobMSBShort(image,0x0001);
(void) WriteBlobMSBLong(image,0x0B466F74U);
(void) WriteBlobMSBLong(image,0x6F202D20U);
(void) WriteBlobMSBLong(image,0x4A504547U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x00000000U);
(void) WriteBlobMSBLong(image,0x0018FFFFU);
(void) WriteBlob(image,length,blob);
if ((length & 0x01) != 0)
(void) WriteBlobByte(image,'\0');
blob=(unsigned char *) RelinquishMagickMemory(blob);
}
/*
Write picture opcode, row bytes, and picture bounding box, and version.
*/
if (storage_class == PseudoClass)
(void) WriteBlobMSBShort(image,PictPICTOp);
else
{
(void) WriteBlobMSBShort(image,PictPixmapOp);
(void) WriteBlobMSBLong(image,(unsigned int) base_address);
}
(void) WriteBlobMSBShort(image,(unsigned short) (row_bytes | 0x8000));
(void) WriteBlobMSBShort(image,(unsigned short) bounds.top);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.left);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.right);
/*
Write pack type, pack size, resolution, pixel type, and pixel size.
*/
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.version);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.pack_type);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.pack_size);
(void) WriteBlobMSBShort(image,(unsigned short) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.pixel_type);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.bits_per_pixel);
/*
Write component count, size, plane bytes, table size, and reserved.
*/
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.component_count);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.component_size);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.plane_bytes);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.table);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.reserved);
if (storage_class == PseudoClass)
{
/*
Write image colormap.
*/
(void) WriteBlobMSBLong(image,0x00000000L); /* color seed */
(void) WriteBlobMSBShort(image,0L); /* color flags */
(void) WriteBlobMSBShort(image,(unsigned short) (image->colors-1));
for (i=0; i < (ssize_t) image->colors; i++)
{
(void) WriteBlobMSBShort(image,(unsigned short) i);
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].red));
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].green));
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].blue));
}
}
/*
Write source and destination rectangle.
*/
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.right);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.right);
(void) WriteBlobMSBShort(image,(unsigned short) transfer_mode);
/*
Write picture data.
*/
count=0;
if (storage_class == PseudoClass)
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
scanline[x]=(unsigned char) GetPixelIndex(image,p);
p+=GetPixelChannels(image);
}
count+=EncodeImage(image,scanline,(size_t) (row_bytes & 0x7FFF),
packed_scanline);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
if (image_info->compression == JPEGCompression)
{
(void) memset(scanline,0,row_bytes);
for (y=0; y < (ssize_t) image->rows; y++)
count+=EncodeImage(image,scanline,(size_t) (row_bytes & 0x7FFF),
packed_scanline);
}
else
{
register unsigned char
*blue,
*green,
*opacity,
*red;
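      /*
        DirectClass rows are packed in planar order to match the pack
        type written above: an alpha plane first when present, then
        red, green, and blue planes.
      */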
red=scanline;
green=scanline+image->columns;
blue=scanline+2*image->columns;
opacity=scanline+3*image->columns;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
red=scanline;
green=scanline+image->columns;
blue=scanline+2*image->columns;
if (image->alpha_trait != UndefinedPixelTrait)
{
opacity=scanline;
red=scanline+image->columns;
green=scanline+2*image->columns;
blue=scanline+3*image->columns;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
*red++=ScaleQuantumToChar(GetPixelRed(image,p));
*green++=ScaleQuantumToChar(GetPixelGreen(image,p));
*blue++=ScaleQuantumToChar(GetPixelBlue(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
*opacity++=ScaleQuantumToChar((Quantum) (GetPixelAlpha(image,p)));
p+=GetPixelChannels(image);
}
count+=EncodeImage(image,scanline,bytes_per_line,packed_scanline);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
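  /*
    QuickDraw opcodes are word-aligned, so pad the picture data to an
    even length before writing the end-of-picture opcode.
  */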
if ((count & 0x01) != 0)
(void) WriteBlobByte(image,'\0');
(void) WriteBlobMSBShort(image,PictEndOfPictureOp);
  /*
    Patch the 16-bit picture size at offset 512 with the final file
    length; do not clobber the saved offset with SeekBlob()'s return
    value.
  */
  offset=TellBlob(image);
  (void) SeekBlob(image,512,SEEK_SET);
  (void) WriteBlobMSBShort(image,(unsigned short) offset);
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
packed_scanline=(unsigned char *) RelinquishMagickMemory(packed_scanline);
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_366_0 |
crossvul-cpp_data_bad_1053_0 | /* $OpenBSD: doas.c,v 1.57 2016/06/19 19:29:43 martijn Exp $ */
/*
* Copyright (c) 2015 Ted Unangst <tedu@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#if defined(HAVE_INTTYPES_H)
#include <inttypes.h>
#endif
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <err.h>
#include <unistd.h>
#include <pwd.h>
#include <grp.h>
#include <syslog.h>
#include <errno.h>
#include <fcntl.h>
#if defined(HAVE_LOGIN_CAP_H)
#include <login_cap.h>
#endif
#if defined(USE_BSD_AUTH)
#include <bsd_auth.h>
#include <readpassphrase.h>
#endif
#if defined(USE_PAM)
#include <security/pam_appl.h>
#if defined(OPENPAM) /* BSD, MacOS & certain Linux distros */
#include <security/openpam.h>
static struct pam_conv pamc = { openpam_ttyconv, NULL };
#elif defined(__LINUX_PAM__) /* Linux */
#include <security/pam_misc.h>
static struct pam_conv pamc = { misc_conv, NULL };
#elif defined(SOLARIS_PAM) /* illumos & Solaris */
#include "pm_pam_conv.h"
static struct pam_conv pamc = { pam_tty_conv, NULL };
#endif /* OPENPAM */
#endif /* USE_PAM */
#include "doas.h"
static void
usage(void)
{
fprintf(stderr, "usage: doas [-ns] [-a style] [-C config] [-u user]"
" command [args]\n");
exit(1);
}
#ifdef linux
void
errc(int eval, int code, const char *format)
{
	/* Minimal errc(3) shim: report the error string for "code"
	 * (and an optional message), then exit with status "eval",
	 * matching the BSD original's semantics. */
	if (format != NULL)
		fprintf(stderr, "doas: %s: %s\n", format, strerror(code));
	else
		fprintf(stderr, "doas: %s\n", strerror(code));
	exit(eval);
}
#endif
static int
parseuid(const char *s, uid_t *uid)
{
	struct passwd *pw;
	const char *errstr = NULL;
	if ((pw = getpwnam(s)) != NULL) {
		*uid = pw->pw_uid;
		return 0;
	}
#if !defined(__linux__) && !defined(__NetBSD__)
	*uid = strtonum(s, 0, UID_MAX, &errstr);
#else
	/* No strtonum() here: fall back to sscanf() and treat a parse
	 * failure as an error instead of reading the otherwise
	 * uninitialized errstr. */
	if (sscanf(s, "%u", uid) != 1)
		return -1;
#endif
	if (errstr)
		return -1;
	return 0;
}
static int
uidcheck(const char *s, uid_t desired)
{
uid_t uid;
if (parseuid(s, &uid) != 0)
return -1;
if (uid != desired)
return -1;
return 0;
}
static int
parsegid(const char *s, gid_t *gid)
{
	struct group *gr;
	const char *errstr = NULL;
	if ((gr = getgrnam(s)) != NULL) {
		*gid = gr->gr_gid;
		return 0;
	}
#if !defined(__linux__) && !defined(__NetBSD__)
	*gid = strtonum(s, 0, GID_MAX, &errstr);
#else
	/* No strtonum() here: fall back to sscanf() and treat a parse
	 * failure as an error instead of reading the otherwise
	 * uninitialized errstr. */
	if (sscanf(s, "%u", gid) != 1)
		return -1;
#endif
	if (errstr)
		return -1;
	return 0;
}
static int
match(uid_t uid, gid_t *groups, int ngroups, uid_t target, const char *cmd,
const char **cmdargs, struct rule *r)
{
int i;
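	/*
	 * A rule matches when its identity (a user, or ":group" meaning
	 * membership in that group), its optional target user, and its
	 * optional command plus exact argument vector all agree with
	 * the request.
	 */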
if (r->ident[0] == ':') {
gid_t rgid;
if (parsegid(r->ident + 1, &rgid) == -1)
return 0;
for (i = 0; i < ngroups; i++) {
if (rgid == groups[i])
break;
}
if (i == ngroups)
return 0;
} else {
if (uidcheck(r->ident, uid) != 0)
return 0;
}
if (r->target && uidcheck(r->target, target) != 0)
return 0;
if (r->cmd) {
if (strcmp(r->cmd, cmd))
return 0;
if (r->cmdargs) {
/* if arguments were given, they should match explicitly */
for (i = 0; r->cmdargs[i]; i++) {
if (!cmdargs[i])
return 0;
if (strcmp(r->cmdargs[i], cmdargs[i]))
return 0;
}
if (cmdargs[i])
return 0;
}
}
return 1;
}
static int
permit(uid_t uid, gid_t *groups, int ngroups, struct rule **lastr,
uid_t target, const char *cmd, const char **cmdargs)
{
int i;
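	/*
	 * Evaluate every rule; the last rule that matches decides, so
	 * later entries in the configuration override earlier ones.
	 */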
*lastr = NULL;
for (i = 0; i < nrules; i++) {
if (match(uid, groups, ngroups, target, cmd,
cmdargs, rules[i]))
*lastr = rules[i];
}
if (!*lastr)
return 0;
return (*lastr)->action == PERMIT;
}
static void
parseconfig(const char *filename, int checkperms)
{
extern FILE *yyfp;
extern int yyparse(void);
struct stat sb;
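	/*
	 * When checkperms is set, refuse configuration files that are
	 * group- or other-writable or not owned by root.
	 */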
yyfp = fopen(filename, "r");
if (!yyfp)
err(1, checkperms ? "doas is not enabled, %s" :
"could not open config file %s", filename);
if (checkperms) {
if (fstat(fileno(yyfp), &sb) != 0)
err(1, "fstat(\"%s\")", filename);
if ((sb.st_mode & (S_IWGRP|S_IWOTH)) != 0)
errx(1, "%s is writable by group or other", filename);
if (sb.st_uid != 0)
errx(1, "%s is not owned by root", filename);
}
yyparse();
fclose(yyfp);
if (parse_errors)
exit(1);
}
static void
checkconfig(const char *confpath, int argc, char **argv,
uid_t uid, gid_t *groups, int ngroups, uid_t target)
{
struct rule *rule;
int status;
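	/*
	 * Drop privileges to the invoking user before parsing an
	 * arbitrary, caller-supplied configuration file.
	 */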
#if defined(__linux__) || defined(__FreeBSD__)
status = setresuid(uid, uid, uid);
#else
status = setreuid(uid, uid);
#endif
if (status == -1)
{
printf("doas: Unable to set UID\n");
exit(1);
}
parseconfig(confpath, 0);
if (!argc)
exit(0);
if (permit(uid, groups, ngroups, &rule, target, argv[0],
(const char **)argv + 1)) {
printf("permit%s\n", (rule->options & NOPASS) ? " nopass" : "");
exit(0);
} else {
printf("deny\n");
exit(1);
}
}
#if defined(USE_BSD_AUTH)
static void
authuser(char *myname, char *login_style, int persist)
{
char *challenge = NULL, *response, rbuf[1024], cbuf[128];
auth_session_t *as;
int fd = -1;
if (persist)
fd = open("/dev/tty", O_RDWR);
if (fd != -1) {
if (ioctl(fd, TIOCCHKVERAUTH) == 0)
goto good;
}
if (!(as = auth_userchallenge(myname, login_style, "auth-doas",
&challenge)))
errx(1, "Authorization failed");
if (!challenge) {
char host[HOST_NAME_MAX + 1];
if (gethostname(host, sizeof(host)))
snprintf(host, sizeof(host), "?");
snprintf(cbuf, sizeof(cbuf),
"\rdoas (%.32s@%.32s) password: ", myname, host);
challenge = cbuf;
}
response = readpassphrase(challenge, rbuf, sizeof(rbuf),
RPP_REQUIRE_TTY);
if (response == NULL && errno == ENOTTY) {
syslog(LOG_AUTHPRIV | LOG_NOTICE,
"tty required for %s", myname);
errx(1, "a tty is required");
}
if (!auth_userresponse(as, response, 0)) {
syslog(LOG_AUTHPRIV | LOG_NOTICE,
"failed auth for %s", myname);
errc(1, EPERM, NULL);
}
explicit_bzero(rbuf, sizeof(rbuf));
good:
if (fd != -1) {
int secs = 5 * 60;
ioctl(fd, TIOCSETVERAUTH, &secs);
close(fd);
}
}
#endif
int
main(int argc, char **argv)
{
const char *safepath = SAFE_PATH;
const char *confpath = NULL;
char *shargv[] = { NULL, NULL };
char *sh;
const char *cmd;
char cmdline[LINE_MAX];
char myname[_PW_NAME_LEN + 1];
struct passwd *original_pw, *target_pw;
struct rule *rule;
uid_t uid;
uid_t target = 0;
gid_t groups[NGROUPS_MAX + 1];
int ngroups;
int i, ch;
int sflag = 0;
int nflag = 0;
char cwdpath[PATH_MAX];
const char *cwd;
char *login_style = NULL;
char **envp;
#ifndef linux
	setprogname("doas");
	closefrom(STDERR_FILENO + 1);
#endif
uid = getuid();
while ((ch = getopt(argc, argv, "a:C:nsu:")) != -1) {
/* while ((ch = getopt(argc, argv, "a:C:Lnsu:")) != -1) { */
switch (ch) {
case 'a':
login_style = optarg;
break;
case 'C':
confpath = optarg;
break;
/* case 'L':
i = open("/dev/tty", O_RDWR);
if (i != -1)
ioctl(i, TIOCCLRVERAUTH);
exit(i != -1);
*/
case 'u':
if (parseuid(optarg, &target) != 0)
errx(1, "unknown user");
break;
case 'n':
nflag = 1;
break;
case 's':
sflag = 1;
break;
default:
usage();
break;
}
}
argv += optind;
argc -= optind;
if (confpath) {
if (sflag)
usage();
} else if ((!sflag && !argc) || (sflag && argc))
usage();
original_pw = getpwuid(uid);
if (! original_pw)
err(1, "getpwuid failed");
if (strlcpy(myname, original_pw->pw_name, sizeof(myname)) >= sizeof(myname))
errx(1, "pw_name too long");
ngroups = getgroups(NGROUPS_MAX, groups);
if (ngroups == -1)
err(1, "can't get groups");
groups[ngroups++] = getgid();
if (sflag) {
sh = getenv("SHELL");
if (sh == NULL || *sh == '\0') {
shargv[0] = strdup(original_pw->pw_shell);
if (shargv[0] == NULL)
err(1, NULL);
} else
shargv[0] = sh;
argv = shargv;
argc = 1;
}
if (confpath) {
checkconfig(confpath, argc, argv, uid, groups, ngroups,
target);
exit(1); /* fail safe */
}
if (geteuid())
errx(1, "not installed setuid");
parseconfig(DOAS_CONF, 1);
/* cmdline is used only for logging, no need to abort on truncate */
(void)strlcpy(cmdline, argv[0], sizeof(cmdline));
for (i = 1; i < argc; i++) {
if (strlcat(cmdline, " ", sizeof(cmdline)) >= sizeof(cmdline))
break;
if (strlcat(cmdline, argv[i], sizeof(cmdline)) >= sizeof(cmdline))
break;
}
cmd = argv[0];
if (!permit(uid, groups, ngroups, &rule, target, cmd,
(const char **)argv + 1)) {
syslog(LOG_AUTHPRIV | LOG_NOTICE,
"failed command for %s: %s", myname, cmdline);
errc(1, EPERM, NULL);
}
if (!(rule->options & NOPASS)) {
if (nflag)
errx(1, "Authorization required");
#if defined(USE_BSD_AUTH)
authuser(myname, login_style, rule->options & PERSIST);
#elif defined(USE_PAM)
#define PAM_END(msg) do { \
syslog(LOG_ERR, "%s: %s", msg, pam_strerror(pamh, pam_err)); \
warnx("%s: %s", msg, pam_strerror(pamh, pam_err)); \
pam_end(pamh, pam_err); \
exit(EXIT_FAILURE); \
} while (/*CONSTCOND*/0)
pam_handle_t *pamh = NULL;
int pam_err;
/* #ifndef linux */
int temp_stdin;
		/* openpam_ttyconv checks whether stdin is a terminal and,
		 * if it is, does not bother to open /dev/tty. The result
		 * is that PAM writes the password prompt directly to
		 * stdout. In scenarios where stdin is a terminal but
		 * stdout is redirected to a file, e.g. by running
		 * "doas ls &> ls.out" interactively, the password prompt
		 * gets written to ls.out as well. By closing stdin first
		 * we force PAM to read from and write to the terminal
		 * directly. We restore stdin after authenticating. */
temp_stdin = dup(STDIN_FILENO);
if (temp_stdin == -1)
err(1, "dup");
close(STDIN_FILENO);
/* #else */
/* force password prompt to display on stderr, not stdout */
int temp_stdout = dup(1);
if (temp_stdout == -1)
err(1, "dup");
close(1);
if (dup2(2, 1) == -1)
err(1, "dup2");
/* #endif */
pam_err = pam_start("doas", myname, &pamc, &pamh);
if (pam_err != PAM_SUCCESS) {
if (pamh != NULL)
PAM_END("pam_start");
syslog(LOG_ERR, "pam_start failed: %s",
pam_strerror(pamh, pam_err));
errx(EXIT_FAILURE, "pam_start failed");
}
switch (pam_err = pam_authenticate(pamh, PAM_SILENT)) {
case PAM_SUCCESS:
switch (pam_err = pam_acct_mgmt(pamh, PAM_SILENT)) {
case PAM_SUCCESS:
break;
case PAM_NEW_AUTHTOK_REQD:
pam_err = pam_chauthtok(pamh,
PAM_SILENT|PAM_CHANGE_EXPIRED_AUTHTOK);
if (pam_err != PAM_SUCCESS)
PAM_END("pam_chauthtok");
break;
case PAM_AUTH_ERR:
case PAM_USER_UNKNOWN:
case PAM_MAXTRIES:
syslog(LOG_AUTHPRIV | LOG_NOTICE,
"failed auth for %s", myname);
errx(EXIT_FAILURE, "second authentication failed");
break;
default:
PAM_END("pam_acct_mgmt");
break;
}
break;
case PAM_AUTH_ERR:
case PAM_USER_UNKNOWN:
case PAM_MAXTRIES:
syslog(LOG_AUTHPRIV | LOG_NOTICE,
"failed auth for %s", myname);
errx(EXIT_FAILURE, "authentication failed");
break;
default:
PAM_END("pam_authenticate");
break;
}
pam_end(pamh, pam_err);
		/* Both stdin and stdout were redirected above (the
		 * corresponding #ifdefs are commented out), so
		 * re-establish both unconditionally. */
		if (dup2(temp_stdin, STDIN_FILENO) == -1)
			err(1, "dup2");
		close(temp_stdin);
		close(1);
		if (dup2(temp_stdout, 1) == -1)
			err(1, "dup2");
		close(temp_stdout);
#else
#error No auth module!
#endif
}
/*
if (pledge("stdio rpath getpw exec id", NULL) == -1)
err(1, "pledge");
*/
target_pw = getpwuid(target);
if (! target_pw)
errx(1, "no passwd entry for target");
#if defined(HAVE_LOGIN_CAP_H)
if (setusercontext(NULL, target_pw, target, LOGIN_SETGROUP |
LOGIN_SETPRIORITY | LOGIN_SETRESOURCES | LOGIN_SETUMASK |
LOGIN_SETUSER) != 0)
errx(1, "failed to set user context for target");
#endif
/*
if (pledge("stdio rpath exec", NULL) == -1)
err(1, "pledge");
*/
if (getcwd(cwdpath, sizeof(cwdpath)) == NULL)
cwd = "(failed)";
else
cwd = cwdpath;
/*
if (pledge("stdio exec", NULL) == -1)
err(1, "pledge");
*/
#ifndef HAVE_LOGIN_CAP_H
	/* If we are effectively root, set the UID to actually be root to
	   avoid permission errors; treat a failed setuid() as fatal rather
	   than running the command with the wrong identity. */
	if (target != 0 && setuid(target) == -1)
		err(1, "setuid");
	if (geteuid() == ROOT_UID && setuid(ROOT_UID) == -1)
		err(1, "setuid");
#endif
syslog(LOG_AUTHPRIV | LOG_INFO, "%s ran command %s as %s from %s",
myname, cmdline, target_pw->pw_name, cwd);
envp = prepenv(rule, original_pw, target_pw);
if (rule->cmd) {
if (setenv("PATH", safepath, 1) == -1)
err(1, "failed to set PATH '%s'", safepath);
}
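	/* execvpe() returns only if the exec failed. */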
execvpe(cmd, argv, envp);
if (errno == ENOENT)
errx(1, "%s: command not found", cmd);
err(1, "%s", cmd);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_1053_0 |
crossvul-cpp_data_bad_5415_0 | /*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2002 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* JPEG-2000 Code Stream Library
*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include <stdlib.h>
#include <assert.h>
#include <ctype.h>
#include <inttypes.h>
#include "jasper/jas_malloc.h"
#include "jasper/jas_debug.h"
#include "jpc_cs.h"
/******************************************************************************\
* Types.
\******************************************************************************/
/* Marker segment table entry. */
typedef struct {
int id;
char *name;
jpc_msops_t ops;
} jpc_mstabent_t;
/******************************************************************************\
* Local prototypes.
\******************************************************************************/
static jpc_mstabent_t *jpc_mstab_lookup(int id);
static int jpc_poc_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_poc_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_poc_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static void jpc_poc_destroyparms(jpc_ms_t *ms);
static int jpc_unk_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_sot_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_siz_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_cod_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_coc_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_qcd_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_qcc_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_rgn_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_sop_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_ppm_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_ppt_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_crg_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_com_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in);
static int jpc_sot_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_siz_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_cod_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_coc_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_qcd_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_qcc_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_rgn_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_unk_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_sop_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_ppm_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_ppt_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_crg_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_com_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out);
static int jpc_sot_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_siz_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_cod_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_coc_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_qcd_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_qcc_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_rgn_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_unk_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_sop_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_ppm_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_ppt_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_crg_dumpparms(jpc_ms_t *ms, FILE *out);
static int jpc_com_dumpparms(jpc_ms_t *ms, FILE *out);
static void jpc_siz_destroyparms(jpc_ms_t *ms);
static void jpc_qcd_destroyparms(jpc_ms_t *ms);
static void jpc_qcc_destroyparms(jpc_ms_t *ms);
static void jpc_cod_destroyparms(jpc_ms_t *ms);
static void jpc_coc_destroyparms(jpc_ms_t *ms);
static void jpc_unk_destroyparms(jpc_ms_t *ms);
static void jpc_ppm_destroyparms(jpc_ms_t *ms);
static void jpc_ppt_destroyparms(jpc_ms_t *ms);
static void jpc_crg_destroyparms(jpc_ms_t *ms);
static void jpc_com_destroyparms(jpc_ms_t *ms);
static void jpc_qcx_destroycompparms(jpc_qcxcp_t *compparms);
static int jpc_qcx_getcompparms(jpc_qcxcp_t *compparms, jpc_cstate_t *cstate,
jas_stream_t *in, uint_fast16_t len);
static int jpc_qcx_putcompparms(jpc_qcxcp_t *compparms, jpc_cstate_t *cstate,
jas_stream_t *out);
static void jpc_cox_destroycompparms(jpc_coxcp_t *compparms);
static int jpc_cox_getcompparms(jpc_ms_t *ms, jpc_cstate_t *cstate,
jas_stream_t *in, int prtflag, jpc_coxcp_t *compparms);
static int jpc_cox_putcompparms(jpc_ms_t *ms, jpc_cstate_t *cstate,
jas_stream_t *out, int prtflag, jpc_coxcp_t *compparms);
/******************************************************************************\
* Global data.
\******************************************************************************/
static jpc_mstabent_t jpc_mstab[] = {
{JPC_MS_SOC, "SOC", {0, 0, 0, 0}},
{JPC_MS_SOT, "SOT", {0, jpc_sot_getparms, jpc_sot_putparms,
jpc_sot_dumpparms}},
{JPC_MS_SOD, "SOD", {0, 0, 0, 0}},
{JPC_MS_EOC, "EOC", {0, 0, 0, 0}},
{JPC_MS_SIZ, "SIZ", {jpc_siz_destroyparms, jpc_siz_getparms,
jpc_siz_putparms, jpc_siz_dumpparms}},
{JPC_MS_COD, "COD", {jpc_cod_destroyparms, jpc_cod_getparms,
jpc_cod_putparms, jpc_cod_dumpparms}},
{JPC_MS_COC, "COC", {jpc_coc_destroyparms, jpc_coc_getparms,
jpc_coc_putparms, jpc_coc_dumpparms}},
{JPC_MS_RGN, "RGN", {0, jpc_rgn_getparms, jpc_rgn_putparms,
jpc_rgn_dumpparms}},
{JPC_MS_QCD, "QCD", {jpc_qcd_destroyparms, jpc_qcd_getparms,
jpc_qcd_putparms, jpc_qcd_dumpparms}},
{JPC_MS_QCC, "QCC", {jpc_qcc_destroyparms, jpc_qcc_getparms,
jpc_qcc_putparms, jpc_qcc_dumpparms}},
{JPC_MS_POC, "POC", {jpc_poc_destroyparms, jpc_poc_getparms,
jpc_poc_putparms, jpc_poc_dumpparms}},
{JPC_MS_TLM, "TLM", {0, jpc_unk_getparms, jpc_unk_putparms, 0}},
{JPC_MS_PLM, "PLM", {0, jpc_unk_getparms, jpc_unk_putparms, 0}},
{JPC_MS_PPM, "PPM", {jpc_ppm_destroyparms, jpc_ppm_getparms,
jpc_ppm_putparms, jpc_ppm_dumpparms}},
{JPC_MS_PPT, "PPT", {jpc_ppt_destroyparms, jpc_ppt_getparms,
jpc_ppt_putparms, jpc_ppt_dumpparms}},
{JPC_MS_SOP, "SOP", {0, jpc_sop_getparms, jpc_sop_putparms,
jpc_sop_dumpparms}},
{JPC_MS_EPH, "EPH", {0, 0, 0, 0}},
{JPC_MS_CRG, "CRG", {jpc_crg_destroyparms, jpc_crg_getparms,
jpc_crg_putparms, jpc_crg_dumpparms}},
{JPC_MS_COM, "COM", {jpc_com_destroyparms, jpc_com_getparms,
jpc_com_putparms, jpc_com_dumpparms}},
{-1, "UNKNOWN", {jpc_unk_destroyparms, jpc_unk_getparms,
jpc_unk_putparms, jpc_unk_dumpparms}}
};
/******************************************************************************\
* Code stream manipulation functions.
\******************************************************************************/
/* Create a code stream state object. */
jpc_cstate_t *jpc_cstate_create(void)
{
jpc_cstate_t *cstate;
if (!(cstate = jas_malloc(sizeof(jpc_cstate_t)))) {
return 0;
}
cstate->numcomps = 0;
return cstate;
}
/* Destroy a code stream state object. */
void jpc_cstate_destroy(jpc_cstate_t *cstate)
{
jas_free(cstate);
}
/* Read a marker segment from a stream. */
jpc_ms_t *jpc_getms(jas_stream_t *in, jpc_cstate_t *cstate)
{
jpc_ms_t *ms;
jpc_mstabent_t *mstabent;
jas_stream_t *tmpstream;
if (!(ms = jpc_ms_create(0))) {
return 0;
}
/* Get the marker type. */
if (jpc_getuint16(in, &ms->id) || ms->id < JPC_MS_MIN ||
ms->id > JPC_MS_MAX) {
jpc_ms_destroy(ms);
return 0;
}
mstabent = jpc_mstab_lookup(ms->id);
ms->ops = &mstabent->ops;
/* Get the marker segment length and parameters if present. */
/* Note: It is tacitly assumed that a marker segment cannot have
parameters unless it has a length field. That is, there cannot
be a parameters field without a length field and vice versa. */
if (JPC_MS_HASPARMS(ms->id)) {
/* Get the length of the marker segment. */
if (jpc_getuint16(in, &ms->len) || ms->len < 3) {
jpc_ms_destroy(ms);
return 0;
}
/* Calculate the length of the marker segment parameters. */
ms->len -= 2;
/* Create and prepare a temporary memory stream from which to
read the marker segment parameters. */
/* Note: This approach provides a simple way of ensuring that
we never read beyond the end of the marker segment (even if
the marker segment length is errantly set too small). */
if (!(tmpstream = jas_stream_memopen(0, 0))) {
jpc_ms_destroy(ms);
return 0;
}
if (jas_stream_copy(tmpstream, in, ms->len) ||
jas_stream_seek(tmpstream, 0, SEEK_SET) < 0) {
jas_stream_close(tmpstream);
jpc_ms_destroy(ms);
return 0;
}
/* Get the marker segment parameters. */
if ((*ms->ops->getparms)(ms, cstate, tmpstream)) {
ms->ops = 0;
jpc_ms_destroy(ms);
jas_stream_close(tmpstream);
return 0;
}
if (jas_getdbglevel() > 0) {
jpc_ms_dump(ms, stderr);
}
if (JAS_CAST(ulong, jas_stream_tell(tmpstream)) != ms->len) {
jas_eprintf("warning: trailing garbage in marker segment (%ld bytes)\n",
ms->len - jas_stream_tell(tmpstream));
}
/* Close the temporary stream. */
jas_stream_close(tmpstream);
} else {
/* There are no marker segment parameters. */
ms->len = 0;
if (jas_getdbglevel() > 0) {
jpc_ms_dump(ms, stderr);
}
}
/* Update the code stream state information based on the type of
marker segment read. */
/* Note: This is a bit of a hack, but I'm not going to define another
type of virtual function for this one special case. */
if (ms->id == JPC_MS_SIZ) {
cstate->numcomps = ms->parms.siz.numcomps;
}
return ms;
}
/* Write a marker segment to a stream. */
int jpc_putms(jas_stream_t *out, jpc_cstate_t *cstate, jpc_ms_t *ms)
{
jas_stream_t *tmpstream;
int len;
/* Output the marker segment type. */
if (jpc_putuint16(out, ms->id)) {
return -1;
}
/* Output the marker segment length and parameters if necessary. */
if (ms->ops->putparms) {
/* Create a temporary stream in which to buffer the
parameter data. */
if (!(tmpstream = jas_stream_memopen(0, 0))) {
return -1;
}
if ((*ms->ops->putparms)(ms, cstate, tmpstream)) {
jas_stream_close(tmpstream);
return -1;
}
/* Get the number of bytes of parameter data written. */
if ((len = jas_stream_tell(tmpstream)) < 0) {
jas_stream_close(tmpstream);
return -1;
}
ms->len = len;
/* Write the marker segment length and parameter data to
the output stream. */
if (jas_stream_seek(tmpstream, 0, SEEK_SET) < 0 ||
jpc_putuint16(out, ms->len + 2) ||
jas_stream_copy(out, tmpstream, ms->len) < 0) {
jas_stream_close(tmpstream);
return -1;
}
/* Close the temporary stream. */
jas_stream_close(tmpstream);
}
/* This is a bit of a hack, but I'm not going to define another
type of virtual function for this one special case. */
if (ms->id == JPC_MS_SIZ) {
cstate->numcomps = ms->parms.siz.numcomps;
}
if (jas_getdbglevel() > 0) {
jpc_ms_dump(ms, stderr);
}
return 0;
}
/******************************************************************************\
* Marker segment operations.
\******************************************************************************/
/* Create a marker segment of the specified type. */
jpc_ms_t *jpc_ms_create(int type)
{
jpc_ms_t *ms;
jpc_mstabent_t *mstabent;
if (!(ms = jas_malloc(sizeof(jpc_ms_t)))) {
return 0;
}
ms->id = type;
ms->len = 0;
mstabent = jpc_mstab_lookup(ms->id);
ms->ops = &mstabent->ops;
memset(&ms->parms, 0, sizeof(jpc_msparms_t));
return ms;
}
/* Destroy a marker segment. */
void jpc_ms_destroy(jpc_ms_t *ms)
{
if (ms->ops && ms->ops->destroyparms) {
(*ms->ops->destroyparms)(ms);
}
jas_free(ms);
}
/* Dump a marker segment to a stream for debugging. */
void jpc_ms_dump(jpc_ms_t *ms, FILE *out)
{
jpc_mstabent_t *mstabent;
mstabent = jpc_mstab_lookup(ms->id);
fprintf(out, "type = 0x%04"PRIxFAST16" (%s);", ms->id, mstabent->name);
if (JPC_MS_HASPARMS(ms->id)) {
fprintf(out, " len = %"PRIuFAST16";", ms->len + 2);
if (ms->ops->dumpparms) {
(*ms->ops->dumpparms)(ms, out);
} else {
fprintf(out, "\n");
}
} else {
fprintf(out, "\n");
}
}
/******************************************************************************\
* SOT marker segment operations.
\******************************************************************************/
static int jpc_sot_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_sot_t *sot = &ms->parms.sot;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (jpc_getuint16(in, &sot->tileno) ||
jpc_getuint32(in, &sot->len) ||
jpc_getuint8(in, &sot->partno) ||
jpc_getuint8(in, &sot->numparts)) {
return -1;
}
if (sot->tileno > 65534 || sot->len < 12 || sot->partno > 254 ||
sot->numparts < 1 || sot->numparts > 255) {
return -1;
}
if (jas_stream_eof(in)) {
return -1;
}
return 0;
}
static int jpc_sot_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_sot_t *sot = &ms->parms.sot;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (jpc_putuint16(out, sot->tileno) ||
jpc_putuint32(out, sot->len) ||
jpc_putuint8(out, sot->partno) ||
jpc_putuint8(out, sot->numparts)) {
return -1;
}
return 0;
}
static int jpc_sot_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_sot_t *sot = &ms->parms.sot;
fprintf(out,
"tileno = %"PRIuFAST16"; len = %"PRIuFAST32"; partno = %d; numparts = %d\n",
sot->tileno, sot->len, sot->partno, sot->numparts);
return 0;
}
/******************************************************************************\
* SIZ marker segment operations.
\******************************************************************************/
static void jpc_siz_destroyparms(jpc_ms_t *ms)
{
jpc_siz_t *siz = &ms->parms.siz;
if (siz->comps) {
jas_free(siz->comps);
}
}
static int jpc_siz_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate,
jas_stream_t *in)
{
jpc_siz_t *siz = &ms->parms.siz;
unsigned int i;
uint_fast8_t tmp;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (jpc_getuint16(in, &siz->caps) ||
jpc_getuint32(in, &siz->width) ||
jpc_getuint32(in, &siz->height) ||
jpc_getuint32(in, &siz->xoff) ||
jpc_getuint32(in, &siz->yoff) ||
jpc_getuint32(in, &siz->tilewidth) ||
jpc_getuint32(in, &siz->tileheight) ||
jpc_getuint32(in, &siz->tilexoff) ||
jpc_getuint32(in, &siz->tileyoff) ||
jpc_getuint16(in, &siz->numcomps)) {
return -1;
}
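	/* Reject degenerate image/tile geometry and component counts
	  outside the 1 to 16384 range the SIZ marker permits. */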
if (!siz->width || !siz->height || !siz->tilewidth ||
!siz->tileheight || !siz->numcomps || siz->numcomps > 16384) {
return -1;
}
if (!(siz->comps = jas_alloc2(siz->numcomps, sizeof(jpc_sizcomp_t)))) {
return -1;
}
for (i = 0; i < siz->numcomps; ++i) {
if (jpc_getuint8(in, &tmp) ||
jpc_getuint8(in, &siz->comps[i].hsamp) ||
jpc_getuint8(in, &siz->comps[i].vsamp)) {
jas_free(siz->comps);
return -1;
}
if (siz->comps[i].hsamp == 0 || siz->comps[i].hsamp > 255) {
jas_eprintf("invalid XRsiz value %d\n", siz->comps[i].hsamp);
jas_free(siz->comps);
return -1;
}
if (siz->comps[i].vsamp == 0 || siz->comps[i].vsamp > 255) {
jas_eprintf("invalid YRsiz value %d\n", siz->comps[i].vsamp);
jas_free(siz->comps);
return -1;
}
siz->comps[i].sgnd = (tmp >> 7) & 1;
siz->comps[i].prec = (tmp & 0x7f) + 1;
}
if (jas_stream_eof(in)) {
jas_free(siz->comps);
return -1;
}
return 0;
}
static int jpc_siz_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_siz_t *siz = &ms->parms.siz;
unsigned int i;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
assert(siz->width && siz->height && siz->tilewidth &&
siz->tileheight && siz->numcomps);
if (jpc_putuint16(out, siz->caps) ||
jpc_putuint32(out, siz->width) ||
jpc_putuint32(out, siz->height) ||
jpc_putuint32(out, siz->xoff) ||
jpc_putuint32(out, siz->yoff) ||
jpc_putuint32(out, siz->tilewidth) ||
jpc_putuint32(out, siz->tileheight) ||
jpc_putuint32(out, siz->tilexoff) ||
jpc_putuint32(out, siz->tileyoff) ||
jpc_putuint16(out, siz->numcomps)) {
return -1;
}
for (i = 0; i < siz->numcomps; ++i) {
if (jpc_putuint8(out, ((siz->comps[i].sgnd & 1) << 7) |
((siz->comps[i].prec - 1) & 0x7f)) ||
jpc_putuint8(out, siz->comps[i].hsamp) ||
jpc_putuint8(out, siz->comps[i].vsamp)) {
return -1;
}
}
return 0;
}
static int jpc_siz_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_siz_t *siz = &ms->parms.siz;
unsigned int i;
fprintf(out, "caps = 0x%02"PRIxFAST16";\n", siz->caps);
fprintf(out, "width = %"PRIuFAST32"; height = %"PRIuFAST32"; xoff = %"PRIuFAST32"; yoff = %" PRIuFAST32 ";\n",
siz->width, siz->height, siz->xoff, siz->yoff);
fprintf(out, "tilewidth = %"PRIuFAST32"; tileheight = %"PRIuFAST32"; "
"tilexoff = %"PRIuFAST32"; tileyoff = %" PRIuFAST32 ";\n",
siz->tilewidth, siz->tileheight, siz->tilexoff, siz->tileyoff);
fprintf(out, "numcomps = %"PRIuFAST16";\n", siz->numcomps);
for (i = 0; i < siz->numcomps; ++i) {
fprintf(out, "prec[%d] = %d; sgnd[%d] = %d; hsamp[%d] = %d; "
"vsamp[%d] = %d\n", i, siz->comps[i].prec, i,
siz->comps[i].sgnd, i, siz->comps[i].hsamp, i,
siz->comps[i].vsamp);
}
return 0;
}
/******************************************************************************\
* COD marker segment operations.
\******************************************************************************/
static void jpc_cod_destroyparms(jpc_ms_t *ms)
{
jpc_cod_t *cod = &ms->parms.cod;
jpc_cox_destroycompparms(&cod->compparms);
}
static int jpc_cod_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_cod_t *cod = &ms->parms.cod;
if (jpc_getuint8(in, &cod->csty)) {
return -1;
}
if (jpc_getuint8(in, &cod->prg) ||
jpc_getuint16(in, &cod->numlyrs) ||
jpc_getuint8(in, &cod->mctrans)) {
return -1;
}
if (cod->numlyrs < 1 || cod->numlyrs > 65535) {
return -1;
}
if (jpc_cox_getcompparms(ms, cstate, in,
(cod->csty & JPC_COX_PRT) != 0, &cod->compparms)) {
return -1;
}
if (jas_stream_eof(in)) {
jpc_cod_destroyparms(ms);
return -1;
}
return 0;
}
static int jpc_cod_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_cod_t *cod = &ms->parms.cod;
assert(cod->numlyrs > 0 && cod->compparms.numdlvls <= 32);
assert(cod->compparms.numdlvls == cod->compparms.numrlvls - 1);
if (jpc_putuint8(out, cod->compparms.csty) ||
jpc_putuint8(out, cod->prg) ||
jpc_putuint16(out, cod->numlyrs) ||
jpc_putuint8(out, cod->mctrans)) {
return -1;
}
if (jpc_cox_putcompparms(ms, cstate, out,
(cod->csty & JPC_COX_PRT) != 0, &cod->compparms)) {
return -1;
}
return 0;
}
static int jpc_cod_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_cod_t *cod = &ms->parms.cod;
int i;
fprintf(out, "csty = 0x%02x;\n", cod->compparms.csty);
fprintf(out, "numdlvls = %d; qmfbid = %d; mctrans = %d\n",
cod->compparms.numdlvls, cod->compparms.qmfbid, cod->mctrans);
fprintf(out, "prg = %d; numlyrs = %"PRIuFAST16";\n",
cod->prg, cod->numlyrs);
fprintf(out, "cblkwidthval = %d; cblkheightval = %d; "
"cblksty = 0x%02x;\n", cod->compparms.cblkwidthval, cod->compparms.cblkheightval,
cod->compparms.cblksty);
if (cod->csty & JPC_COX_PRT) {
for (i = 0; i < cod->compparms.numrlvls; ++i) {
jas_eprintf("prcwidth[%d] = %d, prcheight[%d] = %d\n",
i, cod->compparms.rlvls[i].parwidthval,
i, cod->compparms.rlvls[i].parheightval);
}
}
return 0;
}
/******************************************************************************\
* COC marker segment operations.
\******************************************************************************/
static void jpc_coc_destroyparms(jpc_ms_t *ms)
{
jpc_coc_t *coc = &ms->parms.coc;
jpc_cox_destroycompparms(&coc->compparms);
}
static int jpc_coc_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_coc_t *coc = &ms->parms.coc;
uint_fast8_t tmp;
if (cstate->numcomps <= 256) {
if (jpc_getuint8(in, &tmp)) {
return -1;
}
coc->compno = tmp;
} else {
if (jpc_getuint16(in, &coc->compno)) {
return -1;
}
}
if (jpc_getuint8(in, &coc->compparms.csty)) {
return -1;
}
if (jpc_cox_getcompparms(ms, cstate, in,
(coc->compparms.csty & JPC_COX_PRT) != 0, &coc->compparms)) {
return -1;
}
if (jas_stream_eof(in)) {
return -1;
}
return 0;
}
static int jpc_coc_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_coc_t *coc = &ms->parms.coc;
assert(coc->compparms.numdlvls <= 32);
if (cstate->numcomps <= 256) {
if (jpc_putuint8(out, coc->compno)) {
return -1;
}
} else {
if (jpc_putuint16(out, coc->compno)) {
return -1;
}
}
if (jpc_putuint8(out, coc->compparms.csty)) {
return -1;
}
if (jpc_cox_putcompparms(ms, cstate, out,
(coc->compparms.csty & JPC_COX_PRT) != 0, &coc->compparms)) {
return -1;
}
return 0;
}
static int jpc_coc_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_coc_t *coc = &ms->parms.coc;
fprintf(out, "compno = %"PRIuFAST16"; csty = 0x%02x; numdlvls = %d;\n",
coc->compno, coc->compparms.csty, coc->compparms.numdlvls);
fprintf(out, "cblkwidthval = %d; cblkheightval = %d; "
"cblksty = 0x%02x; qmfbid = %d;\n", coc->compparms.cblkwidthval,
coc->compparms.cblkheightval, coc->compparms.cblksty, coc->compparms.qmfbid);
return 0;
}
/******************************************************************************\
* COD/COC marker segment operation helper functions.
\******************************************************************************/
static void jpc_cox_destroycompparms(jpc_coxcp_t *compparms)
{
/* Eliminate compiler warning about unused variables. */
compparms = 0;
}
static int jpc_cox_getcompparms(jpc_ms_t *ms, jpc_cstate_t *cstate,
jas_stream_t *in, int prtflag, jpc_coxcp_t *compparms)
{
uint_fast8_t tmp;
int i;
/* Eliminate compiler warning about unused variables. */
ms = 0;
cstate = 0;
if (jpc_getuint8(in, &compparms->numdlvls) ||
jpc_getuint8(in, &compparms->cblkwidthval) ||
jpc_getuint8(in, &compparms->cblkheightval) ||
jpc_getuint8(in, &compparms->cblksty) ||
jpc_getuint8(in, &compparms->qmfbid)) {
return -1;
}
if (compparms->numdlvls > 32) {
goto error;
}
compparms->numrlvls = compparms->numdlvls + 1;
if (compparms->numrlvls > JPC_MAXRLVLS) {
goto error;
}
if (prtflag) {
for (i = 0; i < compparms->numrlvls; ++i) {
if (jpc_getuint8(in, &tmp)) {
goto error;
}
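			/* Each precinct-size byte packs the precinct width
			   exponent into the low nibble and the height
			   exponent into the high nibble. */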
compparms->rlvls[i].parwidthval = tmp & 0xf;
compparms->rlvls[i].parheightval = (tmp >> 4) & 0xf;
}
		/* Sigh.  This bit really ought to occupy the same field in
		   both the COC and COD marker segments; mirror it into the
		   per-component coding style here. */
compparms->csty |= JPC_COX_PRT;
}
if (jas_stream_eof(in)) {
goto error;
}
return 0;
error:
if (compparms) {
jpc_cox_destroycompparms(compparms);
}
return -1;
}
static int jpc_cox_putcompparms(jpc_ms_t *ms, jpc_cstate_t *cstate,
jas_stream_t *out, int prtflag, jpc_coxcp_t *compparms)
{
int i;
assert(compparms->numdlvls <= 32);
/* Eliminate compiler warning about unused variables. */
ms = 0;
cstate = 0;
if (jpc_putuint8(out, compparms->numdlvls) ||
jpc_putuint8(out, compparms->cblkwidthval) ||
jpc_putuint8(out, compparms->cblkheightval) ||
jpc_putuint8(out, compparms->cblksty) ||
jpc_putuint8(out, compparms->qmfbid)) {
return -1;
}
if (prtflag) {
for (i = 0; i < compparms->numrlvls; ++i) {
if (jpc_putuint8(out,
((compparms->rlvls[i].parheightval & 0xf) << 4) |
(compparms->rlvls[i].parwidthval & 0xf))) {
return -1;
}
}
}
return 0;
}
/******************************************************************************\
* RGN marker segment operations.
\******************************************************************************/
static int jpc_rgn_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_rgn_t *rgn = &ms->parms.rgn;
uint_fast8_t tmp;
if (cstate->numcomps <= 256) {
if (jpc_getuint8(in, &tmp)) {
return -1;
}
rgn->compno = tmp;
} else {
if (jpc_getuint16(in, &rgn->compno)) {
return -1;
}
}
if (jpc_getuint8(in, &rgn->roisty) ||
jpc_getuint8(in, &rgn->roishift)) {
return -1;
}
return 0;
}
static int jpc_rgn_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_rgn_t *rgn = &ms->parms.rgn;
if (cstate->numcomps <= 256) {
if (jpc_putuint8(out, rgn->compno)) {
return -1;
}
} else {
if (jpc_putuint16(out, rgn->compno)) {
return -1;
}
}
if (jpc_putuint8(out, rgn->roisty) ||
jpc_putuint8(out, rgn->roishift)) {
return -1;
}
return 0;
}
static int jpc_rgn_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_rgn_t *rgn = &ms->parms.rgn;
fprintf(out, "compno = %"PRIuFAST16"; roisty = %d; roishift = %d\n",
rgn->compno, rgn->roisty, rgn->roishift);
return 0;
}
/******************************************************************************\
* QCD marker segment operations.
\******************************************************************************/
static void jpc_qcd_destroyparms(jpc_ms_t *ms)
{
jpc_qcd_t *qcd = &ms->parms.qcd;
jpc_qcx_destroycompparms(&qcd->compparms);
}
static int jpc_qcd_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_qcxcp_t *compparms = &ms->parms.qcd.compparms;
return jpc_qcx_getcompparms(compparms, cstate, in, ms->len);
}
static int jpc_qcd_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_qcxcp_t *compparms = &ms->parms.qcd.compparms;
return jpc_qcx_putcompparms(compparms, cstate, out);
}
static int jpc_qcd_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_qcd_t *qcd = &ms->parms.qcd;
int i;
fprintf(out, "qntsty = %d; numguard = %d; numstepsizes = %d\n",
(int) qcd->compparms.qntsty, qcd->compparms.numguard, qcd->compparms.numstepsizes);
for (i = 0; i < qcd->compparms.numstepsizes; ++i) {
fprintf(out, "expn[%d] = 0x%04x; mant[%d] = 0x%04x;\n",
i, JAS_CAST(unsigned, JPC_QCX_GETEXPN(qcd->compparms.stepsizes[i])),
i, JAS_CAST(unsigned, JPC_QCX_GETMANT(qcd->compparms.stepsizes[i])));
}
return 0;
}
/******************************************************************************\
* QCC marker segment operations.
\******************************************************************************/
static void jpc_qcc_destroyparms(jpc_ms_t *ms)
{
jpc_qcc_t *qcc = &ms->parms.qcc;
jpc_qcx_destroycompparms(&qcc->compparms);
}
static int jpc_qcc_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_qcc_t *qcc = &ms->parms.qcc;
uint_fast8_t tmp;
int len;
len = ms->len;
if (cstate->numcomps <= 256) {
if (jpc_getuint8(in, &tmp)) {
return -1;
}
qcc->compno = tmp;
--len;
} else {
if (jpc_getuint16(in, &qcc->compno)) {
return -1;
}
len -= 2;
}
if (jpc_qcx_getcompparms(&qcc->compparms, cstate, in, len)) {
return -1;
}
if (jas_stream_eof(in)) {
jpc_qcc_destroyparms(ms);
return -1;
}
return 0;
}
static int jpc_qcc_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_qcc_t *qcc = &ms->parms.qcc;
if (cstate->numcomps <= 256) {
if (jpc_putuint8(out, qcc->compno)) {
return -1;
}
} else {
if (jpc_putuint16(out, qcc->compno)) {
return -1;
}
}
if (jpc_qcx_putcompparms(&qcc->compparms, cstate, out)) {
return -1;
}
return 0;
}
static int jpc_qcc_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_qcc_t *qcc = &ms->parms.qcc;
int i;
fprintf(out, "compno = %"PRIuFAST16"; qntsty = %d; numguard = %d; "
"numstepsizes = %d\n", qcc->compno, qcc->compparms.qntsty, qcc->compparms.numguard,
qcc->compparms.numstepsizes);
for (i = 0; i < qcc->compparms.numstepsizes; ++i) {
fprintf(out, "expn[%d] = 0x%04x; mant[%d] = 0x%04x;\n",
i, (unsigned) JPC_QCX_GETEXPN(qcc->compparms.stepsizes[i]),
i, (unsigned) JPC_QCX_GETMANT(qcc->compparms.stepsizes[i]));
}
return 0;
}
/******************************************************************************\
* QCD/QCC marker segment helper functions.
\******************************************************************************/
static void jpc_qcx_destroycompparms(jpc_qcxcp_t *compparms)
{
	if (compparms->stepsizes) {
		jas_free(compparms->stepsizes);
		/* Null the pointer so repeated destruction is harmless. */
		compparms->stepsizes = 0;
	}
}
static int jpc_qcx_getcompparms(jpc_qcxcp_t *compparms, jpc_cstate_t *cstate,
jas_stream_t *in, uint_fast16_t len)
{
uint_fast8_t tmp;
int n;
int i;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
n = 0;
if (jpc_getuint8(in, &tmp)) {
return -1;
}
++n;
compparms->qntsty = tmp & 0x1f;
compparms->numguard = (tmp >> 5) & 7;
switch (compparms->qntsty) {
case JPC_QCX_SIQNT:
compparms->numstepsizes = 1;
break;
case JPC_QCX_NOQNT:
compparms->numstepsizes = (len - n);
break;
case JPC_QCX_SEQNT:
/* XXX - this is a hack */
compparms->numstepsizes = (len - n) / 2;
break;
}
/* Ensure that the step size array is sufficiently large. */
if (compparms->numstepsizes > 3 * JPC_MAXRLVLS + 1) {
jpc_qcx_destroycompparms(compparms);
return -1;
}
if (compparms->numstepsizes > 0) {
if (!(compparms->stepsizes = jas_alloc2(compparms->numstepsizes,
sizeof(uint_fast16_t)))) {
			/* Fail gracefully instead of aborting the process. */
			return -1;
}
for (i = 0; i < compparms->numstepsizes; ++i) {
if (compparms->qntsty == JPC_QCX_NOQNT) {
				if (jpc_getuint8(in, &tmp)) {
					jpc_qcx_destroycompparms(compparms);
					return -1;
				}
compparms->stepsizes[i] = JPC_QCX_EXPN(tmp >> 3);
} else {
				if (jpc_getuint16(in, &compparms->stepsizes[i])) {
					jpc_qcx_destroycompparms(compparms);
					return -1;
				}
}
}
} else {
compparms->stepsizes = 0;
}
if (jas_stream_error(in) || jas_stream_eof(in)) {
jpc_qcx_destroycompparms(compparms);
return -1;
}
return 0;
}
static int jpc_qcx_putcompparms(jpc_qcxcp_t *compparms, jpc_cstate_t *cstate,
jas_stream_t *out)
{
int i;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
jpc_putuint8(out, ((compparms->numguard & 7) << 5) | compparms->qntsty);
for (i = 0; i < compparms->numstepsizes; ++i) {
if (compparms->qntsty == JPC_QCX_NOQNT) {
if (jpc_putuint8(out, JPC_QCX_GETEXPN(
compparms->stepsizes[i]) << 3)) {
return -1;
}
} else {
if (jpc_putuint16(out, compparms->stepsizes[i])) {
return -1;
}
}
}
return 0;
}
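/*
 * Illustrative sketch (not part of the original code): how a reversible
 * (JPC_QCX_NOQNT) step size round-trips through the JPC_QCX_* macros used
 * by the parser above.  This assumes the macros pack the exponent and
 * mantissa into disjoint bit fields of the step size, which is all the
 * code above relies on.
 */
#if 0
static void jpc_qcx_stepsize_example(void)
{
	uint_fast8_t spqcx = 0x58; /* raw SPqcx byte as read from the stream */
	uint_fast16_t stepsize;
	/* The parser keeps only the 5-bit exponent (bits 3-7 of SPqcx). */
	stepsize = JPC_QCX_EXPN(spqcx >> 3);
	assert(JPC_QCX_GETEXPN(stepsize) == (spqcx >> 3));
	assert(JPC_QCX_GETMANT(stepsize) == 0);
}
#endif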
/******************************************************************************\
* SOP marker segment operations.
\******************************************************************************/
static int jpc_sop_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_sop_t *sop = &ms->parms.sop;
/* Eliminate compiler warning about unused variable. */
cstate = 0;
if (jpc_getuint16(in, &sop->seqno)) {
return -1;
}
return 0;
}
static int jpc_sop_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_sop_t *sop = &ms->parms.sop;
/* Eliminate compiler warning about unused variable. */
cstate = 0;
if (jpc_putuint16(out, sop->seqno)) {
return -1;
}
return 0;
}
static int jpc_sop_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_sop_t *sop = &ms->parms.sop;
fprintf(out, "seqno = %"PRIuFAST16";\n", sop->seqno);
return 0;
}
/******************************************************************************\
* PPM marker segment operations.
\******************************************************************************/
static void jpc_ppm_destroyparms(jpc_ms_t *ms)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
	if (ppm->data) {
		jas_free(ppm->data);
		/* Null the pointer so repeated destruction is harmless. */
		ppm->data = 0;
	}
}
static int jpc_ppm_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
ppm->data = 0;
if (ms->len < 1) {
goto error;
}
if (jpc_getuint8(in, &ppm->ind)) {
goto error;
}
ppm->len = ms->len - 1;
if (ppm->len > 0) {
if (!(ppm->data = jas_malloc(ppm->len))) {
goto error;
}
if (JAS_CAST(uint, jas_stream_read(in, ppm->data, ppm->len)) != ppm->len) {
goto error;
}
} else {
ppm->data = 0;
}
return 0;
error:
jpc_ppm_destroyparms(ms);
return -1;
}
static int jpc_ppm_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (JAS_CAST(uint, jas_stream_write(out, (char *) ppm->data, ppm->len)) != ppm->len) {
return -1;
}
return 0;
}
static int jpc_ppm_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
fprintf(out, "ind=%d; len = %"PRIuFAST16";\n", ppm->ind, ppm->len);
if (ppm->len > 0) {
fprintf(out, "data =\n");
jas_memdump(out, ppm->data, ppm->len);
}
return 0;
}
/******************************************************************************\
* PPT marker segment operations.
\******************************************************************************/
static void jpc_ppt_destroyparms(jpc_ms_t *ms)
{
jpc_ppt_t *ppt = &ms->parms.ppt;
	if (ppt->data) {
		jas_free(ppt->data);
		/* Null the pointer so repeated destruction is harmless. */
		ppt->data = 0;
	}
}
static int jpc_ppt_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_ppt_t *ppt = &ms->parms.ppt;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
ppt->data = 0;
if (ms->len < 1) {
goto error;
}
if (jpc_getuint8(in, &ppt->ind)) {
goto error;
}
ppt->len = ms->len - 1;
if (ppt->len > 0) {
if (!(ppt->data = jas_malloc(ppt->len))) {
goto error;
}
if (jas_stream_read(in, (char *) ppt->data, ppt->len) != JAS_CAST(int, ppt->len)) {
goto error;
}
} else {
ppt->data = 0;
}
return 0;
error:
jpc_ppt_destroyparms(ms);
return -1;
}
static int jpc_ppt_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_ppt_t *ppt = &ms->parms.ppt;
/* Eliminate compiler warning about unused variable. */
cstate = 0;
if (jpc_putuint8(out, ppt->ind)) {
return -1;
}
if (jas_stream_write(out, (char *) ppt->data, ppt->len) != JAS_CAST(int, ppt->len)) {
return -1;
}
return 0;
}
static int jpc_ppt_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_ppt_t *ppt = &ms->parms.ppt;
fprintf(out, "ind=%d; len = %"PRIuFAST32";\n", ppt->ind, ppt->len);
if (ppt->len > 0) {
fprintf(out, "data =\n");
jas_memdump(out, ppt->data, ppt->len);
}
return 0;
}
/******************************************************************************\
* POC marker segment operations.
\******************************************************************************/
static void jpc_poc_destroyparms(jpc_ms_t *ms)
{
jpc_poc_t *poc = &ms->parms.poc;
	if (poc->pchgs) {
		jas_free(poc->pchgs);
		/* Null the pointer so repeated destruction is harmless. */
		poc->pchgs = 0;
	}
}
static int jpc_poc_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_poc_t *poc = &ms->parms.poc;
jpc_pocpchg_t *pchg;
int pchgno;
uint_fast8_t tmp;
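	/* Each progression change record occupies 7 bytes, or 9 when the
	   image has more than 256 components and component numbers take two
	   bytes each: rlvlnostart(1) + compnostart(1 or 2) + lyrnoend(2) +
	   rlvlnoend(1) + compnoend(1 or 2) + prgord(1). */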
poc->numpchgs = (cstate->numcomps > 256) ? (ms->len / 9) :
(ms->len / 7);
if (!(poc->pchgs = jas_alloc2(poc->numpchgs, sizeof(jpc_pocpchg_t)))) {
goto error;
}
for (pchgno = 0, pchg = poc->pchgs; pchgno < poc->numpchgs; ++pchgno,
++pchg) {
if (jpc_getuint8(in, &pchg->rlvlnostart)) {
goto error;
}
if (cstate->numcomps > 256) {
if (jpc_getuint16(in, &pchg->compnostart)) {
goto error;
}
} else {
if (jpc_getuint8(in, &tmp)) {
goto error;
			}
pchg->compnostart = tmp;
}
if (jpc_getuint16(in, &pchg->lyrnoend) ||
jpc_getuint8(in, &pchg->rlvlnoend)) {
goto error;
}
if (cstate->numcomps > 256) {
if (jpc_getuint16(in, &pchg->compnoend)) {
goto error;
}
} else {
if (jpc_getuint8(in, &tmp)) {
goto error;
}
pchg->compnoend = tmp;
}
if (jpc_getuint8(in, &pchg->prgord)) {
goto error;
}
if (pchg->rlvlnostart > pchg->rlvlnoend ||
pchg->compnostart > pchg->compnoend) {
goto error;
}
}
return 0;
error:
jpc_poc_destroyparms(ms);
return -1;
}
static int jpc_poc_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_poc_t *poc = &ms->parms.poc;
jpc_pocpchg_t *pchg;
int pchgno;
for (pchgno = 0, pchg = poc->pchgs; pchgno < poc->numpchgs; ++pchgno,
++pchg) {
if (jpc_putuint8(out, pchg->rlvlnostart) ||
((cstate->numcomps > 256) ?
jpc_putuint16(out, pchg->compnostart) :
jpc_putuint8(out, pchg->compnostart)) ||
jpc_putuint16(out, pchg->lyrnoend) ||
jpc_putuint8(out, pchg->rlvlnoend) ||
((cstate->numcomps > 256) ?
jpc_putuint16(out, pchg->compnoend) :
jpc_putuint8(out, pchg->compnoend)) ||
jpc_putuint8(out, pchg->prgord)) {
return -1;
}
}
return 0;
}
static int jpc_poc_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_poc_t *poc = &ms->parms.poc;
jpc_pocpchg_t *pchg;
int pchgno;
for (pchgno = 0, pchg = poc->pchgs; pchgno < poc->numpchgs;
++pchgno, ++pchg) {
fprintf(out, "po[%d] = %d; ", pchgno, pchg->prgord);
fprintf(out, "cs[%d] = %"PRIuFAST16"; ce[%d] = %"PRIuFAST16"; ",
pchgno, pchg->compnostart, pchgno, pchg->compnoend);
fprintf(out, "rs[%d] = %d; re[%d] = %d; ",
pchgno, pchg->rlvlnostart, pchgno, pchg->rlvlnoend);
fprintf(out, "le[%d] = %"PRIuFAST16"\n", pchgno, pchg->lyrnoend);
}
return 0;
}
/******************************************************************************\
* CRG marker segment operations.
\******************************************************************************/
static void jpc_crg_destroyparms(jpc_ms_t *ms)
{
jpc_crg_t *crg = &ms->parms.crg;
	if (crg->comps) {
		jas_free(crg->comps);
		/* Null the pointer so repeated destruction is harmless. */
		crg->comps = 0;
	}
}
static int jpc_crg_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_crg_t *crg = &ms->parms.crg;
jpc_crgcomp_t *comp;
uint_fast16_t compno;
crg->numcomps = cstate->numcomps;
if (!(crg->comps = jas_alloc2(cstate->numcomps, sizeof(jpc_crgcomp_t)))) {
return -1;
}
for (compno = 0, comp = crg->comps; compno < cstate->numcomps;
++compno, ++comp) {
if (jpc_getuint16(in, &comp->hoff) ||
jpc_getuint16(in, &comp->voff)) {
jpc_crg_destroyparms(ms);
return -1;
}
}
return 0;
}
static int jpc_crg_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_crg_t *crg = &ms->parms.crg;
int compno;
jpc_crgcomp_t *comp;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
for (compno = 0, comp = crg->comps; compno < crg->numcomps; ++compno,
++comp) {
if (jpc_putuint16(out, comp->hoff) ||
jpc_putuint16(out, comp->voff)) {
return -1;
}
}
return 0;
}
static int jpc_crg_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_crg_t *crg = &ms->parms.crg;
int compno;
jpc_crgcomp_t *comp;
for (compno = 0, comp = crg->comps; compno < crg->numcomps; ++compno,
++comp) {
fprintf(out, "hoff[%d] = %"PRIuFAST16"; voff[%d] = %"PRIuFAST16"\n",
compno, comp->hoff, compno, comp->voff);
}
return 0;
}
/******************************************************************************\
* Operations for COM marker segment.
\******************************************************************************/
static void jpc_com_destroyparms(jpc_ms_t *ms)
{
jpc_com_t *com = &ms->parms.com;
if (com->data) {
jas_free(com->data);
}
}
static int jpc_com_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_com_t *com = &ms->parms.com;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (jpc_getuint16(in, &com->regid)) {
return -1;
}
com->len = ms->len - 2;
if (com->len > 0) {
if (!(com->data = jas_malloc(com->len))) {
return -1;
}
if (jas_stream_read(in, com->data, com->len) != JAS_CAST(int, com->len)) {
return -1;
}
} else {
com->data = 0;
}
return 0;
}
static int jpc_com_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_com_t *com = &ms->parms.com;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (jpc_putuint16(out, com->regid)) {
return -1;
}
if (jas_stream_write(out, com->data, com->len) != JAS_CAST(int, com->len)) {
return -1;
}
return 0;
}
static int jpc_com_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_com_t *com = &ms->parms.com;
unsigned int i;
int printable;
fprintf(out, "regid = %"PRIuFAST16";\n", com->regid);
printable = 1;
for (i = 0; i < com->len; ++i) {
if (!isprint(com->data[i])) {
printable = 0;
break;
}
}
if (printable) {
fprintf(out, "data = ");
fwrite(com->data, sizeof(char), com->len, out);
fprintf(out, "\n");
}
return 0;
}
/******************************************************************************\
* Operations for unknown types of marker segments.
\******************************************************************************/
static void jpc_unk_destroyparms(jpc_ms_t *ms)
{
jpc_unk_t *unk = &ms->parms.unk;
if (unk->data) {
jas_free(unk->data);
}
}
static int jpc_unk_getparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *in)
{
jpc_unk_t *unk = &ms->parms.unk;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (ms->len > 0) {
if (!(unk->data = jas_alloc2(ms->len, sizeof(unsigned char)))) {
return -1;
}
if (jas_stream_read(in, (char *) unk->data, ms->len) != JAS_CAST(int, ms->len)) {
			jas_free(unk->data);
			unk->data = 0;
return -1;
}
unk->len = ms->len;
} else {
unk->data = 0;
unk->len = 0;
}
return 0;
}
static int jpc_unk_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
/* Eliminate compiler warning about unused variables. */
cstate = 0;
ms = 0;
out = 0;
/* If this function is called, we are trying to write an unsupported
type of marker segment. Return with an error indication. */
return -1;
}
static int jpc_unk_dumpparms(jpc_ms_t *ms, FILE *out)
{
unsigned int i;
jpc_unk_t *unk = &ms->parms.unk;
for (i = 0; i < unk->len; ++i) {
fprintf(out, "%02x ", unk->data[i]);
}
return 0;
}
/******************************************************************************\
* Primitive I/O operations.
\******************************************************************************/
int jpc_getuint8(jas_stream_t *in, uint_fast8_t *val)
{
int c;
if ((c = jas_stream_getc(in)) == EOF) {
return -1;
}
if (val) {
*val = c;
}
return 0;
}
int jpc_putuint8(jas_stream_t *out, uint_fast8_t val)
{
if (jas_stream_putc(out, val & 0xff) == EOF) {
return -1;
}
return 0;
}
int jpc_getuint16(jas_stream_t *in, uint_fast16_t *val)
{
uint_fast16_t v;
int c;
if ((c = jas_stream_getc(in)) == EOF) {
return -1;
}
v = c;
if ((c = jas_stream_getc(in)) == EOF) {
return -1;
}
v = (v << 8) | c;
if (val) {
*val = v;
}
return 0;
}
int jpc_putuint16(jas_stream_t *out, uint_fast16_t val)
{
if (jas_stream_putc(out, (val >> 8) & 0xff) == EOF ||
jas_stream_putc(out, val & 0xff) == EOF) {
return -1;
}
return 0;
}
int jpc_getuint32(jas_stream_t *in, uint_fast32_t *val)
{
uint_fast32_t v;
int c;
if ((c = jas_stream_getc(in)) == EOF) {
return -1;
}
v = c;
if ((c = jas_stream_getc(in)) == EOF) {
return -1;
}
v = (v << 8) | c;
if ((c = jas_stream_getc(in)) == EOF) {
return -1;
}
v = (v << 8) | c;
if ((c = jas_stream_getc(in)) == EOF) {
return -1;
}
v = (v << 8) | c;
if (val) {
*val = v;
}
return 0;
}
int jpc_putuint32(jas_stream_t *out, uint_fast32_t val)
{
if (jas_stream_putc(out, (val >> 24) & 0xff) == EOF ||
jas_stream_putc(out, (val >> 16) & 0xff) == EOF ||
jas_stream_putc(out, (val >> 8) & 0xff) == EOF ||
jas_stream_putc(out, val & 0xff) == EOF) {
return -1;
}
return 0;
}
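/*
 * Illustrative sketch (not part of the original code): the get/put
 * primitives above exchange big-endian integers, so a value written with
 * jpc_putuint32() reads back unchanged.  This assumes the
 * jas_stream_memopen(), jas_stream_rewind(), and jas_stream_close()
 * interfaces from jas_stream.h.
 */
#if 0
static int jpc_uint_io_example(void)
{
	jas_stream_t *s;
	uint_fast32_t v;
	/* A null buffer requests a growable in-memory stream. */
	if (!(s = jas_stream_memopen(0, 0))) {
		return -1;
	}
	/* 0x01020304 is serialized as the byte sequence 01 02 03 04. */
	if (jpc_putuint32(s, 0x01020304) || jas_stream_rewind(s) < 0 ||
	  jpc_getuint32(s, &v) || v != 0x01020304) {
		jas_stream_close(s);
		return -1;
	}
	jas_stream_close(s);
	return 0;
}
#endif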
/******************************************************************************\
* Miscellany
\******************************************************************************/
static jpc_mstabent_t *jpc_mstab_lookup(int id)
{
jpc_mstabent_t *mstabent;
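	/* The table's final entry has a negative id and acts as a
	   catch-all, so this loop always terminates. */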
for (mstabent = jpc_mstab;; ++mstabent) {
if (mstabent->id == id || mstabent->id < 0) {
return mstabent;
}
}
assert(0);
return 0;
}
int jpc_validate(jas_stream_t *in)
{
int n;
int i;
unsigned char buf[2];
assert(JAS_STREAM_MAXPUTBACK >= 2);
if ((n = jas_stream_read(in, (char *) buf, 2)) < 0) {
return -1;
}
for (i = n - 1; i >= 0; --i) {
if (jas_stream_ungetc(in, buf[i]) == EOF) {
return -1;
}
}
if (n < 2) {
return -1;
}
if (buf[0] == (JPC_MS_SOC >> 8) && buf[1] == (JPC_MS_SOC & 0xff)) {
return 0;
}
return -1;
}
int jpc_getdata(jas_stream_t *in, jas_stream_t *out, long len)
{
return jas_stream_copy(out, in, len);
}
int jpc_putdata(jas_stream_t *out, jas_stream_t *in, long len)
{
return jas_stream_copy(out, in, len);
}
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "checkout.h"
#include "git2/repository.h"
#include "git2/refs.h"
#include "git2/tree.h"
#include "git2/blob.h"
#include "git2/config.h"
#include "git2/diff.h"
#include "git2/submodule.h"
#include "git2/sys/index.h"
#include "git2/sys/filter.h"
#include "git2/merge.h"
#include "refs.h"
#include "repository.h"
#include "index.h"
#include "filter.h"
#include "blob.h"
#include "diff.h"
#include "diff_generate.h"
#include "pathspec.h"
#include "buf_text.h"
#include "diff_xdiff.h"
#include "path.h"
#include "attr.h"
#include "pool.h"
#include "strmap.h"
/* See docs/checkout-internals.md for more information */
enum {
CHECKOUT_ACTION__NONE = 0,
CHECKOUT_ACTION__REMOVE = 1,
CHECKOUT_ACTION__UPDATE_BLOB = 2,
CHECKOUT_ACTION__UPDATE_SUBMODULE = 4,
CHECKOUT_ACTION__CONFLICT = 8,
CHECKOUT_ACTION__REMOVE_CONFLICT = 16,
CHECKOUT_ACTION__UPDATE_CONFLICT = 32,
CHECKOUT_ACTION__MAX = 32,
CHECKOUT_ACTION__DEFER_REMOVE = 64,
CHECKOUT_ACTION__REMOVE_AND_UPDATE =
(CHECKOUT_ACTION__UPDATE_BLOB | CHECKOUT_ACTION__REMOVE),
};
typedef struct {
git_repository *repo;
git_iterator *target;
git_diff *diff;
git_checkout_options opts;
bool opts_free_baseline;
char *pfx;
git_index *index;
git_pool pool;
git_vector removes;
git_vector remove_conflicts;
git_vector update_conflicts;
git_vector *update_reuc;
git_vector *update_names;
git_buf target_path;
size_t target_len;
git_buf tmp;
unsigned int strategy;
int can_symlink;
int respect_filemode;
bool reload_submodules;
size_t total_steps;
size_t completed_steps;
git_checkout_perfdata perfdata;
git_strmap *mkdir_map;
git_attr_session attr_session;
} checkout_data;
typedef struct {
const git_index_entry *ancestor;
const git_index_entry *ours;
const git_index_entry *theirs;
int name_collision:1,
directoryfile:1,
one_to_two:1,
binary:1,
submodule:1;
} checkout_conflictdata;
static int checkout_notify(
checkout_data *data,
git_checkout_notify_t why,
const git_diff_delta *delta,
const git_index_entry *wditem)
{
git_diff_file wdfile;
const git_diff_file *baseline = NULL, *target = NULL, *workdir = NULL;
const char *path = NULL;
if (!data->opts.notify_cb ||
(why & data->opts.notify_flags) == 0)
return 0;
if (wditem) {
memset(&wdfile, 0, sizeof(wdfile));
git_oid_cpy(&wdfile.id, &wditem->id);
wdfile.path = wditem->path;
wdfile.size = wditem->file_size;
wdfile.flags = GIT_DIFF_FLAG_VALID_ID;
wdfile.mode = wditem->mode;
workdir = &wdfile;
path = wditem->path;
}
if (delta) {
switch (delta->status) {
case GIT_DELTA_UNMODIFIED:
case GIT_DELTA_MODIFIED:
case GIT_DELTA_TYPECHANGE:
default:
baseline = &delta->old_file;
target = &delta->new_file;
break;
case GIT_DELTA_ADDED:
case GIT_DELTA_IGNORED:
case GIT_DELTA_UNTRACKED:
case GIT_DELTA_UNREADABLE:
target = &delta->new_file;
break;
case GIT_DELTA_DELETED:
baseline = &delta->old_file;
break;
}
path = delta->old_file.path;
}
{
int error = data->opts.notify_cb(
why, path, baseline, target, workdir, data->opts.notify_payload);
return git_error_set_after_callback_function(
error, "git_checkout notification");
}
}
GIT_INLINE(bool) is_workdir_base_or_new(
const git_oid *workdir_id,
const git_diff_file *baseitem,
const git_diff_file *newitem)
{
return (git_oid__cmp(&baseitem->id, workdir_id) == 0 ||
git_oid__cmp(&newitem->id, workdir_id) == 0);
}
GIT_INLINE(bool) is_filemode_changed(git_filemode_t a, git_filemode_t b, int respect_filemode)
{
/* If core.filemode = false, ignore links in the repository and executable bit changes */
if (!respect_filemode) {
if (a == S_IFLNK)
a = GIT_FILEMODE_BLOB;
if (b == S_IFLNK)
b = GIT_FILEMODE_BLOB;
a &= ~0111;
b &= ~0111;
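		/* e.g. 0755 and 0644 both become 0644 here, so an
		 * executable-bit-only difference compares as equal. */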
}
return (a != b);
}
static bool checkout_is_workdir_modified(
checkout_data *data,
const git_diff_file *baseitem,
const git_diff_file *newitem,
const git_index_entry *wditem)
{
git_oid oid;
const git_index_entry *ie;
/* handle "modified" submodule */
if (wditem->mode == GIT_FILEMODE_COMMIT) {
git_submodule *sm;
unsigned int sm_status = 0;
const git_oid *sm_oid = NULL;
bool rval = false;
if (git_submodule_lookup(&sm, data->repo, wditem->path) < 0) {
git_error_clear();
return true;
}
if (git_submodule_status(&sm_status, data->repo, wditem->path, GIT_SUBMODULE_IGNORE_UNSPECIFIED) < 0 ||
GIT_SUBMODULE_STATUS_IS_WD_DIRTY(sm_status))
rval = true;
else if ((sm_oid = git_submodule_wd_id(sm)) == NULL)
rval = false;
else
rval = (git_oid__cmp(&baseitem->id, sm_oid) != 0);
git_submodule_free(sm);
return rval;
}
/*
* Look at the cache to decide if the workdir is modified: if the
* cache contents match the workdir contents, then we do not need
* to examine the working directory directly, instead we can
* examine the cache to see if _it_ has been modified. This allows
* us to avoid touching the disk.
*/
ie = git_index_get_bypath(data->index, wditem->path, 0);
if (ie != NULL &&
git_index_time_eq(&wditem->mtime, &ie->mtime) &&
wditem->file_size == ie->file_size &&
!is_filemode_changed(wditem->mode, ie->mode, data->respect_filemode)) {
/* The workdir is modified iff the index entry is modified */
return !is_workdir_base_or_new(&ie->id, baseitem, newitem) ||
is_filemode_changed(baseitem->mode, ie->mode, data->respect_filemode);
}
/* depending on where base is coming from, we may or may not know
* the actual size of the data, so we can't rely on this shortcut.
*/
if (baseitem->size && wditem->file_size != baseitem->size)
return true;
/* if the workdir item is a directory, it cannot be a modified file */
if (S_ISDIR(wditem->mode))
return false;
if (is_filemode_changed(baseitem->mode, wditem->mode, data->respect_filemode))
return true;
if (git_diff__oid_for_entry(&oid, data->diff, wditem, wditem->mode, NULL) < 0)
return false;
/* Allow the checkout if the workdir is not modified *or* if the checkout
* target's contents are already in the working directory.
*/
return !is_workdir_base_or_new(&oid, baseitem, newitem);
}
#define CHECKOUT_ACTION_IF(FLAG,YES,NO) \
((data->strategy & GIT_CHECKOUT_##FLAG) ? CHECKOUT_ACTION__##YES : CHECKOUT_ACTION__##NO)
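/* Note that this macro implicitly references the local `data`; e.g.
 * CHECKOUT_ACTION_IF(FORCE, REMOVE, CONFLICT) yields
 * CHECKOUT_ACTION__REMOVE when GIT_CHECKOUT_FORCE is set in the strategy
 * and CHECKOUT_ACTION__CONFLICT otherwise.
 */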
static int checkout_action_common(
int *action,
checkout_data *data,
const git_diff_delta *delta,
const git_index_entry *wd)
{
git_checkout_notify_t notify = GIT_CHECKOUT_NOTIFY_NONE;
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0)
*action = (*action & ~CHECKOUT_ACTION__REMOVE);
if ((*action & CHECKOUT_ACTION__UPDATE_BLOB) != 0) {
if (S_ISGITLINK(delta->new_file.mode))
*action = (*action & ~CHECKOUT_ACTION__UPDATE_BLOB) |
CHECKOUT_ACTION__UPDATE_SUBMODULE;
/* to "update" a symlink, we must remove the old one first */
if (delta->new_file.mode == GIT_FILEMODE_LINK && wd != NULL)
*action |= CHECKOUT_ACTION__REMOVE;
/* if the file is on disk and doesn't match our mode, force update */
if (wd &&
GIT_PERMS_IS_EXEC(wd->mode) !=
GIT_PERMS_IS_EXEC(delta->new_file.mode))
*action |= CHECKOUT_ACTION__REMOVE;
notify = GIT_CHECKOUT_NOTIFY_UPDATED;
}
if ((*action & CHECKOUT_ACTION__CONFLICT) != 0)
notify = GIT_CHECKOUT_NOTIFY_CONFLICT;
return checkout_notify(data, notify, delta, wd);
}
static int checkout_action_no_wd(
int *action,
checkout_data *data,
const git_diff_delta *delta)
{
int error = 0;
*action = CHECKOUT_ACTION__NONE;
switch (delta->status) {
case GIT_DELTA_UNMODIFIED: /* case 12 */
error = checkout_notify(data, GIT_CHECKOUT_NOTIFY_DIRTY, delta, NULL);
if (error)
return error;
*action = CHECKOUT_ACTION_IF(RECREATE_MISSING, UPDATE_BLOB, NONE);
break;
case GIT_DELTA_ADDED: /* case 2 or 28 (and 5 but not really) */
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
break;
case GIT_DELTA_MODIFIED: /* case 13 (and 35 but not really) */
*action = CHECKOUT_ACTION_IF(RECREATE_MISSING, UPDATE_BLOB, CONFLICT);
break;
case GIT_DELTA_TYPECHANGE: /* case 21 (B->T) and 28 (T->B)*/
if (delta->new_file.mode == GIT_FILEMODE_TREE)
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
break;
case GIT_DELTA_DELETED: /* case 8 or 25 */
*action = CHECKOUT_ACTION_IF(SAFE, REMOVE, NONE);
break;
default: /* impossible */
break;
}
return checkout_action_common(action, data, delta, NULL);
}
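/* Build the full on-disk path for `path` in the shared data->target_path
 * buffer.  The buffer is reused by the next call, so callers must consume
 * the result immediately.
 */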
static int checkout_target_fullpath(
git_buf **out, checkout_data *data, const char *path)
{
git_buf_truncate(&data->target_path, data->target_len);
if (path && git_buf_puts(&data->target_path, path) < 0)
return -1;
*out = &data->target_path;
return 0;
}
static bool wd_item_is_removable(
checkout_data *data, const git_index_entry *wd)
{
git_buf *full;
if (wd->mode != GIT_FILEMODE_TREE)
return true;
if (checkout_target_fullpath(&full, data, wd->path) < 0)
return false;
return !full || !git_path_contains(full, DOT_GIT);
}
static int checkout_queue_remove(checkout_data *data, const char *path)
{
char *copy = git_pool_strdup(&data->pool, path);
GIT_ERROR_CHECK_ALLOC(copy);
return git_vector_insert(&data->removes, copy);
}
/* note that this advances the iterator over the wd item */
static int checkout_action_wd_only(
checkout_data *data,
git_iterator *workdir,
const git_index_entry **wditem,
git_vector *pathspec)
{
int error = 0;
bool remove = false;
git_checkout_notify_t notify = GIT_CHECKOUT_NOTIFY_NONE;
const git_index_entry *wd = *wditem;
if (!git_pathspec__match(
pathspec, wd->path,
(data->strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH) != 0,
git_iterator_ignore_case(workdir), NULL, NULL))
return git_iterator_advance(wditem, workdir);
/* check if item is tracked in the index but not in the checkout diff */
if (data->index != NULL) {
size_t pos;
error = git_index__find_pos(
&pos, data->index, wd->path, 0, GIT_INDEX_STAGE_ANY);
if (wd->mode != GIT_FILEMODE_TREE) {
if (!error) { /* found by git_index__find_pos call */
notify = GIT_CHECKOUT_NOTIFY_DIRTY;
remove = ((data->strategy & GIT_CHECKOUT_FORCE) != 0);
} else if (error != GIT_ENOTFOUND)
return error;
else
error = 0; /* git_index__find_pos does not set error msg */
} else {
/* for tree entries, we have to see if there are any index
* entries that are contained inside that tree
*/
const git_index_entry *e = git_index_get_byindex(data->index, pos);
if (e != NULL && data->diff->pfxcomp(e->path, wd->path) == 0)
return git_iterator_advance_into(wditem, workdir);
}
}
if (notify != GIT_CHECKOUT_NOTIFY_NONE) {
/* if we found something in the index, notify and advance */
if ((error = checkout_notify(data, notify, NULL, wd)) != 0)
return error;
if (remove && wd_item_is_removable(data, wd))
error = checkout_queue_remove(data, wd->path);
if (!error)
error = git_iterator_advance(wditem, workdir);
} else {
/* untracked or ignored - can't know which until we advance through */
bool over = false, removable = wd_item_is_removable(data, wd);
git_iterator_status_t untracked_state;
/* copy the entry for issuing notification callback later */
git_index_entry saved_wd = *wd;
git_buf_sets(&data->tmp, wd->path);
saved_wd.path = data->tmp.ptr;
error = git_iterator_advance_over(
wditem, &untracked_state, workdir);
if (error == GIT_ITEROVER)
over = true;
else if (error < 0)
return error;
if (untracked_state == GIT_ITERATOR_STATUS_IGNORED) {
notify = GIT_CHECKOUT_NOTIFY_IGNORED;
remove = ((data->strategy & GIT_CHECKOUT_REMOVE_IGNORED) != 0);
} else {
notify = GIT_CHECKOUT_NOTIFY_UNTRACKED;
remove = ((data->strategy & GIT_CHECKOUT_REMOVE_UNTRACKED) != 0);
}
if ((error = checkout_notify(data, notify, NULL, &saved_wd)) != 0)
return error;
if (remove && removable)
error = checkout_queue_remove(data, saved_wd.path);
if (!error && over) /* restore ITEROVER if needed */
error = GIT_ITEROVER;
}
return error;
}
static bool submodule_is_config_only(
checkout_data *data,
const char *path)
{
git_submodule *sm = NULL;
unsigned int sm_loc = 0;
bool rval = false;
if (git_submodule_lookup(&sm, data->repo, path) < 0)
return true;
if (git_submodule_location(&sm_loc, sm) < 0 ||
sm_loc == GIT_SUBMODULE_STATUS_IN_CONFIG)
rval = true;
git_submodule_free(sm);
return rval;
}
static bool checkout_is_empty_dir(checkout_data *data, const char *path)
{
git_buf *fullpath;
if (checkout_target_fullpath(&fullpath, data, path) < 0)
return false;
return git_path_is_empty_dir(fullpath->ptr);
}
static int checkout_action_with_wd(
int *action,
checkout_data *data,
const git_diff_delta *delta,
git_iterator *workdir,
const git_index_entry *wd)
{
*action = CHECKOUT_ACTION__NONE;
switch (delta->status) {
case GIT_DELTA_UNMODIFIED: /* case 14/15 or 33 */
if (checkout_is_workdir_modified(data, &delta->old_file, &delta->new_file, wd)) {
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_DIRTY, delta, wd) );
*action = CHECKOUT_ACTION_IF(FORCE, UPDATE_BLOB, NONE);
}
break;
case GIT_DELTA_ADDED: /* case 3, 4 or 6 */
if (git_iterator_current_is_ignored(workdir))
*action = CHECKOUT_ACTION_IF(DONT_OVERWRITE_IGNORED, CONFLICT, UPDATE_BLOB);
else
*action = CHECKOUT_ACTION_IF(FORCE, UPDATE_BLOB, CONFLICT);
break;
case GIT_DELTA_DELETED: /* case 9 or 10 (or 26 but not really) */
if (checkout_is_workdir_modified(data, &delta->old_file, &delta->new_file, wd))
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE, CONFLICT);
else
*action = CHECKOUT_ACTION_IF(SAFE, REMOVE, NONE);
break;
case GIT_DELTA_MODIFIED: /* case 16, 17, 18 (or 36 but not really) */
if (wd->mode != GIT_FILEMODE_COMMIT &&
checkout_is_workdir_modified(data, &delta->old_file, &delta->new_file, wd))
*action = CHECKOUT_ACTION_IF(FORCE, UPDATE_BLOB, CONFLICT);
else
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
break;
case GIT_DELTA_TYPECHANGE: /* case 22, 23, 29, 30 */
if (delta->old_file.mode == GIT_FILEMODE_TREE) {
if (wd->mode == GIT_FILEMODE_TREE)
/* either deleting items in old tree will delete the wd dir,
* or we'll get a conflict when we attempt blob update...
*/
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
else if (wd->mode == GIT_FILEMODE_COMMIT) {
/* workdir is possibly a "phantom" submodule - treat as a
* tree if the only submodule info came from the config
*/
if (submodule_is_config_only(data, wd->path))
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
else
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
} else
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE, CONFLICT);
}
else if (checkout_is_workdir_modified(data, &delta->old_file, &delta->new_file, wd))
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
else
*action = CHECKOUT_ACTION_IF(SAFE, REMOVE_AND_UPDATE, NONE);
/* don't update if the typechange is to a tree */
if (delta->new_file.mode == GIT_FILEMODE_TREE)
*action = (*action & ~CHECKOUT_ACTION__UPDATE_BLOB);
break;
default: /* impossible */
break;
}
return checkout_action_common(action, data, delta, wd);
}
static int checkout_action_with_wd_blocker(
int *action,
checkout_data *data,
const git_diff_delta *delta,
const git_index_entry *wd)
{
*action = CHECKOUT_ACTION__NONE;
switch (delta->status) {
case GIT_DELTA_UNMODIFIED:
/* should show delta as dirty / deleted */
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_DIRTY, delta, wd) );
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, NONE);
break;
case GIT_DELTA_ADDED:
case GIT_DELTA_MODIFIED:
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
break;
case GIT_DELTA_DELETED:
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE, CONFLICT);
break;
case GIT_DELTA_TYPECHANGE:
/* not 100% certain about this... */
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
break;
default: /* impossible */
break;
}
return checkout_action_common(action, data, delta, wd);
}
static int checkout_action_with_wd_dir(
int *action,
checkout_data *data,
const git_diff_delta *delta,
git_iterator *workdir,
const git_index_entry *wd)
{
*action = CHECKOUT_ACTION__NONE;
switch (delta->status) {
case GIT_DELTA_UNMODIFIED: /* case 19 or 24 (or 34 but not really) */
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_DIRTY, delta, NULL));
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_UNTRACKED, NULL, wd));
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, NONE);
break;
case GIT_DELTA_ADDED:/* case 4 (and 7 for dir) */
case GIT_DELTA_MODIFIED: /* case 20 (or 37 but not really) */
if (delta->old_file.mode == GIT_FILEMODE_COMMIT)
/* expected submodule (and maybe found one) */;
else if (delta->new_file.mode != GIT_FILEMODE_TREE)
*action = git_iterator_current_is_ignored(workdir) ?
CHECKOUT_ACTION_IF(DONT_OVERWRITE_IGNORED, CONFLICT, REMOVE_AND_UPDATE) :
CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
break;
case GIT_DELTA_DELETED: /* case 11 (and 27 for dir) */
if (delta->old_file.mode != GIT_FILEMODE_TREE)
GIT_ERROR_CHECK_ERROR(
checkout_notify(data, GIT_CHECKOUT_NOTIFY_UNTRACKED, NULL, wd));
break;
case GIT_DELTA_TYPECHANGE: /* case 24 or 31 */
if (delta->old_file.mode == GIT_FILEMODE_TREE) {
/* For typechange from dir, remove dir and add blob, but it is
* not safe to remove dir if it contains modified files.
* However, safely removing child files will remove the parent
			 * directory if it is left empty, so we can defer removing the
* dir and it will succeed if no children are left.
*/
*action = CHECKOUT_ACTION_IF(SAFE, UPDATE_BLOB, NONE);
}
else if (delta->new_file.mode != GIT_FILEMODE_TREE)
/* For typechange to dir, dir is already created so no action */
*action = CHECKOUT_ACTION_IF(FORCE, REMOVE_AND_UPDATE, CONFLICT);
break;
default: /* impossible */
break;
}
return checkout_action_common(action, data, delta, wd);
}
static int checkout_action_with_wd_dir_empty(
int *action,
checkout_data *data,
const git_diff_delta *delta)
{
int error = checkout_action_no_wd(action, data, delta);
/* We can always safely remove an empty directory. */
if (error == 0 && *action != CHECKOUT_ACTION__NONE)
*action |= CHECKOUT_ACTION__REMOVE;
return error;
}
static int checkout_action(
int *action,
checkout_data *data,
git_diff_delta *delta,
git_iterator *workdir,
const git_index_entry **wditem,
git_vector *pathspec)
{
int cmp = -1, error;
int (*strcomp)(const char *, const char *) = data->diff->strcomp;
int (*pfxcomp)(const char *str, const char *pfx) = data->diff->pfxcomp;
int (*advance)(const git_index_entry **, git_iterator *) = NULL;
/* move workdir iterator to follow along with deltas */
while (1) {
const git_index_entry *wd = *wditem;
if (!wd)
return checkout_action_no_wd(action, data, delta);
cmp = strcomp(wd->path, delta->old_file.path);
/* 1. wd before delta ("a/a" before "a/b")
* 2. wd prefixes delta & should expand ("a/" before "a/b")
* 3. wd prefixes delta & cannot expand ("a/b" before "a/b/c")
* 4. wd equals delta ("a/b" and "a/b")
* 5. wd after delta & delta prefixes wd ("a/b/c" after "a/b/" or "a/b")
* 6. wd after delta ("a/c" after "a/b")
*/
if (cmp < 0) {
cmp = pfxcomp(delta->old_file.path, wd->path);
if (cmp == 0) {
if (wd->mode == GIT_FILEMODE_TREE) {
/* case 2 - entry prefixed by workdir tree */
error = git_iterator_advance_into(wditem, workdir);
if (error < 0 && error != GIT_ITEROVER)
goto done;
continue;
}
/* case 3 maybe - wd contains non-dir where dir expected */
if (delta->old_file.path[strlen(wd->path)] == '/') {
error = checkout_action_with_wd_blocker(
action, data, delta, wd);
advance = git_iterator_advance;
goto done;
}
}
/* case 1 - handle wd item (if it matches pathspec) */
error = checkout_action_wd_only(data, workdir, wditem, pathspec);
if (error && error != GIT_ITEROVER)
goto done;
continue;
}
if (cmp == 0) {
/* case 4 */
error = checkout_action_with_wd(action, data, delta, workdir, wd);
advance = git_iterator_advance;
goto done;
}
cmp = pfxcomp(wd->path, delta->old_file.path);
if (cmp == 0) { /* case 5 */
if (wd->path[strlen(delta->old_file.path)] != '/')
return checkout_action_no_wd(action, data, delta);
if (delta->status == GIT_DELTA_TYPECHANGE) {
if (delta->old_file.mode == GIT_FILEMODE_TREE) {
error = checkout_action_with_wd(action, data, delta, workdir, wd);
advance = git_iterator_advance_into;
goto done;
}
if (delta->new_file.mode == GIT_FILEMODE_TREE ||
delta->new_file.mode == GIT_FILEMODE_COMMIT ||
delta->old_file.mode == GIT_FILEMODE_COMMIT)
{
error = checkout_action_with_wd(action, data, delta, workdir, wd);
advance = git_iterator_advance;
goto done;
}
}
return checkout_is_empty_dir(data, wd->path) ?
checkout_action_with_wd_dir_empty(action, data, delta) :
checkout_action_with_wd_dir(action, data, delta, workdir, wd);
}
/* case 6 - wd is after delta */
return checkout_action_no_wd(action, data, delta);
}
done:
if (!error && advance != NULL &&
(error = advance(wditem, workdir)) < 0) {
*wditem = NULL;
if (error == GIT_ITEROVER)
error = 0;
}
return error;
}
static int checkout_remaining_wd_items(
checkout_data *data,
git_iterator *workdir,
const git_index_entry *wd,
git_vector *spec)
{
int error = 0;
while (wd && !error)
error = checkout_action_wd_only(data, workdir, &wd, spec);
if (error == GIT_ITEROVER)
error = 0;
return error;
}
GIT_INLINE(int) checkout_idxentry_cmp(
const git_index_entry *a,
const git_index_entry *b)
{
if (!a && !b)
return 0;
else if (!a && b)
return -1;
else if(a && !b)
return 1;
else
return strcmp(a->path, b->path);
}
static int checkout_conflictdata_cmp(const void *a, const void *b)
{
const checkout_conflictdata *ca = a;
const checkout_conflictdata *cb = b;
int diff;
if ((diff = checkout_idxentry_cmp(ca->ancestor, cb->ancestor)) == 0 &&
		(diff = checkout_idxentry_cmp(ca->ours, cb->ours)) == 0)
diff = checkout_idxentry_cmp(ca->theirs, cb->theirs);
return diff;
}
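/* git_vector_remove_matching callback: report (and free) conflict records
 * whose entries have all been coalesced away by rename handling.
 */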
int checkout_conflictdata_empty(
const git_vector *conflicts, size_t idx, void *payload)
{
checkout_conflictdata *conflict;
GIT_UNUSED(payload);
if ((conflict = git_vector_get(conflicts, idx)) == NULL)
return -1;
if (conflict->ancestor || conflict->ours || conflict->theirs)
return 0;
git__free(conflict);
return 1;
}
GIT_INLINE(bool) conflict_pathspec_match(
checkout_data *data,
git_iterator *workdir,
git_vector *pathspec,
const git_index_entry *ancestor,
const git_index_entry *ours,
const git_index_entry *theirs)
{
/* if the pathspec matches ours *or* theirs, proceed */
if (ours && git_pathspec__match(pathspec, ours->path,
(data->strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH) != 0,
git_iterator_ignore_case(workdir), NULL, NULL))
return true;
if (theirs && git_pathspec__match(pathspec, theirs->path,
(data->strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH) != 0,
git_iterator_ignore_case(workdir), NULL, NULL))
return true;
if (ancestor && git_pathspec__match(pathspec, ancestor->path,
(data->strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH) != 0,
git_iterator_ignore_case(workdir), NULL, NULL))
return true;
return false;
}
GIT_INLINE(int) checkout_conflict_detect_submodule(checkout_conflictdata *conflict)
{
conflict->submodule = ((conflict->ancestor && S_ISGITLINK(conflict->ancestor->mode)) ||
(conflict->ours && S_ISGITLINK(conflict->ours->mode)) ||
(conflict->theirs && S_ISGITLINK(conflict->theirs->mode)));
return 0;
}
GIT_INLINE(int) checkout_conflict_detect_binary(git_repository *repo, checkout_conflictdata *conflict)
{
git_blob *ancestor_blob = NULL, *our_blob = NULL, *their_blob = NULL;
int error = 0;
if (conflict->submodule)
return 0;
if (conflict->ancestor) {
if ((error = git_blob_lookup(&ancestor_blob, repo, &conflict->ancestor->id)) < 0)
goto done;
conflict->binary = git_blob_is_binary(ancestor_blob);
}
if (!conflict->binary && conflict->ours) {
if ((error = git_blob_lookup(&our_blob, repo, &conflict->ours->id)) < 0)
goto done;
conflict->binary = git_blob_is_binary(our_blob);
}
if (!conflict->binary && conflict->theirs) {
if ((error = git_blob_lookup(&their_blob, repo, &conflict->theirs->id)) < 0)
goto done;
conflict->binary = git_blob_is_binary(their_blob);
}
done:
git_blob_free(ancestor_blob);
git_blob_free(our_blob);
git_blob_free(their_blob);
return error;
}
static int checkout_conflict_append_update(
const git_index_entry *ancestor,
const git_index_entry *ours,
const git_index_entry *theirs,
void *payload)
{
checkout_data *data = payload;
checkout_conflictdata *conflict;
int error;
conflict = git__calloc(1, sizeof(checkout_conflictdata));
GIT_ERROR_CHECK_ALLOC(conflict);
conflict->ancestor = ancestor;
conflict->ours = ours;
conflict->theirs = theirs;
if ((error = checkout_conflict_detect_submodule(conflict)) < 0 ||
(error = checkout_conflict_detect_binary(data->repo, conflict)) < 0)
{
git__free(conflict);
return error;
}
if (git_vector_insert(&data->update_conflicts, conflict))
return -1;
return 0;
}
static int checkout_conflicts_foreach(
checkout_data *data,
git_index *index,
git_iterator *workdir,
git_vector *pathspec,
int (*cb)(const git_index_entry *, const git_index_entry *, const git_index_entry *, void *),
void *payload)
{
git_index_conflict_iterator *iterator = NULL;
const git_index_entry *ancestor, *ours, *theirs;
int error = 0;
if ((error = git_index_conflict_iterator_new(&iterator, index)) < 0)
goto done;
/* Collect the conflicts */
while ((error = git_index_conflict_next(&ancestor, &ours, &theirs, iterator)) == 0) {
if (!conflict_pathspec_match(data, workdir, pathspec, ancestor, ours, theirs))
continue;
if ((error = cb(ancestor, ours, theirs, payload)) < 0)
goto done;
}
if (error == GIT_ITEROVER)
error = 0;
done:
git_index_conflict_iterator_free(iterator);
return error;
}
static int checkout_conflicts_load(checkout_data *data, git_iterator *workdir, git_vector *pathspec)
{
git_index *index;
	/* Only write conflicts from sources that have them: indexes. */
if ((index = git_iterator_index(data->target)) == NULL)
return 0;
data->update_conflicts._cmp = checkout_conflictdata_cmp;
if (checkout_conflicts_foreach(data, index, workdir, pathspec, checkout_conflict_append_update, data) < 0)
return -1;
/* Collect the REUC and NAME entries */
data->update_reuc = &index->reuc;
data->update_names = &index->names;
return 0;
}
GIT_INLINE(int) checkout_conflicts_cmp_entry(
const char *path,
const git_index_entry *entry)
{
	return strcmp(path, entry->path);
}
static int checkout_conflicts_cmp_ancestor(const void *p, const void *c)
{
const char *path = p;
const checkout_conflictdata *conflict = c;
if (!conflict->ancestor)
return 1;
return checkout_conflicts_cmp_entry(path, conflict->ancestor);
}
static checkout_conflictdata *checkout_conflicts_search_ancestor(
checkout_data *data,
const char *path)
{
size_t pos;
if (git_vector_bsearch2(&pos, &data->update_conflicts, checkout_conflicts_cmp_ancestor, path) < 0)
return NULL;
return git_vector_get(&data->update_conflicts, pos);
}
static checkout_conflictdata *checkout_conflicts_search_branch(
checkout_data *data,
const char *path)
{
checkout_conflictdata *conflict;
size_t i;
git_vector_foreach(&data->update_conflicts, i, conflict) {
int cmp = -1;
if (conflict->ancestor)
break;
if (conflict->ours)
cmp = checkout_conflicts_cmp_entry(path, conflict->ours);
else if (conflict->theirs)
cmp = checkout_conflicts_cmp_entry(path, conflict->theirs);
if (cmp == 0)
return conflict;
}
return NULL;
}
static int checkout_conflicts_load_byname_entry(
checkout_conflictdata **ancestor_out,
checkout_conflictdata **ours_out,
checkout_conflictdata **theirs_out,
checkout_data *data,
const git_index_name_entry *name_entry)
{
checkout_conflictdata *ancestor, *ours = NULL, *theirs = NULL;
int error = 0;
*ancestor_out = NULL;
*ours_out = NULL;
*theirs_out = NULL;
if (!name_entry->ancestor) {
git_error_set(GIT_ERROR_INDEX, "a NAME entry exists without an ancestor");
error = -1;
goto done;
}
if (!name_entry->ours && !name_entry->theirs) {
git_error_set(GIT_ERROR_INDEX, "a NAME entry exists without an ours or theirs");
error = -1;
goto done;
}
if ((ancestor = checkout_conflicts_search_ancestor(data,
name_entry->ancestor)) == NULL) {
git_error_set(GIT_ERROR_INDEX,
"a NAME entry referenced ancestor entry '%s' which does not exist in the main index",
name_entry->ancestor);
error = -1;
goto done;
}
if (name_entry->ours) {
if (strcmp(name_entry->ancestor, name_entry->ours) == 0)
ours = ancestor;
else if ((ours = checkout_conflicts_search_branch(data, name_entry->ours)) == NULL ||
ours->ours == NULL) {
git_error_set(GIT_ERROR_INDEX,
"a NAME entry referenced our entry '%s' which does not exist in the main index",
name_entry->ours);
error = -1;
goto done;
}
}
if (name_entry->theirs) {
if (strcmp(name_entry->ancestor, name_entry->theirs) == 0)
theirs = ancestor;
else if (name_entry->ours && strcmp(name_entry->ours, name_entry->theirs) == 0)
theirs = ours;
else if ((theirs = checkout_conflicts_search_branch(data, name_entry->theirs)) == NULL ||
theirs->theirs == NULL) {
git_error_set(GIT_ERROR_INDEX,
"a NAME entry referenced their entry '%s' which does not exist in the main index",
name_entry->theirs);
error = -1;
goto done;
}
}
*ancestor_out = ancestor;
*ours_out = ours;
*theirs_out = theirs;
done:
return error;
}
static int checkout_conflicts_coalesce_renames(
checkout_data *data)
{
git_index *index;
const git_index_name_entry *name_entry;
checkout_conflictdata *ancestor_conflict, *our_conflict, *their_conflict;
size_t i, names;
int error = 0;
if ((index = git_iterator_index(data->target)) == NULL)
return 0;
/* Juggle entries based on renames */
names = git_index_name_entrycount(index);
for (i = 0; i < names; i++) {
name_entry = git_index_name_get_byindex(index, i);
if ((error = checkout_conflicts_load_byname_entry(
&ancestor_conflict, &our_conflict, &their_conflict,
data, name_entry)) < 0)
goto done;
if (our_conflict && our_conflict != ancestor_conflict) {
ancestor_conflict->ours = our_conflict->ours;
our_conflict->ours = NULL;
if (our_conflict->theirs)
our_conflict->name_collision = 1;
if (our_conflict->name_collision)
ancestor_conflict->name_collision = 1;
}
if (their_conflict && their_conflict != ancestor_conflict) {
ancestor_conflict->theirs = their_conflict->theirs;
their_conflict->theirs = NULL;
if (their_conflict->ours)
their_conflict->name_collision = 1;
if (their_conflict->name_collision)
ancestor_conflict->name_collision = 1;
}
if (our_conflict && our_conflict != ancestor_conflict &&
their_conflict && their_conflict != ancestor_conflict)
ancestor_conflict->one_to_two = 1;
}
git_vector_remove_matching(
&data->update_conflicts, checkout_conflictdata_empty, NULL);
done:
return error;
}
static int checkout_conflicts_mark_directoryfile(
checkout_data *data)
{
git_index *index;
checkout_conflictdata *conflict;
const git_index_entry *entry;
size_t i, j, len;
const char *path;
int prefixed, error = 0;
if ((index = git_iterator_index(data->target)) == NULL)
return 0;
len = git_index_entrycount(index);
/* Find d/f conflicts */
git_vector_foreach(&data->update_conflicts, i, conflict) {
if ((conflict->ours && conflict->theirs) ||
(!conflict->ours && !conflict->theirs))
continue;
path = conflict->ours ?
conflict->ours->path : conflict->theirs->path;
if ((error = git_index_find(&j, index, path)) < 0) {
if (error == GIT_ENOTFOUND)
git_error_set(GIT_ERROR_INDEX,
"index inconsistency, could not find entry for expected conflict '%s'", path);
goto done;
}
for (; j < len; j++) {
if ((entry = git_index_get_byindex(index, j)) == NULL) {
git_error_set(GIT_ERROR_INDEX,
"index inconsistency, truncated index while loading expected conflict '%s'", path);
error = -1;
goto done;
}
prefixed = git_path_equal_or_prefixed(path, entry->path, NULL);
if (prefixed == GIT_PATH_EQUAL)
continue;
if (prefixed == GIT_PATH_PREFIX)
conflict->directoryfile = 1;
break;
}
}
done:
return error;
}
static int checkout_get_update_conflicts(
checkout_data *data,
git_iterator *workdir,
git_vector *pathspec)
{
int error = 0;
if (data->strategy & GIT_CHECKOUT_SKIP_UNMERGED)
return 0;
if ((error = checkout_conflicts_load(data, workdir, pathspec)) < 0 ||
(error = checkout_conflicts_coalesce_renames(data)) < 0 ||
(error = checkout_conflicts_mark_directoryfile(data)) < 0)
goto done;
done:
return error;
}
static int checkout_conflict_append_remove(
const git_index_entry *ancestor,
const git_index_entry *ours,
const git_index_entry *theirs,
void *payload)
{
checkout_data *data = payload;
const char *name;
assert(ancestor || ours || theirs);
if (ancestor)
name = git__strdup(ancestor->path);
else if (ours)
name = git__strdup(ours->path);
else if (theirs)
name = git__strdup(theirs->path);
else
abort();
GIT_ERROR_CHECK_ALLOC(name);
return git_vector_insert(&data->remove_conflicts, (char *)name);
}
static int checkout_get_remove_conflicts(
checkout_data *data,
git_iterator *workdir,
git_vector *pathspec)
{
if ((data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) != 0)
return 0;
return checkout_conflicts_foreach(data, data->index, workdir, pathspec, checkout_conflict_append_remove, data);
}
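/* Before acting on a delta, verify that the paths involved are valid
* for the working directory (git_path_isvalid with the default workdir
* rejection flags), refusing to remove from or write to an invalid path.
*/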
static int checkout_verify_paths(
git_repository *repo,
int action,
git_diff_delta *delta)
{
unsigned int flags = GIT_PATH_REJECT_WORKDIR_DEFAULTS;
if (action & CHECKOUT_ACTION__REMOVE) {
if (!git_path_isvalid(repo, delta->old_file.path, delta->old_file.mode, flags)) {
git_error_set(GIT_ERROR_CHECKOUT, "cannot remove invalid path '%s'", delta->old_file.path);
return -1;
}
}
if (action & ~CHECKOUT_ACTION__REMOVE) {
if (!git_path_isvalid(repo, delta->new_file.path, delta->new_file.mode, flags)) {
git_error_set(GIT_ERROR_CHECKOUT, "cannot checkout to invalid path '%s'", delta->new_file.path);
return -1;
}
}
return 0;
}
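/* Walk the baseline-to-target diff alongside the working directory
* iterator, computing a CHECKOUT_ACTION__* bitmask for every delta and
* per-action counts used for progress reporting and conflict handling.
*/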
static int checkout_get_actions(
uint32_t **actions_ptr,
size_t **counts_ptr,
checkout_data *data,
git_iterator *workdir)
{
int error = 0, act;
const git_index_entry *wditem;
git_vector pathspec = GIT_VECTOR_INIT, *deltas;
git_pool pathpool;
git_diff_delta *delta;
size_t i, *counts = NULL;
uint32_t *actions = NULL;
git_pool_init(&pathpool, 1);
if (data->opts.paths.count > 0 &&
git_pathspec__vinit(&pathspec, &data->opts.paths, &pathpool) < 0)
return -1;
if ((error = git_iterator_current(&wditem, workdir)) < 0 &&
error != GIT_ITEROVER)
goto fail;
deltas = &data->diff->deltas;
*counts_ptr = counts = git__calloc(CHECKOUT_ACTION__MAX+1, sizeof(size_t));
*actions_ptr = actions = git__calloc(
deltas->length ? deltas->length : 1, sizeof(uint32_t));
if (!counts || !actions) {
error = -1;
goto fail;
}
git_vector_foreach(deltas, i, delta) {
if ((error = checkout_action(&act, data, delta, workdir, &wditem, &pathspec)) == 0)
error = checkout_verify_paths(data->repo, act, delta);
if (error != 0)
goto fail;
actions[i] = act;
if (act & CHECKOUT_ACTION__REMOVE)
counts[CHECKOUT_ACTION__REMOVE]++;
if (act & CHECKOUT_ACTION__UPDATE_BLOB)
counts[CHECKOUT_ACTION__UPDATE_BLOB]++;
if (act & CHECKOUT_ACTION__UPDATE_SUBMODULE)
counts[CHECKOUT_ACTION__UPDATE_SUBMODULE]++;
if (act & CHECKOUT_ACTION__CONFLICT)
counts[CHECKOUT_ACTION__CONFLICT]++;
}
error = checkout_remaining_wd_items(data, workdir, wditem, &pathspec);
if (error)
goto fail;
counts[CHECKOUT_ACTION__REMOVE] += data->removes.length;
if (counts[CHECKOUT_ACTION__CONFLICT] > 0 &&
(data->strategy & GIT_CHECKOUT_ALLOW_CONFLICTS) == 0)
{
git_error_set(GIT_ERROR_CHECKOUT, "%"PRIuZ" %s checkout",
counts[CHECKOUT_ACTION__CONFLICT],
counts[CHECKOUT_ACTION__CONFLICT] == 1 ?
"conflict prevents" : "conflicts prevent");
error = GIT_ECONFLICT;
goto fail;
}
if ((error = checkout_get_remove_conflicts(data, workdir, &pathspec)) < 0 ||
(error = checkout_get_update_conflicts(data, workdir, &pathspec)) < 0)
goto fail;
counts[CHECKOUT_ACTION__REMOVE_CONFLICT] = git_vector_length(&data->remove_conflicts);
counts[CHECKOUT_ACTION__UPDATE_CONFLICT] = git_vector_length(&data->update_conflicts);
git_pathspec__vfree(&pathspec);
git_pool_clear(&pathpool);
return 0;
fail:
*counts_ptr = NULL;
git__free(counts);
*actions_ptr = NULL;
git__free(actions);
git_pathspec__vfree(&pathspec);
git_pool_clear(&pathpool);
return error;
}
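/* On a case-insensitive filesystem we may need to remove an existing
* file whose name differs only by case before writing the new one,
* unless the caller disabled this with DONT_REMOVE_EXISTING.
*/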
static bool should_remove_existing(checkout_data *data)
{
int ignorecase;
if (git_repository__configmap_lookup(&ignorecase, data->repo, GIT_CONFIGMAP_IGNORECASE) < 0) {
ignorecase = 0;
}
return (ignorecase &&
(data->strategy & GIT_CHECKOUT_DONT_REMOVE_EXISTING) == 0);
}
#define MKDIR_NORMAL \
GIT_MKDIR_PATH | GIT_MKDIR_VERIFY_DIR
#define MKDIR_REMOVE_EXISTING \
MKDIR_NORMAL | GIT_MKDIR_REMOVE_FILES | GIT_MKDIR_REMOVE_SYMLINKS
static int checkout_mkdir(
checkout_data *data,
const char *path,
const char *base,
mode_t mode,
unsigned int flags)
{
struct git_futils_mkdir_options mkdir_opts = {0};
int error;
mkdir_opts.dir_map = data->mkdir_map;
mkdir_opts.pool = &data->pool;
error = git_futils_mkdir_relative(
path, base, mode, flags, &mkdir_opts);
data->perfdata.mkdir_calls += mkdir_opts.perfdata.mkdir_calls;
data->perfdata.stat_calls += mkdir_opts.perfdata.stat_calls;
data->perfdata.chmod_calls += mkdir_opts.perfdata.chmod_calls;
return error;
}
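/* Create the leading directories for a file (GIT_MKDIR_SKIP_LAST skips
* the final path component) and, when case insensitivity demands it,
* remove any existing entry that blocks the path.
*/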
static int mkpath2file(
checkout_data *data, const char *path, unsigned int mode)
{
struct stat st;
bool remove_existing = should_remove_existing(data);
unsigned int flags =
(remove_existing ? MKDIR_REMOVE_EXISTING : MKDIR_NORMAL) |
GIT_MKDIR_SKIP_LAST;
int error;
if ((error = checkout_mkdir(
data, path, data->opts.target_directory, mode, flags)) < 0)
return error;
if (remove_existing) {
data->perfdata.stat_calls++;
if (p_lstat(path, &st) == 0) {
/* Some file, symlink or folder already exists at this name.
* We would have removed it in remove_the_old unless we're on
* a case insensitive filesystem (or the user has asked us not
* to). Remove the similarly named file so we can write the
* new one.
*/
error = git_futils_rmdir_r(path, NULL, GIT_RMDIR_REMOVE_FILES);
} else if (errno != ENOENT) {
git_error_set(GIT_ERROR_OS, "failed to stat '%s'", path);
return GIT_EEXISTS;
} else {
git_error_clear();
}
}
return error;
}
struct checkout_stream {
git_writestream base;
const char *path;
int fd;
int open;
};
static int checkout_stream_write(
git_writestream *s, const char *buffer, size_t len)
{
struct checkout_stream *stream = (struct checkout_stream *)s;
int ret;
if ((ret = p_write(stream->fd, buffer, len)) < 0)
git_error_set(GIT_ERROR_OS, "could not write to '%s'", stream->path);
return ret;
}
static int checkout_stream_close(git_writestream *s)
{
struct checkout_stream *stream = (struct checkout_stream *)s;
assert(stream && stream->open);
stream->open = 0;
return p_close(stream->fd);
}
static void checkout_stream_free(git_writestream *s)
{
GIT_UNUSED(s);
}
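/* Write a blob's content to a file in the working directory, streaming
* it through any to-worktree filters, then stat the result so the
* caller can update the index from the on-disk state.
*/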
static int blob_content_to_file(
checkout_data *data,
struct stat *st,
git_blob *blob,
const char *path,
const char *hint_path,
mode_t entry_filemode)
{
int flags = data->opts.file_open_flags;
mode_t file_mode = data->opts.file_mode ?
data->opts.file_mode : entry_filemode;
git_filter_options filter_opts = GIT_FILTER_OPTIONS_INIT;
struct checkout_stream writer;
mode_t mode;
git_filter_list *fl = NULL;
int fd;
int error = 0;
if (hint_path == NULL)
hint_path = path;
if ((error = mkpath2file(data, path, data->opts.dir_mode)) < 0)
return error;
if (flags <= 0)
flags = O_CREAT | O_TRUNC | O_WRONLY;
if (!(mode = file_mode))
mode = GIT_FILEMODE_BLOB;
if ((fd = p_open(path, flags, mode)) < 0) {
git_error_set(GIT_ERROR_OS, "could not open '%s' for writing", path);
return fd;
}
filter_opts.attr_session = &data->attr_session;
filter_opts.temp_buf = &data->tmp;
if (!data->opts.disable_filters &&
(error = git_filter_list__load_ext(
&fl, data->repo, blob, hint_path,
GIT_FILTER_TO_WORKTREE, &filter_opts))) {
p_close(fd);
return error;
}
/* setup the writer */
memset(&writer, 0, sizeof(struct checkout_stream));
writer.base.write = checkout_stream_write;
writer.base.close = checkout_stream_close;
writer.base.free = checkout_stream_free;
writer.path = path;
writer.fd = fd;
writer.open = 1;
error = git_filter_list_stream_blob(fl, blob, &writer.base);
assert(writer.open == 0);
git_filter_list_free(fl);
if (error < 0)
return error;
if (st) {
data->perfdata.stat_calls++;
if ((error = p_stat(path, st)) < 0) {
git_error_set(GIT_ERROR_OS, "failed to stat '%s'", path);
return error;
}
st->st_mode = entry_filemode;
}
return 0;
}
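/* Write a blob's content as the target of a symbolic link, falling
* back to a plain file containing the target path when symlinks are
* not available.
*/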
static int blob_content_to_link(
checkout_data *data,
struct stat *st,
git_blob *blob,
const char *path)
{
git_buf linktarget = GIT_BUF_INIT;
int error;
if ((error = mkpath2file(data, path, data->opts.dir_mode)) < 0)
return error;
if ((error = git_blob__getbuf(&linktarget, blob)) < 0)
return error;
if (data->can_symlink) {
if ((error = p_symlink(git_buf_cstr(&linktarget), path)) < 0)
git_error_set(GIT_ERROR_OS, "could not create symlink %s", path);
} else {
error = git_futils_fake_symlink(git_buf_cstr(&linktarget), path);
}
if (!error) {
data->perfdata.stat_calls++;
if ((error = p_lstat(path, st)) < 0)
git_error_set(GIT_ERROR_CHECKOUT, "could not stat symlink %s", path);
st->st_mode = GIT_FILEMODE_LINK;
}
git_buf_dispose(&linktarget);
return error;
}
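/* Add an index entry for a file we just wrote, initialized from its
* on-disk stat data and the blob id from the diff.
*/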
static int checkout_update_index(
checkout_data *data,
const git_diff_file *file,
struct stat *st)
{
git_index_entry entry;
if (!data->index)
return 0;
memset(&entry, 0, sizeof(entry));
entry.path = (char *)file->path; /* cast to prevent warning */
git_index_entry__init_from_stat(&entry, st, true);
git_oid_cpy(&entry.id, &file->id);
return git_index_add(data->index, &entry);
}
static int checkout_submodule_update_index(
checkout_data *data,
const git_diff_file *file)
{
git_buf *fullpath;
struct stat st;
/* update the index unless prevented */
if ((data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) != 0)
return 0;
if (checkout_target_fullpath(&fullpath, data, file->path) < 0)
return -1;
data->perfdata.stat_calls++;
if (p_stat(fullpath->ptr, &st) < 0) {
git_error_set(
GIT_ERROR_CHECKOUT, "could not stat submodule %s\n", file->path);
return GIT_ENOTFOUND;
}
st.st_mode = GIT_FILEMODE_COMMIT;
return checkout_update_index(data, file, &st);
}
static int checkout_submodule(
checkout_data *data,
const git_diff_file *file)
{
bool remove_existing = should_remove_existing(data);
int error = 0;
/* Until submodules are supported, UPDATE_ONLY means do nothing here */
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0)
return 0;
if ((error = checkout_mkdir(
data,
file->path, data->opts.target_directory, data->opts.dir_mode,
remove_existing ? MKDIR_REMOVE_EXISTING : MKDIR_NORMAL)) < 0)
return error;
if ((error = git_submodule_lookup(NULL, data->repo, file->path)) < 0) {
/* I've observed repos with submodules in the tree that do not
* have a .gitmodules - core Git just makes an empty directory
*/
if (error == GIT_ENOTFOUND) {
git_error_clear();
return checkout_submodule_update_index(data, file);
}
return error;
}
/* TODO: Support checkout_strategy options. Two circumstances:
* 1 - submodule already checked out, but we need to move the HEAD
* to the new OID, or
* 2 - submodule not checked out and we should recursively check it out
*
* Checkout will not execute a pull on the submodule, but a clone
* command should probably be able to. Do we need a submodule callback?
*/
return checkout_submodule_update_index(data, file);
}
static void report_progress(
checkout_data *data,
const char *path)
{
if (data->opts.progress_cb)
data->opts.progress_cb(
path, data->completed_steps, data->total_steps,
data->opts.progress_payload);
}
static int checkout_safe_for_update_only(
checkout_data *data, const char *path, mode_t expected_mode)
{
struct stat st;
data->perfdata.stat_calls++;
if (p_lstat(path, &st) < 0) {
/* if it doesn't exist, then no error and no update */
if (errno == ENOENT || errno == ENOTDIR)
return 0;
/* otherwise, stat error and no update */
git_error_set(GIT_ERROR_OS, "failed to stat '%s'", path);
return -1;
}
/* only safe for update if this is the same type of file */
if ((st.st_mode & ~0777) == (expected_mode & ~0777))
return 1;
return 0;
}
static int checkout_write_content(
checkout_data *data,
const git_oid *oid,
const char *full_path,
const char *hint_path,
unsigned int mode,
struct stat *st)
{
int error = 0;
git_blob *blob;
if ((error = git_blob_lookup(&blob, data->repo, oid)) < 0)
return error;
if (S_ISLNK(mode))
error = blob_content_to_link(data, st, blob, full_path);
else
error = blob_content_to_file(data, st, blob, full_path, hint_path, mode);
git_blob_free(blob);
/* if we try to create the blob and an existing directory blocks it from
* being written, then there must have been a typechange conflict in a
* parent directory - suppress the error and try to continue.
*/
if ((data->strategy & GIT_CHECKOUT_ALLOW_CONFLICTS) != 0 &&
(error == GIT_ENOTFOUND || error == GIT_EEXISTS))
{
git_error_clear();
error = 0;
}
return error;
}
static int checkout_blob(
checkout_data *data,
const git_diff_file *file)
{
git_buf *fullpath;
struct stat st;
int error = 0;
if (checkout_target_fullpath(&fullpath, data, file->path) < 0)
return -1;
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0) {
int rval = checkout_safe_for_update_only(
data, fullpath->ptr, file->mode);
if (rval <= 0)
return rval;
}
error = checkout_write_content(
data, &file->id, fullpath->ptr, NULL, file->mode, &st);
/* update the index unless prevented */
if (!error && (data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0)
error = checkout_update_index(data, file, &st);
/* update the submodule data if this was a new .gitmodules file */
if (!error && strcmp(file->path, ".gitmodules") == 0)
data->reload_submodules = true;
return error;
}
static int checkout_remove_the_old(
unsigned int *actions,
checkout_data *data)
{
int error = 0;
git_diff_delta *delta;
const char *str;
size_t i;
git_buf *fullpath;
uint32_t flg = GIT_RMDIR_EMPTY_PARENTS |
GIT_RMDIR_REMOVE_FILES | GIT_RMDIR_REMOVE_BLOCKERS;
if (data->opts.checkout_strategy & GIT_CHECKOUT_SKIP_LOCKED_DIRECTORIES)
flg |= GIT_RMDIR_SKIP_NONEMPTY;
if (checkout_target_fullpath(&fullpath, data, NULL) < 0)
return -1;
git_vector_foreach(&data->diff->deltas, i, delta) {
if (actions[i] & CHECKOUT_ACTION__REMOVE) {
error = git_futils_rmdir_r(
delta->old_file.path, fullpath->ptr, flg);
if (error < 0)
return error;
data->completed_steps++;
report_progress(data, delta->old_file.path);
if ((actions[i] & CHECKOUT_ACTION__UPDATE_BLOB) == 0 &&
(data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0 &&
data->index != NULL)
{
(void)git_index_remove(data->index, delta->old_file.path, 0);
}
}
}
git_vector_foreach(&data->removes, i, str) {
error = git_futils_rmdir_r(str, fullpath->ptr, flg);
if (error < 0)
return error;
data->completed_steps++;
report_progress(data, str);
if ((data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0 &&
data->index != NULL)
{
if (str[strlen(str) - 1] == '/')
(void)git_index_remove_directory(data->index, str, 0);
else
(void)git_index_remove(data->index, str, 0);
}
}
return 0;
}
static int checkout_deferred_remove(git_repository *repo, const char *path)
{
#if 0
int error = git_futils_rmdir_r(
path, data->opts.target_directory, GIT_RMDIR_EMPTY_PARENTS);
if (error == GIT_ENOTFOUND) {
error = 0;
git_error_clear();
}
return error;
#else
GIT_UNUSED(repo);
GIT_UNUSED(path);
assert(false);
return 0;
#endif
}
static int checkout_create_the_new(
unsigned int *actions,
checkout_data *data)
{
int error = 0;
git_diff_delta *delta;
size_t i;
git_vector_foreach(&data->diff->deltas, i, delta) {
if (actions[i] & CHECKOUT_ACTION__DEFER_REMOVE) {
/* this had a blocking directory that should be removed only if
* all of the contents of the directory were safely removed
*/
if ((error = checkout_deferred_remove(
data->repo, delta->old_file.path)) < 0)
return error;
}
if (actions[i] & CHECKOUT_ACTION__UPDATE_BLOB && !S_ISLNK(delta->new_file.mode)) {
if ((error = checkout_blob(data, &delta->new_file)) < 0)
return error;
data->completed_steps++;
report_progress(data, delta->new_file.path);
}
}
git_vector_foreach(&data->diff->deltas, i, delta) {
if (actions[i] & CHECKOUT_ACTION__UPDATE_BLOB && S_ISLNK(delta->new_file.mode)) {
if ((error = checkout_blob(data, &delta->new_file)) < 0)
return error;
data->completed_steps++;
report_progress(data, delta->new_file.path);
}
}
return 0;
}
static int checkout_create_submodules(
unsigned int *actions,
checkout_data *data)
{
int error = 0;
git_diff_delta *delta;
size_t i;
git_vector_foreach(&data->diff->deltas, i, delta) {
if (actions[i] & CHECKOUT_ACTION__DEFER_REMOVE) {
/* this has a blocking directory that should be removed only if
* all of the contents of the directory were safely removed
*/
if ((error = checkout_deferred_remove(
data->repo, delta->old_file.path)) < 0)
return error;
}
if (actions[i] & CHECKOUT_ACTION__UPDATE_SUBMODULE) {
int error = checkout_submodule(data, &delta->new_file);
if (error < 0)
return error;
data->completed_steps++;
report_progress(data, delta->new_file.path);
}
}
return 0;
}
static int checkout_lookup_head_tree(git_tree **out, git_repository *repo)
{
int error = 0;
git_reference *ref = NULL;
git_object *head;
if (!(error = git_repository_head(&ref, repo)) &&
!(error = git_reference_peel(&head, ref, GIT_OBJECT_TREE)))
*out = (git_tree *)head;
git_reference_free(ref);
return error;
}
static int conflict_entry_name(
git_buf *out,
const char *side_name,
const char *filename)
{
if (git_buf_puts(out, side_name) < 0 ||
git_buf_putc(out, ':') < 0 ||
git_buf_puts(out, filename) < 0)
return -1;
return 0;
}
static int checkout_path_suffixed(git_buf *path, const char *suffix)
{
size_t path_len;
int i = 0, error = 0;
if ((error = git_buf_putc(path, '~')) < 0 || (error = git_buf_puts(path, suffix)) < 0)
return -1;
path_len = git_buf_len(path);
while (git_path_exists(git_buf_cstr(path)) && i < INT_MAX) {
git_buf_truncate(path, path_len);
if ((error = git_buf_putc(path, '_')) < 0 ||
(error = git_buf_printf(path, "%d", i)) < 0)
return error;
i++;
}
if (i == INT_MAX) {
git_buf_truncate(path, path_len);
git_error_set(GIT_ERROR_CHECKOUT, "could not write '%s': working directory file exists", path->ptr);
return GIT_EEXISTS;
}
return 0;
}
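/* Write one side of a conflict to the working directory. For name
* collisions and d/f conflicts (when neither USE_OURS nor USE_THEIRS
* is set), the filename is suffixed with the side's label (e.g.
* "path~ours") so that both sides can coexist.
*/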
static int checkout_write_entry(
checkout_data *data,
checkout_conflictdata *conflict,
const git_index_entry *side)
{
const char *hint_path = NULL, *suffix;
git_buf *fullpath;
struct stat st;
int error;
assert (side == conflict->ours || side == conflict->theirs);
if (checkout_target_fullpath(&fullpath, data, side->path) < 0)
return -1;
if ((conflict->name_collision || conflict->directoryfile) &&
(data->strategy & GIT_CHECKOUT_USE_OURS) == 0 &&
(data->strategy & GIT_CHECKOUT_USE_THEIRS) == 0) {
if (side == conflict->ours)
suffix = data->opts.our_label ? data->opts.our_label :
"ours";
else
suffix = data->opts.their_label ? data->opts.their_label :
"theirs";
if (checkout_path_suffixed(fullpath, suffix) < 0)
return -1;
hint_path = side->path;
}
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0 &&
(error = checkout_safe_for_update_only(data, fullpath->ptr, side->mode)) <= 0)
return error;
if (!S_ISGITLINK(side->mode))
return checkout_write_content(data,
&side->id, fullpath->ptr, hint_path, side->mode, &st);
return 0;
}
static int checkout_write_entries(
checkout_data *data,
checkout_conflictdata *conflict)
{
int error = 0;
if ((error = checkout_write_entry(data, conflict, conflict->ours)) >= 0)
error = checkout_write_entry(data, conflict, conflict->theirs);
return error;
}
static int checkout_merge_path(
git_buf *out,
checkout_data *data,
checkout_conflictdata *conflict,
git_merge_file_result *result)
{
const char *our_label_raw, *their_label_raw, *suffix;
int error = 0;
if ((error = git_buf_joinpath(out, git_repository_workdir(data->repo), result->path)) < 0)
return error;
/* Most conflicts simply use the filename in the index */
if (!conflict->name_collision)
return 0;
/* Rename 2->1 conflicts need the branch name appended */
our_label_raw = data->opts.our_label ? data->opts.our_label : "ours";
their_label_raw = data->opts.their_label ? data->opts.their_label : "theirs";
suffix = strcmp(result->path, conflict->ours->path) == 0 ? our_label_raw : their_label_raw;
if ((error = checkout_path_suffixed(out, suffix)) < 0)
return error;
return 0;
}
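/* Merge the ancestor/ours/theirs contents of a conflict, producing
* conflict-marker output, then filter it (unless filters are disabled)
* and write the result to the working directory.
*/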
static int checkout_write_merge(
checkout_data *data,
checkout_conflictdata *conflict)
{
git_buf our_label = GIT_BUF_INIT, their_label = GIT_BUF_INIT,
path_suffixed = GIT_BUF_INIT, path_workdir = GIT_BUF_INIT,
in_data = GIT_BUF_INIT, out_data = GIT_BUF_INIT;
git_merge_file_options opts = GIT_MERGE_FILE_OPTIONS_INIT;
git_merge_file_result result = {0};
git_filebuf output = GIT_FILEBUF_INIT;
git_filter_list *fl = NULL;
git_filter_options filter_opts = GIT_FILTER_OPTIONS_INIT;
int error = 0;
if (data->opts.checkout_strategy & GIT_CHECKOUT_CONFLICT_STYLE_DIFF3)
opts.flags |= GIT_MERGE_FILE_STYLE_DIFF3;
opts.ancestor_label = data->opts.ancestor_label ?
data->opts.ancestor_label : "ancestor";
opts.our_label = data->opts.our_label ?
data->opts.our_label : "ours";
opts.their_label = data->opts.their_label ?
data->opts.their_label : "theirs";
/* If all the paths are identical, decorate the diff3 file with the branch
* names. Otherwise, append branch_name:path.
*/
if (conflict->ours && conflict->theirs &&
strcmp(conflict->ours->path, conflict->theirs->path) != 0) {
if ((error = conflict_entry_name(
&our_label, opts.our_label, conflict->ours->path)) < 0 ||
(error = conflict_entry_name(
&their_label, opts.their_label, conflict->theirs->path)) < 0)
goto done;
opts.our_label = git_buf_cstr(&our_label);
opts.their_label = git_buf_cstr(&their_label);
}
if ((error = git_merge_file_from_index(&result, data->repo,
conflict->ancestor, conflict->ours, conflict->theirs, &opts)) < 0)
goto done;
if (result.path == NULL || result.mode == 0) {
git_error_set(GIT_ERROR_CHECKOUT, "could not merge contents of file");
error = GIT_ECONFLICT;
goto done;
}
if ((error = checkout_merge_path(&path_workdir, data, conflict, &result)) < 0)
goto done;
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0 &&
(error = checkout_safe_for_update_only(data, git_buf_cstr(&path_workdir), result.mode)) <= 0)
goto done;
if (!data->opts.disable_filters) {
in_data.ptr = (char *)result.ptr;
in_data.size = result.len;
filter_opts.attr_session = &data->attr_session;
filter_opts.temp_buf = &data->tmp;
if ((error = git_filter_list__load_ext(
&fl, data->repo, NULL, git_buf_cstr(&path_workdir),
GIT_FILTER_TO_WORKTREE, &filter_opts)) < 0 ||
(error = git_filter_list_apply_to_data(&out_data, fl, &in_data)) < 0)
goto done;
} else {
out_data.ptr = (char *)result.ptr;
out_data.size = result.len;
}
if ((error = mkpath2file(data, path_workdir.ptr, data->opts.dir_mode)) < 0 ||
(error = git_filebuf_open(&output, git_buf_cstr(&path_workdir), GIT_FILEBUF_DO_NOT_BUFFER, result.mode)) < 0 ||
(error = git_filebuf_write(&output, out_data.ptr, out_data.size)) < 0 ||
(error = git_filebuf_commit(&output)) < 0)
goto done;
done:
git_filter_list_free(fl);
git_buf_dispose(&out_data);
git_buf_dispose(&our_label);
git_buf_dispose(&their_label);
git_merge_file_result_free(&result);
git_buf_dispose(&path_workdir);
git_buf_dispose(&path_suffixed);
return error;
}
static int checkout_conflict_add(
checkout_data *data,
const git_index_entry *conflict)
{
int error = git_index_remove(data->index, conflict->path, 0);
if (error == GIT_ENOTFOUND)
git_error_clear();
else if (error < 0)
return error;
return git_index_add(data->index, conflict);
}
static int checkout_conflict_update_index(
checkout_data *data,
checkout_conflictdata *conflict)
{
int error = 0;
if (conflict->ancestor)
error = checkout_conflict_add(data, conflict->ancestor);
if (!error && conflict->ours)
error = checkout_conflict_add(data, conflict->ours);
if (!error && conflict->theirs)
error = checkout_conflict_add(data, conflict->theirs);
return error;
}
static int checkout_create_conflicts(checkout_data *data)
{
checkout_conflictdata *conflict;
size_t i;
int error = 0;
git_vector_foreach(&data->update_conflicts, i, conflict) {
/* Both deleted: nothing to do */
if (conflict->ours == NULL && conflict->theirs == NULL)
error = 0;
else if ((data->strategy & GIT_CHECKOUT_USE_OURS) &&
conflict->ours)
error = checkout_write_entry(data, conflict, conflict->ours);
else if ((data->strategy & GIT_CHECKOUT_USE_THEIRS) &&
conflict->theirs)
error = checkout_write_entry(data, conflict, conflict->theirs);
/* Ignore the other side of name collisions. */
else if ((data->strategy & GIT_CHECKOUT_USE_OURS) &&
!conflict->ours && conflict->name_collision)
error = 0;
else if ((data->strategy & GIT_CHECKOUT_USE_THEIRS) &&
!conflict->theirs && conflict->name_collision)
error = 0;
/* For modify/delete, name collisions and d/f conflicts, write
* the file (potentially with the name mangled).
*/
else if (conflict->ours != NULL && conflict->theirs == NULL)
error = checkout_write_entry(data, conflict, conflict->ours);
else if (conflict->ours == NULL && conflict->theirs != NULL)
error = checkout_write_entry(data, conflict, conflict->theirs);
/* For add/add conflicts and rename 1->2 conflicts, write both
* the ours and theirs sides (potentially name mangled).
*/
else if (conflict->one_to_two)
error = checkout_write_entries(data, conflict);
/* If both the ours and theirs sides are links, write the ours side */
else if (S_ISLNK(conflict->ours->mode) &&
S_ISLNK(conflict->theirs->mode))
error = checkout_write_entry(data, conflict, conflict->ours);
/* Link/file conflicts, write the file side */
else if (S_ISLNK(conflict->ours->mode))
error = checkout_write_entry(data, conflict, conflict->theirs);
else if (S_ISLNK(conflict->theirs->mode))
error = checkout_write_entry(data, conflict, conflict->ours);
/* If any side is a gitlink, do nothing. */
else if (conflict->submodule)
error = 0;
/* If any side is binary, write the ours side */
else if (conflict->binary)
error = checkout_write_entry(data, conflict, conflict->ours);
else if (!error)
error = checkout_write_merge(data, conflict);
/* Update the index extensions (REUC and NAME) if we're checking
* out a different index. (Otherwise just leave them there.)
*/
if (!error && (data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0)
error = checkout_conflict_update_index(data, conflict);
if (error)
break;
data->completed_steps++;
report_progress(data,
conflict->ours ? conflict->ours->path :
(conflict->theirs ? conflict->theirs->path : conflict->ancestor->path));
}
return error;
}
static int checkout_remove_conflicts(checkout_data *data)
{
const char *conflict;
size_t i;
git_vector_foreach(&data->remove_conflicts, i, conflict) {
if (git_index_conflict_remove(data->index, conflict) < 0)
return -1;
data->completed_steps++;
}
return 0;
}
static int checkout_extensions_update_index(checkout_data *data)
{
const git_index_reuc_entry *reuc_entry;
const git_index_name_entry *name_entry;
size_t i;
int error = 0;
if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0)
return 0;
if (data->update_reuc) {
git_vector_foreach(data->update_reuc, i, reuc_entry) {
if ((error = git_index_reuc_add(data->index, reuc_entry->path,
reuc_entry->mode[0], &reuc_entry->oid[0],
reuc_entry->mode[1], &reuc_entry->oid[1],
reuc_entry->mode[2], &reuc_entry->oid[2])) < 0)
goto done;
}
}
if (data->update_names) {
git_vector_foreach(data->update_names, i, name_entry) {
if ((error = git_index_name_add(data->index, name_entry->ancestor,
name_entry->ours, name_entry->theirs)) < 0)
goto done;
}
}
done:
return error;
}
static void checkout_data_clear(checkout_data *data)
{
if (data->opts_free_baseline) {
git_tree_free(data->opts.baseline);
data->opts.baseline = NULL;
}
git_vector_free(&data->removes);
git_pool_clear(&data->pool);
git_vector_free_deep(&data->remove_conflicts);
git_vector_free_deep(&data->update_conflicts);
git__free(data->pfx);
data->pfx = NULL;
git_buf_dispose(&data->target_path);
git_buf_dispose(&data->tmp);
git_index_free(data->index);
data->index = NULL;
git_strmap_free(data->mkdir_map);
data->mkdir_map = NULL;
git_attr_session__free(&data->attr_session);
}
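/* Initialize the checkout context from the proposed options: resolve
* the target directory, load and (if permitted) refresh the repository
* index, normalize the strategy flags, and choose a baseline tree when
* none was supplied.
*/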
static int checkout_data_init(
checkout_data *data,
git_iterator *target,
const git_checkout_options *proposed)
{
int error = 0;
git_repository *repo = git_iterator_owner(target);
memset(data, 0, sizeof(*data));
if (!repo) {
git_error_set(GIT_ERROR_CHECKOUT, "cannot checkout nothing");
return -1;
}
if ((!proposed || !proposed->target_directory) &&
(error = git_repository__ensure_not_bare(repo, "checkout")) < 0)
return error;
data->repo = repo;
data->target = target;
GIT_ERROR_CHECK_VERSION(
proposed, GIT_CHECKOUT_OPTIONS_VERSION, "git_checkout_options");
if (!proposed)
GIT_INIT_STRUCTURE(&data->opts, GIT_CHECKOUT_OPTIONS_VERSION);
else
memmove(&data->opts, proposed, sizeof(git_checkout_options));
if (!data->opts.target_directory)
data->opts.target_directory = git_repository_workdir(repo);
else if (!git_path_isdir(data->opts.target_directory) &&
(error = checkout_mkdir(data,
data->opts.target_directory, NULL,
GIT_DIR_MODE, GIT_MKDIR_VERIFY_DIR)) < 0)
goto cleanup;
if ((error = git_repository_index(&data->index, data->repo)) < 0)
goto cleanup;
/* refresh config and index content unless NO_REFRESH is given */
if ((data->opts.checkout_strategy & GIT_CHECKOUT_NO_REFRESH) == 0) {
git_config *cfg;
if ((error = git_repository_config__weakptr(&cfg, repo)) < 0)
goto cleanup;
/* Reload the repository index (unless we're checking out the
* index; then it has the changes we're trying to check out
* and those should not be overwritten.)
*/
if (data->index != git_iterator_index(target)) {
if (data->opts.checkout_strategy & GIT_CHECKOUT_FORCE) {
/* When forcing, we can blindly re-read the index */
if ((error = git_index_read(data->index, false)) < 0)
goto cleanup;
} else {
/*
* When not being forced, we need to check for unresolved
* conflicts and unsaved changes in the index before
* proceeding.
*/
if (git_index_has_conflicts(data->index)) {
error = GIT_ECONFLICT;
git_error_set(GIT_ERROR_CHECKOUT,
"unresolved conflicts exist in the index");
goto cleanup;
}
if ((error = git_index_read_safely(data->index)) < 0)
goto cleanup;
}
/* clean conflict data in the current index */
git_index_name_clear(data->index);
git_index_reuc_clear(data->index);
}
}
/* if you are forcing, allow all safe updates, plus recreate missing */
if ((data->opts.checkout_strategy & GIT_CHECKOUT_FORCE) != 0)
data->opts.checkout_strategy |= GIT_CHECKOUT_SAFE |
GIT_CHECKOUT_RECREATE_MISSING;
/* if the repository does not actually have an index file, then this
* is an initial checkout (perhaps from clone), so we allow safe updates
*/
if (!data->index->on_disk &&
(data->opts.checkout_strategy & GIT_CHECKOUT_SAFE) != 0)
data->opts.checkout_strategy |= GIT_CHECKOUT_RECREATE_MISSING;
data->strategy = data->opts.checkout_strategy;
/* opts->disable_filters is false by default */
if (!data->opts.dir_mode)
data->opts.dir_mode = GIT_DIR_MODE;
if (!data->opts.file_open_flags)
data->opts.file_open_flags = O_CREAT | O_TRUNC | O_WRONLY;
data->pfx = git_pathspec_prefix(&data->opts.paths);
if ((error = git_repository__configmap_lookup(
&data->can_symlink, repo, GIT_CONFIGMAP_SYMLINKS)) < 0)
goto cleanup;
if ((error = git_repository__configmap_lookup(
&data->respect_filemode, repo, GIT_CONFIGMAP_FILEMODE)) < 0)
goto cleanup;
if (!data->opts.baseline && !data->opts.baseline_index) {
data->opts_free_baseline = true;
error = 0;
/* if we don't have an index, this is an initial checkout and
* should be against an empty baseline
*/
if (data->index->on_disk)
error = checkout_lookup_head_tree(&data->opts.baseline, repo);
if (error == GIT_EUNBORNBRANCH) {
error = 0;
git_error_clear();
}
if (error < 0)
goto cleanup;
}
if ((data->opts.checkout_strategy &
(GIT_CHECKOUT_CONFLICT_STYLE_MERGE | GIT_CHECKOUT_CONFLICT_STYLE_DIFF3)) == 0) {
git_config_entry *conflict_style = NULL;
git_config *cfg = NULL;
if ((error = git_repository_config__weakptr(&cfg, repo)) < 0 ||
(error = git_config_get_entry(&conflict_style, cfg, "merge.conflictstyle")) < 0 ||
error == GIT_ENOTFOUND)
;
else if (error)
goto cleanup;
else if (strcmp(conflict_style->value, "merge") == 0)
data->opts.checkout_strategy |= GIT_CHECKOUT_CONFLICT_STYLE_MERGE;
else if (strcmp(conflict_style->value, "diff3") == 0)
data->opts.checkout_strategy |= GIT_CHECKOUT_CONFLICT_STYLE_DIFF3;
else {
git_error_set(GIT_ERROR_CHECKOUT, "unknown style '%s' given for 'merge.conflictstyle'",
conflict_style->value);
error = -1;
git_config_entry_free(conflict_style);
goto cleanup;
}
git_config_entry_free(conflict_style);
}
git_pool_init(&data->pool, 1);
if ((error = git_vector_init(&data->removes, 0, git__strcmp_cb)) < 0 ||
(error = git_vector_init(&data->remove_conflicts, 0, NULL)) < 0 ||
(error = git_vector_init(&data->update_conflicts, 0, NULL)) < 0 ||
(error = git_buf_puts(&data->target_path, data->opts.target_directory)) < 0 ||
(error = git_path_to_dir(&data->target_path)) < 0 ||
(error = git_strmap_new(&data->mkdir_map)) < 0)
goto cleanup;
data->target_len = git_buf_len(&data->target_path);
git_attr_session__init(&data->attr_session, data->repo);
cleanup:
if (error < 0)
checkout_data_clear(data);
return error;
}
#define CHECKOUT_INDEX_DONT_WRITE_MASK \
(GIT_CHECKOUT_DONT_UPDATE_INDEX | GIT_CHECKOUT_DONT_WRITE_INDEX)
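/* Core checkout driver: diff the baseline against the target iterator,
* classify every delta into actions, then apply them in ordered passes
* (removes, conflict removes, blob updates, submodule updates, conflict
* resolutions) before writing the index back out.
*/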
int git_checkout_iterator(
git_iterator *target,
git_index *index,
const git_checkout_options *opts)
{
int error = 0;
git_iterator *baseline = NULL, *workdir = NULL;
git_iterator_options baseline_opts = GIT_ITERATOR_OPTIONS_INIT,
workdir_opts = GIT_ITERATOR_OPTIONS_INIT;
checkout_data data = {0};
git_diff_options diff_opts = GIT_DIFF_OPTIONS_INIT;
uint32_t *actions = NULL;
size_t *counts = NULL;
/* initialize structures and options */
error = checkout_data_init(&data, target, opts);
if (error < 0)
return error;
diff_opts.flags =
GIT_DIFF_INCLUDE_UNMODIFIED |
GIT_DIFF_INCLUDE_UNREADABLE |
GIT_DIFF_INCLUDE_UNTRACKED |
GIT_DIFF_RECURSE_UNTRACKED_DIRS | /* needed to match baseline */
GIT_DIFF_INCLUDE_IGNORED |
GIT_DIFF_INCLUDE_TYPECHANGE |
GIT_DIFF_INCLUDE_TYPECHANGE_TREES |
GIT_DIFF_SKIP_BINARY_CHECK |
GIT_DIFF_INCLUDE_CASECHANGE;
if (data.opts.checkout_strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH)
diff_opts.flags |= GIT_DIFF_DISABLE_PATHSPEC_MATCH;
if (data.opts.paths.count > 0)
diff_opts.pathspec = data.opts.paths;
/* set up iterators */
workdir_opts.flags = git_iterator_ignore_case(target) ?
GIT_ITERATOR_IGNORE_CASE : GIT_ITERATOR_DONT_IGNORE_CASE;
workdir_opts.flags |= GIT_ITERATOR_DONT_AUTOEXPAND;
workdir_opts.start = data.pfx;
workdir_opts.end = data.pfx;
if ((error = git_iterator_reset_range(target, data.pfx, data.pfx)) < 0 ||
(error = git_iterator_for_workdir_ext(
&workdir, data.repo, data.opts.target_directory, index, NULL,
&workdir_opts)) < 0)
goto cleanup;
baseline_opts.flags = git_iterator_ignore_case(target) ?
GIT_ITERATOR_IGNORE_CASE : GIT_ITERATOR_DONT_IGNORE_CASE;
baseline_opts.start = data.pfx;
baseline_opts.end = data.pfx;
if (opts && (opts->checkout_strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH)) {
baseline_opts.pathlist.count = opts->paths.count;
baseline_opts.pathlist.strings = opts->paths.strings;
}
if (data.opts.baseline_index) {
if ((error = git_iterator_for_index(
&baseline, git_index_owner(data.opts.baseline_index),
data.opts.baseline_index, &baseline_opts)) < 0)
goto cleanup;
} else {
if ((error = git_iterator_for_tree(
&baseline, data.opts.baseline, &baseline_opts)) < 0)
goto cleanup;
}
/* Should not have case insensitivity mismatch */
assert(git_iterator_ignore_case(workdir) == git_iterator_ignore_case(baseline));
/* Generate baseline-to-target diff which will include an entry for
* every possible update that might need to be made.
*/
if ((error = git_diff__from_iterators(
&data.diff, data.repo, baseline, target, &diff_opts)) < 0)
goto cleanup;
/* Loop through diff (and working directory iterator) building a list of
* actions to be taken, plus look for conflicts and send notifications,
* then loop through conflicts.
*/
if ((error = checkout_get_actions(&actions, &counts, &data, workdir)) != 0)
goto cleanup;
data.total_steps = counts[CHECKOUT_ACTION__REMOVE] +
counts[CHECKOUT_ACTION__REMOVE_CONFLICT] +
counts[CHECKOUT_ACTION__UPDATE_BLOB] +
counts[CHECKOUT_ACTION__UPDATE_SUBMODULE] +
counts[CHECKOUT_ACTION__UPDATE_CONFLICT];
report_progress(&data, NULL); /* establish 0 baseline */
/* To deal with some order dependencies, perform the remaining checkout
* in ordered passes: removes, conflict removes, blob updates, submodule
* updates, then conflict resolutions.
*/
if (counts[CHECKOUT_ACTION__REMOVE] > 0 &&
(error = checkout_remove_the_old(actions, &data)) < 0)
goto cleanup;
if (counts[CHECKOUT_ACTION__REMOVE_CONFLICT] > 0 &&
(error = checkout_remove_conflicts(&data)) < 0)
goto cleanup;
if (counts[CHECKOUT_ACTION__UPDATE_BLOB] > 0 &&
(error = checkout_create_the_new(actions, &data)) < 0)
goto cleanup;
if (counts[CHECKOUT_ACTION__UPDATE_SUBMODULE] > 0 &&
(error = checkout_create_submodules(actions, &data)) < 0)
goto cleanup;
if (counts[CHECKOUT_ACTION__UPDATE_CONFLICT] > 0 &&
(error = checkout_create_conflicts(&data)) < 0)
goto cleanup;
if (data.index != git_iterator_index(target) &&
(error = checkout_extensions_update_index(&data)) < 0)
goto cleanup;
assert(data.completed_steps == data.total_steps);
if (data.opts.perfdata_cb)
data.opts.perfdata_cb(&data.perfdata, data.opts.perfdata_payload);
cleanup:
if (!error && data.index != NULL &&
(data.strategy & CHECKOUT_INDEX_DONT_WRITE_MASK) == 0)
error = git_index_write(data.index);
git_diff_free(data.diff);
git_iterator_free(workdir);
git_iterator_free(baseline);
git__free(actions);
git__free(counts);
checkout_data_clear(&data);
return error;
}
int git_checkout_index(
git_repository *repo,
git_index *index,
const git_checkout_options *opts)
{
int error, owned = 0;
git_iterator *index_i;
if (!index && !repo) {
git_error_set(GIT_ERROR_CHECKOUT,
"must provide either repository or index to checkout");
return -1;
}
if (index && repo &&
git_index_owner(index) &&
git_index_owner(index) != repo) {
git_error_set(GIT_ERROR_CHECKOUT,
"index to checkout does not match repository");
return -1;
} else if(index && repo && !git_index_owner(index)) {
GIT_REFCOUNT_OWN(index, repo);
owned = 1;
}
if (!repo)
repo = git_index_owner(index);
if (!index && (error = git_repository_index__weakptr(&index, repo)) < 0)
return error;
GIT_REFCOUNT_INC(index);
if (!(error = git_iterator_for_index(&index_i, repo, index, NULL)))
error = git_checkout_iterator(index_i, index, opts);
if (owned)
GIT_REFCOUNT_OWN(index, NULL);
git_iterator_free(index_i);
git_index_free(index);
return error;
}
int git_checkout_tree(
git_repository *repo,
const git_object *treeish,
const git_checkout_options *opts)
{
int error;
git_index *index;
git_tree *tree = NULL;
git_iterator *tree_i = NULL;
git_iterator_options iter_opts = GIT_ITERATOR_OPTIONS_INIT;
if (!treeish && !repo) {
git_error_set(GIT_ERROR_CHECKOUT,
"must provide either repository or tree to checkout");
return -1;
}
if (treeish && repo && git_object_owner(treeish) != repo) {
git_error_set(GIT_ERROR_CHECKOUT,
"object to checkout does not match repository");
return -1;
}
if (!repo)
repo = git_object_owner(treeish);
if (treeish) {
if (git_object_peel((git_object **)&tree, treeish, GIT_OBJECT_TREE) < 0) {
git_error_set(
GIT_ERROR_CHECKOUT, "provided object cannot be peeled to a tree");
return -1;
}
}
else {
if ((error = checkout_lookup_head_tree(&tree, repo)) < 0) {
if (error != GIT_EUNBORNBRANCH)
git_error_set(
GIT_ERROR_CHECKOUT,
"HEAD could not be peeled to a tree and no treeish given");
return error;
}
}
if ((error = git_repository_index(&index, repo)) < 0)
return error;
if (opts && (opts->checkout_strategy & GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH)) {
iter_opts.pathlist.count = opts->paths.count;
iter_opts.pathlist.strings = opts->paths.strings;
}
if (!(error = git_iterator_for_tree(&tree_i, tree, &iter_opts)))
error = git_checkout_iterator(tree_i, index, opts);
git_iterator_free(tree_i);
git_index_free(index);
git_tree_free(tree);
return error;
}
int git_checkout_head(
git_repository *repo,
const git_checkout_options *opts)
{
assert(repo);
return git_checkout_tree(repo, NULL, opts);
}
int git_checkout_options_init(git_checkout_options *opts, unsigned int version)
{
GIT_INIT_STRUCTURE_FROM_TEMPLATE(
opts, version, git_checkout_options, GIT_CHECKOUT_OPTIONS_INIT);
return 0;
}
int git_checkout_init_options(git_checkout_options *opts, unsigned int version)
{
return git_checkout_options_init(opts, version);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_3989_0 |
crossvul-cpp_data_bad_2891_12 | /*
* Copyright (C) 2010 IBM Corporation
*
* Author:
* David Safford <safford@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* See Documentation/security/keys/trusted-encrypted.rst
*/
#include <crypto/hash_info.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/parser.h>
#include <linux/string.h>
#include <linux/err.h>
#include <keys/user-type.h>
#include <keys/trusted-type.h>
#include <linux/key-type.h>
#include <linux/rcupdate.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/capability.h>
#include <linux/tpm.h>
#include <linux/tpm_command.h>
#include "trusted.h"
static const char hmac_alg[] = "hmac(sha1)";
static const char hash_alg[] = "sha1";
struct sdesc {
struct shash_desc shash;
char ctx[];
};
static struct crypto_shash *hashalg;
static struct crypto_shash *hmacalg;
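/* Allocate a shash descriptor sized for the given algorithm; the
* caller is responsible for kzfree()ing the result.
*/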
static struct sdesc *init_sdesc(struct crypto_shash *alg)
{
struct sdesc *sdesc;
int size;
size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc)
return ERR_PTR(-ENOMEM);
sdesc->shash.tfm = alg;
sdesc->shash.flags = 0x0;
return sdesc;
}
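/* Hash datalen bytes of data with SHA-1; digest must be able to hold
* SHA1_DIGEST_SIZE bytes.
*/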
static int TSS_sha1(const unsigned char *data, unsigned int datalen,
unsigned char *digest)
{
struct sdesc *sdesc;
int ret;
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
pr_info("trusted_key: can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
kzfree(sdesc);
return ret;
}
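/* Compute an HMAC-SHA1 with the given key over a variable-length list
* of (length, data) argument pairs terminated by a zero length.
*/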
static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
unsigned int keylen, ...)
{
struct sdesc *sdesc;
va_list argp;
unsigned int dlen;
unsigned char *data;
int ret;
sdesc = init_sdesc(hmacalg);
if (IS_ERR(sdesc)) {
pr_info("trusted_key: can't alloc %s\n", hmac_alg);
return PTR_ERR(sdesc);
}
ret = crypto_shash_setkey(hmacalg, key, keylen);
if (ret < 0)
goto out;
ret = crypto_shash_init(&sdesc->shash);
if (ret < 0)
goto out;
va_start(argp, keylen);
for (;;) {
dlen = va_arg(argp, unsigned int);
if (dlen == 0)
break;
data = va_arg(argp, unsigned char *);
if (data == NULL) {
ret = -EINVAL;
break;
}
ret = crypto_shash_update(&sdesc->shash, data, dlen);
if (ret < 0)
break;
}
va_end(argp);
if (!ret)
ret = crypto_shash_final(&sdesc->shash, digest);
out:
kzfree(sdesc);
return ret;
}
/*
* calculate authorization info fields to send to TPM
*/
static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
unsigned int keylen, unsigned char *h1,
unsigned char *h2, unsigned char h3, ...)
{
unsigned char paramdigest[SHA1_DIGEST_SIZE];
struct sdesc *sdesc;
unsigned int dlen;
unsigned char *data;
unsigned char c;
int ret;
va_list argp;
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
pr_info("trusted_key: can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
c = h3;
ret = crypto_shash_init(&sdesc->shash);
if (ret < 0)
goto out;
va_start(argp, h3);
for (;;) {
dlen = va_arg(argp, unsigned int);
if (dlen == 0)
break;
data = va_arg(argp, unsigned char *);
if (!data) {
ret = -EINVAL;
break;
}
ret = crypto_shash_update(&sdesc->shash, data, dlen);
if (ret < 0)
break;
}
va_end(argp);
if (!ret)
ret = crypto_shash_final(&sdesc->shash, paramdigest);
if (!ret)
ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
paramdigest, TPM_NONCE_SIZE, h1,
TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
out:
kzfree(sdesc);
return ret;
}
/*
* verify the AUTH1_COMMAND (Seal) result from TPM
*/
static int TSS_checkhmac1(unsigned char *buffer,
const uint32_t command,
const unsigned char *ononce,
const unsigned char *key,
unsigned int keylen, ...)
{
uint32_t bufsize;
uint16_t tag;
uint32_t ordinal;
uint32_t result;
unsigned char *enonce;
unsigned char *continueflag;
unsigned char *authdata;
unsigned char testhmac[SHA1_DIGEST_SIZE];
unsigned char paramdigest[SHA1_DIGEST_SIZE];
struct sdesc *sdesc;
unsigned int dlen;
unsigned int dpos;
va_list argp;
int ret;
bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
tag = LOAD16(buffer, 0);
ordinal = command;
result = LOAD32N(buffer, TPM_RETURN_OFFSET);
if (tag == TPM_TAG_RSP_COMMAND)
return 0;
if (tag != TPM_TAG_RSP_AUTH1_COMMAND)
return -EINVAL;
authdata = buffer + bufsize - SHA1_DIGEST_SIZE;
continueflag = authdata - 1;
enonce = continueflag - TPM_NONCE_SIZE;
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
pr_info("trusted_key: can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
ret = crypto_shash_init(&sdesc->shash);
if (ret < 0)
goto out;
ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
sizeof result);
if (ret < 0)
goto out;
ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
sizeof ordinal);
if (ret < 0)
goto out;
va_start(argp, keylen);
for (;;) {
dlen = va_arg(argp, unsigned int);
if (dlen == 0)
break;
dpos = va_arg(argp, unsigned int);
ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
if (ret < 0)
break;
}
va_end(argp);
if (!ret)
ret = crypto_shash_final(&sdesc->shash, paramdigest);
if (ret < 0)
goto out;
ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest,
TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce,
1, continueflag, 0, 0);
if (ret < 0)
goto out;
if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
kzfree(sdesc);
return ret;
}
/*
* verify the AUTH2_COMMAND (unseal) result from TPM
*/
static int TSS_checkhmac2(unsigned char *buffer,
const uint32_t command,
const unsigned char *ononce,
const unsigned char *key1,
unsigned int keylen1,
const unsigned char *key2,
unsigned int keylen2, ...)
{
uint32_t bufsize;
uint16_t tag;
uint32_t ordinal;
uint32_t result;
unsigned char *enonce1;
unsigned char *continueflag1;
unsigned char *authdata1;
unsigned char *enonce2;
unsigned char *continueflag2;
unsigned char *authdata2;
unsigned char testhmac1[SHA1_DIGEST_SIZE];
unsigned char testhmac2[SHA1_DIGEST_SIZE];
unsigned char paramdigest[SHA1_DIGEST_SIZE];
struct sdesc *sdesc;
unsigned int dlen;
unsigned int dpos;
va_list argp;
int ret;
bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
tag = LOAD16(buffer, 0);
ordinal = command;
result = LOAD32N(buffer, TPM_RETURN_OFFSET);
if (tag == TPM_TAG_RSP_COMMAND)
return 0;
if (tag != TPM_TAG_RSP_AUTH2_COMMAND)
return -EINVAL;
authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1
+ SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE);
authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE);
continueflag1 = authdata1 - 1;
continueflag2 = authdata2 - 1;
enonce1 = continueflag1 - TPM_NONCE_SIZE;
enonce2 = continueflag2 - TPM_NONCE_SIZE;
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
pr_info("trusted_key: can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
ret = crypto_shash_init(&sdesc->shash);
if (ret < 0)
goto out;
ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
sizeof result);
if (ret < 0)
goto out;
ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
sizeof ordinal);
if (ret < 0)
goto out;
va_start(argp, keylen2);
for (;;) {
dlen = va_arg(argp, unsigned int);
if (dlen == 0)
break;
dpos = va_arg(argp, unsigned int);
ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
if (ret < 0)
break;
}
va_end(argp);
if (!ret)
ret = crypto_shash_final(&sdesc->shash, paramdigest);
if (ret < 0)
goto out;
ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE,
paramdigest, TPM_NONCE_SIZE, enonce1,
TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
if (ret < 0)
goto out;
if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
ret = -EINVAL;
goto out;
}
ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE,
paramdigest, TPM_NONCE_SIZE, enonce2,
TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
if (ret < 0)
goto out;
if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
kzfree(sdesc);
return ret;
}
/*
* For key-specific TPM requests, we will generate and send our
* own TPM command packets using the driver's send function.
*/
static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd,
size_t buflen)
{
int rc;
dump_tpm_buf(cmd);
rc = tpm_send(chip_num, cmd, buflen);
dump_tpm_buf(cmd);
if (rc > 0)
/* Can't return positive return codes to keyctl */
rc = -EPERM;
return rc;
}
/*
* Lock a trusted key by extending a selected PCR.
*
* Prevents a trusted key that is sealed to PCRs from being accessed.
* This uses the tpm driver's extend function.
*/
static int pcrlock(const int pcrnum)
{
unsigned char hash[SHA1_DIGEST_SIZE];
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE);
if (ret != SHA1_DIGEST_SIZE)
return ret;
return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? -EINVAL : 0;
}
/*
* Create an object specific authorisation protocol (OSAP) session
*/
static int osap(struct tpm_buf *tb, struct osapsess *s,
const unsigned char *key, uint16_t type, uint32_t handle)
{
unsigned char enonce[TPM_NONCE_SIZE];
unsigned char ononce[TPM_NONCE_SIZE];
int ret;
ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE)
return ret;
INIT_BUF(tb);
store16(tb, TPM_TAG_RQU_COMMAND);
store32(tb, TPM_OSAP_SIZE);
store32(tb, TPM_ORD_OSAP);
store16(tb, type);
store32(tb, handle);
storebytes(tb, ononce, TPM_NONCE_SIZE);
ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
s->handle = LOAD32(tb->data, TPM_DATA_OFFSET);
memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]),
TPM_NONCE_SIZE);
memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) +
TPM_NONCE_SIZE]), TPM_NONCE_SIZE);
return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE,
enonce, TPM_NONCE_SIZE, ononce, 0, 0);
}
/*
* Create an object independent authorisation protocol (OIAP) session
*/
static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
{
int ret;
INIT_BUF(tb);
store16(tb, TPM_TAG_RQU_COMMAND);
store32(tb, TPM_OIAP_SIZE);
store32(tb, TPM_ORD_OIAP);
ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
*handle = LOAD32(tb->data, TPM_DATA_OFFSET);
memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)],
TPM_NONCE_SIZE);
return 0;
}
struct tpm_digests {
unsigned char encauth[SHA1_DIGEST_SIZE];
unsigned char pubauth[SHA1_DIGEST_SIZE];
unsigned char xorwork[SHA1_DIGEST_SIZE * 2];
unsigned char xorhash[SHA1_DIGEST_SIZE];
unsigned char nonceodd[TPM_NONCE_SIZE];
};
/*
* Have the TPM seal (encrypt) the trusted key, possibly based on
* Platform Configuration Registers (PCRs). AUTH1 for sealing key.
*/
static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
uint32_t keyhandle, const unsigned char *keyauth,
const unsigned char *data, uint32_t datalen,
unsigned char *blob, uint32_t *bloblen,
const unsigned char *blobauth,
const unsigned char *pcrinfo, uint32_t pcrinfosize)
{
struct osapsess sess;
struct tpm_digests *td;
unsigned char cont;
uint32_t ordinal;
uint32_t pcrsize;
uint32_t datsize;
int sealinfosize;
int encdatasize;
int storedsize;
int ret;
int i;
/* alloc some work space for all the hashes */
td = kmalloc(sizeof *td, GFP_KERNEL);
if (!td)
return -ENOMEM;
/* get session for sealing key */
ret = osap(tb, &sess, keyauth, keytype, keyhandle);
if (ret < 0)
goto out;
dump_sess(&sess);
/* calculate encrypted authorization value */
memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE);
memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
if (ret < 0)
goto out;
ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE)
goto out;
ordinal = htonl(TPM_ORD_SEAL);
datsize = htonl(datalen);
pcrsize = htonl(pcrinfosize);
cont = 0;
/* encrypt data authorization key */
for (i = 0; i < SHA1_DIGEST_SIZE; ++i)
td->encauth[i] = td->xorhash[i] ^ blobauth[i];
/* calculate authorization HMAC value */
if (pcrinfosize == 0) {
/* no pcr info specified */
ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
sess.enonce, td->nonceodd, cont,
sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
td->encauth, sizeof(uint32_t), &pcrsize,
sizeof(uint32_t), &datsize, datalen, data, 0,
0);
} else {
/* pcr info specified */
ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
sess.enonce, td->nonceodd, cont,
sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
td->encauth, sizeof(uint32_t), &pcrsize,
pcrinfosize, pcrinfo, sizeof(uint32_t),
&datsize, datalen, data, 0, 0);
}
if (ret < 0)
goto out;
/* build and send the TPM request packet */
INIT_BUF(tb);
store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen);
store32(tb, TPM_ORD_SEAL);
store32(tb, keyhandle);
storebytes(tb, td->encauth, SHA1_DIGEST_SIZE);
store32(tb, pcrinfosize);
storebytes(tb, pcrinfo, pcrinfosize);
store32(tb, datalen);
storebytes(tb, data, datalen);
store32(tb, sess.handle);
storebytes(tb, td->nonceodd, TPM_NONCE_SIZE);
store8(tb, cont);
storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
if (ret < 0)
goto out;
/* calculate the size of the returned Blob */
sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) +
sizeof(uint32_t) + sealinfosize);
storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize +
sizeof(uint32_t) + encdatasize;
/* check the HMAC in the response */
ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret,
SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0,
0);
/* copy the returned blob to caller */
if (!ret) {
memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
*bloblen = storedsize;
}
out:
kzfree(td);
return ret;
}
/*
* use the AUTH2_COMMAND form of unseal, to authorize both key and blob
*/
static int tpm_unseal(struct tpm_buf *tb,
uint32_t keyhandle, const unsigned char *keyauth,
const unsigned char *blob, int bloblen,
const unsigned char *blobauth,
unsigned char *data, unsigned int *datalen)
{
unsigned char nonceodd[TPM_NONCE_SIZE];
unsigned char enonce1[TPM_NONCE_SIZE];
unsigned char enonce2[TPM_NONCE_SIZE];
unsigned char authdata1[SHA1_DIGEST_SIZE];
unsigned char authdata2[SHA1_DIGEST_SIZE];
uint32_t authhandle1 = 0;
uint32_t authhandle2 = 0;
unsigned char cont = 0;
uint32_t ordinal;
uint32_t keyhndl;
int ret;
/* sessions for unsealing key and data */
ret = oiap(tb, &authhandle1, enonce1);
if (ret < 0) {
pr_info("trusted_key: oiap failed (%d)\n", ret);
return ret;
}
ret = oiap(tb, &authhandle2, enonce2);
if (ret < 0) {
pr_info("trusted_key: oiap failed (%d)\n", ret);
return ret;
}
ordinal = htonl(TPM_ORD_UNSEAL);
keyhndl = htonl(SRKHANDLE);
ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE) {
pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
return ret;
}
ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
enonce1, nonceodd, cont, sizeof(uint32_t),
&ordinal, bloblen, blob, 0, 0);
if (ret < 0)
return ret;
ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE,
enonce2, nonceodd, cont, sizeof(uint32_t),
&ordinal, bloblen, blob, 0, 0);
if (ret < 0)
return ret;
/* build and send TPM request packet */
INIT_BUF(tb);
store16(tb, TPM_TAG_RQU_AUTH2_COMMAND);
store32(tb, TPM_UNSEAL_SIZE + bloblen);
store32(tb, TPM_ORD_UNSEAL);
store32(tb, keyhandle);
storebytes(tb, blob, bloblen);
store32(tb, authhandle1);
storebytes(tb, nonceodd, TPM_NONCE_SIZE);
store8(tb, cont);
storebytes(tb, authdata1, SHA1_DIGEST_SIZE);
store32(tb, authhandle2);
storebytes(tb, nonceodd, TPM_NONCE_SIZE);
store8(tb, cont);
storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
if (ret < 0) {
pr_info("trusted_key: authhmac failed (%d)\n", ret);
return ret;
}
*datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
ret = TSS_checkhmac2(tb->data, ordinal, nonceodd,
keyauth, SHA1_DIGEST_SIZE,
blobauth, SHA1_DIGEST_SIZE,
sizeof(uint32_t), TPM_DATA_OFFSET,
*datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0,
0);
if (ret < 0) {
pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret);
return ret;
}
memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
return 0;
}
/*
* Have the TPM seal(encrypt) the symmetric key
*/
static int key_seal(struct trusted_key_payload *p,
struct trusted_key_options *o)
{
struct tpm_buf *tb;
int ret;
tb = kzalloc(sizeof *tb, GFP_KERNEL);
if (!tb)
return -ENOMEM;
/* include migratable flag at end of sealed key */
p->key[p->key_len] = p->migratable;
ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth,
p->key, p->key_len + 1, p->blob, &p->blob_len,
o->blobauth, o->pcrinfo, o->pcrinfo_len);
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
kzfree(tb);
return ret;
}
/*
* Have the TPM unseal(decrypt) the symmetric key
*/
static int key_unseal(struct trusted_key_payload *p,
struct trusted_key_options *o)
{
struct tpm_buf *tb;
int ret;
tb = kzalloc(sizeof *tb, GFP_KERNEL);
if (!tb)
return -ENOMEM;
ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
o->blobauth, p->key, &p->key_len);
if (ret < 0)
pr_info("trusted_key: srkunseal failed (%d)\n", ret);
else
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
kzfree(tb);
return ret;
}
enum {
Opt_err = -1,
Opt_new, Opt_load, Opt_update,
Opt_keyhandle, Opt_keyauth, Opt_blobauth,
Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
Opt_hash,
Opt_policydigest,
Opt_policyhandle,
};
static const match_table_t key_tokens = {
{Opt_new, "new"},
{Opt_load, "load"},
{Opt_update, "update"},
{Opt_keyhandle, "keyhandle=%s"},
{Opt_keyauth, "keyauth=%s"},
{Opt_blobauth, "blobauth=%s"},
{Opt_pcrinfo, "pcrinfo=%s"},
{Opt_pcrlock, "pcrlock=%s"},
{Opt_migratable, "migratable=%s"},
{Opt_hash, "hash=%s"},
{Opt_policydigest, "policydigest=%s"},
{Opt_policyhandle, "policyhandle=%s"},
{Opt_err, NULL}
};
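/*
 * Illustrative userspace invocations that exercise these tokens (the
 * handles and sizes here are hypothetical examples, not values required
 * by this code):
 *
 *   keyctl add trusted kmk "new 32 keyhandle=0x40000000" @u
 *   keyctl add trusted kmk "load `cat kmk.blob` keyhandle=0x40000000" @u
 *   keyctl update <id> "update pcrlock=16"
 */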
/* can have zero or more token= options */
static int getoptions(char *c, struct trusted_key_payload *pay,
struct trusted_key_options *opt)
{
substring_t args[MAX_OPT_ARGS];
char *p = c;
int token;
int res;
unsigned long handle;
unsigned long lock;
unsigned long token_mask = 0;
unsigned int digest_len;
int i;
int tpm2;
tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
if (tpm2 < 0)
return tpm2;
opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
while ((p = strsep(&c, " \t"))) {
if (*p == '\0' || *p == ' ' || *p == '\t')
continue;
token = match_token(p, key_tokens, args);
if (test_and_set_bit(token, &token_mask))
return -EINVAL;
switch (token) {
case Opt_pcrinfo:
opt->pcrinfo_len = strlen(args[0].from) / 2;
if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
return -EINVAL;
res = hex2bin(opt->pcrinfo, args[0].from,
opt->pcrinfo_len);
if (res < 0)
return -EINVAL;
break;
case Opt_keyhandle:
res = kstrtoul(args[0].from, 16, &handle);
if (res < 0)
return -EINVAL;
opt->keytype = SEAL_keytype;
opt->keyhandle = handle;
break;
case Opt_keyauth:
if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
return -EINVAL;
res = hex2bin(opt->keyauth, args[0].from,
SHA1_DIGEST_SIZE);
if (res < 0)
return -EINVAL;
break;
case Opt_blobauth:
if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
return -EINVAL;
res = hex2bin(opt->blobauth, args[0].from,
SHA1_DIGEST_SIZE);
if (res < 0)
return -EINVAL;
break;
case Opt_migratable:
if (*args[0].from == '0')
pay->migratable = 0;
else
return -EINVAL;
break;
case Opt_pcrlock:
res = kstrtoul(args[0].from, 10, &lock);
if (res < 0)
return -EINVAL;
opt->pcrlock = lock;
break;
case Opt_hash:
if (test_bit(Opt_policydigest, &token_mask))
return -EINVAL;
for (i = 0; i < HASH_ALGO__LAST; i++) {
if (!strcmp(args[0].from, hash_algo_name[i])) {
opt->hash = i;
break;
}
}
if (i == HASH_ALGO__LAST)
return -EINVAL;
if (!tpm2 && i != HASH_ALGO_SHA1) {
pr_info("trusted_key: TPM 1.x only supports SHA-1.\n");
return -EINVAL;
}
break;
case Opt_policydigest:
digest_len = hash_digest_size[opt->hash];
if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
return -EINVAL;
res = hex2bin(opt->policydigest, args[0].from,
digest_len);
if (res < 0)
return -EINVAL;
opt->policydigest_len = digest_len;
break;
case Opt_policyhandle:
if (!tpm2)
return -EINVAL;
res = kstrtoul(args[0].from, 16, &handle);
if (res < 0)
return -EINVAL;
opt->policyhandle = handle;
break;
default:
return -EINVAL;
}
}
return 0;
}
/*
* datablob_parse - parse the keyctl data and fill in the
* payload and options structures
*
 * On success returns the key command (Opt_new, Opt_load or Opt_update),
 * otherwise -EINVAL.
*/
static int datablob_parse(char *datablob, struct trusted_key_payload *p,
struct trusted_key_options *o)
{
substring_t args[MAX_OPT_ARGS];
long keylen;
int ret = -EINVAL;
int key_cmd;
char *c;
/* main command */
c = strsep(&datablob, " \t");
if (!c)
return -EINVAL;
key_cmd = match_token(c, key_tokens, args);
switch (key_cmd) {
case Opt_new:
/* first argument is key size */
c = strsep(&datablob, " \t");
if (!c)
return -EINVAL;
ret = kstrtol(c, 10, &keylen);
if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
return -EINVAL;
p->key_len = keylen;
ret = getoptions(datablob, p, o);
if (ret < 0)
return ret;
ret = Opt_new;
break;
case Opt_load:
/* first argument is sealed blob */
c = strsep(&datablob, " \t");
if (!c)
return -EINVAL;
p->blob_len = strlen(c) / 2;
if (p->blob_len > MAX_BLOB_SIZE)
return -EINVAL;
ret = hex2bin(p->blob, c, p->blob_len);
if (ret < 0)
return -EINVAL;
ret = getoptions(datablob, p, o);
if (ret < 0)
return ret;
ret = Opt_load;
break;
case Opt_update:
/* all arguments are options */
ret = getoptions(datablob, p, o);
if (ret < 0)
return ret;
ret = Opt_update;
break;
case Opt_err:
return -EINVAL;
break;
}
return ret;
}
static struct trusted_key_options *trusted_options_alloc(void)
{
struct trusted_key_options *options;
int tpm2;
tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
if (tpm2 < 0)
return NULL;
options = kzalloc(sizeof *options, GFP_KERNEL);
if (options) {
/* set any non-zero defaults */
options->keytype = SRK_keytype;
if (!tpm2)
options->keyhandle = SRKHANDLE;
}
return options;
}
static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
{
struct trusted_key_payload *p = NULL;
int ret;
ret = key_payload_reserve(key, sizeof *p);
if (ret < 0)
return p;
p = kzalloc(sizeof *p, GFP_KERNEL);
if (p)
p->migratable = 1; /* migratable by default */
return p;
}
/*
* trusted_instantiate - create a new trusted key
*
* Unseal an existing trusted blob or, for a new key, get a
* random key, then seal and create a trusted key-type key,
* adding it to the specified keyring.
*
 * On success, return 0. Otherwise return a negative errno.
*/
static int trusted_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
struct trusted_key_payload *payload = NULL;
struct trusted_key_options *options = NULL;
size_t datalen = prep->datalen;
char *datablob;
int ret = 0;
int key_cmd;
size_t key_len;
int tpm2;
tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
if (tpm2 < 0)
return tpm2;
if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
datablob = kmalloc(datalen + 1, GFP_KERNEL);
if (!datablob)
return -ENOMEM;
memcpy(datablob, prep->data, datalen);
datablob[datalen] = '\0';
options = trusted_options_alloc();
if (!options) {
ret = -ENOMEM;
goto out;
}
payload = trusted_payload_alloc(key);
if (!payload) {
ret = -ENOMEM;
goto out;
}
key_cmd = datablob_parse(datablob, payload, options);
if (key_cmd < 0) {
ret = key_cmd;
goto out;
}
if (!options->keyhandle) {
ret = -EINVAL;
goto out;
}
dump_payload(payload);
dump_options(options);
switch (key_cmd) {
case Opt_load:
if (tpm2)
ret = tpm_unseal_trusted(TPM_ANY_NUM, payload, options);
else
ret = key_unseal(payload, options);
dump_payload(payload);
dump_options(options);
if (ret < 0)
pr_info("trusted_key: key_unseal failed (%d)\n", ret);
break;
case Opt_new:
key_len = payload->key_len;
ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len);
if (ret != key_len) {
pr_info("trusted_key: key_create failed (%d)\n", ret);
goto out;
}
if (tpm2)
ret = tpm_seal_trusted(TPM_ANY_NUM, payload, options);
else
ret = key_seal(payload, options);
if (ret < 0)
pr_info("trusted_key: key_seal failed (%d)\n", ret);
break;
default:
ret = -EINVAL;
goto out;
}
if (!ret && options->pcrlock)
ret = pcrlock(options->pcrlock);
out:
kzfree(datablob);
kzfree(options);
if (!ret)
rcu_assign_keypointer(key, payload);
else
kzfree(payload);
return ret;
}
static void trusted_rcu_free(struct rcu_head *rcu)
{
struct trusted_key_payload *p;
p = container_of(rcu, struct trusted_key_payload, rcu);
kzfree(p);
}
/*
* trusted_update - reseal an existing key with new PCR values
*/
static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
{
struct trusted_key_payload *p;
struct trusted_key_payload *new_p;
struct trusted_key_options *new_o;
size_t datalen = prep->datalen;
char *datablob;
int ret = 0;
if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
return -ENOKEY;
p = key->payload.data[0];
if (!p->migratable)
return -EPERM;
if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
datablob = kmalloc(datalen + 1, GFP_KERNEL);
if (!datablob)
return -ENOMEM;
new_o = trusted_options_alloc();
if (!new_o) {
ret = -ENOMEM;
goto out;
}
new_p = trusted_payload_alloc(key);
if (!new_p) {
ret = -ENOMEM;
goto out;
}
memcpy(datablob, prep->data, datalen);
datablob[datalen] = '\0';
ret = datablob_parse(datablob, new_p, new_o);
if (ret != Opt_update) {
ret = -EINVAL;
kzfree(new_p);
goto out;
}
if (!new_o->keyhandle) {
ret = -EINVAL;
kzfree(new_p);
goto out;
}
/* copy old key values, and reseal with new pcrs */
new_p->migratable = p->migratable;
new_p->key_len = p->key_len;
memcpy(new_p->key, p->key, p->key_len);
dump_payload(p);
dump_payload(new_p);
ret = key_seal(new_p, new_o);
if (ret < 0) {
pr_info("trusted_key: key_seal failed (%d)\n", ret);
kzfree(new_p);
goto out;
}
if (new_o->pcrlock) {
ret = pcrlock(new_o->pcrlock);
if (ret < 0) {
pr_info("trusted_key: pcrlock failed (%d)\n", ret);
kzfree(new_p);
goto out;
}
}
rcu_assign_keypointer(key, new_p);
call_rcu(&p->rcu, trusted_rcu_free);
out:
kzfree(datablob);
kzfree(new_o);
return ret;
}
/*
* trusted_read - copy the sealed blob data to userspace in hex.
* On success, return to userspace the trusted key datablob size.
*/
static long trusted_read(const struct key *key, char __user *buffer,
size_t buflen)
{
const struct trusted_key_payload *p;
char *ascii_buf;
char *bufp;
int i;
p = dereference_key_locked(key);
if (!p)
return -EINVAL;
if (!buffer || buflen <= 0)
return 2 * p->blob_len;
ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
if (!ascii_buf)
return -ENOMEM;
bufp = ascii_buf;
for (i = 0; i < p->blob_len; i++)
bufp = hex_byte_pack(bufp, p->blob[i]);
if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
kzfree(ascii_buf);
return -EFAULT;
}
kzfree(ascii_buf);
return 2 * p->blob_len;
}
/*
* trusted_destroy - clear and free the key's payload
*/
static void trusted_destroy(struct key *key)
{
kzfree(key->payload.data[0]);
}
struct key_type key_type_trusted = {
.name = "trusted",
.instantiate = trusted_instantiate,
.update = trusted_update,
.destroy = trusted_destroy,
.describe = user_describe,
.read = trusted_read,
};
EXPORT_SYMBOL_GPL(key_type_trusted);
static void trusted_shash_release(void)
{
if (hashalg)
crypto_free_shash(hashalg);
if (hmacalg)
crypto_free_shash(hmacalg);
}
static int __init trusted_shash_alloc(void)
{
int ret;
hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmacalg)) {
pr_info("trusted_key: could not allocate crypto %s\n",
hmac_alg);
return PTR_ERR(hmacalg);
}
hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hashalg)) {
pr_info("trusted_key: could not allocate crypto %s\n",
hash_alg);
ret = PTR_ERR(hashalg);
goto hashalg_fail;
}
return 0;
hashalg_fail:
crypto_free_shash(hmacalg);
return ret;
}
static int __init init_trusted(void)
{
int ret;
ret = trusted_shash_alloc();
if (ret < 0)
return ret;
ret = register_key_type(&key_type_trusted);
if (ret < 0)
trusted_shash_release();
return ret;
}
static void __exit cleanup_trusted(void)
{
trusted_shash_release();
unregister_key_type(&key_type_trusted);
}
late_initcall(init_trusted);
module_exit(cleanup_trusted);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_2891_12 |
crossvul-cpp_data_good_5117_0 | /* State machine for IKEv1
* Copyright (C) 1997 Angelos D. Keromytis.
* Copyright (C) 1998-2010,2013-2015 D. Hugh Redelmeier <hugh@mimosa.com>
* Copyright (C) 2003-2008 Michael Richardson <mcr@xelerance.com>
* Copyright (C) 2008-2009 David McCullough <david_mccullough@securecomputing.com>
* Copyright (C) 2008-2010 Paul Wouters <paul@xelerance.com>
* Copyright (C) 2011 Avesh Agarwal <avagarwa@redhat.com>
* Copyright (C) 2008 Hiren Joshi <joshihirenn@gmail.com>
* Copyright (C) 2009 Anthony Tong <atong@TrustedCS.com>
* Copyright (C) 2012-2013 Paul Wouters <pwouters@redhat.com>
* Copyright (C) 2013 Wolfgang Nothdurft <wolfgang@linogate.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
/* Ordering Constraints on Payloads
*
* rfc2409: The Internet Key Exchange (IKE)
*
* 5 Exchanges:
* "The SA payload MUST precede all other payloads in a phase 1 exchange."
*
* "Except where otherwise noted, there are no requirements for ISAKMP
* payloads in any message to be in any particular order."
*
* 5.3 Phase 1 Authenticated With a Revised Mode of Public Key Encryption:
*
* "If the HASH payload is sent it MUST be the first payload of the
* second message exchange and MUST be followed by the encrypted
* nonce. If the HASH payload is not sent, the first payload of the
* second message exchange MUST be the encrypted nonce."
*
* "Save the requirements on the location of the optional HASH payload
* and the mandatory nonce payload there are no further payload
* requirements. All payloads-- in whatever order-- following the
* encrypted nonce MUST be encrypted with Ke_i or Ke_r depending on the
* direction."
*
* 5.5 Phase 2 - Quick Mode
*
* "In Quick Mode, a HASH payload MUST immediately follow the ISAKMP
* header and a SA payload MUST immediately follow the HASH."
* [NOTE: there may be more than one SA payload, so this is not
* totally reasonable. Probably all SAs should be so constrained.]
*
* "If ISAKMP is acting as a client negotiator on behalf of another
* party, the identities of the parties MUST be passed as IDci and
* then IDcr."
*
* "With the exception of the HASH, SA, and the optional ID payloads,
* there are no payload ordering restrictions on Quick Mode."
*/
/* Unfolding of Identity -- a central mystery
*
* This concerns Phase 1 identities, those of the IKE hosts.
* These are the only ones that are authenticated. Phase 2
* identities are for IPsec SAs.
*
* There are three case of interest:
*
* (1) We initiate, based on a whack command specifying a Connection.
* We know the identity of the peer from the Connection.
*
* (2) (to be implemented) we initiate based on a flow from our client
* to some IP address.
* We immediately know one of the peer's client IP addresses from
* the flow. We must use this to figure out the peer's IP address
* and Id. To be solved.
*
* (3) We respond to an IKE negotiation.
* We immediately know the peer's IP address.
* We get an ID Payload in Main I2.
*
* Unfortunately, this is too late for a number of things:
* - the ISAKMP SA proposals have already been made (Main I1)
* AND one accepted (Main R1)
* - the SA includes a specification of the type of ID
* authentication so this is negotiated without being told the ID.
* - with Preshared Key authentication, Main I2 is encrypted
* using the key, so it cannot be decoded to reveal the ID
* without knowing (or guessing) which key to use.
*
* There are three reasonable choices here for the responder:
* + assume that the initiator is making wise offers since it
* knows the IDs involved. We can balk later (but not gracefully)
* when we find the actual initiator ID
* + attempt to infer identity by IP address. Again, we can balk
* when the true identity is revealed. Actually, it is enough
* to infer properties of the identity (eg. SA properties and
* PSK, if needed).
* + make all properties universal so discrimination based on
* identity isn't required. For example, always accept the same
* kinds of encryption. Accept Public Key Id authentication
* since the Initiator presumably has our public key and thinks
* we must have / can find his. This approach is weakest
* for preshared key since the actual key must be known to
* decrypt the Initiator's ID Payload.
* These choices can be blended. For example, a class of Identities
* can be inferred, sufficient to select a preshared key but not
* sufficient to infer a unique identity.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <libreswan.h>
#include "sysdep.h"
#include "constants.h"
#include "lswlog.h"
#include "defs.h"
#include "cookie.h"
#include "id.h"
#include "x509.h"
#include "pluto_x509.h"
#include "certs.h"
#include "connections.h" /* needs id.h */
#include "state.h"
#include "ikev1_msgid.h"
#include "packet.h"
#include "md5.h"
#include "sha1.h"
#include "crypto.h" /* requires sha1.h and md5.h */
#include "ike_alg.h"
#include "log.h"
#include "demux.h" /* needs packet.h */
#include "ikev1.h"
#include "ipsec_doi.h" /* needs demux.h and state.h */
#include "ikev1_quick.h"
#include "timer.h"
#include "whack.h" /* requires connections.h */
#include "server.h"
#include "ikev1_xauth.h"
#include "nat_traversal.h"
#include "vendor.h"
#include "ikev1_dpd.h"
#include "hostpair.h"
#include "pluto_crypt.h" /* just for log_crypto_workers() */
#ifdef HAVE_NM
#include "kernel.h"
#endif
/* state_microcode is a tuple of information parameterizing certain
* centralized processing of a packet. For example, it roughly
* specifies what payloads are expected in this message.
* The microcode is selected primarily based on the state.
* In Phase 1, the payload structure often depends on the
* authentication technique, so that too plays a part in selecting
* the state_microcode to use.
*/
struct state_microcode {
enum state_kind state, next_state;
lset_t flags;
lset_t req_payloads; /* required payloads (allows just one) */
	lset_t opt_payloads; /* optional payloads (any number) */
/* if not ISAKMP_NEXT_NONE, process_packet will emit HDR with this as np */
u_int8_t first_out_payload;
enum event_type timeout_event;
state_transition_fn *processor;
};
/* State Microcode Flags, in several groups */
/* Oakley Auth values: to which auth values does this entry apply?
* Most entries will use SMF_ALL_AUTH because they apply to all.
* Note: SMF_ALL_AUTH matches 0 for those circumstances when no auth
* has been set.
*/
#define SMF_ALL_AUTH LRANGE(0, OAKLEY_AUTH_ROOF - 1)
#define SMF_PSK_AUTH LELEM(OAKLEY_PRESHARED_KEY)
#define SMF_DS_AUTH (LELEM(OAKLEY_DSS_SIG) | LELEM(OAKLEY_RSA_SIG))
#define SMF_PKE_AUTH LELEM(OAKLEY_RSA_ENC)
#define SMF_RPKE_AUTH LELEM(OAKLEY_RSA_REVISED_MODE)
/* misc flags */
#define SMF_INITIATOR LELEM(OAKLEY_AUTH_ROOF + 0)
#define SMF_FIRST_ENCRYPTED_INPUT LELEM(OAKLEY_AUTH_ROOF + 1)
#define SMF_INPUT_ENCRYPTED LELEM(OAKLEY_AUTH_ROOF + 2)
#define SMF_OUTPUT_ENCRYPTED LELEM(OAKLEY_AUTH_ROOF + 3)
#define SMF_RETRANSMIT_ON_DUPLICATE LELEM(OAKLEY_AUTH_ROOF + 4)
#define SMF_ENCRYPTED (SMF_INPUT_ENCRYPTED | SMF_OUTPUT_ENCRYPTED)
/* this state generates a reply message */
#define SMF_REPLY LELEM(OAKLEY_AUTH_ROOF + 5)
/* this state completes P1, so any pending P2 negotiations should start */
#define SMF_RELEASE_PENDING_P2 LELEM(OAKLEY_AUTH_ROOF + 6)
/* if we have canonicalized the authentication from XAUTH mode */
#define SMF_XAUTH_AUTH LELEM(OAKLEY_AUTH_ROOF + 7)
/* end of flags */
static state_transition_fn /* forward declaration */
unexpected,
informational;
/* v1_state_microcode_table is a table of all state_microcode tuples.
* It must be in order of state (the first element).
* After initialization, ike_microcode_index[s] points to the
* first entry in v1_state_microcode_table for state s.
* Remember that each state name in Main or Quick Mode describes
* what has happened in the past, not what this message is.
*/
static const struct state_microcode
*ike_microcode_index[STATE_IKE_ROOF - STATE_IKE_FLOOR];
static const struct state_microcode v1_state_microcode_table[] = {
#define PT(n) ISAKMP_NEXT_ ## n
#define P(n) LELEM(PT(n))
/***** Phase 1 Main Mode *****/
/* No state for main_outI1: --> HDR, SA */
/* STATE_MAIN_R0: I1 --> R1
* HDR, SA --> HDR, SA
*/
{ STATE_MAIN_R0, STATE_MAIN_R1,
SMF_ALL_AUTH | SMF_REPLY,
P(SA), P(VID) | P(CR), PT(NONE),
EVENT_NULL, main_inI1_outR1 },
/* STATE_MAIN_I1: R1 --> I2
* HDR, SA --> auth dependent
* SMF_PSK_AUTH, SMF_DS_AUTH: --> HDR, KE, Ni
* SMF_PKE_AUTH:
* --> HDR, KE, [ HASH(1), ] <IDi1_b>PubKey_r, <Ni_b>PubKey_r
* SMF_RPKE_AUTH:
* --> HDR, [ HASH(1), ] <Ni_b>Pubkey_r, <KE_b>Ke_i, <IDi1_b>Ke_i [,<<Cert-I_b>Ke_i]
* Note: since we don't know auth at start, we cannot differentiate
* microcode entries based on it.
*/
{ STATE_MAIN_I1, STATE_MAIN_I2,
SMF_ALL_AUTH | SMF_INITIATOR | SMF_REPLY,
P(SA), P(VID) | P(CR), PT(NONE), /* don't know yet */
EVENT_v1_RETRANSMIT, main_inR1_outI2 },
/* STATE_MAIN_R1: I2 --> R2
* SMF_PSK_AUTH, SMF_DS_AUTH: HDR, KE, Ni --> HDR, KE, Nr
* SMF_PKE_AUTH: HDR, KE, [ HASH(1), ] <IDi1_b>PubKey_r, <Ni_b>PubKey_r
* --> HDR, KE, <IDr1_b>PubKey_i, <Nr_b>PubKey_i
* SMF_RPKE_AUTH:
* HDR, [ HASH(1), ] <Ni_b>Pubkey_r, <KE_b>Ke_i, <IDi1_b>Ke_i [,<<Cert-I_b>Ke_i]
* --> HDR, <Nr_b>PubKey_i, <KE_b>Ke_r, <IDr1_b>Ke_r
*/
{ STATE_MAIN_R1, STATE_MAIN_R2,
SMF_PSK_AUTH | SMF_DS_AUTH | SMF_REPLY
, P(KE) | P(NONCE), P(VID) | P(CR) | P(NATD_RFC), PT(NONE)
, EVENT_v1_RETRANSMIT, main_inI2_outR2 },
{ STATE_MAIN_R1, STATE_UNDEFINED,
SMF_PKE_AUTH | SMF_REPLY,
P(KE) | P(ID) | P(NONCE), P(VID) | P(CR) | P(HASH), PT(KE),
EVENT_v1_RETRANSMIT, unexpected /* ??? not yet implemented */ },
{ STATE_MAIN_R1, STATE_UNDEFINED,
SMF_RPKE_AUTH | SMF_REPLY,
	  P(NONCE) | P(KE) | P(ID), P(VID) | P(CR) | P(HASH) | P(CERT),
	  PT(NONCE),
EVENT_v1_RETRANSMIT, unexpected /* ??? not yet implemented */ },
/* for states from here on, output message must be encrypted */
/* STATE_MAIN_I2: R2 --> I3
* SMF_PSK_AUTH: HDR, KE, Nr --> HDR*, IDi1, HASH_I
* SMF_DS_AUTH: HDR, KE, Nr --> HDR*, IDi1, [ CERT, ] SIG_I
* SMF_PKE_AUTH: HDR, KE, <IDr1_b>PubKey_i, <Nr_b>PubKey_i
* --> HDR*, HASH_I
* SMF_RPKE_AUTH: HDR, <Nr_b>PubKey_i, <KE_b>Ke_r, <IDr1_b>Ke_r
* --> HDR*, HASH_I
*/
{ STATE_MAIN_I2, STATE_MAIN_I3,
SMF_PSK_AUTH | SMF_DS_AUTH | SMF_INITIATOR | SMF_OUTPUT_ENCRYPTED |
SMF_REPLY
, P(KE) | P(NONCE), P(VID) | P(CR) | P(NATD_RFC), PT(ID)
, EVENT_v1_RETRANSMIT, main_inR2_outI3 },
{ STATE_MAIN_I2, STATE_UNDEFINED,
SMF_PKE_AUTH | SMF_INITIATOR | SMF_OUTPUT_ENCRYPTED | SMF_REPLY,
P(KE) | P(ID) | P(NONCE), P(VID) | P(CR), PT(HASH),
EVENT_v1_RETRANSMIT, unexpected /* ??? not yet implemented */ },
{ STATE_MAIN_I2, STATE_UNDEFINED,
SMF_ALL_AUTH | SMF_INITIATOR | SMF_OUTPUT_ENCRYPTED | SMF_REPLY,
P(NONCE) | P(KE) | P(ID), P(VID) | P(CR), PT(HASH),
EVENT_v1_RETRANSMIT, unexpected /* ??? not yet implemented */ },
/* for states from here on, input message must be encrypted */
/* STATE_MAIN_R2: I3 --> R3
* SMF_PSK_AUTH: HDR*, IDi1, HASH_I --> HDR*, IDr1, HASH_R
* SMF_DS_AUTH: HDR*, IDi1, [ CERT, ] SIG_I --> HDR*, IDr1, [ CERT, ] SIG_R
* SMF_PKE_AUTH, SMF_RPKE_AUTH: HDR*, HASH_I --> HDR*, HASH_R
*/
{ STATE_MAIN_R2, STATE_MAIN_R3,
SMF_PSK_AUTH | SMF_FIRST_ENCRYPTED_INPUT | SMF_ENCRYPTED |
SMF_REPLY | SMF_RELEASE_PENDING_P2,
P(ID) | P(HASH), P(VID) | P(CR), PT(NONE),
EVENT_SA_REPLACE, main_inI3_outR3 },
{ STATE_MAIN_R2, STATE_MAIN_R3,
SMF_DS_AUTH | SMF_FIRST_ENCRYPTED_INPUT | SMF_ENCRYPTED |
SMF_REPLY | SMF_RELEASE_PENDING_P2,
P(ID) | P(SIG), P(VID) | P(CR) | P(CERT), PT(NONE),
EVENT_SA_REPLACE, main_inI3_outR3 },
{ STATE_MAIN_R2, STATE_UNDEFINED,
SMF_PKE_AUTH | SMF_RPKE_AUTH | SMF_FIRST_ENCRYPTED_INPUT |
SMF_ENCRYPTED |
SMF_REPLY | SMF_RELEASE_PENDING_P2,
P(HASH), P(VID) | P(CR), PT(NONE),
EVENT_SA_REPLACE, unexpected /* ??? not yet implemented */ },
/* STATE_MAIN_I3: R3 --> done
* SMF_PSK_AUTH: HDR*, IDr1, HASH_R --> done
* SMF_DS_AUTH: HDR*, IDr1, [ CERT, ] SIG_R --> done
* SMF_PKE_AUTH, SMF_RPKE_AUTH: HDR*, HASH_R --> done
* May initiate quick mode by calling quick_outI1
*/
{ STATE_MAIN_I3, STATE_MAIN_I4,
SMF_PSK_AUTH | SMF_INITIATOR |
SMF_FIRST_ENCRYPTED_INPUT | SMF_ENCRYPTED | SMF_RELEASE_PENDING_P2,
P(ID) | P(HASH), P(VID) | P(CR), PT(NONE),
EVENT_SA_REPLACE, main_inR3 },
{ STATE_MAIN_I3, STATE_MAIN_I4,
SMF_DS_AUTH | SMF_INITIATOR |
SMF_FIRST_ENCRYPTED_INPUT | SMF_ENCRYPTED | SMF_RELEASE_PENDING_P2,
P(ID) | P(SIG), P(VID) | P(CR) | P(CERT), PT(NONE),
EVENT_SA_REPLACE, main_inR3 },
{ STATE_MAIN_I3, STATE_UNDEFINED,
SMF_PKE_AUTH | SMF_RPKE_AUTH | SMF_INITIATOR |
SMF_FIRST_ENCRYPTED_INPUT | SMF_ENCRYPTED | SMF_RELEASE_PENDING_P2,
P(HASH), P(VID) | P(CR), PT(NONE),
EVENT_SA_REPLACE, unexpected /* ??? not yet implemented */ },
/* STATE_MAIN_R3: can only get here due to packet loss */
{ STATE_MAIN_R3, STATE_UNDEFINED,
SMF_ALL_AUTH | SMF_ENCRYPTED | SMF_RETRANSMIT_ON_DUPLICATE,
LEMPTY, LEMPTY,
PT(NONE), EVENT_NULL, unexpected },
/* STATE_MAIN_I4: can only get here due to packet loss */
{ STATE_MAIN_I4, STATE_UNDEFINED,
SMF_ALL_AUTH | SMF_INITIATOR | SMF_ENCRYPTED,
LEMPTY, LEMPTY,
PT(NONE), EVENT_NULL, unexpected },
/***** Phase 1 Aggressive Mode *****/
/* No initial state for aggr_outI1:
* SMF_DS_AUTH (RFC 2409 5.1) and SMF_PSK_AUTH (RFC 2409 5.4):
* -->HDR, SA, KE, Ni, IDii
*
* Not implemented:
* RFC 2409 5.2: --> HDR, SA, [ HASH(1),] KE, <IDii_b>Pubkey_r, <Ni_b>Pubkey_r
* RFC 2409 5.3: --> HDR, SA, [ HASH(1),] <Ni_b>Pubkey_r, <KE_b>Ke_i, <IDii_b>Ke_i [, <Cert-I_b>Ke_i ]
*/
/* STATE_AGGR_R0:
* SMF_PSK_AUTH: HDR, SA, KE, Ni, IDii
* --> HDR, SA, KE, Nr, IDir, HASH_R
* SMF_DS_AUTH: HDR, SA, KE, Nr, IDii
* --> HDR, SA, KE, Nr, IDir, [CERT,] SIG_R
*/
{ STATE_AGGR_R0, STATE_AGGR_R1,
SMF_PSK_AUTH | SMF_DS_AUTH | SMF_REPLY,
P(SA) | P(KE) | P(NONCE) | P(ID), P(VID) | P(NATD_RFC), PT(NONE),
EVENT_NULL, aggr_inI1_outR1 },
/* STATE_AGGR_I1:
* SMF_PSK_AUTH: HDR, SA, KE, Nr, IDir, HASH_R
* --> HDR*, HASH_I
* SMF_DS_AUTH: HDR, SA, KE, Nr, IDir, [CERT,] SIG_R
* --> HDR*, [CERT,] SIG_I
*/
{ STATE_AGGR_I1, STATE_AGGR_I2,
SMF_PSK_AUTH | SMF_INITIATOR | SMF_OUTPUT_ENCRYPTED | SMF_REPLY |
SMF_RELEASE_PENDING_P2,
P(SA) | P(KE) | P(NONCE) | P(ID) | P(HASH), P(VID) | P(NATD_RFC),
PT(NONE),
EVENT_SA_REPLACE, aggr_inR1_outI2 },
{ STATE_AGGR_I1, STATE_AGGR_I2,
SMF_DS_AUTH | SMF_INITIATOR | SMF_OUTPUT_ENCRYPTED | SMF_REPLY |
SMF_RELEASE_PENDING_P2,
P(SA) | P(KE) | P(NONCE) | P(ID) | P(SIG), P(VID) | P(NATD_RFC),
PT(NONE),
EVENT_SA_REPLACE, aggr_inR1_outI2 },
/* STATE_AGGR_R1:
* SMF_PSK_AUTH: HDR*, HASH_I --> done
* SMF_DS_AUTH: HDR*, SIG_I --> done
*/
{ STATE_AGGR_R1, STATE_AGGR_R2,
SMF_PSK_AUTH | SMF_FIRST_ENCRYPTED_INPUT |
SMF_ENCRYPTED | SMF_RELEASE_PENDING_P2,
P(HASH), P(VID) | P(NATD_RFC), PT(NONE),
EVENT_SA_REPLACE, aggr_inI2 },
{ STATE_AGGR_R1, STATE_AGGR_R2,
SMF_DS_AUTH | SMF_FIRST_ENCRYPTED_INPUT |
SMF_ENCRYPTED | SMF_RELEASE_PENDING_P2,
P(SIG), P(VID) | P(NATD_RFC), PT(NONE),
EVENT_SA_REPLACE, aggr_inI2 },
/* STATE_AGGR_I2: can only get here due to packet loss */
{ STATE_AGGR_I2, STATE_UNDEFINED,
SMF_ALL_AUTH | SMF_INITIATOR | SMF_RETRANSMIT_ON_DUPLICATE,
LEMPTY, LEMPTY, PT(NONE), EVENT_NULL, unexpected },
/* STATE_AGGR_R2: can only get here due to packet loss */
{ STATE_AGGR_R2, STATE_UNDEFINED,
SMF_ALL_AUTH,
LEMPTY, LEMPTY, PT(NONE), EVENT_NULL, unexpected },
/***** Phase 2 Quick Mode *****/
/* No state for quick_outI1:
* --> HDR*, HASH(1), SA, Nr [, KE ] [, IDci, IDcr ]
*/
/* STATE_QUICK_R0:
* HDR*, HASH(1), SA, Ni [, KE ] [, IDci, IDcr ] -->
* HDR*, HASH(2), SA, Nr [, KE ] [, IDci, IDcr ]
* Installs inbound IPsec SAs.
* Because it may suspend for asynchronous DNS, first_out_payload
* is set to NONE to suppress early emission of HDR*.
* ??? it is legal to have multiple SAs, but we don't support it yet.
*/
{ STATE_QUICK_R0, STATE_QUICK_R1,
SMF_ALL_AUTH | SMF_ENCRYPTED | SMF_REPLY
	  , P(HASH) | P(SA) | P(NONCE),
	  /* P(SA) | */ P(KE) | P(ID) | P(NATOA_RFC), PT(NONE)
, EVENT_v1_RETRANSMIT, quick_inI1_outR1 },
/* STATE_QUICK_I1:
* HDR*, HASH(2), SA, Nr [, KE ] [, IDci, IDcr ] -->
* HDR*, HASH(3)
* Installs inbound and outbound IPsec SAs, routing, etc.
* ??? it is legal to have multiple SAs, but we don't support it yet.
*/
{ STATE_QUICK_I1, STATE_QUICK_I2,
SMF_ALL_AUTH | SMF_INITIATOR | SMF_ENCRYPTED | SMF_REPLY
	  , P(HASH) | P(SA) | P(NONCE),
	  /* P(SA) | */ P(KE) | P(ID) | P(NATOA_RFC), PT(HASH)
, EVENT_SA_REPLACE, quick_inR1_outI2 },
/* STATE_QUICK_R1: HDR*, HASH(3) --> done
* Installs outbound IPsec SAs, routing, etc.
*/
{ STATE_QUICK_R1, STATE_QUICK_R2,
SMF_ALL_AUTH | SMF_ENCRYPTED,
P(HASH), LEMPTY, PT(NONE),
EVENT_SA_REPLACE, quick_inI2 },
/* STATE_QUICK_I2: can only happen due to lost packet */
{ STATE_QUICK_I2, STATE_UNDEFINED,
SMF_ALL_AUTH | SMF_INITIATOR | SMF_ENCRYPTED |
SMF_RETRANSMIT_ON_DUPLICATE,
LEMPTY, LEMPTY, PT(NONE),
EVENT_NULL, unexpected },
/* STATE_QUICK_R2: can only happen due to lost packet */
{ STATE_QUICK_R2, STATE_UNDEFINED,
SMF_ALL_AUTH | SMF_ENCRYPTED,
LEMPTY, LEMPTY, PT(NONE),
EVENT_NULL, unexpected },
/***** informational messages *****/
/* Informational Exchange (RFC 2408 4.8):
* HDR N/D
* Unencrypted: must not occur after ISAKMP Phase 1 exchange of keying material.
*/
/* STATE_INFO: */
{ STATE_INFO, STATE_UNDEFINED,
SMF_ALL_AUTH,
LEMPTY, LEMPTY, PT(NONE),
EVENT_NULL, informational },
/* Informational Exchange (RFC 2408 4.8):
* HDR* N/D
*/
/* STATE_INFO_PROTECTED: */
{ STATE_INFO_PROTECTED, STATE_UNDEFINED,
SMF_ALL_AUTH | SMF_ENCRYPTED,
P(HASH), LEMPTY, PT(NONE),
EVENT_NULL, informational },
{ STATE_XAUTH_R0, STATE_XAUTH_R1,
SMF_ALL_AUTH | SMF_ENCRYPTED,
P(MCFG_ATTR) | P(HASH), P(VID), PT(NONE),
EVENT_NULL, xauth_inR0 }, /*Re-transmit may be done by previous state*/
{ STATE_XAUTH_R1, STATE_MAIN_R3,
SMF_ALL_AUTH | SMF_ENCRYPTED,
P(MCFG_ATTR) | P(HASH), P(VID), PT(NONE),
EVENT_SA_REPLACE, xauth_inR1 },
#if 0
/* for situation where there is XAUTH + ModeCFG */
{ STATE_XAUTH_R2, STATE_XAUTH_R3,
SMF_ALL_AUTH | SMF_ENCRYPTED,
P(MCFG_ATTR) | P(HASH), P(VID), PT(NONE),
EVENT_SA_REPLACE, xauth_inR2 },
{ STATE_XAUTH_R3, STATE_MAIN_R3,
SMF_ALL_AUTH | SMF_ENCRYPTED,
P(MCFG_ATTR) | P(HASH), P(VID), PT(NONE),
EVENT_SA_REPLACE, xauth_inR3 },
#endif
/* MODE_CFG_x:
* Case R0: Responder -> Initiator
* <- Req(addr=0)
* Reply(ad=x) ->
*
* Case R1: Set(addr=x) ->
* <- Ack(ok)
*/
{ STATE_MODE_CFG_R0, STATE_MODE_CFG_R1,
SMF_ALL_AUTH | SMF_ENCRYPTED | SMF_REPLY,
P(MCFG_ATTR) | P(HASH), P(VID), PT(HASH),
EVENT_SA_REPLACE, modecfg_inR0 },
{ STATE_MODE_CFG_R1, STATE_MODE_CFG_R2,
SMF_ALL_AUTH | SMF_ENCRYPTED,
P(MCFG_ATTR) | P(HASH), P(VID), PT(HASH),
EVENT_SA_REPLACE, modecfg_inR1 },
{ STATE_MODE_CFG_R2, STATE_UNDEFINED,
SMF_ALL_AUTH | SMF_ENCRYPTED,
LEMPTY, LEMPTY, PT(NONE),
EVENT_NULL, unexpected },
{ STATE_MODE_CFG_I1, STATE_MAIN_I4,
SMF_ALL_AUTH | SMF_ENCRYPTED | SMF_RELEASE_PENDING_P2,
P(MCFG_ATTR) | P(HASH), P(VID), PT(HASH),
EVENT_SA_REPLACE, modecfg_inR1 },
{ STATE_XAUTH_I0, STATE_XAUTH_I1,
SMF_ALL_AUTH | SMF_ENCRYPTED | SMF_REPLY | SMF_RELEASE_PENDING_P2,
P(MCFG_ATTR) | P(HASH), P(VID), PT(HASH),
EVENT_v1_RETRANSMIT, xauth_inI0 },
{ STATE_XAUTH_I1, STATE_MAIN_I4,
SMF_ALL_AUTH | SMF_ENCRYPTED | SMF_REPLY | SMF_RELEASE_PENDING_P2,
P(MCFG_ATTR) | P(HASH), P(VID), PT(HASH),
EVENT_v1_RETRANSMIT, xauth_inI1 },
#undef P
#undef PT
};
void init_ikev1(void)
{
/* fill ike_microcode_index:
* make ike_microcode_index[s] point to first entry in
* v1_state_microcode_table for state s (backward scan makes this easier).
* Check that table is in order -- catch coding errors.
* For what it's worth, this routine is idempotent.
*/
const struct state_microcode *t;
for (t = &v1_state_microcode_table[elemsof(v1_state_microcode_table) - 1];;)
{
passert(STATE_IKE_FLOOR <= t->state &&
t->state < STATE_IKE_ROOF);
ike_microcode_index[t->state - STATE_IKE_FLOOR] = t;
if (t == v1_state_microcode_table)
break;
t--;
passert(t[0].state <= t[1].state);
}
}
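
/*
 * Sketch of how the index built above is consumed (this mirrors the
 * lookup in process_v1_packet(); the baseauth value is a hypothetical
 * example):
 */
#if 0
const struct state_microcode *smc =
	ike_microcode_index[from_state - STATE_IKE_FLOOR];
oakley_auth_t baseauth = OAKLEY_PRESHARED_KEY;	/* example only */

while (!LHAS(smc->flags, baseauth))
	smc++;	/* entries for one state are adjacent in the table */
#endif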
static stf_status unexpected(struct msg_digest *md)
{
loglog(RC_LOG_SERIOUS, "unexpected message received in state %s",
enum_name(&state_names, md->st->st_state));
return STF_IGNORE;
}
/*
* RFC 2408 Section 4.6
*
* # Initiator Direction Responder NOTE
* (1) HDR*; N/D => Error Notification or Deletion
*/
static stf_status informational(struct msg_digest *md)
{
struct payload_digest *const n_pld = md->chain[ISAKMP_NEXT_N];
/* If the Notification Payload is not null... */
if (n_pld != NULL) {
pb_stream *const n_pbs = &n_pld->pbs;
struct isakmp_notification *const n =
&n_pld->payload.notification;
struct state *st = md->st; /* may be NULL */
/* Switch on Notification Type (enum) */
/* note that we _can_ get notification payloads unencrypted
* once we are at least in R3/I4.
* and that the handler is expected to treat them suspiciously.
*/
DBG(DBG_CONTROL, DBG_log("processing informational %s (%d)",
enum_name(&ikev1_notify_names,
n->isan_type),
n->isan_type));
switch (n->isan_type) {
case R_U_THERE:
if (st == NULL) {
loglog(RC_LOG_SERIOUS,
"received bogus R_U_THERE informational message");
return STF_IGNORE;
}
return dpd_inI_outR(st, n, n_pbs);
case R_U_THERE_ACK:
if (st == NULL) {
loglog(RC_LOG_SERIOUS,
"received bogus R_U_THERE_ACK informational message");
return STF_IGNORE;
}
return dpd_inR(st, n, n_pbs);
case PAYLOAD_MALFORMED:
if (st != NULL) {
st->hidden_variables.st_malformed_received++;
libreswan_log(
"received %u malformed payload notifies",
st->hidden_variables.st_malformed_received);
if (st->hidden_variables.st_malformed_sent >
MAXIMUM_MALFORMED_NOTIFY / 2 &&
				    ((st->hidden_variables.st_malformed_sent +
				      st->hidden_variables.st_malformed_received) >
				     MAXIMUM_MALFORMED_NOTIFY)) {
libreswan_log(
"too many malformed payloads (we sent %u and received %u",
st->hidden_variables.st_malformed_sent,
st->hidden_variables.st_malformed_received);
delete_state(st);
md->st = st = NULL;
}
}
return STF_IGNORE;
case ISAKMP_N_CISCO_LOAD_BALANCE:
if (st != NULL && IS_ISAKMP_SA_ESTABLISHED(st->st_state)) {
/* Saving connection name and whack sock id */
const char *tmp_name = st->st_connection->name;
int tmp_whack_sock = dup_any(st->st_whack_sock);
/* deleting ISAKMP SA with the current remote peer */
delete_state(st);
md->st = st = NULL;
/* to find and store the connection associated with tmp_name */
/* ??? how do we know that tmp_name hasn't been freed? */
struct connection *tmp_c = con_by_name(tmp_name, FALSE);
DBG_cond_dump(DBG_PARSING,
"redirected remote end info:", n_pbs->cur + pbs_left(
n_pbs) - 4, 4);
/* Current remote peer info */
{
ipstr_buf b;
const struct spd_route *tmp_spd =
&tmp_c->spd;
int count_spd = 0;
do {
DBG(DBG_CONTROLMORE,
DBG_log("spd route number: %d",
++count_spd));
					/* that info */
DBG(DBG_CONTROLMORE,
DBG_log("that id kind: %d",
tmp_spd->that.id.kind));
DBG(DBG_CONTROLMORE,
DBG_log("that id ipaddr: %s",
ipstr(&tmp_spd->that.id.ip_addr, &b)));
				if (tmp_spd->that.id.name.ptr != NULL)
					DBG(DBG_CONTROLMORE,
					    DBG_dump_chunk("that id name",
							   tmp_spd->that.id.name));
DBG(DBG_CONTROLMORE,
DBG_log("that host_addr: %s",
ipstr(&tmp_spd->that.host_addr, &b)));
DBG(DBG_CONTROLMORE,
DBG_log("that nexthop: %s",
ipstr(&tmp_spd->that.host_nexthop, &b)));
DBG(DBG_CONTROLMORE,
DBG_log("that srcip: %s",
ipstr(&tmp_spd->that.host_srcip, &b)));
				DBG(DBG_CONTROLMORE,
				    DBG_log("that client_addr: %s, maskbits:%d",
					    ipstr(&tmp_spd->that.client.addr, &b),
					    tmp_spd->that.client.maskbits));
				DBG(DBG_CONTROLMORE,
				    DBG_log("that has_client: %d",
					    tmp_spd->that.has_client));
				DBG(DBG_CONTROLMORE,
				    DBG_log("that has_client_wildcard: %d",
					    tmp_spd->that.has_client_wildcard));
				DBG(DBG_CONTROLMORE,
				    DBG_log("that has_port_wildcard: %d",
					    tmp_spd->that.has_port_wildcard));
				DBG(DBG_CONTROLMORE,
				    DBG_log("that has_id_wildcards: %d",
					    tmp_spd->that.has_id_wildcards));
tmp_spd = tmp_spd->spd_next;
} while (tmp_spd != NULL);
if (tmp_c->interface != NULL) {
DBG(DBG_CONTROLMORE,
DBG_log("Current interface_addr: %s",
ipstr(&tmp_c->interface->ip_addr, &b)));
}
if (tmp_c->gw_info != NULL) {
DBG(DBG_CONTROLMORE, {
DBG_log("Current gw_client_addr: %s",
ipstr(&tmp_c->gw_info->client_id.ip_addr, &b));
DBG_log("Current gw_gw_addr: %s",
ipstr(&tmp_c->gw_info->gw_id.ip_addr, &b));
});
}
}
/* storing old address for comparison purposes */
ip_address old_addr = tmp_c->spd.that.host_addr;
/* Decoding remote peer address info where connection has to be redirected to */
memcpy(&tmp_c->spd.that.host_addr.u.v4.sin_addr.s_addr,
(u_int32_t *)(n_pbs->cur +
pbs_left(n_pbs) - 4),
sizeof(tmp_c->spd.that.host_addr.u.v4.
sin_addr.
s_addr));
/* Modifying connection info to store the redirected remote peer info */
DBG(DBG_CONTROLMORE,
DBG_log("Old host_addr_name : %s",
tmp_c->spd.that.host_addr_name));
tmp_c->spd.that.host_addr_name = NULL;
tmp_c->spd.that.id.ip_addr =
tmp_c->spd.that.host_addr;
DBG(DBG_CONTROLMORE, {
ipstr_buf b;
if (sameaddr(&tmp_c->spd.this.
host_nexthop,
&old_addr)) {
DBG_log("Old remote addr %s",
ipstr(&old_addr, &b));
DBG_log("Old this host next hop %s",
ipstr(&tmp_c->spd.this.host_nexthop, &b));
tmp_c->spd.this.host_nexthop = tmp_c->spd.that.host_addr;
DBG_log("New this host next hop %s",
ipstr(&tmp_c->spd.this.host_nexthop, &b));
}
if (sameaddr(&tmp_c->spd.that.
host_srcip,
&old_addr)) {
DBG_log("Old that host srcip %s",
ipstr(&tmp_c->spd.that.host_srcip, &b));
tmp_c->spd.that.host_srcip = tmp_c->spd.that.host_addr;
DBG_log("New that host srcip %s",
ipstr(&tmp_c->spd.that.host_srcip, &b));
}
if (sameaddr(&tmp_c->spd.that.
client.addr,
&old_addr)) {
DBG_log("Old that client ip %s",
ipstr(&tmp_c->spd.that.client.addr, &b));
tmp_c->spd.that.client.addr = tmp_c->spd.that.host_addr;
DBG_log("New that client ip %s",
ipstr(&tmp_c->spd.that.client.addr, &b));
}
});
tmp_c->host_pair->him.addr =
tmp_c->spd.that.host_addr;
/* Initiating connection to the redirected peer */
initiate_connection(tmp_name, tmp_whack_sock,
LEMPTY, pcim_demand_crypto);
return STF_IGNORE;
}
loglog(RC_LOG_SERIOUS,
"received and ignored informational message with ISAKMP_N_CISCO_LOAD_BALANCE for unestablished state.");
return STF_IGNORE;
default:
if (st != NULL &&
(st->st_connection->extra_debugging &
IMPAIR_DIE_ONINFO)) {
loglog(RC_LOG_SERIOUS,
"received unhandled informational notification payload %d: '%s'",
n->isan_type,
enum_name(&ikev1_notify_names,
n->isan_type));
return STF_FATAL;
}
loglog(RC_LOG_SERIOUS,
"received and ignored informational message");
return STF_IGNORE;
}
} else {
loglog(RC_LOG_SERIOUS,
"received and ignored empty informational notification payload");
return STF_IGNORE;
}
}
/* create output HDR as replica of input HDR - IKEv1 only */
void ikev1_echo_hdr(struct msg_digest *md, bool enc, u_int8_t np)
{
struct isakmp_hdr hdr = md->hdr; /* mostly same as incoming header */
/* make sure we start with a clean buffer */
init_out_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer),
"reply packet");
hdr.isa_flags = 0; /* zero all flags */
if (enc)
hdr.isa_flags |= ISAKMP_FLAGS_v1_ENCRYPTION;
if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) {
hdr.isa_flags |= ISAKMP_FLAGS_RESERVED_BIT6;
}
/* there is only one IKEv1 version, and no new one will ever come - no need to set version */
hdr.isa_np = np;
if (!out_struct(&hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody))
impossible(); /* surely must have room and be well-formed */
}
/* process an input packet, possibly generating a reply.
*
* If all goes well, this routine eventually calls a state-specific
* transition function.
*
* This routine will not release_any_md(mdp). It is expected that its
* caller will do this. In fact, it will zap *mdp to NULL if it thinks
* **mdp should not be freed. So the caller should be prepared for
* *mdp being set to NULL.
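 *
 * Illustrative caller pattern implied by this contract (the real caller
 * lives in the demux code; this is only a sketch):
 *
 *	struct msg_digest *md = ...;	// freshly parsed packet
 *
 *	process_v1_packet(&md);
 *	release_any_md(&md);	// no-op if *mdp was zapped to NULL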
*/
void process_v1_packet(struct msg_digest **mdp)
{
struct msg_digest *md = *mdp;
const struct state_microcode *smc;
bool new_iv_set = FALSE;
struct state *st = NULL;
enum state_kind from_state = STATE_UNDEFINED; /* state we started in */
#define SEND_NOTIFICATION(t) { \
if (st != NULL) \
send_notification_from_state(st, from_state, t); \
else \
send_notification_from_md(md, t); }
switch (md->hdr.isa_xchg) {
#ifdef NOTYET
case ISAKMP_XCHG_NONE:
case ISAKMP_XCHG_BASE:
case ISAKMP_XCHG_AO:
#endif
case ISAKMP_XCHG_AGGR:
case ISAKMP_XCHG_IDPROT: /* part of a Main Mode exchange */
if (md->hdr.isa_msgid != v1_MAINMODE_MSGID) {
libreswan_log(
"Message ID was 0x%08lx but should be zero in phase 1",
(unsigned long) md->hdr.isa_msgid);
SEND_NOTIFICATION(INVALID_MESSAGE_ID);
return;
}
if (is_zero_cookie(md->hdr.isa_icookie)) {
libreswan_log(
"Initiator Cookie must not be zero in phase 1 message");
SEND_NOTIFICATION(INVALID_COOKIE);
return;
}
if (is_zero_cookie(md->hdr.isa_rcookie)) {
/* initial message from initiator
* ??? what if this is a duplicate of another message?
*/
if (md->hdr.isa_flags & ISAKMP_FLAGS_v1_ENCRYPTION) {
libreswan_log("initial phase 1 message is invalid:"
" its Encrypted Flag is on");
SEND_NOTIFICATION(INVALID_FLAGS);
return;
}
/* don't build a state until the message looks tasty */
from_state = (md->hdr.isa_xchg == ISAKMP_XCHG_IDPROT ?
STATE_MAIN_R0 : STATE_AGGR_R0);
} else {
/* not an initial message */
st = find_state_ikev1(md->hdr.isa_icookie,
md->hdr.isa_rcookie,
md->hdr.isa_msgid);
if (st == NULL) {
/* perhaps this is a first message from the responder
* and contains a responder cookie that we've not yet seen.
*/
st = find_state_ikev1(md->hdr.isa_icookie,
zero_cookie,
md->hdr.isa_msgid);
if (st == NULL) {
libreswan_log(
"phase 1 message is part of an unknown exchange");
/* XXX Could send notification back */
return;
}
}
set_cur_state(st);
from_state = st->st_state;
}
break;
case ISAKMP_XCHG_INFO: /* an informational exchange */
st = ikev1_find_info_state(md->hdr.isa_icookie, md->hdr.isa_rcookie,
&md->sender, v1_MAINMODE_MSGID);
if (st == NULL) {
/*
* might be an informational response to our first
* message, in which case, we don't know the rcookie yet.
*/
st = find_state_ikev1(md->hdr.isa_icookie, zero_cookie,
v1_MAINMODE_MSGID);
}
if (st != NULL)
set_cur_state(st);
if (md->hdr.isa_flags & ISAKMP_FLAGS_v1_ENCRYPTION) {
bool quiet = (st == NULL ||
(st->st_connection->policy & POLICY_OPPORTUNISTIC));
if (st == NULL) {
DBG(DBG_CONTROL, DBG_log(
"Informational Exchange is for an unknown (expired?) SA with MSGID:0x%08lx",
(unsigned long)md->hdr.isa_msgid));
/* Let's try to log some info about these to track them down */
DBG(DBG_CONTROL, {
DBG_dump("- unknown SA's md->hdr.isa_icookie:",
md->hdr.isa_icookie,
COOKIE_SIZE);
DBG_dump("- unknown SA's md->hdr.isa_rcookie:",
md->hdr.isa_rcookie,
COOKIE_SIZE);
});
/* XXX Could send notification back */
return;
}
if (!IS_ISAKMP_ENCRYPTED(st->st_state)) {
if (!quiet) {
loglog(RC_LOG_SERIOUS, "encrypted Informational Exchange message is invalid because no key is known");
}
/* XXX Could send notification back */
return;
}
if (md->hdr.isa_msgid == v1_MAINMODE_MSGID) {
if (!quiet) {
loglog(RC_LOG_SERIOUS, "Informational Exchange message is invalid because it has a Message ID of 0");
}
/* XXX Could send notification back */
return;
}
if (!unique_msgid(st, md->hdr.isa_msgid)) {
if (!quiet) {
loglog(RC_LOG_SERIOUS, "Informational Exchange message is invalid because it has a previously used Message ID (0x%08lx)",
(unsigned long)md->hdr.isa_msgid);
}
/* XXX Could send notification back */
return;
}
st->st_msgid_reserved = FALSE;
init_phase2_iv(st, &md->hdr.isa_msgid);
new_iv_set = TRUE;
from_state = STATE_INFO_PROTECTED;
} else {
if (st != NULL &&
IS_ISAKMP_AUTHENTICATED(st->st_state)) {
if ((st->st_connection->policy & POLICY_OPPORTUNISTIC) == LEMPTY) {
loglog(RC_LOG_SERIOUS, "Informational Exchange message must be encrypted");
}
/* XXX Could send notification back */
return;
}
from_state = STATE_INFO;
}
break;
case ISAKMP_XCHG_QUICK: /* part of a Quick Mode exchange */
if (is_zero_cookie(md->hdr.isa_icookie)) {
DBG(DBG_CONTROL, DBG_log(
"Quick Mode message is invalid because it has an Initiator Cookie of 0"));
SEND_NOTIFICATION(INVALID_COOKIE);
return;
}
if (is_zero_cookie(md->hdr.isa_rcookie)) {
DBG(DBG_CONTROL, DBG_log(
"Quick Mode message is invalid because it has a Responder Cookie of 0"));
SEND_NOTIFICATION(INVALID_COOKIE);
return;
}
if (md->hdr.isa_msgid == v1_MAINMODE_MSGID) {
DBG(DBG_CONTROL, DBG_log(
"Quick Mode message is invalid because it has a Message ID of 0"));
SEND_NOTIFICATION(INVALID_MESSAGE_ID);
return;
}
st = find_state_ikev1(md->hdr.isa_icookie, md->hdr.isa_rcookie,
md->hdr.isa_msgid);
if (st == NULL) {
/* No appropriate Quick Mode state.
* See if we have a Main Mode state.
* ??? what if this is a duplicate of another message?
*/
st = find_state_ikev1(md->hdr.isa_icookie,
md->hdr.isa_rcookie,
v1_MAINMODE_MSGID);
if (st == NULL) {
DBG(DBG_CONTROL, DBG_log(
"Quick Mode message is for a non-existent (expired?) ISAKMP SA"));
/* XXX Could send notification back */
return;
}
if (st->st_oakley.doing_xauth) {
DBG(DBG_CONTROL, DBG_log(
"Cannot do Quick Mode until XAUTH done."));
return;
}
/* Have we just given an IP address to peer? */
if (st->st_state == STATE_MODE_CFG_R2) {
/* ISAKMP is up... */
change_state(st, STATE_MAIN_R3);
}
#ifdef SOFTREMOTE_CLIENT_WORKAROUND
/* See: http://popoludnica.pl/?id=10100110 */
if (st->st_state == STATE_MODE_CFG_R1) {
libreswan_log(
"SoftRemote workaround: Cannot do Quick Mode until MODECFG done.");
return;
}
#endif
set_cur_state(st);
if (!IS_ISAKMP_SA_ESTABLISHED(st->st_state)) {
if (DBGP(DBG_OPPO) || (st->st_connection->policy & POLICY_OPPORTUNISTIC) == LEMPTY) {
loglog(RC_LOG_SERIOUS, "Quick Mode message is unacceptable because it is for an incomplete ISAKMP SA");
}
SEND_NOTIFICATION(PAYLOAD_MALFORMED /* XXX ? */);
return;
}
if (!unique_msgid(st, md->hdr.isa_msgid)) {
if (DBGP(DBG_OPPO) || (st->st_connection->policy & POLICY_OPPORTUNISTIC) == LEMPTY) {
loglog(RC_LOG_SERIOUS, "Quick Mode I1 message is unacceptable because it uses a previously used Message ID 0x%08lx (perhaps this is a duplicated packet)",
(unsigned long) md->hdr.isa_msgid);
}
SEND_NOTIFICATION(INVALID_MESSAGE_ID);
return;
}
st->st_msgid_reserved = FALSE;
/* Quick Mode Initial IV */
init_phase2_iv(st, &md->hdr.isa_msgid);
new_iv_set = TRUE;
from_state = STATE_QUICK_R0;
} else {
if (st->st_oakley.doing_xauth) {
if (DBGP(DBG_OPPO) ||
(st->st_connection->policy & POLICY_OPPORTUNISTIC) == LEMPTY) {
libreswan_log("Cannot do Quick Mode until XAUTH done.");
}
return;
}
set_cur_state(st);
from_state = st->st_state;
}
break;
case ISAKMP_XCHG_MODE_CFG:
if (is_zero_cookie(md->hdr.isa_icookie)) {
DBG(DBG_CONTROL, DBG_log("Mode Config message is invalid because it has an Initiator Cookie of 0"));
/* XXX Could send notification back */
return;
}
if (is_zero_cookie(md->hdr.isa_rcookie)) {
DBG(DBG_CONTROL, DBG_log("Mode Config message is invalid because it has a Responder Cookie of 0"));
/* XXX Could send notification back */
return;
}
if (md->hdr.isa_msgid == 0) {
DBG(DBG_CONTROL, DBG_log("Mode Config message is invalid because it has a Message ID of 0"));
/* XXX Could send notification back */
return;
}
st = ikev1_find_info_state(md->hdr.isa_icookie, md->hdr.isa_rcookie,
&md->sender, md->hdr.isa_msgid);
if (st == NULL) {
DBG(DBG_CONTROL, DBG_log(
"No appropriate Mode Config state yet.See if we have a Main Mode state"));
/* No appropriate Mode Config state.
* See if we have a Main Mode state.
* ??? what if this is a duplicate of another message?
*/
st = ikev1_find_info_state(md->hdr.isa_icookie,
md->hdr.isa_rcookie,
&md->sender, 0);
if (st == NULL) {
DBG(DBG_CONTROL, DBG_log(
"Mode Config message is for a non-existent (expired?) ISAKMP SA"));
/* XXX Could send notification back */
return;
}
set_cur_state(st);
DBG(DBG_CONTROLMORE, DBG_log(" processing received "
"isakmp_xchg_type %s.",
enum_show(&ikev1_exchange_names,
md->hdr.isa_xchg)));
DBG(DBG_CONTROLMORE, DBG_log(" this is a%s%s%s%s",
st->st_connection->spd.
this.xauth_server ?
" xauthserver" : "",
st->st_connection->spd.
this.xauth_client ?
" xauthclient" : "",
st->st_connection->spd.
this.modecfg_server ?
" modecfgserver" : "",
st->st_connection->spd.
this.modecfg_client ?
" modecfgclient" : ""
));
if (!IS_ISAKMP_SA_ESTABLISHED(st->st_state)) {
DBG(DBG_CONTROLMORE, DBG_log(
"Mode Config message is unacceptable because it is for an incomplete ISAKMP SA (state=%s)",
enum_name(&state_names, st->st_state)));
/* XXX Could send notification back */
return;
}
DBG(DBG_CONTROLMORE, DBG_log(" call init_phase2_iv"));
init_phase2_iv(st, &md->hdr.isa_msgid);
new_iv_set = TRUE;
/*
* okay, now we have to figure out if we are receiving a bogus
		 * new message in an outstanding XAUTH server conversation
* (i.e. a reply to our challenge)
* (this occurs with some broken other implementations).
*
* or if receiving for the first time, an XAUTH challenge.
*
* or if we are getting a MODECFG request.
*
* we distinguish these states because we cannot both be an
* XAUTH server and client, and our policy tells us which
* one we are.
*
* to complicate further, it is normal to start a new msgid
* when going from one state to another, or when restarting
* the challenge.
*
*/
if (st->st_connection->spd.this.xauth_server &&
st->st_state == STATE_XAUTH_R1 &&
st->quirks.xauth_ack_msgid) {
from_state = STATE_XAUTH_R1;
DBG(DBG_CONTROLMORE, DBG_log(
" set from_state to %s state is STATE_XAUTH_R1 and quirks.xauth_ack_msgid is TRUE",
enum_name(&state_names,
st->st_state
)));
} else if (st->st_connection->spd.this.xauth_client
&&
IS_PHASE1(st->st_state)) {
from_state = STATE_XAUTH_I0;
DBG(DBG_CONTROLMORE, DBG_log(
" set from_state to %s this is xauthclient and IS_PHASE1() is TRUE",
enum_name(&state_names,
st->st_state
)));
} else if (st->st_connection->spd.this.xauth_client
&&
st->st_state == STATE_XAUTH_I1) {
/*
* in this case, we got a new MODECFG message after I0, maybe
* because it wants to start over again.
*/
from_state = STATE_XAUTH_I0;
DBG(DBG_CONTROLMORE, DBG_log(
" set from_state to %s this is xauthclient and state == STATE_XAUTH_I1",
enum_name(&state_names,
st->st_state
)));
} else if (st->st_connection->spd.this.modecfg_server
&&
IS_PHASE1(st->st_state)) {
from_state = STATE_MODE_CFG_R0;
DBG(DBG_CONTROLMORE, DBG_log(
" set from_state to %s this is modecfgserver and IS_PHASE1() is TRUE",
enum_name(&state_names,
st->st_state
)));
} else if (st->st_connection->spd.this.modecfg_client
&&
IS_PHASE1(st->st_state)) {
from_state = STATE_MODE_CFG_R1;
DBG(DBG_CONTROLMORE, DBG_log(
" set from_state to %s this is modecfgclient and IS_PHASE1() is TRUE",
enum_name(&state_names,
st->st_state
)));
} else {
DBG(DBG_CONTROLMORE, DBG_log(
"received isakmp_xchg_type %s",
enum_show(&ikev1_exchange_names,
md->hdr.isa_xchg)));
				DBG(DBG_CONTROLMORE, DBG_log(
					    "this is a%s%s%s%s in state %s. Reply with UNSUPPORTED_EXCHANGE_TYPE",
					    st->st_connection->spd.this.xauth_server ?
						    " xauthserver" : "",
					    st->st_connection->spd.this.xauth_client ?
						    " xauthclient" : "",
					    st->st_connection->spd.this.modecfg_server ?
						    " modecfgserver" : "",
					    st->st_connection->spd.this.modecfg_client ?
						    " modecfgclient" : "",
					    enum_name(&state_names, st->st_state)));
return;
}
} else {
if (st->st_connection->spd.this.xauth_server &&
IS_PHASE1(st->st_state)) {
/* Switch from Phase1 to Mode Config */
DBG(DBG_CONTROL, DBG_log(
"We were in phase 1, with no state, so we went to XAUTH_R0"));
change_state(st, STATE_XAUTH_R0);
}
			/* otherwise, this is fine, we continue in the state we are in */
set_cur_state(st);
from_state = st->st_state;
}
break;
case ISAKMP_XCHG_NGRP:
default:
DBG(DBG_CONTROL, DBG_log("unsupported exchange type %s in message",
enum_show(&ikev1_exchange_names, md->hdr.isa_xchg)));
SEND_NOTIFICATION(UNSUPPORTED_EXCHANGE_TYPE);
return;
}
/* We have found a from_state, and perhaps a state object.
* If we need to build a new state object,
* we wait until the packet has been sanity checked.
*/
/* We don't support the Commit Flag. It is such a bad feature.
* It isn't protected -- neither encrypted nor authenticated.
* A man in the middle turns it on, leading to DoS.
* We just ignore it, with a warning.
*/
if (md->hdr.isa_flags & ISAKMP_FLAGS_v1_COMMIT)
DBG(DBG_CONTROL, DBG_log(
"IKE message has the Commit Flag set but Pluto doesn't implement this feature due to security concerns; ignoring flag"));
/* Handle IKE fragmentation payloads */
if (md->hdr.isa_np == ISAKMP_NEXT_IKE_FRAGMENTATION) {
struct isakmp_ikefrag fraghdr;
struct ike_frag *ike_frag, **i;
int last_frag_index = 0; /* index of the last fragment */
pb_stream frag_pbs;
if (st == NULL) {
DBG(DBG_CONTROL, DBG_log(
"received IKE fragment, but have no state. Ignoring packet."));
return;
}
if ((st->st_connection->policy & POLICY_IKE_FRAG_ALLOW) == 0) {
DBG(DBG_CONTROL, DBG_log(
"discarding IKE fragment packet - fragmentation not allowed by local policy (ike_frag=no)"));
return;
}
if (!in_struct(&fraghdr, &isakmp_ikefrag_desc,
&md->message_pbs, &frag_pbs) ||
pbs_room(&frag_pbs) != fraghdr.isafrag_length ||
fraghdr.isafrag_np != 0 ||
fraghdr.isafrag_number == 0 || fraghdr.isafrag_number >
16) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
DBG(DBG_CONTROL,
DBG_log("received IKE fragment id '%d', number '%u'%s",
fraghdr.isafrag_id,
fraghdr.isafrag_number,
(fraghdr.isafrag_flags == 1) ? "(last)" : ""));
ike_frag = alloc_thing(struct ike_frag, "ike_frag");
ike_frag->md = md;
ike_frag->index = fraghdr.isafrag_number;
ike_frag->last = (fraghdr.isafrag_flags & 1);
ike_frag->size = pbs_left(&frag_pbs);
ike_frag->data = frag_pbs.cur;
#if 0
/* is this ever hit? It was wrongly checking one byte instead of 4 bytes of marker */
/* Strip non-ESP marker from first fragment */
if (md->iface->ike_float && ike_frag->index == 1 &&
(ike_frag->size >= NON_ESP_MARKER_SIZE &&
memeq(non_ESP_marker, ike_frag->data,
NON_ESP_MARKER_SIZE))) {
ike_frag->data += NON_ESP_MARKER_SIZE;
ike_frag->size -= NON_ESP_MARKER_SIZE;
}
#endif
/* Add the fragment to the state */
i = &st->ike_frags;
for (;;) {
if (ike_frag != NULL) {
/* Still looking for a place to insert ike_frag */
if (*i == NULL ||
(*i)->index > ike_frag->index) {
ike_frag->next = *i;
*i = ike_frag;
ike_frag = NULL;
} else if ((*i)->index == ike_frag->index) {
/* Replace fragment with same index */
struct ike_frag *old = *i;
ike_frag->next = old->next;
*i = ike_frag;
release_md(old->md);
pfree(old);
ike_frag = NULL;
}
}
if (*i == NULL)
break;
if ((*i)->last)
last_frag_index = (*i)->index;
i = &(*i)->next;
}
/* We have the last fragment, reassemble if complete */
if (last_frag_index != 0) {
size_t size = 0;
int prev_index = 0;
struct ike_frag *frag;
for (frag = st->ike_frags; frag; frag = frag->next) {
size += frag->size;
if (frag->index != ++prev_index) {
break; /* fragment list incomplete */
} else if (frag->index == last_frag_index) {
struct msg_digest *whole_md = alloc_md();
u_int8_t *buffer = alloc_bytes(size,
"IKE fragments buffer");
size_t offset = 0;
whole_md->iface = frag->md->iface;
whole_md->sender = frag->md->sender;
whole_md->sender_port =
frag->md->sender_port;
/* Reassemble fragments in buffer */
frag = st->ike_frags;
while (frag != NULL &&
frag->index <= last_frag_index)
{
passert(offset + frag->size <=
size);
memcpy(buffer + offset,
frag->data, frag->size);
offset += frag->size;
frag = frag->next;
}
init_pbs(&whole_md->packet_pbs, buffer, size,
"packet");
process_packet(&whole_md);
release_any_md(&whole_md);
release_fragments(st);
/* optimize: if receiving fragments, immediately respond with fragments too */
st->st_seen_fragments = TRUE;
DBG(DBG_CONTROL, DBG_log(
" updated IKE fragment state to respond using fragments without waiting for re-transmits"));
break;
}
}
}
/* Don't release the md, taken care of by the ike_frag code */
/* ??? I'm not sure -- DHR */
*mdp = NULL;
return;
}
/* Set smc to describe this state's properties.
* Look up the appropriate microcode based on state and
* possibly Oakley Auth type.
*/
passert(STATE_IKE_FLOOR <= from_state && from_state <= STATE_IKE_ROOF);
smc = ike_microcode_index[from_state - STATE_IKE_FLOOR];
if (st != NULL) {
oakley_auth_t baseauth =
xauth_calcbaseauth(st->st_oakley.auth);
while (!LHAS(smc->flags, baseauth)) {
smc++;
passert(smc->state == from_state);
}
}
if (state_busy(st))
return;
/* Detect and handle duplicated packets.
* This won't work for the initial packet of an exchange
* because we won't have a state object to remember it.
* If we are in a non-receiving state (terminal), and the preceding
* state did transmit, then the duplicate may indicate that that
* transmission wasn't received -- retransmit it.
* Otherwise, just discard it.
* ??? Notification packets are like exchanges -- I hope that
* they are idempotent!
*/
if (st != NULL &&
st->st_rpacket.ptr != NULL &&
st->st_rpacket.len == pbs_room(&md->packet_pbs) &&
memeq(st->st_rpacket.ptr, md->packet_pbs.start,
st->st_rpacket.len)) {
if (smc->flags & SMF_RETRANSMIT_ON_DUPLICATE) {
if (st->st_retransmit < MAXIMUM_v1_ACCEPTED_DUPLICATES) {
st->st_retransmit++;
loglog(RC_RETRANSMISSION,
"retransmitting in response to duplicate packet; already %s",
enum_name(&state_names, st->st_state));
resend_ike_v1_msg(st,
"retransmit in response to duplicate");
} else {
loglog(RC_LOG_SERIOUS,
"discarding duplicate packet -- exhausted retransmission; already %s",
enum_name(&state_names, st->st_state));
}
} else {
loglog(RC_LOG_SERIOUS,
"discarding duplicate packet; already %s",
enum_name(&state_names, st->st_state));
}
return;
}
/* save values for use in resumption of processing below.
* (may be suspended due to crypto operation not yet complete)
*/
md->st = st;
md->from_state = from_state;
md->smc = smc;
md->new_iv_set = new_iv_set;
/*
* look for encrypt packets. We cannot handle them if we have not
* yet calculated the skeyids. We will just store the packet in
* the suspended state, since the calculation is likely underway.
*
* note that this differs from above, because skeyid is calculated
* in between states. (or will be, once DH is async)
*
*/
if ((md->hdr.isa_flags & ISAKMP_FLAGS_v1_ENCRYPTION) &&
st != NULL && !st->hidden_variables.st_skeyid_calculated ) {
DBG(DBG_CRYPT | DBG_CONTROL, {
ipstr_buf b;
DBG_log("received encrypted packet from %s:%u but exponentiation still in progress",
ipstr(&md->sender, &b),
(unsigned)md->sender_port);
});
/* if there was a previous packet, let it go, and go with most
* recent one.
*/
if (st->st_suspended_md != NULL) {
DBG(DBG_CONTROL,
DBG_log("releasing suspended operation before completion: %p",
st->st_suspended_md));
release_any_md(&st->st_suspended_md);
}
set_suspended(st, md);
*mdp = NULL;
return;
}
process_packet_tail(mdp);
/* our caller will release_any_md(mdp); */
}
/*
* This routine will not release_any_md(mdp). It is expected that its
* caller will do this. In fact, it will zap *mdp to NULL if it thinks
* **mdp should not be freed. So the caller should be prepared for
* *mdp being set to NULL.
*/
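#if 0
/* Sketch of the expected caller pattern (illustrative only): the callee
 * may take ownership of the md by zapping *mdp to NULL, so the caller
 * can always finish with release_any_md(), which tolerates NULL.
 */
static void example_caller(struct msg_digest *md)
{
process_packet_tail(&md);
release_any_md(&md); /* no-op if process_packet_tail took the md */
}
#endif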
void process_packet_tail(struct msg_digest **mdp)
{
struct msg_digest *md = *mdp;
struct state *st = md->st;
enum state_kind from_state = md->from_state;
const struct state_microcode *smc = md->smc;
bool new_iv_set = md->new_iv_set;
bool self_delete = FALSE;
if (md->hdr.isa_flags & ISAKMP_FLAGS_v1_ENCRYPTION) {
DBG(DBG_CRYPT, {
ipstr_buf b;
DBG_log("received encrypted packet from %s:%u",
ipstr(&md->sender, &b),
(unsigned)md->sender_port);
});
if (st == NULL) {
libreswan_log(
"discarding encrypted message for an unknown ISAKMP SA");
SEND_NOTIFICATION(PAYLOAD_MALFORMED /* XXX ? */);
return;
}
if (st->st_skey_ei_nss == NULL) {
loglog(RC_LOG_SERIOUS,
"discarding encrypted message because we haven't yet negotiated keying material");
SEND_NOTIFICATION(INVALID_FLAGS);
return;
}
/* Mark as encrypted */
md->encrypted = TRUE;
DBG(DBG_CRYPT,
DBG_log("decrypting %u bytes using algorithm %s",
(unsigned) pbs_left(&md->message_pbs),
enum_show(&oakley_enc_names,
st->st_oakley.encrypt)));
/* do the specified decryption
*
* IV is from st->st_iv or (if new_iv_set) st->st_new_iv.
* The new IV is placed in st->st_new_iv
*
* See RFC 2409 "IKE" Appendix B
*
* XXX The IV should only be updated really if the packet
* is successfully processed.
* We should keep this value, check for a success return
* value from the parsing routines and then replace.
*
* Each post phase 1 exchange generates IVs from
* the last phase 1 block, not the last block sent.
*/
{
const struct encrypt_desc *e = st->st_oakley.encrypter;
if (pbs_left(&md->message_pbs) % e->enc_blocksize != 0)
{
loglog(RC_LOG_SERIOUS,
"malformed message: not a multiple of encryption blocksize");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
/* XXX Detect weak keys */
/* grab a copy of raw packet (for duplicate packet detection) */
clonetochunk(md->raw_packet, md->packet_pbs.start,
pbs_room(&md->packet_pbs), "raw packet");
/* Decrypt everything after header */
if (!new_iv_set) {
if (st->st_iv_len == 0) {
init_phase2_iv(st, &md->hdr.isa_msgid);
} else {
/* use old IV */
restore_new_iv(st, st->st_iv, st->st_iv_len);
}
}
crypto_cbc_encrypt(e, FALSE, md->message_pbs.cur,
pbs_left(&md->message_pbs), st);
}
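#if 0
/* Sketch of the fix the XXX note above asks for (not current behaviour):
 * snapshot the IV before decrypting and roll it back unless the packet
 * parses successfully, so a garbage packet cannot clobber our IV chain.
 * "parsed_ok" is a stand-in for a success result from the parsing
 * routines; buffer sizes follow the existing st_new_iv convention.
 */
u_char saved_iv[MAX_DIGEST_LEN];
unsigned saved_iv_len = st->st_new_iv_len;
memcpy(saved_iv, st->st_new_iv, saved_iv_len);
/* ... decrypt and parse the message ... */
if (!parsed_ok)
restore_new_iv(st, saved_iv, saved_iv_len);
#endif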
DBG_cond_dump(DBG_CRYPT, "decrypted:\n", md->message_pbs.cur,
md->message_pbs.roof - md->message_pbs.cur);
DBG_cond_dump(DBG_CRYPT, "next IV:",
st->st_new_iv, st->st_new_iv_len);
} else {
/* packet was not encrypted -- should it have been? */
if (smc->flags & SMF_INPUT_ENCRYPTED) {
loglog(RC_LOG_SERIOUS,
"packet rejected: should have been encrypted");
SEND_NOTIFICATION(INVALID_FLAGS);
return;
}
}
/* Digest the message.
* Padding must be removed to make hashing work.
* Padding comes from encryption (so this code must be after decryption).
* Padding rules are described before the definition of
* struct isakmp_hdr in packet.h.
*/
{
struct payload_digest *pd = md->digest;
enum next_payload_types_ikev1 np = md->hdr.isa_np;
lset_t needed = smc->req_payloads;
const char *excuse =
LIN(SMF_PSK_AUTH | SMF_FIRST_ENCRYPTED_INPUT,
smc->flags) ?
"probable authentication failure (mismatch of preshared secrets?): "
:
"";
while (np != ISAKMP_NEXT_NONE) {
struct_desc *sd = v1_payload_desc(np);
if (pd == &md->digest[PAYLIMIT]) {
loglog(RC_LOG_SERIOUS,
"more than %d payloads in message; ignored",
PAYLIMIT);
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
/*
 * Only apply this check in Main Mode: in Aggressive Mode there is
 * no negotiation of the NAT-T method, so nothing can be enforced.
 */
if (st != NULL && st->st_connection != NULL &&
(st->st_connection->policy & POLICY_AGGRESSIVE) == LEMPTY)
{
switch (np) {
case ISAKMP_NEXT_NATD_RFC:
case ISAKMP_NEXT_NATOA_RFC:
if ((st->hidden_variables.st_nat_traversal & NAT_T_WITH_RFC_VALUES) == LEMPTY) {
/*
* don't accept NAT-D/NAT-OA reloc directly in message,
* unless we're using NAT-T RFC
*/
DBG(DBG_NATT,
DBG_log("st_nat_traversal was: %s",
bitnamesof(natt_bit_names,
st->hidden_variables.st_nat_traversal)));
sd = NULL;
}
break;
default:
break;
}
}
if (sd == NULL) {
/* payload type is out of range or requires special handling */
switch (np) {
case ISAKMP_NEXT_ID:
/* ??? two kinds of ID payloads */
sd = (IS_PHASE1(from_state) ||
IS_PHASE15(from_state)) ?
&isakmp_identification_desc :
&isakmp_ipsec_identification_desc;
break;
case ISAKMP_NEXT_NATD_DRAFTS:
/* NAT-D was a private use type before RFC-3947 -- same format */
np = ISAKMP_NEXT_NATD_RFC;
sd = v1_payload_desc(np);
break;
case ISAKMP_NEXT_NATOA_DRAFTS:
/* NAT-OA was a private use type before RFC-3947 -- same format */
np = ISAKMP_NEXT_NATOA_RFC;
sd = v1_payload_desc(np);
break;
case ISAKMP_NEXT_SAK: /* or ISAKMP_NEXT_NATD_BADDRAFTS */
/*
* Official standards say that this is ISAKMP_NEXT_SAK,
* a part of Group DOI, something we don't implement.
* Old non-updated Cisco gear abused this number in ancient NAT drafts.
* We ignore (rather than reject) this in support of people
* with crufty Cisco machines.
*/
loglog(RC_LOG_SERIOUS,
"%smessage with unsupported payload ISAKMP_NEXT_SAK (or ISAKMP_NEXT_NATD_BADDRAFTS) ignored",
excuse);
/*
* Hack to discard payload, whatever it was.
* Since we are skipping the rest of the loop
* body we must do some things ourself:
* - demarshall the payload
* - grab the next payload number (np)
* - don't keep payload (don't increment pd)
* - skip rest of loop body
*/
if (!in_struct(&pd->payload, &isakmp_ignore_desc, &md->message_pbs,
&pd->pbs)) {
loglog(RC_LOG_SERIOUS,
"%smalformed payload in packet",
excuse);
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
np = pd->payload.generic.isag_np;
/* NOTE: we do not increment pd! */
continue; /* skip rest of the loop */
default:
loglog(RC_LOG_SERIOUS,
"%smessage ignored because it contains an unknown or unexpected payload type (%s) at the outermost level",
excuse,
enum_show(&ikev1_payload_names, np));
SEND_NOTIFICATION(INVALID_PAYLOAD_TYPE);
return;
}
passert(sd != NULL);
}
passert(np < LELEM_ROOF);
{
lset_t s = LELEM(np);
if (LDISJOINT(s,
needed | smc->opt_payloads |
LELEM(ISAKMP_NEXT_VID) |
LELEM(ISAKMP_NEXT_N) |
LELEM(ISAKMP_NEXT_D) |
LELEM(ISAKMP_NEXT_CR) |
LELEM(ISAKMP_NEXT_CERT))) {
loglog(RC_LOG_SERIOUS, "%smessage ignored because it "
"contains an unexpected payload type (%s)",
excuse,
enum_show(&ikev1_payload_names, np));
SEND_NOTIFICATION(INVALID_PAYLOAD_TYPE);
return;
}
DBG(DBG_PARSING,
DBG_log("got payload 0x%" PRIxLSET" (%s) needed: 0x%" PRIxLSET "opt: 0x%" PRIxLSET,
s, enum_show(&ikev1_payload_names, np),
needed, smc->opt_payloads));
needed &= ~s;
}
if (!in_struct(&pd->payload, sd, &md->message_pbs,
&pd->pbs)) {
loglog(RC_LOG_SERIOUS,
"%smalformed payload in packet",
excuse);
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
/* do payload-type specific debugging */
switch (np) {
case ISAKMP_NEXT_ID:
case ISAKMP_NEXT_NATOA_RFC:
/* dump ID section */
DBG(DBG_PARSING,
DBG_dump(" obj: ", pd->pbs.cur,
pbs_left(&pd->pbs)));
break;
default:
break;
}
/* place this payload at the end of the chain for this type */
{
struct payload_digest **p;
for (p = &md->chain[np]; *p != NULL;
p = &(*p)->next)
;
*p = pd;
pd->next = NULL;
}
np = pd->payload.generic.isag_np;
pd++;
/* since we've digested one payload happily, it is probably
* the case that any decryption worked. So we will not suggest
* encryption failure as an excuse for subsequent payload
* problems.
*/
excuse = "";
}
md->digest_roof = pd;
DBG(DBG_PARSING, {
if (pbs_left(&md->message_pbs) != 0)
DBG_log("removing %d bytes of padding",
(int) pbs_left(&md->message_pbs));
});
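/* whatever remains between cur and roof is padding: chop it off */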
md->message_pbs.roof = md->message_pbs.cur;
/* check that all mandatory payloads appeared */
if (needed != 0) {
loglog(RC_LOG_SERIOUS,
"message for %s is missing payloads %s",
enum_show(&state_names, from_state),
bitnamesof(payload_name_ikev1, needed));
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
}
/* more sanity checking: enforce most ordering constraints */
if (IS_PHASE1(from_state) || IS_PHASE15(from_state)) {
/* rfc2409: The Internet Key Exchange (IKE), 5 Exchanges:
* "The SA payload MUST precede all other payloads in a phase 1 exchange."
*/
if (md->chain[ISAKMP_NEXT_SA] != NULL &&
md->hdr.isa_np != ISAKMP_NEXT_SA) {
loglog(RC_LOG_SERIOUS,
"malformed Phase 1 message: does not start with an SA payload");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
} else if (IS_QUICK(from_state)) {
/* rfc2409: The Internet Key Exchange (IKE), 5.5 Phase 2 - Quick Mode
*
* "In Quick Mode, a HASH payload MUST immediately follow the ISAKMP
* header and a SA payload MUST immediately follow the HASH."
* [NOTE: there may be more than one SA payload, so this is not
* totally reasonable. Probably all SAs should be so constrained.]
*
* "If ISAKMP is acting as a client negotiator on behalf of another
* party, the identities of the parties MUST be passed as IDci and
* then IDcr."
*
* "With the exception of the HASH, SA, and the optional ID payloads,
* there are no payload ordering restrictions on Quick Mode."
*/
if (md->hdr.isa_np != ISAKMP_NEXT_HASH) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: does not start with a HASH payload");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
{
struct payload_digest *p;
int i;
p = md->chain[ISAKMP_NEXT_SA];
i = 1;
while (p != NULL) {
if (p != &md->digest[i]) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: SA payload is in wrong position");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
p = p->next;
i++;
}
}
/* rfc2409: The Internet Key Exchange (IKE), 5.5 Phase 2 - Quick Mode:
* "If ISAKMP is acting as a client negotiator on behalf of another
* party, the identities of the parties MUST be passed as IDci and
* then IDcr."
*/
{
struct payload_digest *id = md->chain[ISAKMP_NEXT_ID];
if (id != NULL) {
if (id->next == NULL ||
id->next->next != NULL) {
loglog(RC_LOG_SERIOUS, "malformed Quick Mode message:"
" if any ID payload is present,"
" there must be exactly two");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
if (id + 1 != id->next) {
loglog(RC_LOG_SERIOUS, "malformed Quick Mode message:"
" the ID payloads are not adjacent");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
}
}
}
/*
* Ignore payloads that we don't handle:
*/
/* XXX Handle Notifications */
{
struct payload_digest *p = md->chain[ISAKMP_NEXT_N];
while (p != NULL) {
switch (p->payload.notification.isan_type) {
case R_U_THERE:
case R_U_THERE_ACK:
case ISAKMP_N_CISCO_LOAD_BALANCE:
case PAYLOAD_MALFORMED:
case INVALID_MESSAGE_ID:
case IPSEC_RESPONDER_LIFETIME:
if (md->hdr.isa_xchg == ISAKMP_XCHG_INFO) {
/* these are handled later on in informational() */
break;
}
/* FALL THROUGH */
default:
if (st == NULL || (st != NULL &&
(st->st_connection->policy & POLICY_OPPORTUNISTIC))) {
DBG(DBG_CONTROL, DBG_log(
"ignoring informational payload %s, no corresponding state",
enum_show(& ikev1_notify_names,
p->payload.
notification.isan_type)));
} else {
loglog(RC_LOG_SERIOUS,
"ignoring informational payload %s, msgid=%08" PRIx32 ", length=%d",
enum_show(&ikev1_notify_names,
p->payload.
notification.isan_type),
st->st_msgid,
p->payload.notification.isan_length);
DBG_dump_pbs(&p->pbs);
}
if (st != NULL &&
st->st_connection->extra_debugging &
IMPAIR_DIE_ONINFO) {
loglog(RC_LOG_SERIOUS,
"received and failed on unknown informational message");
complete_v1_state_transition(mdp,
STF_FATAL);
/* our caller will release_any_md(mdp); */
return;
}
}
DBG_cond_dump(DBG_PARSING, "info:", p->pbs.cur, pbs_left(
&p->pbs));
p = p->next;
}
p = md->chain[ISAKMP_NEXT_D];
while (p != NULL) {
self_delete |= accept_delete(md, p);
DBG_cond_dump(DBG_PARSING, "del:", p->pbs.cur, pbs_left(
&p->pbs));
p = p->next;
}
p = md->chain[ISAKMP_NEXT_VID];
while (p != NULL) {
handle_vendorid(md, (char *)p->pbs.cur,
pbs_left(&p->pbs), FALSE);
p = p->next;
}
}
if (self_delete) {
accept_self_delete(md);
st = md->st; /* st not subsequently used */
/* note: st ought to be NULL from here on */
}
#if 0
/* this does not seem to be right */
/* VERIFY that we only accept NAT-D/NAT-OE when they sent us the VID */
if ((md->chain[ISAKMP_NEXT_NATD_RFC] != NULL ||
md->chain[ISAKMP_NEXT_NATOA_RFC] != NULL) &&
(st->hidden_variables.st_nat_traversal & NAT_T_WITH_RFC_VALUES) == LEMPTY) {
/*
* don't accept NAT-D/NAT-OA reloc directly in message,
* unless we're using NAT-T RFC
*/
loglog(RC_LOG_SERIOUS,
"message ignored because it contains a NAT payload, when we did not receive the appropriate VendorID");
return;
}
#endif
/* possibly fill in hdr */
if (smc->first_out_payload != ISAKMP_NEXT_NONE)
ikev1_echo_hdr(md, (smc->flags & SMF_OUTPUT_ENCRYPTED) != 0,
smc->first_out_payload);
complete_v1_state_transition(mdp, smc->processor(md));
/* our caller will release_any_md(mdp); */
}
/*
* replace previous receive packet with latest, to update
* our notion of a retransmitted packet. This is important
* to do, even for failing transitions, and suspended transitions
* because the sender may well retransmit their request.
* We had better be idempotent since we can be called
* multiple times in handling a packet due to crypto helper logic.
*/
static void remember_received_packet(struct state *st, struct msg_digest *md)
{
if (md->encrypted) {
/* if encrypted, duplication already done */
if (md->raw_packet.ptr != NULL) {
pfreeany(st->st_rpacket.ptr);
st->st_rpacket = md->raw_packet;
md->raw_packet.ptr = NULL;
}
} else {
/* this may be a repeat, but it will work */
pfreeany(st->st_rpacket.ptr);
clonetochunk(st->st_rpacket,
md->packet_pbs.start,
pbs_room(&md->packet_pbs), "raw packet");
}
}
/* complete job started by the state-specific state transition function
*
* This routine will not release_any_md(mdp). It is expected that its
* caller will do this. In fact, it will zap *mdp to NULL if it thinks
* **mdp should not be freed. So the caller should be prepared for
* *mdp being set to NULL.
*
* md is used to:
* - find st
* - find from_state (st might be gone)
* - find note for STF_FAIL (might not be part of result (STF_FAIL+note))
* - find note for STF_INTERNAL_ERROR
* - record md->event_already_set
* - remember_received_packet(st, md);
* - nat_traversal_change_port_lookup(md, st);
* - smc for smc->next_state
* - smc for smc->flags & SMF_REPLY to trigger a reply
* - smc for smc->timeout_event
* - smc for !(smc->flags & SMF_INITIATOR) for Contivity mode
* - smc for smc->flags & SMF_RELEASE_PENDING_P2 to trigger unpend call
* - smc for smc->flags & SMF_INITIATOR to adjust retransmission
* - fragvid, dpd, nortel
*/
void complete_v1_state_transition(struct msg_digest **mdp, stf_status result)
{
struct msg_digest *md = *mdp;
enum state_kind from_state;
struct state *st;
passert(md != NULL);
/* handle oddball/meta results now */
switch (result) {
case STF_SUSPEND:
cur_state = md->st; /* might have changed */
/* FALL THROUGH */
case STF_INLINE: /* all done, including release_any_md */
*mdp = NULL; /* take md away from parent */
/* FALL THROUGH */
case STF_IGNORE:
DBG(DBG_CONTROL,
DBG_log("complete v1 state transition with %s",
enum_show(&stfstatus_name, result)));
return;
default:
break;
}
DBG(DBG_CONTROL,
DBG_log("complete v1 state transition with %s",
result > STF_FAIL ?
enum_name(&ikev1_notify_names, result - STF_FAIL) :
enum_name(&stfstatus_name, result)));
/* safe to refer to *md */
from_state = md->from_state;
cur_state = st = md->st; /* might have changed */
passert(st != NULL);
passert(!st->st_calculating);
switch (result) {
case STF_OK:
{
/* advance the state */
const struct state_microcode *smc = md->smc;
libreswan_log("transition from state %s to state %s",
enum_name(&state_names, from_state),
enum_name(&state_names, smc->next_state));
/* accept info from VID because we accept this message */
/* If state has FRAGMENTATION support, import it */
if (md->fragvid) {
DBG(DBG_CONTROLMORE, DBG_log("peer supports fragmentation"));
st->st_seen_fragvid = TRUE;
}
/* If state has DPD support, import it */
if (md->dpd &&
st->hidden_variables.st_peer_supports_dpd != md->dpd) {
DBG(DBG_DPD, DBG_log("peer supports dpd"));
st->hidden_variables.st_peer_supports_dpd = md->dpd;
if (dpd_active_locally(st)) {
DBG(DBG_DPD, DBG_log("dpd is active locally"));
}
}
/* If state has VID_NORTEL, import it to activate workaround */
if (md->nortel) {
DBG(DBG_CONTROLMORE, DBG_log("peer requires Nortel Contivity workaround"));
st->st_seen_nortel_vid = TRUE;
}
if (!st->st_msgid_reserved &&
IS_CHILD_SA(st) &&
st->st_msgid != v1_MAINMODE_MSGID) {
struct state *p1st = state_with_serialno(
st->st_clonedfrom);
if (p1st != NULL) {
/* do message ID reservation */
reserve_msgid(p1st, st->st_msgid);
}
st->st_msgid_reserved = TRUE;
}
change_state(st, smc->next_state);
/* XAUTH negotiation withOUT modecfg ends in STATE_XAUTH_I1
* which is wrong and creates issues further in several places
* As per libreswan design, it seems every phase 1 negotiation
* including xauth/modecfg must end with STATE_MAIN_I4 to mark
* actual end of phase 1. With modecfg, negotiation ends with
* STATE_MAIN_I4 already.
*/
#if 0 /* ??? what's this code for? */
if (st->st_connection->spd.this.xauth_client
&& st->hidden_variables.st_xauth_client_done
&& !st->st_connection->spd.this.modecfg_client
&& st->st_state == STATE_XAUTH_I1) {
DBG(DBG_CONTROL,
DBG_log("As XAUTH is done and modecfg is not configured, so Phase 1 neogtiation finishes successfully"));
change_state(st, STATE_MAIN_I4);
}
#endif
/* Schedule for whatever timeout is specified */
if (!md->event_already_set) {
/* Delete previous retransmission event.
* New event will be scheduled below.
*/
delete_event(st);
}
/* Delete IKE fragments */
release_fragments(st);
/* update the previous packet history */
remember_received_packet(st, md);
/* free previous transmit packet */
freeanychunk(st->st_tpacket);
/* in aggressive mode, there will be no reply packet in transition
* from STATE_AGGR_R1 to STATE_AGGR_R2
*/
if (nat_traversal_enabled) {
/* adjust our destination port if necessary */
nat_traversal_change_port_lookup(md, st);
}
/* if requested, send the new reply packet */
if (smc->flags & SMF_REPLY) {
DBG(DBG_CONTROL, {
ipstr_buf b;
DBG_log("sending reply packet to %s:%u (from port %u)",
ipstr(&st->st_remoteaddr, &b),
st->st_remoteport,
st->st_interface->port);
});
close_output_pbs(&reply_stream); /* good form, but actually a no-op */
record_and_send_ike_msg(st, &reply_stream,
enum_name(&state_names, from_state));
}
/* Schedule for whatever timeout is specified */
if (!md->event_already_set) {
unsigned long delay_ms; /* delay is in milliseconds here */
enum event_type kind = smc->timeout_event;
bool agreed_time = FALSE;
struct connection *c = st->st_connection;
switch (kind) {
case EVENT_v1_RETRANSMIT: /* Retransmit packet */
delay_ms = c->r_interval;
break;
case EVENT_SA_REPLACE: /* SA replacement event */
if (IS_PHASE1(st->st_state) ||
IS_PHASE15(st->st_state )) {
/* Note: we will defer to the "negotiated" (dictated)
* lifetime if we are POLICY_DONT_REKEY.
* This allows the other side to dictate
* a time we would not otherwise accept
* but it prevents us from having to initiate
* rekeying. The negative consequences seem
* minor.
*/
delay_ms = deltamillisecs(c->sa_ike_life_seconds);
if ((c->policy & POLICY_DONT_REKEY) ||
delay_ms >= deltamillisecs(st->st_oakley.life_seconds))
{
agreed_time = TRUE;
delay_ms = deltamillisecs(st->st_oakley.life_seconds);
}
} else {
/* Delay is min of up to four things:
* each can limit the lifetime.
*/
time_t delay = deltasecs(c->sa_ipsec_life_seconds);
#define clamp_delay(trans) { \
if (st->trans.present && \
delay >= deltasecs(st->trans.attrs.life_seconds)) { \
agreed_time = TRUE; \
delay = deltasecs(st->trans.attrs.life_seconds); \
} \
}
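/* For reference, clamp_delay(st_ah) expands to:
 *
 * if (st->st_ah.present &&
 *     delay >= deltasecs(st->st_ah.attrs.life_seconds)) {
 *         agreed_time = TRUE;
 *         delay = deltasecs(st->st_ah.attrs.life_seconds);
 * }
 *
 * i.e. each protocol that is present may only lower the lifetime.
 */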
clamp_delay(st_ah);
clamp_delay(st_esp);
clamp_delay(st_ipcomp);
delay_ms = delay * 1000;
#undef clamp_delay
}
/* By default, we plan to rekey.
*
* If there isn't enough time to rekey, plan to
* expire.
*
* If we are --dontrekey, a lot more rules apply.
* If we are the Initiator, use REPLACE_IF_USED.
* If we are the Responder, and the dictated time
* was unacceptable (too large), plan to REPLACE
* (the only way to ratchet down the time).
* If we are the Responder, and the dictated time
* is acceptable, plan to EXPIRE.
*
* Important policy lies buried here.
* For example, we favour the initiator over the
* responder by making the initiator start rekeying
* sooner. Also, fuzz is only added to the
* initiator's margin.
*
* Note: for ISAKMP SA, we let the negotiated
* time stand (implemented by earlier logic).
*/
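/* Worked example (hypothetical numbers): with sa_rekey_margin=540s
 * and sa_rekey_fuzz=100, an initiator computes marg somewhere in
 * [540s, 1080s), while a responder simply halves it to 270s. If
 * delay_ms exceeds marg, the SA is replaced marg seconds early;
 * otherwise there is no room to rekey and it just expires.
 */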
if (agreed_time &&
(c->policy & POLICY_DONT_REKEY)) {
kind = (smc->flags & SMF_INITIATOR) ?
EVENT_SA_REPLACE_IF_USED :
EVENT_SA_EXPIRE;
}
if (kind != EVENT_SA_EXPIRE) {
time_t marg =
deltasecs(c->sa_rekey_margin);
if (smc->flags & SMF_INITIATOR) {
marg += marg *
c->sa_rekey_fuzz /
100.E0 *
(rand() /
(RAND_MAX + 1.E0));
} else {
marg /= 2;
}
if (delay_ms > (unsigned long)marg * 1000) {
delay_ms -= (unsigned long)marg * 1000;
st->st_margin = deltatime(marg);
} else {
kind = EVENT_SA_EXPIRE;
}
}
break;
default:
bad_case(kind);
}
event_schedule_ms(kind, delay_ms, st);
}
/* tell whack and log of progress */
{
const char *story = enum_name(&state_stories,
st->st_state);
enum rc_type w = RC_NEW_STATE + st->st_state;
char sadetails[512];
passert(st->st_state < STATE_IKE_ROOF);
sadetails[0] = '\0';
/* document IPsec SA details for admin's pleasure */
if (IS_IPSEC_SA_ESTABLISHED(st->st_state)) {
fmt_ipsec_sa_established(st, sadetails,
sizeof(sadetails));
} else if (IS_ISAKMP_SA_ESTABLISHED(st->st_state) &&
!st->hidden_variables.st_logged_p1algos) {
fmt_isakmp_sa_established(st, sadetails,
sizeof(sadetails));
}
if (IS_ISAKMP_SA_ESTABLISHED(st->st_state) ||
IS_IPSEC_SA_ESTABLISHED(st->st_state)) {
/* log our success */
w = RC_SUCCESS;
}
/* tell whack and logs our progress */
loglog(w,
"%s: %s%s",
enum_name(&state_names, st->st_state),
story,
sadetails);
}
/*
* make sure that a DPD event gets created for a new phase 1
* SA.
*/
if (IS_ISAKMP_SA_ESTABLISHED(st->st_state)) {
if (deltasecs(st->st_connection->dpd_delay) > 0 &&
deltasecs(st->st_connection->dpd_timeout) > 0) {
/* don't ignore failure */
/* ??? in fact, we do ignore this:
* result is NEVER used
* (clang 3.4 noticed this)
*/
stf_status s = dpd_init(st);
pexpect(s != STF_FAIL);
if (s == STF_FAIL)
result = STF_FAIL; /* ??? fall through !?! */
}
}
/* Special case for XAUTH server */
if (st->st_connection->spd.this.xauth_server) {
if (st->st_oakley.doing_xauth &&
IS_ISAKMP_SA_ESTABLISHED(st->st_state)) {
DBG(DBG_CONTROL,
DBG_log("XAUTH: "
"Sending XAUTH Login/Password Request"));
event_schedule_ms(EVENT_v1_SEND_XAUTH,
EVENT_v1_SEND_XAUTH_DELAY, st);
break;
}
}
/*
* for XAUTH client, we are also done, because we need to
* stay in this state, and let the server query us
*/
if (!IS_QUICK(st->st_state) &&
st->st_connection->spd.this.xauth_client &&
!st->hidden_variables.st_xauth_client_done) {
DBG(DBG_CONTROL,
DBG_log("XAUTH client is not yet authenticated"));
break;
}
/*
* when talking to some vendors, we need to initiate a mode
* cfg request to get challenged, but there is also an
* override in the form of a policy bit.
*/
DBG(DBG_CONTROL,
DBG_log("modecfg pull: %s policy:%s %s",
(st->quirks.modecfg_pull_mode ?
"quirk-poll" : "noquirk"),
(st->st_connection->policy & POLICY_MODECFG_PULL) ?
"pull" : "push",
(st->st_connection->spd.this.modecfg_client ?
"modecfg-client" : "not-client")));
if (st->st_connection->spd.this.modecfg_client &&
IS_ISAKMP_SA_ESTABLISHED(st->st_state) &&
(st->quirks.modecfg_pull_mode ||
st->st_connection->policy & POLICY_MODECFG_PULL) &&
!st->hidden_variables.st_modecfg_started) {
DBG(DBG_CONTROL,
DBG_log("modecfg client is starting due to %s",
st->quirks.modecfg_pull_mode ? "quirk" :
"policy"));
modecfg_send_request(st);
break;
}
/* Should we set the peer's IP address regardless? */
if (st->st_connection->spd.this.modecfg_server &&
IS_ISAKMP_SA_ESTABLISHED(st->st_state) &&
!st->hidden_variables.st_modecfg_vars_set &&
!(st->st_connection->policy & POLICY_MODECFG_PULL)) {
change_state(st, STATE_MODE_CFG_R1);
set_cur_state(st);
libreswan_log("Sending MODE CONFIG set");
modecfg_start_set(st);
break;
}
/*
* If we are the responder and the client is in "Contivity mode",
* we need to initiate Quick mode
*/
if (!(smc->flags & SMF_INITIATOR) &&
IS_MODE_CFG_ESTABLISHED(st->st_state) &&
(st->st_seen_nortel_vid)) {
libreswan_log("Nortel 'Contivity Mode' detected, starting Quick Mode");
change_state(st, STATE_MAIN_R3); /* ISAKMP is up... */
set_cur_state(st);
quick_outI1(st->st_whack_sock, st, st->st_connection,
st->st_connection->policy, 1, SOS_NOBODY
#ifdef HAVE_LABELED_IPSEC
, NULL /* Setting NULL as this is responder and will not have sec ctx from a flow*/
#endif
);
break;
}
/* wait for modecfg_set */
if (st->st_connection->spd.this.modecfg_client &&
IS_ISAKMP_SA_ESTABLISHED(st->st_state) &&
!st->hidden_variables.st_modecfg_vars_set) {
DBG(DBG_CONTROL,
DBG_log("waiting for modecfg set from server"));
break;
}
if (st->st_rekeytov2) {
DBG(DBG_CONTROL,
DBG_log("waiting for IKEv1 -> IKEv2 rekey"));
break;
}
DBG(DBG_CONTROL,
DBG_log("phase 1 is done, looking for phase 2 to unpend"));
if (smc->flags & SMF_RELEASE_PENDING_P2) {
/* Initiate any Quick Mode negotiations that
* were waiting to piggyback on this Keying Channel.
*
* ??? there is a potential race condition
* if we are the responder: the initial Phase 2
* message might outrun the final Phase 1 message.
*
* so, instead of actually sending the traffic now,
* we schedule an event to do so.
*
* but, in fact, quick_mode will enqueue a cryptographic operation
* anyway, which will get done "later" anyway, so maybe it is just fine
* as it is.
*
*/
unpend(st);
}
if (IS_ISAKMP_SA_ESTABLISHED(st->st_state) ||
IS_IPSEC_SA_ESTABLISHED(st->st_state))
release_whack(st);
if (IS_QUICK(st->st_state))
break;
break;
}
case STF_INTERNAL_ERROR:
/* update the previous packet history */
remember_received_packet(st, md);
whack_log(RC_INTERNALERR + md->note,
"%s: internal error",
enum_name(&state_names, st->st_state));
DBG(DBG_CONTROL,
DBG_log("state transition function for %s had internal error",
enum_name(&state_names, from_state)));
break;
case STF_TOOMUCHCRYPTO:
/* ??? Why is this comment useful:
* well, this should never happen during a whack, since
* a whack will always force crypto.
*/
/* ??? why no call of remember_received_packet? */
unset_suspended(st);
libreswan_log(
"message in state %s ignored due to cryptographic overload",
enum_name(&state_names, from_state));
log_crypto_workers();
/* ??? the ikev2.c version used to FALL THROUGH to STF_FATAL */
break;
case STF_FATAL:
/* update the previous packet history */
remember_received_packet(st, md);
whack_log(RC_FATAL,
"encountered fatal error in state %s",
enum_name(&state_names, st->st_state));
#ifdef HAVE_NM
if (st->st_connection->remotepeertype == CISCO &&
st->st_connection->nmconfigured) {
if (!do_command(st->st_connection,
&st->st_connection->spd,
"disconnectNM", st))
DBG(DBG_CONTROL,
DBG_log("sending disconnect to NM failed, you may need to do it manually"));
}
#endif
release_pending_whacks(st, "fatal error");
delete_state(st);
md->st = st = NULL;
break;
default: /* a shortcut to STF_FAIL, setting md->note */
passert(result > STF_FAIL);
md->note = result - STF_FAIL;
/* FALL THROUGH */
case STF_FAIL:
/* As it is, we act as if this message never happened:
* whatever retrying was in place, remains in place.
*/
/*
* ??? why no call of remember_received_packet?
* Perhaps because the message hasn't been authenticated?
* But then any duplicate would lose too, I would think.
*/
whack_log(RC_NOTIFICATION + md->note,
"%s: %s", enum_name(&state_names, st->st_state),
enum_name(&ikev1_notify_names, md->note));
if (md->note != NOTHING_WRONG)
SEND_NOTIFICATION(md->note);
DBG(DBG_CONTROL,
DBG_log("state transition function for %s failed: %s",
enum_name(&state_names, from_state),
enum_name(&ikev1_notify_names, md->note)));
#ifdef HAVE_NM
if (st->st_connection->remotepeertype == CISCO &&
st->st_connection->nmconfigured) {
if (!do_command(st->st_connection,
&st->st_connection->spd,
"disconnectNM", st))
DBG(DBG_CONTROL,
DBG_log("sending disconnect to NM failed, you may need to do it manually"));
}
#endif
if (IS_PHASE1_INIT(st->st_state)) {
delete_event(st);
release_whack(st);
}
if (IS_QUICK(st->st_state)) {
delete_state(st);
/* wipe out dangling pointer to st */
md->st = NULL;
}
break;
}
}
/* note: may change which connection is referenced by md->st->st_connection */
bool ikev1_decode_peer_id(struct msg_digest *md, bool initiator, bool aggrmode)
{
struct state *const st = md->st;
struct payload_digest *const id_pld = md->chain[ISAKMP_NEXT_ID];
const pb_stream *const id_pbs = &id_pld->pbs;
struct isakmp_id *const id = &id_pld->payload.id;
struct id peer;
/* I think that RFC2407 (IPSEC DOI) 4.6.2 is confused.
* It talks about the protocol ID and Port fields of the ID
* Payload, but they don't exist as such in Phase 1.
* We use more appropriate names.
* isaid_doi_specific_a is in place of Protocol ID.
* isaid_doi_specific_b is in place of Port.
* Besides, there is no good reason for allowing these to be
* other than 0 in Phase 1.
*/
if (st->hidden_variables.st_nat_traversal != LEMPTY &&
id->isaid_doi_specific_a == IPPROTO_UDP &&
(id->isaid_doi_specific_b == 0 ||
id->isaid_doi_specific_b == pluto_nat_port)) {
DBG_log("protocol/port in Phase 1 ID Payload is %d/%d. "
"accepted with port_floating NAT-T",
id->isaid_doi_specific_a, id->isaid_doi_specific_b);
} else if (!(id->isaid_doi_specific_a == 0 &&
id->isaid_doi_specific_b == 0) &&
!(id->isaid_doi_specific_a == IPPROTO_UDP &&
id->isaid_doi_specific_b == pluto_port))
{
loglog(RC_LOG_SERIOUS, "protocol/port in Phase 1 ID Payload MUST be 0/0 or %d/%d"
" but are %d/%d (attempting to continue)",
IPPROTO_UDP, pluto_port,
id->isaid_doi_specific_a,
id->isaid_doi_specific_b);
/* we have turned this into a warning because of bugs in other
 * vendors' products, specifically the Cisco VPN3000.
 */
/* return FALSE; */
}
zero(&peer); /* ??? pointer fields might not be NULLed */
peer.kind = id->isaid_idtype;
if (!extract_peer_id(&peer, id_pbs))
return FALSE;
/*
* For interop with SoftRemote/aggressive mode we need to remember some
* things for checking the hash
*/
st->st_peeridentity_protocol = id->isaid_doi_specific_a;
st->st_peeridentity_port = ntohs(id->isaid_doi_specific_b);
{
char buf[IDTOA_BUF];
idtoa(&peer, buf, sizeof(buf));
libreswan_log("%s mode peer ID is %s: '%s'",
aggrmode ? "Aggressive" : "Main",
enum_show(&ike_idtype_names, id->isaid_idtype), buf);
}
/* check for certificates */
if (!ikev1_decode_cert(md))
return FALSE;
/* Now that we've decoded the ID payload, let's see if we
* need to switch connections.
* We must not switch horses if we initiated:
* - if the initiation was explicit, we'd be ignoring user's intent
* - if opportunistic, we'll lose our HOLD info
*/
if (initiator) {
if (!same_id(&st->st_connection->spd.that.id, &peer) &&
id_kind(&st->st_connection->spd.that.id) != ID_FROMCERT) {
char expect[IDTOA_BUF],
found[IDTOA_BUF];
idtoa(&st->st_connection->spd.that.id, expect,
sizeof(expect));
idtoa(&peer, found, sizeof(found));
loglog(RC_LOG_SERIOUS,
"we require IKEv1 peer to have ID '%s', but peer declares '%s'",
expect, found);
return FALSE;
} else if (id_kind(&st->st_connection->spd.that.id) == ID_FROMCERT) {
if (id_kind(&peer) != ID_DER_ASN1_DN) {
loglog(RC_LOG_SERIOUS,
"peer ID is not a certificate type");
return FALSE;
}
duplicate_id(&st->st_connection->spd.that.id, &peer);
}
} else {
struct connection *c = st->st_connection;
struct connection *r = NULL;
bool fromcert;
uint16_t auth = xauth_calcbaseauth(st->st_oakley.auth);
lset_t auth_policy = LEMPTY;
switch (auth) {
case OAKLEY_PRESHARED_KEY:
auth_policy = POLICY_PSK;
break;
case OAKLEY_RSA_SIG:
auth_policy = POLICY_RSASIG;
break;
/* Not implemented */
case OAKLEY_DSS_SIG:
case OAKLEY_RSA_ENC:
case OAKLEY_RSA_REVISED_MODE:
case OAKLEY_ECDSA_P256:
case OAKLEY_ECDSA_P384:
case OAKLEY_ECDSA_P521:
default:
DBG(DBG_CONTROL, DBG_log("ikev1 ikev1_decode_peer_id bad_case due to not supported policy"));
// bad_case(auth);
}
if (aggrmode)
auth_policy |= POLICY_AGGRESSIVE;
/* check for certificate requests */
ikev1_decode_cr(md);
if ((auth_policy & ~POLICY_AGGRESSIVE) != LEMPTY) {
r = refine_host_connection(st, &peer, initiator, auth_policy, &fromcert);
pexpect(r != NULL);
}
if (r == NULL) {
char buf[IDTOA_BUF];
idtoa(&peer, buf, sizeof(buf));
loglog(RC_LOG_SERIOUS,
"no suitable connection for peer '%s'",
buf);
return FALSE;
}
DBG(DBG_CONTROL, {
char buf[IDTOA_BUF];
dntoa_or_null(buf, IDTOA_BUF, r->spd.this.ca,
"%none");
DBG_log("offered CA: '%s'", buf);
});
if (r != c) {
char b1[CONN_INST_BUF];
char b2[CONN_INST_BUF];
/* apparently, r is an improvement on c -- replace */
libreswan_log("switched from \"%s\"%s to \"%s\"%s",
c->name,
fmt_conn_instance(c, b1),
r->name,
fmt_conn_instance(r, b2));
if (r->kind == CK_TEMPLATE || r->kind == CK_GROUP) {
/* instantiate it, filling in peer's ID */
r = rw_instantiate(r, &c->spd.that.host_addr,
NULL,
&peer);
}
update_state_connection(st, r);
} else if (c->spd.that.has_id_wildcards) {
free_id_content(&c->spd.that.id);
c->spd.that.id = peer;
c->spd.that.has_id_wildcards = FALSE;
unshare_id_content(&c->spd.that.id);
} else if (fromcert) {
DBG(DBG_CONTROL, DBG_log("copying ID for fromcert"));
duplicate_id(&r->spd.that.id, &peer);
}
}
return TRUE;
}
bool ikev1_ship_chain(chunk_t *chain, int n, pb_stream *outs,
u_int8_t type,
u_int8_t setnp)
{
int i;
u_int8_t np;
for (i = 0; i < n; i++) {
/* the last cert in the chain gets setnp; earlier certs chain to the next CERT payload */
np = i == n - 1 ? setnp : ISAKMP_NEXT_CERT;
if (!ikev1_ship_CERT(type, chain[i], outs, np))
return FALSE;
}
return TRUE;
}
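#if 0
/* Illustrative call (hypothetical variables): ship a three-certificate
 * chain in which the final CERT payload chains on to a SIG payload.
 */
if (!ikev1_ship_chain(chain, 3, outs, CERT_X509_SIGNATURE,
ISAKMP_NEXT_SIG))
return STF_INTERNAL_ERROR;
#endif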
void doi_log_cert_thinking(u_int16_t auth,
enum ike_cert_type certtype,
enum certpolicy policy,
bool gotcertrequest,
bool send_cert,
bool send_chain)
{
DBG(DBG_CONTROL,
DBG_log("thinking about whether to send my certificate:"));
DBG(DBG_CONTROL, {
struct esb_buf esb;
DBG_log(" I have RSA key: %s cert.type: %s ",
enum_showb(&oakley_auth_names, auth, &esb),
enum_show(&ike_cert_type_names, certtype));
});
DBG(DBG_CONTROL,
DBG_log(" sendcert: %s and I did%s get a certificate request ",
enum_show(&certpolicy_type_names, policy),
gotcertrequest ? "" : " not"));
DBG(DBG_CONTROL,
DBG_log(" so %ssend cert.", send_cert ? "" : "do not "));
if (!send_cert) {
if (auth == OAKLEY_PRESHARED_KEY) {
DBG(DBG_CONTROL,
DBG_log("I did not send a certificate "
"because digital signatures are not "
"being used. (PSK)"));
} else if (certtype == CERT_NONE) {
DBG(DBG_CONTROL,
DBG_log("I did not send a certificate because "
"I do not have one."));
} else if (policy == cert_sendifasked) {
DBG(DBG_CONTROL,
DBG_log("I did not send my certificate "
"because I was not asked to."));
}
/* ??? should there be an additional else catch-all? */
}
if (send_chain)
DBG(DBG_CONTROL, DBG_log("Sending one or more authcerts"));
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5117_0 |
crossvul-cpp_data_good_2577_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M PPPP CCCC %
% MM MM P P C %
% M M M PPPP C %
% M M P C %
% M M P CCCC %
% %
% %
% Read/Write Magick Persistent Cache Image Format %
% %
% Software Design %
% Cristy %
% March 2000 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/constitute.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/utility.h"
#include "magick/version-private.h"
/*
Forward declarations.
*/
static MagickBooleanType
WriteMPCImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M P C %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMPC() returns MagickTrue if the image format type, identified by the
% magick string, is an Magick Persistent Cache image.
%
% The format of the IsMPC method is:
%
% MagickBooleanType IsMPC(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsMPC(const unsigned char *magick,const size_t length)
{
if (length < 14)
return(MagickFalse);
if (LocaleNCompare((const char *) magick,"id=MagickCache",14) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d C A C H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadMPCImage() reads a Magick Persistent Cache image file and returns
% it. It allocates the memory necessary for the new Image structure and
% returns a pointer to the new image.
%
% The format of the ReadMPCImage method is:
%
% Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% Decompression code contributed by Kyle Shorter.
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
cache_filename[MaxTextExtent],
id[MaxTextExtent],
keyword[MaxTextExtent],
*options;
const unsigned char
*p;
GeometryInfo
geometry_info;
Image
*image;
int
c;
LinkedListInfo
*profiles;
MagickBooleanType
status;
MagickOffsetType
offset;
MagickStatusType
flags;
register ssize_t
i;
size_t
depth,
length;
ssize_t
count;
StringInfo
*profile;
unsigned int
signature;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) CopyMagickString(cache_filename,image->filename,MaxTextExtent);
AppendImageFormat("cache",cache_filename);
c=ReadBlobByte(image);
if (c == EOF)
{
image=DestroyImage(image);
return((Image *) NULL);
}
*id='\0';
(void) ResetMagickMemory(keyword,0,sizeof(keyword));
offset=0;
do
{
/*
Decode image header; header terminates one character beyond a ':'.
*/
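/*
  The header is a whitespace-separated sequence of keyword=value pairs,
  for example (a sketch; the values shown are illustrative only):

    id=MagickCache
    magick-signature=2654601354
    class=DirectClass  colors=0  matte=False
    columns=640  rows=480  depth=16
    :

  Only the optional colormap and profiles follow the header here; the
  pixel data itself lives in the companion ".cache" file.
*/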
profiles=(LinkedListInfo *) NULL;
length=MaxTextExtent;
options=AcquireString((char *) NULL);
signature=GetMagickSignature((const StringInfo *) NULL);
image->depth=8;
image->compression=NoCompression;
while ((isgraph(c) != MagickFalse) && (c != (int) ':'))
{
register char
*p;
if (c == (int) '{')
{
char
*comment;
/*
Read comment-- any text between { }.
*/
length=MaxTextExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if (c == (int) '\\')
c=ReadBlobByte(image);
else
if ((c == EOF) || (c == (int) '}'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MaxTextExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
*p='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) != MagickFalse)
{
/*
Get the keyword.
*/
length=MaxTextExtent;
p=keyword;
do
{
if (c == (int) '=')
break;
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=(char) c;
c=ReadBlobByte(image);
} while (c != EOF);
*p='\0';
p=options;
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
if (c == (int) '=')
{
/*
Get the keyword value.
*/
c=ReadBlobByte(image);
while ((c != (int) '}') && (c != EOF))
{
if ((size_t) (p-options+1) >= length)
{
*p='\0';
length<<=1;
options=(char *) ResizeQuantumMemory(options,length+
MaxTextExtent,sizeof(*options));
if (options == (char *) NULL)
break;
p=options+strlen(options);
}
*p++=(char) c;
c=ReadBlobByte(image);
if (c == '\\')
{
c=ReadBlobByte(image);
if (c == (int) '}')
{
*p++=(char) c;
c=ReadBlobByte(image);
}
}
if (*options != '{')
if (isspace((int) ((unsigned char) c)) != 0)
break;
}
if (options == (char *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
*p='\0';
if (*options == '{')
(void) CopyMagickString(options,options+1,strlen(options));
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'b':
case 'B':
{
if (LocaleCompare(keyword,"background-color") == 0)
{
(void) QueryColorDatabase(options,&image->background_color,
exception);
break;
}
if (LocaleCompare(keyword,"blue-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=
image->chromaticity.blue_primary.x;
break;
}
if (LocaleCompare(keyword,"border-color") == 0)
{
(void) QueryColorDatabase(options,&image->border_color,
exception);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'c':
case 'C':
{
if (LocaleCompare(keyword,"class") == 0)
{
ssize_t
storage_class;
storage_class=ParseCommandOption(MagickClassOptions,
MagickFalse,options);
if (storage_class < 0)
break;
image->storage_class=(ClassType) storage_class;
break;
}
if (LocaleCompare(keyword,"colors") == 0)
{
image->colors=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"colorspace") == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,
MagickFalse,options);
if (colorspace < 0)
break;
image->colorspace=(ColorspaceType) colorspace;
break;
}
if (LocaleCompare(keyword,"compression") == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,
MagickFalse,options);
if (compression < 0)
break;
image->compression=(CompressionType) compression;
break;
}
if (LocaleCompare(keyword,"columns") == 0)
{
image->columns=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'd':
case 'D':
{
if (LocaleCompare(keyword,"delay") == 0)
{
image->delay=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"depth") == 0)
{
image->depth=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"dispose") == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
options);
if (dispose < 0)
break;
image->dispose=(DisposeType) dispose;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'e':
case 'E':
{
if (LocaleCompare(keyword,"endian") == 0)
{
ssize_t
endian;
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,
options);
if (endian < 0)
break;
image->endian=(EndianType) endian;
break;
}
if (LocaleCompare(keyword,"error") == 0)
{
image->error.mean_error_per_pixel=StringToDouble(options,
(char **) NULL);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'g':
case 'G':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"green-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=
image->chromaticity.green_primary.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'i':
case 'I':
{
if (LocaleCompare(keyword,"id") == 0)
{
(void) CopyMagickString(id,options,MaxTextExtent);
break;
}
if (LocaleCompare(keyword,"iterations") == 0)
{
image->iterations=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'm':
case 'M':
{
if (LocaleCompare(keyword,"magick-signature") == 0)
{
signature=(unsigned int) StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"matte") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"matte-color") == 0)
{
(void) QueryColorDatabase(options,&image->matte_color,
exception);
break;
}
if (LocaleCompare(keyword,"maximum-error") == 0)
{
image->error.normalized_maximum_error=StringToDouble(
options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"mean-error") == 0)
{
image->error.normalized_mean_error=StringToDouble(options,
(char **) NULL);
break;
}
if (LocaleCompare(keyword,"montage") == 0)
{
(void) CloneString(&image->montage,options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'o':
case 'O':
{
if (LocaleCompare(keyword,"opaque") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"orientation") == 0)
{
ssize_t
orientation;
orientation=ParseCommandOption(MagickOrientationOptions,
MagickFalse,options);
if (orientation < 0)
break;
image->orientation=(OrientationType) orientation;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'p':
case 'P':
{
if (LocaleCompare(keyword,"page") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"pixel-intensity") == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,options);
if (intensity < 0)
break;
image->intensity=(PixelIntensityMethod) intensity;
break;
}
if ((LocaleNCompare(keyword,"profile:",8) == 0) ||
(LocaleNCompare(keyword,"profile-",8) == 0))
{
if (profiles == (LinkedListInfo *) NULL)
profiles=NewLinkedList(0);
(void) AppendValueToLinkedList(profiles,
AcquireString(keyword+8));
profile=BlobToStringInfo((const void *) NULL,(size_t)
StringToLong(options));
if (profile == (StringInfo *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
(void) SetImageProfile(image,keyword+8,profile);
profile=DestroyStringInfo(profile);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'q':
case 'Q':
{
if (LocaleCompare(keyword,"quality") == 0)
{
image->quality=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'r':
case 'R':
{
if (LocaleCompare(keyword,"red-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
if ((flags & SigmaValue) != 0)
image->chromaticity.red_primary.y=geometry_info.sigma;
break;
}
if (LocaleCompare(keyword,"rendering-intent") == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,
MagickFalse,options);
if (rendering_intent < 0)
break;
image->rendering_intent=(RenderingIntent) rendering_intent;
break;
}
if (LocaleCompare(keyword,"resolution") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
break;
}
if (LocaleCompare(keyword,"rows") == 0)
{
image->rows=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 's':
case 'S':
{
if (LocaleCompare(keyword,"scene") == 0)
{
image->scene=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 't':
case 'T':
{
if (LocaleCompare(keyword,"ticks-per-second") == 0)
{
image->ticks_per_second=(ssize_t) StringToLong(options);
break;
}
if (LocaleCompare(keyword,"tile-offset") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"type") == 0)
{
ssize_t
type;
type=ParseCommandOption(MagickTypeOptions,MagickFalse,
options);
if (type < 0)
break;
image->type=(ImageType) type;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'u':
case 'U':
{
if (LocaleCompare(keyword,"units") == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,MagickFalse,
options);
if (units < 0)
break;
image->units=(ResolutionType) units;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'w':
case 'W':
{
if (LocaleCompare(keyword,"white-point") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=
image->chromaticity.white_point.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
default:
{
(void) SetImageProperty(image,keyword,options);
break;
}
}
}
else
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
options=DestroyString(options);
(void) ReadBlobByte(image);
/*
Verify that required image information is defined.
*/
if ((LocaleCompare(id,"MagickCache") != 0) ||
(image->storage_class == UndefinedClass) ||
(image->compression == UndefinedCompression) || (image->columns == 0) ||
(image->rows == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (signature != GetMagickSignature((const StringInfo *) NULL))
ThrowReaderException(CacheError,"IncompatibleAPI");
if (image->montage != (char *) NULL)
{
register char
*p;
/*
Image directory.
*/
length=MaxTextExtent;
image->directory=AcquireString((char *) NULL);
p=image->directory;
do
{
*p='\0';
if ((strlen(image->directory)+MaxTextExtent) >= length)
{
/*
Allocate more memory for the image directory.
*/
length<<=1;
image->directory=(char *) ResizeQuantumMemory(image->directory,
length+MaxTextExtent,sizeof(*image->directory));
if (image->directory == (char *) NULL)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
p=image->directory+strlen(image->directory);
}
c=ReadBlobByte(image);
*p++=(char) c;
} while (c != (int) '\0');
}
if (profiles != (LinkedListInfo *) NULL)
{
const char
*name;
const StringInfo
*profile;
register unsigned char
*p;
/*
Read image profiles.
*/
ResetLinkedListIterator(profiles);
name=(const char *) GetNextValueInLinkedList(profiles);
while (name != (const char *) NULL)
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
p=GetStringInfoDatum(profile);
(void) ReadBlob(image,GetStringInfoLength(profile),p);
}
name=(const char *) GetNextValueInLinkedList(profiles);
}
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
}
depth=GetImageQuantumDepth(image,MagickFalse);
if (image->storage_class == PseudoClass)
{
size_t
packet_size;
unsigned char
*colormap;
/*
Create image colormap.
*/
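/*
  Each colormap entry holds 3 samples (red, green, blue) of depth/8
  bytes apiece, so packet_size is the per-entry byte count.
*/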
packet_size=(size_t) (3UL*depth/8UL);
if ((packet_size*image->colors) > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
image->colormap=(PixelPacket *) AcquireQuantumMemory(image->colors+1,
sizeof(*image->colormap));
if (image->colormap == (PixelPacket *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->colors != 0)
{
/*
Read image colormap from file.
*/
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,packet_size*image->colors,colormap);
if (count != (ssize_t) (packet_size*image->colors))
{
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
}
p=colormap;
switch (depth)
{
default:
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
ThrowReaderException(CorruptImageError,
"ImageDepthNotSupported");
case 8:
{
unsigned char
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushCharPixel(p,&pixel);
image->colormap[i].red=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].green=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].blue=ScaleCharToQuantum(pixel);
}
break;
}
case 16:
{
unsigned short
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleShortToQuantum(pixel);
}
break;
}
case 32:
{
unsigned int
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleLongToQuantum(pixel);
}
break;
}
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
(AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit");
/*
Attach persistent pixel cache.
*/
status=PersistPixelCache(image,cache_filename,MagickTrue,&offset,exception);
if (status == MagickFalse)
ThrowReaderException(CacheError,"UnableToPersistPixelCache");
/*
Proceed to next image.
*/
do
{
c=ReadBlobByte(image);
} while ((isgraph(c) == MagickFalse) && (c != EOF));
if (c != EOF)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (c != EOF);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r M P C I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterMPCImage() adds properties for the Cache image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterMPCImage method is:
%
% size_t RegisterMPCImage(void)
%
*/
ModuleExport size_t RegisterMPCImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("CACHE");
entry->description=ConstantString("Magick Persistent Cache image format");
entry->module=ConstantString("MPC");
entry->seekable_stream=MagickTrue;
entry->stealth=MagickTrue;
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("MPC");
entry->decoder=(DecodeImageHandler *) ReadMPCImage;
entry->encoder=(EncodeImageHandler *) WriteMPCImage;
entry->magick=(IsImageFormatHandler *) IsMPC;
entry->description=ConstantString("Magick Persistent Cache image format");
entry->seekable_stream=MagickTrue;
entry->module=ConstantString("MPC");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
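/*
Illustrative use of the registered coder (a hedged sketch against the
MagickCore API of this era; "image.mpc" is a hypothetical filename and
error handling is elided):

  ExceptionInfo *exception = AcquireExceptionInfo();
  ImageInfo *info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename,"image.mpc",MaxTextExtent);
  Image *img = ReadImage(info,exception);
*/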
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r M P C I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterMPCImage() removes format registrations made by the
% MPC module from the list of supported formats.
%
% The format of the UnregisterMPCImage method is:
%
% UnregisterMPCImage(void)
%
*/
ModuleExport void UnregisterMPCImage(void)
{
(void) UnregisterMagickInfo("CACHE");
(void) UnregisterMagickInfo("MPC");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e M P C I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteMPCImage() writes a Magick Persistent Cache image to a file.
%
% The format of the WriteMPCImage method is:
%
% MagickBooleanType WriteMPCImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
static MagickBooleanType WriteMPCImage(const ImageInfo *image_info,Image *image)
{
char
buffer[MaxTextExtent],
cache_filename[MaxTextExtent];
const char
*property,
*value;
MagickBooleanType
status;
MagickOffsetType
offset,
scene;
register ssize_t
i;
size_t
depth,
one;
/*
Open persistent cache.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) CopyMagickString(cache_filename,image->filename,MaxTextExtent);
AppendImageFormat("cache",cache_filename);
scene=0;
offset=0;
one=1;
do
{
/*
Write persistent cache meta-information.
*/
depth=GetImageQuantumDepth(image,MagickTrue);
if ((image->storage_class == PseudoClass) &&
(image->colors > (one << depth)))
image->storage_class=DirectClass;
(void) WriteBlobString(image,"id=MagickCache\n");
(void) FormatLocaleString(buffer,MaxTextExtent,"magick-signature=%u\n",
GetMagickSignature((const StringInfo *) NULL));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"class=%s colors=%.20g matte=%s\n",CommandOptionToMnemonic(
MagickClassOptions,image->storage_class),(double) image->colors,
CommandOptionToMnemonic(MagickBooleanOptions,(ssize_t) image->matte));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"columns=%.20g rows=%.20g depth=%.20g\n",(double) image->columns,
(double) image->rows,(double) image->depth);
(void) WriteBlobString(image,buffer);
if (image->type != UndefinedType)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"type=%s\n",
CommandOptionToMnemonic(MagickTypeOptions,image->type));
(void) WriteBlobString(image,buffer);
}
if (image->colorspace != UndefinedColorspace)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"colorspace=%s\n",
CommandOptionToMnemonic(MagickColorspaceOptions,image->colorspace));
(void) WriteBlobString(image,buffer);
}
if (image->intensity != UndefinedPixelIntensityMethod)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"pixel-intensity=%s\n",
CommandOptionToMnemonic(MagickPixelIntensityOptions,
image->intensity));
(void) WriteBlobString(image,buffer);
}
if (image->endian != UndefinedEndian)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"endian=%s\n",
CommandOptionToMnemonic(MagickEndianOptions,image->endian));
(void) WriteBlobString(image,buffer);
}
if (image->compression != UndefinedCompression)
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"compression=%s quality=%.20g\n",CommandOptionToMnemonic(
MagickCompressOptions,image->compression),(double) image->quality);
(void) WriteBlobString(image,buffer);
}
if (image->units != UndefinedResolution)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"units=%s\n",
CommandOptionToMnemonic(MagickResolutionOptions,image->units));
(void) WriteBlobString(image,buffer);
}
if ((image->x_resolution != 0) || (image->y_resolution != 0))
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"resolution=%gx%g\n",image->x_resolution,image->y_resolution);
(void) WriteBlobString(image,buffer);
}
if ((image->page.width != 0) || (image->page.height != 0))
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"page=%.20gx%.20g%+.20g%+.20g\n",(double) image->page.width,(double)
image->page.height,(double) image->page.x,(double) image->page.y);
(void) WriteBlobString(image,buffer);
}
else
if ((image->page.x != 0) || (image->page.y != 0))
{
(void) FormatLocaleString(buffer,MaxTextExtent,"page=%+ld%+ld\n",
(long) image->page.x,(long) image->page.y);
(void) WriteBlobString(image,buffer);
}
if ((image->tile_offset.x != 0) || (image->tile_offset.y != 0))
{
(void) FormatLocaleString(buffer,MaxTextExtent,"tile-offset=%+ld%+ld\n",
(long) image->tile_offset.x,(long) image->tile_offset.y);
(void) WriteBlobString(image,buffer);
}
if ((GetNextImageInList(image) != (Image *) NULL) ||
(GetPreviousImageInList(image) != (Image *) NULL))
{
if (image->scene == 0)
(void) FormatLocaleString(buffer,MaxTextExtent,
"iterations=%.20g delay=%.20g ticks-per-second=%.20g\n",(double)
image->iterations,(double) image->delay,(double)
image->ticks_per_second);
else
(void) FormatLocaleString(buffer,MaxTextExtent,"scene=%.20g "
"iterations=%.20g delay=%.20g ticks-per-second=%.20g\n",
(double) image->scene,(double) image->iterations,(double)
image->delay,(double) image->ticks_per_second);
(void) WriteBlobString(image,buffer);
}
else
{
if (image->scene != 0)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"scene=%.20g\n",
(double) image->scene);
(void) WriteBlobString(image,buffer);
}
if (image->iterations != 0)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"iterations=%.20g\n",
(double) image->iterations);
(void) WriteBlobString(image,buffer);
}
if (image->delay != 0)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"delay=%.20g\n",
(double) image->delay);
(void) WriteBlobString(image,buffer);
}
if (image->ticks_per_second != UndefinedTicksPerSecond)
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"ticks-per-second=%.20g\n",(double) image->ticks_per_second);
(void) WriteBlobString(image,buffer);
}
}
if (image->gravity != UndefinedGravity)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"gravity=%s\n",
CommandOptionToMnemonic(MagickGravityOptions,image->gravity));
(void) WriteBlobString(image,buffer);
}
if (image->dispose != UndefinedDispose)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"dispose=%s\n",
CommandOptionToMnemonic(MagickDisposeOptions,image->dispose));
(void) WriteBlobString(image,buffer);
}
if (image->rendering_intent != UndefinedIntent)
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"rendering-intent=%s\n",CommandOptionToMnemonic(MagickIntentOptions,
image->rendering_intent));
(void) WriteBlobString(image,buffer);
}
if (image->gamma != 0.0)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"gamma=%g\n",
image->gamma);
(void) WriteBlobString(image,buffer);
}
if (image->chromaticity.white_point.x != 0.0)
{
/*
Note chromaticity points.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,"red-primary="
"%g,%g green-primary=%g,%g blue-primary=%g,%g\n",
image->chromaticity.red_primary.x,image->chromaticity.red_primary.y,
image->chromaticity.green_primary.x,
image->chromaticity.green_primary.y,
image->chromaticity.blue_primary.x,
image->chromaticity.blue_primary.y);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"white-point=%g,%g\n",image->chromaticity.white_point.x,
image->chromaticity.white_point.y);
(void) WriteBlobString(image,buffer);
}
if (image->orientation != UndefinedOrientation)
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"orientation=%s\n",CommandOptionToMnemonic(MagickOrientationOptions,
image->orientation));
(void) WriteBlobString(image,buffer);
}
if (image->profiles != (void *) NULL)
{
const char
*name;
const StringInfo
*profile;
/*
Generic profile.
*/
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"profile:%s=%.20g\n",name,(double)
GetStringInfoLength(profile));
(void) WriteBlobString(image,buffer);
}
name=GetNextImageProfile(image);
}
}
if (image->montage != (char *) NULL)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"montage=%s\n",
image->montage);
(void) WriteBlobString(image,buffer);
}
ResetImagePropertyIterator(image);
property=GetNextImageProperty(image);
while (property != (const char *) NULL)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"%s=",property);
(void) WriteBlobString(image,buffer);
value=GetImageProperty(image,property);
if (value != (const char *) NULL)
{
size_t
length;
length=strlen(value);
for (i=0; i < (ssize_t) length; i++)
if (isspace((int) ((unsigned char) value[i])) != 0)
break;
if ((i == (ssize_t) length) && (i != 0))
(void) WriteBlob(image,length,(const unsigned char *) value);
else
{
(void) WriteBlobByte(image,'{');
if (strchr(value,'}') == (char *) NULL)
(void) WriteBlob(image,length,(const unsigned char *) value);
else
for (i=0; i < (ssize_t) length; i++)
{
if (value[i] == (int) '}')
(void) WriteBlobByte(image,'\\');
(void) WriteBlobByte(image,value[i]);
}
(void) WriteBlobByte(image,'}');
}
}
(void) WriteBlobByte(image,'\n');
property=GetNextImageProperty(image);
}
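/*
Illustrative serialization (a hedged sketch): a value containing
whitespace is brace-quoted and any literal '}' is escaped with a
backslash, so a hypothetical property "comment" with the value "a } b"
is written as: comment={a \} b}
*/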
(void) WriteBlobString(image,"\f\n:\032");
if (image->montage != (char *) NULL)
{
/*
Write montage tile directory.
*/
if (image->directory != (char *) NULL)
(void) WriteBlobString(image,image->directory);
(void) WriteBlobByte(image,'\0');
}
if (image->profiles != 0)
{
const char
*name;
const StringInfo
*profile;
/*
Write image profiles.
*/
ResetImageProfileIterator(image);
name=GetNextImageProfile(image);
while (name != (const char *) NULL)
{
profile=GetImageProfile(image,name);
(void) WriteBlob(image,GetStringInfoLength(profile),
GetStringInfoDatum(profile));
name=GetNextImageProfile(image);
}
}
if (image->storage_class == PseudoClass)
{
size_t
packet_size;
unsigned char
*colormap,
*q;
/*
Allocate colormap.
*/
packet_size=(size_t) (3UL*depth/8UL);
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
return(MagickFalse);
/*
Write colormap to file.
*/
q=colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
switch (depth)
{
default:
ThrowWriterException(CorruptImageError,"ImageDepthNotSupported");
case 32:
{
unsigned int
pixel;
pixel=ScaleQuantumToLong(image->colormap[i].red);
q=PopLongPixel(MSBEndian,pixel,q);
pixel=ScaleQuantumToLong(image->colormap[i].green);
q=PopLongPixel(MSBEndian,pixel,q);
pixel=ScaleQuantumToLong(image->colormap[i].blue);
q=PopLongPixel(MSBEndian,pixel,q);
break;
}
case 16:
{
unsigned short
pixel;
pixel=ScaleQuantumToShort(image->colormap[i].red);
q=PopShortPixel(MSBEndian,pixel,q);
pixel=ScaleQuantumToShort(image->colormap[i].green);
q=PopShortPixel(MSBEndian,pixel,q);
pixel=ScaleQuantumToShort(image->colormap[i].blue);
q=PopShortPixel(MSBEndian,pixel,q);
break;
}
case 8:
{
unsigned char
pixel;
pixel=(unsigned char) ScaleQuantumToChar(image->colormap[i].red);
q=PopCharPixel(pixel,q);
pixel=(unsigned char) ScaleQuantumToChar(
image->colormap[i].green);
q=PopCharPixel(pixel,q);
pixel=(unsigned char) ScaleQuantumToChar(image->colormap[i].blue);
q=PopCharPixel(pixel,q);
break;
}
}
}
(void) WriteBlob(image,packet_size*image->colors,colormap);
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
/*
Initialize persistent pixel cache.
*/
status=PersistPixelCache(image,cache_filename,MagickFalse,&offset,
&image->exception);
if (status == MagickFalse)
ThrowWriterException(CacheError,"UnableToPersistPixelCache");
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
status=image->progress_monitor(SaveImagesTag,scene,
GetImageListLength(image),image->client_data);
if (status == MagickFalse)
break;
}
scene++;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(status);
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2577_0 |
crossvul-cpp_data_good_2891_6 | /* Userspace key control operations
*
* Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include "internal.h"
#define KEY_MAX_DESC_SIZE 4096
static int key_get_type_from_user(char *type,
const char __user *_type,
unsigned len)
{
int ret;
ret = strncpy_from_user(type, _type, len);
if (ret < 0)
return ret;
if (ret == 0 || ret >= len)
return -EINVAL;
if (type[0] == '.')
return -EPERM;
type[len - 1] = '\0';
return 0;
}
/*
* Extract the description of a new key from userspace and either add it as a
* new key to the specified keyring or update a matching key in that keyring.
*
* If the description is NULL or an empty string, the key type is asked to
* generate one from the payload.
*
* The keyring must be writable so that we can attach the key to it.
*
* If successful, the new key's serial number is returned, otherwise an error
* code is returned.
*/
SYSCALL_DEFINE5(add_key, const char __user *, _type,
const char __user *, _description,
const void __user *, _payload,
size_t, plen,
key_serial_t, ringid)
{
key_ref_t keyring_ref, key_ref;
char type[32], *description;
void *payload;
long ret;
ret = -EINVAL;
if (plen > 1024 * 1024 - 1)
goto error;
/* draw all the data into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
description = NULL;
if (_description) {
description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
if (!*description) {
kfree(description);
description = NULL;
} else if ((description[0] == '.') &&
(strncmp(type, "keyring", 7) == 0)) {
ret = -EPERM;
goto error2;
}
}
/* pull the payload in if one was supplied */
payload = NULL;
if (plen) {
ret = -ENOMEM;
payload = kvmalloc(plen, GFP_KERNEL);
if (!payload)
goto error2;
ret = -EFAULT;
if (copy_from_user(payload, _payload, plen) != 0)
goto error3;
}
/* find the target keyring (which must be writable) */
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error3;
}
/* create or update the requested key and add it to the target
* keyring */
key_ref = key_create_or_update(keyring_ref, type, description,
payload, plen, KEY_PERM_UNDEF,
KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key_ref)) {
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
}
else {
ret = PTR_ERR(key_ref);
}
key_ref_put(keyring_ref);
error3:
if (payload) {
memzero_explicit(payload, plen);
kvfree(payload);
}
error2:
kfree(description);
error:
return ret;
}
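/*
* Illustrative userspace call (a hedged sketch, not part of this file;
* it assumes the standard add_key(2) wrapper from libkeyutils, and the
* description "example:token" is hypothetical):
*
*	key_serial_t id = add_key("user", "example:token", "secret", 6,
*				  KEY_SPEC_SESSION_KEYRING);
*	if (id == -1)
*		perror("add_key");
*/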
/*
* Search the process keyrings and keyring trees linked from those for a
* matching key. Keyrings must have appropriate Search permission to be
* searched.
*
* If a key is found, it will be attached to the destination keyring if there's
* one specified and the serial number of the key will be returned.
*
* If no key is found, /sbin/request-key will be invoked if _callout_info is
* non-NULL in an attempt to create a key. The _callout_info string will be
* passed to /sbin/request-key to aid with completing the request. If the
* _callout_info string is "" then it will be changed to "-".
*/
SYSCALL_DEFINE4(request_key, const char __user *, _type,
const char __user *, _description,
const char __user *, _callout_info,
key_serial_t, destringid)
{
struct key_type *ktype;
struct key *key;
key_ref_t dest_ref;
size_t callout_len;
char type[32], *description, *callout_info;
long ret;
/* pull the type into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
/* pull the description into kernel space */
description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
/* pull the callout info into kernel space */
callout_info = NULL;
callout_len = 0;
if (_callout_info) {
callout_info = strndup_user(_callout_info, PAGE_SIZE);
if (IS_ERR(callout_info)) {
ret = PTR_ERR(callout_info);
goto error2;
}
callout_len = strlen(callout_info);
}
/* get the destination keyring if specified */
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
KEY_NEED_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
}
}
/* find the key type */
ktype = key_type_lookup(type);
if (IS_ERR(ktype)) {
ret = PTR_ERR(ktype);
goto error4;
}
/* do the search */
key = request_key_and_link(ktype, description, callout_info,
callout_len, NULL, key_ref_to_ptr(dest_ref),
KEY_ALLOC_IN_QUOTA);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto error5;
}
/* wait for the key to finish being constructed */
ret = wait_for_key_construction(key, 1);
if (ret < 0)
goto error6;
ret = key->serial;
error6:
key_put(key);
error5:
key_type_put(ktype);
error4:
key_ref_put(dest_ref);
error3:
kfree(callout_info);
error2:
kfree(description);
error:
return ret;
}
/*
* Get the ID of the specified process keyring.
*
* The requested keyring must have search permission to be found.
*
* If successful, the ID of the requested keyring will be returned.
*/
long keyctl_get_keyring_ID(key_serial_t id, int create)
{
key_ref_t key_ref;
unsigned long lflags;
long ret;
lflags = create ? KEY_LOOKUP_CREATE : 0;
key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
error:
return ret;
}
/*
* Join a (named) session keyring.
*
* Create and join an anonymous session keyring or join a named session
* keyring, creating it if necessary. A named session keyring must have Search
* permission for it to be joined. Session keyrings without this permit will
* be skipped over. It is not permitted for userspace to create or join
* keyrings whose names begin with a dot.
*
* If successful, the ID of the joined session keyring will be returned.
*/
long keyctl_join_session_keyring(const char __user *_name)
{
char *name;
long ret;
/* fetch the name from userspace */
name = NULL;
if (_name) {
name = strndup_user(_name, KEY_MAX_DESC_SIZE);
if (IS_ERR(name)) {
ret = PTR_ERR(name);
goto error;
}
ret = -EPERM;
if (name[0] == '.')
goto error_name;
}
/* join the session */
ret = join_session_keyring(name);
error_name:
kfree(name);
error:
return ret;
}
/*
* Update a key's data payload from the given data.
*
* The key must grant the caller Write permission and the key type must support
* updating for this to work. A negative key can be positively instantiated
* with this call.
*
* If successful, 0 will be returned. If the key type does not support
* updating, then -EOPNOTSUPP will be returned.
*/
long keyctl_update_key(key_serial_t id,
const void __user *_payload,
size_t plen)
{
key_ref_t key_ref;
void *payload;
long ret;
ret = -EINVAL;
if (plen > PAGE_SIZE)
goto error;
/* pull the payload in if one was supplied */
payload = NULL;
if (plen) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL);
if (!payload)
goto error;
ret = -EFAULT;
if (copy_from_user(payload, _payload, plen) != 0)
goto error2;
}
/* find the target key (which must be writable) */
key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
}
/* update the key */
ret = key_update(key_ref, payload, plen);
key_ref_put(key_ref);
error2:
kzfree(payload);
error:
return ret;
}
/*
* Revoke a key.
*
* The key must grant the caller Write or Setattr permission for this to
* work. The key type should give up its quota claim when revoked. The key
* and any links to the key will be automatically garbage collected after a
* certain amount of time (/proc/sys/kernel/keys/gc_delay).
*
* Keys with KEY_FLAG_KEEP set should not be revoked.
*
* If successful, 0 is returned.
*/
long keyctl_revoke_key(key_serial_t id)
{
key_ref_t key_ref;
struct key *key;
long ret;
key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
if (ret != -EACCES)
goto error;
key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
}
key = key_ref_to_ptr(key_ref);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_revoke(key);
key_ref_put(key_ref);
error:
return ret;
}
/*
* Invalidate a key.
*
* The key must grant the caller Invalidate permission for this to work.
* The key and any links to the key will be automatically garbage collected
* immediately.
*
* Keys with KEY_FLAG_KEEP set should not be invalidated.
*
* If successful, 0 is returned.
*/
long keyctl_invalidate_key(key_serial_t id)
{
key_ref_t key_ref;
struct key *key;
long ret;
kenter("%d", id);
key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
/* Root is permitted to invalidate certain special keys */
if (capable(CAP_SYS_ADMIN)) {
key_ref = lookup_user_key(id, 0, 0);
if (IS_ERR(key_ref))
goto error;
if (test_bit(KEY_FLAG_ROOT_CAN_INVAL,
&key_ref_to_ptr(key_ref)->flags))
goto invalidate;
goto error_put;
}
goto error;
}
invalidate:
key = key_ref_to_ptr(key_ref);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_invalidate(key);
error_put:
key_ref_put(key_ref);
error:
kleave(" = %ld", ret);
return ret;
}
/*
* Clear the specified keyring, creating an empty process keyring if one of the
* special keyring IDs is used.
*
* The keyring must grant the caller Write permission and not have
* KEY_FLAG_KEEP set for this to work. If successful, 0 will be returned.
*/
long keyctl_keyring_clear(key_serial_t ringid)
{
key_ref_t keyring_ref;
struct key *keyring;
long ret;
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
/* Root is permitted to invalidate certain special keyrings */
if (capable(CAP_SYS_ADMIN)) {
keyring_ref = lookup_user_key(ringid, 0, 0);
if (IS_ERR(keyring_ref))
goto error;
if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
&key_ref_to_ptr(keyring_ref)->flags))
goto clear;
goto error_put;
}
goto error;
}
clear:
keyring = key_ref_to_ptr(keyring_ref);
if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
ret = -EPERM;
else
ret = keyring_clear(keyring);
error_put:
key_ref_put(keyring_ref);
error:
return ret;
}
/*
* Create a link from a keyring to a key if there's no matching key in the
* keyring, otherwise replace the link to the matching key with a link to the
* new key.
*
* The key must grant the caller Link permission and the keyring must grant
* the caller Write permission. Furthermore, if an additional link is created,
* the keyring's quota will be extended.
*
* If successful, 0 will be returned.
*/
long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
{
key_ref_t keyring_ref, key_ref;
long ret;
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
}
ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
key_ref_put(key_ref);
error2:
key_ref_put(keyring_ref);
error:
return ret;
}
/*
* Unlink a key from a keyring.
*
* The keyring must grant the caller Write permission for this to work; the key
* itself need not grant the caller anything. If the last link to a key is
* removed then that key will be scheduled for destruction.
*
* Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked.
*
* If successful, 0 will be returned.
*/
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
key_ref_t keyring_ref, key_ref;
struct key *keyring, *key;
long ret;
keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
}
keyring = key_ref_to_ptr(keyring_ref);
key = key_ref_to_ptr(key_ref);
if (test_bit(KEY_FLAG_KEEP, &keyring->flags) &&
test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
ret = key_unlink(keyring, key);
key_ref_put(key_ref);
error2:
key_ref_put(keyring_ref);
error:
return ret;
}
/*
* Return a description of a key to userspace.
*
* The key must grant the caller View permission for this to work.
*
* If there's a buffer, we place up to buflen bytes of data into it formatted
* in the following way:
*
* type;uid;gid;perm;description<NUL>
*
* If successful, we return the amount of description available, irrespective
* of how much we may have copied into the buffer.
*/
long keyctl_describe_key(key_serial_t keyid,
char __user *buffer,
size_t buflen)
{
struct key *key, *instkey;
key_ref_t key_ref;
char *infobuf;
long ret;
int desclen, infolen;
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
/* viewing a key under construction is permitted if we have the
* authorisation token handy */
if (PTR_ERR(key_ref) == -EACCES) {
instkey = key_get_instantiation_authkey(keyid);
if (!IS_ERR(instkey)) {
key_put(instkey);
key_ref = lookup_user_key(keyid,
KEY_LOOKUP_PARTIAL,
0);
if (!IS_ERR(key_ref))
goto okay;
}
}
ret = PTR_ERR(key_ref);
goto error;
}
okay:
key = key_ref_to_ptr(key_ref);
desclen = strlen(key->description);
/* calculate how much information we're going to return */
ret = -ENOMEM;
infobuf = kasprintf(GFP_KERNEL,
"%s;%d;%d;%08x;",
key->type->name,
from_kuid_munged(current_user_ns(), key->uid),
from_kgid_munged(current_user_ns(), key->gid),
key->perm);
if (!infobuf)
goto error2;
infolen = strlen(infobuf);
ret = infolen + desclen + 1;
/* consider returning the data */
if (buffer && buflen >= ret) {
if (copy_to_user(buffer, infobuf, infolen) != 0 ||
copy_to_user(buffer + infolen, key->description,
desclen + 1) != 0)
ret = -EFAULT;
}
kfree(infobuf);
error2:
key_ref_put(key_ref);
error:
return ret;
}
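/*
* Illustrative userspace call (a hedged sketch assuming the
* keyctl_describe() wrapper from libkeyutils); for a "user" key owned
* by uid/gid 1000 the buffer might read "user;1000;1000;3f010000;foo":
*
*	char buf[256];
*	long n = keyctl_describe(id, buf, sizeof(buf));
*	if (n > 0 && n <= (long)sizeof(buf))
*		printf("%s\n", buf);
*/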
/*
* Search the specified keyring and any keyrings it links to for a matching
* key. Only keyrings that grant the caller Search permission will be searched
* (this includes the starting keyring). Only keys with Search permission can
* be found.
*
* If successful, the found key will be linked to the destination keyring if
* supplied and the key has Link permission, and the found key ID will be
* returned.
*/
long keyctl_keyring_search(key_serial_t ringid,
const char __user *_type,
const char __user *_description,
key_serial_t destringid)
{
struct key_type *ktype;
key_ref_t keyring_ref, key_ref, dest_ref;
char type[32], *description;
long ret;
/* pull the type and description into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
/* get the keyring at which to begin the search */
keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error2;
}
/* get the destination keyring if specified */
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
KEY_NEED_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
}
}
/* find the key type */
ktype = key_type_lookup(type);
if (IS_ERR(ktype)) {
ret = PTR_ERR(ktype);
goto error4;
}
/* do the search */
key_ref = keyring_search(keyring_ref, ktype, description);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
/* treat lack or presence of a negative key the same */
if (ret == -EAGAIN)
ret = -ENOKEY;
goto error5;
}
/* link the resulting key to the destination keyring if we can */
if (dest_ref) {
ret = key_permission(key_ref, KEY_NEED_LINK);
if (ret < 0)
goto error6;
ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
if (ret < 0)
goto error6;
}
ret = key_ref_to_ptr(key_ref)->serial;
error6:
key_ref_put(key_ref);
error5:
key_type_put(ktype);
error4:
key_ref_put(dest_ref);
error3:
key_ref_put(keyring_ref);
error2:
kfree(description);
error:
return ret;
}
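/*
* Illustrative userspace call (a hedged sketch assuming the
* keyctl_search() wrapper from libkeyutils; the description is
* hypothetical and a destination keyring of 0 means "do not link"):
*
*	key_serial_t found = keyctl_search(KEY_SPEC_SESSION_KEYRING,
*					   "user", "example:token", 0);
*/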
/*
* Read a key's payload.
*
* The key must either grant the caller Read permission, or it must grant the
* caller Search permission when searched for from the process keyrings.
*
* If successful, we place up to buflen bytes of data into the buffer, if one
* is provided, and return the amount of data that is available in the key,
* irrespective of how much we copied into the buffer.
*/
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
struct key *key;
key_ref_t key_ref;
long ret;
/* find the key first */
key_ref = lookup_user_key(keyid, 0, 0);
if (IS_ERR(key_ref)) {
ret = -ENOKEY;
goto error;
}
key = key_ref_to_ptr(key_ref);
ret = key_read_state(key);
if (ret < 0)
goto error2; /* Negatively instantiated */
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
goto can_read_key;
if (ret != -EACCES)
goto error2;
/* we can't; see if it's searchable from this process's keyrings
* - we automatically take account of the fact that it may be
* dangling off an instantiation key
*/
if (!is_key_possessed(key_ref)) {
ret = -EACCES;
goto error2;
}
/* the key is probably readable - now try to read it */
can_read_key:
ret = -EOPNOTSUPP;
if (key->type->read) {
/* Read the data with the semaphore held (since we might sleep)
* to protect against the key being updated or revoked.
*/
down_read(&key->sem);
ret = key_validate(key);
if (ret == 0)
ret = key->type->read(key, buffer, buflen);
up_read(&key->sem);
}
error2:
key_put(key);
error:
return ret;
}
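/*
* Illustrative two-step read (a hedged sketch assuming the keyctl_read()
* wrapper from libkeyutils; use_payload() is hypothetical): a first call
* with no buffer returns the payload size, a second call copies it out:
*
*	long len = keyctl_read(id, NULL, 0);
*	if (len > 0) {
*		char *buf = malloc(len);
*		if (buf && keyctl_read(id, buf, len) == len)
*			use_payload(buf, len);
*		free(buf);
*	}
*/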
/*
* Change the ownership of a key
*
* The key must grant the caller Setattr permission for this to work, though
* the key need not be fully instantiated yet. For the UID to be changed, or
* for the GID to be changed to a group the caller is not a member of, the
* caller must have sysadmin capability. If either uid or gid is -1 then that
* attribute is not changed.
*
* If the UID is to be changed, the new user must have sufficient quota to
* accept the key. The quota deduction will be transferred from the old
* user to the new user should the attribute be changed.
*
* If successful, 0 will be returned.
*/
long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
struct key_user *newowner, *zapowner = NULL;
struct key *key;
key_ref_t key_ref;
long ret;
kuid_t uid;
kgid_t gid;
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);
ret = -EINVAL;
if ((user != (uid_t) -1) && !uid_valid(uid))
goto error;
if ((group != (gid_t) -1) && !gid_valid(gid))
goto error;
ret = 0;
if (user == (uid_t) -1 && group == (gid_t) -1)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
key = key_ref_to_ptr(key_ref);
/* make the changes with the locks held to prevent chown/chown races */
ret = -EACCES;
down_write(&key->sem);
if (!capable(CAP_SYS_ADMIN)) {
/* only the sysadmin can chown a key to some other UID */
if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
goto error_put;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
goto error_put;
}
/* change the UID */
if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
ret = -ENOMEM;
newowner = key_user_lookup(uid);
if (!newowner)
goto error_put;
/* transfer the quota burden to the new user */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
if (newowner->qnkeys + 1 >= maxkeys ||
newowner->qnbytes + key->quotalen >= maxbytes ||
newowner->qnbytes + key->quotalen <
newowner->qnbytes)
goto quota_overrun;
newowner->qnkeys++;
newowner->qnbytes += key->quotalen;
spin_unlock(&newowner->lock);
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
atomic_inc(&newowner->nkeys);
if (key->state != KEY_IS_UNINSTANTIATED) {
atomic_dec(&key->user->nikeys);
atomic_inc(&newowner->nikeys);
}
zapowner = key->user;
key->user = newowner;
key->uid = uid;
}
/* change the GID */
if (group != (gid_t) -1)
key->gid = gid;
ret = 0;
error_put:
up_write(&key->sem);
key_put(key);
if (zapowner)
key_user_put(zapowner);
error:
return ret;
quota_overrun:
spin_unlock(&newowner->lock);
zapowner = newowner;
ret = -EDQUOT;
goto error_put;
}
/*
* Change the permission mask on a key.
*
* The key must grant the caller Setattr permission for this to work, though
* the key need not be fully instantiated yet. If the caller does not have
* sysadmin capability, it may only change the permission on keys that it owns.
*/
long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
{
struct key *key;
key_ref_t key_ref;
long ret;
ret = -EINVAL;
if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
key = key_ref_to_ptr(key_ref);
/* make the changes with the locks held to prevent chown/chmod races */
ret = -EACCES;
down_write(&key->sem);
/* if we're not the sysadmin, we can only change a key that we own */
if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
key->perm = perm;
ret = 0;
}
up_write(&key->sem);
key_put(key);
error:
return ret;
}
/*
* Get the destination keyring for instantiation and check that the caller has
* Write permission on it.
*/
static long get_instantiation_keyring(key_serial_t ringid,
struct request_key_auth *rka,
struct key **_dest_keyring)
{
key_ref_t dkref;
*_dest_keyring = NULL;
/* just return a NULL pointer if we weren't asked to make a link */
if (ringid == 0)
return 0;
/* if a specific keyring is nominated by ID, then use that */
if (ringid > 0) {
dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(dkref))
return PTR_ERR(dkref);
*_dest_keyring = key_ref_to_ptr(dkref);
return 0;
}
if (ringid == KEY_SPEC_REQKEY_AUTH_KEY)
return -EINVAL;
/* otherwise specify the destination keyring recorded in the
* authorisation key (any KEY_SPEC_*_KEYRING) */
if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) {
*_dest_keyring = key_get(rka->dest_keyring);
return 0;
}
return -ENOKEY;
}
/*
* Change the request_key authorisation key on the current process.
*/
static int keyctl_change_reqkey_auth(struct key *key)
{
struct cred *new;
new = prepare_creds();
if (!new)
return -ENOMEM;
key_put(new->request_key_auth);
new->request_key_auth = key_get(key);
return commit_creds(new);
}
/*
* Instantiate a key with the specified payload and link the key into the
* destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* If successful, 0 will be returned.
*/
long keyctl_instantiate_key_common(key_serial_t id,
struct iov_iter *from,
key_serial_t ringid)
{
const struct cred *cred = current_cred();
struct request_key_auth *rka;
struct key *instkey, *dest_keyring;
size_t plen = from ? iov_iter_count(from) : 0;
void *payload;
long ret;
kenter("%d,,%zu,%d", id, plen, ringid);
if (!plen)
from = NULL;
ret = -EINVAL;
if (plen > 1024 * 1024 - 1)
goto error;
/* the appropriate instantiation authorisation key must have been
* assumed before calling this */
ret = -EPERM;
instkey = cred->request_key_auth;
if (!instkey)
goto error;
rka = instkey->payload.data[0];
if (rka->target_key->serial != id)
goto error;
/* pull the payload in if one was supplied */
payload = NULL;
if (from) {
ret = -ENOMEM;
payload = kvmalloc(plen, GFP_KERNEL);
if (!payload)
goto error;
ret = -EFAULT;
if (!copy_from_iter_full(payload, plen, from))
goto error2;
}
/* find the destination keyring amongst those belonging to the
* requesting task */
ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
if (ret < 0)
goto error2;
/* instantiate the key and link it into a keyring */
ret = key_instantiate_and_link(rka->target_key, payload, plen,
dest_keyring, instkey);
key_put(dest_keyring);
/* discard the assumed authority if it's just been disabled by
* instantiation of the key */
if (ret == 0)
keyctl_change_reqkey_auth(NULL);
error2:
if (payload) {
memzero_explicit(payload, plen);
kvfree(payload);
}
error:
return ret;
}
/*
* Instantiate a key with the specified payload and link the key into the
* destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* If successful, 0 will be returned.
*/
long keyctl_instantiate_key(key_serial_t id,
const void __user *_payload,
size_t plen,
key_serial_t ringid)
{
if (_payload && plen) {
struct iovec iov;
struct iov_iter from;
int ret;
ret = import_single_range(WRITE, (void __user *)_payload, plen,
&iov, &from);
if (unlikely(ret))
return ret;
return keyctl_instantiate_key_common(id, &from, ringid);
}
return keyctl_instantiate_key_common(id, NULL, ringid);
}
/*
* Instantiate a key with the specified multipart payload and link the key into
* the destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* If successful, 0 will be returned.
*/
long keyctl_instantiate_key_iov(key_serial_t id,
const struct iovec __user *_payload_iov,
unsigned ioc,
key_serial_t ringid)
{
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
struct iov_iter from;
long ret;
if (!_payload_iov)
ioc = 0;
ret = import_iovec(WRITE, _payload_iov, ioc,
ARRAY_SIZE(iovstack), &iov, &from);
if (ret < 0)
return ret;
ret = keyctl_instantiate_key_common(id, &from, ringid);
kfree(iov);
return ret;
}
/*
* Negatively instantiate the key with the given timeout (in seconds) and link
* the key into the destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* The key and any links to the key will be automatically garbage collected
* after the timeout expires.
*
* Negative keys are used to rate limit repeated request_key() calls by causing
* them to return -ENOKEY until the negative key expires.
*
* If successful, 0 will be returned.
*/
long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
{
return keyctl_reject_key(id, timeout, ENOKEY, ringid);
}
/*
* Negatively instantiate the key with the given timeout (in seconds) and error
* code and link the key into the destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* The key and any links to the key will be automatically garbage collected
* after the timeout expires.
*
* Negative keys are used to rate limit repeated request_key() calls by causing
* them to return the specified error code until the negative key expires.
*
* If successful, 0 will be returned.
*/
long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
key_serial_t ringid)
{
const struct cred *cred = current_cred();
struct request_key_auth *rka;
struct key *instkey, *dest_keyring;
long ret;
kenter("%d,%u,%u,%d", id, timeout, error, ringid);
/* must be a valid error code and mustn't be a kernel special */
if (error <= 0 ||
error >= MAX_ERRNO ||
error == ERESTARTSYS ||
error == ERESTARTNOINTR ||
error == ERESTARTNOHAND ||
error == ERESTART_RESTARTBLOCK)
return -EINVAL;
/* the appropriate instantiation authorisation key must have been
* assumed before calling this */
ret = -EPERM;
instkey = cred->request_key_auth;
if (!instkey)
goto error;
rka = instkey->payload.data[0];
if (rka->target_key->serial != id)
goto error;
/* find the destination keyring if present (which must also be
* writable) */
ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
if (ret < 0)
goto error;
/* instantiate the key and link it into a keyring */
ret = key_reject_and_link(rka->target_key, timeout, error,
dest_keyring, instkey);
key_put(dest_keyring);
/* discard the assumed authority if it's just been disabled by
* instantiation of the key */
if (ret == 0)
keyctl_change_reqkey_auth(NULL);
error:
return ret;
}
/*
* Read or set the default keyring in which request_key() will cache keys and
* return the old setting.
*
* If a thread or process keyring is specified then it will be created if it
* doesn't yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
struct cred *new;
int ret, old_setting;
old_setting = current_cred_xxx(jit_keyring);
if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE)
return old_setting;
new = prepare_creds();
if (!new)
return -ENOMEM;
switch (reqkey_defl) {
case KEY_REQKEY_DEFL_THREAD_KEYRING:
ret = install_thread_keyring_to_cred(new);
if (ret < 0)
goto error;
goto set;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
ret = install_process_keyring_to_cred(new);
if (ret < 0)
goto error;
goto set;
case KEY_REQKEY_DEFL_DEFAULT:
case KEY_REQKEY_DEFL_SESSION_KEYRING:
case KEY_REQKEY_DEFL_USER_KEYRING:
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
goto set;
case KEY_REQKEY_DEFL_NO_CHANGE:
case KEY_REQKEY_DEFL_GROUP_KEYRING:
default:
ret = -EINVAL;
goto error;
}
set:
new->jit_keyring = reqkey_defl;
commit_creds(new);
return old_setting;
error:
abort_creds(new);
return ret;
}
/*
* Set or clear the timeout on a key.
*
* Either the key must grant the caller Setattr permission or else the caller
* must hold an instantiation authorisation token for the key.
*
* The timeout is either 0 to clear the timeout, or a number of seconds from
* the current time. The key and any links to the key will be automatically
* garbage collected after the timeout expires.
*
* Keys with KEY_FLAG_KEEP set should not be timed out.
*
* If successful, 0 is returned.
*/
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
{
struct key *key, *instkey;
key_ref_t key_ref;
long ret;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
/* setting the timeout on a key under construction is permitted
* if we have the authorisation token handy */
if (PTR_ERR(key_ref) == -EACCES) {
instkey = key_get_instantiation_authkey(id);
if (!IS_ERR(instkey)) {
key_put(instkey);
key_ref = lookup_user_key(id,
KEY_LOOKUP_PARTIAL,
0);
if (!IS_ERR(key_ref))
goto okay;
}
}
ret = PTR_ERR(key_ref);
goto error;
}
okay:
key = key_ref_to_ptr(key_ref);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_set_timeout(key, timeout);
key_put(key);
error:
return ret;
}
/*
* Assume (or clear) the authority to instantiate the specified key.
*
* This sets the authoritative token currently in force for key instantiation.
* This must be done for a key to be instantiated. It has the effect of making
* available all the keys from the caller of the request_key() that created a
* key to request_key() calls made by the caller of this function.
*
* The caller must have the instantiation key in their process keyrings with a
* Search permission grant available to the caller.
*
* If the ID given is 0, then the setting will be cleared and 0 returned.
*
* If the ID given matches an authorisation key, then that key will be
* set and its ID will be returned. The authorisation key can be read to get
* the callout information passed to request_key().
*/
long keyctl_assume_authority(key_serial_t id)
{
struct key *authkey;
long ret;
/* special key IDs aren't permitted */
ret = -EINVAL;
if (id < 0)
goto error;
/* we divest ourselves of authority if given an ID of 0 */
if (id == 0) {
ret = keyctl_change_reqkey_auth(NULL);
goto error;
}
/* attempt to assume the authority temporarily granted to us whilst we
* instantiate the specified key
* - the authorisation key must be in the current task's keyrings
* somewhere
*/
authkey = key_get_instantiation_authkey(id);
if (IS_ERR(authkey)) {
ret = PTR_ERR(authkey);
goto error;
}
ret = keyctl_change_reqkey_auth(authkey);
if (ret == 0)
ret = authkey->serial;
key_put(authkey);
error:
return ret;
}
/*
* Get a key's LSM security label.
*
* The key must grant the caller View permission for this to work.
*
* If there's a buffer, then up to buflen bytes of data will be placed into it.
*
* If successful, the amount of information available will be returned,
* irrespective of how much was copied (including the terminal NUL).
*/
long keyctl_get_security(key_serial_t keyid,
char __user *buffer,
size_t buflen)
{
struct key *key, *instkey;
key_ref_t key_ref;
char *context;
long ret;
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
if (PTR_ERR(key_ref) != -EACCES)
return PTR_ERR(key_ref);
/* viewing a key under construction is also permitted if we
* have the authorisation token handy */
instkey = key_get_instantiation_authkey(keyid);
if (IS_ERR(instkey))
return PTR_ERR(instkey);
key_put(instkey);
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0);
if (IS_ERR(key_ref))
return PTR_ERR(key_ref);
}
key = key_ref_to_ptr(key_ref);
ret = security_key_getsecurity(key, &context);
if (ret == 0) {
/* if no information was returned, give userspace an empty
* string */
ret = 1;
if (buffer && buflen > 0 &&
copy_to_user(buffer, "", 1) != 0)
ret = -EFAULT;
} else if (ret > 0) {
/* return as much data as there's room for */
if (buffer && buflen > 0) {
if (buflen > ret)
buflen = ret;
if (copy_to_user(buffer, context, buflen) != 0)
ret = -EFAULT;
}
kfree(context);
}
key_ref_put(key_ref);
return ret;
}
/*
* Attempt to install the calling process's session keyring on the process's
* parent process.
*
* The keyring must exist and must grant the caller LINK permission, and the
* parent process must be single-threaded and must have the same effective
* ownership as this process and mustn't be SUID/SGID.
*
* The keyring will be emplaced on the parent when it next resumes userspace.
*
* If successful, 0 will be returned.
*/
long keyctl_session_to_parent(void)
{
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
struct callback_head *newwork, *oldwork;
key_ref_t keyring_r;
struct cred *cred;
int ret;
keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
if (IS_ERR(keyring_r))
return PTR_ERR(keyring_r);
ret = -ENOMEM;
/* our parent is going to need a new cred struct, a new tgcred struct
* and new security data, so we allocate them here to prevent ENOMEM in
* our parent */
cred = cred_alloc_blank();
if (!cred)
goto error_keyring;
newwork = &cred->rcu;
cred->session_keyring = key_ref_to_ptr(keyring_r);
keyring_r = NULL;
init_task_work(newwork, key_change_session_keyring);
me = current;
rcu_read_lock();
write_lock_irq(&tasklist_lock);
ret = -EPERM;
oldwork = NULL;
parent = me->real_parent;
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
goto unlock;
/* the parent must be single threaded */
if (!thread_group_empty(parent))
goto unlock;
/* the parent and the child must have different session keyrings or
* there's no point */
mycred = current_cred();
pcred = __task_cred(parent);
if (mycred == pcred ||
mycred->session_keyring == pcred->session_keyring) {
ret = 0;
goto unlock;
}
/* the parent must have the same effective ownership and mustn't be
* SUID/SGID */
if (!uid_eq(pcred->uid, mycred->euid) ||
!uid_eq(pcred->euid, mycred->euid) ||
!uid_eq(pcred->suid, mycred->euid) ||
!gid_eq(pcred->gid, mycred->egid) ||
!gid_eq(pcred->egid, mycred->egid) ||
!gid_eq(pcred->sgid, mycred->egid))
goto unlock;
/* the keyrings must have the same UID */
if ((pcred->session_keyring &&
!uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
!uid_eq(mycred->session_keyring->uid, mycred->euid))
goto unlock;
/* cancel an already pending keyring replacement */
oldwork = task_work_cancel(parent, key_change_session_keyring);
/* the replacement session keyring is applied just prior to userspace
* restarting */
ret = task_work_add(parent, newwork, true);
if (!ret)
newwork = NULL;
unlock:
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
if (oldwork)
put_cred(container_of(oldwork, struct cred, rcu));
if (newwork)
put_cred(cred);
return ret;
error_keyring:
key_ref_put(keyring_r);
return ret;
}
/*
* Apply a restriction to a given keyring.
*
* The caller must have Setattr permission to change keyring restrictions.
*
* The requested type name may be a NULL pointer to reject all attempts
* to link to the keyring. If _type is non-NULL, _restriction can be
* NULL or a pointer to a string describing the restriction. If _type is
* NULL, _restriction must also be NULL.
*
* Returns 0 if successful.
*/
long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
const char __user *_restriction)
{
key_ref_t key_ref;
bool link_reject = !_type;
char type[32];
char *restriction = NULL;
long ret;
key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
if (IS_ERR(key_ref))
return PTR_ERR(key_ref);
if (_type) {
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
}
if (_restriction) {
if (!_type) {
ret = -EINVAL;
goto error;
}
restriction = strndup_user(_restriction, PAGE_SIZE);
if (IS_ERR(restriction)) {
ret = PTR_ERR(restriction);
goto error;
}
}
ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction);
kfree(restriction);
error:
key_ref_put(key_ref);
return ret;
}
/*
* The key control system call
*/
SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
switch (option) {
case KEYCTL_GET_KEYRING_ID:
return keyctl_get_keyring_ID((key_serial_t) arg2,
(int) arg3);
case KEYCTL_JOIN_SESSION_KEYRING:
return keyctl_join_session_keyring((const char __user *) arg2);
case KEYCTL_UPDATE:
return keyctl_update_key((key_serial_t) arg2,
(const void __user *) arg3,
(size_t) arg4);
case KEYCTL_REVOKE:
return keyctl_revoke_key((key_serial_t) arg2);
case KEYCTL_DESCRIBE:
return keyctl_describe_key((key_serial_t) arg2,
(char __user *) arg3,
(unsigned) arg4);
case KEYCTL_CLEAR:
return keyctl_keyring_clear((key_serial_t) arg2);
case KEYCTL_LINK:
return keyctl_keyring_link((key_serial_t) arg2,
(key_serial_t) arg3);
case KEYCTL_UNLINK:
return keyctl_keyring_unlink((key_serial_t) arg2,
(key_serial_t) arg3);
case KEYCTL_SEARCH:
return keyctl_keyring_search((key_serial_t) arg2,
(const char __user *) arg3,
(const char __user *) arg4,
(key_serial_t) arg5);
case KEYCTL_READ:
return keyctl_read_key((key_serial_t) arg2,
(char __user *) arg3,
(size_t) arg4);
case KEYCTL_CHOWN:
return keyctl_chown_key((key_serial_t) arg2,
(uid_t) arg3,
(gid_t) arg4);
case KEYCTL_SETPERM:
return keyctl_setperm_key((key_serial_t) arg2,
(key_perm_t) arg3);
case KEYCTL_INSTANTIATE:
return keyctl_instantiate_key((key_serial_t) arg2,
(const void __user *) arg3,
(size_t) arg4,
(key_serial_t) arg5);
case KEYCTL_NEGATE:
return keyctl_negate_key((key_serial_t) arg2,
(unsigned) arg3,
(key_serial_t) arg4);
case KEYCTL_SET_REQKEY_KEYRING:
return keyctl_set_reqkey_keyring(arg2);
case KEYCTL_SET_TIMEOUT:
return keyctl_set_timeout((key_serial_t) arg2,
(unsigned) arg3);
case KEYCTL_ASSUME_AUTHORITY:
return keyctl_assume_authority((key_serial_t) arg2);
case KEYCTL_GET_SECURITY:
return keyctl_get_security((key_serial_t) arg2,
(char __user *) arg3,
(size_t) arg4);
case KEYCTL_SESSION_TO_PARENT:
return keyctl_session_to_parent();
case KEYCTL_REJECT:
return keyctl_reject_key((key_serial_t) arg2,
(unsigned) arg3,
(unsigned) arg4,
(key_serial_t) arg5);
case KEYCTL_INSTANTIATE_IOV:
return keyctl_instantiate_key_iov(
(key_serial_t) arg2,
(const struct iovec __user *) arg3,
(unsigned) arg4,
(key_serial_t) arg5);
case KEYCTL_INVALIDATE:
return keyctl_invalidate_key((key_serial_t) arg2);
case KEYCTL_GET_PERSISTENT:
return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
case KEYCTL_DH_COMPUTE:
return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
(char __user *) arg3, (size_t) arg4,
(struct keyctl_kdf_params __user *) arg5);
case KEYCTL_RESTRICT_KEYRING:
return keyctl_restrict_keyring((key_serial_t) arg2,
(const char __user *) arg3,
(const char __user *) arg4);
default:
return -EOPNOTSUPP;
}
}
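/*
* Illustrative raw invocation (a hedged sketch; libkeyutils normally
* wraps this syscall, but it can also be issued directly):
*
*	#include <unistd.h>
*	#include <sys/syscall.h>
*	#include <linux/keyctl.h>
*
*	long serial = syscall(__NR_keyctl, KEYCTL_GET_KEYRING_ID,
*			      KEY_SPEC_SESSION_KEYRING, 1);
*/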
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2891_6 |
crossvul-cpp_data_bad_2401_0 | /*
* Copyright (c) Christos Zoulas 2003.
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "file.h"
#ifndef lint
FILE_RCSID("@(#)$File: readelf.c,v 1.113 2014/12/11 14:10:53 christos Exp $")
#endif
#ifdef BUILTIN_ELF
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "readelf.h"
#include "magic.h"
#ifdef ELFCORE
private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t,
off_t, int *);
#endif
private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t,
off_t, int *, int);
private int doshn(struct magic_set *, int, int, int, off_t, int, size_t,
off_t, int *, int, int);
private size_t donote(struct magic_set *, void *, size_t, size_t, int,
int, size_t, int *);
#define ELF_ALIGN(a) ((((a) + align - 1) / align) * align)
#define isquote(c) (strchr("'\"`", (c)) != NULL)
private uint16_t getu16(int, uint16_t);
private uint32_t getu32(int, uint32_t);
private uint64_t getu64(int, uint64_t);
#define MAX_PHNUM 128
#define MAX_SHNUM 32768
#define SIZE_UNKNOWN ((off_t)-1)
private int
toomany(struct magic_set *ms, const char *name, uint16_t num)
{
if (file_printf(ms, ", too many %s header sections (%u)", name, num
) == -1)
return -1;
return 0;
}
private uint16_t
getu16(int swap, uint16_t value)
{
union {
uint16_t ui;
char c[2];
} retval, tmpval;
if (swap) {
tmpval.ui = value;
retval.c[0] = tmpval.c[1];
retval.c[1] = tmpval.c[0];
return retval.ui;
} else
return value;
}
private uint32_t
getu32(int swap, uint32_t value)
{
union {
uint32_t ui;
char c[4];
} retval, tmpval;
if (swap) {
tmpval.ui = value;
retval.c[0] = tmpval.c[3];
retval.c[1] = tmpval.c[2];
retval.c[2] = tmpval.c[1];
retval.c[3] = tmpval.c[0];
return retval.ui;
} else
return value;
}
private uint64_t
getu64(int swap, uint64_t value)
{
union {
uint64_t ui;
char c[8];
} retval, tmpval;
if (swap) {
tmpval.ui = value;
retval.c[0] = tmpval.c[7];
retval.c[1] = tmpval.c[6];
retval.c[2] = tmpval.c[5];
retval.c[3] = tmpval.c[4];
retval.c[4] = tmpval.c[3];
retval.c[5] = tmpval.c[2];
retval.c[6] = tmpval.c[1];
retval.c[7] = tmpval.c[0];
return retval.ui;
} else
return value;
}
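/*
 * A worked example of the byte-swapping helpers above: with swap != 0
 * (object and host byte orders differ), getu32(1, 0x11223344) returns
 * 0x44332211; with swap == 0 the value is passed through untouched.
 */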
#define elf_getu16(swap, value) getu16(swap, value)
#define elf_getu32(swap, value) getu32(swap, value)
#define elf_getu64(swap, value) getu64(swap, value)
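/*
 * The x*_ macros below hide the ELFCLASS32/ELFCLASS64 split: each one
 * reads the requested field from either the 32-bit or the 64-bit
 * header struct (sh32/sh64, ph32/ph64, nh32/nh64, cap32/cap64) that
 * the enclosing function declares, byte-swapping via elf_getu*() as
 * needed, so the parsing code can be written once for both classes.
 */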
#define xsh_addr (clazz == ELFCLASS32 \
? (void *)&sh32 \
: (void *)&sh64)
#define xsh_sizeof (clazz == ELFCLASS32 \
? sizeof(sh32) \
: sizeof(sh64))
#define xsh_size (size_t)(clazz == ELFCLASS32 \
? elf_getu32(swap, sh32.sh_size) \
: elf_getu64(swap, sh64.sh_size))
#define xsh_offset (off_t)(clazz == ELFCLASS32 \
? elf_getu32(swap, sh32.sh_offset) \
: elf_getu64(swap, sh64.sh_offset))
#define xsh_type (clazz == ELFCLASS32 \
? elf_getu32(swap, sh32.sh_type) \
: elf_getu32(swap, sh64.sh_type))
#define xsh_name (clazz == ELFCLASS32 \
? elf_getu32(swap, sh32.sh_name) \
: elf_getu32(swap, sh64.sh_name))
#define xph_addr (clazz == ELFCLASS32 \
? (void *) &ph32 \
: (void *) &ph64)
#define xph_sizeof (clazz == ELFCLASS32 \
? sizeof(ph32) \
: sizeof(ph64))
#define xph_type (clazz == ELFCLASS32 \
? elf_getu32(swap, ph32.p_type) \
: elf_getu32(swap, ph64.p_type))
#define xph_offset (off_t)(clazz == ELFCLASS32 \
? elf_getu32(swap, ph32.p_offset) \
: elf_getu64(swap, ph64.p_offset))
#define xph_align (size_t)((clazz == ELFCLASS32 \
? (off_t) (ph32.p_align ? \
elf_getu32(swap, ph32.p_align) : 4) \
: (off_t) (ph64.p_align ? \
elf_getu64(swap, ph64.p_align) : 4)))
#define xph_filesz (size_t)((clazz == ELFCLASS32 \
? elf_getu32(swap, ph32.p_filesz) \
: elf_getu64(swap, ph64.p_filesz)))
#define xnh_addr (clazz == ELFCLASS32 \
? (void *)&nh32 \
: (void *)&nh64)
#define xph_memsz (size_t)((clazz == ELFCLASS32 \
? elf_getu32(swap, ph32.p_memsz) \
: elf_getu64(swap, ph64.p_memsz)))
#define xnh_sizeof (clazz == ELFCLASS32 \
? sizeof nh32 \
: sizeof nh64)
#define xnh_type (clazz == ELFCLASS32 \
? elf_getu32(swap, nh32.n_type) \
: elf_getu32(swap, nh64.n_type))
#define xnh_namesz (clazz == ELFCLASS32 \
? elf_getu32(swap, nh32.n_namesz) \
: elf_getu32(swap, nh64.n_namesz))
#define xnh_descsz (clazz == ELFCLASS32 \
? elf_getu32(swap, nh32.n_descsz) \
: elf_getu32(swap, nh64.n_descsz))
#define prpsoffsets(i) (clazz == ELFCLASS32 \
? prpsoffsets32[i] \
: prpsoffsets64[i])
#define xcap_addr (clazz == ELFCLASS32 \
? (void *)&cap32 \
: (void *)&cap64)
#define xcap_sizeof (clazz == ELFCLASS32 \
? sizeof cap32 \
: sizeof cap64)
#define xcap_tag (clazz == ELFCLASS32 \
? elf_getu32(swap, cap32.c_tag) \
: elf_getu64(swap, cap64.c_tag))
#define xcap_val (clazz == ELFCLASS32 \
? elf_getu32(swap, cap32.c_un.c_val) \
: elf_getu64(swap, cap64.c_un.c_val))
#ifdef ELFCORE
/*
* Try larger offsets first to avoid false matches
* from earlier data that happen to look like strings.
*/
static const size_t prpsoffsets32[] = {
#ifdef USE_NT_PSINFO
104, /* SunOS 5.x (command line) */
88, /* SunOS 5.x (short name) */
#endif /* USE_NT_PSINFO */
100, /* SunOS 5.x (command line) */
84, /* SunOS 5.x (short name) */
44, /* Linux (command line) */
28, /* Linux 2.0.36 (short name) */
8, /* FreeBSD */
};
static const size_t prpsoffsets64[] = {
#ifdef USE_NT_PSINFO
152, /* SunOS 5.x (command line) */
136, /* SunOS 5.x (short name) */
#endif /* USE_NT_PSINFO */
136, /* SunOS 5.x, 64-bit (command line) */
120, /* SunOS 5.x, 64-bit (short name) */
56, /* Linux (command line) */
40, /* Linux (tested on core from 2.4.x, short name) */
16, /* FreeBSD, 64-bit */
};
#define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0])
#define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0])
#define NOFFSETS (clazz == ELFCLASS32 ? NOFFSETS32 : NOFFSETS64)
/*
* Look through the program headers of an executable image, searching
* for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or
* "FreeBSD"; if one is found, try looking in various places in its
* contents for a 16-character string containing only printable
* characters - if found, that string should be the name of the program
* that dropped core. Note: right after that 16-character string is,
* at least in SunOS 5.x (and possibly other SVR4-flavored systems) and
* Linux, a longer string (80 characters, in 5.x, probably other
* SVR4-flavored systems, and Linux) containing the start of the
* command line for that program.
*
* SunOS 5.x core files contain two PT_NOTE sections, with the types
* NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the
* same info about the command name and command line, so it probably
* isn't worthwhile to look for NT_PSINFO, but the offsets are provided
* above (see USE_NT_PSINFO), in case we ever decide to do so. The
* NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent;
* the SunOS 5.x file command relies on this (and prefers the latter).
*
* The signal number probably appears in a section of type NT_PRSTATUS,
* but that's also rather OS-dependent, in ways that are harder to
* dissect with heuristics, so I'm not bothering with the signal number.
* (I suppose the signal number could be of interest in situations where
* you don't have the binary of the program that dropped core; if you
* *do* have that binary, the debugger will probably tell you what
* signal it was.)
*/
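/*
 * In outline, the test applied below to each candidate offset boils
 * down to (sketch only):
 *
 *	for (j = 0; j < 16; j++) {
 *		c = nbuf[doff + prpsoffsets(i) + j];
 *		if (c == '\0')
 *			break;			// done, unless j == 0
 *		if (!isprint(c) || isquote(c))
 *			goto tryanother;	// not a command name
 *	}
 *
 * i.e. a candidate is accepted only if it starts with a printable
 * character and stays printable (and unquoted) up to the first NUL.
 */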
#define OS_STYLE_SVR4 0
#define OS_STYLE_FREEBSD 1
#define OS_STYLE_NETBSD 2
private const char os_style_names[][8] = {
"SVR4",
"FreeBSD",
"NetBSD",
};
#define FLAGS_DID_CORE 0x01
#define FLAGS_DID_NOTE 0x02
#define FLAGS_DID_BUILD_ID 0x04
#define FLAGS_DID_CORE_STYLE 0x08
#define FLAGS_IS_CORE 0x10
private int
dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
int num, size_t size, off_t fsize, int *flags)
{
Elf32_Phdr ph32;
Elf64_Phdr ph64;
size_t offset, len;
unsigned char nbuf[BUFSIZ];
ssize_t bufsize;
if (size != xph_sizeof) {
if (file_printf(ms, ", corrupted program header size") == -1)
return -1;
return 0;
}
/*
* Loop through all the program headers.
*/
for ( ; num; num--) {
if (pread(fd, xph_addr, xph_sizeof, off) == -1) {
file_badread(ms);
return -1;
}
off += size;
if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
/* Perhaps warn here */
continue;
}
if (xph_type != PT_NOTE)
continue;
/*
* This is a PT_NOTE section; loop through all the notes
* in the section.
*/
len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf);
if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) {
file_badread(ms);
return -1;
}
offset = 0;
for (;;) {
if (offset >= (size_t)bufsize)
break;
offset = donote(ms, nbuf, offset, (size_t)bufsize,
clazz, swap, 4, flags);
if (offset == 0)
break;
}
}
return 0;
}
#endif
static void
do_note_netbsd_version(struct magic_set *ms, int swap, void *v)
{
uint32_t desc;
(void)memcpy(&desc, v, sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, ", for NetBSD") == -1)
return;
/*
 * The version number used to be stuck at 199905, and was thus

* basically content-free. Newer versions of NetBSD have fixed
* this and now use the encoding of __NetBSD_Version__:
*
* MMmmrrpp00
*
* M = major version
* m = minor version
* r = release ["",A-Z,Z[A-Z] but numeric]
* p = patchlevel
*/
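	/*
	 * Worked example of the decoding below: desc == 502000300 splits
	 * as MM=05 mm=02 rr=00 pp=03 and prints as "NetBSD 5.2.3"; an rr
	 * of 27 would print as "ZA" via the letter loop further down.
	 */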
if (desc > 100000000U) {
uint32_t ver_patch = (desc / 100) % 100;
uint32_t ver_rel = (desc / 10000) % 100;
uint32_t ver_min = (desc / 1000000) % 100;
uint32_t ver_maj = desc / 100000000;
if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1)
return;
if (ver_rel == 0 && ver_patch != 0) {
if (file_printf(ms, ".%u", ver_patch) == -1)
return;
} else if (ver_rel != 0) {
while (ver_rel > 26) {
if (file_printf(ms, "Z") == -1)
return;
ver_rel -= 26;
}
if (file_printf(ms, "%c", 'A' + ver_rel - 1)
== -1)
return;
}
}
}
static void
do_note_freebsd_version(struct magic_set *ms, int swap, void *v)
{
uint32_t desc;
(void)memcpy(&desc, v, sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, ", for FreeBSD") == -1)
return;
/*
* Contents is __FreeBSD_version, whose relation to OS
* versions is defined by a huge table in the Porter's
* Handbook. This is the general scheme:
*
* Releases:
* Mmp000 (before 4.10)
* Mmi0p0 (before 5.0)
* Mmm0p0
*
* Development branches:
* Mmpxxx (before 4.6)
* Mmp1xx (before 4.10)
* Mmi1xx (before 5.0)
* M000xx (pre-M.0)
* Mmm1xx
*
* M = major version
* m = minor version
* i = minor version increment (491000 -> 4.10)
* p = patchlevel
* x = revision
*
* The first release of FreeBSD to use ELF by default
* was version 3.0.
*/
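	/*
	 * Worked examples of the decoding below: 460002 is special-cased
	 * to "4.6.2"; 491000 decodes via the Mmi0p0 scheme as "4.10"
	 * (minor 9 plus increment 1).
	 */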
if (desc == 460002) {
if (file_printf(ms, " 4.6.2") == -1)
return;
} else if (desc < 460100) {
if (file_printf(ms, " %d.%d", desc / 100000,
desc / 10000 % 10) == -1)
return;
if (desc / 1000 % 10 > 0)
if (file_printf(ms, ".%d", desc / 1000 % 10) == -1)
return;
if ((desc % 1000 > 0) || (desc % 100000 == 0))
if (file_printf(ms, " (%d)", desc) == -1)
return;
} else if (desc < 500000) {
if (file_printf(ms, " %d.%d", desc / 100000,
desc / 10000 % 10 + desc / 1000 % 10) == -1)
return;
if (desc / 100 % 10 > 0) {
if (file_printf(ms, " (%d)", desc) == -1)
return;
} else if (desc / 10 % 10 > 0) {
if (file_printf(ms, ".%d", desc / 10 % 10) == -1)
return;
}
} else {
if (file_printf(ms, " %d.%d", desc / 100000,
desc / 1000 % 100) == -1)
return;
if ((desc / 100 % 10 > 0) ||
(desc % 100000 / 100 == 0)) {
if (file_printf(ms, " (%d)", desc) == -1)
return;
} else if (desc / 10 % 10 > 0) {
if (file_printf(ms, ".%d", desc / 10 % 10) == -1)
return;
}
}
}
private size_t
donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size,
int clazz, int swap, size_t align, int *flags)
{
Elf32_Nhdr nh32;
Elf64_Nhdr nh64;
size_t noff, doff;
#ifdef ELFCORE
int os_style = -1;
#endif
uint32_t namesz, descsz;
unsigned char *nbuf = CAST(unsigned char *, vbuf);
char sbuf[512];
if (xnh_sizeof + offset > size) {
/*
* We're out of note headers.
*/
return xnh_sizeof + offset;
}
(void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof);
offset += xnh_sizeof;
namesz = xnh_namesz;
descsz = xnh_descsz;
if ((namesz == 0) && (descsz == 0)) {
/*
* We're out of note headers.
*/
return (offset >= size) ? offset : size;
}
if (namesz & 0x80000000) {
(void)file_printf(ms, ", bad note name size 0x%lx",
(unsigned long)namesz);
return 0;
}
if (descsz & 0x80000000) {
(void)file_printf(ms, ", bad note description size 0x%lx",
(unsigned long)descsz);
return 0;
}
noff = offset;
doff = ELF_ALIGN(offset + namesz);
if (offset + namesz > size) {
/*
* We're past the end of the buffer.
*/
return doff;
}
offset = ELF_ALIGN(doff + descsz);
if (doff + descsz > size) {
/*
* We're past the end of the buffer.
*/
return (offset >= size) ? offset : size;
}
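	/*
	 * At this point the note has been bounds-checked and is laid
	 * out in the buffer as:
	 *
	 *	+----------+---------------------+---------------------+
	 *	| Elf_Nhdr | name (namesz bytes, | desc (descsz bytes, |
	 *	|          |  padded to "align") |  padded to "align") |
	 *	+----------+---------------------+---------------------+
	 *	           ^ noff                ^ doff
	 */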
if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) ==
(FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID))
goto core;
if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 &&
xnh_type == NT_GNU_VERSION && descsz == 2) {
file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]);
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
xnh_type == NT_GNU_VERSION && descsz == 16) {
uint32_t desc[4];
(void)memcpy(desc, &nbuf[doff], sizeof(desc));
if (file_printf(ms, ", for GNU/") == -1)
return size;
switch (elf_getu32(swap, desc[0])) {
case GNU_OS_LINUX:
if (file_printf(ms, "Linux") == -1)
return size;
break;
case GNU_OS_HURD:
if (file_printf(ms, "Hurd") == -1)
return size;
break;
case GNU_OS_SOLARIS:
if (file_printf(ms, "Solaris") == -1)
return size;
break;
case GNU_OS_KFREEBSD:
if (file_printf(ms, "kFreeBSD") == -1)
return size;
break;
case GNU_OS_KNETBSD:
if (file_printf(ms, "kNetBSD") == -1)
return size;
break;
default:
if (file_printf(ms, "<unknown>") == -1)
return size;
}
if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]),
elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1)
return size;
*flags |= FLAGS_DID_NOTE;
return size;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) {
uint8_t desc[20];
uint32_t i;
if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? "md5/uuid" :
"sha1") == -1)
return size;
(void)memcpy(desc, &nbuf[doff], descsz);
for (i = 0; i < descsz; i++)
if (file_printf(ms, "%02x", desc[i]) == -1)
return size;
*flags |= FLAGS_DID_BUILD_ID;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 &&
xnh_type == NT_NETBSD_PAX && descsz == 4) {
static const char *pax[] = {
"+mprotect",
"-mprotect",
"+segvguard",
"-segvguard",
"+ASLR",
"-ASLR",
};
uint32_t desc;
size_t i;
int did = 0;
(void)memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (desc && file_printf(ms, ", PaX: ") == -1)
return size;
for (i = 0; i < __arraycount(pax); i++) {
if (((1 << i) & desc) == 0)
continue;
if (file_printf(ms, "%s%s", did++ ? "," : "",
pax[i]) == -1)
return size;
}
}
if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) {
switch (xnh_type) {
case NT_NETBSD_VERSION:
if (descsz == 4) {
do_note_netbsd_version(ms, swap, &nbuf[doff]);
*flags |= FLAGS_DID_NOTE;
return size;
}
break;
case NT_NETBSD_MARCH:
if (file_printf(ms, ", compiled for: %.*s", (int)descsz,
(const char *)&nbuf[doff]) == -1)
return size;
break;
case NT_NETBSD_CMODEL:
if (file_printf(ms, ", compiler model: %.*s",
(int)descsz, (const char *)&nbuf[doff]) == -1)
return size;
break;
default:
if (file_printf(ms, ", note=%u", xnh_type) == -1)
return size;
break;
}
return size;
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) {
if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) {
do_note_freebsd_version(ms, swap, &nbuf[doff]);
*flags |= FLAGS_DID_NOTE;
return size;
}
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 &&
xnh_type == NT_OPENBSD_VERSION && descsz == 4) {
if (file_printf(ms, ", for OpenBSD") == -1)
return size;
/* Content of note is always 0 */
*flags |= FLAGS_DID_NOTE;
return size;
}
if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 &&
xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) {
uint32_t desc;
if (file_printf(ms, ", for DragonFly") == -1)
return size;
(void)memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, " %d.%d.%d", desc / 100000,
desc / 10000 % 10, desc % 10000) == -1)
return size;
*flags |= FLAGS_DID_NOTE;
return size;
}
core:
/*
* Sigh. The 2.0.36 kernel in Debian 2.1, at
* least, doesn't correctly implement name
* sections, in core dumps, as specified by
* the "Program Linking" section of "UNIX(R) System
* V Release 4 Programmer's Guide: ANSI C and
* Programming Support Tools", because my copy
* clearly says "The first 'namesz' bytes in 'name'
* contain a *null-terminated* [emphasis mine]
* character representation of the entry's owner
* or originator", but the 2.0.36 kernel code
* doesn't include the terminating null in the
* name....
*/
if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) ||
(namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) {
os_style = OS_STYLE_SVR4;
}
if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) {
os_style = OS_STYLE_FREEBSD;
}
if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11)
== 0)) {
os_style = OS_STYLE_NETBSD;
}
#ifdef ELFCORE
if ((*flags & FLAGS_DID_CORE) != 0)
return size;
if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) {
if (file_printf(ms, ", %s-style", os_style_names[os_style])
== -1)
return size;
*flags |= FLAGS_DID_CORE_STYLE;
}
switch (os_style) {
case OS_STYLE_NETBSD:
if (xnh_type == NT_NETBSD_CORE_PROCINFO) {
uint32_t signo;
/*
* Extract the program name. It is at
* offset 0x7c, and is up to 32-bytes,
* including the terminating NUL.
*/
if (file_printf(ms, ", from '%.31s'",
file_printable(sbuf, sizeof(sbuf),
(const char *)&nbuf[doff + 0x7c])) == -1)
return size;
/*
* Extract the signal number. It is at
* offset 0x08.
*/
(void)memcpy(&signo, &nbuf[doff + 0x08],
sizeof(signo));
if (file_printf(ms, " (signal %u)",
elf_getu32(swap, signo)) == -1)
return size;
*flags |= FLAGS_DID_CORE;
return size;
}
break;
default:
if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) {
size_t i, j;
unsigned char c;
/*
* Extract the program name. We assume
* it to be 16 characters (that's what it
* is in SunOS 5.x and Linux).
*
* Unfortunately, it's at a different offset
* in various OSes, so try multiple offsets.
* If the characters aren't all printable,
* reject it.
*/
for (i = 0; i < NOFFSETS; i++) {
unsigned char *cname, *cp;
size_t reloffset = prpsoffsets(i);
size_t noffset = doff + reloffset;
size_t k;
for (j = 0; j < 16; j++, noffset++,
reloffset++) {
/*
* Make sure we're not past
* the end of the buffer; if
* we are, just give up.
*/
if (noffset >= size)
goto tryanother;
/*
* Make sure we're not past
* the end of the contents;
* if we are, this obviously
* isn't the right offset.
*/
if (reloffset >= descsz)
goto tryanother;
c = nbuf[noffset];
if (c == '\0') {
/*
* A '\0' at the
* beginning is
* obviously wrong.
* Any other '\0'
* means we're done.
*/
if (j == 0)
goto tryanother;
else
break;
} else {
/*
* A nonprintable
* character is also
* wrong.
*/
if (!isprint(c) || isquote(c))
goto tryanother;
}
}
/*
* Well, that worked.
*/
/*
* Try next offsets, in case this match is
* in the middle of a string.
*/
for (k = i + 1 ; k < NOFFSETS ; k++) {
size_t no;
int adjust = 1;
if (prpsoffsets(k) >= prpsoffsets(i))
continue;
for (no = doff + prpsoffsets(k);
no < doff + prpsoffsets(i); no++)
adjust = adjust
&& isprint(nbuf[no]);
if (adjust)
i = k;
}
cname = (unsigned char *)
&nbuf[doff + prpsoffsets(i)];
for (cp = cname; *cp && isprint(*cp); cp++)
continue;
/*
* Linux apparently appends a space at the end
* of the command line: remove it.
*/
while (cp > cname && isspace(cp[-1]))
cp--;
if (file_printf(ms, ", from '%.*s'",
(int)(cp - cname), cname) == -1)
return size;
*flags |= FLAGS_DID_CORE;
return size;
tryanother:
;
}
}
break;
}
#endif
return offset;
}
/* SunOS 5.x hardware capability descriptions */
typedef struct cap_desc {
uint64_t cd_mask;
const char *cd_name;
} cap_desc_t;
static const cap_desc_t cap_desc_sparc[] = {
{ AV_SPARC_MUL32, "MUL32" },
{ AV_SPARC_DIV32, "DIV32" },
{ AV_SPARC_FSMULD, "FSMULD" },
{ AV_SPARC_V8PLUS, "V8PLUS" },
{ AV_SPARC_POPC, "POPC" },
{ AV_SPARC_VIS, "VIS" },
{ AV_SPARC_VIS2, "VIS2" },
{ AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" },
{ AV_SPARC_FMAF, "FMAF" },
{ AV_SPARC_FJFMAU, "FJFMAU" },
{ AV_SPARC_IMA, "IMA" },
{ 0, NULL }
};
static const cap_desc_t cap_desc_386[] = {
{ AV_386_FPU, "FPU" },
{ AV_386_TSC, "TSC" },
{ AV_386_CX8, "CX8" },
{ AV_386_SEP, "SEP" },
{ AV_386_AMD_SYSC, "AMD_SYSC" },
{ AV_386_CMOV, "CMOV" },
{ AV_386_MMX, "MMX" },
{ AV_386_AMD_MMX, "AMD_MMX" },
{ AV_386_AMD_3DNow, "AMD_3DNow" },
{ AV_386_AMD_3DNowx, "AMD_3DNowx" },
{ AV_386_FXSR, "FXSR" },
{ AV_386_SSE, "SSE" },
{ AV_386_SSE2, "SSE2" },
{ AV_386_PAUSE, "PAUSE" },
{ AV_386_SSE3, "SSE3" },
{ AV_386_MON, "MON" },
{ AV_386_CX16, "CX16" },
{ AV_386_AHF, "AHF" },
{ AV_386_TSCP, "TSCP" },
{ AV_386_AMD_SSE4A, "AMD_SSE4A" },
{ AV_386_POPCNT, "POPCNT" },
{ AV_386_AMD_LZCNT, "AMD_LZCNT" },
{ AV_386_SSSE3, "SSSE3" },
{ AV_386_SSE4_1, "SSE4.1" },
{ AV_386_SSE4_2, "SSE4.2" },
{ 0, NULL }
};
private int
doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num,
size_t size, off_t fsize, int *flags, int mach, int strtab)
{
Elf32_Shdr sh32;
Elf64_Shdr sh64;
int stripped = 1;
size_t nbadcap = 0;
void *nbuf;
off_t noff, coff, name_off;
uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilities */
uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilities */
char name[50];
if (size != xsh_sizeof) {
if (file_printf(ms, ", corrupted section header size") == -1)
return -1;
return 0;
}
/* Read offset of name section to be able to read section names later */
if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) == -1) {
file_badread(ms);
return -1;
}
name_off = xsh_offset;
for ( ; num; num--) {
/* Read the name of this section. */
if (pread(fd, name, sizeof(name), name_off + xsh_name) == -1) {
file_badread(ms);
return -1;
}
name[sizeof(name) - 1] = '\0';
if (strcmp(name, ".debug_info") == 0)
stripped = 0;
if (pread(fd, xsh_addr, xsh_sizeof, off) == -1) {
file_badread(ms);
return -1;
}
off += size;
/* Things we can determine before we seek */
switch (xsh_type) {
case SHT_SYMTAB:
#if 0
case SHT_DYNSYM:
#endif
stripped = 0;
break;
default:
if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) {
/* Perhaps warn here */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xsh_type) {
case SHT_NOTE:
if ((nbuf = malloc(xsh_size)) == NULL) {
file_error(ms, errno, "Cannot allocate memory"
" for note");
return -1;
}
if (pread(fd, nbuf, xsh_size, xsh_offset) == -1) {
file_badread(ms);
free(nbuf);
return -1;
}
noff = 0;
for (;;) {
if (noff >= (off_t)xsh_size)
break;
noff = donote(ms, nbuf, (size_t)noff,
xsh_size, clazz, swap, 4, flags);
if (noff == 0)
break;
}
free(nbuf);
break;
case SHT_SUNW_cap:
switch (mach) {
case EM_SPARC:
case EM_SPARCV9:
case EM_IA_64:
case EM_386:
case EM_AMD64:
break;
default:
goto skip;
}
if (nbadcap > 5)
break;
if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) {
file_badseek(ms);
return -1;
}
coff = 0;
for (;;) {
Elf32_Cap cap32;
Elf64_Cap cap64;
char cbuf[/*CONSTCOND*/
MAX(sizeof cap32, sizeof cap64)];
if ((coff += xcap_sizeof) > (off_t)xsh_size)
break;
if (read(fd, cbuf, (size_t)xcap_sizeof) !=
(ssize_t)xcap_sizeof) {
file_badread(ms);
return -1;
}
if (cbuf[0] == 'A') {
#ifdef notyet
char *p = cbuf + 1;
uint32_t len, tag;
memcpy(&len, p, sizeof(len));
p += 4;
len = getu32(swap, len);
if (memcmp("gnu", p, 3) != 0) {
if (file_printf(ms,
", unknown capability %.3s", p)
== -1)
return -1;
break;
}
p += strlen(p) + 1;
tag = *p++;
memcpy(&len, p, sizeof(len));
p += 4;
len = getu32(swap, len);
if (tag != 1) {
if (file_printf(ms, ", unknown gnu"
" capability tag %d", tag)
== -1)
return -1;
break;
}
// gnu attributes
#endif
break;
}
(void)memcpy(xcap_addr, cbuf, xcap_sizeof);
switch (xcap_tag) {
case CA_SUNW_NULL:
break;
case CA_SUNW_HW_1:
cap_hw1 |= xcap_val;
break;
case CA_SUNW_SF_1:
cap_sf1 |= xcap_val;
break;
default:
if (file_printf(ms,
", with unknown capability "
"0x%" INT64_T_FORMAT "x = 0x%"
INT64_T_FORMAT "x",
(unsigned long long)xcap_tag,
(unsigned long long)xcap_val) == -1)
return -1;
if (nbadcap++ > 2)
coff = xsh_size;
break;
}
}
/*FALLTHROUGH*/
skip:
default:
break;
}
}
if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1)
return -1;
if (cap_hw1) {
const cap_desc_t *cdp;
switch (mach) {
case EM_SPARC:
case EM_SPARC32PLUS:
case EM_SPARCV9:
cdp = cap_desc_sparc;
break;
case EM_386:
case EM_IA_64:
case EM_AMD64:
cdp = cap_desc_386;
break;
default:
cdp = NULL;
break;
}
if (file_printf(ms, ", uses") == -1)
return -1;
if (cdp) {
while (cdp->cd_name) {
if (cap_hw1 & cdp->cd_mask) {
if (file_printf(ms,
" %s", cdp->cd_name) == -1)
return -1;
cap_hw1 &= ~cdp->cd_mask;
}
++cdp;
}
if (cap_hw1)
if (file_printf(ms,
" unknown hardware capability 0x%"
INT64_T_FORMAT "x",
(unsigned long long)cap_hw1) == -1)
return -1;
} else {
if (file_printf(ms,
" hardware capability 0x%" INT64_T_FORMAT "x",
(unsigned long long)cap_hw1) == -1)
return -1;
}
}
if (cap_sf1) {
if (cap_sf1 & SF1_SUNW_FPUSED) {
if (file_printf(ms,
(cap_sf1 & SF1_SUNW_FPKNWN)
? ", uses frame pointer"
: ", not known to use frame pointer") == -1)
return -1;
}
cap_sf1 &= ~SF1_SUNW_MASK;
if (cap_sf1)
if (file_printf(ms,
", with unknown software capability 0x%"
INT64_T_FORMAT "x",
(unsigned long long)cap_sf1) == -1)
return -1;
}
return 0;
}
/*
* Look through the program headers of an executable image, searching
* for a PT_INTERP section; if one is found, it's dynamically linked,
* otherwise it's statically linked.
*/
private int
dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
int num, size_t size, off_t fsize, int *flags, int sh_num)
{
Elf32_Phdr ph32;
Elf64_Phdr ph64;
const char *linking_style = "statically";
const char *interp = "";
unsigned char nbuf[BUFSIZ];
char ibuf[BUFSIZ];
ssize_t bufsize;
size_t offset, align, len;
if (size != xph_sizeof) {
if (file_printf(ms, ", corrupted program header size") == -1)
return -1;
return 0;
}
for ( ; num; num--) {
if (pread(fd, xph_addr, xph_sizeof, off) == -1) {
file_badread(ms);
return -1;
}
off += size;
bufsize = 0;
align = 4;
/* Things we can determine before we seek */
switch (xph_type) {
case PT_DYNAMIC:
linking_style = "dynamically";
break;
case PT_NOTE:
if (sh_num) /* Did this through section headers */
continue;
if (((align = xph_align) & 0x80000000UL) != 0 ||
align < 4) {
if (file_printf(ms,
", invalid note alignment 0x%lx",
(unsigned long)align) == -1)
return -1;
align = 4;
}
/*FALLTHROUGH*/
case PT_INTERP:
len = xph_filesz < sizeof(nbuf) ? xph_filesz
: sizeof(nbuf);
bufsize = pread(fd, nbuf, len, xph_offset);
if (bufsize == -1) {
file_badread(ms);
return -1;
}
break;
default:
if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
/* Maybe warn here? */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xph_type) {
case PT_INTERP:
if (bufsize && nbuf[0]) {
nbuf[bufsize - 1] = '\0';
interp = (const char *)nbuf;
} else
interp = "*empty*";
break;
case PT_NOTE:
/*
* This is a PT_NOTE section; loop through all the notes
* in the section.
*/
offset = 0;
for (;;) {
if (offset >= (size_t)bufsize)
break;
offset = donote(ms, nbuf, offset,
(size_t)bufsize, clazz, swap, align,
flags);
if (offset == 0)
break;
}
break;
default:
break;
}
}
if (file_printf(ms, ", %s linked", linking_style)
== -1)
return -1;
if (interp[0])
if (file_printf(ms, ", interpreter %s",
file_printable(ibuf, sizeof(ibuf), interp)) == -1)
return -1;
return 0;
}
protected int
file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf,
size_t nbytes)
{
union {
int32_t l;
char c[sizeof (int32_t)];
} u;
int clazz;
int swap;
struct stat st;
off_t fsize;
int flags = 0;
Elf32_Ehdr elf32hdr;
Elf64_Ehdr elf64hdr;
uint16_t type, phnum, shnum;
if (ms->flags & (MAGIC_MIME|MAGIC_APPLE))
return 0;
/*
 * ELF executables have multiple section headers in arbitrary
 * file locations, so file(1) cannot easily locate them up front.
 * Instead we traverse through all the section headers until a
 * symbol table section is found, or else conclude the binary is
 * stripped.
 * Return immediately if it's not ELF (so we avoid pipe2file unless needed).
*/
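/*
 * The ELF magic is the four bytes 0x7f 'E' 'L' 'F'; the extra OLFMAG1
 * case below also accepts the historical OpenBSD "OLF" variant, which
 * replaced the 'E' with an 'O'.
 */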
if (buf[EI_MAG0] != ELFMAG0
|| (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1)
|| buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3)
return 0;
/*
* If we cannot seek, it must be a pipe, socket or fifo.
*/
if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE))
fd = file_pipe2file(ms, fd, buf, nbytes);
if (fstat(fd, &st) == -1) {
file_badread(ms);
return -1;
}
if (S_ISREG(st.st_mode) || st.st_size != 0)
fsize = st.st_size;
else
fsize = SIZE_UNKNOWN;
clazz = buf[EI_CLASS];
switch (clazz) {
case ELFCLASS32:
#undef elf_getu
#define elf_getu(a, b) elf_getu32(a, b)
#undef elfhdr
#define elfhdr elf32hdr
#include "elfclass.h"
case ELFCLASS64:
#undef elf_getu
#define elf_getu(a, b) elf_getu64(a, b)
#undef elfhdr
#define elfhdr elf64hdr
#include "elfclass.h"
default:
if (file_printf(ms, ", unknown class %d", clazz) == -1)
return -1;
break;
}
return 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_2401_0 |
crossvul-cpp_data_bad_3160_1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/***************************************************************************
* Copyright (C) 2013-2017 Ping Identity Corporation
* All rights reserved.
*
* For further information please contact:
*
* Ping Identity Corporation
* 1099 18th St Suite 2950
* Denver, CO 80202
* 303.468.2900
* http://www.pingidentity.com
*
* DISCLAIMER OF WARRANTIES:
*
* THE SOFTWARE PROVIDED HEREUNDER IS PROVIDED ON AN "AS IS" BASIS, WITHOUT
* ANY WARRANTIES OR REPRESENTATIONS EXPRESS, IMPLIED OR STATUTORY; INCLUDING,
* WITHOUT LIMITATION, WARRANTIES OF QUALITY, PERFORMANCE, NONINFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. NOR ARE THERE ANY
* WARRANTIES CREATED BY A COURSE OR DEALING, COURSE OF PERFORMANCE OR TRADE
* USAGE. FURTHERMORE, THERE ARE NO WARRANTIES THAT THE SOFTWARE WILL MEET
* YOUR NEEDS OR BE FREE FROM ERRORS, OR THAT THE OPERATION OF THE SOFTWARE
* WILL BE UNINTERRUPTED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Initially based on mod_auth_cas.c:
* https://github.com/Jasig/mod_auth_cas
*
* Other code copied/borrowed/adapted:
* shared memory caching: mod_auth_mellon
*
* @Author: Hans Zandbelt - hzandbelt@pingidentity.com
*
**************************************************************************/
#include "apr_hash.h"
#include "apr_strings.h"
#include "ap_config.h"
#include "ap_provider.h"
#include "apr_lib.h"
#include "apr_file_io.h"
#include "apr_sha1.h"
#include "apr_base64.h"
#include "httpd.h"
#include "http_core.h"
#include "http_config.h"
#include "http_log.h"
#include "http_protocol.h"
#include "http_request.h"
#include "mod_auth_openidc.h"
// TODO:
// - sort out oidc_cfg vs. oidc_dir_cfg stuff
// - rigid input checking on discovery responses
// - check self-issued support
// - README.quickstart
// - refresh metadata once-per too? (for non-signing key changes)
extern module AP_MODULE_DECLARE_DATA auth_openidc_module;
/*
* clean any suspicious headers in the HTTP request sent by the user agent
*/
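/*
 * For instance, with a (hypothetical) claim_prefix of "OIDC_CLAIM_"
 * and authn_header of "X-Remote-User", an incoming request that
 * itself carries "OIDC_CLAIM_email" or "X-Remote-User" headers gets
 * those headers dropped here, so a client cannot spoof the claims or
 * user identity that this module passes on to the backend.
 */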
static void oidc_scrub_request_headers(request_rec *r, const char *claim_prefix,
const char *authn_header) {
const int prefix_len = claim_prefix ? strlen(claim_prefix) : 0;
/* get an array representation of the incoming HTTP headers */
const apr_array_header_t * const h = apr_table_elts(r->headers_in);
/* table to keep the non-suspicious headers */
apr_table_t *clean_headers = apr_table_make(r->pool, h->nelts);
/* loop over the incoming HTTP headers */
const apr_table_entry_t * const e = (const apr_table_entry_t *) h->elts;
int i;
for (i = 0; i < h->nelts; i++) {
const char * const k = e[i].key;
/* is this header's name equivalent to the header that mod_auth_openidc would set for the authenticated user? */
const int authn_header_matches = (k != NULL) && authn_header
&& (oidc_strnenvcmp(k, authn_header, -1) == 0);
/*
* would this header be interpreted as a mod_auth_openidc attribute? Note
 * that prefix_len will be zero if no claim_prefix is defined,
* so this will always be false. Also note that we do not
* scrub headers if the prefix is empty because every header
* would match.
*/
const int prefix_matches = (k != NULL) && prefix_len
&& (oidc_strnenvcmp(k, claim_prefix, prefix_len) == 0);
/* add to the clean_headers if non-suspicious, skip and report otherwise */
if (!prefix_matches && !authn_header_matches) {
apr_table_addn(clean_headers, k, e[i].val);
} else {
oidc_warn(r, "scrubbed suspicious request header (%s: %.32s)", k,
e[i].val);
}
}
/* overwrite the incoming headers with the cleaned result */
r->headers_in = clean_headers;
}
/*
* strip the session cookie from the headers sent to the application/backend
*/
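/*
 * For example, assuming an incoming header "Cookie: a=1; session=xyz"
 * and a strip list containing "session", the backend would receive
 * "Cookie: a=1" with the session cookie removed.
 */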
static void oidc_strip_cookies(request_rec *r) {
char *cookie, *ctx, *result = NULL;
const char *name = NULL;
int i;
apr_array_header_t *strip = oidc_dir_cfg_strip_cookies(r);
char *cookies = apr_pstrdup(r->pool,
(char *) apr_table_get(r->headers_in, "Cookie"));
if ((cookies != NULL) && (strip != NULL)) {
oidc_debug(r,
"looking for the following cookies to strip from cookie header: %s",
apr_array_pstrcat(r->pool, strip, ','));
cookie = apr_strtok(cookies, ";", &ctx);
do {
while (cookie != NULL && *cookie == ' ')
cookie++;
for (i = 0; i < strip->nelts; i++) {
name = ((const char**) strip->elts)[i];
if ((strncmp(cookie, name, strlen(name)) == 0)
&& (cookie[strlen(name)] == '=')) {
oidc_debug(r, "stripping: %s", name);
break;
}
}
if (i == strip->nelts) {
result =
result ?
apr_psprintf(r->pool, "%s;%s", result, cookie) :
cookie;
}
cookie = apr_strtok(NULL, ";", &ctx);
} while (cookie != NULL);
if (result != NULL) {
oidc_debug(r, "set cookie to backend to: %s",
result ?
apr_psprintf(r->pool, "\"%s\"", result) : "<null>");
apr_table_set(r->headers_in, "Cookie", result);
} else {
oidc_debug(r, "unsetting all cookies to backend");
apr_table_unset(r->headers_in, "Cookie");
}
}
}
#define OIDC_SHA1_LEN 20
/*
* calculates a hash value based on request fingerprint plus a provided nonce string.
*/
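/*
 * In outline the value computed below is:
 *
 *	base64url( SHA1( X_FORWARDED_FOR || USER_AGENT || nonce ) )
 *
 * where either header value is simply skipped when absent, so the
 * hash is bound to (an approximation of) the user's browser.
 */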
static char *oidc_get_browser_state_hash(request_rec *r, const char *nonce) {
oidc_debug(r, "enter");
/* helper to hold to header values */
const char *value = NULL;
/* the hash context */
apr_sha1_ctx_t sha1;
/* Initialize the hash context */
apr_sha1_init(&sha1);
/* get the X_FORWARDED_FOR header value */
value = (char *) apr_table_get(r->headers_in, "X_FORWARDED_FOR");
/* if we have a value for this header, concat it to the hash input */
if (value != NULL)
apr_sha1_update(&sha1, value, strlen(value));
/* get the USER_AGENT header value */
value = (char *) apr_table_get(r->headers_in, "USER_AGENT");
/* if we have a value for this header, concat it to the hash input */
if (value != NULL)
apr_sha1_update(&sha1, value, strlen(value));
/* get the remote client IP address or host name */
/*
int remotehost_is_ip;
value = ap_get_remote_host(r->connection, r->per_dir_config,
REMOTE_NOLOOKUP, &remotehost_is_ip);
apr_sha1_update(&sha1, value, strlen(value));
*/
/* concat the nonce parameter to the hash input */
apr_sha1_update(&sha1, nonce, strlen(nonce));
/* finalize the hash input and calculate the resulting hash output */
unsigned char hash[OIDC_SHA1_LEN];
apr_sha1_final(hash, &sha1);
/* base64url-encode the resulting hash and return it */
char *result = NULL;
oidc_base64url_encode(r, &result, (const char *) hash, OIDC_SHA1_LEN, TRUE);
return result;
}
/*
* return the name for the state cookie
*/
static char *oidc_get_state_cookie_name(request_rec *r, const char *state) {
return apr_psprintf(r->pool, "%s%s", OIDCStateCookiePrefix, state);
}
/*
* return the static provider configuration, i.e. from a metadata URL or configuration primitives
*/
static apr_byte_t oidc_provider_static_config(request_rec *r, oidc_cfg *c,
oidc_provider_t **provider) {
json_t *j_provider = NULL;
const char *s_json = NULL;
/* see if we should configure a static provider based on external (cached) metadata */
if ((c->metadata_dir != NULL) || (c->provider.metadata_url == NULL)) {
*provider = &c->provider;
return TRUE;
}
c->cache->get(r, OIDC_CACHE_SECTION_PROVIDER,
oidc_util_escape_string(r, c->provider.metadata_url), &s_json);
if (s_json == NULL) {
if (oidc_metadata_provider_retrieve(r, c, NULL,
c->provider.metadata_url, &j_provider, &s_json) == FALSE) {
oidc_error(r, "could not retrieve metadata from url: %s",
c->provider.metadata_url);
return FALSE;
}
c->cache->set(r, OIDC_CACHE_SECTION_PROVIDER,
oidc_util_escape_string(r, c->provider.metadata_url), s_json,
apr_time_now()
+ (c->provider_metadata_refresh_interval <= 0 ?
apr_time_from_sec(
OIDC_CACHE_PROVIDER_METADATA_EXPIRY_DEFAULT) :
c->provider_metadata_refresh_interval));
} else {
/* correct parsing and validation was already done when it was put in the cache */
j_provider = json_loads(s_json, 0, 0);
}
*provider = apr_pcalloc(r->pool, sizeof(oidc_provider_t));
memcpy(*provider, &c->provider, sizeof(oidc_provider_t));
if (oidc_metadata_provider_parse(r, c, j_provider, *provider) == FALSE) {
oidc_error(r, "could not parse metadata from url: %s",
c->provider.metadata_url);
if (j_provider)
json_decref(j_provider);
return FALSE;
}
json_decref(j_provider);
return TRUE;
}
/*
* return the oidc_provider_t struct for the specified issuer
*/
static oidc_provider_t *oidc_get_provider_for_issuer(request_rec *r,
oidc_cfg *c, const char *issuer, apr_byte_t allow_discovery) {
/* by default we'll assume that we're dealing with a single statically configured OP */
oidc_provider_t *provider = NULL;
if (oidc_provider_static_config(r, c, &provider) == FALSE)
return NULL;
/* unless a metadata directory was configured, so we'll try and get the provider settings from there */
if (c->metadata_dir != NULL) {
/* try and get metadata from the metadata directory for the OP that sent this response */
if ((oidc_metadata_get(r, c, issuer, &provider, allow_discovery)
== FALSE) || (provider == NULL)) {
/* we know nothing about this OP/issuer */
oidc_error(r, "no provider metadata found for issuer \"%s\"",
issuer);
return NULL;
}
}
return provider;
}
/*
* find out whether the request is a response from an IDP discovery page
*/
static apr_byte_t oidc_is_discovery_response(request_rec *r, oidc_cfg *cfg) {
/*
* prereq: this is a call to the configured redirect_uri, now see if:
* the OIDC_DISC_OP_PARAM is present
*/
return oidc_util_request_has_parameter(r, OIDC_DISC_OP_PARAM)
|| oidc_util_request_has_parameter(r, OIDC_DISC_USER_PARAM);
}
/*
* return the HTTP method being called: only for POST data persistence purposes
*/
static const char *oidc_original_request_method(request_rec *r, oidc_cfg *cfg,
apr_byte_t handle_discovery_response) {
const char *method = OIDC_METHOD_GET;
char *m = NULL;
if ((handle_discovery_response == TRUE)
&& (oidc_util_request_matches_url(r, cfg->redirect_uri))
&& (oidc_is_discovery_response(r, cfg))) {
oidc_util_get_request_parameter(r, OIDC_DISC_RM_PARAM, &m);
if (m != NULL)
method = apr_pstrdup(r->pool, m);
} else {
/*
* if POST preserve is not enabled for this location, there's no point in preserving
 * the method either, since that would result in POSTing empty data on return;
* so we revert to legacy behavior
*/
if (oidc_cfg_dir_preserve_post(r) == 0)
return OIDC_METHOD_GET;
const char *content_type = apr_table_get(r->headers_in, "Content-Type");
if ((r->method_number == M_POST)
&& (apr_strnatcmp(content_type,
"application/x-www-form-urlencoded") == 0))
method = OIDC_METHOD_FORM_POST;
}
oidc_debug(r, "return: %s", method);
return method;
}
/*
* send an OpenID Connect authorization request to the specified provider preserving POST parameters using HTML5 storage
*/
apr_byte_t oidc_post_preserve_javascript(request_rec *r, const char *location,
char **javascript, char **javascript_method) {
if (oidc_cfg_dir_preserve_post(r) == 0)
return FALSE;
oidc_debug(r, "enter");
oidc_cfg *cfg = ap_get_module_config(r->server->module_config,
&auth_openidc_module);
const char *method = oidc_original_request_method(r, cfg, FALSE);
if (apr_strnatcmp(method, OIDC_METHOD_FORM_POST) != 0)
return FALSE;
/* read the parameters that are POST-ed to us */
apr_table_t *params = apr_table_make(r->pool, 8);
if (oidc_util_read_post_params(r, params) == FALSE) {
oidc_error(r, "something went wrong when reading the POST parameters");
return FALSE;
}
const apr_array_header_t *arr = apr_table_elts(params);
const apr_table_entry_t *elts = (const apr_table_entry_t*) arr->elts;
int i;
char *json = "";
for (i = 0; i < arr->nelts; i++) {
json = apr_psprintf(r->pool, "%s'%s': '%s'%s", json,
oidc_util_escape_string(r, elts[i].key),
oidc_util_escape_string(r, elts[i].val),
i < arr->nelts - 1 ? "," : "");
}
json = apr_psprintf(r->pool, "{ %s }", json);
const char *jmethod = "preserveOnLoad";
const char *jscript =
apr_psprintf(r->pool,
" <script type=\"text/javascript\">\n"
" function %s() {\n"
" localStorage.setItem('mod_auth_openidc_preserve_post_params', JSON.stringify(%s));\n"
" %s"
" }\n"
" </script>\n", jmethod, json,
location ?
apr_psprintf(r->pool, "window.location='%s';\n",
location) :
"");
if (location == NULL) {
if (javascript_method)
*javascript_method = apr_pstrdup(r->pool, jmethod);
if (javascript)
*javascript = apr_pstrdup(r->pool, jscript);
} else {
oidc_util_html_send(r, "Preserving...", jscript, jmethod,
"<p>Preserving...</p>", DONE);
}
return TRUE;
}
/*
* restore POST parameters on original_url from HTML5 local storage
*/
static int oidc_request_post_preserved_restore(request_rec *r,
const char *original_url) {
oidc_debug(r, "enter: original_url=%s", original_url);
const char *method = "postOnLoad";
const char *script =
apr_psprintf(r->pool,
" <script type=\"text/javascript\">\n"
" function %s() {\n"
" var mod_auth_openidc_preserve_post_params = JSON.parse(localStorage.getItem('mod_auth_openidc_preserve_post_params'));\n"
" localStorage.removeItem('mod_auth_openidc_preserve_post_params');\n"
" for (var key in mod_auth_openidc_preserve_post_params) {\n"
" var input = document.createElement(\"input\");\n"
" input.name = decodeURIComponent(key);\n"
" input.value = decodeURIComponent(mod_auth_openidc_preserve_post_params[key]);\n"
" input.type = \"hidden\";\n"
" document.forms[0].appendChild(input);\n"
" }\n"
" document.forms[0].action = '%s';\n"
" document.forms[0].submit();\n"
" }\n"
" </script>\n", method, original_url);
const char *body = " <p>Restoring...</p>\n"
" <form method=\"post\"></form>\n";
return oidc_util_html_send(r, "Restoring...", script, method, body,
DONE);
}
/*
* parse state that was sent to us by the issuer
*/
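/*
 * In outline: peek at the JWT header for the algorithm, derive a
 * symmetric key from the configured client_secret, parse and validate
 * the JWT (issuer lookup, exp/iat), require the "rfp" claim to equal
 * "iss", resolve "target_link_uri", replay-protect on "jti" via the
 * cache, and finally verify the signature against the provider's JWKs
 * (or the derived symmetric key).
 */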
static apr_byte_t oidc_unsolicited_proto_state(request_rec *r, oidc_cfg *c,
const char *state, json_t **proto_state) {
char *alg = NULL;
oidc_debug(r, "enter: state header=%s",
oidc_proto_peek_jwt_header(r, state, &alg));
oidc_jose_error_t err;
oidc_jwk_t *jwk = NULL;
if (oidc_util_create_symmetric_key(r, c->provider.client_secret,
oidc_alg2keysize(alg), "sha256",
TRUE, &jwk) == FALSE)
return FALSE;
oidc_jwt_t *jwt = NULL;
if (oidc_jwt_parse(r->pool, state, &jwt,
oidc_util_merge_symmetric_key(r->pool, c->private_keys, jwk),
&err) == FALSE) {
oidc_error(r,
"could not parse JWT from state: invalid unsolicited response: %s",
oidc_jose_e2s(r->pool, err));
return FALSE;
}
oidc_jwk_destroy(jwk);
oidc_debug(r, "successfully parsed JWT from state");
if (jwt->payload.iss == NULL) {
oidc_error(r, "no \"iss\" could be retrieved from JWT state, aborting");
oidc_jwt_destroy(jwt);
return FALSE;
}
oidc_provider_t *provider = oidc_get_provider_for_issuer(r, c,
jwt->payload.iss, FALSE);
if (provider == NULL) {
oidc_jwt_destroy(jwt);
return FALSE;
}
/* validate the state JWT, validating optional exp + iat */
if (oidc_proto_validate_jwt(r, jwt, provider->issuer, FALSE, FALSE,
provider->idtoken_iat_slack) == FALSE) {
oidc_jwt_destroy(jwt);
return FALSE;
}
char *rfp = NULL;
if (oidc_jose_get_string(r->pool, jwt->payload.value.json, "rfp", TRUE,
&rfp, &err) == FALSE) {
oidc_error(r,
"no \"rfp\" claim could be retrieved from JWT state, aborting: %s",
oidc_jose_e2s(r->pool, err));
oidc_jwt_destroy(jwt);
return FALSE;
}
if (apr_strnatcmp(rfp, "iss") != 0) {
oidc_error(r, "\"rfp\" (%s) does not match \"iss\", aborting", rfp);
oidc_jwt_destroy(jwt);
return FALSE;
}
char *target_link_uri = NULL;
oidc_jose_get_string(r->pool, jwt->payload.value.json, "target_link_uri",
FALSE, &target_link_uri, NULL);
if (target_link_uri == NULL) {
if (c->default_sso_url == NULL) {
oidc_error(r,
"no \"target_link_uri\" claim could be retrieved from JWT state and no OIDCDefaultURL is set, aborting");
oidc_jwt_destroy(jwt);
return FALSE;
}
target_link_uri = c->default_sso_url;
}
if (c->metadata_dir != NULL) {
if ((oidc_metadata_get(r, c, jwt->payload.iss, &provider, FALSE)
== FALSE) || (provider == NULL)) {
oidc_error(r, "no provider metadata found for provider \"%s\"",
jwt->payload.iss);
oidc_jwt_destroy(jwt);
return FALSE;
}
}
char *jti = NULL;
oidc_jose_get_string(r->pool, jwt->payload.value.json, "jti", FALSE, &jti,
NULL);
if (jti == NULL) {
char *cser = oidc_jwt_serialize(r->pool, jwt, &err);
if (cser == NULL)
return FALSE;
if (oidc_util_hash_string_and_base64url_encode(r, "sha256", cser,
&jti) == FALSE) {
oidc_error(r,
"oidc_util_hash_string_and_base64url_encode returned an error");
return FALSE;
}
}
const char *replay = NULL;
c->cache->get(r, OIDC_CACHE_SECTION_JTI, jti, &replay);
if (replay != NULL) {
oidc_error(r,
"the jti value (%s) passed in the browser state was found in the cache already; possible replay attack!?",
jti);
oidc_jwt_destroy(jwt);
return FALSE;
}
/* the jti is cached for twice the configured iat slack (the replay window extends on either side of token issuance) plus 10 seconds for safety */
apr_time_t jti_cache_duration = apr_time_from_sec(
provider->idtoken_iat_slack * 2 + 10);
/* store it in the cache for the calculated duration */
c->cache->set(r, OIDC_CACHE_SECTION_JTI, jti, jti,
apr_time_now() + jti_cache_duration);
oidc_debug(r,
"jti \"%s\" validated successfully and is now cached for %" APR_TIME_T_FMT " seconds",
jti, apr_time_sec(jti_cache_duration));
jwk = NULL;
if (oidc_util_create_symmetric_key(r, c->provider.client_secret, 0,
NULL, TRUE, &jwk) == FALSE)
return FALSE;
oidc_jwks_uri_t jwks_uri = { provider->jwks_uri,
provider->jwks_refresh_interval, provider->ssl_validate_server };
if (oidc_proto_jwt_verify(r, c, jwt, &jwks_uri,
oidc_util_merge_symmetric_key(r->pool, NULL, jwk)) == FALSE) {
oidc_error(r, "state JWT could not be validated, aborting");
oidc_jwt_destroy(jwt);
return FALSE;
}
oidc_jwk_destroy(jwk);
oidc_debug(r, "successfully verified state JWT");
*proto_state = json_object();
json_object_set_new(*proto_state, "issuer", json_string(jwt->payload.iss));
json_object_set_new(*proto_state, "original_url",
json_string(target_link_uri));
json_object_set_new(*proto_state, "original_method", json_string("get"));
json_object_set_new(*proto_state, "response_mode",
json_string(provider->response_mode));
json_object_set_new(*proto_state, "response_type",
json_string(provider->response_type));
json_object_set_new(*proto_state, "timestamp",
json_integer(apr_time_sec(apr_time_now())));
oidc_jwt_destroy(jwt);
return TRUE;
}
/* obtain the state from the cookie value */
static json_t * oidc_get_state_from_cookie(request_rec *r, oidc_cfg *c,
const char *cookieValue) {
json_t *result = NULL;
oidc_util_jwt_verify(r, c->crypto_passphrase, cookieValue, &result);
return result;
}
static void oidc_clean_expired_state_cookies(request_rec *r, oidc_cfg *c) {
char *cookie, *tokenizerCtx;
char *cookies = apr_pstrdup(r->pool,
(char *) apr_table_get(r->headers_in, "Cookie"));
if (cookies != NULL) {
cookie = apr_strtok(cookies, ";", &tokenizerCtx);
do {
while (cookie != NULL && *cookie == ' ')
cookie++;
if (strstr(cookie, OIDCStateCookiePrefix) == cookie) {
char *cookieName = cookie;
while (cookie != NULL && *cookie != '=')
cookie++;
if (*cookie == '=') {
*cookie = '\0';
cookie++;
json_t *state = oidc_get_state_from_cookie(r, c, cookie);
if (state != NULL) {
json_t *v = json_object_get(state, "timestamp");
apr_time_t now = apr_time_sec(apr_time_now());
if (now > json_integer_value(v) + c->state_timeout) {
oidc_error(r, "state has expired");
oidc_util_set_cookie(r, cookieName, "", 0);
}
json_decref(state);
}
}
}
cookie = apr_strtok(NULL, ";", &tokenizerCtx);
} while (cookie != NULL);
}
}
/*
* restore the state that was maintained between authorization request and response in an encrypted cookie
*/
static apr_byte_t oidc_restore_proto_state(request_rec *r, oidc_cfg *c,
const char *state, json_t **proto_state) {
oidc_debug(r, "enter");
/* clean expired state cookies to avoid pollution */
oidc_clean_expired_state_cookies(r, c);
const char *cookieName = oidc_get_state_cookie_name(r, state);
/* get the state cookie value first */
char *cookieValue = oidc_util_get_cookie(r, cookieName);
if (cookieValue == NULL) {
oidc_error(r, "no \"%s\" state cookie found", cookieName);
return oidc_unsolicited_proto_state(r, c, state, proto_state);
}
/* clear state cookie because we don't need it anymore */
oidc_util_set_cookie(r, cookieName, "", 0);
*proto_state = oidc_get_state_from_cookie(r, c, cookieValue);
if (*proto_state == NULL)
return FALSE;
json_t *v = json_object_get(*proto_state, "nonce");
/* calculate the hash of the browser fingerprint concatenated with the nonce */
char *calc = oidc_get_browser_state_hash(r, json_string_value(v));
/* compare the calculated hash with the value provided in the authorization response */
if (apr_strnatcmp(calc, state) != 0) {
oidc_error(r,
"calculated state from cookie does not match state parameter passed back in URL: \"%s\" != \"%s\"",
state, calc);
json_decref(*proto_state);
return FALSE;
}
v = json_object_get(*proto_state, "timestamp");
apr_time_t now = apr_time_sec(apr_time_now());
/* check that the timestamp is not beyond the valid interval */
if (now > json_integer_value(v) + c->state_timeout) {
oidc_error(r, "state has expired");
json_decref(*proto_state);
return FALSE;
}
/* add the state */
json_object_set_new(*proto_state, "state", json_string(state));
char *s_value = json_dumps(*proto_state, JSON_ENCODE_ANY);
oidc_debug(r, "restored state: %s", s_value);
free(s_value);
/* we've made it */
return TRUE;
}
/*
* set the state that is maintained between an authorization request and an authorization response
* in a cookie in the browser that is cryptographically bound to that state
*/
static apr_byte_t oidc_authorization_request_set_cookie(request_rec *r,
oidc_cfg *c, const char *state, json_t *proto_state) {
/*
* create a cookie consisting of 8 elements:
 * random value, original URL, original method, issuer, response_type, response_mode, prompt and timestamp
* encoded as JSON
*/
/* encrypt the resulting JSON value */
char *cookieValue = NULL;
if (oidc_util_jwt_create(r, c->crypto_passphrase, proto_state,
&cookieValue) == FALSE)
return FALSE;
/* clean expired state cookies to avoid pollution */
oidc_clean_expired_state_cookies(r, c);
/* assemble the cookie name for the state cookie */
const char *cookieName = oidc_get_state_cookie_name(r, state);
/* set it as a cookie */
oidc_util_set_cookie(r, cookieName, cookieValue, -1);
//free(s_value);
return TRUE;
}
/*
* get the mod_auth_openidc related context from the (userdata in the) request
* (used for passing state between various Apache request processing stages and hook callbacks)
*/
static apr_table_t *oidc_request_state(request_rec *rr) {
/* our state is always stored in the main request */
request_rec *r = (rr->main != NULL) ? rr->main : rr;
/* our state is a table, get it */
apr_table_t *state = NULL;
apr_pool_userdata_get((void **) &state, OIDC_USERDATA_KEY, r->pool);
/* if it does not exist, we'll create a new table */
if (state == NULL) {
state = apr_table_make(r->pool, 5);
apr_pool_userdata_set(state, OIDC_USERDATA_KEY, NULL, r->pool);
}
/* return the resulting table, always non-null now */
return state;
}
/*
* set a name/value pair in the mod_auth_openidc-specific request context
* (used for passing state between various Apache request processing stages and hook callbacks)
*/
void oidc_request_state_set(request_rec *r, const char *key, const char *value) {
/* get a handle to the global state, which is a table */
apr_table_t *state = oidc_request_state(r);
/* put the name/value pair in that table */
apr_table_setn(state, key, value);
}
/*
* get a name/value pair from the mod_auth_openidc-specific request context
* (used for passing state between various Apache request processing stages and hook callbacks)
*/
const char *oidc_request_state_get(request_rec *r, const char *key) {
/* get a handle to the global state, which is a table */
apr_table_t *state = oidc_request_state(r);
/* return the value from the table */
return apr_table_get(state, key);
}
/*
* set the claims from a JSON object (c.q. id_token or user_info response) stored
* in the session in to HTTP headers passed on to the application
*/
static apr_byte_t oidc_set_app_claims(request_rec *r,
const oidc_cfg * const cfg, oidc_session_t *session,
const char *s_claims) {
json_t *j_claims = NULL;
/* decode the string-encoded attributes in to a JSON structure */
if (s_claims != NULL) {
json_error_t json_error;
j_claims = json_loads(s_claims, 0, &json_error);
if (j_claims == NULL) {
/* whoops, JSON has been corrupted */
oidc_error(r,
"unable to parse \"%s\" JSON stored in the session: %s",
s_claims, json_error.text);
return FALSE;
}
}
/* set the resolved claims a HTTP headers for the application */
if (j_claims != NULL) {
oidc_util_set_app_infos(r, j_claims, cfg->claim_prefix,
cfg->claim_delimiter, oidc_cfg_dir_pass_info_in_headers(r),
oidc_cfg_dir_pass_info_in_envvars(r));
/* release resources */
json_decref(j_claims);
}
return TRUE;
}
static int oidc_authenticate_user(request_rec *r, oidc_cfg *c,
oidc_provider_t *provider, const char *original_url,
const char *login_hint, const char *id_token_hint, const char *prompt,
const char *auth_request_params);
/*
* log message about max session duration
*/
static void oidc_log_session_expires(request_rec *r, apr_time_t session_expires) {
char buf[APR_RFC822_DATE_LEN + 1];
apr_rfc822_date(buf, session_expires);
oidc_debug(r, "session expires %s (in %" APR_TIME_T_FMT " secs from now)",
buf, apr_time_sec(session_expires - apr_time_now()));
}
/*
* check if maximum session duration was exceeded
*/
static int oidc_check_max_session_duration(request_rec *r, oidc_cfg *cfg,
oidc_session_t *session) {
const char *s_session_expires = NULL;
apr_time_t session_expires;
/* get the session expiry from the session data */
oidc_session_get(r, session, OIDC_SESSION_EXPIRES_SESSION_KEY,
&s_session_expires);
/* convert the string to a timestamp */
sscanf(s_session_expires, "%" APR_TIME_T_FMT, &session_expires);
/* check the expire timestamp against the current time */
if (apr_time_now() > session_expires) {
oidc_warn(r, "maximum session duration exceeded for user: %s",
session->remote_user);
oidc_session_kill(r, session);
return oidc_authenticate_user(r, cfg, NULL, oidc_get_current_url(r),
NULL,
NULL, NULL, NULL);
}
/* log message about max session duration */
oidc_log_session_expires(r, session_expires);
return OK;
}
/*
* validate received session cookie against the domain it was issued for:
*
 * this handles the case where the configured cache is the same single memcache, Redis, or file
* backend for different (virtual) hosts, or a client-side cookie protected with the same secret
*
* it also handles the case that a cookie is unexpectedly shared across multiple hosts in
* name-based virtual hosting even though the OP(s) would be the same
*/
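/*
 * E.g. a session cookie minted for "a.example.com" that is replayed
 * against "b.example.com" (same shared cache backend, no common
 * cookie domain configured) fails the comparison below and is
 * rejected.
 */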
static apr_byte_t oidc_check_cookie_domain(request_rec *r, oidc_cfg *cfg,
oidc_session_t *session) {
const char *c_cookie_domain =
cfg->cookie_domain ?
cfg->cookie_domain : oidc_get_current_url_host(r);
const char *s_cookie_domain = NULL;
oidc_session_get(r, session, OIDC_COOKIE_DOMAIN_SESSION_KEY,
&s_cookie_domain);
if ((s_cookie_domain == NULL)
|| (apr_strnatcmp(c_cookie_domain, s_cookie_domain) != 0)) {
oidc_warn(r,
"aborting: detected attempt to play cookie against a different domain/host than issued for! (issued=%s, current=%s)",
s_cookie_domain, c_cookie_domain);
return FALSE;
}
return TRUE;
}
/*
* get a handle to the provider configuration via the "issuer" stored in the session
*/
apr_byte_t oidc_get_provider_from_session(request_rec *r, oidc_cfg *c,
oidc_session_t *session, oidc_provider_t **provider) {
oidc_debug(r, "enter");
/* get the issuer value from the session state */
const char *issuer = NULL;
oidc_session_get(r, session, OIDC_ISSUER_SESSION_KEY, &issuer);
if (issuer == NULL) {
oidc_error(r, "session corrupted: no issuer found in session");
return FALSE;
}
/* get the provider info associated with the issuer value */
oidc_provider_t *p = oidc_get_provider_for_issuer(r, c, issuer, FALSE);
if (p == NULL) {
oidc_error(r, "session corrupted: no provider found for issuer: %s",
issuer);
return FALSE;
}
*provider = p;
return TRUE;
}
/*
* store the access token expiry timestamp in the session, based on the expires_in
*/
static void oidc_store_access_token_expiry(request_rec *r,
oidc_session_t *session, int expires_in) {
if (expires_in != -1) {
oidc_session_set(r, session, OIDC_ACCESSTOKEN_EXPIRES_SESSION_KEY,
apr_psprintf(r->pool, "%" APR_TIME_T_FMT,
apr_time_sec(apr_time_now()) + expires_in));
}
}
/*
* store claims resolved from the userinfo endpoint in the session
*/
static void oidc_store_userinfo_claims(request_rec *r, oidc_session_t *session,
oidc_provider_t *provider, const char *claims) {
oidc_debug(r, "enter");
/* see if we've resolved any claims */
if (claims != NULL) {
/*
		 * Successfully decoded a set of claims from the response so we can store them
* (well actually the stringified representation in the response)
* in the session context safely now
*/
oidc_session_set(r, session, OIDC_CLAIMS_SESSION_KEY, claims);
} else {
/*
* clear the existing claims because we could not refresh them
*/
oidc_session_set(r, session, OIDC_CLAIMS_SESSION_KEY, NULL);
}
/* store the last refresh time if we've configured a userinfo refresh interval */
if (provider->userinfo_refresh_interval > 0)
oidc_session_set(r, session, OIDC_USERINFO_LAST_REFRESH_SESSION_KEY,
apr_psprintf(r->pool, "%" APR_TIME_T_FMT, apr_time_now()));
}
/*
* execute refresh token grant to refresh the existing access token
*/
static apr_byte_t oidc_refresh_access_token(request_rec *r, oidc_cfg *c,
oidc_session_t *session, oidc_provider_t *provider,
char **new_access_token) {
oidc_debug(r, "enter");
/* get the refresh token that was stored in the session */
const char *refresh_token = NULL;
oidc_session_get(r, session, OIDC_REFRESHTOKEN_SESSION_KEY, &refresh_token);
if (refresh_token == NULL) {
oidc_warn(r,
"refresh token routine called but no refresh_token found in the session");
return FALSE;
}
/* elements returned in the refresh response */
char *s_id_token = NULL;
int expires_in = -1;
char *s_token_type = NULL;
char *s_access_token = NULL;
char *s_refresh_token = NULL;
/* refresh the tokens by calling the token endpoint */
if (oidc_proto_refresh_request(r, c, provider, refresh_token, &s_id_token,
&s_access_token, &s_token_type, &expires_in,
&s_refresh_token) == FALSE) {
oidc_error(r, "access_token could not be refreshed");
return FALSE;
}
/* store the new access_token in the session and discard the old one */
oidc_session_set(r, session, OIDC_ACCESSTOKEN_SESSION_KEY, s_access_token);
oidc_store_access_token_expiry(r, session, expires_in);
/* see if we need to return it as a parameter */
if (new_access_token != NULL)
*new_access_token = s_access_token;
/* if we have a new refresh token (rolling refresh), store it in the session and overwrite the old one */
if (s_refresh_token != NULL)
oidc_session_set(r, session, OIDC_REFRESHTOKEN_SESSION_KEY,
s_refresh_token);
return TRUE;
}
/*
* retrieve claims from the userinfo endpoint and return the stringified response
*/
static const char *oidc_retrieve_claims_from_userinfo_endpoint(request_rec *r,
oidc_cfg *c, oidc_provider_t *provider, const char *access_token,
oidc_session_t *session, char *id_token_sub) {
oidc_debug(r, "enter");
/* see if a userinfo endpoint is set, otherwise there's nothing to do for us */
if (provider->userinfo_endpoint_url == NULL) {
oidc_debug(r,
"not retrieving userinfo claims because userinfo_endpoint is not set");
return NULL;
}
/* see if there's an access token, otherwise we can't call the userinfo endpoint at all */
if (access_token == NULL) {
oidc_debug(r,
"not retrieving userinfo claims because access_token is not provided");
return NULL;
}
if ((id_token_sub == NULL) && (session != NULL)) {
// when refreshing claims from the userinfo endpoint
const char *s_id_token_claims = NULL;
oidc_session_get(r, session, OIDC_IDTOKEN_CLAIMS_SESSION_KEY,
&s_id_token_claims);
if (s_id_token_claims == NULL) {
oidc_error(r, "no id_token claims provided");
return NULL;
}
json_error_t json_error;
json_t *id_token_claims = json_loads(s_id_token_claims, 0, &json_error);
if (id_token_claims == NULL) {
oidc_error(r, "JSON parsing (json_loads) failed: %s (%s)",
json_error.text, s_id_token_claims);
return NULL;
}
oidc_jose_get_string(r->pool, id_token_claims, "sub", FALSE, &id_token_sub, NULL);
}
// TODO: return code should indicate whether the token expired or some other error occurred
// TODO: long-term: session storage should be JSON (with explicit types and less conversion, using standard routines)
/* try to get claims from the userinfo endpoint using the provided access token */
const char *result = NULL;
if (oidc_proto_resolve_userinfo(r, c, provider, id_token_sub, access_token,
&result) == FALSE) {
/* see if we have an existing session and we are refreshing the user info claims */
if (session != NULL) {
/* first call to user info endpoint failed, but the access token may have just expired, so refresh it */
char *access_token = NULL;
if (oidc_refresh_access_token(r, c, session, provider,
&access_token) == TRUE) {
/* try again with the new access token */
if (oidc_proto_resolve_userinfo(r, c, provider, id_token_sub, access_token,
&result) == FALSE) {
oidc_error(r,
"resolving user info claims with the refreshed access token failed, nothing will be stored in the session");
result = NULL;
}
} else {
oidc_warn(r,
"refreshing access token failed, claims will not be retrieved/refreshed from the userinfo endpoint");
result = NULL;
}
} else {
oidc_error(r,
"resolving user info claims with the existing/provided access token failed, nothing will be stored in the session");
result = NULL;
}
}
return result;
}
/*
* get (new) claims from the userinfo endpoint
*/
static apr_byte_t oidc_refresh_claims_from_userinfo_endpoint(request_rec *r,
oidc_cfg *cfg, oidc_session_t *session) {
oidc_provider_t *provider = NULL;
const char *claims = NULL;
char *access_token = NULL;
/* get the current provider info */
if (oidc_get_provider_from_session(r, cfg, session, &provider) == FALSE)
return FALSE;
/* see if we can do anything here, i.e. we have a userinfo endpoint and a refresh interval is configured */
apr_time_t interval = apr_time_from_sec(
provider->userinfo_refresh_interval);
oidc_debug(r, "userinfo_endpoint=%s, interval=%d",
provider->userinfo_endpoint_url,
provider->userinfo_refresh_interval);
if ((provider->userinfo_endpoint_url != NULL) && (interval > 0)) {
/* get the last refresh timestamp from the session info */
apr_time_t last_refresh = 0;
const char *s_last_refresh = NULL;
oidc_session_get(r, session, OIDC_USERINFO_LAST_REFRESH_SESSION_KEY,
&s_last_refresh);
if (s_last_refresh != NULL) {
sscanf(s_last_refresh, "%" APR_TIME_T_FMT, &last_refresh);
}
oidc_debug(r, "refresh needed in: %" APR_TIME_T_FMT " seconds",
apr_time_sec(last_refresh + interval - apr_time_now()));
/* see if we need to refresh again */
if (last_refresh + interval < apr_time_now()) {
/* get the current access token */
oidc_session_get(r, session, OIDC_ACCESSTOKEN_SESSION_KEY,
(const char **) &access_token);
/* retrieve the current claims */
claims = oidc_retrieve_claims_from_userinfo_endpoint(r, cfg,
provider, access_token, session, NULL);
/* store claims resolved from userinfo endpoint */
oidc_store_userinfo_claims(r, session, provider, claims);
			/* indicate that something changed */
return TRUE;
}
}
return FALSE;
}
/*
* copy the claims and id_token from the session to the request state and optionally return them
*/
static void oidc_copy_tokens_to_request_state(request_rec *r,
oidc_session_t *session, const char **s_id_token, const char **s_claims) {
const char *id_token = NULL, *claims = NULL;
oidc_session_get(r, session, OIDC_IDTOKEN_CLAIMS_SESSION_KEY, &id_token);
oidc_session_get(r, session, OIDC_CLAIMS_SESSION_KEY, &claims);
oidc_debug(r, "id_token=%s claims=%s", id_token, claims);
if (id_token != NULL) {
oidc_request_state_set(r, OIDC_IDTOKEN_CLAIMS_SESSION_KEY, id_token);
if (s_id_token != NULL)
*s_id_token = id_token;
}
if (claims != NULL) {
oidc_request_state_set(r, OIDC_CLAIMS_SESSION_KEY, claims);
if (s_claims != NULL)
*s_claims = claims;
}
}
/*
* handle the case where we have identified an existing authentication session for a user
*/
static int oidc_handle_existing_session(request_rec *r, oidc_cfg *cfg,
oidc_session_t *session) {
oidc_debug(r, "enter");
/* get the header name in which the remote user name needs to be passed */
char *authn_header = oidc_cfg_dir_authn_header(r);
int pass_headers = oidc_cfg_dir_pass_info_in_headers(r);
int pass_envvars = oidc_cfg_dir_pass_info_in_envvars(r);
/* verify current cookie domain against issued cookie domain */
if (oidc_check_cookie_domain(r, cfg, session) == FALSE)
return HTTP_UNAUTHORIZED;
/* check if the maximum session duration was exceeded */
int rc = oidc_check_max_session_duration(r, cfg, session);
if (rc != OK)
return rc;
/* if needed, refresh claims from the user info endpoint */
apr_byte_t needs_save = oidc_refresh_claims_from_userinfo_endpoint(r, cfg,
session);
/*
* we're going to pass the information that we have to the application,
* but first we need to scrub the headers that we're going to use for security reasons
*/
if (cfg->scrub_request_headers != 0) {
/* scrub all headers starting with OIDC_ first */
oidc_scrub_request_headers(r, OIDC_DEFAULT_HEADER_PREFIX,
oidc_cfg_dir_authn_header(r));
/*
* then see if the claim headers need to be removed on top of that
* (i.e. the prefix does not start with the default OIDC_)
*/
if ((strstr(cfg->claim_prefix, OIDC_DEFAULT_HEADER_PREFIX)
!= cfg->claim_prefix)) {
oidc_scrub_request_headers(r, cfg->claim_prefix, NULL);
}
}
/* set the user authentication HTTP header if set and required */
if ((r->user != NULL) && (authn_header != NULL))
oidc_util_set_header(r, authn_header, r->user);
const char *s_claims = NULL;
const char *s_id_token = NULL;
/* copy id_token and claims from session to request state and obtain their values */
oidc_copy_tokens_to_request_state(r, session, &s_id_token, &s_claims);
/* set the claims in the app headers */
if (oidc_set_app_claims(r, cfg, session, s_claims) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_CLAIMS)) {
/* set the id_token in the app headers */
if (oidc_set_app_claims(r, cfg, session, s_id_token) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
}
if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_PAYLOAD)) {
/* pass the id_token JSON object to the app in a header or environment variable */
oidc_util_set_app_info(r, "id_token_payload", s_id_token,
OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars);
}
	if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_SERIALIZED)) {
		if (cfg->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) {
			const char *s_id_token = NULL;
			/* get the compact serialized JWT from the session */
			oidc_session_get(r, session, OIDC_IDTOKEN_SESSION_KEY, &s_id_token);
			/* pass the compact serialized JWT to the app in a header or environment variable */
			oidc_util_set_app_info(r, "id_token", s_id_token,
					OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars);
		} else {
			/* only complain when passing the serialized id_token was actually requested */
			oidc_error(r,
					"session type \"client-cookie\" does not allow storing/passing the id_token; use \"OIDCSessionType server-cache\" for that");
		}
	}
/* set the refresh_token in the app headers/variables, if enabled for this location/directory */
const char *refresh_token = NULL;
oidc_session_get(r, session, OIDC_REFRESHTOKEN_SESSION_KEY, &refresh_token);
if ((oidc_cfg_dir_pass_refresh_token(r) != 0) && (refresh_token != NULL)) {
/* pass it to the app in a header or environment variable */
oidc_util_set_app_info(r, "refresh_token", refresh_token,
OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars);
}
/* set the access_token in the app headers/variables */
const char *access_token = NULL;
oidc_session_get(r, session, OIDC_ACCESSTOKEN_SESSION_KEY, &access_token);
if (access_token != NULL) {
/* pass it to the app in a header or environment variable */
oidc_util_set_app_info(r, "access_token", access_token,
OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars);
}
/* set the expiry timestamp in the app headers/variables */
const char *access_token_expires = NULL;
oidc_session_get(r, session, OIDC_ACCESSTOKEN_EXPIRES_SESSION_KEY,
&access_token_expires);
if (access_token_expires != NULL) {
/* pass it to the app in a header or environment variable */
oidc_util_set_app_info(r, "access_token_expires", access_token_expires,
OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars);
}
/*
* reset the session inactivity timer
* but only do this once per 10% of the inactivity timeout interval (with a max to 60 seconds)
* for performance reasons
*
	 * now there's a small chance that the session ends 10% (or a minute) earlier than configured/expected,
	 * i.e. when there's a request right after a save (so no update) and then no activity happens until
	 * a request comes in just before the session would expire
	 * ("right after" and "just before" refer to 10%-with-a-max-of-60-seconds of the inactivity interval after
	 * the start/last-update and before the expiry of the session respectively)
	 *
	 * this is deemed acceptable here because of the performance gain
*/
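	/*
	 * example: with an inactivity timeout of 300 seconds the slack is 30
	 * seconds, so the expiry below is only rewritten (and the session saved
	 * again) when the previous update is more than 30 seconds old
	 */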
apr_time_t interval = apr_time_from_sec(cfg->session_inactivity_timeout);
apr_time_t now = apr_time_now();
apr_time_t slack = interval / 10;
if (slack > apr_time_from_sec(60))
slack = apr_time_from_sec(60);
if (session->expiry - now < interval - slack) {
session->expiry = now + interval;
needs_save = TRUE;
}
/* check if something was updated in the session and we need to save it again */
if (needs_save)
if (oidc_session_save(r, session) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
/* return "user authenticated" status */
return OK;
}
/*
* helper function for basic/implicit client flows upon receiving an authorization response:
* check that it matches the state stored in the browser and return the variables associated
 * with the state, such as the original_url and the oidc_provider_t pointer for the OP.
*/
static apr_byte_t oidc_authorization_response_match_state(request_rec *r,
oidc_cfg *c, const char *state, struct oidc_provider_t **provider,
json_t **proto_state) {
oidc_debug(r, "enter (state=%s)", state);
if ((state == NULL) || (apr_strnatcmp(state, "") == 0)) {
oidc_error(r, "state parameter is not set");
return FALSE;
}
/* check the state parameter against what we stored in a cookie */
if (oidc_restore_proto_state(r, c, state, proto_state) == FALSE) {
oidc_error(r, "unable to restore state");
return FALSE;
}
*provider = oidc_get_provider_for_issuer(r, c,
json_string_value(json_object_get(*proto_state, "issuer")), FALSE);
return (*provider != NULL);
}
/*
* redirect the browser to the session logout endpoint
*/
static int oidc_session_redirect_parent_window_to_logout(request_rec *r,
oidc_cfg *c) {
oidc_debug(r, "enter");
char *java_script = apr_psprintf(r->pool,
" <script type=\"text/javascript\">\n"
" window.top.location.href = '%s?session=logout';\n"
" </script>\n", c->redirect_uri);
return oidc_util_html_send(r, "Redirecting...", java_script, NULL, NULL,
DONE);
}
/*
* handle an error returned by the OP
*/
static int oidc_authorization_response_error(request_rec *r, oidc_cfg *c,
json_t *proto_state, const char *error, const char *error_description) {
const char *prompt =
json_object_get(proto_state, "prompt") ?
apr_pstrdup(r->pool,
json_string_value(
json_object_get(proto_state, "prompt"))) :
NULL;
json_decref(proto_state);
if ((prompt != NULL) && (apr_strnatcmp(prompt, "none") == 0)) {
return oidc_session_redirect_parent_window_to_logout(r, c);
}
return oidc_util_html_send_error(r, c->error_template,
apr_psprintf(r->pool, "OpenID Connect Provider error: %s", error),
error_description, DONE);
}
/*
* set the unique user identifier that will be propagated in the Apache r->user and REMOTE_USER variables
*/
static apr_byte_t oidc_get_remote_user(request_rec *r, oidc_cfg *c,
oidc_provider_t *provider, oidc_jwt_t *jwt, char **user,
const char *s_claims) {
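	/*
	 * an OIDCRemoteUserClaim value that ends in "@" (e.g. "sub@") means that
	 * the resolved claim value gets the issuer appended to it, with any
	 * "https://" prefix stripped off, e.g. "1234@op.example.com"
	 * (the hostname here is just an illustrative value)
	 */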
char *issuer = provider->issuer;
char *claim_name = apr_pstrdup(r->pool, c->remote_user_claim.claim_name);
int n = strlen(claim_name);
int post_fix_with_issuer = (claim_name[n - 1] == '@');
if (post_fix_with_issuer) {
claim_name[n - 1] = '\0';
issuer =
(strstr(issuer, "https://") == NULL) ?
apr_pstrdup(r->pool, issuer) :
apr_pstrdup(r->pool, issuer + strlen("https://"));
}
/* extract the username claim (default: "sub") from the id_token payload or user claims */
char *username = NULL;
json_error_t json_error;
json_t *claims = json_loads(s_claims, 0, &json_error);
if (claims == NULL) {
username = apr_pstrdup(r->pool,
json_string_value(
json_object_get(jwt->payload.value.json, claim_name)));
} else {
oidc_util_json_merge(jwt->payload.value.json, claims);
username = apr_pstrdup(r->pool,
json_string_value(json_object_get(claims, claim_name)));
json_decref(claims);
}
if (username == NULL) {
oidc_error(r,
"OIDCRemoteUserClaim is set to \"%s\", but the id_token JSON payload and user claims did not contain a \"%s\" string",
c->remote_user_claim.claim_name, claim_name);
*user = NULL;
return FALSE;
}
/* set the unique username in the session (will propagate to r->user/REMOTE_USER) */
*user = post_fix_with_issuer ?
apr_psprintf(r->pool, "%s@%s", username, issuer) : username;
if (c->remote_user_claim.reg_exp != NULL) {
char *error_str = NULL;
if (oidc_util_regexp_first_match(r->pool, *user,
c->remote_user_claim.reg_exp, user, &error_str) == FALSE) {
oidc_error(r, "oidc_util_regexp_first_match failed: %s", error_str);
*user = NULL;
return FALSE;
}
}
oidc_debug(r, "set user to \"%s\"", *user);
return TRUE;
}
/*
* store resolved information in the session
*/
static apr_byte_t oidc_save_in_session(request_rec *r, oidc_cfg *c,
oidc_session_t *session, oidc_provider_t *provider,
const char *remoteUser, const char *id_token, oidc_jwt_t *id_token_jwt,
const char *claims, const char *access_token, const int expires_in,
const char *refresh_token, const char *session_state, const char *state,
const char *original_url) {
/* store the user in the session */
session->remote_user = remoteUser;
/* set the session expiry to the inactivity timeout */
session->expiry =
apr_time_now() + apr_time_from_sec(c->session_inactivity_timeout);
/* store the claims payload in the id_token for later reference */
oidc_session_set(r, session, OIDC_IDTOKEN_CLAIMS_SESSION_KEY,
id_token_jwt->payload.value.str);
if (c->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) {
/* store the compact serialized representation of the id_token for later reference */
oidc_session_set(r, session, OIDC_IDTOKEN_SESSION_KEY, id_token);
}
	/* store the issuer in the session (at least needed for session mgmt and token refresh) */
oidc_session_set(r, session, OIDC_ISSUER_SESSION_KEY, provider->issuer);
/* store the state and original URL in the session for handling browser-back more elegantly */
oidc_session_set(r, session, OIDC_REQUEST_STATE_SESSION_KEY, state);
oidc_session_set(r, session, OIDC_REQUEST_ORIGINAL_URL, original_url);
if ((session_state != NULL) && (provider->check_session_iframe != NULL)) {
		/* store the session state and the parameters required for session management */
oidc_session_set(r, session, OIDC_SESSION_STATE_SESSION_KEY,
session_state);
oidc_session_set(r, session, OIDC_CHECK_IFRAME_SESSION_KEY,
provider->check_session_iframe);
oidc_session_set(r, session, OIDC_CLIENTID_SESSION_KEY,
provider->client_id);
oidc_debug(r,
"session management enabled: stored session_state (%s), check_session_iframe (%s) and client_id (%s) in the session",
session_state, provider->check_session_iframe,
provider->client_id);
} else {
oidc_debug(r,
"session management disabled: session_state (%s) and/or check_session_iframe (%s) is not provided",
session_state, provider->check_session_iframe);
}
if (provider->end_session_endpoint != NULL)
oidc_session_set(r, session, OIDC_LOGOUT_ENDPOINT_SESSION_KEY,
provider->end_session_endpoint);
/* store claims resolved from userinfo endpoint */
oidc_store_userinfo_claims(r, session, provider, claims);
/* see if we have an access_token */
if (access_token != NULL) {
/* store the access_token in the session context */
oidc_session_set(r, session, OIDC_ACCESSTOKEN_SESSION_KEY,
access_token);
/* store the associated expires_in value */
oidc_store_access_token_expiry(r, session, expires_in);
}
/* see if we have a refresh_token */
if (refresh_token != NULL) {
/* store the refresh_token in the session context */
oidc_session_set(r, session, OIDC_REFRESHTOKEN_SESSION_KEY,
refresh_token);
}
/* store max session duration in the session as a hard cut-off expiry timestamp */
apr_time_t session_expires =
(provider->session_max_duration == 0) ?
apr_time_from_sec(id_token_jwt->payload.exp) :
(apr_time_now()
+ apr_time_from_sec(provider->session_max_duration));
oidc_session_set(r, session, OIDC_SESSION_EXPIRES_SESSION_KEY,
apr_psprintf(r->pool, "%" APR_TIME_T_FMT, session_expires));
/* log message about max session duration */
oidc_log_session_expires(r, session_expires);
/* store the domain for which this session is valid */
oidc_session_set(r, session, OIDC_COOKIE_DOMAIN_SESSION_KEY,
c->cookie_domain ? c->cookie_domain : oidc_get_current_url_host(r));
/* store the session */
return oidc_session_save(r, session);
}
/*
* parse the expiry for the access token
*/
static int oidc_parse_expires_in(request_rec *r, const char *expires_in) {
if (expires_in != NULL) {
char *ptr = NULL;
long number = strtol(expires_in, &ptr, 10);
if (number <= 0) {
oidc_warn(r,
"could not convert \"expires_in\" value (%s) to a number",
expires_in);
return -1;
}
return number;
}
return -1;
}
/*
* handle the different flows (hybrid, implicit, Authorization Code)
*/
static apr_byte_t oidc_handle_flows(request_rec *r, oidc_cfg *c,
json_t *proto_state, oidc_provider_t *provider, apr_table_t *params,
const char *response_mode, oidc_jwt_t **jwt) {
apr_byte_t rc = FALSE;
const char *requested_response_type = json_string_value(
json_object_get(proto_state, "response_type"));
/* handle the requested response type/mode */
if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
"code id_token token")) {
rc = oidc_proto_authorization_response_code_idtoken_token(r, c,
proto_state, provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
"code id_token")) {
rc = oidc_proto_authorization_response_code_idtoken(r, c, proto_state,
provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
"code token")) {
rc = oidc_proto_handle_authorization_response_code_token(r, c,
proto_state, provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
"code")) {
rc = oidc_proto_handle_authorization_response_code(r, c, proto_state,
provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
"id_token token")) {
rc = oidc_proto_handle_authorization_response_idtoken_token(r, c,
proto_state, provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
"id_token")) {
rc = oidc_proto_handle_authorization_response_idtoken(r, c, proto_state,
provider, params, response_mode, jwt);
} else {
oidc_error(r, "unsupported response type: \"%s\"",
requested_response_type);
}
if ((rc == FALSE) && (*jwt != NULL)) {
oidc_jwt_destroy(*jwt);
*jwt = NULL;
}
return rc;
}
/* handle the browser back on an authorization response */
static apr_byte_t oidc_handle_browser_back(request_rec *r, const char *r_state,
oidc_session_t *session) {
/* see if we have an existing session and browser-back was used */
const char *s_state = NULL, *o_url = NULL;
if (session->remote_user != NULL) {
oidc_session_get(r, session, OIDC_REQUEST_STATE_SESSION_KEY, &s_state);
oidc_session_get(r, session, OIDC_REQUEST_ORIGINAL_URL, &o_url);
if ((r_state != NULL) && (s_state != NULL)
&& (apr_strnatcmp(r_state, s_state) == 0)) {
/* log the browser back event detection */
oidc_warn(r,
"browser back detected, redirecting to original URL: %s",
o_url);
			/* go back to the URL that the user originally tried to access */
apr_table_add(r->headers_out, "Location", o_url);
return TRUE;
}
}
return FALSE;
}
/*
* complete the handling of an authorization response by obtaining, parsing and verifying the
* id_token and storing the authenticated user state in the session
*/
static int oidc_handle_authorization_response(request_rec *r, oidc_cfg *c,
oidc_session_t *session, apr_table_t *params, const char *response_mode) {
oidc_debug(r, "enter, response_mode=%s", response_mode);
oidc_provider_t *provider = NULL;
json_t *proto_state = NULL;
oidc_jwt_t *jwt = NULL;
/* see if this response came from a browser-back event */
if (oidc_handle_browser_back(r, apr_table_get(params, "state"),
session) == TRUE)
return HTTP_MOVED_TEMPORARILY;
/* match the returned state parameter against the state stored in the browser */
if (oidc_authorization_response_match_state(r, c,
apr_table_get(params, "state"), &provider, &proto_state) == FALSE) {
if (c->default_sso_url != NULL) {
oidc_warn(r,
"invalid authorization response state; a default SSO URL is set, sending the user there: %s",
c->default_sso_url);
apr_table_add(r->headers_out, "Location", c->default_sso_url);
return HTTP_MOVED_TEMPORARILY;
}
oidc_error(r,
"invalid authorization response state and no default SSO URL is set, sending an error...");
return HTTP_INTERNAL_SERVER_ERROR;
}
/* see if the response is an error response */
if (apr_table_get(params, "error") != NULL)
return oidc_authorization_response_error(r, c, proto_state,
apr_table_get(params, "error"),
apr_table_get(params, "error_description"));
/* handle the code, implicit or hybrid flow */
if (oidc_handle_flows(r, c, proto_state, provider, params, response_mode,
&jwt) == FALSE)
return oidc_authorization_response_error(r, c, proto_state,
"Error in handling response type.", NULL);
if (jwt == NULL) {
oidc_error(r, "no id_token was provided");
return oidc_authorization_response_error(r, c, proto_state,
"No id_token was provided.", NULL);
}
int expires_in = oidc_parse_expires_in(r,
apr_table_get(params, "expires_in"));
/*
	 * optionally resolve additional claims against the userinfo endpoint;
	 * the claims are not actually used here but need to be parsed anyway for error-checking purposes
*/
const char *claims = oidc_retrieve_claims_from_userinfo_endpoint(r, c,
provider, apr_table_get(params, "access_token"), NULL, jwt->payload.sub);
/* restore the original protected URL that the user was trying to access */
const char *original_url = apr_pstrdup(r->pool,
json_string_value(json_object_get(proto_state, "original_url")));
const char *original_method = apr_pstrdup(r->pool,
json_string_value(json_object_get(proto_state, "original_method")));
/* set the user */
if (oidc_get_remote_user(r, c, provider, jwt, &r->user, claims) == TRUE) {
/* session management: if the user in the new response is not equal to the old one, error out */
if ((json_object_get(proto_state, "prompt") != NULL)
&& (apr_strnatcmp(
json_string_value(
json_object_get(proto_state, "prompt")), "none")
== 0)) {
			// TODO: do we actually need to compare "sub"? (we would need to store it in the session separately then)
//const char *sub = NULL;
//oidc_session_get(r, session, "sub", &sub);
//if (apr_strnatcmp(sub, jwt->payload.sub) != 0) {
if (apr_strnatcmp(session->remote_user, r->user) != 0) {
oidc_warn(r,
"user set from new id_token is different from current one");
oidc_jwt_destroy(jwt);
return oidc_authorization_response_error(r, c, proto_state,
"User changed!", NULL);
}
}
/* store resolved information in the session */
if (oidc_save_in_session(r, c, session, provider, r->user,
apr_table_get(params, "id_token"), jwt, claims,
apr_table_get(params, "access_token"), expires_in,
apr_table_get(params, "refresh_token"),
apr_table_get(params, "session_state"),
apr_table_get(params, "state"), original_url) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
} else {
oidc_error(r, "remote user could not be set");
return oidc_authorization_response_error(r, c, proto_state,
"Remote user could not be set: contact the website administrator",
NULL);
}
/* cleanup */
json_decref(proto_state);
oidc_jwt_destroy(jwt);
/* check that we've actually authenticated a user; functions as error handling for oidc_get_remote_user */
if (r->user == NULL)
return HTTP_UNAUTHORIZED;
/* log the successful response */
oidc_debug(r,
"session created and stored, returning to original URL: %s, original method: %s",
original_url, original_method);
/* check whether form post data was preserved; if so restore it */
if (apr_strnatcmp(original_method, OIDC_METHOD_FORM_POST) == 0) {
return oidc_request_post_preserved_restore(r, original_url);
}
	/* now that we've authenticated the user, go back to the URL that the user originally tried to access */
apr_table_add(r->headers_out, "Location", original_url);
/* do the actual redirect to the original URL */
return HTTP_MOVED_TEMPORARILY;
}
/*
* handle an OpenID Connect Authorization Response using the POST (+fragment->POST) response_mode
*/
static int oidc_handle_post_authorization_response(request_rec *r, oidc_cfg *c,
oidc_session_t *session) {
oidc_debug(r, "enter");
/* initialize local variables */
char *response_mode = NULL;
/* read the parameters that are POST-ed to us */
apr_table_t *params = apr_table_make(r->pool, 8);
if (oidc_util_read_post_params(r, params) == FALSE) {
oidc_error(r, "something went wrong when reading the POST parameters");
return HTTP_INTERNAL_SERVER_ERROR;
}
/* see if we've got any POST-ed data at all */
if ((apr_table_elts(params)->nelts < 1)
|| ((apr_table_elts(params)->nelts == 1)
&& apr_table_get(params, "response_mode")
&& (apr_strnatcmp(apr_table_get(params, "response_mode"),
"fragment") == 0))) {
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"You've hit an OpenID Connect Redirect URI with no parameters, this is an invalid request; you should not open this URL in your browser directly, or have the server administrator use a different OIDCRedirectURI setting.",
HTTP_INTERNAL_SERVER_ERROR);
}
/* get the parameters */
response_mode = (char *) apr_table_get(params, "response_mode");
/* do the actual implicit work */
return oidc_handle_authorization_response(r, c, session, params,
response_mode ? response_mode : "form_post");
}
/*
* handle an OpenID Connect Authorization Response using the redirect response_mode
*/
static int oidc_handle_redirect_authorization_response(request_rec *r,
oidc_cfg *c, oidc_session_t *session) {
oidc_debug(r, "enter");
/* read the parameters from the query string */
apr_table_t *params = apr_table_make(r->pool, 8);
oidc_util_read_form_encoded_params(r, params, r->args);
/* do the actual work */
return oidc_handle_authorization_response(r, c, session, params, "query");
}
/*
* present the user with an OP selection screen
*/
static int oidc_discovery(request_rec *r, oidc_cfg *cfg) {
oidc_debug(r, "enter");
/* obtain the URL we're currently accessing, to be stored in the state/session */
char *current_url = oidc_get_current_url(r);
const char *method = oidc_original_request_method(r, cfg, FALSE);
/* generate CSRF token */
char *csrf = NULL;
if (oidc_proto_generate_nonce(r, &csrf, 8) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
char *discover_url = oidc_cfg_dir_discover_url(r);
/* see if there's an external discovery page configured */
if (discover_url != NULL) {
/* yes, assemble the parameters for external discovery */
char *url = apr_psprintf(r->pool, "%s%s%s=%s&%s=%s&%s=%s&%s=%s",
discover_url, strchr(discover_url, '?') != NULL ? "&" : "?",
OIDC_DISC_RT_PARAM, oidc_util_escape_string(r, current_url),
OIDC_DISC_RM_PARAM, method,
OIDC_DISC_CB_PARAM,
oidc_util_escape_string(r, cfg->redirect_uri),
OIDC_CSRF_NAME, oidc_util_escape_string(r, csrf));
/* log what we're about to do */
oidc_debug(r, "redirecting to external discovery page: %s", url);
/* set CSRF cookie */
oidc_util_set_cookie(r, OIDC_CSRF_NAME, csrf, -1);
/* see if we need to preserve POST parameters through Javascript/HTML5 storage */
if (oidc_post_preserve_javascript(r, url, NULL, NULL) == TRUE)
return DONE;
/* do the actual redirect to an external discovery page */
apr_table_add(r->headers_out, "Location", url);
return HTTP_MOVED_TEMPORARILY;
}
/* get a list of all providers configured in the metadata directory */
apr_array_header_t *arr = NULL;
if (oidc_metadata_list(r, cfg, &arr) == FALSE)
return oidc_util_html_send_error(r, cfg->error_template,
"Configuration Error",
"No configured providers found, contact your administrator",
HTTP_UNAUTHORIZED);
/* assemble a where-are-you-from IDP discovery HTML page */
const char *s = " <h3>Select your OpenID Connect Identity Provider</h3>\n";
/* list all configured providers in there */
int i;
for (i = 0; i < arr->nelts; i++) {
const char *issuer = ((const char**) arr->elts)[i];
// TODO: html escape (especially & character)
char *display =
(strstr(issuer, "https://") == NULL) ?
apr_pstrdup(r->pool, issuer) :
apr_pstrdup(r->pool, issuer + strlen("https://"));
/* strip port number */
//char *p = strstr(display, ":");
//if (p != NULL) *p = '\0';
/* point back to the redirect_uri, where the selection is handled, with an IDP selection and return_to URL */
s =
apr_psprintf(r->pool,
"%s<p><a href=\"%s?%s=%s&%s=%s&%s=%s&%s=%s\">%s</a></p>\n",
s, cfg->redirect_uri, OIDC_DISC_OP_PARAM,
oidc_util_escape_string(r, issuer),
OIDC_DISC_RT_PARAM,
oidc_util_escape_string(r, current_url),
OIDC_DISC_RM_PARAM, method,
OIDC_CSRF_NAME, csrf, display);
}
/* add an option to enter an account or issuer name for dynamic OP discovery */
s = apr_psprintf(r->pool, "%s<form method=\"get\" action=\"%s\">\n", s,
cfg->redirect_uri);
s = apr_psprintf(r->pool,
"%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s,
OIDC_DISC_RT_PARAM, current_url);
s = apr_psprintf(r->pool,
"%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s,
OIDC_DISC_RM_PARAM, method);
s = apr_psprintf(r->pool,
"%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s,
OIDC_CSRF_NAME, csrf);
	s =
			apr_psprintf(r->pool,
					"%s<p>Or enter your account name (e.g. &quot;mike@seed.gluu.org&quot;), or an IDP identifier (e.g. &quot;mitreid.org&quot;):</p>\n",
					s);
s = apr_psprintf(r->pool,
"%s<p><input type=\"text\" name=\"%s\" value=\"%s\"></p>\n", s,
OIDC_DISC_OP_PARAM, "");
s = apr_psprintf(r->pool,
"%s<p><input type=\"submit\" value=\"Submit\"></p>\n", s);
s = apr_psprintf(r->pool, "%s</form>\n", s);
oidc_util_set_cookie(r, OIDC_CSRF_NAME, csrf, -1);
char *javascript = NULL, *javascript_method = NULL;
char *html_head =
"<style type=\"text/css\">body {text-align: center}</style>";
if (oidc_post_preserve_javascript(r, NULL, &javascript,
&javascript_method) == TRUE)
html_head = apr_psprintf(r->pool, "%s%s", html_head, javascript);
/* now send the HTML contents to the user agent */
return oidc_util_html_send(r, "OpenID Connect Provider Discovery",
html_head, javascript_method, s, DONE);
}
/*
* authenticate the user to the selected OP, if the OP is not selected yet perform discovery first
*/
static int oidc_authenticate_user(request_rec *r, oidc_cfg *c,
oidc_provider_t *provider, const char *original_url,
const char *login_hint, const char *id_token_hint, const char *prompt,
const char *auth_request_params) {
oidc_debug(r, "enter");
if (provider == NULL) {
// TODO: should we use an explicit redirect to the discovery endpoint (maybe a "discovery" param to the redirect_uri)?
if (c->metadata_dir != NULL)
return oidc_discovery(r, c);
		/* we're not using multiple OPs configured in a metadata directory, so pick the statically configured OP */
if (oidc_provider_static_config(r, c, &provider) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
}
/* generate the random nonce value that correlates requests and responses */
char *nonce = NULL;
if (oidc_proto_generate_nonce(r, &nonce, OIDC_PROTO_NONCE_LENGTH) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
char *code_verifier = NULL;
char *code_challenge = NULL;
if ((oidc_util_spaced_string_contains(r->pool, provider->response_type,
"code") == TRUE) && (provider->pkce_method != NULL)) {
/* generate the code verifier value that correlates authorization requests and code exchange requests */
if (oidc_proto_generate_code_verifier(r, &code_verifier,
OIDC_PROTO_CODE_VERIFIER_LENGTH) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
/* generate the PKCE code challenge */
if (oidc_proto_generate_code_challenge(r, code_verifier,
&code_challenge, provider->pkce_method) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
}
/* create the state between request/response */
json_t *proto_state = json_object();
json_object_set_new(proto_state, "original_url", json_string(original_url));
json_object_set_new(proto_state, "original_method",
json_string(oidc_original_request_method(r, c, TRUE)));
json_object_set_new(proto_state, "issuer", json_string(provider->issuer));
json_object_set_new(proto_state, "response_type",
json_string(provider->response_type));
json_object_set_new(proto_state, "nonce", json_string(nonce));
json_object_set_new(proto_state, "timestamp",
json_integer(apr_time_sec(apr_time_now())));
if (provider->response_mode)
json_object_set_new(proto_state, "response_mode",
json_string(provider->response_mode));
if (prompt)
json_object_set_new(proto_state, "prompt", json_string(prompt));
if (code_verifier)
json_object_set_new(proto_state, "code_verifier",
json_string(code_verifier));
/* get a hash value that fingerprints the browser concatenated with the random input */
char *state = oidc_get_browser_state_hash(r, nonce);
/* create state that restores the context when the authorization response comes in; cryptographically bind it to the browser */
if (oidc_authorization_request_set_cookie(r, c, state, proto_state) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
/*
	 * print out errors if the cookie settings are not going to work
*/
apr_uri_t o_uri;
memset(&o_uri, 0, sizeof(apr_uri_t));
apr_uri_t r_uri;
memset(&r_uri, 0, sizeof(apr_uri_t));
apr_uri_parse(r->pool, original_url, &o_uri);
apr_uri_parse(r->pool, c->redirect_uri, &r_uri);
if ((apr_strnatcmp(o_uri.scheme, r_uri.scheme) != 0)
&& (apr_strnatcmp(r_uri.scheme, "https") == 0)) {
oidc_error(r,
"the URL scheme (%s) of the configured OIDCRedirectURI does not match the URL scheme of the URL being accessed (%s): the \"state\" and \"session\" cookies will not be shared between the two!",
r_uri.scheme, o_uri.scheme);
return HTTP_INTERNAL_SERVER_ERROR;
}
if (c->cookie_domain == NULL) {
if (apr_strnatcmp(o_uri.hostname, r_uri.hostname) != 0) {
char *p = strstr(o_uri.hostname, r_uri.hostname);
if ((p == NULL) || (apr_strnatcmp(r_uri.hostname, p) != 0)) {
oidc_error(r,
"the URL hostname (%s) of the configured OIDCRedirectURI does not match the URL hostname of the URL being accessed (%s): the \"state\" and \"session\" cookies will not be shared between the two!",
r_uri.hostname, o_uri.hostname);
return HTTP_INTERNAL_SERVER_ERROR;
}
}
} else {
if (!oidc_util_cookie_domain_valid(r_uri.hostname, c->cookie_domain)) {
oidc_error(r,
"the domain (%s) configured in OIDCCookieDomain does not match the URL hostname (%s) of the URL being accessed (%s): setting \"state\" and \"session\" cookies will not work!!",
c->cookie_domain, o_uri.hostname, original_url);
return HTTP_INTERNAL_SERVER_ERROR;
}
}
/* send off to the OpenID Connect Provider */
// TODO: maybe show intermediate/progress screen "redirecting to"
return oidc_proto_authorization_request(r, provider, login_hint,
c->redirect_uri, state, proto_state, id_token_hint, code_challenge,
auth_request_params);
}
/*
* check if the target_link_uri matches to configuration settings to prevent an open redirect
*/
static int oidc_target_link_uri_matches_configuration(request_rec *r,
oidc_cfg *cfg, const char *target_link_uri) {
apr_uri_t o_uri;
apr_uri_parse(r->pool, target_link_uri, &o_uri);
if (o_uri.hostname == NULL) {
oidc_error(r,
"could not parse the \"target_link_uri\" (%s) in to a valid URL: aborting.",
target_link_uri);
return FALSE;
}
apr_uri_t r_uri;
apr_uri_parse(r->pool, cfg->redirect_uri, &r_uri);
if (cfg->cookie_domain == NULL) {
		/* no cookie_domain set: see if the target_link_uri matches the redirect_uri host (because the session cookie will be set host-wide) */
if (apr_strnatcmp(o_uri.hostname, r_uri.hostname) != 0) {
char *p = strstr(o_uri.hostname, r_uri.hostname);
if ((p == NULL) || (apr_strnatcmp(r_uri.hostname, p) != 0)) {
oidc_error(r,
"the URL hostname (%s) of the configured OIDCRedirectURI does not match the URL hostname of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
r_uri.hostname, o_uri.hostname);
return FALSE;
}
}
} else {
/* cookie_domain set: see if the target_link_uri is within the cookie_domain */
char *p = strstr(o_uri.hostname, cfg->cookie_domain);
if ((p == NULL) || (apr_strnatcmp(cfg->cookie_domain, p) != 0)) {
oidc_error(r,
"the domain (%s) configured in OIDCCookieDomain does not match the URL hostname (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
cfg->cookie_domain, o_uri.hostname, target_link_uri);
return FALSE;
}
}
/* see if the cookie_path setting matches the target_link_uri path */
char *cookie_path = oidc_cfg_dir_cookie_path(r);
if (cookie_path != NULL) {
char *p = (o_uri.path != NULL) ? strstr(o_uri.path, cookie_path) : NULL;
if ((p == NULL) || (p != o_uri.path)) {
			oidc_error(r,
					"the path (%s) configured in OIDCCookiePath does not match the URL path (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
					cookie_path, o_uri.path, target_link_uri);
return FALSE;
		} else if (strlen(o_uri.path) > strlen(cookie_path)) {
			int n = strlen(cookie_path);
			if (cookie_path[n - 1] == '/')
				n--;
			/* require a path separator right after the prefix, so a cookie
			 * path of "/app" matches "/app/x" but not "/appx" */
			if (o_uri.path[n] != '/') {
				oidc_error(r,
						"the path (%s) configured in OIDCCookiePath does not match the URL path (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
						cookie_path, o_uri.path, target_link_uri);
				return FALSE;
			}
}
}
return TRUE;
}
/*
* handle a response from an IDP discovery page and/or handle 3rd-party initiated SSO
*/
static int oidc_handle_discovery_response(request_rec *r, oidc_cfg *c) {
/* variables to hold the values returned in the response */
char *issuer = NULL, *target_link_uri = NULL, *login_hint = NULL,
*auth_request_params = NULL, *csrf_cookie, *csrf_query = NULL,
*user = NULL;
oidc_provider_t *provider = NULL;
oidc_util_get_request_parameter(r, OIDC_DISC_OP_PARAM, &issuer);
oidc_util_get_request_parameter(r, OIDC_DISC_USER_PARAM, &user);
oidc_util_get_request_parameter(r, OIDC_DISC_RT_PARAM, &target_link_uri);
oidc_util_get_request_parameter(r, OIDC_DISC_LH_PARAM, &login_hint);
oidc_util_get_request_parameter(r, OIDC_DISC_AR_PARAM,
&auth_request_params);
oidc_util_get_request_parameter(r, OIDC_CSRF_NAME, &csrf_query);
csrf_cookie = oidc_util_get_cookie(r, OIDC_CSRF_NAME);
/* do CSRF protection if not 3rd party initiated SSO */
if (csrf_cookie) {
/* clean CSRF cookie */
oidc_util_set_cookie(r, OIDC_CSRF_NAME, "", 0);
/* compare CSRF cookie value with query parameter value */
if ((csrf_query == NULL)
|| apr_strnatcmp(csrf_query, csrf_cookie) != 0) {
oidc_warn(r,
"CSRF protection failed, no Discovery and dynamic client registration will be allowed");
csrf_cookie = NULL;
}
}
// TODO: trim issuer/accountname/domain input and do more input validation
oidc_debug(r,
"issuer=\"%s\", target_link_uri=\"%s\", login_hint=\"%s\", user=\"%s\"",
issuer, target_link_uri, login_hint, user);
if (target_link_uri == NULL) {
if (c->default_sso_url == NULL) {
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"SSO to this module without specifying a \"target_link_uri\" parameter is not possible because OIDCDefaultURL is not set.",
HTTP_INTERNAL_SERVER_ERROR);
}
target_link_uri = c->default_sso_url;
}
/* do open redirect prevention */
if (oidc_target_link_uri_matches_configuration(r, c,
target_link_uri) == FALSE) {
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"\"target_link_uri\" parameter does not match configuration settings, aborting to prevent an open redirect.",
HTTP_UNAUTHORIZED);
}
/* find out if the user entered an account name or selected an OP manually */
if (user != NULL) {
if (login_hint == NULL)
login_hint = apr_pstrdup(r->pool, user);
/* normalize the user identifier */
if (strstr(user, "https://") != user)
user = apr_psprintf(r->pool, "https://%s", user);
		/* got a user identifier as input, perform OP discovery with that */
if (oidc_proto_url_based_discovery(r, c, user, &issuer) == FALSE) {
/* something did not work out, show a user facing error */
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"Could not resolve the provided user identifier to an OpenID Connect provider; check your syntax.",
HTTP_NOT_FOUND);
}
/* issuer is set now, so let's continue as planned */
	} else if ((issuer != NULL) && (strstr(issuer, "@") != NULL)) {
if (login_hint == NULL) {
login_hint = apr_pstrdup(r->pool, issuer);
//char *p = strstr(issuer, "@");
//*p = '\0';
}
/* got an account name as input, perform OP discovery with that */
if (oidc_proto_account_based_discovery(r, c, issuer, &issuer) == FALSE) {
/* something did not work out, show a user facing error */
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"Could not resolve the provided account name to an OpenID Connect provider; check your syntax.",
HTTP_NOT_FOUND);
}
/* issuer is set now, so let's continue as planned */
}
	/* guard against a missing/empty issuer (e.g. an empty OP selection parameter) */
	if ((issuer == NULL) || (issuer[0] == '\0')) {
		return oidc_util_html_send_error(r, c->error_template,
				"Invalid Request",
				"No OpenID Connect provider was specified or could be discovered.",
				HTTP_NOT_FOUND);
	}
	/* strip trailing '/' */
	int n = strlen(issuer);
	if (issuer[n - 1] == '/')
		issuer[n - 1] = '\0';
/* try and get metadata from the metadata directories for the selected OP */
if ((oidc_metadata_get(r, c, issuer, &provider, csrf_cookie != NULL) == TRUE)
&& (provider != NULL)) {
/* now we've got a selected OP, send the user there to authenticate */
return oidc_authenticate_user(r, c, provider, target_link_uri,
login_hint, NULL, NULL, auth_request_params);
}
/* something went wrong */
return oidc_util_html_send_error(r, c->error_template, "Invalid Request",
"Could not find valid provider metadata for the selected OpenID Connect provider; contact the administrator",
HTTP_NOT_FOUND);
}
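/* the raw bytes of a 1x1 transparent PNG image, served on image-style front-channel logout calls */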
static apr_uint32_t oidc_transparent_pixel[17] = { 0x474e5089, 0x0a1a0a0d,
0x0d000000, 0x52444849, 0x01000000, 0x01000000, 0x00000408, 0x0c1cb500,
0x00000002, 0x4144490b, 0x639c7854, 0x0000cffa, 0x02010702, 0x71311c9a,
0x00000000, 0x444e4549, 0x826042ae };
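/*
 * check whether the value of the "logout" parameter indicates a front-channel logout call from the OP
 */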
static apr_byte_t oidc_is_front_channel_logout(const char *logout_param_value) {
return ((logout_param_value != NULL)
&& ((apr_strnatcmp(logout_param_value,
OIDC_GET_STYLE_LOGOUT_PARAM_VALUE) == 0)
|| (apr_strnatcmp(logout_param_value,
OIDC_IMG_STYLE_LOGOUT_PARAM_VALUE) == 0)));
}
/*
* handle a local logout
*/
static int oidc_handle_logout_request(request_rec *r, oidc_cfg *c,
oidc_session_t *session, const char *url) {
oidc_debug(r, "enter (url=%s)", url);
/* if there's no remote_user then there's no (stored) session to kill */
if (session->remote_user != NULL) {
/* remove session state (cq. cache entry and cookie) */
oidc_session_kill(r, session);
}
/* see if this is the OP calling us */
if (oidc_is_front_channel_logout(url)) {
/* set recommended cache control headers */
apr_table_add(r->err_headers_out, "Cache-Control",
"no-cache, no-store");
apr_table_add(r->err_headers_out, "Pragma", "no-cache");
apr_table_add(r->err_headers_out, "P3P", "CAO PSA OUR");
apr_table_add(r->err_headers_out, "Expires", "0");
apr_table_add(r->err_headers_out, "X-Frame-Options", "DENY");
/* see if this is PF-PA style logout in which case we return a transparent pixel */
const char *accept = apr_table_get(r->headers_in, "Accept");
if ((apr_strnatcmp(url, OIDC_IMG_STYLE_LOGOUT_PARAM_VALUE) == 0)
|| ((accept) && strstr(accept, "image/png"))) {
return oidc_util_http_send(r,
(const char *) &oidc_transparent_pixel,
sizeof(oidc_transparent_pixel), "image/png", DONE);
}
/* standard HTTP based logout: should be called in an iframe from the OP */
return oidc_util_html_send(r, "Logged Out", NULL, NULL,
"<p>Logged Out</p>", DONE);
}
/* see if we don't need to go somewhere special after killing the session locally */
if (url == NULL)
return oidc_util_html_send(r, "Logged Out", NULL, NULL,
"<p>Logged Out</p>", DONE);
/* send the user to the specified where-to-go-after-logout URL */
apr_table_add(r->headers_out, "Location", url);
return HTTP_MOVED_TEMPORARILY;
}
/*
* perform (single) logout
*/
static int oidc_handle_logout(request_rec *r, oidc_cfg *c,
oidc_session_t *session) {
/* pickup the command or URL where the user wants to go after logout */
char *url = NULL;
oidc_util_get_request_parameter(r, "logout", &url);
oidc_debug(r, "enter (url=%s)", url);
if (oidc_is_front_channel_logout(url)) {
return oidc_handle_logout_request(r, c, session, url);
}
if ((url == NULL) || (apr_strnatcmp(url, "") == 0)) {
url = c->default_slo_url;
} else {
/* do input validation on the logout parameter value */
const char *error_description = NULL;
apr_uri_t uri;
if (apr_uri_parse(r->pool, url, &uri) != APR_SUCCESS) {
const char *error_description = apr_psprintf(r->pool,
"Logout URL malformed: %s", url);
oidc_error(r, "%s", error_description);
return oidc_util_html_send_error(r, c->error_template,
"Malformed URL", error_description,
HTTP_INTERNAL_SERVER_ERROR);
}
		/* uri.hostname is NULL for relative URLs; treat that as a mismatch
		 * too, since strstr must not be called with a NULL needle */
		if ((uri.hostname == NULL)
				|| (strstr(r->hostname, uri.hostname) == NULL)
				|| (strstr(uri.hostname, r->hostname) == NULL)) {
error_description =
apr_psprintf(r->pool,
"logout value \"%s\" does not match the hostname of the current request \"%s\"",
apr_uri_unparse(r->pool, &uri, 0), r->hostname);
oidc_error(r, "%s", error_description);
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request", error_description,
HTTP_INTERNAL_SERVER_ERROR);
}
/* validate the URL to prevent HTTP header splitting */
if (((strstr(url, "\n") != NULL) || strstr(url, "\r") != NULL)) {
			error_description =
					apr_psprintf(r->pool,
							"logout value \"%s\" contains illegal \"\\n\" or \"\\r\" character(s)",
							url);
oidc_error(r, "%s", error_description);
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request", error_description,
HTTP_INTERNAL_SERVER_ERROR);
}
}
const char *end_session_endpoint = NULL;
oidc_session_get(r, session, OIDC_LOGOUT_ENDPOINT_SESSION_KEY,
&end_session_endpoint);
if (end_session_endpoint != NULL) {
const char *id_token_hint = NULL;
oidc_session_get(r, session, OIDC_IDTOKEN_SESSION_KEY, &id_token_hint);
char *logout_request = apr_pstrdup(r->pool, end_session_endpoint);
if (id_token_hint != NULL) {
logout_request = apr_psprintf(r->pool, "%s%sid_token_hint=%s",
logout_request,
strchr(logout_request, '?') != NULL ? "&" : "?",
oidc_util_escape_string(r, id_token_hint));
}
if (url != NULL) {
logout_request = apr_psprintf(r->pool,
"%s%spost_logout_redirect_uri=%s", logout_request,
strchr(logout_request, '?') != NULL ? "&" : "?",
oidc_util_escape_string(r, url));
}
url = logout_request;
}
return oidc_handle_logout_request(r, c, session, url);
}
/*
* handle request for JWKs
*/
int oidc_handle_jwks(request_rec *r, oidc_cfg *c) {
/* pickup requested JWKs type */
// char *jwks_type = NULL;
// oidc_util_get_request_parameter(r, "jwks", &jwks_type);
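	/* hand-assemble a JWK Set document ({ "keys": [ ... ] }, cf. RFC 7517) from the configured public keys */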
char *jwks = apr_pstrdup(r->pool, "{ \"keys\" : [");
apr_hash_index_t *hi = NULL;
apr_byte_t first = TRUE;
oidc_jose_error_t err;
if (c->public_keys != NULL) {
/* loop over the RSA public keys */
for (hi = apr_hash_first(r->pool, c->public_keys); hi; hi =
apr_hash_next(hi)) {
const char *s_kid = NULL;
oidc_jwk_t *jwk = NULL;
char *s_json = NULL;
apr_hash_this(hi, (const void**) &s_kid, NULL, (void**) &jwk);
if (oidc_jwk_to_json(r->pool, jwk, &s_json, &err) == TRUE) {
jwks = apr_psprintf(r->pool, "%s%s %s ", jwks, first ? "" : ",",
s_json);
first = FALSE;
} else {
oidc_error(r,
"could not convert RSA JWK to JSON using oidc_jwk_to_json: %s",
oidc_jose_e2s(r->pool, err));
}
}
}
// TODO: send stuff if first == FALSE?
jwks = apr_psprintf(r->pool, "%s ] }", jwks);
return oidc_util_http_send(r, jwks, strlen(jwks), "application/json", DONE);
}
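/*
 * handle a request for the OP session management iframe: redirect the browser to the OP's check_session_iframe URL
 */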
static int oidc_handle_session_management_iframe_op(request_rec *r, oidc_cfg *c,
oidc_session_t *session, const char *check_session_iframe) {
oidc_debug(r, "enter");
apr_table_add(r->headers_out, "Location", check_session_iframe);
return HTTP_MOVED_TEMPORARILY;
}
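/*
 * serve the RP session management iframe: a piece of Javascript that polls the OP iframe
 * with postMessage calls and triggers a session check or logout when the OP reports a change
 */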
static int oidc_handle_session_management_iframe_rp(request_rec *r, oidc_cfg *c,
oidc_session_t *session, const char *client_id,
const char *check_session_iframe) {
oidc_debug(r, "enter");
const char *java_script =
" <script type=\"text/javascript\">\n"
" var targetOrigin = '%s';\n"
" var message = '%s' + ' ' + '%s';\n"
" var timerID;\n"
"\n"
" function checkSession() {\n"
" console.log('checkSession: posting ' + message + ' to ' + targetOrigin);\n"
" var win = window.parent.document.getElementById('%s').contentWindow;\n"
" win.postMessage( message, targetOrigin);\n"
" }\n"
"\n"
" function setTimer() {\n"
" checkSession();\n"
" timerID = setInterval('checkSession()', %s);\n"
" }\n"
"\n"
" function receiveMessage(e) {\n"
" console.log('receiveMessage: ' + e.data + ' from ' + e.origin);\n"
" if (e.origin !== targetOrigin ) {\n"
" console.log('receiveMessage: cross-site scripting attack?');\n"
" return;\n"
" }\n"
" if (e.data != 'unchanged') {\n"
" clearInterval(timerID);\n"
" if (e.data == 'changed') {\n"
" window.location.href = '%s?session=check';\n"
" } else {\n"
" window.location.href = '%s?session=logout';\n"
" }\n"
" }\n"
" }\n"
"\n"
" window.addEventListener('message', receiveMessage, false);\n"
"\n"
" </script>\n";
/* determine the origin for the check_session_iframe endpoint */
char *origin = apr_pstrdup(r->pool, check_session_iframe);
apr_uri_t uri;
apr_uri_parse(r->pool, check_session_iframe, &uri);
char *p = strstr(origin, uri.path);
*p = '\0';
/* the element identifier for the OP iframe */
const char *op_iframe_id = "openidc-op";
/* restore the OP session_state from the session */
const char *session_state = NULL;
oidc_session_get(r, session, OIDC_SESSION_STATE_SESSION_KEY,
&session_state);
if (session_state == NULL) {
		oidc_warn(r,
				"no session_state found in the session; the OP probably does not support session management!?");
return DONE;
}
char *s_poll_interval = NULL;
oidc_util_get_request_parameter(r, "poll", &s_poll_interval);
if (s_poll_interval == NULL)
s_poll_interval = "3000";
java_script = apr_psprintf(r->pool, java_script, origin, client_id,
session_state, op_iframe_id, s_poll_interval, c->redirect_uri,
c->redirect_uri);
return oidc_util_html_send(r, NULL, java_script, "setTimer", NULL, DONE);
}
/*
* handle session management request
*/
static int oidc_handle_session_management(request_rec *r, oidc_cfg *c,
oidc_session_t *session) {
char *cmd = NULL;
const char *id_token_hint = NULL, *client_id = NULL, *check_session_iframe =
NULL;
oidc_provider_t *provider = NULL;
/* get the command passed to the session management handler */
oidc_util_get_request_parameter(r, "session", &cmd);
if (cmd == NULL) {
oidc_error(r, "session management handler called with no command");
return HTTP_INTERNAL_SERVER_ERROR;
}
/* see if this is a local logout during session management */
if (apr_strnatcmp("logout", cmd) == 0) {
oidc_debug(r,
"[session=logout] calling oidc_handle_logout_request because of session mgmt local logout call.");
return oidc_handle_logout_request(r, c, session, c->default_slo_url);
}
/* see if this is a request for the OP iframe */
if (apr_strnatcmp("iframe_op", cmd) == 0) {
oidc_session_get(r, session, OIDC_CHECK_IFRAME_SESSION_KEY,
&check_session_iframe);
if (check_session_iframe != NULL) {
return oidc_handle_session_management_iframe_op(r, c, session,
check_session_iframe);
}
return HTTP_NOT_FOUND;
}
/* see if this is a request for the RP iframe */
if (apr_strnatcmp("iframe_rp", cmd) == 0) {
oidc_session_get(r, session, OIDC_CLIENTID_SESSION_KEY, &client_id);
oidc_session_get(r, session, OIDC_CHECK_IFRAME_SESSION_KEY,
&check_session_iframe);
if ((client_id != NULL) && (check_session_iframe != NULL)) {
return oidc_handle_session_management_iframe_rp(r, c, session,
client_id, check_session_iframe);
}
		oidc_debug(r,
				"iframe_rp command issued but no client_id (%s) and/or no check_session_iframe (%s) set",
				client_id, check_session_iframe);
return HTTP_NOT_FOUND;
}
	/* see if this is a request to check the login state with the OP */
if (apr_strnatcmp("check", cmd) == 0) {
oidc_session_get(r, session, OIDC_IDTOKEN_SESSION_KEY, &id_token_hint);
oidc_get_provider_from_session(r, c, session, &provider);
if ((session->remote_user != NULL) && (provider != NULL)) {
return oidc_authenticate_user(r, c, provider,
apr_psprintf(r->pool, "%s?session=iframe_rp",
c->redirect_uri), NULL, id_token_hint, "none", NULL);
}
oidc_debug(r,
"[session=check] calling oidc_handle_logout_request because no session found.");
return oidc_session_redirect_parent_window_to_logout(r, c);
}
/* handle failure in fallthrough */
oidc_error(r, "unknown command: %s", cmd);
return HTTP_INTERNAL_SERVER_ERROR;
}
/*
* handle refresh token request
*/
static int oidc_handle_refresh_token_request(request_rec *r, oidc_cfg *c,
oidc_session_t *session) {
char *return_to = NULL;
char *r_access_token = NULL;
char *error_code = NULL;
/* get the command passed to the session management handler */
oidc_util_get_request_parameter(r, "refresh", &return_to);
oidc_util_get_request_parameter(r, "access_token", &r_access_token);
/* check the input parameters */
if (return_to == NULL) {
oidc_error(r,
"refresh token request handler called with no URL to return to");
return HTTP_INTERNAL_SERVER_ERROR;
}
if (r_access_token == NULL) {
oidc_error(r,
"refresh token request handler called with no access_token parameter");
error_code = "no_access_token";
goto end;
}
char *s_access_token = NULL;
oidc_session_get(r, session, OIDC_ACCESSTOKEN_SESSION_KEY,
(const char **) &s_access_token);
if (s_access_token == NULL) {
oidc_error(r,
"no existing access_token found in the session, nothing to refresh");
error_code = "no_access_token_exists";
goto end;
}
/* compare the access_token parameter used for XSRF protection */
if (apr_strnatcmp(s_access_token, r_access_token) != 0) {
oidc_error(r,
"access_token passed in refresh request does not match the one stored in the session");
error_code = "no_access_token_match";
goto end;
}
/* get a handle to the provider configuration */
oidc_provider_t *provider = NULL;
if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) {
error_code = "session_corruption";
goto end;
}
/* execute the actual refresh grant */
if (oidc_refresh_access_token(r, c, session, provider, NULL) == FALSE) {
oidc_error(r, "access_token could not be refreshed");
error_code = "refresh_failed";
goto end;
}
/* store the session */
if (oidc_session_save(r, session) == FALSE) {
error_code = "session_corruption";
goto end;
}
end:
/* pass optional error message to the return URL */
if (error_code != NULL)
return_to = apr_psprintf(r->pool, "%s%serror_code=%s", return_to,
strchr(return_to, '?') ? "&" : "?",
oidc_util_escape_string(r, error_code));
/* add the redirect location header */
apr_table_add(r->headers_out, "Location", return_to);
return HTTP_MOVED_TEMPORARILY;
}
/*
* handle request object by reference request
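 *
 * A hypothetical example call (the reference value is illustrative):
 *
 *   GET <redirect_uri>?request_uri=<reference>
 *
 * On a cache hit the stored JWT is returned as application/jwt and the
 * cache entry is cleared, so each reference is single-use.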
*/
static int oidc_handle_request_uri(request_rec *r, oidc_cfg *c) {
char *request_ref = NULL;
oidc_util_get_request_parameter(r, "request_uri", &request_ref);
if (request_ref == NULL) {
oidc_error(r, "no \"request_uri\" parameter found");
return HTTP_BAD_REQUEST;
}
const char *jwt = NULL;
c->cache->get(r, OIDC_CACHE_SECTION_REQUEST_URI, request_ref, &jwt);
if (jwt == NULL) {
oidc_error(r, "no cached JWT found for request_uri reference: %s",
request_ref);
return HTTP_NOT_FOUND;
}
c->cache->set(r, OIDC_CACHE_SECTION_REQUEST_URI, request_ref, NULL, 0);
return oidc_util_http_send(r, jwt, strlen(jwt), "application/jwt", DONE);
}
/*
* handle a request to invalidate a cached access token introspection result
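 *
 * A hypothetical example call (the token value is illustrative):
 *
 *   GET <redirect_uri>?remove_at_cache=<access_token>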
*/
static int oidc_handle_remove_at_cache(request_rec *r, oidc_cfg *c) {
char *access_token = NULL;
oidc_util_get_request_parameter(r, "remove_at_cache", &access_token);
const char *cache_entry = NULL;
c->cache->get(r, OIDC_CACHE_SECTION_ACCESS_TOKEN, access_token, &cache_entry);
if (cache_entry == NULL) {
oidc_error(r, "no cached access token found for value: %s", access_token);
return HTTP_NOT_FOUND;
}
c->cache->set(r, OIDC_CACHE_SECTION_ACCESS_TOKEN, access_token, NULL, 0);
return DONE;
}
/*
* handle all requests to the redirect_uri
*/
int oidc_handle_redirect_uri_request(request_rec *r, oidc_cfg *c,
oidc_session_t *session) {
if (oidc_proto_is_redirect_authorization_response(r, c)) {
/* this is an authorization response from the OP using the Basic Client profile or a Hybrid flow */
return oidc_handle_redirect_authorization_response(r, c, session);
} else if (oidc_proto_is_post_authorization_response(r, c)) {
/* this is an authorization response using the fragment(+POST) response_mode with the Implicit Client profile */
return oidc_handle_post_authorization_response(r, c, session);
} else if (oidc_is_discovery_response(r, c)) {
/* this is response from the OP discovery page */
return oidc_handle_discovery_response(r, c);
} else if (oidc_util_request_has_parameter(r, "logout")) {
/* handle logout */
return oidc_handle_logout(r, c, session);
} else if (oidc_util_request_has_parameter(r, "jwks")) {
/* handle JWKs request */
return oidc_handle_jwks(r, c);
} else if (oidc_util_request_has_parameter(r, "session")) {
/* handle session management request */
return oidc_handle_session_management(r, c, session);
} else if (oidc_util_request_has_parameter(r, "refresh")) {
/* handle refresh token request */
return oidc_handle_refresh_token_request(r, c, session);
} else if (oidc_util_request_has_parameter(r, "request_uri")) {
/* handle request object by reference request */
return oidc_handle_request_uri(r, c);
} else if (oidc_util_request_has_parameter(r, "remove_at_cache")) {
/* handle request to invalidate access token cache */
return oidc_handle_remove_at_cache(r, c);
} else if ((r->args == NULL) || (apr_strnatcmp(r->args, "") == 0)) {
/* this is a "bare" request to the redirect URI, indicating implicit flow using the fragment response_mode */
return oidc_proto_javascript_implicit(r, c);
}
/* this is not an authorization response or logout request */
/* check for "error" response */
if (oidc_util_request_has_parameter(r, "error")) {
// char *error = NULL, *descr = NULL;
// oidc_util_get_request_parameter(r, "error", &error);
// oidc_util_get_request_parameter(r, "error_description", &descr);
//
// /* send user facing error to browser */
// return oidc_util_html_send_error(r, error, descr, DONE);
oidc_handle_redirect_authorization_response(r, c, session);
}
/* something went wrong */
return oidc_util_html_send_error(r, c->error_template, "Invalid Request",
apr_psprintf(r->pool,
"The OpenID Connect callback URL received an invalid request: %s",
r->args), HTTP_INTERNAL_SERVER_ERROR);
}
/*
* main routine: handle OpenID Connect authentication
*/
static int oidc_check_userid_openidc(request_rec *r, oidc_cfg *c) {
if (c->redirect_uri == NULL) {
oidc_error(r,
"configuration error: the authentication type is set to \"openid-connect\" but OIDCRedirectURI has not been set");
return HTTP_INTERNAL_SERVER_ERROR;
}
/* check if this is a sub-request or an initial request */
if (ap_is_initial_req(r)) {
int rc = OK;
/* load the session from the request state; this will be a new "empty" session if no state exists */
oidc_session_t *session = NULL;
oidc_session_load(r, &session);
/* see if the initial request is to the redirect URI; this handles potential logout too */
if (oidc_util_request_matches_url(r, c->redirect_uri)) {
/* handle request to the redirect_uri */
rc = oidc_handle_redirect_uri_request(r, c, session);
/* free resources allocated for the session */
oidc_session_free(r, session);
return rc;
/* initial request to non-redirect URI, check if we have an existing session */
} else if (session->remote_user != NULL) {
/* set the user in the main request for further (incl. sub-request) processing */
r->user = (char *) session->remote_user;
/* this is initial request and we already have a session */
rc = oidc_handle_existing_session(r, c, session);
/* free resources allocated for the session */
oidc_session_free(r, session);
/* strip any cookies that we need to */
oidc_strip_cookies(r);
return rc;
}
/* free resources allocated for the session */
oidc_session_free(r, session);
/*
* else: initial request, we have no session and it is not an authorization or
* discovery response: just hit the default flow for unauthenticated users
*/
} else {
/* not an initial request, try to recycle what we've already established in the main request */
if (r->main != NULL)
r->user = r->main->user;
else if (r->prev != NULL)
r->user = r->prev->user;
if (r->user != NULL) {
/* this is a sub-request and we have a session (headers will have been scrubbed and set already) */
oidc_debug(r,
"recycling user '%s' from initial request for sub-request",
r->user);
/*
* apparently request state can get lost in sub-requests, so let's see
* if we need to restore id_token and/or claims from the session cache
*/
const char *s_id_token = oidc_request_state_get(r,
OIDC_IDTOKEN_CLAIMS_SESSION_KEY);
if (s_id_token == NULL) {
oidc_session_t *session = NULL;
oidc_session_load(r, &session);
oidc_copy_tokens_to_request_state(r, session, NULL, NULL);
/* free resources allocated for the session */
oidc_session_free(r, session);
}
/* strip any cookies that we need to */
oidc_strip_cookies(r);
return OK;
}
/*
* else: not initial request, but we could not find a session, so:
* just hit the default flow for unauthenticated users
*/
}
/* find out which action we need to take when encountering an unauthenticated request */
switch (oidc_dir_cfg_unauth_action(r)) {
case OIDC_UNAUTH_RETURN410:
return HTTP_GONE;
case OIDC_UNAUTH_RETURN401:
return HTTP_UNAUTHORIZED;
case OIDC_UNAUTH_PASS:
r->user = "";
return OK;
case OIDC_UNAUTH_AUTHENTICATE:
/* if this is a JavaScript call we won't redirect the user or create a state cookie */
if (apr_table_get(r->headers_in, "X-Requested-With") != NULL)
return HTTP_UNAUTHORIZED;
break;
}
/* else: no session (regardless of whether it is main or sub-request), go and authenticate the user */
return oidc_authenticate_user(r, c, NULL, oidc_get_current_url(r), NULL,
NULL, NULL, NULL);
}
/*
* generic Apache authentication hook for this module: dispatches to OpenID Connect or OAuth 2.0 specific routines
*/
int oidc_check_user_id(request_rec *r) {
oidc_cfg *c = ap_get_module_config(r->server->module_config,
&auth_openidc_module);
/* log some stuff about the incoming HTTP request */
oidc_debug(r, "incoming request: \"%s?%s\", ap_is_initial_req(r)=%d",
r->parsed_uri.path, r->args, ap_is_initial_req(r));
/* see if any authentication has been defined at all */
if (ap_auth_type(r) == NULL)
return DECLINED;
/* see if we've configured OpenID Connect user authentication for this request */
if (apr_strnatcasecmp((const char *) ap_auth_type(r), "openid-connect")
== 0)
return oidc_check_userid_openidc(r, c);
/* see if we've configured OAuth 2.0 access control for this request */
if (apr_strnatcasecmp((const char *) ap_auth_type(r), "oauth20") == 0)
return oidc_oauth_check_userid(r, c);
/* this is not for us but for some other handler */
return DECLINED;
}
/*
* get the claims and id_token from request state
*/
static void oidc_authz_get_claims_and_idtoken(request_rec *r, json_t **claims,
json_t **id_token) {
const char *s_claims = oidc_request_state_get(r, OIDC_CLAIMS_SESSION_KEY);
const char *s_id_token = oidc_request_state_get(r,
OIDC_IDTOKEN_CLAIMS_SESSION_KEY);
json_error_t json_error;
if (s_claims != NULL) {
*claims = json_loads(s_claims, 0, &json_error);
if (*claims == NULL) {
oidc_error(r, "could not restore claims from request state: %s",
json_error.text);
}
}
if (s_id_token != NULL) {
*id_token = json_loads(s_id_token, 0, &json_error);
if (*id_token == NULL) {
oidc_error(r, "could not restore id_token from request state: %s",
json_error.text);
}
}
}
#if MODULE_MAGIC_NUMBER_MAJOR >= 20100714
/*
* generic Apache >=2.4 authorization hook for this module
* handles both OpenID Connect and OAuth 2.0 in the same way, based on the claims stored in the request state
*/
authz_status oidc_authz_checker(request_rec *r, const char *require_args, const void *parsed_require_args) {
/* check for anonymous access and PASS mode */
if (r->user != NULL && strlen(r->user) == 0) {
r->user = NULL;
if (oidc_dir_cfg_unauth_action(r) == OIDC_UNAUTH_PASS) return AUTHZ_GRANTED;
}
/* get the set of claims from the request state (they've been set in the authentication part earlier) */
json_t *claims = NULL, *id_token = NULL;
oidc_authz_get_claims_and_idtoken(r, &claims, &id_token);
/* merge id_token claims (e.g. "iss") in to claims json object */
if (claims)
oidc_util_json_merge(id_token, claims);
/* dispatch to the >=2.4 specific authz routine */
authz_status rc = oidc_authz_worker24(r, claims ? claims : id_token, require_args);
/* cleanup */
if (claims) json_decref(claims);
if (id_token) json_decref(id_token);
if ((rc == AUTHZ_DENIED) && ap_auth_type(r)
&& (apr_strnatcasecmp((const char *) ap_auth_type(r), "oauth20")
== 0))
oidc_oauth_return_www_authenticate(r, "insufficient_scope", "Different scope(s) or other claims required");
return rc;
}
#else
/*
* generic Apache <2.4 authorization hook for this module
* handles both OpenID Connect and OAuth 2.0 in the same way, based on the claims stored in the request context
*/
int oidc_auth_checker(request_rec *r) {
/* check for anonymous access and PASS mode */
if (r->user != NULL && strlen(r->user) == 0) {
r->user = NULL;
if (oidc_dir_cfg_unauth_action(r) == OIDC_UNAUTH_PASS)
return OK;
}
/* get the set of claims from the request state (they've been set in the authentication part earlier) */
json_t *claims = NULL, *id_token = NULL;
oidc_authz_get_claims_and_idtoken(r, &claims, &id_token);
/* get the Require statements */
const apr_array_header_t * const reqs_arr = ap_requires(r);
/* see if we have any */
const require_line * const reqs =
reqs_arr ? (require_line *) reqs_arr->elts : NULL;
if (!reqs_arr) {
oidc_debug(r,
"no require statements found, so declining to perform authorization.");
return DECLINED;
}
/* merge id_token claims (e.g. "iss") in to claims json object */
if (claims)
oidc_util_json_merge(id_token, claims);
/* dispatch to the <2.4 specific authz routine */
int rc = oidc_authz_worker(r, claims ? claims : id_token, reqs,
reqs_arr->nelts);
/* cleanup */
if (claims)
json_decref(claims);
if (id_token)
json_decref(id_token);
if ((rc == HTTP_UNAUTHORIZED) && ap_auth_type(r)
&& (apr_strnatcasecmp((const char *) ap_auth_type(r), "oauth20")
== 0))
oidc_oauth_return_www_authenticate(r, "insufficient_scope",
"Different scope(s) or other claims required");
return rc;
}
#endif
extern const command_rec oidc_config_cmds[];
module AP_MODULE_DECLARE_DATA auth_openidc_module = {
STANDARD20_MODULE_STUFF,
oidc_create_dir_config,
oidc_merge_dir_config,
oidc_create_server_config,
oidc_merge_server_config,
oidc_config_cmds,
oidc_register_hooks
};
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_3160_1 |
crossvul-cpp_data_bad_5411_0 | /* $Id$ */
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "gd_intern.h"
/* 2.03: don't include zlib here or we can't build without PNG */
#include "gd.h"
#include "gdhelpers.h"
#include "gd_color.h"
#include "gd_errors.h"
/* 2.0.12: this now checks the clipping rectangle */
#define gdImageBoundsSafeMacro(im, x, y) (!((((y) < (im)->cy1) || ((y) > (im)->cy2)) || (((x) < (im)->cx1) || ((x) > (im)->cx2))))
#ifdef _OSD_POSIX /* BS2000 uses the EBCDIC char set instead of ASCII */
#define CHARSET_EBCDIC
#define __attribute__(any) /*nothing */
#endif
/*_OSD_POSIX*/
#ifndef CHARSET_EBCDIC
#define ASC(ch) ch
#else /*CHARSET_EBCDIC */
#define ASC(ch) gd_toascii[(unsigned char)ch]
static const unsigned char gd_toascii[256] = {
/*00 */ 0x00, 0x01, 0x02, 0x03, 0x85, 0x09, 0x86, 0x7f,
0x87, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /*................ */
/*10 */ 0x10, 0x11, 0x12, 0x13, 0x8f, 0x0a, 0x08, 0x97,
0x18, 0x19, 0x9c, 0x9d, 0x1c, 0x1d, 0x1e, 0x1f, /*................ */
/*20 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x92, 0x17, 0x1b,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, /*................ */
/*30 */ 0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04,
0x98, 0x99, 0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a, /*................ */
/*40 */ 0x20, 0xa0, 0xe2, 0xe4, 0xe0, 0xe1, 0xe3, 0xe5,
0xe7, 0xf1, 0x60, 0x2e, 0x3c, 0x28, 0x2b, 0x7c, /* .........`.<(+| */
/*50 */ 0x26, 0xe9, 0xea, 0xeb, 0xe8, 0xed, 0xee, 0xef,
0xec, 0xdf, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x9f, /*&.........!$*);. */
/*60 */ 0x2d, 0x2f, 0xc2, 0xc4, 0xc0, 0xc1, 0xc3, 0xc5,
0xc7, 0xd1, 0x5e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f,
/*-/........^,%_>?*/
/*70 */ 0xf8, 0xc9, 0xca, 0xcb, 0xc8, 0xcd, 0xce, 0xcf,
0xcc, 0xa8, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22, /*..........:#@'=" */
/*80 */ 0xd8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0xab, 0xbb, 0xf0, 0xfd, 0xfe, 0xb1, /*.abcdefghi...... */
/*90 */ 0xb0, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
0x71, 0x72, 0xaa, 0xba, 0xe6, 0xb8, 0xc6, 0xa4, /*.jklmnopqr...... */
/*a0 */ 0xb5, 0xaf, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0xa1, 0xbf, 0xd0, 0xdd, 0xde, 0xae, /*..stuvwxyz...... */
/*b0 */ 0xa2, 0xa3, 0xa5, 0xb7, 0xa9, 0xa7, 0xb6, 0xbc,
0xbd, 0xbe, 0xac, 0x5b, 0x5c, 0x5d, 0xb4, 0xd7, /*...........[\].. */
/*c0 */ 0xf9, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0xad, 0xf4, 0xf6, 0xf2, 0xf3, 0xf5, /*.ABCDEFGHI...... */
/*d0 */ 0xa6, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
0x51, 0x52, 0xb9, 0xfb, 0xfc, 0xdb, 0xfa, 0xff, /*.JKLMNOPQR...... */
/*e0 */ 0xd9, 0xf7, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0xb2, 0xd4, 0xd6, 0xd2, 0xd3, 0xd5, /*..STUVWXYZ...... */
/*f0 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0xb3, 0x7b, 0xdc, 0x7d, 0xda, 0x7e /*0123456789.{.}.~ */
};
#endif /*CHARSET_EBCDIC */
extern const int gdCosT[];
extern const int gdSinT[];
/**
* Group: Error Handling
*/
void gd_stderr_error(int priority, const char *format, va_list args)
{
switch (priority) {
case GD_ERROR:
fputs("GD Error: ", stderr);
break;
case GD_WARNING:
fputs("GD Warning: ", stderr);
break;
case GD_NOTICE:
fputs("GD Notice: ", stderr);
break;
case GD_INFO:
fputs("GD Info: ", stderr);
break;
case GD_DEBUG:
fputs("GD Debug: ", stderr);
break;
}
vfprintf(stderr, format, args);
fflush(stderr);
}
static gdErrorMethod gd_error_method = gd_stderr_error;
static void _gd_error_ex(int priority, const char *format, va_list args)
{
if (gd_error_method) {
gd_error_method(priority, format, args);
}
}
void gd_error(const char *format, ...)
{
va_list args;
va_start(args, format);
_gd_error_ex(GD_WARNING, format, args);
va_end(args);
}
void gd_error_ex(int priority, const char *format, ...)
{
va_list args;
va_start(args, format);
_gd_error_ex(priority, format, args);
va_end(args);
}
/*
Function: gdSetErrorMethod
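Example (a minimal sketch of a custom handler; the handler name is
illustrative, but the signature matches gd_stderr_error above):
(start code)
static void my_error_handler(int priority, const char *format, va_list args)
{
// forward everything to stderr, ignoring the priority
vfprintf(stderr, format, args);
}
// ...
gdSetErrorMethod(my_error_handler);
(end code)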
*/
BGD_DECLARE(void) gdSetErrorMethod(gdErrorMethod error_method)
{
gd_error_method = error_method;
}
/*
Function: gdClearErrorMethod
*/
BGD_DECLARE(void) gdClearErrorMethod(void)
{
gd_error_method = gd_stderr_error;
}
static void gdImageBrushApply (gdImagePtr im, int x, int y);
static void gdImageTileApply (gdImagePtr im, int x, int y);
BGD_DECLARE(int) gdImageGetTrueColorPixel (gdImagePtr im, int x, int y);
/**
* Group: Creation and Destruction
*/
/*
Function: gdImageCreate
gdImageCreate is called to create palette-based images, with no
more than 256 colors. The image must eventually be destroyed using
gdImageDestroy().
Parameters:
sx - The image width.
sy - The image height.
Returns:
A pointer to the new image or NULL if an error occurred.
Example:
(start code)
gdImagePtr im;
im = gdImageCreate(64, 64);
// ... Use the image ...
gdImageDestroy(im);
(end code)
See Also:
<gdImageCreateTrueColor>
*/
BGD_DECLARE(gdImagePtr) gdImageCreate (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof (unsigned char *), sy)) {
return NULL;
}
if (overflow2(sizeof (unsigned char), sx)) {
return NULL;
}
im = (gdImage *) gdCalloc(1, sizeof(gdImage));
if (!im) {
return NULL;
}
/* Row-major ever since gd 1.3 */
im->pixels = (unsigned char **) gdMalloc (sizeof (unsigned char *) * sy);
if (!im->pixels) {
gdFree(im);
return NULL;
}
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; (i < sy); i++) {
/* Row-major ever since gd 1.3 */
im->pixels[i] = (unsigned char *) gdCalloc (sx, sizeof (unsigned char));
if (!im->pixels[i]) {
for (--i ; i >= 0; i--) {
gdFree(im->pixels[i]);
}
gdFree(im->pixels);
gdFree(im);
return NULL;
}
}
im->sx = sx;
im->sy = sy;
im->colorsTotal = 0;
im->transparent = (-1);
im->interlace = 0;
im->thick = 1;
im->AA = 0;
for (i = 0; (i < gdMaxColors); i++) {
im->open[i] = 1;
};
im->trueColor = 0;
im->tpixels = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->res_x = GD_RESOLUTION;
im->res_y = GD_RESOLUTION;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
}
/*
Function: gdImageCreateTrueColor
<gdImageCreateTrueColor> is called to create truecolor images,
with an essentially unlimited number of colors. Invoke
<gdImageCreateTrueColor> with the x and y dimensions of the
desired image. <gdImageCreateTrueColor> returns a <gdImagePtr>
to the new image, or NULL if unable to allocate the image. The
image must eventually be destroyed using <gdImageDestroy>().
Truecolor images are always filled with black at creation
time. There is no concept of a "background" color index.
Parameters:
sx - The image width.
sy - The image height.
Returns:
A pointer to the new image or NULL if an error occurred.
Example:
(start code)
gdImagePtr im;
im = gdImageCreateTrueColor(64, 64);
// ... Use the image ...
gdImageDestroy(im);
(end code)
See Also:
<gdImageCreateTrueColor>
*/
BGD_DECLARE(gdImagePtr) gdImageCreateTrueColor (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof (int *), sy)) {
return 0;
}
if (overflow2(sizeof(int), sx)) {
return NULL;
}
im = (gdImage *) gdMalloc (sizeof (gdImage));
if (!im) {
return 0;
}
memset (im, 0, sizeof (gdImage));
im->tpixels = (int **) gdMalloc (sizeof (int *) * sy);
if (!im->tpixels) {
gdFree(im);
return 0;
}
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; (i < sy); i++) {
im->tpixels[i] = (int *) gdCalloc (sx, sizeof (int));
if (!im->tpixels[i]) {
/* 2.0.34 */
i--;
while (i >= 0) {
gdFree(im->tpixels[i]);
i--;
}
gdFree(im->tpixels);
gdFree(im);
return 0;
}
}
im->sx = sx;
im->sy = sy;
im->transparent = (-1);
im->interlace = 0;
im->trueColor = 1;
/* 2.0.2: alpha blending is now on by default, and saving of alpha is
off by default. This allows font antialiasing to work as expected
on the first try in JPEGs -- quite important -- and also allows
for smaller PNGs when saving of alpha channel is not really
desired, which it usually isn't! */
im->saveAlphaFlag = 0;
im->alphaBlendingFlag = 1;
im->thick = 1;
im->AA = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->res_x = GD_RESOLUTION;
im->res_y = GD_RESOLUTION;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
}
/*
Function: gdImageDestroy
<gdImageDestroy> is used to free the memory associated with an
image. It is important to invoke <gdImageDestroy> before exiting
your program or assigning a new image to a <gdImagePtr> variable.
Parameters:
im - Pointer to the gdImage to delete.
Returns:
Nothing.
Example:
(start code)
gdImagePtr im;
im = gdImageCreate(10, 10);
// ... Use the image ...
// Now destroy it
gdImageDestroy(im);
(end code)
*/
BGD_DECLARE(void) gdImageDestroy (gdImagePtr im)
{
int i;
if (im->pixels) {
for (i = 0; (i < im->sy); i++) {
gdFree (im->pixels[i]);
}
gdFree (im->pixels);
}
if (im->tpixels) {
for (i = 0; (i < im->sy); i++) {
gdFree (im->tpixels[i]);
}
gdFree (im->tpixels);
}
if (im->polyInts) {
gdFree (im->polyInts);
}
if (im->style) {
gdFree (im->style);
}
gdFree (im);
}
/**
* Group: Color
*/
/**
* Function: gdImageColorClosest
*
* Gets the closest color of the image
*
* This is a simplified variant of <gdImageColorClosestAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The closest color already available in the palette for palette images;
* the color value of the given components for truecolor images.
*
* See also:
* - <gdImageColorExact>
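*
* Example (a minimal sketch; the color values are illustrative):
* (start code)
* gdImagePtr im = gdImageCreate(64, 64);
* gdImageColorAllocate(im, 0, 0, 0);            // background
* int red = gdImageColorClosest(im, 255, 0, 0); // nearest palette entry
* gdImageDestroy(im);
* (end code)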
*/
BGD_DECLARE(int) gdImageColorClosest (gdImagePtr im, int r, int g, int b)
{
return gdImageColorClosestAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorClosestAlpha
*
* Gets the closest color of the image
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
* a - The value of the alpha component.
*
* Returns:
* The closest color already available in the palette for palette images;
* the color value of the given components for truecolor images.
*
* See also:
* - <gdImageColorExactAlpha>
*/
BGD_DECLARE(int) gdImageColorClosestAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int i;
long rd, gd, bd, ad;
int ct = (-1);
int first = 1;
long mindist = 0;
if (im->trueColor) {
return gdTrueColorAlpha (r, g, b, a);
}
for (i = 0; (i < (im->colorsTotal)); i++) {
long dist;
if (im->open[i]) {
continue;
}
rd = (im->red[i] - r);
gd = (im->green[i] - g);
bd = (im->blue[i] - b);
/* gd 2.02: whoops, was - b (thanks to David Marwood) */
/* gd 2.16: was blue rather than alpha! Geez! Thanks to
Artur Jakub Jerzak */
ad = (im->alpha[i] - a);
dist = rd * rd + gd * gd + bd * bd + ad * ad;
if (first || (dist < mindist)) {
mindist = dist;
ct = i;
first = 0;
}
}
return ct;
}
/* This code is taken from http://www.acm.org/jgt/papers/SmithLyons96/hwb_rgb.html, an article
* on colour conversion to/from RGB and HWB colour systems.
* It has been modified to return the converted value via a pointer parameter.
*/
#define RETURN_HWB(h, w, b) {HWB->H = h; HWB->W = w; HWB->B = b; return HWB;}
#define RETURN_RGB(r, g, b) {RGB->R = r; RGB->G = g; RGB->B = b; return RGB;}
#define HWB_UNDEFINED -1
#define SETUP_RGB(s, r, g, b) {s.R = r/255.0; s.G = g/255.0; s.B = b/255.0;}
#define MIN(a,b) ((a)<(b)?(a):(b))
#define MIN3(a,b,c) ((a)<(b)?(MIN(a,c)):(MIN(b,c)))
#define MAX(a,b) ((a)<(b)?(b):(a))
#define MAX3(a,b,c) ((a)<(b)?(MAX(b,c)):(MAX(a,c)))
/*
* Theoretically, hue 0 (pure red) is identical to hue 6 in these transforms. Pure
* red always maps to 6 in this implementation. Therefore UNDEFINED can be
* defined as 0 in situations where only unsigned numbers are desired.
*/
typedef struct {
float R, G, B;
}
RGBType;
typedef struct {
float H, W, B;
}
HWBType;
static HWBType *
RGB_to_HWB (RGBType RGB, HWBType * HWB)
{
/*
* RGB are each on [0, 1]. W and B are returned on [0, 1] and H is
* returned on [0, 6]. Exception: H is returned UNDEFINED if W == 1 - B.
*/
float R = RGB.R, G = RGB.G, B = RGB.B, w, v, b, f;
int i;
w = MIN3 (R, G, B);
v = MAX3 (R, G, B);
b = 1 - v;
if (v == w)
RETURN_HWB (HWB_UNDEFINED, w, b);
f = (R == w) ? G - B : ((G == w) ? B - R : R - G);
i = (R == w) ? 3 : ((G == w) ? 5 : 1);
RETURN_HWB (i - f / (v - w), w, b);
}
static float
HWB_Diff (int r1, int g1, int b1, int r2, int g2, int b2)
{
RGBType RGB1, RGB2;
HWBType HWB1, HWB2;
float diff;
SETUP_RGB (RGB1, r1, g1, b1);
SETUP_RGB (RGB2, r2, g2, b2);
RGB_to_HWB (RGB1, &HWB1);
RGB_to_HWB (RGB2, &HWB2);
/*
* I made this bit up; it seems to produce OK results, and it is certainly
* more visually correct than the current RGB metric. (PJW)
*/
if ((HWB1.H == HWB_UNDEFINED) || (HWB2.H == HWB_UNDEFINED)) {
diff = 0; /* Undefined hues always match... */
} else {
diff = fabs (HWB1.H - HWB2.H);
if (diff > 3) {
diff = 6 - diff; /* Remember, it's a colour circle */
}
}
diff =
diff * diff + (HWB1.W - HWB2.W) * (HWB1.W - HWB2.W) + (HWB1.B -
HWB2.B) * (HWB1.B -
HWB2.B);
return diff;
}
#if 0
/*
* This is not actually used, but is here for completeness, in case someone wants to
* use the HWB stuff for anything else...
*/
static RGBType *
HWB_to_RGB (HWBType HWB, RGBType * RGB)
{
/*
* H is given on [0, 6] or UNDEFINED. W and B are given on [0, 1].
* RGB are each returned on [0, 1].
*/
float h = HWB.H, w = HWB.W, b = HWB.B, v, n, f;
int i;
v = 1 - b;
if (h == HWB_UNDEFINED)
RETURN_RGB (v, v, v);
i = floor (h);
f = h - i;
if (i & 1)
f = 1 - f; /* if i is odd */
n = w + f * (v - w); /* linear interpolation between w and v */
switch (i) {
case 6:
case 0:
RETURN_RGB (v, n, w);
case 1:
RETURN_RGB (n, v, w);
case 2:
RETURN_RGB (w, v, n);
case 3:
RETURN_RGB (w, n, v);
case 4:
RETURN_RGB (n, w, v);
case 5:
RETURN_RGB (v, w, n);
}
return RGB;
}
#endif
/*
Function: gdImageColorClosestHWB
*/
BGD_DECLARE(int) gdImageColorClosestHWB (gdImagePtr im, int r, int g, int b)
{
int i;
/* long rd, gd, bd; */
int ct = (-1);
int first = 1;
float mindist = 0;
if (im->trueColor) {
return gdTrueColor (r, g, b);
}
for (i = 0; (i < (im->colorsTotal)); i++) {
float dist;
if (im->open[i]) {
continue;
}
dist = HWB_Diff (im->red[i], im->green[i], im->blue[i], r, g, b);
if (first || (dist < mindist)) {
mindist = dist;
ct = i;
first = 0;
}
}
return ct;
}
/**
* Function: gdImageColorExact
*
* Gets the exact color of the image
*
* This is a simplified variant of <gdImageColorExactAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The exact color already available in the palette for palette images; if
* there is no exact color, -1 is returned.
* For truecolor images the color value of the given components is returned.
*
* See also:
* - <gdImageColorClosest>
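*
* Example (a minimal sketch; note the -1 check for a missing exact match):
* (start code)
* int c = gdImageColorExact(im, 255, 0, 0);
* if (c == -1) {
* c = gdImageColorAllocate(im, 255, 0, 0);
* }
* (end code)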
*/
BGD_DECLARE(int) gdImageColorExact (gdImagePtr im, int r, int g, int b)
{
return gdImageColorExactAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorExactAlpha
*
* Gets the exact color of the image
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
* a - The value of the alpha component.
*
* Returns:
* The exact color already available in the palette for palette images; if
* there is no exact color, -1 is returned.
* For truecolor images the color value of the given components is returned.
*
* See also:
* - <gdImageColorClosestAlpha>
* - <gdTrueColorAlpha>
*/
BGD_DECLARE(int) gdImageColorExactAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int i;
if (im->trueColor) {
return gdTrueColorAlpha (r, g, b, a);
}
for (i = 0; (i < (im->colorsTotal)); i++) {
if (im->open[i]) {
continue;
}
if ((im->red[i] == r) &&
(im->green[i] == g) && (im->blue[i] == b) && (im->alpha[i] == a)) {
return i;
}
}
return -1;
}
/**
* Function: gdImageColorAllocate
*
* Allocates a color
*
* This is a simplified variant of <gdImageColorAllocateAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The color value, or -1 on failure (e.g. if the palette is full).
*
* See also:
* - <gdImageColorDeallocate>
*/
BGD_DECLARE(int) gdImageColorAllocate (gdImagePtr im, int r, int g, int b)
{
return gdImageColorAllocateAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorAllocateAlpha
*
* Allocates a color
*
* This is typically used for palette images, but can be used for truecolor
* images as well.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
* a - The value of the alpha component.
*
* Returns:
* The color value, or -1 if the palette is full.
*
* See also:
* - <gdImageColorDeallocate>
*/
BGD_DECLARE(int) gdImageColorAllocateAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int i;
int ct = (-1);
if (im->trueColor) {
return gdTrueColorAlpha (r, g, b, a);
}
for (i = 0; (i < (im->colorsTotal)); i++) {
if (im->open[i]) {
ct = i;
break;
}
}
if (ct == (-1)) {
ct = im->colorsTotal;
if (ct == gdMaxColors) {
return -1;
}
im->colorsTotal++;
}
im->red[ct] = r;
im->green[ct] = g;
im->blue[ct] = b;
im->alpha[ct] = a;
im->open[ct] = 0;
return ct;
}
/*
Function: gdImageColorResolve
gdImageColorResolve is an alternative for the code fragment
(start code)
if ((color=gdImageColorExact(im,R,G,B)) < 0)
if ((color=gdImageColorAllocate(im,R,G,B)) < 0)
color=gdImageColorClosest(im,R,G,B);
(end code)
in a single function. Its advantage is that it is guaranteed to
return a color index in one search over the color table.
*/
BGD_DECLARE(int) gdImageColorResolve (gdImagePtr im, int r, int g, int b)
{
return gdImageColorResolveAlpha (im, r, g, b, gdAlphaOpaque);
}
/*
Function: gdImageColorResolveAlpha
*/
BGD_DECLARE(int) gdImageColorResolveAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int c;
int ct = -1;
int op = -1;
long rd, gd, bd, ad, dist;
long mindist = 4 * 255 * 255; /* init to max poss dist */
if (im->trueColor) {
return gdTrueColorAlpha (r, g, b, a);
}
for (c = 0; c < im->colorsTotal; c++) {
if (im->open[c]) {
op = c; /* Save open slot */
continue; /* Color not in use */
}
if (c == im->transparent) {
/* don't ever resolve to the color that has
* been designated as the transparent color */
continue;
}
rd = (long) (im->red[c] - r);
gd = (long) (im->green[c] - g);
bd = (long) (im->blue[c] - b);
ad = (long) (im->alpha[c] - a);
dist = rd * rd + gd * gd + bd * bd + ad * ad;
if (dist < mindist) {
if (dist == 0) {
return c; /* Return exact match color */
}
mindist = dist;
ct = c;
}
}
/* no exact match. We now know closest, but first try to allocate exact */
if (op == -1) {
op = im->colorsTotal;
if (op == gdMaxColors) {
/* No room for more colors */
return ct; /* Return closest available color */
}
im->colorsTotal++;
}
im->red[op] = r;
im->green[op] = g;
im->blue[op] = b;
im->alpha[op] = a;
im->open[op] = 0;
return op; /* Return newly allocated color */
}
/**
* Function: gdImageColorDeallocate
*
* Removes a palette entry
*
* This is a no-op for truecolor images.
*
* Parameters:
* im - The image.
* color - The palette index.
*
* See also:
* - <gdImageColorAllocate>
* - <gdImageColorAllocateAlpha>
*/
BGD_DECLARE(void) gdImageColorDeallocate (gdImagePtr im, int color)
{
if (im->trueColor || (color >= gdMaxColors) || (color < 0)) {
return;
}
/* Mark it open. */
im->open[color] = 1;
}
/**
* Function: gdImageColorTransparent
*
* Sets the transparent color of the image
*
* Parameters:
* im - The image.
* color - The color.
*
* See also:
* - <gdImageGetTransparent>
*/
BGD_DECLARE(void) gdImageColorTransparent (gdImagePtr im, int color)
{
if (color < 0) {
return;
}
if (!im->trueColor) {
if((color < -1) || (color >= gdMaxColors)) {
return;
}
if (im->transparent != -1) {
im->alpha[im->transparent] = gdAlphaOpaque;
}
if (color != -1) {
im->alpha[color] = gdAlphaTransparent;
}
}
im->transparent = color;
}
/*
Function: gdImagePaletteCopy
*/
BGD_DECLARE(void) gdImagePaletteCopy (gdImagePtr to, gdImagePtr from)
{
int i;
int x, y, p;
int xlate[256];
if (to->trueColor) {
return;
}
if (from->trueColor) {
return;
}
for (i = 0; i < 256; i++) {
xlate[i] = -1;
};
for (y = 0; y < (to->sy); y++) {
for (x = 0; x < (to->sx); x++) {
/* Optimization: no gdImageGetPixel */
p = to->pixels[y][x];
if (xlate[p] == -1) {
/* This ought to use HWB, but we don't have an alpha-aware
version of that yet. */
xlate[p] =
gdImageColorClosestAlpha (from, to->red[p], to->green[p],
to->blue[p], to->alpha[p]);
/*printf("Mapping %d (%d, %d, %d, %d) to %d (%d, %d, %d, %d)\n", */
/* p, to->red[p], to->green[p], to->blue[p], to->alpha[p], */
/* xlate[p], from->red[xlate[p]], from->green[xlate[p]], from->blue[xlate[p]], from->alpha[xlate[p]]); */
};
/* Optimization: no gdImageSetPixel */
to->pixels[y][x] = xlate[p];
};
};
for (i = 0; (i < (from->colorsTotal)); i++) {
/*printf("Copying color %d (%d, %d, %d, %d)\n", i, from->red[i], from->blue[i], from->green[i], from->alpha[i]); */
to->red[i] = from->red[i];
to->blue[i] = from->blue[i];
to->green[i] = from->green[i];
to->alpha[i] = from->alpha[i];
to->open[i] = 0;
};
for (i = from->colorsTotal; (i < to->colorsTotal); i++) {
to->open[i] = 1;
};
to->colorsTotal = from->colorsTotal;
}
/*
Function: gdImageColorReplace
*/
BGD_DECLARE(int) gdImageColorReplace (gdImagePtr im, int src, int dst)
{
register int x, y;
int n = 0;
if (src == dst) {
return 0;
}
#define REPLACING_LOOP(pixel) do { \
for (y = im->cy1; y <= im->cy2; y++) { \
for (x = im->cx1; x <= im->cx2; x++) { \
if (pixel(im, x, y) == src) { \
gdImageSetPixel(im, x, y, dst); \
n++; \
} \
} \
} \
} while (0)
if (im->trueColor) {
REPLACING_LOOP(gdImageTrueColorPixel);
} else {
REPLACING_LOOP(gdImagePalettePixel);
}
#undef REPLACING_LOOP
return n;
}
/*
Function: gdImageColorReplaceThreshold
*/
BGD_DECLARE(int) gdImageColorReplaceThreshold (gdImagePtr im, int src, int dst, float threshold)
{
register int x, y;
int n = 0;
if (src == dst) {
return 0;
}
#define REPLACING_LOOP(pixel) do { \
for (y = im->cy1; y <= im->cy2; y++) { \
for (x = im->cx1; x <= im->cx2; x++) { \
if (gdColorMatch(im, src, pixel(im, x, y), threshold)) { \
gdImageSetPixel(im, x, y, dst); \
n++; \
} \
} \
} \
} while (0)
if (im->trueColor) {
REPLACING_LOOP(gdImageTrueColorPixel);
} else {
REPLACING_LOOP(gdImagePalettePixel);
}
#undef REPLACING_LOOP
return n;
}
static int colorCmp (const void *x, const void *y)
{
int a = *(int const *)x;
int b = *(int const *)y;
return (a > b) - (a < b);
}
/*
Function: gdImageColorReplaceArray
*/
BGD_DECLARE(int) gdImageColorReplaceArray (gdImagePtr im, int len, int *src, int *dst)
{
register int x, y;
int c, *d, *base;
int i, n = 0;
if (len <= 0 || src == dst) {
return 0;
}
if (len == 1) {
return gdImageColorReplace(im, src[0], dst[0]);
}
if (overflow2(len, sizeof(int)<<1)) {
return -1;
}
base = (int *)gdMalloc(len * (sizeof(int)<<1));
if (!base) {
return -1;
}
for (i = 0; i < len; i++) {
base[(i<<1)] = src[i];
base[(i<<1)+1] = dst[i];
}
qsort(base, len, sizeof(int)<<1, colorCmp);
#define REPLACING_LOOP(pixel) do { \
for (y = im->cy1; y <= im->cy2; y++) { \
for (x = im->cx1; x <= im->cx2; x++) { \
c = pixel(im, x, y); \
if ( (d = (int *)bsearch(&c, base, len, sizeof(int)<<1, colorCmp)) ) { \
gdImageSetPixel(im, x, y, d[1]); \
n++; \
} \
} \
} \
} while (0)
if (im->trueColor) {
REPLACING_LOOP(gdImageTrueColorPixel);
} else {
REPLACING_LOOP(gdImagePalettePixel);
}
#undef REPLACING_LOOP
gdFree(base);
return n;
}
/*
Function: gdImageColorReplaceCallback
*/
BGD_DECLARE(int) gdImageColorReplaceCallback (gdImagePtr im, gdCallbackImageColor callback)
{
int c, d, n = 0;
if (!callback) {
return 0;
}
if (im->trueColor) {
register int x, y;
for (y = im->cy1; y <= im->cy2; y++) {
for (x = im->cx1; x <= im->cx2; x++) {
c = gdImageTrueColorPixel(im, x, y);
if ( (d = callback(im, c)) != c) {
gdImageSetPixel(im, x, y, d);
n++;
}
}
}
} else { /* palette */
int *sarr, *darr;
int k, len = 0;
sarr = (int *)gdCalloc(im->colorsTotal, sizeof(int));
if (!sarr) {
return -1;
}
for (c = 0; c < im->colorsTotal; c++) {
if (!im->open[c]) {
sarr[len++] = c;
}
}
darr = (int *)gdCalloc(len, sizeof(int));
if (!darr) {
gdFree(sarr);
return -1;
}
for (k = 0; k < len; k++) {
darr[k] = callback(im, sarr[k]);
}
n = gdImageColorReplaceArray(im, k, sarr, darr);
gdFree(darr);
gdFree(sarr);
}
return n;
}
/* 2.0.10: before the drawing routines, some code to clip points that are
* outside the drawing window. Nick Atty (nick@canalplan.org.uk)
*
* This is the Sutherland Hodgman Algorithm, as implemented by
* Duvanenko, Robbins and Gyurcsik - SH(DRG) for short. See Dr Dobb's
* Journal, January 1996, pp107-110 and 116-117
*
* Given the end points of a line, and a bounding rectangle (which we
* know to be from (0,0) to (SX,SY)), adjust the endpoints to be on
* the edges of the rectangle if the line should be drawn at all,
* otherwise return a failure code */
/* this does "one-dimensional" clipping: note that the second time it
is called, all the x parameters refer to height and the y to width
- the comments ignore this (if you can understand it when it's
looking at the X parameters, it should become clear what happens on
the second call!) The code is simplified from that in the article,
as we know that gd images always start at (0,0) */
/* 2.0.26, TBB: we now have to respect a clipping rectangle, it won't
necessarily start at 0. */
static int
clip_1d (int *x0, int *y0, int *x1, int *y1, int mindim, int maxdim)
{
double m; /* gradient of line */
if (*x0 < mindim) {
/* start of line is left of window */
if (*x1 < mindim) /* as is the end, so the line never cuts the window */
return 0;
m = (*y1 - *y0) / (double) (*x1 - *x0); /* calculate the slope of the line */
/* adjust x0 to be on the left boundary (ie to be zero), and y0 to match */
*y0 -= (int)(m * (*x0 - mindim));
*x0 = mindim;
/* now, perhaps, adjust the far end of the line as well */
if (*x1 > maxdim) {
*y1 += m * (maxdim - *x1);
*x1 = maxdim;
}
return 1;
}
if (*x0 > maxdim) {
/* start of line is right of window -
complement of above */
if (*x1 > maxdim) /* as is the end, so the line misses the window */
return 0;
m = (*y1 - *y0) / (double) (*x1 - *x0); /* calculate the slope of the line */
*y0 += (int)(m * (maxdim - *x0)); /* adjust so point is on the right
boundary */
*x0 = maxdim;
/* now, perhaps, adjust the end of the line */
if (*x1 < mindim) {
*y1 -= (int)(m * (*x1 - mindim));
*x1 = mindim;
}
return 1;
}
/* the final case - the start of the line is inside the window */
if (*x1 > maxdim) {
/* other end is outside to the right */
m = (*y1 - *y0) / (double) (*x1 - *x0); /* calculate the slope of the line */
*y1 += (int)(m * (maxdim - *x1));
*x1 = maxdim;
return 1;
}
if (*x1 < mindim) {
/* other end is outside to the left */
m = (*y1 - *y0) / (double) (*x1 - *x0); /* calculate the slope of the line */
*y1 -= (int)(m * (*x1 - mindim));
*x1 = mindim;
return 1;
}
/* only get here if both points are inside the window */
return 1;
}
/* end of line clipping code */
/**
* Group: Pixels
*/
/*
Function: gdImageSetPixel
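Besides a plain color value, the special constants gdStyled,
gdStyledBrushed, gdBrushed, gdTiled and gdAntiAliased may be passed as
the color, as handled in the switch below.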
*/
BGD_DECLARE(void) gdImageSetPixel (gdImagePtr im, int x, int y, int color)
{
int p;
switch (color) {
case gdStyled:
if (!im->style) {
/* Refuse to draw if no style is set. */
return;
} else {
p = im->style[im->stylePos++];
}
if (p != (gdTransparent)) {
gdImageSetPixel (im, x, y, p);
}
im->stylePos = im->stylePos % im->styleLength;
break;
case gdStyledBrushed:
if (!im->style) {
/* Refuse to draw if no style is set. */
return;
}
p = im->style[im->stylePos++];
if ((p != gdTransparent) && (p != 0)) {
gdImageSetPixel (im, x, y, gdBrushed);
}
im->stylePos = im->stylePos % im->styleLength;
break;
case gdBrushed:
gdImageBrushApply (im, x, y);
break;
case gdTiled:
gdImageTileApply (im, x, y);
break;
case gdAntiAliased:
/* This shouldn't happen (2.0.26) because we just call
gdImageAALine now, but do something sane. */
gdImageSetPixel(im, x, y, im->AA_color);
break;
default:
if (gdImageBoundsSafeMacro (im, x, y)) {
if (im->trueColor) {
switch (im->alphaBlendingFlag) {
default:
case gdEffectReplace:
im->tpixels[y][x] = color;
break;
case gdEffectAlphaBlend:
case gdEffectNormal:
im->tpixels[y][x] = gdAlphaBlend(im->tpixels[y][x], color);
break;
case gdEffectOverlay :
im->tpixels[y][x] = gdLayerOverlay(im->tpixels[y][x], color);
break;
case gdEffectMultiply :
im->tpixels[y][x] = gdLayerMultiply(im->tpixels[y][x], color);
break;
}
} else {
im->pixels[y][x] = color;
}
}
break;
}
}
static void
gdImageBrushApply (gdImagePtr im, int x, int y)
{
int lx, ly;
int hy;
int hx;
int x1, y1, x2, y2;
int srcx, srcy;
if (!im->brush) {
return;
}
hy = gdImageSY (im->brush) / 2;
y1 = y - hy;
y2 = y1 + gdImageSY (im->brush);
hx = gdImageSX (im->brush) / 2;
x1 = x - hx;
x2 = x1 + gdImageSX (im->brush);
srcy = 0;
if (im->trueColor) {
if (im->brush->trueColor) {
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent (im->brush)) {
gdImageSetPixel (im, lx, ly, p);
}
srcx++;
}
srcy++;
}
} else {
/* 2.0.12: Brush palette, image truecolor (thanks to Thorben Kundinger
for pointing out the issue) */
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p, tc;
p = gdImageGetPixel (im->brush, srcx, srcy);
tc = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent (im->brush)) {
gdImageSetPixel (im, lx, ly, tc);
}
srcx++;
}
srcy++;
}
}
} else {
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetPixel (im->brush, srcx, srcy);
/* Allow for non-square brushes! */
if (p != gdImageGetTransparent (im->brush)) {
/* Truecolor brush. Very slow
on a palette destination. */
if (im->brush->trueColor) {
gdImageSetPixel (im, lx, ly,
gdImageColorResolveAlpha (im,
gdTrueColorGetRed
(p),
gdTrueColorGetGreen
(p),
gdTrueColorGetBlue
(p),
gdTrueColorGetAlpha
(p)));
} else {
gdImageSetPixel (im, lx, ly, im->brushColorMap[p]);
}
}
srcx++;
}
srcy++;
}
}
}
static void
gdImageTileApply (gdImagePtr im, int x, int y)
{
gdImagePtr tile = im->tile;
int srcx, srcy;
int p;
if (!tile) {
return;
}
srcx = x % gdImageSX (tile);
srcy = y % gdImageSY (tile);
if (im->trueColor) {
p = gdImageGetPixel (tile, srcx, srcy);
if (p != gdImageGetTransparent (tile)) {
if (!tile->trueColor) {
p = gdTrueColorAlpha(tile->red[p], tile->green[p], tile->blue[p], tile->alpha[p]);
}
gdImageSetPixel (im, x, y, p);
}
} else {
p = gdImageGetPixel (tile, srcx, srcy);
/* Allow for transparency */
if (p != gdImageGetTransparent (tile)) {
if (tile->trueColor) {
/* Truecolor tile. Very slow
on a palette destination. */
gdImageSetPixel (im, x, y,
gdImageColorResolveAlpha (im,
gdTrueColorGetRed
(p),
gdTrueColorGetGreen
(p),
gdTrueColorGetBlue
(p),
gdTrueColorGetAlpha
(p)));
} else {
gdImageSetPixel (im, x, y, im->tileColorMap[p]);
}
}
}
}
/**
* Function: gdImageGetPixel
*
* Gets a pixel color as stored in the image.
*
* Parameters:
* im - The image.
* x - The x-coordinate.
* y - The y-coordinate.
*
* See also:
* - <gdImageGetTrueColorPixel>
* - <gdImagePalettePixel>
* - <gdImageTrueColorPixel>
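*
* Returns:
* The pixel value as stored in the image: a palette index for palette
* images, an ARGB value for truecolor images, or 0 when the coordinates
* fall outside the clipping rectangle.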
*/
BGD_DECLARE(int) gdImageGetPixel (gdImagePtr im, int x, int y)
{
if (gdImageBoundsSafeMacro (im, x, y)) {
if (im->trueColor) {
return im->tpixels[y][x];
} else {
return im->pixels[y][x];
}
} else {
return 0;
}
}
/**
* Function: gdImageGetTrueColorPixel
*
* Gets a pixel color always as truecolor value.
*
* Parameters:
* im - The image.
* x - The x-coordinate.
* y - The y-coordinate.
*
* See also:
* - <gdImageGetPixel>
* - <gdImageTrueColorPixel>
*/
BGD_DECLARE(int) gdImageGetTrueColorPixel (gdImagePtr im, int x, int y)
{
int p = gdImageGetPixel (im, x, y);
if (!im->trueColor) {
return gdTrueColorAlpha (im->red[p], im->green[p], im->blue[p],
(im->transparent == p) ? gdAlphaTransparent :
im->alpha[p]);
} else {
return p;
}
}
/**
* Group: Primitives
*/
/*
Function: gdImageAABlend
NO-OP, kept for library compatibility.
*/
BGD_DECLARE(void) gdImageAABlend (gdImagePtr im)
{
(void)im;
}
static void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col);
static void _gdImageFilledHRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
int color);
static void gdImageHLine(gdImagePtr im, int y, int x1, int x2, int col)
{
if (im->thick > 1) {
int thickhalf = im->thick >> 1;
_gdImageFilledHRectangle(im, x1, y - thickhalf, x2, y + im->thick - thickhalf - 1, col);
} else {
if (x2 < x1) {
int t = x2;
x2 = x1;
x1 = t;
}
for (; x1 <= x2; x1++) {
gdImageSetPixel(im, x1, y, col);
}
}
return;
}
static void gdImageVLine(gdImagePtr im, int x, int y1, int y2, int col)
{
if (im->thick > 1) {
int thickhalf = im->thick >> 1;
gdImageFilledRectangle(im, x - thickhalf, y1, x + im->thick - thickhalf - 1, y2, col);
} else {
if (y2 < y1) {
int t = y1;
y1 = y2;
y2 = t;
}
for (; y1 <= y2; y1++) {
gdImageSetPixel(im, x, y1, col);
}
}
return;
}
/*
Function: gdImageLine
Bresenham as presented in Foley & Van Dam.
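Example (a sketch; draws a diagonal across the whole image):
(start code)
gdImageLine(im, 0, 0, gdImageSX(im) - 1, gdImageSY(im) - 1, color);
(end code)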
*/
BGD_DECLARE(void) gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
int wid;
int w, wstart;
int thick;
if (color == gdAntiAliased) {
/*
gdAntiAliased passed as color: use the much faster, much cheaper
and equally attractive gdImageAALine implementation. That
clips too, so don't clip twice.
*/
gdImageAALine(im, x1, y1, x2, y2, im->AA_color);
return;
}
/* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no
points need to be drawn. 2.0.26, TBB: clip to edges of clipping
rectangle. We were getting away with this because gdImageSetPixel
is used for actual drawing, but this is still more efficient and opens
the way to skip per-pixel bounds checking in the future. */
if (clip_1d (&x1, &y1, &x2, &y2, im->cx1, im->cx2) == 0)
return;
if (clip_1d (&y1, &x1, &y2, &x2, im->cy1, im->cy2) == 0)
return;
thick = im->thick;
dx = abs (x2 - x1);
dy = abs (y2 - y1);
if (dx == 0) {
gdImageVLine(im, x1, y1, y2, color);
return;
} else if (dy == 0) {
gdImageHLine(im, y1, x1, x2, color);
return;
}
if (dy <= dx) {
/* More-or-less horizontal. use wid for vertical stroke */
/* Doug Claar: watch out for NaN in atan2 (2.0.5) */
/* 2.0.12: Michael Schwartz: divide rather than multiply;
TBB: but watch out for /0! */
double ac = cos (atan2 (dy, dx));
if (ac != 0) {
wid = thick / ac;
} else {
wid = 1;
}
if (wid == 0) {
wid = 1;
}
d = 2 * dy - dx;
incr1 = 2 * dy;
incr2 = 2 * (dy - dx);
if (x1 > x2) {
x = x2;
y = y2;
ydirflag = (-1);
xend = x1;
} else {
x = x1;
y = y1;
ydirflag = 1;
xend = x2;
}
/* Set up line thickness */
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++)
gdImageSetPixel (im, x, w, color);
if (((y2 - y1) * ydirflag) > 0) {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y++;
d += incr2;
}
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++)
gdImageSetPixel (im, x, w, color);
}
} else {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y--;
d += incr2;
}
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++)
gdImageSetPixel (im, x, w, color);
}
}
} else {
/* More-or-less vertical. use wid for horizontal stroke */
/* 2.0.12: Michael Schwartz: divide rather than multiply;
TBB: but watch out for /0! */
double as = sin (atan2 (dy, dx));
if (as != 0) {
wid = thick / as;
} else {
wid = 1;
}
if (wid == 0)
wid = 1;
d = 2 * dx - dy;
incr1 = 2 * dx;
incr2 = 2 * (dx - dy);
if (y1 > y2) {
y = y2;
x = x2;
yend = y1;
xdirflag = (-1);
} else {
y = y1;
x = x1;
yend = y2;
xdirflag = 1;
}
/* Set up line thickness */
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++)
gdImageSetPixel (im, w, y, color);
if (((x2 - x1) * xdirflag) > 0) {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x++;
d += incr2;
}
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++)
gdImageSetPixel (im, w, y, color);
}
} else {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x--;
d += incr2;
}
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++)
gdImageSetPixel (im, w, y, color);
}
}
}
}
static void dashedSet (gdImagePtr im, int x, int y, int color,
int *onP, int *dashStepP, int wid, int vert);
/*
Function: gdImageDashedLine
*/
BGD_DECLARE(void) gdImageDashedLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
int dashStep = 0;
int on = 1;
int wid;
int vert;
int thick = im->thick;
dx = abs (x2 - x1);
dy = abs (y2 - y1);
if (dy <= dx) {
/* More-or-less horizontal. Use wid for vertical stroke */
/* 2.0.12: Michael Schwartz: divide rather than multiply;
TBB: but watch out for /0! */
double ac = cos (atan2 (dy, dx));
if (ac != 0) {
wid = thick / ac;
} else {
wid = 1;
}
vert = 1;
d = 2 * dy - dx;
incr1 = 2 * dy;
incr2 = 2 * (dy - dx);
if (x1 > x2) {
x = x2;
y = y2;
ydirflag = (-1);
xend = x1;
} else {
x = x1;
y = y1;
ydirflag = 1;
xend = x2;
}
dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
if (((y2 - y1) * ydirflag) > 0) {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y++;
d += incr2;
}
dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
}
} else {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y--;
d += incr2;
}
dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
}
}
} else {
/* 2.0.12: Michael Schwartz: divide rather than multiply;
TBB: but watch out for /0! */
double as = sin (atan2 (dy, dx));
if (as != 0) {
wid = thick / as;
} else {
wid = 1;
}
vert = 0;
d = 2 * dx - dy;
incr1 = 2 * dx;
incr2 = 2 * (dx - dy);
if (y1 > y2) {
y = y2;
x = x2;
yend = y1;
xdirflag = (-1);
} else {
y = y1;
x = x1;
yend = y2;
xdirflag = 1;
}
dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
if (((x2 - x1) * xdirflag) > 0) {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x++;
d += incr2;
}
dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
}
} else {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x--;
d += incr2;
}
dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
}
}
}
}
static void
dashedSet (gdImagePtr im, int x, int y, int color,
int *onP, int *dashStepP, int wid, int vert)
{
int dashStep = *dashStepP;
int on = *onP;
int w, wstart;
dashStep++;
if (dashStep == gdDashSize) {
dashStep = 0;
on = !on;
}
if (on) {
if (vert) {
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++)
gdImageSetPixel (im, x, w, color);
} else {
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++)
gdImageSetPixel (im, w, y, color);
}
}
*dashStepP = dashStep;
*onP = on;
}
/*
Function: gdImageBoundsSafe
*/
BGD_DECLARE(int) gdImageBoundsSafe (gdImagePtr im, int x, int y)
{
return gdImageBoundsSafeMacro (im, x, y);
}
/**
* Function: gdImageChar
*
* Draws a single character.
*
* Parameters:
* im - The image to draw onto.
* f - The raster font.
* x - The x coordinate of the upper left pixel.
* y - The y coordinate of the upper left pixel.
* c - The character.
* color - The color.
*
* Variants:
* - <gdImageCharUp>
*
* See also:
* - <gdFontPtr>
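*
* Example (a sketch, assuming the gdfontl.h font accessor is linked in):
* (start code)
* #include "gdfontl.h"
* // ...
* gdImageChar(im, gdFontGetLarge(), 0, 0, 'A', color);
* (end code)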
*/
BGD_DECLARE(void) gdImageChar (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
int cx, cy;
int px, py;
int fline;
cx = 0;
cy = 0;
#ifdef CHARSET_EBCDIC
c = ASC (c);
#endif /*CHARSET_EBCDIC */
if ((c < f->offset) || (c >= (f->offset + f->nchars))) {
return;
}
fline = (c - f->offset) * f->h * f->w;
for (py = y; (py < (y + f->h)); py++) {
for (px = x; (px < (x + f->w)); px++) {
if (f->data[fline + cy * f->w + cx]) {
gdImageSetPixel (im, px, py, color);
}
cx++;
}
cx = 0;
cy++;
}
}
/**
* Function: gdImageCharUp
*/
BGD_DECLARE(void) gdImageCharUp (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
int cx, cy;
int px, py;
int fline;
cx = 0;
cy = 0;
#ifdef CHARSET_EBCDIC
c = ASC (c);
#endif /*CHARSET_EBCDIC */
if ((c < f->offset) || (c >= (f->offset + f->nchars))) {
return;
}
fline = (c - f->offset) * f->h * f->w;
for (py = y; (py > (y - f->w)); py--) {
for (px = x; (px < (x + f->h)); px++) {
if (f->data[fline + cy * f->w + cx]) {
gdImageSetPixel (im, px, py, color);
}
cy++;
}
cy = 0;
cx++;
}
}
/**
* Function: gdImageString
*
* Draws a character string.
*
* Parameters:
* im - The image to draw onto.
* f - The raster font.
* x - The x coordinate of the upper left pixel.
* y - The y coordinate of the upper left pixel.
* c - The character string.
* color - The color.
*
* Variants:
* - <gdImageStringUp>
* - <gdImageString16>
* - <gdImageStringUp16>
*
* See also:
* - <gdFontPtr>
* - <gdImageStringTTF>
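*
* Example (a sketch, assuming the gdfontl.h font accessor is linked in):
* (start code)
* gdImageString(im, gdFontGetLarge(), 0, 0, (unsigned char *) "hello", color);
* (end code)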
*/
BGD_DECLARE(void) gdImageString (gdImagePtr im, gdFontPtr f,
int x, int y, unsigned char *s, int color)
{
int i;
int l;
l = strlen ((char *) s);
for (i = 0; (i < l); i++) {
gdImageChar (im, f, x, y, s[i], color);
x += f->w;
}
}
/**
* Function: gdImageStringUp
*/
BGD_DECLARE(void) gdImageStringUp (gdImagePtr im, gdFontPtr f,
int x, int y, unsigned char *s, int color)
{
int i;
int l;
l = strlen ((char *) s);
for (i = 0; (i < l); i++) {
gdImageCharUp (im, f, x, y, s[i], color);
y -= f->w;
}
}
static int strlen16 (unsigned short *s);
/**
* Function: gdImageString16
*/
BGD_DECLARE(void) gdImageString16 (gdImagePtr im, gdFontPtr f,
int x, int y, unsigned short *s, int color)
{
int i;
int l;
l = strlen16 (s);
for (i = 0; (i < l); i++) {
gdImageChar (im, f, x, y, s[i], color);
x += f->w;
}
}
/**
* Function: gdImageStringUp16
*/
BGD_DECLARE(void) gdImageStringUp16 (gdImagePtr im, gdFontPtr f,
int x, int y, unsigned short *s, int color)
{
int i;
int l;
l = strlen16 (s);
for (i = 0; (i < l); i++) {
gdImageCharUp (im, f, x, y, s[i], color);
y -= f->w;
}
}
static int
strlen16 (unsigned short *s)
{
int len = 0;
while (*s) {
s++;
len++;
}
return len;
}
#ifndef HAVE_LSQRT
/* If you don't have a nice square root function for longs, you can use
** this hack
*/
long
lsqrt (long n)
{
long result = (long) sqrt ((double) n);
return result;
}
#endif
/* s and e are integers modulo 360 (degrees), with 0 degrees
being the rightmost extreme and degrees changing clockwise.
cx and cy are the center in pixels; w and h are the horizontal
and vertical diameter in pixels. */
/*
Function: gdImageArc
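Example (a sketch; draws the upper half of an 80x40 ellipse outline,
since 0 degrees is the rightmost extreme and degrees increase clockwise):
(start code)
gdImageArc(im, 50, 50, 80, 40, 180, 360, color);
(end code)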
*/
BGD_DECLARE(void) gdImageArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e,
int color)
{
gdImageFilledArc (im, cx, cy, w, h, s, e, color, gdNoFill);
}
/*
Function: gdImageFilledArc
*/
BGD_DECLARE(void) gdImageFilledArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e,
int color, int style)
{
gdPoint pts[363];
int i, pti;
int lx = 0, ly = 0;
int fx = 0, fy = 0;
if ((s % 360) == (e % 360)) {
s = 0;
e = 360;
} else {
if (s > 360) {
s = s % 360;
}
if (e > 360) {
e = e % 360;
}
while (s < 0) {
s += 360;
}
while (e < s) {
e += 360;
}
if (s == e) {
s = 0;
e = 360;
}
}
for (i = s, pti = 1; (i <= e); i++, pti++) {
int x, y;
x = ((long) gdCosT[i % 360] * (long) w / (2 * 1024)) + cx;
y = ((long) gdSinT[i % 360] * (long) h / (2 * 1024)) + cy;
if (i != s) {
if (!(style & gdChord)) {
if (style & gdNoFill) {
gdImageLine (im, lx, ly, x, y, color);
} else {
if (y == ly) {
pti--; /* don't add this point */
if (((i > 270 || i < 90) && x > lx) || ((i > 90 && i < 270) && x < lx)) {
/* replace the old x coord, if increasing on the
right side or decreasing on the left side */
pts[pti].x = x;
}
} else {
pts[pti].x = x;
pts[pti].y = y;
}
}
}
} else {
fx = x;
fy = y;
if (!(style & (gdChord | gdNoFill))) {
pts[0].x = cx;
pts[0].y = cy;
pts[pti].x = x;
pts[pti].y = y;
}
}
lx = x;
ly = y;
}
if (style & gdChord) {
if (style & gdNoFill) {
if (style & gdEdged) {
gdImageLine (im, cx, cy, lx, ly, color);
gdImageLine (im, cx, cy, fx, fy, color);
}
gdImageLine (im, fx, fy, lx, ly, color);
} else {
pts[0].x = fx;
pts[0].y = fy;
pts[1].x = lx;
pts[1].y = ly;
pts[2].x = cx;
pts[2].y = cy;
gdImageFilledPolygon (im, pts, 3, color);
}
} else {
if (style & gdNoFill) {
if (style & gdEdged) {
gdImageLine (im, cx, cy, lx, ly, color);
gdImageLine (im, cx, cy, fx, fy, color);
}
} else {
pts[pti].x = cx;
pts[pti].y = cy;
gdImageFilledPolygon(im, pts, pti+1, color);
}
}
}
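/*
 * Usage sketch for gdImageFilledArc (illustrative; "im" and "color" are
 * hypothetical). The style argument combines gdPie, gdChord, gdNoFill and
 * gdEdged:
 *
 *   gdImageFilledArc(im, 100, 100, 150, 150, 0, 90, color, gdPie);
 *   gdImageFilledArc(im, 100, 100, 150, 150, 0, 90, color,
 *                    gdChord | gdNoFill | gdEdged);
 */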
/*
Function: gdImageEllipse
*/
BGD_DECLARE(void) gdImageEllipse(gdImagePtr im, int mx, int my, int w, int h, int c)
{
int x=0,mx1=0,mx2=0,my1=0,my2=0;
long aq,bq,dx,dy,r,rx,ry,a,b;
a=w>>1;
b=h>>1;
gdImageSetPixel(im,mx+a, my, c);
gdImageSetPixel(im,mx-a, my, c);
mx1 = mx-a;
my1 = my;
mx2 = mx+a;
my2 = my;
aq = a * a;
bq = b * b;
dx = aq << 1;
dy = bq << 1;
r = a * bq;
rx = r << 1;
ry = 0;
x = a;
while (x > 0) {
if (r > 0) {
my1++;
my2--;
ry +=dx;
r -=ry;
}
if (r <= 0) {
x--;
mx1++;
mx2--;
rx -=dy;
r +=rx;
}
gdImageSetPixel(im,mx1, my1, c);
gdImageSetPixel(im,mx1, my2, c);
gdImageSetPixel(im,mx2, my1, c);
gdImageSetPixel(im,mx2, my2, c);
}
}
/*
Function: gdImageFilledEllipse
*/
BGD_DECLARE(void) gdImageFilledEllipse (gdImagePtr im, int mx, int my, int w, int h, int c)
{
int x=0,mx1=0,mx2=0,my1=0,my2=0;
long aq,bq,dx,dy,r,rx,ry,a,b;
int i;
int old_y2;
a=w>>1;
b=h>>1;
for (x = mx-a; x <= mx+a; x++) {
gdImageSetPixel(im, x, my, c);
}
mx1 = mx-a;
my1 = my;
mx2 = mx+a;
my2 = my;
aq = a * a;
bq = b * b;
dx = aq << 1;
dy = bq << 1;
r = a * bq;
rx = r << 1;
ry = 0;
x = a;
old_y2=-2;
while (x > 0) {
if (r > 0) {
my1++;
my2--;
ry +=dx;
r -=ry;
}
if (r <= 0) {
x--;
mx1++;
mx2--;
rx -=dy;
r +=rx;
}
if(old_y2!=my2) {
for(i=mx1; i<=mx2; i++) {
gdImageSetPixel(im,i,my2,c);
gdImageSetPixel(im,i,my1,c);
}
}
old_y2 = my2;
}
}
/*
Function: gdImageFillToBorder
*/
BGD_DECLARE(void) gdImageFillToBorder (gdImagePtr im, int x, int y, int border, int color)
{
int lastBorder;
/* Seek left */
int leftLimit, rightLimit;
int i;
	int restoreAlphaBlending;
if (border < 0 || color < 0) {
/* Refuse to fill to a non-solid border */
return;
}
if (!im->trueColor) {
if ((color > (im->colorsTotal - 1)) || (border > (im->colorsTotal - 1)) || (color < 0)) {
return;
}
}
leftLimit = (-1);
	restoreAlphaBlending = im->alphaBlendingFlag;
im->alphaBlendingFlag = 0;
if (x >= im->sx) {
x = im->sx - 1;
} else if (x < 0) {
x = 0;
}
if (y >= im->sy) {
y = im->sy - 1;
} else if (y < 0) {
y = 0;
}
for (i = x; (i >= 0); i--) {
if (gdImageGetPixel (im, i, y) == border) {
break;
}
gdImageSetPixel (im, i, y, color);
leftLimit = i;
}
if (leftLimit == (-1)) {
		im->alphaBlendingFlag = restoreAlphaBlending;
return;
}
/* Seek right */
rightLimit = x;
for (i = (x + 1); (i < im->sx); i++) {
if (gdImageGetPixel (im, i, y) == border) {
break;
}
gdImageSetPixel (im, i, y, color);
rightLimit = i;
}
/* Look at lines above and below and start paints */
/* Above */
if (y > 0) {
lastBorder = 1;
for (i = leftLimit; (i <= rightLimit); i++) {
int c;
c = gdImageGetPixel (im, i, y - 1);
if (lastBorder) {
if ((c != border) && (c != color)) {
gdImageFillToBorder (im, i, y - 1, border, color);
lastBorder = 0;
}
} else if ((c == border) || (c == color)) {
lastBorder = 1;
}
}
}
/* Below */
if (y < ((im->sy) - 1)) {
lastBorder = 1;
for (i = leftLimit; (i <= rightLimit); i++) {
int c = gdImageGetPixel (im, i, y + 1);
if (lastBorder) {
if ((c != border) && (c != color)) {
gdImageFillToBorder (im, i, y + 1, border, color);
lastBorder = 0;
}
} else if ((c == border) || (c == color)) {
lastBorder = 1;
}
}
}
	im->alphaBlendingFlag = restoreAlphaBlending;
}
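/*
 * Usage sketch (illustrative; "im", "border" and "fill" are hypothetical,
 * allocated colors): flood the interior of a circle outline up to the
 * border color.
 *
 *   gdImageArc(im, 50, 50, 60, 60, 0, 360, border);
 *   gdImageFillToBorder(im, 50, 50, border, fill);
 */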
/*
* set the pixel at (x,y) and its 4-connected neighbors
* with the same pixel value to the new pixel value nc (new color).
* A 4-connected neighbor: pixel above, below, left, or right of a pixel.
* ideas from comp.graphics discussions.
 * For tiled fills, a flag buffer is mandatory, since the tile image can
 * contain the same color as the fill color. To avoid bloating the normal
 * fill code, tiled filling is handled by a second, private function.
*/
static int gdImageTileGet (gdImagePtr im, int x, int y)
{
int srcx, srcy;
int tileColor,p;
if (!im->tile) {
return -1;
}
srcx = x % gdImageSX(im->tile);
srcy = y % gdImageSY(im->tile);
p = gdImageGetPixel(im->tile, srcx, srcy);
if (p == im->tile->transparent) {
tileColor = im->transparent;
} else if (im->trueColor) {
if (im->tile->trueColor) {
tileColor = p;
} else {
tileColor = gdTrueColorAlpha( gdImageRed(im->tile,p), gdImageGreen(im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
}
} else {
if (im->tile->trueColor) {
tileColor = gdImageColorResolveAlpha(im, gdTrueColorGetRed (p), gdTrueColorGetGreen (p), gdTrueColorGetBlue (p), gdTrueColorGetAlpha (p));
} else {
tileColor = gdImageColorResolveAlpha(im, gdImageRed (im->tile,p), gdImageGreen (im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
}
}
return tileColor;
}
/* horizontal segment of scan line y */
struct seg {
int y, xl, xr, dy;
};
/* max depth of stack */
#define FILL_MAX ((int)(im->sy*im->sx)/4)
#define FILL_PUSH(Y, XL, XR, DY) \
if (sp<stack+FILL_MAX && Y+(DY)>=0 && Y+(DY)<wy2) \
{sp->y = Y; sp->xl = XL; sp->xr = XR; sp->dy = DY; sp++;}
#define FILL_POP(Y, XL, XR, DY) \
{sp--; Y = sp->y+(DY = sp->dy); XL = sp->xl; XR = sp->xr;}
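/* FILL_PUSH/FILL_POP implement an explicit stack of horizontal segments for
 * the scan-line flood fill below: each entry records a parent span (y, xl..xr)
 * and the direction dy in which the adjacent row is to be examined. */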
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc);
/*
Function: gdImageFill
*/
BGD_DECLARE(void) gdImageFill(gdImagePtr im, int x, int y, int nc)
{
int l, x1, x2, dy;
int oc; /* old pixel value */
int wx2,wy2;
int alphablending_bak;
/* stack of filled segments */
/* struct seg stack[FILL_MAX],*sp = stack; */
struct seg *stack;
struct seg *sp;
if (!im->trueColor && nc > (im->colorsTotal - 1)) {
return;
}
alphablending_bak = im->alphaBlendingFlag;
im->alphaBlendingFlag = 0;
if (nc==gdTiled) {
_gdImageFillTiled(im,x,y,nc);
im->alphaBlendingFlag = alphablending_bak;
return;
}
wx2=im->sx;
wy2=im->sy;
oc = gdImageGetPixel(im, x, y);
if (oc==nc || x<0 || x>wx2 || y<0 || y>wy2) {
im->alphaBlendingFlag = alphablending_bak;
return;
}
/* Do not use the 4 neighbors implementation with
* small images
*/
if (im->sx < 4) {
int ix = x, iy = y, c;
do {
do {
c = gdImageGetPixel(im, ix, iy);
if (c != oc) {
goto done;
}
gdImageSetPixel(im, ix, iy, nc);
} while(ix++ < (im->sx -1));
ix = x;
} while(iy++ < (im->sy -1));
goto done;
}
if(overflow2(im->sy, im->sx)) {
return;
}
if(overflow2(sizeof(struct seg), ((im->sy * im->sx) / 4))) {
return;
}
stack = (struct seg *)gdMalloc(sizeof(struct seg) * ((int)(im->sy*im->sx)/4));
if (!stack) {
return;
}
sp = stack;
/* required! */
FILL_PUSH(y,x,x,1);
/* seed segment (popped 1st) */
FILL_PUSH(y+1, x, x, -1);
while (sp>stack) {
FILL_POP(y, x1, x2, dy);
for (x=x1; x>=0 && gdImageGetPixel(im,x, y)==oc; x--) {
gdImageSetPixel(im,x, y, nc);
}
if (x>=x1) {
goto skip;
}
l = x+1;
/* leak on left? */
if (l<x1) {
FILL_PUSH(y, l, x1-1, -dy);
}
x = x1+1;
do {
for (; x<=wx2 && gdImageGetPixel(im,x, y)==oc; x++) {
gdImageSetPixel(im, x, y, nc);
}
FILL_PUSH(y, l, x-1, dy);
/* leak on right? */
if (x>x2+1) {
FILL_PUSH(y, x2+1, x-1, -dy);
}
skip:
for (x++; x<=x2 && (gdImageGetPixel(im, x, y)!=oc); x++);
l = x;
} while (x<=x2);
}
gdFree(stack);
done:
im->alphaBlendingFlag = alphablending_bak;
}
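/*
 * Usage sketch (illustrative; "im", "color" and "tileIm" are hypothetical):
 * fill the region containing (10,10) with a solid color, or with the
 * current tile when gdTiled is passed.
 *
 *   gdImageFill(im, 10, 10, color);
 *
 *   gdImageSetTile(im, tileIm);
 *   gdImageFill(im, 10, 10, gdTiled);
 */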
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc)
{
int l, x1, x2, dy;
int oc; /* old pixel value */
int wx2,wy2;
/* stack of filled segments */
struct seg *stack;
struct seg *sp;
char *pts;
if (!im->tile) {
return;
}
wx2=im->sx;
wy2=im->sy;
if(overflow2(im->sy, im->sx)) {
return;
}
if(overflow2(sizeof(struct seg), ((im->sy * im->sx) / 4))) {
return;
}
pts = (char *) gdCalloc(im->sy * im->sx, sizeof(char));
if (!pts) {
return;
}
stack = (struct seg *)gdMalloc(sizeof(struct seg) * ((int)(im->sy*im->sx)/4));
if (!stack) {
gdFree(pts);
return;
}
sp = stack;
oc = gdImageGetPixel(im, x, y);
/* required! */
FILL_PUSH(y,x,x,1);
/* seed segment (popped 1st) */
FILL_PUSH(y+1, x, x, -1);
while (sp>stack) {
FILL_POP(y, x1, x2, dy);
for (x=x1; x>=0 && (!pts[y + x*wy2] && gdImageGetPixel(im,x,y)==oc); x--) {
nc = gdImageTileGet(im,x,y);
pts[y + x*wy2]=1;
gdImageSetPixel(im,x, y, nc);
}
if (x>=x1) {
goto skip;
}
l = x+1;
/* leak on left? */
if (l<x1) {
FILL_PUSH(y, l, x1-1, -dy);
}
x = x1+1;
do {
for (; x<wx2 && (!pts[y + x*wy2] && gdImageGetPixel(im,x, y)==oc) ; x++) {
if (pts[y + x*wy2]) {
/* we should never be here */
break;
}
nc = gdImageTileGet(im,x,y);
pts[y + x*wy2]=1;
gdImageSetPixel(im, x, y, nc);
}
FILL_PUSH(y, l, x-1, dy);
/* leak on right? */
if (x>x2+1) {
FILL_PUSH(y, x2+1, x-1, -dy);
}
skip:
for (x++; x<=x2 && (pts[y + x*wy2] || gdImageGetPixel(im,x, y)!=oc); x++);
l = x;
} while (x<=x2);
}
gdFree(pts);
gdFree(stack);
}
/**
* Function: gdImageRectangle
*
* Draws a rectangle.
*
* Parameters:
* im - The image.
* x1 - The x-coordinate of one of the corners.
* y1 - The y-coordinate of one of the corners.
* x2 - The x-coordinate of another corner.
* y2 - The y-coordinate of another corner.
* color - The color.
*
* See also:
* - <gdImageFilledRectangle>
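 *
 * Example (illustrative; "im" and "color" are assumed, and the outline is
 * drawn 3 pixels thick):
 * | gdImageSetThickness(im, 3);
 * | gdImageRectangle(im, 10, 10, 90, 60, color);
 * | gdImageSetThickness(im, 1);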
*/
BGD_DECLARE(void) gdImageRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int thick = im->thick;
if (x1 == x2 && y1 == y2 && thick == 1) {
gdImageSetPixel(im, x1, y1, color);
return;
}
if (y2 < y1) {
int t = y1;
y1 = y2;
y2 = t;
}
if (x2 < x1) {
int t = x1;
x1 = x2;
x2 = t;
}
if (thick > 1) {
int cx, cy, x1ul, y1ul, x2lr, y2lr;
int half = thick >> 1;
x1ul = x1 - half;
y1ul = y1 - half;
x2lr = x2 + half;
y2lr = y2 + half;
cy = y1ul + thick;
while (cy-- > y1ul) {
cx = x1ul - 1;
while (cx++ < x2lr) {
gdImageSetPixel(im, cx, cy, color);
}
}
cy = y2lr - thick;
while (cy++ < y2lr) {
cx = x1ul - 1;
while (cx++ < x2lr) {
gdImageSetPixel(im, cx, cy, color);
}
}
cy = y1ul + thick - 1;
while (cy++ < y2lr -thick) {
cx = x1ul - 1;
while (cx++ < x1ul + thick) {
gdImageSetPixel(im, cx, cy, color);
}
}
cy = y1ul + thick - 1;
while (cy++ < y2lr -thick) {
cx = x2lr - thick - 1;
while (cx++ < x2lr) {
gdImageSetPixel(im, cx, cy, color);
}
}
return;
} else {
if (x1 == x2 || y1 == y2) {
gdImageLine(im, x1, y1, x2, y2, color);
} else {
gdImageLine(im, x1, y1, x2, y1, color);
gdImageLine(im, x1, y2, x2, y2, color);
gdImageLine(im, x1, y1 + 1, x1, y2 - 1, color);
gdImageLine(im, x2, y1 + 1, x2, y2 - 1, color);
}
}
}
static void _gdImageFilledHRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
int color)
{
int x, y;
if (x1 == x2 && y1 == y2) {
gdImageSetPixel(im, x1, y1, color);
return;
}
if (x1 > x2) {
x = x1;
x1 = x2;
x2 = x;
}
if (y1 > y2) {
y = y1;
y1 = y2;
y2 = y;
}
if (x1 < 0) {
x1 = 0;
}
if (x2 >= gdImageSX(im)) {
x2 = gdImageSX(im) - 1;
}
if (y1 < 0) {
y1 = 0;
}
if (y2 >= gdImageSY(im)) {
y2 = gdImageSY(im) - 1;
}
for (x = x1; (x <= x2); x++) {
for (y = y1; (y <= y2); y++) {
gdImageSetPixel (im, x, y, color);
}
}
}
static void _gdImageFilledVRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
int color)
{
int x, y;
if (x1 == x2 && y1 == y2) {
gdImageSetPixel(im, x1, y1, color);
return;
}
if (x1 > x2) {
x = x1;
x1 = x2;
x2 = x;
}
if (y1 > y2) {
y = y1;
y1 = y2;
y2 = y;
}
if (x1 < 0) {
x1 = 0;
}
if (x2 >= gdImageSX(im)) {
x2 = gdImageSX(im) - 1;
}
if (y1 < 0) {
y1 = 0;
}
if (y2 >= gdImageSY(im)) {
y2 = gdImageSY(im) - 1;
}
for (y = y1; (y <= y2); y++) {
for (x = x1; (x <= x2); x++) {
gdImageSetPixel (im, x, y, color);
}
}
}
/*
Function: gdImageFilledRectangle
*/
BGD_DECLARE(void) gdImageFilledRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
int color)
{
_gdImageFilledVRectangle(im, x1, y1, x2, y2, color);
}
/**
* Group: Cloning and Copying
*/
/**
* Function: gdImageClone
*
* Clones an image
*
* Creates an exact duplicate of the given image.
*
* Parameters:
* src - The source image.
*
* Returns:
* The cloned image on success, NULL on failure.
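 *
 * Example (illustrative sketch):
 * | gdImagePtr copy = gdImageClone(im);
 * | if (copy) {
 * |     gdImageDestroy(copy);
 * | }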
*/
BGD_DECLARE(gdImagePtr) gdImageClone (gdImagePtr src) {
gdImagePtr dst;
register int i, x;
if (src->trueColor) {
dst = gdImageCreateTrueColor(src->sx , src->sy);
} else {
dst = gdImageCreate(src->sx , src->sy);
}
if (dst == NULL) {
return NULL;
}
if (src->trueColor == 0) {
dst->colorsTotal = src->colorsTotal;
for (i = 0; i < gdMaxColors; i++) {
dst->red[i] = src->red[i];
dst->green[i] = src->green[i];
dst->blue[i] = src->blue[i];
dst->alpha[i] = src->alpha[i];
dst->open[i] = src->open[i];
}
for (i = 0; i < src->sy; i++) {
for (x = 0; x < src->sx; x++) {
dst->pixels[i][x] = src->pixels[i][x];
}
}
} else {
for (i = 0; i < src->sy; i++) {
for (x = 0; x < src->sx; x++) {
dst->tpixels[i][x] = src->tpixels[i][x];
}
}
}
	/* dst->style is not allocated at this point, so the style array cannot
	 * be copied here; it is copied via gdImageSetStyle() below, which also
	 * allocates it, and stylePos is restored there. */
dst->interlace = src->interlace;
dst->alphaBlendingFlag = src->alphaBlendingFlag;
dst->saveAlphaFlag = src->saveAlphaFlag;
dst->AA = src->AA;
dst->AA_color = src->AA_color;
dst->AA_dont_blend = src->AA_dont_blend;
dst->cx1 = src->cx1;
dst->cy1 = src->cy1;
dst->cx2 = src->cx2;
dst->cy2 = src->cy2;
dst->res_x = src->res_x;
dst->res_y = src->res_y;
dst->paletteQuantizationMethod = src->paletteQuantizationMethod;
dst->paletteQuantizationSpeed = src->paletteQuantizationSpeed;
dst->paletteQuantizationMinQuality = src->paletteQuantizationMinQuality;
	dst->paletteQuantizationMaxQuality = src->paletteQuantizationMaxQuality;
dst->interpolation_id = src->interpolation_id;
dst->interpolation = src->interpolation;
if (src->brush) {
dst->brush = gdImageClone(src->brush);
}
if (src->tile) {
dst->tile = gdImageClone(src->tile);
}
	if (src->style) {
		gdImageSetStyle(dst, src->style, src->styleLength);
		dst->stylePos = src->stylePos;
	}
for (i = 0; i < gdMaxColors; i++) {
dst->brushColorMap[i] = src->brushColorMap[i];
dst->tileColorMap[i] = src->tileColorMap[i];
}
if (src->polyAllocated > 0) {
dst->polyAllocated = src->polyAllocated;
for (i = 0; i < src->polyAllocated; i++) {
dst->polyInts[i] = src->polyInts[i];
}
}
return dst;
}
/**
* Function: gdImageCopy
*
* Copy an area of an image to another image
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* w - The width of the area to copy.
* h - The height of the area to copy.
*
* See also:
* - <gdImageCopyMerge>
* - <gdImageCopyMergeGray>
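 *
 * Example (illustrative; copies a 100x50 block from (20,20) in "src" to
 * (0,0) in "dst"):
 * | gdImageCopy(dst, src, 0, 0, 20, 20, 100, 50);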
*/
BGD_DECLARE(void) gdImageCopy (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX,
int srcY, int w, int h)
{
int c;
int x, y;
int tox, toy;
int i;
int colorMap[gdMaxColors];
if (dst->trueColor) {
/* 2.0: much easier when the destination is truecolor. */
/* 2.0.10: needs a transparent-index check that is still valid if
		 * the source is not truecolor. Thanks to Frank Warmerdam.
*/
if (src->trueColor) {
for (y = 0; (y < h); y++) {
for (x = 0; (x < w); x++) {
int c = gdImageGetTrueColorPixel (src, srcX + x, srcY + y);
if (c != src->transparent) {
gdImageSetPixel (dst, dstX + x, dstY + y, c);
}
}
}
} else {
/* source is palette based */
for (y = 0; (y < h); y++) {
for (x = 0; (x < w); x++) {
int c = gdImageGetPixel (src, srcX + x, srcY + y);
if (c != src->transparent) {
gdImageSetPixel(dst, dstX + x, dstY + y, gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]));
}
}
}
}
return;
}
for (i = 0; (i < gdMaxColors); i++) {
colorMap[i] = (-1);
}
toy = dstY;
for (y = srcY; (y < (srcY + h)); y++) {
tox = dstX;
for (x = srcX; (x < (srcX + w)); x++) {
int nc;
int mapTo;
c = gdImageGetPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent (src) == c) {
tox++;
continue;
}
/* Have we established a mapping for this color? */
if (src->trueColor) {
/* 2.05: remap to the palette available in the
destination image. This is slow and
works badly, but it beats crashing! Thanks
to Padhrig McCarthy. */
mapTo = gdImageColorResolveAlpha (dst,
gdTrueColorGetRed (c),
gdTrueColorGetGreen (c),
gdTrueColorGetBlue (c),
gdTrueColorGetAlpha (c));
} else if (colorMap[c] == (-1)) {
/* If it's the same image, mapping is trivial */
if (dst == src) {
nc = c;
} else {
/* Get best match possible. This
function never returns error. */
nc = gdImageColorResolveAlpha (dst,
src->red[c], src->green[c],
src->blue[c], src->alpha[c]);
}
colorMap[c] = nc;
mapTo = colorMap[c];
} else {
mapTo = colorMap[c];
}
gdImageSetPixel (dst, tox, toy, mapTo);
tox++;
}
toy++;
}
}
/**
* Function: gdImageCopyMerge
*
* Copy an area of an image to another image ignoring alpha
*
 * The source area will be copied to the destination area by merging the pixels.
*
* Note:
* This function is a substitute for real alpha channel operations,
* so it doesn't pay attention to the alpha channel.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* w - The width of the area to copy.
* h - The height of the area to copy.
* pct - The percentage in range 0..100.
*
* See also:
* - <gdImageCopy>
* - <gdImageCopyMergeGray>
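 *
 * Example (illustrative; merges the source area into "dst" at 50% opacity):
 * | gdImageCopyMerge(dst, src, 0, 0, 0, 0, 100, 50, 50);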
*/
BGD_DECLARE(void) gdImageCopyMerge (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
int srcX, int srcY, int w, int h, int pct)
{
int c, dc;
int x, y;
int tox, toy;
int ncR, ncG, ncB;
toy = dstY;
for (y = srcY; (y < (srcY + h)); y++) {
tox = dstX;
for (x = srcX; (x < (srcX + w)); x++) {
int nc;
c = gdImageGetPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent (src) == c) {
tox++;
continue;
}
/* If it's the same image, mapping is trivial */
if (dst == src) {
nc = c;
} else {
dc = gdImageGetPixel (dst, tox, toy);
ncR = gdImageRed (src, c) * (pct / 100.0)
+ gdImageRed (dst, dc) * ((100 - pct) / 100.0);
ncG = gdImageGreen (src, c) * (pct / 100.0)
+ gdImageGreen (dst, dc) * ((100 - pct) / 100.0);
ncB = gdImageBlue (src, c) * (pct / 100.0)
+ gdImageBlue (dst, dc) * ((100 - pct) / 100.0);
/* Find a reasonable color */
nc = gdImageColorResolve (dst, ncR, ncG, ncB);
}
gdImageSetPixel (dst, tox, toy, nc);
tox++;
}
toy++;
}
}
/**
* Function: gdImageCopyMergeGray
*
* Copy an area of an image to another image ignoring alpha
*
* The source area will be copied to the grayscaled destination area by merging
* the pixels.
*
* Note:
* This function is a substitute for real alpha channel operations,
* so it doesn't pay attention to the alpha channel.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* w - The width of the area to copy.
* h - The height of the area to copy.
* pct - The percentage of the source color intensity in range 0..100.
*
* See also:
* - <gdImageCopy>
* - <gdImageCopyMerge>
*/
BGD_DECLARE(void) gdImageCopyMergeGray (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
int srcX, int srcY, int w, int h, int pct)
{
int c, dc;
int x, y;
int tox, toy;
int ncR, ncG, ncB;
float g;
toy = dstY;
for (y = srcY; (y < (srcY + h)); y++) {
tox = dstX;
for (x = srcX; (x < (srcX + w)); x++) {
int nc;
c = gdImageGetPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent (src) == c) {
tox++;
continue;
}
/*
* If it's the same image, mapping is NOT trivial since we
* merge with greyscale target, but if pct is 100, the grey
* value is not used, so it becomes trivial. pjw 2.0.12.
*/
if (dst == src && pct == 100) {
nc = c;
} else {
dc = gdImageGetPixel (dst, tox, toy);
g = 0.29900 * gdImageRed(dst, dc)
+ 0.58700 * gdImageGreen(dst, dc) + 0.11400 * gdImageBlue(dst, dc);
ncR = gdImageRed (src, c) * (pct / 100.0)
+ g * ((100 - pct) / 100.0);
ncG = gdImageGreen (src, c) * (pct / 100.0)
+ g * ((100 - pct) / 100.0);
ncB = gdImageBlue (src, c) * (pct / 100.0)
+ g * ((100 - pct) / 100.0);
/* First look for an exact match */
nc = gdImageColorExact (dst, ncR, ncG, ncB);
if (nc == (-1)) {
/* No, so try to allocate it */
nc = gdImageColorAllocate (dst, ncR, ncG, ncB);
/* If we're out of colors, go for the
closest color */
if (nc == (-1)) {
nc = gdImageColorClosest (dst, ncR, ncG, ncB);
}
}
}
gdImageSetPixel (dst, tox, toy, nc);
tox++;
}
toy++;
}
}
/**
* Function: gdImageCopyResized
*
* Copy a resized area from an image to another image
*
* If the source and destination area differ in size, the area will be resized
* using nearest-neighbor interpolation.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* dstW - The width of the area to copy to.
* dstH - The height of the area to copy to.
* srcW - The width of the area to copy from.
* srcH - The height of the area to copy from.
*
* See also:
* - <gdImageCopyResampled>
* - <gdImageScale>
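 *
 * Example (illustrative; scales all of "src" into a 64x64 area of "dst"):
 * | gdImageCopyResized(dst, src, 0, 0, 0, 0,
 * |                    64, 64, gdImageSX(src), gdImageSY(src));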
*/
BGD_DECLARE(void) gdImageCopyResized (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
int srcX, int srcY, int dstW, int dstH, int srcW,
int srcH)
{
int c;
int x, y;
int tox, toy;
int ydest;
int i;
int colorMap[gdMaxColors];
/* Stretch vectors */
int *stx;
int *sty;
/* We only need to use floating point to determine the correct
stretch vector for one line's worth. */
if (overflow2(sizeof (int), srcW)) {
return;
}
if (overflow2(sizeof (int), srcH)) {
return;
}
stx = (int *) gdMalloc (sizeof (int) * srcW);
if (!stx) {
return;
}
sty = (int *) gdMalloc (sizeof (int) * srcH);
if (!sty) {
gdFree(stx);
return;
}
/* Fixed by Mao Morimoto 2.0.16 */
for (i = 0; (i < srcW); i++) {
stx[i] = dstW * (i + 1) / srcW - dstW * i / srcW;
}
for (i = 0; (i < srcH); i++) {
sty[i] = dstH * (i + 1) / srcH - dstH * i / srcH;
}
for (i = 0; (i < gdMaxColors); i++) {
colorMap[i] = (-1);
}
toy = dstY;
for (y = srcY; (y < (srcY + srcH)); y++) {
for (ydest = 0; (ydest < sty[y - srcY]); ydest++) {
tox = dstX;
for (x = srcX; (x < (srcX + srcW)); x++) {
int nc = 0;
int mapTo;
if (!stx[x - srcX]) {
continue;
}
if (dst->trueColor) {
/* 2.0.9: Thorben Kundinger: Maybe the source image is not
a truecolor image */
if (!src->trueColor) {
int tmp = gdImageGetPixel (src, x, y);
mapTo = gdImageGetTrueColorPixel (src, x, y);
if (gdImageGetTransparent (src) == tmp) {
/* 2.0.21, TK: not tox++ */
tox += stx[x - srcX];
continue;
}
} else {
/* TK: old code follows */
mapTo = gdImageGetTrueColorPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent (src) == mapTo) {
/* 2.0.21, TK: not tox++ */
tox += stx[x - srcX];
continue;
}
}
} else {
c = gdImageGetPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent (src) == c) {
tox += stx[x - srcX];
continue;
}
if (src->trueColor) {
/* Remap to the palette available in the
destination image. This is slow and
works badly. */
						mapTo = gdImageColorResolveAlpha (dst,
						                                  gdTrueColorGetRed (c),
						                                  gdTrueColorGetGreen (c),
						                                  gdTrueColorGetBlue (c),
						                                  gdTrueColorGetAlpha (c));
} else {
/* Have we established a mapping for this color? */
if (colorMap[c] == (-1)) {
/* If it's the same image, mapping is trivial */
if (dst == src) {
nc = c;
} else {
/* Find or create the best match */
/* 2.0.5: can't use gdTrueColorGetRed, etc with palette */
								nc = gdImageColorResolveAlpha (dst,
								                               gdImageRed (src, c),
								                               gdImageGreen (src, c),
								                               gdImageBlue (src, c),
								                               gdImageAlpha (src, c));
}
colorMap[c] = nc;
}
mapTo = colorMap[c];
}
}
for (i = 0; (i < stx[x - srcX]); i++) {
gdImageSetPixel (dst, tox, toy, mapTo);
tox++;
}
}
toy++;
}
}
gdFree (stx);
gdFree (sty);
}
/**
* Function: gdImageCopyRotated
*
* Copy a rotated area from an image to another image
*
* The area is counter-clockwise rotated using nearest-neighbor interpolation.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the center of the area to copy to.
* dstY - The y-coordinate of the center of the area to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* srcW - The width of the area to copy from.
* srcH - The height of the area to copy from.
* angle - The angle in degrees.
*
* See also:
* - <gdImageRotateInterpolated>
*/
BGD_DECLARE(void) gdImageCopyRotated (gdImagePtr dst,
gdImagePtr src,
double dstX, double dstY,
int srcX, int srcY,
int srcWidth, int srcHeight, int angle)
{
double dx, dy;
double radius = sqrt (srcWidth * srcWidth + srcHeight * srcHeight);
double aCos = cos (angle * .0174532925);
double aSin = sin (angle * .0174532925);
double scX = srcX + ((double) srcWidth) / 2;
double scY = srcY + ((double) srcHeight) / 2;
int cmap[gdMaxColors];
int i;
/*
2.0.34: transparency preservation. The transparentness of
the transparent color is more important than its hue.
*/
if (src->transparent != -1) {
if (dst->transparent == -1) {
dst->transparent = src->transparent;
}
}
for (i = 0; (i < gdMaxColors); i++) {
cmap[i] = (-1);
}
for (dy = dstY - radius; (dy <= dstY + radius); dy++) {
for (dx = dstX - radius; (dx <= dstX + radius); dx++) {
double sxd = (dx - dstX) * aCos - (dy - dstY) * aSin;
double syd = (dy - dstY) * aCos + (dx - dstX) * aSin;
int sx = sxd + scX;
int sy = syd + scY;
if ((sx >= srcX) && (sx < srcX + srcWidth) &&
(sy >= srcY) && (sy < srcY + srcHeight)) {
int c = gdImageGetPixel (src, sx, sy);
/* 2.0.34: transparency wins */
if (c == src->transparent) {
gdImageSetPixel (dst, dx, dy, dst->transparent);
} else if (!src->trueColor) {
/* Use a table to avoid an expensive
lookup on every single pixel */
if (cmap[c] == -1) {
						cmap[c] = gdImageColorResolveAlpha (dst,
						                                    gdImageRed (src, c),
						                                    gdImageGreen (src, c),
						                                    gdImageBlue (src, c),
						                                    gdImageAlpha (src, c));
}
gdImageSetPixel (dst, dx, dy, cmap[c]);
} else {
					gdImageSetPixel (dst, dx, dy,
					                 gdImageColorResolveAlpha (dst,
					                                           gdImageRed (src, c),
					                                           gdImageGreen (src, c),
					                                           gdImageBlue (src, c),
					                                           gdImageAlpha (src, c)));
}
}
}
}
}
/* When gd 1.x was first created, floating point was to be avoided.
These days it is often faster than table lookups or integer
arithmetic. The routine below is shamelessly, gloriously
floating point. TBB */
/* 2.0.10: cast instead of floor() yields 35% performance improvement.
Thanks to John Buckman. */
#define floor2(exp) ((long) (exp))
/*#define floor2(exp) floor(exp)*/
/**
* Function: gdImageCopyResampled
*
* Copy a resampled area from an image to another image
*
* If the source and destination area differ in size, the area will be resized
* using bilinear interpolation for truecolor images, and nearest-neighbor
* interpolation for palette images.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* dstW - The width of the area to copy to.
* dstH - The height of the area to copy to.
* srcW - The width of the area to copy from.
* srcH - The height of the area to copy from.
*
* See also:
* - <gdImageCopyResized>
* - <gdImageScale>
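 *
 * Example (illustrative thumbnail sketch; the destination must be truecolor,
 * otherwise this falls back to <gdImageCopyResized>):
 * | gdImagePtr thumb = gdImageCreateTrueColor(64, 64);
 * | gdImageCopyResampled(thumb, src, 0, 0, 0, 0,
 * |                      64, 64, gdImageSX(src), gdImageSY(src));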
*/
BGD_DECLARE(void) gdImageCopyResampled (gdImagePtr dst,
gdImagePtr src,
int dstX, int dstY,
int srcX, int srcY,
int dstW, int dstH, int srcW, int srcH)
{
int x, y;
if (!dst->trueColor) {
gdImageCopyResized (dst, src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH);
return;
}
for (y = dstY; (y < dstY + dstH); y++) {
for (x = dstX; (x < dstX + dstW); x++) {
float sy1, sy2, sx1, sx2;
float sx, sy;
float spixels = 0.0;
float red = 0.0, green = 0.0, blue = 0.0, alpha = 0.0;
float alpha_factor, alpha_sum = 0.0, contrib_sum = 0.0;
sy1 = ((float)(y - dstY)) * (float)srcH / (float)dstH;
sy2 = ((float)(y + 1 - dstY)) * (float) srcH / (float) dstH;
sy = sy1;
do {
float yportion;
if (floorf(sy) == floorf(sy1)) {
yportion = 1.0 - (sy - floorf(sy));
if (yportion > sy2 - sy1) {
yportion = sy2 - sy1;
}
sy = floorf(sy);
} else if (sy == floorf(sy2)) {
yportion = sy2 - floorf(sy2);
} else {
yportion = 1.0;
}
sx1 = ((float)(x - dstX)) * (float) srcW / dstW;
sx2 = ((float)(x + 1 - dstX)) * (float) srcW / dstW;
sx = sx1;
do {
float xportion;
float pcontribution;
int p;
if (floorf(sx) == floorf(sx1)) {
xportion = 1.0 - (sx - floorf(sx));
if (xportion > sx2 - sx1) {
xportion = sx2 - sx1;
}
sx = floorf(sx);
} else if (sx == floorf(sx2)) {
xportion = sx2 - floorf(sx2);
} else {
xportion = 1.0;
}
pcontribution = xportion * yportion;
p = gdImageGetTrueColorPixel(src, (int) sx + srcX, (int) sy + srcY);
alpha_factor = ((gdAlphaMax - gdTrueColorGetAlpha(p))) * pcontribution;
red += gdTrueColorGetRed (p) * alpha_factor;
green += gdTrueColorGetGreen (p) * alpha_factor;
blue += gdTrueColorGetBlue (p) * alpha_factor;
alpha += gdTrueColorGetAlpha (p) * pcontribution;
alpha_sum += alpha_factor;
contrib_sum += pcontribution;
spixels += xportion * yportion;
sx += 1.0;
}
while (sx < sx2);
sy += 1.0f;
}
while (sy < sy2);
if (spixels != 0.0) {
red /= spixels;
green /= spixels;
blue /= spixels;
alpha /= spixels;
}
if ( alpha_sum != 0.0) {
if( contrib_sum != 0.0) {
alpha_sum /= contrib_sum;
}
red /= alpha_sum;
green /= alpha_sum;
blue /= alpha_sum;
}
/* Clamping to allow for rounding errors above */
if (red > 255.0) {
red = 255.0;
}
if (green > 255.0) {
green = 255.0;
}
if (blue > 255.0f) {
blue = 255.0;
}
if (alpha > gdAlphaMax) {
alpha = gdAlphaMax;
}
gdImageSetPixel(dst, x, y, gdTrueColorAlpha ((int) red, (int) green, (int) blue, (int) alpha));
}
}
}
/**
* Group: Polygons
*/
/**
* Function: gdImagePolygon
*
* Draws a closed polygon
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color.
*
* See also:
* - <gdImageOpenPolygon>
* - <gdImageFilledPolygon>
*/
BGD_DECLARE(void) gdImagePolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
if (n <= 0) {
return;
}
gdImageLine (im, p->x, p->y, p[n - 1].x, p[n - 1].y, c);
gdImageOpenPolygon (im, p, n, c);
}
/**
* Function: gdImageOpenPolygon
*
* Draws an open polygon
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color
*
* See also:
* - <gdImagePolygon>
*/
BGD_DECLARE(void) gdImageOpenPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
int i;
int lx, ly;
if (n <= 0) {
return;
}
lx = p->x;
ly = p->y;
for (i = 1; (i < n); i++) {
p++;
gdImageLine (im, lx, ly, p->x, p->y, c);
lx = p->x;
ly = p->y;
}
}
/* THANKS to Kirsten Schulz for the polygon fixes! */
/* The intersection finding technique of this code could be improved */
/* by remembering the previous intersection, and by using the slope. */
/* That could help to adjust intersections to produce a nice */
/* interior_extrema. */
/**
* Function: gdImageFilledPolygon
*
* Draws a filled polygon
*
 * The polygon is filled using the even-odd fill rule, which can leave
 * unfilled regions inside of self-intersecting polygons. This behavior
 * might change in a future version.
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color
*
* See also:
* - <gdImagePolygon>
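 *
 * Example (illustrative; fills a triangle with a hypothetical "color"):
 * | gdPoint pts[3] = { {10, 80}, {50, 10}, {90, 80} };
 * | gdImageFilledPolygon(im, pts, 3, color);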
*/
BGD_DECLARE(void) gdImageFilledPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
int i;
int j;
int index;
int y;
int miny, maxy, pmaxy;
int x1, y1;
int x2, y2;
int ind1, ind2;
int ints;
int fill_color;
if (n <= 0) {
return;
}
if (c == gdAntiAliased) {
fill_color = im->AA_color;
} else {
fill_color = c;
}
if (!im->polyAllocated) {
if (overflow2(sizeof (int), n)) {
return;
}
im->polyInts = (int *) gdMalloc (sizeof (int) * n);
if (!im->polyInts) {
return;
}
im->polyAllocated = n;
}
if (im->polyAllocated < n) {
while (im->polyAllocated < n) {
im->polyAllocated *= 2;
}
if (overflow2(sizeof (int), im->polyAllocated)) {
return;
}
im->polyInts = (int *) gdReallocEx (im->polyInts,
sizeof (int) * im->polyAllocated);
if (!im->polyInts) {
return;
}
}
miny = p[0].y;
maxy = p[0].y;
for (i = 1; (i < n); i++) {
if (p[i].y < miny) {
miny = p[i].y;
}
if (p[i].y > maxy) {
maxy = p[i].y;
}
}
/* necessary special case: horizontal line */
if (n > 1 && miny == maxy) {
x1 = x2 = p[0].x;
for (i = 1; (i < n); i++) {
if (p[i].x < x1) {
x1 = p[i].x;
} else if (p[i].x > x2) {
x2 = p[i].x;
}
}
gdImageLine(im, x1, miny, x2, miny, c);
return;
}
pmaxy = maxy;
/* 2.0.16: Optimization by Ilia Chipitsine -- don't waste time offscreen */
/* 2.0.26: clipping rectangle is even better */
if (miny < im->cy1) {
miny = im->cy1;
}
if (maxy > im->cy2) {
maxy = im->cy2;
}
/* Fix in 1.3: count a vertex only once */
for (y = miny; (y <= maxy); y++) {
ints = 0;
for (i = 0; (i < n); i++) {
if (!i) {
ind1 = n - 1;
ind2 = 0;
} else {
ind1 = i - 1;
ind2 = i;
}
y1 = p[ind1].y;
y2 = p[ind2].y;
if (y1 < y2) {
x1 = p[ind1].x;
x2 = p[ind2].x;
} else if (y1 > y2) {
y2 = p[ind1].y;
y1 = p[ind2].y;
x2 = p[ind1].x;
x1 = p[ind2].x;
} else {
continue;
}
/* Do the following math as float intermediately, and round to ensure
* that Polygon and FilledPolygon for the same set of points have the
* same footprint. */
if ((y >= y1) && (y < y2)) {
im->polyInts[ints++] = (int) ((float) ((y - y1) * (x2 - x1)) /
(float) (y2 - y1) + 0.5 + x1);
} else if ((y == pmaxy) && (y == y2)) {
im->polyInts[ints++] = x2;
}
}
/*
2.0.26: polygons pretty much always have less than 100 points,
and most of the time they have considerably less. For such trivial
cases, insertion sort is a good choice. Also a good choice for
future implementations that may wish to indirect through a table.
*/
for (i = 1; (i < ints); i++) {
index = im->polyInts[i];
j = i;
while ((j > 0) && (im->polyInts[j - 1] > index)) {
im->polyInts[j] = im->polyInts[j - 1];
j--;
}
im->polyInts[j] = index;
}
for (i = 0; (i < (ints-1)); i += 2) {
/* 2.0.29: back to gdImageLine to prevent segfaults when
performing a pattern fill */
gdImageLine (im, im->polyInts[i], y, im->polyInts[i + 1], y,
fill_color);
}
}
/* If we are drawing this AA, then redraw the border with AA lines. */
/* This doesn't work as well as I'd like, but it doesn't clash either. */
if (c == gdAntiAliased) {
gdImagePolygon (im, p, n, c);
}
}
/**
* Group: other
*/
static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t);
/**
* Function: gdImageSetStyle
*
* Sets the style for following drawing operations
*
* Parameters:
* im - The image.
* style - An array of color values.
 * noOfPixels - The number of color values.
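 *
 * Example (illustrative dashed-line sketch; gdTransparent entries leave
 * pixels untouched, and "color" is hypothetical):
 * | int style[8] = { color, color, color, color,
 * |                  gdTransparent, gdTransparent,
 * |                  gdTransparent, gdTransparent };
 * | gdImageSetStyle(im, style, 8);
 * | gdImageLine(im, 0, 0, 99, 99, gdStyled);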
*/
BGD_DECLARE(void) gdImageSetStyle (gdImagePtr im, int *style, int noOfPixels)
{
if (im->style) {
gdFree (im->style);
}
if (overflow2(sizeof (int), noOfPixels)) {
return;
}
im->style = (int *) gdMalloc (sizeof (int) * noOfPixels);
if (!im->style) {
return;
}
memcpy (im->style, style, sizeof (int) * noOfPixels);
im->styleLength = noOfPixels;
im->stylePos = 0;
}
/**
* Function: gdImageSetThickness
*
* Sets the thickness for following drawing operations
*
* Parameters:
* im - The image.
* thickness - The thickness in pixels.
*/
BGD_DECLARE(void) gdImageSetThickness (gdImagePtr im, int thickness)
{
im->thick = thickness;
}
/**
* Function: gdImageSetBrush
*
* Sets the brush for following drawing operations
*
* Parameters:
* im - The image.
* brush - The brush image.
*/
BGD_DECLARE(void) gdImageSetBrush (gdImagePtr im, gdImagePtr brush)
{
int i;
im->brush = brush;
if ((!im->trueColor) && (!im->brush->trueColor)) {
for (i = 0; (i < gdImageColorsTotal (brush)); i++) {
int index;
index = gdImageColorResolveAlpha (im,
gdImageRed (brush, i),
gdImageGreen (brush, i),
gdImageBlue (brush, i),
gdImageAlpha (brush, i));
im->brushColorMap[i] = index;
}
}
}
/*
Function: gdImageSetTile
*/
BGD_DECLARE(void) gdImageSetTile (gdImagePtr im, gdImagePtr tile)
{
int i;
im->tile = tile;
if ((!im->trueColor) && (!im->tile->trueColor)) {
for (i = 0; (i < gdImageColorsTotal (tile)); i++) {
int index;
index = gdImageColorResolveAlpha (im,
gdImageRed (tile, i),
gdImageGreen (tile, i),
gdImageBlue (tile, i),
gdImageAlpha (tile, i));
im->tileColorMap[i] = index;
}
}
}
/**
* Function: gdImageSetAntiAliased
*
* Set the color for subsequent anti-aliased drawing
*
* If <gdAntiAliased> is passed as color to drawing operations that support
* anti-aliased drawing (such as <gdImageLine> and <gdImagePolygon>), the actual
* color to be used can be set with this function.
*
* Example: draw an anti-aliased blue line:
* | gdImageSetAntiAliased(im, gdTrueColorAlpha(0, 0, gdBlueMax, gdAlphaOpaque));
* | gdImageLine(im, 10,10, 20,20, gdAntiAliased);
*
* Parameters:
* im - The image.
* c - The color.
*
* See also:
* - <gdImageSetAntiAliasedDontBlend>
*/
BGD_DECLARE(void) gdImageSetAntiAliased (gdImagePtr im, int c)
{
im->AA = 1;
im->AA_color = c;
im->AA_dont_blend = -1;
}
/**
* Function: gdImageSetAntiAliasedDontBlend
*
* Set the color and "dont_blend" color for subsequent anti-aliased drawing
*
 * This extended variant of <gdImageSetAntiAliased> also allows specifying a
 * (background) color that will not be blended over in anti-aliased drawing
 * operations.
*
* Parameters:
* im - The image.
* c - The color.
 * dont_blend - The color that is never blended over, or -1 to disable.
*/
BGD_DECLARE(void) gdImageSetAntiAliasedDontBlend (gdImagePtr im, int c, int dont_blend)
{
im->AA = 1;
im->AA_color = c;
im->AA_dont_blend = dont_blend;
}
/**
* Function: gdImageInterlace
*
* Sets whether an image is interlaced
*
* This is relevant only when saving the image in a format that supports
* interlacing.
*
* Parameters:
* im - The image.
* interlaceArg - Whether the image is interlaced.
*
* See also:
* - <gdImageGetInterlaced>
*/
BGD_DECLARE(void) gdImageInterlace (gdImagePtr im, int interlaceArg)
{
im->interlace = interlaceArg;
}
/**
* Function: gdImageCompare
*
* Compare two images
*
* Parameters:
* im1 - An image.
* im2 - Another image.
*
* Returns:
* A bitmask of <Image Comparison> flags where each set flag signals
* which attributes of the images are different.
*/
BGD_DECLARE(int) gdImageCompare (gdImagePtr im1, gdImagePtr im2)
{
int x, y;
int p1, p2;
int cmpStatus = 0;
int sx, sy;
if (im1->interlace != im2->interlace) {
cmpStatus |= GD_CMP_INTERLACE;
}
if (im1->transparent != im2->transparent) {
cmpStatus |= GD_CMP_TRANSPARENT;
}
if (im1->trueColor != im2->trueColor) {
cmpStatus |= GD_CMP_TRUECOLOR;
}
sx = im1->sx;
if (im1->sx != im2->sx) {
cmpStatus |= GD_CMP_SIZE_X + GD_CMP_IMAGE;
if (im2->sx < im1->sx) {
sx = im2->sx;
}
}
sy = im1->sy;
if (im1->sy != im2->sy) {
cmpStatus |= GD_CMP_SIZE_Y + GD_CMP_IMAGE;
if (im2->sy < im1->sy) {
sy = im2->sy;
}
}
if (im1->colorsTotal != im2->colorsTotal) {
cmpStatus |= GD_CMP_NUM_COLORS;
}
for (y = 0; (y < sy); y++) {
for (x = 0; (x < sx); x++) {
			p1 = im1->trueColor ? gdImageTrueColorPixel (im1, x, y)
			                    : gdImagePalettePixel (im1, x, y);
			p2 = im2->trueColor ? gdImageTrueColorPixel (im2, x, y)
			                    : gdImagePalettePixel (im2, x, y);
if (gdImageRed (im1, p1) != gdImageRed (im2, p2)) {
cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
break;
}
if (gdImageGreen (im1, p1) != gdImageGreen (im2, p2)) {
cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
break;
}
if (gdImageBlue (im1, p1) != gdImageBlue (im2, p2)) {
cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
break;
}
#if 0
/* Soon we'll add alpha channel to palettes */
if (gdImageAlpha (im1, p1) != gdImageAlpha (im2, p2)) {
cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
break;
}
#endif
}
		if (cmpStatus & GD_CMP_COLOR) {
			break;
		}
}
return cmpStatus;
}
/* Thanks to Frank Warmerdam for this superior implementation
of gdAlphaBlend(), which merges alpha in the
destination color much better. */
/**
* Function: gdAlphaBlend
*
* Blend two colors
*
* Parameters:
* dst - The color to blend onto.
* src - The color to blend.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdLayerOverlay>
* - <gdLayerMultiply>
*/
BGD_DECLARE(int) gdAlphaBlend (int dst, int src)
{
int src_alpha = gdTrueColorGetAlpha(src);
int dst_alpha, alpha, red, green, blue;
int src_weight, dst_weight, tot_weight;
/* -------------------------------------------------------------------- */
/* Simple cases we want to handle fast. */
/* -------------------------------------------------------------------- */
if( src_alpha == gdAlphaOpaque )
return src;
dst_alpha = gdTrueColorGetAlpha(dst);
if( src_alpha == gdAlphaTransparent )
return dst;
if( dst_alpha == gdAlphaTransparent )
return src;
/* -------------------------------------------------------------------- */
/* What will the source and destination alphas be? Note that */
/* the destination weighting is substantially reduced as the */
/* overlay becomes quite opaque. */
/* -------------------------------------------------------------------- */
src_weight = gdAlphaTransparent - src_alpha;
dst_weight = (gdAlphaTransparent - dst_alpha) * src_alpha / gdAlphaMax;
tot_weight = src_weight + dst_weight;
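	/*
	 * In gd's inverted alpha scale (0 = opaque, gdAlphaMax = 127 =
	 * transparent) this works out to:
	 *   src_weight = 127 - src_alpha
	 *   dst_weight = (127 - dst_alpha) * src_alpha / 127
	 * so a fully opaque source (already caught by the fast path above)
	 * would get dst_weight == 0 and win outright.
	 */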
/* -------------------------------------------------------------------- */
/* What red, green and blue result values will we use? */
/* -------------------------------------------------------------------- */
alpha = src_alpha * dst_alpha / gdAlphaMax;
red = (gdTrueColorGetRed(src) * src_weight
+ gdTrueColorGetRed(dst) * dst_weight) / tot_weight;
green = (gdTrueColorGetGreen(src) * src_weight
+ gdTrueColorGetGreen(dst) * dst_weight) / tot_weight;
blue = (gdTrueColorGetBlue(src) * src_weight
+ gdTrueColorGetBlue(dst) * dst_weight) / tot_weight;
/* -------------------------------------------------------------------- */
/* Return merged result. */
/* -------------------------------------------------------------------- */
return ((alpha << 24) + (red << 16) + (green << 8) + blue);
}
static int gdAlphaOverlayColor (int src, int dst, int max );
/**
* Function: gdLayerOverlay
*
* Overlay two colors
*
* Parameters:
* dst - The color to overlay onto.
* src - The color to overlay.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdAlphaBlend>
* - <gdLayerMultiply>
*/
BGD_DECLARE(int) gdLayerOverlay (int dst, int src)
{
int a1, a2;
a1 = gdAlphaMax - gdTrueColorGetAlpha(dst);
a2 = gdAlphaMax - gdTrueColorGetAlpha(src);
return ( ((gdAlphaMax - a1*a2/gdAlphaMax) << 24) +
(gdAlphaOverlayColor( gdTrueColorGetRed(src), gdTrueColorGetRed(dst), gdRedMax ) << 16) +
(gdAlphaOverlayColor( gdTrueColorGetGreen(src), gdTrueColorGetGreen(dst), gdGreenMax ) << 8) +
(gdAlphaOverlayColor( gdTrueColorGetBlue(src), gdTrueColorGetBlue(dst), gdBlueMax ))
);
}
/* Apply 'overlay' effect - background pixels are colourised by the foreground colour */
static int gdAlphaOverlayColor (int src, int dst, int max )
{
dst = dst << 1;
if( dst > max ) {
/* in the "light" zone */
return dst + (src << 1) - (dst * src / max) - max;
} else {
/* in the "dark" zone */
return dst * src / max;
}
}
/**
* Function: gdLayerMultiply
*
* Overlay two colors with multiply effect
*
* Parameters:
* dst - The color to overlay onto.
* src - The color to overlay.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdAlphaBlend>
* - <gdLayerOverlay>
*/
BGD_DECLARE(int) gdLayerMultiply (int dst, int src)
{
int a1, a2, r1, r2, g1, g2, b1, b2;
a1 = gdAlphaMax - gdTrueColorGetAlpha(src);
a2 = gdAlphaMax - gdTrueColorGetAlpha(dst);
r1 = gdRedMax - (a1 * (gdRedMax - gdTrueColorGetRed(src))) / gdAlphaMax;
r2 = gdRedMax - (a2 * (gdRedMax - gdTrueColorGetRed(dst))) / gdAlphaMax;
g1 = gdGreenMax - (a1 * (gdGreenMax - gdTrueColorGetGreen(src))) / gdAlphaMax;
g2 = gdGreenMax - (a2 * (gdGreenMax - gdTrueColorGetGreen(dst))) / gdAlphaMax;
b1 = gdBlueMax - (a1 * (gdBlueMax - gdTrueColorGetBlue(src))) / gdAlphaMax;
b2 = gdBlueMax - (a2 * (gdBlueMax - gdTrueColorGetBlue(dst))) / gdAlphaMax ;
a1 = gdAlphaMax - a1;
a2 = gdAlphaMax - a2;
return ( ((a1*a2/gdAlphaMax) << 24) +
((r1*r2/gdRedMax) << 16) +
((g1*g2/gdGreenMax) << 8) +
((b1*b2/gdBlueMax))
);
}
/**
* Function: gdImageAlphaBlending
*
* Set the effect for subsequent drawing operations
*
* Note that the effect is used for truecolor images only.
*
* Parameters:
* im - The image.
* alphaBlendingArg - The effect.
*
* See also:
* - <Effects>
*/
BGD_DECLARE(void) gdImageAlphaBlending (gdImagePtr im, int alphaBlendingArg)
{
im->alphaBlendingFlag = alphaBlendingArg;
}
/**
* Function: gdImageSaveAlpha
*
* Sets the save alpha flag
*
* The save alpha flag specifies whether the alpha channel of the pixels should
* be saved. This is supported only for image formats that support full alpha
 * transparency, e.g. PNG.
 *
 * Parameters:
 *   im - The image.
 *   saveAlphaArg - Whether the alpha channel should be saved.
 */
BGD_DECLARE(void) gdImageSaveAlpha (gdImagePtr im, int saveAlphaArg)
{
im->saveAlphaFlag = saveAlphaArg;
}
/**
* Function: gdImageSetClip
*
* Sets the clipping rectangle
*
* The clipping rectangle restricts the drawing area for following drawing
* operations.
*
* Parameters:
* im - The image.
* x1 - The x-coordinate of the upper left corner.
* y1 - The y-coordinate of the upper left corner.
* x2 - The x-coordinate of the lower right corner.
* y2 - The y-coordinate of the lower right corner.
*
* See also:
* - <gdImageGetClip>
*/
BGD_DECLARE(void) gdImageSetClip (gdImagePtr im, int x1, int y1, int x2, int y2)
{
if (x1 < 0) {
x1 = 0;
}
if (x1 >= im->sx) {
x1 = im->sx - 1;
}
if (x2 < 0) {
x2 = 0;
}
if (x2 >= im->sx) {
x2 = im->sx - 1;
}
if (y1 < 0) {
y1 = 0;
}
if (y1 >= im->sy) {
y1 = im->sy - 1;
}
if (y2 < 0) {
y2 = 0;
}
if (y2 >= im->sy) {
y2 = im->sy - 1;
}
im->cx1 = x1;
im->cy1 = y1;
im->cx2 = x2;
im->cy2 = y2;
}
/**
* Function: gdImageGetClip
*
* Gets the current clipping rectangle
*
* Parameters:
* im - The image.
* x1P - (out) The x-coordinate of the upper left corner.
* y1P - (out) The y-coordinate of the upper left corner.
* x2P - (out) The x-coordinate of the lower right corner.
* y2P - (out) The y-coordinate of the lower right corner.
*
* See also:
* - <gdImageSetClip>
*/
BGD_DECLARE(void) gdImageGetClip (gdImagePtr im, int *x1P, int *y1P, int *x2P, int *y2P)
{
*x1P = im->cx1;
*y1P = im->cy1;
*x2P = im->cx2;
*y2P = im->cy2;
}
/**
* Function: gdImageSetResolution
*
* Sets the resolution of an image.
*
* Parameters:
* im - The image.
* res_x - The horizontal resolution in DPI.
* res_y - The vertical resolution in DPI.
*
* See also:
* - <gdImageResolutionX>
* - <gdImageResolutionY>
*/
BGD_DECLARE(void) gdImageSetResolution(gdImagePtr im, const unsigned int res_x, const unsigned int res_y)
{
if (res_x > 0) im->res_x = res_x;
if (res_y > 0) im->res_y = res_y;
}
/*
* Added on 2003/12 by Pierre-Alain Joye (pajoye@pearfr.org)
* */
#define BLEND_COLOR(a, nc, c, cc) \
nc = (cc) + (((((c) - (cc)) * (a)) + ((((c) - (cc)) * (a)) >> 8) + 0x80) >> 8);
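/* BLEND_COLOR linearly interpolates between the background component "cc" and
 * the pen component "c", weighted by the 0..255 coverage "a"; the extra shift
 * and the 0x80 term approximate a rounded division by 255. */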
static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t)
{
int dr,dg,db,p,r,g,b;
/* 2.0.34: watch out for out of range calls */
if (!gdImageBoundsSafeMacro(im, x, y)) {
return;
}
p = gdImageGetPixel(im,x,y);
/* TBB: we have to implement the dont_blend stuff to provide
the full feature set of the old implementation */
if ((p == color)
|| ((p == im->AA_dont_blend)
&& (t != 0x00))) {
return;
}
dr = gdTrueColorGetRed(color);
dg = gdTrueColorGetGreen(color);
db = gdTrueColorGetBlue(color);
r = gdTrueColorGetRed(p);
g = gdTrueColorGetGreen(p);
b = gdTrueColorGetBlue(p);
BLEND_COLOR(t, dr, r, dr);
BLEND_COLOR(t, dg, g, dg);
BLEND_COLOR(t, db, b, db);
im->tpixels[y][x] = gdTrueColorAlpha(dr, dg, db, gdAlphaOpaque);
}
static void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col)
{
/* keep them as 32bits */
long x, y, inc, frac;
long dx, dy,tmp;
int w, wid, wstart;
int thick = im->thick;
if (!im->trueColor) {
/* TBB: don't crash when the image is of the wrong type */
gdImageLine(im, x1, y1, x2, y2, col);
return;
}
/* TBB: use the clipping rectangle */
if (clip_1d (&x1, &y1, &x2, &y2, im->cx1, im->cx2) == 0)
return;
if (clip_1d (&y1, &x1, &y2, &x2, im->cy1, im->cy2) == 0)
return;
dx = x2 - x1;
dy = y2 - y1;
if (dx == 0 && dy == 0) {
/* TBB: allow setting points */
gdImageSetPixel(im, x1, y1, col);
return;
} else {
double ag;
/* Cast the long to an int to avoid compiler warnings about truncation.
* This isn't a problem as computed dy/dx values came from ints above. */
ag = fabs(abs((int)dy) < abs((int)dx) ? cos(atan2(dy, dx)) : sin(atan2(dy, dx)));
if (ag != 0) {
wid = thick / ag;
} else {
wid = 1;
}
if (wid == 0) {
wid = 1;
}
}
/* Axis aligned lines */
if (dx == 0) {
gdImageVLine(im, x1, y1, y2, col);
return;
} else if (dy == 0) {
gdImageHLine(im, y1, x1, x2, col);
return;
}
if (abs((int)dx) > abs((int)dy)) {
if (dx < 0) {
tmp = x1;
x1 = x2;
x2 = tmp;
tmp = y1;
y1 = y2;
y2 = tmp;
dx = x2 - x1;
dy = y2 - y1;
}
y = y1;
inc = (dy * 65536) / dx;
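		/* 16.16 fixed point: "frac" accumulates the fractional y advance
		 * per x step, and its high byte drives the anti-aliasing blend. */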
frac = 0;
/* TBB: set the last pixel for consistency (<=) */
for (x = x1 ; x <= x2 ; x++) {
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetAAPixelColor(im, x , w , col , (frac >> 8) & 0xFF);
gdImageSetAAPixelColor(im, x , w + 1 , col, (~frac >> 8) & 0xFF);
}
frac += inc;
if (frac >= 65536) {
frac -= 65536;
y++;
} else if (frac < 0) {
frac += 65536;
y--;
}
}
} else {
if (dy < 0) {
tmp = x1;
x1 = x2;
x2 = tmp;
tmp = y1;
y1 = y2;
y2 = tmp;
dx = x2 - x1;
dy = y2 - y1;
}
x = x1;
inc = (dx * 65536) / dy;
frac = 0;
/* TBB: set the last pixel for consistency (<=) */
for (y = y1 ; y <= y2 ; y++) {
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetAAPixelColor(im, w , y , col, (frac >> 8) & 0xFF);
gdImageSetAAPixelColor(im, w + 1, y, col, (~frac >> 8) & 0xFF);
}
frac += inc;
if (frac >= 65536) {
frac -= 65536;
x++;
} else if (frac < 0) {
frac += 65536;
x--;
}
}
}
}
/**
* Function: gdImagePaletteToTrueColor
*
* Convert a palette image to true color
*
* Parameters:
* src - The image.
*
* Returns:
* Non-zero if the conversion succeeded, zero otherwise.
*
* See also:
* - <gdImageTrueColorToPalette>
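 *
 * Example (illustrative; handle_out_of_memory() is a hypothetical handler):
 * | if (!gdImagePaletteToTrueColor(im)) {
 * |     handle_out_of_memory();
 * | }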
*/
BGD_DECLARE(int) gdImagePaletteToTrueColor(gdImagePtr src)
{
unsigned int y;
unsigned int yy;
if (src == NULL) {
return 0;
}
if (src->trueColor == 1) {
return 1;
} else {
unsigned int x;
const unsigned int sy = gdImageSY(src);
const unsigned int sx = gdImageSX(src);
src->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
if (src->tpixels == NULL) {
return 0;
}
for (y = 0; y < sy; y++) {
const unsigned char *src_row = src->pixels[y];
int * dst_row;
			/* no need to calloc it, we overwrite all pixels anyway */
src->tpixels[y] = (int *) gdMalloc(sx * sizeof(int));
if (src->tpixels[y] == NULL) {
goto clean_on_error;
}
dst_row = src->tpixels[y];
for (x = 0; x < sx; x++) {
const unsigned char c = *(src_row + x);
if (c == src->transparent) {
*(dst_row + x) = gdTrueColorAlpha(0, 0, 0, 127);
} else {
*(dst_row + x) = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
}
}
}
/* free old palette buffer (y is sy) */
for (yy = 0; yy < y; yy++) {
gdFree(src->pixels[yy]);
}
gdFree(src->pixels);
src->trueColor = 1;
src->pixels = NULL;
src->alphaBlendingFlag = 0;
src->saveAlphaFlag = 1;
if (src->transparent >= 0) {
const unsigned char c = src->transparent;
src->transparent = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
return 1;
clean_on_error:
	/* free the truecolor rows allocated so far (row y itself failed to allocate) */
for (yy = 0; yy < y; yy++) {
gdFree(src->tpixels[yy]);
}
gdFree(src->tpixels);
return 0;
}
/*
* Fast Userspace Mutexes (which I call "Futexes!").
* (C) Rusty Russell, IBM 2002
*
* Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
* (C) Copyright 2003 Red Hat Inc, All Rights Reserved
*
* Removed page pinning, fix privately mapped COW pages and other cleanups
* (C) Copyright 2003, 2004 Jamie Lokier
*
* Robust futex support started by Ingo Molnar
* (C) Copyright 2006 Red Hat Inc, All Rights Reserved
* Thanks to Thomas Gleixner for suggestions, analysis and fixes.
*
* PI-futex support started by Ingo Molnar and Thomas Gleixner
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* PRIVATE futexes by Eric Dumazet
* Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
*
* Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
* Copyright (C) IBM Corporation, 2009
* Thanks to Thomas Gleixner for conceptual design and careful reviews.
*
* Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
* enough at me, Linus for the original (flawed) idea, Matthew
* Kirkwood for proof-of-concept implementation.
*
* "The futexes are also cursed."
* "But they come in a choice of three flavours!"
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <asm/futex.h>
#include "rtmutex_common.h"
int __read_mostly futex_cmpxchg_enabled;
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
/*
* Futex flags used to encode options to functions and preserve them across
* restarts.
*/
#define FLAGS_SHARED 0x01
#define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04
/*
* Priority Inheritance state:
*/
struct futex_pi_state {
/*
* list of 'owned' pi_state instances - these have to be
* cleaned up in do_exit() if the task exits prematurely:
*/
struct list_head list;
/*
* The PI object:
*/
struct rt_mutex pi_mutex;
struct task_struct *owner;
atomic_t refcount;
union futex_key key;
};
/**
* struct futex_q - The hashed futex queue entry, one per waiting task
* @list: priority-sorted list of tasks waiting on this futex
* @task: the task waiting on the futex
* @lock_ptr: the hash bucket lock
* @key: the key the futex is hashed on
* @pi_state: optional priority inheritance state
* @rt_waiter: rt_waiter storage for use with requeue_pi
* @requeue_pi_key: the requeue_pi target futex key
* @bitset: bitset for the optional bitmasked wakeup
*
* We use this hashed waitqueue, instead of a normal wait_queue_t, so
* we can wake only the relevant ones (hashed queues may be shared).
*
* A futex_q has a woken state, just like tasks have TASK_RUNNING.
* It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
* The order of wakeup is always to make the first condition true, then
* the second.
*
* PI futexes are typically woken before they are removed from the hash list via
* the rt_mutex code. See unqueue_me_pi().
*/
struct futex_q {
struct plist_node list;
struct task_struct *task;
spinlock_t *lock_ptr;
union futex_key key;
struct futex_pi_state *pi_state;
struct rt_mutex_waiter *rt_waiter;
union futex_key *requeue_pi_key;
u32 bitset;
};
static const struct futex_q futex_q_init = {
/* list gets initialized in queue_me()*/
.key = FUTEX_KEY_INIT,
.bitset = FUTEX_BITSET_MATCH_ANY
};
/*
* Hash buckets are shared by all the futex_keys that hash to the same
* location. Each key may have multiple futex_q structures, one for each task
* waiting on a futex.
*/
struct futex_hash_bucket {
spinlock_t lock;
struct plist_head chain;
};
static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
/*
* We hash on the keys returned from get_futex_key (see below).
*/
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
u32 hash = jhash2((u32*)&key->both.word,
(sizeof(key->both.word)+sizeof(key->both.ptr))/4,
key->both.offset);
return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
/*
* Return 1 if two futex_keys are equal, 0 otherwise.
*/
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
return (key1 && key2
&& key1->both.word == key2->both.word
&& key1->both.ptr == key2->both.ptr
&& key1->both.offset == key2->both.offset);
}
/*
* Take a reference to the resource addressed by a key.
* Can be called while holding spinlocks.
*
*/
static void get_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr)
return;
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
ihold(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
atomic_inc(&key->private.mm->mm_count);
break;
}
}
/*
* Drop a reference to the resource addressed by a key.
* The hash bucket spinlock must not be held.
*/
static void drop_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr) {
/* If we're here then we tried to put a key we failed to get */
WARN_ON_ONCE(1);
return;
}
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
iput(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
mmdrop(key->private.mm);
break;
}
}
/**
* get_futex_key() - Get parameters which are the keys for a futex
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
* @rw: mapping needs to be read/write (values: VERIFY_READ,
* VERIFY_WRITE)
*
* Returns a negative error code or 0
* The key words are stored in *key on success.
*
* For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
* offset_within_page). For private mappings, it's (uaddr, current->mm).
* We can usually work out the index without swapping in the page.
*
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page, *page_head;
int err, ro = 0;
/*
* The futex address must be "naturally" aligned.
*/
key->both.offset = address % PAGE_SIZE;
if (unlikely((address % sizeof(u32)) != 0))
return -EINVAL;
address -= key->both.offset;
/*
* PROCESS_PRIVATE futexes are fast.
* As the mm cannot disappear under us and the 'key' only needs the
* virtual address, we don't even have to find the underlying vma.
* Note: we do have to check that 'uaddr' is a valid user address,
* but access_ok() should be faster than find_vma().
*/
if (!fshared) {
if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
return -EFAULT;
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key);
return 0;
}
again:
err = get_user_pages_fast(address, 1, 1, &page);
/*
* If write access is not required (eg. FUTEX_WAIT), try
* and get read-only access.
*/
if (err == -EFAULT && rw == VERIFY_READ) {
err = get_user_pages_fast(address, 1, 0, &page);
ro = 1;
}
if (err < 0)
return err;
else
err = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
page_head = page;
if (unlikely(PageTail(page))) {
put_page(page);
/* serialize against __split_huge_page_splitting() */
local_irq_disable();
if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
page_head = compound_head(page);
/*
* page_head is valid pointer but we must pin
* it before taking the PG_lock and/or
* PG_compound_lock. The moment we re-enable
* irqs __split_huge_page_splitting() can
* return and the head page can be freed from
* under us. We can't take the PG_lock and/or
* PG_compound_lock on a page that could be
* freed from under us.
*/
if (page != page_head) {
get_page(page_head);
put_page(page);
}
local_irq_enable();
} else {
local_irq_enable();
goto again;
}
}
#else
page_head = compound_head(page);
if (page != page_head) {
get_page(page_head);
put_page(page);
}
#endif
lock_page(page_head);
/*
* If page_head->mapping is NULL, then it cannot be a PageAnon
* page; but it might be the ZERO_PAGE or in the gate area or
* in a special mapping (all cases which we are happy to fail);
* or it may have been a good file page when get_user_pages_fast
* found it, but truncated or holepunched or subjected to
* invalidate_complete_page2 before we got the page lock (also
* cases which we are happy to fail). And we hold a reference,
* so refcount care in invalidate_complete_page's remove_mapping
* prevents drop_caches from setting mapping to NULL beneath us.
*
* The case we do have to guard against is when memory pressure made
* shmem_writepage move it from filecache to swapcache beneath us:
* an unlikely race, but we do need to retry for page_head->mapping.
*/
if (!page_head->mapping) {
int shmem_swizzled = PageSwapCache(page_head);
unlock_page(page_head);
put_page(page_head);
if (shmem_swizzled)
goto again;
return -EFAULT;
}
/*
* Private mappings are handled in a simple way.
*
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
* the object not the particular process.
*/
if (PageAnon(page_head)) {
/*
* A read-only (RO) anonymous page will never change and thus doesn't make
* sense for futex operations.
*/
if (ro) {
err = -EFAULT;
goto out;
}
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
} else {
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.inode = page_head->mapping->host;
key->shared.pgoff = page_head->index;
}
get_futex_key_refs(key);
out:
unlock_page(page_head);
put_page(page_head);
return err;
}
static inline void put_futex_key(union futex_key *key)
{
drop_futex_key_refs(key);
}
/**
* fault_in_user_writeable() - Fault in user address and verify RW access
* @uaddr: pointer to faulting user space address
*
* Slow path to fix up the fault we just took in the atomic write
* access to @uaddr.
*
* We have no generic implementation of a non-destructive write to the
* user address. We know that we faulted in the atomic pagefault
* disabled section so we can as well avoid the #PF overhead by
* calling get_user_pages() right away.
*/
static int fault_in_user_writeable(u32 __user *uaddr)
{
struct mm_struct *mm = current->mm;
int ret;
down_read(&mm->mmap_sem);
ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
FAULT_FLAG_WRITE);
up_read(&mm->mmap_sem);
return ret < 0 ? ret : 0;
}
/**
* futex_top_waiter() - Return the highest priority waiter on a futex
* @hb: the hash bucket the futex_q's reside in
* @key: the futex key (to distinguish it from other futex futex_q's)
*
* Must be called with the hb lock held.
*/
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
union futex_key *key)
{
struct futex_q *this;
plist_for_each_entry(this, &hb->chain, list) {
if (match_futex(&this->key, key))
return this;
}
return NULL;
}
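/*
 * cmpxchg_futex_value_locked() - cmpxchg a user space futex word with
 * pagefaults disabled, for use while holding a hash bucket lock.
 * On success *curval holds the value previously found at @uaddr (which
 * equals @uval only if the exchange actually happened); returns a
 * negative error (typically -EFAULT) if the user access failed.
 */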
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
u32 uval, u32 newval)
{
int ret;
pagefault_disable();
ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
pagefault_enable();
return ret;
}
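/*
 * get_futex_value_locked() - fetch a futex value with pagefaults
 * disabled, for use while holding a hash bucket lock. Returns 0 on
 * success or -EFAULT if the value could not be read without faulting.
 */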
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
int ret;
pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
pagefault_enable();
return ret ? -EFAULT : 0;
}
/*
* PI code:
*/
static int refill_pi_state_cache(void)
{
struct futex_pi_state *pi_state;
if (likely(current->pi_state_cache))
return 0;
pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
if (!pi_state)
return -ENOMEM;
INIT_LIST_HEAD(&pi_state->list);
/* pi_mutex gets initialized later */
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
pi_state->key = FUTEX_KEY_INIT;
current->pi_state_cache = pi_state;
return 0;
}
static struct futex_pi_state * alloc_pi_state(void)
{
struct futex_pi_state *pi_state = current->pi_state_cache;
WARN_ON(!pi_state);
current->pi_state_cache = NULL;
return pi_state;
}
static void free_pi_state(struct futex_pi_state *pi_state)
{
if (!atomic_dec_and_test(&pi_state->refcount))
return;
/*
* If pi_state->owner is NULL, the owner is most probably dying
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
raw_spin_lock_irq(&pi_state->owner->pi_lock);
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
}
if (current->pi_state_cache)
kfree(pi_state);
else {
/*
* pi_state->list is already empty.
* clear pi_state->owner.
* refcount is at 0 - put it back to 1.
*/
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
current->pi_state_cache = pi_state;
}
}
/*
* Look up the task based on what TID userspace gave us.
* We don't trust it.
*/
static struct task_struct * futex_find_get_task(pid_t pid)
{
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p)
get_task_struct(p);
rcu_read_unlock();
return p;
}
/*
* This task is holding PI mutexes at exit time => bad.
* Kernel cleans up PI-state, but userspace is likely hosed.
* (Robust-futex cleanup is separate and might save the day for userspace.)
*/
void exit_pi_state_list(struct task_struct *curr)
{
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
struct futex_hash_bucket *hb;
union futex_key key = FUTEX_KEY_INIT;
if (!futex_cmpxchg_enabled)
return;
/*
* We are a ZOMBIE and nobody can enqueue itself on
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
raw_spin_lock_irq(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
*/
if (head->next != next) {
spin_unlock(&hb->lock);
continue;
}
WARN_ON(pi_state->owner != curr);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
raw_spin_unlock_irq(&curr->pi_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
spin_unlock(&hb->lock);
raw_spin_lock_irq(&curr->pi_lock);
}
raw_spin_unlock_irq(&curr->pi_lock);
}
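/*
 * lookup_pi_state() - find or create the pi_state for the futex @key
 * in hash bucket @hb, based on the user space value @uval.
 *
 * If another waiter already exists, its pi_state is validated against
 * the TID in @uval and returned in *ps with an extra reference.
 * Otherwise the owner task is looked up by that TID and a fresh
 * pi_state is initialized and attached to it.
 *
 * Returns 0 on success, -EINVAL if user space fiddled with the futex
 * value, -ESRCH if no owner task exists for the TID, or -EAGAIN if
 * the owner is exiting but has not finished its PI cleanup yet.
 */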
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
union futex_key *key, struct futex_pi_state **ps)
{
struct futex_pi_state *pi_state = NULL;
struct futex_q *this, *next;
struct plist_head *head;
struct task_struct *p;
pid_t pid = uval & FUTEX_TID_MASK;
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, key)) {
/*
* Another waiter already exists - bump up
* the refcount and return its pi_state:
*/
pi_state = this->pi_state;
/*
* Userspace might have messed up non-PI and PI futexes
*/
if (unlikely(!pi_state))
return -EINVAL;
WARN_ON(!atomic_read(&pi_state->refcount));
/*
* When pi_state->owner is NULL then the owner died
* and another waiter is on the fly. pi_state->owner
* is fixed up by the task which acquires
* pi_state->rt_mutex.
*
* We do not check for pid == 0 which can happen when
* the owner died and exit_robust_list() cleared the
* TID.
*/
if (pid && pi_state->owner) {
/*
* Bail out if user space manipulated the
* futex value.
*/
if (pid != task_pid_vnr(pi_state->owner))
return -EINVAL;
}
atomic_inc(&pi_state->refcount);
*ps = pi_state;
return 0;
}
}
/*
* We are the first waiter - try to look up the real owner and attach
* the new pi_state to it, but bail out when TID = 0
*/
if (!pid)
return -ESRCH;
p = futex_find_get_task(pid);
if (!p)
return -ESRCH;
/*
* We need to look at the task state to figure out whether the
* task is exiting. To protect against a concurrent change of the
* task flags in do_exit(), we do this while holding p->pi_lock:
*/
raw_spin_lock_irq(&p->pi_lock);
if (unlikely(p->flags & PF_EXITING)) {
/*
* The task is on the way out. When PF_EXITPIDONE is
* set, we know that the task has finished the
* cleanup:
*/
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return ret;
}
pi_state = alloc_pi_state();
/*
* Initialize the pi_mutex in locked state and make 'p'
* the owner of it:
*/
rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
/* Store the key for possible exit cleanups: */
pi_state->key = *key;
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &p->pi_state_list);
pi_state->owner = p;
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
*ps = pi_state;
return 0;
}
/**
* futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
* @uaddr: the pi futex user address
* @hb: the pi futex hash bucket
* @key: the futex key associated with uaddr and hb
* @ps: the pi_state pointer where we store the result of the
* lookup
* @task: the task to perform the atomic lock work for. This will
* be "current" except in the case of requeue pi.
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Returns:
* 0 - ready to wait
* 1 - acquired the lock
* <0 - error
*
* The hb->lock and futex_key refs shall be held by the caller.
*/
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
union futex_key *key,
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
int lock_taken, ret, ownerdied = 0;
u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry:
ret = lock_taken = 0;
/*
* To avoid races, we attempt to take the lock here again
* (by doing a 0 -> TID atomic cmpxchg), while holding all
* the locks. It will most likely not succeed.
*/
newval = vpid;
if (set_waiters)
newval |= FUTEX_WAITERS;
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
return -EFAULT;
/*
* Detect deadlocks.
*/
if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
return -EDEADLK;
/*
* Surprise - we got the lock. Just return to userspace:
*/
if (unlikely(!curval))
return 1;
uval = curval;
/*
* Set the FUTEX_WAITERS flag, so the owner will know it has someone
* to wake at the next unlock.
*/
newval = curval | FUTEX_WAITERS;
/*
* There are two cases where we take over the futex: either the
* previous owner died (ownerdied is set by the -ESRCH handling
* below), or the owner TID is 0 because robust-futex cleanup
* already cleared it. In both cases there is nobody left to hand
* the lock over to, so we become the owner.
*
* This is safe as we are protected by the hash bucket lock!
*/
if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
/* Keep the OWNER_DIED bit */
newval = (curval & ~FUTEX_TID_MASK) | vpid;
ownerdied = 0;
lock_taken = 1;
}
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
if (unlikely(curval != uval))
goto retry;
/*
* We took the lock due to owner died take over.
*/
if (unlikely(lock_taken))
return 1;
/*
* We dont have the lock. Look up the PI state (or create it if
* we are the first waiter):
*/
ret = lookup_pi_state(uval, hb, key, ps);
if (unlikely(ret)) {
switch (ret) {
case -ESRCH:
/*
* No owner found for this futex. Check if the
* OWNER_DIED bit is set to figure out whether
* this is a robust futex or not.
*/
if (get_futex_value_locked(&curval, uaddr))
return -EFAULT;
/*
* We simply start over in case of a robust
* futex. The code above will take the futex
* and return happy.
*/
if (curval & FUTEX_OWNER_DIED) {
ownerdied = 1;
goto retry;
}
default:
break;
}
}
return ret;
}
/**
* __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be NULL and must be held by the caller.
*/
static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
|| WARN_ON(plist_node_empty(&q->list)))
return;
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
}
/*
* The hash bucket lock must be held when this is called.
* Afterwards, the futex_q must not be accessed.
*/
static void wake_futex(struct futex_q *q)
{
struct task_struct *p = q->task;
/*
* We set q->lock_ptr = NULL _before_ we wake up the task. If
* a non-futex wake up happens on another CPU then the task
* might exit and p would dereference a non-existing task
* struct. Prevent this by holding a reference on p across the
* wake up.
*/
get_task_struct(p);
__unqueue_futex(q);
/*
* The waiting task can free the futex_q as soon as
* q->lock_ptr = NULL is written, without taking any locks. A
* memory barrier is required here to prevent the following
* store to lock_ptr from getting ahead of the plist_del.
*/
smp_wmb();
q->lock_ptr = NULL;
wake_up_state(p, TASK_NORMAL);
put_task_struct(p);
}
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
if (!pi_state)
return -EINVAL;
/*
* If current does not own the pi_state then the futex is
* inconsistent and user space fiddled with the futex value.
*/
if (pi_state->owner != current)
return -EINVAL;
raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
* It is possible that the next waiter (the one that brought
* this owner to the kernel) timed out and is no longer
* waiting on the lock.
*/
if (!new_owner)
new_owner = this->task;
/*
* We pass it to the next owner. (The WAITERS bit is always
* kept enabled while there is PI state around. We must also
* preserve the owner died bit.)
*/
if (!(uval & FUTEX_OWNER_DIED)) {
int ret = 0;
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
ret = -EFAULT;
else if (curval != uval)
ret = -EINVAL;
if (ret) {
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
}
}
raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
raw_spin_lock_irq(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
raw_spin_unlock_irq(&new_owner->pi_lock);
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
return 0;
}
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
u32 uninitialized_var(oldval);
/*
* There is no waiter, so we unlock the futex. The owner-died
* bit does not need to be preserved here. We are the owner:
*/
if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
return -EFAULT;
if (oldval != uval)
return -EAGAIN;
return 0;
}
/*
* Express the locking dependencies for lockdep:
*/
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
if (hb1 <= hb2) {
spin_lock(&hb1->lock);
if (hb1 < hb2)
spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
} else { /* hb1 > hb2 */
spin_lock(&hb2->lock);
spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
}
}
static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
spin_unlock(&hb1->lock);
if (hb1 != hb2)
spin_unlock(&hb2->lock);
}
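/*
 * Illustrative pairing, as used by futex_wake_op() and futex_requeue():
 *
 *	hb1 = hash_futex(&key1);
 *	hb2 = hash_futex(&key2);
 *	double_lock_hb(hb1, hb2);
 *	... operate on both plists ...
 *	double_unlock_hb(hb1, hb2);
 *
 * Acquiring the lower-addressed bucket lock first imposes a global
 * lock order and avoids ABBA deadlocks when two callers work on the
 * same pair of buckets.
 */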
/*
* Wake up waiters matching bitset queued on this futex (uaddr).
*/
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
int ret;
if (!bitset)
return -EINVAL;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
spin_lock(&hb->lock);
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, &key)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
break;
}
/* Check if one of the bits is set in both bitsets */
if (!(this->bitset & bitset))
continue;
wake_futex(this);
if (++ret >= nr_wake)
break;
}
}
spin_unlock(&hb->lock);
put_futex_key(&key);
out:
return ret;
}
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
int nr_wake, int nr_wake2, int op)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head;
struct futex_q *this, *next;
int ret, op_ret;
retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
double_lock_hb(hb1, hb2);
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
double_unlock_hb(hb1, hb2);
#ifndef CONFIG_MMU
/*
* we don't get EFAULT from MMU faults if we don't have an MMU,
* but we might get them from range checking
*/
ret = op_ret;
goto out_put_keys;
#endif
if (unlikely(op_ret != -EFAULT)) {
ret = op_ret;
goto out_put_keys;
}
ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
head = &hb1->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, &key1)) {
wake_futex(this);
if (++ret >= nr_wake)
break;
}
}
if (op_ret > 0) {
head = &hb2->chain;
op_ret = 0;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, &key2)) {
wake_futex(this);
if (++op_ret >= nr_wake2)
break;
}
}
ret += op_ret;
}
double_unlock_hb(hb1, hb2);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
return ret;
}
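/*
 * Summary of the loop above: wake up to nr_wake waiters on uaddr1,
 * atomically apply the operation encoded in 'op' to *uaddr2 (user
 * space typically builds 'op' with the uapi FUTEX_OP() macro), and if
 * the encoded comparison against the old *uaddr2 value succeeds, wake
 * up to nr_wake2 waiters on uaddr2 as well. The return value is the
 * total number of tasks woken.
 */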
/**
* requeue_futex() - Requeue a futex_q from one hb to another
* @q: the futex_q to requeue
* @hb1: the source hash_bucket
* @hb2: the target hash_bucket
* @key2: the new key for the requeued futex_q
*/
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
struct futex_hash_bucket *hb2, union futex_key *key2)
{
/*
* If key1 and key2 hash to the same bucket, no need to
* requeue.
*/
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
q->key = *key2;
}
/**
* requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
* @q: the futex_q
* @key: the key of the requeue target futex
* @hb: the hash_bucket of the requeue target futex
*
* During futex_requeue, with requeue_pi=1, it is possible to acquire the
* target futex if it is uncontended or via a lock steal. Set the futex_q key
* to the requeue target futex so the waiter can detect the wakeup on the right
* futex, but remove it from the hb and NULL the rt_waiter so it can detect
* atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
* to protect access to the pi_state to fixup the owner later. Must be called
* with both q->lock_ptr and hb->lock held.
*/
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
struct futex_hash_bucket *hb)
{
get_futex_key_refs(key);
q->key = *key;
__unqueue_futex(q);
WARN_ON(!q->rt_waiter);
q->rt_waiter = NULL;
q->lock_ptr = &hb->lock;
wake_up_state(q->task, TASK_NORMAL);
}
/**
* futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
* @pifutex: the user address of the to futex
* @hb1: the from futex hash bucket, must be locked by the caller
* @hb2: the to futex hash bucket, must be locked by the caller
* @key1: the from futex key
* @key2: the to futex key
* @ps: address to store the pi_state pointer
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Try and get the lock on behalf of the top waiter if we can do it atomically.
* Wake the top waiter if we succeed. If the caller specified set_waiters,
* then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
* hb1 and hb2 must be held by the caller.
*
* Returns:
* 0 - failed to acquire the lock atomically
* 1 - acquired the lock
* <0 - error
*/
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
struct futex_hash_bucket *hb1,
struct futex_hash_bucket *hb2,
union futex_key *key1, union futex_key *key2,
struct futex_pi_state **ps, int set_waiters)
{
struct futex_q *top_waiter = NULL;
u32 curval;
int ret;
if (get_futex_value_locked(&curval, pifutex))
return -EFAULT;
/*
* Find the top_waiter and determine if there are additional waiters.
* If the caller intends to requeue more than 1 waiter to pifutex,
* force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
* as we have means to handle the possible fault. If not, don't set
* the bit unnecessarily as it will force the subsequent unlock to enter
* the kernel.
*/
top_waiter = futex_top_waiter(hb1, key1);
/* There are no waiters, nothing for us to do. */
if (!top_waiter)
return 0;
/* Ensure we requeue to the expected futex. */
if (!match_futex(top_waiter->requeue_pi_key, key2))
return -EINVAL;
/*
* Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
* the contended case or if set_waiters is 1. The pi_state is returned
* in ps in contended cases.
*/
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
set_waiters);
if (ret == 1)
requeue_pi_wake_futex(top_waiter, key2, hb2);
return ret;
}
/**
* futex_requeue() - Requeue waiters from uaddr1 to uaddr2
* @uaddr1: source futex user address
* @flags: futex flags (FLAGS_SHARED, etc.)
* @uaddr2: target futex user address
* @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
* @nr_requeue: number of waiters to requeue (0-INT_MAX)
* @cmpval: @uaddr1 expected value (or %NULL)
* @requeue_pi: if we are attempting to requeue from a non-pi futex to a
* pi futex (pi to pi requeue is not supported)
*
* Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
* uaddr2 atomically on behalf of the top waiter.
*
* Returns:
* >=0 - on success, the number of tasks requeued or woken
* <0 - on error
*/
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
u32 __user *uaddr2, int nr_wake, int nr_requeue,
u32 *cmpval, int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head1;
struct futex_q *this, *next;
u32 curval2;
if (requeue_pi) {
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
if (refill_pi_state_cache())
return -ENOMEM;
/*
* requeue_pi must wake as many tasks as it can, up to nr_wake
* + nr_requeue, since it acquires the rt_mutex prior to
* returning to userspace, so as to not leave the rt_mutex with
* waiters and no owner. However, second and third wake-ups
* cannot be predicted as they involve race conditions with the
* first wake and a fault while looking up the pi_state. Both
* pthread_cond_signal() and pthread_cond_broadcast() should
* use nr_wake=1.
*/
if (nr_wake != 1)
return -EINVAL;
}
retry:
if (pi_state != NULL) {
/*
* We will have to lookup the pi_state again, so free this one
* to keep the accounting correct.
*/
free_pi_state(pi_state);
pi_state = NULL;
}
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
double_unlock_hb(hb1, hb2);
ret = get_user(curval, uaddr1);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
* faults rather than in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
* reference to it.
*/
if (ret == 1) {
WARN_ON(pi_state);
drop_count++;
task_count++;
ret = get_futex_value_locked(&curval2, uaddr2);
if (!ret)
ret = lookup_pi_state(curval2, hb2, &key2,
&pi_state);
}
switch (ret) {
case 0:
break;
case -EFAULT:
double_unlock_hb(hb1, hb2);
put_futex_key(&key2);
put_futex_key(&key1);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
case -EAGAIN:
/* The owner was exiting, try again. */
double_unlock_hb(hb1, hb2);
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
default:
goto out_unlock;
}
}
head1 = &hb1->chain;
plist_for_each_entry_safe(this, next, head1, list) {
if (task_count - nr_wake >= nr_requeue)
break;
if (!match_futex(&this->key, &key1))
continue;
/*
* FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*/
if ((requeue_pi && !this->rt_waiter) ||
(!requeue_pi && this->rt_waiter)) {
ret = -EINVAL;
break;
}
/*
* Wake nr_wake waiters. For requeue_pi, if we acquired the
* lock, we already woke the top_waiter. If not, it will be
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
wake_futex(this);
continue;
}
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
ret = -EINVAL;
break;
}
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
*/
if (requeue_pi) {
/* Prepare the waiter to take the rt_mutex. */
atomic_inc(&pi_state->refcount);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task, 1);
if (ret == 1) {
/* We got the lock. */
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
free_pi_state(pi_state);
goto out_unlock;
}
}
requeue_futex(this, hb1, hb2, &key2);
drop_count++;
}
out_unlock:
double_unlock_hb(hb1, hb2);
/*
* drop_futex_key_refs() must be called outside the spinlocks. During
* the requeue we moved futex_q's from the hash bucket at key1 to the
* one at key2 and updated their key pointer. We no longer need to
* hold the references to key1.
*/
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
if (pi_state != NULL)
free_pi_state(pi_state);
return ret ? ret : task_count;
}
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
__acquires(&hb->lock)
{
struct futex_hash_bucket *hb;
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
return hb;
}
static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
spin_unlock(&hb->lock);
}
/**
* queue_me() - Enqueue the futex_q on the futex_hash_bucket
* @q: The futex_q to enqueue
* @hb: The destination hash bucket
*
* The hb->lock must be held by the caller, and is released here. A call to
* queue_me() is typically paired with exactly one call to unqueue_me(). The
* exceptions involve the PI related operations, which may use unqueue_me_pi()
* or nothing if the unqueue is done as part of the wake process and the unqueue
* state is implicit in the state of woken task (see futex_wait_requeue_pi() for
* an example).
*/
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
int prio;
/*
* The priority used to register this element is
* - either the real thread-priority for the real-time threads
* (i.e. threads with a priority lower than MAX_RT_PRIO)
* - or MAX_RT_PRIO for non-RT threads.
* Thus, all RT-threads are woken first in priority order, and
* the others are woken last, in FIFO order.
*/
prio = min(current->normal_prio, MAX_RT_PRIO);
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
q->task = current;
spin_unlock(&hb->lock);
}
/**
* unqueue_me() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
* be paired with exactly one earlier call to queue_me().
*
* Returns:
* 1 - if the futex_q was still queued (and we unqueued it)
* 0 - if the futex_q was already removed by the waking thread
*/
static int unqueue_me(struct futex_q *q)
{
spinlock_t *lock_ptr;
int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
retry:
lock_ptr = q->lock_ptr;
barrier();
if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
* spin_lock(), causing us to take the wrong lock. This
* corrects the race condition.
*
* Reasoning goes like this: if we have the wrong lock,
* q->lock_ptr must have changed (maybe several times)
* between reading it and the spin_lock(). It can
* change again after the spin_lock() but only if it was
* already changed before the spin_lock(). It cannot,
* however, change back to the original value. Therefore
* we can detect whether we acquired the correct lock.
*/
if (unlikely(lock_ptr != q->lock_ptr)) {
spin_unlock(lock_ptr);
goto retry;
}
__unqueue_futex(q);
BUG_ON(q->pi_state);
spin_unlock(lock_ptr);
ret = 1;
}
drop_futex_key_refs(&q->key);
return ret;
}
/*
* PI futexes cannot be requeued and must remove themselves from the
* hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
* and dropped here.
*/
static void unqueue_me_pi(struct futex_q *q)
__releases(q->lock_ptr)
{
__unqueue_futex(q);
BUG_ON(!q->pi_state);
free_pi_state(q->pi_state);
q->pi_state = NULL;
spin_unlock(q->lock_ptr);
}
/*
* Fixup the pi_state owner with the new owner.
*
* Must be called with hash bucket lock held and mm->sem held for non
* private futexes.
*/
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
struct task_struct *newowner)
{
u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
struct task_struct *oldowner = pi_state->owner;
u32 uval, uninitialized_var(curval), newval;
int ret;
/* Owner died? */
if (!pi_state->owner)
newtid |= FUTEX_OWNER_DIED;
/*
* We are here either because we stole the rtmutex from the
* previous highest priority waiter or we are the highest priority
* waiter but failed to get the rtmutex the first time.
* We have to replace the newowner TID in the user space variable.
* This must be atomic as we have to preserve the owner died bit here.
*
* Note: We write the user space value _before_ changing the pi_state
* because we can fault here. Imagine swapped out pages or a fork
* that marked all the anonymous memory readonly for cow.
*
* Modifying pi_state _before_ the user space value would
* leave the pi_state in an inconsistent state when we fault
* here, because we need to drop the hash bucket lock to
* handle the fault. This might be observed in the PID check
* in lookup_pi_state.
*/
retry:
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;
while (1) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
goto handle_fault;
if (curval == uval)
break;
uval = curval;
}
/*
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
if (pi_state->owner != NULL) {
raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
}
pi_state->owner = newowner;
raw_spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
raw_spin_unlock_irq(&newowner->pi_lock);
return 0;
/*
* To handle the page fault we need to drop the hash bucket
* lock here. That gives the other task (either the highest priority
* waiter itself or the task which stole the rtmutex) the
* chance to try the fixup of the pi_state. So once we are
* back from handling the fault we need to check the pi_state
* after reacquiring the hash bucket lock and before trying to
* do another fixup. When the fixup has been done already we
* simply return.
*/
handle_fault:
spin_unlock(q->lock_ptr);
ret = fault_in_user_writeable(uaddr);
spin_lock(q->lock_ptr);
/*
* Check if someone else fixed it for us:
*/
if (pi_state->owner != oldowner)
return 0;
if (ret)
return ret;
goto retry;
}
static long futex_wait_restart(struct restart_block *restart);
/**
* fixup_owner() - Post lock pi_state and corner case management
* @uaddr: user address of the futex
* @q: futex_q (contains pi_state and access to the rt_mutex)
* @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
*
* After attempting to lock an rt_mutex, this function is called to cleanup
* the pi_state owner as well as handle race conditions that may allow us to
* acquire the lock. Must be called with the hb lock held.
*
* Returns:
* 1 - success, lock taken
* 0 - success, lock not taken
* <0 - on error (-EFAULT)
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
struct task_struct *owner;
int ret = 0;
if (locked) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case:
*/
if (q->pi_state->owner != current)
ret = fixup_pi_state_owner(uaddr, q, current);
goto out;
}
/*
* Catch the rare case where the lock was released while we were
* on the way back, before we locked the hash bucket.
*/
if (q->pi_state->owner == current) {
/*
* Try to get the rt_mutex now. This might fail as some other
* task acquired the rt_mutex after we removed ourself from the
* rt_mutex waiters list.
*/
if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
locked = 1;
goto out;
}
/*
* pi_state is incorrect, some other task did a lock steal and
* we returned due to timeout or signal without taking the
* rt_mutex. Too late.
*/
raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
owner = rt_mutex_owner(&q->pi_state->pi_mutex);
if (!owner)
owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
ret = fixup_pi_state_owner(uaddr, q, owner);
goto out;
}
/*
* Paranoia check. If we did not take the lock, then we should not be
* the owner of the rt_mutex.
*/
if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
"pi-state %p\n", ret,
q->pi_state->pi_mutex.owner,
q->pi_state->owner);
out:
return ret ? ret : locked;
}
/**
* futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
* @hb: the futex hash bucket, must be locked by the caller
* @q: the futex_q to queue up on
* @timeout: the prepared hrtimer_sleeper, or null for no timeout
*/
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
struct hrtimer_sleeper *timeout)
{
/*
* The task state is guaranteed to be set before another task can
* wake it. set_current_state() is implemented using set_mb() and
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
*/
set_current_state(TASK_INTERRUPTIBLE);
queue_me(q, hb);
/* Arm the timer */
if (timeout) {
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
if (!hrtimer_active(&timeout->timer))
timeout->task = NULL;
}
/*
* If we have been removed from the hash list, then another task
* has tried to wake us, and we can skip the call to schedule().
*/
if (likely(!plist_node_empty(&q->list))) {
/*
* If the timer has already expired, current will already be
* flagged for rescheduling. Only call schedule if there
* is no timeout, or if it has yet to expire.
*/
if (!timeout || timeout->task)
schedule();
}
__set_current_state(TASK_RUNNING);
}
/**
* futex_wait_setup() - Prepare to wait on a futex
* @uaddr: the futex userspace address
* @val: the expected value
* @flags: futex flags (FLAGS_SHARED, etc.)
* @q: the associated futex_q
* @hb: storage for hash_bucket pointer to be returned to caller
*
* Setup the futex_q and locate the hash_bucket. Get the futex value and
* compare it with the expected value. Handle atomic faults internally.
* Return with the hb lock held and a q.key reference on success, and unlocked
* with no q.key reference on failure.
*
* Returns:
* 0 - uaddr contains val and hb has been locked
* <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
*/
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
struct futex_q *q, struct futex_hash_bucket **hb)
{
u32 uval;
int ret;
/*
* Access the page AFTER the hash-bucket is locked.
* Order is important:
*
* Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
* Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
*
* The basic logical guarantee of a futex is that it blocks ONLY
* if cond(var) is known to be true at the time of blocking, for
* any cond. If we locked the hash-bucket after testing *uaddr, that
* would open a race condition where we could block indefinitely with
* cond(var) false, which would violate the guarantee.
*
* On the other hand, we insert q and release the hash-bucket only
* after testing *uaddr. This guarantees that futex_wait() will NOT
* absorb a wakeup if *uaddr does not match the desired values
* while the syscall executes.
*/
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
if (unlikely(ret != 0))
return ret;
retry_private:
*hb = queue_lock(q);
ret = get_futex_value_locked(&uval, uaddr);
if (ret) {
queue_unlock(q, *hb);
ret = get_user(uval, uaddr);
if (ret)
goto out;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&q->key);
goto retry;
}
if (uval != val) {
queue_unlock(q, *hb);
ret = -EWOULDBLOCK;
}
out:
if (ret)
put_futex_key(&q->key);
return ret;
}
static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
ktime_t *abs_time, u32 bitset)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct restart_block *restart;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int ret;
if (!bitset)
return -EINVAL;
q.bitset = bitset;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
retry:
/*
* Prepare to wait on uaddr. On success, holds hb lock and increments
* q.key refs.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out;
/* queue_me and wait for wakeup, timeout, or a signal. */
futex_wait_queue_me(hb, &q, to);
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
/* unqueue_me() drops q.key ref */
if (!unqueue_me(&q))
goto out;
ret = -ETIMEDOUT;
if (to && !to->task)
goto out;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
if (!signal_pending(current))
goto retry;
ret = -ERESTARTSYS;
if (!abs_time)
goto out;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
restart->futex.uaddr = uaddr;
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
ret = -ERESTART_RESTARTBLOCK;
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
static long futex_wait_restart(struct restart_block *restart)
{
u32 __user *uaddr = restart->futex.uaddr;
ktime_t t, *tp = NULL;
if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
t.tv64 = restart->futex.time;
tp = &t;
}
restart->fn = do_no_restart_syscall;
return (long)futex_wait(uaddr, restart->futex.flags,
restart->futex.val, tp, restart->futex.bitset);
}
/*
* Userspace tried a 0 -> TID atomic transition of the futex value
* and failed. The kernel side here does the whole locking operation:
* if there are waiters then it will block, it does PI, etc. (Due to
* races the kernel might see a 0 value of the futex too.)
*/
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int res, ret;
if (refill_pi_state_cache())
return -ENOMEM;
if (time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires(&to->timer, *time);
}
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
retry_private:
hb = queue_lock(&q);
ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
if (unlikely(ret)) {
switch (ret) {
case 1:
/* We got the lock. */
ret = 0;
goto out_unlock_put_key;
case -EFAULT:
goto uaddr_faulted;
case -EAGAIN:
/*
* Task is exiting and we just wait for the
* exit to complete.
*/
queue_unlock(&q, hb);
put_futex_key(&q.key);
cond_resched();
goto retry;
default:
goto out_unlock_put_key;
}
}
/*
* Only actually queue now that the atomic ops are done:
*/
queue_me(&q, hb);
WARN_ON(!q.pi_state);
/*
* Block on the PI mutex:
*/
if (!trylock)
ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
else {
ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
/* Fixup the trylock return value: */
ret = ret ? 0 : -EWOULDBLOCK;
}
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr, &q, !ret);
/*
* If fixup_owner() returned an error, propagate that. If it acquired
* the lock, clear our -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/*
* If fixup_owner() faulted and was unable to handle the fault, unlock
* it and return the fault to userspace.
*/
if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
rt_mutex_unlock(&q.pi_state->pi_mutex);
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
goto out_put_key;
out_unlock_put_key:
queue_unlock(&q, hb);
out_put_key:
put_futex_key(&q.key);
out:
if (to)
destroy_hrtimer_on_stack(&to->timer);
return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
queue_unlock(&q, hb);
ret = fault_in_user_writeable(uaddr);
if (ret)
goto out_put_key;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&q.key);
goto retry;
}
/*
* Userspace attempted a TID -> 0 atomic transition, and failed.
* This is the in-kernel slowpath: we look up the PI state (if any),
* and do the rt-mutex unlock.
*/
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
u32 uval, vpid = task_pid_vnr(current);
int ret;
retry:
if (get_user(uval, uaddr))
return -EFAULT;
/*
* We release only a lock we actually own:
*/
if ((uval & FUTEX_TID_MASK) != vpid)
return -EPERM;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
spin_lock(&hb->lock);
/*
* To avoid races, try to do the TID -> 0 atomic transition
* again. If it succeeds then we can return without waking
* anyone else up:
*/
if (!(uval & FUTEX_OWNER_DIED) &&
cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
goto pi_faulted;
/*
* Rare case: we managed to release the lock atomically,
* no need to wake anyone else up:
*/
if (unlikely(uval == vpid))
goto out_unlock;
/*
* Ok, other tasks may need to be woken up - check waiters
* and do the wakeup if necessary:
*/
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (!match_futex(&this->key, &key))
continue;
ret = wake_futex_pi(uaddr, uval, this);
/*
* The atomic access to the futex value
* generated a pagefault, so retry the
* user-access and the wakeup:
*/
if (ret == -EFAULT)
goto pi_faulted;
goto out_unlock;
}
/*
* No waiters - kernel unlocks the futex:
*/
if (!(uval & FUTEX_OWNER_DIED)) {
ret = unlock_futex_pi(uaddr, uval);
if (ret == -EFAULT)
goto pi_faulted;
}
out_unlock:
spin_unlock(&hb->lock);
put_futex_key(&key);
out:
return ret;
pi_faulted:
spin_unlock(&hb->lock);
put_futex_key(&key);
ret = fault_in_user_writeable(uaddr);
if (!ret)
goto retry;
return ret;
}
/**
* handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
* @hb: the hash_bucket futex_q was original enqueued on
* @q: the futex_q woken while waiting to be requeued
* @key2: the futex_key of the requeue target futex
* @timeout: the timeout associated with the wait (NULL if none)
*
* Detect if the task was woken on the initial futex as opposed to the requeue
* target futex. If so, determine if it was a timeout or a signal that caused
* the wakeup and return the appropriate error code to the caller. Must be
* called with the hb lock held.
*
* Returns:
* 0 - no early wakeup detected
* <0 - -ETIMEDOUT or -ERESTARTNOINTR
*/
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
struct futex_q *q, union futex_key *key2,
struct hrtimer_sleeper *timeout)
{
int ret = 0;
/*
* With the hb lock held, we avoid races while we process the wakeup.
* We only need to hold hb (and not hb2) to ensure atomicity as the
* wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
* It can't be requeued from uaddr2 to something else since we don't
* support a PI aware source futex for requeue.
*/
if (!match_futex(&q->key, key2)) {
WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
/*
* We were woken prior to requeue by a timeout or a signal.
* Unqueue the futex_q and determine which it was.
*/
plist_del(&q->list, &hb->chain);
/* Handle spurious wakeups gracefully */
ret = -EWOULDBLOCK;
if (timeout && !timeout->task)
ret = -ETIMEDOUT;
else if (signal_pending(current))
ret = -ERESTARTNOINTR;
}
return ret;
}
/**
* futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
* @uaddr: the futex we initially wait on (non-pi)
* @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
* the same type, no requeueing from private to shared, etc.
* @val: the expected value of uaddr
* @abs_time: absolute timeout
* @bitset: 32 bit wakeup bitset set by userspace, defaults to all
* @uaddr2: the pi futex we will take prior to returning to user-space
*
* The caller will wait on uaddr and will be requeued by futex_requeue() to
* uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
* on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
* userspace. This ensures the rt_mutex maintains an owner when it has waiters;
* without one, the pi logic would not know which task to boost/deboost, if
* there was a need to.
*
* We call schedule() in futex_wait_queue_me() when we enqueue and return there
* via the following:
* 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
* 2) wakeup on uaddr2 after a requeue
* 3) signal
* 4) timeout
*
* If 3, cleanup and return -ERESTARTNOINTR.
*
* If 2, we may then block on trying to take the rt_mutex and return via:
* 5) successful lock
* 6) signal
* 7) timeout
* 8) other lock acquisition failure
*
* If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
*
* If 4 or 7, we cleanup and return with -ETIMEDOUT.
*
* Returns:
* 0 - On success
* <0 - On error
*/
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 val, ktime_t *abs_time, u32 bitset,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
if (uaddr == uaddr2)
return -EINVAL;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/*
* Prepare to wait on uaddr. On success, increments q.key (key1) ref
* count.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out_key2;
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code. The
* futex_requeue dropped our key1 reference and incremented our key2
* reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
spin_unlock(q.lock_ptr);
}
} else {
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, &q, !ret);
/*
* If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
/*
* If fixup_pi_state_owner() faulted and was unable to handle the
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(&q.key);
out_key2:
put_futex_key(&key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*
* Implementation: user-space maintains a per-thread list of locks it
* is holding. Upon do_exit(), the kernel carefully walks this list,
* and marks all locks that are owned by this thread with the
* FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
* always manipulated with the lock held, so the list is private and
* per-thread. Userspace also maintains a per-thread 'list_op_pending'
* field, to allow the kernel to clean up if the thread dies after
* acquiring the lock, but just before it could have added itself to
* the list. There can only be one such pending lock.
*/
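/*
 * Illustrative user space locking sequence (a sketch of the protocol
 * only, not the actual glibc code; 'lock' is a hypothetical robust
 * mutex containing a futex word and a robust_list link):
 *
 *	head->list_op_pending = &lock->list;	// announce the intent
 *	while (cmpxchg(&lock->futex, 0, tid))	// 0 -> TID acquisition
 *		sys_futex(&lock->futex, FUTEX_WAIT, ...);
 *	enqueue(&head->list, &lock->list);	// lock now on the list
 *	head->list_op_pending = NULL;
 *
 * If the thread dies between the cmpxchg and the list insertion, the
 * kernel still finds the held lock via list_op_pending in
 * exit_robust_list() below.
 */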
/**
* sys_set_robust_list() - Set the robust-futex list head of a task
* @head: pointer to the list-head
* @len: length of the list-head, as userspace expects
*/
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
size_t, len)
{
if (!futex_cmpxchg_enabled)
return -ENOSYS;
/*
* The kernel knows only one size for now:
*/
if (unlikely(len != sizeof(*head)))
return -EINVAL;
current->robust_list = head;
return 0;
}
/**
* sys_get_robust_list() - Get the robust-futex list head of a task
* @pid: pid of the process [zero for current task]
* @head_ptr: pointer to a list-head pointer, the kernel fills it in
* @len_ptr: pointer to a length field, the kernel fills in the header size
*/
SYSCALL_DEFINE3(get_robust_list, int, pid,
struct robust_list_head __user * __user *, head_ptr,
size_t __user *, len_ptr)
{
struct robust_list_head __user *head;
unsigned long ret;
struct task_struct *p;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
rcu_read_lock();
ret = -ESRCH;
if (!pid)
p = current;
else {
p = find_task_by_vpid(pid);
if (!p)
goto err_unlock;
}
ret = -EPERM;
if (!ptrace_may_access(p, PTRACE_MODE_READ))
goto err_unlock;
head = p->robust_list;
rcu_read_unlock();
if (put_user(sizeof(*head), len_ptr))
return -EFAULT;
return put_user(head, head_ptr);
err_unlock:
rcu_read_unlock();
return ret;
}
/*
* Process a futex-list entry, check whether it's owned by the
* dying task, and do notification if so:
*/
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
u32 uval, uninitialized_var(nval), mval;
retry:
if (get_user(uval, uaddr))
return -1;
if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
/*
* Ok, this dying thread is truly holding a futex
* of interest. Set the OWNER_DIED bit atomically
* via cmpxchg, and if the value had FUTEX_WAITERS
* set, wake up a waiter (if any). (We have to do a
* futex_wake() even if OWNER_DIED is already set -
* to handle the rare but possible case of recursive
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
/*
* We are not holding a lock here, but we want to have
* the pagefault_disable/enable() protection because
* we want to handle the fault gracefully. If the
* access fails we try to fault in the futex with R/W
* verification via get_user_pages. get_user() above
* does not guarantee R/W access. If that fails we
* give up and leave the futex locked.
*/
if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
if (fault_in_user_writeable(uaddr))
return -1;
goto retry;
}
if (nval != uval)
goto retry;
/*
* Wake robust non-PI futexes here. The wakeup of
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS))
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
}
return 0;
}
/*
* Fetch a robust-list pointer. Bit 0 signals PI futexes:
*/
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
unsigned int *pi)
{
unsigned long uentry;
if (get_user(uentry, (unsigned long __user *)head))
return -EFAULT;
*entry = (void __user *)(uentry & ~1UL);
*pi = uentry & 1;
return 0;
}
/*
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
* We silently return on any sign of list-walking problem.
*/
void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int uninitialized_var(next_pi);
unsigned long futex_offset;
int rc;
if (!futex_cmpxchg_enabled)
return;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
if (fetch_robust_entry(&entry, &head->list.next, &pi))
return;
/*
* Fetch the relative futex offset:
*/
if (get_user(futex_offset, &head->futex_offset))
return;
/*
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
return;
next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
/*
* Fetch the next entry in the list before calling
* handle_futex_death:
*/
rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
/*
* A pending lock might already be on the list, so
* don't process it twice:
*/
if (entry != pending)
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi))
return;
if (rc)
return;
entry = next_entry;
pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
if (!--limit)
break;
cond_resched();
}
if (pending)
handle_futex_death((void __user *)pending + futex_offset,
curr, pip);
}
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
int cmd = op & FUTEX_CMD_MASK;
unsigned int flags = 0;
if (!(op & FUTEX_PRIVATE_FLAG))
flags |= FLAGS_SHARED;
if (op & FUTEX_CLOCK_REALTIME) {
flags |= FLAGS_CLOCKRT;
if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_LOCK_PI:
case FUTEX_UNLOCK_PI:
case FUTEX_TRYLOCK_PI:
case FUTEX_WAIT_REQUEUE_PI:
case FUTEX_CMP_REQUEUE_PI:
if (!futex_cmpxchg_enabled)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAIT_BITSET:
return futex_wait(uaddr, flags, val, timeout, val3);
case FUTEX_WAKE:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAKE_BITSET:
return futex_wake(uaddr, flags, val, val3);
case FUTEX_REQUEUE:
return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
case FUTEX_CMP_REQUEUE:
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
case FUTEX_WAKE_OP:
return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
case FUTEX_LOCK_PI:
return futex_lock_pi(uaddr, flags, val, timeout, 0);
case FUTEX_UNLOCK_PI:
return futex_unlock_pi(uaddr, flags);
case FUTEX_TRYLOCK_PI:
return futex_lock_pi(uaddr, flags, 0, timeout, 1);
case FUTEX_WAIT_REQUEUE_PI:
val3 = FUTEX_BITSET_MATCH_ANY;
return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
uaddr2);
case FUTEX_CMP_REQUEUE_PI:
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
}
return -ENOSYS;
}
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
struct timespec __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
struct timespec ts;
ktime_t t, *tp = NULL;
u32 val2 = 0;
int cmd = op & FUTEX_CMD_MASK;
if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
cmd == FUTEX_WAIT_BITSET ||
cmd == FUTEX_WAIT_REQUEUE_PI)) {
if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
return -EFAULT;
if (!timespec_valid(&ts))
return -EINVAL;
t = timespec_to_ktime(ts);
if (cmd == FUTEX_WAIT)
t = ktime_add_safe(ktime_get(), t);
tp = &t;
}
/*
* requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
* number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
*/
if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
val2 = (u32) (unsigned long) utime;
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
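/*
 * Illustrative userspace sketch (not part of this file): the raw futex(2)
 * calling convention dispatched above. The fourth argument is the utime
 * pointer (a timeout for FUTEX_WAIT). Helper names are hypothetical; real
 * code would normally use a threading library instead.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Block while *uaddr == val; fails with EAGAIN if the value changed. */
static long futex_wait_example(int *uaddr, int val)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
}

/* Wake up to nr waiters blocked on uaddr. */
static long futex_wake_example(int *uaddr, int nr)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr, NULL, NULL, 0);
}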
static int __init futex_init(void)
{
u32 curval;
int i;
/*
* This will fail and we want it. Some arch implementations do
* runtime detection of the futex_atomic_cmpxchg_inatomic()
* functionality. We want to know that before we call in any
* of the complex code paths. Also we want to prevent
* registration of robust lists in that case. NULL is
* guaranteed to fault and we get -EFAULT on functional
* implementation, the non-functional ones will return
* -ENOSYS.
*/
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
futex_cmpxchg_enabled = 1;
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
return 0;
}
__initcall(futex_init);
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_3848_0 |
crossvul-cpp_data_good_3516_0 | #include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
unsigned long data, text, lib, swap;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
* hiwater_rss only when about to *lower* total_vm or rss. Any
* collector of these hiwater stats must therefore get total_vm
* and rss too, which will usually be the higher. Barriers? not
* worth the effort, such snapshots can always be inconsistent.
*/
hiwater_vm = total_vm = mm->total_vm;
if (hiwater_vm < mm->hiwater_vm)
hiwater_vm = mm->hiwater_vm;
hiwater_rss = total_rss = get_mm_rss(mm);
if (hiwater_rss < mm->hiwater_rss)
hiwater_rss = mm->hiwater_rss;
data = mm->total_vm - mm->shared_vm - mm->stack_vm;
text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
"VmLck:\t%8lu kB\n"
"VmHWM:\t%8lu kB\n"
"VmRSS:\t%8lu kB\n"
"VmData:\t%8lu kB\n"
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
hiwater_rss << (PAGE_SHIFT-10),
total_rss << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
swap << (PAGE_SHIFT-10));
}
unsigned long task_vsize(struct mm_struct *mm)
{
return PAGE_SIZE * mm->total_vm;
}
unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
*shared = get_mm_counter(mm, MM_FILEPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm;
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
return mm->total_vm;
}
static void pad_len_spaces(struct seq_file *m, int len)
{
len = 25 + sizeof(void*) * 6 - len;
if (len < 1)
len = 1;
seq_printf(m, "%*c", len, ' ');
}
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
if (vma && vma != priv->tail_vma) {
struct mm_struct *mm = vma->vm_mm;
up_read(&mm->mmap_sem);
mmput(mm);
}
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
unsigned long last_addr = m->version;
struct mm_struct *mm;
struct vm_area_struct *vma, *tail_vma = NULL;
loff_t l = *pos;
/* Clear the per syscall fields in priv */
priv->task = NULL;
priv->tail_vma = NULL;
/*
* We remember last_addr rather than next_addr so that find_vma()
* hits mmap_cache most of the time. last_addr is zero at the
* beginning and after an lseek, and -1 once the end of the vmas
* has been reached.
*/
if (last_addr == -1UL)
return NULL;
priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
if (!priv->task)
return ERR_PTR(-ESRCH);
mm = mm_for_maps(priv->task);
if (!mm || IS_ERR(mm))
return mm;
down_read(&mm->mmap_sem);
tail_vma = get_gate_vma(priv->task->mm);
priv->tail_vma = tail_vma;
/* Start with last addr hint */
vma = find_vma(mm, last_addr);
if (last_addr && vma) {
vma = vma->vm_next;
goto out;
}
/*
* Check that the vma index is within range and scan
* sequentially to the requested position.
*/
vma = NULL;
if ((unsigned long)l < mm->map_count) {
vma = mm->mmap;
while (l-- && vma)
vma = vma->vm_next;
goto out;
}
if (l != mm->map_count)
tail_vma = NULL; /* After gate vma */
out:
if (vma)
return vma;
/* End of vmas has been reached */
m->version = (tail_vma != NULL)? 0: -1UL;
up_read(&mm->mmap_sem);
mmput(mm);
return tail_vma;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
struct vm_area_struct *tail_vma = priv->tail_vma;
(*pos)++;
if (vma && (vma != tail_vma) && vma->vm_next)
return vma->vm_next;
vma_stop(priv, vma);
return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
if (!IS_ERR(vma))
vma_stop(priv, vma);
if (priv->task)
put_task_struct(priv->task);
}
static int do_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
struct proc_maps_private *priv;
int ret = -ENOMEM;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv) {
priv->pid = proc_pid(inode);
ret = seq_open(file, ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = priv;
} else {
kfree(priv);
}
}
return ret;
}
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
struct file *file = vma->vm_file;
int flags = vma->vm_flags;
unsigned long ino = 0;
unsigned long long pgoff = 0;
unsigned long start;
dev_t dev = 0;
int len;
if (file) {
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
}
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
if (vma->vm_flags & VM_GROWSDOWN)
if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
start += PAGE_SIZE;
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
start,
vma->vm_end,
flags & VM_READ ? 'r' : '-',
flags & VM_WRITE ? 'w' : '-',
flags & VM_EXEC ? 'x' : '-',
flags & VM_MAYSHARE ? 's' : 'p',
pgoff,
MAJOR(dev), MINOR(dev), ino, &len);
/*
* Print the dentry name for named mappings, and a
* special [heap] marker for the heap:
*/
if (file) {
pad_len_spaces(m, len);
seq_path(m, &file->f_path, "\n");
} else {
const char *name = arch_vma_name(vma);
if (!name) {
if (mm) {
if (vma->vm_start <= mm->brk &&
vma->vm_end >= mm->start_brk) {
name = "[heap]";
} else if (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack) {
name = "[stack]";
}
} else {
name = "[vdso]";
}
}
if (name) {
pad_len_spaces(m, len);
seq_puts(m, name);
}
}
seq_putc(m, '\n');
}
static int show_map(struct seq_file *m, void *v)
{
struct vm_area_struct *vma = v;
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
show_map_vma(m, vma);
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
return 0;
}
static const struct seq_operations proc_pid_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_map
};
static int maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_maps_op);
}
const struct file_operations proc_maps_operations = {
.open = maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
/*
* Proportional Set Size(PSS): my share of RSS.
*
* PSS of a process is the count of pages it has in memory, where each
* page is divided by the number of processes sharing it. So if a
* process has 1000 pages all to itself, and 1000 shared with one other
* process, its PSS will be 1500.
*
* To keep accumulated division errors low, we adopt a 64-bit
* fixed-point pss counter, so (pss >> PSS_SHIFT) is the real
* byte count.
*
* A shift of 12 before division means (assuming 4K page size):
* - 1M 3-user-pages add up to 8KB errors;
* - supports mapcount up to 2^24, or 16M;
* - supports PSS up to 2^52 bytes, or 4PB.
*/
#define PSS_SHIFT 12
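/*
 * Hedged worked example of the fixed-point arithmetic above (illustrative
 * only, not used by this file): one 4K page with mapcount 3 contributes
 * (4096 << PSS_SHIFT) / 3 to pss, and pss >> PSS_SHIFT recovers 1365
 * bytes, losing less than one byte to integer truncation.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long pss = 0;

	pss += (4096ULL << 12) / 3;	/* one page shared by three processes */
	printf("pss bytes: %llu\n", pss >> 12);	/* prints 1365 */
	return 0;
}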
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
struct vm_area_struct *vma;
unsigned long resident;
unsigned long shared_clean;
unsigned long shared_dirty;
unsigned long private_clean;
unsigned long private_dirty;
unsigned long referenced;
unsigned long anonymous;
unsigned long anonymous_thp;
unsigned long swap;
u64 pss;
};
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
unsigned long ptent_size, struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
struct page *page;
int mapcount;
if (is_swap_pte(ptent)) {
mss->swap += ptent_size;
return;
}
if (!pte_present(ptent))
return;
page = vm_normal_page(vma, addr, ptent);
if (!page)
return;
if (PageAnon(page))
mss->anonymous += ptent_size;
mss->resident += ptent_size;
/* Accumulate the size in pages that have been accessed. */
if (pte_young(ptent) || PageReferenced(page))
mss->referenced += ptent_size;
mapcount = page_mapcount(page);
if (mapcount >= 2) {
if (pte_dirty(ptent) || PageDirty(page))
mss->shared_dirty += ptent_size;
else
mss->shared_clean += ptent_size;
mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
} else {
if (pte_dirty(ptent) || PageDirty(page))
mss->private_dirty += ptent_size;
else
mss->private_clean += ptent_size;
mss->pss += (ptent_size << PSS_SHIFT);
}
}
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
pte_t *pte;
spinlock_t *ptl;
spin_lock(&walk->mm->page_table_lock);
if (pmd_trans_huge(*pmd)) {
if (pmd_trans_splitting(*pmd)) {
spin_unlock(&walk->mm->page_table_lock);
wait_split_huge_page(vma->anon_vma, pmd);
} else {
smaps_pte_entry(*(pte_t *)pmd, addr,
HPAGE_PMD_SIZE, walk);
spin_unlock(&walk->mm->page_table_lock);
mss->anonymous_thp += HPAGE_PMD_SIZE;
return 0;
}
} else {
spin_unlock(&walk->mm->page_table_lock);
}
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
* in here.
*/
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
}
static int show_smap(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
.mm = vma->vm_mm,
.private = &mss,
};
memset(&mss, 0, sizeof mss);
mss.vma = vma;
/* mmap_sem is held in m_start */
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
show_map_vma(m, vma);
seq_printf(m,
"Size: %8lu kB\n"
"Rss: %8lu kB\n"
"Pss: %8lu kB\n"
"Shared_Clean: %8lu kB\n"
"Shared_Dirty: %8lu kB\n"
"Private_Clean: %8lu kB\n"
"Private_Dirty: %8lu kB\n"
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
"AnonHugePages: %8lu kB\n"
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n"
"Locked: %8lu kB\n",
(vma->vm_end - vma->vm_start) >> 10,
mss.resident >> 10,
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
mss.shared_clean >> 10,
mss.shared_dirty >> 10,
mss.private_clean >> 10,
mss.private_dirty >> 10,
mss.referenced >> 10,
mss.anonymous >> 10,
mss.anonymous_thp >> 10,
mss.swap >> 10,
vma_kernel_pagesize(vma) >> 10,
vma_mmu_pagesize(vma) >> 10,
(vma->vm_flags & VM_LOCKED) ?
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
return 0;
}
static const struct seq_operations proc_pid_smaps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_smap
};
static int smaps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_smaps_op);
}
const struct file_operations proc_smaps_operations = {
.open = smaps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->private;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
if (!pte_present(ptent))
continue;
page = vm_normal_page(vma, addr, ptent);
if (!page)
continue;
/* Clear accessed and referenced bits. */
ptep_test_and_clear_young(vma, addr, pte);
ClearPageReferenced(page);
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
}
#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
char buffer[PROC_NUMBUF];
struct mm_struct *mm;
struct vm_area_struct *vma;
long type;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
if (strict_strtol(strstrip(buffer), 10, &type))
return -EINVAL;
if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
return -EINVAL;
task = get_proc_task(file->f_path.dentry->d_inode);
if (!task)
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
struct mm_walk clear_refs_walk = {
.pmd_entry = clear_refs_pte_range,
.mm = mm,
};
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
clear_refs_walk.private = vma;
if (is_vm_hugetlb_page(vma))
continue;
/*
* Writing 1 to /proc/pid/clear_refs affects all pages.
*
* Writing 2 to /proc/pid/clear_refs only affects
* Anonymous pages.
*
* Writing 3 to /proc/pid/clear_refs only affects file
* mapped pages.
*/
if (type == CLEAR_REFS_ANON && vma->vm_file)
continue;
if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
continue;
walk_page_range(vma->vm_start, vma->vm_end,
&clear_refs_walk);
}
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
mmput(mm);
}
put_task_struct(task);
return count;
}
const struct file_operations proc_clear_refs_operations = {
.write = clear_refs_write,
.llseek = noop_llseek,
};
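/*
 * Illustrative userspace sketch (not part of this file): driving the
 * interface above, equivalent to `echo 1 > /proc/self/clear_refs`. The
 * helper name is hypothetical.
 */
#include <fcntl.h>
#include <unistd.h>

static int clear_refs_example(void)
{
	int fd = open("/proc/self/clear_refs", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "1", 1);	/* "1" = all, "2" = anon, "3" = file-mapped */
	close(fd);
	return n == 1 ? 0 : -1;
}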
struct pagemapread {
int pos, len;
u64 *buffer;
};
#define PM_ENTRY_BYTES sizeof(u64)
#define PM_STATUS_BITS 3
#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS 6
#define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK)
#define PM_PRESENT PM_STATUS(4LL)
#define PM_SWAP PM_STATUS(2LL)
#define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER 1
static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
pm->buffer[pm->pos++] = pfn;
if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
unsigned long addr;
int err = 0;
for (addr = start; addr < end; addr += PAGE_SIZE) {
err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
if (err)
break;
}
return err;
}
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
swp_entry_t e = pte_to_swp_entry(pte);
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
static u64 pte_to_pagemap_entry(pte_t pte)
{
u64 pme = 0;
if (is_swap_pte(pte))
pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
else if (pte_present(pte))
pme = PM_PFRAME(pte_pfn(pte))
| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
return pme;
}
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma;
struct pagemapread *pm = walk->private;
pte_t *pte;
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
for (; addr != end; addr += PAGE_SIZE) {
u64 pfn = PM_NOT_PRESENT;
/* check to see if we've left 'vma' behind
* and need a new, higher one */
if (vma && (addr >= vma->vm_end))
vma = find_vma(walk->mm, addr);
/* check that 'vma' actually covers this address,
* and that it isn't a huge page vma */
if (vma && (vma->vm_start <= addr) &&
!is_vm_hugetlb_page(vma)) {
pte = pte_offset_map(pmd, addr);
pfn = pte_to_pagemap_entry(*pte);
/* unmap before userspace copy */
pte_unmap(pte);
}
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
}
cond_resched();
return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
u64 pme = 0;
if (pte_present(pte))
pme = PM_PFRAME(pte_pfn(pte) + offset)
| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
return pme;
}
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
int err = 0;
u64 pfn;
for (; addr != end; addr += PAGE_SIZE) {
int offset = (addr & ~hmask) >> PAGE_SHIFT;
pfn = huge_pte_to_pagemap_entry(*pte, offset);
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
}
cond_resched();
return err;
}
#endif /* HUGETLB_PAGE */
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
* For each page in the address space, this file contains one 64-bit entry
* consisting of the following:
*
* Bits 0-54 page frame number (PFN) if present
* Bits 0-4 swap type if swapped
* Bits 5-54 swap offset if swapped
* Bits 55-60 page shift (page size = 1<<page shift)
* Bit 61 reserved for future use
* Bit 62 page swapped
* Bit 63 page present
*
* If the page is not present but in swap, then the PFN contains an
* encoding of the swap file number and the page's offset into the
* swap. Unmapped pages return a null PFN. This allows determining
* precisely which pages are mapped (or in swap) and comparing mapped
* pages between processes.
*
* Efficient users of this interface will use /proc/pid/maps to
* determine which areas of memory are actually mapped and llseek to
* skip over unmapped regions.
*/
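/*
 * Illustrative userspace sketch (not part of this file): fetching the
 * entry for one virtual address and testing the bits documented above.
 * Assumes 4K pages; the helper name is hypothetical.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int read_pagemap_entry_example(uintptr_t vaddr, uint64_t *entry)
{
	int fd = open("/proc/self/pagemap", O_RDONLY);
	off_t off = (off_t)(vaddr / 4096) * sizeof(uint64_t);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = pread(fd, entry, sizeof(*entry), off);
	close(fd);
	if (n != (ssize_t)sizeof(*entry))
		return -1;
	/* Bit 63: present; bit 62: swapped; bits 0-54: PFN when present. */
	return 0;
}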
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
#define PAGEMAP_WALK_MASK (PMD_MASK)
static ssize_t pagemap_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
struct mm_struct *mm;
struct pagemapread pm;
int ret = -ESRCH;
struct mm_walk pagemap_walk = {};
unsigned long src;
unsigned long svpfn;
unsigned long start_vaddr;
unsigned long end_vaddr;
int copied = 0;
if (!task)
goto out;
mm = mm_for_maps(task);
ret = PTR_ERR(mm);
if (!mm || IS_ERR(mm))
goto out_task;
ret = -EINVAL;
/* file position must be aligned */
if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
goto out_task;
ret = 0;
if (!count)
goto out_task;
pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
ret = -ENOMEM;
if (!pm.buffer)
goto out_mm;
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
pagemap_walk.mm = mm;
pagemap_walk.private = &pm;
src = *ppos;
svpfn = src / PM_ENTRY_BYTES;
start_vaddr = svpfn << PAGE_SHIFT;
end_vaddr = TASK_SIZE_OF(task);
/* watch out for wraparound */
if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
start_vaddr = end_vaddr;
/*
* The odds are that this will stop walking way
* before end_vaddr, because the length of the
* user buffer is tracked in "pm", and the walk
* will stop when we hit the end of the buffer.
*/
ret = 0;
while (count && (start_vaddr < end_vaddr)) {
int len;
unsigned long end;
pm.pos = 0;
end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
/* overflow ? */
if (end < start_vaddr || end > end_vaddr)
end = end_vaddr;
down_read(&mm->mmap_sem);
ret = walk_page_range(start_vaddr, end, &pagemap_walk);
up_read(&mm->mmap_sem);
start_vaddr = end;
len = min(count, PM_ENTRY_BYTES * pm.pos);
if (copy_to_user(buf, pm.buffer, len)) {
ret = -EFAULT;
goto out_free;
}
copied += len;
buf += len;
count -= len;
}
*ppos += copied;
if (!ret || ret == PM_END_OF_BUFFER)
ret = copied;
out_free:
kfree(pm.buffer);
out_mm:
mmput(mm);
out_task:
put_task_struct(task);
out:
return ret;
}
const struct file_operations proc_pagemap_operations = {
.llseek = mem_lseek, /* borrow this */
.read = pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);
static const struct seq_operations proc_pid_numa_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_numa_map,
};
static int numa_maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}
const struct file_operations proc_numa_maps_operations = {
.open = numa_maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_3516_0 |
crossvul-cpp_data_bad_2989_1 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include "disasm.h"
static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
* All paths of conditional branches are analyzed until 'bpf_exit' insn.
*
* The first pass is depth-first-search to check that the program is a DAG.
* It rejects the following programs:
* - larger than BPF_MAXINSNS insns
* - if loop is present (detected via back-edge)
* - unreachable insns exist (shouldn't be a forest. program = one function)
* - out of bounds or malformed jumps
* The second pass is all possible path descent from the 1st insn.
* Since it's analyzing all paths through the program, the length of the
* analysis is limited to 64k insn, which may be hit even if the total number
* of insns is less than 4K, but there are too many branches that change stack/regs.
* Number of 'branches to be analyzed' is limited to 1k
*
* On entry to each instruction, each register has a type, and the instruction
* changes the types of the registers depending on instruction semantics.
* If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
* copied to R1.
*
* All registers are 64-bit.
* R0 - return register
* R1-R5 argument passing registers
* R6-R9 callee saved registers
* R10 - frame pointer read-only
*
* At the start of BPF program the register R1 contains a pointer to bpf_context
* and has type PTR_TO_CTX.
*
* Verifier tracks arithmetic operations on pointers in case:
* BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
* 1st insn copies R10 (which has FRAME_PTR) type into R1
* and 2nd arithmetic instruction is pattern matched to recognize
* that it wants to construct a pointer to some element within stack.
* So after 2nd insn, the register R1 has type PTR_TO_STACK
* (and -20 constant is saved for further stack bounds checking).
* Meaning that this reg is a pointer to stack plus known immediate constant.
*
* Most of the time the registers have SCALAR_VALUE type, which
* means the register has some value, but it's not a valid pointer.
* (like pointer plus pointer becomes SCALAR_VALUE type)
*
* When verifier sees load or store instructions the type of base register
* can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
* types recognized by check_mem_access() function.
*
* PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
* and the range of [ptr, ptr + map's value_size) is accessible.
*
* registers used to pass values to function calls are checked against
* function argument constraints.
*
* ARG_PTR_TO_MAP_KEY is one of such argument constraints.
* It means that the register type passed to this function must be
* PTR_TO_STACK and it will be used inside the function as
* 'pointer to map element key'
*
* For example the argument constraints for bpf_map_lookup_elem():
* .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
* .arg1_type = ARG_CONST_MAP_PTR,
* .arg2_type = ARG_PTR_TO_MAP_KEY,
*
* ret_type says that this function returns 'pointer to map elem value or null'
* function expects 1st argument to be a const pointer to 'struct bpf_map' and
* 2nd argument should be a pointer to stack, which will be used inside
* the helper function as a pointer to map element key.
*
* On the kernel side the helper function looks like:
* u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
* {
* struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
* void *key = (void *) (unsigned long) r2;
* void *value;
*
* here kernel can access 'key' and 'map' pointers safely, knowing that
* [key, key + map->key_size) bytes are valid and were initialized on
* the stack of eBPF program.
* }
*
* Corresponding eBPF program may look like:
* BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
* BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
* here verifier looks at prototype of map_lookup_elem() and sees:
* .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
* Now verifier knows that this map has key of R1->map_ptr->key_size bytes
*
* Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
* Now verifier checks that [R2, R2 + map's key_size) are within stack limits
* and were initialized prior to this call.
* If it's ok, then verifier allows this BPF_CALL insn and looks at
* .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
* R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
* returns either a pointer to map value or NULL.
*
* When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
* insn, the register holding that pointer in the true branch changes state to
* PTR_TO_MAP_VALUE and the same register becomes a known-zero SCALAR_VALUE in the false
* branch. See check_cond_jmp_op().
*
* After the call R0 is set to return type of the function and registers R1-R5
* are set to NOT_INIT to indicate that they are no longer readable.
*/
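/*
 * Illustrative userspace sketch (not part of this file): feeding a minimal
 * two-instruction program ("r0 = 0; exit") to this verifier through the
 * bpf(2) syscall. Error handling is elided; the helper name is hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long load_trivial_prog_example(void)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));	/* unused attr fields must be zero */
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (unsigned long)insns;
	attr.insn_cnt = 2;
	attr.license = (unsigned long)"GPL";
	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}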
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
/* verifier state is 'st'
* before processing instruction 'insn_idx'
* and after processing instruction 'prev_insn_idx'
*/
struct bpf_verifier_state st;
int insn_idx;
int prev_insn_idx;
struct bpf_verifier_stack_elem *next;
};
#define BPF_COMPLEXITY_LIMIT_INSNS 131072
#define BPF_COMPLEXITY_LIMIT_STACK 1024
#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
bool pkt_access;
int regno;
int access_size;
};
static DEFINE_MUTEX(bpf_verifier_lock);
/* log_level controls verbosity level of eBPF verifier.
* verbose() is used to dump the verification trace to the log, so the user
* can figure out what's wrong with the program
*/
static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
const char *fmt, ...)
{
struct bpf_verifer_log *log = &env->log;
unsigned int n;
va_list args;
if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
return;
va_start(args, fmt);
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
va_end(args);
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
"verifier log line truncated - local buffer too short\n");
n = min(log->len_total - log->len_used - 1, n);
log->kbuf[n] = '\0';
if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
log->len_used += n;
else
log->ubuf = NULL;
}
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_PACKET ||
type == PTR_TO_PACKET_META;
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
[SCALAR_VALUE] = "inv",
[PTR_TO_CTX] = "ctx",
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
[PTR_TO_STACK] = "fp",
[PTR_TO_PACKET] = "pkt",
[PTR_TO_PACKET_META] = "pkt_meta",
[PTR_TO_PACKET_END] = "pkt_end",
};
static void print_verifier_state(struct bpf_verifier_env *env,
struct bpf_verifier_state *state)
{
struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
t = reg->type;
if (t == NOT_INIT)
continue;
verbose(env, " R%d=%s", i, reg_type_str[t]);
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
} else {
verbose(env, "(id=%d", reg->id);
if (t != SCALAR_VALUE)
verbose(env, ",off=%d", reg->off);
if (type_is_pkt_pointer(t))
verbose(env, ",r=%d", reg->range);
else if (t == CONST_PTR_TO_MAP ||
t == PTR_TO_MAP_VALUE ||
t == PTR_TO_MAP_VALUE_OR_NULL)
verbose(env, ",ks=%d,vs=%d",
reg->map_ptr->key_size,
reg->map_ptr->value_size);
if (tnum_is_const(reg->var_off)) {
/* Typically an immediate SCALAR_VALUE, but
* could be a pointer whose offset is too big
* for reg->off
*/
verbose(env, ",imm=%llx", reg->var_off.value);
} else {
if (reg->smin_value != reg->umin_value &&
reg->smin_value != S64_MIN)
verbose(env, ",smin_value=%lld",
(long long)reg->smin_value);
if (reg->smax_value != reg->umax_value &&
reg->smax_value != S64_MAX)
verbose(env, ",smax_value=%lld",
(long long)reg->smax_value);
if (reg->umin_value != 0)
verbose(env, ",umin_value=%llu",
(unsigned long long)reg->umin_value);
if (reg->umax_value != U64_MAX)
verbose(env, ",umax_value=%llu",
(unsigned long long)reg->umax_value);
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, ",var_off=%s", tn_buf);
}
}
verbose(env, ")");
}
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] == STACK_SPILL)
verbose(env, " fp%d=%s",
-MAX_BPF_STACK + i * BPF_REG_SIZE,
reg_type_str[state->stack[i].spilled_ptr.type]);
}
verbose(env, "\n");
}
static int copy_stack_state(struct bpf_verifier_state *dst,
const struct bpf_verifier_state *src)
{
if (!src->stack)
return 0;
if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
/* internal bug, make state invalid to reject the program */
memset(dst, 0, sizeof(*dst));
return -EFAULT;
}
memcpy(dst->stack, src->stack,
sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
return 0;
}
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
* make it consume a minimal amount of memory. A stack write by the checked
* program (see check_stack_write()) calls into realloc_verifier_state() to
* grow the stack size.
* Note there is a non-zero 'parent' pointer inside bpf_verifier_state
* which this function copies over. It points to previous bpf_verifier_state
* which is never reallocated
*/
static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
bool copy_old)
{
u32 old_size = state->allocated_stack;
struct bpf_stack_state *new_stack;
int slot = size / BPF_REG_SIZE;
if (size <= old_size || !size) {
if (copy_old)
return 0;
state->allocated_stack = slot * BPF_REG_SIZE;
if (!size && old_size) {
kfree(state->stack);
state->stack = NULL;
}
return 0;
}
new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
GFP_KERNEL);
if (!new_stack)
return -ENOMEM;
if (copy_old) {
if (state->stack)
memcpy(new_stack, state->stack,
sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
memset(new_stack + old_size / BPF_REG_SIZE, 0,
sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
}
state->allocated_stack = slot * BPF_REG_SIZE;
kfree(state->stack);
state->stack = new_stack;
return 0;
}
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
kfree(state->stack);
if (free_self)
kfree(state);
}
/* copy verifier state from src to dst growing dst stack space
* when necessary to accommodate larger src stack
*/
static int copy_verifier_state(struct bpf_verifier_state *dst,
const struct bpf_verifier_state *src)
{
int err;
err = realloc_verifier_state(dst, src->allocated_stack, false);
if (err)
return err;
memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
return copy_stack_state(dst, src);
}
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem, *head = env->head;
int err;
if (env->head == NULL)
return -ENOENT;
if (cur) {
err = copy_verifier_state(cur, &head->st);
if (err)
return err;
}
if (insn_idx)
*insn_idx = head->insn_idx;
if (prev_insn_idx)
*prev_insn_idx = head->prev_insn_idx;
elem = head->next;
free_verifier_state(&head->st, false);
kfree(head);
env->head = elem;
env->stack_size--;
return 0;
}
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem;
int err;
elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
if (!elem)
goto err;
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
elem->next = env->head;
env->head = elem;
env->stack_size++;
err = copy_verifier_state(&elem->st, cur);
if (err)
goto err;
if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
verbose(env, "BPF program is too complex\n");
goto err;
}
return &elem->st;
err:
/* pop all elements and return */
while (!pop_stack(env, NULL, NULL));
return NULL;
}
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
static void __mark_reg_not_init(struct bpf_reg_state *reg);
/* Mark the unknown part of a register (variable offset or scalar value) as
* known to have the value @imm.
*/
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
reg->id = 0;
reg->var_off = tnum_const(imm);
reg->smin_value = (s64)imm;
reg->smax_value = (s64)imm;
reg->umin_value = imm;
reg->umax_value = imm;
}
/* Mark the 'variable offset' part of a register as zero. This should be
* used only on registers holding a pointer type.
*/
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
}
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs */
for (regno = 0; regno < MAX_BPF_REG; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_known_zero(regs + regno);
}
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
return type_is_pkt_pointer(reg->type);
}
static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
return reg_is_pkt_pointer(reg) ||
reg->type == PTR_TO_PACKET_END;
}
/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
enum bpf_reg_type which)
{
/* The register can already have a range from prior markings.
* This is fine as long as it hasn't been advanced from its
* origin.
*/
return reg->type == which &&
reg->id == 0 &&
reg->off == 0 &&
tnum_equals_const(reg->var_off, 0);
}
/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
/* min signed is max(sign bit) | min(other bits) */
reg->smin_value = max_t(s64, reg->smin_value,
reg->var_off.value | (reg->var_off.mask & S64_MIN));
/* max signed is min(sign bit) | max(other bits) */
reg->smax_value = min_t(s64, reg->smax_value,
reg->var_off.value | (reg->var_off.mask & S64_MAX));
reg->umin_value = max(reg->umin_value, reg->var_off.value);
reg->umax_value = min(reg->umax_value,
reg->var_off.value | reg->var_off.mask);
}
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
/* Learn sign from signed bounds.
* If we cannot cross the sign boundary, then signed and unsigned bounds
* are the same, so combine. This works even in the negative case, e.g.
* -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
*/
if (reg->smin_value >= 0 || reg->smax_value < 0) {
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
reg->umax_value);
return;
}
/* Learn sign from unsigned bounds. Signed bounds cross the sign
* boundary, so we must be careful.
*/
if ((s64)reg->umax_value >= 0) {
/* Positive. We can't learn anything from the smin, but smax
* is positive, hence safe.
*/
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
reg->umax_value);
} else if ((s64)reg->umin_value < 0) {
/* Negative. We can't learn anything from the smax, but smin
* is negative, hence safe.
*/
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
reg->smax_value = reg->umax_value;
}
}
/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
reg->var_off = tnum_intersect(reg->var_off,
tnum_range(reg->umin_value,
reg->umax_value));
}
/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
reg->smin_value = S64_MIN;
reg->smax_value = S64_MAX;
reg->umin_value = 0;
reg->umax_value = U64_MAX;
}
/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
reg->type = SCALAR_VALUE;
reg->id = 0;
reg->off = 0;
reg->var_off = tnum_unknown;
__mark_reg_unbounded(reg);
}
static void mark_reg_unknown(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs */
for (regno = 0; regno < MAX_BPF_REG; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_unknown(regs + regno);
}
static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
__mark_reg_unknown(reg);
reg->type = NOT_INIT;
}
static void mark_reg_not_init(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs */
for (regno = 0; regno < MAX_BPF_REG; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_not_init(regs + regno);
}
static void init_reg_state(struct bpf_verifier_env *env,
struct bpf_reg_state *regs)
{
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
mark_reg_not_init(env, regs, i);
regs[i].live = REG_LIVE_NONE;
}
/* frame pointer */
regs[BPF_REG_FP].type = PTR_TO_STACK;
mark_reg_known_zero(env, regs, BPF_REG_FP);
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
mark_reg_known_zero(env, regs, BPF_REG_1);
}
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
{
struct bpf_verifier_state *parent = state->parent;
if (regno == BPF_REG_FP)
/* We don't need to worry about FP liveness because it's read-only */
return;
while (parent) {
/* if read wasn't screened by an earlier write ... */
if (state->regs[regno].live & REG_LIVE_WRITTEN)
break;
/* ... then we depend on parent's value */
parent->regs[regno].live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
}
}
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
struct bpf_reg_state *regs = env->cur_state->regs;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
return -EINVAL;
}
if (t == SRC_OP) {
/* check whether register used as source operand can be read */
if (regs[regno].type == NOT_INIT) {
verbose(env, "R%d !read_ok\n", regno);
return -EACCES;
}
mark_reg_read(env->cur_state, regno);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
verbose(env, "frame pointer is read only\n");
return -EACCES;
}
regs[regno].live |= REG_LIVE_WRITTEN;
if (t == DST_OP)
mark_reg_unknown(env, regs, regno);
}
return 0;
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
switch (type) {
case PTR_TO_MAP_VALUE:
case PTR_TO_MAP_VALUE_OR_NULL:
case PTR_TO_STACK:
case PTR_TO_CTX:
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
case PTR_TO_PACKET_END:
case CONST_PTR_TO_MAP:
return true;
default:
return false;
}
}
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
static int check_stack_write(struct bpf_verifier_env *env,
struct bpf_verifier_state *state, int off,
int size, int value_regno)
{
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
true);
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (!env->allow_ptr_leaks &&
state->stack[spi].slot_type[0] == STACK_SPILL &&
size != BPF_REG_SIZE) {
verbose(env, "attempt to corrupt spilled pointer on stack\n");
return -EACCES;
}
if (value_regno >= 0 &&
is_spillable_regtype(state->regs[value_regno].type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
/* save register state */
state->stack[spi].spilled_ptr = state->regs[value_regno];
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack[spi].slot_type[i] = STACK_SPILL;
} else {
/* regular write of data into stack */
state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
for (i = 0; i < size; i++)
state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
STACK_MISC;
}
return 0;
}
static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
{
struct bpf_verifier_state *parent = state->parent;
while (parent) {
/* if read wasn't screened by an earlier write ... */
if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
break;
/* ... then we depend on parent's value */
parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
}
}
static int check_stack_read(struct bpf_verifier_env *env,
struct bpf_verifier_state *state, int off, int size,
int value_regno)
{
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
u8 *stype;
if (state->allocated_stack <= slot) {
verbose(env, "invalid read from stack off %d+0 size %d\n",
off, size);
return -EACCES;
}
stype = state->stack[spi].slot_type;
if (stype[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
for (i = 1; i < BPF_REG_SIZE; i++) {
if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
verbose(env, "corrupted spill memory\n");
return -EACCES;
}
}
if (value_regno >= 0) {
/* restore register state from stack */
state->regs[value_regno] = state->stack[spi].spilled_ptr;
mark_stack_slot_read(state, spi);
}
return 0;
} else {
for (i = 0; i < size; i++) {
if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
}
if (value_regno >= 0)
/* have read misc data from the stack */
mark_reg_unknown(env, state->regs, value_regno);
return 0;
}
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_map *map = regs[regno].map_ptr;
if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
off + size > map->value_size) {
verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
map->value_size, off, size);
return -EACCES;
}
return 0;
}
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *reg = &state->regs[regno];
int err;
/* We may have adjusted the register to this map value, so we
* need to try adding each of min_value and max_value to off
* to make sure our theoretical access will be safe.
*/
if (env->log.level)
print_verifier_state(env, state);
/* The minimum value is only important with signed
* comparisons where we can't assume the floor of a
* value is 0. If we are using signed variables for our
* index'es we need to make sure that whatever we use
* will have a set floor within our range.
*/
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
err = __check_map_access(env, regno, reg->smin_value + off, size,
zero_size_allowed);
if (err) {
verbose(env, "R%d min value is outside of the array range\n",
regno);
return err;
}
/* If we haven't set a max value then we need to bail since we can't be
* sure we won't do bad things.
* If reg->umax_value + off could overflow, treat that as unbounded too.
*/
if (reg->umax_value >= BPF_MAX_VAR_OFF) {
verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
regno);
return -EACCES;
}
err = __check_map_access(env, regno, reg->umax_value + off, size,
zero_size_allowed);
if (err)
verbose(env, "R%d max value is outside of the array range\n",
regno);
return err;
}
#define MAX_PACKET_OFF 0xffff
static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_access_type t)
{
switch (env->prog->type) {
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
/* dst_input() and dst_output() can't write for now */
if (t == BPF_WRITE)
return false;
/* fallthrough */
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
if (meta)
return meta->pkt_access;
env->seen_direct_write = true;
return true;
default:
return false;
}
}
static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
(u64)off + size > reg->range) {
verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
off, size, regno, reg->id, reg->off, reg->range);
return -EACCES;
}
return 0;
}
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
int err;
/* We may have added a variable offset to the packet pointer; but any
* reg->range we have comes after that. We are only checking the fixed
* offset.
*/
/* We don't allow negative numbers, because we aren't tracking enough
* detail to prove they're safe.
*/
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
err = __check_packet_access(env, regno, off, size, zero_size_allowed);
if (err) {
verbose(env, "R%d offset is outside of the packet\n", regno);
return err;
}
return err;
}
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
};
if (env->ops->is_valid_access &&
env->ops->is_valid_access(off, size, t, &info)) {
/* A non-zero info.ctx_field_size indicates that this field is a
* candidate for later verifier transformation to load the whole
* field and then apply a mask when accessed with a narrower
* access than actual ctx access size. A zero info.ctx_field_size
* will only allow for whole field access and rejects any other
* type of narrower access.
*/
*reg_type = info.reg_type;
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
return 0;
}
verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
return -EACCES;
}
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
if (allow_ptr_leaks)
return false;
return reg->type != SCALAR_VALUE;
}
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
}
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size, bool strict)
{
struct tnum reg_off;
int ip_align;
/* Byte size accesses are always allowed. */
if (!strict || size == 1)
return 0;
/* For platforms that do not have a Kconfig enabling
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
* NET_IP_ALIGN is universally set to '2'. And on platforms
* that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
* to this code only in strict mode where we want to emulate
* the NET_IP_ALIGN==2 checking. Therefore use an
* unconditional IP align value of '2'.
*/
ip_align = 2;
reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
if (!tnum_is_aligned(reg_off, size)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env,
"misaligned packet access off %d+%s+%d+%d size %d\n",
ip_align, tn_buf, reg->off, off, size);
return -EACCES;
}
return 0;
}
static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
const char *pointer_desc,
int off, int size, bool strict)
{
struct tnum reg_off;
/* Byte size accesses are always allowed. */
if (!strict || size == 1)
return 0;
reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
if (!tnum_is_aligned(reg_off, size)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
pointer_desc, tn_buf, reg->off, off, size);
return -EACCES;
}
return 0;
}
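/* Worked example with hypothetical values: for reg->var_off =
 * (value 0, mask 0x3), reg->off = 0 and off = 4, reg_off keeps the
 * unknown low two bits, so a 4-byte access may be misaligned and is
 * rejected, while a 1-byte access is always accepted above.
 */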
static int check_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size)
{
bool strict = env->strict_alignment;
const char *pointer_desc = "";
switch (reg->type) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
/* Special case, because of NET_IP_ALIGN. Given metadata sits
* right in front, treat it the very same way.
*/
return check_pkt_ptr_alignment(env, reg, off, size, strict);
case PTR_TO_MAP_VALUE:
pointer_desc = "value ";
break;
case PTR_TO_CTX:
pointer_desc = "context ";
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
break;
default:
break;
}
return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
strict);
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
*/
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
int bpf_size, enum bpf_access_type t,
int value_regno)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
int size, err = 0;
size = bpf_size_to_bytes(bpf_size);
if (size < 0)
return size;
/* alignment checks will add in reg->off themselves */
err = check_ptr_alignment(env, reg, off, size);
if (err)
return err;
/* for access checks, reg->off is just part of off */
off += reg->off;
if (reg->type == PTR_TO_MAP_VALUE) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into map\n", value_regno);
return -EACCES;
}
err = check_map_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = SCALAR_VALUE;
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
/* ctx accesses must be at a fixed offset, so that we can
* determine what type of data is returned.
*/
if (reg->off) {
verbose(env,
"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
regno, reg->off, off - reg->off);
return -EACCES;
}
if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env,
"variable ctx access var_off=%s off=%d size=%d",
tn_buf, off, size);
return -EACCES;
}
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
/* ctx access returns either a scalar, or a
* PTR_TO_PACKET[_META,_END]. In the latter
* case, we know the offset is zero.
*/
if (reg_type == SCALAR_VALUE)
mark_reg_unknown(env, regs, value_regno);
else
mark_reg_known_zero(env, regs,
value_regno);
regs[value_regno].id = 0;
regs[value_regno].off = 0;
regs[value_regno].range = 0;
regs[value_regno].type = reg_type;
}
} else if (reg->type == PTR_TO_STACK) {
/* stack accesses must be at a fixed offset, so that we can
* determine what type of data is returned.
* See check_stack_read().
*/
if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "variable stack access var_off=%s off=%d size=%d",
tn_buf, off, size);
return -EACCES;
}
off += reg->var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK) {
verbose(env, "invalid stack off=%d size=%d\n", off,
size);
return -EACCES;
}
if (env->prog->aux->stack_depth < -off)
env->prog->aux->stack_depth = -off;
if (t == BPF_WRITE)
err = check_stack_write(env, state, off, size,
value_regno);
else
err = check_stack_read(env, state, off, size,
value_regno);
} else if (reg_is_pkt_pointer(reg)) {
if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
verbose(env, "cannot write into packet\n");
return -EACCES;
}
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into packet\n",
value_regno);
return -EACCES;
}
err = check_packet_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else {
verbose(env, "R%d invalid mem access '%s'\n", regno,
reg_type_str[reg->type]);
return -EACCES;
}
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
regs[value_regno].type == SCALAR_VALUE) {
/* b/h/w load zero-extends, mark upper bits as known 0 */
regs[value_regno].var_off =
tnum_cast(regs[value_regno].var_off, size);
__update_reg_bounds(&regs[value_regno]);
}
return err;
}
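/* Example (hypothetical): after "r0 = *(u8 *)(r10 - 1)" the value is
 * a 1-byte load, so the tnum_cast() above marks bits 8..63 of r0 as
 * known zero and __update_reg_bounds() tightens umax_value to 255.
 */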
static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
int err;
if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
insn->imm != 0) {
verbose(env, "BPF_XADD uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
return -EACCES;
}
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
if (err)
return err;
/* check whether atomic_add can write into the same memory */
return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1);
}
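/* Example (hypothetical): "lock *(u64 *)(r1 + 8) += r2" must pass
 * both checks above, since the atomic add both fetches and stores
 * the destination; value_regno is -1 because the old memory value
 * never lands in a register here.
 */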
/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state reg)
{
return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
}
/* when register 'regno' is passed into a function that will read 'access_size'
* bytes from that pointer, make sure that it's within the stack boundary
* and all the accessed stack elements are initialized.
* Unlike most pointer bounds-checking functions, this one doesn't take an
* 'off' argument, so it has to add in reg->off itself.
*/
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = state->regs;
int off, i, slot, spi;
if (regs[regno].type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(regs[regno]))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[regs[regno].type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
/* Only allow fixed-offset stack reads */
if (!tnum_is_const(regs[regno].var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
}
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
regno, off, access_size);
return -EACCES;
}
if (env->prog->aux->stack_depth < -off)
env->prog->aux->stack_depth = -off;
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
return 0;
}
for (i = 0; i < access_size; i++) {
slot = -(off + i) - 1;
spi = slot / BPF_REG_SIZE;
if (state->allocated_stack <= slot ||
state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
STACK_MISC) {
verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
}
}
return 0;
}
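/* Illustrative use (hypothetical values): for a helper argument pair
 * (buf = r10 - 8, len = 8), this walks slots [-8, -1] and requires
 * each to be STACK_MISC, i.e. previously written, e.g. by an earlier
 * "*(u64 *)(r10 - 8) = r1" with a scalar r1; reading never-written
 * stack is rejected with -EACCES unless meta->raw_mode defers the
 * initialization to the helper itself.
 */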
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
switch (reg->type) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
return check_packet_access(env, regno, reg->off, access_size,
zero_size_allowed);
case PTR_TO_MAP_VALUE:
return check_map_access(env, regno, reg->off, access_size,
zero_size_allowed);
default: /* scalar_value|ptr_to_stack or invalid ptr */
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
}
}
static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
int err = 0;
if (arg_type == ARG_DONTCARE)
return 0;
err = check_reg_arg(env, regno, SRC_OP);
if (err)
return err;
if (arg_type == ARG_ANYTHING) {
if (is_pointer_value(env, regno)) {
verbose(env, "R%d leaks addr into helper function\n",
regno);
return -EACCES;
}
return 0;
}
if (type_is_pkt_pointer(type) &&
!may_access_direct_pkt_data(env, meta, BPF_READ)) {
verbose(env, "helper access to the packet is not allowed\n");
return -EACCES;
}
if (arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE) {
expected_type = PTR_TO_STACK;
if (!type_is_pkt_pointer(type) &&
type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_SIZE ||
arg_type == ARG_CONST_SIZE_OR_ZERO) {
expected_type = SCALAR_VALUE;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_CTX) {
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_MEM ||
arg_type == ARG_PTR_TO_MEM_OR_NULL ||
arg_type == ARG_PTR_TO_UNINIT_MEM) {
expected_type = PTR_TO_STACK;
/* One exception here. In case the function allows NULL to be
* passed in as an argument, it's a SCALAR_VALUE type. The final
* test happens during stack boundary checking.
*/
if (register_is_null(*reg) &&
arg_type == ARG_PTR_TO_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
} else {
verbose(env, "unsupported arg_type %d\n", arg_type);
return -EFAULT;
}
if (arg_type == ARG_CONST_MAP_PTR) {
/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
meta->map_ptr = reg->map_ptr;
} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
/* bpf_map_xxx(..., map_ptr, ..., key) call:
* check that [key, key + map->key_size) are within
* stack limits and initialized
*/
if (!meta->map_ptr) {
/* in function declaration map_ptr must come before
* map_key, so that it's verified and known before
* we have to check map_key here. Otherwise it means
* that the kernel subsystem misconfigured the verifier
*/
verbose(env, "invalid map_ptr to access map->key\n");
return -EACCES;
}
if (type_is_pkt_pointer(type))
err = check_packet_access(env, regno, reg->off,
meta->map_ptr->key_size,
false);
else
err = check_stack_boundary(env, regno,
meta->map_ptr->key_size,
false, NULL);
} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
*/
if (!meta->map_ptr) {
/* kernel subsystem misconfigured verifier */
verbose(env, "invalid map_ptr to access map->value\n");
return -EACCES;
}
if (type_is_pkt_pointer(type))
err = check_packet_access(env, regno, reg->off,
meta->map_ptr->value_size,
false);
else
err = check_stack_boundary(env, regno,
meta->map_ptr->value_size,
false, NULL);
} else if (arg_type == ARG_CONST_SIZE ||
arg_type == ARG_CONST_SIZE_OR_ZERO) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
/* A bpf_xxx(..., buf, len) call will access 'len' bytes
* from stack pointer 'buf'. Check it.
* Note: regno == len, regno - 1 == buf.
*/
if (regno == 0) {
/* kernel subsystem misconfigured verifier */
verbose(env,
"ARG_CONST_SIZE cannot be first argument\n");
return -EACCES;
}
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
*/
if (!tnum_is_const(reg->var_off))
/* For unprivileged variable accesses, disable raw
* mode so that the program is required to
* initialize all the memory that the helper could
* just partially fill up.
*/
meta = NULL;
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
regno);
return -EACCES;
}
if (reg->umin_value == 0) {
err = check_helper_mem_access(env, regno - 1, 0,
zero_size_allowed,
meta);
if (err)
return err;
}
if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
regno);
return -EACCES;
}
err = check_helper_mem_access(env, regno - 1,
reg->umax_value,
zero_size_allowed, meta);
}
return err;
err_type:
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[type], reg_type_str[expected_type]);
return -EACCES;
}
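/* Example (hypothetical fragment) for the ARG_CONST_SIZE handling
 * above, with a helper taking (buf, len):
 *
 *   r1 = r10; r1 += -64;         // buf on the stack
 *   r2 = <loaded value>;
 *   r2 &= 63;                    // umin = 0, umax = 63
 *   call <helper>;
 *
 * r2 is not constant, so meta is cleared and the memory may not be
 * left uninitialized; both the zero-size case (umin == 0) and the
 * worst case of umax_value bytes are checked.
 */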
static int check_map_func_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map, int func_id)
{
if (!map)
return 0;
/* We need a two-way check: first from the map's perspective ... */
switch (map->map_type) {
case BPF_MAP_TYPE_PROG_ARRAY:
if (func_id != BPF_FUNC_tail_call)
goto error;
break;
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
if (func_id != BPF_FUNC_perf_event_read &&
func_id != BPF_FUNC_perf_event_output &&
func_id != BPF_FUNC_perf_event_read_value)
goto error;
break;
case BPF_MAP_TYPE_STACK_TRACE:
if (func_id != BPF_FUNC_get_stackid)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_ARRAY:
if (func_id != BPF_FUNC_skb_under_cgroup &&
func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
/* devmap returns a pointer to a live net_device ifindex that we cannot
* allow to be modified from bpf side. So do not allow lookup elements
* for now.
*/
case BPF_MAP_TYPE_DEVMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
/* Restrict bpf side of cpumap, open when use-cases appear */
case BPF_MAP_TYPE_CPUMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
if (func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
func_id != BPF_FUNC_map_delete_elem)
goto error;
break;
default:
break;
}
/* ... and second from the function itself. */
switch (func_id) {
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
break;
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
case BPF_FUNC_perf_event_read_value:
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
goto error;
break;
case BPF_FUNC_get_stackid:
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;
break;
case BPF_FUNC_current_task_under_cgroup:
case BPF_FUNC_skb_under_cgroup:
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
break;
case BPF_FUNC_redirect_map:
if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
map->map_type != BPF_MAP_TYPE_CPUMAP)
goto error;
break;
case BPF_FUNC_sk_redirect_map:
if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
goto error;
break;
case BPF_FUNC_sock_map_update:
if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
goto error;
break;
default:
break;
}
return 0;
error:
verbose(env, "cannot pass map_type %d into func %s#%d\n",
map->map_type, func_id_name(func_id), func_id);
return -EINVAL;
}
static int check_raw_mode(const struct bpf_func_proto *fn)
{
int count = 0;
if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
count++;
return count > 1 ? -EINVAL : 0;
}
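/* E.g. a helper prototype whose first argument is declared
 * ARG_PTR_TO_UNINIT_MEM (a probe-read style dst buffer) yields
 * count == 1 and is accepted; only one such argument fits because
 * meta records a single (regno, access_size) pair for the later
 * fill-in pass in check_call().
 */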
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
* are now invalid, so turn them into unknown SCALAR_VALUE.
*/
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
if (reg_is_pkt_pointer_any(&regs[i]))
mark_reg_unknown(env, regs, i);
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
reg = &state->stack[i].spilled_ptr;
if (reg_is_pkt_pointer_any(reg))
__mark_reg_unknown(reg);
}
}
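/* E.g. after a helper that may reallocate packet data (flagged via
 * bpf_helper_changes_pkt_data() in check_call()), every previously
 * verified pkt pointer -- in registers and in spilled stack slots --
 * becomes an unknown scalar, forcing the program to reload
 * data/data_end and redo its bounds checks.
 */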
static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs;
struct bpf_call_arg_meta meta;
bool changes_data;
int i, err;
/* find function prototype */
if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
func_id);
return -EINVAL;
}
if (env->ops->get_func_proto)
fn = env->ops->get_func_proto(func_id);
if (!fn) {
verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
func_id);
return -EINVAL;
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose(env, "cannot call GPL only function from proprietary program\n");
return -EINVAL;
}
changes_data = bpf_helper_changes_pkt_data(fn->func);
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
/* We only support one arg being in raw mode at the moment, which
* is sufficient for the helper functions we have right now.
*/
err = check_raw_mode(fn);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
return err;
}
/* check args */
err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
if (err)
return err;
/* Mark slots with STACK_MISC in case of raw mode, stack offset
* is inferred from register state.
*/
for (i = 0; i < meta.access_size; i++) {
err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
if (err)
return err;
}
regs = cur_regs(env);
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* update return register (already marked as written above) */
if (fn->ret_type == RET_INTEGER) {
/* sets type to SCALAR_VALUE */
mark_reg_unknown(env, regs, BPF_REG_0);
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
struct bpf_insn_aux_data *insn_aux;
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
/* There is no offset yet applied, variable or fixed */
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].off = 0;
/* remember map_ptr, so that check_map_access()
* can check 'value_size' boundary of memory access
* to map element returned from bpf_map_lookup_elem()
*/
if (meta.map_ptr == NULL) {
verbose(env,
"kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
regs[BPF_REG_0].map_ptr = meta.map_ptr;
regs[BPF_REG_0].id = ++env->id_gen;
insn_aux = &env->insn_aux_data[insn_idx];
if (!insn_aux->map_ptr)
insn_aux->map_ptr = meta.map_ptr;
else if (insn_aux->map_ptr != meta.map_ptr)
insn_aux->map_ptr = BPF_MAP_PTR_POISON;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
return -EINVAL;
}
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
if (err)
return err;
if (changes_data)
clear_all_pkt_pointers(env);
return 0;
}
static void coerce_reg_to_32(struct bpf_reg_state *reg)
{
/* clear high 32 bits */
reg->var_off = tnum_cast(reg->var_off, 4);
/* Update bounds */
__update_reg_bounds(reg);
}
static bool signed_add_overflows(s64 a, s64 b)
{
/* Do the add in u64, where overflow is well-defined */
s64 res = (s64)((u64)a + (u64)b);
if (b < 0)
return res > a;
return res < a;
}
static bool signed_sub_overflows(s64 a, s64 b)
{
/* Do the sub in u64, where overflow is well-defined */
s64 res = (s64)((u64)a - (u64)b);
if (b < 0)
return res < a;
return res > a;
}
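/* Worked example: signed_add_overflows(S64_MAX, 1) wraps to S64_MIN,
 * and since b > 0 with res < a it reports overflow; likewise
 * signed_sub_overflows(S64_MIN, 1) wraps to S64_MAX and reports
 * res > a. The u64 casts keep the wrap-around well-defined in C.
 */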
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
* Caller should also handle BPF_MOV case separately.
* If we return -EACCES, caller may want to try again treating pointer as a
* scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
*/
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
u8 opcode = BPF_OP(insn->code);
u32 dst = insn->dst_reg;
dst_reg = &regs[dst];
if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
print_verifier_state(env, env->cur_state);
verbose(env,
"verifier internal error: known but bad sbounds\n");
return -EINVAL;
}
if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
print_verifier_state(env, env->cur_state);
verbose(env,
"verifier internal error: known but bad ubounds\n");
return -EINVAL;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops on pointers produce (meaningless) scalars */
if (!env->allow_ptr_leaks)
verbose(env,
"R%d 32-bit pointer arithmetic prohibited\n",
dst);
return -EACCES;
}
if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
if (!env->allow_ptr_leaks)
verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
dst);
return -EACCES;
}
if (ptr_reg->type == CONST_PTR_TO_MAP) {
if (!env->allow_ptr_leaks)
verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
dst);
return -EACCES;
}
if (ptr_reg->type == PTR_TO_PACKET_END) {
if (!env->allow_ptr_leaks)
verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
dst);
return -EACCES;
}
/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
* The id may be overwritten later if we create a new variable offset.
*/
dst_reg->type = ptr_reg->type;
dst_reg->id = ptr_reg->id;
switch (opcode) {
case BPF_ADD:
/* We can take a fixed offset as long as it doesn't overflow
* the s32 'off' field
*/
if (known && (ptr_reg->off + smin_val ==
(s64)(s32)(ptr_reg->off + smin_val))) {
/* pointer += K. Accumulate it into fixed offset */
dst_reg->smin_value = smin_ptr;
dst_reg->smax_value = smax_ptr;
dst_reg->umin_value = umin_ptr;
dst_reg->umax_value = umax_ptr;
dst_reg->var_off = ptr_reg->var_off;
dst_reg->off = ptr_reg->off + smin_val;
dst_reg->range = ptr_reg->range;
break;
}
/* A new variable offset is created. Note that off_reg->off
* == 0, since it's a scalar.
* dst_reg gets the pointer type and since some unknown
* integer value was added to the pointer, give it a new 'id'
* if it's a PTR_TO_PACKET.
* This creates a new 'base' pointer; off_reg (variable) gets
* added into the variable offset, and we copy the fixed offset
* from ptr_reg.
*/
if (signed_add_overflows(smin_ptr, smin_val) ||
signed_add_overflows(smax_ptr, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = smin_ptr + smin_val;
dst_reg->smax_value = smax_ptr + smax_val;
}
if (umin_ptr + umin_val < umin_ptr ||
umax_ptr + umax_val < umax_ptr) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value = umin_ptr + umin_val;
dst_reg->umax_value = umax_ptr + umax_val;
}
dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range to zero */
dst_reg->range = 0;
}
break;
case BPF_SUB:
if (dst_reg == off_reg) {
/* scalar -= pointer. Creates an unknown scalar */
if (!env->allow_ptr_leaks)
verbose(env, "R%d tried to subtract pointer from scalar\n",
dst);
return -EACCES;
}
/* We don't allow subtraction from FP, because (according to
* test_verifier.c test "invalid fp arithmetic", JITs might not
* be able to deal with it.
*/
if (ptr_reg->type == PTR_TO_STACK) {
if (!env->allow_ptr_leaks)
verbose(env, "R%d subtraction from stack pointer prohibited\n",
dst);
return -EACCES;
}
if (known && (ptr_reg->off - smin_val ==
(s64)(s32)(ptr_reg->off - smin_val))) {
/* pointer -= K. Subtract it from fixed offset */
dst_reg->smin_value = smin_ptr;
dst_reg->smax_value = smax_ptr;
dst_reg->umin_value = umin_ptr;
dst_reg->umax_value = umax_ptr;
dst_reg->var_off = ptr_reg->var_off;
dst_reg->id = ptr_reg->id;
dst_reg->off = ptr_reg->off - smin_val;
dst_reg->range = ptr_reg->range;
break;
}
/* A new variable offset is created. If the subtrahend is known
* nonnegative, then any reg->range we had before is still good.
*/
if (signed_sub_overflows(smin_ptr, smax_val) ||
signed_sub_overflows(smax_ptr, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = smin_ptr - smax_val;
dst_reg->smax_value = smax_ptr - smin_val;
}
if (umin_ptr < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value = umin_ptr - umax_val;
dst_reg->umax_value = umax_ptr - umin_val;
}
dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* if the subtrahend could be negative, the pointer may
* have moved forward, so the old range is invalid
*/
if (smin_val < 0)
dst_reg->range = 0;
}
break;
case BPF_AND:
case BPF_OR:
case BPF_XOR:
/* bitwise ops on pointers are troublesome, prohibit for now.
* (However, in principle we could allow some cases, e.g.
* ptr &= ~3 which would reduce min_value by at most 3.)
*/
if (!env->allow_ptr_leaks)
verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
default:
/* other operators (e.g. MUL,LSH) produce non-pointer results */
if (!env->allow_ptr_leaks)
verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
}
__update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
}
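/* Example (hypothetical): for "r2 = <pkt ptr>; r2 += r3" with r3 a
 * scalar in [0, 60], the BPF_ADD case above keeps the fixed off from
 * the packet pointer, folds r3's bounds and var_off into the variable
 * part, assigns a fresh id and resets range to 0 -- so r2 must be
 * compared against pkt_end again before it can be dereferenced.
 */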
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state src_reg)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->64 */
coerce_reg_to_32(dst_reg);
coerce_reg_to_32(&src_reg);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
umax_val = src_reg.umax_value;
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value += smin_val;
dst_reg->smax_value += smax_val;
}
if (dst_reg->umin_value + umin_val < umin_val ||
dst_reg->umax_value + umax_val < umax_val) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value += umin_val;
dst_reg->umax_value += umax_val;
}
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value -= smax_val;
dst_reg->smax_value -= smin_val;
}
if (dst_reg->umin_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value -= umax_val;
dst_reg->umax_value -= umin_val;
}
dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
break;
case BPF_MUL:
dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
if (smin_val < 0 || dst_reg->smin_value < 0) {
/* Ain't nobody got time to multiply that sign */
__mark_reg_unbounded(dst_reg);
__update_reg_bounds(dst_reg);
break;
}
/* Both values are positive, so we can work with unsigned and
* copy the result to signed (unless it exceeds S64_MAX).
*/
if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
/* Potential overflow, we know nothing */
__mark_reg_unbounded(dst_reg);
/* (except what we can learn from the var_off) */
__update_reg_bounds(dst_reg);
break;
}
dst_reg->umin_value *= umin_val;
dst_reg->umax_value *= umax_val;
if (dst_reg->umax_value > S64_MAX) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
break;
case BPF_AND:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value &
src_reg.var_off.value);
break;
}
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = dst_reg->var_off.value;
dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ANDing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ANDing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_OR:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value |
src_reg.var_off.value);
break;
}
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
dst_reg->umax_value = dst_reg->var_off.value |
dst_reg->var_off.mask;
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ORing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ORing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* We lose all sign bit information (except what we can pick
* up from var_off)
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
/* If we might shift our top bit out, then we know nothing */
if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value <<= umin_val;
dst_reg->umax_value <<= umax_val;
}
if (src_known)
dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
else
dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift, so make the appropriate casts */
if (dst_reg->smin_value < 0) {
if (umin_val) {
/* Sign bit will be cleared */
dst_reg->smin_value = 0;
} else {
/* Lost sign bit information */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
}
} else {
dst_reg->smin_value =
(u64)(dst_reg->smin_value) >> umax_val;
}
if (src_known)
dst_reg->var_off = tnum_rshift(dst_reg->var_off,
umin_val);
else
dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
dst_reg->umin_value >>= umax_val;
dst_reg->umax_value >>= umin_val;
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
default:
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
}
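/* Example (hypothetical): "r1 &= 0xff" on a fully unknown r1 hits the
 * BPF_AND case: var_off becomes (0; 0xff), giving umin = 0 and
 * umax = 0xff; the signed bounds, dropped because r1 might have been
 * negative, are then recovered from var_off by __update_reg_bounds()
 * -- the canonical way to bound an index before using it as an offset.
 */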
/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
* and var_off.
*/
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
int rc;
dst_reg = &regs[insn->dst_reg];
src_reg = NULL;
if (dst_reg->type != SCALAR_VALUE)
ptr_reg = dst_reg;
if (BPF_SRC(insn->code) == BPF_X) {
src_reg = &regs[insn->src_reg];
if (src_reg->type != SCALAR_VALUE) {
if (dst_reg->type != SCALAR_VALUE) {
/* Combining two pointers by any ALU op yields
* an arbitrary scalar.
*/
if (!env->allow_ptr_leaks) {
verbose(env, "R%d pointer %s pointer prohibited\n",
insn->dst_reg,
bpf_alu_string[opcode >> 4]);
return -EACCES;
}
mark_reg_unknown(env, regs, insn->dst_reg);
return 0;
} else {
/* scalar += pointer
* This is legal, but we have to reverse our
* src/dest handling in computing the range
*/
rc = adjust_ptr_min_max_vals(env, insn,
src_reg, dst_reg);
if (rc == -EACCES && env->allow_ptr_leaks) {
/* scalar += unknown scalar */
__mark_reg_unknown(&off_reg);
return adjust_scalar_min_max_vals(
env, insn,
dst_reg, off_reg);
}
return rc;
}
} else if (ptr_reg) {
/* pointer += scalar */
rc = adjust_ptr_min_max_vals(env, insn,
dst_reg, src_reg);
if (rc == -EACCES && env->allow_ptr_leaks) {
/* unknown scalar += scalar */
__mark_reg_unknown(dst_reg);
return adjust_scalar_min_max_vals(
env, insn, dst_reg, *src_reg);
}
return rc;
}
} else {
/* Pretend the src is a reg with a known value, since we only
* need to be able to read from this state.
*/
off_reg.type = SCALAR_VALUE;
__mark_reg_known(&off_reg, insn->imm);
src_reg = &off_reg;
if (ptr_reg) { /* pointer += K */
rc = adjust_ptr_min_max_vals(env, insn,
ptr_reg, src_reg);
if (rc == -EACCES && env->allow_ptr_leaks) {
/* unknown scalar += K */
__mark_reg_unknown(dst_reg);
return adjust_scalar_min_max_vals(
env, insn, dst_reg, off_reg);
}
return rc;
}
}
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
print_verifier_state(env, env->cur_state);
verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
print_verifier_state(env, env->cur_state);
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
}
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode == BPF_END || opcode == BPF_NEG) {
if (opcode == BPF_NEG) {
if (BPF_SRC(insn->code) != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->off != 0 || insn->imm != 0) {
verbose(env, "BPF_NEG uses reserved fields\n");
return -EINVAL;
}
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
(insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
BPF_CLASS(insn->code) == BPF_ALU64) {
verbose(env, "BPF_END uses reserved fields\n");
return -EINVAL;
}
}
/* check src operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer arithmetic prohibited\n",
insn->dst_reg);
return -EACCES;
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
} else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
if (BPF_SRC(insn->code) == BPF_X) {
if (BPF_CLASS(insn->code) == BPF_ALU64) {
/* case: R1 = R2
* copy register state to dest reg
*/
regs[insn->dst_reg] = regs[insn->src_reg];
regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
verbose(env,
"R%d partial copy of pointer\n",
insn->src_reg);
return -EACCES;
}
mark_reg_unknown(env, regs, insn->dst_reg);
/* high 32 bits are known zero. */
regs[insn->dst_reg].var_off = tnum_cast(
regs[insn->dst_reg].var_off, 4);
__update_reg_bounds(&regs[insn->dst_reg]);
}
} else {
/* case: R = imm
* remember the value we stored into this reg
*/
regs[insn->dst_reg].type = SCALAR_VALUE;
__mark_reg_known(regs + insn->dst_reg, insn->imm);
}
} else if (opcode > BPF_END) {
verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
return -EINVAL;
} else { /* all other ALU ops: and, sub, xor, add, ... */
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
verbose(env, "div by zero\n");
return -EINVAL;
}
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
if (insn->imm < 0 || insn->imm >= size) {
verbose(env, "invalid shift %d\n", insn->imm);
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
return adjust_reg_min_max_vals(env, insn);
}
return 0;
}
static void find_good_pkt_pointers(struct bpf_verifier_state *state,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{
struct bpf_reg_state *regs = state->regs, *reg;
u16 new_range;
int i;
if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))
/* This doesn't give us any range */
return;
if (dst_reg->umax_value > MAX_PACKET_OFF ||
dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
/* Risk of overflow. For instance, ptr + (1<<63) may be less
* than pkt_end, but that's because it's also less than pkt.
*/
return;
new_range = dst_reg->off;
if (range_right_open)
new_range--;
/* Examples for register markings:
*
* pkt_data in dst register:
*
* r2 = r3;
* r2 += 8;
* if (r2 > pkt_end) goto <handle exception>
* <access okay>
*
* r2 = r3;
* r2 += 8;
* if (r2 < pkt_end) goto <access okay>
* <handle exception>
*
* Where:
* r2 == dst_reg, pkt_end == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* pkt_data in src register:
*
* r2 = r3;
* r2 += 8;
* if (pkt_end >= r2) goto <access okay>
* <handle exception>
*
* r2 = r3;
* r2 += 8;
* if (pkt_end <= r2) goto <handle exception>
* <access okay>
*
* Where:
* pkt_end == dst_reg, r2 == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
* or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
* and [r3, r3 + 8-1) respectively is safe to access depending on
* the check.
*/
/* If our ids match, then we must have the same max_value. And we
* don't care about the other reg's fixed offset, since if it's too big
* the range won't allow anything.
* dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
*/
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == type && regs[i].id == dst_reg->id)
/* keep the maximum range already checked */
regs[i].range = max(regs[i].range, new_range);
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
reg = &state->stack[i].spilled_ptr;
if (reg->type == type && reg->id == dst_reg->id)
reg->range = max(reg->range, new_range);
}
}
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
* In JEQ/JNE cases we also adjust the var_off values.
*/
static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
/* If the dst_reg is a pointer, we can't learn anything about its
* variable offset from the compare (unless src_reg were a pointer into
* the same object, but we don't bother with that).
* Since false_reg and true_reg have the same type by construction, we
* only need to check one of them for pointerness.
*/
if (__is_pointer_value(false, false_reg))
return;
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
__mark_reg_known(true_reg, val);
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure;
*/
__mark_reg_known(false_reg, val);
break;
case BPF_JGT:
false_reg->umax_value = min(false_reg->umax_value, val);
true_reg->umin_value = max(true_reg->umin_value, val + 1);
break;
case BPF_JSGT:
false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
break;
case BPF_JLT:
false_reg->umin_value = max(false_reg->umin_value, val);
true_reg->umax_value = min(true_reg->umax_value, val - 1);
break;
case BPF_JSLT:
false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
break;
case BPF_JGE:
false_reg->umax_value = min(false_reg->umax_value, val - 1);
true_reg->umin_value = max(true_reg->umin_value, val);
break;
case BPF_JSGE:
false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
break;
case BPF_JLE:
false_reg->umin_value = max(false_reg->umin_value, val + 1);
true_reg->umax_value = min(true_reg->umax_value, val);
break;
case BPF_JSLE:
false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
break;
default:
break;
}
__reg_deduce_bounds(false_reg);
__reg_deduce_bounds(true_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(false_reg);
__reg_bound_offset(true_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(false_reg);
__update_reg_bounds(true_reg);
}
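/* Example: for "if r1 > 7 goto ..." on an unknown scalar r1, the
 * BPF_JGT case gives the fall-through copy umax_value = 7 and the
 * taken-branch copy umin_value = 8; the refinement below can then
 * derive known-zero high bits from the new umax.
 */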
/* Same as above, but for the case that dst_reg holds a constant and src_reg is
* the variable reg.
*/
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
if (__is_pointer_value(false, false_reg))
return;
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
__mark_reg_known(true_reg, val);
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure;
*/
__mark_reg_known(false_reg, val);
break;
case BPF_JGT:
true_reg->umax_value = min(true_reg->umax_value, val - 1);
false_reg->umin_value = max(false_reg->umin_value, val);
break;
case BPF_JSGT:
true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
break;
case BPF_JLT:
true_reg->umin_value = max(true_reg->umin_value, val + 1);
false_reg->umax_value = min(false_reg->umax_value, val);
break;
case BPF_JSLT:
true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
break;
case BPF_JGE:
true_reg->umax_value = min(true_reg->umax_value, val);
false_reg->umin_value = max(false_reg->umin_value, val + 1);
break;
case BPF_JSGE:
true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
break;
case BPF_JLE:
true_reg->umin_value = max(true_reg->umin_value, val);
false_reg->umax_value = min(false_reg->umax_value, val - 1);
break;
case BPF_JSLE:
true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
break;
default:
break;
}
__reg_deduce_bounds(false_reg);
__reg_deduce_bounds(true_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(false_reg);
__reg_bound_offset(true_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(false_reg);
__update_reg_bounds(true_reg);
}
/* Regs are known to be equal, so intersect their min/max/var_off */
static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
struct bpf_reg_state *dst_reg)
{
src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
dst_reg->umin_value);
src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
dst_reg->umax_value);
src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
dst_reg->smin_value);
src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
dst_reg->smax_value);
src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
dst_reg->var_off);
/* We might have learned new bounds from the var_off. */
__update_reg_bounds(src_reg);
__update_reg_bounds(dst_reg);
/* We might have learned something about the sign bit. */
__reg_deduce_bounds(src_reg);
__reg_deduce_bounds(dst_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(src_reg);
__reg_bound_offset(dst_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(src_reg);
__update_reg_bounds(dst_reg);
}
static void reg_combine_min_max(struct bpf_reg_state *true_src,
struct bpf_reg_state *true_dst,
struct bpf_reg_state *false_src,
struct bpf_reg_state *false_dst,
u8 opcode)
{
switch (opcode) {
case BPF_JEQ:
__reg_combine_min_max(true_src, true_dst);
break;
case BPF_JNE:
__reg_combine_min_max(false_src, false_dst);
break;
}
}
static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
bool is_null)
{
struct bpf_reg_state *reg = &regs[regno];
if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
/* Old offset (both fixed and variable parts) should
* have been known-zero, because we don't allow pointer
* arithmetic on pointers that might be NULL.
*/
if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
!tnum_equals_const(reg->var_off, 0) ||
reg->off)) {
__mark_reg_known_zero(reg);
reg->off = 0;
}
if (is_null) {
reg->type = SCALAR_VALUE;
} else if (reg->map_ptr->inner_map_meta) {
reg->type = CONST_PTR_TO_MAP;
reg->map_ptr = reg->map_ptr->inner_map_meta;
} else {
reg->type = PTR_TO_MAP_VALUE;
}
/* We don't need id from this point onwards anymore, thus we
* should better reset it, so that state pruning has chances
* to take effect.
*/
reg->id = 0;
}
}
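/* Typical flow (hypothetical fragment):
 *
 *   call bpf_map_lookup_elem;   // r0: PTR_TO_MAP_VALUE_OR_NULL, id N
 *   if (r0 == 0) goto out;      // taken branch: id N becomes SCALAR_VALUE
 *   r1 = *(u64 *)(r0 + 0);      // here: id N is now PTR_TO_MAP_VALUE
 *
 * Copies of r0 made before the test share id N and are retyped
 * together by mark_map_regs() below.
 */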
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
bool is_null)
{
struct bpf_reg_state *regs = state->regs;
u32 id = regs[regno].id;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
mark_map_reg(regs, i, id, is_null);
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
}
}
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg,
struct bpf_verifier_state *this_branch,
struct bpf_verifier_state *other_branch)
{
if (BPF_SRC(insn->code) != BPF_X)
return false;
switch (BPF_OP(insn->code)) {
case BPF_JGT:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
find_good_pkt_pointers(this_branch, dst_reg,
dst_reg->type, false);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end > pkt_data', pkt_data > pkt_meta' */
find_good_pkt_pointers(other_branch, src_reg,
src_reg->type, true);
} else {
return false;
}
break;
case BPF_JLT:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
find_good_pkt_pointers(other_branch, dst_reg,
dst_reg->type, true);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end < pkt_data', pkt_data < pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg,
src_reg->type, false);
} else {
return false;
}
break;
case BPF_JGE:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
find_good_pkt_pointers(this_branch, dst_reg,
dst_reg->type, true);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
find_good_pkt_pointers(other_branch, src_reg,
src_reg->type, false);
} else {
return false;
}
break;
case BPF_JLE:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
find_good_pkt_pointers(other_branch, dst_reg,
dst_reg->type, false);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg,
src_reg->type, true);
} else {
return false;
}
break;
default:
return false;
}
return true;
}
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode > BPF_JSLE) {
verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->src_reg);
return -EACCES;
}
} else {
if (insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg = &regs[insn->dst_reg];
/* detect if R == 0 where R was initialized to zero earlier */
if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == SCALAR_VALUE &&
tnum_equals_const(dst_reg->var_off, insn->imm)) {
if (opcode == BPF_JEQ) {
/* if (imm == imm) goto pc+off;
* only follow the goto, ignore fall-through
*/
*insn_idx += insn->off;
return 0;
} else {
/* if (imm != imm) goto pc+off;
* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
}
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
* this is only legit if both are scalars (or pointers to the same
* object, I suppose, but we don't support that right now), because
* otherwise the different base pointers mean the offsets aren't
* comparable.
*/
if (BPF_SRC(insn->code) == BPF_X) {
if (dst_reg->type == SCALAR_VALUE &&
regs[insn->src_reg].type == SCALAR_VALUE) {
if (tnum_is_const(regs[insn->src_reg].var_off))
reg_set_min_max(&other_branch->regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].var_off.value,
opcode);
else if (tnum_is_const(dst_reg->var_off))
reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
&regs[insn->src_reg],
dst_reg->var_off.value, opcode);
else if (opcode == BPF_JEQ || opcode == BPF_JNE)
/* Comparing for equality, we can combine knowledge */
reg_combine_min_max(&other_branch->regs[insn->src_reg],
&other_branch->regs[insn->dst_reg],
&regs[insn->src_reg],
&regs[insn->dst_reg], opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch->regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
/* Mark all identical map registers in each branch as either
* safe or unknown, depending on the R == 0 or R != 0 condition.
*/
mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
this_branch, other_branch) &&
is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->dst_reg);
return -EACCES;
}
if (env->log.level)
print_verifier_state(env, this_branch);
return 0;
}
/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
return (struct bpf_map *) (unsigned long) imm64;
}
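/* BPF_LD_IMM64 spans two instructions: insn[0].imm carries the low
 * and insn[1].imm the high 32 bits of the 64-bit constant. For
 * src_reg == BPF_PSEUDO_MAP_FD the loader has already substituted
 * the kernel map pointer for the user-supplied map fd, and it is
 * reassembled here.
 */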
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
verbose(env, "invalid BPF_LD_IMM insn\n");
return -EINVAL;
}
if (insn->off != 0) {
verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
return -EINVAL;
}
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
if (insn->src_reg == 0) {
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
regs[insn->dst_reg].type = SCALAR_VALUE;
__mark_reg_known(&regs[insn->dst_reg], imm);
return 0;
}
/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
return 0;
}
static bool may_access_skb(enum bpf_prog_type type)
{
switch (type) {
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
return true;
default:
return false;
}
}
/* verify safety of LD_ABS|LD_IND instructions:
* - they can only appear in the programs where ctx == skb
* - since they are wrappers of function calls, they scratch R1-R5 registers,
* preserve R6-R9, and store return value into R0
*
* Implicit input:
* ctx == skb == R6 == CTX
*
* Explicit input:
* SRC == any register
* IMM == 32-bit immediate
*
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 mode = BPF_MODE(insn->code);
int i, err;
if (!may_access_skb(env->prog->type)) {
verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
return -EINVAL;
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
return -EINVAL;
}
/* check whether implicit source operand (register R6) is readable */
err = check_reg_arg(env, BPF_REG_6, SRC_OP);
if (err)
return err;
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
}
if (mode == BPF_IND) {
/* check explicit source operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
}
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* mark destination R0 register as readable, since it contains
* the value fetched from the packet.
* Already marked as written above.
*/
mark_reg_unknown(env, regs, BPF_REG_0);
return 0;
}
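/* Usage example (illustrative): in a socket filter program,
 *
 *   BPF_LD_ABS(BPF_B, 23)    // r0 = one byte at skb->data[23]
 *                            // (the IP protocol field of an IPv4-over-
 *                            //  Ethernet frame, a classic cBPF idiom)
 *
 * passes the checks above: R6 holds the ctx/skb, R1-R5 get scratched and
 * the cpu-endian result lands in R0. The same insn in, say, a kprobe
 * program is rejected by may_access_skb().
 */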
static int check_return_code(struct bpf_verifier_env *env)
{
struct bpf_reg_state *reg;
struct tnum range = tnum_range(0, 1);
switch (env->prog->type) {
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:
break;
default:
return 0;
}
reg = cur_regs(env) + BPF_REG_0;
if (reg->type != SCALAR_VALUE) {
verbose(env, "At program exit the register R0 is not a known value (%s)\n",
reg_type_str[reg->type]);
return -EINVAL;
}
if (!tnum_in(range, reg->var_off)) {
verbose(env, "At program exit the register R0 ");
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "has value %s", tn_buf);
} else {
verbose(env, "has unknown scalar value");
}
verbose(env, " should have been 0 or 1\n");
return -EINVAL;
}
return 0;
}
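/* Worked example (illustrative): for a BPF_PROG_TYPE_CGROUP_SKB program
 * the accepted range is tnum_range(0, 1), so
 *
 *   BPF_MOV64_IMM(BPF_REG_0, 1), BPF_EXIT_INSN()   -> accepted
 *   BPF_MOV64_IMM(BPF_REG_0, 2), BPF_EXIT_INSN()   -> rejected: 2 is
 *                                                     outside [0, 1]
 *
 * A socket filter, by contrast, returns early from the switch above and
 * may exit with any known scalar in R0.
 */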
/* non-recursive DFS pseudo code
* 1 procedure DFS-iterative(G,v):
* 2 label v as discovered
* 3 let S be a stack
* 4 S.push(v)
* 5 while S is not empty
* 6 t <- S.pop()
* 7 if t is what we're looking for:
* 8 return t
* 9 for all edges e in G.adjacentEdges(t) do
* 10 if edge e is already labelled
* 11 continue with the next edge
* 12 w <- G.adjacentVertex(t,e)
* 13 if vertex w is not discovered and not explored
* 14 label e as tree-edge
* 15 label w as discovered
* 16 S.push(w)
* 17 continue at 5
* 18 else if vertex w is discovered
* 19 label e as back-edge
* 20 else
* 21 // vertex w is explored
* 22 label e as forward- or cross-edge
* 23 label t as explored
* 24 S.pop()
*
* convention:
* 0x10 - discovered
* 0x11 - discovered and fall-through edge labelled
* 0x12 - discovered and fall-through and branch edges labelled
* 0x20 - explored
*/
enum {
DISCOVERED = 0x10,
EXPLORED = 0x20,
FALLTHROUGH = 1,
BRANCH = 2,
};
#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
static int *insn_stack; /* stack of insns to process */
static int cur_stack; /* current stack index */
static int *insn_state;
/* t, w, e - match pseudo-code above:
* t - index of current instruction
* w - next instruction
* e - edge
*/
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
return 0;
if (w < 0 || w >= env->prog->len) {
verbose(env, "jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
if (e == BRANCH)
/* mark branch target for state pruning */
env->explored_states[w] = STATE_LIST_MARK;
if (insn_state[w] == 0) {
/* tree-edge */
insn_state[t] = DISCOVERED | e;
insn_state[w] = DISCOVERED;
if (cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
verbose(env, "back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
/* forward- or cross-edge */
insn_state[t] = DISCOVERED | e;
} else {
verbose(env, "insn state internal bug\n");
return -EFAULT;
}
return 0;
}
/* non-recursive depth-first-search to detect loops in BPF program
* loop == back-edge in directed graph
*/
static int check_cfg(struct bpf_verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
int ret = 0;
int i, t;
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_stack) {
kfree(insn_state);
return -ENOMEM;
}
insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
insn_stack[0] = 0; /* 0 is the first instruction */
cur_stack = 1;
peek_stack:
if (cur_stack == 0)
goto check_state;
t = insn_stack[cur_stack - 1];
if (BPF_CLASS(insns[t].code) == BPF_JMP) {
u8 opcode = BPF_OP(insns[t].code);
if (opcode == BPF_EXIT) {
goto mark_explored;
} else if (opcode == BPF_CALL) {
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
goto err_free;
}
/* unconditional jump with single edge */
ret = push_insn(t, t + insns[t].off + 1,
FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
/* tell verifier to check for equivalent states
* after every call and jump
*/
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
} else {
/* conditional jump with two edges */
env->explored_states[t] = STATE_LIST_MARK;
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
} else {
/* all other non-branch instructions with single
* fall-through edge
*/
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
mark_explored:
insn_state[t] = EXPLORED;
if (cur_stack-- <= 0) {
verbose(env, "pop stack internal bug\n");
ret = -EFAULT;
goto err_free;
}
goto peek_stack;
check_state:
for (i = 0; i < insn_cnt; i++) {
if (insn_state[i] != EXPLORED) {
verbose(env, "unreachable insn %d\n", i);
ret = -EINVAL;
goto err_free;
}
}
ret = 0; /* cfg looks good */
err_free:
kfree(insn_state);
kfree(insn_stack);
return ret;
}
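/* Example (illustrative): the two-insn program
 *
 *   0: BPF_MOV64_IMM(BPF_REG_0, 0)
 *   1: BPF_JMP_A(-2)                  // jumps back to insn 0
 *
 * is rejected here: while insn 0 is still on the DFS stack,
 * push_insn(1, 0, FALLTHROUGH, env) finds insn 0 in the DISCOVERED
 * state and reports "back-edge from insn 1 to 0".
 */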
/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
struct bpf_reg_state *cur)
{
return old->umin_value <= cur->umin_value &&
old->umax_value >= cur->umax_value &&
old->smin_value <= cur->smin_value &&
old->smax_value >= cur->smax_value;
}
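/* Example (illustrative): if the old (explored) state proved
 * 0 <= R1 <= 10 and the current state knows 2 <= R1 <= 8, then
 * range_within(old, cur) is true: every value the current state can
 * take was already validated. The reverse (old [2, 8], cur [0, 10])
 * fails, since cur could hold e.g. 9, which old never covered.
 */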
/* Maximum number of register states that can exist at once */
#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
struct idpair {
u32 old;
u32 cur;
};
/* If in the old state two registers had the same id, then they need to have
* the same id in the new state as well. But that id could be different from
* the old state, so we need to track the mapping from old to new ids.
* Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
* regs with old id 5 must also have new id 9 for the new state to be safe. But
* regs with a different old id could still have new id 9, we don't care about
* that.
* So we look through our idmap to see if this old id has been seen before. If
* so, we require the new id to match; otherwise, we add the id pair to the map.
*/
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
{
unsigned int i;
for (i = 0; i < ID_MAP_SIZE; i++) {
if (!idmap[i].old) {
/* Reached an empty slot; haven't seen this id before */
idmap[i].old = old_id;
idmap[i].cur = cur_id;
return true;
}
if (idmap[i].old == old_id)
return idmap[i].cur == cur_id;
}
/* We ran out of idmap slots, which should be impossible */
WARN_ON_ONCE(1);
return false;
}
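/* Example (illustrative): suppose two registers shared id 5 in the old
 * state and share id 9 in the current one. The first
 * check_ids(5, 9, idmap) call records the {old 5 -> cur 9} pair and
 * returns true; a second check_ids(5, 9, idmap) matches the recorded
 * pair and also returns true, while check_ids(5, 7, idmap) would now
 * return false because old id 5 is already bound to cur id 9.
 */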
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
{
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
return true;
if (rold->type == NOT_INIT)
/* explored state can't have used this */
return true;
if (rcur->type == NOT_INIT)
return false;
switch (rold->type) {
case SCALAR_VALUE:
if (rcur->type == SCALAR_VALUE) {
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
/* if we knew anything about the old value, we're not
* equal, because we can't know anything about the
* scalar value of the pointer in the new value.
*/
return rold->umin_value == 0 &&
rold->umax_value == U64_MAX &&
rold->smin_value == S64_MIN &&
rold->smax_value == S64_MAX &&
tnum_is_unknown(rold->var_off);
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and
* everything else matches, we are OK.
* We don't care about the 'id' value, because nothing
* uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
*/
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_MAP_VALUE_OR_NULL:
/* a PTR_TO_MAP_VALUE could be safe to use as a
* PTR_TO_MAP_VALUE_OR_NULL into the same map.
* However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
* checked, doing so could have affected others with the same
* id, and we can't check for that because we lost the id when
* we converted to a PTR_TO_MAP_VALUE.
*/
if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
return false;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
return false;
/* Check our ids match any regs they're supposed to */
return check_ids(rold->id, rcur->id, idmap);
case PTR_TO_PACKET_META:
case PTR_TO_PACKET:
if (rcur->type != rold->type)
return false;
/* We must have at least as much range as the old ptr
* did, so that any accesses which were safe before are
* still safe. This is true even if old range < old off,
* since someone could have accessed through (ptr - k), or
* even done ptr -= k in a register, to get a safe access.
*/
if (rold->range > rcur->range)
return false;
/* If the offsets don't match, we can't trust our alignment;
* nor can we be sure that we won't fall out of range.
*/
if (rold->off != rcur->off)
return false;
/* id relations must be preserved */
if (rold->id && !check_ids(rold->id, rcur->id, idmap))
return false;
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
case PTR_TO_STACK:
case PTR_TO_PACKET_END:
/* Only valid matches are exact, which memcmp() above
* would have accepted
*/
default:
/* Don't know what's going on, just say it's not safe */
return false;
}
/* Shouldn't get here; if we do, say it's not safe */
WARN_ON_ONCE(1);
return false;
}
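/* Example (illustrative): if the explored state only knew "R3 is some
 * unknown scalar" (full u64/s64 range, unknown var_off) and the current
 * state has R3 == 42, the SCALAR_VALUE case above accepts the match:
 * the constant trivially satisfies both range_within() and tnum_in(),
 * so this path can be pruned.
 */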
static bool stacksafe(struct bpf_verifier_state *old,
struct bpf_verifier_state *cur,
struct idpair *idmap)
{
int i, spi;
	/* if the explored stack has more populated slots than the current
	 * stack, such stacks are not equivalent
*/
if (old->allocated_stack > cur->allocated_stack)
return false;
/* walk slots of the explored stack and ignore any additional
* slots in the current stack, since explored(safe) state
* didn't use them
*/
for (i = 0; i < old->allocated_stack; i++) {
spi = i / BPF_REG_SIZE;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent;
			 * return false to continue verification of this path
*/
return false;
if (i % BPF_REG_SIZE)
continue;
if (old->stack[spi].slot_type[0] != STACK_SPILL)
continue;
if (!regsafe(&old->stack[spi].spilled_ptr,
&cur->stack[spi].spilled_ptr,
idmap))
/* when explored and current stack slot are both storing
			 * spilled registers, check that the stored pointer types
* are the same as well.
* Ex: explored safe path could have stored
* (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
* but current path has stored:
* (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
* such verifier states are not equivalent.
* return false to continue verification of this path
*/
return false;
}
return true;
}
/* compare two verifier states
*
* all states stored in state_list are known to be valid, since
* verifier reached 'bpf_exit' instruction through them
*
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * a more strict register state and a more strict stack state, then this
 * execution branch doesn't need to be explored further, since the verifier
 * already concluded that the more strict state leads to a valid finish.
*
* Therefore two states are equivalent if register state is more conservative
* and explored stack state is more conservative than the current one.
* Example:
* explored current
* (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
* (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
*
 * In other words, if the current stack state (the one being explored) has
 * more valid slots than an old one that already passed validation, it means
 * the verifier can stop exploring and conclude that the current state is
 * valid too
*
* Similarly with registers. If explored state has register type as invalid
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{
struct idpair *idmap;
bool ret = false;
int i;
idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
/* If we failed to allocate the idmap, just say it's not safe */
if (!idmap)
return false;
for (i = 0; i < MAX_BPF_REG; i++) {
if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
goto out_free;
}
if (!stacksafe(old, cur, idmap))
goto out_free;
ret = true;
out_free:
kfree(idmap);
return ret;
}
/* A write screens off any subsequent reads; but write marks come from the
* straight-line code between a state and its parent. When we arrive at a
* jump target (in the first iteration of the propagate_liveness() loop),
* we didn't arrive by the straight-line code, so read marks in state must
* propagate to parent regardless of state's write marks.
*/
static bool do_propagate_liveness(const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent)
{
bool writes = parent == state->parent; /* Observe write marks */
bool touched = false; /* any changes made? */
int i;
if (!parent)
return touched;
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */
for (i = 0; i < BPF_REG_FP; i++) {
if (parent->regs[i].live & REG_LIVE_READ)
continue;
if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
continue;
if (state->regs[i].live & REG_LIVE_READ) {
parent->regs[i].live |= REG_LIVE_READ;
touched = true;
}
}
/* ... and stack slots */
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
i < parent->allocated_stack / BPF_REG_SIZE; i++) {
if (parent->stack[i].slot_type[0] != STACK_SPILL)
continue;
if (state->stack[i].slot_type[0] != STACK_SPILL)
continue;
if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
continue;
if (writes &&
(state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
continue;
if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
touched = true;
}
}
return touched;
}
/* "parent" is "a state from which we reach the current state", but initially
* it is not the state->parent (i.e. "the state whose straight-line code leads
* to the current state"), instead it is the state that happened to arrive at
* a (prunable) equivalent of the current state. See comment above
* do_propagate_liveness() for consequences of this.
* This function is just a more efficient way of calling mark_reg_read() or
* mark_stack_slot_read() on each reg in "parent" that is read in "state",
* though it requires that parent != state->parent in the call arguments.
*/
static void propagate_liveness(const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent)
{
while (do_propagate_liveness(state, parent)) {
/* Something changed, so we need to feed those changes onward */
state = parent;
parent = state->parent;
}
}
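/* Example (illustrative): if state S read R2 but its grandparent G has
 * no such mark, the first do_propagate_liveness(S, P) call sets
 * REG_LIVE_READ on P->regs[2] and returns true; the loop then repeats
 * with (P, G), so the read mark keeps flowing up the parentage chain
 * until an ancestor already carries it, or a write mark screens it off
 * when `writes` applies.
 */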
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state;
int i, err;
sl = env->explored_states[insn_idx];
if (!sl)
/* this 'insn_idx' instruction wasn't marked, so we will not
* be doing state search here
*/
return 0;
while (sl != STATE_LIST_MARK) {
if (states_equal(env, &sl->state, cur)) {
/* reached equivalent register/stack state,
* prune the search.
* Registers read by the continuation are read by us.
* If we have any write marks in env->cur_state, they
* will prevent corresponding reads in the continuation
* from reaching our parent (an explored_state). Our
* own state will get the read marks recorded, but
* they'll be immediately forgotten as we're pruning
* this state and will pop a new one.
*/
propagate_liveness(&sl->state, cur);
return 1;
}
sl = sl->next;
}
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
* but it will either reach bpf_exit (which means it's safe) or
* it will be rejected. Since there are no loops, we won't be
* seeing this 'insn_idx' instruction again on the way to bpf_exit
*/
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
if (!new_sl)
return -ENOMEM;
/* add new state to the head of linked list */
err = copy_verifier_state(&new_sl->state, cur);
if (err) {
free_verifier_state(&new_sl->state, false);
kfree(new_sl);
return err;
}
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
/* connect new state to parentage chain */
cur->parent = &new_sl->state;
/* clear write marks in current state: the writes we did are not writes
* our child did, so they don't screen off its reads from us.
* (There are no read marks in current state, because reads always mark
* their parent and current state never has children yet. Only
* explored_states can get read marks.)
*/
for (i = 0; i < BPF_REG_FP; i++)
cur->regs[i].live = REG_LIVE_NONE;
for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
if (cur->stack[i].slot_type[0] == STACK_SPILL)
cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
return 0;
}
static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx)
{
if (env->dev_ops && env->dev_ops->insn_hook)
return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
return 0;
}
static int do_check(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
int insn_cnt = env->prog->len;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
env->cur_state = state;
init_reg_state(env, state->regs);
state->parent = NULL;
insn_idx = 0;
for (;;) {
struct bpf_insn *insn;
u8 class;
int err;
if (insn_idx >= insn_cnt) {
verbose(env, "invalid insn idx %d insn_cnt %d\n",
insn_idx, insn_cnt);
return -EFAULT;
}
insn = &insns[insn_idx];
class = BPF_CLASS(insn->code);
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
verbose(env,
"BPF program is too large. Processed %d insn\n",
insn_processed);
return -E2BIG;
}
err = is_state_visited(env, insn_idx);
if (err < 0)
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
if (env->log.level) {
if (do_print_state)
verbose(env, "\nfrom %d to %d: safe\n",
prev_insn_idx, insn_idx);
else
verbose(env, "%d: safe\n", insn_idx);
}
goto process_bpf_exit;
}
if (need_resched())
cond_resched();
if (env->log.level > 1 || (env->log.level && do_print_state)) {
if (env->log.level > 1)
verbose(env, "%d:", insn_idx);
else
verbose(env, "\nfrom %d to %d:",
prev_insn_idx, insn_idx);
print_verifier_state(env, state);
do_print_state = false;
}
if (env->log.level) {
verbose(env, "%d: ", insn_idx);
print_bpf_insn(verbose, env, insn,
env->allow_ptr_leaks);
}
err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
if (err)
return err;
regs = cur_regs(env);
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
} else if (class == BPF_LDX) {
enum bpf_reg_type *prev_src_type, src_reg_type;
/* check for reserved fields is already done */
/* check src operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
src_reg_type = regs[insn->src_reg].type;
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ,
insn->dst_reg);
if (err)
return err;
prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
if (*prev_src_type == NOT_INIT) {
/* saw a valid insn
* dst_reg = *(u32 *)(src_reg + off)
* save type to validate intersecting paths
*/
*prev_src_type = src_reg_type;
} else if (src_reg_type != *prev_src_type &&
(src_reg_type == PTR_TO_CTX ||
*prev_src_type == PTR_TO_CTX)) {
				/* An abuser program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
* src_reg == ctx in one branch and
* src_reg == stack|map in some other branch.
* Reject it.
*/
verbose(env, "same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_STX) {
enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, insn_idx, insn);
if (err)
return err;
insn_idx++;
continue;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
insn->src_reg);
if (err)
return err;
prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
if (*prev_dst_type == NOT_INIT) {
*prev_dst_type = dst_reg_type;
} else if (dst_reg_type != *prev_dst_type &&
(dst_reg_type == PTR_TO_CTX ||
*prev_dst_type == PTR_TO_CTX)) {
verbose(env, "same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM ||
insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_ST uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
-1);
if (err)
return err;
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_CALL uses reserved fields\n");
return -EINVAL;
}
err = check_call(env, insn->imm, insn_idx);
if (err)
return err;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_JA uses reserved fields\n");
return -EINVAL;
}
insn_idx += insn->off + 1;
continue;
} else if (opcode == BPF_EXIT) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
				/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
* of bpf_exit, which means that program wrote
* something into it earlier
*/
err = check_reg_arg(env, BPF_REG_0, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, BPF_REG_0)) {
verbose(env, "R0 leaks addr as return value\n");
return -EACCES;
}
err = check_return_code(env);
if (err)
return err;
process_bpf_exit:
err = pop_stack(env, &prev_insn_idx, &insn_idx);
if (err < 0) {
if (err != -ENOENT)
return err;
break;
} else {
do_print_state = true;
continue;
}
} else {
err = check_cond_jmp_op(env, insn, &insn_idx);
if (err)
return err;
}
} else if (class == BPF_LD) {
u8 mode = BPF_MODE(insn->code);
if (mode == BPF_ABS || mode == BPF_IND) {
err = check_ld_abs(env, insn);
if (err)
return err;
} else if (mode == BPF_IMM) {
err = check_ld_imm(env, insn);
if (err)
return err;
insn_idx++;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
}
} else {
verbose(env, "unknown insn class %d\n", class);
return -EINVAL;
}
insn_idx++;
}
verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
env->prog->aux->stack_depth);
return 0;
}
static int check_map_prealloc(struct bpf_map *map)
{
return (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
!(map->map_flags & BPF_F_NO_PREALLOC);
}
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
{
/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
* preallocated hash maps, since doing memory allocation
* in overflow_handler can crash depending on where nmi got
* triggered.
*/
if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
if (!check_map_prealloc(map)) {
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
if (map->inner_map_meta &&
!check_map_prealloc(map->inner_map_meta)) {
verbose(env, "perf_event programs can only use preallocated inner hash map\n");
return -EINVAL;
}
}
return 0;
}
/* look for pseudo eBPF instructions that access map FDs and
* replace them with actual map pointers
*/
static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, j, err;
err = bpf_prog_calc_tag(env->prog);
if (err)
return err;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
verbose(env, "BPF_LDX uses reserved fields\n");
return -EINVAL;
}
if (BPF_CLASS(insn->code) == BPF_STX &&
((BPF_MODE(insn->code) != BPF_MEM &&
BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
verbose(env, "BPF_STX uses reserved fields\n");
return -EINVAL;
}
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
insn[1].off != 0) {
verbose(env, "invalid bpf_ld_imm64 insn\n");
return -EINVAL;
}
if (insn->src_reg == 0)
/* valid generic load 64-bit imm */
goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
verbose(env,
"unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
f = fdget(insn->imm);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n",
insn->imm);
return PTR_ERR(map);
}
err = check_map_prog_compatibility(env, map, env->prog);
if (err) {
fdput(f);
return err;
}
/* store map pointer inside BPF_LD_IMM64 instruction */
insn[0].imm = (u32) (unsigned long) map;
insn[1].imm = ((u64) (unsigned long) map) >> 32;
/* check whether we recorded this map already */
for (j = 0; j < env->used_map_cnt; j++)
if (env->used_maps[j] == map) {
fdput(f);
goto next_insn;
}
if (env->used_map_cnt >= MAX_USED_MAPS) {
fdput(f);
return -E2BIG;
}
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
map = bpf_map_inc(map, false);
if (IS_ERR(map)) {
fdput(f);
return PTR_ERR(map);
}
env->used_maps[env->used_map_cnt++] = map;
fdput(f);
next_insn:
insn++;
i++;
}
}
/* now all pseudo BPF_LD_IMM64 instructions load valid
* 'struct bpf_map *' into a register instead of user map_fd.
* These pointers will be used later by verifier to validate map access.
*/
return 0;
}
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
int i;
for (i = 0; i < env->used_map_cnt; i++)
bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++, insn++)
if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
insn->src_reg = 0;
}
/* single env->prog->insnsi[off] instruction was replaced with the range
 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
* [0, off) and [off, end) to new locations, so the patched range stays zero
*/
static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
if (cnt == 1)
return 0;
new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
if (!new_data)
return -ENOMEM;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
}
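/* Example (illustrative): replacing insn 3 with a 4-insn patch
 * (off == 3, cnt == 4, prog_len == old_len + 3) copies old aux slots
 * [0, 3) unchanged, shifts old slots [3, old_len) to new slots
 * [6, prog_len), and leaves new slots [3, 6) zeroed for the freshly
 * inserted instructions.
 */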
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
struct bpf_prog *new_prog;
new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
if (!new_prog)
return NULL;
if (adjust_insn_aux_data(env, new_prog->len, off, len))
return NULL;
return new_prog;
}
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
const struct bpf_verifier_ops *ops = env->ops;
int i, cnt, size, ctx_field_size, delta = 0;
const int insn_cnt = env->prog->len;
struct bpf_insn insn_buf[16], *insn;
struct bpf_prog *new_prog;
enum bpf_access_type type;
bool is_narrower_load;
u32 target_size;
if (ops->gen_prologue) {
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
if (cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
} else if (cnt) {
new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
env->prog = new_prog;
delta += cnt - 1;
}
}
if (!ops->convert_ctx_access)
return 0;
insn = env->prog->insnsi + delta;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
type = BPF_READ;
else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
insn->code == (BPF_STX | BPF_MEM | BPF_DW))
type = BPF_WRITE;
else
continue;
if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
continue;
ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
size = BPF_LDST_BYTES(insn);
		/* If the read access is a narrower load of the field,
		 * convert it to a 4/8-byte load to minimize program-type
		 * specific convert_ctx_access changes. If the conversion
		 * succeeds, we will apply a proper mask to the result.
*/
is_narrower_load = size < ctx_field_size;
if (is_narrower_load) {
u32 off = insn->off;
u8 size_code;
if (type == BPF_WRITE) {
verbose(env, "bpf verifier narrow ctx access misconfigured\n");
return -EINVAL;
}
size_code = BPF_H;
if (ctx_field_size == 4)
size_code = BPF_W;
else if (ctx_field_size == 8)
size_code = BPF_DW;
insn->off = off & ~(ctx_field_size - 1);
insn->code = BPF_LDX | BPF_MEM | size_code;
}
target_size = 0;
cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
&target_size);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
(ctx_field_size && !target_size)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
if (is_narrower_load && size < target_size) {
if (ctx_field_size <= 4)
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
else
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = new_prog;
insn = new_prog->insnsi + i + delta;
}
return 0;
}
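/* Illustrative sketch of the narrow-load path above: assuming a 4-byte
 * ctx field read with a 1-byte load,
 *
 *   r0 = *(u8 *)(r1 + off)
 *
 * is first widened to
 *
 *   r0 = *(u32 *)(r1 + (off & ~3))
 *
 * then translated by ops->convert_ctx_access(), and finally masked via
 *
 *   BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << 8) - 1)
 *
 * so only the low byte of the widened load survives (for a
 * field-aligned off, that is the byte originally requested).
 */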
/* fixup insn->imm field of bpf_call instructions
* and inline eligible helpers as explicit sequence of BPF instructions
*
* this function is called after eBPF program passed verification
*/
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
const struct bpf_func_proto *fn;
const int insn_cnt = prog->len;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
* be replaced dynamically during runtime in
* the program array.
*/
prog->cb_access = 1;
env->prog->aux->stack_depth = MAX_BPF_STACK;
/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpreter for every normal
* call and to prevent accidental JITing by JIT compiler
* that doesn't support bpf_tail_call yet
*/
insn->imm = 0;
insn->code = BPF_JMP | BPF_TAIL_CALL;
continue;
}
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* handlers are currently limited to 64 bit only.
*/
if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
insn->imm == BPF_FUNC_map_lookup_elem) {
map_ptr = env->insn_aux_data[i + delta].map_ptr;
if (map_ptr == BPF_MAP_PTR_POISON ||
!map_ptr->ops->map_gen_lookup)
goto patch_call_imm;
cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->imm == BPF_FUNC_redirect_map) {
/* Note, we cannot use prog directly as imm as subsequent
* rewrites would still change the prog pointer. The only
* stable address we can use is aux, which also works with
* prog clones during blinding.
*/
u64 addr = (unsigned long)prog->aux;
struct bpf_insn r4_ld[] = {
BPF_LD_IMM64(BPF_REG_4, addr),
*insn,
};
cnt = ARRAY_SIZE(r4_ld);
new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
}
patch_call_imm:
fn = env->ops->get_func_proto(insn->imm);
/* all functions that have prototype and verifier allowed
* programs to call them, must be real in-kernel functions
*/
if (!fn->func) {
verbose(env,
"kernel subsystem misconfigured func %s#%d\n",
func_id_name(insn->imm), insn->imm);
return -EFAULT;
}
insn->imm = fn->func - __bpf_call_base;
}
return 0;
}
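/* Example (illustrative): a program built with
 *
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem)
 *
 * reaches this pass with insn->imm == BPF_FUNC_map_lookup_elem. Unless
 * the call was inlined via map_gen_lookup() above, patch_call_imm
 * rewrites imm to fn->func - __bpf_call_base, the relative address the
 * interpreter and JITs use to reach the real in-kernel helper.
 */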
static void free_states(struct bpf_verifier_env *env)
{
struct bpf_verifier_state_list *sl, *sln;
int i;
if (!env->explored_states)
return;
for (i = 0; i < env->prog->len; i++) {
sl = env->explored_states[i];
if (sl)
while (sl != STATE_LIST_MARK) {
sln = sl->next;
free_verifier_state(&sl->state, false);
kfree(sl);
sl = sln;
}
}
kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
struct bpf_verifier_env *env;
struct bpf_verifer_log *log;
int ret = -EINVAL;
/* no program is valid */
if (ARRAY_SIZE(bpf_verifier_ops) == 0)
return -EINVAL;
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
log = &env->log;
env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
(*prog)->len);
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
if (attr->log_level || attr->log_buf || attr->log_size) {
/* user requested verbose verifier output
* and supplied buffer to store the verification trace
*/
log->level = attr->log_level;
log->ubuf = (char __user *) (unsigned long) attr->log_buf;
log->len_total = attr->log_size;
ret = -EINVAL;
/* log attributes have to be sane */
if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
!log->level || !log->ubuf)
goto err_unlock;
}
env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
if (env->prog->aux->offload) {
ret = bpf_prog_offload_verifier_prep(env);
if (ret)
goto err_unlock;
}
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
env->explored_states = kcalloc(env->prog->len,
sizeof(struct bpf_verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
if (!env->explored_states)
goto skip_full_check;
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = do_check(env);
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
}
skip_full_check:
while (!pop_stack(env, NULL, NULL));
free_states(env);
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
if (ret == 0)
ret = fixup_bpf_calls(env);
if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
if (log->level && !log->ubuf) {
ret = -EFAULT;
goto err_release_maps;
}
if (ret == 0 && env->used_map_cnt) {
/* if program passed verifier, update used_maps in bpf_prog_info */
env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
sizeof(env->used_maps[0]),
GFP_KERNEL);
if (!env->prog->aux->used_maps) {
ret = -ENOMEM;
goto err_release_maps;
}
memcpy(env->prog->aux->used_maps, env->used_maps,
sizeof(env->used_maps[0]) * env->used_map_cnt);
env->prog->aux->used_map_cnt = env->used_map_cnt;
/* program is valid. Convert pseudo bpf_ld_imm64 into generic
* bpf_ld_imm64 instructions
*/
convert_pseudo_ld_imm64(env);
}
err_release_maps:
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_bpf_prog_info() will release them.
*/
release_maps(env);
*prog = env->prog;
err_unlock:
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
kfree(env);
return ret;
}
/*
* fs/f2fs/super.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION
char *fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_ALLOC_NID] = "alloc nid",
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
[FAULT_TRUNCATE] = "truncate fail",
[FAULT_IO] = "IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
};
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
unsigned int rate)
{
struct f2fs_fault_info *ffi = &sbi->fault_info;
if (rate) {
atomic_set(&ffi->inject_ops, 0);
ffi->inject_rate = rate;
ffi->inject_type = (1 << FAULT_MAX) - 1;
} else {
memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
}
#endif
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
.scan_objects = f2fs_shrink_scan,
.count_objects = f2fs_shrink_count,
.seeks = DEFAULT_SEEKS,
};
enum {
Opt_gc_background,
Opt_disable_roll_forward,
Opt_norecovery,
Opt_discard,
Opt_nodiscard,
Opt_noheap,
Opt_heap,
Opt_user_xattr,
Opt_nouser_xattr,
Opt_acl,
Opt_noacl,
Opt_active_logs,
Opt_disable_ext_identify,
Opt_inline_xattr,
Opt_noinline_xattr,
Opt_inline_data,
Opt_inline_dentry,
Opt_noinline_dentry,
Opt_flush_merge,
Opt_noflush_merge,
Opt_nobarrier,
Opt_fastboot,
Opt_extent_cache,
Opt_noextent_cache,
Opt_noinline_data,
Opt_data_flush,
Opt_mode,
Opt_io_size_bits,
Opt_fault_injection,
Opt_lazytime,
Opt_nolazytime,
Opt_quota,
Opt_noquota,
Opt_usrquota,
Opt_grpquota,
Opt_prjquota,
Opt_usrjquota,
Opt_grpjquota,
Opt_prjjquota,
Opt_offusrjquota,
Opt_offgrpjquota,
Opt_offprjjquota,
Opt_jqfmt_vfsold,
Opt_jqfmt_vfsv0,
Opt_jqfmt_vfsv1,
Opt_err,
};
static match_table_t f2fs_tokens = {
{Opt_gc_background, "background_gc=%s"},
{Opt_disable_roll_forward, "disable_roll_forward"},
{Opt_norecovery, "norecovery"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
{Opt_noheap, "no_heap"},
{Opt_heap, "heap"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_active_logs, "active_logs=%u"},
{Opt_disable_ext_identify, "disable_ext_identify"},
{Opt_inline_xattr, "inline_xattr"},
{Opt_noinline_xattr, "noinline_xattr"},
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
{Opt_noinline_dentry, "noinline_dentry"},
{Opt_flush_merge, "flush_merge"},
{Opt_noflush_merge, "noflush_merge"},
{Opt_nobarrier, "nobarrier"},
{Opt_fastboot, "fastboot"},
{Opt_extent_cache, "extent_cache"},
{Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"},
{Opt_data_flush, "data_flush"},
{Opt_mode, "mode=%s"},
{Opt_io_size_bits, "io_bits=%u"},
{Opt_fault_injection, "fault_injection=%u"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
{Opt_quota, "quota"},
{Opt_noquota, "noquota"},
{Opt_usrquota, "usrquota"},
{Opt_grpquota, "grpquota"},
{Opt_prjquota, "prjquota"},
{Opt_usrjquota, "usrjquota=%s"},
{Opt_grpjquota, "grpjquota=%s"},
{Opt_prjjquota, "prjjquota=%s"},
{Opt_offusrjquota, "usrjquota="},
{Opt_offgrpjquota, "grpjquota="},
{Opt_offprjjquota, "prjjquota="},
{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
{Opt_err, NULL},
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
va_end(args);
}
static void init_once(void *foo)
{
struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
substring_t *args)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
char *qname;
int ret = -EINVAL;
if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
f2fs_msg(sb, KERN_ERR,
"Cannot change journaled "
"quota options when quota turned on");
return -EINVAL;
}
qname = match_strdup(args);
if (!qname) {
f2fs_msg(sb, KERN_ERR,
"Not enough memory for storing quotafile name");
return -EINVAL;
}
if (sbi->s_qf_names[qtype]) {
if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
ret = 0;
else
f2fs_msg(sb, KERN_ERR,
"%s quota file already specified",
QTYPE2NAME(qtype));
goto errout;
}
if (strchr(qname, '/')) {
f2fs_msg(sb, KERN_ERR,
"quotafile must be on filesystem root");
goto errout;
}
sbi->s_qf_names[qtype] = qname;
set_opt(sbi, QUOTA);
return 0;
errout:
kfree(qname);
return ret;
}
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
" when quota turned on");
return -EINVAL;
}
kfree(sbi->s_qf_names[qtype]);
sbi->s_qf_names[qtype] = NULL;
return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
/*
* We do the test below only for project quotas. 'usrquota' and
* 'grpquota' mount options are allowed even without quota feature
* to support legacy quotas in quota files.
*/
if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
"Cannot enable project quota enforcement.");
return -1;
}
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
sbi->s_qf_names[PRJQUOTA]) {
if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sbi, USRQUOTA);
if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sbi, GRPQUOTA);
if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
clear_opt(sbi, PRJQUOTA);
if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
test_opt(sbi, PRJQUOTA)) {
f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
"format mixing");
return -1;
}
if (!sbi->s_jquota_fmt) {
f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
"not specified");
return -1;
}
}
return 0;
}
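/* Example (illustrative): mounting with
 * "usrjquota=aquota.user,grpquota" trips the "old and new quota format
 * mixing" check above, since a journaled quota name may not be combined
 * with the legacy usrquota/grpquota flags; "usrjquota=aquota.user"
 * alone fails the jqfmt check, while
 * "usrjquota=aquota.user,jqfmt=vfsv0" passes. (aquota.user is just a
 * conventional quota file name used for the example.)
 */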
#endif
static int parse_options(struct super_block *sb, char *options)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct request_queue *q;
substring_t args[MAX_OPT_ARGS];
char *p, *name;
int arg = 0;
#ifdef CONFIG_QUOTA
int ret;
#endif
if (!options)
return 0;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
/*
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
args[0].to = args[0].from = NULL;
token = match_token(p, f2fs_tokens, args);
switch (token) {
case Opt_gc_background:
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
set_opt(sbi, BG_GC);
clear_opt(sbi, FORCE_FG_GC);
} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
clear_opt(sbi, BG_GC);
clear_opt(sbi, FORCE_FG_GC);
} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
set_opt(sbi, BG_GC);
set_opt(sbi, FORCE_FG_GC);
} else {
kfree(name);
return -EINVAL;
}
kfree(name);
break;
case Opt_disable_roll_forward:
set_opt(sbi, DISABLE_ROLL_FORWARD);
break;
case Opt_norecovery:
/* this option mounts f2fs with ro */
set_opt(sbi, DISABLE_ROLL_FORWARD);
if (!f2fs_readonly(sb))
return -EINVAL;
break;
case Opt_discard:
q = bdev_get_queue(sb->s_bdev);
if (blk_queue_discard(q)) {
set_opt(sbi, DISCARD);
} else if (!f2fs_sb_mounted_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but "
"the device does not support discard");
}
break;
case Opt_nodiscard:
if (f2fs_sb_mounted_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"discard is required for zoned block devices");
return -EINVAL;
}
clear_opt(sbi, DISCARD);
break;
case Opt_noheap:
set_opt(sbi, NOHEAP);
break;
case Opt_heap:
clear_opt(sbi, NOHEAP);
break;
#ifdef CONFIG_F2FS_FS_XATTR
case Opt_user_xattr:
set_opt(sbi, XATTR_USER);
break;
case Opt_nouser_xattr:
clear_opt(sbi, XATTR_USER);
break;
case Opt_inline_xattr:
set_opt(sbi, INLINE_XATTR);
break;
case Opt_noinline_xattr:
clear_opt(sbi, INLINE_XATTR);
break;
#else
case Opt_user_xattr:
f2fs_msg(sb, KERN_INFO,
"user_xattr options not supported");
break;
case Opt_nouser_xattr:
f2fs_msg(sb, KERN_INFO,
"nouser_xattr options not supported");
break;
case Opt_inline_xattr:
f2fs_msg(sb, KERN_INFO,
"inline_xattr options not supported");
break;
case Opt_noinline_xattr:
f2fs_msg(sb, KERN_INFO,
"noinline_xattr options not supported");
break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
case Opt_acl:
set_opt(sbi, POSIX_ACL);
break;
case Opt_noacl:
clear_opt(sbi, POSIX_ACL);
break;
#else
case Opt_acl:
f2fs_msg(sb, KERN_INFO, "acl options not supported");
break;
case Opt_noacl:
f2fs_msg(sb, KERN_INFO, "noacl options not supported");
break;
#endif
case Opt_active_logs:
if (args->from && match_int(args, &arg))
return -EINVAL;
if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
return -EINVAL;
sbi->active_logs = arg;
break;
case Opt_disable_ext_identify:
set_opt(sbi, DISABLE_EXT_IDENTIFY);
break;
case Opt_inline_data:
set_opt(sbi, INLINE_DATA);
break;
case Opt_inline_dentry:
set_opt(sbi, INLINE_DENTRY);
break;
case Opt_noinline_dentry:
clear_opt(sbi, INLINE_DENTRY);
break;
case Opt_flush_merge:
set_opt(sbi, FLUSH_MERGE);
break;
case Opt_noflush_merge:
clear_opt(sbi, FLUSH_MERGE);
break;
case Opt_nobarrier:
set_opt(sbi, NOBARRIER);
break;
case Opt_fastboot:
set_opt(sbi, FASTBOOT);
break;
case Opt_extent_cache:
set_opt(sbi, EXTENT_CACHE);
break;
case Opt_noextent_cache:
clear_opt(sbi, EXTENT_CACHE);
break;
case Opt_noinline_data:
clear_opt(sbi, INLINE_DATA);
break;
case Opt_data_flush:
set_opt(sbi, DATA_FLUSH);
break;
case Opt_mode:
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
if (strlen(name) == 8 &&
!strncmp(name, "adaptive", 8)) {
if (f2fs_sb_mounted_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"adaptive mode is not allowed with "
"zoned block device feature");
kfree(name);
return -EINVAL;
}
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
} else if (strlen(name) == 3 &&
!strncmp(name, "lfs", 3)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
} else {
kfree(name);
return -EINVAL;
}
kfree(name);
break;
case Opt_io_size_bits:
if (args->from && match_int(args, &arg))
return -EINVAL;
if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
f2fs_msg(sb, KERN_WARNING,
"Not support %d, larger than %d",
1 << arg, BIO_MAX_PAGES);
return -EINVAL;
}
sbi->write_io_size_bits = arg;
break;
case Opt_fault_injection:
if (args->from && match_int(args, &arg))
return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(sbi, arg);
set_opt(sbi, FAULT_INJECTION);
#else
f2fs_msg(sb, KERN_INFO,
"FAULT_INJECTION was not selected");
#endif
break;
case Opt_lazytime:
sb->s_flags |= MS_LAZYTIME;
break;
case Opt_nolazytime:
sb->s_flags &= ~MS_LAZYTIME;
break;
#ifdef CONFIG_QUOTA
case Opt_quota:
case Opt_usrquota:
set_opt(sbi, USRQUOTA);
break;
case Opt_grpquota:
set_opt(sbi, GRPQUOTA);
break;
case Opt_prjquota:
set_opt(sbi, PRJQUOTA);
break;
case Opt_usrjquota:
ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
if (ret)
return ret;
break;
case Opt_grpjquota:
ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
if (ret)
return ret;
break;
case Opt_prjjquota:
ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
if (ret)
return ret;
break;
case Opt_offusrjquota:
ret = f2fs_clear_qf_name(sb, USRQUOTA);
if (ret)
return ret;
break;
case Opt_offgrpjquota:
ret = f2fs_clear_qf_name(sb, GRPQUOTA);
if (ret)
return ret;
break;
case Opt_offprjjquota:
ret = f2fs_clear_qf_name(sb, PRJQUOTA);
if (ret)
return ret;
break;
case Opt_jqfmt_vfsold:
sbi->s_jquota_fmt = QFMT_VFS_OLD;
break;
case Opt_jqfmt_vfsv0:
sbi->s_jquota_fmt = QFMT_VFS_V0;
break;
case Opt_jqfmt_vfsv1:
sbi->s_jquota_fmt = QFMT_VFS_V1;
break;
case Opt_noquota:
clear_opt(sbi, QUOTA);
clear_opt(sbi, USRQUOTA);
clear_opt(sbi, GRPQUOTA);
clear_opt(sbi, PRJQUOTA);
break;
#else
case Opt_quota:
case Opt_usrquota:
case Opt_grpquota:
case Opt_prjquota:
case Opt_usrjquota:
case Opt_grpjquota:
case Opt_prjjquota:
case Opt_offusrjquota:
case Opt_offgrpjquota:
case Opt_offprjjquota:
case Opt_jqfmt_vfsold:
case Opt_jqfmt_vfsv0:
case Opt_jqfmt_vfsv1:
case Opt_noquota:
f2fs_msg(sb, KERN_INFO,
"quota operations not supported");
break;
#endif
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
p);
return -EINVAL;
}
}
#ifdef CONFIG_QUOTA
if (f2fs_check_quota_options(sbi))
return -EINVAL;
#endif
if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
f2fs_msg(sb, KERN_ERR,
"Should set mode=lfs with %uKB-sized IO",
F2FS_IO_SIZE_KB(sbi));
return -EINVAL;
}
return 0;
}
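/* Usage example (illustrative): a typical option string handled above is
 *
 *   mount -t f2fs -o background_gc=sync,discard,mode=lfs,io_bits=3 \
 *         /dev/sdX /mnt/f2fs
 *
 * which sets BG_GC + FORCE_FG_GC, DISCARD, F2FS_MOUNT_LFS and a 3-bit
 * (eight-page) write IO size. Note that io_bits without mode=lfs would
 * be rejected by the final check in this function. (/dev/sdX and the
 * mount point are placeholders.)
 */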
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
struct f2fs_inode_info *fi;
fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
if (!fi)
return NULL;
init_once((void *) fi);
/* Initialize f2fs-specific inode info */
fi->vfs_inode.i_version = 1;
atomic_set(&fi->dirty_pages, 0);
fi->i_current_depth = 1;
fi->i_advise = 0;
init_rwsem(&fi->i_sem);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock);
init_rwsem(&fi->dio_rwsem[READ]);
init_rwsem(&fi->dio_rwsem[WRITE]);
init_rwsem(&fi->i_mmap_sem);
init_rwsem(&fi->i_xattr_sem);
#ifdef CONFIG_QUOTA
memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
fi->i_reserved_quota = 0;
#endif
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
int ret;
/*
* This is to avoid a deadlock condition like below.
* writeback_single_inode(inode)
* - f2fs_write_data_page
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
atomic_inc(&inode->i_count);
spin_unlock(&inode->i_lock);
			/* any remaining atomic pages should be discarded */
if (f2fs_is_atomic_file(inode))
drop_inmem_pages(inode);
			/* fi->extent_tree should remain for writepage */
f2fs_destroy_extent_node(inode);
sb_start_intwrite(inode->i_sb);
f2fs_i_size_write(inode, 0);
if (F2FS_HAS_BLOCKS(inode))
f2fs_truncate(inode);
sb_end_intwrite(inode->i_sb);
fscrypt_put_encryption_info(inode, NULL);
spin_lock(&inode->i_lock);
atomic_dec(&inode->i_count);
}
trace_f2fs_drop_inode(inode, 0);
return 0;
}
ret = generic_drop_inode(inode);
trace_f2fs_drop_inode(inode, ret);
return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret = 0;
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
ret = 1;
} else {
set_inode_flag(inode, FI_DIRTY_INODE);
stat_inc_dirty_inode(sbi, DIRTY_META);
}
if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
list_add_tail(&F2FS_I(inode)->gdirty_list,
&sbi->inode_list[DIRTY_META]);
inc_page_count(sbi, F2FS_DIRTY_IMETA);
}
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return;
}
if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
list_del_init(&F2FS_I(inode)->gdirty_list);
dec_page_count(sbi, F2FS_DIRTY_IMETA);
}
clear_inode_flag(inode, FI_DIRTY_INODE);
clear_inode_flag(inode, FI_AUTO_RECOVER);
stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
* f2fs_dirty_inode() is called from __mark_inode_dirty()
*
* We should call set_dirty_inode to write the dirty inode through write_inode.
*/
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino == F2FS_NODE_INO(sbi) ||
inode->i_ino == F2FS_META_INO(sbi))
return;
if (flags == I_DIRTY_TIME)
return;
if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
clear_inode_flag(inode, FI_AUTO_RECOVER);
f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void f2fs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
percpu_counter_destroy(&sbi->alloc_valid_block_count);
percpu_counter_destroy(&sbi->total_valid_inode_count);
}
static void destroy_device_list(struct f2fs_sb_info *sbi)
{
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
kfree(FDEV(i).blkz_type);
#endif
}
kfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int i;
f2fs_quota_off_umount(sb);
/* prevent remaining shrinker jobs */
mutex_lock(&sbi->umount_mutex);
/*
* We don't need to do a checkpoint when the superblock is clean.
* But if the previous checkpoint was not done by a clean umount,
* we need to do a clean checkpoint again.
*/
if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
write_checkpoint(sbi, &cpc);
}
/* be sure to wait for any on-going discard commands */
f2fs_wait_discard_bios(sbi);
if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
write_checkpoint(sbi, &cpc);
}
/* write_checkpoint can update stat information */
f2fs_destroy_stats(sbi);
/*
* Normally the superblock is clean, so we need to release this here.
* In addition, EIO will skip the checkpoint, so we need this as well.
*/
release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
mutex_unlock(&sbi->umount_mutex);
/* in the cp_error case, we can wait for any writeback page */
f2fs_flush_merged_writes(sbi);
iput(sbi->node_inode);
iput(sbi->meta_inode);
/* destroy f2fs internal modules */
destroy_node_manager(sbi);
destroy_segment_manager(sbi);
kfree(sbi->ckpt);
f2fs_unregister_sysfs(sbi);
sb->s_fs_info = NULL;
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->raw_super);
destroy_device_list(sbi);
mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
destroy_percpu_info(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
kfree(sbi->write_io[i]);
kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int err = 0;
trace_f2fs_sync_fs(sb, sync);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return -EAGAIN;
if (sync) {
struct cp_control cpc;
cpc.reason = __get_cp_reason(sbi);
mutex_lock(&sbi->gc_mutex);
err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
}
f2fs_trace_ios(NULL, 1);
return err;
}
static int f2fs_freeze(struct super_block *sb)
{
if (f2fs_readonly(sb))
return 0;
/* IO error happened before */
if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
return -EIO;
/* must be clean, since sync_filesystem() was already called */
if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
return -EINVAL;
return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
{
return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
kprojid_t projid, struct kstatfs *buf)
{
struct kqid qid;
struct dquot *dquot;
u64 limit;
u64 curblock;
qid = make_kqid_projid(projid);
dquot = dqget(sb, qid);
if (IS_ERR(dquot))
return PTR_ERR(dquot);
spin_lock(&dq_data_lock);
limit = (dquot->dq_dqb.dqb_bsoftlimit ?
dquot->dq_dqb.dqb_bsoftlimit :
dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
if (limit && buf->f_blocks > limit) {
curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
buf->f_blocks = limit;
buf->f_bfree = buf->f_bavail =
(buf->f_blocks > curblock) ?
(buf->f_blocks - curblock) : 0;
}
limit = dquot->dq_dqb.dqb_isoftlimit ?
dquot->dq_dqb.dqb_isoftlimit :
dquot->dq_dqb.dqb_ihardlimit;
if (limit && buf->f_files > limit) {
buf->f_files = limit;
buf->f_ffree =
(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
(buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
}
spin_unlock(&dq_data_lock);
dqput(dquot);
return 0;
}
#endif
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
block_t total_count, user_block_count, start_count, ovp_count;
u64 avail_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
user_block_count = sbi->user_block_count;
start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
buf->f_type = F2FS_SUPER_MAGIC;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
sbi->reserved_blocks;
avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
if (avail_node_count > user_block_count) {
buf->f_files = user_block_count;
buf->f_ffree = buf->f_bavail;
} else {
buf->f_files = avail_node_count;
buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
buf->f_bavail);
}
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
#ifdef CONFIG_QUOTA
if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
}
#endif
return 0;
}
static inline void f2fs_show_quota_options(struct seq_file *seq,
struct super_block *sb)
{
#ifdef CONFIG_QUOTA
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (sbi->s_jquota_fmt) {
char *fmtname = "";
switch (sbi->s_jquota_fmt) {
case QFMT_VFS_OLD:
fmtname = "vfsold";
break;
case QFMT_VFS_V0:
fmtname = "vfsv0";
break;
case QFMT_VFS_V1:
fmtname = "vfsv1";
break;
}
seq_printf(seq, ",jqfmt=%s", fmtname);
}
if (sbi->s_qf_names[USRQUOTA])
seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
if (sbi->s_qf_names[GRPQUOTA])
seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
if (sbi->s_qf_names[PRJQUOTA])
seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
#endif
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
if (test_opt(sbi, FORCE_FG_GC))
seq_printf(seq, ",background_gc=%s", "sync");
else
seq_printf(seq, ",background_gc=%s", "on");
} else {
seq_printf(seq, ",background_gc=%s", "off");
}
if (test_opt(sbi, DISABLE_ROLL_FORWARD))
seq_puts(seq, ",disable_roll_forward");
if (test_opt(sbi, DISCARD))
seq_puts(seq, ",discard");
if (test_opt(sbi, NOHEAP))
seq_puts(seq, ",no_heap");
else
seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
if (test_opt(sbi, XATTR_USER))
seq_puts(seq, ",user_xattr");
else
seq_puts(seq, ",nouser_xattr");
if (test_opt(sbi, INLINE_XATTR))
seq_puts(seq, ",inline_xattr");
else
seq_puts(seq, ",noinline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
if (test_opt(sbi, POSIX_ACL))
seq_puts(seq, ",acl");
else
seq_puts(seq, ",noacl");
#endif
if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
seq_puts(seq, ",disable_ext_identify");
if (test_opt(sbi, INLINE_DATA))
seq_puts(seq, ",inline_data");
else
seq_puts(seq, ",noinline_data");
if (test_opt(sbi, INLINE_DENTRY))
seq_puts(seq, ",inline_dentry");
else
seq_puts(seq, ",noinline_dentry");
if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
seq_puts(seq, ",flush_merge");
if (test_opt(sbi, NOBARRIER))
seq_puts(seq, ",nobarrier");
if (test_opt(sbi, FASTBOOT))
seq_puts(seq, ",fastboot");
if (test_opt(sbi, EXTENT_CACHE))
seq_puts(seq, ",extent_cache");
else
seq_puts(seq, ",noextent_cache");
if (test_opt(sbi, DATA_FLUSH))
seq_puts(seq, ",data_flush");
seq_puts(seq, ",mode=");
if (test_opt(sbi, ADAPTIVE))
seq_puts(seq, "adaptive");
else if (test_opt(sbi, LFS))
seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
if (F2FS_IO_SIZE_BITS(sbi))
seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (test_opt(sbi, FAULT_INJECTION))
seq_printf(seq, ",fault_injection=%u",
sbi->fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
if (test_opt(sbi, QUOTA))
seq_puts(seq, ",quota");
if (test_opt(sbi, USRQUOTA))
seq_puts(seq, ",usrquota");
if (test_opt(sbi, GRPQUOTA))
seq_puts(seq, ",grpquota");
if (test_opt(sbi, PRJQUOTA))
seq_puts(seq, ",prjquota");
#endif
f2fs_show_quota_options(seq, sbi->sb);
return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
/* init some FS parameters */
sbi->active_logs = NR_CURSEG_TYPE;
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
set_opt(sbi, NOHEAP);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
set_opt(sbi, DISCARD);
} else {
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
}
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
set_opt(sbi, POSIX_ACL);
#endif
#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(sbi, 0);
#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct f2fs_mount_info org_mount_opt;
unsigned long old_sb_flags;
int err, active_logs;
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info ffi = sbi->fault_info;
#endif
#ifdef CONFIG_QUOTA
int s_jquota_fmt;
char *s_qf_names[MAXQUOTAS];
int i, j;
#endif
/*
* Save the old mount options in case we
* need to restore them.
*/
org_mount_opt = sbi->mount_opt;
old_sb_flags = sb->s_flags;
active_logs = sbi->active_logs;
#ifdef CONFIG_QUOTA
s_jquota_fmt = sbi->s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
if (sbi->s_qf_names[i]) {
s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
GFP_KERNEL);
if (!s_qf_names[i]) {
for (j = 0; j < i; j++)
kfree(s_qf_names[j]);
return -ENOMEM;
}
} else {
s_qf_names[i] = NULL;
}
}
#endif
/* recover superblocks we couldn't write due to previous RO mount */
if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
err = f2fs_commit_super(sbi, false);
f2fs_msg(sb, KERN_INFO,
"Try to recover all the superblocks, ret: %d", err);
if (!err)
clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
}
default_options(sbi);
/* parse mount options */
err = parse_options(sb, data);
if (err)
goto restore_opts;
/*
* Both the previous and the new state of the filesystem are RO,
* so skip checking GC and FLUSH_MERGE conditions.
*/
if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
goto skip;
if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
} else {
/* dquot_resume needs RW */
sb->s_flags &= ~MS_RDONLY;
dquot_resume(sb, -1);
}
/* disallow enabling/disabling extent_cache dynamically */
if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
err = -EINVAL;
f2fs_msg(sbi->sb, KERN_WARNING,
"switch extent_cache option is not allowed");
goto restore_opts;
}
/*
* We stop the GC thread if the FS is mounted as RO
* or if background_gc=off is passed as a mount
* option. Also sync the filesystem.
*/
if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
if (sbi->gc_thread) {
stop_gc_thread(sbi);
need_restart_gc = true;
}
} else if (!sbi->gc_thread) {
err = start_gc_thread(sbi);
if (err)
goto restore_opts;
need_stop_gc = true;
}
if (*flags & MS_RDONLY) {
writeback_inodes_sb(sb, WB_REASON_SYNC);
sync_inodes_sb(sb);
set_sbi_flag(sbi, SBI_IS_DIRTY);
set_sbi_flag(sbi, SBI_IS_CLOSE);
f2fs_sync_fs(sb, 1);
clear_sbi_flag(sbi, SBI_IS_CLOSE);
}
/*
* We stop the issue-flush thread if the FS is mounted as RO
* or if flush_merge is not passed as a mount option.
*/
if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
clear_opt(sbi, FLUSH_MERGE);
destroy_flush_cmd_control(sbi, false);
} else {
err = create_flush_cmd_control(sbi);
if (err)
goto restore_gc;
}
skip:
#ifdef CONFIG_QUOTA
/* Release old quota file names */
for (i = 0; i < MAXQUOTAS; i++)
kfree(s_qf_names[i]);
#endif
/* Update the POSIXACL Flag */
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
return 0;
restore_gc:
if (need_restart_gc) {
if (start_gc_thread(sbi))
f2fs_msg(sbi->sb, KERN_WARNING,
"background gc thread has stopped");
} else if (need_stop_gc) {
stop_gc_thread(sbi);
}
restore_opts:
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
kfree(sbi->s_qf_names[i]);
sbi->s_qf_names[i] = s_qf_names[i];
}
#endif
sbi->mount_opt = org_mount_opt;
sbi->active_logs = active_logs;
sb->s_flags = old_sb_flags;
#ifdef CONFIG_F2FS_FAULT_INJECTION
sbi->fault_info = ffi;
#endif
return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
struct inode *inode = sb_dqopt(sb)->files[type];
struct address_space *mapping = inode->i_mapping;
block_t blkidx = F2FS_BYTES_TO_BLK(off);
int offset = off & (sb->s_blocksize - 1);
int tocopy;
size_t toread;
loff_t i_size = i_size_read(inode);
struct page *page;
char *kaddr;
if (off > i_size)
return 0;
if (off + len > i_size)
len = i_size - off;
toread = len;
while (toread > 0) {
tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
page = read_mapping_page(mapping, blkidx, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
if (unlikely(!PageUptodate(page))) {
f2fs_put_page(page, 1);
return -EIO;
}
kaddr = kmap_atomic(page);
memcpy(data, kaddr + offset, tocopy);
kunmap_atomic(kaddr);
f2fs_put_page(page, 1);
offset = 0;
toread -= tocopy;
data += tocopy;
blkidx++;
}
return len;
}
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
struct inode *inode = sb_dqopt(sb)->files[type];
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *a_ops = mapping->a_ops;
int offset = off & (sb->s_blocksize - 1);
size_t towrite = len;
struct page *page;
char *kaddr;
int err = 0;
int tocopy;
while (towrite > 0) {
tocopy = min_t(unsigned long, sb->s_blocksize - offset,
towrite);
err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
&page, NULL);
if (unlikely(err))
break;
kaddr = kmap_atomic(page);
memcpy(kaddr + offset, data, tocopy);
kunmap_atomic(kaddr);
flush_dcache_page(page);
a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
page, NULL);
offset = 0;
towrite -= tocopy;
off += tocopy;
data += tocopy;
cond_resched();
}
if (len == towrite)
return 0;
inode->i_version++;
inode->i_mtime = inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, false);
return len - towrite;
}
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
return F2FS_I(inode)->i_dquot;
}
static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
return &F2FS_I(inode)->i_reserved_quota;
}
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
sbi->s_jquota_fmt, type);
}
void f2fs_enable_quota_files(struct f2fs_sb_info *sbi)
{
int i, ret;
for (i = 0; i < MAXQUOTAS; i++) {
if (sbi->s_qf_names[i]) {
ret = f2fs_quota_on_mount(sbi, i);
if (ret < 0)
f2fs_msg(sbi->sb, KERN_ERR,
"Cannot turn on journaled "
"quota: error %d", ret);
}
}
}
static int f2fs_quota_sync(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
int ret;
ret = dquot_writeback_dquots(sb, type);
if (ret)
return ret;
/*
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
if (ret)
return ret;
inode_lock(dqopt->files[cnt]);
truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
inode_unlock(dqopt->files[cnt]);
}
return 0;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path)
{
struct inode *inode;
int err;
err = f2fs_quota_sync(sb, type);
if (err)
return err;
err = dquot_quota_on(sb, type, format_id, path);
if (err)
return err;
inode = d_inode(path->dentry);
inode_lock(inode);
F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
S_NOATIME | S_IMMUTABLE);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
struct inode *inode = sb_dqopt(sb)->files[type];
int err;
if (!inode || !igrab(inode))
return dquot_quota_off(sb, type);
f2fs_quota_sync(sb, type);
err = dquot_quota_off(sb, type);
if (err)
goto out_put;
inode_lock(inode);
F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
out_put:
iput(inode);
return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
int type;
for (type = 0; type < MAXQUOTAS; type++)
f2fs_quota_off(sb, type);
}
int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
*projid = F2FS_I(inode)->i_projid;
return 0;
}
static const struct dquot_operations f2fs_quota_operations = {
.get_reserved_space = f2fs_get_reserved_space,
.write_dquot = dquot_commit,
.acquire_dquot = dquot_acquire,
.release_dquot = dquot_release,
.mark_dirty = dquot_mark_dquot_dirty,
.write_info = dquot_commit_info,
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
.get_projid = f2fs_get_projid,
.get_next_id = dquot_get_next_id,
};
static const struct quotactl_ops f2fs_quotactl_ops = {
.quota_on = f2fs_quota_on,
.quota_off = f2fs_quota_off,
.quota_sync = f2fs_quota_sync,
.get_state = dquot_get_state,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk,
.get_nextdqblk = dquot_get_next_dqblk,
};
#else
void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
.alloc_inode = f2fs_alloc_inode,
.drop_inode = f2fs_drop_inode,
.destroy_inode = f2fs_destroy_inode,
.write_inode = f2fs_write_inode,
.dirty_inode = f2fs_dirty_inode,
.show_options = f2fs_show_options,
#ifdef CONFIG_QUOTA
.quota_read = f2fs_quota_read,
.quota_write = f2fs_quota_write,
.get_dquots = f2fs_get_dquots,
#endif
.evict_inode = f2fs_evict_inode,
.put_super = f2fs_put_super,
.sync_fs = f2fs_sync_fs,
.freeze_fs = f2fs_freeze,
.unfreeze_fs = f2fs_unfreeze,
.statfs = f2fs_statfs,
.remount_fs = f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
ctx, len, NULL);
}
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data)
{
return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
ctx, len, fs_data, XATTR_CREATE);
}
static unsigned f2fs_max_namelen(struct inode *inode)
{
return S_ISLNK(inode->i_mode) ?
inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}
static const struct fscrypt_operations f2fs_cryptops = {
.key_prefix = "f2fs:",
.get_context = f2fs_get_context,
.set_context = f2fs_set_context,
.is_encrypted = f2fs_encrypted_inode,
.empty_dir = f2fs_empty_dir,
.max_namelen = f2fs_max_namelen,
};
#else
static const struct fscrypt_operations f2fs_cryptops = {
.is_encrypted = f2fs_encrypted_inode,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct inode *inode;
if (check_nid_range(sbi, ino))
return ERR_PTR(-ESTALE);
/*
* f2fs_iget isn't quite right if the inode is currently unallocated!
* However f2fs_iget currently does appropriate checks to handle stale
* inodes so everything is OK.
*/
inode = f2fs_iget(sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (unlikely(generation && inode->i_generation != generation)) {
/* we didn't find the right inode.. */
iput(inode);
return ERR_PTR(-ESTALE);
}
return inode;
}
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
f2fs_nfs_get_inode);
}
static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_parent(sb, fid, fh_len, fh_type,
f2fs_nfs_get_inode);
}
static const struct export_operations f2fs_export_ops = {
.fh_to_dentry = f2fs_fh_to_dentry,
.fh_to_parent = f2fs_fh_to_parent,
.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
loff_t result = 0;
loff_t leaf_count = ADDRS_PER_BLOCK;
/*
* note: previously, result was equal to (DEF_ADDRS_PER_INODE -
* F2FS_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
* space in inode.i_addr, so it is safer to start
* result at zero.
*/
/* two direct node blocks */
result += (leaf_count * 2);
/* two indirect node blocks */
leaf_count *= NIDS_PER_BLOCK;
result += (leaf_count * 2);
/* one double indirect node block */
leaf_count *= NIDS_PER_BLOCK;
result += leaf_count;
return result;
}
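/*
 * Illustrative arithmetic (editor's note; assumes the common 4KB
 * configuration in which ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 *
 *   two direct node blocks:     2 * 1018          =         2,036
 *   two indirect node blocks:   2 * 1018 * 1018   =     2,072,648
 *   one double indirect block:  1018^3            = 1,054,977,832
 *   total                                        ~= 1.057e9 blocks
 *
 * or roughly 3.94 TiB at 4KB per block. If the constants differ in a
 * particular build, only the numbers change, not the shape of the sum.
 */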
static int __f2fs_commit_super(struct buffer_head *bh,
struct f2fs_super_block *super)
{
lock_buffer(bh);
if (super)
memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
unlock_buffer(bh);
/* it's a rare case, so we can do FUA all the time */
return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
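/*
 * The boundary checks below walk the metadata areas in their on-disk
 * order. A rough sketch (editor's illustration; the exact segment
 * counts come from the superblock fields being validated):
 *
 *   +----+----+-----+-----+-----+------------------+
 *   | SB | CP | SIT | NAT | SSA |       MAIN       |
 *   +----+----+-----+-----+-----+------------------+
 *        ^cp_blkaddr                ^main_blkaddr
 *
 * Each area must begin exactly where the previous one ends, i.e.
 * prev_blkaddr + (segment_count_prev << log_blocks_per_seg) must
 * equal the next area's start address.
 */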
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
struct buffer_head *bh)
{
struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
(bh->b_data + F2FS_SUPER_OFFSET);
struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
u64 main_end_blkaddr = main_blkaddr +
(segment_count_main << log_blocks_per_seg);
u64 seg_end_blkaddr = segment0_blkaddr +
(segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_msg(sb, KERN_INFO,
"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
segment0_blkaddr, cp_blkaddr);
return true;
}
if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
sit_blkaddr) {
f2fs_msg(sb, KERN_INFO,
"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
cp_blkaddr, sit_blkaddr,
segment_count_ckpt << log_blocks_per_seg);
return true;
}
if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
nat_blkaddr) {
f2fs_msg(sb, KERN_INFO,
"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
sit_blkaddr, nat_blkaddr,
segment_count_sit << log_blocks_per_seg);
return true;
}
if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
ssa_blkaddr) {
f2fs_msg(sb, KERN_INFO,
"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
nat_blkaddr, ssa_blkaddr,
segment_count_nat << log_blocks_per_seg);
return true;
}
if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
main_blkaddr) {
f2fs_msg(sb, KERN_INFO,
"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
ssa_blkaddr, main_blkaddr,
segment_count_ssa << log_blocks_per_seg);
return true;
}
if (main_end_blkaddr > seg_end_blkaddr) {
f2fs_msg(sb, KERN_INFO,
"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
main_blkaddr,
segment0_blkaddr +
(segment_count << log_blocks_per_seg),
segment_count_main << log_blocks_per_seg);
return true;
} else if (main_end_blkaddr < seg_end_blkaddr) {
int err = 0;
char *res;
/* fix in-memory information all the time */
raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
segment0_blkaddr) >> log_blocks_per_seg);
if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
res = "internally";
} else {
err = __f2fs_commit_super(bh, NULL);
res = err ? "failed" : "done";
}
f2fs_msg(sb, KERN_INFO,
"Fix alignment : %s, start(%u) end(%u) block(%u)",
res, main_blkaddr,
segment0_blkaddr +
(segment_count << log_blocks_per_seg),
segment_count_main << log_blocks_per_seg);
if (err)
return true;
}
return false;
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
struct buffer_head *bh)
{
struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
(bh->b_data + F2FS_SUPER_OFFSET);
struct super_block *sb = sbi->sb;
unsigned int blocksize;
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
f2fs_msg(sb, KERN_INFO,
"Magic Mismatch, valid(0x%x) - read(0x%x)",
F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
return 1;
}
/* Currently, support only 4KB page cache size */
if (F2FS_BLKSIZE != PAGE_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid page_cache_size (%lu), supports only 4KB\n",
PAGE_SIZE);
return 1;
}
/* Currently, support only 4KB block size */
blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
if (blocksize != F2FS_BLKSIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid blocksize (%u), supports only 4KB\n",
blocksize);
return 1;
}
/* check log blocks per segment */
if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
f2fs_msg(sb, KERN_INFO,
"Invalid log blocks per segment (%u)\n",
le32_to_cpu(raw_super->log_blocks_per_seg));
return 1;
}
/* Currently, support 512/1024/2048/4096 bytes sector size */
if (le32_to_cpu(raw_super->log_sectorsize) >
F2FS_MAX_LOG_SECTOR_SIZE ||
le32_to_cpu(raw_super->log_sectorsize) <
F2FS_MIN_LOG_SECTOR_SIZE) {
f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
le32_to_cpu(raw_super->log_sectorsize));
return 1;
}
if (le32_to_cpu(raw_super->log_sectors_per_block) +
le32_to_cpu(raw_super->log_sectorsize) !=
F2FS_MAX_LOG_SECTOR_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid log sectors per block(%u) log sectorsize(%u)",
le32_to_cpu(raw_super->log_sectors_per_block),
le32_to_cpu(raw_super->log_sectorsize));
return 1;
}
/* check reserved ino info */
if (le32_to_cpu(raw_super->node_ino) != 1 ||
le32_to_cpu(raw_super->meta_ino) != 2 ||
le32_to_cpu(raw_super->root_ino) != 3) {
f2fs_msg(sb, KERN_INFO,
"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
le32_to_cpu(raw_super->node_ino),
le32_to_cpu(raw_super->meta_ino),
le32_to_cpu(raw_super->root_ino));
return 1;
}
if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
f2fs_msg(sb, KERN_INFO,
"Invalid segment count (%u)",
le32_to_cpu(raw_super->segment_count));
return 1;
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sbi, bh))
return 1;
return 0;
}
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned int ovp_segments, reserved_segments;
unsigned int main_segs, blocks_per_seg;
int i;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
fsmeta += le32_to_cpu(raw_super->segment_count_sit);
fsmeta += le32_to_cpu(raw_super->segment_count_nat);
fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
if (unlikely(fsmeta >= total))
return 1;
ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
ovp_segments == 0 || reserved_segments == 0)) {
f2fs_msg(sbi->sb, KERN_ERR,
"Wrong layout: check mkfs.f2fs version");
return 1;
}
main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg;
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
return 1;
}
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
return 1;
}
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
}
return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = sbi->raw_super;
int i, j;
sbi->log_sectors_per_block =
le32_to_cpu(raw_super->log_sectors_per_block);
sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
sbi->blocksize = 1 << sbi->log_blocksize;
sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
sbi->total_sections = le32_to_cpu(raw_super->section_count);
sbi->total_node_count =
(le32_to_cpu(raw_super->segment_count_nat) / 2)
* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
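/* Editor's note (an assumption based on the usual f2fs on-disk
 * format): the NAT area holds two copies of each NAT block for
 * checkpointing, which is why segment_count_nat is halved before
 * computing how many node IDs the NAT can address.
 */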
sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
sbi->cur_victim_sec = NULL_SECNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
for (i = 0; i < NR_COUNT_TYPE; i++)
atomic_set(&sbi->nr_pages[i], 0);
atomic_set(&sbi->wb_sync_req, 0);
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
for (i = 0; i < NR_PAGE_TYPE - 1; i++)
for (j = HOT; j < NR_TEMP_TYPE; j++)
mutex_init(&sbi->wio_mutex[i][j]);
spin_lock_init(&sbi->cp_lock);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
int err;
err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
if (err)
return err;
return percpu_counter_init(&sbi->total_valid_inode_count, 0,
GFP_KERNEL);
}
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
struct block_device *bdev = FDEV(devi).bdev;
sector_t nr_sectors = bdev->bd_part->nr_sects;
sector_t sector = 0;
struct blk_zone *zones;
unsigned int i, nr_zones;
unsigned int n = 0;
int err = -EIO;
if (!f2fs_sb_mounted_blkzoned(sbi->sb))
return 0;
if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
return -EINVAL;
sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
__ilog2_u32(sbi->blocks_per_blkz))
return -EINVAL;
sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
sbi->log_blocks_per_blkz;
if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
FDEV(devi).nr_blkz++;
FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
if (!FDEV(devi).blkz_type)
return -ENOMEM;
#define F2FS_REPORT_NR_ZONES 4096
zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
GFP_KERNEL);
if (!zones)
return -ENOMEM;
/* Get block zone types */
while (zones && sector < nr_sectors) {
nr_zones = F2FS_REPORT_NR_ZONES;
err = blkdev_report_zones(bdev, sector,
zones, &nr_zones,
GFP_KERNEL);
if (err)
break;
if (!nr_zones) {
err = -EIO;
break;
}
for (i = 0; i < nr_zones; i++) {
FDEV(devi).blkz_type[n] = zones[i].type;
sector += zones[i].len;
n++;
}
}
kfree(zones);
return err;
}
#endif
/*
* Read the f2fs raw superblock.
* Because we keep two copies of the superblock, read both of them
* to get the first valid one. If either of them is broken, we pass
* a recovery flag back to the caller.
*/
static int read_raw_super_block(struct f2fs_sb_info *sbi,
struct f2fs_super_block **raw_super,
int *valid_super_block, int *recovery)
{
struct super_block *sb = sbi->sb;
int block;
struct buffer_head *bh;
struct f2fs_super_block *super;
int err = 0;
super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
if (!super)
return -ENOMEM;
for (block = 0; block < 2; block++) {
bh = sb_bread(sb, block);
if (!bh) {
f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
block + 1);
err = -EIO;
continue;
}
/* sanity checking of raw super */
if (sanity_check_raw_super(sbi, bh)) {
f2fs_msg(sb, KERN_ERR,
"Can't find valid F2FS filesystem in %dth superblock",
block + 1);
err = -EINVAL;
brelse(bh);
continue;
}
if (!*raw_super) {
memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
sizeof(*super));
*valid_super_block = block;
*raw_super = super;
}
brelse(bh);
}
/* Failed to read at least one of the superblocks */
if (err < 0)
*recovery = 1;
/* No valid superblock */
if (!*raw_super)
kfree(super);
else
err = 0;
return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
struct buffer_head *bh;
int err;
if ((recover && f2fs_readonly(sbi->sb)) ||
bdev_read_only(sbi->sb->s_bdev)) {
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
return -EROFS;
}
/* write back-up superblock first */
bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
if (!bh)
return -EIO;
err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
brelse(bh);
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
return err;
/* write current valid superblock */
bh = sb_getblk(sbi->sb, sbi->valid_super_block);
if (!bh)
return -EIO;
err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
brelse(bh);
return err;
}
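/*
 * Editor's note: writing the backup superblock before the currently
 * valid one appears deliberate; if the machine dies between the two
 * writes, the copy that was valid at mount time is still intact on
 * disk. This rationale is inferred from the code, not stated in the
 * original source.
 */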
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
unsigned int max_devices = MAX_DEVICES;
int i;
/* Initialize single device information */
if (!RDEV(0).path[0]) {
if (!bdev_is_zoned(sbi->sb->s_bdev))
return 0;
max_devices = 1;
}
/*
* Initialize information for multiple devices, or for a single
* zoned block device.
*/
sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
GFP_KERNEL);
if (!sbi->devs)
return -ENOMEM;
for (i = 0; i < max_devices; i++) {
if (i > 0 && !RDEV(i).path[0])
break;
if (max_devices == 1) {
/* Single zoned block device mount */
FDEV(0).bdev =
blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
sbi->sb->s_mode, sbi->sb->s_type);
} else {
/* Multi-device mount */
memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
FDEV(i).total_segments =
le32_to_cpu(RDEV(i).total_segments);
if (i == 0) {
FDEV(i).start_blk = 0;
FDEV(i).end_blk = FDEV(i).start_blk +
(FDEV(i).total_segments <<
sbi->log_blocks_per_seg) - 1 +
le32_to_cpu(raw_super->segment0_blkaddr);
} else {
FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
FDEV(i).end_blk = FDEV(i).start_blk +
(FDEV(i).total_segments <<
sbi->log_blocks_per_seg) - 1;
}
FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
sbi->sb->s_mode, sbi->sb->s_type);
}
if (IS_ERR(FDEV(i).bdev))
return PTR_ERR(FDEV(i).bdev);
/* to release errored devices */
sbi->s_ndevs = i + 1;
#ifdef CONFIG_BLK_DEV_ZONED
if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
!f2fs_sb_mounted_blkzoned(sbi->sb)) {
f2fs_msg(sbi->sb, KERN_ERR,
"Zoned block device feature not enabled\n");
return -EINVAL;
}
if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
if (init_blkz_info(sbi, i)) {
f2fs_msg(sbi->sb, KERN_ERR,
"Failed to initialize F2FS blkzone information");
return -EINVAL;
}
if (max_devices == 1)
break;
f2fs_msg(sbi->sb, KERN_INFO,
"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
i, FDEV(i).path,
FDEV(i).total_segments,
FDEV(i).start_blk, FDEV(i).end_blk,
bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
"Host-aware" : "Host-managed");
continue;
}
#endif
f2fs_msg(sbi->sb, KERN_INFO,
"Mount Device [%2d]: %20s, %8u, %8x - %8x",
i, FDEV(i).path,
FDEV(i).total_segments,
FDEV(i).start_blk, FDEV(i).end_blk);
}
f2fs_msg(sbi->sb, KERN_INFO,
"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
return 0;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
struct f2fs_super_block *raw_super;
struct inode *root;
int err;
bool retry = true, need_fsck = false;
char *options = NULL;
int recovery, i, valid_super_block;
struct curseg_info *seg_i;
try_onemore:
err = -EINVAL;
raw_super = NULL;
valid_super_block = -1;
recovery = 0;
/* allocate memory for f2fs-specific super block info */
sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sbi->sb = sb;
/* Load the checksum driver */
sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
if (IS_ERR(sbi->s_chksum_driver)) {
f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
err = PTR_ERR(sbi->s_chksum_driver);
sbi->s_chksum_driver = NULL;
goto free_sbi;
}
/* set a block size */
if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
goto free_sbi;
}
err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
&recovery);
if (err)
goto free_sbi;
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sb))
sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
sizeof(raw_super->uuid));
/*
* The BLKZONED feature indicates that the drive was formatted with
* zone alignment optimization. This is optional for host-aware
* devices, but mandatory for host-managed zoned block devices.
*/
#ifndef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_mounted_blkzoned(sb)) {
f2fs_msg(sb, KERN_ERR,
"Zoned block device support is not enabled\n");
err = -EOPNOTSUPP;
goto free_sb_buf;
}
#endif
default_options(sbi);
/* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL);
if (data && !options) {
err = -ENOMEM;
goto free_sb_buf;
}
err = parse_options(sb, options);
if (err)
goto free_options;
sbi->max_file_blocks = max_file_blocks();
sb->s_maxbytes = sbi->max_file_blocks <<
le32_to_cpu(raw_super->log_blocksize);
sb->s_max_links = F2FS_LINK_MAX;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
#ifdef CONFIG_QUOTA
sb->dq_op = &f2fs_quota_operations;
sb->s_qcop = &f2fs_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
sb->s_op = &f2fs_sops;
sb->s_cop = &f2fs_cryptops;
sb->s_xattr = f2fs_xattr_handlers;
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
sb->s_time_gran = 1;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
init_rwsem(&sbi->node_change);
/* disallow all the data/node/meta page writes */
set_sbi_flag(sbi, SBI_POR_DOING);
spin_lock_init(&sbi->stat_lock);
/* init iostat info */
spin_lock_init(&sbi->iostat_lock);
sbi->iostat_enable = false;
for (i = 0; i < NR_PAGE_TYPE; i++) {
int n = (i == META) ? 1 : NR_TEMP_TYPE;
int j;
sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
GFP_KERNEL);
if (!sbi->write_io[i]) {
err = -ENOMEM;
goto free_options;
}
for (j = HOT; j < n; j++) {
init_rwsem(&sbi->write_io[i][j].io_rwsem);
sbi->write_io[i][j].sbi = sbi;
sbi->write_io[i][j].bio = NULL;
spin_lock_init(&sbi->write_io[i][j].io_lock);
INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
}
}
init_rwsem(&sbi->cp_rwsem);
init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi);
err = init_percpu_info(sbi);
if (err)
goto free_options;
if (F2FS_IO_SIZE(sbi) > 1) {
sbi->write_io_dummy =
mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
if (!sbi->write_io_dummy) {
err = -ENOMEM;
goto free_options;
}
}
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
goto free_io_dummy;
}
err = get_valid_checkpoint(sbi);
if (err) {
f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
goto free_meta_inode;
}
/* Initialize device list */
err = f2fs_scan_devices(sbi);
if (err) {
f2fs_msg(sb, KERN_ERR, "Failed to find devices");
goto free_devices;
}
sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count);
percpu_counter_set(&sbi->total_valid_inode_count,
le32_to_cpu(sbi->ckpt->valid_inode_count));
sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
sbi->total_valid_block_count =
le64_to_cpu(sbi->ckpt->valid_block_count);
sbi->last_valid_block_count = sbi->total_valid_block_count;
sbi->reserved_blocks = 0;
for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]);
spin_lock_init(&sbi->inode_lock[i]);
}
init_extent_cache_info(sbi);
init_ino_entry_info(sbi);
/* setup f2fs internal modules */
err = build_segment_manager(sbi);
if (err) {
f2fs_msg(sb, KERN_ERR,
"Failed to initialize F2FS segment manager");
goto free_sm;
}
err = build_node_manager(sbi);
if (err) {
f2fs_msg(sb, KERN_ERR,
"Failed to initialize F2FS node manager");
goto free_nm;
}
/* For write statistics */
if (sb->s_bdev->bd_part)
sbi->sectors_written_start =
(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
/* Read accumulated write IO statistics if exists */
seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
if (__exist_node_summaries(sbi))
sbi->kbytes_written =
le64_to_cpu(seg_i->journal->info.kbytes_written);
build_gc_manager(sbi);
/* get an inode for node space */
sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
if (IS_ERR(sbi->node_inode)) {
f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
err = PTR_ERR(sbi->node_inode);
goto free_nm;
}
f2fs_join_shrinker(sbi);
err = f2fs_build_stats(sbi);
if (err)
goto free_nm;
/* read root inode and dentry */
root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
if (IS_ERR(root)) {
f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
err = PTR_ERR(root);
goto free_node_inode;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
err = -EINVAL;
goto free_node_inode;
}
sb->s_root = d_make_root(root); /* allocate root dentry */
if (!sb->s_root) {
err = -ENOMEM;
goto free_root_inode;
}
err = f2fs_register_sysfs(sbi);
if (err)
goto free_root_inode;
/* if there are orphan nodes, free them */
err = recover_orphan_inodes(sbi);
if (err)
goto free_sysfs;
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
/*
* the mount should fail when the device is read-only and the
* previous checkpoint was not done by a clean system shutdown.
*/
if (bdev_read_only(sb->s_bdev) &&
!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
err = -EROFS;
goto free_meta;
}
if (need_fsck)
set_sbi_flag(sbi, SBI_NEED_FSCK);
if (!retry)
goto skip_recovery;
err = recover_fsync_data(sbi, false);
if (err < 0) {
need_fsck = true;
f2fs_msg(sb, KERN_ERR,
"Cannot recover all fsync data errno=%d", err);
goto free_meta;
}
} else {
err = recover_fsync_data(sbi, true);
if (!f2fs_readonly(sb) && err > 0) {
err = -EINVAL;
f2fs_msg(sb, KERN_ERR,
"Need to recover fsync data");
goto free_sysfs;
}
}
skip_recovery:
/* recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
/*
* If the filesystem is not mounted read-only,
* then start the gc_thread.
*/
if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
/* After POR, we can run the background GC thread. */
err = start_gc_thread(sbi);
if (err)
goto free_meta;
}
kfree(options);
/* recover broken superblock */
if (recovery) {
err = f2fs_commit_super(sbi, true);
f2fs_msg(sb, KERN_INFO,
"Try to recover %dth superblock, ret: %d",
sbi->valid_super_block ? 1 : 2, err);
}
f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
cur_cp_version(F2FS_CKPT(sbi)));
f2fs_update_time(sbi, CP_TIME);
f2fs_update_time(sbi, REQ_TIME);
return 0;
free_meta:
f2fs_sync_inode_meta(sbi);
/*
* Some dirty meta pages can be left behind when recover_orphan_inodes()
* fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
* followed by write_checkpoint() through f2fs_write_node_pages(), which
* falls into an infinite loop in sync_meta_pages().
*/
truncate_inode_pages_final(META_MAPPING(sbi));
free_sysfs:
f2fs_unregister_sysfs(sbi);
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
free_node_inode:
truncate_inode_pages_final(NODE_MAPPING(sbi));
mutex_lock(&sbi->umount_mutex);
release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
iput(sbi->node_inode);
mutex_unlock(&sbi->umount_mutex);
f2fs_destroy_stats(sbi);
free_nm:
destroy_node_manager(sbi);
free_sm:
destroy_segment_manager(sbi);
free_devices:
destroy_device_list(sbi);
kfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
free_io_dummy:
mempool_destroy(sbi->write_io_dummy);
free_options:
for (i = 0; i < NR_PAGE_TYPE; i++)
kfree(sbi->write_io[i]);
destroy_percpu_info(sbi);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
kfree(options);
free_sb_buf:
kfree(raw_super);
free_sbi:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi);
/* give only one more chance */
if (retry) {
retry = false;
shrink_dcache_sb(sb);
goto try_onemore;
}
return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
if (sb->s_root) {
set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
stop_gc_thread(F2FS_SB(sb));
stop_discard_thread(F2FS_SB(sb));
}
kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
.owner = THIS_MODULE,
.name = "f2fs",
.mount = f2fs_mount,
.kill_sb = kill_f2fs_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
sizeof(struct f2fs_inode_info), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
if (!f2fs_inode_cachep)
return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
int err;
f2fs_build_trace_ios();
err = init_inodecache();
if (err)
goto fail;
err = create_node_manager_caches();
if (err)
goto free_inodecache;
err = create_segment_manager_caches();
if (err)
goto free_node_manager_caches;
err = create_checkpoint_caches();
if (err)
goto free_segment_manager_caches;
err = create_extent_cache();
if (err)
goto free_checkpoint_caches;
err = f2fs_init_sysfs();
if (err)
goto free_extent_cache;
err = register_shrinker(&f2fs_shrinker_info);
if (err)
goto free_sysfs;
err = register_filesystem(&f2fs_fs_type);
if (err)
goto free_shrinker;
err = f2fs_create_root_stats();
if (err)
goto free_filesystem;
return 0;
free_filesystem:
unregister_filesystem(&f2fs_fs_type);
free_shrinker:
unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
f2fs_exit_sysfs();
free_extent_cache:
destroy_extent_cache();
free_checkpoint_caches:
destroy_checkpoint_caches();
free_segment_manager_caches:
destroy_segment_manager_caches();
free_node_manager_caches:
destroy_node_manager_caches();
free_inodecache:
destroy_inodecache();
fail:
return err;
}
static void __exit exit_f2fs_fs(void)
{
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
unregister_shrinker(&f2fs_shrinker_info);
f2fs_exit_sysfs();
destroy_extent_cache();
destroy_checkpoint_caches();
destroy_segment_manager_caches();
destroy_node_manager_caches();
destroy_inodecache();
f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_3012_2 |
crossvul-cpp_data_bad_5845_14 | /*
* iovec manipulation routines.
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Andrew Lunn : Errors in iovec copying.
* Pedro Roque : Added memcpy_fromiovecend and
* csum_..._fromiovecend.
* Andi Kleen : fixed error handling for 2.1
* Alexey Kuznetsov: 2.1 optimisations
* Andi Kleen : Fix csum*fromiovecend for IPv6.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <net/sock.h>
/*
* Verify iovec. The caller must ensure that the iovec is big enough
* to hold the message iovec.
*
* Save time by not doing access_ok; copy_*_user will catch faults
* in any case.
*/
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
{
int size, ct, err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
void __user *namep;
namep = (void __user __force *) m->msg_name;
err = move_addr_to_kernel(namep, m->msg_namelen,
address);
if (err < 0)
return err;
}
m->msg_name = address;
} else {
m->msg_name = NULL;
}
size = m->msg_iovlen * sizeof(struct iovec);
if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
return -EFAULT;
m->msg_iov = iov;
err = 0;
for (ct = 0; ct < m->msg_iovlen; ct++) {
size_t len = iov[ct].iov_len;
if (len > INT_MAX - err) {
len = INT_MAX - err;
iov[ct].iov_len = len;
}
err += len;
}
return err;
}
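/*
 * Worked example of the clamping loop above (editor's illustration,
 * hypothetical values): with two iovecs of lengths INT_MAX - 100 and
 * 4096, the running total would overflow an int, so the second iovec
 * is clamped to 100 bytes and the function returns exactly INT_MAX.
 * A well-formed iovec can therefore never yield a negative total.
 */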
/*
* Copy kernel to iovec. Returns -EFAULT on error.
*/
int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
int offset, int len)
{
int copy;
for (; len > 0; ++iov) {
/* Skip over the finished iovecs */
if (unlikely(offset >= iov->iov_len)) {
offset -= iov->iov_len;
continue;
}
copy = min_t(unsigned int, iov->iov_len - offset, len);
if (copy_to_user(iov->iov_base + offset, kdata, copy))
return -EFAULT;
offset = 0;
kdata += copy;
len -= copy;
}
return 0;
}
EXPORT_SYMBOL(memcpy_toiovecend);
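/*
 * Usage sketch (editor's illustration, hypothetical values): given
 * iovecs of lengths { 8, 8 }, offset = 10 and len = 6, the loop skips
 * the first iovec entirely (offset drops to 2), then copies 6 bytes
 * into the second iovec starting at its byte 2. Any remaining length
 * would continue into later iovecs with offset reset to 0.
 */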
/*
* Copy iovec to kernel. Returns -EFAULT on error.
*/
int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len)
{
/* Skip over the finished iovecs */
while (offset >= iov->iov_len) {
offset -= iov->iov_len;
iov++;
}
while (len > 0) {
u8 __user *base = iov->iov_base + offset;
int copy = min_t(unsigned int, len, iov->iov_len - offset);
offset = 0;
if (copy_from_user(kdata, base, copy))
return -EFAULT;
len -= copy;
kdata += copy;
iov++;
}
return 0;
}
EXPORT_SYMBOL(memcpy_fromiovecend);
/*
* And now for the all-in-one: copy and checksum from a user iovec
* directly to a datagram.
* All calls to csum_partial except the last must be in 32-bit chunks.
*
* ip_build_xmit must ensure that, when fragmenting, only the last
* call to this function will be unaligned.
*/
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
int offset, unsigned int len, __wsum *csump)
{
__wsum csum = *csump;
int partial_cnt = 0, err = 0;
/* Skip over the finished iovecs */
while (offset >= iov->iov_len) {
offset -= iov->iov_len;
iov++;
}
while (len > 0) {
u8 __user *base = iov->iov_base + offset;
int copy = min_t(unsigned int, len, iov->iov_len - offset);
offset = 0;
/* There is a remnant from previous iov. */
if (partial_cnt) {
int par_len = 4 - partial_cnt;
/* iov component is too short ... */
if (par_len > copy) {
if (copy_from_user(kdata, base, copy))
goto out_fault;
kdata += copy;
base += copy;
partial_cnt += copy;
len -= copy;
iov++;
if (len)
continue;
*csump = csum_partial(kdata - partial_cnt,
partial_cnt, csum);
goto out;
}
if (copy_from_user(kdata, base, par_len))
goto out_fault;
csum = csum_partial(kdata - partial_cnt, 4, csum);
kdata += par_len;
base += par_len;
copy -= par_len;
len -= par_len;
partial_cnt = 0;
}
if (len > copy) {
partial_cnt = copy % 4;
if (partial_cnt) {
copy -= partial_cnt;
if (copy_from_user(kdata + copy, base + copy,
partial_cnt))
goto out_fault;
}
}
if (copy) {
csum = csum_and_copy_from_user(base, kdata, copy,
csum, &err);
if (err)
goto out;
}
len -= copy + partial_cnt;
kdata += copy + partial_cnt;
iov++;
}
*csump = csum;
out:
return err;
out_fault:
err = -EFAULT;
goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
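/*
 * Worked example of the partial_cnt handling above (editor's
 * illustration): checksumming 10 bytes from iovecs of lengths
 * { 6, 4 }. The first pass copies all 6 bytes but checksums only the
 * first 4, leaving partial_cnt = 2 raw bytes at the end of kdata.
 * The second pass copies 2 more bytes to complete that 4-byte group,
 * feeds the whole group to csum_partial(), then checksums the final
 * 2 bytes. Every csum_partial() call except possibly the last thus
 * covers whole 32-bit words, as the comment above requires.
 */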
unsigned long iov_pages(const struct iovec *iov, int offset,
unsigned long nr_segs)
{
unsigned long seg, base;
int pages = 0, len, size;
while (nr_segs && (offset >= iov->iov_len)) {
offset -= iov->iov_len;
++iov;
--nr_segs;
}
for (seg = 0; seg < nr_segs; seg++) {
base = (unsigned long)iov[seg].iov_base + offset;
len = iov[seg].iov_len - offset;
size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
pages += size;
offset = 0;
}
return pages;
}
EXPORT_SYMBOL(iov_pages);
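/*
 * Worked example of the page-count formula (editor's illustration,
 * assuming 4KB pages): for a segment whose base sits 4000 bytes into
 * a page with len = 200, ((4000 + 200 + 4095) >> 12) == 2, matching
 * the two pages that the 200-byte span actually touches across the
 * page boundary.
 */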
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_14 |
crossvul-cpp_data_good_2891_7 | /* Keyring handling
*
* Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/keyring-type.h>
#include <keys/user-type.h>
#include <linux/assoc_array_priv.h>
#include <linux/uaccess.h>
#include "internal.h"
/*
* When plumbing the depths of the key tree, this sets a hard limit
* set on how deep we're willing to go.
*/
#define KEYRING_SEARCH_MAX_DEPTH 6
/*
* We keep all named keyrings in a hash to speed looking them up.
*/
#define KEYRING_NAME_HASH_SIZE (1 << 5)
/*
* We mark pointers we pass to the associative array with bit 1 set if
* they're keyrings and clear otherwise.
*/
#define KEYRING_PTR_SUBTYPE 0x2UL
static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
{
return (unsigned long)x & KEYRING_PTR_SUBTYPE;
}
static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
{
void *object = assoc_array_ptr_to_leaf(x);
return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
}
static inline void *keyring_key_to_ptr(struct key *key)
{
if (key->type == &key_type_keyring)
return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
return key;
}
static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
static DEFINE_RWLOCK(keyring_name_lock);
static inline unsigned keyring_hash(const char *desc)
{
unsigned bucket = 0;
for (; *desc; desc++)
bucket += (unsigned char)*desc;
return bucket & (KEYRING_NAME_HASH_SIZE - 1);
}
/*
* The keyring key type definition. Keyrings are simply keys of this type and
* can be treated as ordinary keys in addition to having their own special
* operations.
*/
static int keyring_preparse(struct key_preparsed_payload *prep);
static void keyring_free_preparse(struct key_preparsed_payload *prep);
static int keyring_instantiate(struct key *keyring,
struct key_preparsed_payload *prep);
static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen);
struct key_type key_type_keyring = {
.name = "keyring",
.def_datalen = 0,
.preparse = keyring_preparse,
.free_preparse = keyring_free_preparse,
.instantiate = keyring_instantiate,
.revoke = keyring_revoke,
.destroy = keyring_destroy,
.describe = keyring_describe,
.read = keyring_read,
};
EXPORT_SYMBOL(key_type_keyring);
/*
* Semaphore to serialise link calls so that two link calls running in
* parallel cannot introduce a cycle.
*/
static DECLARE_RWSEM(keyring_serialise_link_sem);
/*
* Publish the name of a keyring so that it can be found by name (if it has
* one).
*/
static void keyring_publish_name(struct key *keyring)
{
int bucket;
if (keyring->description) {
bucket = keyring_hash(keyring->description);
write_lock(&keyring_name_lock);
if (!keyring_name_hash[bucket].next)
INIT_LIST_HEAD(&keyring_name_hash[bucket]);
list_add_tail(&keyring->name_link,
&keyring_name_hash[bucket]);
write_unlock(&keyring_name_lock);
}
}
/*
* Preparse a keyring payload
*/
static int keyring_preparse(struct key_preparsed_payload *prep)
{
return prep->datalen != 0 ? -EINVAL : 0;
}
/*
* Free a preparse of a user defined key payload
*/
static void keyring_free_preparse(struct key_preparsed_payload *prep)
{
}
/*
* Initialise a keyring.
*
* Returns 0 on success, -EINVAL if given any data.
*/
static int keyring_instantiate(struct key *keyring,
struct key_preparsed_payload *prep)
{
assoc_array_init(&keyring->keys);
/* make the keyring available by name if it has one */
keyring_publish_name(keyring);
return 0;
}
/*
* Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd
* fold the carry back too, but that requires inline asm.
*/
static u64 mult_64x32_and_fold(u64 x, u32 y)
{
u64 hi = (u64)(u32)(x >> 32) * y;
u64 lo = (u64)(u32)(x) * y;
return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
}
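/*
 * Worked example (illustrative): mult_64x32_and_fold(0xffffffffffffffff, 2)
 * has the true 96-bit product 0x1fffffffffffffffe; the carry-out (bits 64-95,
 * here 1) is added back into the low word, giving 0xffffffffffffffff, so the
 * high bits contribute to the result instead of being discarded.
 */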
/*
* Hash a key type and description.
*/
static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
{
const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
const char *description = index_key->description;
unsigned long hash, type;
u32 piece;
u64 acc;
int n, desc_len = index_key->desc_len;
type = (unsigned long)index_key->type;
acc = mult_64x32_and_fold(type, desc_len + 13);
acc = mult_64x32_and_fold(acc, 9207);
for (;;) {
n = desc_len;
if (n <= 0)
break;
if (n > 4)
n = 4;
piece = 0;
memcpy(&piece, description, n);
description += n;
desc_len -= n;
acc = mult_64x32_and_fold(acc, piece);
acc = mult_64x32_and_fold(acc, 9207);
}
/* Fold the hash down to 32 bits if need be. */
hash = acc;
if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
hash ^= acc >> 32;
	/* Squidge all the keyrings into a separate part of the tree from
	 * ordinary keys by making sure the lowest level segment in the hash is
* zero for keyrings and non-zero otherwise.
*/
if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
return (hash + (hash << level_shift)) & ~fan_mask;
return hash;
}
/*
* Build the next index key chunk.
*
* On 32-bit systems the index key is laid out as:
*
* 0 4 5 9...
* hash desclen typeptr desc[]
*
* On 64-bit systems:
*
* 0 8 9 17...
* hash desclen typeptr desc[]
*
* We return it one word-sized chunk at a time.
*/
static unsigned long keyring_get_key_chunk(const void *data, int level)
{
const struct keyring_index_key *index_key = data;
unsigned long chunk = 0;
long offset = 0;
int desc_len = index_key->desc_len, n = sizeof(chunk);
level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
switch (level) {
case 0:
return hash_key_type_and_desc(index_key);
case 1:
return ((unsigned long)index_key->type << 8) | desc_len;
case 2:
if (desc_len == 0)
return (u8)((unsigned long)index_key->type >>
(ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
n--;
offset = 1;
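		/* Fall through to pick up the description bytes */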
default:
offset += sizeof(chunk) - 1;
offset += (level - 3) * sizeof(chunk);
if (offset >= desc_len)
return 0;
desc_len -= offset;
if (desc_len > n)
desc_len = n;
offset += desc_len;
do {
chunk <<= 8;
chunk |= ((u8*)index_key->description)[--offset];
} while (--desc_len > 0);
if (level == 2) {
chunk <<= 8;
chunk |= (u8)((unsigned long)index_key->type >>
(ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
}
return chunk;
}
}
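/*
 * Worked example (illustrative, 64-bit): for an index key with type pointer T
 * and description "abc" (desc_len 3), the chunks come out as:
 *
 *	level 0: hash_key_type_and_desc(index_key)
 *	level 1: (T << 8) | 3
 *	level 2: 'c' << 24 | 'b' << 16 | 'a' << 8 | (u8)(T >> 56)
 *
 * i.e. the description bytes are packed little-end-first above the type
 * pointer's top byte, and deeper levels return 0 once the description is
 * exhausted.
 */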
static unsigned long keyring_get_object_key_chunk(const void *object, int level)
{
const struct key *key = keyring_ptr_to_key(object);
return keyring_get_key_chunk(&key->index_key, level);
}
static bool keyring_compare_object(const void *object, const void *data)
{
const struct keyring_index_key *index_key = data;
const struct key *key = keyring_ptr_to_key(object);
return key->index_key.type == index_key->type &&
key->index_key.desc_len == index_key->desc_len &&
memcmp(key->index_key.description, index_key->description,
index_key->desc_len) == 0;
}
/*
* Compare the index keys of a pair of objects and determine the bit position
* at which they differ - if they differ.
*/
static int keyring_diff_objects(const void *object, const void *data)
{
const struct key *key_a = keyring_ptr_to_key(object);
const struct keyring_index_key *a = &key_a->index_key;
const struct keyring_index_key *b = data;
unsigned long seg_a, seg_b;
int level, i;
level = 0;
seg_a = hash_key_type_and_desc(a);
seg_b = hash_key_type_and_desc(b);
if ((seg_a ^ seg_b) != 0)
goto differ;
/* The number of bits contributed by the hash is controlled by a
* constant in the assoc_array headers. Everything else thereafter we
* can deal with as being machine word-size dependent.
*/
level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
seg_a = a->desc_len;
seg_b = b->desc_len;
if ((seg_a ^ seg_b) != 0)
goto differ;
/* The next bit may not work on big endian */
level++;
seg_a = (unsigned long)a->type;
seg_b = (unsigned long)b->type;
if ((seg_a ^ seg_b) != 0)
goto differ;
level += sizeof(unsigned long);
if (a->desc_len == 0)
goto same;
i = 0;
if (((unsigned long)a->description | (unsigned long)b->description) &
(sizeof(unsigned long) - 1)) {
do {
seg_a = *(unsigned long *)(a->description + i);
seg_b = *(unsigned long *)(b->description + i);
if ((seg_a ^ seg_b) != 0)
goto differ_plus_i;
i += sizeof(unsigned long);
} while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
}
for (; i < a->desc_len; i++) {
seg_a = *(unsigned char *)(a->description + i);
seg_b = *(unsigned char *)(b->description + i);
if ((seg_a ^ seg_b) != 0)
goto differ_plus_i;
}
same:
return -1;
differ_plus_i:
level += i;
differ:
i = level * 8 + __ffs(seg_a ^ seg_b);
return i;
}
/*
 * Free an object after stripping the keyring flag off the pointer.
*/
static void keyring_free_object(void *object)
{
key_put(keyring_ptr_to_key(object));
}
/*
* Operations for keyring management by the index-tree routines.
*/
static const struct assoc_array_ops keyring_assoc_array_ops = {
.get_key_chunk = keyring_get_key_chunk,
.get_object_key_chunk = keyring_get_object_key_chunk,
.compare_object = keyring_compare_object,
.diff_objects = keyring_diff_objects,
.free_object = keyring_free_object,
};
/*
* Clean up a keyring when it is destroyed. Unpublish its name if it had one
* and dispose of its data.
*
* The garbage collector detects the final key_put(), removes the keyring from
* the serial number tree and then does RCU synchronisation before coming here,
* so we shouldn't need to worry about code poking around here with the RCU
* readlock held by this time.
*/
static void keyring_destroy(struct key *keyring)
{
if (keyring->description) {
write_lock(&keyring_name_lock);
if (keyring->name_link.next != NULL &&
!list_empty(&keyring->name_link))
list_del(&keyring->name_link);
write_unlock(&keyring_name_lock);
}
if (keyring->restrict_link) {
struct key_restriction *keyres = keyring->restrict_link;
key_put(keyres->key);
kfree(keyres);
}
assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
}
/*
* Describe a keyring for /proc.
*/
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
if (keyring->description)
seq_puts(m, keyring->description);
else
seq_puts(m, "[anon]");
if (key_is_positive(keyring)) {
if (keyring->keys.nr_leaves_on_tree != 0)
seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else
seq_puts(m, ": empty");
}
}
struct keyring_read_iterator_context {
size_t buflen;
size_t count;
key_serial_t __user *buffer;
};
static int keyring_read_iterator(const void *object, void *data)
{
struct keyring_read_iterator_context *ctx = data;
const struct key *key = keyring_ptr_to_key(object);
int ret;
kenter("{%s,%d},,{%zu/%zu}",
key->type->name, key->serial, ctx->count, ctx->buflen);
if (ctx->count >= ctx->buflen)
return 1;
ret = put_user(key->serial, ctx->buffer);
if (ret < 0)
return ret;
ctx->buffer++;
ctx->count += sizeof(key->serial);
return 0;
}
/*
* Read a list of key IDs from the keyring's contents in binary form
*
* The keyring's semaphore is read-locked by the caller. This prevents someone
* from modifying it under us - which could cause us to read key IDs multiple
* times.
*/
static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
{
struct keyring_read_iterator_context ctx;
unsigned long nr_keys;
int ret;
kenter("{%d},,%zu", key_serial(keyring), buflen);
if (buflen & (sizeof(key_serial_t) - 1))
return -EINVAL;
nr_keys = keyring->keys.nr_leaves_on_tree;
if (nr_keys == 0)
return 0;
/* Calculate how much data we could return */
if (!buffer || !buflen)
return nr_keys * sizeof(key_serial_t);
/* Copy the IDs of the subscribed keys into the buffer */
ctx.buffer = (key_serial_t __user *)buffer;
ctx.buflen = buflen;
ctx.count = 0;
ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
if (ret < 0) {
kleave(" = %d [iterate]", ret);
return ret;
}
kleave(" = %zu [ok]", ctx.count);
return ctx.count;
}
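/*
 * Illustrative userspace sketch (libkeyutils, not code from this file): a
 * NULL/zero buffer makes the call report the space needed, so the usual
 * pattern is to size the buffer first:
 *
 *	long n = keyctl_read(ring, NULL, 0);	// bytes needed
 *	key_serial_t *ids = malloc(n);		// n is a multiple of 4
 *	n = keyctl_read(ring, (char *)ids, n);	// fills in the serial numbers
 */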
/*
 * Allocate a keyring and link it into the destination keyring.
*/
struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
const struct cred *cred, key_perm_t perm,
unsigned long flags,
struct key_restriction *restrict_link,
struct key *dest)
{
struct key *keyring;
int ret;
keyring = key_alloc(&key_type_keyring, description,
uid, gid, cred, perm, flags, restrict_link);
if (!IS_ERR(keyring)) {
ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
if (ret < 0) {
key_put(keyring);
keyring = ERR_PTR(ret);
}
}
return keyring;
}
EXPORT_SYMBOL(keyring_alloc);
/**
* restrict_link_reject - Give -EPERM to restrict link
* @keyring: The keyring being added to.
* @type: The type of key being added.
* @payload: The payload of the key intended to be added.
* @data: Additional data for evaluating restriction.
*
* Reject the addition of any links to a keyring. It can be overridden by
* passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
* adding a key to a keyring.
*
* This is meant to be stored in a key_restriction structure which is passed
* in the restrict_link parameter to keyring_alloc().
*/
int restrict_link_reject(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key)
{
return -EPERM;
}
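/*
 * Illustrative sketch (hypothetical caller): pairing restrict_link_reject
 * with keyring_alloc() gives a keyring that refuses all later links; the
 * name ".sealed" and the permission mask below are placeholders:
 *
 *	struct key_restriction *r = kzalloc(sizeof(*r), GFP_KERNEL);
 *	if (r)
 *		r->check = restrict_link_reject;
 *	keyring = keyring_alloc(".sealed", uid, gid, cred,
 *				KEY_POS_ALL | KEY_USR_VIEW,
 *				KEY_ALLOC_NOT_IN_QUOTA, r, NULL);
 */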
/*
 * By default, keys are found by an exact match on their descriptions.
*/
bool key_default_cmp(const struct key *key,
const struct key_match_data *match_data)
{
return strcmp(key->description, match_data->raw_data) == 0;
}
/*
* Iteration function to consider each key found.
*/
static int keyring_search_iterator(const void *object, void *iterator_data)
{
struct keyring_search_context *ctx = iterator_data;
const struct key *key = keyring_ptr_to_key(object);
unsigned long kflags = READ_ONCE(key->flags);
short state = READ_ONCE(key->state);
kenter("{%d}", key->serial);
/* ignore keys not of this type */
if (key->type != ctx->index_key.type) {
kleave(" = 0 [!type]");
return 0;
}
/* skip invalidated, revoked and expired keys */
if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED))) {
ctx->result = ERR_PTR(-EKEYREVOKED);
kleave(" = %d [invrev]", ctx->skipped_ret);
goto skipped;
}
if (key->expiry && ctx->now.tv_sec >= key->expiry) {
if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
ctx->result = ERR_PTR(-EKEYEXPIRED);
kleave(" = %d [expire]", ctx->skipped_ret);
goto skipped;
}
}
/* keys that don't match */
if (!ctx->match_data.cmp(key, &ctx->match_data)) {
kleave(" = 0 [!match]");
return 0;
}
/* key must have search permissions */
if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
key_task_permission(make_key_ref(key, ctx->possessed),
ctx->cred, KEY_NEED_SEARCH) < 0) {
ctx->result = ERR_PTR(-EACCES);
kleave(" = %d [!perm]", ctx->skipped_ret);
goto skipped;
}
if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
/* we set a different error code if we pass a negative key */
if (state < 0) {
ctx->result = ERR_PTR(state);
kleave(" = %d [neg]", ctx->skipped_ret);
goto skipped;
}
}
/* Found */
ctx->result = make_key_ref(key, ctx->possessed);
kleave(" = 1 [found]");
return 1;
skipped:
return ctx->skipped_ret;
}
/*
* Search inside a keyring for a key. We can search by walking to it
* directly based on its index-key or we can iterate over the entire
* tree looking for it, based on the match function.
*/
static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
{
if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) {
const void *object;
object = assoc_array_find(&keyring->keys,
&keyring_assoc_array_ops,
&ctx->index_key);
return object ? ctx->iterator(object, ctx) : 0;
}
return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
}
/*
* Search a tree of keyrings that point to other keyrings up to the maximum
* depth.
*/
static bool search_nested_keyrings(struct key *keyring,
struct keyring_search_context *ctx)
{
struct {
struct key *keyring;
struct assoc_array_node *node;
int slot;
} stack[KEYRING_SEARCH_MAX_DEPTH];
struct assoc_array_shortcut *shortcut;
struct assoc_array_node *node;
struct assoc_array_ptr *ptr;
struct key *key;
int sp = 0, slot;
kenter("{%d},{%s,%s}",
keyring->serial,
ctx->index_key.type->name,
ctx->index_key.description);
#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
if (ctx->index_key.description)
ctx->index_key.desc_len = strlen(ctx->index_key.description);
/* Check to see if this top-level keyring is what we are looking for
* and whether it is valid or not.
*/
if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
keyring_compare_object(keyring, &ctx->index_key)) {
ctx->skipped_ret = 2;
switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
case 1:
goto found;
case 2:
return false;
default:
break;
}
}
ctx->skipped_ret = 0;
/* Start processing a new keyring */
descend_to_keyring:
kdebug("descend to %d", keyring->serial);
if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED)))
goto not_this_keyring;
	/* Search through the keys in this keyring before searching its
* subtrees.
*/
if (search_keyring(keyring, ctx))
goto found;
/* Then manually iterate through the keyrings nested in this one.
*
* Start from the root node of the index tree. Because of the way the
* hash function has been set up, keyrings cluster on the leftmost
* branch of the root node (root slot 0) or in the root node itself.
* Non-keyrings avoid the leftmost branch of the root entirely (root
* slots 1-15).
*/
ptr = READ_ONCE(keyring->keys.root);
if (!ptr)
goto not_this_keyring;
if (assoc_array_ptr_is_shortcut(ptr)) {
/* If the root is a shortcut, either the keyring only contains
* keyring pointers (everything clusters behind root slot 0) or
* doesn't contain any keyring pointers.
*/
shortcut = assoc_array_ptr_to_shortcut(ptr);
smp_read_barrier_depends();
if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
goto not_this_keyring;
ptr = READ_ONCE(shortcut->next_node);
node = assoc_array_ptr_to_node(ptr);
goto begin_node;
}
node = assoc_array_ptr_to_node(ptr);
smp_read_barrier_depends();
ptr = node->slots[0];
if (!assoc_array_ptr_is_meta(ptr))
goto begin_node;
descend_to_node:
/* Descend to a more distal node in this keyring's content tree and go
* through that.
*/
kdebug("descend");
if (assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
smp_read_barrier_depends();
ptr = READ_ONCE(shortcut->next_node);
BUG_ON(!assoc_array_ptr_is_node(ptr));
}
node = assoc_array_ptr_to_node(ptr);
begin_node:
kdebug("begin_node");
smp_read_barrier_depends();
slot = 0;
ascend_to_node:
/* Go through the slots in a node */
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = READ_ONCE(node->slots[slot]);
if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
goto descend_to_node;
if (!keyring_ptr_is_keyring(ptr))
continue;
key = keyring_ptr_to_key(ptr);
if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
ctx->result = ERR_PTR(-ELOOP);
return false;
}
goto not_this_keyring;
}
/* Search a nested keyring */
if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
key_task_permission(make_key_ref(key, ctx->possessed),
ctx->cred, KEY_NEED_SEARCH) < 0)
continue;
/* stack the current position */
stack[sp].keyring = keyring;
stack[sp].node = node;
stack[sp].slot = slot;
sp++;
/* begin again with the new keyring */
keyring = key;
goto descend_to_keyring;
}
/* We've dealt with all the slots in the current node, so now we need
* to ascend to the parent and continue processing there.
*/
ptr = READ_ONCE(node->back_pointer);
slot = node->parent_slot;
if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
smp_read_barrier_depends();
ptr = READ_ONCE(shortcut->back_pointer);
slot = shortcut->parent_slot;
}
if (!ptr)
goto not_this_keyring;
node = assoc_array_ptr_to_node(ptr);
smp_read_barrier_depends();
slot++;
/* If we've ascended to the root (zero backpointer), we must have just
* finished processing the leftmost branch rather than the root slots -
* so there can't be any more keyrings for us to find.
*/
if (node->back_pointer) {
kdebug("ascend %d", slot);
goto ascend_to_node;
}
/* The keyring we're looking at was disqualified or didn't contain a
* matching key.
*/
not_this_keyring:
kdebug("not_this_keyring %d", sp);
if (sp <= 0) {
kleave(" = false");
return false;
}
/* Resume the processing of a keyring higher up in the tree */
sp--;
keyring = stack[sp].keyring;
node = stack[sp].node;
slot = stack[sp].slot + 1;
kdebug("ascend to %d [%d]", keyring->serial, slot);
goto ascend_to_node;
/* We found a viable match */
found:
key = key_ref_to_ptr(ctx->result);
key_check(key);
if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
key->last_used_at = ctx->now.tv_sec;
keyring->last_used_at = ctx->now.tv_sec;
while (sp > 0)
stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
}
kleave(" = true");
return true;
}
/**
* keyring_search_aux - Search a keyring tree for a key matching some criteria
* @keyring_ref: A pointer to the keyring with possession indicator.
* @ctx: The keyring search context.
*
* Search the supplied keyring tree for a key that matches the criteria given.
* The root keyring and any linked keyrings must grant Search permission to the
* caller to be searchable and keys can only be found if they too grant Search
* to the caller. The possession flag on the root keyring pointer controls use
* of the possessor bits in permissions checking of the entire tree. In
* addition, the LSM gets to forbid keyring searches and key matches.
*
* The search is performed as a breadth-then-depth search up to the prescribed
* limit (KEYRING_SEARCH_MAX_DEPTH).
*
* Keys are matched to the type provided and are then filtered by the match
* function, which is given the description to use in any way it sees fit. The
 * match function may use any attributes of a key that it wishes to
* determine the match. Normally the match function from the key type would be
* used.
*
* RCU can be used to prevent the keyring key lists from disappearing without
* the need to take lots of locks.
*
* Returns a pointer to the found key and increments the key usage count if
* successful; -EAGAIN if no matching keys were found, or if expired or revoked
* keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
* specified keyring wasn't a keyring.
*
* In the case of a successful return, the possession attribute from
* @keyring_ref is propagated to the returned key reference.
*/
key_ref_t keyring_search_aux(key_ref_t keyring_ref,
struct keyring_search_context *ctx)
{
struct key *keyring;
long err;
ctx->iterator = keyring_search_iterator;
ctx->possessed = is_key_possessed(keyring_ref);
ctx->result = ERR_PTR(-EAGAIN);
keyring = key_ref_to_ptr(keyring_ref);
key_check(keyring);
if (keyring->type != &key_type_keyring)
return ERR_PTR(-ENOTDIR);
if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
if (err < 0)
return ERR_PTR(err);
}
rcu_read_lock();
ctx->now = current_kernel_time();
if (search_nested_keyrings(keyring, ctx))
__key_get(key_ref_to_ptr(ctx->result));
rcu_read_unlock();
return ctx->result;
}
/**
* keyring_search - Search the supplied keyring tree for a matching key
* @keyring: The root of the keyring tree to be searched.
 * @type: The type of key we want to find.
 * @description: The description (name) of the key we want to find.
*
 * As keyring_search_aux() above, but using the current task's credentials and
 * the type's default matching function and preferred search method.
*/
key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description)
{
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
key_ref_t key;
int ret;
if (type->match_preparse) {
ret = type->match_preparse(&ctx.match_data);
if (ret < 0)
return ERR_PTR(ret);
}
key = keyring_search_aux(keyring, &ctx);
if (type->match_free)
type->match_free(&ctx.match_data);
return key;
}
EXPORT_SYMBOL(keyring_search);
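/*
 * Illustrative sketch (hypothetical caller): searching a possessed keyring
 * for a user-type key by description; the reference taken on success must be
 * dropped again:
 *
 *	key_ref_t kref = keyring_search(make_key_ref(kr, true),
 *					&key_type_user, "example-desc");
 *	if (!IS_ERR(kref)) {
 *		... use key_ref_to_ptr(kref) ...
 *		key_ref_put(kref);
 *	}
 */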
static struct key_restriction *keyring_restriction_alloc(
key_restrict_link_func_t check)
{
struct key_restriction *keyres =
kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
if (!keyres)
return ERR_PTR(-ENOMEM);
keyres->check = check;
return keyres;
}
/*
* Semaphore to serialise restriction setup to prevent reference count
* cycles through restriction key pointers.
*/
static DECLARE_RWSEM(keyring_serialise_restrict_sem);
/*
* Check for restriction cycles that would prevent keyring garbage collection.
* keyring_serialise_restrict_sem must be held.
*/
static bool keyring_detect_restriction_cycle(const struct key *dest_keyring,
struct key_restriction *keyres)
{
while (keyres && keyres->key &&
keyres->key->type == &key_type_keyring) {
if (keyres->key == dest_keyring)
return true;
keyres = keyres->key->restrict_link;
}
return false;
}
/**
 * keyring_restrict - Look up and apply a restriction to a keyring
 * @keyring_ref: The keyring to be restricted
 * @type: The name of the key type that provides the restriction checker,
 *	or NULL to reject all further links
 * @restriction: The restriction options to apply to the keyring
*/
int keyring_restrict(key_ref_t keyring_ref, const char *type,
const char *restriction)
{
struct key *keyring;
struct key_type *restrict_type = NULL;
struct key_restriction *restrict_link;
int ret = 0;
keyring = key_ref_to_ptr(keyring_ref);
key_check(keyring);
if (keyring->type != &key_type_keyring)
return -ENOTDIR;
if (!type) {
restrict_link = keyring_restriction_alloc(restrict_link_reject);
} else {
restrict_type = key_type_lookup(type);
if (IS_ERR(restrict_type))
return PTR_ERR(restrict_type);
if (!restrict_type->lookup_restriction) {
ret = -ENOENT;
goto error;
}
restrict_link = restrict_type->lookup_restriction(restriction);
}
if (IS_ERR(restrict_link)) {
ret = PTR_ERR(restrict_link);
goto error;
}
down_write(&keyring->sem);
down_write(&keyring_serialise_restrict_sem);
if (keyring->restrict_link)
ret = -EEXIST;
else if (keyring_detect_restriction_cycle(keyring, restrict_link))
ret = -EDEADLK;
else
keyring->restrict_link = restrict_link;
up_write(&keyring_serialise_restrict_sem);
up_write(&keyring->sem);
if (ret < 0) {
key_put(restrict_link->key);
kfree(restrict_link);
}
error:
if (restrict_type)
key_type_put(restrict_type);
return ret;
}
EXPORT_SYMBOL(keyring_restrict);
/*
* Search the given keyring for a key that might be updated.
*
* The caller must guarantee that the keyring is a keyring and that the
* permission is granted to modify the keyring as no check is made here. The
* caller must also hold a lock on the keyring semaphore.
*
* Returns a pointer to the found key with usage count incremented if
* successful and returns NULL if not found. Revoked and invalidated keys are
* skipped over.
*
* If successful, the possession indicator is propagated from the keyring ref
* to the returned key reference.
*/
key_ref_t find_key_to_update(key_ref_t keyring_ref,
const struct keyring_index_key *index_key)
{
struct key *keyring, *key;
const void *object;
keyring = key_ref_to_ptr(keyring_ref);
kenter("{%d},{%s,%s}",
keyring->serial, index_key->type->name, index_key->description);
object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
index_key);
if (object)
goto found;
kleave(" = NULL");
return NULL;
found:
key = keyring_ptr_to_key(object);
if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED))) {
kleave(" = NULL [x]");
return NULL;
}
__key_get(key);
kleave(" = {%d}", key->serial);
return make_key_ref(key, is_key_possessed(keyring_ref));
}
/*
* Find a keyring with the specified name.
*
* Only keyrings that have nonzero refcount, are not revoked, and are owned by a
* user in the current user namespace are considered. If @uid_keyring is %true,
* the keyring additionally must have been allocated as a user or user session
* keyring; otherwise, it must grant Search permission directly to the caller.
*
 * Returns a pointer to the keyring with the keyring's refcount having been
* incremented on success. -ENOKEY is returned if a key could not be found.
*/
struct key *find_keyring_by_name(const char *name, bool uid_keyring)
{
struct key *keyring;
int bucket;
if (!name)
return ERR_PTR(-EINVAL);
bucket = keyring_hash(name);
read_lock(&keyring_name_lock);
if (keyring_name_hash[bucket].next) {
/* search this hash bucket for a keyring with a matching name
* that's readable and that hasn't been revoked */
list_for_each_entry(keyring,
&keyring_name_hash[bucket],
name_link
) {
if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
continue;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
continue;
if (strcmp(keyring->description, name) != 0)
continue;
if (uid_keyring) {
if (!test_bit(KEY_FLAG_UID_KEYRING,
&keyring->flags))
continue;
} else {
if (key_permission(make_key_ref(keyring, 0),
KEY_NEED_SEARCH) < 0)
continue;
}
/* we've got a match but we might end up racing with
* key_cleanup() if the keyring is currently 'dead'
* (ie. it has a zero usage count) */
if (!refcount_inc_not_zero(&keyring->usage))
continue;
keyring->last_used_at = current_kernel_time().tv_sec;
goto out;
}
}
keyring = ERR_PTR(-ENOKEY);
out:
read_unlock(&keyring_name_lock);
return keyring;
}
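/*
 * Illustrative sketch (hypothetical caller; "example-ring" is a placeholder):
 *
 *	struct key *kr = find_keyring_by_name("example-ring", false);
 *	if (!IS_ERR(kr)) {
 *		... use kr ...
 *		key_put(kr);	(balances the refcount taken above)
 *	}
 */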
static int keyring_detect_cycle_iterator(const void *object,
void *iterator_data)
{
struct keyring_search_context *ctx = iterator_data;
const struct key *key = keyring_ptr_to_key(object);
kenter("{%d}", key->serial);
/* We might get a keyring with matching index-key that is nonetheless a
* different keyring. */
if (key != ctx->match_data.raw_data)
return 0;
ctx->result = ERR_PTR(-EDEADLK);
return 1;
}
/*
 * See if a cycle will be created by inserting acyclic tree B in acyclic
* tree A at the topmost level (ie: as a direct child of A).
*
* Since we are adding B to A at the top level, checking for cycles should just
* be a matter of seeing if node A is somewhere in tree B.
*/
static int keyring_detect_cycle(struct key *A, struct key *B)
{
struct keyring_search_context ctx = {
.index_key = A->index_key,
.match_data.raw_data = A,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.iterator = keyring_detect_cycle_iterator,
.flags = (KEYRING_SEARCH_NO_STATE_CHECK |
KEYRING_SEARCH_NO_UPDATE_TIME |
KEYRING_SEARCH_NO_CHECK_PERM |
KEYRING_SEARCH_DETECT_TOO_DEEP),
};
rcu_read_lock();
search_nested_keyrings(B, &ctx);
rcu_read_unlock();
return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
}
/*
 * Preallocate memory so that a key can be linked into a keyring.
*/
int __key_link_begin(struct key *keyring,
const struct keyring_index_key *index_key,
struct assoc_array_edit **_edit)
__acquires(&keyring->sem)
__acquires(&keyring_serialise_link_sem)
{
struct assoc_array_edit *edit;
int ret;
kenter("%d,%s,%s,",
keyring->serial, index_key->type->name, index_key->description);
BUG_ON(index_key->desc_len == 0);
if (keyring->type != &key_type_keyring)
return -ENOTDIR;
down_write(&keyring->sem);
ret = -EKEYREVOKED;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
goto error_krsem;
/* serialise link/link calls to prevent parallel calls causing a cycle
	 * when linking two keyrings in opposite orders */
if (index_key->type == &key_type_keyring)
down_write(&keyring_serialise_link_sem);
/* Create an edit script that will insert/replace the key in the
* keyring tree.
*/
edit = assoc_array_insert(&keyring->keys,
&keyring_assoc_array_ops,
index_key,
NULL);
if (IS_ERR(edit)) {
ret = PTR_ERR(edit);
goto error_sem;
}
/* If we're not replacing a link in-place then we're going to need some
* extra quota.
*/
if (!edit->dead_leaf) {
ret = key_payload_reserve(keyring,
keyring->datalen + KEYQUOTA_LINK_BYTES);
if (ret < 0)
goto error_cancel;
}
*_edit = edit;
kleave(" = 0");
return 0;
error_cancel:
assoc_array_cancel_edit(edit);
error_sem:
if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
error_krsem:
up_write(&keyring->sem);
kleave(" = %d", ret);
return ret;
}
/*
 * Check that already-instantiated keys aren't going to be a problem.
 *
 * The caller must have called __key_link_begin(). There is no need to call
 * this for keys that were created after __key_link_begin() was called.
*/
int __key_link_check_live_key(struct key *keyring, struct key *key)
{
if (key->type == &key_type_keyring)
/* check that we aren't going to create a cycle by linking one
* keyring to another */
return keyring_detect_cycle(keyring, key);
return 0;
}
/*
 * Link a key into a keyring.
*
 * Must be called with __key_link_begin() having been called. Discards any
* already extant link to matching key if there is one, so that each keyring
* holds at most one link to any given key of a particular type+description
* combination.
*/
void __key_link(struct key *key, struct assoc_array_edit **_edit)
{
__key_get(key);
assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
assoc_array_apply_edit(*_edit);
*_edit = NULL;
}
/*
 * Finish linking a key into a keyring.
*
 * Must be called with __key_link_begin() having been called.
*/
void __key_link_end(struct key *keyring,
const struct keyring_index_key *index_key,
struct assoc_array_edit *edit)
__releases(&keyring->sem)
__releases(&keyring_serialise_link_sem)
{
BUG_ON(index_key->type == NULL);
kenter("%d,%s,", keyring->serial, index_key->type->name);
if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
if (edit) {
if (!edit->dead_leaf) {
key_payload_reserve(keyring,
keyring->datalen - KEYQUOTA_LINK_BYTES);
}
assoc_array_cancel_edit(edit);
}
up_write(&keyring->sem);
}
/*
* Check addition of keys to restricted keyrings.
*/
static int __key_link_check_restriction(struct key *keyring, struct key *key)
{
if (!keyring->restrict_link || !keyring->restrict_link->check)
return 0;
return keyring->restrict_link->check(keyring, key->type, &key->payload,
keyring->restrict_link->key);
}
/**
* key_link - Link a key to a keyring
* @keyring: The keyring to make the link in.
* @key: The key to link to.
*
* Make a link in a keyring to a key, such that the keyring holds a reference
* on that key and the key can potentially be found by searching that keyring.
*
* This function will write-lock the keyring's semaphore and will consume some
* of the user's key data quota to hold the link.
*
* Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
* -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
* full, -EDQUOT if there is insufficient key data quota remaining to add
* another link or -ENOMEM if there's insufficient memory.
*
* It is assumed that the caller has checked that it is permitted for a link to
* be made (the keyring should have Write permission and the key Link
* permission).
*/
int key_link(struct key *keyring, struct key *key)
{
struct assoc_array_edit *edit;
int ret;
kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage));
key_check(keyring);
key_check(key);
ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret == 0) {
kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
ret = __key_link_check_restriction(keyring, key);
if (ret == 0)
ret = __key_link_check_live_key(keyring, key);
if (ret == 0)
__key_link(key, &edit);
__key_link_end(keyring, &key->index_key, edit);
}
kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage));
return ret;
}
EXPORT_SYMBOL(key_link);
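/*
 * Illustrative sketch (hypothetical caller): the usual pairing is a
 * permission check followed by the link:
 *
 *	ret = key_permission(make_key_ref(keyring, true), KEY_NEED_WRITE);
 *	if (ret == 0)
 *		ret = key_link(keyring, key);
 */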
/**
* key_unlink - Unlink the first link to a key from a keyring.
* @keyring: The keyring to remove the link from.
* @key: The key the link is to.
*
* Remove a link from a keyring to a key.
*
* This function will write-lock the keyring's semaphore.
*
* Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
* the key isn't linked to by the keyring or -ENOMEM if there's insufficient
* memory.
*
* It is assumed that the caller has checked that it is permitted for a link to
* be removed (the keyring should have Write permission; no permissions are
* required on the key).
*/
int key_unlink(struct key *keyring, struct key *key)
{
struct assoc_array_edit *edit;
int ret;
key_check(keyring);
key_check(key);
if (keyring->type != &key_type_keyring)
return -ENOTDIR;
down_write(&keyring->sem);
edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
&key->index_key);
if (IS_ERR(edit)) {
ret = PTR_ERR(edit);
goto error;
}
ret = -ENOENT;
if (edit == NULL)
goto error;
assoc_array_apply_edit(edit);
key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
ret = 0;
error:
up_write(&keyring->sem);
return ret;
}
EXPORT_SYMBOL(key_unlink);
/**
* keyring_clear - Clear a keyring
* @keyring: The keyring to clear.
*
* Clear the contents of the specified keyring.
*
* Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
*/
int keyring_clear(struct key *keyring)
{
struct assoc_array_edit *edit;
int ret;
if (keyring->type != &key_type_keyring)
return -ENOTDIR;
down_write(&keyring->sem);
edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
if (IS_ERR(edit)) {
ret = PTR_ERR(edit);
} else {
if (edit)
assoc_array_apply_edit(edit);
key_payload_reserve(keyring, 0);
ret = 0;
}
up_write(&keyring->sem);
return ret;
}
EXPORT_SYMBOL(keyring_clear);
/*
* Dispose of the links from a revoked keyring.
*
* This is called with the key sem write-locked.
*/
static void keyring_revoke(struct key *keyring)
{
struct assoc_array_edit *edit;
edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
if (!IS_ERR(edit)) {
if (edit)
assoc_array_apply_edit(edit);
key_payload_reserve(keyring, 0);
}
}
static bool keyring_gc_select_iterator(void *object, void *iterator_data)
{
struct key *key = keyring_ptr_to_key(object);
time_t *limit = iterator_data;
if (key_is_dead(key, *limit))
return false;
key_get(key);
return true;
}
static int keyring_gc_check_iterator(const void *object, void *iterator_data)
{
const struct key *key = keyring_ptr_to_key(object);
time_t *limit = iterator_data;
key_check(key);
return key_is_dead(key, *limit);
}
/*
* Garbage collect pointers from a keyring.
*
* Not called with any locks held. The keyring's key struct will not be
* deallocated under us as only our caller may deallocate it.
*/
void keyring_gc(struct key *keyring, time_t limit)
{
int result;
kenter("%x{%s}", keyring->serial, keyring->description ?: "");
if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED)))
goto dont_gc;
/* scan the keyring looking for dead keys */
rcu_read_lock();
result = assoc_array_iterate(&keyring->keys,
keyring_gc_check_iterator, &limit);
rcu_read_unlock();
if (result == true)
goto do_gc;
dont_gc:
kleave(" [no gc]");
return;
do_gc:
down_write(&keyring->sem);
assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
keyring_gc_select_iterator, &limit);
up_write(&keyring->sem);
kleave(" [gc]");
}
/*
* Garbage collect restriction pointers from a keyring.
*
* Keyring restrictions are associated with a key type, and must be cleaned
* up if the key type is unregistered. The restriction is altered to always
* reject additional keys so a keyring cannot be opened up by unregistering
* a key type.
*
* Not called with any keyring locks held. The keyring's key struct will not
* be deallocated under us as only our caller may deallocate it.
*
* The caller is required to hold key_types_sem and dead_type->sem. This is
* fulfilled by key_gc_keytype() holding the locks on behalf of
* key_garbage_collector(), which it invokes on a workqueue.
*/
void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type)
{
struct key_restriction *keyres;
kenter("%x{%s}", keyring->serial, keyring->description ?: "");
/*
* keyring->restrict_link is only assigned at key allocation time
* or with the key type locked, so the only values that could be
* concurrently assigned to keyring->restrict_link are for key
* types other than dead_type. Given this, it's ok to check
* the key type before acquiring keyring->sem.
*/
if (!dead_type || !keyring->restrict_link ||
keyring->restrict_link->keytype != dead_type) {
kleave(" [no restriction gc]");
return;
}
/* Lock the keyring to ensure that a link is not in progress */
down_write(&keyring->sem);
keyres = keyring->restrict_link;
keyres->check = restrict_link_reject;
key_put(keyres->key);
keyres->key = NULL;
keyres->keytype = NULL;
up_write(&keyring->sem);
kleave(" [restriction gc]");
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2891_7 |
crossvul-cpp_data_good_5197_0 | /*
*
* Copyright (C) 2011 Novell Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/cred.h>
#include "overlayfs.h"
void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
{
int err;
dget(wdentry);
if (d_is_dir(wdentry))
err = ovl_do_rmdir(wdir, wdentry);
else
err = ovl_do_unlink(wdir, wdentry);
dput(wdentry);
if (err) {
pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n",
wdentry, err);
}
}
struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
{
struct dentry *temp;
char name[20];
snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);
temp = lookup_one_len(name, workdir, strlen(name));
if (!IS_ERR(temp) && temp->d_inode) {
pr_err("overlayfs: workdir/%s already exists\n", name);
dput(temp);
temp = ERR_PTR(-EIO);
}
return temp;
}
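/*
 * For illustration: a dentry at address 0xffff880012345678 yields the
 * workdir name "#ffff880012345678", which is how in-flight temporary
 * files appear if the workdir is ever inspected directly.
 */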
/* caller holds i_mutex on workdir */
static struct dentry *ovl_whiteout(struct dentry *workdir,
struct dentry *dentry)
{
int err;
struct dentry *whiteout;
struct inode *wdir = workdir->d_inode;
whiteout = ovl_lookup_temp(workdir, dentry);
if (IS_ERR(whiteout))
return whiteout;
err = ovl_do_whiteout(wdir, whiteout);
if (err) {
dput(whiteout);
whiteout = ERR_PTR(err);
}
return whiteout;
}
int ovl_create_real(struct inode *dir, struct dentry *newdentry,
struct kstat *stat, const char *link,
struct dentry *hardlink, bool debug)
{
int err;
if (newdentry->d_inode)
return -ESTALE;
if (hardlink) {
err = ovl_do_link(hardlink, dir, newdentry, debug);
} else {
switch (stat->mode & S_IFMT) {
case S_IFREG:
err = ovl_do_create(dir, newdentry, stat->mode, debug);
break;
case S_IFDIR:
err = ovl_do_mkdir(dir, newdentry, stat->mode, debug);
break;
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
err = ovl_do_mknod(dir, newdentry,
stat->mode, stat->rdev, debug);
break;
case S_IFLNK:
err = ovl_do_symlink(dir, newdentry, link, debug);
break;
default:
err = -EPERM;
}
}
if (!err && WARN_ON(!newdentry->d_inode)) {
/*
* Not quite sure if non-instantiated dentry is legal or not.
* VFS doesn't seem to care so check and warn here.
*/
err = -ENOENT;
}
return err;
}
static int ovl_set_opaque(struct dentry *upperdentry)
{
return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
}
static void ovl_remove_opaque(struct dentry *upperdentry)
{
int err;
err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE);
if (err) {
pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
upperdentry->d_name.name, err);
}
}
static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
int err;
enum ovl_path_type type;
struct path realpath;
type = ovl_path_real(dentry, &realpath);
err = vfs_getattr(&realpath, stat);
if (err)
return err;
stat->dev = dentry->d_sb->s_dev;
stat->ino = dentry->d_inode->i_ino;
/*
* It's probably not worth it to count subdirs to get the
* correct link count. nlink=1 seems to pacify 'find' and
* other utilities.
*/
if (OVL_TYPE_MERGE(type))
stat->nlink = 1;
return 0;
}
static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
struct kstat *stat, const char *link,
struct dentry *hardlink)
{
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
struct inode *udir = upperdir->d_inode;
struct dentry *newdentry;
int err;
inode_lock_nested(udir, I_MUTEX_PARENT);
newdentry = lookup_one_len(dentry->d_name.name, upperdir,
dentry->d_name.len);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
goto out_unlock;
err = ovl_create_real(udir, newdentry, stat, link, hardlink, false);
if (err)
goto out_dput;
ovl_dentry_version_inc(dentry->d_parent);
ovl_dentry_update(dentry, newdentry);
ovl_copyattr(newdentry->d_inode, inode);
d_instantiate(dentry, inode);
newdentry = NULL;
out_dput:
dput(newdentry);
out_unlock:
inode_unlock(udir);
return err;
}
static int ovl_lock_rename_workdir(struct dentry *workdir,
struct dentry *upperdir)
{
/* Workdir should not be the same as upperdir */
if (workdir == upperdir)
goto err;
/* Workdir should not be subdir of upperdir and vice versa */
if (lock_rename(workdir, upperdir) != NULL)
goto err_unlock;
return 0;
err_unlock:
unlock_rename(workdir, upperdir);
err:
pr_err("overlayfs: failed to lock workdir+upperdir\n");
return -EIO;
}
static struct dentry *ovl_clear_empty(struct dentry *dentry,
struct list_head *list)
{
struct dentry *workdir = ovl_workdir(dentry);
struct inode *wdir = workdir->d_inode;
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
struct inode *udir = upperdir->d_inode;
struct path upperpath;
struct dentry *upper;
struct dentry *opaquedir;
struct kstat stat;
int err;
if (WARN_ON(!workdir))
return ERR_PTR(-EROFS);
err = ovl_lock_rename_workdir(workdir, upperdir);
if (err)
goto out;
ovl_path_upper(dentry, &upperpath);
err = vfs_getattr(&upperpath, &stat);
if (err)
goto out_unlock;
err = -ESTALE;
if (!S_ISDIR(stat.mode))
goto out_unlock;
upper = upperpath.dentry;
if (upper->d_parent->d_inode != udir)
goto out_unlock;
opaquedir = ovl_lookup_temp(workdir, dentry);
err = PTR_ERR(opaquedir);
if (IS_ERR(opaquedir))
goto out_unlock;
err = ovl_create_real(wdir, opaquedir, &stat, NULL, NULL, true);
if (err)
goto out_dput;
err = ovl_copy_xattr(upper, opaquedir);
if (err)
goto out_cleanup;
err = ovl_set_opaque(opaquedir);
if (err)
goto out_cleanup;
inode_lock(opaquedir->d_inode);
err = ovl_set_attr(opaquedir, &stat);
inode_unlock(opaquedir->d_inode);
if (err)
goto out_cleanup;
err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE);
if (err)
goto out_cleanup;
ovl_cleanup_whiteouts(upper, list);
ovl_cleanup(wdir, upper);
unlock_rename(workdir, upperdir);
/* dentry's upper doesn't match now, get rid of it */
d_drop(dentry);
return opaquedir;
out_cleanup:
ovl_cleanup(wdir, opaquedir);
out_dput:
dput(opaquedir);
out_unlock:
unlock_rename(workdir, upperdir);
out:
return ERR_PTR(err);
}
static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry)
{
int err;
struct dentry *ret = NULL;
LIST_HEAD(list);
err = ovl_check_empty_dir(dentry, &list);
if (err)
ret = ERR_PTR(err);
else {
/*
* If no upperdentry then skip clearing whiteouts.
*
* Can race with copy-up, since we don't hold the upperdir
* mutex. Doesn't matter, since copy-up can't create a
* non-empty directory from an empty one.
*/
if (ovl_dentry_upper(dentry))
ret = ovl_clear_empty(dentry, &list);
}
ovl_cache_free(&list);
return ret;
}
static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
struct kstat *stat, const char *link,
struct dentry *hardlink)
{
struct dentry *workdir = ovl_workdir(dentry);
struct inode *wdir = workdir->d_inode;
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
struct inode *udir = upperdir->d_inode;
struct dentry *upper;
struct dentry *newdentry;
int err;
if (WARN_ON(!workdir))
return -EROFS;
err = ovl_lock_rename_workdir(workdir, upperdir);
if (err)
goto out;
newdentry = ovl_lookup_temp(workdir, dentry);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
goto out_unlock;
upper = lookup_one_len(dentry->d_name.name, upperdir,
dentry->d_name.len);
err = PTR_ERR(upper);
if (IS_ERR(upper))
goto out_dput;
err = ovl_create_real(wdir, newdentry, stat, link, hardlink, true);
if (err)
goto out_dput2;
if (S_ISDIR(stat->mode)) {
err = ovl_set_opaque(newdentry);
if (err)
goto out_cleanup;
err = ovl_do_rename(wdir, newdentry, udir, upper,
RENAME_EXCHANGE);
if (err)
goto out_cleanup;
ovl_cleanup(wdir, upper);
} else {
err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
if (err)
goto out_cleanup;
}
ovl_dentry_version_inc(dentry->d_parent);
ovl_dentry_update(dentry, newdentry);
ovl_copyattr(newdentry->d_inode, inode);
d_instantiate(dentry, inode);
newdentry = NULL;
out_dput2:
dput(upper);
out_dput:
dput(newdentry);
out_unlock:
unlock_rename(workdir, upperdir);
out:
return err;
out_cleanup:
ovl_cleanup(wdir, newdentry);
goto out_dput2;
}
static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
const char *link, struct dentry *hardlink)
{
int err;
struct inode *inode;
struct kstat stat = {
.mode = mode,
.rdev = rdev,
};
err = -ENOMEM;
inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata);
if (!inode)
goto out;
err = ovl_copy_up(dentry->d_parent);
if (err)
goto out_iput;
if (!ovl_dentry_is_opaque(dentry)) {
err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
} else {
const struct cred *old_cred;
struct cred *override_cred;
err = -ENOMEM;
override_cred = prepare_creds();
if (!override_cred)
goto out_iput;
/*
* CAP_SYS_ADMIN for setting opaque xattr
* CAP_DAC_OVERRIDE for create in workdir, rename
* CAP_FOWNER for removing whiteout from sticky dir
*/
cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
cap_raise(override_cred->cap_effective, CAP_FOWNER);
old_cred = override_creds(override_cred);
err = ovl_create_over_whiteout(dentry, inode, &stat, link,
hardlink);
revert_creds(old_cred);
put_cred(override_cred);
}
if (!err)
inode = NULL;
out_iput:
iput(inode);
out:
return err;
}
static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
const char *link)
{
int err;
err = ovl_want_write(dentry);
if (!err) {
err = ovl_create_or_link(dentry, mode, rdev, link, NULL);
ovl_drop_write(dentry);
}
return err;
}
static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
}
static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
}
static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev)
{
/* Don't allow creation of "whiteout" on overlay */
if (S_ISCHR(mode) && rdev == WHITEOUT_DEV)
return -EPERM;
return ovl_create_object(dentry, mode, rdev, NULL);
}
static int ovl_symlink(struct inode *dir, struct dentry *dentry,
const char *link)
{
return ovl_create_object(dentry, S_IFLNK, 0, link);
}
static int ovl_link(struct dentry *old, struct inode *newdir,
struct dentry *new)
{
int err;
struct dentry *upper;
err = ovl_want_write(old);
if (err)
goto out;
err = ovl_copy_up(old);
if (err)
goto out_drop_write;
upper = ovl_dentry_upper(old);
err = ovl_create_or_link(new, upper->d_inode->i_mode, 0, NULL, upper);
out_drop_write:
ovl_drop_write(old);
out:
return err;
}
static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
{
struct dentry *workdir = ovl_workdir(dentry);
struct inode *wdir = workdir->d_inode;
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
struct inode *udir = upperdir->d_inode;
struct dentry *whiteout;
struct dentry *upper;
struct dentry *opaquedir = NULL;
int err;
if (WARN_ON(!workdir))
return -EROFS;
if (is_dir) {
if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
opaquedir = ovl_check_empty_and_clear(dentry);
err = PTR_ERR(opaquedir);
if (IS_ERR(opaquedir))
goto out;
} else {
LIST_HEAD(list);
/*
			 * When removing an empty opaque directory, it
* makes no sense to replace it with an exact replica of
* itself. But emptiness still needs to be checked.
*/
err = ovl_check_empty_dir(dentry, &list);
ovl_cache_free(&list);
if (err)
goto out;
}
}
err = ovl_lock_rename_workdir(workdir, upperdir);
if (err)
goto out_dput;
whiteout = ovl_whiteout(workdir, dentry);
err = PTR_ERR(whiteout);
if (IS_ERR(whiteout))
goto out_unlock;
upper = ovl_dentry_upper(dentry);
if (!upper) {
upper = lookup_one_len(dentry->d_name.name, upperdir,
dentry->d_name.len);
err = PTR_ERR(upper);
if (IS_ERR(upper))
goto kill_whiteout;
err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
dput(upper);
if (err)
goto kill_whiteout;
} else {
int flags = 0;
if (opaquedir)
upper = opaquedir;
err = -ESTALE;
if (upper->d_parent != upperdir)
goto kill_whiteout;
if (is_dir)
flags |= RENAME_EXCHANGE;
err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
if (err)
goto kill_whiteout;
if (is_dir)
ovl_cleanup(wdir, upper);
}
ovl_dentry_version_inc(dentry->d_parent);
out_d_drop:
d_drop(dentry);
dput(whiteout);
out_unlock:
unlock_rename(workdir, upperdir);
out_dput:
dput(opaquedir);
out:
return err;
kill_whiteout:
ovl_cleanup(wdir, whiteout);
goto out_d_drop;
}
static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
{
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
struct inode *dir = upperdir->d_inode;
struct dentry *upper;
int err;
inode_lock_nested(dir, I_MUTEX_PARENT);
upper = lookup_one_len(dentry->d_name.name, upperdir,
dentry->d_name.len);
err = PTR_ERR(upper);
if (IS_ERR(upper))
goto out_unlock;
err = -ESTALE;
if (upper == ovl_dentry_upper(dentry)) {
if (is_dir)
err = vfs_rmdir(dir, upper);
else
err = vfs_unlink(dir, upper, NULL);
ovl_dentry_version_inc(dentry->d_parent);
}
dput(upper);
/*
* Keeping this dentry hashed would mean having to release
* upperpath/lowerpath, which could only be done if we are the
* sole user of this dentry. Too tricky... Just unhash for
* now.
*/
if (!err)
d_drop(dentry);
out_unlock:
inode_unlock(dir);
return err;
}
static inline int ovl_check_sticky(struct dentry *dentry)
{
struct inode *dir = ovl_dentry_real(dentry->d_parent)->d_inode;
struct inode *inode = ovl_dentry_real(dentry)->d_inode;
if (check_sticky(dir, inode))
return -EPERM;
return 0;
}
static int ovl_do_remove(struct dentry *dentry, bool is_dir)
{
enum ovl_path_type type;
int err;
err = ovl_check_sticky(dentry);
if (err)
goto out;
err = ovl_want_write(dentry);
if (err)
goto out;
err = ovl_copy_up(dentry->d_parent);
if (err)
goto out_drop_write;
type = ovl_path_type(dentry);
if (OVL_TYPE_PURE_UPPER(type)) {
err = ovl_remove_upper(dentry, is_dir);
} else {
const struct cred *old_cred;
struct cred *override_cred;
err = -ENOMEM;
override_cred = prepare_creds();
if (!override_cred)
goto out_drop_write;
/*
* CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
* CAP_DAC_OVERRIDE for create in workdir, rename
* CAP_FOWNER for removing whiteout from sticky dir
* CAP_FSETID for chmod of opaque dir
* CAP_CHOWN for chown of opaque dir
*/
cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
cap_raise(override_cred->cap_effective, CAP_FOWNER);
cap_raise(override_cred->cap_effective, CAP_FSETID);
cap_raise(override_cred->cap_effective, CAP_CHOWN);
old_cred = override_creds(override_cred);
err = ovl_remove_and_whiteout(dentry, is_dir);
revert_creds(old_cred);
put_cred(override_cred);
}
out_drop_write:
ovl_drop_write(dentry);
out:
return err;
}
static int ovl_unlink(struct inode *dir, struct dentry *dentry)
{
return ovl_do_remove(dentry, false);
}
static int ovl_rmdir(struct inode *dir, struct dentry *dentry)
{
return ovl_do_remove(dentry, true);
}
static int ovl_rename2(struct inode *olddir, struct dentry *old,
struct inode *newdir, struct dentry *new,
unsigned int flags)
{
int err;
enum ovl_path_type old_type;
enum ovl_path_type new_type;
struct dentry *old_upperdir;
struct dentry *new_upperdir;
struct dentry *olddentry;
struct dentry *newdentry;
struct dentry *trap;
bool old_opaque;
bool new_opaque;
bool new_create = false;
bool cleanup_whiteout = false;
bool overwrite = !(flags & RENAME_EXCHANGE);
bool is_dir = d_is_dir(old);
bool new_is_dir = false;
struct dentry *opaquedir = NULL;
const struct cred *old_cred = NULL;
struct cred *override_cred = NULL;
err = -EINVAL;
if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE))
goto out;
flags &= ~RENAME_NOREPLACE;
err = ovl_check_sticky(old);
if (err)
goto out;
/* Don't copy up directory trees */
old_type = ovl_path_type(old);
err = -EXDEV;
if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir)
goto out;
if (new->d_inode) {
err = ovl_check_sticky(new);
if (err)
goto out;
if (d_is_dir(new))
new_is_dir = true;
new_type = ovl_path_type(new);
err = -EXDEV;
if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir)
goto out;
err = 0;
if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) {
if (ovl_dentry_lower(old)->d_inode ==
ovl_dentry_lower(new)->d_inode)
goto out;
}
if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) {
if (ovl_dentry_upper(old)->d_inode ==
ovl_dentry_upper(new)->d_inode)
goto out;
}
} else {
if (ovl_dentry_is_opaque(new))
new_type = __OVL_PATH_UPPER;
else
new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE;
}
err = ovl_want_write(old);
if (err)
goto out;
err = ovl_copy_up(old);
if (err)
goto out_drop_write;
err = ovl_copy_up(new->d_parent);
if (err)
goto out_drop_write;
if (!overwrite) {
err = ovl_copy_up(new);
if (err)
goto out_drop_write;
}
old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
if (old_opaque || new_opaque) {
err = -ENOMEM;
override_cred = prepare_creds();
if (!override_cred)
goto out_drop_write;
/*
* CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
* CAP_DAC_OVERRIDE for create in workdir
* CAP_FOWNER for removing whiteout from sticky dir
* CAP_FSETID for chmod of opaque dir
* CAP_CHOWN for chown of opaque dir
*/
cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
cap_raise(override_cred->cap_effective, CAP_FOWNER);
cap_raise(override_cred->cap_effective, CAP_FSETID);
cap_raise(override_cred->cap_effective, CAP_CHOWN);
old_cred = override_creds(override_cred);
}
if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
opaquedir = ovl_check_empty_and_clear(new);
err = PTR_ERR(opaquedir);
if (IS_ERR(opaquedir)) {
opaquedir = NULL;
goto out_revert_creds;
}
}
if (overwrite) {
if (old_opaque) {
if (new->d_inode || !new_opaque) {
/* Whiteout source */
flags |= RENAME_WHITEOUT;
} else {
/* Switch whiteouts */
flags |= RENAME_EXCHANGE;
}
} else if (is_dir && !new->d_inode && new_opaque) {
flags |= RENAME_EXCHANGE;
cleanup_whiteout = true;
}
}
old_upperdir = ovl_dentry_upper(old->d_parent);
new_upperdir = ovl_dentry_upper(new->d_parent);
trap = lock_rename(new_upperdir, old_upperdir);
olddentry = lookup_one_len(old->d_name.name, old_upperdir,
old->d_name.len);
err = PTR_ERR(olddentry);
if (IS_ERR(olddentry))
goto out_unlock;
err = -ESTALE;
if (olddentry != ovl_dentry_upper(old))
goto out_dput_old;
newdentry = lookup_one_len(new->d_name.name, new_upperdir,
new->d_name.len);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
goto out_dput_old;
err = -ESTALE;
if (ovl_dentry_upper(new)) {
if (opaquedir) {
if (newdentry != opaquedir)
goto out_dput;
} else {
if (newdentry != ovl_dentry_upper(new))
goto out_dput;
}
} else {
new_create = true;
if (!d_is_negative(newdentry) &&
(!new_opaque || !ovl_is_whiteout(newdentry)))
goto out_dput;
}
if (olddentry == trap)
goto out_dput;
if (newdentry == trap)
goto out_dput;
if (is_dir && !old_opaque && new_opaque) {
err = ovl_set_opaque(olddentry);
if (err)
goto out_dput;
}
if (!overwrite && new_is_dir && old_opaque && !new_opaque) {
err = ovl_set_opaque(newdentry);
if (err)
goto out_dput;
}
if (old_opaque || new_opaque) {
err = ovl_do_rename(old_upperdir->d_inode, olddentry,
new_upperdir->d_inode, newdentry,
flags);
} else {
/* No debug for the plain case */
BUG_ON(flags & ~RENAME_EXCHANGE);
err = vfs_rename(old_upperdir->d_inode, olddentry,
new_upperdir->d_inode, newdentry,
NULL, flags);
}
if (err) {
if (is_dir && !old_opaque && new_opaque)
ovl_remove_opaque(olddentry);
if (!overwrite && new_is_dir && old_opaque && !new_opaque)
ovl_remove_opaque(newdentry);
goto out_dput;
}
if (is_dir && old_opaque && !new_opaque)
ovl_remove_opaque(olddentry);
if (!overwrite && new_is_dir && !old_opaque && new_opaque)
ovl_remove_opaque(newdentry);
/*
	 * Old dentry now lives in a different location. Dentries in
* lowerstack are stale. We cannot drop them here because
* access to them is lockless. This could be only pure upper
* or opaque directory - numlower is zero. Or upper non-dir
* entry - its pureness is tracked by flag opaque.
*/
if (old_opaque != new_opaque) {
ovl_dentry_set_opaque(old, new_opaque);
if (!overwrite)
ovl_dentry_set_opaque(new, old_opaque);
}
if (cleanup_whiteout)
ovl_cleanup(old_upperdir->d_inode, newdentry);
ovl_dentry_version_inc(old->d_parent);
ovl_dentry_version_inc(new->d_parent);
out_dput:
dput(newdentry);
out_dput_old:
dput(olddentry);
out_unlock:
unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
if (old_opaque || new_opaque) {
revert_creds(old_cred);
put_cred(override_cred);
}
out_drop_write:
ovl_drop_write(old);
out:
dput(opaquedir);
return err;
}
const struct inode_operations ovl_dir_inode_operations = {
.lookup = ovl_lookup,
.mkdir = ovl_mkdir,
.symlink = ovl_symlink,
.unlink = ovl_unlink,
.rmdir = ovl_rmdir,
.rename2 = ovl_rename2,
.link = ovl_link,
.setattr = ovl_setattr,
.create = ovl_create,
.mknod = ovl_mknod,
.permission = ovl_permission,
.getattr = ovl_dir_getattr,
.setxattr = ovl_setxattr,
.getxattr = ovl_getxattr,
.listxattr = ovl_listxattr,
.removexattr = ovl_removexattr,
};
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5197_0 |
crossvul-cpp_data_bad_2137_0 | /*-
* Copyright (c) 2008 Christos Zoulas
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Parse Composite Document Files, the format used in Microsoft Office
* document files before they switched to zipped XML.
* Info from: http://sc.openoffice.org/compdocfileformat.pdf
*
* N.B. This is the "Composite Document File" format, and not the
* "Compound Document Format", nor the "Channel Definition Format".
*/
#include "file.h"
#ifndef lint
FILE_RCSID("@(#)$File: cdf.c,v 1.59 2014/05/14 23:22:48 christos Exp $")
#endif
#include <assert.h>
#ifdef CDF_DEBUG
#include <err.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include <ctype.h>
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef EFTYPE
#define EFTYPE EINVAL
#endif
#include "cdf.h"
#ifdef CDF_DEBUG
#define DPRINTF(a) printf a, fflush(stdout)
#else
#define DPRINTF(a)
#endif
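/*
 * Run-time endianness probe: cdf_read_header() copies the bytes
 * "\01\02\03\04" into cdf_bo.s. If the overlaid integer then reads
 * back as 0x01020304 the host is big-endian, so the little-endian
 * on-disk CDF values need byte-swapping.
 */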
static union {
char s[4];
uint32_t u;
} cdf_bo;
#define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304)
#define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x)))
#define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x)))
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
/*
* swap a short
*/
static uint16_t
_cdf_tole2(uint16_t sv)
{
uint16_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[1];
d[1] = s[0];
return rv;
}
/*
* swap an int
*/
static uint32_t
_cdf_tole4(uint32_t sv)
{
uint32_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[3];
d[1] = s[2];
d[2] = s[1];
d[3] = s[0];
return rv;
}
/*
* swap a quad
*/
static uint64_t
_cdf_tole8(uint64_t sv)
{
uint64_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[7];
d[1] = s[6];
d[2] = s[5];
d[3] = s[4];
d[4] = s[3];
d[5] = s[2];
d[6] = s[1];
d[7] = s[0];
return rv;
}
/*
* grab a uint32_t from a possibly unaligned address, and return it in
* the native host order.
*/
static uint32_t
cdf_getuint32(const uint8_t *p, size_t offs)
{
uint32_t rv;
(void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv));
return CDF_TOLE4(rv);
}
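/*
 * The CDF_UNPACK* macros copy one field at a time out of the raw
 * sector buffer while advancing a running offset, so decoding never
 * depends on the compiler's struct padding matching the packed
 * on-disk layout.
 */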
#define CDF_UNPACK(a) \
(void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a)
#define CDF_UNPACKA(a) \
(void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a)
uint16_t
cdf_tole2(uint16_t sv)
{
return CDF_TOLE2(sv);
}
uint32_t
cdf_tole4(uint32_t sv)
{
return CDF_TOLE4(sv);
}
uint64_t
cdf_tole8(uint64_t sv)
{
return CDF_TOLE8(sv);
}
void
cdf_swap_header(cdf_header_t *h)
{
size_t i;
h->h_magic = CDF_TOLE8(h->h_magic);
h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]);
h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]);
h->h_revision = CDF_TOLE2(h->h_revision);
h->h_version = CDF_TOLE2(h->h_version);
h->h_byte_order = CDF_TOLE2(h->h_byte_order);
h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2);
h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2);
h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat);
h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory);
h->h_min_size_standard_stream =
CDF_TOLE4(h->h_min_size_standard_stream);
h->h_secid_first_sector_in_short_sat =
CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat);
h->h_num_sectors_in_short_sat =
CDF_TOLE4(h->h_num_sectors_in_short_sat);
h->h_secid_first_sector_in_master_sat =
CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat);
h->h_num_sectors_in_master_sat =
CDF_TOLE4(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]);
}
void
cdf_unpack_header(cdf_header_t *h, char *buf)
{
size_t i;
size_t len = 0;
CDF_UNPACK(h->h_magic);
CDF_UNPACKA(h->h_uuid);
CDF_UNPACK(h->h_revision);
CDF_UNPACK(h->h_version);
CDF_UNPACK(h->h_byte_order);
CDF_UNPACK(h->h_sec_size_p2);
CDF_UNPACK(h->h_short_sec_size_p2);
CDF_UNPACKA(h->h_unused0);
CDF_UNPACK(h->h_num_sectors_in_sat);
CDF_UNPACK(h->h_secid_first_directory);
CDF_UNPACKA(h->h_unused1);
CDF_UNPACK(h->h_min_size_standard_stream);
CDF_UNPACK(h->h_secid_first_sector_in_short_sat);
CDF_UNPACK(h->h_num_sectors_in_short_sat);
CDF_UNPACK(h->h_secid_first_sector_in_master_sat);
CDF_UNPACK(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
CDF_UNPACK(h->h_master_sat[i]);
}
void
cdf_swap_dir(cdf_directory_t *d)
{
d->d_namelen = CDF_TOLE2(d->d_namelen);
d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child);
d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child);
d->d_storage = CDF_TOLE4((uint32_t)d->d_storage);
d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]);
d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]);
d->d_flags = CDF_TOLE4(d->d_flags);
d->d_created = CDF_TOLE8((uint64_t)d->d_created);
d->d_modified = CDF_TOLE8((uint64_t)d->d_modified);
d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector);
d->d_size = CDF_TOLE4(d->d_size);
}
void
cdf_swap_class(cdf_classid_t *d)
{
d->cl_dword = CDF_TOLE4(d->cl_dword);
d->cl_word[0] = CDF_TOLE2(d->cl_word[0]);
d->cl_word[1] = CDF_TOLE2(d->cl_word[1]);
}
void
cdf_unpack_dir(cdf_directory_t *d, char *buf)
{
size_t len = 0;
CDF_UNPACKA(d->d_name);
CDF_UNPACK(d->d_namelen);
CDF_UNPACK(d->d_type);
CDF_UNPACK(d->d_color);
CDF_UNPACK(d->d_left_child);
CDF_UNPACK(d->d_right_child);
CDF_UNPACK(d->d_storage);
CDF_UNPACKA(d->d_storage_uuid);
CDF_UNPACK(d->d_flags);
CDF_UNPACK(d->d_created);
CDF_UNPACK(d->d_modified);
CDF_UNPACK(d->d_stream_first_sector);
CDF_UNPACK(d->d_size);
CDF_UNPACK(d->d_unused0);
}
static int
cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
const void *p, size_t tail, int line)
{
const char *b = (const char *)sst->sst_tab;
const char *e = ((const char *)p) + tail;
(void)&line;
if (e >= b && (size_t)(e - b) <= CDF_SEC_SIZE(h) * sst->sst_len)
return 0;
DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
" > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len));
errno = EFTYPE;
return -1;
}
static ssize_t
cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len)
{
size_t siz = (size_t)off + len;
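/* Reject an (offset, length) pair whose end position overflows. */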
if ((off_t)(off + len) != (off_t)siz) {
errno = EINVAL;
return -1;
}
if (info->i_buf != NULL && info->i_len >= siz) {
(void)memcpy(buf, &info->i_buf[off], len);
return (ssize_t)len;
}
if (info->i_fd == -1)
return -1;
if (pread(info->i_fd, buf, len, off) != (ssize_t)len)
return -1;
return (ssize_t)len;
}
int
cdf_read_header(const cdf_info_t *info, cdf_header_t *h)
{
char buf[512];
(void)memcpy(cdf_bo.s, "\01\02\03\04", 4);
if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1)
return -1;
cdf_unpack_header(h, buf);
cdf_swap_header(h);
if (h->h_magic != CDF_MAGIC) {
DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%"
INT64_T_FORMAT "x\n",
(unsigned long long)h->h_magic,
(unsigned long long)CDF_MAGIC));
goto out;
}
if (h->h_sec_size_p2 > 20) {
DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2));
goto out;
}
if (h->h_short_sec_size_p2 > 20) {
DPRINTF(("Bad short sector size 0x%u\n",
h->h_short_sec_size_p2));
goto out;
}
return 0;
out:
errno = EFTYPE;
return -1;
}
ssize_t
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SEC_SIZE(h);
size_t pos = CDF_SEC_POS(h, id);
assert(ss == len);
return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);
}
ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
pos + len, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
}
/*
* Read the sector allocation table.
*/
int
cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat)
{
size_t i, j, k;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t *msa, mid, sec;
size_t nsatpersec = (ss / sizeof(mid)) - 1;
for (i = 0; i < __arraycount(h->h_master_sat); i++)
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
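/*
 * i now counts the SAT sectors listed directly in the header; any
 * remaining SAT sectors are reached through the chained master SAT,
 * each sector of which holds nsatpersec entries plus a link to the
 * next master SAT sector.
 */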
#define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss))
if ((nsatpersec > 0 &&
h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) ||
i > CDF_SEC_LIMIT) {
DPRINTF(("Number of sectors in master SAT too big %u %"
SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i));
errno = EFTYPE;
return -1;
}
sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i;
DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n",
sat->sat_len, ss));
if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss)))
== NULL)
return -1;
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] < 0)
break;
if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
h->h_master_sat[i]) != (ssize_t)ss) {
DPRINTF(("Reading sector %d", h->h_master_sat[i]));
goto out1;
}
}
if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL)
goto out1;
mid = h->h_secid_first_sector_in_master_sat;
for (j = 0; j < h->h_num_sectors_in_master_sat; j++) {
if (mid < 0)
goto out;
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Reading master sector loop limit"));
errno = EFTYPE;
goto out2;
}
if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) {
DPRINTF(("Reading master sector %d", mid));
goto out2;
}
for (k = 0; k < nsatpersec; k++, i++) {
sec = CDF_TOLE4((uint32_t)msa[k]);
if (sec < 0)
goto out;
if (i >= sat->sat_len) {
DPRINTF(("Out of bounds reading MSA %" SIZE_T_FORMAT
"u >= %" SIZE_T_FORMAT "u", i, sat->sat_len));
errno = EFTYPE;
goto out2;
}
if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
sec) != (ssize_t)ss) {
DPRINTF(("Reading sector %d",
CDF_TOLE4(msa[k])));
goto out2;
}
}
mid = CDF_TOLE4((uint32_t)msa[nsatpersec]);
}
out:
sat->sat_len = i;
free(msa);
return 0;
out2:
free(msa);
out1:
free(sat->sat_tab);
return -1;
}
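/*
 * Follow a sector chain through the given allocation table and return
 * its length in sectors, or (size_t)-1 on a loop, an out-of-range
 * sector id, or an empty chain.
 */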
size_t
cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
{
size_t i, j;
cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size);
DPRINTF(("Chain:"));
for (j = i = 0; sid >= 0; i++, j++) {
DPRINTF((" %d", sid));
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Counting chain loop limit"));
errno = EFTYPE;
return (size_t)-1;
}
if (sid > maxsector) {
DPRINTF(("Sector %d > %d\n", sid, maxsector));
errno = EFTYPE;
return (size_t)-1;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (i == 0) {
DPRINTF((" none, sid: %d\n", sid));
return (size_t)-1;
}
DPRINTF(("\n"));
return i;
}
int
cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
size_t ss = CDF_SEC_SIZE(h), i, j;
ssize_t nr;
scn->sst_len = cdf_count_chain(sat, sid, ss);
scn->sst_dirlen = len;
if (scn->sst_len == (size_t)-1)
return -1;
scn->sst_tab = calloc(scn->sst_len, ss);
if (scn->sst_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read long sector chain loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= scn->sst_len) {
DPRINTF(("Out of bounds reading long sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
scn->sst_len));
errno = EFTYPE;
goto out;
}
if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h,
sid)) != (ssize_t)ss) {
if (i == scn->sst_len - 1 && nr > 0) {
/* Last sector might be truncated */
return 0;
}
DPRINTF(("Reading long sector chain %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(scn->sst_tab);
return -1;
}
int
cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1)
return -1;
scn->sst_tab = calloc(scn->sst_len, ss);
if (scn->sst_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sector chain loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= scn->sst_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n",
i, scn->sst_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h,
sid) != (ssize_t)ss) {
DPRINTF(("Reading short sector chain %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]);
}
return 0;
out:
free(scn->sst_tab);
return -1;
}
int
cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL)
return cdf_read_short_sector_chain(h, ssat, sst, sid, len,
scn);
else
return cdf_read_long_sector_chain(info, h, sat, sid, len, scn);
}
int
cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_dir_t *dir)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h), ns, nd;
char *buf;
cdf_secid_t sid = h->h_secid_first_directory;
ns = cdf_count_chain(sat, sid, ss);
if (ns == (size_t)-1)
return -1;
nd = ss / CDF_DIRECTORY_SIZE;
dir->dir_len = ns * nd;
dir->dir_tab = CAST(cdf_directory_t *,
calloc(dir->dir_len, sizeof(dir->dir_tab[0])));
if (dir->dir_tab == NULL)
return -1;
if ((buf = CAST(char *, malloc(ss))) == NULL) {
free(dir->dir_tab);
return -1;
}
for (j = i = 0; i < ns; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read dir loop limit"));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) {
DPRINTF(("Reading directory sector %d", sid));
goto out;
}
for (j = 0; j < nd; j++) {
cdf_unpack_dir(&dir->dir_tab[i * nd + j],
&buf[j * CDF_DIRECTORY_SIZE]);
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (NEED_SWAP)
for (i = 0; i < dir->dir_len; i++)
cdf_swap_dir(&dir->dir_tab[i]);
free(buf);
return 0;
out:
free(dir->dir_tab);
free(buf);
return -1;
}
int
cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_sat_t *ssat)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t sid = h->h_secid_first_sector_in_short_sat;
ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h));
if (ssat->sat_len == (size_t)-1)
return -1;
ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss));
if (ssat->sat_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sat sector loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= ssat->sat_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
ssat->sat_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) !=
(ssize_t)ss) {
DPRINTF(("Reading short sat sector %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(ssat->sat_tab);
return -1;
}
int
cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn,
const cdf_directory_t **root)
{
size_t i;
const cdf_directory_t *d;
*root = NULL;
for (i = 0; i < dir->dir_len; i++)
if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE)
break;
/* If it is not there, just fake it; some docs don't have it */
if (i == dir->dir_len)
goto out;
d = &dir->dir_tab[i];
*root = d;
/* If it is not there, just fake it; some docs don't have it */
if (d->d_stream_first_sector < 0)
goto out;
return cdf_read_long_sector_chain(info, h, sat,
d->d_stream_first_sector, d->d_size, scn);
out:
scn->sst_tab = NULL;
scn->sst_len = 0;
scn->sst_dirlen = 0;
return 0;
}
static int
cdf_namecmp(const char *d, const uint16_t *s, size_t l)
{
for (; l--; d++, s++)
if (*d != CDF_TOLE2(*s))
return (unsigned char)*d - CDF_TOLE2(*s);
return 0;
}
int
cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, cdf_stream_t *scn)
{
return cdf_read_user_stream(info, h, sat, ssat, sst, dir,
"\05SummaryInformation", scn);
}
int
cdf_read_user_stream(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, const char *name, cdf_stream_t *scn)
{
size_t i;
const cdf_directory_t *d;
size_t name_len = strlen(name) + 1;
for (i = dir->dir_len; i > 0; i--)
if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM &&
cdf_namecmp(name, dir->dir_tab[i - 1].d_name, name_len)
== 0)
break;
if (i == 0) {
DPRINTF(("Cannot find user stream `%s'\n", name));
errno = ESRCH;
return -1;
}
d = &dir->dir_tab[i - 1];
return cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, scn);
}
int
cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
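/*
 * Each property is described by an (id, offset) pair following the
 * section header; the offset is relative to the section start and
 * leads to the typed value, whose first dword is the type tag.
 */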
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
int
cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE4(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info,
count, &maxcount) == -1)
return -1;
return 0;
}
int
cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id)
{
return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-"
"%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0],
id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0],
id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4],
id->cl_six[5]);
}
static const struct {
uint32_t v;
const char *n;
} vn[] = {
{ CDF_PROPERTY_CODE_PAGE, "Code page" },
{ CDF_PROPERTY_TITLE, "Title" },
{ CDF_PROPERTY_SUBJECT, "Subject" },
{ CDF_PROPERTY_AUTHOR, "Author" },
{ CDF_PROPERTY_KEYWORDS, "Keywords" },
{ CDF_PROPERTY_COMMENTS, "Comments" },
{ CDF_PROPERTY_TEMPLATE, "Template" },
{ CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" },
{ CDF_PROPERTY_REVISION_NUMBER, "Revision Number" },
{ CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" },
{ CDF_PROPERTY_LAST_PRINTED, "Last Printed" },
{ CDF_PROPERTY_CREATE_TIME, "Create Time/Date" },
{ CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" },
{ CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" },
{ CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" },
{ CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" },
{ CDF_PROPERTY_THUMBNAIL, "Thumbnail" },
{ CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" },
{ CDF_PROPERTY_SECURITY, "Security" },
{ CDF_PROPERTY_LOCALE_ID, "Locale ID" },
};
int
cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p)
{
size_t i;
for (i = 0; i < __arraycount(vn); i++)
if (vn[i].v == p)
return snprintf(buf, bufsiz, "%s", vn[i].n);
return snprintf(buf, bufsiz, "0x%x", p);
}
int
cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts)
{
int len = 0;
int days, hours, mins, secs;
ts /= CDF_TIME_PREC;
secs = (int)(ts % 60);
ts /= 60;
mins = (int)(ts % 60);
ts /= 60;
hours = (int)(ts % 24);
ts /= 24;
days = (int)ts;
if (days) {
len += snprintf(buf + len, bufsiz - len, "%dd+", days);
if ((size_t)len >= bufsiz)
return len;
}
if (days || hours) {
len += snprintf(buf + len, bufsiz - len, "%.2d:", hours);
if ((size_t)len >= bufsiz)
return len;
}
len += snprintf(buf + len, bufsiz - len, "%.2d:", mins);
if ((size_t)len >= bufsiz)
return len;
len += snprintf(buf + len, bufsiz - len, "%.2d", secs);
return len;
}
#ifdef CDF_DEBUG
void
cdf_dump_header(const cdf_header_t *h)
{
size_t i;
#define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b)
#define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \
h->h_ ## b, 1 << h->h_ ## b)
DUMP("%d", revision);
DUMP("%d", version);
DUMP("0x%x", byte_order);
DUMP2("%d", sec_size_p2);
DUMP2("%d", short_sec_size_p2);
DUMP("%d", num_sectors_in_sat);
DUMP("%d", secid_first_directory);
DUMP("%d", min_size_standard_stream);
DUMP("%d", secid_first_sector_in_short_sat);
DUMP("%d", num_sectors_in_short_sat);
DUMP("%d", secid_first_sector_in_master_sat);
DUMP("%d", num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
(void)fprintf(stderr, "%35.35s[%.3zu] = %d\n",
"master_sat", i, h->h_master_sat[i]);
}
}
void
cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size)
{
size_t i, j, s = size / sizeof(cdf_secid_t);
for (i = 0; i < sat->sat_len; i++) {
(void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6"
SIZE_T_FORMAT "u: ", prefix, i, i * s);
for (j = 0; j < s; j++) {
(void)fprintf(stderr, "%5d, ",
CDF_TOLE4(sat->sat_tab[s * i + j]));
if ((j + 1) % 10 == 0)
(void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT
"u: ", i * s + j + 1);
}
(void)fprintf(stderr, "\n");
}
}
void
cdf_dump(void *v, size_t len)
{
size_t i, j;
unsigned char *p = v;
char abuf[16];
(void)fprintf(stderr, "%.4x: ", 0);
for (i = 0, j = 0; i < len; i++, p++) {
(void)fprintf(stderr, "%.2x ", *p);
abuf[j++] = isprint(*p) ? *p : '.';
if (j == 16) {
j = 0;
abuf[15] = '\0';
(void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ",
abuf, i + 1);
}
}
(void)fprintf(stderr, "\n");
}
void
cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst)
{
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
cdf_dump(sst->sst_tab, ss * sst->sst_len);
}
void
cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir)
{
size_t i, j;
cdf_directory_t *d;
char name[__arraycount(d->d_name)];
cdf_stream_t scn;
struct timespec ts;
static const char *types[] = { "empty", "user storage",
"user stream", "lockbytes", "property", "root storage" };
for (i = 0; i < dir->dir_len; i++) {
char buf[26];
d = &dir->dir_tab[i];
for (j = 0; j < sizeof(name); j++)
name[j] = (char)CDF_TOLE2(d->d_name[j]);
(void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n",
i, name);
if (d->d_type < __arraycount(types))
(void)fprintf(stderr, "Type: %s\n", types[d->d_type]);
else
(void)fprintf(stderr, "Type: %d\n", d->d_type);
(void)fprintf(stderr, "Color: %s\n",
d->d_color ? "black" : "red");
(void)fprintf(stderr, "Left child: %d\n", d->d_left_child);
(void)fprintf(stderr, "Right child: %d\n", d->d_right_child);
(void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags);
cdf_timestamp_to_timespec(&ts, d->d_created);
(void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec, buf));
cdf_timestamp_to_timespec(&ts, d->d_modified);
(void)fprintf(stderr, "Modified %s",
cdf_ctime(&ts.tv_sec, buf));
(void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector);
(void)fprintf(stderr, "Size %d\n", d->d_size);
switch (d->d_type) {
case CDF_DIR_TYPE_USER_STORAGE:
(void)fprintf(stderr, "Storage: %d\n", d->d_storage);
break;
case CDF_DIR_TYPE_USER_STREAM:
if (sst == NULL)
break;
if (cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, &scn) == -1) {
warn("Can't read stream for %s at %d len %d",
name, d->d_stream_first_sector, d->d_size);
break;
}
cdf_dump_stream(h, &scn);
free(scn.sst_tab);
break;
default:
break;
}
}
}
void
cdf_dump_property_info(const cdf_property_info_t *info, size_t count)
{
cdf_timestamp_t tp;
struct timespec ts;
char buf[64];
size_t i, j;
for (i = 0; i < count; i++) {
cdf_print_property_name(buf, sizeof(buf), info[i].pi_id);
(void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf);
switch (info[i].pi_type) {
case CDF_NULL:
break;
case CDF_SIGNED16:
(void)fprintf(stderr, "signed 16 [%hd]\n",
info[i].pi_s16);
break;
case CDF_SIGNED32:
(void)fprintf(stderr, "signed 32 [%d]\n",
info[i].pi_s32);
break;
case CDF_UNSIGNED32:
(void)fprintf(stderr, "unsigned 32 [%u]\n",
info[i].pi_u32);
break;
case CDF_FLOAT:
(void)fprintf(stderr, "float [%g]\n",
info[i].pi_f);
break;
case CDF_DOUBLE:
(void)fprintf(stderr, "double [%g]\n",
info[i].pi_d);
break;
case CDF_LENGTH32_STRING:
(void)fprintf(stderr, "string %u [%.*s]\n",
info[i].pi_str.s_len,
info[i].pi_str.s_len, info[i].pi_str.s_buf);
break;
case CDF_LENGTH32_WSTRING:
(void)fprintf(stderr, "string %u [",
info[i].pi_str.s_len);
for (j = 0; j < info[i].pi_str.s_len - 1; j++)
(void)fputc(info[i].pi_str.s_buf[j << 1], stderr);
(void)fprintf(stderr, "]\n");
break;
case CDF_FILETIME:
tp = info[i].pi_tp;
if (tp < 1000000000000000LL) {
cdf_print_elapsed_time(buf, sizeof(buf), tp);
(void)fprintf(stderr, "timestamp %s\n", buf);
} else {
char buf[26];
cdf_timestamp_to_timespec(&ts, tp);
(void)fprintf(stderr, "timestamp %s",
cdf_ctime(&ts.tv_sec, buf));
}
break;
case CDF_CLIPBOARD:
(void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32);
break;
default:
DPRINTF(("Don't know how to deal with %x\n",
info[i].pi_type));
break;
}
}
}
void
cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst)
{
char buf[128];
cdf_summary_info_header_t ssi;
cdf_property_info_t *info;
size_t count;
(void)&h;
if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1)
return;
(void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order);
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff,
ssi.si_os_version >> 8);
(void)fprintf(stderr, "Os %d\n", ssi.si_os);
cdf_print_classid(buf, sizeof(buf), &ssi.si_class);
(void)fprintf(stderr, "Class %s\n", buf);
(void)fprintf(stderr, "Count %d\n", ssi.si_count);
cdf_dump_property_info(info, count);
free(info);
}
#endif
#ifdef TEST
int
main(int argc, char *argv[])
{
int i;
cdf_header_t h;
cdf_sat_t sat, ssat;
cdf_stream_t sst, scn;
cdf_dir_t dir;
cdf_info_t info;
if (argc < 2) {
(void)fprintf(stderr, "Usage: %s <filename>\n", getprogname());
return -1;
}
info.i_buf = NULL;
info.i_len = 0;
for (i = 1; i < argc; i++) {
if ((info.i_fd = open(argv[1], O_RDONLY)) == -1)
err(1, "Cannot open `%s'", argv[1]);
if (cdf_read_header(&info, &h) == -1)
err(1, "Cannot read header");
#ifdef CDF_DEBUG
cdf_dump_header(&h);
#endif
if (cdf_read_sat(&info, &h, &sat) == -1)
err(1, "Cannot read sat");
#ifdef CDF_DEBUG
cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h));
#endif
if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1)
err(1, "Cannot read ssat");
#ifdef CDF_DEBUG
cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h));
#endif
if (cdf_read_dir(&info, &h, &sat, &dir) == -1)
err(1, "Cannot read dir");
if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1)
err(1, "Cannot read short stream");
#ifdef CDF_DEBUG
cdf_dump_stream(&h, &sst);
#endif
#ifdef CDF_DEBUG
cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir);
#endif
if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir,
&scn) == -1)
err(1, "Cannot read summary info");
#ifdef CDF_DEBUG
cdf_dump_summary_info(&h, &scn);
#endif
(void)close(info.i_fd);
}
return 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_2137_0 |
crossvul-cpp_data_good_1583_0 | /*
* linux/mm/memory.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
/*
* demand-loading started 01.12.91 - seems it is high on the list of
* things wanted, and it should be easy to implement. - Linus
*/
/*
* Ok, demand-loading was easy, shared pages a little bit trickier. Shared
* pages started 02.12.91, seems to work. - Linus.
*
* Tested sharing by executing about 30 /bin/sh: under the old kernel it
* would have taken more than the 6M I have free, but it worked well as
* far as I could see.
*
* Also corrected some "invalidate()"s - I wasn't doing enough of them.
*/
/*
* Real VM (paging to/from disk) started 18.12.91. Much more work and
* thought has to go into this. Oh, well..
* 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
* Found it. Everything seems to work now.
* 20.12.91 - Ok, making the swap-device changeable like the root.
*/
/*
* 05.04.94 - Multi-page memory management added for v1.1.
* Idea by Alex Bligh (alex@cconcepts.co.uk)
*
* 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
* (Gerhard.Wichert@pdb.siemens.de)
*
* Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
*/
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/dma-debug.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include "internal.h"
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;
EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif
/*
* A number of key systems in x86 including ioremap() rely on the assumption
* that high_memory defines the upper bound on direct map memory, then end
* of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
* highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
* and ZONE_HIGHMEM.
*/
void * high_memory;
EXPORT_SYMBOL(high_memory);
/*
* Randomize the address space (stacks, mmaps, brk, etc.).
*
* ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
* as ancient (libc5 based) binaries can segfault. )
*/
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
1;
#else
2;
#endif
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
return 1;
}
__setup("norandmaps", disable_randmaps);
unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
static int __init init_zero_pfn(void)
{
zero_pfn = page_to_pfn(ZERO_PAGE(0));
return 0;
}
core_initcall(init_zero_pfn);
#if defined(SPLIT_RSS_COUNTING)
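/*
 * With split RSS counting each task accumulates RSS deltas in its own
 * task_struct and folds them into the shared mm counters in batches,
 * avoiding cache-line contention on every fault.
 */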
void sync_mm_rss(struct mm_struct *mm)
{
int i;
for (i = 0; i < NR_MM_COUNTERS; i++) {
if (current->rss_stat.count[i]) {
add_mm_counter(mm, i, current->rss_stat.count[i]);
current->rss_stat.count[i] = 0;
}
}
current->rss_stat.events = 0;
}
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
struct task_struct *task = current;
if (likely(task->mm == mm))
task->rss_stat.count[member] += val;
else
add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH (64)
static void check_sync_rss_stat(struct task_struct *task)
{
if (unlikely(task != current))
return;
if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */
#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
static void check_sync_rss_stat(struct task_struct *task)
{
}
#endif /* SPLIT_RSS_COUNTING */
#ifdef HAVE_GENERIC_MMU_GATHER
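/*
 * Pages to be freed are queued in batches on the mmu_gather so the TLB
 * flush and the page freeing happen once per batch rather than once
 * per page; tlb_next_batch() grows the batch list on demand, up to
 * MAX_GATHER_BATCH_COUNT batches.
 */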
static int tlb_next_batch(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
batch = tlb->active;
if (batch->next) {
tlb->active = batch->next;
return 1;
}
if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
return 0;
batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
if (!batch)
return 0;
tlb->batch_count++;
batch->next = NULL;
batch->nr = 0;
batch->max = MAX_GATHER_BATCH;
tlb->active->next = batch;
tlb->active = batch;
return 1;
}
/* tlb_gather_mmu
* Called to initialize an (on-stack) mmu_gather structure for page-table
* tear-down from @mm. The @fullmm argument is used when @mm is without
* users and we're going to destroy the full address space (exit/execve).
*/
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
/* Is it from 0 to ~0? */
tlb->fullmm = !(start | (end+1));
tlb->need_flush_all = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
tlb->local.max = ARRAY_SIZE(tlb->__pages);
tlb->active = &tlb->local;
tlb->batch_count = 0;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb->batch = NULL;
#endif
__tlb_reset_range(tlb);
}
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
if (!tlb->end)
return;
tlb_flush(tlb);
mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
__tlb_reset_range(tlb);
}
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
tlb->active = &tlb->local;
}
void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
*/
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
struct mmu_gather_batch *batch, *next;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
for (batch = tlb->local.next; batch; batch = next) {
next = batch->next;
free_pages((unsigned long)batch, 0);
}
tlb->local.next = NULL;
}
/* __tlb_remove_page
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs. Returns the number of free page slots left.
* When out of page slots we must call tlb_flush_mmu().
*/
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->end);
batch = tlb->active;
batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return 0;
batch = tlb->active;
}
VM_BUG_ON_PAGE(batch->nr > batch->max, page);
return batch->max - batch->nr;
}
#endif /* HAVE_GENERIC_MMU_GATHER */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
* See the comment near struct mmu_table_batch.
*/
static void tlb_remove_table_smp_sync(void *arg)
{
/* Simply deliver the interrupt */
}
static void tlb_remove_table_one(void *table)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
* assumed to be actually RCU-freed.
*
* It is however sufficient for software page-table walkers that rely on
* IRQ disabling. See the comment near struct mmu_table_batch.
*/
smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
__tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
struct mmu_table_batch *batch;
int i;
batch = container_of(head, struct mmu_table_batch, rcu);
for (i = 0; i < batch->nr; i++)
__tlb_remove_table(batch->tables[i]);
free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
if (*batch) {
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
/*
* When there are fewer than two users of this mm there cannot be a
* concurrent page-table walk.
*/
if (atomic_read(&tlb->mm->mm_users) < 2) {
__tlb_remove_table(table);
return;
}
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
tlb_remove_table_one(table);
return;
}
(*batch)->nr = 0;
}
(*batch)->tables[(*batch)->nr++] = table;
if ((*batch)->nr == MAX_TABLE_BATCH)
tlb_table_flush(tlb);
}
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
pgtable_t token = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free_tlb(tlb, token, addr);
atomic_long_dec(&tlb->mm->nr_ptes);
}
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pmd_t *pmd;
unsigned long next;
unsigned long start;
start = addr;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
free_pte_range(tlb, pmd, addr);
} while (pmd++, addr = next, addr != end);
start &= PUD_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= PUD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
mm_dec_nr_pmds(tlb->mm);
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pud_t *pud;
unsigned long next;
unsigned long start;
start = addr;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
start &= PGDIR_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= PGDIR_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
}
/*
* This function frees user-level page tables of a process.
*/
void free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
unsigned long next;
/*
* The next few lines have given us lots of grief...
*
* Why are we testing PMD* at this top level? Because often
* there will be no work to do at all, and we'd prefer not to
* go all the way down to the bottom just to discover that.
*
* Why all these "- 1"s? Because 0 represents both the bottom
* of the address space and the top of it (using -1 for the
* top wouldn't help much: the masks would do the wrong thing).
* The rule is that addr 0 and floor 0 refer to the bottom of
* the address space, but end 0 and ceiling 0 refer to the top.
* Comparisons need to use "end - 1" and "ceiling - 1" (though
* that end 0 case should be mythical).
*
* Wherever addr is brought up or ceiling brought down, we must
* be careful to reject "the opposite 0" before it confuses the
* subsequent tests. But what about where end is brought down
* by PMD_SIZE below? No, end can't go down to 0 there.
*
* Whereas we round start (addr) and ceiling down, by different
* masks at different levels, in order to test whether a table
* now has no other vmas using it, so can be freed, we don't
* bother to round floor or end up - the tests don't need that.
*/
addr &= PMD_MASK;
if (addr < floor) {
addr += PMD_SIZE;
if (!addr)
return;
}
if (ceiling) {
ceiling &= PMD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
end -= PMD_SIZE;
if (addr > end - 1)
return;
pgd = pgd_offset(tlb->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
free_pud_range(tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
}
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long floor, unsigned long ceiling)
{
while (vma) {
struct vm_area_struct *next = vma->vm_next;
unsigned long addr = vma->vm_start;
/*
* Hide vma from rmap and truncate_pagecache before freeing
* pgtables
*/
unlink_anon_vmas(vma);
unlink_file_vma(vma);
if (is_vm_hugetlb_page(vma)) {
hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
} else {
/*
* Optimization: gather nearby vmas into one call down
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
&& !is_vm_hugetlb_page(next)) {
vma = next;
next = vma->vm_next;
unlink_anon_vmas(vma);
unlink_file_vma(vma);
}
free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
}
vma = next;
}
}
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long address)
{
spinlock_t *ptl;
pgtable_t new = pte_alloc_one(mm, address);
int wait_split_huge_page;
if (!new)
return -ENOMEM;
/*
* Ensure all pte setup (eg. pte page lock and page clearing) are
* visible before the pte is made visible to other CPUs by being
* put into page tables.
*
* The other side of the story is the pointer chasing in the page
* table walking code (when walking the page table without locking;
* ie. most of the time). Fortunately, these data accesses consist
* of a chain of data-dependent loads, meaning most CPUs (alpha
* being the notable exception) will already guarantee loads are
* seen in-order. See the alpha page table accessors for the
* smp_read_barrier_depends() barriers in page table walking code.
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
ptl = pmd_lock(mm, pmd);
wait_split_huge_page = 0;
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
atomic_long_inc(&mm->nr_ptes);
pmd_populate(mm, pmd, new);
new = NULL;
} else if (unlikely(pmd_trans_splitting(*pmd)))
wait_split_huge_page = 1;
spin_unlock(ptl);
if (new)
pte_free(mm, new);
if (wait_split_huge_page)
wait_split_huge_page(vma->anon_vma, pmd);
return 0;
}
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
pte_t *new = pte_alloc_one_kernel(&init_mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&init_mm.page_table_lock);
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
pmd_populate_kernel(&init_mm, pmd, new);
new = NULL;
} else
VM_BUG_ON(pmd_trans_splitting(*pmd));
spin_unlock(&init_mm.page_table_lock);
if (new)
pte_free_kernel(&init_mm, new);
return 0;
}
static inline void init_rss_vec(int *rss)
{
memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}
static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
int i;
if (current->mm == mm)
sync_mm_rss(mm);
for (i = 0; i < NR_MM_COUNTERS; i++)
if (rss[i])
add_mm_counter(mm, i, rss[i]);
}
/*
* This function is called to print an error when a bad pte
* is found. For example, we might have a PFN-mapped pte in
* a region that doesn't allow it.
*
* The calling function must still handle the error.
*/
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
pte_t pte, struct page *page)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pud_t *pud = pud_offset(pgd, addr);
pmd_t *pmd = pmd_offset(pud, addr);
struct address_space *mapping;
pgoff_t index;
static unsigned long resume;
static unsigned long nr_shown;
static unsigned long nr_unshown;
/*
* Allow a burst of 60 reports, then keep quiet for that minute;
* or allow a steady drip of one report per second.
*/
if (nr_shown == 60) {
if (time_before(jiffies, resume)) {
nr_unshown++;
return;
}
if (nr_unshown) {
printk(KERN_ALERT
"BUG: Bad page map: %lu messages suppressed\n",
nr_unshown);
nr_unshown = 0;
}
nr_shown = 0;
}
if (nr_shown++ == 0)
resume = jiffies + 60 * HZ;
mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
index = linear_page_index(vma, addr);
printk(KERN_ALERT
"BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
current->comm,
(long long)pte_val(pte), (long long)pmd_val(*pmd));
if (page)
dump_page(page, "bad pte");
printk(KERN_ALERT
"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
/*
* Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
*/
pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
vma->vm_file,
vma->vm_ops ? vma->vm_ops->fault : NULL,
vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
mapping ? mapping->a_ops->readpage : NULL);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
/*
* vm_normal_page -- This function gets the "struct page" associated with a pte.
*
* "Special" mappings do not wish to be associated with a "struct page" (either
* it doesn't exist, or it exists but they don't want to touch it). In this
* case, NULL is returned here. "Normal" mappings do have a struct page.
*
* There are 2 broad cases. Firstly, an architecture may define a pte_special()
* pte bit, in which case this function is trivial. Secondly, an architecture
* may not have a spare pte bit, which requires a more complicated scheme,
* described below.
*
* A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
* special mapping (even if there are underlying and valid "struct pages").
* COWed pages of a VM_PFNMAP are always normal.
*
* The way we recognize COWed pages within VM_PFNMAP mappings is through the
* rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
* set, and the vm_pgoff will point to the first PFN mapped: thus every special
* mapping will always honor the rule
*
* pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
*
* And for normal mappings this is false.
*
* This restricts such mappings to be a linear translation from virtual address
* to pfn. To get around this restriction, we allow arbitrary mappings so long
* as the vma is not a COW mapping; in that case, we know that all ptes are
* special (because none can have been COWed).
*
*
* In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
*
* VM_MIXEDMAP mappings can likewise contain memory with or without "struct
* page" backing, however the difference is that _all_ pages with a struct
* page (that is, those where pfn_valid is true) are refcounted and considered
* normal pages by the VM. The disadvantage is that pages are refcounted
* (which can be slower and simply not an option for some PFNMAP users). The
* advantage is that we don't have to follow the strict linearity rule of
* PFNMAP mappings in order to support COWable mappings.
*
*/
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
if (HAVE_PTE_SPECIAL) {
if (likely(!pte_special(pte)))
goto check_pfn;
if (vma->vm_ops && vma->vm_ops->find_special_page)
return vma->vm_ops->find_special_page(vma, addr);
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
return NULL;
if (!is_zero_pfn(pfn))
print_bad_pte(vma, addr, pte, NULL);
return NULL;
}
/* !HAVE_PTE_SPECIAL case follows: */
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
if (!pfn_valid(pfn))
return NULL;
goto out;
} else {
unsigned long off;
off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
if (!is_cow_mapping(vma->vm_flags))
return NULL;
}
}
if (is_zero_pfn(pfn))
return NULL;
check_pfn:
if (unlikely(pfn > highest_memmap_pfn)) {
print_bad_pte(vma, addr, pte, NULL);
return NULL;
}
/*
* NOTE! We still have PageReserved() pages in the page tables.
* eg. VDSO mappings can cause them to exist.
*/
out:
return pfn_to_page(pfn);
}
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
* covered by this vma.
*/
static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
unsigned long addr, int *rss)
{
unsigned long vm_flags = vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
swp_entry_t entry = pte_to_swp_entry(pte);
if (likely(!non_swap_entry(entry))) {
if (swap_duplicate(entry) < 0)
return entry.val;
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
if (list_empty(&dst_mm->mmlist))
list_add(&dst_mm->mmlist,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
rss[MM_SWAPENTS]++;
} else if (is_migration_entry(entry)) {
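/*
 * A migration entry still pins a page: account it, and downgrade
 * writable migration entries to read for COW mappings.
 */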
page = migration_entry_to_page(entry);
if (PageAnon(page))
rss[MM_ANONPAGES]++;
else
rss[MM_FILEPAGES]++;
if (is_write_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
*/
make_migration_entry_read(&entry);
pte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(*src_pte))
pte = pte_swp_mksoft_dirty(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
}
goto out_set_pte;
}
/*
* If it's a COW mapping, write protect it both
* in the parent and the child
*/
if (is_cow_mapping(vm_flags)) {
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = pte_wrprotect(pte);
}
/*
* If it's a shared mapping, mark it clean in
* the child
*/
if (vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
page_dup_rmap(page);
if (PageAnon(page))
rss[MM_ANONPAGES]++;
else
rss[MM_FILEPAGES]++;
}
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
return 0;
}
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pte_t *orig_src_pte, *orig_dst_pte;
pte_t *src_pte, *dst_pte;
spinlock_t *src_ptl, *dst_ptl;
int progress = 0;
int rss[NR_MM_COUNTERS];
swp_entry_t entry = (swp_entry_t){0};
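/*
 * copy_one_pte() can fail to duplicate a swap entry when the swap
 * count needs a continuation page; in that case we drop the locks,
 * allocate the continuation with GFP_KERNEL, and retry from "again".
 */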
again:
init_rss_vec(rss);
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
if (!dst_pte)
return -ENOMEM;
src_pte = pte_offset_map(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
orig_src_pte = src_pte;
orig_dst_pte = dst_pte;
arch_enter_lazy_mmu_mode();
do {
/*
* We are holding two locks at this point - either of them
* could generate latencies in another task on another CPU.
*/
if (progress >= 32) {
progress = 0;
if (need_resched() ||
spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
break;
}
if (pte_none(*src_pte)) {
progress++;
continue;
}
entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
vma, addr, rss);
if (entry.val)
break;
progress += 8;
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
spin_unlock(src_ptl);
pte_unmap(orig_src_pte);
add_mm_rss_vec(dst_mm, rss);
pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
if (entry.val) {
if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
return -ENOMEM;
progress = 0;
}
if (addr != end)
goto again;
return 0;
}
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pmd_t *src_pmd, *dst_pmd;
unsigned long next;
dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
if (!dst_pmd)
return -ENOMEM;
src_pmd = pmd_offset(src_pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*src_pmd)) {
int err;
VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
err = copy_huge_pmd(dst_mm, src_mm,
dst_pmd, src_pmd, addr, vma);
if (err == -ENOMEM)
return -ENOMEM;
if (!err)
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(src_pmd))
continue;
if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
vma, addr, next))
return -ENOMEM;
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
return 0;
}
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pud_t *src_pud, *dst_pud;
unsigned long next;
dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
if (!dst_pud)
return -ENOMEM;
src_pud = pud_offset(src_pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(src_pud))
continue;
if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
vma, addr, next))
return -ENOMEM;
} while (dst_pud++, src_pud++, addr = next, addr != end);
return 0;
}
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
struct vm_area_struct *vma)
{
pgd_t *src_pgd, *dst_pgd;
unsigned long next;
unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
bool is_cow;
int ret;
/*
* Don't copy ptes where a page fault will fill them correctly.
* Fork becomes much lighter when there are big shared or private
 * readonly mappings. The tradeoff is that, for the pages a child does
 * eventually touch, copy_page_range() would have been cheaper than
 * taking the later faults.
*/
if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
!vma->anon_vma)
return 0;
if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst_mm, src_mm, vma);
if (unlikely(vma->vm_flags & VM_PFNMAP)) {
/*
* We do not free on error cases below as remove_vma
* gets called on error from higher level routine
*/
ret = track_pfn_copy(vma);
if (ret)
return ret;
}
/*
* We need to invalidate the secondary MMU mappings only when
* there could be a permission downgrade on the ptes of the
* parent mm. And a permission downgrade will only happen if
* is_cow_mapping() returns true.
*/
is_cow = is_cow_mapping(vma->vm_flags);
mmun_start = addr;
mmun_end = end;
if (is_cow)
mmu_notifier_invalidate_range_start(src_mm, mmun_start,
mmun_end);
ret = 0;
dst_pgd = pgd_offset(dst_mm, addr);
src_pgd = pgd_offset(src_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(src_pgd))
continue;
if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
vma, addr, next))) {
ret = -ENOMEM;
break;
}
} while (dst_pgd++, src_pgd++, addr = next, addr != end);
if (is_cow)
mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
return ret;
}
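/*
 * Illustrative note (a minimal sketch of the call site, not verbatim):
 * the expected caller is fork()'s dup_mmap() in kernel/fork.c, which
 * walks the parent's vma list and for each vma does roughly:
 *
 *	retval = copy_page_range(mm, oldmm, mpnt);
 *
 * with "mm" the child's mm_struct, "oldmm" the parent's, and "mpnt"
 * the parent vma being duplicated. The skip-if-faultable logic above
 * then decides per-vma whether any ptes are copied at all.
 */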
static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
struct mm_struct *mm = tlb->mm;
int force_flush = 0;
int rss[NR_MM_COUNTERS];
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
swp_entry_t entry;
again:
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
if (pte_none(ptent)) {
continue;
}
if (pte_present(ptent)) {
struct page *page;
page = vm_normal_page(vma, addr, ptent);
if (unlikely(details) && page) {
/*
* unmap_shared_mapping_pages() wants to
* invalidate cache without truncating:
* unmap shared but keep private pages.
*/
if (details->check_mapping &&
details->check_mapping != page->mapping)
continue;
}
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
if (unlikely(!page))
continue;
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
if (pte_dirty(ptent)) {
force_flush = 1;
set_page_dirty(page);
}
if (pte_young(ptent) &&
likely(!(vma->vm_flags & VM_SEQ_READ)))
mark_page_accessed(page);
rss[MM_FILEPAGES]--;
}
page_remove_rmap(page);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
if (unlikely(!__tlb_remove_page(tlb, page))) {
force_flush = 1;
addr += PAGE_SIZE;
break;
}
continue;
}
/* If details->check_mapping, we leave swap entries. */
if (unlikely(details))
continue;
entry = pte_to_swp_entry(ptent);
if (!non_swap_entry(entry))
rss[MM_SWAPENTS]--;
else if (is_migration_entry(entry)) {
struct page *page;
page = migration_entry_to_page(entry);
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else
rss[MM_FILEPAGES]--;
}
if (unlikely(!free_swap_and_cache(entry)))
print_bad_pte(vma, addr, ptent, NULL);
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
} while (pte++, addr += PAGE_SIZE, addr != end);
add_mm_rss_vec(mm, rss);
arch_leave_lazy_mmu_mode();
/* Do the actual TLB flush before dropping ptl */
if (force_flush)
tlb_flush_mmu_tlbonly(tlb);
pte_unmap_unlock(start_pte, ptl);
/*
* If we forced a TLB flush (either due to running out of
* batch buffers or because we needed to flush dirty TLB
* entries before releasing the ptl), free the batched
* memory too. Restart if we didn't do everything.
*/
if (force_flush) {
force_flush = 0;
tlb_flush_mmu_free(tlb);
if (addr != end)
goto again;
}
return addr;
}
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pmd_t *pmd;
unsigned long next;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE) {
#ifdef CONFIG_DEBUG_VM
if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
__func__, addr, end,
vma->vm_start,
vma->vm_end);
BUG();
}
#endif
split_huge_page_pmd(vma, addr, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
goto next;
/* fall through */
}
/*
* Here there can be other concurrent MADV_DONTNEED or
* trans huge page faults running, and if the pmd is
* none or trans huge it can change under us. This is
* because MADV_DONTNEED holds the mmap_sem in read
* mode.
*/
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
goto next;
next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next:
cond_resched();
} while (pmd++, addr = next, addr != end);
return addr;
}
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pud_t *pud;
unsigned long next;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
next = zap_pmd_range(tlb, vma, pud, addr, next, details);
} while (pud++, addr = next, addr != end);
return addr;
}
static void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pgd_t *pgd;
unsigned long next;
if (details && !details->check_mapping)
details = NULL;
BUG_ON(addr >= end);
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
next = zap_pud_range(tlb, vma, pgd, addr, next, details);
} while (pgd++, addr = next, addr != end);
tlb_end_vma(tlb, vma);
}
static void unmap_single_vma(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr,
struct zap_details *details)
{
unsigned long start = max(vma->vm_start, start_addr);
unsigned long end;
if (start >= vma->vm_end)
return;
end = min(vma->vm_end, end_addr);
if (end <= vma->vm_start)
return;
if (vma->vm_file)
uprobe_munmap(vma, start, end);
if (unlikely(vma->vm_flags & VM_PFNMAP))
untrack_pfn(vma, 0, 0);
if (start != end) {
if (unlikely(is_vm_hugetlb_page(vma))) {
/*
 * It is undesirable to test vma->vm_file as it
 * should be non-null for a valid hugetlb area.
* However, vm_file will be NULL in the error
* cleanup path of mmap_region. When
* hugetlbfs ->mmap method fails,
* mmap_region() nullifies vma->vm_file
* before calling this function to clean up.
* Since no pte has actually been setup, it is
* safe to do nothing in this case.
*/
if (vma->vm_file) {
i_mmap_lock_write(vma->vm_file->f_mapping);
__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
i_mmap_unlock_write(vma->vm_file->f_mapping);
}
} else
unmap_page_range(tlb, vma, start, end, details);
}
}
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
* @tlb: address of the caller's struct mmu_gather
* @vma: the starting vma
* @start_addr: virtual address at which to start unmapping
* @end_addr: virtual address at which to end unmapping
*
* Unmap all pages in the vma list.
*
 * Only addresses between @start_addr and @end_addr will be unmapped.
*
* The VMA list must be sorted in ascending virtual address order.
*
* unmap_vmas() assumes that the caller will flush the whole unmapped address
* range after unmap_vmas() returns. So the only responsibility here is to
* ensure that any thus-far unmapped pages are flushed before unmap_vmas()
* drops the lock and schedules.
*/
void unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr)
{
struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
}
/**
* zap_page_range - remove user pages in a given range
* @vma: vm_area_struct holding the applicable pages
* @start: starting address of pages to zap
* @size: number of bytes to zap
* @details: details of shared cache invalidation
*
* Caller must protect the VMA list
*/
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long size, struct zap_details *details)
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
unsigned long end = start + size;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, start, end);
for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
unmap_single_vma(&tlb, vma, start, end, details);
mmu_notifier_invalidate_range_end(mm, start, end);
tlb_finish_mmu(&tlb, start, end);
}
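/*
 * Illustrative note: madvise(MADV_DONTNEED) is a typical caller,
 * discarding the ptes of a whole range in one call:
 *
 *	zap_page_range(vma, start, end - start, NULL);
 *
 * NULL details mean no check_mapping filter, so anonymous pages and
 * swap entries are zapped along with file pages.
 */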
/**
* zap_page_range_single - remove user pages in a given range
* @vma: vm_area_struct holding the applicable pages
* @address: starting address of pages to zap
* @size: number of bytes to zap
* @details: details of shared cache invalidation
*
* The range must fit into one VMA.
*/
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
unsigned long end = address + size;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, address, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, address, end);
unmap_single_vma(&tlb, vma, address, end, details);
mmu_notifier_invalidate_range_end(mm, address, end);
tlb_finish_mmu(&tlb, address, end);
}
/**
* zap_vma_ptes - remove ptes mapping the vma
* @vma: vm_area_struct holding ptes to be zapped
* @address: starting address of pages to zap
* @size: number of bytes to zap
*
* This function only unmaps ptes assigned to VM_PFNMAP vmas.
*
* The entire address range must be fully contained within the vma.
*
* Returns 0 if successful.
*/
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
if (address < vma->vm_start || address + size > vma->vm_end ||
!(vma->vm_flags & VM_PFNMAP))
return -1;
zap_page_range_single(vma, address, size, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
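/*
 * Example (a minimal sketch, "mydrv" names hypothetical): a driver
 * that populated a VM_PFNMAP vma can revoke the user's view of the
 * resource before tearing it down:
 *
 *	static void mydrv_revoke_mapping(struct vm_area_struct *vma)
 *	{
 *		if (zap_vma_ptes(vma, vma->vm_start,
 *				 vma->vm_end - vma->vm_start))
 *			pr_warn("mydrv: range not covered, nothing zapped\n");
 *	}
 *
 * Later user accesses fault again and can be handled, or refused, by
 * the driver's vm_ops->fault.
 */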
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
pud_t * pud = pud_alloc(mm, pgd, addr);
if (pud) {
pmd_t * pmd = pmd_alloc(mm, pud, addr);
if (pmd) {
VM_BUG_ON(pmd_trans_huge(*pmd));
return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
}
return NULL;
}
/*
* This is the old fallback for page remapping.
*
* For historical reasons, it only allows reserved pages. Only
* old drivers should use this, and they needed to mark their
* pages reserved for the old functions anyway.
*/
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page, pgprot_t prot)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte;
spinlock_t *ptl;
retval = -EINVAL;
if (PageAnon(page))
goto out;
retval = -ENOMEM;
flush_dcache_page(page);
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;
/* Ok, finally just insert the thing.. */
get_page(page);
inc_mm_counter_fast(mm, MM_FILEPAGES);
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
out_unlock:
pte_unmap_unlock(pte, ptl);
out:
return retval;
}
/**
* vm_insert_page - insert single page into user vma
* @vma: user vma to map to
* @addr: target user address of this page
* @page: source kernel page
*
* This allows drivers to insert individual pages they've allocated
* into a user vma.
*
* The page has to be a nice clean _individual_ kernel allocation.
* If you allocate a compound page, you need to have marked it as
* such (__GFP_COMP), or manually just split the page up yourself
* (see split_page()).
*
* NOTE! Traditionally this was done with "remap_pfn_range()" which
* took an arbitrary page protection parameter. This doesn't allow
* that. Your vma protection will have to be set up correctly, which
* means that if you want a shared writable mapping, you'd better
* ask for a shared writable mapping!
*
* The page does not need to be reserved.
*
* Usually this function is called from f_op->mmap() handler
* under mm->mmap_sem write-lock, so it can change vma->vm_flags.
* Caller must set VM_MIXEDMAP on vma if it wants to call this
* function from other places, for example from page-fault handler.
*/
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
{
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vma->vm_flags |= VM_MIXEDMAP;
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
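/*
 * Example (a minimal sketch, "mydev" is hypothetical): an mmap()
 * handler backing a one-page vma with a preallocated kernel page.
 * Because f_op->mmap runs under mmap_sem held for writing,
 * vm_insert_page() may set VM_MIXEDMAP on the vma itself:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *		return vm_insert_page(vma, vma->vm_start, dev->page);
 *	}
 */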
static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t prot)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte, entry;
spinlock_t *ptl;
retval = -ENOMEM;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;
/* Ok, finally just insert the thing.. */
entry = pte_mkspecial(pfn_pte(pfn, prot));
set_pte_at(mm, addr, pte, entry);
update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
retval = 0;
out_unlock:
pte_unmap_unlock(pte, ptl);
out:
return retval;
}
/**
* vm_insert_pfn - insert single pfn into user vma
* @vma: user vma to map to
* @addr: target user address of this page
* @pfn: source kernel pfn
*
* Similar to vm_insert_page, this allows drivers to insert individual pages
* they've allocated into a user vma. Same comments apply.
*
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return VM_FAULT_NOPAGE.
*
* vma cannot be a COW mapping.
*
* As this is called only for pages that do not currently exist, we
* do not need to flush old virtual caches or the TLB.
*/
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
int ret;
pgprot_t pgprot = vma->vm_page_prot;
/*
* Technically, architectures with pte_special can avoid all these
 * restrictions (same for remap_pfn_range). However, we would like
 * consistency in testing and feature parity among all architectures,
 * so we try to keep these invariants in place for everybody.
*/
BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (track_pfn_insert(vma, &pgprot, pfn))
return -EINVAL;
ret = insert_pfn(vma, addr, pfn, pgprot);
return ret;
}
EXPORT_SYMBOL(vm_insert_pfn);
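/*
 * Example (a minimal sketch, "mydev" fields hypothetical): a ->fault
 * handler for a VM_PFNMAP vma inserting the pfn for the faulting
 * offset and telling the core that no struct page is involved:
 *
 *	static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct mydev *dev = vma->vm_private_data;
 *		unsigned long pfn = dev->base_pfn + vmf->pgoff;
 *
 *		if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
 *				  pfn) == -ENOMEM)
 *			return VM_FAULT_OOM;
 *		return VM_FAULT_NOPAGE;
 *	}
 *
 * -EBUSY (a racing fault already populated the pte) is deliberately
 * folded into the VM_FAULT_NOPAGE return.
 */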
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
/*
* If we don't have pte special, then we have to use the pfn_valid()
* based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
* refcount the page if pfn_valid is true (hence insert_page rather
* than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 * without pte special, it would then be refcounted as a normal page.
*/
if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
struct page *page;
page = pfn_to_page(pfn);
return insert_page(vma, addr, page, vma->vm_page_prot);
}
return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);
/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
*/
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pte_t *pte;
spinlock_t *ptl;
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
do {
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
return 0;
}
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
pfn -= addr >> PAGE_SHIFT;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return -ENOMEM;
VM_BUG_ON(pmd_trans_huge(*pmd));
do {
next = pmd_addr_end(addr, end);
if (remap_pte_range(mm, pmd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
pfn -= addr >> PAGE_SHIFT;
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
if (remap_pmd_range(mm, pud, addr, next,
pfn + (addr >> PAGE_SHIFT), prot))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
* @addr: target user address to start at
* @pfn: physical address of kernel memory
* @size: size of map area
* @prot: page protection flags for this mapping
*
* Note: this is only safe if the mm semaphore is held when called.
*/
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
pgd_t *pgd;
unsigned long next;
unsigned long end = addr + PAGE_ALIGN(size);
struct mm_struct *mm = vma->vm_mm;
int err;
/*
* Physically remapped pages are special. Tell the
* rest of the world about it:
* VM_IO tells people not to look at these pages
* (accesses can have side effects).
* VM_PFNMAP tells the core MM that the base pages are just
* raw PFN mappings, and do not have a "struct page" associated
* with them.
* VM_DONTEXPAND
* Disable vma merging and expanding with mremap().
* VM_DONTDUMP
* Omit vma from core dump, even when VM_IO turned off.
*
* There's a horrible special case to handle copy-on-write
* behaviour that some programs depend on. We mark the "original"
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
* See vm_normal_page() for details.
*/
if (is_cow_mapping(vma->vm_flags)) {
if (addr != vma->vm_start || end != vma->vm_end)
return -EINVAL;
vma->vm_pgoff = pfn;
}
err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
if (err)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
do {
next = pgd_addr_end(addr, end);
err = remap_pud_range(mm, pgd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot);
if (err)
break;
} while (pgd++, addr = next, addr != end);
if (err)
untrack_pfn(vma, pfn, PAGE_ALIGN(size));
return err;
}
EXPORT_SYMBOL(remap_pfn_range);
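/*
 * Example (a minimal sketch, "dev->phys_base" is a hypothetical field
 * assumed page-aligned): the classic user is a driver mmap() handler
 * exposing a physical region in one go:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       dev->phys_base >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * f_op->mmap is invoked with mmap_sem held for writing, which
 * satisfies the locking note above.
 */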
/**
* vm_iomap_memory - remap memory to userspace
* @vma: user vma to map to
* @start: start of area
* @len: size of area
*
* This is a simplified io_remap_pfn_range() for common driver use. The
* driver just needs to give us the physical memory range to be mapped,
* we'll figure out the rest from the vma information.
*
 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
 * write-combining or similar caching behaviour.
*/
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
unsigned long vm_len, pfn, pages;
/* Check that the physical memory area passed in looks valid */
if (start + len < start)
return -EINVAL;
/*
* You *really* shouldn't map things that aren't page-aligned,
* but we've historically allowed it because IO memory might
* just have smaller alignment.
*/
len += start & ~PAGE_MASK;
pfn = start >> PAGE_SHIFT;
pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
if (pfn + pages < pfn)
return -EINVAL;
/* We start the mapping 'vm_pgoff' pages into the area */
if (vma->vm_pgoff > pages)
return -EINVAL;
pfn += vma->vm_pgoff;
pages -= vma->vm_pgoff;
/* Can we fit all of the mapping? */
vm_len = vma->vm_end - vma->vm_start;
if (vm_len >> PAGE_SHIFT > pages)
return -EINVAL;
/* Ok, let it rip */
return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);
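/*
 * Example (a minimal sketch, "dev" fields hypothetical): with
 * vm_iomap_memory() the handler above shrinks to a single call, and
 * vma->vm_pgoff plus the vma size are validated against the physical
 * area for us:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		return vm_iomap_memory(vma, dev->phys_base, dev->phys_len);
 *	}
 */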
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pte_t *pte;
int err;
pgtable_t token;
spinlock_t *uninitialized_var(ptl);
pte = (mm == &init_mm) ?
pte_alloc_kernel(pmd, addr) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
BUG_ON(pmd_huge(*pmd));
arch_enter_lazy_mmu_mode();
token = pmd_pgtable(*pmd);
do {
err = fn(pte++, token, addr, data);
if (err)
break;
} while (addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
if (mm != &init_mm)
pte_unmap_unlock(pte-1, ptl);
return err;
}
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pmd_t *pmd;
unsigned long next;
int err;
BUG_ON(pud_huge(*pud));
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
if (err)
break;
} while (pmd++, addr = next, addr != end);
return err;
}
static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pud_t *pud;
unsigned long next;
int err;
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
if (err)
break;
} while (pud++, addr = next, addr != end);
return err;
}
/*
* Scan a region of virtual memory, filling in page tables as necessary
* and calling a provided function on each leaf page table.
*/
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
unsigned long size, pte_fn_t fn, void *data)
{
pgd_t *pgd;
unsigned long next;
unsigned long end = addr + size;
int err;
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
do {
next = pgd_addr_end(addr, end);
err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
if (err)
break;
} while (pgd++, addr = next, addr != end);
return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);
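/*
 * Example (a minimal sketch): the callback matches pte_fn_t and is
 * handed each pte slot in turn; here a hypothetical debugging helper
 * counts present entries in a kernel virtual range:
 *
 *	static int count_present_pte(pte_t *pte, pgtable_t token,
 *				     unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	apply_to_page_range(&init_mm, addr, size, count_present_pte, &count);
 *
 * A non-zero return from the callback aborts the walk and is handed
 * back to the apply_to_page_range() caller. Note that missing page
 * tables are allocated along the way, not skipped.
 */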
/*
* handle_pte_fault chooses page fault handler according to an entry which was
* read non-atomically. Before making any commitment, on those architectures
* or configurations (e.g. i386 with PAE) which might give a mix of unmatched
* parts, do_swap_page must check under lock before unmapping the pte and
* proceeding (but do_wp_page is only called after already making such a check;
* and do_anonymous_page can safely check later on).
*/
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
pte_t *page_table, pte_t orig_pte)
{
int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
if (sizeof(pte_t) > sizeof(unsigned long)) {
spinlock_t *ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
same = pte_same(*page_table, orig_pte);
spin_unlock(ptl);
}
#endif
pte_unmap(page_table);
return same;
}
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
debug_dma_assert_idle(src);
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
if (unlikely(!src)) {
void *kaddr = kmap_atomic(dst);
void __user *uaddr = (void __user *)(va & PAGE_MASK);
/*
* This really shouldn't fail, because the page is there
* in the page tables. But it might just be unreadable,
* in which case we just give up and fill the result with
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
clear_page(kaddr);
kunmap_atomic(kaddr);
flush_dcache_page(dst);
} else
copy_user_highpage(dst, src, va, vma);
}
/*
* Notify the address space that the page is about to become writable so that
* it can prohibit this or wait for the page to get into an appropriate state.
*
* We do this without the lock held, so that it can sleep if it needs to.
*/
static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
unsigned long address)
{
struct vm_fault vmf;
int ret;
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = page->index;
vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
vmf.page = page;
vmf.cow_page = NULL;
ret = vma->vm_ops->page_mkwrite(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
return ret;
if (unlikely(!(ret & VM_FAULT_LOCKED))) {
lock_page(page);
if (!page->mapping) {
unlock_page(page);
return 0; /* retry */
}
ret |= VM_FAULT_LOCKED;
} else
VM_BUG_ON_PAGE(!PageLocked(page), page);
return ret;
}
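/*
 * Example (a minimal sketch, "myfs" hypothetical): a typical
 * ->page_mkwrite implementation locks the page, revalidates against a
 * concurrent truncate, and returns with the page still locked:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		struct page *page = vmf->page;
 *		struct inode *inode = file_inode(vma->vm_file);
 *
 *		lock_page(page);
 *		if (page->mapping != inode->i_mapping) {
 *			unlock_page(page);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		wait_for_stable_page(page);
 *		return VM_FAULT_LOCKED;
 *	}
 */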
/*
* Handle write page faults for pages that can be reused in the current vma
*
 * This can happen either because the mapping carries the VM_SHARED flag,
 * or because we hold the last remaining reference to the page. In either
* case, all we need to do here is to mark the page as writable and update
* any related book-keeping.
*/
static inline int wp_page_reuse(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
struct page *page, int page_mkwrite,
int dirty_shared)
__releases(ptl)
{
pte_t entry;
/*
* Clear the pages cpupid information as the existing
* information potentially belongs to a now completely
* unrelated process.
*/
if (page)
page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (ptep_set_access_flags(vma, address, page_table, entry, 1))
update_mmu_cache(vma, address, page_table);
pte_unmap_unlock(page_table, ptl);
if (dirty_shared) {
struct address_space *mapping;
int dirtied;
if (!page_mkwrite)
lock_page(page);
dirtied = set_page_dirty(page);
VM_BUG_ON_PAGE(PageAnon(page), page);
mapping = page->mapping;
unlock_page(page);
page_cache_release(page);
if ((dirtied || page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping
* but still dirty their pages
*/
balance_dirty_pages_ratelimited(mapping);
}
if (!page_mkwrite)
file_update_time(vma->vm_file);
}
return VM_FAULT_WRITE;
}
/*
* Handle the case of a page which we actually need to copy to a new page.
*
* Called with mmap_sem locked and the old page referenced, but
* without the ptl held.
*
* High level logic flow:
*
* - Allocate a page, copy the content of the old page to the new one.
 * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
* - Take the PTL. If the pte changed, bail out and release the allocated page
* - If the pte is still the way we remember it, update the page table and all
* relevant references. This includes dropping the reference the page-table
* held to the old page, as well as updating the rmap.
* - In any case, unlock the PTL and drop the reference we took to the old page.
*/
static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
pte_t orig_pte, struct page *old_page)
{
struct page *new_page = NULL;
spinlock_t *ptl = NULL;
pte_t entry;
int page_copied = 0;
const unsigned long mmun_start = address & PAGE_MASK; /* For mmu_notifiers */
const unsigned long mmun_end = mmun_start + PAGE_SIZE; /* For mmu_notifiers */
struct mem_cgroup *memcg;
if (unlikely(anon_vma_prepare(vma)))
goto oom;
if (is_zero_pfn(pte_pfn(orig_pte))) {
new_page = alloc_zeroed_user_highpage_movable(vma, address);
if (!new_page)
goto oom;
} else {
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, address, vma);
}
if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg))
goto oom_free_new;
__SetPageUptodate(new_page);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
* Re-check the pte - we dropped the lock
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
} else {
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
/*
* Clear the pte entry and flush it first, before updating the
* pte with the new entry. This will avoid a race condition
* seen in the presence of one thread doing SMC and another
* thread doing COW.
*/
ptep_clear_flush_notify(vma, address, page_table);
page_add_new_anon_rmap(new_page, vma, address);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
/*
* We call the notify macro here because, when using secondary
* mmu page tables (such as kvm shadow page tables), we want the
* new page to be mapped directly into the secondary page table.
*/
set_pte_at_notify(mm, address, page_table, entry);
update_mmu_cache(vma, address, page_table);
if (old_page) {
/*
* Only after switching the pte to the new page may
* we remove the mapcount here. Otherwise another
* process may come and find the rmap count decremented
* before the pte is switched to the new page, and
* "reuse" the old page writing into it while our pte
* here still points into it and can be read by other
* threads.
*
* The critical issue is to order this
 * page_remove_rmap with the ptep_clear_flush above.
* Those stores are ordered by (if nothing else,)
* the barrier present in the atomic_add_negative
* in page_remove_rmap.
*
* Then the TLB flush in ptep_clear_flush ensures that
* no process can access the old page before the
* decremented mapcount is visible. And the old page
* cannot be reused until after the decremented
* mapcount is visible. So transitively, TLBs to
* old page will be flushed before it can be reused.
*/
page_remove_rmap(old_page);
}
/* Free the old page.. */
new_page = old_page;
page_copied = 1;
} else {
mem_cgroup_cancel_charge(new_page, memcg);
}
if (new_page)
page_cache_release(new_page);
pte_unmap_unlock(page_table, ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (old_page) {
/*
* Don't let another task, with possibly unlocked vma,
* keep the mlocked page.
*/
if (page_copied && (vma->vm_flags & VM_LOCKED)) {
lock_page(old_page); /* LRU manipulation */
munlock_vma_page(old_page);
unlock_page(old_page);
}
page_cache_release(old_page);
}
return page_copied ? VM_FAULT_WRITE : 0;
oom_free_new:
page_cache_release(new_page);
oom:
if (old_page)
page_cache_release(old_page);
return VM_FAULT_OOM;
}
/*
* Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
* mapping
*/
static int wp_pfn_shared(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
pmd_t *pmd)
{
if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
struct vm_fault vmf = {
.page = NULL,
.pgoff = linear_page_index(vma, address),
.virtual_address = (void __user *)(address & PAGE_MASK),
.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
};
int ret;
pte_unmap_unlock(page_table, ptl);
ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
if (ret & VM_FAULT_ERROR)
return ret;
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
/*
* We might have raced with another page fault while we
* released the pte_offset_map_lock.
*/
if (!pte_same(*page_table, orig_pte)) {
pte_unmap_unlock(page_table, ptl);
return 0;
}
}
return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte,
NULL, 0, 0);
}
static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table,
pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte,
struct page *old_page)
__releases(ptl)
{
int page_mkwrite = 0;
page_cache_get(old_page);
/*
* Only catch write-faults on shared writable pages,
* read-only shared pages can get COWed by
* get_user_pages(.write=1, .force=1).
*/
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
int tmp;
pte_unmap_unlock(page_table, ptl);
tmp = do_page_mkwrite(vma, old_page, address);
if (unlikely(!tmp || (tmp &
(VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
page_cache_release(old_page);
return tmp;
}
/*
* Since we dropped the lock we need to revalidate
* the PTE as someone else may have changed it. If
* they did, we just return, as we can count on the
* MMU to tell us if they didn't also make it writable.
*/
page_table = pte_offset_map_lock(mm, pmd, address,
&ptl);
if (!pte_same(*page_table, orig_pte)) {
unlock_page(old_page);
pte_unmap_unlock(page_table, ptl);
page_cache_release(old_page);
return 0;
}
page_mkwrite = 1;
}
return wp_page_reuse(mm, vma, address, page_table, ptl,
orig_pte, old_page, page_mkwrite, 1);
}
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
* and decrementing the shared-page counter for the old page.
*
* Note that this routine assumes that the protection checks have been
* done by the caller (the low-level page fault routine in most cases).
* Thus we can safely just mark it writable once we've done any necessary
* COW.
*
* We also mark the page dirty at this point even though the page will
* change only once the write actually happens. This avoids a few races,
* and potentially makes it more efficient.
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), with pte both mapped and locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
spinlock_t *ptl, pte_t orig_pte)
__releases(ptl)
{
struct page *old_page;
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page) {
/*
* VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
* VM_PFNMAP VMA.
*
* We should not cow pages in a shared writeable mapping.
* Just mark the pages writable and/or call ops->pfn_mkwrite.
*/
if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))
return wp_pfn_shared(mm, vma, address, page_table, ptl,
orig_pte, pmd);
pte_unmap_unlock(page_table, ptl);
return wp_page_copy(mm, vma, address, page_table, pmd,
orig_pte, old_page);
}
/*
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
if (PageAnon(old_page) && !PageKsm(old_page)) {
if (!trylock_page(old_page)) {
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
lock_page(old_page);
page_table = pte_offset_map_lock(mm, pmd, address,
&ptl);
if (!pte_same(*page_table, orig_pte)) {
unlock_page(old_page);
pte_unmap_unlock(page_table, ptl);
page_cache_release(old_page);
return 0;
}
page_cache_release(old_page);
}
if (reuse_swap_page(old_page)) {
/*
* The page is all ours. Move it to our anon_vma so
* the rmap code will not search our parent or siblings.
* Protected against the rmap code by the page lock.
*/
page_move_anon_rmap(old_page, vma, address);
unlock_page(old_page);
return wp_page_reuse(mm, vma, address, page_table, ptl,
orig_pte, old_page, 0, 0);
}
unlock_page(old_page);
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))) {
return wp_page_shared(mm, vma, address, page_table, pmd,
ptl, orig_pte, old_page);
}
/*
* Ok, we need to copy. Oh, well..
*/
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
return wp_page_copy(mm, vma, address, page_table, pmd,
orig_pte, old_page);
}
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
unsigned long start_addr, unsigned long end_addr,
struct zap_details *details)
{
zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
}
static inline void unmap_mapping_range_tree(struct rb_root *root,
struct zap_details *details)
{
struct vm_area_struct *vma;
pgoff_t vba, vea, zba, zea;
vma_interval_tree_foreach(vma, root,
details->first_index, details->last_index) {
vba = vma->vm_pgoff;
vea = vba + vma_pages(vma) - 1;
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
zba = details->first_index;
if (zba < vba)
zba = vba;
zea = details->last_index;
if (zea > vea)
zea = vea;
unmap_mapping_range_vma(vma,
((zba - vba) << PAGE_SHIFT) + vma->vm_start,
((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
details);
}
}
/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified
* address_space corresponding to the specified page range in the underlying
* file.
*
* @mapping: the address space containing mmaps to be unmapped.
* @holebegin: byte in first page to unmap, relative to the start of
* the underlying file. This will be rounded down to a PAGE_SIZE
* boundary. Note that this is different from truncate_pagecache(), which
* must keep the partial page. In contrast, we must get rid of
* partial pages.
* @holelen: size of prospective hole in bytes. This will be rounded
* up to a PAGE_SIZE boundary. A holelen of zero truncates to the
* end of the file.
 * @even_cows: 1 when truncating a file, to unmap even private COWed pages;
 * 0 when invalidating pagecache, to avoid throwing away private data.
*/
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows)
{
struct zap_details details;
pgoff_t hba = holebegin >> PAGE_SHIFT;
pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Check for overflow. */
if (sizeof(holelen) > sizeof(hlen)) {
long long holeend =
(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (holeend & ~(long long)ULONG_MAX)
hlen = ULONG_MAX - hba + 1;
}
details.check_mapping = even_cows? NULL: mapping;
details.first_index = hba;
details.last_index = hba + hlen - 1;
if (details.last_index < details.first_index)
details.last_index = ULONG_MAX;
/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
i_mmap_lock_write(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
i_mmap_unlock_write(mapping);
}
EXPORT_SYMBOL(unmap_mapping_range);
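/*
 * Example: truncate_pagecache() performs the canonical double unmap,
 * with holebegin being the new size rounded up to a page boundary:
 *
 *	unmap_mapping_range(mapping, holebegin, 0, 1);
 *	truncate_inode_pages(mapping, newsize);
 *	unmap_mapping_range(mapping, holebegin, 0, 1);
 *
 * The first call is for efficiency, so truncate_inode_pages() does
 * fewer single-page unmaps; the second is for correctness, catching
 * private pages COWed while truncate_inode_pages() ran.
 */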
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
*
* We return with the mmap_sem locked or unlocked in the same cases
* as does filemap_fault().
*/
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
spinlock_t *ptl;
struct page *page, *swapcache;
struct mem_cgroup *memcg;
swp_entry_t entry;
pte_t pte;
int locked;
int exclusive = 0;
int ret = 0;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
goto out;
entry = pte_to_swp_entry(orig_pte);
if (unlikely(non_swap_entry(entry))) {
if (is_migration_entry(entry)) {
migration_entry_wait(mm, pmd, address);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
} else {
print_bad_pte(vma, address, orig_pte, NULL);
ret = VM_FAULT_SIGBUS;
}
goto out;
}
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry);
if (!page) {
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
/*
* Back out if somebody else faulted in this pte
* while we released the pte lock.
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte)))
ret = VM_FAULT_OOM;
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
goto unlock;
}
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(mm, PGMAJFAULT);
} else if (PageHWPoison(page)) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
* owner processes (which may be unknown at hwpoison time)
*/
ret = VM_FAULT_HWPOISON;
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
swapcache = page;
goto out_release;
}
swapcache = page;
locked = lock_page_or_retry(page, mm, flags);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (!locked) {
ret |= VM_FAULT_RETRY;
goto out_release;
}
/*
* Make sure try_to_free_swap or reuse_swap_page or swapoff did not
* release the swapcache from under us. The page pin, and pte_same
* test below, are not enough to exclude that. Even if it is still
* swapcache, we need to check that the page's swap has not changed.
*/
if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
goto out_page;
page = ksm_might_need_to_copy(page, vma, address);
if (unlikely(!page)) {
ret = VM_FAULT_OOM;
page = swapcache;
goto out_page;
}
if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) {
ret = VM_FAULT_OOM;
goto out_page;
}
/*
* Back out if somebody else already faulted in this pte.
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*page_table, orig_pte)))
goto out_nomap;
if (unlikely(!PageUptodate(page))) {
ret = VM_FAULT_SIGBUS;
goto out_nomap;
}
/*
* The page isn't present yet, go ahead with the fault.
*
* Be careful about the sequence of operations here.
* To get its accounting right, reuse_swap_page() must be called
* while the page is counted on swap but not yet in mapcount i.e.
* before page_add_anon_rmap() and swap_free(); try_to_free_swap()
* must be called after the swap_free(), or it will never succeed.
*/
inc_mm_counter_fast(mm, MM_ANONPAGES);
dec_mm_counter_fast(mm, MM_SWAPENTS);
pte = mk_pte(page, vma->vm_page_prot);
if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
flags &= ~FAULT_FLAG_WRITE;
ret |= VM_FAULT_WRITE;
exclusive = 1;
}
flush_icache_page(vma, page);
if (pte_swp_soft_dirty(orig_pte))
pte = pte_mksoft_dirty(pte);
set_pte_at(mm, address, page_table, pte);
if (page == swapcache) {
do_page_add_anon_rmap(page, vma, address, exclusive);
mem_cgroup_commit_charge(page, memcg, true);
} else { /* ksm created a completely new copy */
page_add_new_anon_rmap(page, vma, address);
mem_cgroup_commit_charge(page, memcg, false);
lru_cache_add_active_or_unevictable(page, vma);
}
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
if (page != swapcache) {
/*
 * Hold the lock to prevent the swap entry from being reused
 * until we take the PT lock for the pte_same() check
 * (to avoid false positives from pte_same). For further
 * safety, release the lock only after the swap_free(),
 * so that the swap count won't change under a
 * parallel locked swapcache.
*/
unlock_page(swapcache);
page_cache_release(swapcache);
}
if (flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
if (ret & VM_FAULT_ERROR)
ret &= VM_FAULT_ERROR;
goto out;
}
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
out:
return ret;
out_nomap:
mem_cgroup_cancel_charge(page, memcg);
pte_unmap_unlock(page_table, ptl);
out_page:
unlock_page(page);
out_release:
page_cache_release(page);
if (page != swapcache) {
unlock_page(swapcache);
page_cache_release(swapcache);
}
return ret;
}
/*
* This is like a special single-page "expand_{down|up}wards()",
* except we must first make sure that 'address{-|+}PAGE_SIZE'
* doesn't hit another vma.
*/
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
address &= PAGE_MASK;
if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
struct vm_area_struct *prev = vma->vm_prev;
/*
* Is there a mapping abutting this one below?
*
* That's only ok if it's the same stack mapping
* that has gotten split..
*/
if (prev && prev->vm_end == address)
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
return expand_downwards(vma, address - PAGE_SIZE);
}
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
struct vm_area_struct *next = vma->vm_next;
/* As VM_GROWSDOWN but s/below/above/ */
if (next && next->vm_start == address + PAGE_SIZE)
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
return expand_upwards(vma, address + PAGE_SIZE);
}
return 0;
}
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
struct mem_cgroup *memcg;
struct page *page;
spinlock_t *ptl;
pte_t entry;
pte_unmap(page_table);
/* File mapping without ->vm_ops ? */
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
return VM_FAULT_SIGSEGV;
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
vma->vm_page_prot));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto unlock;
goto setpte;
}
/* Allocate our own private page. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
if (!page)
goto oom;
if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
goto oom_free_page;
/*
* The memory barrier inside __SetPageUptodate makes sure that
 * preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
entry = mk_pte(page, vma->vm_page_prot);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto release;
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
mem_cgroup_commit_charge(page, memcg, false);
lru_cache_add_active_or_unevictable(page, vma);
setpte:
set_pte_at(mm, address, page_table, entry);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
release:
mem_cgroup_cancel_charge(page, memcg);
page_cache_release(page);
goto unlock;
oom_free_page:
page_cache_release(page);
oom:
return VM_FAULT_OOM;
}
/*
* The mmap_sem must have been held on entry, and may have been
* released depending on flags and vma->vm_ops->fault() return value.
* See filemap_fault() and __lock_page_retry().
*/
static int __do_fault(struct vm_area_struct *vma, unsigned long address,
pgoff_t pgoff, unsigned int flags,
struct page *cow_page, struct page **page)
{
struct vm_fault vmf;
int ret;
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = pgoff;
vmf.flags = flags;
vmf.page = NULL;
vmf.cow_page = cow_page;
ret = vma->vm_ops->fault(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
if (!vmf.page)
goto out;
if (unlikely(PageHWPoison(vmf.page))) {
if (ret & VM_FAULT_LOCKED)
unlock_page(vmf.page);
page_cache_release(vmf.page);
return VM_FAULT_HWPOISON;
}
if (unlikely(!(ret & VM_FAULT_LOCKED)))
lock_page(vmf.page);
else
VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
out:
*page = vmf.page;
return ret;
}
/**
 * do_set_pte - set up a new PTE entry for the given page and add a reverse page mapping.
*
* @vma: virtual memory area
* @address: user virtual address
* @page: page to map
* @pte: pointer to target page table entry
 * @write: true if the new entry is writable
 * @anon: true if it's an anonymous page
*
* Caller must hold page table lock relevant for @pte.
*
 * Target users are the page fault handler itself and implementations of
 * vm_ops->map_pages.
*/
void do_set_pte(struct vm_area_struct *vma, unsigned long address,
struct page *page, pte_t *pte, bool write, bool anon)
{
pte_t entry;
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (anon) {
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
} else {
inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
page_add_file_rmap(page);
}
set_pte_at(vma->vm_mm, address, pte, entry);
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, pte);
}
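/*
 * Example: filemap_map_pages() is the canonical ->map_pages user; for
 * each suitable pagecache page it finds it does, in essence:
 *
 *	do_set_pte(vma, address, page, pte, false, false);
 *
 * i.e. a read-only file mapping, leaving any later write fault to the
 * write-fault paths elsewhere in this file.
 */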
static unsigned long fault_around_bytes __read_mostly =
rounddown_pow_of_two(65536);
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
{
*val = fault_around_bytes;
return 0;
}
/*
 * fault_around_bytes must be rounded down to the nearest page order, as
 * that is what do_fault_around() expects to see.
*/
static int fault_around_bytes_set(void *data, u64 val)
{
if (val / PAGE_SIZE > PTRS_PER_PTE)
return -EINVAL;
if (val > PAGE_SIZE)
fault_around_bytes = rounddown_pow_of_two(val);
else
fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
static int __init fault_around_debugfs(void)
{
void *ret;
ret = debugfs_create_file("fault_around_bytes", 0644, NULL, NULL,
&fault_around_bytes_fops);
if (!ret)
		pr_warn("Failed to create fault_around_bytes in debugfs\n");
return 0;
}
late_initcall(fault_around_debugfs);
#endif
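/*
 * The file above is created with a NULL parent, i.e. at the debugfs
 * root, so the knob is normally visible as
 * /sys/kernel/debug/fault_around_bytes. For example, writing PAGE_SIZE
 * effectively disables faultaround (on a 4K-page system):
 *
 *	echo 4096 > /sys/kernel/debug/fault_around_bytes
 *
 * since do_read_fault() below only calls do_fault_around() when more
 * than one page is covered.
 */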
/*
 * do_fault_around() tries to map a few pages around the fault address. The hope
* is that the pages will be needed soon and this will lower the number of
* faults to handle.
*
* It uses vm_ops->map_pages() to map the pages, which skips the page if it's
* not ready to be mapped: not up-to-date, locked, etc.
*
* This function is called with the page table lock taken. In the split ptlock
 * case the page table lock protects only those entries which belong to
* the page table corresponding to the fault address.
*
* This function doesn't cross the VMA boundaries, in order to call map_pages()
* only once.
*
 * fault_around_bytes defines how many bytes we'll try to map.
 * do_fault_around() expects it to be a power of two, between PAGE_SIZE
 * and PTRS_PER_PTE * PAGE_SIZE.
*
 * The virtual address of the area that we map is naturally aligned to
 * fault_around_bytes (and therefore to page order). This way it's
 * easier to guarantee that we don't cross page table boundaries.
*/
static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pgoff_t pgoff, unsigned int flags)
{
unsigned long start_addr, nr_pages, mask;
pgoff_t max_pgoff;
struct vm_fault vmf;
int off;
nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
start_addr = max(address & mask, vma->vm_start);
off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
pte -= off;
pgoff -= off;
/*
 * max_pgoff is either the end of the page table, the end of the vma,
 * or nr_pages from pgoff, whichever is nearest.
*/
max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
PTRS_PER_PTE - 1;
max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1,
pgoff + nr_pages - 1);
/* Check if it makes any sense to call ->map_pages */
while (!pte_none(*pte)) {
if (++pgoff > max_pgoff)
return;
start_addr += PAGE_SIZE;
if (start_addr >= vma->vm_end)
return;
pte++;
}
vmf.virtual_address = (void __user *) start_addr;
vmf.pte = pte;
vmf.pgoff = pgoff;
vmf.max_pgoff = max_pgoff;
vmf.flags = flags;
vma->vm_ops->map_pages(vma, &vmf);
}
static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
struct page *fault_page;
spinlock_t *ptl;
pte_t *pte;
int ret = 0;
/*
 * Let's call ->map_pages() first and use ->fault() as a fallback
 * if the page at that offset is not ready to be mapped (cold page
 * cache, for example).
*/
if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
do_fault_around(vma, address, pte, pgoff, flags);
if (!pte_same(*pte, orig_pte))
goto unlock_out;
pte_unmap_unlock(pte, ptl);
}
ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*pte, orig_pte))) {
pte_unmap_unlock(pte, ptl);
unlock_page(fault_page);
page_cache_release(fault_page);
return ret;
}
do_set_pte(vma, address, fault_page, pte, false, false);
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
return ret;
}
static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
struct page *fault_page, *new_page;
struct mem_cgroup *memcg;
spinlock_t *ptl;
pte_t *pte;
int ret;
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (!new_page)
return VM_FAULT_OOM;
if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) {
page_cache_release(new_page);
return VM_FAULT_OOM;
}
ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
if (fault_page)
copy_user_highpage(new_page, fault_page, address, vma);
__SetPageUptodate(new_page);
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*pte, orig_pte))) {
pte_unmap_unlock(pte, ptl);
if (fault_page) {
unlock_page(fault_page);
page_cache_release(fault_page);
} else {
/*
* The fault handler has no page to lock, so it holds
* i_mmap_lock for read to protect against truncate.
*/
i_mmap_unlock_read(vma->vm_file->f_mapping);
}
goto uncharge_out;
}
do_set_pte(vma, address, new_page, pte, true, true);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
if (fault_page) {
unlock_page(fault_page);
page_cache_release(fault_page);
} else {
/*
* The fault handler has no page to lock, so it holds
* i_mmap_lock for read to protect against truncate.
*/
i_mmap_unlock_read(vma->vm_file->f_mapping);
}
return ret;
uncharge_out:
mem_cgroup_cancel_charge(new_page, memcg);
page_cache_release(new_page);
return ret;
}
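/*
 * Handle a write fault on a shared (MAP_SHARED) file-backed VMA: fault
 * the page in, give the filesystem a chance to prepare it for writing
 * via ->page_mkwrite(), map it writable, then dirty the page and
 * balance dirty pages against the backing address_space.
 */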
static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
struct page *fault_page;
struct address_space *mapping;
spinlock_t *ptl;
pte_t *pte;
int dirtied = 0;
int ret, tmp;
ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
/*
* Check if the backing address space wants to know that the page is
* about to become writable
*/
if (vma->vm_ops->page_mkwrite) {
unlock_page(fault_page);
tmp = do_page_mkwrite(vma, fault_page, address);
if (unlikely(!tmp ||
(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
page_cache_release(fault_page);
return tmp;
}
}
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*pte, orig_pte))) {
pte_unmap_unlock(pte, ptl);
unlock_page(fault_page);
page_cache_release(fault_page);
return ret;
}
do_set_pte(vma, address, fault_page, pte, true, false);
pte_unmap_unlock(pte, ptl);
if (set_page_dirty(fault_page))
dirtied = 1;
/*
* Take a local copy of the address_space - page.mapping may be zeroed
* by truncate after unlock_page(). The address_space itself remains
* pinned by vma->vm_file's reference. We rely on unlock_page()'s
* release semantics to prevent the compiler from undoing this copying.
*/
mapping = fault_page->mapping;
unlock_page(fault_page);
if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping but still
* dirty their pages
*/
balance_dirty_pages_ratelimited(mapping);
}
if (!vma->vm_ops->page_mkwrite)
file_update_time(vma->vm_file);
return ret;
}
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults).
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pte_unmap(page_table);
/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
if (!vma->vm_ops->fault)
return VM_FAULT_SIGBUS;
if (!(flags & FAULT_FLAG_WRITE))
return do_read_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte);
if (!(vma->vm_flags & VM_SHARED))
return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte);
return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
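/*
 * Account a NUMA hinting fault and take a reference on the page.
 * Returns the node the page should migrate to according to the memory
 * policy, or -1 if it is already correctly placed.
 */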
static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int page_nid,
int *flags)
{
get_page(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
if (page_nid == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
*flags |= TNF_FAULT_LOCAL;
}
return mpol_misplaced(page, vma, addr);
}
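/*
 * Handle a NUMA hinting fault: the PTE was made PROT_NONE by the NUMA
 * balancer. Restore the original permissions and, if the page is
 * misplaced, try to migrate it to the node that is actually using it.
 */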
static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
{
struct page *page = NULL;
spinlock_t *ptl;
int page_nid = -1;
int last_cpupid;
int target_nid;
bool migrated = false;
bool was_writable = pte_write(pte);
int flags = 0;
/* A PROT_NONE fault should not end up here */
BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
/*
* The "pte" at this point cannot be used safely without
* validation through pte_unmap_same(). It's of NUMA type but
* the pfn may be corrupted if the read is non-atomic.
*
* We can safely just do a "set_pte_at()", because the old
* page table entry is not accessible, so there would be no
* concurrent hardware modifications to the PTE.
*/
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*ptep, pte))) {
pte_unmap_unlock(ptep, ptl);
goto out;
}
/* Make it present again */
pte = pte_modify(pte, vma->vm_page_prot);
pte = pte_mkyoung(pte);
if (was_writable)
pte = pte_mkwrite(pte);
set_pte_at(mm, addr, ptep, pte);
update_mmu_cache(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (!page) {
pte_unmap_unlock(ptep, ptl);
return 0;
}
/*
* Avoid grouping on RO pages in general. RO pages shouldn't hurt as
* much anyway since they can be in shared cache state. This misses
* the case where a mapping is writable but the process never writes
* to it but pte_write gets cleared during protection updates and
* pte_dirty has unpredictable behaviour between PTE scan updates,
* background writeback, dirty balancing and application behaviour.
*/
if (!(vma->vm_flags & VM_WRITE))
flags |= TNF_NO_GROUP;
/*
* Flag if the page is shared between multiple address spaces. This
* is later used when determining whether to group tasks together
*/
if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
flags |= TNF_SHARED;
last_cpupid = page_cpupid_last(page);
page_nid = page_to_nid(page);
target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags);
pte_unmap_unlock(ptep, ptl);
if (target_nid == -1) {
put_page(page);
goto out;
}
/* Migrate to the requested node */
migrated = migrate_misplaced_page(page, vma, target_nid);
if (migrated) {
page_nid = target_nid;
flags |= TNF_MIGRATED;
} else
flags |= TNF_MIGRATE_FAIL;
out:
if (page_nid != -1)
task_numa_fault(last_cpupid, page_nid, 1, flags);
return 0;
}
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most
* RISC architectures). The early dirtying is also good on the i386.
*
* There is also a hook called "update_mmu_cache()" that architectures
* with external mmu caches can use to update those (ie the Sparc or
* PowerPC hashed page tables that act as extended TLBs).
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags)
{
pte_t entry;
spinlock_t *ptl;
/*
* some architectures can have larger ptes than wordsize,
* e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y,
* so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses.
* The code below just needs a consistent view for the ifs and
* we later double check anyway with the ptl lock held. So here
* a barrier will do.
*/
entry = *pte;
barrier();
if (!pte_present(entry)) {
if (pte_none(entry)) {
if (vma->vm_ops)
return do_fault(mm, vma, address, pte, pmd,
flags, entry);
return do_anonymous_page(mm, vma, address, pte, pmd,
flags);
}
return do_swap_page(mm, vma, address,
pte, pmd, flags, entry);
}
if (pte_protnone(entry))
return do_numa_page(mm, vma, address, entry, pte, pmd);
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*pte, entry)))
goto unlock;
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry))
return do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
update_mmu_cache(vma, address, pte);
} else {
/*
* This is needed only for protection faults but the arch code
* is not yet telling us if this is a protection fault or not.
* This still avoids useless tlb flushes for .text page faults
* with threads.
*/
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
}
/*
* By the time we get here, we already hold the mm semaphore
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
return VM_FAULT_OOM;
pmd = pmd_alloc(mm, pud, address);
if (!pmd)
return VM_FAULT_OOM;
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
int ret = VM_FAULT_FALLBACK;
if (!vma->vm_ops)
ret = do_huge_pmd_anonymous_page(mm, vma, address,
pmd, flags);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
pmd_t orig_pmd = *pmd;
int ret;
barrier();
if (pmd_trans_huge(orig_pmd)) {
unsigned int dirty = flags & FAULT_FLAG_WRITE;
/*
* If the pmd is splitting, return and retry the
* fault. Alternative: wait until the split
* is done, and goto retry.
*/
if (pmd_trans_splitting(orig_pmd))
return 0;
if (pmd_protnone(orig_pmd))
return do_huge_pmd_numa_page(mm, vma, address,
orig_pmd, pmd);
if (dirty && !pmd_write(orig_pmd)) {
ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
orig_pmd);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
huge_pmd_set_accessed(mm, vma, address, pmd,
orig_pmd, dirty);
return 0;
}
}
}
/*
* Use __pte_alloc instead of pte_alloc_map, because we can't
* run pte_offset_map on the pmd, if a huge pmd could
* materialize from under us from a different thread.
*/
if (unlikely(pmd_none(*pmd)) &&
unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
/* if a huge pmd materialized from under us, just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
* from under us anymore at this point because we hold the mmap_sem
* read mode and khugepaged takes it in write mode. So now it's
* safe to run pte_offset_map().
*/
pte = pte_offset_map(pmd, address);
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
/*
* By the time we get here, we already hold the mm semaphore
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
int ret;
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
mem_cgroup_count_vm_event(mm, PGFAULT);
/* do counter updates before entering really critical section. */
check_sync_rss_stat(current);
/*
* Enable the memcg OOM handling for faults triggered in user
* space. Kernel faults are handled more gracefully.
*/
if (flags & FAULT_FLAG_USER)
mem_cgroup_oom_enable();
ret = __handle_mm_fault(mm, vma, address, flags);
if (flags & FAULT_FLAG_USER) {
mem_cgroup_oom_disable();
/*
* The task may have entered a memcg OOM situation but
* if the allocation error was handled gracefully (no
* VM_FAULT_OOM), there is no need to kill anything.
* Just clean up the OOM state peacefully.
*/
if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
mem_cgroup_oom_synchronize(false);
}
return ret;
}
EXPORT_SYMBOL_GPL(handle_mm_fault);
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
* We've already handled the fast-path in-line.
*/
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
pud_t *new = pud_alloc_one(mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&mm->page_table_lock);
if (pgd_present(*pgd)) /* Another has populated it */
pud_free(mm, new);
else
pgd_populate(mm, pgd, new);
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
/*
* Allocate page middle directory.
* We've already handled the fast-path in-line.
*/
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
pmd_t *new = pmd_alloc_one(mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
if (!pud_present(*pud)) {
mm_inc_nr_pmds(mm);
pud_populate(mm, pud, new);
} else /* Another has populated it */
pmd_free(mm, new);
#else
if (!pgd_present(*pud)) {
mm_inc_nr_pmds(mm);
pgd_populate(mm, pud, new);
} else /* Another has populated it */
pmd_free(mm, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
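/*
 * Walk the page tables down to the PTE mapping @address and return it
 * locked through @ptepp/@ptlp. Fails with -EINVAL on missing or huge
 * entries and on non-present PTEs.
 */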
static int __follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep;
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
goto out;
pud = pud_offset(pgd, address);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
goto out;
pmd = pmd_offset(pud, address);
VM_BUG_ON(pmd_trans_huge(*pmd));
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
goto out;
/* We cannot handle huge page PFN maps. Luckily they don't exist. */
if (pmd_huge(*pmd))
goto out;
ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
if (!ptep)
goto out;
if (!pte_present(*ptep))
goto unlock;
*ptepp = ptep;
return 0;
unlock:
pte_unmap_unlock(ptep, *ptlp);
out:
return -EINVAL;
}
static inline int follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
{
int res;
/* (void) is needed to make gcc happy */
(void) __cond_lock(*ptlp,
!(res = __follow_pte(mm, address, ptepp, ptlp)));
return res;
}
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
* @address: user virtual address
* @pfn: location to store found PFN
*
* Only IO mappings and raw PFN mappings are allowed.
*
* Returns zero and the pfn at @pfn on success, -ve otherwise.
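*
* An illustrative (hypothetical) caller, with mmap_sem held for read:
*
*	unsigned long pfn;
*	struct vm_area_struct *vma = find_vma(mm, addr);
*
*	if (vma && follow_pfn(vma, addr, &pfn) == 0)
*		pr_debug("addr %lx -> pfn %lx\n", addr, pfn);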
*/
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn)
{
int ret = -EINVAL;
spinlock_t *ptl;
pte_t *ptep;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
return ret;
ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
if (ret)
return ret;
*pfn = pte_pfn(*ptep);
pte_unmap_unlock(ptep, ptl);
return 0;
}
EXPORT_SYMBOL(follow_pfn);
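/*
 * follow_phys() resolves a user virtual address in a VM_IO/VM_PFNMAP
 * mapping to a physical address and page protection; it backs
 * generic_access_phys() below, which temporarily ioremaps the page to
 * service ptrace-style reads and writes.
 */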
#ifdef CONFIG_HAVE_IOREMAP_PROT
int follow_phys(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
unsigned long *prot, resource_size_t *phys)
{
int ret = -EINVAL;
pte_t *ptep, pte;
spinlock_t *ptl;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
goto out;
if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
goto out;
pte = *ptep;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
*prot = pgprot_val(pte_pgprot(pte));
*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
ret = 0;
unlock:
pte_unmap_unlock(ptep, ptl);
out:
return ret;
}
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write)
{
resource_size_t phys_addr;
unsigned long prot = 0;
void __iomem *maddr;
int offset = addr & (PAGE_SIZE-1);
if (follow_phys(vma, addr, write, &prot, &phys_addr))
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (write)
memcpy_toio(maddr + offset, buf, len);
else
memcpy_fromio(buf, maddr + offset, len);
iounmap(maddr);
return len;
}
EXPORT_SYMBOL_GPL(generic_access_phys);
#endif
/*
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
unsigned long addr, void *buf, int len, int write)
{
struct vm_area_struct *vma;
void *old_buf = buf;
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, ret, offset;
void *maddr;
struct page *page = NULL;
ret = get_user_pages(tsk, mm, addr, 1,
write, 1, &page, &vma);
if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
break;
#else
/*
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.
*/
vma = find_vma(mm, addr);
if (!vma || vma->vm_start > addr)
break;
if (vma->vm_ops && vma->vm_ops->access)
ret = vma->vm_ops->access(vma, addr, buf,
len, write);
if (ret <= 0)
break;
bytes = ret;
#endif
} else {
bytes = len;
offset = addr & (PAGE_SIZE-1);
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
maddr = kmap(page);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
set_page_dirty_lock(page);
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
kunmap(page);
page_cache_release(page);
}
len -= bytes;
buf += bytes;
addr += bytes;
}
up_read(&mm->mmap_sem);
return buf - old_buf;
}
/**
* access_remote_vm - access another process' address space
* @mm: the mm_struct of the target address space
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
* @write: whether the access is a write
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, int write)
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
/*
* Access another process' address space.
* The source/target buffer must be in kernel space.
* Do not walk the page tables directly; use get_user_pages().
*/
int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, int write)
{
struct mm_struct *mm;
int ret;
mm = get_task_mm(tsk);
if (!mm)
return 0;
ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
mmput(mm);
return ret;
}
/*
* Print the name of a VMA.
*/
void print_vma_addr(char *prefix, unsigned long ip)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
/*
* Do not print if we are in atomic
* contexts (in exception stacks, etc.):
*/
if (preempt_count())
return;
down_read(&mm->mmap_sem);
vma = find_vma(mm, ip);
if (vma && vma->vm_file) {
struct file *f = vma->vm_file;
char *buf = (char *)__get_free_page(GFP_KERNEL);
if (buf) {
char *p;
p = file_path(f, buf, PAGE_SIZE);
if (IS_ERR(p))
p = "?";
printk("%s%s[%lx+%lx]", prefix, kbasename(p),
vma->vm_start,
vma->vm_end - vma->vm_start);
free_page((unsigned long)buf);
}
}
up_read(&mm->mmap_sem);
}
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
/*
* Some code (nfs/sunrpc) uses socket ops on kernel memory while
* holding the mmap_sem, this is safe because kernel memory doesn't
* get paged out, therefore we'll never actually fault, and the
* below annotations will generate false positives.
*/
if (segment_eq(get_fs(), KERNEL_DS))
return;
if (pagefault_disabled())
return;
__might_sleep(file, line, 0);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
if (current->mm)
might_lock_read(&current->mm->mmap_sem);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
static void clear_gigantic_page(struct page *page,
unsigned long addr,
unsigned int pages_per_huge_page)
{
int i;
struct page *p = page;
might_sleep();
for (i = 0; i < pages_per_huge_page;
i++, p = mem_map_next(p, page, i)) {
cond_resched();
clear_user_highpage(p, addr + i * PAGE_SIZE);
}
}
void clear_huge_page(struct page *page,
unsigned long addr, unsigned int pages_per_huge_page)
{
int i;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, pages_per_huge_page);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
cond_resched();
clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
static void copy_user_gigantic_page(struct page *dst, struct page *src,
unsigned long addr,
struct vm_area_struct *vma,
unsigned int pages_per_huge_page)
{
int i;
struct page *dst_base = dst;
struct page *src_base = src;
for (i = 0; i < pages_per_huge_page; ) {
cond_resched();
copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
i++;
dst = mem_map_next(dst, dst_base, i);
src = mem_map_next(src, src_base, i);
}
}
void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr, struct vm_area_struct *vma,
unsigned int pages_per_huge_page)
{
int i;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
copy_user_gigantic_page(dst, src, addr, vma,
pages_per_huge_page);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
cond_resched();
copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
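/*
 * With split PTE locks, spinlock_t may be too large to embed directly
 * in struct page (e.g. with lockdep enabled), so the locks are
 * allocated separately from a dedicated slab cache.
 */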
#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
static struct kmem_cache *page_ptl_cachep;
void __init ptlock_cache_init(void)
{
page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
SLAB_PANIC, NULL);
}
bool ptlock_alloc(struct page *page)
{
spinlock_t *ptl;
ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
if (!ptl)
return false;
page->ptl = ptl;
return true;
}
void ptlock_free(struct page *page)
{
kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_1583_0 |
crossvul-cpp_data_bad_5576_0 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* kdc/do_tgs_req.c - KDC Routines to deal with TGS_REQ's */
/*
* Copyright 1990,1991,2001,2007,2008,2009 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "k5-int.h"
#include <syslog.h>
#ifdef HAVE_NETINET_IN_H
#include <sys/types.h>
#include <netinet/in.h>
#ifndef hpux
#include <arpa/inet.h>
#endif
#endif
#include "kdc_util.h"
#include "policy.h"
#include "extern.h"
#include "adm_proto.h"
#include <ctype.h>
static krb5_error_code
find_alternate_tgs(kdc_realm_t *, krb5_principal, krb5_db_entry **,
const char**);
static krb5_error_code
prepare_error_tgs(struct kdc_request_state *, krb5_kdc_req *,krb5_ticket *,int,
krb5_principal,krb5_data **,const char *, krb5_pa_data **);
static krb5_error_code
decrypt_2ndtkt(kdc_realm_t *, krb5_kdc_req *, krb5_flags, krb5_db_entry **,
const char **);
static krb5_error_code
gen_session_key(kdc_realm_t *, krb5_kdc_req *, krb5_db_entry *,
krb5_keyblock *, const char **);
static krb5_int32
find_referral_tgs(kdc_realm_t *, krb5_kdc_req *, krb5_principal *);
static krb5_error_code
db_get_svc_princ(krb5_context, krb5_principal, krb5_flags,
krb5_db_entry **, const char **);
static krb5_error_code
search_sprinc(kdc_realm_t *, krb5_kdc_req *, krb5_flags,
krb5_db_entry **, const char **);
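/*
 * Top-level handler for a TGS request: decode the request, verify the
 * TGS AP-REQ and header ticket, resolve the server principal (possibly
 * issuing a referral or alternate TGT), apply the requested kdc_options
 * to the new ticket, then build, encrypt and encode the reply.
 */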
/*ARGSUSED*/
krb5_error_code
process_tgs_req(struct server_handle *handle, krb5_data *pkt,
const krb5_fulladdr *from, krb5_data **response)
{
krb5_keyblock * subkey = 0;
krb5_keyblock * tgskey = 0;
krb5_kdc_req *request = 0;
krb5_db_entry *server = NULL;
krb5_db_entry *stkt_server = NULL;
krb5_kdc_rep reply;
krb5_enc_kdc_rep_part reply_encpart;
krb5_ticket ticket_reply, *header_ticket = 0;
int st_idx = 0;
krb5_enc_tkt_part enc_tkt_reply;
krb5_transited enc_tkt_transited;
int newtransited = 0;
krb5_error_code retval = 0;
krb5_keyblock encrypting_key;
krb5_timestamp kdc_time, authtime = 0;
krb5_keyblock session_key;
krb5_timestamp rtime;
krb5_keyblock *reply_key = NULL;
krb5_key_data *server_key;
krb5_principal cprinc = NULL, sprinc = NULL, altcprinc = NULL;
krb5_last_req_entry *nolrarray[2], nolrentry;
int errcode;
const char *status = 0;
krb5_enc_tkt_part *header_enc_tkt = NULL; /* TGT */
krb5_enc_tkt_part *subject_tkt = NULL; /* TGT or evidence ticket */
krb5_db_entry *client = NULL, *krbtgt = NULL;
krb5_pa_s4u_x509_user *s4u_x509_user = NULL; /* protocol transition request */
krb5_authdata **kdc_issued_auth_data = NULL; /* auth data issued by KDC */
unsigned int c_flags = 0, s_flags = 0; /* client/server KDB flags */
krb5_boolean is_referral;
const char *emsg = NULL;
krb5_kvno ticket_kvno = 0;
struct kdc_request_state *state = NULL;
krb5_pa_data *pa_tgs_req; /*points into request*/
krb5_data scratch;
krb5_pa_data **e_data = NULL;
kdc_realm_t *kdc_active_realm = NULL;
reply.padata = 0; /* For cleanup handler */
reply_encpart.enc_padata = 0;
enc_tkt_reply.authorization_data = NULL;
session_key.contents = NULL;
retval = decode_krb5_tgs_req(pkt, &request);
if (retval)
return retval;
if (request->msg_type != KRB5_TGS_REQ) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return KRB5_BADMSGTYPE;
}
/*
* setup_server_realm() sets up the global realm-specific data pointer.
*/
kdc_active_realm = setup_server_realm(handle, request->server);
if (kdc_active_realm == NULL) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return KRB5KDC_ERR_WRONG_REALM;
}
errcode = kdc_make_rstate(kdc_active_realm, &state);
if (errcode !=0) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return errcode;
}
errcode = kdc_process_tgs_req(kdc_active_realm,
request, from, pkt, &header_ticket,
&krbtgt, &tgskey, &subkey, &pa_tgs_req);
if (header_ticket && header_ticket->enc_part2)
cprinc = header_ticket->enc_part2->client;
if (errcode) {
status = "PROCESS_TGS";
goto cleanup;
}
if (!header_ticket) {
errcode = KRB5_NO_TKT_SUPPLIED; /* XXX? */
status="UNEXPECTED NULL in header_ticket";
goto cleanup;
}
scratch.length = pa_tgs_req->length;
scratch.data = (char *) pa_tgs_req->contents;
errcode = kdc_find_fast(&request, &scratch, subkey,
header_ticket->enc_part2->session, state, NULL);
if (errcode !=0) {
status = "kdc_find_fast";
goto cleanup;
}
/*
* Pointer to the encrypted part of the header ticket, which may be
* replaced to point to the encrypted part of the evidence ticket
* if constrained delegation is used. This reduces the number of
* special cases for constrained delegation.
*/
header_enc_tkt = header_ticket->enc_part2;
/*
* We've already dealt with the AP_REQ authentication, so we can
* use header_ticket freely. The encrypted part (if any) has been
* decrypted with the session key.
*/
/* XXX make sure server here has the proper realm...taken from AP_REQ
header? */
setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK);
if (isflagset(request->kdc_options, KDC_OPT_CANONICALIZE)) {
setflag(c_flags, KRB5_KDB_FLAG_CANONICALIZE);
setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE);
}
errcode = search_sprinc(kdc_active_realm, request, s_flags, &server,
&status);
if (errcode != 0)
goto cleanup;
sprinc = server->princ;
/* XXX until nothing depends on request being mutated */
krb5_free_principal(kdc_context, request->server);
request->server = NULL;
errcode = krb5_copy_principal(kdc_context, server->princ,
&request->server);
if (errcode != 0) {
status = "COPYING RESOLVED SERVER";
goto cleanup;
}
if ((errcode = krb5_timeofday(kdc_context, &kdc_time))) {
status = "TIME_OF_DAY";
goto cleanup;
}
if ((retval = validate_tgs_request(kdc_active_realm,
request, *server, header_ticket,
kdc_time, &status, &e_data))) {
if (!status)
status = "UNKNOWN_REASON";
errcode = retval + ERROR_TABLE_BASE_krb5;
goto cleanup;
}
if (!is_local_principal(kdc_active_realm, header_enc_tkt->client))
setflag(c_flags, KRB5_KDB_FLAG_CROSS_REALM);
is_referral = krb5_is_tgs_principal(server->princ) &&
!krb5_principal_compare(kdc_context, tgs_server, server->princ);
/* Check for protocol transition */
errcode = kdc_process_s4u2self_req(kdc_active_realm,
request,
header_enc_tkt->client,
server,
subkey,
header_enc_tkt->session,
kdc_time,
&s4u_x509_user,
&client,
&status);
if (errcode)
goto cleanup;
if (s4u_x509_user != NULL)
setflag(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION);
errcode = decrypt_2ndtkt(kdc_active_realm, request, c_flags,
&stkt_server, &status);
if (errcode)
goto cleanup;
if (isflagset(request->kdc_options, KDC_OPT_CNAME_IN_ADDL_TKT)) {
/* Do constrained delegation protocol and authorization checks */
errcode = kdc_process_s4u2proxy_req(kdc_active_realm,
request,
request->second_ticket[st_idx]->enc_part2,
stkt_server,
header_ticket->enc_part2->client,
request->server,
&status);
if (errcode)
goto cleanup;
setflag(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION);
assert(krb5_is_tgs_principal(header_ticket->server));
assert(client == NULL); /* assured by kdc_process_s4u2self_req() */
client = stkt_server;
stkt_server = NULL;
} else if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) {
krb5_db_free_principal(kdc_context, stkt_server);
stkt_server = NULL;
} else
assert(stkt_server == NULL);
errcode = gen_session_key(kdc_active_realm, request, server, &session_key,
&status);
if (errcode)
goto cleanup;
/*
* subject_tkt will refer to the evidence ticket (for constrained
* delegation) or the TGT. The distinction from header_enc_tkt is
* necessary because the TGS signature only protects some fields:
* the others could be forged by a malicious server.
*/
if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION))
subject_tkt = request->second_ticket[st_idx]->enc_part2;
else
subject_tkt = header_enc_tkt;
authtime = subject_tkt->times.authtime;
if (is_referral)
ticket_reply.server = server->princ;
else
ticket_reply.server = request->server; /* XXX careful for realm... */
enc_tkt_reply.flags = 0;
enc_tkt_reply.times.starttime = 0;
if (isflagset(server->attributes, KRB5_KDB_OK_AS_DELEGATE))
setflag(enc_tkt_reply.flags, TKT_FLG_OK_AS_DELEGATE);
/*
* Fix header_ticket's starttime; if it's zero, fill in the
* authtime's value.
*/
if (!(header_enc_tkt->times.starttime))
header_enc_tkt->times.starttime = authtime;
setflag(enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP);
/* don't use new addresses unless forwarded, see below */
enc_tkt_reply.caddrs = header_enc_tkt->caddrs;
/* noaddrarray[0] = 0; */
reply_encpart.caddrs = 0;/* optional...don't put it in */
reply_encpart.enc_padata = NULL;
/*
* It should be noted that local policy may affect the
* processing of any of these flags. For example, some
* realms may refuse to issue renewable tickets
*/
if (isflagset(request->kdc_options, KDC_OPT_FORWARDABLE)) {
setflag(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) {
/*
* If S4U2Self principal is not forwardable, then mark ticket as
* unforwardable. This behaviour matches Windows, but it is
* different to the MIT AS-REQ path, which returns an error
* (KDC_ERR_POLICY) if forwardable tickets cannot be issued.
*
* Consider this block the S4U2Self equivalent to
* validate_forwardable().
*/
if (client != NULL &&
isflagset(client->attributes, KRB5_KDB_DISALLOW_FORWARDABLE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
/*
* Forwardable flag is propagated along referral path.
*/
else if (!isflagset(header_enc_tkt->flags, TKT_FLG_FORWARDABLE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
/*
* OK_TO_AUTH_AS_DELEGATE must be set on the service requesting
* S4U2Self in order for forwardable tickets to be returned.
*/
else if (!is_referral &&
!isflagset(server->attributes,
KRB5_KDB_OK_TO_AUTH_AS_DELEGATE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
}
}
if (isflagset(request->kdc_options, KDC_OPT_FORWARDED)) {
setflag(enc_tkt_reply.flags, TKT_FLG_FORWARDED);
/* include new addresses in ticket & reply */
enc_tkt_reply.caddrs = request->addresses;
reply_encpart.caddrs = request->addresses;
}
if (isflagset(header_enc_tkt->flags, TKT_FLG_FORWARDED))
setflag(enc_tkt_reply.flags, TKT_FLG_FORWARDED);
if (isflagset(request->kdc_options, KDC_OPT_PROXIABLE))
setflag(enc_tkt_reply.flags, TKT_FLG_PROXIABLE);
if (isflagset(request->kdc_options, KDC_OPT_PROXY)) {
setflag(enc_tkt_reply.flags, TKT_FLG_PROXY);
/* include new addresses in ticket & reply */
enc_tkt_reply.caddrs = request->addresses;
reply_encpart.caddrs = request->addresses;
}
if (isflagset(request->kdc_options, KDC_OPT_ALLOW_POSTDATE))
setflag(enc_tkt_reply.flags, TKT_FLG_MAY_POSTDATE);
if (isflagset(request->kdc_options, KDC_OPT_POSTDATED)) {
setflag(enc_tkt_reply.flags, TKT_FLG_POSTDATED);
setflag(enc_tkt_reply.flags, TKT_FLG_INVALID);
enc_tkt_reply.times.starttime = request->from;
} else
enc_tkt_reply.times.starttime = kdc_time;
if (isflagset(request->kdc_options, KDC_OPT_VALIDATE)) {
assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0);
/* BEWARE of allocation hanging off of ticket & enc_part2, it belongs
to the caller */
ticket_reply = *(header_ticket);
enc_tkt_reply = *(header_ticket->enc_part2);
enc_tkt_reply.authorization_data = NULL;
clear(enc_tkt_reply.flags, TKT_FLG_INVALID);
}
if (isflagset(request->kdc_options, KDC_OPT_RENEW)) {
krb5_deltat old_life;
assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0);
/* BEWARE of allocation hanging off of ticket & enc_part2, it belongs
to the caller */
ticket_reply = *(header_ticket);
enc_tkt_reply = *(header_ticket->enc_part2);
enc_tkt_reply.authorization_data = NULL;
old_life = enc_tkt_reply.times.endtime - enc_tkt_reply.times.starttime;
enc_tkt_reply.times.starttime = kdc_time;
enc_tkt_reply.times.endtime =
min(header_ticket->enc_part2->times.renew_till,
kdc_time + old_life);
} else {
/* not a renew request */
enc_tkt_reply.times.starttime = kdc_time;
kdc_get_ticket_endtime(kdc_active_realm, enc_tkt_reply.times.starttime,
header_enc_tkt->times.endtime, request->till,
client, server, &enc_tkt_reply.times.endtime);
if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE_OK) &&
(enc_tkt_reply.times.endtime < request->till) &&
isflagset(header_enc_tkt->flags, TKT_FLG_RENEWABLE)) {
setflag(request->kdc_options, KDC_OPT_RENEWABLE);
request->rtime =
min(request->till, header_enc_tkt->times.renew_till);
}
}
rtime = (request->rtime == 0) ? kdc_infinity : request->rtime;
if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE)) {
/* already checked above in policy check to reject request for a
renewable ticket using a non-renewable ticket */
setflag(enc_tkt_reply.flags, TKT_FLG_RENEWABLE);
enc_tkt_reply.times.renew_till =
min(rtime,
min(header_enc_tkt->times.renew_till,
enc_tkt_reply.times.starttime +
min(server->max_renewable_life,
max_renewable_life_for_realm)));
} else {
enc_tkt_reply.times.renew_till = 0;
}
if (isflagset(header_enc_tkt->flags, TKT_FLG_ANONYMOUS))
setflag(enc_tkt_reply.flags, TKT_FLG_ANONYMOUS);
/*
* Set authtime to be the same as header or evidence ticket's
*/
enc_tkt_reply.times.authtime = authtime;
/*
* Propagate the preauthentication flags through to the returned ticket.
*/
if (isflagset(header_enc_tkt->flags, TKT_FLG_PRE_AUTH))
setflag(enc_tkt_reply.flags, TKT_FLG_PRE_AUTH);
if (isflagset(header_enc_tkt->flags, TKT_FLG_HW_AUTH))
setflag(enc_tkt_reply.flags, TKT_FLG_HW_AUTH);
/* starttime is optional, and treated as authtime if not present,
so we can nuke it if it matches */
if (enc_tkt_reply.times.starttime == enc_tkt_reply.times.authtime)
enc_tkt_reply.times.starttime = 0;
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) {
altcprinc = s4u_x509_user->user_id.user;
} else if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) {
altcprinc = subject_tkt->client;
} else {
altcprinc = NULL;
}
if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) {
krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2;
encrypting_key = *(t2enc->session);
} else {
/*
* Find the server key
*/
if ((errcode = krb5_dbe_find_enctype(kdc_context, server,
-1, /* ignore keytype */
-1, /* Ignore salttype */
0, /* Get highest kvno */
&server_key))) {
status = "FINDING_SERVER_KEY";
goto cleanup;
}
/*
* Convert server.key into a real key
* (it may be encrypted in the database)
*/
if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL,
server_key, &encrypting_key,
NULL))) {
status = "DECRYPT_SERVER_KEY";
goto cleanup;
}
}
if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) {
/*
* Don't allow authorization data to be disabled if constrained
* delegation is requested. We don't want to deny the server
* the ability to validate that delegation was used.
*/
clear(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED);
}
if (isflagset(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED) == 0) {
/*
* If we are not doing protocol transition/constrained delegation
* try to lookup the client principal so plugins can add additional
* authorization information.
*
* Always validate authorization data for constrained delegation
* because we must validate the KDC signatures.
*/
if (!isflagset(c_flags, KRB5_KDB_FLAGS_S4U)) {
/* Generate authorization data so we can include it in ticket */
setflag(c_flags, KRB5_KDB_FLAG_INCLUDE_PAC);
/* Map principals from foreign (possibly non-AD) realms */
setflag(c_flags, KRB5_KDB_FLAG_MAP_PRINCIPALS);
assert(client == NULL); /* should not have been set already */
errcode = krb5_db_get_principal(kdc_context, subject_tkt->client,
c_flags, &client);
}
}
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) &&
!isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM))
enc_tkt_reply.client = s4u_x509_user->user_id.user;
else
enc_tkt_reply.client = subject_tkt->client;
enc_tkt_reply.session = &session_key;
enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS;
enc_tkt_reply.transited.tr_contents = empty_string; /* equivalent of "" */
errcode = handle_authdata(kdc_context, c_flags, client, server, krbtgt,
subkey != NULL ? subkey :
header_ticket->enc_part2->session,
&encrypting_key, /* U2U or server key */
tgskey,
pkt,
request,
s4u_x509_user ?
s4u_x509_user->user_id.user : NULL,
subject_tkt,
&enc_tkt_reply);
if (errcode) {
krb5_klog_syslog(LOG_INFO, _("TGS_REQ : handle_authdata (%d)"),
errcode);
status = "HANDLE_AUTHDATA";
goto cleanup;
}
/*
* Only add the realm of the presented tgt to the transited list if
* it is different than the local realm (cross-realm) and it is different
* than the realm of the client (since the realm of the client is already
* implicitly part of the transited list and should not be explicitly
* listed).
*/
/* realm compare is like strcmp, but knows how to deal with these args */
if (krb5_realm_compare(kdc_context, header_ticket->server, tgs_server) ||
krb5_realm_compare(kdc_context, header_ticket->server,
enc_tkt_reply.client)) {
/* tgt issued by local realm or issued by realm of client */
enc_tkt_reply.transited = header_enc_tkt->transited;
} else {
/* tgt issued by some other realm and not the realm of the client */
/* assemble new transited field into allocated storage */
if (header_enc_tkt->transited.tr_type !=
KRB5_DOMAIN_X500_COMPRESS) {
status = "BAD_TRTYPE";
errcode = KRB5KDC_ERR_TRTYPE_NOSUPP;
goto cleanup;
}
enc_tkt_transited.tr_type = KRB5_DOMAIN_X500_COMPRESS;
enc_tkt_transited.magic = 0;
enc_tkt_transited.tr_contents.magic = 0;
enc_tkt_transited.tr_contents.data = 0;
enc_tkt_transited.tr_contents.length = 0;
enc_tkt_reply.transited = enc_tkt_transited;
if ((errcode =
add_to_transited(&header_enc_tkt->transited.tr_contents,
&enc_tkt_reply.transited.tr_contents,
header_ticket->server,
enc_tkt_reply.client,
request->server))) {
status = "ADD_TR_FAIL";
goto cleanup;
}
newtransited = 1;
}
if (isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) {
errcode = validate_transit_path(kdc_context, header_enc_tkt->client,
server, krbtgt);
if (errcode) {
status = "NON_TRANSITIVE";
goto cleanup;
}
}
if (!isflagset (request->kdc_options, KDC_OPT_DISABLE_TRANSITED_CHECK)) {
errcode = kdc_check_transited_list (kdc_active_realm,
&enc_tkt_reply.transited.tr_contents,
krb5_princ_realm (kdc_context, header_enc_tkt->client),
krb5_princ_realm (kdc_context, request->server));
if (errcode == 0) {
setflag (enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED);
} else {
log_tgs_badtrans(kdc_context, cprinc, sprinc,
&enc_tkt_reply.transited.tr_contents, errcode);
}
} else
krb5_klog_syslog(LOG_INFO, _("not checking transit path"));
if (reject_bad_transit
&& !isflagset (enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED)) {
errcode = KRB5KDC_ERR_POLICY;
status = "BAD_TRANSIT";
goto cleanup;
}
ticket_reply.enc_part2 = &enc_tkt_reply;
/*
* If we are doing user-to-user authentication, then make sure
* that the client for the second ticket matches the request
* server, and then encrypt the ticket using the session key of
* the second ticket.
*/
if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) {
/*
* Make sure the client for the second ticket matches
* requested server.
*/
krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2;
krb5_principal client2 = t2enc->client;
if (!krb5_principal_compare(kdc_context, request->server, client2)) {
altcprinc = client2;
errcode = KRB5KDC_ERR_SERVER_NOMATCH;
status = "2ND_TKT_MISMATCH";
goto cleanup;
}
ticket_kvno = 0;
ticket_reply.enc_part.enctype = t2enc->session->enctype;
st_idx++;
} else {
ticket_kvno = server_key->key_data_kvno;
}
errcode = krb5_encrypt_tkt_part(kdc_context, &encrypting_key,
&ticket_reply);
if (!isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY))
krb5_free_keyblock_contents(kdc_context, &encrypting_key);
if (errcode) {
status = "TKT_ENCRYPT";
goto cleanup;
}
ticket_reply.enc_part.kvno = ticket_kvno;
/* Start assembling the response */
reply.msg_type = KRB5_TGS_REP;
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) &&
krb5int_find_pa_data(kdc_context, request->padata,
KRB5_PADATA_S4U_X509_USER) != NULL) {
errcode = kdc_make_s4u2self_rep(kdc_context,
subkey,
header_ticket->enc_part2->session,
s4u_x509_user,
&reply,
&reply_encpart);
if (errcode) {
status = "KDC_RETURN_S4U2SELF_PADATA";
goto cleanup;
}
}
reply.client = enc_tkt_reply.client;
reply.enc_part.kvno = 0;/* We are using the session key */
reply.ticket = &ticket_reply;
reply_encpart.session = &session_key;
reply_encpart.nonce = request->nonce;
/* copy the time fields */
reply_encpart.times = enc_tkt_reply.times;
/* starttime is optional, and treated as authtime if not present,
so we can nuke it if it matches */
if (enc_tkt_reply.times.starttime == enc_tkt_reply.times.authtime)
enc_tkt_reply.times.starttime = 0;
nolrentry.lr_type = KRB5_LRQ_NONE;
nolrentry.value = 0;
nolrarray[0] = &nolrentry;
nolrarray[1] = 0;
reply_encpart.last_req = nolrarray; /* not available for TGS reqs */
reply_encpart.key_exp = 0;/* ditto */
reply_encpart.flags = enc_tkt_reply.flags;
reply_encpart.server = ticket_reply.server;
/* use the session key in the ticket, unless there's a subsession key
in the AP_REQ */
reply.enc_part.enctype = subkey ? subkey->enctype :
header_ticket->enc_part2->session->enctype;
errcode = kdc_fast_response_handle_padata(state, request, &reply,
subkey ? subkey->enctype : header_ticket->enc_part2->session->enctype);
if (errcode !=0 ) {
status = "Preparing FAST padata";
goto cleanup;
}
errcode =kdc_fast_handle_reply_key(state,
subkey?subkey:header_ticket->enc_part2->session, &reply_key);
if (errcode) {
status = "generating reply key";
goto cleanup;
}
errcode = return_enc_padata(kdc_context, pkt, request,
reply_key, server, &reply_encpart,
is_referral &&
isflagset(s_flags,
KRB5_KDB_FLAG_CANONICALIZE));
if (errcode) {
status = "KDC_RETURN_ENC_PADATA";
goto cleanup;
}
errcode = krb5_encode_kdc_rep(kdc_context, KRB5_TGS_REP, &reply_encpart,
subkey ? 1 : 0,
reply_key,
&reply, response);
if (errcode) {
status = "ENCODE_KDC_REP";
} else {
status = "ISSUE";
}
memset(ticket_reply.enc_part.ciphertext.data, 0,
ticket_reply.enc_part.ciphertext.length);
free(ticket_reply.enc_part.ciphertext.data);
/* these parts are left on as a courtesy from krb5_encode_kdc_rep so we
can use them in raw form if needed. But we don't... */
memset(reply.enc_part.ciphertext.data, 0,
reply.enc_part.ciphertext.length);
free(reply.enc_part.ciphertext.data);
cleanup:
assert(status != NULL);
if (reply_key)
krb5_free_keyblock(kdc_context, reply_key);
if (errcode)
emsg = krb5_get_error_message (kdc_context, errcode);
log_tgs_req(kdc_context, from, request, &reply, cprinc,
sprinc, altcprinc, authtime,
c_flags, status, errcode, emsg);
if (errcode) {
krb5_free_error_message (kdc_context, emsg);
emsg = NULL;
}
if (errcode) {
int got_err = 0;
if (status == 0) {
status = krb5_get_error_message (kdc_context, errcode);
got_err = 1;
}
errcode -= ERROR_TABLE_BASE_krb5;
if (errcode < 0 || errcode > 128)
errcode = KRB_ERR_GENERIC;
retval = prepare_error_tgs(state, request, header_ticket, errcode,
(server != NULL) ? server->princ : NULL,
response, status, e_data);
if (got_err) {
krb5_free_error_message (kdc_context, status);
status = 0;
}
}
if (header_ticket != NULL)
krb5_free_ticket(kdc_context, header_ticket);
if (request != NULL)
krb5_free_kdc_req(kdc_context, request);
if (state)
kdc_free_rstate(state);
krb5_db_free_principal(kdc_context, server);
krb5_db_free_principal(kdc_context, krbtgt);
krb5_db_free_principal(kdc_context, client);
if (session_key.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &session_key);
if (newtransited)
free(enc_tkt_reply.transited.tr_contents.data);
if (s4u_x509_user != NULL)
krb5_free_pa_s4u_x509_user(kdc_context, s4u_x509_user);
if (kdc_issued_auth_data != NULL)
krb5_free_authdata(kdc_context, kdc_issued_auth_data);
if (subkey != NULL)
krb5_free_keyblock(kdc_context, subkey);
if (tgskey != NULL)
krb5_free_keyblock(kdc_context, tgskey);
if (reply.padata)
krb5_free_pa_data(kdc_context, reply.padata);
if (reply_encpart.enc_padata)
krb5_free_pa_data(kdc_context, reply_encpart.enc_padata);
if (enc_tkt_reply.authorization_data != NULL)
krb5_free_authdata(kdc_context, enc_tkt_reply.authorization_data);
krb5_free_pa_data(kdc_context, e_data);
return retval;
}
static krb5_error_code
prepare_error_tgs (struct kdc_request_state *state,
krb5_kdc_req *request, krb5_ticket *ticket, int error,
krb5_principal canon_server,
krb5_data **response, const char *status,
krb5_pa_data **e_data)
{
krb5_error errpkt;
krb5_error_code retval = 0;
krb5_data *scratch, *e_data_asn1 = NULL, *fast_edata = NULL;
kdc_realm_t *kdc_active_realm = state->realm_data;
errpkt.ctime = request->nonce;
errpkt.cusec = 0;
if ((retval = krb5_us_timeofday(kdc_context, &errpkt.stime,
&errpkt.susec)))
return(retval);
errpkt.error = error;
errpkt.server = request->server;
if (ticket && ticket->enc_part2)
errpkt.client = ticket->enc_part2->client;
else
errpkt.client = NULL;
errpkt.text.length = strlen(status);
if (!(errpkt.text.data = strdup(status)))
return ENOMEM;
if (!(scratch = (krb5_data *)malloc(sizeof(*scratch)))) {
free(errpkt.text.data);
return ENOMEM;
}
if (e_data != NULL) {
retval = encode_krb5_padata_sequence(e_data, &e_data_asn1);
if (retval) {
free(scratch);
free(errpkt.text.data);
return retval;
}
errpkt.e_data = *e_data_asn1;
} else
errpkt.e_data = empty_data();
if (state) {
retval = kdc_fast_handle_error(kdc_context, state, request, e_data,
&errpkt, &fast_edata);
}
if (retval) {
free(scratch);
free(errpkt.text.data);
krb5_free_data(kdc_context, e_data_asn1);
return retval;
}
if (fast_edata)
errpkt.e_data = *fast_edata;
retval = krb5_mk_error(kdc_context, &errpkt, scratch);
free(errpkt.text.data);
krb5_free_data(kdc_context, e_data_asn1);
krb5_free_data(kdc_context, fast_edata);
if (retval)
free(scratch);
else
*response = scratch;
return retval;
}
/* KDC options that require a second ticket */
#define STKT_OPTIONS (KDC_OPT_CNAME_IN_ADDL_TKT | KDC_OPT_ENC_TKT_IN_SKEY)
/*
* Get the key for the second ticket, if any, and decrypt it.
*/
static krb5_error_code
decrypt_2ndtkt(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_flags flags, krb5_db_entry **server_out,
const char **status)
{
krb5_error_code retval;
krb5_db_entry *server;
krb5_keyblock *key;
krb5_kvno kvno;
krb5_ticket *stkt;
if (!(req->kdc_options & STKT_OPTIONS))
return 0;
stkt = req->second_ticket[0];
retval = kdc_get_server_key(kdc_context, stkt,
flags,
TRUE, /* match_enctype */
&server,
&key,
&kvno);
if (retval != 0) {
*status = "2ND_TKT_SERVER";
goto cleanup;
}
retval = krb5_decrypt_tkt_part(kdc_context, key,
req->second_ticket[0]);
krb5_free_keyblock(kdc_context, key);
if (retval != 0) {
*status = "2ND_TKT_DECRYPT";
goto cleanup;
}
*server_out = server;
cleanup:
return retval;
}
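/*
 * For user-to-user requests, pick the session key enctype of the second
 * ticket, provided the client also listed it in its requested enctypes.
 */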
static krb5_error_code
get_2ndtkt_enctype(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_enctype *useenctype, const char **status)
{
krb5_enctype etype;
krb5_ticket *stkt = req->second_ticket[0];
int i;
etype = stkt->enc_part2->session->enctype;
if (!krb5_c_valid_enctype(etype)) {
*status = "BAD_ETYPE_IN_2ND_TKT";
return KRB5KDC_ERR_ETYPE_NOSUPP;
}
for (i = 0; i < req->nktypes; i++) {
if (req->ktype[i] == etype) {
*useenctype = etype;
break;
}
}
return 0;
}
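/*
 * Generate a random session key for the new ticket, choosing an enctype
 * the server supports (or, for user-to-user, the enctype dictated by
 * the second ticket's session key).
 */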
static krb5_error_code
gen_session_key(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_db_entry *server, krb5_keyblock *skey,
const char **status)
{
krb5_error_code retval;
krb5_enctype useenctype = 0;
/*
* Some special care needs to be taken in the user-to-user
* case, since we don't know what keytypes the application server
* which is doing user-to-user authentication can support. We
* know that it at least must be able to support the encryption
* type of the session key in the TGT, since otherwise it won't be
* able to decrypt the U2U ticket! So we use that in preference
* to anything else.
*/
if (req->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) {
retval = get_2ndtkt_enctype(kdc_active_realm, req, &useenctype,
status);
if (retval != 0)
goto cleanup;
}
if (useenctype == 0) {
useenctype = select_session_keytype(kdc_active_realm, server,
req->nktypes,
req->ktype);
}
if (useenctype == 0) {
/* unsupported ktype */
*status = "BAD_ENCRYPTION_TYPE";
retval = KRB5KDC_ERR_ETYPE_NOSUPP;
goto cleanup;
}
retval = krb5_c_make_random_key(kdc_context, useenctype, skey);
if (retval != 0) {
/* random key failed */
*status = "RANDOM_KEY_FAILED";
goto cleanup;
}
cleanup:
return retval;
}
/*
* The request seems to be for a ticket-granting service somewhere else,
* but we don't have a ticket for the final TGS. Try to give the requestor
* some intermediate realm.
*/
static krb5_error_code
find_alternate_tgs(kdc_realm_t *kdc_active_realm, krb5_principal princ,
krb5_db_entry **server_ptr, const char **status)
{
krb5_error_code retval;
krb5_principal *plist = NULL, *pl2;
krb5_data tmp;
krb5_db_entry *server = NULL;
*server_ptr = NULL;
assert(is_cross_tgs_principal(princ));
if ((retval = krb5_walk_realm_tree(kdc_context,
krb5_princ_realm(kdc_context, princ),
krb5_princ_component(kdc_context, princ, 1),
&plist, KRB5_REALM_BRANCH_CHAR))) {
goto cleanup;
}
/* move to the end */
for (pl2 = plist; *pl2; pl2++);
/* the first entry in this array is for krbtgt/local@local, so we
ignore it */
while (--pl2 > plist) {
tmp = *krb5_princ_realm(kdc_context, *pl2);
krb5_princ_set_realm(kdc_context, *pl2,
krb5_princ_realm(kdc_context, tgs_server));
retval = db_get_svc_princ(kdc_context, *pl2, 0, &server, status);
krb5_princ_set_realm(kdc_context, *pl2, &tmp);
if (retval == KRB5_KDB_NOENTRY)
continue;
else if (retval)
goto cleanup;
log_tgs_alt_tgt(kdc_context, server->princ);
*server_ptr = server;
server = NULL;
goto cleanup;
}
cleanup:
if (retval != 0)
*status = "UNKNOWN_SERVER";
krb5_free_realm_tree(kdc_context, plist);
krb5_db_free_principal(kdc_context, server);
return retval;
}
/*
* Check whether the request satisfies the conditions for generating a referral
* TGT. The caller checks whether the hostname component looks like a FQDN.
*/
static krb5_boolean
is_referral_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request)
{
krb5_boolean ret = FALSE;
char *stype = NULL;
char *ref_services = kdc_active_realm->realm_host_based_services;
char *nonref_services = kdc_active_realm->realm_no_host_referral;
if (!(request->kdc_options & KDC_OPT_CANONICALIZE))
return FALSE;
if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY)
return FALSE;
if (krb5_princ_size(kdc_context, request->server) != 2)
return FALSE;
stype = data2string(krb5_princ_component(kdc_context, request->server, 0));
if (stype == NULL)
return FALSE;
switch (krb5_princ_type(kdc_context, request->server)) {
case KRB5_NT_UNKNOWN:
/* Allow referrals for NT-UNKNOWN principals, if configured. */
if (kdc_active_realm->realm_host_based_services != NULL) {
if (!krb5_match_config_pattern(ref_services, stype) &&
!krb5_match_config_pattern(ref_services, KRB5_CONF_ASTERISK))
goto cleanup;
} else
goto cleanup;
/* FALLTHROUGH */
case KRB5_NT_SRV_HST:
case KRB5_NT_SRV_INST:
/* Deny referrals for specific service types, if configured. */
if (kdc_active_realm->realm_no_host_referral != NULL) {
if (krb5_match_config_pattern(nonref_services, stype))
goto cleanup;
if (krb5_match_config_pattern(nonref_services, KRB5_CONF_ASTERISK))
goto cleanup;
}
ret = TRUE;
break;
default:
goto cleanup;
}
cleanup:
free(stype);
return ret;
}
/*
* Find a remote realm TGS principal for an unknown host-based service
* principal.
*/
static krb5_int32
find_referral_tgs(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request,
krb5_principal *krbtgt_princ)
{
krb5_error_code retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
char **realms = NULL, *hostname = NULL;
krb5_data srealm = request->server->realm;
if (!is_referral_req(kdc_active_realm, request))
goto cleanup;
hostname = data2string(krb5_princ_component(kdc_context,
request->server, 1));
if (hostname == NULL) {
retval = ENOMEM;
goto cleanup;
}
/* If the hostname doesn't contain a '.', it's not a FQDN. */
if (strchr(hostname, '.') == NULL)
goto cleanup;
retval = krb5_get_host_realm(kdc_context, hostname, &realms);
if (retval) {
/* no match found */
kdc_err(kdc_context, retval, "unable to find realm of host");
goto cleanup;
}
/* Don't return a referral to the empty realm or the service realm. */
if (realms == NULL || realms[0] == '\0' ||
data_eq_string(srealm, realms[0])) {
retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto cleanup;
}
retval = krb5_build_principal(kdc_context, krbtgt_princ,
srealm.length, srealm.data,
"krbtgt", realms[0], (char *)0);
cleanup:
krb5_free_host_realm(kdc_context, realms);
free(hostname);
return retval;
}
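/*
 * Look up a service principal in the KDB, mapping a database lock
 * failure to KRB5KDC_ERR_SVC_UNAVAILABLE and setting the log status on
 * error.
 */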
static krb5_error_code
db_get_svc_princ(krb5_context ctx, krb5_principal princ,
krb5_flags flags, krb5_db_entry **server,
const char **status)
{
krb5_error_code ret;
ret = krb5_db_get_principal(ctx, princ, flags, server);
if (ret == KRB5_KDB_CANTLOCK_DB)
ret = KRB5KDC_ERR_SVC_UNAVAILABLE;
if (ret != 0) {
*status = "LOOKING_UP_SERVER";
}
return ret;
}
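/*
 * Resolve the requested server principal: try an exact lookup first,
 * then a referral TGS for an unknown host-based service, and finally an
 * alternate TGS along the realm hierarchy for cross-realm requests.
 */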
static krb5_error_code
search_sprinc(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_flags flags, krb5_db_entry **server, const char **status)
{
krb5_error_code ret;
krb5_principal princ = req->server;
krb5_principal reftgs = NULL;
ret = db_get_svc_princ(kdc_context, princ, flags, server, status);
if (ret == 0 || ret != KRB5_KDB_NOENTRY)
goto cleanup;
if (!is_cross_tgs_principal(req->server)) {
ret = find_referral_tgs(kdc_active_realm, req, &reftgs);
if (ret != 0)
goto cleanup;
ret = db_get_svc_princ(kdc_context, reftgs, flags, server, status);
if (ret == 0 || ret != KRB5_KDB_NOENTRY)
goto cleanup;
princ = reftgs;
}
ret = find_alternate_tgs(kdc_active_realm, princ, server, status);
cleanup:
if (ret != 0 && ret != KRB5KDC_ERR_SVC_UNAVAILABLE) {
ret = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
if (*status == NULL)
*status = "LOOKING_UP_SERVER";
}
krb5_free_principal(kdc_context, reftgs);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5576_0 |
crossvul-cpp_data_good_1762_0 | /* Key garbage collector
*
* Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"
/*
* Delay between key revocation/expiry in seconds
*/
unsigned key_gc_delay = 5 * 60;
/*
* Reaper for unused keys.
*/
static void key_garbage_collector(struct work_struct *work);
DECLARE_WORK(key_gc_work, key_garbage_collector);
/*
* Reaper for links from keyrings to dead keys.
*/
static void key_gc_timer_func(unsigned long);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
static time_t key_gc_next_run = LONG_MAX;
static struct key_type *key_gc_dead_keytype;
static unsigned long key_gc_flags;
#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */
#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */
#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */
/*
* Any key whose type gets unregistered will be re-typed to this if it can't be
* immediately unlinked.
*/
struct key_type key_type_dead = {
.name = "dead",
};
/*
* Schedule a garbage collection run.
* - time precision isn't particularly important
*/
void key_schedule_gc(time_t gc_at)
{
unsigned long expires;
time_t now = current_kernel_time().tv_sec;
kenter("%ld", gc_at - now);
if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
kdebug("IMMEDIATE");
schedule_work(&key_gc_work);
} else if (gc_at < key_gc_next_run) {
kdebug("DEFERRED");
key_gc_next_run = gc_at;
expires = jiffies + (gc_at - now) * HZ;
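/* e.g. if gc_at is 30 seconds from now and HZ is 100, the timer is
 * set to fire 3000 jiffies in the future. */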
mod_timer(&key_gc_timer, expires);
}
}
/*
* Schedule a dead links collection run.
*/
void key_schedule_gc_links(void)
{
set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
schedule_work(&key_gc_work);
}
/*
* The cleanup time of some key was reached after it expired, so we need to
* get the reaper to go through a cycle finding expired keys.
*/
static void key_gc_timer_func(unsigned long data)
{
kenter("");
key_gc_next_run = LONG_MAX;
key_schedule_gc_links();
}
/*
* Reap keys of dead type.
*
* We use three flags to make sure we see three complete cycles of the garbage
* collector: the first to mark keys of that type as being dead, the second to
* collect dead links and the third to clean up the dead keys. We have to be
* careful as there may already be a cycle in progress.
*
* The caller must be holding key_types_sem.
*/
void key_gc_keytype(struct key_type *ktype)
{
kenter("%s", ktype->name);
key_gc_dead_keytype = ktype;
set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
smp_mb();
set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
kdebug("schedule");
schedule_work(&key_gc_work);
kdebug("sleep");
wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE,
TASK_UNINTERRUPTIBLE);
key_gc_dead_keytype = NULL;
kleave("");
}
/*
* Garbage collect a list of unreferenced, detached keys
*/
static noinline void key_gc_unused_keys(struct list_head *keys)
{
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
/* Throw away the key data if the key is instantiated */
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
!test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
key->type->destroy)
key->type->destroy(key);
security_key_free(key);
/* deal with the user's key tracking and quota */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
kfree(key->description);
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC_X;
#endif
kmem_cache_free(key_jar, key);
}
}
/*
* Garbage collector for unused keys.
*
* This is done in process context so that we don't have to disable interrupts
* all over the place. key_put() schedules this rather than trying to do the
* cleanup itself, which means key_put() doesn't have to sleep.
*/
static void key_garbage_collector(struct work_struct *work)
{
static LIST_HEAD(graveyard);
static u8 gc_state; /* Internal persistent state */
#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */
struct rb_node *cursor;
struct key *key;
time_t new_timer, limit;
kenter("[%lx,%x]", key_gc_flags, gc_state);
limit = current_kernel_time().tv_sec;
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
limit = key_gc_delay;
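/* Keys whose expiry is at or before 'limit' (normally key_gc_delay
 * seconds ago) are old enough for their links to be reaped. */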
/* Work out what we're going to be doing in this pass */
gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
gc_state <<= 1;
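/* The shift advances the keytype reap one pass: KEY_GC_REAPING_DEAD_1
 * (0x10) becomes _DEAD_2 (0x20) and _DEAD_2 becomes _DEAD_3 (0x40). */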
if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
gc_state |= KEY_GC_REAPING_DEAD_1;
kdebug("new pass %x", gc_state);
new_timer = LONG_MAX;
/* As only this function is permitted to remove things from the key
* serial tree, if cursor is non-NULL then it will always point to a
* valid node in the tree - even if the lock got dropped.
*/
spin_lock(&key_serial_lock);
cursor = rb_first(&key_serial_tree);
continue_scanning:
while (cursor) {
key = rb_entry(cursor, struct key, serial_node);
cursor = rb_next(cursor);
if (atomic_read(&key->usage) == 0)
goto found_unreferenced_key;
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
if (key->type == key_gc_dead_keytype) {
gc_state |= KEY_GC_FOUND_DEAD_KEY;
set_bit(KEY_FLAG_DEAD, &key->flags);
key->perm = 0;
goto skip_dead_key;
}
}
if (gc_state & KEY_GC_SET_TIMER) {
if (key->expiry > limit && key->expiry < new_timer) {
kdebug("will expire %x in %ld",
key_serial(key), key->expiry - limit);
new_timer = key->expiry;
}
}
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
if (key->type == key_gc_dead_keytype)
gc_state |= KEY_GC_FOUND_DEAD_KEY;
if ((gc_state & KEY_GC_REAPING_LINKS) ||
unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
if (key->type == &key_type_keyring)
goto found_keyring;
}
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
if (key->type == key_gc_dead_keytype)
goto destroy_dead_key;
skip_dead_key:
if (spin_is_contended(&key_serial_lock) || need_resched())
goto contended;
}
contended:
spin_unlock(&key_serial_lock);
maybe_resched:
if (cursor) {
cond_resched();
spin_lock(&key_serial_lock);
goto continue_scanning;
}
/* We've completed the pass. Set the timer if we need to and queue a
* new cycle if necessary. We keep executing cycles until we find one
* where we didn't reap any keys.
*/
kdebug("pass complete");
if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
!list_empty(&graveyard)) {
/* Make sure that all pending keyring payload destructions are
* fulfilled and that people aren't now looking at dead or
* dying keys that they don't have a reference upon or a link
* to.
*/
kdebug("gc sync");
synchronize_rcu();
}
if (!list_empty(&graveyard)) {
kdebug("gc keys");
key_gc_unused_keys(&graveyard);
}
if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
KEY_GC_REAPING_DEAD_2))) {
if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
/* No remaining dead keys: short circuit the remaining
* keytype reap cycles.
*/
kdebug("dead short");
gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
gc_state |= KEY_GC_REAPING_DEAD_3;
} else {
gc_state |= KEY_GC_REAP_AGAIN;
}
}
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
kdebug("dead wake");
smp_mb();
clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
}
if (gc_state & KEY_GC_REAP_AGAIN)
schedule_work(&key_gc_work);
kleave(" [end %x]", gc_state);
return;
/* We found an unreferenced key - once we've removed it from the tree,
* we can safely drop the lock.
*/
found_unreferenced_key:
kdebug("unrefd key %d", key->serial);
rb_erase(&key->serial_node, &key_serial_tree);
spin_unlock(&key_serial_lock);
list_add_tail(&key->graveyard_link, &graveyard);
gc_state |= KEY_GC_REAP_AGAIN;
goto maybe_resched;
/* We found a keyring and we need to check the payload for links to
* dead or expired keys. We don't flag another reap immediately as we
* have to wait for the old payload to be destroyed by RCU before we
* can reap the keys to which it refers.
*/
found_keyring:
spin_unlock(&key_serial_lock);
keyring_gc(key, limit);
goto maybe_resched;
/* We found a dead key that is still referenced. Reset its type and
* destroy its payload with its semaphore held.
*/
destroy_dead_key:
spin_unlock(&key_serial_lock);
kdebug("destroy key %d", key->serial);
down_write(&key->sem);
key->type = &key_type_dead;
if (key_gc_dead_keytype->destroy)
key_gc_dead_keytype->destroy(key);
memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
up_write(&key->sem);
goto maybe_resched;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_1762_0 |
crossvul-cpp_data_good_2802_3 | /* nautilus-metadata.c - metadata utils
*
* Copyright (C) 2009 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include "nautilus-metadata.h"
#include <glib.h>
static char *used_metadata_names[] =
{
NAUTILUS_METADATA_KEY_LOCATION_BACKGROUND_COLOR,
NAUTILUS_METADATA_KEY_LOCATION_BACKGROUND_IMAGE,
NAUTILUS_METADATA_KEY_ICON_VIEW_AUTO_LAYOUT,
NAUTILUS_METADATA_KEY_ICON_VIEW_SORT_BY,
NAUTILUS_METADATA_KEY_ICON_VIEW_SORT_REVERSED,
NAUTILUS_METADATA_KEY_ICON_VIEW_KEEP_ALIGNED,
NAUTILUS_METADATA_KEY_ICON_VIEW_LAYOUT_TIMESTAMP,
NAUTILUS_METADATA_KEY_DESKTOP_ICON_SIZE,
NAUTILUS_METADATA_KEY_LIST_VIEW_SORT_COLUMN,
NAUTILUS_METADATA_KEY_LIST_VIEW_SORT_REVERSED,
NAUTILUS_METADATA_KEY_LIST_VIEW_VISIBLE_COLUMNS,
NAUTILUS_METADATA_KEY_LIST_VIEW_COLUMN_ORDER,
NAUTILUS_METADATA_KEY_WINDOW_GEOMETRY,
NAUTILUS_METADATA_KEY_WINDOW_SCROLL_POSITION,
NAUTILUS_METADATA_KEY_WINDOW_SHOW_HIDDEN_FILES,
NAUTILUS_METADATA_KEY_WINDOW_MAXIMIZED,
NAUTILUS_METADATA_KEY_WINDOW_STICKY,
NAUTILUS_METADATA_KEY_WINDOW_KEEP_ABOVE,
NAUTILUS_METADATA_KEY_SIDEBAR_BACKGROUND_COLOR,
NAUTILUS_METADATA_KEY_SIDEBAR_BACKGROUND_IMAGE,
NAUTILUS_METADATA_KEY_SIDEBAR_BUTTONS,
NAUTILUS_METADATA_KEY_ANNOTATION,
NAUTILUS_METADATA_KEY_ICON_POSITION,
NAUTILUS_METADATA_KEY_ICON_POSITION_TIMESTAMP,
NAUTILUS_METADATA_KEY_ICON_SCALE,
NAUTILUS_METADATA_KEY_CUSTOM_ICON,
NAUTILUS_METADATA_KEY_CUSTOM_ICON_NAME,
NAUTILUS_METADATA_KEY_SCREEN,
NAUTILUS_METADATA_KEY_EMBLEMS,
NAUTILUS_METADATA_KEY_DESKTOP_FILE_TRUSTED,
NULL
};
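/* Map a metadata key name to its 1-based index in used_metadata_names[],
 * or 0 if the key is unknown. The lookup hash is built lazily on first
 * use and kept for the lifetime of the process. */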
guint
nautilus_metadata_get_id (const char *metadata)
{
static GHashTable *hash;
int i;
if (hash == NULL)
{
hash = g_hash_table_new (g_str_hash, g_str_equal);
for (i = 0; used_metadata_names[i] != NULL; i++)
{
g_hash_table_insert (hash,
used_metadata_names[i],
GINT_TO_POINTER (i + 1));
}
}
return GPOINTER_TO_INT (g_hash_table_lookup (hash, metadata));
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_2802_3 |
crossvul-cpp_data_good_5819_0 | /*
* Flash Screen Video decoder
* Copyright (C) 2004 Alex Beregszaszi
* Copyright (C) 2006 Benjamin Larsson
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Flash Screen Video decoder
* @author Alex Beregszaszi
* @author Benjamin Larsson
* @author Daniel Verkamp
* @author Konstantin Shishkov
*
* A description of the bitstream format for Flash Screen Video version 1/2
* is part of the SWF File Format Specification (version 10), which can be
* downloaded from http://www.adobe.com/devnet/swf.html.
*/
#include <stdio.h>
#include <stdlib.h>
#include <zlib.h>
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
typedef struct BlockInfo {
uint8_t *pos;
int size;
} BlockInfo;
typedef struct FlashSVContext {
AVCodecContext *avctx;
AVFrame frame;
int image_width, image_height;
int block_width, block_height;
uint8_t *tmpblock;
int block_size;
z_stream zstream;
int ver;
const uint32_t *pal;
int is_keyframe;
uint8_t *keyframedata;
uint8_t *keyframe;
BlockInfo *blocks;
uint8_t *deflate_block;
int deflate_block_size;
int color_depth;
int zlibprime_curr, zlibprime_prev;
int diff_start, diff_height;
} FlashSVContext;
static int decode_hybrid(const uint8_t *sptr, uint8_t *dptr, int dx, int dy,
int h, int w, int stride, const uint32_t *pal)
{
int x, y;
const uint8_t *orig_src = sptr;
for (y = dx+h; y > dx; y--) {
uint8_t *dst = dptr + (y * stride) + dy * 3;
for (x = 0; x < w; x++) {
if (*sptr & 0x80) {
/* 15-bit color */
unsigned c = AV_RB16(sptr) & ~0x8000;
unsigned b = c & 0x1F;
unsigned g = (c >> 5) & 0x1F;
unsigned r = c >> 10;
/* 000aaabb -> aaabbaaa */
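/* e.g. the 5-bit value 22 (0b10110) expands to 181 (0b10110101), and
 * the endpoints map exactly: 0 -> 0x00, 31 -> 0xFF. */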
*dst++ = (b << 3) | (b >> 2);
*dst++ = (g << 3) | (g >> 2);
*dst++ = (r << 3) | (r >> 2);
sptr += 2;
} else {
/* palette index */
uint32_t c = pal[*sptr++];
bytestream_put_le24(&dst, c);
}
}
}
return sptr - orig_src;
}
static av_cold int flashsv_decode_init(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
int zret; // Zlib return code
s->avctx = avctx;
s->zstream.zalloc = Z_NULL;
s->zstream.zfree = Z_NULL;
s->zstream.opaque = Z_NULL;
zret = inflateInit(&s->zstream);
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
return 1;
}
avctx->pix_fmt = AV_PIX_FMT_BGR24;
avcodec_get_frame_defaults(&s->frame);
return 0;
}
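/*
 * Prime the zlib inflate state for a block: inflate the priming data,
 * recompress it with a level-0 deflate, and feed the result back through
 * the reset inflate stream so its window matches the encoder's state.
 */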
static int flashsv2_prime(FlashSVContext *s, uint8_t *src, int size)
{
z_stream zs;
int zret; // Zlib return code
if (!src)
return AVERROR_INVALIDDATA;
zs.zalloc = NULL;
zs.zfree = NULL;
zs.opaque = NULL;
s->zstream.next_in = src;
s->zstream.avail_in = size;
s->zstream.next_out = s->tmpblock;
s->zstream.avail_out = s->block_size * 3;
inflate(&s->zstream, Z_SYNC_FLUSH);
if (deflateInit(&zs, 0) != Z_OK)
return -1;
zs.next_in = s->tmpblock;
zs.avail_in = s->block_size * 3 - s->zstream.avail_out;
zs.next_out = s->deflate_block;
zs.avail_out = s->deflate_block_size;
deflate(&zs, Z_SYNC_FLUSH);
deflateEnd(&zs);
if ((zret = inflateReset(&s->zstream)) != Z_OK) {
av_log(s->avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
return AVERROR_UNKNOWN;
}
s->zstream.next_in = s->deflate_block;
s->zstream.avail_in = s->deflate_block_size - zs.avail_out;
s->zstream.next_out = s->tmpblock;
s->zstream.avail_out = s->block_size * 3;
inflate(&s->zstream, Z_SYNC_FLUSH);
return 0;
}
static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,
GetBitContext *gb, int block_size,
int width, int height, int x_pos, int y_pos,
int blk_idx)
{
struct FlashSVContext *s = avctx->priv_data;
uint8_t *line = s->tmpblock;
int k;
int ret = inflateReset(&s->zstream);
if (ret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", ret);
return AVERROR_UNKNOWN;
}
if (s->zlibprime_curr || s->zlibprime_prev) {
ret = flashsv2_prime(s,
s->blocks[blk_idx].pos,
s->blocks[blk_idx].size);
if (ret < 0)
return ret;
}
s->zstream.next_in = avpkt->data + get_bits_count(gb) / 8;
s->zstream.avail_in = block_size;
s->zstream.next_out = s->tmpblock;
s->zstream.avail_out = s->block_size * 3;
ret = inflate(&s->zstream, Z_FINISH);
if (ret == Z_DATA_ERROR) {
av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n");
inflateSync(&s->zstream);
ret = inflate(&s->zstream, Z_FINISH);
}
if (ret != Z_OK && ret != Z_STREAM_END) {
//return -1;
}
if (s->is_keyframe) {
s->blocks[blk_idx].pos = s->keyframedata + (get_bits_count(gb) / 8);
s->blocks[blk_idx].size = block_size;
}
if (!s->color_depth) {
/* Flash Screen Video stores the image upside down, so copy
* lines to destination in reverse order. */
for (k = 1; k <= s->diff_height; k++) {
memcpy(s->frame.data[0] + x_pos * 3 +
(s->image_height - y_pos - s->diff_start - k) * s->frame.linesize[0],
line, width * 3);
/* advance source pointer to next line */
line += width * 3;
}
} else {
/* hybrid 15-bit/palette mode */
decode_hybrid(s->tmpblock, s->frame.data[0],
s->image_height - (y_pos + 1 + s->diff_start + s->diff_height),
x_pos, s->diff_height, width,
s->frame.linesize[0], s->pal);
}
skip_bits_long(gb, 8 * block_size); /* skip the consumed bits */
return 0;
}
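/* Worst-case output size of a level-0 deflate of tmpblock_size bytes, as
 * reported by deflateBound(); returns -1 if deflateInit() fails. */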
static int calc_deflate_block_size(int tmpblock_size)
{
z_stream zstream;
int size;
zstream.zalloc = Z_NULL;
zstream.zfree = Z_NULL;
zstream.opaque = Z_NULL;
if (deflateInit(&zstream, 0) != Z_OK)
return -1;
size = deflateBound(&zstream, tmpblock_size);
deflateEnd(&zstream);
return size;
}
static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
int buf_size = avpkt->size;
FlashSVContext *s = avctx->priv_data;
int h_blocks, v_blocks, h_part, v_part, i, j, ret;
GetBitContext gb;
int last_blockwidth = s->block_width;
int last_blockheight= s->block_height;
/* no supplementary picture */
if (buf_size == 0)
return 0;
if (buf_size < 4)
return -1;
init_get_bits(&gb, avpkt->data, buf_size * 8);
/* start to parse the bitstream */
s->block_width = 16 * (get_bits(&gb, 4) + 1);
s->image_width = get_bits(&gb, 12);
s->block_height = 16 * (get_bits(&gb, 4) + 1);
s->image_height = get_bits(&gb, 12);
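/* The 4-bit codes give block dimensions of 16..256 in steps of 16; the
 * 12-bit fields allow image dimensions of up to 4095 pixels. */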
if ( last_blockwidth != s->block_width
|| last_blockheight!= s->block_height)
av_freep(&s->blocks);
if (s->ver == 2) {
skip_bits(&gb, 6);
if (get_bits1(&gb)) {
avpriv_request_sample(avctx, "iframe");
return AVERROR_PATCHWELCOME;
}
if (get_bits1(&gb)) {
avpriv_request_sample(avctx, "Custom palette");
return AVERROR_PATCHWELCOME;
}
}
/* calculate number of blocks and size of border (partial) blocks */
h_blocks = s->image_width / s->block_width;
h_part = s->image_width % s->block_width;
v_blocks = s->image_height / s->block_height;
v_part = s->image_height % s->block_height;
/* the block size could change between frames; make sure the buffer
* is large enough and, if not, get a larger one */
if (s->block_size < s->block_width * s->block_height) {
int tmpblock_size = 3 * s->block_width * s->block_height;
s->tmpblock = av_realloc(s->tmpblock, tmpblock_size);
if (!s->tmpblock) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
return AVERROR(ENOMEM);
}
if (s->ver == 2) {
s->deflate_block_size = calc_deflate_block_size(tmpblock_size);
if (s->deflate_block_size <= 0) {
av_log(avctx, AV_LOG_ERROR, "Can't determine deflate buffer size.\n");
return -1;
}
s->deflate_block = av_realloc(s->deflate_block, s->deflate_block_size);
if (!s->deflate_block) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate deflate buffer.\n");
return AVERROR(ENOMEM);
}
}
}
s->block_size = s->block_width * s->block_height;
/* initialize the image size once */
if (avctx->width == 0 && avctx->height == 0) {
avcodec_set_dimensions(avctx, s->image_width, s->image_height);
}
/* check for changes of image width and image height */
if (avctx->width != s->image_width || avctx->height != s->image_height) {
av_log(avctx, AV_LOG_ERROR,
"Frame width or height differs from first frame!\n");
av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n",
avctx->height, avctx->width, s->image_height, s->image_width);
return AVERROR_INVALIDDATA;
}
/* we care for keyframes only in Screen Video v2 */
s->is_keyframe = (avpkt->flags & AV_PKT_FLAG_KEY) && (s->ver == 2);
if (s->is_keyframe) {
s->keyframedata = av_realloc(s->keyframedata, avpkt->size);
memcpy(s->keyframedata, avpkt->data, avpkt->size);
}
if(s->ver == 2 && !s->blocks)
s->blocks = av_mallocz((v_blocks + !!v_part) * (h_blocks + !!h_part)
* sizeof(s->blocks[0]));
av_dlog(avctx, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n",
s->image_width, s->image_height, s->block_width, s->block_height,
h_blocks, v_blocks, h_part, v_part);
if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
return ret;
/* loop over all block columns */
for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) {
int y_pos = j * s->block_height; // vertical position in frame
int cur_blk_height = (j < v_blocks) ? s->block_height : v_part;
/* loop over all block rows */
for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) {
int x_pos = i * s->block_width; // horizontal position in frame
int cur_blk_width = (i < h_blocks) ? s->block_width : h_part;
int has_diff = 0;
/* get the size of the compressed zlib chunk */
int size = get_bits(&gb, 16);
s->color_depth = 0;
s->zlibprime_curr = 0;
s->zlibprime_prev = 0;
s->diff_start = 0;
s->diff_height = cur_blk_height;
if (8 * size > get_bits_left(&gb)) {
av_frame_unref(&s->frame);
return AVERROR_INVALIDDATA;
}
if (s->ver == 2 && size) {
skip_bits(&gb, 3);
s->color_depth = get_bits(&gb, 2);
has_diff = get_bits1(&gb);
s->zlibprime_curr = get_bits1(&gb);
s->zlibprime_prev = get_bits1(&gb);
if (s->color_depth != 0 && s->color_depth != 2) {
av_log(avctx, AV_LOG_ERROR,
"%dx%d invalid color depth %d\n", i, j, s->color_depth);
return AVERROR_INVALIDDATA;
}
if (has_diff) {
if (!s->keyframe) {
av_log(avctx, AV_LOG_ERROR,
"inter frame without keyframe\n");
return AVERROR_INVALIDDATA;
}
s->diff_start = get_bits(&gb, 8);
s->diff_height = get_bits(&gb, 8);
if (s->diff_start + s->diff_height > cur_blk_height) {
av_log(avctx, AV_LOG_ERROR, "Block parameters invalid\n");
return AVERROR_INVALIDDATA;
}
av_log(avctx, AV_LOG_DEBUG,
"%dx%d diff start %d height %d\n",
i, j, s->diff_start, s->diff_height);
size -= 2;
}
if (s->zlibprime_prev)
av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_prev\n", i, j);
if (s->zlibprime_curr) {
int col = get_bits(&gb, 8);
int row = get_bits(&gb, 8);
av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_curr %dx%d\n", i, j, col, row);
size -= 2;
avpriv_request_sample(avctx, "zlibprime_curr");
return AVERROR_PATCHWELCOME;
}
if (!s->blocks && (s->zlibprime_curr || s->zlibprime_prev)) {
av_log(avctx, AV_LOG_ERROR, "no data available for zlib "
"priming\n");
return AVERROR_INVALIDDATA;
}
size--; // account for flags byte
}
if (has_diff) {
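/* Refresh this block from the saved keyframe before the decoded
 * diff rows overwrite the changed part. */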
int k;
int off = (s->image_height - y_pos - 1) * s->frame.linesize[0];
for (k = 0; k < cur_blk_height; k++)
memcpy(s->frame.data[0] + off - k*s->frame.linesize[0] + x_pos*3,
s->keyframe + off - k*s->frame.linesize[0] + x_pos*3,
cur_blk_width * 3);
}
/* skip unchanged blocks, which have size 0 */
if (size) {
if (flashsv_decode_block(avctx, avpkt, &gb, size,
cur_blk_width, cur_blk_height,
x_pos, y_pos,
i + j * (h_blocks + !!h_part)))
av_log(avctx, AV_LOG_ERROR,
"error in decompression of block %dx%d\n", i, j);
}
}
}
if (s->is_keyframe && s->ver == 2) {
if (!s->keyframe) {
s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height);
if (!s->keyframe) {
av_log(avctx, AV_LOG_ERROR, "Cannot allocate image data\n");
return AVERROR(ENOMEM);
}
}
memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
}
if ((ret = av_frame_ref(data, &s->frame)) < 0)
return ret;
*got_frame = 1;
if ((get_bits_count(&gb) / 8) != buf_size)
av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
buf_size, (get_bits_count(&gb) / 8));
/* report that the buffer was completely consumed */
return buf_size;
}
static av_cold int flashsv_decode_end(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
inflateEnd(&s->zstream);
/* release the frame if needed */
av_frame_unref(&s->frame);
/* free the tmpblock */
av_free(s->tmpblock);
return 0;
}
#if CONFIG_FLASHSV_DECODER
AVCodec ff_flashsv_decoder = {
.name = "flashsv",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FLASHSV,
.priv_data_size = sizeof(FlashSVContext),
.init = flashsv_decode_init,
.close = flashsv_decode_end,
.decode = flashsv_decode_frame,
.capabilities = CODEC_CAP_DR1,
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v1"),
};
#endif /* CONFIG_FLASHSV_DECODER */
#if CONFIG_FLASHSV2_DECODER
static const uint32_t ff_flashsv2_default_palette[128] = {
0x000000, 0x333333, 0x666666, 0x999999, 0xCCCCCC, 0xFFFFFF,
0x330000, 0x660000, 0x990000, 0xCC0000, 0xFF0000, 0x003300,
0x006600, 0x009900, 0x00CC00, 0x00FF00, 0x000033, 0x000066,
0x000099, 0x0000CC, 0x0000FF, 0x333300, 0x666600, 0x999900,
0xCCCC00, 0xFFFF00, 0x003333, 0x006666, 0x009999, 0x00CCCC,
0x00FFFF, 0x330033, 0x660066, 0x990099, 0xCC00CC, 0xFF00FF,
0xFFFF33, 0xFFFF66, 0xFFFF99, 0xFFFFCC, 0xFF33FF, 0xFF66FF,
0xFF99FF, 0xFFCCFF, 0x33FFFF, 0x66FFFF, 0x99FFFF, 0xCCFFFF,
0xCCCC33, 0xCCCC66, 0xCCCC99, 0xCCCCFF, 0xCC33CC, 0xCC66CC,
0xCC99CC, 0xCCFFCC, 0x33CCCC, 0x66CCCC, 0x99CCCC, 0xFFCCCC,
0x999933, 0x999966, 0x9999CC, 0x9999FF, 0x993399, 0x996699,
0x99CC99, 0x99FF99, 0x339999, 0x669999, 0xCC9999, 0xFF9999,
0x666633, 0x666699, 0x6666CC, 0x6666FF, 0x663366, 0x669966,
0x66CC66, 0x66FF66, 0x336666, 0x996666, 0xCC6666, 0xFF6666,
0x333366, 0x333399, 0x3333CC, 0x3333FF, 0x336633, 0x339933,
0x33CC33, 0x33FF33, 0x663333, 0x993333, 0xCC3333, 0xFF3333,
0x003366, 0x336600, 0x660033, 0x006633, 0x330066, 0x663300,
0x336699, 0x669933, 0x993366, 0x339966, 0x663399, 0x996633,
0x6699CC, 0x99CC66, 0xCC6699, 0x66CC99, 0x9966CC, 0xCC9966,
0x99CCFF, 0xCCFF99, 0xFF99CC, 0x99FFCC, 0xCC99FF, 0xFFCC99,
0x111111, 0x222222, 0x444444, 0x555555, 0xAAAAAA, 0xBBBBBB,
0xDDDDDD, 0xEEEEEE
};
static av_cold int flashsv2_decode_init(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
flashsv_decode_init(avctx);
s->pal = ff_flashsv2_default_palette;
s->ver = 2;
return 0;
}
static av_cold int flashsv2_decode_end(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
av_freep(&s->keyframedata);
av_freep(&s->blocks);
av_freep(&s->keyframe);
av_freep(&s->deflate_block);
flashsv_decode_end(avctx);
return 0;
}
AVCodec ff_flashsv2_decoder = {
.name = "flashsv2",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FLASHSV2,
.priv_data_size = sizeof(FlashSVContext),
.init = flashsv2_decode_init,
.close = flashsv2_decode_end,
.decode = flashsv_decode_frame,
.capabilities = CODEC_CAP_DR1,
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v2"),
};
#endif /* CONFIG_FLASHSV2_DECODER */
| ./CrossVul/dataset_final_sorted/CWE-20/c/good_5819_0 |
crossvul-cpp_data_bad_3174_0 | /*
* llc_conn.c - Driver routines for connection component.
*
* Copyright (c) 1997 by Procom Technology, Inc.
* 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program can be redistributed or modified under the terms of the
* GNU General Public License as published by the Free Software Foundation.
* This program is distributed without any warranty or implied warranty
* of merchantability or fitness for a particular purpose.
*
* See the GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <net/llc_sap.h>
#include <net/llc_conn.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/llc_c_ev.h>
#include <net/llc_c_ac.h>
#include <net/llc_c_st.h>
#include <net/llc_pdu.h>
#if 0
#define dprintk(args...) printk(KERN_DEBUG args)
#else
#define dprintk(args...)
#endif
static int llc_find_offset(int state, int ev_type);
static void llc_conn_send_pdus(struct sock *sk);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
struct llc_conn_state_trans *trans,
struct sk_buff *ev);
static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
struct sk_buff *skb);
/* Offset table on connection states transition diagram */
static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV];
int sysctl_llc2_ack_timeout = LLC2_ACK_TIME * HZ;
int sysctl_llc2_p_timeout = LLC2_P_TIME * HZ;
int sysctl_llc2_rej_timeout = LLC2_REJ_TIME * HZ;
int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ;
/**
* llc_conn_state_process - sends event to connection state machine
* @sk: connection
* @skb: occurred event
*
* Sends an event to the connection state machine. After the event is
* processed (its actions executed and the state changed), the upper layer
* is indicated or confirmed, if needed. Returns 0 for success, 1 for
* failure. The socket lock has to be held before calling this function.
*/
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
{
int rc;
struct llc_sock *llc = llc_sk(skb->sk);
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
/*
* We have to hold the skb, because llc_conn_service will kfree it in
* the sending path and we need to look at the skb->cb, where we encode
* llc_conn_state_ev.
*/
skb_get(skb);
ev->ind_prim = ev->cfm_prim = 0;
/*
* Send event to state machine
*/
rc = llc_conn_service(skb->sk, skb);
if (unlikely(rc != 0)) {
printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
goto out_kfree_skb;
}
if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
/* indicate or confirm not required */
if (!skb->next)
goto out_kfree_skb;
goto out_skb_put;
}
if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
skb_get(skb);
switch (ev->ind_prim) {
case LLC_DATA_PRIM:
llc_save_primitive(sk, skb, LLC_DATA_PRIM);
if (unlikely(sock_queue_rcv_skb(sk, skb))) {
/*
* shouldn't happen
*/
printk(KERN_ERR "%s: sock_queue_rcv_skb failed!\n",
__func__);
kfree_skb(skb);
}
break;
case LLC_CONN_PRIM:
/*
* Can't be sock_queue_rcv_skb, because we have to leave the
* skb->sk pointing to the newly created struct sock in
* llc_conn_handler. -acme
*/
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_state_change(sk);
break;
case LLC_DISC_PRIM:
sock_hold(sk);
if (sk->sk_type == SOCK_STREAM &&
sk->sk_state == TCP_ESTABLISHED) {
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_socket->state = SS_UNCONNECTED;
sk->sk_state = TCP_CLOSE;
if (!sock_flag(sk, SOCK_DEAD)) {
sock_set_flag(sk, SOCK_DEAD);
sk->sk_state_change(sk);
}
}
kfree_skb(skb);
sock_put(sk);
break;
case LLC_RESET_PRIM:
/*
* FIXME:
* RESET is not being notified to upper layers for now
*/
printk(KERN_INFO "%s: received a reset ind!\n", __func__);
kfree_skb(skb);
break;
default:
if (ev->ind_prim) {
printk(KERN_INFO "%s: received unknown %d prim!\n",
__func__, ev->ind_prim);
kfree_skb(skb);
}
/* No indication */
break;
}
switch (ev->cfm_prim) {
case LLC_DATA_PRIM:
if (!llc_data_accept_state(llc->state))
sk->sk_write_space(sk);
else
rc = llc->failed_data_req = 1;
break;
case LLC_CONN_PRIM:
if (sk->sk_type == SOCK_STREAM &&
sk->sk_state == TCP_SYN_SENT) {
if (ev->status) {
sk->sk_socket->state = SS_UNCONNECTED;
sk->sk_state = TCP_CLOSE;
} else {
sk->sk_socket->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
}
sk->sk_state_change(sk);
}
break;
case LLC_DISC_PRIM:
sock_hold(sk);
if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSING) {
sk->sk_socket->state = SS_UNCONNECTED;
sk->sk_state = TCP_CLOSE;
sk->sk_state_change(sk);
}
sock_put(sk);
break;
case LLC_RESET_PRIM:
/*
* FIXME:
* RESET is not being notified to upper layers for now
*/
printk(KERN_INFO "%s: received a reset conf!\n", __func__);
break;
default:
if (ev->cfm_prim) {
printk(KERN_INFO "%s: received unknown %d prim!\n",
__func__, ev->cfm_prim);
break;
}
goto out_skb_put; /* No confirmation */
}
out_kfree_skb:
kfree_skb(skb);
out_skb_put:
kfree_skb(skb);
return rc;
}
void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
/* queue PDU to send to MAC layer */
skb_queue_tail(&sk->sk_write_queue, skb);
llc_conn_send_pdus(sk);
}
/**
* llc_conn_rtn_pdu - sends received data pdu to upper layer
* @sk: Active connection
* @skb: Received data frame
*
* Sends received data pdu to upper layer (by using indicate function).
* Prepares service parameters (prim and prim_data). calling indication
* function will be done in llc_conn_state_process.
*/
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->ind_prim = LLC_DATA_PRIM;
}
/**
* llc_conn_resend_i_pdu_as_cmd - resend all unacknowledged I PDUs
* @sk: active connection
* @nr: NR
* @first_p_bit: p_bit value of first pdu
*
* Resend all unacknowledged I PDUs, starting with the NR; send the first
* as a command PDU with the P bit equal to first_p_bit; if there is more
* than one, send the rest as command PDUs with the P bit equal to zero (0).
*/
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
{
struct sk_buff *skb;
struct llc_pdu_sn *pdu;
u16 nbr_unack_pdus;
struct llc_sock *llc;
u8 howmany_resend = 0;
llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus);
if (!nbr_unack_pdus)
goto out;
/*
* Process unack PDUs only if unack queue is not empty; remove
* appropriate PDUs, fix them up, and put them on mac_pdu_q.
*/
llc = llc_sk(sk);
while ((skb = skb_dequeue(&llc->pdu_unack_q)) != NULL) {
pdu = llc_pdu_sn_hdr(skb);
llc_pdu_set_cmd_rsp(skb, LLC_PDU_CMD);
llc_pdu_set_pf_bit(skb, first_p_bit);
skb_queue_tail(&sk->sk_write_queue, skb);
first_p_bit = 0;
llc->vS = LLC_I_GET_NS(pdu);
howmany_resend++;
}
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
llc_conn_send_pdus(sk);
out:;
}
/**
* llc_conn_resend_i_pdu_as_rsp - Resend all unacknowledged I PDUs
* @sk: active connection.
* @nr: NR
* @first_f_bit: f_bit value of first pdu.
*
* Resend all unacknowledged I PDUs, starting with the NR; send the first
* as a response PDU with the F bit equal to first_f_bit; if there is more
* than one, send the rest as response PDUs with the F bit equal to zero (0).
*/
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
{
struct sk_buff *skb;
u16 nbr_unack_pdus;
struct llc_sock *llc = llc_sk(sk);
u8 howmany_resend = 0;
llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus);
if (!nbr_unack_pdus)
goto out;
/*
* Process unack PDUs only if unack queue is not empty; remove
* appropriate PDUs, fix them up, and put them on mac_pdu_q
*/
while ((skb = skb_dequeue(&llc->pdu_unack_q)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
llc_pdu_set_cmd_rsp(skb, LLC_PDU_RSP);
llc_pdu_set_pf_bit(skb, first_f_bit);
skb_queue_tail(&sk->sk_write_queue, skb);
first_f_bit = 0;
llc->vS = LLC_I_GET_NS(pdu);
howmany_resend++;
}
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
llc_conn_send_pdus(sk);
out:;
}
/**
* llc_conn_remove_acked_pdus - Removes acknowledged pdus from tx queue
* @sk: active connection
* @nr: NR
* @how_many_unacked: size of pdu_unack_q after removing acked pdus
*
* Removes acknowledged pdus from the transmit queue (pdu_unack_q). Returns
* the number of pdus removed from the queue.
*/
int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked)
{
int pdu_pos, i;
struct sk_buff *skb;
struct llc_pdu_sn *pdu;
int nbr_acked = 0;
struct llc_sock *llc = llc_sk(sk);
int q_len = skb_queue_len(&llc->pdu_unack_q);
if (!q_len)
goto out;
skb = skb_peek(&llc->pdu_unack_q);
pdu = llc_pdu_sn_hdr(skb);
/* finding position of last acked pdu in queue */
pdu_pos = ((int)LLC_2_SEQ_NBR_MODULO + (int)nr -
(int)LLC_I_GET_NS(pdu)) % LLC_2_SEQ_NBR_MODULO;
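/* pdu_pos is the modular distance from the first unacked N(S) to nr;
 * e.g. with modulo 128, N(S) == 126 and nr == 2 give pdu_pos == 4, so
 * PDUs 126, 127, 0 and 1 are dequeued as acknowledged. */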
for (i = 0; i < pdu_pos && i < q_len; i++) {
skb = skb_dequeue(&llc->pdu_unack_q);
kfree_skb(skb);
nbr_acked++;
}
out:
*how_many_unacked = skb_queue_len(&llc->pdu_unack_q);
return nbr_acked;
}
/**
* llc_conn_send_pdus - Sends queued PDUs
* @sk: active connection
*
* Sends queued pdus to MAC layer for transmission.
*/
static void llc_conn_send_pdus(struct sock *sk)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
if (LLC_PDU_TYPE_IS_I(pdu) &&
!(skb->dev->flags & IFF_LOOPBACK)) {
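/* Keep the original I PDU on the unack queue for possible
 * retransmission and hand a clone to the device instead. */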
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
if (!skb2)
break;
skb = skb2;
}
dev_queue_xmit(skb);
}
}
/**
* llc_conn_service - finds transition and changes state of connection
* @sk: connection
* @skb: event that occurred
*
* This function finds the transition that matches the event that occurred,
* executes the related actions and finally changes the state of the
* connection. Returns 0 for success, 1 for failure.
*/
static int llc_conn_service(struct sock *sk, struct sk_buff *skb)
{
int rc = 1;
struct llc_sock *llc = llc_sk(sk);
struct llc_conn_state_trans *trans;
if (llc->state > NBR_CONN_STATES)
goto out;
rc = 0;
trans = llc_qualify_conn_ev(sk, skb);
if (trans) {
rc = llc_exec_conn_trans_actions(sk, trans, skb);
if (!rc && trans->next_state != NO_STATE_CHANGE) {
llc->state = trans->next_state;
if (!llc_data_accept_state(llc->state))
sk->sk_state_change(sk);
}
}
out:
return rc;
}
/**
* llc_qualify_conn_ev - finds transition for event
* @sk: connection
* @skb: event that occurred
*
* This function finds the transition that matches the event that occurred.
* Returns pointer to found transition on success, %NULL otherwise.
*/
static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
struct sk_buff *skb)
{
struct llc_conn_state_trans **next_trans;
const llc_conn_ev_qfyr_t *next_qualifier;
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
struct llc_sock *llc = llc_sk(sk);
struct llc_conn_state *curr_state =
&llc_conn_state_table[llc->state - 1];
/* search thru events for this state until
* list exhausted or until no more
*/
for (next_trans = curr_state->transitions +
llc_find_offset(llc->state - 1, ev->type);
(*next_trans)->ev; next_trans++) {
if (!((*next_trans)->ev)(sk, skb)) {
/* got POSSIBLE event match; the event may require
* qualification based on the values of a number of
* state flags; if all qualifications are met (i.e.,
* if all qualifying functions return success, or 0,
* then this is THE event we're looking for
*/
for (next_qualifier = (*next_trans)->ev_qualifiers;
next_qualifier && *next_qualifier &&
!(*next_qualifier)(sk, skb); next_qualifier++)
/* nothing */;
if (!next_qualifier || !*next_qualifier)
/* all qualifiers executed successfully; this is
* our transition; return it so we can perform
* the associated actions & change the state
*/
return *next_trans;
}
}
return NULL;
}
/**
* llc_exec_conn_trans_actions - executes related actions
* @sk: connection
* @trans: transition whose actions must be performed
* @skb: event
*
* Executes the actions related to the event that occurred. Returns 0 for
* success, 1 to indicate failure of at least one action.
*/
static int llc_exec_conn_trans_actions(struct sock *sk,
struct llc_conn_state_trans *trans,
struct sk_buff *skb)
{
int rc = 0;
const llc_conn_action_t *next_action;
for (next_action = trans->ev_actions;
next_action && *next_action; next_action++) {
int rc2 = (*next_action)(sk, skb);
if (rc2 == 2) {
rc = rc2;
break;
} else if (rc2)
rc = 1;
}
return rc;
}
static inline bool llc_estab_match(const struct llc_sap *sap,
const struct llc_addr *daddr,
const struct llc_addr *laddr,
const struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
return llc->laddr.lsap == laddr->lsap &&
llc->daddr.lsap == daddr->lsap &&
ether_addr_equal(llc->laddr.mac, laddr->mac) &&
ether_addr_equal(llc->daddr.mac, daddr->mac);
}
/**
* __llc_lookup_established - Finds connection for the remote/local sap/mac
* @sap: SAP
* @daddr: address of remote LLC (MAC + SAP)
* @laddr: address of local LLC (MAC + SAP)
*
* Search connection list of the SAP and finds connection using the remote
* mac, remote sap, local mac, and local sap. Returns pointer for
* connection found, %NULL otherwise.
* Caller has to make sure local_bh is disabled.
*/
static struct sock *__llc_lookup_established(struct llc_sap *sap,
struct llc_addr *daddr,
struct llc_addr *laddr)
{
struct sock *rc;
struct hlist_nulls_node *node;
int slot = llc_sk_laddr_hashfn(sap, laddr);
struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
rcu_read_lock();
again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_estab_match(sap, daddr, laddr, rc)) {
/* Extra checks required by SLAB_DESTROY_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
!llc_estab_match(sap, daddr, laddr, rc))) {
sock_put(rc);
continue;
}
goto found;
}
}
rc = NULL;
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (unlikely(get_nulls_value(node) != slot))
goto again;
found:
rcu_read_unlock();
return rc;
}
struct sock *llc_lookup_established(struct llc_sap *sap,
struct llc_addr *daddr,
struct llc_addr *laddr)
{
struct sock *sk;
local_bh_disable();
sk = __llc_lookup_established(sap, daddr, laddr);
local_bh_enable();
return sk;
}
static inline bool llc_listener_match(const struct llc_sap *sap,
const struct llc_addr *laddr,
const struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
llc->laddr.lsap == laddr->lsap &&
ether_addr_equal(llc->laddr.mac, laddr->mac);
}
static struct sock *__llc_lookup_listener(struct llc_sap *sap,
struct llc_addr *laddr)
{
struct sock *rc;
struct hlist_nulls_node *node;
int slot = llc_sk_laddr_hashfn(sap, laddr);
struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
rcu_read_lock();
again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_listener_match(sap, laddr, rc)) {
/* Extra checks required by SLAB_DESTROY_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
!llc_listener_match(sap, laddr, rc))) {
sock_put(rc);
continue;
}
goto found;
}
}
rc = NULL;
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (unlikely(get_nulls_value(node) != slot))
goto again;
found:
rcu_read_unlock();
return rc;
}
/**
* llc_lookup_listener - Finds listener for local MAC + SAP
* @sap: SAP
* @laddr: address of local LLC (MAC + SAP)
*
* Search connection list of the SAP and finds connection listening on
* local mac, and local sap. Returns pointer for parent socket found,
* %NULL otherwise.
* Caller has to make sure local_bh is disabled.
*/
static struct sock *llc_lookup_listener(struct llc_sap *sap,
struct llc_addr *laddr)
{
static struct llc_addr null_addr;
struct sock *rc = __llc_lookup_listener(sap, laddr);
if (!rc)
rc = __llc_lookup_listener(sap, &null_addr);
return rc;
}
static struct sock *__llc_lookup(struct llc_sap *sap,
struct llc_addr *daddr,
struct llc_addr *laddr)
{
struct sock *sk = __llc_lookup_established(sap, daddr, laddr);
return sk ? : llc_lookup_listener(sap, laddr);
}
/**
* llc_data_accept_state - indicates whether data can be sent in this state.
* @state: state of connection.
*
* Returns 0 if data can be sent, 1 otherwise.
*/
u8 llc_data_accept_state(u8 state)
{
return state != LLC_CONN_STATE_NORMAL && state != LLC_CONN_STATE_BUSY &&
state != LLC_CONN_STATE_REJ;
}
/**
* llc_find_next_offset - finds offset for next category of transitions
* @state: state table.
* @offset: start offset.
*
* Finds offset of next category of transitions in transition table.
* Returns the start index of next category.
*/
static u16 __init llc_find_next_offset(struct llc_conn_state *state, u16 offset)
{
u16 cnt = 0;
struct llc_conn_state_trans **next_trans;
for (next_trans = state->transitions + offset;
(*next_trans)->ev; next_trans++)
++cnt;
return cnt;
}
/**
* llc_build_offset_table - builds offset table of connection
*
* Fills offset table of connection state transition table
* (llc_offset_table).
*/
void __init llc_build_offset_table(void)
{
struct llc_conn_state *curr_state;
int state, ev_type, next_offset;
for (state = 0; state < NBR_CONN_STATES; state++) {
curr_state = &llc_conn_state_table[state];
next_offset = 0;
for (ev_type = 0; ev_type < NBR_CONN_EV; ev_type++) {
llc_offset_table[state][ev_type] = next_offset;
next_offset += llc_find_next_offset(curr_state,
next_offset) + 1;
}
}
}
/**
* llc_find_offset - finds start offset of category of transitions
* @state: state of connection
* @ev_type: type of the event that occurred
*
* Finds start offset of desired category of transitions. Returns the
* desired start offset.
*/
static int llc_find_offset(int state, int ev_type)
{
int rc = 0;
/* at this stage, llc_offset_table[..][2] is not important. it is for
* init_pf_cycle and I don't know what it is.
*/
switch (ev_type) {
case LLC_CONN_EV_TYPE_PRIM:
rc = llc_offset_table[state][0]; break;
case LLC_CONN_EV_TYPE_PDU:
rc = llc_offset_table[state][4]; break;
case LLC_CONN_EV_TYPE_SIMPLE:
rc = llc_offset_table[state][1]; break;
case LLC_CONN_EV_TYPE_P_TMR:
case LLC_CONN_EV_TYPE_ACK_TMR:
case LLC_CONN_EV_TYPE_REJ_TMR:
case LLC_CONN_EV_TYPE_BUSY_TMR:
rc = llc_offset_table[state][3]; break;
}
return rc;
}
/**
* llc_sap_add_socket - adds a socket to a SAP
* @sap: SAP
* @sk: socket
*
* This function adds a socket to the hash tables of a SAP.
*/
void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex);
struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr);
llc_sap_hold(sap);
llc_sk(sk)->sap = sap;
spin_lock_bh(&sap->sk_lock);
sap->sk_count++;
sk_nulls_add_node_rcu(sk, laddr_hb);
hlist_add_head(&llc->dev_hash_node, dev_hb);
spin_unlock_bh(&sap->sk_lock);
}
/**
* llc_sap_remove_socket - removes a socket from SAP
* @sap: SAP
* @sk: socket
*
* This function removes a connection from the hash tables of a SAP if
* the connection was in this list.
*/
void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
spin_lock_bh(&sap->sk_lock);
sk_nulls_del_node_init_rcu(sk);
hlist_del(&llc->dev_hash_node);
sap->sk_count--;
spin_unlock_bh(&sap->sk_lock);
llc_sap_put(sap);
}
/**
* llc_conn_rcv - sends received pdus to the connection state machine
* @sk: current connection structure.
* @skb: received frame.
*
* Sends received pdus to the connection state machine.
*/
static int llc_conn_rcv(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->type = LLC_CONN_EV_TYPE_PDU;
ev->reason = 0;
return llc_conn_state_process(sk, skb);
}
static struct sock *llc_create_incoming_sock(struct sock *sk,
struct net_device *dev,
struct llc_addr *saddr,
struct llc_addr *daddr)
{
struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC,
sk->sk_prot, 0);
struct llc_sock *newllc, *llc = llc_sk(sk);
if (!newsk)
goto out;
newllc = llc_sk(newsk);
memcpy(&newllc->laddr, daddr, sizeof(newllc->laddr));
memcpy(&newllc->daddr, saddr, sizeof(newllc->daddr));
newllc->dev = dev;
dev_hold(dev);
llc_sap_add_socket(llc->sap, newsk);
llc_sap_hold(llc->sap);
out:
return newsk;
}
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_addr saddr, daddr;
struct sock *sk;
llc_pdu_decode_sa(skb, saddr.mac);
llc_pdu_decode_ssap(skb, &saddr.lsap);
llc_pdu_decode_da(skb, daddr.mac);
llc_pdu_decode_dsap(skb, &daddr.lsap);
sk = __llc_lookup(sap, &saddr, &daddr);
if (!sk)
goto drop;
bh_lock_sock(sk);
/*
* This has to be done here and not at the upper layer ->accept
* method because of the way the PROCOM state machine works:
* it needs to set several state variables (see, for instance,
* llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to
* the originator of the new connection, and this state has to be
* in the newly created struct sock private area. -acme
*/
if (unlikely(sk->sk_state == TCP_LISTEN)) {
struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
&saddr, &daddr);
if (!newsk)
goto drop_unlock;
skb_set_owner_r(skb, newsk);
} else {
/*
* Can't be skb_set_owner_r, this will be done at the
* llc_conn_state_process function, later on, when we will use
* skb_queue_rcv_skb to send it to upper layers, this is
* another trick required to cope with how the PROCOM state
* machine works. -acme
*/
skb->sk = sk;
}
if (!sock_owned_by_user(sk))
llc_conn_rcv(sk, skb);
else {
dprintk("%s: adding to backlog...\n", __func__);
llc_set_backlog_type(skb, LLC_PACKET);
if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
goto drop_unlock;
}
out:
bh_unlock_sock(sk);
sock_put(sk);
return;
drop:
kfree_skb(skb);
return;
drop_unlock:
kfree_skb(skb);
goto out;
}
#undef LLC_REFCNT_DEBUG
#ifdef LLC_REFCNT_DEBUG
static atomic_t llc_sock_nr;
#endif
/**
* llc_backlog_rcv - Processes rx frames and expired timers.
* @sk: LLC sock (p8022 connection)
* @skb: queued rx frame or event
*
* This function processes frames that has received and timers that has
* expired during sending an I pdu (refer to data_req_handler). frames
* queue by llc_rcv function (llc_mac.c) and timers queue by timer
* callback functions(llc_c_ac.c).
*/
static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
int rc = 0;
struct llc_sock *llc = llc_sk(sk);
if (likely(llc_backlog_type(skb) == LLC_PACKET)) {
if (likely(llc->state > 1)) /* not closed */
rc = llc_conn_rcv(sk, skb);
else
goto out_kfree_skb;
} else if (llc_backlog_type(skb) == LLC_EVENT) {
/* timer expiration event */
if (likely(llc->state > 1)) /* not closed */
rc = llc_conn_state_process(sk, skb);
else
goto out_kfree_skb;
} else {
printk(KERN_ERR "%s: invalid skb in backlog\n", __func__);
goto out_kfree_skb;
}
out:
return rc;
out_kfree_skb:
kfree_skb(skb);
goto out;
}
/**
* llc_sk_init - Initializes a socket with default llc values.
* @sk: socket to initialize.
*
* Initializes a socket with default llc values.
*/
static void llc_sk_init(struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
llc->state = LLC_CONN_STATE_ADM;
llc->inc_cntr = llc->dec_cntr = 2;
llc->dec_step = llc->connect_step = 1;
setup_timer(&llc->ack_timer.timer, llc_conn_ack_tmr_cb,
(unsigned long)sk);
llc->ack_timer.expire = sysctl_llc2_ack_timeout;
setup_timer(&llc->pf_cycle_timer.timer, llc_conn_pf_cycle_tmr_cb,
(unsigned long)sk);
llc->pf_cycle_timer.expire = sysctl_llc2_p_timeout;
setup_timer(&llc->rej_sent_timer.timer, llc_conn_rej_tmr_cb,
(unsigned long)sk);
llc->rej_sent_timer.expire = sysctl_llc2_rej_timeout;
setup_timer(&llc->busy_state_timer.timer, llc_conn_busy_tmr_cb,
(unsigned long)sk);
llc->busy_state_timer.expire = sysctl_llc2_busy_timeout;
llc->n2 = 2; /* max retransmit */
llc->k = 2; /* tx win size, will adjust dynam */
llc->rw = 128; /* rx win size (opt and equal to
* tx_win of remote LLC) */
skb_queue_head_init(&llc->pdu_unack_q);
sk->sk_backlog_rcv = llc_backlog_rcv;
}
/**
* llc_sk_alloc - Allocates LLC sock
* @family: upper layer protocol family
* @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
*
* Allocates a LLC sock and initializes it. Returns the new LLC sock
* or %NULL if there's no memory available for one
*/
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern)
{
struct sock *sk = sk_alloc(net, family, priority, prot, kern);
if (!sk)
goto out;
llc_sk_init(sk);
sock_init_data(NULL, sk);
#ifdef LLC_REFCNT_DEBUG
atomic_inc(&llc_sock_nr);
printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk,
__func__, atomic_read(&llc_sock_nr));
#endif
out:
return sk;
}
/**
* llc_sk_free - Frees a LLC socket
* @sk: socket to free
*
* Frees a LLC socket
*/
void llc_sk_free(struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
llc->state = LLC_CONN_OUT_OF_SVC;
/* Stop all (possibly) running timers */
llc_conn_ac_stop_all_timers(sk, NULL);
#ifdef DEBUG_LLC_CONN_ALLOC
printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
skb_queue_len(&llc->pdu_unack_q),
skb_queue_len(&sk->sk_write_queue));
#endif
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&llc->pdu_unack_q);
#ifdef LLC_REFCNT_DEBUG
if (atomic_read(&sk->sk_refcnt) != 1) {
printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n",
sk, __func__, atomic_read(&sk->sk_refcnt));
printk(KERN_DEBUG "%d LLC sockets are still alive\n",
atomic_read(&llc_sock_nr));
} else {
atomic_dec(&llc_sock_nr);
printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk,
__func__, atomic_read(&llc_sock_nr));
}
#endif
sock_put(sk);
}
/**
* llc_sk_reset - resets a connection
* @sk: LLC socket to reset
*
* Resets a connection to the out of service state. Stops its timers
* and frees any frames in the queues of the connection.
*/
void llc_sk_reset(struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
llc_conn_ac_stop_all_timers(sk, NULL);
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&llc->pdu_unack_q);
llc->remote_busy_flag = 0;
llc->cause_flag = 0;
llc->retry_count = 0;
llc_conn_set_p_flag(sk, 0);
llc->f_flag = 0;
llc->s_flag = 0;
llc->ack_pf = 0;
llc->first_pdu_Ns = 0;
llc->ack_must_be_send = 0;
llc->dec_step = 1;
llc->inc_cntr = 2;
llc->dec_cntr = 2;
llc->X = 0;
llc->failed_data_req = 0 ;
llc->last_nr = 0;
}
| ./CrossVul/dataset_final_sorted/CWE-20/c/bad_3174_0 |
crossvul-cpp_data_bad_1789_0 | /***************************************************************************
* Copyright (C) 2005-2015 by the Quassel Project *
* devel@quassel-irc.org *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) version 3. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
***************************************************************************/
#include "coreuserinputhandler.h"
#include "util.h"
#include "ctcpparser.h"
#include <QRegExp>
#ifdef HAVE_QCA2
# include "cipher.h"
#endif
CoreUserInputHandler::CoreUserInputHandler(CoreNetwork *parent)
: CoreBasicHandler(parent)
{
}
void CoreUserInputHandler::handleUserInput(const BufferInfo &bufferInfo, const QString &msg)
{
if (msg.isEmpty())
return;
AliasManager::CommandList list = coreSession()->aliasManager().processInput(bufferInfo, msg);
for (int i = 0; i < list.count(); i++) {
QString cmd = list.at(i).second.section(' ', 0, 0).remove(0, 1).toUpper();
QString payload = list.at(i).second.section(' ', 1);
handle(cmd, Q_ARG(BufferInfo, list.at(i).first), Q_ARG(QString, payload));
}
}
// ====================
// Public Slots
// ====================
void CoreUserInputHandler::handleAway(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
if (msg.startsWith("-all")) {
if (msg.length() == 4) {
coreSession()->globalAway();
return;
}
Q_ASSERT(msg.length() > 4);
if (msg[4] == ' ') {
coreSession()->globalAway(msg.mid(5));
return;
}
}
issueAway(msg);
}
void CoreUserInputHandler::issueAway(const QString &msg, bool autoCheck)
{
QString awayMsg = msg;
IrcUser *me = network()->me();
// if there is no message supplied we have to check if we are already away or not
if (autoCheck && msg.isEmpty()) {
if (me && !me->isAway()) {
Identity *identity = network()->identityPtr();
if (identity) {
awayMsg = identity->awayReason();
}
if (awayMsg.isEmpty()) {
awayMsg = tr("away");
}
}
}
if (me)
me->setAwayMessage(awayMsg);
putCmd("AWAY", serverEncode(awayMsg));
}
void CoreUserInputHandler::handleBan(const BufferInfo &bufferInfo, const QString &msg)
{
banOrUnban(bufferInfo, msg, true);
}
void CoreUserInputHandler::handleUnban(const BufferInfo &bufferInfo, const QString &msg)
{
banOrUnban(bufferInfo, msg, false);
}
void CoreUserInputHandler::banOrUnban(const BufferInfo &bufferInfo, const QString &msg, bool ban)
{
QString banChannel;
QString banUser;
QStringList params = msg.split(" ");
if (!params.isEmpty() && isChannelName(params[0])) {
banChannel = params.takeFirst();
}
else if (bufferInfo.type() == BufferInfo::ChannelBuffer) {
banChannel = bufferInfo.bufferName();
}
else {
emit displayMsg(Message::Error, BufferInfo::StatusBuffer, "", QString("Error: channel unknown in command: /BAN %1").arg(msg));
return;
}
if (!params.isEmpty() && !params.contains("!") && network()->ircUser(params[0])) {
IrcUser *ircuser = network()->ircUser(params[0]);
// generalizedHost changes <nick> to *!ident@*.sld.tld.
QString generalizedHost = ircuser->host();
if (generalizedHost.isEmpty()) {
emit displayMsg(Message::Error, BufferInfo::StatusBuffer, "", QString("Error: host unknown in command: /BAN %1").arg(msg));
return;
}
static QRegExp ipAddress("\\d+\\.\\d+\\.\\d+\\.\\d+");
if (ipAddress.exactMatch(generalizedHost)) {
int lastDotPos = generalizedHost.lastIndexOf('.') + 1;
generalizedHost.replace(lastDotPos, generalizedHost.length() - lastDotPos, '*');
}
else if (generalizedHost.lastIndexOf(".") != -1 && generalizedHost.lastIndexOf(".", generalizedHost.lastIndexOf(".")-1) != -1) {
int secondLastPeriodPosition = generalizedHost.lastIndexOf(".", generalizedHost.lastIndexOf(".")-1);
generalizedHost.replace(0, secondLastPeriodPosition, "*");
}
banUser = QString("*!%1@%2").arg(ircuser->user(), generalizedHost);
}
else {
banUser = params.join(" ");
}
QString banMode = ban ? "+b" : "-b";
QString banMsg = QString("MODE %1 %2 %3").arg(banChannel, banMode, banUser);
emit putRawLine(serverEncode(banMsg));
}
void CoreUserInputHandler::handleCtcp(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString nick = msg.section(' ', 0, 0);
QString ctcpTag = msg.section(' ', 1, 1).toUpper();
if (ctcpTag.isEmpty())
return;
QString message = msg.section(' ', 2);
QString verboseMessage = tr("sending CTCP-%1 request to %2").arg(ctcpTag).arg(nick);
if (ctcpTag == "PING") {
message = QString::number(QDateTime::currentMSecsSinceEpoch());
}
// FIXME make this a proper event
coreNetwork()->coreSession()->ctcpParser()->query(coreNetwork(), nick, ctcpTag, message);
emit displayMsg(Message::Action, BufferInfo::StatusBuffer, "", verboseMessage, network()->myNick());
}
void CoreUserInputHandler::handleDelkey(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.isEmpty() && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
if (parms.isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname,
tr("[usage] /delkey <nick|channel> deletes the encryption key for nick or channel or just /delkey when in a channel or query."));
return;
}
QString target = parms.at(0);
if (network()->cipherKey(target).isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("No key has been set for %1.").arg(target));
return;
}
network()->setCipherKey(target, QByteArray());
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("The key for %1 has been deleted.").arg(target));
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA2) library. "
"Contact your distributor about a Quassel package with QCA2 "
"support, or rebuild Quassel with QCA2 present."));
#endif
}
void CoreUserInputHandler::doMode(const BufferInfo &bufferInfo, const QChar& addOrRemove, const QChar& mode, const QString &nicks)
{
QString m;
bool isNumber;
int maxModes = network()->support("MODES").toInt(&isNumber);
if (!isNumber || maxModes == 0) maxModes = 1;
QStringList nickList;
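// Note (annotation): when the current buffer is not a channel,
// network()->ircChannel(bufferInfo.bufferName()) can return a null pointer,
// and the dereference below is unguarded; the patched version of this handler
// additionally checks bufferInfo.type() == BufferInfo::ChannelBuffer.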
if (nicks == "*") { // All users in channel
const QList<IrcUser*> users = network()->ircChannel(bufferInfo.bufferName())->ircUsers();
foreach(IrcUser *user, users) {
if ((addOrRemove == '+' && !network()->ircChannel(bufferInfo.bufferName())->userModes(user).contains(mode))
|| (addOrRemove == '-' && network()->ircChannel(bufferInfo.bufferName())->userModes(user).contains(mode)))
nickList.append(user->nick());
}
} else {
nickList = nicks.split(' ', QString::SkipEmptyParts);
}
if (nickList.count() == 0) return;
while (!nickList.isEmpty()) {
int amount = qMin(nickList.count(), maxModes);
QString m = addOrRemove; for(int i = 0; i < amount; i++) m += mode;
QStringList params;
params << bufferInfo.bufferName() << m;
for(int i = 0; i < amount; i++) params << nickList.takeFirst();
emit putCmd("MODE", serverEncode(params));
}
}
void CoreUserInputHandler::handleDeop(const BufferInfo &bufferInfo, const QString &nicks)
{
doMode(bufferInfo, '-', 'o', nicks);
}
void CoreUserInputHandler::handleDehalfop(const BufferInfo &bufferInfo, const QString &nicks)
{
doMode(bufferInfo, '-', 'h', nicks);
}
void CoreUserInputHandler::handleDevoice(const BufferInfo &bufferInfo, const QString &nicks)
{
doMode(bufferInfo, '-', 'v', nicks);
}
void CoreUserInputHandler::handleHalfop(const BufferInfo &bufferInfo, const QString &nicks)
{
doMode(bufferInfo, '+', 'h', nicks);
}
void CoreUserInputHandler::handleOp(const BufferInfo &bufferInfo, const QString &nicks) {
doMode(bufferInfo, '+', 'o', nicks);
}
void CoreUserInputHandler::handleInvite(const BufferInfo &bufferInfo, const QString &msg)
{
QStringList params;
params << msg << bufferInfo.bufferName();
emit putCmd("INVITE", serverEncode(params));
}
void CoreUserInputHandler::handleJoin(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo);
// trim spaces before chans or keys
QString sane_msg = msg;
sane_msg.replace(QRegExp(", +"), ",");
QStringList params = sane_msg.trimmed().split(" ");
QStringList chans = params[0].split(",", QString::SkipEmptyParts);
QStringList keys;
if (params.count() > 1)
keys = params[1].split(",");
int i;
for (i = 0; i < chans.count(); i++) {
if (!network()->isChannelName(chans[i]))
chans[i].prepend('#');
if (i < keys.count()) {
network()->addChannelKey(chans[i], keys[i]);
}
else {
network()->removeChannelKey(chans[i]);
}
}
static const char *cmd = "JOIN";
i = 0;
QStringList joinChans, joinKeys;
int slicesize = chans.count();
QList<QByteArray> encodedParams;
// go through all to-be-joined channels and (re)build the join list
while (i < chans.count()) {
joinChans.append(chans.at(i));
if (i < keys.count())
joinKeys.append(keys.at(i));
// if the channel list we built so far either contains all requested channels or exceeds
// the desired amount of channels in this slice, try to send what we have so far
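// For example, if all 8 requested channels together would overflow one 512-byte
// line, slicesize drops to 4, then 2, then 1, re-sending smaller JOIN batches.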
if (++i == chans.count() || joinChans.count() >= slicesize) {
params.clear();
params.append(joinChans.join(","));
params.append(joinKeys.join(","));
encodedParams = serverEncode(params);
// check if it fits in one command
if (lastParamOverrun(cmd, encodedParams) == 0) {
emit putCmd(cmd, encodedParams);
}
else if (slicesize > 1) {
// back to start of slice, try again with half the amount of channels
i -= slicesize;
slicesize /= 2;
}
joinChans.clear();
joinKeys.clear();
}
}
}
void CoreUserInputHandler::handleKeyx(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.count() == 0 && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
else if (parms.count() != 1) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname,
tr("[usage] /keyx [<nick>] Initiates a DH1080 key exchange with the target."));
return;
}
QString target = parms.at(0);
if (network()->isChannelName(target)) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("It is only possible to exchange keys in a query buffer."));
return;
}
Cipher *cipher = network()->cipher(target);
if (!cipher) // happens when there is no CoreIrcChannel for the target
return;
QByteArray pubKey = cipher->initKeyExchange();
if (pubKey.isEmpty())
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Failed to initiate key exchange with %1.").arg(target));
else {
QList<QByteArray> params;
params << serverEncode(target) << serverEncode("DH1080_INIT ") + pubKey;
emit putCmd("NOTICE", params);
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("Initiated key exchange with %1.").arg(target));
}
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA) library. "
"Contact your distributor about a Quassel package with QCA "
"support, or rebuild Quassel with QCA present."));
#endif
}
void CoreUserInputHandler::handleKick(const BufferInfo &bufferInfo, const QString &msg)
{
QString nick = msg.section(' ', 0, 0, QString::SectionSkipEmpty);
QString reason = msg.section(' ', 1, -1, QString::SectionSkipEmpty).trimmed();
if (reason.isEmpty())
reason = network()->identityPtr()->kickReason();
QList<QByteArray> params;
params << serverEncode(bufferInfo.bufferName()) << serverEncode(nick) << channelEncode(bufferInfo.bufferName(), reason);
emit putCmd("KICK", params);
}
void CoreUserInputHandler::handleKill(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString nick = msg.section(' ', 0, 0, QString::SectionSkipEmpty);
QString pass = msg.section(' ', 1, -1, QString::SectionSkipEmpty);
QList<QByteArray> params;
params << serverEncode(nick) << serverEncode(pass);
emit putCmd("KILL", params);
}
void CoreUserInputHandler::handleList(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putCmd("LIST", serverEncode(msg.split(' ', QString::SkipEmptyParts)));
}
void CoreUserInputHandler::handleMe(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return; // server buffer
// FIXME make this a proper event
coreNetwork()->coreSession()->ctcpParser()->query(coreNetwork(), bufferInfo.bufferName(), "ACTION", msg);
emit displayMsg(Message::Action, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
}
void CoreUserInputHandler::handleMode(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QStringList params = msg.split(' ', QString::SkipEmptyParts);
// if the first argument is neither a channel nor us (user modes can only be set on oneself), the current buffer is assumed to be the target
if (!params.isEmpty()) {
if (!network()->isChannelName(params[0]) && !network()->isMyNick(params[0]))
params.prepend(bufferInfo.bufferName());
if (network()->isMyNick(params[0]) && params.count() == 2)
network()->updateIssuedModes(params[1]);
if (params[0] == "-reset" && params.count() == 1) {
// FIXME: give feedback to the user (I don't want to add new strings right now)
network()->resetPersistentModes();
return;
}
}
// TODO handle correct encoding for buffer modes (channelEncode())
emit putCmd("MODE", serverEncode(params));
}
// TODO: show privmsgs
void CoreUserInputHandler::handleMsg(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo);
if (!msg.contains(' '))
return;
QString target = msg.section(' ', 0, 0);
QString msgSection = msg.section(' ', 1);
std::function<QByteArray(const QString &, const QString &)> encodeFunc = [this] (const QString &target, const QString &message) -> QByteArray {
return userEncode(target, message);
};
#ifdef HAVE_QCA2
putPrivmsg(target, msgSection, encodeFunc, network()->cipher(target));
#else
putPrivmsg(target, msgSection, encodeFunc);
#endif
}
void CoreUserInputHandler::handleNick(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString nick = msg.section(' ', 0, 0);
emit putCmd("NICK", serverEncode(nick));
}
void CoreUserInputHandler::handleNotice(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufferName = msg.section(' ', 0, 0);
QString payload = msg.section(' ', 1);
QList<QByteArray> params;
params << serverEncode(bufferName) << channelEncode(bufferInfo.bufferName(), payload);
emit putCmd("NOTICE", params);
emit displayMsg(Message::Notice, typeByTarget(bufferName), bufferName, payload, network()->myNick(), Message::Self);
}
void CoreUserInputHandler::handleOper(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putRawLine(serverEncode(QString("OPER %1").arg(msg)));
}
void CoreUserInputHandler::handlePart(const BufferInfo &bufferInfo, const QString &msg)
{
QList<QByteArray> params;
QString partReason;
// msg might contain either a channel name and/or a reason, so we have to check if the first word is a known channel
QString channelName = msg.section(' ', 0, 0);
if (channelName.isEmpty() || !network()->ircChannel(channelName)) {
channelName = bufferInfo.bufferName();
partReason = msg;
}
else {
partReason = msg.mid(channelName.length() + 1);
}
if (partReason.isEmpty())
partReason = network()->identityPtr()->partReason();
params << serverEncode(channelName) << channelEncode(bufferInfo.bufferName(), partReason);
emit putCmd("PART", params);
}
void CoreUserInputHandler::handlePing(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString param = msg;
if (param.isEmpty())
param = QTime::currentTime().toString("hh:mm:ss.zzz");
putCmd("PING", serverEncode(param));
}
void CoreUserInputHandler::handlePrint(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return; // server buffer
QByteArray encMsg = channelEncode(bufferInfo.bufferName(), msg);
emit displayMsg(Message::Info, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
}
// TODO: implement queries
void CoreUserInputHandler::handleQuery(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString target = msg.section(' ', 0, 0);
QString message = msg.section(' ', 1);
if (message.isEmpty())
emit displayMsg(Message::Server, BufferInfo::QueryBuffer, target, tr("Starting query with %1").arg(target), network()->myNick(), Message::Self);
else
emit displayMsg(Message::Plain, BufferInfo::QueryBuffer, target, message, network()->myNick(), Message::Self);
handleMsg(bufferInfo, msg);
}
void CoreUserInputHandler::handleQuit(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
network()->disconnectFromIrc(true, msg);
}
void CoreUserInputHandler::issueQuit(const QString &reason)
{
emit putCmd("QUIT", serverEncode(reason));
}
void CoreUserInputHandler::handleQuote(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putRawLine(serverEncode(msg));
}
void CoreUserInputHandler::handleSay(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return; // server buffer
std::function<QByteArray(const QString &, const QString &)> encodeFunc = [this] (const QString &target, const QString &message) -> QByteArray {
return channelEncode(target, message);
};
#ifdef HAVE_QCA2
putPrivmsg(bufferInfo.bufferName(), msg, encodeFunc, network()->cipher(bufferInfo.bufferName()));
#else
putPrivmsg(bufferInfo.bufferName(), msg, encodeFunc);
#endif
emit displayMsg(Message::Plain, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
}
void CoreUserInputHandler::handleSetkey(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.count() == 1 && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
else if (parms.count() != 2) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname,
tr("[usage] /setkey <nick|channel> <key> sets the encryption key for nick or channel. "
"/setkey <key> when in a channel or query buffer sets the key for it."));
return;
}
QString target = parms.at(0);
QByteArray key = parms.at(1).toLocal8Bit();
network()->setCipherKey(target, key);
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("The key for %1 has been set.").arg(target));
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA) library. "
"Contact your distributor about a Quassel package with QCA "
"support, or rebuild Quassel with QCA present."));
#endif
}
void CoreUserInputHandler::handleShowkey(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.isEmpty() && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
if (parms.isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("[usage] /showkey <nick|channel> shows the encryption key for nick or channel or just /showkey when in a channel or query."));
return;
}
QString target = parms.at(0);
QByteArray key = network()->cipherKey(target);
if (key.isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("No key has been set for %1.").arg(target));
return;
}
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("The key for %1 is %2:%3").arg(target, network()->cipherUsesCBC(target) ? "CBC" : "ECB", QString(key)));
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA2) library. "
"Contact your distributor about a Quassel package with QCA2 "
"support, or rebuild Quassel with QCA2 present."));
#endif
}
void CoreUserInputHandler::handleTopic(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return;
QList<QByteArray> params;
params << serverEncode(bufferInfo.bufferName());
if (!msg.isEmpty()) {
# ifdef HAVE_QCA2
params << encrypt(bufferInfo.bufferName(), channelEncode(bufferInfo.bufferName(), msg));
# else
params << channelEncode(bufferInfo.bufferName(), msg);
# endif
}
emit putCmd("TOPIC", params);
}
void CoreUserInputHandler::handleVoice(const BufferInfo &bufferInfo, const QString &msg)
{
QStringList nicks = msg.split(' ', QString::SkipEmptyParts);
QString m = "+"; for (int i = 0; i < nicks.count(); i++) m += 'v';
QStringList params;
params << bufferInfo.bufferName() << m << nicks;
emit putCmd("MODE", serverEncode(params));
}
void CoreUserInputHandler::handleWait(const BufferInfo &bufferInfo, const QString &msg)
{
int splitPos = msg.indexOf(';');
if (splitPos <= 0)
return;
bool ok;
int delay = msg.left(splitPos).trimmed().toInt(&ok);
if (!ok)
return;
delay *= 1000; // the delay is given in seconds; startTimer() expects milliseconds
QString command = msg.mid(splitPos + 1).trimmed();
if (command.isEmpty())
return;
_delayedCommands[startTimer(delay)] = Command(bufferInfo, command);
}
void CoreUserInputHandler::handleWho(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putCmd("WHO", serverEncode(msg.split(' ')));
}
void CoreUserInputHandler::handleWhois(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putCmd("WHOIS", serverEncode(msg.split(' ')));
}
void CoreUserInputHandler::handleWhowas(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putCmd("WHOWAS", serverEncode(msg.split(' ')));
}
void CoreUserInputHandler::defaultHandler(QString cmd, const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo);
emit putCmd(serverEncode(cmd.toUpper()), serverEncode(msg.split(" ")));
}
void CoreUserInputHandler::putPrivmsg(const QString &target, const QString &message, std::function<QByteArray(const QString &, const QString &)> encodeFunc, Cipher *cipher)
{
Q_UNUSED(cipher);
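// cipher is only referenced inside the HAVE_QCA2 section of the lambda below;
// Q_UNUSED silences the unused-parameter warning in builds without QCA2.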
QString cmd("PRIVMSG");
QByteArray targetEnc = serverEncode(target);
std::function<QList<QByteArray>(QString &)> cmdGenerator = [&] (QString &splitMsg) -> QList<QByteArray> {
QByteArray splitMsgEnc = encodeFunc(target, splitMsg);
#ifdef HAVE_QCA2
if (cipher && !cipher->key().isEmpty() && !splitMsg.isEmpty()) {
cipher->encrypt(splitMsgEnc);
}
#endif
return QList<QByteArray>() << targetEnc << splitMsgEnc;
};
putCmd(cmd, network()->splitMessage(cmd, message, cmdGenerator));
}
// returns 0 if the message will not be chopped by the irc server or number of chopped bytes if message is too long
int CoreUserInputHandler::lastParamOverrun(const QString &cmd, const QList<QByteArray> ¶ms)
{
// the server will pass our message truncated to 512 bytes including CRLF with the following format:
// ":prefix COMMAND param0 param1 :lastparam"
// where prefix = "nickname!user@host"
// that means that the last message can be as long as:
// 512 - nicklen - userlen - hostlen - commandlen - sum(param[0]..param[n-1]) - 2 (for CRLF) - 4 (":!@" + 1 space between prefix and command) - max(paramcount - 1, 0) (space for simple params) - 2 (space and colon for last param)
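// For example (hypothetical values): nick "alice" (5), user "quassel" (7) and
// host "example.com" (11) with cmd "PRIVMSG" (7) leave
// maxLen = 512 - 5 - 7 - 11 - 7 - 6 = 476 bytes for the parameters.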
IrcUser *me = network()->me();
int maxLen = 480 - cmd.toLatin1().count(); // educated guess in case we don't know us (yet?)
if (me)
maxLen = 512 - serverEncode(me->nick()).count() - serverEncode(me->user()).count() - serverEncode(me->host()).count() - cmd.toLatin1().count() - 6;
if (!params.isEmpty()) {
for (int i = 0; i < params.count() - 1; i++) {
maxLen -= (params[i].count() + 1);
}
maxLen -= 2; // " :" last param separator;
if (params.last().count() > maxLen) {
return params.last().count() - maxLen;
}
else {
return 0;
}
}
else {
return 0;
}
}
#ifdef HAVE_QCA2
QByteArray CoreUserInputHandler::encrypt(const QString &target, const QByteArray &message_, bool *didEncrypt) const
{
if (didEncrypt)
*didEncrypt = false;
if (message_.isEmpty())
return message_;
if (!Cipher::neededFeaturesAvailable())
return message_;
Cipher *cipher = network()->cipher(target);
if (!cipher || cipher->key().isEmpty())
return message_;
QByteArray message = message_;
bool result = cipher->encrypt(message);
if (didEncrypt)
*didEncrypt = result;
return message;
}
#endif
void CoreUserInputHandler::timerEvent(QTimerEvent *event)
{
if (!_delayedCommands.contains(event->timerId())) {
QObject::timerEvent(event);
return;
}
BufferInfo bufferInfo = _delayedCommands[event->timerId()].bufferInfo;
QString rawCommand = _delayedCommands[event->timerId()].command;
_delayedCommands.remove(event->timerId());
event->accept();
// the stored command might be the result of an alias expansion, so we need to split it up again
QStringList commands = rawCommand.split(QRegExp("; ?"));
foreach(QString command, commands) {
handleUserInput(bufferInfo, command);
}
}
| ./CrossVul/dataset_final_sorted/CWE-17/cpp/bad_1789_0 |
crossvul-cpp_data_good_1789_0 | /***************************************************************************
* Copyright (C) 2005-2015 by the Quassel Project *
* devel@quassel-irc.org *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) version 3. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
***************************************************************************/
#include "coreuserinputhandler.h"
#include "util.h"
#include "ctcpparser.h"
#include <QRegExp>
#ifdef HAVE_QCA2
# include "cipher.h"
#endif
CoreUserInputHandler::CoreUserInputHandler(CoreNetwork *parent)
: CoreBasicHandler(parent)
{
}
void CoreUserInputHandler::handleUserInput(const BufferInfo &bufferInfo, const QString &msg)
{
if (msg.isEmpty())
return;
AliasManager::CommandList list = coreSession()->aliasManager().processInput(bufferInfo, msg);
for (int i = 0; i < list.count(); i++) {
QString cmd = list.at(i).second.section(' ', 0, 0).remove(0, 1).toUpper();
QString payload = list.at(i).second.section(' ', 1);
handle(cmd, Q_ARG(BufferInfo, list.at(i).first), Q_ARG(QString, payload));
}
}
// ====================
// Public Slots
// ====================
void CoreUserInputHandler::handleAway(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
if (msg.startsWith("-all")) {
if (msg.length() == 4) {
coreSession()->globalAway();
return;
}
Q_ASSERT(msg.length() > 4);
if (msg[4] == ' ') {
coreSession()->globalAway(msg.mid(5));
return;
}
}
issueAway(msg);
}
void CoreUserInputHandler::issueAway(const QString &msg, bool autoCheck)
{
QString awayMsg = msg;
IrcUser *me = network()->me();
// if there is no message supplied we have to check if we are already away or not
if (autoCheck && msg.isEmpty()) {
if (me && !me->isAway()) {
Identity *identity = network()->identityPtr();
if (identity) {
awayMsg = identity->awayReason();
}
if (awayMsg.isEmpty()) {
awayMsg = tr("away");
}
}
}
if (me)
me->setAwayMessage(awayMsg);
putCmd("AWAY", serverEncode(awayMsg));
}
void CoreUserInputHandler::handleBan(const BufferInfo &bufferInfo, const QString &msg)
{
banOrUnban(bufferInfo, msg, true);
}
void CoreUserInputHandler::handleUnban(const BufferInfo &bufferInfo, const QString &msg)
{
banOrUnban(bufferInfo, msg, false);
}
void CoreUserInputHandler::banOrUnban(const BufferInfo &bufferInfo, const QString &msg, bool ban)
{
QString banChannel;
QString banUser;
QStringList params = msg.split(" ");
if (!params.isEmpty() && isChannelName(params[0])) {
banChannel = params.takeFirst();
}
else if (bufferInfo.type() == BufferInfo::ChannelBuffer) {
banChannel = bufferInfo.bufferName();
}
else {
emit displayMsg(Message::Error, BufferInfo::StatusBuffer, "", QString("Error: channel unknown in command: /BAN %1").arg(msg));
return;
}
if (!params.isEmpty() && !params.contains("!") && network()->ircUser(params[0])) {
IrcUser *ircuser = network()->ircUser(params[0]);
// generalizedHost changes <nick> to *!ident@*.sld.tld.
QString generalizedHost = ircuser->host();
if (generalizedHost.isEmpty()) {
emit displayMsg(Message::Error, BufferInfo::StatusBuffer, "", QString("Error: host unknown in command: /BAN %1").arg(msg));
return;
}
static QRegExp ipAddress("\\d+\\.\\d+\\.\\d+\\.\\d+");
if (ipAddress.exactMatch(generalizedHost)) {
int lastDotPos = generalizedHost.lastIndexOf('.') + 1;
generalizedHost.replace(lastDotPos, generalizedHost.length() - lastDotPos, '*');
}
else if (generalizedHost.lastIndexOf(".") != -1 && generalizedHost.lastIndexOf(".", generalizedHost.lastIndexOf(".")-1) != -1) {
int secondLastPeriodPosition = generalizedHost.lastIndexOf(".", generalizedHost.lastIndexOf(".")-1);
generalizedHost.replace(0, secondLastPeriodPosition, "*");
}
banUser = QString("*!%1@%2").arg(ircuser->user(), generalizedHost);
}
else {
banUser = params.join(" ");
}
QString banMode = ban ? "+b" : "-b";
QString banMsg = QString("MODE %1 %2 %3").arg(banChannel, banMode, banUser);
emit putRawLine(serverEncode(banMsg));
}
void CoreUserInputHandler::handleCtcp(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString nick = msg.section(' ', 0, 0);
QString ctcpTag = msg.section(' ', 1, 1).toUpper();
if (ctcpTag.isEmpty())
return;
QString message = msg.section(' ', 2);
QString verboseMessage = tr("sending CTCP-%1 request to %2").arg(ctcpTag).arg(nick);
if (ctcpTag == "PING") {
message = QString::number(QDateTime::currentMSecsSinceEpoch());
}
// FIXME make this a proper event
coreNetwork()->coreSession()->ctcpParser()->query(coreNetwork(), nick, ctcpTag, message);
emit displayMsg(Message::Action, BufferInfo::StatusBuffer, "", verboseMessage, network()->myNick());
}
void CoreUserInputHandler::handleDelkey(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.isEmpty() && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
if (parms.isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname,
tr("[usage] /delkey <nick|channel> deletes the encryption key for nick or channel or just /delkey when in a channel or query."));
return;
}
QString target = parms.at(0);
if (network()->cipherKey(target).isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("No key has been set for %1.").arg(target));
return;
}
network()->setCipherKey(target, QByteArray());
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("The key for %1 has been deleted.").arg(target));
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA2) library. "
"Contact your distributor about a Quassel package with QCA2 "
"support, or rebuild Quassel with QCA2 present."));
#endif
}
void CoreUserInputHandler::doMode(const BufferInfo &bufferInfo, const QChar& addOrRemove, const QChar& mode, const QString &nicks)
{
QString m;
bool isNumber;
int maxModes = network()->support("MODES").toInt(&isNumber);
if (!isNumber || maxModes == 0) maxModes = 1;
QStringList nickList;
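// The added BufferInfo::ChannelBuffer check below guards the ircChannel()
// dereference: "*" issued in a non-channel buffer would otherwise dereference
// a null IrcChannel.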
if (nicks == "*" && bufferInfo.type() == BufferInfo::ChannelBuffer) { // All users in channel
const QList<IrcUser*> users = network()->ircChannel(bufferInfo.bufferName())->ircUsers();
foreach(IrcUser *user, users) {
if ((addOrRemove == '+' && !network()->ircChannel(bufferInfo.bufferName())->userModes(user).contains(mode))
|| (addOrRemove == '-' && network()->ircChannel(bufferInfo.bufferName())->userModes(user).contains(mode)))
nickList.append(user->nick());
}
} else {
nickList = nicks.split(' ', QString::SkipEmptyParts);
}
if (nickList.count() == 0) return;
while (!nickList.isEmpty()) {
int amount = qMin(nickList.count(), maxModes);
QString m = addOrRemove; for(int i = 0; i < amount; i++) m += mode;
QStringList params;
params << bufferInfo.bufferName() << m;
for(int i = 0; i < amount; i++) params << nickList.takeFirst();
emit putCmd("MODE", serverEncode(params));
}
}
void CoreUserInputHandler::handleDeop(const BufferInfo &bufferInfo, const QString &nicks)
{
doMode(bufferInfo, '-', 'o', nicks);
}
void CoreUserInputHandler::handleDehalfop(const BufferInfo &bufferInfo, const QString &nicks)
{
doMode(bufferInfo, '-', 'h', nicks);
}
void CoreUserInputHandler::handleDevoice(const BufferInfo &bufferInfo, const QString &nicks)
{
doMode(bufferInfo, '-', 'v', nicks);
}
void CoreUserInputHandler::handleHalfop(const BufferInfo &bufferInfo, const QString &nicks)
{
doMode(bufferInfo, '+', 'h', nicks);
}
void CoreUserInputHandler::handleOp(const BufferInfo &bufferInfo, const QString &nicks) {
doMode(bufferInfo, '+', 'o', nicks);
}
void CoreUserInputHandler::handleInvite(const BufferInfo &bufferInfo, const QString &msg)
{
QStringList params;
params << msg << bufferInfo.bufferName();
emit putCmd("INVITE", serverEncode(params));
}
void CoreUserInputHandler::handleJoin(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo);
// trim spaces before chans or keys
QString sane_msg = msg;
sane_msg.replace(QRegExp(", +"), ",");
QStringList params = sane_msg.trimmed().split(" ");
QStringList chans = params[0].split(",", QString::SkipEmptyParts);
QStringList keys;
if (params.count() > 1)
keys = params[1].split(",");
int i;
for (i = 0; i < chans.count(); i++) {
if (!network()->isChannelName(chans[i]))
chans[i].prepend('#');
if (i < keys.count()) {
network()->addChannelKey(chans[i], keys[i]);
}
else {
network()->removeChannelKey(chans[i]);
}
}
static const char *cmd = "JOIN";
i = 0;
QStringList joinChans, joinKeys;
int slicesize = chans.count();
QList<QByteArray> encodedParams;
// go through all to-be-joined channels and (re)build the join list
while (i < chans.count()) {
joinChans.append(chans.at(i));
if (i < keys.count())
joinKeys.append(keys.at(i));
// if the channel list we built so far either contains all requested channels or exceeds
// the desired amount of channels in this slice, try to send what we have so far
if (++i == chans.count() || joinChans.count() >= slicesize) {
params.clear();
params.append(joinChans.join(","));
params.append(joinKeys.join(","));
encodedParams = serverEncode(params);
// check if it fits in one command
if (lastParamOverrun(cmd, encodedParams) == 0) {
emit putCmd(cmd, encodedParams);
}
else if (slicesize > 1) {
// back to start of slice, try again with half the amount of channels
i -= slicesize;
slicesize /= 2;
}
joinChans.clear();
joinKeys.clear();
}
}
}
void CoreUserInputHandler::handleKeyx(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.count() == 0 && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
else if (parms.count() != 1) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname,
tr("[usage] /keyx [<nick>] Initiates a DH1080 key exchange with the target."));
return;
}
QString target = parms.at(0);
if (network()->isChannelName(target)) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("It is only possible to exchange keys in a query buffer."));
return;
}
Cipher *cipher = network()->cipher(target);
if (!cipher) // happens when there is no CoreIrcChannel for the target
return;
QByteArray pubKey = cipher->initKeyExchange();
if (pubKey.isEmpty())
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Failed to initiate key exchange with %1.").arg(target));
else {
QList<QByteArray> params;
params << serverEncode(target) << serverEncode("DH1080_INIT ") + pubKey;
emit putCmd("NOTICE", params);
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("Initiated key exchange with %1.").arg(target));
}
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA) library. "
"Contact your distributor about a Quassel package with QCA "
"support, or rebuild Quassel with QCA present."));
#endif
}
void CoreUserInputHandler::handleKick(const BufferInfo &bufferInfo, const QString &msg)
{
QString nick = msg.section(' ', 0, 0, QString::SectionSkipEmpty);
QString reason = msg.section(' ', 1, -1, QString::SectionSkipEmpty).trimmed();
if (reason.isEmpty())
reason = network()->identityPtr()->kickReason();
QList<QByteArray> params;
params << serverEncode(bufferInfo.bufferName()) << serverEncode(nick) << channelEncode(bufferInfo.bufferName(), reason);
emit putCmd("KICK", params);
}
void CoreUserInputHandler::handleKill(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString nick = msg.section(' ', 0, 0, QString::SectionSkipEmpty);
QString pass = msg.section(' ', 1, -1, QString::SectionSkipEmpty);
QList<QByteArray> params;
params << serverEncode(nick) << serverEncode(pass);
emit putCmd("KILL", params);
}
void CoreUserInputHandler::handleList(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putCmd("LIST", serverEncode(msg.split(' ', QString::SkipEmptyParts)));
}
void CoreUserInputHandler::handleMe(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return; // server buffer
// FIXME make this a proper event
coreNetwork()->coreSession()->ctcpParser()->query(coreNetwork(), bufferInfo.bufferName(), "ACTION", msg);
emit displayMsg(Message::Action, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
}
void CoreUserInputHandler::handleMode(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QStringList params = msg.split(' ', QString::SkipEmptyParts);
// if the first argument is neither a channel nor us (user modes can only be set on oneself), the current buffer is assumed to be the target
if (!params.isEmpty()) {
if (!network()->isChannelName(params[0]) && !network()->isMyNick(params[0]))
params.prepend(bufferInfo.bufferName());
if (network()->isMyNick(params[0]) && params.count() == 2)
network()->updateIssuedModes(params[1]);
if (params[0] == "-reset" && params.count() == 1) {
// FIXME: give feedback to the user (I don't want to add new strings right now)
network()->resetPersistentModes();
return;
}
}
// TODO handle correct encoding for buffer modes (channelEncode())
emit putCmd("MODE", serverEncode(params));
}
// TODO: show privmsgs
void CoreUserInputHandler::handleMsg(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo);
if (!msg.contains(' '))
return;
QString target = msg.section(' ', 0, 0);
QString msgSection = msg.section(' ', 1);
std::function<QByteArray(const QString &, const QString &)> encodeFunc = [this] (const QString &target, const QString &message) -> QByteArray {
return userEncode(target, message);
};
#ifdef HAVE_QCA2
putPrivmsg(target, msgSection, encodeFunc, network()->cipher(target));
#else
putPrivmsg(target, msgSection, encodeFunc);
#endif
}
void CoreUserInputHandler::handleNick(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString nick = msg.section(' ', 0, 0);
emit putCmd("NICK", serverEncode(nick));
}
void CoreUserInputHandler::handleNotice(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufferName = msg.section(' ', 0, 0);
QString payload = msg.section(' ', 1);
QList<QByteArray> params;
params << serverEncode(bufferName) << channelEncode(bufferInfo.bufferName(), payload);
emit putCmd("NOTICE", params);
emit displayMsg(Message::Notice, typeByTarget(bufferName), bufferName, payload, network()->myNick(), Message::Self);
}
void CoreUserInputHandler::handleOper(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putRawLine(serverEncode(QString("OPER %1").arg(msg)));
}
void CoreUserInputHandler::handlePart(const BufferInfo &bufferInfo, const QString &msg)
{
QList<QByteArray> params;
QString partReason;
// msg might contain either a channel name and/or a reason, so we have to check if the first word is a known channel
QString channelName = msg.section(' ', 0, 0);
if (channelName.isEmpty() || !network()->ircChannel(channelName)) {
channelName = bufferInfo.bufferName();
partReason = msg;
}
else {
partReason = msg.mid(channelName.length() + 1);
}
if (partReason.isEmpty())
partReason = network()->identityPtr()->partReason();
params << serverEncode(channelName) << channelEncode(bufferInfo.bufferName(), partReason);
emit putCmd("PART", params);
}
void CoreUserInputHandler::handlePing(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString param = msg;
if (param.isEmpty())
param = QTime::currentTime().toString("hh:mm:ss.zzz");
putCmd("PING", serverEncode(param));
}
void CoreUserInputHandler::handlePrint(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return; // server buffer
QByteArray encMsg = channelEncode(bufferInfo.bufferName(), msg);
emit displayMsg(Message::Info, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
}
// TODO: implement queries
void CoreUserInputHandler::handleQuery(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
QString target = msg.section(' ', 0, 0);
QString message = msg.section(' ', 1);
if (message.isEmpty())
emit displayMsg(Message::Server, BufferInfo::QueryBuffer, target, tr("Starting query with %1").arg(target), network()->myNick(), Message::Self);
else
emit displayMsg(Message::Plain, BufferInfo::QueryBuffer, target, message, network()->myNick(), Message::Self);
handleMsg(bufferInfo, msg);
}
void CoreUserInputHandler::handleQuit(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
network()->disconnectFromIrc(true, msg);
}
void CoreUserInputHandler::issueQuit(const QString &reason)
{
emit putCmd("QUIT", serverEncode(reason));
}
void CoreUserInputHandler::handleQuote(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putRawLine(serverEncode(msg));
}
void CoreUserInputHandler::handleSay(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return; // server buffer
std::function<QByteArray(const QString &, const QString &)> encodeFunc = [this] (const QString &target, const QString &message) -> QByteArray {
return channelEncode(target, message);
};
#ifdef HAVE_QCA2
putPrivmsg(bufferInfo.bufferName(), msg, encodeFunc, network()->cipher(bufferInfo.bufferName()));
#else
putPrivmsg(bufferInfo.bufferName(), msg, encodeFunc);
#endif
emit displayMsg(Message::Plain, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
}
void CoreUserInputHandler::handleSetkey(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.count() == 1 && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
else if (parms.count() != 2) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname,
tr("[usage] /setkey <nick|channel> <key> sets the encryption key for nick or channel. "
"/setkey <key> when in a channel or query buffer sets the key for it."));
return;
}
QString target = parms.at(0);
QByteArray key = parms.at(1).toLocal8Bit();
network()->setCipherKey(target, key);
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("The key for %1 has been set.").arg(target));
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA) library. "
"Contact your distributor about a Quassel package with QCA "
"support, or rebuild Quassel with QCA present."));
#endif
}
void CoreUserInputHandler::handleShowkey(const BufferInfo &bufferInfo, const QString &msg)
{
QString bufname = bufferInfo.bufferName().isNull() ? "" : bufferInfo.bufferName();
#ifdef HAVE_QCA2
if (!bufferInfo.isValid())
return;
if (!Cipher::neededFeaturesAvailable()) {
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: QCA provider plugin not found. It is usually provided by the qca-ossl plugin."));
return;
}
QStringList parms = msg.split(' ', QString::SkipEmptyParts);
if (parms.isEmpty() && !bufferInfo.bufferName().isEmpty() && bufferInfo.acceptsRegularMessages())
parms.prepend(bufferInfo.bufferName());
if (parms.isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("[usage] /showkey <nick|channel> shows the encryption key for nick or channel or just /showkey when in a channel or query."));
return;
}
QString target = parms.at(0);
QByteArray key = network()->cipherKey(target);
if (key.isEmpty()) {
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("No key has been set for %1.").arg(target));
return;
}
emit displayMsg(Message::Info, typeByTarget(bufname), bufname, tr("The key for %1 is %2:%3").arg(target, network()->cipherUsesCBC(target) ? "CBC" : "ECB", QString(key)));
#else
Q_UNUSED(msg)
emit displayMsg(Message::Error, typeByTarget(bufname), bufname, tr("Error: Setting an encryption key requires Quassel to have been built "
"with support for the Qt Cryptographic Architecture (QCA2) library. "
"Contact your distributor about a Quassel package with QCA2 "
"support, or rebuild Quassel with QCA2 present."));
#endif
}
void CoreUserInputHandler::handleTopic(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return;
QList<QByteArray> params;
params << serverEncode(bufferInfo.bufferName());
if (!msg.isEmpty()) {
# ifdef HAVE_QCA2
params << encrypt(bufferInfo.bufferName(), channelEncode(bufferInfo.bufferName(), msg));
# else
params << channelEncode(bufferInfo.bufferName(), msg);
# endif
}
emit putCmd("TOPIC", params);
}
void CoreUserInputHandler::handleVoice(const BufferInfo &bufferInfo, const QString &msg)
{
QStringList nicks = msg.split(' ', QString::SkipEmptyParts);
QString m = "+"; for (int i = 0; i < nicks.count(); i++) m += 'v';
QStringList params;
params << bufferInfo.bufferName() << m << nicks;
emit putCmd("MODE", serverEncode(params));
}
void CoreUserInputHandler::handleWait(const BufferInfo &bufferInfo, const QString &msg)
{
int splitPos = msg.indexOf(';');
if (splitPos <= 0)
return;
bool ok;
int delay = msg.left(splitPos).trimmed().toInt(&ok);
if (!ok)
return;
delay *= 1000; // the delay is given in seconds; startTimer() expects milliseconds
QString command = msg.mid(splitPos + 1).trimmed();
if (command.isEmpty())
return;
_delayedCommands[startTimer(delay)] = Command(bufferInfo, command);
}
void CoreUserInputHandler::handleWho(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putCmd("WHO", serverEncode(msg.split(' ')));
}
void CoreUserInputHandler::handleWhois(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putCmd("WHOIS", serverEncode(msg.split(' ')));
}
void CoreUserInputHandler::handleWhowas(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo)
emit putCmd("WHOWAS", serverEncode(msg.split(' ')));
}
void CoreUserInputHandler::defaultHandler(QString cmd, const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo);
emit putCmd(serverEncode(cmd.toUpper()), serverEncode(msg.split(" ")));
}
void CoreUserInputHandler::putPrivmsg(const QString &target, const QString &message, std::function<QByteArray(const QString &, const QString &)> encodeFunc, Cipher *cipher)
{
Q_UNUSED(cipher);
QString cmd("PRIVMSG");
QByteArray targetEnc = serverEncode(target);
std::function<QList<QByteArray>(QString &)> cmdGenerator = [&] (QString &splitMsg) -> QList<QByteArray> {
QByteArray splitMsgEnc = encodeFunc(target, splitMsg);
#ifdef HAVE_QCA2
if (cipher && !cipher->key().isEmpty() && !splitMsg.isEmpty()) {
cipher->encrypt(splitMsgEnc);
}
#endif
return QList<QByteArray>() << targetEnc << splitMsgEnc;
};
putCmd(cmd, network()->splitMessage(cmd, message, cmdGenerator));
}
// returns 0 if the message will not be chopped by the irc server or number of chopped bytes if message is too long
int CoreUserInputHandler::lastParamOverrun(const QString &cmd, const QList<QByteArray> ¶ms)
{
// the server will pass our message truncated to 512 bytes including CRLF with the following format:
// ":prefix COMMAND param0 param1 :lastparam"
// where prefix = "nickname!user@host"
// that means that the last message can be as long as:
// 512 - nicklen - userlen - hostlen - commandlen - sum(param[0]..param[n-1]) - 2 (for CRLF) - 4 (":!@" + 1 space between prefix and command) - max(paramcount - 1, 0) (space for simple params) - 2 (space and colon for last param)
IrcUser *me = network()->me();
int maxLen = 480 - cmd.toLatin1().count(); // educated guess in case we don't know us (yet?)
if (me)
maxLen = 512 - serverEncode(me->nick()).count() - serverEncode(me->user()).count() - serverEncode(me->host()).count() - cmd.toLatin1().count() - 6;
if (!params.isEmpty()) {
for (int i = 0; i < params.count() - 1; i++) {
maxLen -= (params[i].count() + 1);
}
maxLen -= 2; // " :" last param separator;
if (params.last().count() > maxLen) {
return params.last().count() - maxLen;
}
else {
return 0;
}
}
else {
return 0;
}
}
#ifdef HAVE_QCA2
QByteArray CoreUserInputHandler::encrypt(const QString &target, const QByteArray &message_, bool *didEncrypt) const
{
if (didEncrypt)
*didEncrypt = false;
if (message_.isEmpty())
return message_;
if (!Cipher::neededFeaturesAvailable())
return message_;
Cipher *cipher = network()->cipher(target);
if (!cipher || cipher->key().isEmpty())
return message_;
QByteArray message = message_;
bool result = cipher->encrypt(message);
if (didEncrypt)
*didEncrypt = result;
return message;
}
#endif
void CoreUserInputHandler::timerEvent(QTimerEvent *event)
{
if (!_delayedCommands.contains(event->timerId())) {
QObject::timerEvent(event);
return;
}
BufferInfo bufferInfo = _delayedCommands[event->timerId()].bufferInfo;
QString rawCommand = _delayedCommands[event->timerId()].command;
_delayedCommands.remove(event->timerId());
event->accept();
// the stored command might be the result of an alias expansion, so we need to split it up again
QStringList commands = rawCommand.split(QRegExp("; ?"));
foreach(QString command, commands) {
handleUserInput(bufferInfo, command);
}
}
| ./CrossVul/dataset_final_sorted/CWE-17/cpp/good_1789_0 |
crossvul-cpp_data_bad_2415_2 | /*
* symlink.c
*
* PURPOSE
* Symlink handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998-2001 Ben Fennema
* (C) 1999 Stelias Computing Inc
*
* HISTORY
*
* 04/16/99 blf Created.
*
*/
#include "udfdecl.h"
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include "udf_i.h"
static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
int fromlen, unsigned char *to)
{
struct pathComponent *pc;
int elen = 0;
unsigned char *p = to;
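/*
 * Note (annotation): pc->lengthComponentIdent is taken from the on-disk
 * image and is not validated against fromlen, and nothing here bounds how
 * much is written to 'to', so a crafted image can make this loop read past
 * the source buffer and potentially overrun the destination.
 */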
while (elen < fromlen) {
pc = (struct pathComponent *)(from + elen);
switch (pc->componentType) {
case 1:
/*
* Symlink points to some place which should be agreed
* upon between originator and receiver of the media. Ignore.
*/
if (pc->lengthComponentIdent > 0)
break;
/* Fall through */
case 2:
p = to;
*p++ = '/';
break;
case 3:
memcpy(p, "../", 3);
p += 3;
break;
case 4:
memcpy(p, "./", 2);
p += 2;
/* that would be . - just ignore */
break;
case 5:
p += udf_get_filename(sb, pc->componentIdent, p,
pc->lengthComponentIdent);
*p++ = '/';
break;
}
elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
}
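/* Drop the trailing '/' appended after the last component; an empty or root-only path is just terminated in place. */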
if (p > to + 1)
p[-1] = '\0';
else
p[0] = '\0';
}
static int udf_symlink_filler(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
int err;
unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
uint32_t pos;
/* We don't support symlinks longer than one block */
if (inode->i_size > inode->i_sb->s_blocksize) {
err = -ENAMETOOLONG;
goto out_unmap;
}
iinfo = UDF_I(inode);
pos = udf_block_map(inode, 0);
down_read(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
} else {
bh = sb_bread(inode->i_sb, pos);
if (!bh) {
err = -EIO;
goto out_unlock_inode;
}
symlink = bh->b_data;
}
udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
brelse(bh);
up_read(&iinfo->i_data_sem);
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
out_unlock_inode:
up_read(&iinfo->i_data_sem);
SetPageError(page);
out_unmap:
kunmap(page);
unlock_page(page);
return err;
}
/*
* symlinks can't do much...
*/
const struct address_space_operations udf_symlink_aops = {
.readpage = udf_symlink_filler,
};
| ./CrossVul/dataset_final_sorted/CWE-17/c/bad_2415_2 |
crossvul-cpp_data_good_1454_0 | /*
* Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
* Written by Alex Tomas <alex@clusterfs.com>
*
* Architecture independence:
* Copyright (c) 2005, Bull S.A.
* Written by Pierre Peiffer <pierre.peiffer@bull.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* Extents support for EXT4
*
* TODO:
* - ext4*_error() should be used in some situations
* - analyze all BUG()/BUG_ON(), use -EIO where appropriate
* - smart tree reduction
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"
#include <trace/events/ext4.h>
/*
* used by extent splitting.
*/
#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1 0x2 /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2 0x4 /* mark second half unwritten */
#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
static __le32 ext4_extent_block_csum(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
EXT4_EXTENT_TAIL_OFFSET(eh));
return cpu_to_le32(csum);
}
static int ext4_extent_block_csum_verify(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_extent_tail *et;
if (!ext4_has_metadata_csum(inode->i_sb))
return 1;
et = find_ext4_extent_tail(eh);
if (et->et_checksum != ext4_extent_block_csum(inode, eh))
return 0;
return 1;
}
static void ext4_extent_block_csum_set(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_extent_tail *et;
if (!ext4_has_metadata_csum(inode->i_sb))
return;
et = find_ext4_extent_tail(eh);
et->et_checksum = ext4_extent_block_csum(inode, eh);
}
static int ext4_split_extent(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
struct ext4_map_blocks *map,
int split_flag,
int flags);
static int ext4_split_extent_at(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
ext4_lblk_t split,
int split_flag,
int flags);
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
struct inode *inode,
int needed)
{
int err;
if (!ext4_handle_valid(handle))
return 0;
if (handle->h_buffer_credits > needed)
return 0;
err = ext4_journal_extend(handle, needed);
if (err <= 0)
return err;
err = ext4_truncate_restart_trans(handle, inode, needed);
if (err == 0)
err = -EAGAIN;
return err;
}
/*
* could return:
* - EROFS
* - ENOMEM
*/
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
{
if (path->p_bh) {
/* path points to block */
BUFFER_TRACE(path->p_bh, "get_write_access");
return ext4_journal_get_write_access(handle, path->p_bh);
}
/* path points to leaf/index in inode body */
/* we use in-core data, no need to protect them */
return 0;
}
/*
* could return:
* - EROFS
* - ENOMEM
* - EIO
*/
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
struct inode *inode, struct ext4_ext_path *path)
{
int err;
WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
if (path->p_bh) {
ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
/* path points to block */
err = __ext4_handle_dirty_metadata(where, line, handle,
inode, path->p_bh);
} else {
/* path points to leaf/index in inode body */
err = ext4_mark_inode_dirty(handle, inode);
}
return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t block)
{
if (path) {
int depth = path->p_depth;
struct ext4_extent *ex;
/*
* Try to predict block placement assuming that we are
* filling in a file which will eventually be
* non-sparse --- i.e., in the case of libbfd writing
* an ELF object's sections out-of-order but in a way
* that eventually results in a contiguous object or
* executable file, or some database extending a table
* space file. However, this is actually somewhat
* non-ideal if we are writing a sparse file such as
* qemu or KVM writing a raw image file that is going
* to stay fairly sparse, since it will end up
* fragmenting the file system's free space. Maybe we
* should have some heuristics or some way to allow
* userspace to pass a hint to the file system,
* especially if the latter case turns out to be
* common.
*/
ex = path[depth].p_ext;
if (ex) {
ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
if (block > ext_block)
return ext_pblk + (block - ext_block);
else
return ext_pblk - (ext_block - block);
}
/* it looks like index is empty;
* try to find starting block from index itself */
if (path[depth].p_bh)
return path[depth].p_bh->b_blocknr;
}
/* OK. use inode's group */
return ext4_inode_to_goal_block(inode);
}
/*
* Allocation for a meta data block
*/
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex, int *err, unsigned int flags)
{
ext4_fsblk_t goal, newblock;
goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
NULL, err);
return newblock;
}
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
int size;
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
if (!check && size > 6)
size = 6;
#endif
return size;
}
static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
int size;
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
if (!check && size > 5)
size = 5;
#endif
return size;
}
static inline int ext4_ext_space_root(struct inode *inode, int check)
{
int size;
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
if (!check && size > 3)
size = 3;
#endif
return size;
}
static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
int size;
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
if (!check && size > 4)
size = 4;
#endif
return size;
}
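/*
* Worked example for the capacity helpers above, assuming the
* standard 12-byte on-disk records (ext4_extent_header, ext4_extent
* and ext4_extent_idx are all 12 bytes):
* - root node (60-byte i_data): (60 - 12) / 12 = 4 entries
* - 4KiB interior/leaf block: (4096 - 12) / 12 = 340 entries
* AGGRESSIVE_TEST shrinks these limits so that the grow/split paths
* are exercised even by small files.
*/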
static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath, ext4_lblk_t lblk,
int nofail)
{
struct ext4_ext_path *path = *ppath;
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
}
/*
* Calculate the number of metadata blocks needed
* to allocate @blocks
* Worst case is one block per extent
*/
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
struct ext4_inode_info *ei = EXT4_I(inode);
int idxs;
idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx));
/*
* If the new delayed allocation block is contiguous with the
* previous da block, it can share index blocks with the
* previous block, so we only need to allocate a new index
* block every idxs leaf blocks. At idxs**2 blocks, we need
* an additional index block, and at idxs**3 blocks, yet
* another index block.
*/
if (ei->i_da_metadata_calc_len &&
ei->i_da_metadata_calc_last_lblock+1 == lblock) {
int num = 0;
if ((ei->i_da_metadata_calc_len % idxs) == 0)
num++;
if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
num++;
if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
num++;
ei->i_da_metadata_calc_len = 0;
} else
ei->i_da_metadata_calc_len++;
ei->i_da_metadata_calc_last_lblock++;
return num;
}
/*
* In the worst case we need a new set of index blocks at
* every level of the inode's extent tree.
*/
ei->i_da_metadata_calc_len = 1;
ei->i_da_metadata_calc_last_lblock = lblock;
return ext_depth(inode) + 1;
}
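/*
* Worked example for the calculation above, assuming 4KiB blocks:
* idxs = (4096 - 12) / 12 = 340, so along a contiguous run of
* delayed allocations a new metadata block is charged every 340
* blocks, one more every 340^2 blocks, and another every 340^3
* blocks. A non-contiguous block falls back to the worst case of
* ext_depth(inode) + 1 new metadata blocks.
*/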
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
int max;
if (depth == ext_depth(inode)) {
if (depth == 0)
max = ext4_ext_space_root(inode, 1);
else
max = ext4_ext_space_root_idx(inode, 1);
} else {
if (depth == 0)
max = ext4_ext_space_block(inode, 1);
else
max = ext4_ext_space_block_idx(inode, 1);
}
return max;
}
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
ext4_lblk_t last = lblock + len - 1;
if (lblock > last)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
static int ext4_valid_extent_idx(struct inode *inode,
struct ext4_extent_idx *ext_idx)
{
ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}
static int ext4_valid_extent_entries(struct inode *inode,
struct ext4_extent_header *eh,
int depth)
{
unsigned short entries;
if (eh->eh_entries == 0)
return 1;
entries = le16_to_cpu(eh->eh_entries);
if (depth == 0) {
/* leaf entries */
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
ext4_fsblk_t pblock = 0;
ext4_lblk_t lblock = 0;
ext4_lblk_t prev = 0;
int len = 0;
while (entries) {
if (!ext4_valid_extent(inode, ext))
return 0;
/* Check for overlapping extents */
lblock = le32_to_cpu(ext->ee_block);
len = ext4_ext_get_actual_len(ext);
if ((lblock <= prev) && prev) {
pblock = ext4_ext_pblock(ext);
es->s_last_error_block = cpu_to_le64(pblock);
return 0;
}
ext++;
entries--;
prev = lblock + len - 1;
}
} else {
struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
while (entries) {
if (!ext4_valid_extent_idx(inode, ext_idx))
return 0;
ext_idx++;
entries--;
}
}
return 1;
}
static int __ext4_ext_check(const char *function, unsigned int line,
struct inode *inode, struct ext4_extent_header *eh,
int depth, ext4_fsblk_t pblk)
{
const char *error_msg;
int max = 0;
if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
error_msg = "invalid magic";
goto corrupted;
}
if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
error_msg = "unexpected eh_depth";
goto corrupted;
}
if (unlikely(eh->eh_max == 0)) {
error_msg = "invalid eh_max";
goto corrupted;
}
max = ext4_ext_max_entries(inode, depth);
if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
error_msg = "too large eh_max";
goto corrupted;
}
if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
error_msg = "invalid eh_entries";
goto corrupted;
}
if (!ext4_valid_extent_entries(inode, eh, depth)) {
error_msg = "invalid extent entries";
goto corrupted;
}
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
error_msg = "extent tree corrupted";
goto corrupted;
}
return 0;
corrupted:
ext4_error_inode(inode, function, line, 0,
"pblk %llu bad header/extent: %s - magic %x, "
"entries %u, max %u(%u), depth %u(%u)",
(unsigned long long) pblk, error_msg,
le16_to_cpu(eh->eh_magic),
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
max, le16_to_cpu(eh->eh_depth), depth);
return -EIO;
}
#define ext4_ext_check(inode, eh, depth, pblk) \
__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
int ext4_ext_check_inode(struct inode *inode)
{
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
struct inode *inode, ext4_fsblk_t pblk, int depth,
int flags)
{
struct buffer_head *bh;
int err;
bh = sb_getblk(inode->i_sb, pblk);
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
if (!bh_uptodate_or_lock(bh)) {
trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
err = bh_submit_read(bh);
if (err < 0)
goto errout;
}
if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
return bh;
err = __ext4_ext_check(function, line, inode,
ext_block_hdr(bh), depth, pblk);
if (err)
goto errout;
set_buffer_verified(bh);
/*
* If this is a leaf block, cache all of its entries
*/
if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
struct ext4_extent_header *eh = ext_block_hdr(bh);
struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
ext4_lblk_t prev = 0;
int i;
for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
unsigned int status = EXTENT_STATUS_WRITTEN;
ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
int len = ext4_ext_get_actual_len(ex);
if (prev && (prev != lblk))
ext4_es_cache_extent(inode, prev,
lblk - prev, ~0,
EXTENT_STATUS_HOLE);
if (ext4_ext_is_unwritten(ex))
status = EXTENT_STATUS_UNWRITTEN;
ext4_es_cache_extent(inode, lblk, len,
ext4_ext_pblock(ex), status);
prev = lblk + len;
}
}
return bh;
errout:
put_bh(bh);
return ERR_PTR(err);
}
#define read_extent_tree_block(inode, pblk, depth, flags) \
__read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \
(depth), (flags))
/*
* This function is called to cache a file's extent information in the
* extent status tree
*/
int ext4_ext_precache(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_ext_path *path = NULL;
struct buffer_head *bh;
int i = 0, depth, ret = 0;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return 0; /* not an extent-mapped inode */
down_read(&ei->i_data_sem);
depth = ext_depth(inode);
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
GFP_NOFS);
if (path == NULL) {
up_read(&ei->i_data_sem);
return -ENOMEM;
}
/* Don't cache anything if there are no external extent blocks */
if (depth == 0)
goto out;
path[0].p_hdr = ext_inode_hdr(inode);
ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
if (ret)
goto out;
path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
while (i >= 0) {
/*
* If this is a leaf block or we've reached the end of
* the index block, go up
*/
if ((i == depth) ||
path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
continue;
}
bh = read_extent_tree_block(inode,
ext4_idx_pblock(path[i].p_idx++),
depth - i - 1,
EXT4_EX_FORCE_CACHE);
if (IS_ERR(bh)) {
ret = PTR_ERR(bh);
break;
}
i++;
path[i].p_bh = bh;
path[i].p_hdr = ext_block_hdr(bh);
path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
}
ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
up_read(&ei->i_data_sem);
ext4_ext_drop_refs(path);
kfree(path);
return ret;
}
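/*
* The loop above is an iterative depth-first walk: path[] acts as an
* explicit stack and i is the current level, so every index entry is
* visited exactly once and every leaf block goes through
* read_extent_tree_block() with EXT4_EX_FORCE_CACHE, populating the
* extent status tree.
*/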
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
int k, l = path->p_depth;
ext_debug("path:");
for (k = 0; k <= l; k++, path++) {
if (path->p_idx) {
ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
} else if (path->p_ext) {
ext_debug(" %d:[%d]%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_is_unwritten(path->p_ext),
ext4_ext_get_actual_len(path->p_ext),
ext4_ext_pblock(path->p_ext));
} else
ext_debug(" []");
}
ext_debug("\n");
}
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
int depth = ext_depth(inode);
struct ext4_extent_header *eh;
struct ext4_extent *ex;
int i;
if (!path)
return;
eh = path[depth].p_hdr;
ex = EXT_FIRST_EXTENT(eh);
ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
}
ext_debug("\n");
}
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
ext4_fsblk_t newblock, int level)
{
int depth = ext_depth(inode);
struct ext4_extent *ex;
if (depth != level) {
struct ext4_extent_idx *idx;
idx = path[level].p_idx;
while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
ext_debug("%d: move %d:%llu in new index %llu\n", level,
le32_to_cpu(idx->ei_block),
ext4_idx_pblock(idx),
newblock);
idx++;
}
return;
}
ex = path[depth].p_ext;
while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
le32_to_cpu(ex->ee_block),
ext4_ext_pblock(ex),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex),
newblock);
ex++;
}
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
int depth, i;
if (!path)
return;
depth = path->p_depth;
for (i = 0; i <= depth; i++, path++)
if (path->p_bh) {
brelse(path->p_bh);
path->p_bh = NULL;
}
}
/*
* ext4_ext_binsearch_idx:
* binary search for the closest index of the given block
* the header must be checked before calling this
*/
static void
ext4_ext_binsearch_idx(struct inode *inode,
struct ext4_ext_path *path, ext4_lblk_t block)
{
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent_idx *r, *l, *m;
ext_debug("binsearch for %u(idx): ", block);
l = EXT_FIRST_INDEX(eh) + 1;
r = EXT_LAST_INDEX(eh);
while (l <= r) {
m = l + (r - l) / 2;
if (block < le32_to_cpu(m->ei_block))
r = m - 1;
else
l = m + 1;
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
m, le32_to_cpu(m->ei_block),
r, le32_to_cpu(r->ei_block));
}
path->p_idx = l - 1;
ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
{
struct ext4_extent_idx *chix, *ix;
int k;
chix = ix = EXT_FIRST_INDEX(eh);
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
if (k != 0 &&
le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
printk(KERN_DEBUG "k=%d, ix=0x%p, "
"first=0x%p\n", k,
ix, EXT_FIRST_INDEX(eh));
printk(KERN_DEBUG "%u <= %u\n",
le32_to_cpu(ix->ei_block),
le32_to_cpu(ix[-1].ei_block));
}
BUG_ON(k && le32_to_cpu(ix->ei_block)
<= le32_to_cpu(ix[-1].ei_block));
if (block < le32_to_cpu(ix->ei_block))
break;
chix = ix;
}
BUG_ON(chix != path->p_idx);
}
#endif
}
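/*
* Example of the search above: for index entries with ei_block
* values {0, 100, 200} and block == 150, the loop ends with l at
* the 200 entry, so p_idx = l - 1 picks the 100 entry -- the last
* index whose ei_block is <= block. Starting l at
* EXT_FIRST_INDEX(eh) + 1 guarantees that l - 1 is always valid.
* ext4_ext_binsearch() below uses the same scheme on leaf extents.
*/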
/*
* ext4_ext_binsearch:
* binary search for closest extent of the given block
* the header must be checked before calling this
*/
static void
ext4_ext_binsearch(struct inode *inode,
struct ext4_ext_path *path, ext4_lblk_t block)
{
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent *r, *l, *m;
if (eh->eh_entries == 0) {
/*
* this leaf is empty:
* we get such a leaf in split/add case
*/
return;
}
ext_debug("binsearch for %u: ", block);
l = EXT_FIRST_EXTENT(eh) + 1;
r = EXT_LAST_EXTENT(eh);
while (l <= r) {
m = l + (r - l) / 2;
if (block < le32_to_cpu(m->ee_block))
r = m - 1;
else
l = m + 1;
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
m, le32_to_cpu(m->ee_block),
r, le32_to_cpu(r->ee_block));
}
path->p_ext = l - 1;
ext_debug(" -> %d:%llu:[%d]%d ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_pblock(path->p_ext),
ext4_ext_is_unwritten(path->p_ext),
ext4_ext_get_actual_len(path->p_ext));
#ifdef CHECK_BINSEARCH
{
struct ext4_extent *chex, *ex;
int k;
chex = ex = EXT_FIRST_EXTENT(eh);
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
BUG_ON(k && le32_to_cpu(ex->ee_block)
<= le32_to_cpu(ex[-1].ee_block));
if (block < le32_to_cpu(ex->ee_block))
break;
chex = ex;
}
BUG_ON(chex != path->p_ext);
}
#endif
}
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
struct ext4_extent_header *eh;
eh = ext_inode_hdr(inode);
eh->eh_depth = 0;
eh->eh_entries = 0;
eh->eh_magic = EXT4_EXT_MAGIC;
eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
ext4_mark_inode_dirty(handle, inode);
return 0;
}
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
struct ext4_ext_path **orig_path, int flags)
{
struct ext4_extent_header *eh;
struct buffer_head *bh;
struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
short int depth, i, ppos = 0;
int ret;
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
if (path) {
ext4_ext_drop_refs(path);
if (depth > path[0].p_maxdepth) {
kfree(path);
*orig_path = path = NULL;
}
}
if (!path) {
/* account possible depth increase */
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
GFP_NOFS);
if (unlikely(!path))
return ERR_PTR(-ENOMEM);
path[0].p_maxdepth = depth + 1;
}
path[0].p_hdr = eh;
path[0].p_bh = NULL;
i = depth;
/* walk through the tree */
while (i) {
ext_debug("depth %d: num %d, max %d\n",
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
ext4_ext_binsearch_idx(inode, path + ppos, block);
path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
flags);
if (unlikely(IS_ERR(bh))) {
ret = PTR_ERR(bh);
goto err;
}
eh = ext_block_hdr(bh);
ppos++;
if (unlikely(ppos > depth)) {
put_bh(bh);
EXT4_ERROR_INODE(inode,
"ppos %d > depth %d", ppos, depth);
ret = -EIO;
goto err;
}
path[ppos].p_bh = bh;
path[ppos].p_hdr = eh;
}
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
path[ppos].p_idx = NULL;
/* find extent */
ext4_ext_binsearch(inode, path + ppos, block);
/* if not an empty leaf */
if (path[ppos].p_ext)
path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
ext4_ext_show_path(inode, path);
return path;
err:
ext4_ext_drop_refs(path);
kfree(path);
if (orig_path)
*orig_path = NULL;
return ERR_PTR(ret);
}
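/*
* Note on the contract above: the returned array holds one entry per
* level, path[0] for the root through path[depth] for the leaf, and
* is allocated with room for one extra level (p_maxdepth = depth + 1)
* so it can be reused across a single tree grow. The caller owns the
* result and must release it with ext4_ext_drop_refs() plus kfree();
* on error, *orig_path (if supplied) is cleared to NULL.
*/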
/*
* ext4_ext_insert_index:
* insert new index [@logical;@ptr] into the block at @curp;
* check where to insert: before @curp or after @curp
*/
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
struct ext4_ext_path *curp,
int logical, ext4_fsblk_t ptr)
{
struct ext4_extent_idx *ix;
int len, err;
err = ext4_ext_get_access(handle, inode, curp);
if (err)
return err;
if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
EXT4_ERROR_INODE(inode,
"logical %d == ei_block %d!",
logical, le32_to_cpu(curp->p_idx->ei_block));
return -EIO;
}
if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
>= le16_to_cpu(curp->p_hdr->eh_max))) {
EXT4_ERROR_INODE(inode,
"eh_entries %d >= eh_max %d!",
le16_to_cpu(curp->p_hdr->eh_entries),
le16_to_cpu(curp->p_hdr->eh_max));
return -EIO;
}
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
/* insert after */
ext_debug("insert new index %d after: %llu\n", logical, ptr);
ix = curp->p_idx + 1;
} else {
/* insert before */
ext_debug("insert new index %d before: %llu\n", logical, ptr);
ix = curp->p_idx;
}
len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
BUG_ON(len < 0);
if (len > 0) {
ext_debug("insert new index %d: "
"move %d indices from 0x%p to 0x%p\n",
logical, len, ix, ix + 1);
memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
}
if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
return -EIO;
}
ix->ei_block = cpu_to_le32(logical);
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
return -EIO;
}
err = ext4_ext_dirty(handle, inode, curp);
ext4_std_error(inode->i_sb, err);
return err;
}
/*
* ext4_ext_split:
* inserts a new subtree into the path, using the free index entry
* at depth @at:
* - allocates all needed blocks (new leaf and all intermediate index blocks)
* - makes decision where to split
* - moves remaining extents and index entries (right to the split point)
* into the newly allocated blocks
* - initializes subtree
*/
static int ext4_ext_split(handle_t *handle, struct inode *inode,
unsigned int flags,
struct ext4_ext_path *path,
struct ext4_extent *newext, int at)
{
struct buffer_head *bh = NULL;
int depth = ext_depth(inode);
struct ext4_extent_header *neh;
struct ext4_extent_idx *fidx;
int i = at, k, m, a;
ext4_fsblk_t newblock, oldblock;
__le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
int err = 0;
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
/* if current leaf will be split, then we should use
* border from split point */
if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
return -EIO;
}
if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
border = path[depth].p_ext[1].ee_block;
ext_debug("leaf will be split."
" next leaf starts at %d\n",
le32_to_cpu(border));
} else {
border = newext->ee_block;
ext_debug("leaf will be added."
" next leaf starts at %d\n",
le32_to_cpu(border));
}
/*
* If an error occurs, we break processing and mark the
* filesystem read-only. The index won't be inserted and the
* tree will remain in a consistent state. The next mount
* will repair the buffers too.
*/
/*
* Get array to track all allocated blocks.
* We need this to handle errors and free blocks
* upon them.
*/
ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
if (!ablocks)
return -ENOMEM;
/* allocate all needed blocks */
ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
for (a = 0; a < depth - at; a++) {
newblock = ext4_ext_new_meta_block(handle, inode, path,
newext, &err, flags);
if (newblock == 0)
goto cleanup;
ablocks[a] = newblock;
}
/* initialize new leaf */
newblock = ablocks[--a];
if (unlikely(newblock == 0)) {
EXT4_ERROR_INODE(inode, "newblock == 0!");
err = -EIO;
goto cleanup;
}
bh = sb_getblk(inode->i_sb, newblock);
if (unlikely(!bh)) {
err = -ENOMEM;
goto cleanup;
}
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err)
goto cleanup;
neh = ext_block_hdr(bh);
neh->eh_entries = 0;
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
neh->eh_depth = 0;
/* move remainder of path[depth] to the new leaf */
if (unlikely(path[depth].p_hdr->eh_entries !=
path[depth].p_hdr->eh_max)) {
EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
path[depth].p_hdr->eh_entries,
path[depth].p_hdr->eh_max);
err = -EIO;
goto cleanup;
}
/* start copy from next extent */
m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
ext4_ext_show_move(inode, path, newblock, depth);
if (m) {
struct ext4_extent *ex;
ex = EXT_FIRST_EXTENT(neh);
memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
le16_add_cpu(&neh->eh_entries, m);
}
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto cleanup;
brelse(bh);
bh = NULL;
/* correct old leaf */
if (m) {
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto cleanup;
le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto cleanup;
}
/* create intermediate indexes */
k = depth - at - 1;
if (unlikely(k < 0)) {
EXT4_ERROR_INODE(inode, "k %d < 0!", k);
err = -EIO;
goto cleanup;
}
if (k)
ext_debug("create %d intermediate indices\n", k);
/* insert new index into current index block */
/* current depth stored in i var */
i = depth - 1;
while (k--) {
oldblock = newblock;
newblock = ablocks[--a];
bh = sb_getblk(inode->i_sb, newblock);
if (unlikely(!bh)) {
err = -ENOMEM;
goto cleanup;
}
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err)
goto cleanup;
neh = ext_block_hdr(bh);
neh->eh_entries = cpu_to_le16(1);
neh->eh_magic = EXT4_EXT_MAGIC;
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
neh->eh_depth = cpu_to_le16(depth - i);
fidx = EXT_FIRST_INDEX(neh);
fidx->ei_block = border;
ext4_idx_store_pblock(fidx, oldblock);
ext_debug("int.index at %d (block %llu): %u -> %llu\n",
i, newblock, le32_to_cpu(border), oldblock);
/* move remainder of path[i] to the new index block */
if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
EXT_LAST_INDEX(path[i].p_hdr))) {
EXT4_ERROR_INODE(inode,
"EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
le32_to_cpu(path[i].p_ext->ee_block));
err = -EIO;
goto cleanup;
}
/* start copy indexes */
m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
EXT_MAX_INDEX(path[i].p_hdr));
ext4_ext_show_move(inode, path, newblock, i);
if (m) {
memmove(++fidx, path[i].p_idx,
sizeof(struct ext4_extent_idx) * m);
le16_add_cpu(&neh->eh_entries, m);
}
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto cleanup;
brelse(bh);
bh = NULL;
/* correct old index */
if (m) {
err = ext4_ext_get_access(handle, inode, path + i);
if (err)
goto cleanup;
le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
err = ext4_ext_dirty(handle, inode, path + i);
if (err)
goto cleanup;
}
i--;
}
/* insert new index */
err = ext4_ext_insert_index(handle, inode, path + at,
le32_to_cpu(border), newblock);
cleanup:
if (bh) {
if (buffer_locked(bh))
unlock_buffer(bh);
brelse(bh);
}
if (err) {
/* free all allocated blocks in error case */
for (i = 0; i < depth; i++) {
if (!ablocks[i])
continue;
ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
EXT4_FREE_BLOCKS_METADATA);
}
}
kfree(ablocks);
return err;
}
/*
* ext4_ext_grow_indepth:
* implements tree growing procedure:
* - allocates new block
* - moves top-level data (index block or leaf) into the new block
* - initializes new top-level, creating index that points to the
* just created block
*/
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
unsigned int flags)
{
struct ext4_extent_header *neh;
struct buffer_head *bh;
ext4_fsblk_t newblock, goal = 0;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
int err = 0;
/* Try to prepend new index to old one */
if (ext_depth(inode))
goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
if (goal > le32_to_cpu(es->s_first_data_block)) {
flags |= EXT4_MB_HINT_TRY_GOAL;
goal--;
} else
goal = ext4_inode_to_goal_block(inode);
newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
NULL, &err);
if (newblock == 0)
return err;
bh = sb_getblk(inode->i_sb, newblock);
if (unlikely(!bh))
return -ENOMEM;
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err) {
unlock_buffer(bh);
goto out;
}
/* move top-level index/leaf into new block */
memmove(bh->b_data, EXT4_I(inode)->i_data,
sizeof(EXT4_I(inode)->i_data));
/* set size of new block */
neh = ext_block_hdr(bh);
/* old root could have indexes or leaves
* so calculate eh_max the right way */
if (ext_depth(inode))
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
else
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto out;
/* Update top-level index: num,max,pointer */
neh = ext_inode_hdr(inode);
neh->eh_entries = cpu_to_le16(1);
ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
if (neh->eh_depth == 0) {
/* Root extent block becomes index block */
neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
EXT_FIRST_INDEX(neh)->ei_block =
EXT_FIRST_EXTENT(neh)->ee_block;
}
ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
le16_add_cpu(&neh->eh_depth, 1);
ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
return err;
}
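/*
* Note on the growth above: the whole in-inode root body (the
* 60-byte i_data area) is copied into the freshly allocated block,
* and the root is then rewritten as a single index entry pointing
* at that block, so the tree gets one level deeper while the root
* keeps its fixed in-inode size.
*/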
/*
* ext4_ext_create_new_leaf:
* finds an empty index and adds a new leaf.
* if no free index is found, the tree is grown in depth instead.
*/
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
unsigned int mb_flags,
unsigned int gb_flags,
struct ext4_ext_path **ppath,
struct ext4_extent *newext)
{
struct ext4_ext_path *path = *ppath;
struct ext4_ext_path *curp;
int depth, i, err = 0;
repeat:
i = depth = ext_depth(inode);
/* walk up to the tree and look for free index entry */
curp = path + depth;
while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
i--;
curp--;
}
/* we use already allocated block for index block,
* so subsequent data blocks should be contiguous */
if (EXT_HAS_FREE_INDEX(curp)) {
/* if we found index with free entry, then use that
* entry: create all needed subtree and add new leaf */
err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
if (err)
goto out;
/* refill path */
path = ext4_find_extent(inode,
(ext4_lblk_t)le32_to_cpu(newext->ee_block),
ppath, gb_flags);
if (IS_ERR(path))
err = PTR_ERR(path);
} else {
/* tree is full, time to grow in depth */
err = ext4_ext_grow_indepth(handle, inode, mb_flags);
if (err)
goto out;
/* refill path */
path = ext4_find_extent(inode,
(ext4_lblk_t)le32_to_cpu(newext->ee_block),
ppath, gb_flags);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out;
}
/*
* only the first grow (depth 0 -> 1) frees space in the leaf
* itself; in all other cases we have to split the grown tree
*/
depth = ext_depth(inode);
if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
/* now we need to split */
goto repeat;
}
}
out:
return err;
}
/*
* search for the closest allocated block to the left of *logical
* and return it at @logical + its physical address at @phys.
* if *logical is the smallest allocated block, the function
* returns 0 at @phys.
* return value contains 0 (success) or error code
*/
static int ext4_ext_search_left(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
int depth, ee_len;
if (unlikely(path == NULL)) {
EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
return -EIO;
}
depth = path->p_depth;
*phys = 0;
if (depth == 0 && path->p_ext == NULL)
return 0;
/* usually the extent in the path covers blocks smaller
* than *logical, but it can be that the extent is the
* first one in the file */
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
EXT4_ERROR_INODE(inode,
"EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
*logical, le32_to_cpu(ex->ee_block));
return -EIO;
}
while (--depth >= 0) {
ix = path[depth].p_idx;
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode,
"ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
depth);
return -EIO;
}
}
return 0;
}
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
EXT4_ERROR_INODE(inode,
"logical %d < ee_block %d + ee_len %d!",
*logical, le32_to_cpu(ex->ee_block), ee_len);
return -EIO;
}
*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
*phys = ext4_ext_pblock(ex) + ee_len - 1;
return 0;
}
/*
* search for the closest allocated block to the right of *logical
* and return it at @logical + its physical address at @phys.
* if *logical is the largest allocated block, the function
* returns 0 at @phys
* return value contains 0 (success) or error code
*/
static int ext4_ext_search_right(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys,
struct ext4_extent **ret_ex)
{
struct buffer_head *bh = NULL;
struct ext4_extent_header *eh;
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
ext4_fsblk_t block;
int depth; /* Note, NOT eh_depth; depth from top of tree */
int ee_len;
if (unlikely(path == NULL)) {
EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
return -EIO;
}
depth = path->p_depth;
*phys = 0;
if (depth == 0 && path->p_ext == NULL)
return 0;
/* usually the extent in the path covers blocks smaller
* than *logical, but it can be that the extent is the
* first one in the file */
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
EXT4_ERROR_INODE(inode,
"first_extent(path[%d].p_hdr) != ex",
depth);
return -EIO;
}
while (--depth >= 0) {
ix = path[depth].p_idx;
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode,
"ix != EXT_FIRST_INDEX *logical %d!",
*logical);
return -EIO;
}
}
goto found_extent;
}
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
EXT4_ERROR_INODE(inode,
"logical %d < ee_block %d + ee_len %d!",
*logical, le32_to_cpu(ex->ee_block), ee_len);
return -EIO;
}
if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
/* next allocated block in this leaf */
ex++;
goto found_extent;
}
/* go up and search for index to the right */
while (--depth >= 0) {
ix = path[depth].p_idx;
if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
goto got_index;
}
/* we've gone up to the root and found no index to the right */
return 0;
got_index:
/* we've found index to the right, let's
* follow it and find the closest allocated
* block to the right */
ix++;
block = ext4_idx_pblock(ix);
while (++depth < path->p_depth) {
/* subtract from p_depth to get proper eh_depth */
bh = read_extent_tree_block(inode, block,
path->p_depth - depth, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
eh = ext_block_hdr(bh);
ix = EXT_FIRST_INDEX(eh);
block = ext4_idx_pblock(ix);
put_bh(bh);
}
bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
eh = ext_block_hdr(bh);
ex = EXT_FIRST_EXTENT(eh);
found_extent:
*logical = le32_to_cpu(ex->ee_block);
*phys = ext4_ext_pblock(ex);
*ret_ex = ex;
if (bh)
put_bh(bh);
return 0;
}
/*
* ext4_ext_next_allocated_block:
* returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
* NOTE: it considers block number from index entry as
* allocated block. Thus, index entries have to be consistent
* with leaves.
*/
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
int depth;
BUG_ON(path == NULL);
depth = path->p_depth;
if (depth == 0 && path->p_ext == NULL)
return EXT_MAX_BLOCKS;
while (depth >= 0) {
if (depth == path->p_depth) {
/* leaf */
if (path[depth].p_ext &&
path[depth].p_ext !=
EXT_LAST_EXTENT(path[depth].p_hdr))
return le32_to_cpu(path[depth].p_ext[1].ee_block);
} else {
/* index */
if (path[depth].p_idx !=
EXT_LAST_INDEX(path[depth].p_hdr))
return le32_to_cpu(path[depth].p_idx[1].ei_block);
}
depth--;
}
return EXT_MAX_BLOCKS;
}
/*
* ext4_ext_next_leaf_block:
* returns first allocated block from next leaf or EXT_MAX_BLOCKS
*/
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
int depth;
BUG_ON(path == NULL);
depth = path->p_depth;
/* a zero-depth tree has no leaf blocks at all */
if (depth == 0)
return EXT_MAX_BLOCKS;
/* go to index block */
depth--;
while (depth >= 0) {
if (path[depth].p_idx !=
EXT_LAST_INDEX(path[depth].p_hdr))
return (ext4_lblk_t)
le32_to_cpu(path[depth].p_idx[1].ei_block);
depth--;
}
return EXT_MAX_BLOCKS;
}
/*
* ext4_ext_correct_indexes:
* if a leaf gets modified and the modified extent is first in the leaf,
* then we have to correct all indexes above.
* TODO: do we need to correct tree in all cases?
*/
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
{
struct ext4_extent_header *eh;
int depth = ext_depth(inode);
struct ext4_extent *ex;
__le32 border;
int k, err = 0;
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
if (unlikely(ex == NULL || eh == NULL)) {
EXT4_ERROR_INODE(inode,
"ex %p == NULL or eh %p == NULL", ex, eh);
return -EIO;
}
if (depth == 0) {
/* there is no tree at all */
return 0;
}
if (ex != EXT_FIRST_EXTENT(eh)) {
/* we correct tree if first leaf got modified only */
return 0;
}
/*
* TODO: we need correction if border is smaller than current one
*/
k = depth - 1;
border = path[depth].p_ext->ee_block;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
return err;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
return err;
while (k--) {
/* change all left-side indexes */
if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
break;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
break;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
break;
}
return err;
}
int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
struct ext4_extent *ex2)
{
unsigned short ext1_ee_len, ext2_ee_len;
if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
return 0;
ext1_ee_len = ext4_ext_get_actual_len(ex1);
ext2_ee_len = ext4_ext_get_actual_len(ex2);
if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
le32_to_cpu(ex2->ee_block))
return 0;
/*
* To allow future support for preallocated extents to be added
* as an RO_COMPAT feature, refuse to merge to extents if
* this can result in the top bit of ee_len being set.
*/
if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
return 0;
if (ext4_ext_is_unwritten(ex1) &&
(ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
atomic_read(&EXT4_I(inode)->i_unwritten) ||
(ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
return 0;
#ifdef AGGRESSIVE_TEST
if (ext1_ee_len >= 4)
return 0;
#endif
if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
return 1;
return 0;
}
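/*
* Example of the merge rules above: a written extent covering logical
* blocks 100..149 at physical blocks 5000..5049 can be merged with
* one covering 150..159 at 5050..5059 (same written state, logically
* and physically contiguous, combined length within EXT_INIT_MAX_LEN).
* The same pair would not merge if the second extent started at
* physical block 6000, or if only one of them were unwritten.
*/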
/*
* This function tries to merge the "ex" extent to the next extent in the tree.
* It always tries to merge towards the right. If you want to merge towards
* the left, pass "ex - 1" as the argument instead of "ex".
* Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
* 1 if they got merged.
*/
static int ext4_ext_try_to_merge_right(struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex)
{
struct ext4_extent_header *eh;
unsigned int depth, len;
int merge_done = 0, unwritten;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
while (ex < EXT_LAST_EXTENT(eh)) {
if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
break;
/* merge with next extent! */
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(ex + 1));
if (unwritten)
ext4_ext_mark_unwritten(ex);
if (ex + 1 < EXT_LAST_EXTENT(eh)) {
len = (EXT_LAST_EXTENT(eh) - ex - 1)
* sizeof(struct ext4_extent);
memmove(ex + 1, ex + 2, len);
}
le16_add_cpu(&eh->eh_entries, -1);
merge_done = 1;
WARN_ON(eh->eh_entries == 0);
if (!eh->eh_entries)
EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
}
return merge_done;
}
/*
* This function does a very simple check to see if we can collapse
* an extent tree with a single extent tree leaf block into the inode.
*/
static void ext4_ext_try_to_merge_up(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path)
{
size_t s;
unsigned max_root = ext4_ext_space_root(inode, 0);
ext4_fsblk_t blk;
if ((path[0].p_depth != 1) ||
(le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
(le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
return;
/*
* We need to modify the block allocation bitmap and the block
* group descriptor to release the extent tree block. If we
* can't get the journal credits, give up.
*/
if (ext4_journal_extend(handle, 2))
return;
/*
* Copy the extent data up to the inode
*/
blk = ext4_idx_pblock(path[0].p_idx);
s = le16_to_cpu(path[1].p_hdr->eh_entries) *
sizeof(struct ext4_extent_idx);
s += sizeof(struct ext4_extent_header);
path[1].p_maxdepth = path[0].p_maxdepth;
memcpy(path[0].p_hdr, path[1].p_hdr, s);
path[0].p_depth = 0;
path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
path[0].p_hdr->eh_max = cpu_to_le16(max_root);
brelse(path[1].p_bh);
ext4_free_blocks(handle, inode, NULL, blk, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}
/*
* This function tries to merge the @ex extent with its neighbours in the
* tree, and then tries to collapse a single-leaf tree into the inode.
*/
static void ext4_ext_try_to_merge(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex) {
struct ext4_extent_header *eh;
unsigned int depth;
int merge_done = 0;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
if (ex > EXT_FIRST_EXTENT(eh))
merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
if (!merge_done)
(void) ext4_ext_try_to_merge_right(inode, path, ex);
ext4_ext_try_to_merge_up(handle, inode, path);
}
/*
* check if a portion of the "newext" extent overlaps with an
* existing extent.
*
* If there is an overlap discovered, it updates the length of the newext
* such that there will be no overlap, and then returns 1.
* If there is no overlap found, it returns 0.
*/
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
struct inode *inode,
struct ext4_extent *newext,
struct ext4_ext_path *path)
{
ext4_lblk_t b1, b2;
unsigned int depth, len1;
unsigned int ret = 0;
b1 = le32_to_cpu(newext->ee_block);
len1 = ext4_ext_get_actual_len(newext);
depth = ext_depth(inode);
if (!path[depth].p_ext)
goto out;
b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
/*
* get the next allocated block if the extent in the path
* is before the requested block(s)
*/
if (b2 < b1) {
b2 = ext4_ext_next_allocated_block(path);
if (b2 == EXT_MAX_BLOCKS)
goto out;
b2 = EXT4_LBLK_CMASK(sbi, b2);
}
/* check for wrap through zero on extent logical start block*/
if (b1 + len1 < b1) {
len1 = EXT_MAX_BLOCKS - b1;
newext->ee_len = cpu_to_le16(len1);
ret = 1;
}
/* check for overlap */
if (b1 + len1 > b2) {
newext->ee_len = cpu_to_le16(b2 - b1);
ret = 1;
}
out:
return ret;
}
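/*
* Example of the trim above (ignoring bigalloc cluster rounding): for
* newext at logical block 100 with length 50 and an existing extent
* starting at block 120, b1 + len1 (150) overlaps b2 (120), so ee_len
* is cut back to 20 blocks and 1 is returned, leaving the caller to
* map only the non-overlapping range.
*/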
/*
* ext4_ext_insert_extent:
* tries to merge the requested extent into an existing extent or
* inserts the requested extent as a new one into the tree,
* creating a new leaf in the no-space case.
*/
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath,
struct ext4_extent *newext, int gb_flags)
{
struct ext4_ext_path *path = *ppath;
struct ext4_extent_header *eh;
struct ext4_extent *ex, *fex;
struct ext4_extent *nearex; /* nearest extent */
struct ext4_ext_path *npath = NULL;
int depth, len, err;
ext4_lblk_t next;
int mb_flags = 0, unwritten;
if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
mb_flags |= EXT4_MB_DELALLOC_RESERVED;
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
return -EIO;
}
depth = ext_depth(inode);
ex = path[depth].p_ext;
eh = path[depth].p_hdr;
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
return -EIO;
}
/* try to insert block into found extent and return */
if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
/*
* Try to see whether we should rather test the extent to
* the right of ex, or to the left of ex. This is because
* ext4_find_extent() can return either the extent on the
* left, or on the right of the searched position. This
* will make merging more effective.
*/
if (ex < EXT_LAST_EXTENT(eh) &&
(le32_to_cpu(ex->ee_block) +
ext4_ext_get_actual_len(ex) <
le32_to_cpu(newext->ee_block))) {
ex += 1;
goto prepend;
} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
(le32_to_cpu(newext->ee_block) +
ext4_ext_get_actual_len(newext) <
le32_to_cpu(ex->ee_block)))
ex -= 1;
/* Try to append newex to the ex */
if (ext4_can_extents_be_merged(inode, ex, newext)) {
ext_debug("append [%d]%d block to %u:[%d]%d"
"(from %llu)\n",
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
le32_to_cpu(ex->ee_block),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex),
ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
return err;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(newext));
if (unwritten)
ext4_ext_mark_unwritten(ex);
eh = path[depth].p_hdr;
nearex = ex;
goto merge;
}
prepend:
/* Try to prepend newex to the ex */
if (ext4_can_extents_be_merged(inode, newext, ex)) {
ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
"(from %llu)\n",
le32_to_cpu(newext->ee_block),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
le32_to_cpu(ex->ee_block),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex),
ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
return err;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_block = newext->ee_block;
ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(newext));
if (unwritten)
ext4_ext_mark_unwritten(ex);
eh = path[depth].p_hdr;
nearex = ex;
goto merge;
}
}
depth = ext_depth(inode);
eh = path[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
goto has_space;
/* probably next leaf has space for us? */
fex = EXT_LAST_EXTENT(eh);
next = EXT_MAX_BLOCKS;
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
ext_debug("next leaf block - %u\n", next);
BUG_ON(npath != NULL);
npath = ext4_find_extent(inode, next, NULL, 0);
if (IS_ERR(npath))
return PTR_ERR(npath);
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
ext_debug("next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
path = npath;
goto has_space;
}
ext_debug("next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
}
/*
* There is no free space in the found leaf.
* We're going to add a new leaf to the tree.
*/
if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
mb_flags |= EXT4_MB_USE_RESERVED;
err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
ppath, newext);
if (err)
goto cleanup;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
has_space:
nearex = path[depth].p_ext;
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto cleanup;
if (!nearex) {
/* there is no extent in this leaf, create first one */
ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext));
nearex = EXT_FIRST_EXTENT(eh);
} else {
if (le32_to_cpu(newext->ee_block)
> le32_to_cpu(nearex->ee_block)) {
/* Insert after */
ext_debug("insert %u:%llu:[%d]%d before: "
"nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
nearex);
nearex++;
} else {
/* Insert before */
BUG_ON(newext->ee_block == nearex->ee_block);
ext_debug("insert %u:%llu:[%d]%d after: "
"nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
nearex);
}
len = EXT_LAST_EXTENT(eh) - nearex + 1;
if (len > 0) {
ext_debug("insert %u:%llu:[%d]%d: "
"move %d extents from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
len, nearex, nearex + 1);
memmove(nearex + 1, nearex,
len * sizeof(struct ext4_extent));
}
}
le16_add_cpu(&eh->eh_entries, 1);
path[depth].p_ext = nearex;
nearex->ee_block = newext->ee_block;
ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
nearex->ee_len = newext->ee_len;
merge:
/* try to merge extents */
if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(handle, inode, path, nearex);
/* time to correct all indexes above */
err = ext4_ext_correct_indexes(handle, inode, path);
if (err)
goto cleanup;
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
cleanup:
ext4_ext_drop_refs(npath);
kfree(npath);
return err;
}
static int ext4_fill_fiemap_extents(struct inode *inode,
ext4_lblk_t block, ext4_lblk_t num,
struct fiemap_extent_info *fieinfo)
{
struct ext4_ext_path *path = NULL;
struct ext4_extent *ex;
struct extent_status es;
ext4_lblk_t next, next_del, start = 0, end = 0;
ext4_lblk_t last = block + num;
int exists, depth = 0, err = 0;
unsigned int flags = 0;
unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
while (block < last && block != EXT_MAX_BLOCKS) {
num = last - block;
/* find extent for this block */
down_read(&EXT4_I(inode)->i_data_sem);
path = ext4_find_extent(inode, block, &path, 0);
if (IS_ERR(path)) {
up_read(&EXT4_I(inode)->i_data_sem);
err = PTR_ERR(path);
path = NULL;
break;
}
depth = ext_depth(inode);
if (unlikely(path[depth].p_hdr == NULL)) {
up_read(&EXT4_I(inode)->i_data_sem);
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
err = -EIO;
break;
}
ex = path[depth].p_ext;
next = ext4_ext_next_allocated_block(path);
flags = 0;
exists = 0;
if (!ex) {
/* there is no extent yet, so try to allocate
* all requested space */
start = block;
end = block + num;
} else if (le32_to_cpu(ex->ee_block) > block) {
/* need to allocate space before found extent */
start = block;
end = le32_to_cpu(ex->ee_block);
if (block + num < end)
end = block + num;
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
/* need to allocate space after found extent */
start = block;
end = block + num;
if (end >= next)
end = next;
} else if (block >= le32_to_cpu(ex->ee_block)) {
/*
* some part of requested space is covered
* by found extent
*/
start = block;
end = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
if (block + num < end)
end = block + num;
exists = 1;
} else {
BUG();
}
BUG_ON(end <= start);
if (!exists) {
es.es_lblk = start;
es.es_len = end - start;
es.es_pblk = 0;
} else {
es.es_lblk = le32_to_cpu(ex->ee_block);
es.es_len = ext4_ext_get_actual_len(ex);
es.es_pblk = ext4_ext_pblock(ex);
if (ext4_ext_is_unwritten(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
}
/*
* Find delayed extent and update es accordingly. We call
* it even in !exists case to find out whether es is the
* last existing extent or not.
*/
next_del = ext4_find_delayed_extent(inode, &es);
if (!exists && next_del) {
exists = 1;
flags |= (FIEMAP_EXTENT_DELALLOC |
FIEMAP_EXTENT_UNKNOWN);
}
up_read(&EXT4_I(inode)->i_data_sem);
if (unlikely(es.es_len == 0)) {
EXT4_ERROR_INODE(inode, "es.es_len == 0");
err = -EIO;
break;
}
/*
* This is possible iff next == next_del == EXT_MAX_BLOCKS.
* We need to check next == EXT_MAX_BLOCKS because it is
* possible for an extent to have both unwritten and delayed
* status: when a delayed-allocated extent is later allocated
* by fallocate, the status tree tracks both states in a
* single extent.
*
* So we could return an unwritten and delayed extent whose
* block is equal to 'next'.
*/
if (next == next_del && next == EXT_MAX_BLOCKS) {
flags |= FIEMAP_EXTENT_LAST;
if (unlikely(next_del != EXT_MAX_BLOCKS ||
next != EXT_MAX_BLOCKS)) {
EXT4_ERROR_INODE(inode,
"next extent == %u, next "
"delalloc extent = %u",
next, next_del);
err = -EIO;
break;
}
}
if (exists) {
err = fiemap_fill_next_extent(fieinfo,
(__u64)es.es_lblk << blksize_bits,
(__u64)es.es_pblk << blksize_bits,
(__u64)es.es_len << blksize_bits,
flags);
if (err < 0)
break;
if (err == 1) {
err = 0;
break;
}
}
block = es.es_lblk + es.es_len;
}
ext4_ext_drop_refs(path);
kfree(path);
return err;
}
/*
* ext4_ext_put_gap_in_cache:
* calculate boundaries of the gap that the requested block fits into
* and cache this gap
*/
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ext4_lblk_t block)
{
int depth = ext_depth(inode);
ext4_lblk_t len;
ext4_lblk_t lblock;
struct ext4_extent *ex;
struct extent_status es;
ex = path[depth].p_ext;
if (ex == NULL) {
/* there is no extent yet, so gap is [0;-] */
lblock = 0;
len = EXT_MAX_BLOCKS;
ext_debug("cache gap(whole file):");
} else if (block < le32_to_cpu(ex->ee_block)) {
lblock = block;
len = le32_to_cpu(ex->ee_block) - block;
ext_debug("cache gap(before): %u [%u:%u]",
block,
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
ext4_lblk_t next;
lblock = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
next = ext4_ext_next_allocated_block(path);
ext_debug("cache gap(after): [%u:%u] %u",
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex),
block);
BUG_ON(next == lblock);
len = next - lblock;
} else {
BUG();
}
ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es);
if (es.es_len) {
/* Is there a delayed extent containing lblock? */
if (es.es_lblk <= lblock)
return;
len = min(es.es_lblk - lblock, len);
}
ext_debug(" -> %u:%u\n", lblock, len);
ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE);
}
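/*
* Example of the gap math above: with extents covering blocks 0..9
* and 50..59 and a lookup at block 20, the gap [10, 49] is derived
* from the left neighbour and ext4_ext_next_allocated_block(),
* clipped against any delayed extent inside it, and inserted as a
* hole so later lookups can skip the tree walk.
*/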
/*
* ext4_ext_rm_idx:
* removes index from the index block.
*/
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path, int depth)
{
int err;
ext4_fsblk_t leaf;
/* free index block */
depth--;
path = path + depth;
leaf = ext4_idx_pblock(path->p_idx);
if (unlikely(path->p_hdr->eh_entries == 0)) {
EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
return -EIO;
}
err = ext4_ext_get_access(handle, inode, path);
if (err)
return err;
if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
len *= sizeof(struct ext4_extent_idx);
memmove(path->p_idx, path->p_idx + 1, len);
}
le16_add_cpu(&path->p_hdr->eh_entries, -1);
err = ext4_ext_dirty(handle, inode, path);
if (err)
return err;
ext_debug("index is empty, remove it, free block %llu\n", leaf);
trace_ext4_ext_rm_idx(inode, leaf);
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
while (--depth >= 0) {
if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
break;
path--;
err = ext4_ext_get_access(handle, inode, path);
if (err)
break;
path->p_idx->ei_block = (path+1)->p_idx->ei_block;
err = ext4_ext_dirty(handle, inode, path);
if (err)
break;
}
return err;
}
/*
* ext4_ext_calc_credits_for_single_extent:
* This routine returns the max. credits needed to insert an extent
* into the extent tree.
* When passing the actual path, the caller should calculate credits
* under i_data_sem.
*/
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
struct ext4_ext_path *path)
{
if (path) {
int depth = ext_depth(inode);
int ret = 0;
/* probably there is space in leaf? */
if (le16_to_cpu(path[depth].p_hdr->eh_entries)
< le16_to_cpu(path[depth].p_hdr->eh_max)) {
/*
* There is some space in the leaf, no
* need to account for the leaf block credit
*
* bitmaps and block group descriptor blocks
* and other metadata blocks still need to be
* accounted.
*/
/* 1 bitmap, 1 block group descriptor */
ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
return ret;
}
}
return ext4_chunk_trans_blocks(inode, nrblocks);
}
/*
* How many index/leaf blocks need to change/allocate to add @extents extents?
*
* If we add a single extent, then in the worst case, each tree level's
* index/leaf may need to be changed in case of a tree split.
*
* If more extents are inserted, they could cause the whole tree split more
* than once, but this is really rare.
*/
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
int index;
int depth;
/* If we are converting the inline data, only one is needed here. */
if (ext4_has_inline_data(inode))
return 1;
depth = ext_depth(inode);
if (extents <= 1)
index = depth * 2;
else
index = depth * 3;
return index;
}
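/*
* Rough reading of the factors above: a single insert can force one
* split that touches an existing block plus a newly allocated block
* at each level, hence depth * 2; several inserts could conceivably
* split the tree once more, hence depth * 3. Deeper cascades are
* considered rare enough to ignore here.
*/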
static inline int get_default_free_blocks_flags(struct inode *inode)
{
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
else if (ext4_should_journal_data(inode))
return EXT4_FREE_BLOCKS_FORGET;
return 0;
}
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent *ex,
long long *partial_cluster,
ext4_lblk_t from, ext4_lblk_t to)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
ext4_fsblk_t pblk;
int flags = get_default_free_blocks_flags(inode);
/*
* For bigalloc file systems, we never free a partial cluster
* at the beginning of the extent. Instead, we make a note
* that we tried freeing the cluster, and check to see if we
* need to free it on a subsequent call to ext4_remove_blocks,
* or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
*/
flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
/*
* If we have a partial cluster, and it's different from the
* cluster of the last block, we need to explicitly free the
* partial cluster here.
*/
pblk = ext4_ext_pblock(ex) + ee_len - 1;
if (*partial_cluster > 0 &&
*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, *partial_cluster),
sbi->s_cluster_ratio, flags);
*partial_cluster = 0;
}
#ifdef EXTENTS_STATS
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
spin_lock(&sbi->s_ext_stats_lock);
sbi->s_ext_blocks += ee_len;
sbi->s_ext_extents++;
if (ee_len < sbi->s_ext_min)
sbi->s_ext_min = ee_len;
if (ee_len > sbi->s_ext_max)
sbi->s_ext_max = ee_len;
if (ext_depth(inode) > sbi->s_depth_max)
sbi->s_depth_max = ext_depth(inode);
spin_unlock(&sbi->s_ext_stats_lock);
}
#endif
if (from >= le32_to_cpu(ex->ee_block)
&& to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
/* tail removal */
ext4_lblk_t num;
long long first_cluster;
num = le32_to_cpu(ex->ee_block) + ee_len - from;
pblk = ext4_ext_pblock(ex) + ee_len - num;
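/*
 * Illustrative example: for an extent covering logical blocks
 * [100, 109] (ee_len == 10) with from == 105, num == 100 + 10 - 105
 * == 5 blocks are freed, starting at physical block
 * ext4_ext_pblock(ex) + 10 - 5, i.e. the last 5 blocks of the extent.
 */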
/*
* Usually we want to free partial cluster at the end of the
* extent, except for the situation when the cluster is still
* used by any other extent (partial_cluster is negative).
*/
if (*partial_cluster < 0 &&
*partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
ext_debug("free last %u blocks starting %llu partial %lld\n",
num, pblk, *partial_cluster);
ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
/*
* If the block range to be freed didn't start at the
* beginning of a cluster, and we removed the entire
* extent and the cluster is not used by any other extent,
* save the partial cluster here, since we might need to
* delete it if we determine that the truncate or punch hole
* operation has removed all of the blocks in the cluster.
* If that cluster is used by another extent, preserve its
* negative value so it isn't freed later on.
*
* If the whole extent wasn't freed, we've reached the
* start of the truncated/punched region and have finished
* removing blocks. If there's a partial cluster here it's
* shared with the remainder of the extent and is no longer
* a candidate for removal.
*/
if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
first_cluster = (long long) EXT4_B2C(sbi, pblk);
if (first_cluster != -*partial_cluster)
*partial_cluster = first_cluster;
} else {
*partial_cluster = 0;
}
} else
ext4_error(sbi->s_sb, "strange request: removal(2) "
"%u-%u from %u:%u\n",
from, to, le32_to_cpu(ex->ee_block), ee_len);
return 0;
}
/*
* ext4_ext_rm_leaf() removes the extents associated with the
* blocks appearing between "start" and "end". Both "start"
* and "end" must appear in the same extent or EIO is returned.
*
* @handle: The journal handle
* @inode: The file's inode
* @path: The path to the leaf
* @partial_cluster: The cluster which we'll have to free if all extents
* have been released from it. However, if this value is
* negative, it's a cluster just to the right of the
* punched region and it must not be freed.
* @start: The first block to remove
* @end: The last block to remove
*/
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
long long *partial_cluster,
ext4_lblk_t start, ext4_lblk_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int err = 0, correct_index = 0;
int depth = ext_depth(inode), credits;
struct ext4_extent_header *eh;
ext4_lblk_t a, b;
unsigned num;
ext4_lblk_t ex_ee_block;
unsigned short ex_ee_len;
unsigned unwritten = 0;
struct ext4_extent *ex;
ext4_fsblk_t pblk;
/* the header must be checked already in ext4_ext_remove_space() */
ext_debug("truncate since %u in leaf to %u\n", start, end);
if (!path[depth].p_hdr)
path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
eh = path[depth].p_hdr;
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
return -EIO;
}
/* find where to start removing */
ex = path[depth].p_ext;
if (!ex)
ex = EXT_LAST_EXTENT(eh);
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
while (ex >= EXT_FIRST_EXTENT(eh) &&
ex_ee_block + ex_ee_len > start) {
if (ext4_ext_is_unwritten(ex))
unwritten = 1;
else
unwritten = 0;
ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
unwritten, ex_ee_len);
path[depth].p_ext = ex;
a = ex_ee_block > start ? ex_ee_block : start;
b = ex_ee_block+ex_ee_len - 1 < end ?
ex_ee_block+ex_ee_len - 1 : end;
ext_debug(" border %u:%u\n", a, b);
/* If this extent is beyond the end of the hole, skip it */
if (end < ex_ee_block) {
/*
* We're going to skip this extent and move to another,
* so note that its first cluster is in use to avoid
* freeing it when removing blocks. Eventually, the
* right edge of the truncated/punched region will
* be just to the left.
*/
if (sbi->s_cluster_ratio > 1) {
pblk = ext4_ext_pblock(ex);
*partial_cluster =
-(long long) EXT4_B2C(sbi, pblk);
}
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
continue;
} else if (b != ex_ee_block + ex_ee_len - 1) {
EXT4_ERROR_INODE(inode,
"can not handle truncate %u:%u "
"on extent %u:%u",
start, end, ex_ee_block,
ex_ee_block + ex_ee_len - 1);
err = -EIO;
goto out;
} else if (a != ex_ee_block) {
/* remove tail of the extent */
num = a - ex_ee_block;
} else {
/* remove whole extent: excellent! */
num = 0;
}
/*
* 3 for leaf, sb, and inode plus 2 (bmap and group
* descriptor) for each block group; assume two block
* groups plus ex_ee_len/blocks_per_block_group for
* the worst case
*/
credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
if (ex == EXT_FIRST_EXTENT(eh)) {
correct_index = 1;
credits += (ext_depth(inode)) + 1;
}
credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
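/*
 * Illustrative total (values assumed): for a depth-1 tree, an extent
 * shorter than one block group, and the extent being first in its
 * leaf: 7 + 0 + (1 + 1) + EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) credits.
 */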
err = ext4_ext_truncate_extend_restart(handle, inode, credits);
if (err)
goto out;
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
a, b);
if (err)
goto out;
if (num == 0)
/* this extent is removed; mark slot entirely unused */
ext4_ext_store_pblock(ex, 0);
ex->ee_len = cpu_to_le16(num);
/*
* Do not mark unwritten if all the blocks in the
* extent have been removed.
*/
if (unwritten && num)
ext4_ext_mark_unwritten(ex);
/*
* If the extent was completely released,
* we need to remove it from the leaf
*/
if (num == 0) {
if (end != EXT_MAX_BLOCKS - 1) {
/*
* For hole punching, we need to scoot all the
* extents up when an extent is removed so that
* we don't have blank extents in the middle
*/
memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
sizeof(struct ext4_extent));
/* Now get rid of the one at the end */
memset(EXT_LAST_EXTENT(eh), 0,
sizeof(struct ext4_extent));
}
le16_add_cpu(&eh->eh_entries, -1);
}
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto out;
ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
ext4_ext_pblock(ex));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
}
if (correct_index && eh->eh_entries)
err = ext4_ext_correct_indexes(handle, inode, path);
/*
* If there's a partial cluster and at least one extent remains in
* the leaf, free the partial cluster if it isn't shared with the
* current extent. If it is shared with the current extent
* we zero partial_cluster because we've reached the start of the
* truncated/punched region and we're done removing blocks.
*/
if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, *partial_cluster),
sbi->s_cluster_ratio,
get_default_free_blocks_flags(inode));
}
*partial_cluster = 0;
}
/* if this leaf is free, then we should
* remove it from index block above */
if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
err = ext4_ext_rm_idx(handle, inode, path, depth);
out:
return err;
}
/*
* ext4_ext_more_to_rm:
* returns 1 if current index has to be freed (even partial)
*/
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
BUG_ON(path->p_idx == NULL);
if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
return 0;
/*
* if truncate on deeper level happened, it wasn't partial,
* so we have to consider current index for truncation
*/
if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
return 0;
return 1;
}
int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
ext4_lblk_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int depth = ext_depth(inode);
struct ext4_ext_path *path = NULL;
long long partial_cluster = 0;
handle_t *handle;
int i = 0, err = 0;
ext_debug("truncate since %u to %u\n", start, end);
/* probably the first extent we're going to free will be the last in the block */
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
again:
trace_ext4_ext_remove_space(inode, start, end, depth);
/*
* Check if we are removing extents inside the extent tree. If that
* is the case, we are going to punch a hole inside the extent tree
* so we have to check whether we need to split the extent covering
* the last block to remove so we can easily remove the part of it
* in ext4_ext_rm_leaf().
*/
if (end < EXT_MAX_BLOCKS - 1) {
struct ext4_extent *ex;
ext4_lblk_t ee_block, ex_end, lblk;
ext4_fsblk_t pblk;
/* find extent for or closest extent to this block */
path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
return PTR_ERR(path);
}
depth = ext_depth(inode);
/* A missing leaf is possible only if the inode has no blocks at all */
ex = path[depth].p_ext;
if (!ex) {
if (depth) {
EXT4_ERROR_INODE(inode,
"path[%d].p_hdr == NULL",
depth);
err = -EIO;
}
goto out;
}
ee_block = le32_to_cpu(ex->ee_block);
ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
/*
* See if the last block is inside the extent, if so split
* the extent at 'end' block so we can easily remove the
* tail of the first part of the split extent in
* ext4_ext_rm_leaf().
*/
if (end >= ee_block && end < ex_end) {
/*
* If we're going to split the extent, note that
* the cluster containing the block after 'end' is
* in use to avoid freeing it when removing blocks.
*/
if (sbi->s_cluster_ratio > 1) {
pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
partial_cluster =
-(long long) EXT4_B2C(sbi, pblk);
}
/*
* Split the extent in two so that 'end' is the last
* block in the first new extent. Also we should not
* fail removing space due to ENOSPC so try to use
* reserved block if that happens.
*/
err = ext4_force_split_extent_at(handle, inode, &path,
end + 1, 1);
if (err < 0)
goto out;
} else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
/*
* If there's an extent to the right its first cluster
* contains the immediate right boundary of the
* truncated/punched region. Set partial_cluster to
* its negative value so it won't be freed if shared
* with the current extent. The end < ee_block case
* is handled in ext4_ext_rm_leaf().
*/
lblk = ex_end + 1;
err = ext4_ext_search_right(inode, path, &lblk, &pblk,
&ex);
if (err)
goto out;
if (pblk)
partial_cluster =
-(long long) EXT4_B2C(sbi, pblk);
}
}
/*
* We start scanning from the right side, freeing all the blocks
* after i_size and walking into the tree depth-wise.
*/
depth = ext_depth(inode);
if (path) {
int k = i = depth;
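/*
 * Prime p_block at each interior level so that
 * ext4_ext_more_to_rm() (which compares eh_entries against
 * p_block) reports more work at every level of the reused path.
 */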
while (--k > 0)
path[k].p_block =
le16_to_cpu(path[k].p_hdr->eh_entries)+1;
} else {
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
GFP_NOFS);
if (path == NULL) {
ext4_journal_stop(handle);
return -ENOMEM;
}
path[0].p_maxdepth = path[0].p_depth = depth;
path[0].p_hdr = ext_inode_hdr(inode);
i = 0;
if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
err = -EIO;
goto out;
}
}
err = 0;
while (i >= 0 && err == 0) {
if (i == depth) {
/* this is leaf block */
err = ext4_ext_rm_leaf(handle, inode, path,
&partial_cluster, start,
end);
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
continue;
}
/* this is index block */
if (!path[i].p_hdr) {
ext_debug("initialize header\n");
path[i].p_hdr = ext_block_hdr(path[i].p_bh);
}
if (!path[i].p_idx) {
/* this level hasn't been touched yet */
path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
ext_debug("init index ptr: hdr 0x%p, num %d\n",
path[i].p_hdr,
le16_to_cpu(path[i].p_hdr->eh_entries));
} else {
/* we were already here, move on to the next index */
path[i].p_idx--;
}
ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
i, EXT_FIRST_INDEX(path[i].p_hdr),
path[i].p_idx);
if (ext4_ext_more_to_rm(path + i)) {
struct buffer_head *bh;
/* go to the next level */
ext_debug("move to level %d (block %llu)\n",
i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
bh = read_extent_tree_block(inode,
ext4_idx_pblock(path[i].p_idx), depth - i - 1,
EXT4_EX_NOCACHE);
if (IS_ERR(bh)) {
/* should we reset i_size? */
err = PTR_ERR(bh);
break;
}
/* Yield here to deal with large extent trees.
* Should be a no-op if we did IO above. */
cond_resched();
if (WARN_ON(i + 1 > depth)) {
err = -EIO;
break;
}
path[i + 1].p_bh = bh;
/* save the actual number of indexes since this
* number changes at the next iteration */
path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
i++;
} else {
/* we finished processing this index, go up */
if (path[i].p_hdr->eh_entries == 0 && i > 0) {
/* index is empty, remove it;
* the handle must already be prepared by the
* truncatei_leaf() */
err = ext4_ext_rm_idx(handle, inode, path, i);
}
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
ext_debug("return to level %d\n", i);
}
}
trace_ext4_ext_remove_space_done(inode, start, end, depth,
partial_cluster, path->p_hdr->eh_entries);
/*
* If we still have something in the partial cluster and we have removed
* even the first extent, then we should free the blocks in the partial
* cluster as well. (This code will only run when there are no leaves
* to the immediate left of the truncated/punched region.)
*/
if (partial_cluster > 0 && err == 0) {
/* don't zero partial_cluster since it's not used afterwards */
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, partial_cluster),
sbi->s_cluster_ratio,
get_default_free_blocks_flags(inode));
}
/* TODO: flexible tree reduction should be here */
if (path->p_hdr->eh_entries == 0) {
/*
* truncate to zero freed all the tree,
* so we need to correct eh_depth
*/
err = ext4_ext_get_access(handle, inode, path);
if (err == 0) {
ext_inode_hdr(inode)->eh_depth = 0;
ext_inode_hdr(inode)->eh_max =
cpu_to_le16(ext4_ext_space_root(inode, 0));
err = ext4_ext_dirty(handle, inode, path);
}
}
out:
ext4_ext_drop_refs(path);
kfree(path);
path = NULL;
if (err == -EAGAIN)
goto again;
ext4_journal_stop(handle);
return err;
}
/*
* called at mount time
*/
void ext4_ext_init(struct super_block *sb)
{
/*
* possible initialization would be here
*/
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
printk(KERN_INFO "EXT4-fs: file extents enabled"
#ifdef AGGRESSIVE_TEST
", aggressive tests"
#endif
#ifdef CHECK_BINSEARCH
", check binsearch"
#endif
#ifdef EXTENTS_STATS
", stats"
#endif
"\n");
#endif
#ifdef EXTENTS_STATS
spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
EXT4_SB(sb)->s_ext_min = 1 << 30;
EXT4_SB(sb)->s_ext_max = 0;
#endif
}
}
/*
* called at umount time
*/
void ext4_ext_release(struct super_block *sb)
{
if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
return;
#ifdef EXTENTS_STATS
if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
struct ext4_sb_info *sbi = EXT4_SB(sb);
printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
sbi->s_ext_blocks, sbi->s_ext_extents,
sbi->s_ext_blocks / sbi->s_ext_extents);
printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
}
#endif
}
static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
ext4_lblk_t ee_block;
ext4_fsblk_t ee_pblock;
unsigned int ee_len;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
if (ee_len == 0)
return 0;
return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
EXTENT_STATUS_WRITTEN);
}
/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
ext4_fsblk_t ee_pblock;
unsigned int ee_len;
int ret;
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
if (ret > 0)
ret = 0;
return ret;
}
/*
* ext4_split_extent_at() splits an extent at given block.
*
* @handle: the journal handle
* @inode: the file inode
* @path: the path to the extent
* @split: the logical block where the extent is split.
* @split_flags: indicates if the extent could be zeroed out if the split
* fails, and the states (initialized or unwritten) of the new extents.
* @flags: flags used to insert the new extent into the extent tree.
*
* Splits extent [a, b] into two extents [a, @split) and [@split, b], states
* of which are determined by split_flag.
*
* There are two cases:
* a> the extent is split into two extents.
* b> no split is needed, and the extent is just marked.
*
* return 0 on success.
*/
static int ext4_split_extent_at(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
ext4_lblk_t split,
int split_flag,
int flags)
{
struct ext4_ext_path *path = *ppath;
ext4_fsblk_t newblock;
ext4_lblk_t ee_block;
struct ext4_extent *ex, newex, orig_ex, zero_ex;
struct ext4_extent *ex2 = NULL;
unsigned int ee_len, depth;
int err = 0;
BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
(EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
ext_debug("ext4_split_extents_at: inode %lu, logical"
"block %llu\n", inode->i_ino, (unsigned long long)split);
ext4_ext_show_leaf(inode, path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
newblock = split - ee_block + ext4_ext_pblock(ex);
BUG_ON(split < ee_block || split >= (ee_block + ee_len));
BUG_ON(!ext4_ext_is_unwritten(ex) &&
split_flag & (EXT4_EXT_MAY_ZEROOUT |
EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
if (split == ee_block) {
/*
* case b: block @split is the block that the extent begins with
* then we just change the state of the extent, and splitting
* is not needed.
*/
if (split_flag & EXT4_EXT_MARK_UNWRIT2)
ext4_ext_mark_unwritten(ex);
else
ext4_ext_mark_initialized(ex);
if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
goto out;
}
/* case a */
memcpy(&orig_ex, ex, sizeof(orig_ex));
ex->ee_len = cpu_to_le16(split - ee_block);
if (split_flag & EXT4_EXT_MARK_UNWRIT1)
ext4_ext_mark_unwritten(ex);
/*
* path may lead to new leaf, not to original leaf any more
* after ext4_ext_insert_extent() returns,
*/
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto fix_extent_len;
ex2 = &newex;
ex2->ee_block = cpu_to_le32(split);
ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
ext4_ext_store_pblock(ex2, newblock);
if (split_flag & EXT4_EXT_MARK_UNWRIT2)
ext4_ext_mark_unwritten(ex2);
err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
if (split_flag & EXT4_EXT_DATA_VALID1) {
err = ext4_ext_zeroout(inode, ex2);
zero_ex.ee_block = ex2->ee_block;
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(ex2));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex2));
} else {
err = ext4_ext_zeroout(inode, ex);
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(ex));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex));
}
} else {
err = ext4_ext_zeroout(inode, &orig_ex);
zero_ex.ee_block = orig_ex.ee_block;
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(&orig_ex));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(&orig_ex));
}
if (err)
goto fix_extent_len;
/* update the extent length and mark as initialized */
ex->ee_len = cpu_to_le16(ee_len);
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
if (err)
goto fix_extent_len;
/* update extent status tree */
err = ext4_zeroout_es(inode, &zero_ex);
goto out;
} else if (err)
goto fix_extent_len;
out:
ext4_ext_show_leaf(inode, path);
return err;
fix_extent_len:
ex->ee_len = orig_ex.ee_len;
ext4_ext_dirty(handle, inode, path + path->p_depth);
return err;
}
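/*
 * Worked example (illustrative only): splitting an extent covering
 * logical blocks [100, 119] at split == 110 yields [100, 109] kept in
 * 'ex' (ee_len trimmed to 10) and a new extent [110, 119] starting at
 * physical block ext4_ext_pblock(ex) + 10, as computed for 'newblock'
 * above.
 */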
/*
* ext4_split_extent() splits an extent and marks the extent which is covered
* by @map as split_flag indicates
*
* It may result in splitting the extent into multiple extents (up to three)
* There are three possibilities:
* a> There is no split required
* b> Splits into two extents: Split is happening at either end of the extent
* c> Splits into three extents: Someone is splitting in the middle of the extent
*
*/
static int ext4_split_extent(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
struct ext4_map_blocks *map,
int split_flag,
int flags)
{
struct ext4_ext_path *path = *ppath;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len, depth;
int err = 0;
int unwritten;
int split_flag1, flags1;
int allocated = map->m_len;
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
unwritten = ext4_ext_is_unwritten(ex);
if (map->m_lblk + map->m_len < ee_block + ee_len) {
split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
if (unwritten)
split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
if (split_flag & EXT4_EXT_DATA_VALID2)
split_flag1 |= EXT4_EXT_DATA_VALID1;
err = ext4_split_extent_at(handle, inode, ppath,
map->m_lblk + map->m_len, split_flag1, flags1);
if (err)
goto out;
} else {
allocated = ee_len - (map->m_lblk - ee_block);
}
/*
* Update path is required because previous ext4_split_extent_at() may
* result in split of original leaf or extent zeroout.
*/
path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
if (!ex) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) map->m_lblk);
return -EIO;
}
unwritten = ext4_ext_is_unwritten(ex);
split_flag1 = 0;
if (map->m_lblk >= ee_block) {
split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
if (unwritten) {
split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
EXT4_EXT_MARK_UNWRIT2);
}
err = ext4_split_extent_at(handle, inode, ppath,
map->m_lblk, split_flag1, flags);
if (err)
goto out;
}
ext4_ext_show_leaf(inode, path);
out:
return err ? err : allocated;
}
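/*
 * Illustrative flow: for a map landing strictly inside the extent
 * (case c above), the first ext4_split_extent_at() call cuts at
 * map->m_lblk + map->m_len (the right boundary) and the second cuts
 * at map->m_lblk (the left boundary), producing up to three extents.
 */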
/*
* This function is called by ext4_ext_map_blocks() if someone tries to write
* to an unwritten extent. It may result in splitting the unwritten
* extent into multiple extents (up to three - one initialized and two
* unwritten).
* There are three possibilities:
* a> There is no split required: Entire extent should be initialized
* b> Splits in two extents: Write is happening at either end of the extent
* c> Splits into three extents: Someone is writing in the middle of the extent
*
* Pre-conditions:
* - The extent pointed to by 'path' is unwritten.
* - The extent pointed to by 'path' contains a superset
* of the logical span [map->m_lblk, map->m_lblk + map->m_len).
*
* Post-conditions on success:
* - the returned value is the number of blocks beyond map->m_lblk
* that are allocated and initialized.
* It is guaranteed to be >= map->m_len.
*/
static int ext4_ext_convert_to_initialized(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath,
int flags)
{
struct ext4_ext_path *path = *ppath;
struct ext4_sb_info *sbi;
struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
struct ext4_extent zero_ex;
struct ext4_extent *ex, *abut_ex;
ext4_lblk_t ee_block, eof_block;
unsigned int ee_len, depth, map_len = map->m_len;
int allocated = 0, max_zeroout = 0;
int err = 0;
int split_flag = 0;
ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)map->m_lblk, map_len);
sbi = EXT4_SB(inode->i_sb);
eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map_len)
eof_block = map->m_lblk + map_len;
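/*
 * eof_block is the ceiling of i_size in blocks; e.g. (values assumed)
 * i_size == 10000 with a 4KiB block size gives
 * (10000 + 4095) >> 12 == 3.
 */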
depth = ext_depth(inode);
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
zero_ex.ee_len = 0;
trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
/* Pre-conditions */
BUG_ON(!ext4_ext_is_unwritten(ex));
BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
/*
* Attempt to transfer newly initialized blocks from the currently
* unwritten extent to its neighbor. This is much cheaper
* than an insertion followed by a merge as those involve costly
* memmove() calls. Transferring to the left is the common case in
* steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
* followed by append writes.
*
* Limitations of the current logic:
* - L1: we do not deal with writes covering the whole extent.
* This would require removing the extent if the transfer
* is possible.
* - L2: we only attempt to merge with an extent stored in the
* same extent tree node.
*/
if ((map->m_lblk == ee_block) &&
/* See if we can merge left */
(map_len < ee_len) && /*L1*/
(ex > EXT_FIRST_EXTENT(eh))) { /*L2*/
ext4_lblk_t prev_lblk;
ext4_fsblk_t prev_pblk, ee_pblk;
unsigned int prev_len;
abut_ex = ex - 1;
prev_lblk = le32_to_cpu(abut_ex->ee_block);
prev_len = ext4_ext_get_actual_len(abut_ex);
prev_pblk = ext4_ext_pblock(abut_ex);
ee_pblk = ext4_ext_pblock(ex);
/*
* A transfer of blocks from 'ex' to 'abut_ex' is allowed
* upon those conditions:
* - C1: abut_ex is initialized,
* - C2: abut_ex is logically abutting ex,
* - C3: abut_ex is physically abutting ex,
* - C4: abut_ex can receive the additional blocks without
* overflowing the (initialized) length limit.
*/
if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
((prev_lblk + prev_len) == ee_block) && /*C2*/
((prev_pblk + prev_len) == ee_pblk) && /*C3*/
(prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
/* Shift the start of ex by 'map_len' blocks */
ex->ee_block = cpu_to_le32(ee_block + map_len);
ext4_ext_store_pblock(ex, ee_pblk + map_len);
ex->ee_len = cpu_to_le16(ee_len - map_len);
ext4_ext_mark_unwritten(ex); /* Restore the flag */
/* Extend abut_ex by 'map_len' blocks */
abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
/* Result: number of initialized blocks past m_lblk */
allocated = map_len;
}
} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
(map_len < ee_len) && /*L1*/
ex < EXT_LAST_EXTENT(eh)) { /*L2*/
/* See if we can merge right */
ext4_lblk_t next_lblk;
ext4_fsblk_t next_pblk, ee_pblk;
unsigned int next_len;
abut_ex = ex + 1;
next_lblk = le32_to_cpu(abut_ex->ee_block);
next_len = ext4_ext_get_actual_len(abut_ex);
next_pblk = ext4_ext_pblock(abut_ex);
ee_pblk = ext4_ext_pblock(ex);
/*
* A transfer of blocks from 'ex' to 'abut_ex' is allowed
* upon those conditions:
* - C1: abut_ex is initialized,
* - C2: abut_ex is logically abutting ex,
* - C3: abut_ex is physically abutting ex,
* - C4: abut_ex can receive the additional blocks without
* overflowing the (initialized) length limit.
*/
if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
((map->m_lblk + map_len) == next_lblk) && /*C2*/
((ee_pblk + ee_len) == next_pblk) && /*C3*/
(next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
/* Shift the start of abut_ex by 'map_len' blocks */
abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
ex->ee_len = cpu_to_le16(ee_len - map_len);
ext4_ext_mark_unwritten(ex); /* Restore the flag */
/* Extend abut_ex by 'map_len' blocks */
abut_ex->ee_len = cpu_to_le16(next_len + map_len);
/* Result: number of initialized blocks past m_lblk */
allocated = map_len;
}
}
if (allocated) {
/* Mark the block containing both extents as dirty */
ext4_ext_dirty(handle, inode, path + depth);
/* Update path to point to the right extent */
path[depth].p_ext = abut_ex;
goto out;
} else
allocated = ee_len - (map->m_lblk - ee_block);
WARN_ON(map->m_lblk < ee_block);
/*
* It is safe to convert extent to initialized via explicit
* zeroout only if extent is fully inside i_size or new_size.
*/
split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
if (EXT4_EXT_MAY_ZEROOUT & split_flag)
max_zeroout = sbi->s_extent_max_zeroout_kb >>
(inode->i_sb->s_blocksize_bits - 10);
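/*
 * Illustrative arithmetic (defaults assumed): with 4KiB blocks
 * (s_blocksize_bits == 12) and s_extent_max_zeroout_kb == 32,
 * max_zeroout == 32 >> (12 - 10) == 8 blocks, so unwritten extents
 * of up to 8 blocks are zeroed in place rather than split.
 */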
/* If extent is less than s_max_zeroout_kb, zeroout directly */
if (max_zeroout && (ee_len <= max_zeroout)) {
err = ext4_ext_zeroout(inode, ex);
if (err)
goto out;
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
ext4_ext_mark_initialized(ex);
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
goto out;
}
/*
* four cases:
* 1. split the extent into three extents.
* 2. split the extent into two extents, zeroout the first half.
* 3. split the extent into two extents, zeroout the second half.
* 4. split the extent into two extents without zeroout.
*/
split_map.m_lblk = map->m_lblk;
split_map.m_len = map->m_len;
if (max_zeroout && (allocated > map->m_len)) {
if (allocated <= max_zeroout) {
/* case 3 */
zero_ex.ee_block =
cpu_to_le32(map->m_lblk);
zero_ex.ee_len = cpu_to_le16(allocated);
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex) + map->m_lblk - ee_block);
err = ext4_ext_zeroout(inode, &zero_ex);
if (err)
goto out;
split_map.m_lblk = map->m_lblk;
split_map.m_len = allocated;
} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
/* case 2 */
if (map->m_lblk != ee_block) {
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = cpu_to_le16(map->m_lblk -
ee_block);
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex));
err = ext4_ext_zeroout(inode, &zero_ex);
if (err)
goto out;
}
split_map.m_lblk = ee_block;
split_map.m_len = map->m_lblk - ee_block + map->m_len;
allocated = map->m_len;
}
}
err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
flags);
if (err > 0)
err = 0;
out:
/* If we have gotten a failure, don't zero out status tree */
if (!err)
err = ext4_zeroout_es(inode, &zero_ex);
return err ? err : allocated;
}
/*
* This function is called by ext4_ext_map_blocks() from
* ext4_get_blocks_dio_write() when DIO writes
* to an unwritten extent.
*
* Writing to an unwritten extent may result in splitting the unwritten
* extent into multiple initialized/unwritten extents (up to three)
* There are three possibilities:
* a> There is no split required: Entire extent should be unwritten
* b> Splits in two extents: Write is happening at either end of the extent
* c> Splits in three extents: Somone is writing in middle of the extent
*
* This works the same way in the case of initialized -> unwritten conversion.
*
* One or more index blocks may be needed if the extent tree grows after
* the unwritten extent is split. To prevent ENOSPC from occurring at IO
* completion, we need to split the unwritten extent before DIO submits
* the IO. The unwritten extent will be split
* into three unwritten extents (at most). After IO completes, the part
* being filled will be converted to initialized by the end_io callback
* via ext4_convert_unwritten_extents().
*
* Returns the size of the unwritten extent to be written on success.
*/
static int ext4_split_convert_extents(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath,
int flags)
{
struct ext4_ext_path *path = *ppath;
ext4_lblk_t eof_block;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len;
int split_flag = 0, depth;
ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
__func__, inode->i_ino,
(unsigned long long)map->m_lblk, map->m_len);
eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
eof_block = map->m_lblk + map->m_len;
/*
* It is safe to convert extent to initialized via explicit
* zeroout only if the extent is fully inside i_size or new_size.
*/
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
/* Convert to unwritten */
if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
split_flag |= EXT4_EXT_DATA_VALID1;
/* Convert to initialized */
} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
split_flag |= ee_block + ee_len <= eof_block ?
EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
}
flags |= EXT4_GET_BLOCKS_PRE_IO;
return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath)
{
struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
int depth;
int err = 0;
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)ee_block, ee_len);
/* If the extent is larger than requested, it is a clear sign that we still
* have some extent state machine issues left. So extent_split is still
* required.
* TODO: Once all related issues are fixed, this situation should be
* illegal.
*/
if (ee_block != map->m_lblk || ee_len > map->m_len) {
#ifdef EXT4_DEBUG
ext4_warning("Inode (%ld) finished: extent logical block %llu,"
" len %u; IO logical block %llu, len %u\n",
inode->i_ino, (unsigned long long)ee_block, ee_len,
(unsigned long long)map->m_lblk, map->m_len);
#endif
err = ext4_split_convert_extents(handle, inode, map, ppath,
EXT4_GET_BLOCKS_CONVERT);
if (err < 0)
return err;
path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
/* first mark the extent as initialized */
ext4_ext_mark_initialized(ex);
/* note: ext4_ext_correct_indexes() isn't needed here because
* borders are not changed
*/
ext4_ext_try_to_merge(handle, inode, path, ex);
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
ext4_ext_show_leaf(inode, path);
return err;
}
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
sector_t block, int count)
{
int i;
for (i = 0; i < count; i++)
unmap_underlying_metadata(bdev, block + i);
}
/*
* Handle EOFBLOCKS_FL flag, clearing it if necessary
*/
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
ext4_lblk_t lblk,
struct ext4_ext_path *path,
unsigned int len)
{
int i, depth;
struct ext4_extent_header *eh;
struct ext4_extent *last_ex;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
return 0;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
/*
* We're going to remove EOFBLOCKS_FL entirely in future so we
* do not care for this case anymore. Simply remove the flag
* if there are no extents.
*/
if (unlikely(!eh->eh_entries))
goto out;
last_ex = EXT_LAST_EXTENT(eh);
/*
* We should clear the EOFBLOCKS_FL flag if we are writing the
* last block in the last extent in the file. We test this by
* first checking to see if the caller to
* ext4_ext_get_blocks() was interested in the last block (or
* a block beyond the last block) in the current extent. If
* this turns out to be false, we can bail out from this
* function immediately.
*/
if (lblk + len < le32_to_cpu(last_ex->ee_block) +
ext4_ext_get_actual_len(last_ex))
return 0;
/*
* If the caller does appear to be planning to write at or
* beyond the end of the current extent, we then test to see
* if the current extent is the last extent in the file, by
* checking to make sure it was reached via the rightmost node
* at each level of the tree.
*/
for (i = depth-1; i >= 0; i--)
if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
return 0;
out:
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
return ext4_mark_inode_dirty(handle, inode);
}
/**
* ext4_find_delalloc_range: find delayed allocated block in the given range.
*
* Return 1 if there is a delalloc block in the range, otherwise 0.
*/
int ext4_find_delalloc_range(struct inode *inode,
ext4_lblk_t lblk_start,
ext4_lblk_t lblk_end)
{
struct extent_status es;
ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
if (es.es_len == 0)
return 0; /* there is no delay extent in this tree */
else if (es.es_lblk <= lblk_start &&
lblk_start < es.es_lblk + es.es_len)
return 1;
else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
return 1;
else
return 0;
}
int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t lblk_start, lblk_end;
lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
}
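/*
 * Illustrative example (ratio assumed): with s_cluster_ratio == 8 and
 * lblk == 21, EXT4_LBLK_CMASK() gives lblk_start == 16 and lblk_end ==
 * 23, i.e. the whole cluster containing lblk is scanned for delalloc
 * blocks.
 */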
/**
* Determines how many complete clusters (out of those specified by the 'map')
* are under delalloc and for which quota was reserved.
* This function is called when we are writing out the blocks that were
* originally written with their allocation delayed, but then the space was
* allocated using fallocate() before the delayed allocation could be resolved.
* The cases to look for are:
* ('=' indicates delayed allocated blocks
* '-' indicates non-delayed allocated blocks)
* (a) partial clusters towards beginning and/or end outside of allocated range
* are not delalloc'ed.
* Ex:
* |----c---=|====c====|====c====|===-c----|
* |++++++ allocated ++++++|
* ==> 4 complete clusters in above example
*
* (b) partial cluster (outside of allocated range) towards either end is
* marked for delayed allocation. In this case, we will exclude that
* cluster.
* Ex:
* |----====c========|========c========|
* |++++++ allocated ++++++|
* ==> 1 complete cluster in above example
*
* Ex:
* |================c================|
* |++++++ allocated ++++++|
* ==> 0 complete clusters in above example
*
* ext4_da_update_reserve_space() will be called only if we
* determine here that there were some "entire" clusters that span
* this 'allocated' range.
* In the non-bigalloc case, this function will just end up returning num_blks
* without ever calling ext4_find_delalloc_range.
*/
static unsigned int
get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
unsigned int num_blks)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
ext4_lblk_t lblk_from, lblk_to, c_offset;
unsigned int allocated_clusters = 0;
alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
/* max possible clusters for this allocation */
allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
/* Check towards left side */
c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
if (c_offset) {
lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
lblk_to = lblk_from + c_offset - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
allocated_clusters--;
}
/* Now check towards right. */
c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
if (allocated_clusters && c_offset) {
lblk_from = lblk_start + num_blks;
lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
allocated_clusters--;
}
return allocated_clusters;
}
static int
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath, int flags,
unsigned int allocated, ext4_fsblk_t newblock)
{
struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
int depth;
int err = 0;
/*
* Make sure that the extent is no bigger than we support with
* an unwritten extent
*/
if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
ext_debug("%s: inode %lu, logical"
"block %llu, max_blocks %u\n", __func__, inode->i_ino,
(unsigned long long)ee_block, ee_len);
if (ee_block != map->m_lblk || ee_len > map->m_len) {
err = ext4_split_convert_extents(handle, inode, map, ppath,
EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
if (err < 0)
return err;
path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
if (!ex) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) map->m_lblk);
return -EIO;
}
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
return err;
/* first mark the extent as unwritten */
ext4_ext_mark_unwritten(ex);
/* note: ext4_ext_correct_indexes() isn't needed here because
* borders are not changed
*/
ext4_ext_try_to_merge(handle, inode, path, ex);
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
if (err)
return err;
ext4_ext_show_leaf(inode, path);
ext4_update_inode_fsync_trans(handle, inode, 1);
err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
if (err)
return err;
map->m_flags |= EXT4_MAP_UNWRITTEN;
if (allocated > map->m_len)
allocated = map->m_len;
map->m_len = allocated;
return allocated;
}
static int
ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath, int flags,
unsigned int allocated, ext4_fsblk_t newblock)
{
struct ext4_ext_path *path = *ppath;
int ret = 0;
int err = 0;
ext4_io_end_t *io = ext4_inode_aio(inode);
ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
"block %llu, max_blocks %u, flags %x, allocated %u\n",
inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
flags, allocated);
ext4_ext_show_leaf(inode, path);
/*
* When writing into unwritten space, we should not fail to
* allocate metadata blocks for the new extent block if needed.
*/
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
allocated, newblock);
/* get_block() called before submitting the IO, split the extent */
if (flags & EXT4_GET_BLOCKS_PRE_IO) {
ret = ext4_split_convert_extents(handle, inode, map, ppath,
flags | EXT4_GET_BLOCKS_CONVERT);
if (ret <= 0)
goto out;
/*
* Flag the inode (non-aio case) or end_io struct (aio case)
* to indicate that this IO needs conversion to written when IO is
* completed
*/
if (io)
ext4_set_io_unwritten_flag(inode, io);
else
ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out;
}
/* IO end_io completed, convert the filled extent to written */
if (flags & EXT4_GET_BLOCKS_CONVERT) {
ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
ppath);
if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
err = check_eofblocks_fl(handle, inode, map->m_lblk,
path, map->m_len);
} else
err = ret;
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = newblock;
if (allocated > map->m_len)
allocated = map->m_len;
map->m_len = allocated;
goto out2;
}
/* buffered IO case */
/*
* repeat fallocate creation request
* we already have an unwritten extent
*/
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto map_out;
}
/* buffered READ or buffered write_begin() lookup */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* We have blocks reserved already. We
* return allocated blocks so that delalloc
* won't do block reservation for us. But
* the buffer head will be unmapped so that
* a read from the block returns 0s.
*/
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out1;
}
/* buffered write, writepage time, convert */
ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
out:
if (ret <= 0) {
err = ret;
goto out2;
} else
allocated = ret;
map->m_flags |= EXT4_MAP_NEW;
/*
* if we allocated more blocks than requested
* we need to make sure we unmap the extra blocks
* allocated. The actually needed blocks will get
* unmapped later when we find the buffer_head marked
* new.
*/
if (allocated > map->m_len) {
unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
newblock + map->m_len,
allocated - map->m_len);
allocated = map->m_len;
}
map->m_len = allocated;
/*
* If we have done fallocate with the offset that is already
* delayed allocated, we would have block reservation
* and quota reservation done in the delayed write path.
* But fallocate would have already updated quota and block
* count for this offset. So cancel these reservations
*/
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
unsigned int reserved_clusters;
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, map->m_len);
if (reserved_clusters)
ext4_da_update_reserve_space(inode,
reserved_clusters,
0);
}
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
map->m_len);
if (err < 0)
goto out2;
}
out1:
if (allocated > map->m_len)
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
map->m_pblk = newblock;
map->m_len = allocated;
out2:
return err ? err : allocated;
}
/*
* get_implied_cluster_alloc - check to see if the requested
* allocation (in the map structure) overlaps with a cluster already
* allocated in an extent.
* @sb The filesystem superblock structure
* @map The requested lblk->pblk mapping
* @ex The extent structure which might contain an implied
* cluster allocation
*
* This function is called by ext4_ext_map_blocks() after we failed to
* find blocks that were already in the inode's extent tree. Hence,
* we know that the beginning of the requested region cannot overlap
* the extent from the inode's extent tree. There are three cases we
* want to catch. The first is this case:
*
* |--- cluster # N--|
* |--- extent ---| |---- requested region ---|
* |==========|
*
* The second case that we need to test for is this one:
*
* |--------- cluster # N ----------------|
* |--- requested region --| |------- extent ----|
* |=======================|
*
* The third case is when the requested region lies between two extents
* within the same cluster:
* |------------- cluster # N-------------|
* |----- ex -----| |---- ex_right ----|
* |------ requested region ------|
* |================|
*
* In each of the above cases, we need to set map->m_pblk and
* map->m_len so they correspond to the extent labelled
* "|====|" from cluster #N, since it is already in use for data in
* cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
* signal to ext4_ext_map_blocks() that map->m_pblk should be treated
* as a new "allocated" block region. Otherwise, we will return 0 and
* ext4_ext_map_blocks() will then allocate one or more new clusters
* by calling ext4_mb_new_blocks().
*/
static int get_implied_cluster_alloc(struct super_block *sb,
struct ext4_map_blocks *map,
struct ext4_extent *ex,
struct ext4_ext_path *path)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ext4_lblk_t ex_cluster_start, ex_cluster_end;
ext4_lblk_t rr_cluster_start;
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
/* The extent passed in that we are trying to match */
ex_cluster_start = EXT4_B2C(sbi, ee_block);
ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
/* The requested region passed into ext4_map_blocks() */
rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
if ((rr_cluster_start == ex_cluster_end) ||
(rr_cluster_start == ex_cluster_start)) {
if (rr_cluster_start == ex_cluster_end)
ee_start += ee_len - 1;
map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
map->m_len = min(map->m_len,
(unsigned) sbi->s_cluster_ratio - c_offset);
/*
* Check for and handle this case:
*
* |--------- cluster # N-------------|
* |------- extent ----|
* |--- requested region ---|
* |===========|
*/
if (map->m_lblk < ee_block)
map->m_len = min(map->m_len, ee_block - map->m_lblk);
/*
* Check for the case where there is already another allocated
* block to the right of 'ex' but before the end of the cluster.
*
* |------------- cluster # N-------------|
* |----- ex -----| |---- ex_right ----|
* |------ requested region ------|
* |================|
*/
if (map->m_lblk > ee_block) {
ext4_lblk_t next = ext4_ext_next_allocated_block(path);
map->m_len = min(map->m_len, next - map->m_lblk);
}
trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
return 1;
}
trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
return 0;
}
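/*
 * Worked example (values assumed): with s_cluster_ratio == 4 and
 * map->m_lblk == 10, c_offset == 2. If the matched extent ends in the
 * requested cluster at physical block ee_start, map->m_pblk becomes
 * EXT4_PBLK_CMASK(sbi, ee_start) + 2, i.e. the block at the same
 * offset within the already-allocated cluster.
 */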
/*
* Block allocation/map/preallocation routine for extents based files
*
*
* Need to be called with
* down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
* (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
*
* return > 0, number of blocks already mapped/allocated
* if create == 0 and these are pre-allocated blocks
* buffer head is unmapped
* otherwise blocks are mapped
*
* return = 0, if plain look up failed (blocks have not been allocated)
* buffer head is unmapped
*
* return < 0, error case.
*/
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct ext4_ext_path *path = NULL;
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0;
int free_on_err = 0, err = 0, depth, ret;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
ext4_io_end_t *io = ext4_inode_aio(inode);
ext4_lblk_t cluster_offset;
int set_unwritten = 0;
bool map_from_cluster = false;
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
goto out2;
}
depth = ext_depth(inode);
/*
* a consistent leaf must not be empty;
* this situation is possible, though, _during_ tree modification;
* this is why the assert can't be put in ext4_find_extent()
*/
if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
EXT4_ERROR_INODE(inode, "bad extent address "
"lblock: %lu, depth: %d pblock %lld",
(unsigned long) map->m_lblk, depth,
path[depth].p_block);
err = -EIO;
goto out2;
}
ex = path[depth].p_ext;
if (ex) {
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
unsigned short ee_len;
/*
* unwritten extents are treated as holes, except that
* we split out initialized portions during a write.
*/
ee_len = ext4_ext_get_actual_len(ex);
trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
/* if found extent covers block, simply return it */
if (in_range(map->m_lblk, ee_block, ee_len)) {
newblock = map->m_lblk - ee_block + ee_start;
/* number of remaining blocks in the extent */
allocated = ee_len - (map->m_lblk - ee_block);
ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
ee_block, ee_len, newblock);
/*
* If the extent is initialized check whether the
* caller wants to convert it to unwritten.
*/
if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
allocated = convert_initialized_extent(
handle, inode, map, &path,
flags, allocated, newblock);
goto out2;
} else if (!ext4_ext_is_unwritten(ex))
goto out;
ret = ext4_ext_handle_unwritten_extents(
handle, inode, map, &path, flags,
allocated, newblock);
if (ret < 0)
err = ret;
else
allocated = ret;
goto out2;
}
}
/*
* the requested block isn't allocated yet;
* we can't create blocks if the create flag is zero
*/
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* put the just-found gap into the cache to speed up
* subsequent requests
*/
ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
goto out2;
}
/*
* Okay, we need to do block allocation.
*/
newex.ee_block = cpu_to_le32(map->m_lblk);
cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
/*
* If we are doing bigalloc, check to see if the extent returned
* by ext4_find_extent() implies a cluster we can use.
*/
if (cluster_offset && ex &&
get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks;
}
/* find neighbour allocated blocks */
ar.lleft = map->m_lblk;
err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
if (err)
goto out2;
ar.lright = map->m_lblk;
ex2 = NULL;
err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
if (err)
goto out2;
/* Check if the extent after searching to the right implies a
* cluster we can use. */
if ((sbi->s_cluster_ratio > 1) && ex2 &&
get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks;
}
/*
* See if request is beyond maximum number of blocks we can have in
* a single extent. For an initialized extent this limit is
* EXT_INIT_MAX_LEN and for an unwritten extent this limit is
* EXT_UNWRITTEN_MAX_LEN.
*/
if (map->m_len > EXT_INIT_MAX_LEN &&
!(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
map->m_len = EXT_INIT_MAX_LEN;
else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
map->m_len = EXT_UNWRITTEN_MAX_LEN;
/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
newex.ee_len = cpu_to_le16(map->m_len);
err = ext4_ext_check_overlap(sbi, inode, &newex, path);
if (err)
allocated = ext4_ext_get_actual_len(&newex);
else
allocated = map->m_len;
/* allocate new block */
ar.inode = inode;
ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
ar.logical = map->m_lblk;
/*
* We calculate the offset from the beginning of the cluster
* for the logical block number, since when we allocate a
* physical cluster, the physical block should start at the
* same offset from the beginning of the cluster. This is
* needed so that future calls to get_implied_cluster_alloc()
* work correctly.
*/
offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
ar.goal -= offset;
ar.logical -= offset;
if (S_ISREG(inode->i_mode))
ar.flags = EXT4_MB_HINT_DATA;
else
/* disable in-core preallocation for non-regular files */
ar.flags = 0;
if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
ar.flags |= EXT4_MB_HINT_NOPREALLOC;
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
ar.flags |= EXT4_MB_DELALLOC_RESERVED;
newblock = ext4_mb_new_blocks(handle, &ar, &err);
if (!newblock)
goto out2;
ext_debug("allocate new block: goal %llu, found %llu/%u\n",
ar.goal, newblock, allocated);
free_on_err = 1;
allocated_clusters = ar.len;
ar.len = EXT4_C2B(sbi, ar.len) - offset;
if (ar.len > allocated)
ar.len = allocated;
got_allocated_blocks:
/* try to insert new extent into found leaf and return */
ext4_ext_store_pblock(&newex, newblock + offset);
newex.ee_len = cpu_to_le16(ar.len);
/* Mark unwritten */
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
ext4_ext_mark_unwritten(&newex);
map->m_flags |= EXT4_MAP_UNWRITTEN;
/*
* An io_end structure is created for every IO write to an
* unwritten extent. To avoid unnecessary conversion,
* here we flag the IO that really needs the conversion.
* For the non-async direct IO case, flag the inode state
* so that we perform the conversion when IO is done.
*/
if (flags & EXT4_GET_BLOCKS_PRE_IO)
set_unwritten = 1;
}
err = 0;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
err = check_eofblocks_fl(handle, inode, map->m_lblk,
path, ar.len);
if (!err)
err = ext4_ext_insert_extent(handle, inode, &path,
&newex, flags);
if (!err && set_unwritten) {
if (io)
ext4_set_io_unwritten_flag(inode, io);
else
ext4_set_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN);
}
if (err && free_on_err) {
int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
/* free data blocks we just allocated */
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it for every free() */
ext4_discard_preallocations(inode);
ext4_free_blocks(handle, inode, NULL, newblock,
EXT4_C2B(sbi, allocated_clusters), fb_flags);
goto out2;
}
/* previous routine could use block we allocated */
newblock = ext4_ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
if (allocated > map->m_len)
allocated = map->m_len;
map->m_flags |= EXT4_MAP_NEW;
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now.
*/
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
unsigned int reserved_clusters;
/*
* Check how many clusters we had reserved for this allocated range
*/
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, allocated);
if (map_from_cluster) {
if (reserved_clusters) {
/*
* We have clusters reserved for this range.
* But since we are not doing actual allocation
* and are simply using blocks from previously
* allocated cluster, we should release the
* reservation and not claim quota.
*/
ext4_da_update_reserve_space(inode,
reserved_clusters, 0);
}
} else {
BUG_ON(allocated_clusters < reserved_clusters);
if (reserved_clusters < allocated_clusters) {
struct ext4_inode_info *ei = EXT4_I(inode);
int reservation = allocated_clusters -
reserved_clusters;
/*
* It seems we claimed a few clusters outside of the range of
* this allocation. We should give them back to the reservation
* pool. This can happen in the following case:
*
* * Suppose s_cluster_ratio is 4 (i.e., each cluster has 4
* blocks; thus the clusters are [0-3],[4-7],[8-11]...).
* * First comes a delayed allocation write for logical blocks
* 10 & 11. Since there were no previous delayed allocated
* blocks in the range [8-11], we would reserve 1 cluster
* for this write.
* * Next comes a write for logical blocks 3 to 8. In this
* case, we will reserve 2 clusters (for [0-3] and [4-7];
* and not for [8-11] as that range already has delayed
* allocated blocks). Total reserved clusters is now 3.
* * Now, during the delayed allocation writeout, we first
* write blocks [3-8] and allocate 3 clusters for writing
* these blocks; we also claim all three of those clusters.
* * When we then come here to write out blocks [10-11], we
* would expect to claim the reservation of 1 cluster we had
* made (and we would claim it, since there are no more
* delayed allocated blocks in the range [8-11]). But our
* reserved cluster count has already gone to 0.
*
* Thus, at the fourth step above, when we determine that there
* are still some unwritten delayed allocated blocks outside of
* our current block range, we should increment the reserved
* clusters count so that when the remaining blocks finally get
* written, we can claim them.
*/
dquot_reserve_block(inode,
EXT4_C2B(sbi, reservation));
spin_lock(&ei->i_block_reservation_lock);
ei->i_reserved_data_blocks += reservation;
spin_unlock(&ei->i_block_reservation_lock);
}
/*
* We will claim quota for all newly allocated blocks.
* We're updating the reserved space *after* the
* correction above so we do not accidentally free
* all the metadata reservation because we might
* actually need it later on.
*/
ext4_da_update_reserve_space(inode, allocated_clusters,
1);
}
}
/*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an unwritten extent.
*/
if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
else
ext4_update_inode_fsync_trans(handle, inode, 0);
out:
if (allocated > map->m_len)
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = newblock;
map->m_len = allocated;
out2:
ext4_ext_drop_refs(path);
kfree(path);
trace_ext4_ext_map_blocks_exit(inode, flags, map,
err ? err : allocated);
return err ? err : allocated;
}
void ext4_ext_truncate(handle_t *handle, struct inode *inode)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t last_block;
int err = 0;
/*
* TODO: optimization is possible here.
* Probably we need not scan at all,
* because page truncation is enough.
*/
/* we have to know where to truncate from in crash case */
EXT4_I(inode)->i_disksize = inode->i_size;
ext4_mark_inode_dirty(handle, inode);
last_block = (inode->i_size + sb->s_blocksize - 1)
>> EXT4_BLOCK_SIZE_BITS(sb);
retry:
err = ext4_es_remove_extent(inode, last_block,
EXT_MAX_BLOCKS - last_block);
if (err == -ENOMEM) {
cond_resched();
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
if (err) {
ext4_std_error(inode->i_sb, err);
return;
}
err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
ext4_std_error(inode->i_sb, err);
}
static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
ext4_lblk_t len, loff_t new_size,
int flags, int mode)
{
struct inode *inode = file_inode(file);
handle_t *handle;
int ret = 0;
int ret2 = 0;
int retries = 0;
struct ext4_map_blocks map;
unsigned int credits;
loff_t epos;
map.m_lblk = offset;
map.m_len = len;
/*
* Don't normalize the request if it can fit in one extent so
* that it doesn't get unnecessarily split into multiple
* extents.
*/
if (len <= EXT_UNWRITTEN_MAX_LEN)
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, len);
retry:
while (ret >= 0 && len) {
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
ret = ext4_map_blocks(handle, inode, &map, flags);
if (ret <= 0) {
ext4_debug("inode #%lu: block %u: len %u: "
"ext4_ext_map_blocks returned %d",
inode->i_ino, map.m_lblk,
map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
break;
}
map.m_lblk += ret;
map.m_len = len = len - ret;
epos = (loff_t)map.m_lblk << inode->i_blkbits;
inode->i_ctime = ext4_current_time(inode);
if (new_size) {
if (epos > new_size)
epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1)
inode->i_mtime = inode->i_ctime;
} else {
if (epos > inode->i_size)
ext4_set_inode_flag(inode,
EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret2)
break;
}
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries)) {
ret = 0;
goto retry;
}
return ret > 0 ? ret2 : ret;
}
static long ext4_zero_range(struct file *file, loff_t offset,
loff_t len, int mode)
{
struct inode *inode = file_inode(file);
handle_t *handle = NULL;
unsigned int max_blocks;
loff_t new_size = 0;
int ret = 0;
int flags;
int credits;
int partial_begin, partial_end;
loff_t start, end;
ext4_lblk_t lblk;
struct address_space *mapping = inode->i_mapping;
unsigned int blkbits = inode->i_blkbits;
trace_ext4_zero_range(inode, offset, len, mode);
if (!S_ISREG(inode->i_mode))
return -EINVAL;
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Write out all dirty pages to avoid race conditions,
* then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + len - 1);
if (ret)
return ret;
}
/*
* Round up offset. This is not fallocate, we need to zero out
* blocks, so convert the interior block aligned part of the range to
* unwritten and possibly manually zero out unaligned parts of the
* range.
*/
start = round_up(offset, 1 << blkbits);
end = round_down((offset + len), 1 << blkbits);
if (start < offset || end > offset + len)
return -EINVAL;
partial_begin = offset & ((1 << blkbits) - 1);
partial_end = (offset + len) & ((1 << blkbits) - 1);
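/*
* Example (assuming 4096-byte blocks): offset = 1000, len = 10000
* gives start = 4096, end = 8192, partial_begin = 1000 and
* partial_end = 2808. The single interior block [4096, 8192) is
* converted to unwritten below, while both unaligned edges are
* zeroed manually via ext4_zero_partial_blocks().
*/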
lblk = start >> blkbits;
max_blocks = (end >> blkbits);
if (max_blocks < lblk)
max_blocks = 0;
else
max_blocks -= lblk;
mutex_lock(&inode->i_mutex);
/*
* Indirect files do not support unwritten extents
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out_mutex;
}
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
/* Preallocate the range including the unaligned edges */
if (partial_begin || partial_end) {
ret = ext4_alloc_file_blocks(file,
round_down(offset, 1 << blkbits) >> blkbits,
(round_up((offset + len), 1 << blkbits) -
round_down(offset, 1 << blkbits)) >> blkbits,
new_size, flags, mode);
if (ret)
goto out_mutex;
}
/* Zero range excluding the unaligned edges */
if (max_blocks > 0) {
flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
EXT4_EX_NOCACHE);
/* Now release the pages and zero block aligned part of pages*/
truncate_pagecache_range(inode, start, end - 1);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
/* Wait for all existing dio workers; newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
if (ret)
goto out_dio;
/*
* Remove entire range from the extent status tree.
*
* ext4_es_remove_extent(inode, lblk, max_blocks) is
* NOT sufficient. I'm not sure why this is the case,
* but let's be conservative and remove the extent
* status tree for the entire inode. There should be
* no outstanding delalloc extents thanks to the
* filemap_write_and_wait_range() call above.
*/
ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
if (ret)
goto out_dio;
}
if (!partial_begin && !partial_end)
goto out_dio;
/*
* In the worst case we have to write out two non-adjacent unwritten
* blocks and update the inode
*/
credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
if (ext4_should_journal_data(inode))
credits += 2;
handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(inode->i_sb, ret);
goto out_dio;
}
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
if (new_size) {
ext4_update_inode_size(inode, new_size);
} else {
/*
* Mark that we allocate beyond EOF so the subsequent truncate
* can proceed even if the new size is the same as i_size.
*/
if ((offset + len) > i_size_read(inode))
ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
/*
* Preallocate space for a file. This implements ext4's fallocate file
* operation, which gets called from the sys_fallocate system call.
* For block-mapped files, posix_fallocate should fall back to the method
* of writing zeroes to the required new blocks (the same behavior which is
* expected for file systems which do not support the fallocate() system
* call).
*/
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
loff_t new_size = 0;
unsigned int max_blocks;
int ret = 0;
int flags;
ext4_lblk_t lblk;
unsigned int blkbits = inode->i_blkbits;
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
return -EOPNOTSUPP;
if (mode & FALLOC_FL_PUNCH_HOLE)
return ext4_punch_hole(inode, offset, len);
ret = ext4_convert_inline_data(inode);
if (ret)
return ret;
/*
* currently supporting (pre)allocate mode for extent-based
* files _only_
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return -EOPNOTSUPP;
if (mode & FALLOC_FL_COLLAPSE_RANGE)
return ext4_collapse_range(inode, offset, len);
if (mode & FALLOC_FL_ZERO_RANGE)
return ext4_zero_range(file, offset, len, mode);
trace_ext4_fallocate_enter(inode, offset, len, mode);
lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks because the byte range may
* straddle block boundaries. For example, if blocksize = 4096,
* offset = 3072 and len = 2048, the range covers blocks 0 and 1 even
* though len >> blkbits == 0; aligning the end up first gives
* max_blocks = 2.
*/
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
- lblk;
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
mutex_lock(&inode->i_mutex);
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out;
}
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
if (ret)
goto out;
if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
EXT4_I(inode)->i_sync_tid);
}
out:
mutex_unlock(&inode->i_mutex);
trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
return ret;
}
/*
* This function converts a range of blocks to written extents.
* The caller of this function will pass the start offset and the size;
* all unwritten extents within this range will be converted to
* written extents.
*
* This function is called from the direct IO end io callback
* function, to convert the fallocated extents after IO is completed.
* Returns 0 on success.
*/
int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len)
{
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
struct ext4_map_blocks map;
unsigned int credits, blkbits = inode->i_blkbits;
map.m_lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks: as above, the byte range
* may straddle block boundaries (e.g. blocksize = 4096, offset = 3072,
* len = 2048 covers two blocks), so align the end up first.
*/
max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
map.m_lblk);
/*
* This is somewhat ugly but the idea is clear: when a transaction is
* reserved, everything goes into it. Otherwise we rather start several
* smaller transactions for conversion of each extent separately.
*/
if (handle) {
handle = ext4_journal_start_reserved(handle,
EXT4_HT_EXT_CONVERT);
if (IS_ERR(handle))
return PTR_ERR(handle);
credits = 0;
} else {
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
}
while (ret >= 0 && ret < max_blocks) {
map.m_lblk += ret;
map.m_len = (max_blocks -= ret);
if (credits) {
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
}
ret = ext4_map_blocks(handle, inode, &map,
EXT4_GET_BLOCKS_IO_CONVERT_EXT);
if (ret <= 0)
ext4_warning(inode->i_sb,
"inode #%lu: block %u: len %u: "
"ext4_ext_map_blocks returned %d",
inode->i_ino, map.m_lblk,
map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
if (credits)
ret2 = ext4_journal_stop(handle);
if (ret <= 0 || ret2)
break;
}
if (!credits)
ret2 = ext4_journal_stop(handle);
return ret > 0 ? ret2 : ret;
}
/*
* If newes is not an existing extent (newes->es_pblk equals zero), find
* the delayed extent at the start of newes, update newes accordingly and
* return the start of the next delayed extent.
*
* If newes is an existing extent (newes->es_pblk is not equal to zero),
* return the start of the next delayed extent or EXT_MAX_BLOCKS if no
* delayed extent is found. Leave newes unmodified.
*/
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes)
{
struct extent_status es;
ext4_lblk_t block, next_del;
if (newes->es_pblk == 0) {
ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
newes->es_lblk + newes->es_len - 1, &es);
/*
* No extent in the extent tree contains block @newes->es_pblk;
* the block may therefore lie in 1) a hole or 2) a delayed extent.
*/
if (es.es_len == 0)
/* A hole found. */
return 0;
if (es.es_lblk > newes->es_lblk) {
/* A hole found. */
newes->es_len = min(es.es_lblk - newes->es_lblk,
newes->es_len);
return 0;
}
newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
}
block = newes->es_lblk + newes->es_len;
ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
if (es.es_len == 0)
next_del = EXT_MAX_BLOCKS;
else
next_del = es.es_lblk;
return next_del;
}
/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
static int ext4_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
__u64 physical = 0;
__u64 length;
__u32 flags = FIEMAP_EXTENT_LAST;
int blockbits = inode->i_sb->s_blocksize_bits;
int error = 0;
/* in-inode? */
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
struct ext4_iloc iloc;
int offset; /* offset of xattr in inode */
error = ext4_get_inode_loc(inode, &iloc);
if (error)
return error;
physical = (__u64)iloc.bh->b_blocknr << blockbits;
offset = EXT4_GOOD_OLD_INODE_SIZE +
EXT4_I(inode)->i_extra_isize;
physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
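/*
* Layout sketch assumed here: the in-inode xattr area starts right
* after the 128-byte good-old inode plus i_extra_isize and runs to
* the end of the on-disk inode, hence length = s_inode_size - offset.
*/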
flags |= FIEMAP_EXTENT_DATA_INLINE;
brelse(iloc.bh);
} else { /* external block */
physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
}
if (physical)
error = fiemap_fill_next_extent(fieinfo, 0, physical,
length, flags);
return (error < 0 ? error : 0);
}
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
ext4_lblk_t start_blk;
int error = 0;
if (ext4_has_inline_data(inode)) {
int has_inline = 1;
error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
start, len);
if (has_inline)
return error;
}
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
error = ext4_ext_precache(inode);
if (error)
return error;
}
/* fallback to generic here if not in extents fmt */
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return generic_block_fiemap(inode, fieinfo, start, len,
ext4_get_block);
if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
return -EBADR;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
error = ext4_xattr_fiemap(inode, fieinfo);
} else {
ext4_lblk_t len_blks;
__u64 last_blk;
start_blk = start >> inode->i_sb->s_blocksize_bits;
last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
if (last_blk >= EXT_MAX_BLOCKS)
last_blk = EXT_MAX_BLOCKS-1;
len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
/*
* Walk the extent tree gathering extent information
* and pushing extents back to the user.
*/
error = ext4_fill_fiemap_extents(inode, start_blk,
len_blks, fieinfo);
}
return error;
}
/*
* ext4_access_path:
* Function to access the path buffer for marking it dirty.
* It also checks if there are sufficient credits left in the journal
* handle to update the path.
*/
static int
ext4_access_path(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
{
int credits, err;
if (!ext4_handle_valid(handle))
return 0;
/*
* Check if we need to extend journal credits:
* 3 for leaf, sb, and inode, plus 2 (block bitmap and group
* descriptor) for each block group; assume two block
* groups, hence the threshold of 7.
*/
if (handle->h_buffer_credits < 7) {
credits = ext4_writepage_trans_blocks(inode);
err = ext4_ext_truncate_extend_restart(handle, inode, credits);
/* EAGAIN is success */
if (err && err != -EAGAIN)
return err;
}
err = ext4_ext_get_access(handle, inode, path);
return err;
}
/*
* ext4_ext_shift_path_extents:
* Shift the extents of a path structure lying between path[depth].p_ext
* and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift
* from starting block for each extent.
*/
static int
ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
struct inode *inode, handle_t *handle,
ext4_lblk_t *start)
{
int depth, err = 0;
struct ext4_extent *ex_start, *ex_last;
bool update = false;
depth = path->p_depth;
while (depth >= 0) {
if (depth == path->p_depth) {
ex_start = path[depth].p_ext;
if (!ex_start)
return -EIO;
ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
err = ext4_access_path(handle, inode, path + depth);
if (err)
goto out;
if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
update = true;
*start = le32_to_cpu(ex_last->ee_block) +
ext4_ext_get_actual_len(ex_last);
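/*
* Shift every extent in this leaf left by 'shift' blocks. After
* each shift we try to merge the extent into its left neighbour;
* a successful merge shrinks the leaf, which is why ex_last moves
* backwards instead of ex_start advancing.
*/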
while (ex_start <= ex_last) {
le32_add_cpu(&ex_start->ee_block, -shift);
/* Try to merge to the left. */
if ((ex_start >
EXT_FIRST_EXTENT(path[depth].p_hdr)) &&
ext4_ext_try_to_merge_right(inode,
path, ex_start - 1))
ex_last--;
else
ex_start++;
}
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto out;
if (--depth < 0 || !update)
break;
}
/* Update index too */
err = ext4_access_path(handle, inode, path + depth);
if (err)
goto out;
le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto out;
/* we are done if current index is not a starting index */
if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
break;
depth--;
}
out:
return err;
}
/*
* ext4_ext_shift_extents:
* All the extents which lie in the range from start to the last allocated
* block for the file are shifted downwards by shift blocks.
* On success, 0 is returned; an error code otherwise.
*/
static int
ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
ext4_lblk_t start, ext4_lblk_t shift)
{
struct ext4_ext_path *path;
int ret = 0, depth;
struct ext4_extent *extent;
ext4_lblk_t stop_block;
ext4_lblk_t ex_start, ex_end;
/* Let path point to the last extent */
path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (!extent)
goto out;
stop_block = le32_to_cpu(extent->ee_block) +
ext4_ext_get_actual_len(extent);
/* Nothing to shift if the hole is at the end of the file */
if (start >= stop_block)
goto out;
/*
* Don't start shifting extents until we make sure the hole is big
* enough to accommodate the shift.
*/
path = ext4_find_extent(inode, start - 1, &path, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (extent) {
ex_start = le32_to_cpu(extent->ee_block);
ex_end = le32_to_cpu(extent->ee_block) +
ext4_ext_get_actual_len(extent);
} else {
ex_start = 0;
ex_end = 0;
}
if ((start == ex_start && shift > ex_start) ||
(shift > start - ex_end))
return -EINVAL;
/* It's safe to start updating extents */
while (start < stop_block) {
path = ext4_find_extent(inode, start, &path, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (!extent) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) start);
return -EIO;
}
if (start > le32_to_cpu(extent->ee_block)) {
/* Hole, move to the next extent */
if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
path[depth].p_ext++;
} else {
start = ext4_ext_next_allocated_block(path);
continue;
}
}
ret = ext4_ext_shift_path_extents(path, shift, inode,
handle, &start);
if (ret)
break;
}
out:
ext4_ext_drop_refs(path);
kfree(path);
return ret;
}
/*
* ext4_collapse_range:
* This implements the collapse range functionality of fallocate for ext4.
* Returns 0 on success, non-zero on error.
*/
int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t punch_start, punch_stop;
handle_t *handle;
unsigned int credits;
loff_t new_size, ioffset;
int ret;
/* Collapse range works only on fs block size aligned offsets. */
if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
len & (EXT4_CLUSTER_SIZE(sb) - 1))
return -EINVAL;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
trace_ext4_collapse_range(inode, offset, len);
punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
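/*
* Example (assuming 4096-byte blocks): offset = 8192, len = 4096
* gives punch_start = 2 and punch_stop = 3, i.e. block 2 is removed
* and every extent from block 3 onwards is shifted down by
* punch_stop - punch_start = 1 block.
*/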
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Need to round down offset to be aligned to a page size boundary
* when page size > block size.
*/
ioffset = round_down(offset, PAGE_SIZE);
/* Write out all dirty pages */
ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
LLONG_MAX);
if (ret)
return ret;
/* Take mutex lock */
mutex_lock(&inode->i_mutex);
/*
* The collapse range must not overlap or reach EOF; that case would
* effectively be a truncate operation and is rejected.
*/
if (offset + len >= i_size_read(inode)) {
ret = -EINVAL;
goto out_mutex;
}
/* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
truncate_pagecache(inode, ioffset);
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out_dio;
}
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, punch_start,
EXT_MAX_BLOCKS - punch_start);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
ext4_discard_preallocations(inode);
ret = ext4_ext_shift_extents(inode, handle, punch_stop,
punch_stop - punch_start);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
new_size = i_size_read(inode) - len;
i_size_write(inode, new_size);
EXT4_I(inode)->i_disksize = new_size;
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
/**
* ext4_swap_extents - Swap extents between two inodes
*
* @handle: journal handle
* @inode1: First inode
* @inode2: Second inode
* @lblk1: Start block for first inode
* @lblk2: Start block for second inode
* @count: Number of blocks to swap
* @unwritten: Mark second inode's extents as unwritten after swap
* @erp: Pointer to save error value
*
* This helper routine does exactly what its name promises: swap extents.
* All other stuff such as page-cache locking consistency, bh mapping
* consistency or extent's data copying must be performed by the caller.
* Locking:
* i_mutex is held for both inodes
* i_data_sem is locked for write for both inodes
* Assumptions:
* All pages from the requested range are locked for both inodes
*/
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
ext4_lblk_t count, int unwritten, int *erp)
{
struct ext4_ext_path *path1 = NULL;
struct ext4_ext_path *path2 = NULL;
int replaced_count = 0;
BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
BUG_ON(!mutex_is_locked(&inode1->i_mutex));
BUG_ON(!mutex_is_locked(&inode2->i_mutex));
*erp = ext4_es_remove_extent(inode1, lblk1, count);
if (unlikely(*erp))
return 0;
*erp = ext4_es_remove_extent(inode2, lblk2, count);
if (unlikely(*erp))
return 0;
while (count) {
struct ext4_extent *ex1, *ex2, tmp_ex;
ext4_lblk_t e1_blk, e2_blk;
int e1_len, e2_len, len;
int split = 0;
path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
if (unlikely(IS_ERR(path1))) {
*erp = PTR_ERR(path1);
path1 = NULL;
finish:
count = 0;
goto repeat;
}
path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
if (unlikely(IS_ERR(path2))) {
*erp = PTR_ERR(path2);
path2 = NULL;
goto finish;
}
ex1 = path1[path1->p_depth].p_ext;
ex2 = path2[path2->p_depth].p_ext;
/* Do we have something to swap? */
if (unlikely(!ex2 || !ex1))
goto finish;
e1_blk = le32_to_cpu(ex1->ee_block);
e2_blk = le32_to_cpu(ex2->ee_block);
e1_len = ext4_ext_get_actual_len(ex1);
e2_len = ext4_ext_get_actual_len(ex2);
/* Hole handling */
if (!in_range(lblk1, e1_blk, e1_len) ||
!in_range(lblk2, e2_blk, e2_len)) {
ext4_lblk_t next1, next2;
/* if hole after extent, then go to next extent */
next1 = ext4_ext_next_allocated_block(path1);
next2 = ext4_ext_next_allocated_block(path2);
/* If hole before extent, then shift to that extent */
if (e1_blk > lblk1)
next1 = e1_blk;
if (e2_blk > lblk2)
next2 = e2_blk;
/* Do we have something to swap */
if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
goto finish;
/* Move to the rightmost boundary */
len = next1 - lblk1;
if (len < next2 - lblk2)
len = next2 - lblk2;
if (len > count)
len = count;
lblk1 += len;
lblk2 += len;
count -= len;
goto repeat;
}
/* Prepare left boundary */
if (e1_blk < lblk1) {
split = 1;
*erp = ext4_force_split_extent_at(handle, inode1,
&path1, lblk1, 0);
if (unlikely(*erp))
goto finish;
}
if (e2_blk < lblk2) {
split = 1;
*erp = ext4_force_split_extent_at(handle, inode2,
&path2, lblk2, 0);
if (unlikely(*erp))
goto finish;
}
/* ext4_split_extent_at() may result in a leaf extent split;
* the path must be revalidated. */
if (split)
goto repeat;
/* Prepare right boundary */
len = count;
if (len > e1_blk + e1_len - lblk1)
len = e1_blk + e1_len - lblk1;
if (len > e2_blk + e2_len - lblk2)
len = e2_blk + e2_len - lblk2;
if (len != e1_len) {
split = 1;
*erp = ext4_force_split_extent_at(handle, inode1,
&path1, lblk1 + len, 0);
if (unlikely(*erp))
goto finish;
}
if (len != e2_len) {
split = 1;
*erp = ext4_force_split_extent_at(handle, inode2,
&path2, lblk2 + len, 0);
if (*erp)
goto finish;
}
/* ext4_split_extent_at() may result in a leaf extent split;
* the path must be revalidated. */
if (split)
goto repeat;
BUG_ON(e2_len != e1_len);
*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
if (unlikely(*erp))
goto finish;
*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
if (unlikely(*erp))
goto finish;
/* Both extents are fully inside boundaries. Swap them now */
tmp_ex = *ex1;
ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
ex1->ee_len = cpu_to_le16(e2_len);
ex2->ee_len = cpu_to_le16(e1_len);
if (unwritten)
ext4_ext_mark_unwritten(ex2);
if (ext4_ext_is_unwritten(&tmp_ex))
ext4_ext_mark_unwritten(ex1);
ext4_ext_try_to_merge(handle, inode2, path2, ex2);
ext4_ext_try_to_merge(handle, inode1, path1, ex1);
*erp = ext4_ext_dirty(handle, inode2, path2 +
path2->p_depth);
if (unlikely(*erp))
goto finish;
*erp = ext4_ext_dirty(handle, inode1, path1 +
path1->p_depth);
/*
* Looks scary, eh? The second inode already points to the new
* blocks, and it was successfully dirtied. But luckily an error
* here can only happen due to a journal error, so the full
* transaction will be aborted anyway.
*/
if (unlikely(*erp))
goto finish;
lblk1 += len;
lblk2 += len;
replaced_count += len;
count -= len;
repeat:
ext4_ext_drop_refs(path1);
kfree(path1);
ext4_ext_drop_refs(path2);
kfree(path2);
path1 = path2 = NULL;
}
return replaced_count;
}
| ./CrossVul/dataset_final_sorted/CWE-17/c/good_1454_0 |
crossvul-cpp_data_good_2415_0 | /*
* dir.c
*
* PURPOSE
* Directory handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998-2004 Ben Fennema
*
* HISTORY
*
* 10/05/98 dgb Split directory operations into its own file
* Implemented directory reads via do_udf_readdir
* 10/06/98 Made directory operations work!
* 11/17/98 Rewrote directory to support ICBTAG_FLAG_AD_LONG
* 11/25/98 blf Rewrote directory handling (readdir+lookup) to support reading
* across blocks.
* 12/12/98 Split out the lookup code to namei.c. bulk of directory
* code now in directory.c:udf_fileident_read.
*/
#include "udfdecl.h"
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "udf_i.h"
#include "udf_sb.h"
static int udf_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *dir = file_inode(file);
struct udf_inode_info *iinfo = UDF_I(dir);
struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL};
struct fileIdentDesc *fi = NULL;
struct fileIdentDesc cfi;
int block, iblock;
loff_t nf_pos;
int flen;
unsigned char *fname = NULL;
unsigned char *nameptr;
uint16_t liu;
uint8_t lfi;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
struct buffer_head *tmp, *bha[16];
struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
int i, num, ret = 0;
struct extent_position epos = { NULL, 0, {0, 0} };
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
return 0;
ctx->pos = 1;
}
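/*
* Note on the encoding: udf_readdir stores the directory byte
* position in ctx->pos as (nf_pos >> 2) + 1 (entries are 4-byte
* aligned and position 0 is reserved for emitting "."), so the
* transform below recovers the byte offset.
*/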
nf_pos = (ctx->pos - 1) << 2;
if (nf_pos >= size)
goto out;
fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!fname) {
ret = -ENOMEM;
goto out;
}
if (nf_pos == 0)
nf_pos = udf_ext0_offset(dir);
fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset)
!= (EXT_RECORDED_ALLOCATED >> 30)) {
ret = -ENOENT;
goto out;
}
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type ==
ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
} else {
offset = 0;
}
if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
ret = -EIO;
goto out;
}
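/*
* Read-ahead sketch: 16 >> (s_blocksize_bits - 9) is the number of
* blocks covering 16 512-byte sectors, e.g. 4 blocks with a
* 2048-byte block size. Each time 'offset' crosses such a boundary,
* the loop below batch-reads the next group of directory blocks.
*/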
if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
for (num = 0; i > 0; i--) {
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i);
tmp = udf_tgetblk(dir->i_sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
brelse(tmp);
}
if (num) {
ll_rw_block(READA, num, bha);
for (i = 0; i < num; i++)
brelse(bha[i]);
}
}
}
while (nf_pos < size) {
struct kernel_lb_addr tloc;
ctx->pos = (nf_pos >> 2) + 1;
fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
&elen, &offset);
if (!fi)
goto out;
liu = le16_to_cpu(cfi.lengthOfImpUse);
lfi = cfi.lengthFileIdent;
if (fibh.sbh == fibh.ebh) {
nameptr = fi->fileIdent + liu;
} else {
int poffset; /* Unpadded ending offset */
poffset = fibh.soffset + sizeof(struct fileIdentDesc) + liu + lfi;
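/*
* poffset is where the name would end inside ebh. If the whole
* name fits in ebh (poffset >= lfi) we can point straight into that
* buffer; otherwise the name straddles sbh/ebh and is reassembled
* into fname below.
*/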
if (poffset >= lfi) {
nameptr = (char *)(fibh.ebh->b_data + poffset - lfi);
} else {
nameptr = fname;
memcpy(nameptr, fi->fileIdent + liu,
lfi - poffset);
memcpy(nameptr + lfi - poffset,
fibh.ebh->b_data, poffset);
}
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
continue;
}
if (cfi.fileCharacteristics & FID_FILE_CHAR_PARENT) {
if (!dir_emit_dotdot(file, ctx))
goto out;
continue;
}
flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname,
UDF_NAME_LEN);
if (!flen)
continue;
tloc = lelb_to_cpu(cfi.icb.extLocation);
iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
goto out;
} /* end while */
ctx->pos = (nf_pos >> 2) + 1;
out:
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
brelse(epos.bh);
kfree(fname);
return ret;
}
/* readdir and lookup functions */
const struct file_operations udf_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate = udf_readdir,
.unlocked_ioctl = udf_ioctl,
.fsync = generic_file_fsync,
};
| ./CrossVul/dataset_final_sorted/CWE-17/c/good_2415_0 |
crossvul-cpp_data_good_2415_1 | /*
* namei.c
*
* PURPOSE
* Inode name handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998-2004 Ben Fennema
* (C) 1999-2000 Stelias Computing Inc
*
* HISTORY
*
* 12/12/98 blf Created. Split out the lookup code from dir.c
* 04/19/99 blf link, mknod, symlink support
*/
#include "udfdecl.h"
#include "udf_i.h"
#include "udf_sb.h"
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/crc-itu-t.h>
#include <linux/exportfs.h>
static inline int udf_match(int len1, const unsigned char *name1, int len2,
const unsigned char *name2)
{
if (len1 != len2)
return 0;
return !memcmp(name1, name2, len1);
}
int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh,
uint8_t *impuse, uint8_t *fileident)
{
uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(struct tag);
uint16_t crc;
int offset;
uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse);
uint8_t lfi = cfi->lengthFileIdent;
int padlen = fibh->eoffset - fibh->soffset - liu - lfi -
sizeof(struct fileIdentDesc);
int adinicb = 0;
if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
adinicb = 1;
offset = fibh->soffset + sizeof(struct fileIdentDesc);
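/*
* Note: a negative offset below means the data still lives in
* fibh->sbh (offsets are relative to the start of fibh->ebh when an
* entry straddles two blocks); each three-way branch picks sbh, ebh,
* or a split copy across both accordingly.
*/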
if (impuse) {
if (adinicb || (offset + liu < 0)) {
memcpy((uint8_t *)sfi->impUse, impuse, liu);
} else if (offset >= 0) {
memcpy(fibh->ebh->b_data + offset, impuse, liu);
} else {
memcpy((uint8_t *)sfi->impUse, impuse, -offset);
memcpy(fibh->ebh->b_data, impuse - offset,
liu + offset);
}
}
offset += liu;
if (fileident) {
if (adinicb || (offset + lfi < 0)) {
memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
} else if (offset >= 0) {
memcpy(fibh->ebh->b_data + offset, fileident, lfi);
} else {
memcpy((uint8_t *)sfi->fileIdent + liu, fileident,
-offset);
memcpy(fibh->ebh->b_data, fileident - offset,
lfi + offset);
}
}
offset += lfi;
if (adinicb || (offset + padlen < 0)) {
memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
} else if (offset >= 0) {
memset(fibh->ebh->b_data + offset, 0x00, padlen);
} else {
memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset);
memset(fibh->ebh->b_data, 0x00, padlen + offset);
}
crc = crc_itu_t(0, (uint8_t *)cfi + sizeof(struct tag),
sizeof(struct fileIdentDesc) - sizeof(struct tag));
if (fibh->sbh == fibh->ebh) {
crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
crclen + sizeof(struct tag) -
sizeof(struct fileIdentDesc));
} else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) {
crc = crc_itu_t(crc, fibh->ebh->b_data +
sizeof(struct fileIdentDesc) +
fibh->soffset,
crclen + sizeof(struct tag) -
sizeof(struct fileIdentDesc));
} else {
crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
-fibh->soffset - sizeof(struct fileIdentDesc));
crc = crc_itu_t(crc, fibh->ebh->b_data, fibh->eoffset);
}
cfi->descTag.descCRC = cpu_to_le16(crc);
cfi->descTag.descCRCLength = cpu_to_le16(crclen);
cfi->descTag.tagChecksum = udf_tag_checksum(&cfi->descTag);
if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) {
memcpy((uint8_t *)sfi, (uint8_t *)cfi,
sizeof(struct fileIdentDesc));
} else {
memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset);
memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset,
sizeof(struct fileIdentDesc) + fibh->soffset);
}
if (adinicb) {
mark_inode_dirty(inode);
} else {
if (fibh->sbh != fibh->ebh)
mark_buffer_dirty_inode(fibh->ebh, inode);
mark_buffer_dirty_inode(fibh->sbh, inode);
}
return 0;
}
static struct fileIdentDesc *udf_find_entry(struct inode *dir,
const struct qstr *child,
struct udf_fileident_bh *fibh,
struct fileIdentDesc *cfi)
{
struct fileIdentDesc *fi = NULL;
loff_t f_pos;
int block, flen;
unsigned char *fname = NULL;
unsigned char *nameptr;
uint8_t lfi;
uint16_t liu;
loff_t size;
struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
struct extent_position epos = {};
struct udf_inode_info *dinfo = UDF_I(dir);
int isdotdot = child->len == 2 &&
child->name[0] == '.' && child->name[1] == '.';
size = udf_ext0_offset(dir) + dir->i_size;
f_pos = udf_ext0_offset(dir);
fibh->sbh = fibh->ebh = NULL;
fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
goto out_err;
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
} else
offset = 0;
fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
if (!fibh->sbh)
goto out_err;
}
fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!fname)
goto out_err;
while (f_pos < size) {
fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
&elen, &offset);
if (!fi)
goto out_err;
liu = le16_to_cpu(cfi->lengthOfImpUse);
lfi = cfi->lengthFileIdent;
if (fibh->sbh == fibh->ebh) {
nameptr = fi->fileIdent + liu;
} else {
int poffset; /* Unpadded ending offset */
poffset = fibh->soffset + sizeof(struct fileIdentDesc) +
liu + lfi;
if (poffset >= lfi)
nameptr = (uint8_t *)(fibh->ebh->b_data +
poffset - lfi);
else {
nameptr = fname;
memcpy(nameptr, fi->fileIdent + liu,
lfi - poffset);
memcpy(nameptr + lfi - poffset,
fibh->ebh->b_data, poffset);
}
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
continue;
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) &&
isdotdot)
goto out_ok;
if (!lfi)
continue;
flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname,
UDF_NAME_LEN);
if (flen && udf_match(flen, fname, child->len, child->name))
goto out_ok;
}
out_err:
fi = NULL;
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
out_ok:
brelse(epos.bh);
kfree(fname);
return fi;
}
static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode = NULL;
struct fileIdentDesc cfi;
struct udf_fileident_bh fibh;
if (dentry->d_name.len > UDF_NAME_LEN - 2)
return ERR_PTR(-ENAMETOOLONG);
#ifdef UDF_RECOVERY
/* temporary shorthand for specifying files by inode number */
if (!strncmp(dentry->d_name.name, ".B=", 3)) {
struct kernel_lb_addr lb = {
.logicalBlockNum = 0,
.partitionReferenceNum =
simple_strtoul(dentry->d_name.name + 3,
NULL, 0),
};
inode = udf_iget(dir->i_sb, lb);
if (IS_ERR(inode))
return inode;
} else
#endif /* UDF_RECOVERY */
if (udf_find_entry(dir, &dentry->d_name, &fibh, &cfi)) {
struct kernel_lb_addr loc;
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
loc = lelb_to_cpu(cfi.icb.extLocation);
inode = udf_iget(dir->i_sb, &loc);
if (IS_ERR(inode))
return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
static struct fileIdentDesc *udf_add_entry(struct inode *dir,
struct dentry *dentry,
struct udf_fileident_bh *fibh,
struct fileIdentDesc *cfi, int *err)
{
struct super_block *sb = dir->i_sb;
struct fileIdentDesc *fi = NULL;
unsigned char *name = NULL;
int namelen;
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int nfidlen;
uint8_t lfi;
uint16_t liu;
int block;
struct kernel_lb_addr eloc;
uint32_t elen = 0;
sector_t offset;
struct extent_position epos = {};
struct udf_inode_info *dinfo;
fibh->sbh = fibh->ebh = NULL;
name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!name) {
*err = -ENOMEM;
goto out_err;
}
if (dentry) {
if (!dentry->d_name.len) {
*err = -EINVAL;
goto out_err;
}
namelen = udf_put_filename(sb, dentry->d_name.name, name,
dentry->d_name.len);
if (!namelen) {
*err = -ENAMETOOLONG;
goto out_err;
}
} else {
namelen = 0;
}
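/*
* Round the new FID length up to a 4-byte boundary; e.g. assuming
* the usual 38-byte on-disk fileIdentDesc, a 5-byte name gives
* (38 + 5 + 3) & ~3 = 44 bytes.
*/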
nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
f_pos = udf_ext0_offset(dir);
fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
dinfo = UDF_I(dir);
if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb,
&dinfo->i_location, 0);
fibh->soffset = fibh->eoffset = sb->s_blocksize;
goto add;
}
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
} else
offset = 0;
fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
if (!fibh->sbh) {
*err = -EIO;
goto out_err;
}
block = dinfo->i_location.logicalBlockNum;
}
while (f_pos < size) {
fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
&elen, &offset);
if (!fi) {
*err = -EIO;
goto out_err;
}
liu = le16_to_cpu(cfi->lengthOfImpUse);
lfi = cfi->lengthFileIdent;
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
if (((sizeof(struct fileIdentDesc) +
liu + lfi + 3) & ~3) == nfidlen) {
cfi->descTag.tagSerialNum = cpu_to_le16(1);
cfi->fileVersionNum = cpu_to_le16(1);
cfi->fileCharacteristics = 0;
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
if (!udf_write_fi(dir, cfi, fi, fibh, NULL,
name))
goto out_ok;
else {
*err = -EIO;
goto out_err;
}
}
}
}
add:
f_pos += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
sb->s_blocksize - fibh->eoffset < nfidlen) {
brelse(epos.bh);
epos.bh = NULL;
fibh->soffset -= udf_ext0_offset(dir);
fibh->eoffset -= udf_ext0_offset(dir);
f_pos -= udf_ext0_offset(dir);
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
fibh->sbh = fibh->ebh =
udf_expand_dir_adinicb(dir, &block, err);
if (!fibh->sbh)
goto out_err;
epos.block = dinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(dir);
/* Load extent udf_expand_dir_adinicb() has created */
udf_current_aext(dir, &epos, &eloc, &elen, 1);
}
/* Entry fits into current block? */
if (sb->s_blocksize - fibh->eoffset >= nfidlen) {
fibh->soffset = fibh->eoffset;
fibh->eoffset += nfidlen;
if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
block = dinfo->i_location.logicalBlockNum;
fi = (struct fileIdentDesc *)
(dinfo->i_ext.i_data +
fibh->soffset -
udf_ext0_offset(dir) +
dinfo->i_lenEAttr);
} else {
block = eloc.logicalBlockNum +
((elen - 1) >>
dir->i_sb->s_blocksize_bits);
fi = (struct fileIdentDesc *)
(fibh->sbh->b_data + fibh->soffset);
}
} else {
/* Round up last extent in the file */
elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
udf_write_aext(dir, &epos, &eloc, elen, 1);
dinfo->i_lenExtents = (dinfo->i_lenExtents + sb->s_blocksize
- 1) & ~(sb->s_blocksize - 1);
fibh->soffset = fibh->eoffset - sb->s_blocksize;
fibh->eoffset += nfidlen - sb->s_blocksize;
if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
block = eloc.logicalBlockNum + ((elen - 1) >>
dir->i_sb->s_blocksize_bits);
fibh->ebh = udf_bread(dir,
f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
if (!fibh->ebh)
goto out_err;
/* Extents could have been merged, invalidate our position */
brelse(epos.bh);
epos.bh = NULL;
epos.block = dinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(dir);
if (!fibh->soffset) {
/* Find the freshly allocated block */
while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
(EXT_RECORDED_ALLOCATED >> 30))
;
block = eloc.logicalBlockNum + ((elen - 1) >>
dir->i_sb->s_blocksize_bits);
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
} else {
fi = (struct fileIdentDesc *)
(fibh->sbh->b_data + sb->s_blocksize +
fibh->soffset);
}
}
memset(cfi, 0, sizeof(struct fileIdentDesc));
if (UDF_SB(sb)->s_udfrev >= 0x0200)
udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block,
sizeof(struct tag));
else
udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block,
sizeof(struct tag));
cfi->fileVersionNum = cpu_to_le16(1);
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
dir->i_size += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
dinfo->i_lenAlloc += nfidlen;
else {
/* Find the last extent and truncate it to proper size */
while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
(EXT_RECORDED_ALLOCATED >> 30))
;
elen -= dinfo->i_lenExtents - dir->i_size;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
udf_write_aext(dir, &epos, &eloc, elen, 1);
dinfo->i_lenExtents = dir->i_size;
}
mark_inode_dirty(dir);
goto out_ok;
} else {
*err = -EIO;
goto out_err;
}
out_err:
fi = NULL;
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
out_ok:
brelse(epos.bh);
kfree(name);
return fi;
}
static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
struct udf_fileident_bh *fibh,
struct fileIdentDesc *cfi)
{
cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED;
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
memset(&(cfi->icb), 0x00, sizeof(struct long_ad));
return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
}
static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
struct inode *dir = dentry->d_parent->d_inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc cfi, *fi;
int err;
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (unlikely(!fi)) {
inode_dec_link_count(inode);
unlock_new_inode(inode);
iput(inode);
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
mark_inode_dirty(dir);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
return 0;
}
static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
struct inode *inode = udf_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
mark_inode_dirty(inode);
return udf_add_nondir(dentry, inode);
}
static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode = udf_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
mark_inode_dirty(inode);
d_tmpfile(dentry, inode);
unlock_new_inode(inode);
return 0;
}
static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev)
{
struct inode *inode;
if (!old_valid_dev(rdev))
return -EINVAL;
inode = udf_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
init_special_inode(inode, mode, rdev);
return udf_add_nondir(dentry, inode);
}
static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc cfi, *fi;
int err;
struct udf_inode_info *dinfo = UDF_I(dir);
struct udf_inode_info *iinfo;
inode = udf_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
iinfo = UDF_I(inode);
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
if (!fi) {
inode_dec_link_count(inode);
unlock_new_inode(inode);
iput(inode);
goto out;
}
set_nlink(inode, 2);
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
cfi.icb.extLocation = cpu_to_lelb(dinfo->i_location);
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
cpu_to_le32(dinfo->i_unique & 0x00000000FFFFFFFFUL);
cfi.fileCharacteristics =
FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT;
udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL);
brelse(fibh.sbh);
mark_inode_dirty(inode);
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (!fi) {
clear_nlink(inode);
mark_inode_dirty(inode);
unlock_new_inode(inode);
iput(inode);
goto out;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
inc_nlink(dir);
mark_inode_dirty(dir);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
err = 0;
out:
return err;
}
static int empty_dir(struct inode *dir)
{
struct fileIdentDesc *fi, cfi;
struct udf_fileident_bh fibh;
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int block;
struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
struct extent_position epos = {};
struct udf_inode_info *dinfo = UDF_I(dir);
f_pos = udf_ext0_offset(dir);
fibh.soffset = fibh.eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
fibh.sbh = fibh.ebh = NULL;
else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset) ==
(EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
} else
offset = 0;
fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block);
if (!fibh.sbh) {
brelse(epos.bh);
return 0;
}
} else {
brelse(epos.bh);
return 0;
}
while (f_pos < size) {
fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc,
&elen, &offset);
if (!fi) {
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
brelse(epos.bh);
return 0;
}
if (cfi.lengthFileIdent &&
(cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0) {
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
brelse(epos.bh);
return 0;
}
}
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
brelse(epos.bh);
return 1;
}
static int udf_rmdir(struct inode *dir, struct dentry *dentry)
{
int retval;
struct inode *inode = dentry->d_inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc *fi, cfi;
struct kernel_lb_addr tloc;
retval = -ENOENT;
fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
if (!fi)
goto out;
retval = -EIO;
tloc = lelb_to_cpu(cfi.icb.extLocation);
if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino)
goto end_rmdir;
retval = -ENOTEMPTY;
if (!empty_dir(inode))
goto end_rmdir;
retval = udf_delete_entry(dir, fi, &fibh, &cfi);
if (retval)
goto end_rmdir;
if (inode->i_nlink != 2)
udf_warn(inode->i_sb, "empty directory has nlink != 2 (%d)\n",
inode->i_nlink);
clear_nlink(inode);
inode->i_size = 0;
inode_dec_link_count(dir);
inode->i_ctime = dir->i_ctime = dir->i_mtime =
current_fs_time(dir->i_sb);
mark_inode_dirty(dir);
end_rmdir:
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
out:
return retval;
}
static int udf_unlink(struct inode *dir, struct dentry *dentry)
{
int retval;
struct inode *inode = dentry->d_inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc *fi;
struct fileIdentDesc cfi;
struct kernel_lb_addr tloc;
retval = -ENOENT;
fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
if (!fi)
goto out;
retval = -EIO;
tloc = lelb_to_cpu(cfi.icb.extLocation);
if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino)
goto end_unlink;
if (!inode->i_nlink) {
udf_debug("Deleting nonexistent file (%lu), %d\n",
inode->i_ino, inode->i_nlink);
set_nlink(inode, 1);
}
retval = udf_delete_entry(dir, fi, &fibh, &cfi);
if (retval)
goto end_unlink;
dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
mark_inode_dirty(dir);
inode_dec_link_count(inode);
inode->i_ctime = dir->i_ctime;
retval = 0;
end_unlink:
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
out:
return retval;
}
static int udf_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
struct inode *inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO);
struct pathComponent *pc;
const char *compstart;
struct extent_position epos = {};
int eoffset, elen = 0;
uint8_t *ea;
int err;
int block;
unsigned char *name = NULL;
int namelen;
struct udf_inode_info *iinfo;
struct super_block *sb = dir->i_sb;
if (IS_ERR(inode))
return PTR_ERR(inode);
iinfo = UDF_I(inode);
down_write(&iinfo->i_data_sem);
name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!name) {
err = -ENOMEM;
goto out_no_entry;
}
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
struct kernel_lb_addr eloc;
uint32_t bsize;
block = udf_new_block(sb, inode,
iinfo->i_location.partitionReferenceNum,
iinfo->i_location.logicalBlockNum, &err);
if (!block)
goto out_no_entry;
epos.block = iinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(inode);
epos.bh = NULL;
eloc.logicalBlockNum = block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
bsize = sb->s_blocksize;
iinfo->i_lenExtents = bsize;
udf_add_aext(inode, &epos, &eloc, bsize, 0);
brelse(epos.bh);
block = udf_get_pblock(sb, block,
iinfo->i_location.partitionReferenceNum,
0);
epos.bh = udf_tgetblk(sb, block);
lock_buffer(epos.bh);
memset(epos.bh->b_data, 0x00, bsize);
set_buffer_uptodate(epos.bh);
unlock_buffer(epos.bh);
mark_buffer_dirty_inode(epos.bh, inode);
ea = epos.bh->b_data + udf_ext0_offset(inode);
} else
ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
eoffset = sb->s_blocksize - udf_ext0_offset(inode);
pc = (struct pathComponent *)ea;
if (*symname == '/') {
do {
symname++;
} while (*symname == '/');
pc->componentType = 1;
pc->lengthComponentIdent = 0;
pc->componentFileVersionNum = 0;
elen += sizeof(struct pathComponent);
}
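	/*
	 * Path component types written below, as used by this driver:
	 * 1 is the root prefix of an absolute path, 3 is the parent
	 * directory (".."), 4 the current directory (".") and 5 an
	 * ordinary named component.
	 */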
err = -ENAMETOOLONG;
while (*symname) {
if (elen + sizeof(struct pathComponent) > eoffset)
goto out_no_entry;
pc = (struct pathComponent *)(ea + elen);
compstart = symname;
do {
symname++;
} while (*symname && *symname != '/');
pc->componentType = 5;
pc->lengthComponentIdent = 0;
pc->componentFileVersionNum = 0;
if (compstart[0] == '.') {
if ((symname - compstart) == 1)
pc->componentType = 4;
else if ((symname - compstart) == 2 &&
compstart[1] == '.')
pc->componentType = 3;
}
if (pc->componentType == 5) {
namelen = udf_put_filename(sb, compstart, name,
symname - compstart);
if (!namelen)
goto out_no_entry;
if (elen + sizeof(struct pathComponent) + namelen >
eoffset)
goto out_no_entry;
else
pc->lengthComponentIdent = namelen;
memcpy(pc->componentIdent, name, namelen);
}
elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
if (*symname) {
do {
symname++;
} while (*symname == '/');
}
}
brelse(epos.bh);
inode->i_size = elen;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
iinfo->i_lenAlloc = inode->i_size;
else
udf_truncate_tail_extent(inode);
mark_inode_dirty(inode);
up_write(&iinfo->i_data_sem);
err = udf_add_nondir(dentry, inode);
out:
kfree(name);
return err;
out_no_entry:
up_write(&iinfo->i_data_sem);
inode_dec_link_count(inode);
unlock_new_inode(inode);
iput(inode);
goto out;
}
static int udf_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc cfi, *fi;
int err;
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (!fi) {
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
if (UDF_SB(inode->i_sb)->s_lvid_bh) {
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
cpu_to_le32(lvid_get_unique_id(inode->i_sb));
}
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
mark_inode_dirty(dir);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
inc_nlink(inode);
inode->i_ctime = current_fs_time(inode->i_sb);
mark_inode_dirty(inode);
ihold(inode);
d_instantiate(dentry, inode);
return 0;
}
/* Anybody can rename anything with this: the permission checks are left to the
* higher-level routines.
*/
static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *old_inode = old_dentry->d_inode;
struct inode *new_inode = new_dentry->d_inode;
struct udf_fileident_bh ofibh, nfibh;
struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL;
struct fileIdentDesc ocfi, ncfi;
struct buffer_head *dir_bh = NULL;
int retval = -ENOENT;
struct kernel_lb_addr tloc;
struct udf_inode_info *old_iinfo = UDF_I(old_inode);
ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
if (ofi) {
if (ofibh.sbh != ofibh.ebh)
brelse(ofibh.ebh);
brelse(ofibh.sbh);
}
tloc = lelb_to_cpu(ocfi.icb.extLocation);
if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0)
!= old_inode->i_ino)
goto end_rename;
nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi);
if (nfi) {
if (!new_inode) {
if (nfibh.sbh != nfibh.ebh)
brelse(nfibh.ebh);
brelse(nfibh.sbh);
nfi = NULL;
}
}
if (S_ISDIR(old_inode->i_mode)) {
int offset = udf_ext0_offset(old_inode);
if (new_inode) {
retval = -ENOTEMPTY;
if (!empty_dir(new_inode))
goto end_rename;
}
retval = -EIO;
if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
dir_fi = udf_get_fileident(
old_iinfo->i_ext.i_data -
(old_iinfo->i_efe ?
sizeof(struct extendedFileEntry) :
sizeof(struct fileEntry)),
old_inode->i_sb->s_blocksize, &offset);
} else {
dir_bh = udf_bread(old_inode, 0, 0, &retval);
if (!dir_bh)
goto end_rename;
dir_fi = udf_get_fileident(dir_bh->b_data,
old_inode->i_sb->s_blocksize, &offset);
}
if (!dir_fi)
goto end_rename;
tloc = lelb_to_cpu(dir_fi->icb.extLocation);
if (udf_get_lb_pblock(old_inode->i_sb, &tloc, 0) !=
old_dir->i_ino)
goto end_rename;
}
if (!nfi) {
nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi,
&retval);
if (!nfi)
goto end_rename;
}
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
old_inode->i_ctime = current_fs_time(old_inode->i_sb);
mark_inode_dirty(old_inode);
/*
* ok, that's it
*/
ncfi.fileVersionNum = ocfi.fileVersionNum;
ncfi.fileCharacteristics = ocfi.fileCharacteristics;
memcpy(&(ncfi.icb), &(ocfi.icb), sizeof(struct long_ad));
udf_write_fi(new_dir, &ncfi, nfi, &nfibh, NULL, NULL);
/* The old fid may have moved - find it again */
ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
udf_delete_entry(old_dir, ofi, &ofibh, &ocfi);
if (new_inode) {
new_inode->i_ctime = current_fs_time(new_inode->i_sb);
inode_dec_link_count(new_inode);
}
old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb);
mark_inode_dirty(old_dir);
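	/*
	 * When a directory is moved, repoint its ".." entry at the new
	 * parent and fix up the link counts of both parent directories.
	 */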
if (dir_fi) {
dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
udf_update_tag((char *)dir_fi,
(sizeof(struct fileIdentDesc) +
le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
mark_inode_dirty(old_inode);
else
mark_buffer_dirty_inode(dir_bh, old_inode);
inode_dec_link_count(old_dir);
if (new_inode)
inode_dec_link_count(new_inode);
else {
inc_nlink(new_dir);
mark_inode_dirty(new_dir);
}
}
if (ofi) {
if (ofibh.sbh != ofibh.ebh)
brelse(ofibh.ebh);
brelse(ofibh.sbh);
}
retval = 0;
end_rename:
brelse(dir_bh);
if (nfi) {
if (nfibh.sbh != nfibh.ebh)
brelse(nfibh.ebh);
brelse(nfibh.sbh);
}
return retval;
}
static struct dentry *udf_get_parent(struct dentry *child)
{
struct kernel_lb_addr tloc;
struct inode *inode = NULL;
struct qstr dotdot = QSTR_INIT("..", 2);
struct fileIdentDesc cfi;
struct udf_fileident_bh fibh;
if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
return ERR_PTR(-EACCES);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
tloc = lelb_to_cpu(cfi.icb.extLocation);
inode = udf_iget(child->d_inode->i_sb, &tloc);
if (IS_ERR(inode))
return ERR_CAST(inode);
return d_obtain_alias(inode);
}
static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
u16 partref, __u32 generation)
{
struct inode *inode;
struct kernel_lb_addr loc;
if (block == 0)
return ERR_PTR(-ESTALE);
loc.logicalBlockNum = block;
loc.partitionReferenceNum = partref;
inode = udf_iget(sb, &loc);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
return d_obtain_alias(inode);
}
static struct dentry *udf_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
if ((fh_len != 3 && fh_len != 5) ||
(fh_type != FILEID_UDF_WITH_PARENT &&
fh_type != FILEID_UDF_WITHOUT_PARENT))
return NULL;
return udf_nfs_get_inode(sb, fid->udf.block, fid->udf.partref,
fid->udf.generation);
}
static struct dentry *udf_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
if (fh_len != 5 || fh_type != FILEID_UDF_WITH_PARENT)
return NULL;
return udf_nfs_get_inode(sb, fid->udf.parent_block,
fid->udf.parent_partref,
fid->udf.parent_generation);
}
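/*
 * udf_encode_fh() below emits a handle of three 4-byte words for the
 * object alone (its block, partition reference and generation), or
 * five words when the parent's location is appended.
 */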
static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
struct inode *parent)
{
int len = *lenp;
struct kernel_lb_addr location = UDF_I(inode)->i_location;
struct fid *fid = (struct fid *)fh;
int type = FILEID_UDF_WITHOUT_PARENT;
if (parent && (len < 5)) {
*lenp = 5;
return FILEID_INVALID;
} else if (len < 3) {
*lenp = 3;
return FILEID_INVALID;
}
*lenp = 3;
fid->udf.block = location.logicalBlockNum;
fid->udf.partref = location.partitionReferenceNum;
fid->udf.parent_partref = 0;
fid->udf.generation = inode->i_generation;
if (parent) {
location = UDF_I(parent)->i_location;
fid->udf.parent_block = location.logicalBlockNum;
fid->udf.parent_partref = location.partitionReferenceNum;
fid->udf.parent_generation = inode->i_generation;
*lenp = 5;
type = FILEID_UDF_WITH_PARENT;
}
return type;
}
const struct export_operations udf_export_ops = {
.encode_fh = udf_encode_fh,
.fh_to_dentry = udf_fh_to_dentry,
.fh_to_parent = udf_fh_to_parent,
.get_parent = udf_get_parent,
};
const struct inode_operations udf_dir_inode_operations = {
.lookup = udf_lookup,
.create = udf_create,
.link = udf_link,
.unlink = udf_unlink,
.symlink = udf_symlink,
.mkdir = udf_mkdir,
.rmdir = udf_rmdir,
.mknod = udf_mknod,
.rename = udf_rename,
.tmpfile = udf_tmpfile,
};
const struct inode_operations udf_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
};
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
/*
* Handle hardware traps and faults.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
asmlinkage int system_call(void);
#endif
/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
static inline void conditional_sti(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
static inline void preempt_conditional_sti(struct pt_regs *regs)
{
preempt_count_inc();
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
static inline void conditional_cli(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_disable();
}
static inline void preempt_conditional_cli(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_disable();
preempt_count_dec();
}
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
if (regs->flags & X86_VM_MASK) {
/*
* Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
* On nmi (interrupt 2), do_trap should not be called.
*/
if (trapnr < X86_TRAP_UD) {
if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
error_code, trapnr))
return 0;
}
return -1;
}
#endif
if (!user_mode(regs)) {
if (!fixup_exception(regs)) {
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
die(str, regs, error_code);
}
return 0;
}
return -1;
}
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
siginfo_t *info)
{
unsigned long siaddr;
int sicode;
switch (trapnr) {
default:
return SEND_SIG_PRIV;
case X86_TRAP_DE:
sicode = FPE_INTDIV;
siaddr = uprobe_get_trap_addr(regs);
break;
case X86_TRAP_UD:
sicode = ILL_ILLOPN;
siaddr = uprobe_get_trap_addr(regs);
break;
case X86_TRAP_AC:
sicode = BUS_ADRALN;
siaddr = 0;
break;
}
info->si_signo = signr;
info->si_errno = 0;
info->si_code = sicode;
info->si_addr = (void __user *)siaddr;
return info;
}
static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
long error_code, siginfo_t *info)
{
struct task_struct *tsk = current;
if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
return;
/*
* We want error_code and trap_nr set for userspace faults and
* kernelspace faults which result in die(), but not
* kernelspace faults which are fixed up. die() gives the
* process no chance to handle the signal and notice the
* kernel fault information, so that won't result in polluting
* the information about previously queued, but not yet
* delivered, faults. See also do_general_protection below.
*/
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
#ifdef CONFIG_X86_64
if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
printk_ratelimit()) {
pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
tsk->comm, tsk->pid, str,
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
pr_cont("\n");
}
#endif
force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
unsigned long trapnr, int signr)
{
enum ctx_state prev_state = exception_enter();
siginfo_t info;
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) {
conditional_sti(regs);
do_trap(trapnr, signr, str, regs, error_code,
fill_trap_info(regs, signr, trapnr, &info));
}
exception_exit(prev_state);
}
#define DO_ERROR(trapnr, signr, str, name) \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
do_error_trap(regs, error_code, str, trapnr, signr); \
}
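/*
 * For instance, the first invocation below expands to a handler named
 * do_divide_error() that reports SIGFPE for trap X86_TRAP_DE.
 */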
DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)
#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
static const char str[] = "double fault";
struct task_struct *tsk = current;
#ifdef CONFIG_X86_ESPFIX64
extern unsigned char native_irq_return_iret[];
/*
* If IRET takes a non-IST fault on the espfix64 stack, then we
* end up promoting it to a doublefault. In that case, modify
* the stack to make it look like we just entered the #GP
* handler from user space, similar to bad_iret.
*/
if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
regs->cs == __KERNEL_CS &&
regs->ip == (unsigned long)native_irq_return_iret)
{
struct pt_regs *normal_regs = task_pt_regs(current);
/* Fake a #GP(0) from userspace. */
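		/* The hardware iret frame is five 8-byte words - RIP,
		 * CS, RFLAGS, RSP and SS - hence the 5*8 below. */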
memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
regs->ip = (unsigned long)general_protection;
regs->sp = (unsigned long)&normal_regs->orig_ax;
return;
}
#endif
exception_enter();
	/* Return not checked because a double fault cannot be ignored */
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_DF;
#ifdef CONFIG_DOUBLEFAULT
df_debug(regs, error_code);
#endif
/*
* This is always a kernel trap and never fixable (and thus must
* never return).
*/
for (;;)
die(str, regs, error_code);
}
#endif
dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
struct task_struct *tsk;
enum ctx_state prev_state;
prev_state = exception_enter();
conditional_sti(regs);
#ifdef CONFIG_X86_32
if (regs->flags & X86_VM_MASK) {
local_irq_enable();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
goto exit;
}
#endif
tsk = current;
if (!user_mode(regs)) {
if (fixup_exception(regs))
goto exit;
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
die("general protection fault", regs, error_code);
goto exit;
}
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) {
pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
tsk->comm, task_pid_nr(tsk),
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
pr_cont("\n");
}
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);
/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
enum ctx_state prev_state;
#ifdef CONFIG_DYNAMIC_FTRACE
/*
* ftrace must be first, everything else may cause a recursive crash.
* See note by declaration of modifying_ftrace_code in ftrace.c
*/
if (unlikely(atomic_read(&modifying_ftrace_code)) &&
ftrace_int3_handler(regs))
return;
#endif
if (poke_int3_handler(regs))
return;
prev_state = exception_enter();
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
SIGTRAP) == NOTIFY_STOP)
goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
if (kprobe_int3_handler(regs))
goto exit;
#endif
if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
SIGTRAP) == NOTIFY_STOP)
goto exit;
/*
* Let others (NMI) know that the debug stack is in use
* as we may switch to the interrupt stack.
*/
debug_stack_usage_inc();
preempt_conditional_sti(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
preempt_conditional_cli(regs);
debug_stack_usage_dec();
exit:
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_int3);
#ifdef CONFIG_X86_64
/*
* Help handler running on IST stack to switch back to user stack
* for scheduling or signal handling. The actual stack switch is done in
* entry.S
*/
asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = eregs;
	/* Already synced */
if (eregs == (struct pt_regs *)eregs->sp)
;
/* Exception from user space */
else if (user_mode(eregs))
regs = task_pt_regs(current);
/*
* Exception from kernel and interrupts are enabled. Move to
* kernel process stack.
*/
else if (eregs->flags & X86_EFLAGS_IF)
regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
if (eregs != regs)
*regs = *eregs;
return regs;
}
NOKPROBE_SYMBOL(sync_regs);
#endif
/*
* Our handling of the processor debug registers is non-trivial.
* We do not clear them on entry and exit from the kernel. Therefore
* it is possible to get a watchpoint trap here from inside the kernel.
* However, the code in ./ptrace.c has ensured that the user can
* only set watchpoints on userspace addresses. Therefore the in-kernel
* watchpoint trap can only occur in code which is reading/writing
* from user space. Such code must not hold kernel locks (since it
* can equally take a page fault), therefore it is safe to call
* force_sig_info even though that claims and releases locks.
*
* Code in ./signal.c ensures that the debug control register
* is restored before we deliver any signal, and therefore that
* user code runs with the correct debug control register even though
* we clear it here.
*
* Being careful here means that we don't have to be as careful in a
* lot of more complicated places (task switching can be a bit lazy
* about restoring all the debug state, and ptrace doesn't have to
* find every occurrence of the TF bit that could be saved away even
* by user code)
*
* May run on IST stack.
*/
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
struct task_struct *tsk = current;
enum ctx_state prev_state;
int user_icebp = 0;
unsigned long dr6;
int si_code;
prev_state = exception_enter();
get_debugreg(dr6, 6);
/* Filter out all the reserved bits which are preset to 1 */
dr6 &= ~DR6_RESERVED;
/*
	 * If dr6 gives us no clue about the origin of this trap,
* then it's very likely the result of an icebp/int01 trap.
* User wants a sigtrap for that.
*/
if (!dr6 && user_mode(regs))
user_icebp = 1;
/* Catch kmemcheck conditions first of all! */
if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
goto exit;
/* DR6 may or may not be cleared by the CPU */
set_debugreg(0, 6);
/*
* The processor cleared BTF, so don't mark that we need it set.
*/
clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
#ifdef CONFIG_KPROBES
if (kprobe_debug_handler(regs))
goto exit;
#endif
if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
SIGTRAP) == NOTIFY_STOP)
goto exit;
/*
* Let others (NMI) know that the debug stack is in use
* as we may switch to the interrupt stack.
*/
debug_stack_usage_inc();
/* It's safe to allow irq's after DR6 has been saved */
preempt_conditional_sti(regs);
if (regs->flags & X86_VM_MASK) {
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
X86_TRAP_DB);
preempt_conditional_cli(regs);
debug_stack_usage_dec();
goto exit;
}
/*
* Single-stepping through system calls: ignore any exceptions in
* kernel space, but re-enable TF when returning to user mode.
*
* We already checked v86 mode above, so we can check for kernel mode
* by just checking the CPL of CS.
*/
if ((dr6 & DR_STEP) && !user_mode(regs)) {
tsk->thread.debugreg6 &= ~DR_STEP;
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
regs->flags &= ~X86_EFLAGS_TF;
}
si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
send_sigtrap(tsk, regs, error_code, si_code);
preempt_conditional_cli(regs);
debug_stack_usage_dec();
exit:
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_debug);
/*
* Note that we play around with the 'TS' bit in an attempt to get
* the correct behaviour even in the presence of the asynchronous
* IRQ13 behaviour
*/
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
struct task_struct *task = current;
siginfo_t info;
unsigned short err;
char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
"simd exception";
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
return;
conditional_sti(regs);
if (!user_mode_vm(regs))
{
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
task->thread.trap_nr = trapnr;
die(str, regs, error_code);
}
return;
}
/*
* Save the info for the exception handler and clear the error.
*/
save_init_fpu(task);
task->thread.trap_nr = trapnr;
task->thread.error_code = error_code;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
if (trapnr == X86_TRAP_MF) {
unsigned short cwd, swd;
/*
* (~cwd & swd) will mask out exceptions that are not set to unmasked
* status. 0x3f is the exception bits in these regs, 0x200 is the
* C1 reg you need in case of a stack fault, 0x040 is the stack
* fault bit. We should only be taking one exception at a time,
* so if this combination doesn't produce any single exception,
* then we have a bad program that isn't synchronizing its FPU usage
* and it will suffer the consequences since we won't be able to
* fully reproduce the context of the exception
*/
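		/*
		 * Illustrative example: cwd == 0x037b (the default
		 * 0x037f with the zero-divide mask, bit 2, cleared) and
		 * swd == 0x0004 (ZE set) give err == 0x0004, which is
		 * reported as FPE_FLTDIV below.
		 */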
cwd = get_fpu_cwd(task);
swd = get_fpu_swd(task);
err = swd & ~cwd;
} else {
/*
* The SIMD FPU exceptions are handled a little differently, as there
* is only a single status/control register. Thus, to determine which
* unmasked exception was caught we must mask the exception mask bits
* at 0x1f80, and then use these to mask the exception bits at 0x3f.
*/
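		/*
		 * Illustrative example: the default MXCSR is 0x1f80
		 * (everything masked). Clearing the zero-divide mask
		 * (bit 9) and raising the ZE flag (bit 2) gives mxcsr ==
		 * 0x1d84 and err == ~0x3b & 0x1d84, whose only low-order
		 * bit is 0x004 - reported as FPE_FLTDIV below.
		 */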
unsigned short mxcsr = get_fpu_mxcsr(task);
err = ~(mxcsr >> 7) & mxcsr;
}
if (err & 0x001) { /* Invalid op */
/*
* swd & 0x240 == 0x040: Stack Underflow
* swd & 0x240 == 0x240: Stack Overflow
* User must clear the SF bit (0x40) if set
*/
info.si_code = FPE_FLTINV;
} else if (err & 0x004) { /* Divide by Zero */
info.si_code = FPE_FLTDIV;
} else if (err & 0x008) { /* Overflow */
info.si_code = FPE_FLTOVF;
} else if (err & 0x012) { /* Denormal, Underflow */
info.si_code = FPE_FLTUND;
} else if (err & 0x020) { /* Precision */
info.si_code = FPE_FLTRES;
} else {
/*
* If we're using IRQ 13, or supposedly even some trap
* X86_TRAP_MF implementations, it's possible
* we get a spurious trap, which is not an error.
*/
return;
}
force_sig_info(SIGFPE, &info, task);
}
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
enum ctx_state prev_state;
prev_state = exception_enter();
math_error(regs, error_code, X86_TRAP_MF);
exception_exit(prev_state);
}
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
enum ctx_state prev_state;
prev_state = exception_enter();
math_error(regs, error_code, X86_TRAP_XF);
exception_exit(prev_state);
}
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
conditional_sti(regs);
#if 0
/* No need to warn about this any longer. */
pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
/*
* 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task
*
* Careful.. There are problems with IBM-designed IRQ13 behaviour.
* Don't touch unless you *really* know how it works.
*
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
*/
void math_state_restore(void)
{
struct task_struct *tsk = current;
if (!tsk_used_math(tsk)) {
local_irq_enable();
/*
* does a slab alloc which can sleep
*/
if (init_fpu(tsk)) {
/*
* ran out of memory!
*/
do_group_exit(SIGKILL);
return;
}
local_irq_disable();
}
__thread_fpu_begin(tsk);
/*
	 * Paranoid restore: send a SIGSEGV if we fail to restore the state.
*/
if (unlikely(restore_fpu_checking(tsk))) {
drop_init_fpu(tsk);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
return;
}
tsk->thread.fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
enum ctx_state prev_state;
prev_state = exception_enter();
BUG_ON(use_eager_fpu());
#ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) {
struct math_emu_info info = { };
conditional_sti(regs);
info.regs = regs;
math_emulate(&info);
exception_exit(prev_state);
return;
}
#endif
math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
conditional_sti(regs);
#endif
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
siginfo_t info;
enum ctx_state prev_state;
prev_state = exception_enter();
local_irq_enable();
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_BADSTK;
info.si_addr = NULL;
if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
&info);
}
exception_exit(prev_state);
}
#endif
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
/* int3 can be called from all */
set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
#ifdef CONFIG_X86_32
set_intr_gate(X86_TRAP_PF, page_fault);
#endif
load_idt(&idt_descr);
}
void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}
void __init trap_init(void)
{
int i;
#ifdef CONFIG_EISA
void __iomem *p = early_ioremap(0x0FFFD9, 4);
if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
EISA_bus = 1;
early_iounmap(p, 4);
#endif
set_intr_gate(X86_TRAP_DE, divide_error);
set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
/* int4 can be called from all */
set_system_intr_gate(X86_TRAP_OF, &overflow);
set_intr_gate(X86_TRAP_BR, bounds);
set_intr_gate(X86_TRAP_UD, invalid_op);
set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
set_intr_gate(X86_TRAP_TS, invalid_TSS);
set_intr_gate(X86_TRAP_NP, segment_not_present);
set_intr_gate(X86_TRAP_SS, stack_segment);
set_intr_gate(X86_TRAP_GP, general_protection);
set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
set_intr_gate(X86_TRAP_MF, coprocessor_error);
set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);
/* Reserve all the builtin and the syscall vector: */
for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
set_bit(i, used_vectors);
#ifdef CONFIG_IA32_EMULATION
set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif
#ifdef CONFIG_X86_32
set_system_trap_gate(SYSCALL_VECTOR, &system_call);
set_bit(SYSCALL_VECTOR, used_vectors);
#endif
/*
* Set the IDT descriptor to a fixed read-only location, so that the
* "sidt" instruction will not leak the location of the kernel, and
* to defend the IDT against arbitrary memory write vulnerabilities.
* It will be reloaded in cpu_init() */
__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
idt_descr.address = fix_to_virt(FIX_RO_IDT);
/*
* Should be a barrier for any external CPU state:
*/
cpu_init();
x86_init.irqs.trap_init();
#ifdef CONFIG_X86_64
memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
set_nmi_gate(X86_TRAP_DB, &debug);
set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}
/*
* linux/fs/pipe.c
*
* Copyright (C) 1991, 1992, 1999 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include "internal.h"
/*
* The max size that a non-root user is allowed to grow the pipe. Can
* be set by root in /proc/sys/fs/pipe-max-size
*/
unsigned int pipe_max_size = 1048576;
/*
* Minimum pipe size, as required by POSIX
*/
unsigned int pipe_min_size = PAGE_SIZE;
/*
* We use a start+len construction, which provides full use of the
* allocated memory.
* -- Florian Coosmann (FGC)
*
* Reads with count = 0 should always return 0.
* -- Julian Bradfield 1999-06-07.
*
* FIFOs and Pipes now generate SIGIO for both readers and writers.
* -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
*
* pipe_read & write cleanup
* -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
*/
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
if (pipe->files)
mutex_lock_nested(&pipe->mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
/*
* pipe_lock() nests non-pipe inode locks (for writing to a file)
*/
pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
if (pipe->files)
mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}
static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
mutex_unlock(&pipe->mutex);
}
void pipe_double_lock(struct pipe_inode_info *pipe1,
struct pipe_inode_info *pipe2)
{
BUG_ON(pipe1 == pipe2);
if (pipe1 < pipe2) {
pipe_lock_nested(pipe1, I_MUTEX_PARENT);
pipe_lock_nested(pipe2, I_MUTEX_CHILD);
} else {
pipe_lock_nested(pipe2, I_MUTEX_PARENT);
pipe_lock_nested(pipe1, I_MUTEX_CHILD);
}
}
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
DEFINE_WAIT(wait);
/*
* Pipes are system-local resources, so sleeping on them
* is considered a noninteractive wait:
*/
prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
pipe_unlock(pipe);
schedule();
finish_wait(&pipe->wait, &wait);
pipe_lock(pipe);
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
/*
* If nobody else uses this page, and we don't already have a
* temporary page, let's keep track of it as a one-deep
* allocation cache. (Otherwise just release our reference to it)
*/
if (page_count(page) == 1 && !pipe->tmp_page)
pipe->tmp_page = page;
else
page_cache_release(page);
}
/**
* generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to attempt to steal
*
* Description:
* This function attempts to steal the &struct page attached to
* @buf. If successful, this function returns 0 and returns with
* the page locked. The caller may then reuse the page for whatever
* he wishes; the typical use is insertion into a different file
* page cache.
*/
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
/*
* A reference of one is golden, that means that the owner of this
* page is the only one holding a reference to it. lock the page
* and return OK.
*/
if (page_count(page) == 1) {
lock_page(page);
return 0;
}
return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);
/**
* generic_pipe_buf_get - get a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
*
* Description:
 * This function grabs an extra reference to @buf. It's used in
 * the tee() system call, when we duplicate the buffers in one
* pipe into another.
*/
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
* generic_pipe_buf_confirm - verify contents of the pipe buffer
* @info: the pipe that the buffer belongs to
* @buf: the buffer to confirm
*
* Description:
* This function does nothing, because the generic pipe code uses
* pages that are always good when inserted into the pipe.
*/
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
struct pipe_buffer *buf)
{
return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);
/**
* generic_pipe_buf_release - put a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to put a reference to
*
* Description:
* This function releases a reference to @buf.
*/
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
.can_merge = 1,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static const struct pipe_buf_operations packet_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
size_t total_len = iov_iter_count(to);
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
int do_wakeup;
ssize_t ret;
/* Null read succeeds. */
if (unlikely(total_len == 0))
return 0;
do_wakeup = 0;
ret = 0;
__pipe_lock(pipe);
for (;;) {
int bufs = pipe->nrbufs;
if (bufs) {
int curbuf = pipe->curbuf;
struct pipe_buffer *buf = pipe->bufs + curbuf;
const struct pipe_buf_operations *ops = buf->ops;
size_t chars = buf->len;
size_t written;
int error;
if (chars > total_len)
chars = total_len;
error = ops->confirm(pipe, buf);
if (error) {
if (!ret)
ret = error;
break;
}
written = copy_page_to_iter(buf->page, buf->offset, chars, to);
if (unlikely(written < chars)) {
if (!ret)
ret = -EFAULT;
break;
}
ret += chars;
buf->offset += chars;
buf->len -= chars;
/* Was it a packet buffer? Clean up and exit */
if (buf->flags & PIPE_BUF_FLAG_PACKET) {
total_len = chars;
buf->len = 0;
}
if (!buf->len) {
buf->ops = NULL;
ops->release(pipe, buf);
curbuf = (curbuf + 1) & (pipe->buffers - 1);
pipe->curbuf = curbuf;
pipe->nrbufs = --bufs;
do_wakeup = 1;
}
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
}
if (bufs) /* More to do? */
continue;
if (!pipe->writers)
break;
if (!pipe->waiting_writers) {
/* syscall merging: Usually we must not sleep
* if O_NONBLOCK is set, or if we got some data.
* But if a writer sleeps in kernel space, then
* we can wait for that data without violating POSIX.
*/
if (ret)
break;
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
}
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
pipe_wait(pipe);
}
__pipe_unlock(pipe);
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
file_accessed(filp);
return ret;
}
static inline int is_packetized(struct file *file)
{
return (file->f_flags & O_DIRECT) != 0;
}
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
ssize_t ret = 0;
int do_wakeup = 0;
size_t total_len = iov_iter_count(from);
ssize_t chars;
/* Null write succeeds. */
if (unlikely(total_len == 0))
return 0;
__pipe_lock(pipe);
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
goto out;
}
/* We try to merge small writes */
chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
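	/* e.g. (illustrative, 4 KiB pages): a 5000-byte write leaves
	 * chars == 5000 & 4095 == 904 to merge into the last buffer */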
if (pipe->nrbufs && chars != 0) {
int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
(pipe->buffers - 1);
struct pipe_buffer *buf = pipe->bufs + lastbuf;
const struct pipe_buf_operations *ops = buf->ops;
int offset = buf->offset + buf->len;
if (ops->can_merge && offset + chars <= PAGE_SIZE) {
int error = ops->confirm(pipe, buf);
if (error)
goto out;
ret = copy_page_from_iter(buf->page, offset, chars, from);
if (unlikely(ret < chars)) {
error = -EFAULT;
goto out;
}
do_wakeup = 1;
buf->len += chars;
ret = chars;
if (!iov_iter_count(from))
goto out;
}
}
for (;;) {
int bufs;
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
bufs = pipe->nrbufs;
if (bufs < pipe->buffers) {
int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
struct pipe_buffer *buf = pipe->bufs + newbuf;
struct page *page = pipe->tmp_page;
int copied;
if (!page) {
page = alloc_page(GFP_HIGHUSER);
if (unlikely(!page)) {
ret = ret ? : -ENOMEM;
break;
}
pipe->tmp_page = page;
}
/* Always wake up, even if the copy fails. Otherwise
* we lock up (O_NONBLOCK-)readers that sleep due to
* syscall merging.
* FIXME! Is this really true?
*/
do_wakeup = 1;
copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
if (!ret)
ret = -EFAULT;
break;
}
ret += copied;
/* Insert it into the buffer array */
buf->page = page;
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
buf->len = copied;
buf->flags = 0;
if (is_packetized(filp)) {
buf->ops = &packet_pipe_buf_ops;
buf->flags = PIPE_BUF_FLAG_PACKET;
}
pipe->nrbufs = ++bufs;
pipe->tmp_page = NULL;
if (!iov_iter_count(from))
break;
}
if (bufs < pipe->buffers)
continue;
if (filp->f_flags & O_NONBLOCK) {
if (!ret)
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
out:
__pipe_unlock(pipe);
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
int err = file_update_time(filp);
if (err)
ret = err;
sb_end_write(file_inode(filp)->i_sb);
}
return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe = filp->private_data;
int count, buf, nrbufs;
switch (cmd) {
case FIONREAD:
__pipe_lock(pipe);
count = 0;
buf = pipe->curbuf;
nrbufs = pipe->nrbufs;
while (--nrbufs >= 0) {
count += pipe->bufs[buf].len;
buf = (buf+1) & (pipe->buffers - 1);
}
__pipe_unlock(pipe);
return put_user(count, (int __user *)arg);
default:
return -ENOIOCTLCMD;
}
}
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
unsigned int mask;
struct pipe_inode_info *pipe = filp->private_data;
int nrbufs;
poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= POLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
/*
* Most Unices do not set POLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
*/
if (!pipe->readers)
mask |= POLLERR;
}
return mask;
}
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
int kill = 0;
spin_lock(&inode->i_lock);
if (!--pipe->files) {
inode->i_pipe = NULL;
kill = 1;
}
spin_unlock(&inode->i_lock);
if (kill)
free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
struct pipe_inode_info *pipe = file->private_data;
__pipe_lock(pipe);
if (file->f_mode & FMODE_READ)
pipe->readers--;
if (file->f_mode & FMODE_WRITE)
pipe->writers--;
if (pipe->readers || pipe->writers) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
__pipe_unlock(pipe);
put_pipe_info(inode, pipe);
return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
struct pipe_inode_info *pipe = filp->private_data;
int retval = 0;
__pipe_lock(pipe);
if (filp->f_mode & FMODE_READ)
retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on is nonzero */
fasync_helper(-1, filp, 0, &pipe->fasync_readers);
}
__pipe_unlock(pipe);
return retval;
}
struct pipe_inode_info *alloc_pipe_info(void)
{
struct pipe_inode_info *pipe;
pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
if (pipe) {
pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
if (pipe->bufs) {
init_waitqueue_head(&pipe->wait);
pipe->r_counter = pipe->w_counter = 1;
pipe->buffers = PIPE_DEF_BUFFERS;
mutex_init(&pipe->mutex);
return pipe;
}
kfree(pipe);
}
return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
int i;
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
buf->ops->release(pipe, buf);
}
if (pipe->tmp_page)
__free_page(pipe->tmp_page);
kfree(pipe->bufs);
kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;
/*
* pipefs_dname() is called from d_path().
*/
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
dentry->d_inode->i_ino);
}
static const struct dentry_operations pipefs_dentry_operations = {
.d_dname = pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
struct pipe_inode_info *pipe;
if (!inode)
goto fail_inode;
inode->i_ino = get_next_ino();
pipe = alloc_pipe_info();
if (!pipe)
goto fail_iput;
inode->i_pipe = pipe;
pipe->files = 2;
pipe->readers = pipe->writers = 1;
inode->i_fop = &pipefifo_fops;
/*
* Mark the inode dirty from the very beginning,
* that way it will never be moved to the dirty
* list because "mark_inode_dirty()" will think
* that it already _is_ on the dirty list.
*/
inode->i_state = I_DIRTY;
inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
return inode;
fail_iput:
iput(inode);
fail_inode:
return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
int err;
struct inode *inode = get_pipe_inode();
struct file *f;
struct path path;
static struct qstr name = { .name = "" };
if (!inode)
return -ENFILE;
err = -ENOMEM;
path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
if (!path.dentry)
goto err_inode;
path.mnt = mntget(pipe_mnt);
d_instantiate(path.dentry, inode);
err = -ENFILE;
f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
if (IS_ERR(f))
goto err_dentry;
f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
f->private_data = inode->i_pipe;
res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
if (IS_ERR(res[0]))
goto err_file;
path_get(&path);
res[0]->private_data = inode->i_pipe;
res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
res[1] = f;
return 0;
err_file:
put_filp(f);
err_dentry:
free_pipe_info(inode->i_pipe);
path_put(&path);
return err;
err_inode:
free_pipe_info(inode->i_pipe);
iput(inode);
return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
int error;
int fdw, fdr;
if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
return -EINVAL;
error = create_pipe_files(files, flags);
if (error)
return error;
error = get_unused_fd_flags(flags);
if (error < 0)
goto err_read_pipe;
fdr = error;
error = get_unused_fd_flags(flags);
if (error < 0)
goto err_fdr;
fdw = error;
audit_fd_pair(fdr, fdw);
fd[0] = fdr;
fd[1] = fdw;
return 0;
err_fdr:
put_unused_fd(fdr);
err_read_pipe:
fput(files[0]);
fput(files[1]);
return error;
}
int do_pipe_flags(int *fd, int flags)
{
struct file *files[2];
int error = __do_pipe_flags(fd, files, flags);
if (!error) {
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
}
return error;
}
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
struct file *files[2];
int fd[2];
int error;
error = __do_pipe_flags(fd, files, flags);
if (!error) {
if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
fput(files[0]);
fput(files[1]);
put_unused_fd(fd[0]);
put_unused_fd(fd[1]);
error = -EFAULT;
} else {
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
}
}
return error;
}
SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
return sys_pipe2(fildes, 0);
}
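/*
 * Illustrative userspace usage (not part of this file): fds[0] is the
 * read end, fds[1] the write end.
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		write(fds[1], "x", 1);
 *		...
 *	}
 */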
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
int cur = *cnt;
while (cur == *cnt) {
pipe_wait(pipe);
if (signal_pending(current))
break;
}
return cur == *cnt ? -ERESTARTSYS : 0;
}
static void wake_up_partner(struct pipe_inode_info *pipe)
{
wake_up_interruptible(&pipe->wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
struct pipe_inode_info *pipe;
bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
int ret;
filp->f_version = 0;
spin_lock(&inode->i_lock);
if (inode->i_pipe) {
pipe = inode->i_pipe;
pipe->files++;
spin_unlock(&inode->i_lock);
} else {
spin_unlock(&inode->i_lock);
pipe = alloc_pipe_info();
if (!pipe)
return -ENOMEM;
pipe->files = 1;
spin_lock(&inode->i_lock);
if (unlikely(inode->i_pipe)) {
inode->i_pipe->files++;
spin_unlock(&inode->i_lock);
free_pipe_info(pipe);
pipe = inode->i_pipe;
} else {
inode->i_pipe = pipe;
spin_unlock(&inode->i_lock);
}
}
filp->private_data = pipe;
/* OK, we have a pipe and it's pinned down */
__pipe_lock(pipe);
/* We can only do regular read/write on fifos */
filp->f_mode &= (FMODE_READ | FMODE_WRITE);
switch (filp->f_mode) {
case FMODE_READ:
/*
* O_RDONLY
* POSIX.1 says that O_NONBLOCK means return with the FIFO
* opened, even when there is no process writing the FIFO.
*/
pipe->r_counter++;
if (pipe->readers++ == 0)
wake_up_partner(pipe);
if (!is_pipe && !pipe->writers) {
if ((filp->f_flags & O_NONBLOCK)) {
/* suppress POLLHUP until we have
* seen a writer */
filp->f_version = pipe->w_counter;
} else {
if (wait_for_partner(pipe, &pipe->w_counter))
goto err_rd;
}
}
break;
case FMODE_WRITE:
/*
* O_WRONLY
* POSIX.1 says that O_NONBLOCK means return -1 with
* errno=ENXIO when there is no process reading the FIFO.
*/
ret = -ENXIO;
if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
goto err;
pipe->w_counter++;
if (!pipe->writers++)
wake_up_partner(pipe);
if (!is_pipe && !pipe->readers) {
if (wait_for_partner(pipe, &pipe->r_counter))
goto err_wr;
}
break;
case FMODE_READ | FMODE_WRITE:
/*
* O_RDWR
* POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
* This implementation will NEVER block on a O_RDWR open, since
* the process can at least talk to itself.
*/
pipe->readers++;
pipe->writers++;
pipe->r_counter++;
pipe->w_counter++;
if (pipe->readers == 1 || pipe->writers == 1)
wake_up_partner(pipe);
break;
default:
ret = -EINVAL;
goto err;
}
/* Ok! */
__pipe_unlock(pipe);
return 0;
err_rd:
if (!--pipe->readers)
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
err_wr:
if (!--pipe->writers)
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
err:
__pipe_unlock(pipe);
put_pipe_info(inode, pipe);
return ret;
}
const struct file_operations pipefifo_fops = {
.open = fifo_open,
.llseek = no_llseek,
.read = new_sync_read,
.read_iter = pipe_read,
.write = new_sync_write,
.write_iter = pipe_write,
.poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
};
/*
* Allocate a new array of pipe buffers and copy the info over. Returns the
* pipe size if successful, or return -ERROR on error.
*/
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
struct pipe_buffer *bufs;
/*
* We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
* expect a lot of shrink+grow operations, just free and allocate
* again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
if (nr_pages < pipe->nrbufs)
return -EBUSY;
bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!bufs))
return -ENOMEM;
/*
* The pipe array wraps around, so just start the new one at zero
* and adjust the indexes.
*/
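	/*
	 * Illustrative example: with pipe->buffers == 8, curbuf == 6
	 * and nrbufs == 4, the occupied slots are 6, 7, 0, 1; "tail"
	 * wraps to 2 and "head" is 2, so slots 6..7 are copied first,
	 * followed by slots 0..1.
	 */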
if (pipe->nrbufs) {
unsigned int tail;
unsigned int head;
tail = pipe->curbuf + pipe->nrbufs;
if (tail < pipe->buffers)
tail = 0;
else
tail &= (pipe->buffers - 1);
head = pipe->nrbufs - tail;
if (head)
memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
if (tail)
memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
}
pipe->curbuf = 0;
kfree(pipe->bufs);
pipe->bufs = bufs;
pipe->buffers = nr_pages;
return nr_pages * PAGE_SIZE;
}
/*
* Currently we rely on the pipe array holding a power-of-2 number
* of pages.
*/
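/*
 * Illustrative example (4 KiB pages): a 5000-byte request spans two
 * pages and rounds to 8192 bytes; a 12288-byte (three-page) request
 * rounds up to the next power of two, 16384 bytes.
 */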
static inline unsigned int round_pipe_size(unsigned int size)
{
unsigned long nr_pages;
nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
/*
* This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
* will return an error.
*/
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
if (ret < 0 || !write)
return ret;
pipe_max_size = round_pipe_size(pipe_max_size);
return ret;
}
/*
* After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
* location, so checking ->i_pipe is not enough to verify that this is a
* pipe.
*/
struct pipe_inode_info *get_pipe_info(struct file *file)
{
return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}
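/*
 * Illustrative userspace usage (not part of this file):
 *
 *	fcntl(pipefd, F_SETPIPE_SZ, 1048576);
 *	long sz = fcntl(pipefd, F_GETPIPE_SZ);
 */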
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe;
long ret;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
__pipe_lock(pipe);
switch (cmd) {
case F_SETPIPE_SZ: {
unsigned int size, nr_pages;
size = round_pipe_size(arg);
nr_pages = size >> PAGE_SHIFT;
ret = -EINVAL;
if (!nr_pages)
goto out;
if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
ret = -EPERM;
goto out;
}
ret = pipe_set_size(pipe, nr_pages);
break;
}
case F_GETPIPE_SZ:
ret = pipe->buffers * PAGE_SIZE;
break;
default:
ret = -EINVAL;
break;
}
out:
__pipe_unlock(pipe);
return ret;
}
static const struct super_operations pipefs_ops = {
.destroy_inode = free_inode_nonrcu,
.statfs = simple_statfs,
};
/*
 * pipefs should _never_ be mounted by userland - too much of a security hassle,
* no real gain from having the whole whorehouse mounted. So we don't need
* any operations on the root directory. However, we need a non-trivial
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
&pipefs_dentry_operations, PIPEFS_MAGIC);
}
static struct file_system_type pipe_fs_type = {
.name = "pipefs",
.mount = pipefs_mount,
.kill_sb = kill_anon_super,
};
static int __init init_pipe_fs(void)
{
int err = register_filesystem(&pipe_fs_type);
if (!err) {
pipe_mnt = kern_mount(&pipe_fs_type);
if (IS_ERR(pipe_mnt)) {
err = PTR_ERR(pipe_mnt);
unregister_filesystem(&pipe_fs_type);
}
}
return err;
}
fs_initcall(init_pipe_fs);
/*
* lxc: linux Container library
*
* (C) Copyright IBM Corp. 2007, 2008
*
* Authors:
* Daniel Lezcano <daniel.lezcano at free.fr>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define _GNU_SOURCE
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <grp.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/unistd.h>
#include <pwd.h>
#if !HAVE_DECL_PR_CAPBSET_DROP
#define PR_CAPBSET_DROP 24
#endif
#include "namespace.h"
#include "log.h"
#include "attach.h"
#include "caps.h"
#include "config.h"
#include "utils.h"
#include "commands.h"
#include "cgroup.h"
#include "lxclock.h"
#include "conf.h"
#include "lxcseccomp.h"
#include <lxc/lxccontainer.h>
#include "lsm/lsm.h"
#include "confile.h"
#if HAVE_SYS_PERSONALITY_H
#include <sys/personality.h>
#endif
#ifndef SOCK_CLOEXEC
# define SOCK_CLOEXEC 02000000
#endif
#ifndef MS_REC
#define MS_REC 16384
#endif
#ifndef MS_SLAVE
#define MS_SLAVE (1<<19)
#endif
lxc_log_define(lxc_attach, lxc);
int lsm_set_label_at(int procfd, int on_exec, char* lsm_label) {
int labelfd = -1;
int ret = 0;
const char* name;
char* command = NULL;
name = lsm_name();
if (strcmp(name, "nop") == 0)
goto out;
if (strcmp(name, "none") == 0)
goto out;
/* We don't support on-exec with AppArmor */
if (strcmp(name, "AppArmor") == 0)
on_exec = 0;
if (on_exec) {
labelfd = openat(procfd, "self/attr/exec", O_RDWR);
}
else {
labelfd = openat(procfd, "self/attr/current", O_RDWR);
}
if (labelfd < 0) {
SYSERROR("Unable to open LSM label");
ret = -1;
goto out;
}
if (strcmp(name, "AppArmor") == 0) {
int size;
command = malloc(strlen(lsm_label) + strlen("changeprofile ") + 1);
if (!command) {
SYSERROR("Failed to write apparmor profile");
ret = -1;
goto out;
}
size = sprintf(command, "changeprofile %s", lsm_label);
if (size < 0) {
SYSERROR("Failed to write apparmor profile");
ret = -1;
goto out;
}
if (write(labelfd, command, size + 1) < 0) {
SYSERROR("Unable to set LSM label");
ret = -1;
goto out;
}
}
else if (strcmp(name, "SELinux") == 0) {
if (write(labelfd, lsm_label, strlen(lsm_label) + 1) < 0) {
SYSERROR("Unable to set LSM label");
ret = -1;
goto out;
}
}
else {
ERROR("Unable to restore label for unknown LSM: %s", name);
ret = -1;
goto out;
}
out:
free(command);
if (labelfd != -1)
close(labelfd);
return ret;
}
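/*
 * Usage sketch for lsm_set_label_at(), mirroring what attach_child_main()
 * does further down in this file: the caller opens a /proc directory fd
 * before entering the container's namespaces, then applies the label
 * through it afterwards. The label string here is a placeholder, not a
 * real profile name.
 *
 * int procfd = open("/proc", O_DIRECTORY | O_RDONLY);
 * if (procfd >= 0) {
 *     // on_exec = 0: change the current label (self/attr/current)
 *     // rather than the exec-transition label (self/attr/exec)
 *     if (lsm_set_label_at(procfd, 0, "hypothetical-profile") < 0)
 *         ERROR("failed to apply LSM label");
 *     close(procfd);
 * }
 */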
static struct lxc_proc_context_info *lxc_proc_get_context_info(pid_t pid)
{
struct lxc_proc_context_info *info = calloc(1, sizeof(*info));
FILE *proc_file;
char proc_fn[MAXPATHLEN];
char *line = NULL;
size_t line_bufsz = 0;
int ret, found;
if (!info) {
SYSERROR("Could not allocate memory.");
return NULL;
}
/* read capabilities */
snprintf(proc_fn, MAXPATHLEN, "/proc/%d/status", pid);
proc_file = fopen(proc_fn, "r");
if (!proc_file) {
SYSERROR("Could not open %s", proc_fn);
goto out_error;
}
found = 0;
while (getline(&line, &line_bufsz, proc_file) != -1) {
ret = sscanf(line, "CapBnd: %llx", &info->capability_mask);
if (ret != EOF && ret > 0) {
found = 1;
break;
}
}
free(line);
fclose(proc_file);
if (!found) {
SYSERROR("Could not read capability bounding set from %s", proc_fn);
errno = ENOENT;
goto out_error;
}
info->lsm_label = lsm_process_label_get(pid);
return info;
out_error:
free(info);
return NULL;
}
static void lxc_proc_put_context_info(struct lxc_proc_context_info *ctx)
{
free(ctx->lsm_label);
if (ctx->container)
lxc_container_put(ctx->container);
free(ctx);
}
static int lxc_attach_to_ns(pid_t pid, int which)
{
char path[MAXPATHLEN];
/* according to <http://article.gmane.org/gmane.linux.kernel.containers.lxc.devel/1429>,
* the file for user namespaces in /proc/$pid/ns will be called
* 'user' once the kernel supports it
*/
static char *ns[] = { "user", "mnt", "pid", "uts", "ipc", "net" };
static int flags[] = {
CLONE_NEWUSER, CLONE_NEWNS, CLONE_NEWPID, CLONE_NEWUTS, CLONE_NEWIPC,
CLONE_NEWNET
};
static const int size = sizeof(ns) / sizeof(char *);
int fd[size];
int i, j, saved_errno;
snprintf(path, MAXPATHLEN, "/proc/%d/ns", pid);
if (access(path, X_OK)) {
ERROR("Does this kernel version support 'attach' ?");
return -1;
}
for (i = 0; i < size; i++) {
/* ignore if we are not supposed to attach to that
* namespace
*/
if (which != -1 && !(which & flags[i])) {
fd[i] = -1;
continue;
}
snprintf(path, MAXPATHLEN, "/proc/%d/ns/%s", pid, ns[i]);
fd[i] = open(path, O_RDONLY | O_CLOEXEC);
if (fd[i] < 0) {
saved_errno = errno;
/* close all already opened file descriptors before
* we return an error, so we don't leak them
*/
for (j = 0; j < i; j++)
close(fd[j]);
errno = saved_errno;
SYSERROR("failed to open '%s'", path);
return -1;
}
}
for (i = 0; i < size; i++) {
if (fd[i] >= 0 && setns(fd[i], 0) != 0) {
saved_errno = errno;
for (j = i; j < size; j++)
close(fd[j]);
errno = saved_errno;
SYSERROR("failed to set namespace '%s'", ns[i]);
return -1;
}
close(fd[i]);
}
return 0;
}
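/*
 * For reference, a minimal sketch of joining a single namespace the same
 * way lxc_attach_to_ns() does it, here for the network namespace of pid
 * 1234 (the pid and the choice of namespace are illustrative, not taken
 * from this file):
 *
 * int fd = open("/proc/1234/ns/net", O_RDONLY | O_CLOEXEC);
 * if (fd >= 0) {
 *     // second argument 0: don't restrict which namespace type
 *     // the fd may refer to
 *     if (setns(fd, 0) < 0)
 *         SYSERROR("failed to set namespace");
 *     close(fd);
 * }
 */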
static int lxc_attach_remount_sys_proc(void)
{
int ret;
ret = unshare(CLONE_NEWNS);
if (ret < 0) {
SYSERROR("failed to unshare mount namespace");
return -1;
}
if (detect_shared_rootfs()) {
if (mount(NULL, "/", NULL, MS_SLAVE|MS_REC, NULL)) {
SYSERROR("Failed to make / rslave");
ERROR("Continuing...");
}
}
/* assume /proc is always mounted, so remount it */
ret = umount2("/proc", MNT_DETACH);
if (ret < 0) {
SYSERROR("failed to unmount /proc");
return -1;
}
ret = mount("none", "/proc", "proc", 0, NULL);
if (ret < 0) {
SYSERROR("failed to remount /proc");
return -1;
}
/* try to umount /sys - if it's not a mount point,
* we'll get EINVAL, then we ignore it because it
* may not have been mounted in the first place
*/
ret = umount2("/sys", MNT_DETACH);
if (ret < 0 && errno != EINVAL) {
SYSERROR("failed to unmount /sys");
return -1;
} else if (ret == 0) {
/* remount it */
ret = mount("none", "/sys", "sysfs", 0, NULL);
if (ret < 0) {
SYSERROR("failed to remount /sys");
return -1;
}
}
return 0;
}
static int lxc_attach_drop_privs(struct lxc_proc_context_info *ctx)
{
int last_cap = lxc_caps_last_cap();
int cap;
for (cap = 0; cap <= last_cap; cap++) {
if (ctx->capability_mask & (1LL << cap))
continue;
if (prctl(PR_CAPBSET_DROP, cap, 0, 0, 0)) {
SYSERROR("failed to remove capability id %d", cap);
return -1;
}
}
return 0;
}
static int lxc_attach_set_environment(enum lxc_attach_env_policy_t policy, char** extra_env, char** extra_keep)
{
if (policy == LXC_ATTACH_CLEAR_ENV) {
char **extra_keep_store = NULL;
int path_kept = 0;
if (extra_keep) {
size_t count, i;
for (count = 0; extra_keep[count]; count++);
extra_keep_store = calloc(count, sizeof(char *));
if (!extra_keep_store) {
SYSERROR("failed to allocate memory for storing current "
"environment variable values that will be kept");
return -1;
}
for (i = 0; i < count; i++) {
char *v = getenv(extra_keep[i]);
if (v) {
extra_keep_store[i] = strdup(v);
if (!extra_keep_store[i]) {
SYSERROR("failed to allocate memory for storing current "
"environment variable values that will be kept");
while (i > 0)
free(extra_keep_store[--i]);
free(extra_keep_store);
return -1;
}
if (strcmp(extra_keep[i], "PATH") == 0)
path_kept = 1;
}
/* calloc sets entire array to zero, so we don't
* need an else */
}
}
if (clearenv()) {
char **p;
SYSERROR("failed to clear environment");
if (extra_keep_store) {
for (p = extra_keep_store; *p; p++)
free(*p);
free(extra_keep_store);
}
return -1;
}
if (extra_keep_store) {
size_t i;
for (i = 0; extra_keep[i]; i++) {
if (extra_keep_store[i]) {
if (setenv(extra_keep[i], extra_keep_store[i], 1) < 0)
SYSERROR("Unable to set environment variable");
}
free(extra_keep_store[i]);
}
free(extra_keep_store);
}
/* always set a default path; shells and execlp tend
* to be fine without it, but there is a disturbing
* number of C programs out there that just assume
* that getenv("PATH") is never NULL and then die a
* painful segfault death. */
if (!path_kept)
setenv("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", 1);
}
if (putenv("container=lxc")) {
SYSERROR("failed to set environment variable");
return -1;
}
/* set extra environment variables */
if (extra_env) {
for (; *extra_env; extra_env++) {
/* duplicate the string, just to be on
* the safe side, because putenv does not
* do it for us */
char *p = strdup(*extra_env);
/* we just assume the user knows what they
* are doing, so we don't do any checks */
if (!p) {
SYSERROR("failed to allocate memory for additional environment "
"variables");
return -1;
}
putenv(p);
}
}
return 0;
}
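/*
 * Shape of the arguments expected above (values are illustrative): both
 * arrays are NULL-terminated lists of C strings, extra_env entries in
 * "KEY=value" form as required by putenv(), extra_keep entries bare
 * variable names looked up with getenv().
 *
 * char *extra_env[]  = { "DEBUG=1", "LANG=C", NULL };
 * char *extra_keep[] = { "TERM", "PATH", NULL };
 * lxc_attach_set_environment(LXC_ATTACH_CLEAR_ENV, extra_env, extra_keep);
 */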
static char *lxc_attach_getpwshell(uid_t uid)
{
/* local variables */
pid_t pid;
int pipes[2];
int ret;
int fd;
char *result = NULL;
/* we need to fork off a process that runs the
* getent program, and we need to capture its
* output, so we use a pipe for that purpose
*/
ret = pipe(pipes);
if (ret < 0)
return NULL;
pid = fork();
if (pid < 0) {
close(pipes[0]);
close(pipes[1]);
return NULL;
}
if (pid) {
/* parent process */
FILE *pipe_f;
char *line = NULL;
size_t line_bufsz = 0;
int found = 0;
int status;
close(pipes[1]);
pipe_f = fdopen(pipes[0], "r");
while (getline(&line, &line_bufsz, pipe_f) != -1) {
char *token;
char *saveptr = NULL;
long value;
char *endptr = NULL;
int i;
/* if we already found something, just continue
* to read until the pipe doesn't deliver any more
* data, but don't modify the existing data
* structure
*/
if (found)
continue;
/* trim line on the right hand side */
for (i = strlen(line); i > 0 && (line[i - 1] == '\n' || line[i - 1] == '\r'); --i)
line[i - 1] = '\0';
/* split into tokens: first user name */
token = strtok_r(line, ":", &saveptr);
if (!token)
continue;
/* next: dummy password field */
token = strtok_r(NULL, ":", &saveptr);
if (!token)
continue;
/* next: user id */
token = strtok_r(NULL, ":", &saveptr);
value = token ? strtol(token, &endptr, 10) : 0;
if (!token || !endptr || *endptr || value == LONG_MIN || value == LONG_MAX)
continue;
/* dummy sanity check: user id matches */
if ((uid_t) value != uid)
continue;
/* skip fields: gid, gecos, dir, go to next field 'shell' */
for (i = 0; i < 4; i++) {
token = strtok_r(NULL, ":", &saveptr);
if (!token)
break;
}
if (!token)
continue;
free(result);
result = strdup(token);
/* sanity check that there are no fields after that */
token = strtok_r(NULL, ":", &saveptr);
if (token)
continue;
found = 1;
}
free(line);
fclose(pipe_f);
again:
if (waitpid(pid, &status, 0) < 0) {
if (errno == EINTR)
goto again;
return NULL;
}
/* some sanity checks: if anything even hinted at going
* wrong: we can't be sure we have a valid result, so
* we assume we don't
*/
if (!WIFEXITED(status))
return NULL;
if (WEXITSTATUS(status) != 0)
return NULL;
if (!found)
return NULL;
return result;
} else {
/* child process */
char uid_buf[32];
char *arguments[] = {
"getent",
"passwd",
uid_buf,
NULL
};
close(pipes[0]);
/* we want to capture stdout */
dup2(pipes[1], 1);
close(pipes[1]);
/* get rid of stdin/stderr, so we try to associate it
* with /dev/null
*/
fd = open("/dev/null", O_RDWR);
if (fd < 0) {
close(0);
close(2);
} else {
dup2(fd, 0);
dup2(fd, 2);
close(fd);
}
/* finish argument list */
ret = snprintf(uid_buf, sizeof(uid_buf), "%ld", (long) uid);
if (ret <= 0)
exit(-1);
/* try to run getent program */
(void) execvp("getent", arguments);
exit(-1);
}
}
static void lxc_attach_get_init_uidgid(uid_t* init_uid, gid_t* init_gid)
{
FILE *proc_file;
char proc_fn[MAXPATHLEN];
char *line = NULL;
size_t line_bufsz = 0;
int ret;
long value = -1;
uid_t uid = (uid_t)-1;
gid_t gid = (gid_t)-1;
/* read capabilities */
snprintf(proc_fn, MAXPATHLEN, "/proc/%d/status", 1);
proc_file = fopen(proc_fn, "r");
if (!proc_file)
return;
while (getline(&line, &line_bufsz, proc_file) != -1) {
/* format is: real, effective, saved set user, fs
* we only care about real uid
*/
ret = sscanf(line, "Uid: %ld", &value);
if (ret != EOF && ret > 0) {
uid = (uid_t) value;
} else {
ret = sscanf(line, "Gid: %ld", &value);
if (ret != EOF && ret > 0)
gid = (gid_t) value;
}
if (uid != (uid_t)-1 && gid != (gid_t)-1)
break;
}
fclose(proc_file);
free(line);
/* only override arguments if we found something */
if (uid != (uid_t)-1)
*init_uid = uid;
if (gid != (gid_t)-1)
*init_gid = gid;
/* TODO: we should also parse supplementary groups and use
* setgroups() to set them */
}
struct attach_clone_payload {
int ipc_socket;
lxc_attach_options_t* options;
struct lxc_proc_context_info* init_ctx;
lxc_attach_exec_t exec_function;
void* exec_payload;
int procfd;
};
static int attach_child_main(void* data);
/* help the optimizer along if it doesn't know that exit always exits */
#define rexit(c) do { int __c = (c); _exit(__c); return __c; } while(0)
/* define default options if no options are supplied by the user */
static lxc_attach_options_t attach_static_default_options = LXC_ATTACH_OPTIONS_DEFAULT;
static bool fetch_seccomp(const char *name, const char *lxcpath,
struct lxc_proc_context_info *i, lxc_attach_options_t *options)
{
struct lxc_container *c;
if (!(options->namespaces & CLONE_NEWNS) || !(options->attach_flags & LXC_ATTACH_LSM))
return true;
c = lxc_container_new(name, lxcpath);
if (!c)
return false;
i->container = c;
if (!c->lxc_conf)
return false;
if (lxc_read_seccomp_config(c->lxc_conf) < 0) {
ERROR("Error reading seccomp policy");
return false;
}
return true;
}
static signed long get_personality(const char *name, const char *lxcpath)
{
char *p = lxc_cmd_get_config_item(name, "lxc.arch", lxcpath);
signed long ret;
if (!p)
return -1;
ret = lxc_config_parse_arch(p);
free(p);
return ret;
}
int lxc_attach(const char* name, const char* lxcpath, lxc_attach_exec_t exec_function, void* exec_payload, lxc_attach_options_t* options, pid_t* attached_process)
{
int ret, status;
pid_t init_pid, pid, attached_pid, expected;
struct lxc_proc_context_info *init_ctx;
char* cwd;
char* new_cwd;
int ipc_sockets[2];
int procfd;
signed long personality;
if (!options)
options = &attach_static_default_options;
init_pid = lxc_cmd_get_init_pid(name, lxcpath);
if (init_pid < 0) {
ERROR("failed to get the init pid");
return -1;
}
init_ctx = lxc_proc_get_context_info(init_pid);
if (!init_ctx) {
ERROR("failed to get context of the init process, pid = %ld", (long)init_pid);
return -1;
}
personality = get_personality(name, lxcpath);
if (personality < 0) {
ERROR("Failed to get personality of the container");
lxc_proc_put_context_info(init_ctx);
return -1;
}
init_ctx->personality = personality;
if (!fetch_seccomp(name, lxcpath, init_ctx, options))
WARN("Failed to get seccomp policy");
cwd = getcwd(NULL, 0);
/* determine which namespaces the container was created with
* by asking lxc-start, if necessary
*/
if (options->namespaces == -1) {
options->namespaces = lxc_cmd_get_clone_flags(name, lxcpath);
/* call failed */
if (options->namespaces == -1) {
ERROR("failed to automatically determine the "
"namespaces which the container unshared");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
}
/* create a socket pair for IPC communication; set SOCK_CLOEXEC in order
* to make sure we don't irritate other threads that want to fork+exec away
*
* IMPORTANT: if the initial process is multithreaded and another thread
* just fork()s away without exec'ing directly after, the socket fd will
* exist in the forked process from the other thread and any close() in
* our own child process will not really cause the socket to close properly,
* potentially causing the parent to hang.
*
* For this reason, while IPC is still active, we have to use shutdown()
* if the child exits prematurely in order to signal that the socket
* is closed and cannot assume that the child exiting will automatically
* do that.
*
* IPC mechanism: (X is receiver)
* initial process intermediate attached
* X <--- send pid of
* attached proc,
* then exit
* send 0 ------------------------------------> X
* [do initialization]
* X <------------------------------------ send 1
* [add to cgroup, ...]
* send 2 ------------------------------------> X
* close socket close socket
* run program
*/
ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
if (ret < 0) {
SYSERROR("could not set up required IPC mechanism for attaching");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
/* create intermediate subprocess, three reasons:
* 1. runs all pthread_atfork handlers and the
* child will no longer be threaded
* (we can't properly setns() in a threaded process)
* 2. we can't setns() in the child itself, since
* we want to make sure we are properly attached to
* the pidns
* 3. also, the initial thread has to put the attached
* process into the cgroup, which we can only do if
* we didn't already setns() (otherwise, user
* namespaces will hate us)
*/
pid = fork();
if (pid < 0) {
SYSERROR("failed to create first subprocess");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
if (pid) {
pid_t to_cleanup_pid = pid;
/* initial thread, we close the socket that is for the
* subprocesses
*/
close(ipc_sockets[1]);
free(cwd);
/* attach to cgroup, if requested */
if (options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) {
if (!cgroup_attach(name, lxcpath, pid))
goto cleanup_error;
}
/* Let the child process know to go ahead */
status = 0;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (0)");
goto cleanup_error;
}
/* get pid from intermediate process */
ret = lxc_read_nointr_expect(ipc_sockets[0], &attached_pid, sizeof(attached_pid), NULL);
if (ret <= 0) {
if (ret != 0)
ERROR("error using IPC to receive pid of attached process");
goto cleanup_error;
}
/* ignore SIGINT (CTRL-C) and SIGQUIT (CTRL-\) - issue #313 */
if (options->stdin_fd == 0) {
signal(SIGINT, SIG_IGN);
signal(SIGQUIT, SIG_IGN);
}
/* reap intermediate process */
ret = wait_for_pid(pid);
if (ret < 0)
goto cleanup_error;
/* we will always have to reap the grandchild now */
to_cleanup_pid = attached_pid;
/* tell attached process it may start initializing */
status = 0;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (0)");
goto cleanup_error;
}
/* wait for the attached process to finish initializing */
expected = 1;
ret = lxc_read_nointr_expect(ipc_sockets[0], &status, sizeof(status), &expected);
if (ret <= 0) {
if (ret != 0)
ERROR("error using IPC to receive notification from attached process (1)");
goto cleanup_error;
}
/* tell attached process we're done */
status = 2;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (2)");
goto cleanup_error;
}
/* now shut down communication with child, we're done */
shutdown(ipc_sockets[0], SHUT_RDWR);
close(ipc_sockets[0]);
lxc_proc_put_context_info(init_ctx);
/* we're done, the child process should now execute whatever
* it is that the user requested. The parent can now track it
* with waitpid() or similar.
*/
*attached_process = attached_pid;
return 0;
cleanup_error:
/* first shut down the socket, then wait for the pid,
* otherwise the pid we're waiting for may never exit
*/
shutdown(ipc_sockets[0], SHUT_RDWR);
close(ipc_sockets[0]);
if (to_cleanup_pid)
(void) wait_for_pid(to_cleanup_pid);
lxc_proc_put_context_info(init_ctx);
return -1;
}
/* first subprocess begins here, we close the socket that is for the
* initial thread
*/
close(ipc_sockets[0]);
/* Wait for the parent to have setup cgroups */
expected = 0;
status = -1;
ret = lxc_read_nointr_expect(ipc_sockets[1], &status, sizeof(status), &expected);
if (ret <= 0) {
ERROR("error communicating with child process");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
procfd = open("/proc", O_DIRECTORY | O_RDONLY);
if (procfd < 0) {
SYSERROR("Unable to open /proc");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* attach now, create another subprocess later, since pid namespaces
* only really affect the children of the current process
*/
ret = lxc_attach_to_ns(init_pid, options->namespaces);
if (ret < 0) {
ERROR("failed to enter the namespace");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* attach succeeded, try to cwd */
if (options->initial_cwd)
new_cwd = options->initial_cwd;
else
new_cwd = cwd;
ret = chdir(new_cwd);
if (ret < 0)
WARN("could not change directory to '%s'", new_cwd);
free(cwd);
/* now create the real child process */
{
struct attach_clone_payload payload = {
.ipc_socket = ipc_sockets[1],
.options = options,
.init_ctx = init_ctx,
.exec_function = exec_function,
.exec_payload = exec_payload,
.procfd = procfd
};
/* We use clone_parent here to make this subprocess a direct child of
* the initial process. Then this intermediate process can exit and
* the parent can directly track the attached process.
*/
pid = lxc_clone(attach_child_main, &payload, CLONE_PARENT);
}
/* shouldn't happen, clone() should always return positive pid */
if (pid <= 0) {
SYSERROR("failed to create subprocess");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* tell grandparent the pid of the newly created child */
ret = lxc_write_nointr(ipc_sockets[1], &pid, sizeof(pid));
if (ret != sizeof(pid)) {
/* if this really happens here, this is very unfortunate, since the
* parent will not know the pid of the attached process and will
* not be able to wait for it (and we won't either due to CLONE_PARENT)
* so the parent won't be able to reap it and the attached process
* will remain a zombie
*/
ERROR("error using IPC to notify main process of pid of the attached process");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* the rest is in the hands of the initial and the attached process */
rexit(0);
}
static int attach_child_main(void* data)
{
struct attach_clone_payload* payload = (struct attach_clone_payload*)data;
int ipc_socket = payload->ipc_socket;
int procfd = payload->procfd;
lxc_attach_options_t* options = payload->options;
struct lxc_proc_context_info* init_ctx = payload->init_ctx;
#if HAVE_SYS_PERSONALITY_H
long new_personality;
#endif
int ret;
int status;
int expected;
long flags;
int fd;
uid_t new_uid;
gid_t new_gid;
/* wait for the initial thread to signal us that it's ready
* for us to start initializing
*/
expected = 0;
status = -1;
ret = lxc_read_nointr_expect(ipc_socket, &status, sizeof(status), &expected);
if (ret <= 0) {
ERROR("error using IPC to receive notification from initial process (0)");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
/* A description of the purpose of this functionality is
* provided in the lxc-attach(1) manual page. We have to
* remount here and not in the parent process, otherwise
* /proc may not properly reflect the new pid namespace.
*/
if (!(options->namespaces & CLONE_NEWNS) && (options->attach_flags & LXC_ATTACH_REMOUNT_PROC_SYS)) {
ret = lxc_attach_remount_sys_proc();
if (ret < 0) {
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
}
/* now perform additional attachments */
#if HAVE_SYS_PERSONALITY_H
if (options->personality < 0)
new_personality = init_ctx->personality;
else
new_personality = options->personality;
if (options->attach_flags & LXC_ATTACH_SET_PERSONALITY) {
ret = personality(new_personality);
if (ret < 0) {
SYSERROR("could not ensure correct architecture");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
}
#endif
if (options->attach_flags & LXC_ATTACH_DROP_CAPABILITIES) {
ret = lxc_attach_drop_privs(init_ctx);
if (ret < 0) {
ERROR("could not drop privileges");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
}
/* always set the environment (specify (LXC_ATTACH_KEEP_ENV, NULL, NULL) if you want this to be a no-op) */
ret = lxc_attach_set_environment(options->env_policy, options->extra_env_vars, options->extra_keep_env);
if (ret < 0) {
ERROR("could not set initial environment for attached process");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
/* set user / group id */
new_uid = 0;
new_gid = 0;
/* ignore errors, we will fall back to root in that case
* (/proc was not mounted etc.)
*/
if (options->namespaces & CLONE_NEWUSER)
lxc_attach_get_init_uidgid(&new_uid, &new_gid);
if (options->uid != (uid_t)-1)
new_uid = options->uid;
if (options->gid != (gid_t)-1)
new_gid = options->gid;
/* setup the control tty */
if (options->stdin_fd && isatty(options->stdin_fd)) {
if (setsid() < 0) {
SYSERROR("unable to setsid");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
if (ioctl(options->stdin_fd, TIOCSCTTY, (char *)NULL) < 0) {
SYSERROR("unable to TIOCSTTY");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
}
/* try to set the uid/gid combination */
if ((new_gid != 0 || options->namespaces & CLONE_NEWUSER)) {
if (setgid(new_gid) || setgroups(0, NULL)) {
SYSERROR("switching to container gid");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
}
if ((new_uid != 0 || options->namespaces & CLONE_NEWUSER) && setuid(new_uid)) {
SYSERROR("switching to container uid");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
/* tell initial process it may now put us into the cgroups */
status = 1;
ret = lxc_write_nointr(ipc_socket, &status, sizeof(status));
if (ret != sizeof(status)) {
ERROR("error using IPC to notify initial process for initialization (1)");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
/* wait for the initial thread to signal us that it has done
* everything for us when it comes to cgroups etc.
*/
expected = 2;
status = -1;
ret = lxc_read_nointr_expect(ipc_socket, &status, sizeof(status), &expected);
if (ret <= 0) {
ERROR("error using IPC to receive final notification from initial process (2)");
shutdown(ipc_socket, SHUT_RDWR);
rexit(-1);
}
shutdown(ipc_socket, SHUT_RDWR);
close(ipc_socket);
/* set new apparmor profile/selinux context */
if ((options->namespaces & CLONE_NEWNS) && (options->attach_flags & LXC_ATTACH_LSM) && init_ctx->lsm_label) {
int on_exec;
on_exec = options->attach_flags & LXC_ATTACH_LSM_EXEC ? 1 : 0;
if (lsm_set_label_at(procfd, on_exec, init_ctx->lsm_label) < 0) {
rexit(-1);
}
}
if (init_ctx->container && init_ctx->container->lxc_conf &&
lxc_seccomp_load(init_ctx->container->lxc_conf) != 0) {
ERROR("Loading seccomp policy");
rexit(-1);
}
lxc_proc_put_context_info(init_ctx);
/* The following is done after the communication socket is
* shut down. That way, all errors that might (though
* unlikely) occur up until this point will have their messages
* printed to the original stderr (if logging is so configured)
* and not the fd the user supplied, if any.
*/
/* fd handling for stdin, stdout and stderr;
* ignore errors here, user may want to make sure
* the fds are closed, for example */
if (options->stdin_fd >= 0 && options->stdin_fd != 0)
dup2(options->stdin_fd, 0);
if (options->stdout_fd >= 0 && options->stdout_fd != 1)
dup2(options->stdout_fd, 1);
if (options->stderr_fd >= 0 && options->stderr_fd != 2)
dup2(options->stderr_fd, 2);
/* close the old fds */
if (options->stdin_fd > 2)
close(options->stdin_fd);
if (options->stdout_fd > 2)
close(options->stdout_fd);
if (options->stderr_fd > 2)
close(options->stderr_fd);
/* try to remove CLOEXEC flag from stdin/stdout/stderr,
* but also here, ignore errors */
for (fd = 0; fd <= 2; fd++) {
/* FD_CLOEXEC is a descriptor flag, so use F_GETFD/F_SETFD,
 * not F_GETFL/F_SETFL */
flags = fcntl(fd, F_GETFD);
if (flags < 0)
continue;
if (flags & FD_CLOEXEC) {
if (fcntl(fd, F_SETFD, flags & ~FD_CLOEXEC) < 0) {
SYSERROR("Unable to clear CLOEXEC from fd");
}
}
}
/* we don't need proc anymore */
close(procfd);
/* we're done, so we can now do whatever the user intended us to do */
rexit(payload->exec_function(payload->exec_payload));
}
int lxc_attach_run_command(void* payload)
{
lxc_attach_command_t* cmd = (lxc_attach_command_t*)payload;
execvp(cmd->program, cmd->argv);
SYSERROR("failed to exec '%s'", cmd->program);
return -1;
}
int lxc_attach_run_shell(void* payload)
{
uid_t uid;
struct passwd *passwd;
char *user_shell;
/* ignore payload parameter */
(void)payload;
uid = getuid();
passwd = getpwuid(uid);
/* this probably happens because of incompatible nss
* implementations in host and container (remember, this
* code is still using the host's glibc but our mount
* namespace is in the container)
* we may try to get the information by spawning a
* [getent passwd uid] process and parsing the result
*/
if (!passwd)
user_shell = lxc_attach_getpwshell(uid);
else
user_shell = passwd->pw_shell;
if (user_shell)
execlp(user_shell, user_shell, NULL);
/* executed if either no passwd entry or execvp fails,
* we will fall back on /bin/sh as a default shell
*/
execlp("/bin/sh", "/bin/sh", NULL);
SYSERROR("failed to exec shell");
return -1;
}
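/*
 * Caller-side sketch of the public entry points above, assuming a
 * container named "c1" under lxcpath "/var/lib/lxc" (both names are
 * illustrative). lxc_attach() hands back the pid of the attached
 * process, which the caller must reap itself, since the intermediate
 * process has already exited by then:
 *
 * lxc_attach_options_t opts = LXC_ATTACH_OPTIONS_DEFAULT;
 * lxc_attach_command_t cmd = {
 *     .program = "/bin/true",
 *     .argv    = (char *[]){ "/bin/true", NULL },
 * };
 * pid_t pid;
 * int status;
 *
 * if (lxc_attach("c1", "/var/lib/lxc", lxc_attach_run_command,
 *                &cmd, &opts, &pid) == 0)
 *     waitpid(pid, &status, 0);
 */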
/*
* Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
* Written by Alex Tomas <alex@clusterfs.com>
*
* Architecture independence:
* Copyright (c) 2005, Bull S.A.
* Written by Pierre Peiffer <pierre.peiffer@bull.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Extents support for EXT4
*
* TODO:
* - ext4*_error() should be used in some situations
* - analyze all BUG()/BUG_ON(), use -EIO where appropriate
* - smart tree reduction
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"
#include <trace/events/ext4.h>
/*
* used by extent splitting.
*/
#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1 0x2 /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2 0x4 /* mark second half unwritten */
#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
static __le32 ext4_extent_block_csum(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
EXT4_EXTENT_TAIL_OFFSET(eh));
return cpu_to_le32(csum);
}
static int ext4_extent_block_csum_verify(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_extent_tail *et;
if (!ext4_has_metadata_csum(inode->i_sb))
return 1;
et = find_ext4_extent_tail(eh);
if (et->et_checksum != ext4_extent_block_csum(inode, eh))
return 0;
return 1;
}
static void ext4_extent_block_csum_set(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_extent_tail *et;
if (!ext4_has_metadata_csum(inode->i_sb))
return;
et = find_ext4_extent_tail(eh);
et->et_checksum = ext4_extent_block_csum(inode, eh);
}
static int ext4_split_extent(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
struct ext4_map_blocks *map,
int split_flag,
int flags);
static int ext4_split_extent_at(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
ext4_lblk_t split,
int split_flag,
int flags);
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
struct inode *inode,
int needed)
{
int err;
if (!ext4_handle_valid(handle))
return 0;
if (handle->h_buffer_credits > needed)
return 0;
err = ext4_journal_extend(handle, needed);
if (err <= 0)
return err;
err = ext4_truncate_restart_trans(handle, inode, needed);
if (err == 0)
err = -EAGAIN;
return err;
}
/*
* could return:
* - EROFS
* - ENOMEM
*/
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
{
if (path->p_bh) {
/* path points to block */
BUFFER_TRACE(path->p_bh, "get_write_access");
return ext4_journal_get_write_access(handle, path->p_bh);
}
/* path points to leaf/index in inode body */
/* we use in-core data, no need to protect them */
return 0;
}
/*
* could return:
* - EROFS
* - ENOMEM
* - EIO
*/
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
struct inode *inode, struct ext4_ext_path *path)
{
int err;
WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
if (path->p_bh) {
ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
/* path points to block */
err = __ext4_handle_dirty_metadata(where, line, handle,
inode, path->p_bh);
} else {
/* path points to leaf/index in inode body */
err = ext4_mark_inode_dirty(handle, inode);
}
return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t block)
{
if (path) {
int depth = path->p_depth;
struct ext4_extent *ex;
/*
* Try to predict block placement assuming that we are
* filling in a file which will eventually be
* non-sparse --- i.e., in the case of libbfd writing
* an ELF object's sections out-of-order but in a way
* that eventually results in a contiguous object or
* executable file, or some database extending a table
* space file. However, this is actually somewhat
* non-ideal if we are writing a sparse file such as
* qemu or KVM writing a raw image file that is going
* to stay fairly sparse, since it will end up
* fragmenting the file system's free space. Maybe we
* should have some heuristics or some way to allow
* userspace to pass a hint to the file system,
* especially if the latter case turns out to be
* common.
*/
ex = path[depth].p_ext;
if (ex) {
ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
if (block > ext_block)
return ext_pblk + (block - ext_block);
else
return ext_pblk - (ext_block - block);
}
/* it looks like index is empty;
* try to find starting block from index itself */
if (path[depth].p_bh)
return path[depth].p_bh->b_blocknr;
}
/* OK. use inode's group */
return ext4_inode_to_goal_block(inode);
}
/*
* Allocation for a meta data block
*/
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex, int *err, unsigned int flags)
{
ext4_fsblk_t goal, newblock;
goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
NULL, err);
return newblock;
}
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
int size;
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
if (!check && size > 6)
size = 6;
#endif
return size;
}
static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
int size;
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
if (!check && size > 5)
size = 5;
#endif
return size;
}
static inline int ext4_ext_space_root(struct inode *inode, int check)
{
int size;
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
if (!check && size > 3)
size = 3;
#endif
return size;
}
static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
int size;
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
if (!check && size > 4)
size = 4;
#endif
return size;
}
static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath, ext4_lblk_t lblk,
int nofail)
{
struct ext4_ext_path *path = *ppath;
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
}
/*
* Calculate the number of metadata blocks needed
* to allocate @blocks
* Worst case is one block per extent
*/
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
struct ext4_inode_info *ei = EXT4_I(inode);
int idxs;
idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx));
/*
* If the new delayed allocation block is contiguous with the
* previous da block, it can share index blocks with the
* previous block, so we only need to allocate a new index
* block every idxs leaf blocks. At idxs**2 blocks, we need
* an additional index block, and at idxs**3 blocks, yet
* another index block.
*/
if (ei->i_da_metadata_calc_len &&
ei->i_da_metadata_calc_last_lblock+1 == lblock) {
int num = 0;
if ((ei->i_da_metadata_calc_len % idxs) == 0)
num++;
if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
num++;
if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
num++;
ei->i_da_metadata_calc_len = 0;
} else
ei->i_da_metadata_calc_len++;
ei->i_da_metadata_calc_last_lblock++;
return num;
}
/*
* In the worst case we need a new set of index blocks at
* every level of the inode's extent tree.
*/
ei->i_da_metadata_calc_len = 1;
ei->i_da_metadata_calc_last_lblock = lblock;
return ext_depth(inode) + 1;
}
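/*
 * Worked example of the arithmetic above, assuming a 4KiB block size
 * (struct ext4_extent_header and struct ext4_extent_idx are both 12
 * bytes on disk):
 *
 *   idxs = (4096 - 12) / 12 = 340
 *
 * so a contiguous delayed-allocation run charges one extra index block
 * every 340 leaf blocks, another one every 340**2 = 115600 blocks, and
 * a third every 340**3 blocks, matching the three modulo tests above.
 */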
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
int max;
if (depth == ext_depth(inode)) {
if (depth == 0)
max = ext4_ext_space_root(inode, 1);
else
max = ext4_ext_space_root_idx(inode, 1);
} else {
if (depth == 0)
max = ext4_ext_space_block(inode, 1);
else
max = ext4_ext_space_block_idx(inode, 1);
}
return max;
}
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
ext4_lblk_t last = lblock + len - 1;
if (lblock > last)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
static int ext4_valid_extent_idx(struct inode *inode,
struct ext4_extent_idx *ext_idx)
{
ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}
static int ext4_valid_extent_entries(struct inode *inode,
struct ext4_extent_header *eh,
int depth)
{
unsigned short entries;
if (eh->eh_entries == 0)
return 1;
entries = le16_to_cpu(eh->eh_entries);
if (depth == 0) {
/* leaf entries */
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
ext4_fsblk_t pblock = 0;
ext4_lblk_t lblock = 0;
ext4_lblk_t prev = 0;
int len = 0;
while (entries) {
if (!ext4_valid_extent(inode, ext))
return 0;
/* Check for overlapping extents */
lblock = le32_to_cpu(ext->ee_block);
len = ext4_ext_get_actual_len(ext);
if ((lblock <= prev) && prev) {
pblock = ext4_ext_pblock(ext);
es->s_last_error_block = cpu_to_le64(pblock);
return 0;
}
ext++;
entries--;
prev = lblock + len - 1;
}
} else {
struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
while (entries) {
if (!ext4_valid_extent_idx(inode, ext_idx))
return 0;
ext_idx++;
entries--;
}
}
return 1;
}
static int __ext4_ext_check(const char *function, unsigned int line,
struct inode *inode, struct ext4_extent_header *eh,
int depth, ext4_fsblk_t pblk)
{
const char *error_msg;
int max = 0;
if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
error_msg = "invalid magic";
goto corrupted;
}
if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
error_msg = "unexpected eh_depth";
goto corrupted;
}
if (unlikely(eh->eh_max == 0)) {
error_msg = "invalid eh_max";
goto corrupted;
}
max = ext4_ext_max_entries(inode, depth);
if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
error_msg = "too large eh_max";
goto corrupted;
}
if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
error_msg = "invalid eh_entries";
goto corrupted;
}
if (!ext4_valid_extent_entries(inode, eh, depth)) {
error_msg = "invalid extent entries";
goto corrupted;
}
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
error_msg = "extent tree corrupted";
goto corrupted;
}
return 0;
corrupted:
ext4_error_inode(inode, function, line, 0,
"pblk %llu bad header/extent: %s - magic %x, "
"entries %u, max %u(%u), depth %u(%u)",
(unsigned long long) pblk, error_msg,
le16_to_cpu(eh->eh_magic),
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
max, le16_to_cpu(eh->eh_depth), depth);
return -EIO;
}
#define ext4_ext_check(inode, eh, depth, pblk) \
__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
int ext4_ext_check_inode(struct inode *inode)
{
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
struct inode *inode, ext4_fsblk_t pblk, int depth,
int flags)
{
struct buffer_head *bh;
int err;
bh = sb_getblk(inode->i_sb, pblk);
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
if (!bh_uptodate_or_lock(bh)) {
trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
err = bh_submit_read(bh);
if (err < 0)
goto errout;
}
if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
return bh;
err = __ext4_ext_check(function, line, inode,
ext_block_hdr(bh), depth, pblk);
if (err)
goto errout;
set_buffer_verified(bh);
/*
* If this is a leaf block, cache all of its entries
*/
if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
struct ext4_extent_header *eh = ext_block_hdr(bh);
struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
ext4_lblk_t prev = 0;
int i;
for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
unsigned int status = EXTENT_STATUS_WRITTEN;
ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
int len = ext4_ext_get_actual_len(ex);
if (prev && (prev != lblk))
ext4_es_cache_extent(inode, prev,
lblk - prev, ~0,
EXTENT_STATUS_HOLE);
if (ext4_ext_is_unwritten(ex))
status = EXTENT_STATUS_UNWRITTEN;
ext4_es_cache_extent(inode, lblk, len,
ext4_ext_pblock(ex), status);
prev = lblk + len;
}
}
return bh;
errout:
put_bh(bh);
return ERR_PTR(err);
}
#define read_extent_tree_block(inode, pblk, depth, flags) \
__read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \
(depth), (flags))
/*
* This function is called to cache a file's extent information in the
* extent status tree
*/
int ext4_ext_precache(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_ext_path *path = NULL;
struct buffer_head *bh;
int i = 0, depth, ret = 0;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return 0; /* not an extent-mapped inode */
down_read(&ei->i_data_sem);
depth = ext_depth(inode);
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
GFP_NOFS);
if (path == NULL) {
up_read(&ei->i_data_sem);
return -ENOMEM;
}
/* Don't cache anything if there are no external extent blocks */
if (depth == 0)
goto out;
path[0].p_hdr = ext_inode_hdr(inode);
ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
if (ret)
goto out;
path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
while (i >= 0) {
/*
* If this is a leaf block or we've reached the end of
* the index block, go up
*/
if ((i == depth) ||
path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
continue;
}
bh = read_extent_tree_block(inode,
ext4_idx_pblock(path[i].p_idx++),
depth - i - 1,
EXT4_EX_FORCE_CACHE);
if (IS_ERR(bh)) {
ret = PTR_ERR(bh);
break;
}
i++;
path[i].p_bh = bh;
path[i].p_hdr = ext_block_hdr(bh);
path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
}
ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
up_read(&ei->i_data_sem);
ext4_ext_drop_refs(path);
kfree(path);
return ret;
}
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
int k, l = path->p_depth;
ext_debug("path:");
for (k = 0; k <= l; k++, path++) {
if (path->p_idx) {
ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
} else if (path->p_ext) {
ext_debug(" %d:[%d]%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_is_unwritten(path->p_ext),
ext4_ext_get_actual_len(path->p_ext),
ext4_ext_pblock(path->p_ext));
} else
ext_debug(" []");
}
ext_debug("\n");
}
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
int depth = ext_depth(inode);
struct ext4_extent_header *eh;
struct ext4_extent *ex;
int i;
if (!path)
return;
eh = path[depth].p_hdr;
ex = EXT_FIRST_EXTENT(eh);
ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
}
ext_debug("\n");
}
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
ext4_fsblk_t newblock, int level)
{
int depth = ext_depth(inode);
struct ext4_extent *ex;
if (depth != level) {
struct ext4_extent_idx *idx;
idx = path[level].p_idx;
while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
ext_debug("%d: move %d:%llu in new index %llu\n", level,
le32_to_cpu(idx->ei_block),
ext4_idx_pblock(idx),
newblock);
idx++;
}
return;
}
ex = path[depth].p_ext;
while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
le32_to_cpu(ex->ee_block),
ext4_ext_pblock(ex),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex),
newblock);
ex++;
}
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
int depth, i;
if (!path)
return;
depth = path->p_depth;
for (i = 0; i <= depth; i++, path++)
if (path->p_bh) {
brelse(path->p_bh);
path->p_bh = NULL;
}
}
/*
* ext4_ext_binsearch_idx:
* binary search for the closest index of the given block
* the header must be checked before calling this
*/
static void
ext4_ext_binsearch_idx(struct inode *inode,
struct ext4_ext_path *path, ext4_lblk_t block)
{
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent_idx *r, *l, *m;
ext_debug("binsearch for %u(idx): ", block);
l = EXT_FIRST_INDEX(eh) + 1;
r = EXT_LAST_INDEX(eh);
while (l <= r) {
m = l + (r - l) / 2;
if (block < le32_to_cpu(m->ei_block))
r = m - 1;
else
l = m + 1;
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
m, le32_to_cpu(m->ei_block),
r, le32_to_cpu(r->ei_block));
}
path->p_idx = l - 1;
ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
{
struct ext4_extent_idx *chix, *ix;
int k;
chix = ix = EXT_FIRST_INDEX(eh);
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
if (k != 0 &&
le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
printk(KERN_DEBUG "k=%d, ix=0x%p, "
"first=0x%p\n", k,
ix, EXT_FIRST_INDEX(eh));
printk(KERN_DEBUG "%u <= %u\n",
le32_to_cpu(ix->ei_block),
le32_to_cpu(ix[-1].ei_block));
}
BUG_ON(k && le32_to_cpu(ix->ei_block)
<= le32_to_cpu(ix[-1].ei_block));
if (block < le32_to_cpu(ix->ei_block))
break;
chix = ix;
}
BUG_ON(chix != path->p_idx);
}
#endif
}
/*
* ext4_ext_binsearch:
* binary search for closest extent of the given block
* the header must be checked before calling this
*/
static void
ext4_ext_binsearch(struct inode *inode,
struct ext4_ext_path *path, ext4_lblk_t block)
{
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent *r, *l, *m;
if (eh->eh_entries == 0) {
/*
* this leaf is empty:
* we get such a leaf in split/add case
*/
return;
}
ext_debug("binsearch for %u: ", block);
l = EXT_FIRST_EXTENT(eh) + 1;
r = EXT_LAST_EXTENT(eh);
while (l <= r) {
m = l + (r - l) / 2;
if (block < le32_to_cpu(m->ee_block))
r = m - 1;
else
l = m + 1;
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
m, le32_to_cpu(m->ee_block),
r, le32_to_cpu(r->ee_block));
}
path->p_ext = l - 1;
ext_debug(" -> %d:%llu:[%d]%d ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_pblock(path->p_ext),
ext4_ext_is_unwritten(path->p_ext),
ext4_ext_get_actual_len(path->p_ext));
#ifdef CHECK_BINSEARCH
{
struct ext4_extent *chex, *ex;
int k;
chex = ex = EXT_FIRST_EXTENT(eh);
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
BUG_ON(k && le32_to_cpu(ex->ee_block)
<= le32_to_cpu(ex[-1].ee_block));
if (block < le32_to_cpu(ex->ee_block))
break;
chex = ex;
}
BUG_ON(chex != path->p_ext);
}
#endif
}
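/*
 * The two searches above share one idiom: find the rightmost entry whose
 * key is <= the target block, so the loop exits with l one past the
 * answer and the result is l - 1. A standalone sketch of the same idiom
 * on a plain sorted array (illustrative only, not used by this file;
 * like the tree code it assumes keys[0] <= target, which is why the
 * search starts at slot 1):
 *
 * static int find_le(const unsigned int *keys, int n, unsigned int target)
 * {
 *     int l = 1, r = n - 1;
 *     while (l <= r) {
 *         int m = l + (r - l) / 2;
 *         if (target < keys[m])
 *             r = m - 1;
 *         else
 *             l = m + 1;
 *     }
 *     return l - 1;   // index of the rightmost key <= target
 * }
 */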
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
struct ext4_extent_header *eh;
eh = ext_inode_hdr(inode);
eh->eh_depth = 0;
eh->eh_entries = 0;
eh->eh_magic = EXT4_EXT_MAGIC;
eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
ext4_mark_inode_dirty(handle, inode);
return 0;
}
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
struct ext4_ext_path **orig_path, int flags)
{
struct ext4_extent_header *eh;
struct buffer_head *bh;
struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
short int depth, i, ppos = 0;
int ret;
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
if (path) {
ext4_ext_drop_refs(path);
if (depth > path[0].p_maxdepth) {
kfree(path);
*orig_path = path = NULL;
}
}
if (!path) {
/* account possible depth increase */
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
GFP_NOFS);
if (unlikely(!path))
return ERR_PTR(-ENOMEM);
path[0].p_maxdepth = depth + 1;
}
path[0].p_hdr = eh;
path[0].p_bh = NULL;
i = depth;
/* walk through the tree */
while (i) {
ext_debug("depth %d: num %d, max %d\n",
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
ext4_ext_binsearch_idx(inode, path + ppos, block);
path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
flags);
if (unlikely(IS_ERR(bh))) {
ret = PTR_ERR(bh);
goto err;
}
eh = ext_block_hdr(bh);
ppos++;
if (unlikely(ppos > depth)) {
put_bh(bh);
EXT4_ERROR_INODE(inode,
"ppos %d > depth %d", ppos, depth);
ret = -EIO;
goto err;
}
path[ppos].p_bh = bh;
path[ppos].p_hdr = eh;
}
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
path[ppos].p_idx = NULL;
/* find extent */
ext4_ext_binsearch(inode, path + ppos, block);
/* if not an empty leaf */
if (path[ppos].p_ext)
path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
ext4_ext_show_path(inode, path);
return path;
err:
ext4_ext_drop_refs(path);
kfree(path);
if (orig_path)
*orig_path = NULL;
return ERR_PTR(ret);
}
/*
* ext4_ext_insert_index:
* insert new index [@logical;@ptr] into the block at @curp;
* check where to insert: before @curp or after @curp
*/
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
struct ext4_ext_path *curp,
int logical, ext4_fsblk_t ptr)
{
struct ext4_extent_idx *ix;
int len, err;
err = ext4_ext_get_access(handle, inode, curp);
if (err)
return err;
if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
EXT4_ERROR_INODE(inode,
"logical %d == ei_block %d!",
logical, le32_to_cpu(curp->p_idx->ei_block));
return -EIO;
}
if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
>= le16_to_cpu(curp->p_hdr->eh_max))) {
EXT4_ERROR_INODE(inode,
"eh_entries %d >= eh_max %d!",
le16_to_cpu(curp->p_hdr->eh_entries),
le16_to_cpu(curp->p_hdr->eh_max));
return -EIO;
}
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
/* insert after */
ext_debug("insert new index %d after: %llu\n", logical, ptr);
ix = curp->p_idx + 1;
} else {
/* insert before */
ext_debug("insert new index %d before: %llu\n", logical, ptr);
ix = curp->p_idx;
}
len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
BUG_ON(len < 0);
if (len > 0) {
ext_debug("insert new index %d: "
"move %d indices from 0x%p to 0x%p\n",
logical, len, ix, ix + 1);
memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
}
if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
return -EIO;
}
ix->ei_block = cpu_to_le32(logical);
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
return -EIO;
}
err = ext4_ext_dirty(handle, inode, curp);
ext4_std_error(inode->i_sb, err);
return err;
}
/*
* ext4_ext_split:
* inserts new subtree into the path, using free index entry
* at depth @at:
* - allocates all needed blocks (new leaf and all intermediate index blocks)
* - makes decision where to split
* - moves remaining extents and index entries (right to the split point)
* into the newly allocated blocks
* - initializes subtree
*/
static int ext4_ext_split(handle_t *handle, struct inode *inode,
unsigned int flags,
struct ext4_ext_path *path,
struct ext4_extent *newext, int at)
{
struct buffer_head *bh = NULL;
int depth = ext_depth(inode);
struct ext4_extent_header *neh;
struct ext4_extent_idx *fidx;
int i = at, k, m, a;
ext4_fsblk_t newblock, oldblock;
__le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
int err = 0;
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
/* if current leaf will be split, then we should use
* border from split point */
if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
return -EIO;
}
if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
border = path[depth].p_ext[1].ee_block;
ext_debug("leaf will be split."
" next leaf starts at %d\n",
le32_to_cpu(border));
} else {
border = newext->ee_block;
ext_debug("leaf will be added."
" next leaf starts at %d\n",
le32_to_cpu(border));
}
/*
* If an error occurs, we stop processing and mark
* the filesystem read-only. The index won't be
* inserted and the tree will remain in a consistent
* state. The next mount will repair the buffers too.
*/
/*
* Get array to track all allocated blocks.
* We need this to handle errors and free blocks
* upon them.
*/
ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
if (!ablocks)
return -ENOMEM;
/* allocate all needed blocks */
ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
for (a = 0; a < depth - at; a++) {
newblock = ext4_ext_new_meta_block(handle, inode, path,
newext, &err, flags);
if (newblock == 0)
goto cleanup;
ablocks[a] = newblock;
}
/* initialize new leaf */
newblock = ablocks[--a];
if (unlikely(newblock == 0)) {
EXT4_ERROR_INODE(inode, "newblock == 0!");
err = -EIO;
goto cleanup;
}
bh = sb_getblk(inode->i_sb, newblock);
if (unlikely(!bh)) {
err = -ENOMEM;
goto cleanup;
}
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err)
goto cleanup;
neh = ext_block_hdr(bh);
neh->eh_entries = 0;
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
neh->eh_depth = 0;
/* move remainder of path[depth] to the new leaf */
if (unlikely(path[depth].p_hdr->eh_entries !=
path[depth].p_hdr->eh_max)) {
EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
path[depth].p_hdr->eh_entries,
path[depth].p_hdr->eh_max);
err = -EIO;
goto cleanup;
}
/* start copy from next extent */
m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
ext4_ext_show_move(inode, path, newblock, depth);
if (m) {
struct ext4_extent *ex;
ex = EXT_FIRST_EXTENT(neh);
memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
le16_add_cpu(&neh->eh_entries, m);
}
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto cleanup;
brelse(bh);
bh = NULL;
/* correct old leaf */
if (m) {
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto cleanup;
le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto cleanup;
}
/* create intermediate indexes */
k = depth - at - 1;
if (unlikely(k < 0)) {
EXT4_ERROR_INODE(inode, "k %d < 0!", k);
err = -EIO;
goto cleanup;
}
if (k)
ext_debug("create %d intermediate indices\n", k);
/* insert new index into current index block */
/* current depth stored in i var */
i = depth - 1;
while (k--) {
oldblock = newblock;
newblock = ablocks[--a];
bh = sb_getblk(inode->i_sb, newblock);
if (unlikely(!bh)) {
err = -ENOMEM;
goto cleanup;
}
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err)
goto cleanup;
neh = ext_block_hdr(bh);
neh->eh_entries = cpu_to_le16(1);
neh->eh_magic = EXT4_EXT_MAGIC;
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
neh->eh_depth = cpu_to_le16(depth - i);
fidx = EXT_FIRST_INDEX(neh);
fidx->ei_block = border;
ext4_idx_store_pblock(fidx, oldblock);
ext_debug("int.index at %d (block %llu): %u -> %llu\n",
i, newblock, le32_to_cpu(border), oldblock);
/* move remainder of path[i] to the new index block */
if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
EXT_LAST_INDEX(path[i].p_hdr))) {
EXT4_ERROR_INODE(inode,
"EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
le32_to_cpu(path[i].p_ext->ee_block));
err = -EIO;
goto cleanup;
}
/* start copy indexes */
m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
EXT_MAX_INDEX(path[i].p_hdr));
ext4_ext_show_move(inode, path, newblock, i);
if (m) {
memmove(++fidx, path[i].p_idx,
sizeof(struct ext4_extent_idx) * m);
le16_add_cpu(&neh->eh_entries, m);
}
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto cleanup;
brelse(bh);
bh = NULL;
/* correct old index */
if (m) {
err = ext4_ext_get_access(handle, inode, path + i);
if (err)
goto cleanup;
le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
err = ext4_ext_dirty(handle, inode, path + i);
if (err)
goto cleanup;
}
i--;
}
/* insert new index */
err = ext4_ext_insert_index(handle, inode, path + at,
le32_to_cpu(border), newblock);
cleanup:
if (bh) {
if (buffer_locked(bh))
unlock_buffer(bh);
brelse(bh);
}
if (err) {
/* free all allocated blocks in error case */
for (i = 0; i < depth; i++) {
if (!ablocks[i])
continue;
ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
EXT4_FREE_BLOCKS_METADATA);
}
}
kfree(ablocks);
return err;
}
/*
* ext4_ext_grow_indepth:
* implements tree growing procedure:
* - allocates new block
* - moves top-level data (index block or leaf) into the new block
* - initializes new top-level, creating index that points to the
* just created block
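*
* schematically (the root always lives in the inode's i_data):
*
*   before:  root[entries ...]                       depth == N
*   after:   root[index] --> new block[entries ...]  depth == N + 1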
*/
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
unsigned int flags)
{
struct ext4_extent_header *neh;
struct buffer_head *bh;
ext4_fsblk_t newblock, goal = 0;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
int err = 0;
/* Try to prepend new index to old one */
if (ext_depth(inode))
goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
if (goal > le32_to_cpu(es->s_first_data_block)) {
flags |= EXT4_MB_HINT_TRY_GOAL;
goal--;
} else
goal = ext4_inode_to_goal_block(inode);
newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
NULL, &err);
if (newblock == 0)
return err;
bh = sb_getblk(inode->i_sb, newblock);
if (unlikely(!bh))
return -ENOMEM;
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err) {
unlock_buffer(bh);
goto out;
}
/* move top-level index/leaf into new block */
memmove(bh->b_data, EXT4_I(inode)->i_data,
sizeof(EXT4_I(inode)->i_data));
/* set size of new block */
neh = ext_block_hdr(bh);
/* the old root could hold indexes or leaves,
* so calculate eh_max the right way */
if (ext_depth(inode))
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
else
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto out;
/* Update top-level index: num,max,pointer */
neh = ext_inode_hdr(inode);
neh->eh_entries = cpu_to_le16(1);
ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
if (neh->eh_depth == 0) {
/* Root extent block becomes index block */
neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
EXT_FIRST_INDEX(neh)->ei_block =
EXT_FIRST_EXTENT(neh)->ee_block;
}
ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
le16_add_cpu(&neh->eh_depth, 1);
ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
return err;
}
/*
* ext4_ext_create_new_leaf:
* finds an empty index slot and adds a new leaf.
* if no free index is found, the tree is grown in depth instead.
*/
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
unsigned int mb_flags,
unsigned int gb_flags,
struct ext4_ext_path **ppath,
struct ext4_extent *newext)
{
struct ext4_ext_path *path = *ppath;
struct ext4_ext_path *curp;
int depth, i, err = 0;
repeat:
i = depth = ext_depth(inode);
/* walk up to the tree and look for free index entry */
curp = path + depth;
while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
i--;
curp--;
}
/* we use already allocated block for index block,
* so subsequent data blocks should be contiguous */
if (EXT_HAS_FREE_INDEX(curp)) {
/* if we found index with free entry, then use that
* entry: create all needed subtree and add new leaf */
err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
if (err)
goto out;
/* refill path */
path = ext4_find_extent(inode,
(ext4_lblk_t)le32_to_cpu(newext->ee_block),
ppath, gb_flags);
if (IS_ERR(path))
err = PTR_ERR(path);
} else {
/* tree is full, time to grow in depth */
err = ext4_ext_grow_indepth(handle, inode, mb_flags);
if (err)
goto out;
/* refill path */
path = ext4_find_extent(inode,
(ext4_lblk_t)le32_to_cpu(newext->ee_block),
ppath, gb_flags);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out;
}
/*
* only the first grow (depth 0 -> 1) produces free space;
* in all other cases we have to split the grown tree
*/
depth = ext_depth(inode);
if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
/* now we need to split */
goto repeat;
}
}
out:
return err;
}
/*
* search for the closest allocated block to the left of *logical
* and return it at @logical together with its physical address at @phys.
* if *logical is the smallest allocated block, the function
* returns 0 at @phys.
* the return value is 0 (success) or an error code
*/
static int ext4_ext_search_left(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
int depth, ee_len;
if (unlikely(path == NULL)) {
EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
return -EIO;
}
depth = path->p_depth;
*phys = 0;
if (depth == 0 && path->p_ext == NULL)
return 0;
/* usually the extent in the path covers blocks smaller
* than *logical, but it may be that the extent is the
* first one in the file */
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
EXT4_ERROR_INODE(inode,
"EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
*logical, le32_to_cpu(ex->ee_block));
return -EIO;
}
while (--depth >= 0) {
ix = path[depth].p_idx;
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode,
"ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
depth);
return -EIO;
}
}
return 0;
}
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
EXT4_ERROR_INODE(inode,
"logical %d < ee_block %d + ee_len %d!",
*logical, le32_to_cpu(ex->ee_block), ee_len);
return -EIO;
}
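/* the closest allocated block to the left is the last block of the
* extent found in the path */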
*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
*phys = ext4_ext_pblock(ex) + ee_len - 1;
return 0;
}
/*
* search for the closest allocated block to the right of *logical
* and return it at @logical together with its physical address at @phys.
* if *logical is the largest allocated block, the function
* returns 0 at @phys.
* the return value is 0 (success) or an error code
*/
static int ext4_ext_search_right(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys,
struct ext4_extent **ret_ex)
{
struct buffer_head *bh = NULL;
struct ext4_extent_header *eh;
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
ext4_fsblk_t block;
int depth; /* Note, NOT eh_depth; depth from top of tree */
int ee_len;
if (unlikely(path == NULL)) {
EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
return -EIO;
}
depth = path->p_depth;
*phys = 0;
if (depth == 0 && path->p_ext == NULL)
return 0;
/* usually the extent in the path covers blocks smaller
* than *logical, but it may be that the extent is the
* first one in the file */
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
EXT4_ERROR_INODE(inode,
"first_extent(path[%d].p_hdr) != ex",
depth);
return -EIO;
}
while (--depth >= 0) {
ix = path[depth].p_idx;
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode,
"ix != EXT_FIRST_INDEX *logical %d!",
*logical);
return -EIO;
}
}
goto found_extent;
}
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
EXT4_ERROR_INODE(inode,
"logical %d < ee_block %d + ee_len %d!",
*logical, le32_to_cpu(ex->ee_block), ee_len);
return -EIO;
}
if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
/* next allocated block in this leaf */
ex++;
goto found_extent;
}
/* go up and search for index to the right */
while (--depth >= 0) {
ix = path[depth].p_idx;
if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
goto got_index;
}
/* we've gone up to the root and found no index to the right */
return 0;
got_index:
/* we've found index to the right, let's
* follow it and find the closest allocated
* block to the right */
ix++;
block = ext4_idx_pblock(ix);
while (++depth < path->p_depth) {
/* subtract from p_depth to get proper eh_depth */
bh = read_extent_tree_block(inode, block,
path->p_depth - depth, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
eh = ext_block_hdr(bh);
ix = EXT_FIRST_INDEX(eh);
block = ext4_idx_pblock(ix);
put_bh(bh);
}
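/* at the leaf level now; read the leaf block and take its first
* extent */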
bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
eh = ext_block_hdr(bh);
ex = EXT_FIRST_EXTENT(eh);
found_extent:
*logical = le32_to_cpu(ex->ee_block);
*phys = ext4_ext_pblock(ex);
*ret_ex = ex;
if (bh)
put_bh(bh);
return 0;
}
/*
* ext4_ext_next_allocated_block:
* returns the allocated block found in the subsequent extent, or
* EXT_MAX_BLOCKS.
* NOTE: it treats the block number from an index entry as an
* allocated block, so index entries have to be consistent
* with the leaves.
*/
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
int depth;
BUG_ON(path == NULL);
depth = path->p_depth;
if (depth == 0 && path->p_ext == NULL)
return EXT_MAX_BLOCKS;
while (depth >= 0) {
if (depth == path->p_depth) {
/* leaf */
if (path[depth].p_ext &&
path[depth].p_ext !=
EXT_LAST_EXTENT(path[depth].p_hdr))
return le32_to_cpu(path[depth].p_ext[1].ee_block);
} else {
/* index */
if (path[depth].p_idx !=
EXT_LAST_INDEX(path[depth].p_hdr))
return le32_to_cpu(path[depth].p_idx[1].ei_block);
}
depth--;
}
return EXT_MAX_BLOCKS;
}
/*
* ext4_ext_next_leaf_block:
* returns first allocated block from next leaf or EXT_MAX_BLOCKS
*/
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
int depth;
BUG_ON(path == NULL);
depth = path->p_depth;
/* a zero-depth tree has no leaf blocks at all */
if (depth == 0)
return EXT_MAX_BLOCKS;
/* go to index block */
depth--;
while (depth >= 0) {
if (path[depth].p_idx !=
EXT_LAST_INDEX(path[depth].p_hdr))
return (ext4_lblk_t)
le32_to_cpu(path[depth].p_idx[1].ei_block);
depth--;
}
return EXT_MAX_BLOCKS;
}
/*
* ext4_ext_correct_indexes:
* if a leaf gets modified and the modified extent is first in the leaf,
* then we have to correct all indexes above.
* TODO: do we need to correct tree in all cases?
*/
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
{
struct ext4_extent_header *eh;
int depth = ext_depth(inode);
struct ext4_extent *ex;
__le32 border;
int k, err = 0;
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
if (unlikely(ex == NULL || eh == NULL)) {
EXT4_ERROR_INODE(inode,
"ex %p == NULL or eh %p == NULL", ex, eh);
return -EIO;
}
if (depth == 0) {
/* there is no tree at all */
return 0;
}
if (ex != EXT_FIRST_EXTENT(eh)) {
/* we correct tree if first leaf got modified only */
return 0;
}
/*
* TODO: we need correction if border is smaller than current one
*/
k = depth - 1;
border = path[depth].p_ext->ee_block;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
return err;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
return err;
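/* propagate the new border upward; we may stop as soon as an index
* is not the first entry in its block, since the levels above it
* are then unaffected */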
while (k--) {
/* change all left-side indexes */
if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
break;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
break;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
break;
}
return err;
}
int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
struct ext4_extent *ex2)
{
unsigned short ext1_ee_len, ext2_ee_len;
if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
return 0;
ext1_ee_len = ext4_ext_get_actual_len(ex1);
ext2_ee_len = ext4_ext_get_actual_len(ex2);
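/* the extents must be logically contiguous */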
if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
le32_to_cpu(ex2->ee_block))
return 0;
/*
* To allow future support for preallocated extents to be added
* as an RO_COMPAT feature, refuse to merge two extents if
* this can result in the top bit of ee_len being set.
*/
if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
return 0;
if (ext4_ext_is_unwritten(ex1) &&
(ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
atomic_read(&EXT4_I(inode)->i_unwritten) ||
(ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
return 0;
#ifdef AGGRESSIVE_TEST
if (ext1_ee_len >= 4)
return 0;
#endif
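/* finally, the extents must be physically contiguous as well */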
if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
return 1;
return 0;
}
/*
* This function tries to merge the "ex" extent to the next extent in the tree.
* It always tries to merge towards right. If you want to merge towards
* left, pass "ex - 1" as argument instead of "ex".
* Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
* 1 if they got merged.
*/
static int ext4_ext_try_to_merge_right(struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex)
{
struct ext4_extent_header *eh;
unsigned int depth, len;
int merge_done = 0, unwritten;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
while (ex < EXT_LAST_EXTENT(eh)) {
if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
break;
/* merge with next extent! */
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(ex + 1));
if (unwritten)
ext4_ext_mark_unwritten(ex);
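/* ex has absorbed ex+1; shift any remaining extents left to close
* the gap */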
if (ex + 1 < EXT_LAST_EXTENT(eh)) {
len = (EXT_LAST_EXTENT(eh) - ex - 1)
* sizeof(struct ext4_extent);
memmove(ex + 1, ex + 2, len);
}
le16_add_cpu(&eh->eh_entries, -1);
merge_done = 1;
WARN_ON(eh->eh_entries == 0);
if (!eh->eh_entries)
EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
}
return merge_done;
}
/*
* This function does a very simple check to see if we can collapse
* an extent tree with a single extent tree leaf block into the inode.
*/
static void ext4_ext_try_to_merge_up(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path)
{
size_t s;
unsigned max_root = ext4_ext_space_root(inode, 0);
ext4_fsblk_t blk;
if ((path[0].p_depth != 1) ||
(le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
(le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
return;
/*
* We need to modify the block allocation bitmap and the block
* group descriptor to release the extent tree block. If we
* can't get the journal credits, give up.
*/
if (ext4_journal_extend(handle, 2))
return;
/*
* Copy the extent data up to the inode
*/
blk = ext4_idx_pblock(path[0].p_idx);
s = le16_to_cpu(path[1].p_hdr->eh_entries) *
sizeof(struct ext4_extent_idx);
s += sizeof(struct ext4_extent_header);
path[1].p_maxdepth = path[0].p_maxdepth;
memcpy(path[0].p_hdr, path[1].p_hdr, s);
path[0].p_depth = 0;
path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
path[0].p_hdr->eh_max = cpu_to_le16(max_root);
brelse(path[1].p_bh);
ext4_free_blocks(handle, inode, NULL, blk, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}
/*
* This function tries to merge the @ex extent with its neighbours in the
* tree: first to the left, then to the right, and finally it tries to
* collapse a single-leaf tree into the inode.
*/
static void ext4_ext_try_to_merge(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex) {
struct ext4_extent_header *eh;
unsigned int depth;
int merge_done = 0;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
if (ex > EXT_FIRST_EXTENT(eh))
merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
if (!merge_done)
(void) ext4_ext_try_to_merge_right(inode, path, ex);
ext4_ext_try_to_merge_up(handle, inode, path);
}
/*
* check if a portion of the "newext" extent overlaps with an
* existing extent.
*
* If there is an overlap discovered, it updates the length of the newext
* such that there will be no overlap, and then returns 1.
* If there is no overlap found, it returns 0.
*/
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
struct inode *inode,
struct ext4_extent *newext,
struct ext4_ext_path *path)
{
ext4_lblk_t b1, b2;
unsigned int depth, len1;
unsigned int ret = 0;
b1 = le32_to_cpu(newext->ee_block);
len1 = ext4_ext_get_actual_len(newext);
depth = ext_depth(inode);
if (!path[depth].p_ext)
goto out;
b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
/*
* get the next allocated block if the extent in the path
* is before the requested block(s)
*/
if (b2 < b1) {
b2 = ext4_ext_next_allocated_block(path);
if (b2 == EXT_MAX_BLOCKS)
goto out;
b2 = EXT4_LBLK_CMASK(sbi, b2);
}
/* check for wrap through zero on extent logical start block */
if (b1 + len1 < b1) {
len1 = EXT_MAX_BLOCKS - b1;
newext->ee_len = cpu_to_le16(len1);
ret = 1;
}
/* check for overlap */
if (b1 + len1 > b2) {
newext->ee_len = cpu_to_le16(b2 - b1);
ret = 1;
}
out:
return ret;
}
/*
* ext4_ext_insert_extent:
* tries to merge the requested extent into an existing extent or
* inserts requested extent as new one into the tree,
* creating new leaf in the no-space case.
*/
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath,
struct ext4_extent *newext, int gb_flags)
{
struct ext4_ext_path *path = *ppath;
struct ext4_extent_header *eh;
struct ext4_extent *ex, *fex;
struct ext4_extent *nearex; /* nearest extent */
struct ext4_ext_path *npath = NULL;
int depth, len, err;
ext4_lblk_t next;
int mb_flags = 0, unwritten;
if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
mb_flags |= EXT4_MB_DELALLOC_RESERVED;
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
return -EIO;
}
depth = ext_depth(inode);
ex = path[depth].p_ext;
eh = path[depth].p_hdr;
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
return -EIO;
}
/* try to insert block into found extent and return */
if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
/*
* Try to see whether we should rather test the extent on
* right from ex, or from the left of ex. This is because
* ext4_find_extent() can return either extent on the
* left, or on the right from the searched position. This
* will make merging more effective.
*/
if (ex < EXT_LAST_EXTENT(eh) &&
(le32_to_cpu(ex->ee_block) +
ext4_ext_get_actual_len(ex) <
le32_to_cpu(newext->ee_block))) {
ex += 1;
goto prepend;
} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
(le32_to_cpu(newext->ee_block) +
ext4_ext_get_actual_len(newext) <
le32_to_cpu(ex->ee_block)))
ex -= 1;
/* Try to append newex to the ex */
if (ext4_can_extents_be_merged(inode, ex, newext)) {
ext_debug("append [%d]%d block to %u:[%d]%d"
"(from %llu)\n",
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
le32_to_cpu(ex->ee_block),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex),
ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
return err;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(newext));
if (unwritten)
ext4_ext_mark_unwritten(ex);
eh = path[depth].p_hdr;
nearex = ex;
goto merge;
}
prepend:
/* Try to prepend newex to the ex */
if (ext4_can_extents_be_merged(inode, newext, ex)) {
ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
"(from %llu)\n",
le32_to_cpu(newext->ee_block),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
le32_to_cpu(ex->ee_block),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex),
ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
return err;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_block = newext->ee_block;
ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(newext));
if (unwritten)
ext4_ext_mark_unwritten(ex);
eh = path[depth].p_hdr;
nearex = ex;
goto merge;
}
}
depth = ext_depth(inode);
eh = path[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
goto has_space;
/* probably next leaf has space for us? */
fex = EXT_LAST_EXTENT(eh);
next = EXT_MAX_BLOCKS;
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
ext_debug("next leaf block - %u\n", next);
BUG_ON(npath != NULL);
npath = ext4_find_extent(inode, next, NULL, 0);
if (IS_ERR(npath))
return PTR_ERR(npath);
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
ext_debug("next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
path = npath;
goto has_space;
}
ext_debug("next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
}
/*
* There is no free space in the found leaf.
* We're gonna add a new leaf in the tree.
*/
if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
mb_flags |= EXT4_MB_USE_RESERVED;
err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
ppath, newext);
if (err)
goto cleanup;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
has_space:
nearex = path[depth].p_ext;
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto cleanup;
if (!nearex) {
/* there is no extent in this leaf, create first one */
ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext));
nearex = EXT_FIRST_EXTENT(eh);
} else {
if (le32_to_cpu(newext->ee_block)
> le32_to_cpu(nearex->ee_block)) {
/* Insert after */
ext_debug("insert %u:%llu:[%d]%d before: "
"nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
nearex);
nearex++;
} else {
/* Insert before */
BUG_ON(newext->ee_block == nearex->ee_block);
ext_debug("insert %u:%llu:[%d]%d after: "
"nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
nearex);
}
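/* shift the extents from nearex onward one slot to the right to
* make room for the new extent */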
len = EXT_LAST_EXTENT(eh) - nearex + 1;
if (len > 0) {
ext_debug("insert %u:%llu:[%d]%d: "
"move %d extents from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
len, nearex, nearex + 1);
memmove(nearex + 1, nearex,
len * sizeof(struct ext4_extent));
}
}
le16_add_cpu(&eh->eh_entries, 1);
path[depth].p_ext = nearex;
nearex->ee_block = newext->ee_block;
ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
nearex->ee_len = newext->ee_len;
merge:
/* try to merge extents */
if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(handle, inode, path, nearex);
/* time to correct all indexes above */
err = ext4_ext_correct_indexes(handle, inode, path);
if (err)
goto cleanup;
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
cleanup:
ext4_ext_drop_refs(npath);
kfree(npath);
return err;
}
static int ext4_fill_fiemap_extents(struct inode *inode,
ext4_lblk_t block, ext4_lblk_t num,
struct fiemap_extent_info *fieinfo)
{
struct ext4_ext_path *path = NULL;
struct ext4_extent *ex;
struct extent_status es;
ext4_lblk_t next, next_del, start = 0, end = 0;
ext4_lblk_t last = block + num;
int exists, depth = 0, err = 0;
unsigned int flags = 0;
unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
while (block < last && block != EXT_MAX_BLOCKS) {
num = last - block;
/* find extent for this block */
down_read(&EXT4_I(inode)->i_data_sem);
path = ext4_find_extent(inode, block, &path, 0);
if (IS_ERR(path)) {
up_read(&EXT4_I(inode)->i_data_sem);
err = PTR_ERR(path);
path = NULL;
break;
}
depth = ext_depth(inode);
if (unlikely(path[depth].p_hdr == NULL)) {
up_read(&EXT4_I(inode)->i_data_sem);
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
err = -EIO;
break;
}
ex = path[depth].p_ext;
next = ext4_ext_next_allocated_block(path);
flags = 0;
exists = 0;
if (!ex) {
/* there is no extent yet, so try to allocate
* all requested space */
start = block;
end = block + num;
} else if (le32_to_cpu(ex->ee_block) > block) {
/* need to allocate space before found extent */
start = block;
end = le32_to_cpu(ex->ee_block);
if (block + num < end)
end = block + num;
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
/* need to allocate space after found extent */
start = block;
end = block + num;
if (end >= next)
end = next;
} else if (block >= le32_to_cpu(ex->ee_block)) {
/*
* some part of requested space is covered
* by found extent
*/
start = block;
end = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
if (block + num < end)
end = block + num;
exists = 1;
} else {
BUG();
}
BUG_ON(end <= start);
if (!exists) {
es.es_lblk = start;
es.es_len = end - start;
es.es_pblk = 0;
} else {
es.es_lblk = le32_to_cpu(ex->ee_block);
es.es_len = ext4_ext_get_actual_len(ex);
es.es_pblk = ext4_ext_pblock(ex);
if (ext4_ext_is_unwritten(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
}
/*
* Find delayed extent and update es accordingly. We call
* it even in !exists case to find out whether es is the
* last existing extent or not.
*/
next_del = ext4_find_delayed_extent(inode, &es);
if (!exists && next_del) {
exists = 1;
flags |= (FIEMAP_EXTENT_DELALLOC |
FIEMAP_EXTENT_UNKNOWN);
}
up_read(&EXT4_I(inode)->i_data_sem);
if (unlikely(es.es_len == 0)) {
EXT4_ERROR_INODE(inode, "es.es_len == 0");
err = -EIO;
break;
}
/*
* This is possible iff next == next_del == EXT_MAX_BLOCKS.
* We need to check next == EXT_MAX_BLOCKS because an extent
* may carry both unwritten and delayed status: when a
* delayed-allocated extent is also allocated by fallocate,
* the status tree tracks both states in a single extent.
*
* So we could return an unwritten and delayed extent whose
* block is equal to 'next'.
*/
if (next == next_del && next == EXT_MAX_BLOCKS) {
flags |= FIEMAP_EXTENT_LAST;
if (unlikely(next_del != EXT_MAX_BLOCKS ||
next != EXT_MAX_BLOCKS)) {
EXT4_ERROR_INODE(inode,
"next extent == %u, next "
"delalloc extent = %u",
next, next_del);
err = -EIO;
break;
}
}
if (exists) {
err = fiemap_fill_next_extent(fieinfo,
(__u64)es.es_lblk << blksize_bits,
(__u64)es.es_pblk << blksize_bits,
(__u64)es.es_len << blksize_bits,
flags);
if (err < 0)
break;
if (err == 1) {
err = 0;
break;
}
}
block = es.es_lblk + es.es_len;
}
ext4_ext_drop_refs(path);
kfree(path);
return err;
}
/*
* ext4_ext_put_gap_in_cache:
* calculate boundaries of the gap that the requested block fits into
* and cache this gap
*/
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ext4_lblk_t block)
{
int depth = ext_depth(inode);
ext4_lblk_t len;
ext4_lblk_t lblock;
struct ext4_extent *ex;
struct extent_status es;
ex = path[depth].p_ext;
if (ex == NULL) {
/* there is no extent yet, so gap is [0;-] */
lblock = 0;
len = EXT_MAX_BLOCKS;
ext_debug("cache gap(whole file):");
} else if (block < le32_to_cpu(ex->ee_block)) {
lblock = block;
len = le32_to_cpu(ex->ee_block) - block;
ext_debug("cache gap(before): %u [%u:%u]",
block,
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
ext4_lblk_t next;
lblock = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
next = ext4_ext_next_allocated_block(path);
ext_debug("cache gap(after): [%u:%u] %u",
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex),
block);
BUG_ON(next == lblock);
len = next - lblock;
} else {
BUG();
}
ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es);
if (es.es_len) {
/* is there a delayed extent containing lblock? */
if (es.es_lblk <= lblock)
return;
len = min(es.es_lblk - lblock, len);
}
ext_debug(" -> %u:%u\n", lblock, len);
ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE);
}
/*
* ext4_ext_rm_idx:
* removes index from the index block.
*/
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path, int depth)
{
int err;
ext4_fsblk_t leaf;
/* free index block */
depth--;
path = path + depth;
leaf = ext4_idx_pblock(path->p_idx);
if (unlikely(path->p_hdr->eh_entries == 0)) {
EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
return -EIO;
}
err = ext4_ext_get_access(handle, inode, path);
if (err)
return err;
if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
len *= sizeof(struct ext4_extent_idx);
memmove(path->p_idx, path->p_idx + 1, len);
}
le16_add_cpu(&path->p_hdr->eh_entries, -1);
err = ext4_ext_dirty(handle, inode, path);
if (err)
return err;
ext_debug("index is empty, remove it, free block %llu\n", leaf);
trace_ext4_ext_rm_idx(inode, leaf);
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
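/* if the removed index was the first entry at this level, the new
* first logical block must be propagated to the index levels above */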
while (--depth >= 0) {
if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
break;
path--;
err = ext4_ext_get_access(handle, inode, path);
if (err)
break;
path->p_idx->ei_block = (path+1)->p_idx->ei_block;
err = ext4_ext_dirty(handle, inode, path);
if (err)
break;
}
return err;
}
/*
* ext4_ext_calc_credits_for_single_extent:
* This routine returns the maximum number of credits needed to insert
* an extent into the extent tree.
* When passing the actual path, the caller should calculate the credits
* under i_data_sem.
*/
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
struct ext4_ext_path *path)
{
if (path) {
int depth = ext_depth(inode);
int ret = 0;
/* probably there is space in leaf? */
if (le16_to_cpu(path[depth].p_hdr->eh_entries)
< le16_to_cpu(path[depth].p_hdr->eh_max)) {
/*
* There is some space in the leaf, so there is no
* need to account for the leaf block credit;
*
* bitmaps and block group descriptor blocks
* and other metadata blocks still need to be
* accounted for.
*/
/* 1 bitmap, 1 block group descriptor */
ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
return ret;
}
}
return ext4_chunk_trans_blocks(inode, nrblocks);
}
/*
* How many index/leaf blocks need to change/allocate to add @extents extents?
*
* If we add a single extent, then in the worst case each tree level's
* index/leaf may need to be changed if the tree splits.
*
* If more extents are inserted, they could cause the whole tree split more
* than once, but this is really rare.
*/
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
int index;
int depth;
/* If we are converting the inline data, only one is needed here. */
if (ext4_has_inline_data(inode))
return 1;
depth = ext_depth(inode);
if (extents <= 1)
index = depth * 2;
else
index = depth * 3;
return index;
}
static inline int get_default_free_blocks_flags(struct inode *inode)
{
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
else if (ext4_should_journal_data(inode))
return EXT4_FREE_BLOCKS_FORGET;
return 0;
}
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent *ex,
long long *partial_cluster,
ext4_lblk_t from, ext4_lblk_t to)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
ext4_fsblk_t pblk;
int flags = get_default_free_blocks_flags(inode);
/*
* For bigalloc file systems, we never free a partial cluster
* at the beginning of the extent. Instead, we make a note
* that we tried freeing the cluster, and check to see if we
* need to free it on a subsequent call to ext4_remove_blocks,
* or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
*/
flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
/*
* If we have a partial cluster, and it's different from the
* cluster of the last block, we need to explicitly free the
* partial cluster here.
*/
pblk = ext4_ext_pblock(ex) + ee_len - 1;
if (*partial_cluster > 0 &&
*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, *partial_cluster),
sbi->s_cluster_ratio, flags);
*partial_cluster = 0;
}
#ifdef EXTENTS_STATS
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
spin_lock(&sbi->s_ext_stats_lock);
sbi->s_ext_blocks += ee_len;
sbi->s_ext_extents++;
if (ee_len < sbi->s_ext_min)
sbi->s_ext_min = ee_len;
if (ee_len > sbi->s_ext_max)
sbi->s_ext_max = ee_len;
if (ext_depth(inode) > sbi->s_depth_max)
sbi->s_depth_max = ext_depth(inode);
spin_unlock(&sbi->s_ext_stats_lock);
}
#endif
if (from >= le32_to_cpu(ex->ee_block)
&& to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
/* tail removal */
ext4_lblk_t num;
long long first_cluster;
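/* num is the count of blocks freed from the tail of the extent;
* pblk is the first physical block to free */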
num = le32_to_cpu(ex->ee_block) + ee_len - from;
pblk = ext4_ext_pblock(ex) + ee_len - num;
/*
* Usually we want to free partial cluster at the end of the
* extent, except for the situation when the cluster is still
* used by any other extent (partial_cluster is negative).
*/
if (*partial_cluster < 0 &&
*partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
ext_debug("free last %u blocks starting %llu partial %lld\n",
num, pblk, *partial_cluster);
ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
/*
* If the block range to be freed didn't start at the
* beginning of a cluster, and we removed the entire
* extent and the cluster is not used by any other extent,
* save the partial cluster here, since we might need to
* delete if we determine that the truncate or punch hole
* operation has removed all of the blocks in the cluster.
* If that cluster is used by another extent, preserve its
* negative value so it isn't freed later on.
*
* If the whole extent wasn't freed, we've reached the
* start of the truncated/punched region and have finished
* removing blocks. If there's a partial cluster here it's
* shared with the remainder of the extent and is no longer
* a candidate for removal.
*/
if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
first_cluster = (long long) EXT4_B2C(sbi, pblk);
if (first_cluster != -*partial_cluster)
*partial_cluster = first_cluster;
} else {
*partial_cluster = 0;
}
} else
ext4_error(sbi->s_sb, "strange request: removal(2) "
"%u-%u from %u:%u\n",
from, to, le32_to_cpu(ex->ee_block), ee_len);
return 0;
}
/*
* ext4_ext_rm_leaf() Removes the extents associated with the
* blocks appearing between "start" and "end". Both "start"
* and "end" must appear in the same extent or EIO is returned.
*
* @handle: The journal handle
* @inode: The files inode
* @path: The path to the leaf
* @partial_cluster: The cluster which we'll have to free if all extents
* have been released from it. However, if this value is
* negative, it's a cluster just to the right of the
* punched region and it must not be freed.
* @start: The first block to remove
* @end: The last block to remove
*/
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
long long *partial_cluster,
ext4_lblk_t start, ext4_lblk_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int err = 0, correct_index = 0;
int depth = ext_depth(inode), credits;
struct ext4_extent_header *eh;
ext4_lblk_t a, b;
unsigned num;
ext4_lblk_t ex_ee_block;
unsigned short ex_ee_len;
unsigned unwritten = 0;
struct ext4_extent *ex;
ext4_fsblk_t pblk;
/* the header must be checked already in ext4_ext_remove_space() */
ext_debug("truncate since %u in leaf to %u\n", start, end);
if (!path[depth].p_hdr)
path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
eh = path[depth].p_hdr;
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
return -EIO;
}
/* find where to start removing */
ex = path[depth].p_ext;
if (!ex)
ex = EXT_LAST_EXTENT(eh);
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
while (ex >= EXT_FIRST_EXTENT(eh) &&
ex_ee_block + ex_ee_len > start) {
if (ext4_ext_is_unwritten(ex))
unwritten = 1;
else
unwritten = 0;
ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
unwritten, ex_ee_len);
path[depth].p_ext = ex;
a = ex_ee_block > start ? ex_ee_block : start;
b = ex_ee_block+ex_ee_len - 1 < end ?
ex_ee_block+ex_ee_len - 1 : end;
ext_debug(" border %u:%u\n", a, b);
/* If this extent is beyond the end of the hole, skip it */
if (end < ex_ee_block) {
/*
* We're going to skip this extent and move to another,
* so note that its first cluster is in use to avoid
* freeing it when removing blocks. Eventually, the
* right edge of the truncated/punched region will
* be just to the left.
*/
if (sbi->s_cluster_ratio > 1) {
pblk = ext4_ext_pblock(ex);
*partial_cluster =
-(long long) EXT4_B2C(sbi, pblk);
}
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
continue;
} else if (b != ex_ee_block + ex_ee_len - 1) {
EXT4_ERROR_INODE(inode,
"can not handle truncate %u:%u "
"on extent %u:%u",
start, end, ex_ee_block,
ex_ee_block + ex_ee_len - 1);
err = -EIO;
goto out;
} else if (a != ex_ee_block) {
/* remove tail of the extent */
num = a - ex_ee_block;
} else {
/* remove whole extent: excellent! */
num = 0;
}
/*
* 3 for leaf, sb, and inode plus 2 (bmap and group
* descriptor) for each block group; assume two block
* groups plus ex_ee_len/blocks_per_block_group for
* the worst case
*/
credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
if (ex == EXT_FIRST_EXTENT(eh)) {
correct_index = 1;
credits += (ext_depth(inode)) + 1;
}
credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
err = ext4_ext_truncate_extend_restart(handle, inode, credits);
if (err)
goto out;
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
a, b);
if (err)
goto out;
if (num == 0)
/* this extent is removed; mark slot entirely unused */
ext4_ext_store_pblock(ex, 0);
ex->ee_len = cpu_to_le16(num);
/*
* Do not mark unwritten if all the blocks in the
* extent have been removed.
*/
if (unwritten && num)
ext4_ext_mark_unwritten(ex);
/*
* If the extent was completely released,
* we need to remove it from the leaf
*/
if (num == 0) {
if (end != EXT_MAX_BLOCKS - 1) {
/*
* For hole punching, we need to scoot all the
* extents up when an extent is removed so that
* we don't have blank extents in the middle
*/
memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
sizeof(struct ext4_extent));
/* Now get rid of the one at the end */
memset(EXT_LAST_EXTENT(eh), 0,
sizeof(struct ext4_extent));
}
le16_add_cpu(&eh->eh_entries, -1);
}
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto out;
ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
ext4_ext_pblock(ex));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
}
if (correct_index && eh->eh_entries)
err = ext4_ext_correct_indexes(handle, inode, path);
/*
* If there's a partial cluster and at least one extent remains in
* the leaf, free the partial cluster if it isn't shared with the
* current extent. If it is shared with the current extent
* we zero partial_cluster because we've reached the start of the
* truncated/punched region and we're done removing blocks.
*/
if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, *partial_cluster),
sbi->s_cluster_ratio,
get_default_free_blocks_flags(inode));
}
*partial_cluster = 0;
}
/* if this leaf is free, then we should
* remove it from index block above */
if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
err = ext4_ext_rm_idx(handle, inode, path, depth);
out:
return err;
}
/*
* ext4_ext_more_to_rm:
* returns 1 if the current index has to be freed (even partially)
*/
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
BUG_ON(path->p_idx == NULL);
if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
return 0;
/*
* if truncate on deeper level happened, it wasn't partial,
* so we have to consider current index for truncation
*/
if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
return 0;
return 1;
}
int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
ext4_lblk_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int depth = ext_depth(inode);
struct ext4_ext_path *path = NULL;
long long partial_cluster = 0;
handle_t *handle;
int i = 0, err = 0;
ext_debug("truncate since %u to %u\n", start, end);
/* probably first extent we're gonna free will be last in block */
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
again:
trace_ext4_ext_remove_space(inode, start, end, depth);
/*
* Check if we are removing extents inside the extent tree. If that
* is the case, we are going to punch a hole inside the extent tree
* so we have to check whether we need to split the extent covering
* the last block to remove so we can easily remove the part of it
* in ext4_ext_rm_leaf().
*/
if (end < EXT_MAX_BLOCKS - 1) {
struct ext4_extent *ex;
ext4_lblk_t ee_block, ex_end, lblk;
ext4_fsblk_t pblk;
/* find extent for or closest extent to this block */
path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
return PTR_ERR(path);
}
depth = ext_depth(inode);
/* a leaf may be missing only if the inode has no blocks at all */
ex = path[depth].p_ext;
if (!ex) {
if (depth) {
EXT4_ERROR_INODE(inode,
"path[%d].p_hdr == NULL",
depth);
err = -EIO;
}
goto out;
}
ee_block = le32_to_cpu(ex->ee_block);
ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
/*
* See if the last block is inside the extent, if so split
* the extent at 'end' block so we can easily remove the
* tail of the first part of the split extent in
* ext4_ext_rm_leaf().
*/
if (end >= ee_block && end < ex_end) {
/*
* If we're going to split the extent, note that
* the cluster containing the block after 'end' is
* in use to avoid freeing it when removing blocks.
*/
if (sbi->s_cluster_ratio > 1) {
pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
partial_cluster =
-(long long) EXT4_B2C(sbi, pblk);
}
/*
* Split the extent in two so that 'end' is the last
* block in the first new extent. Also, we should not
* fail removing space due to ENOSPC, so try to use a
* reserved block if that happens.
*/
err = ext4_force_split_extent_at(handle, inode, &path,
end + 1, 1);
if (err < 0)
goto out;
} else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
/*
* If there's an extent to the right its first cluster
* contains the immediate right boundary of the
* truncated/punched region. Set partial_cluster to
* its negative value so it won't be freed if shared
* with the current extent. The end < ee_block case
* is handled in ext4_ext_rm_leaf().
*/
lblk = ex_end + 1;
err = ext4_ext_search_right(inode, path, &lblk, &pblk,
&ex);
if (err)
goto out;
if (pblk)
partial_cluster =
-(long long) EXT4_B2C(sbi, pblk);
}
}
/*
* We start scanning from the right side, freeing all the blocks
* after i_size and walking into the tree depth-wise.
*/
depth = ext_depth(inode);
if (path) {
int k = i = depth;
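/* mark the intermediate levels as not yet fully processed
* (p_block != eh_entries) so ext4_ext_more_to_rm() re-descends */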
while (--k > 0)
path[k].p_block =
le16_to_cpu(path[k].p_hdr->eh_entries)+1;
} else {
path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
GFP_NOFS);
if (path == NULL) {
ext4_journal_stop(handle);
return -ENOMEM;
}
path[0].p_maxdepth = path[0].p_depth = depth;
path[0].p_hdr = ext_inode_hdr(inode);
i = 0;
if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
err = -EIO;
goto out;
}
}
err = 0;
while (i >= 0 && err == 0) {
if (i == depth) {
/* this is leaf block */
err = ext4_ext_rm_leaf(handle, inode, path,
&partial_cluster, start,
end);
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
continue;
}
/* this is index block */
if (!path[i].p_hdr) {
ext_debug("initialize header\n");
path[i].p_hdr = ext_block_hdr(path[i].p_bh);
}
if (!path[i].p_idx) {
/* this level hasn't been touched yet */
path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
ext_debug("init index ptr: hdr 0x%p, num %d\n",
path[i].p_hdr,
le16_to_cpu(path[i].p_hdr->eh_entries));
} else {
/* we were already here, see at next index */
path[i].p_idx--;
}
ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
i, EXT_FIRST_INDEX(path[i].p_hdr),
path[i].p_idx);
if (ext4_ext_more_to_rm(path + i)) {
struct buffer_head *bh;
/* go to the next level */
ext_debug("move to level %d (block %llu)\n",
i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
bh = read_extent_tree_block(inode,
ext4_idx_pblock(path[i].p_idx), depth - i - 1,
EXT4_EX_NOCACHE);
if (IS_ERR(bh)) {
/* should we reset i_size? */
err = PTR_ERR(bh);
break;
}
/* Yield here to deal with large extent trees.
* Should be a no-op if we did IO above. */
cond_resched();
if (WARN_ON(i + 1 > depth)) {
err = -EIO;
break;
}
path[i + 1].p_bh = bh;
/* save actual number of indexes since this
* number is changed at the next iteration */
path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
i++;
} else {
/* we finished processing this index, go up */
if (path[i].p_hdr->eh_entries == 0 && i > 0) {
/* index is empty, remove it;
* the handle must already be prepared by the
* leaf removal above */
err = ext4_ext_rm_idx(handle, inode, path, i);
}
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
ext_debug("return to level %d\n", i);
}
}
trace_ext4_ext_remove_space_done(inode, start, end, depth,
partial_cluster, path->p_hdr->eh_entries);
/*
* If we still have something in the partial cluster and we have removed
* even the first extent, then we should free the blocks in the partial
* cluster as well. (This code will only run when there are no leaves
* to the immediate left of the truncated/punched region.)
*/
if (partial_cluster > 0 && err == 0) {
/* don't zero partial_cluster since it's not used afterwards */
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, partial_cluster),
sbi->s_cluster_ratio,
get_default_free_blocks_flags(inode));
}
/* TODO: flexible tree reduction should be here */
if (path->p_hdr->eh_entries == 0) {
/*
* truncate to zero freed all the tree,
* so we need to correct eh_depth
*/
err = ext4_ext_get_access(handle, inode, path);
if (err == 0) {
ext_inode_hdr(inode)->eh_depth = 0;
ext_inode_hdr(inode)->eh_max =
cpu_to_le16(ext4_ext_space_root(inode, 0));
err = ext4_ext_dirty(handle, inode, path);
}
}
out:
ext4_ext_drop_refs(path);
kfree(path);
path = NULL;
if (err == -EAGAIN)
goto again;
ext4_journal_stop(handle);
return err;
}
/*
* called at mount time
*/
void ext4_ext_init(struct super_block *sb)
{
/*
* possible initialization would be here
*/
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
printk(KERN_INFO "EXT4-fs: file extents enabled"
#ifdef AGGRESSIVE_TEST
", aggressive tests"
#endif
#ifdef CHECK_BINSEARCH
", check binsearch"
#endif
#ifdef EXTENTS_STATS
", stats"
#endif
"\n");
#endif
#ifdef EXTENTS_STATS
spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
EXT4_SB(sb)->s_ext_min = 1 << 30;
EXT4_SB(sb)->s_ext_max = 0;
#endif
}
}
/*
* called at umount time
*/
void ext4_ext_release(struct super_block *sb)
{
if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
return;
#ifdef EXTENTS_STATS
if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
struct ext4_sb_info *sbi = EXT4_SB(sb);
printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
sbi->s_ext_blocks, sbi->s_ext_extents,
sbi->s_ext_blocks / sbi->s_ext_extents);
printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
}
#endif
}
static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
ext4_lblk_t ee_block;
ext4_fsblk_t ee_pblock;
unsigned int ee_len;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
if (ee_len == 0)
return 0;
return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
EXTENT_STATUS_WRITTEN);
}
/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
ext4_fsblk_t ee_pblock;
unsigned int ee_len;
int ret;
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
if (ret > 0)
ret = 0;
return ret;
}
/*
* ext4_split_extent_at() splits an extent at given block.
*
* @handle: the journal handle
* @inode: the file inode
* @path: the path to the extent
* @split: the logical block where the extent is split.
* @split_flag: indicates if the extent could be zeroed out if the split
*              fails, and the states (initialized or unwritten) of the
*              new extents.
* @flags: flags used to insert new extent to extent tree.
*
*
* Splits extent [a, b] into two extents [a, @split) and [@split, b], the
* states of which are determined by @split_flag.
*
* There are two cases:
* a> the extent is split into two extents.
* b> no split is needed, and the extent is just marked.
*
* return 0 on success.
*/
static int ext4_split_extent_at(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
ext4_lblk_t split,
int split_flag,
int flags)
{
struct ext4_ext_path *path = *ppath;
ext4_fsblk_t newblock;
ext4_lblk_t ee_block;
struct ext4_extent *ex, newex, orig_ex, zero_ex;
struct ext4_extent *ex2 = NULL;
unsigned int ee_len, depth;
int err = 0;
BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
(EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
ext_debug("ext4_split_extents_at: inode %lu, logical"
"block %llu\n", inode->i_ino, (unsigned long long)split);
ext4_ext_show_leaf(inode, path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
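/* physical block corresponding to the logical split point */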
newblock = split - ee_block + ext4_ext_pblock(ex);
BUG_ON(split < ee_block || split >= (ee_block + ee_len));
BUG_ON(!ext4_ext_is_unwritten(ex) &&
split_flag & (EXT4_EXT_MAY_ZEROOUT |
EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
if (split == ee_block) {
/*
* case b: block @split is the block that the extent begins with
* then we just change the state of the extent, and splitting
* is not needed.
*/
if (split_flag & EXT4_EXT_MARK_UNWRIT2)
ext4_ext_mark_unwritten(ex);
else
ext4_ext_mark_initialized(ex);
if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
goto out;
}
/* case a */
memcpy(&orig_ex, ex, sizeof(orig_ex));
ex->ee_len = cpu_to_le16(split - ee_block);
if (split_flag & EXT4_EXT_MARK_UNWRIT1)
ext4_ext_mark_unwritten(ex);
/*
* path may lead to new leaf, not to original leaf any more
* after ext4_ext_insert_extent() returns,
*/
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto fix_extent_len;
ex2 = &newex;
ex2->ee_block = cpu_to_le32(split);
ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
ext4_ext_store_pblock(ex2, newblock);
if (split_flag & EXT4_EXT_MARK_UNWRIT2)
ext4_ext_mark_unwritten(ex2);
err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
if (split_flag & EXT4_EXT_DATA_VALID1) {
err = ext4_ext_zeroout(inode, ex2);
zero_ex.ee_block = ex2->ee_block;
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(ex2));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex2));
} else {
err = ext4_ext_zeroout(inode, ex);
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(ex));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex));
}
} else {
err = ext4_ext_zeroout(inode, &orig_ex);
zero_ex.ee_block = orig_ex.ee_block;
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(&orig_ex));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(&orig_ex));
}
if (err)
goto fix_extent_len;
/* update the extent length and mark as initialized */
ex->ee_len = cpu_to_le16(ee_len);
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
if (err)
goto fix_extent_len;
/* update extent status tree */
err = ext4_zeroout_es(inode, &zero_ex);
goto out;
} else if (err)
goto fix_extent_len;
out:
ext4_ext_show_leaf(inode, path);
return err;
fix_extent_len:
ex->ee_len = orig_ex.ee_len;
ext4_ext_dirty(handle, inode, path + path->p_depth);
return err;
}
/*
* ext4_split_extent() splits an extent and marks the extent covered
* by @map as @split_flag indicates
*
* It may result in splitting the extent into multiple extents (up to three)
* There are three possibilities:
* a> There is no split required
* b> Splits in two extents: Split is happening at either end of the extent
* c> Splits in three extents: Someone is splitting in the middle of the extent
*
*/
static int ext4_split_extent(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
struct ext4_map_blocks *map,
int split_flag,
int flags)
{
struct ext4_ext_path *path = *ppath;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len, depth;
int err = 0;
int unwritten;
int split_flag1, flags1;
int allocated = map->m_len;
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
unwritten = ext4_ext_is_unwritten(ex);
if (map->m_lblk + map->m_len < ee_block + ee_len) {
split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
if (unwritten)
split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
if (split_flag & EXT4_EXT_DATA_VALID2)
split_flag1 |= EXT4_EXT_DATA_VALID1;
err = ext4_split_extent_at(handle, inode, ppath,
map->m_lblk + map->m_len, split_flag1, flags1);
if (err)
goto out;
} else {
allocated = ee_len - (map->m_lblk - ee_block);
}
/*
* Update path is required because previous ext4_split_extent_at() may
* result in split of original leaf or extent zeroout.
*/
path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
if (!ex) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) map->m_lblk);
return -EIO;
}
unwritten = ext4_ext_is_unwritten(ex);
split_flag1 = 0;
if (map->m_lblk >= ee_block) {
split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
if (unwritten) {
split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
EXT4_EXT_MARK_UNWRIT2);
}
err = ext4_split_extent_at(handle, inode, ppath,
map->m_lblk, split_flag1, flags);
if (err)
goto out;
}
ext4_ext_show_leaf(inode, path);
out:
return err ? err : allocated;
}
/*
* This function is called by ext4_ext_map_blocks() if someone tries to write
* to an unwritten extent. It may result in splitting the unwritten
* extent into multiple extents (up to three - one initialized and two
* unwritten).
* There are three possibilities:
* a> There is no split required: Entire extent should be initialized
* b> Splits in two extents: Write is happening at either end of the extent
* c> Splits in three extents: Someone is writing in the middle of the extent
*
* Pre-conditions:
* - The extent pointed to by 'path' is unwritten.
* - The extent pointed to by 'path' contains a superset
* of the logical span [map->m_lblk, map->m_lblk + map->m_len).
*
* Post-conditions on success:
* - the returned value is the number of blocks beyond map->m_lblk
* that are allocated and initialized.
* It is guaranteed to be >= map->m_len.
*/
static int ext4_ext_convert_to_initialized(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath,
int flags)
{
struct ext4_ext_path *path = *ppath;
struct ext4_sb_info *sbi;
struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
struct ext4_extent zero_ex;
struct ext4_extent *ex, *abut_ex;
ext4_lblk_t ee_block, eof_block;
unsigned int ee_len, depth, map_len = map->m_len;
int allocated = 0, max_zeroout = 0;
int err = 0;
int split_flag = 0;
ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)map->m_lblk, map_len);
sbi = EXT4_SB(inode->i_sb);
eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map_len)
eof_block = map->m_lblk + map_len;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
zero_ex.ee_len = 0;
trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
/* Pre-conditions */
BUG_ON(!ext4_ext_is_unwritten(ex));
BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
/*
* Attempt to transfer newly initialized blocks from the currently
* unwritten extent to its neighbor. This is much cheaper
* than an insertion followed by a merge as those involve costly
* memmove() calls. Transferring to the left is the common case in
* steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
* followed by append writes.
*
* Limitations of the current logic:
* - L1: we do not deal with writes covering the whole extent.
* This would require removing the extent if the transfer
* is possible.
* - L2: we only attempt to merge with an extent stored in the
* same extent tree node.
*/
if ((map->m_lblk == ee_block) &&
/* See if we can merge left */
(map_len < ee_len) && /*L1*/
(ex > EXT_FIRST_EXTENT(eh))) { /*L2*/
ext4_lblk_t prev_lblk;
ext4_fsblk_t prev_pblk, ee_pblk;
unsigned int prev_len;
abut_ex = ex - 1;
prev_lblk = le32_to_cpu(abut_ex->ee_block);
prev_len = ext4_ext_get_actual_len(abut_ex);
prev_pblk = ext4_ext_pblock(abut_ex);
ee_pblk = ext4_ext_pblock(ex);
/*
* A transfer of blocks from 'ex' to 'abut_ex' is allowed
* upon those conditions:
* - C1: abut_ex is initialized,
* - C2: abut_ex is logically abutting ex,
* - C3: abut_ex is physically abutting ex,
* - C4: abut_ex can receive the additional blocks without
* overflowing the (initialized) length limit.
*/
if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
((prev_lblk + prev_len) == ee_block) && /*C2*/
((prev_pblk + prev_len) == ee_pblk) && /*C3*/
(prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
/* Shift the start of ex by 'map_len' blocks */
ex->ee_block = cpu_to_le32(ee_block + map_len);
ext4_ext_store_pblock(ex, ee_pblk + map_len);
ex->ee_len = cpu_to_le16(ee_len - map_len);
ext4_ext_mark_unwritten(ex); /* Restore the flag */
/* Extend abut_ex by 'map_len' blocks */
abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
/* Result: number of initialized blocks past m_lblk */
allocated = map_len;
}
} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
(map_len < ee_len) && /*L1*/
ex < EXT_LAST_EXTENT(eh)) { /*L2*/
/* See if we can merge right */
ext4_lblk_t next_lblk;
ext4_fsblk_t next_pblk, ee_pblk;
unsigned int next_len;
abut_ex = ex + 1;
next_lblk = le32_to_cpu(abut_ex->ee_block);
next_len = ext4_ext_get_actual_len(abut_ex);
next_pblk = ext4_ext_pblock(abut_ex);
ee_pblk = ext4_ext_pblock(ex);
/*
* A transfer of blocks from 'ex' to 'abut_ex' is allowed
* upon those conditions:
* - C1: abut_ex is initialized,
* - C2: abut_ex is logically abutting ex,
* - C3: abut_ex is physically abutting ex,
* - C4: abut_ex can receive the additional blocks without
* overflowing the (initialized) length limit.
*/
if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
((map->m_lblk + map_len) == next_lblk) && /*C2*/
((ee_pblk + ee_len) == next_pblk) && /*C3*/
(next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
/* Shift the start of abut_ex by 'map_len' blocks */
abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
ex->ee_len = cpu_to_le16(ee_len - map_len);
ext4_ext_mark_unwritten(ex); /* Restore the flag */
/* Extend abut_ex by 'map_len' blocks */
abut_ex->ee_len = cpu_to_le16(next_len + map_len);
/* Result: number of initialized blocks past m_lblk */
allocated = map_len;
}
}
if (allocated) {
/* Mark the block containing both extents as dirty */
ext4_ext_dirty(handle, inode, path + depth);
/* Update path to point to the right extent */
path[depth].p_ext = abut_ex;
goto out;
} else
allocated = ee_len - (map->m_lblk - ee_block);
WARN_ON(map->m_lblk < ee_block);
/*
* It is safe to convert extent to initialized via explicit
* zeroout only if extent is fully inside i_size or new_size.
*/
split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
if (EXT4_EXT_MAY_ZEROOUT & split_flag)
max_zeroout = sbi->s_extent_max_zeroout_kb >>
(inode->i_sb->s_blocksize_bits - 10);
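/*
* Illustrative arithmetic, assuming the common defaults: with
* s_extent_max_zeroout_kb == 32 and 4KiB blocks (s_blocksize_bits ==
* 12), max_zeroout = 32 >> (12 - 10) = 8, i.e. extents of up to eight
* blocks may be zeroed out instead of being split.
*/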
/* If extent is less than s_max_zeroout_kb, zeroout directly */
if (max_zeroout && (ee_len <= max_zeroout)) {
err = ext4_ext_zeroout(inode, ex);
if (err)
goto out;
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
ext4_ext_mark_initialized(ex);
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
goto out;
}
/*
* four cases:
* 1. split the extent into three extents.
* 2. split the extent into two extents, zeroout the first half.
* 3. split the extent into two extents, zeroout the second half.
* 4. split the extent into two extents without zeroout.
*/
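/*
* A sketch of how those cases map onto the code below (illustrative):
* case 3 zeroes the tail [m_lblk, m_lblk + allocated) and widens the
* split so the whole tail comes back initialized; case 2 zeroes the
* head [ee_block, m_lblk) and starts the split at ee_block; cases 1
* and 4 fall through to ext4_split_extent() without a preliminary
* zeroout.
*/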
split_map.m_lblk = map->m_lblk;
split_map.m_len = map->m_len;
if (max_zeroout && (allocated > map->m_len)) {
if (allocated <= max_zeroout) {
/* case 3 */
zero_ex.ee_block =
cpu_to_le32(map->m_lblk);
zero_ex.ee_len = cpu_to_le16(allocated);
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex) + map->m_lblk - ee_block);
err = ext4_ext_zeroout(inode, &zero_ex);
if (err)
goto out;
split_map.m_lblk = map->m_lblk;
split_map.m_len = allocated;
} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
/* case 2 */
if (map->m_lblk != ee_block) {
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = cpu_to_le16(map->m_lblk -
ee_block);
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex));
err = ext4_ext_zeroout(inode, &zero_ex);
if (err)
goto out;
}
split_map.m_lblk = ee_block;
split_map.m_len = map->m_lblk - ee_block + map->m_len;
allocated = map->m_len;
}
}
err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
flags);
if (err > 0)
err = 0;
out:
/* If we have gotten a failure, don't zero out status tree */
if (!err)
err = ext4_zeroout_es(inode, &zero_ex);
return err ? err : allocated;
}
/*
* This function is called by ext4_ext_map_blocks() from
* ext4_get_blocks_dio_write() when DIO is about to write
* to an unwritten extent.
*
* Writing to an unwritten extent may result in splitting the unwritten
* extent into multiple initialized/unwritten extents (up to three).
* There are three possibilities:
* a> There is no split required: Entire extent should be unwritten
* b> Splits in two extents: Write is happening at either end of the extent
* c> Splits in three extents: Someone is writing in the middle of the extent
*
* This works the same way in the case of initialized -> unwritten conversion.
*
* One or more index blocks may be needed if the extent tree grows after
* the unwritten extent is split. To prevent ENOSPC from occurring at IO
* completion time, we need to split the unwritten extent before the DIO
* is submitted. The unwritten extent will be split into (at most) three
* unwritten extents. After IO completes, the part being filled will be
* converted to initialized by the end_io callback function via
* ext4_convert_unwritten_extents().
*
* Returns the size of the unwritten extent to be written on success.
*/
static int ext4_split_convert_extents(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath,
int flags)
{
struct ext4_ext_path *path = *ppath;
ext4_lblk_t eof_block;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len;
int split_flag = 0, depth;
ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
__func__, inode->i_ino,
(unsigned long long)map->m_lblk, map->m_len);
eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
eof_block = map->m_lblk + map->m_len;
/*
* It is safe to convert extent to initialized via explicit
* zeroout only if the extent is fully inside i_size or new_size.
*/
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
/* Convert to unwritten */
if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
split_flag |= EXT4_EXT_DATA_VALID1;
/* Convert to initialized */
} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
split_flag |= ee_block + ee_len <= eof_block ?
EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
}
flags |= EXT4_GET_BLOCKS_PRE_IO;
return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath)
{
struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
int depth;
int err = 0;
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)ee_block, ee_len);
/* If the extent is larger than requested, it is a clear sign that we still
* have some extent state machine issues left. So an extent split is still
* required.
* TODO: Once all related issues are fixed, this situation should be
* illegal.
*/
if (ee_block != map->m_lblk || ee_len > map->m_len) {
#ifdef EXT4_DEBUG
ext4_warning("Inode (%ld) finished: extent logical block %llu,"
" len %u; IO logical block %llu, len %u\n",
inode->i_ino, (unsigned long long)ee_block, ee_len,
(unsigned long long)map->m_lblk, map->m_len);
#endif
err = ext4_split_convert_extents(handle, inode, map, ppath,
EXT4_GET_BLOCKS_CONVERT);
if (err < 0)
return err;
path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
/* first mark the extent as initialized */
ext4_ext_mark_initialized(ex);
/* note: ext4_ext_correct_indexes() isn't needed here because
* borders are not changed
*/
ext4_ext_try_to_merge(handle, inode, path, ex);
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
ext4_ext_show_leaf(inode, path);
return err;
}
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
sector_t block, int count)
{
int i;
for (i = 0; i < count; i++)
unmap_underlying_metadata(bdev, block + i);
}
/*
* Handle EOFBLOCKS_FL flag, clearing it if necessary
*/
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
ext4_lblk_t lblk,
struct ext4_ext_path *path,
unsigned int len)
{
int i, depth;
struct ext4_extent_header *eh;
struct ext4_extent *last_ex;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
return 0;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
/*
* We're going to remove EOFBLOCKS_FL entirely in the future so we
* do not care about this case anymore. Simply remove the flag
* if there are no extents.
*/
if (unlikely(!eh->eh_entries))
goto out;
last_ex = EXT_LAST_EXTENT(eh);
/*
* We should clear the EOFBLOCKS_FL flag if we are writing the
* last block in the last extent in the file. We test this by
* first checking to see if the caller to
* ext4_ext_get_blocks() was interested in the last block (or
* a block beyond the last block) in the current extent. If
* this turns out to be false, we can bail out from this
* function immediately.
*/
if (lblk + len < le32_to_cpu(last_ex->ee_block) +
ext4_ext_get_actual_len(last_ex))
return 0;
/*
* If the caller does appear to be planning to write at or
* beyond the end of the current extent, we then test to see
* if the current extent is the last extent in the file, by
* checking to make sure it was reached via the rightmost node
* at each level of the tree.
*/
for (i = depth-1; i >= 0; i--)
if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
return 0;
out:
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
return ext4_mark_inode_dirty(handle, inode);
}
/**
* ext4_find_delalloc_range: find delayed allocated block in the given range.
*
* Return 1 if there is a delalloc block in the range, otherwise 0.
*/
int ext4_find_delalloc_range(struct inode *inode,
ext4_lblk_t lblk_start,
ext4_lblk_t lblk_end)
{
struct extent_status es;
ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
if (es.es_len == 0)
return 0; /* there is no delayed extent in this tree */
else if (es.es_lblk <= lblk_start &&
lblk_start < es.es_lblk + es.es_len)
return 1;
else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
return 1;
else
return 0;
}
int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t lblk_start, lblk_end;
lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
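/*
* Example (a sketch, assuming bigalloc with s_cluster_ratio == 4):
* lblk == 10 gives lblk_start == 8 and lblk_end == 11, i.e. the whole
* cluster [8-11] is searched for delayed allocated blocks.
*/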
return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
}
/**
* Determines how many complete clusters (out of those specified by the 'map')
* are under delalloc and for which quota was reserved.
* This function is called when we are writing out the blocks that were
* originally written with their allocation delayed, but then the space was
* allocated using fallocate() before the delayed allocation could be resolved.
* The cases to look for are:
* ('=' indicates delayed allocated blocks
* '-' indicates non-delayed allocated blocks)
* (a) partial clusters towards beginning and/or end outside of allocated range
* are not delalloc'ed.
* Ex:
* |----c---=|====c====|====c====|===-c----|
* |++++++ allocated ++++++|
* ==> 4 complete clusters in above example
*
* (b) partial cluster (outside of allocated range) towards either end is
* marked for delayed allocation. In this case, we will exclude that
* cluster.
* Ex:
* |----====c========|========c========|
* |++++++ allocated ++++++|
* ==> 1 complete cluster in above example
*
* Ex:
* |================c================|
* |++++++ allocated ++++++|
* ==> 0 complete clusters in above example
*
* The ext4_da_update_reserve_space will be called only if we
* determine here that there were some "entire" clusters that span
* this 'allocated' range.
* In the non-bigalloc case, this function will just end up returning num_blks
* without ever calling ext4_find_delalloc_range.
*/
static unsigned int
get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
unsigned int num_blks)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
ext4_lblk_t lblk_from, lblk_to, c_offset;
unsigned int allocated_clusters = 0;
alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
/* max possible clusters for this allocation */
allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
/* Check towards left side */
c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
if (c_offset) {
lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
lblk_to = lblk_from + c_offset - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
allocated_clusters--;
}
/* Now check towards right. */
c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
if (allocated_clusters && c_offset) {
lblk_from = lblk_start + num_blks;
lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
allocated_clusters--;
}
return allocated_clusters;
}
static int
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath, int flags,
unsigned int allocated, ext4_fsblk_t newblock)
{
struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
int depth;
int err = 0;
/*
* Make sure that the extent is no bigger than we support with
* an unwritten extent
*/
if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
ext_debug("%s: inode %lu, logical"
"block %llu, max_blocks %u\n", __func__, inode->i_ino,
(unsigned long long)ee_block, ee_len);
if (ee_block != map->m_lblk || ee_len > map->m_len) {
err = ext4_split_convert_extents(handle, inode, map, ppath,
EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
if (err < 0)
return err;
path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
if (!ex) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) map->m_lblk);
return -EIO;
}
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
return err;
/* first mark the extent as unwritten */
ext4_ext_mark_unwritten(ex);
/* note: ext4_ext_correct_indexes() isn't needed here because
* borders are not changed
*/
ext4_ext_try_to_merge(handle, inode, path, ex);
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
if (err)
return err;
ext4_ext_show_leaf(inode, path);
ext4_update_inode_fsync_trans(handle, inode, 1);
err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
if (err)
return err;
map->m_flags |= EXT4_MAP_UNWRITTEN;
if (allocated > map->m_len)
allocated = map->m_len;
map->m_len = allocated;
return allocated;
}
static int
ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath, int flags,
unsigned int allocated, ext4_fsblk_t newblock)
{
struct ext4_ext_path *path = *ppath;
int ret = 0;
int err = 0;
ext4_io_end_t *io = ext4_inode_aio(inode);
ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
"block %llu, max_blocks %u, flags %x, allocated %u\n",
inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
flags, allocated);
ext4_ext_show_leaf(inode, path);
/*
* When writing into unwritten space, we should not fail to
* allocate metadata blocks for the new extent block if needed.
*/
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
allocated, newblock);
/* get_block() before submit the IO, split the extent */
if (flags & EXT4_GET_BLOCKS_PRE_IO) {
ret = ext4_split_convert_extents(handle, inode, map, ppath,
flags | EXT4_GET_BLOCKS_CONVERT);
if (ret <= 0)
goto out;
/*
* Flag the inode (non-AIO case) or the end_io struct (AIO case)
* so that this IO is converted to written when it is
* completed
*/
if (io)
ext4_set_io_unwritten_flag(inode, io);
else
ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out;
}
/* IO end_io complete, convert the filled extent to written */
if (flags & EXT4_GET_BLOCKS_CONVERT) {
ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
ppath);
if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
err = check_eofblocks_fl(handle, inode, map->m_lblk,
path, map->m_len);
} else
err = ret;
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = newblock;
if (allocated > map->m_len)
allocated = map->m_len;
map->m_len = allocated;
goto out2;
}
/* buffered IO case */
/*
* repeat fallocate creation request
* we already have an unwritten extent
*/
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto map_out;
}
/* buffered READ or buffered write_begin() lookup */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* We have blocks reserved already. We
* return allocated blocks so that delalloc
* won't do block reservation for us. But
* the buffer head will be unmapped so that
* a read from the block returns 0s.
*/
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out1;
}
/* buffered write, writepage time, convert */
ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
out:
if (ret <= 0) {
err = ret;
goto out2;
} else
allocated = ret;
map->m_flags |= EXT4_MAP_NEW;
/*
* if we allocated more blocks than requested
* we need to make sure we unmap the extra blocks
* allocated. The actually needed blocks will get
* unmapped later when we find the buffer_head marked
* new.
*/
if (allocated > map->m_len) {
unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
newblock + map->m_len,
allocated - map->m_len);
allocated = map->m_len;
}
map->m_len = allocated;
/*
* If we have done fallocate with the offset that is already
* delayed allocated, we would have block reservation
* and quota reservation done in the delayed write path.
* But fallocate would have already updated quota and block
* count for this offset. So cancel these reservation
*/
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
unsigned int reserved_clusters;
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, map->m_len);
if (reserved_clusters)
ext4_da_update_reserve_space(inode,
reserved_clusters,
0);
}
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
map->m_len);
if (err < 0)
goto out2;
}
out1:
if (allocated > map->m_len)
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
map->m_pblk = newblock;
map->m_len = allocated;
out2:
return err ? err : allocated;
}
/*
* get_implied_cluster_alloc - check to see if the requested
* allocation (in the map structure) overlaps with a cluster already
* allocated in an extent.
* @sb The filesystem superblock structure
* @map The requested lblk->pblk mapping
* @ex The extent structure which might contain an implied
* cluster allocation
*
* This function is called by ext4_ext_map_blocks() after we failed to
* find blocks that were already in the inode's extent tree. Hence,
* we know that the beginning of the requested region cannot overlap
* the extent from the inode's extent tree. There are three cases we
* want to catch. The first is this case:
*
* |--- cluster # N--|
* |--- extent ---| |---- requested region ---|
* |==========|
*
* The second case that we need to test for is this one:
*
* |--------- cluster # N ----------------|
* |--- requested region --| |------- extent ----|
* |=======================|
*
* The third case is when the requested region lies between two extents
* within the same cluster:
* |------------- cluster # N-------------|
* |----- ex -----| |---- ex_right ----|
* |------ requested region ------|
* |================|
*
* In each of the above cases, we need to set the map->m_pblk and
* map->m_len so they correspond to the extent labelled as
* "|====|" from cluster #N, since it is already in use for data in
* cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
* signal to ext4_ext_map_blocks() that map->m_pblk should be treated
* as a new "allocated" block region. Otherwise, we will return 0 and
* ext4_ext_map_blocks() will then allocate one or more new clusters
* by calling ext4_mb_new_blocks().
*/
static int get_implied_cluster_alloc(struct super_block *sb,
struct ext4_map_blocks *map,
struct ext4_extent *ex,
struct ext4_ext_path *path)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ext4_lblk_t ex_cluster_start, ex_cluster_end;
ext4_lblk_t rr_cluster_start;
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
/* The extent passed in that we are trying to match */
ex_cluster_start = EXT4_B2C(sbi, ee_block);
ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
/* The requested region passed into ext4_map_blocks() */
rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
if ((rr_cluster_start == ex_cluster_end) ||
(rr_cluster_start == ex_cluster_start)) {
if (rr_cluster_start == ex_cluster_end)
ee_start += ee_len - 1;
map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
map->m_len = min(map->m_len,
(unsigned) sbi->s_cluster_ratio - c_offset);
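/*
* Illustration (values assumed): with s_cluster_ratio == 4 and
* map->m_lblk == 10, c_offset == 2, so m_pblk points two blocks
* into the cluster and m_len is capped at 4 - 2 = 2 blocks, the
* remainder of the cluster.
*/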
/*
* Check for and handle this case:
*
* |--------- cluster # N-------------|
* |------- extent ----|
* |--- requested region ---|
* |===========|
*/
if (map->m_lblk < ee_block)
map->m_len = min(map->m_len, ee_block - map->m_lblk);
/*
* Check for the case where there is already another allocated
* block to the right of 'ex' but before the end of the cluster.
*
* |------------- cluster # N-------------|
* |----- ex -----| |---- ex_right ----|
* |------ requested region ------|
* |================|
*/
if (map->m_lblk > ee_block) {
ext4_lblk_t next = ext4_ext_next_allocated_block(path);
map->m_len = min(map->m_len, next - map->m_lblk);
}
trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
return 1;
}
trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
return 0;
}
/*
* Block allocation/map/preallocation routine for extents based files
*
*
* Needs to be called with
* down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
* (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
*
* return > 0, number of blocks already mapped/allocated
* if create == 0 and these are pre-allocated blocks
* buffer head is unmapped
* otherwise blocks are mapped
*
* return = 0, if plain look up failed (blocks have not been allocated)
* buffer head is unmapped
*
* return < 0, error case.
*/
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct ext4_ext_path *path = NULL;
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0;
int free_on_err = 0, err = 0, depth, ret;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
ext4_io_end_t *io = ext4_inode_aio(inode);
ext4_lblk_t cluster_offset;
int set_unwritten = 0;
bool map_from_cluster = false;
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
goto out2;
}
depth = ext_depth(inode);
/*
* a consistent leaf must not be empty;
* this situation is possible, though, _during_ tree modification;
* this is why the assert can't be put in ext4_find_extent()
*/
if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
EXT4_ERROR_INODE(inode, "bad extent address "
"lblock: %lu, depth: %d pblock %lld",
(unsigned long) map->m_lblk, depth,
path[depth].p_block);
err = -EIO;
goto out2;
}
ex = path[depth].p_ext;
if (ex) {
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
unsigned short ee_len;
/*
* unwritten extents are treated as holes, except that
* we split out initialized portions during a write.
*/
ee_len = ext4_ext_get_actual_len(ex);
trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
/* if found extent covers block, simply return it */
if (in_range(map->m_lblk, ee_block, ee_len)) {
newblock = map->m_lblk - ee_block + ee_start;
/* number of remaining blocks in the extent */
allocated = ee_len - (map->m_lblk - ee_block);
ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
ee_block, ee_len, newblock);
/*
* If the extent is initialized check whether the
* caller wants to convert it to unwritten.
*/
if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
allocated = convert_initialized_extent(
handle, inode, map, &path,
flags, allocated, newblock);
goto out2;
} else if (!ext4_ext_is_unwritten(ex))
goto out;
ret = ext4_ext_handle_unwritten_extents(
handle, inode, map, &path, flags,
allocated, newblock);
if (ret < 0)
err = ret;
else
allocated = ret;
goto out2;
}
}
/*
* requested block isn't allocated yet;
* we can't try to create blocks if the create flag is zero
*/
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* put the just-found gap into the cache to speed up
* subsequent requests
*/
ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
goto out2;
}
/*
* Okay, we need to do block allocation.
*/
newex.ee_block = cpu_to_le32(map->m_lblk);
cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
/*
* If we are doing bigalloc, check to see if the extent returned
* by ext4_find_extent() implies a cluster we can use.
*/
if (cluster_offset && ex &&
get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks;
}
/* find neighbour allocated blocks */
ar.lleft = map->m_lblk;
err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
if (err)
goto out2;
ar.lright = map->m_lblk;
ex2 = NULL;
err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
if (err)
goto out2;
/* Check if the extent after searching to the right implies a
* cluster we can use. */
if ((sbi->s_cluster_ratio > 1) && ex2 &&
get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks;
}
/*
* See if request is beyond maximum number of blocks we can have in
* a single extent. For an initialized extent this limit is
* EXT_INIT_MAX_LEN and for an unwritten extent this limit is
* EXT_UNWRITTEN_MAX_LEN.
*/
if (map->m_len > EXT_INIT_MAX_LEN &&
!(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
map->m_len = EXT_INIT_MAX_LEN;
else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
map->m_len = EXT_UNWRITTEN_MAX_LEN;
/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
newex.ee_len = cpu_to_le16(map->m_len);
err = ext4_ext_check_overlap(sbi, inode, &newex, path);
if (err)
allocated = ext4_ext_get_actual_len(&newex);
else
allocated = map->m_len;
/* allocate new block */
ar.inode = inode;
ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
ar.logical = map->m_lblk;
/*
* We calculate the offset from the beginning of the cluster
* for the logical block number, since when we allocate a
* physical cluster, the physical block should start at the
* same offset from the beginning of the cluster. This is
* needed so that future calls to get_implied_cluster_alloc()
* work correctly.
*/
offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
ar.goal -= offset;
ar.logical -= offset;
if (S_ISREG(inode->i_mode))
ar.flags = EXT4_MB_HINT_DATA;
else
/* disable in-core preallocation for non-regular files */
ar.flags = 0;
if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
ar.flags |= EXT4_MB_HINT_NOPREALLOC;
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
ar.flags |= EXT4_MB_DELALLOC_RESERVED;
newblock = ext4_mb_new_blocks(handle, &ar, &err);
if (!newblock)
goto out2;
ext_debug("allocate new block: goal %llu, found %llu/%u\n",
ar.goal, newblock, allocated);
free_on_err = 1;
allocated_clusters = ar.len;
ar.len = EXT4_C2B(sbi, ar.len) - offset;
if (ar.len > allocated)
ar.len = allocated;
got_allocated_blocks:
/* try to insert new extent into found leaf and return */
ext4_ext_store_pblock(&newex, newblock + offset);
newex.ee_len = cpu_to_le16(ar.len);
/* Mark unwritten */
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
ext4_ext_mark_unwritten(&newex);
map->m_flags |= EXT4_MAP_UNWRITTEN;
/*
* An io_end structure is created for every IO write to an
* unwritten extent. To avoid unnecessary conversion,
* here we flag only the IO that really needs the conversion.
* For the non-async direct IO case, flag the inode state
* so that we perform the conversion when IO is done.
*/
if (flags & EXT4_GET_BLOCKS_PRE_IO)
set_unwritten = 1;
}
err = 0;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
err = check_eofblocks_fl(handle, inode, map->m_lblk,
path, ar.len);
if (!err)
err = ext4_ext_insert_extent(handle, inode, &path,
&newex, flags);
if (!err && set_unwritten) {
if (io)
ext4_set_io_unwritten_flag(inode, io);
else
ext4_set_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN);
}
if (err && free_on_err) {
int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
/* free data blocks we just allocated */
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it for every free() */
ext4_discard_preallocations(inode);
ext4_free_blocks(handle, inode, NULL, newblock,
EXT4_C2B(sbi, allocated_clusters), fb_flags);
goto out2;
}
/* previous routine could use block we allocated */
newblock = ext4_ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
if (allocated > map->m_len)
allocated = map->m_len;
map->m_flags |= EXT4_MAP_NEW;
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now.
*/
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
unsigned int reserved_clusters;
/*
* Check how many clusters we had reserved for this allocated range
*/
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, allocated);
if (map_from_cluster) {
if (reserved_clusters) {
/*
* We have clusters reserved for this range.
* But since we are not doing actual allocation
* and are simply using blocks from previously
* allocated cluster, we should release the
* reservation and not claim quota.
*/
ext4_da_update_reserve_space(inode,
reserved_clusters, 0);
}
} else {
BUG_ON(allocated_clusters < reserved_clusters);
if (reserved_clusters < allocated_clusters) {
struct ext4_inode_info *ei = EXT4_I(inode);
int reservation = allocated_clusters -
reserved_clusters;
/*
* It seems we claimed some clusters outside of
* the range of this allocation. We should give
* them back to the reservation pool. This can
* happen in the following case:
*
* * Suppose s_cluster_ratio is 4 (i.e., each
* cluster has 4 blocks); thus, the clusters
* are [0-3],[4-7],[8-11]...
* * First comes a delayed allocation write for
* logical blocks 10 & 11. Since there were no
* previous delayed allocated blocks in the
* range [8-11], we would reserve 1 cluster
* for this write.
* * Next comes a write for logical blocks 3 to 8.
* In this case, we will reserve 2 clusters
* (for [0-3] and [4-7], and not for [8-11] as
* that range has delayed allocated blocks).
* Thus the total reserved clusters now becomes 3.
* * Now, during the delayed allocation writeout
* time, we will first write blocks [3-8] and
* allocate 3 clusters for writing these
* blocks. Also, we would claim all these
* three clusters above.
* * Now when we come here to write out the
* blocks [10-11], we would expect to claim
* the reservation of 1 cluster we had made
* (and we would claim it since there are no
* more delayed allocated blocks in the range
* [8-11]). But our reserved cluster count had
* already gone to 0.
*
* Thus, at the fourth step above, when we
* determine that there are still some unwritten
* delayed allocated blocks outside of our
* current block range, we should increment the
* reserved clusters count so that when the
* remaining blocks finally get written, we
* could claim them.
*/
dquot_reserve_block(inode,
EXT4_C2B(sbi, reservation));
spin_lock(&ei->i_block_reservation_lock);
ei->i_reserved_data_blocks += reservation;
spin_unlock(&ei->i_block_reservation_lock);
}
/*
* We will claim quota for all newly allocated blocks.
* We're updating the reserved space *after* the
* correction above so we do not accidentally free
* all the metadata reservation because we might
* actually need it later on.
*/
ext4_da_update_reserve_space(inode, allocated_clusters,
1);
}
}
/*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an unwritten extent.
*/
if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
else
ext4_update_inode_fsync_trans(handle, inode, 0);
out:
if (allocated > map->m_len)
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = newblock;
map->m_len = allocated;
out2:
ext4_ext_drop_refs(path);
kfree(path);
trace_ext4_ext_map_blocks_exit(inode, flags, map,
err ? err : allocated);
return err ? err : allocated;
}
void ext4_ext_truncate(handle_t *handle, struct inode *inode)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t last_block;
int err = 0;
/*
* TODO: optimization is possible here.
* Probably we need not scan at all,
* because page truncation is enough.
*/
/* we have to know where to truncate from in crash case */
EXT4_I(inode)->i_disksize = inode->i_size;
ext4_mark_inode_dirty(handle, inode);
last_block = (inode->i_size + sb->s_blocksize - 1)
>> EXT4_BLOCK_SIZE_BITS(sb);
retry:
err = ext4_es_remove_extent(inode, last_block,
EXT_MAX_BLOCKS - last_block);
if (err == -ENOMEM) {
cond_resched();
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
if (err) {
ext4_std_error(inode->i_sb, err);
return;
}
err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
ext4_std_error(inode->i_sb, err);
}
static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
ext4_lblk_t len, loff_t new_size,
int flags, int mode)
{
struct inode *inode = file_inode(file);
handle_t *handle;
int ret = 0;
int ret2 = 0;
int retries = 0;
struct ext4_map_blocks map;
unsigned int credits;
loff_t epos;
map.m_lblk = offset;
map.m_len = len;
/*
* Don't normalize the request if it can fit in one extent so
* that it doesn't get unnecessarily split into multiple
* extents.
*/
if (len <= EXT_UNWRITTEN_MAX_LEN)
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, len);
retry:
while (ret >= 0 && len) {
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
ret = ext4_map_blocks(handle, inode, &map, flags);
if (ret <= 0) {
ext4_debug("inode #%lu: block %u: len %u: "
"ext4_ext_map_blocks returned %d",
inode->i_ino, map.m_lblk,
map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
break;
}
map.m_lblk += ret;
map.m_len = len = len - ret;
epos = (loff_t)map.m_lblk << inode->i_blkbits;
inode->i_ctime = ext4_current_time(inode);
if (new_size) {
if (epos > new_size)
epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1)
inode->i_mtime = inode->i_ctime;
} else {
if (epos > inode->i_size)
ext4_set_inode_flag(inode,
EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret2)
break;
}
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries)) {
ret = 0;
goto retry;
}
return ret > 0 ? ret2 : ret;
}
static long ext4_zero_range(struct file *file, loff_t offset,
loff_t len, int mode)
{
struct inode *inode = file_inode(file);
handle_t *handle = NULL;
unsigned int max_blocks;
loff_t new_size = 0;
int ret = 0;
int flags;
int credits;
int partial_begin, partial_end;
loff_t start, end;
ext4_lblk_t lblk;
struct address_space *mapping = inode->i_mapping;
unsigned int blkbits = inode->i_blkbits;
trace_ext4_zero_range(inode, offset, len, mode);
if (!S_ISREG(inode->i_mode))
return -EINVAL;
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + len - 1);
if (ret)
return ret;
}
/*
* Round up offset. This is not fallocate, we need to zero out
* blocks, so convert the interior block-aligned part of the range to
* unwritten and possibly manually zero out unaligned parts of the
* range.
*/
start = round_up(offset, 1 << blkbits);
end = round_down((offset + len), 1 << blkbits);
if (start < offset || end > offset + len)
return -EINVAL;
partial_begin = offset & ((1 << blkbits) - 1);
partial_end = (offset + len) & ((1 << blkbits) - 1);
lblk = start >> blkbits;
max_blocks = (end >> blkbits);
if (max_blocks < lblk)
max_blocks = 0;
else
max_blocks -= lblk;
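/*
* Worked example (illustrative, 4KiB blocks assumed): for offset ==
* 1000 and len == 10000, start == 4096 and end == 8192, so lblk == 1
* and max_blocks == 1; partial_begin == 1000 and partial_end == 2808.
* Only block 1 is converted to unwritten, while the unaligned head and
* tail are zeroed out via ext4_zero_partial_blocks() further below.
*/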
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
EXT4_EX_NOCACHE;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
mutex_lock(&inode->i_mutex);
/*
* Indirect files do not support unwritten extents
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out_mutex;
/*
* If we have a partial block after EOF we have to allocate
* the entire block.
*/
if (partial_end)
max_blocks += 1;
}
if (max_blocks > 0) {
/* Now release the pages and zero block aligned part of pages*/
truncate_pagecache_range(inode, start, end - 1);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
/* Wait all existing dio workers, newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
if (ret)
goto out_dio;
/*
* Remove entire range from the extent status tree.
*
* ext4_es_remove_extent(inode, lblk, max_blocks) is
* NOT sufficient. I'm not sure why this is the case,
* but let's be conservative and remove the extent
* status tree for the entire inode. There should be
* no outstanding delalloc extents thanks to the
* filemap_write_and_wait_range() call above.
*/
ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
if (ret)
goto out_dio;
}
if (!partial_begin && !partial_end)
goto out_dio;
/*
* In worst case we have to writeout two nonadjacent unwritten
* blocks and update the inode
*/
credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
if (ext4_should_journal_data(inode))
credits += 2;
handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(inode->i_sb, ret);
goto out_dio;
}
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
if (new_size) {
ext4_update_inode_size(inode, new_size);
} else {
/*
* Mark that we allocate beyond EOF so the subsequent truncate
* can proceed even if the new size is the same as i_size.
*/
if ((offset + len) > i_size_read(inode))
ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
/*
* preallocate space for a file. This implements ext4's fallocate file
* operation, which gets called from sys_fallocate system call.
* For block-mapped files, posix_fallocate should fall back to the method
* of writing zeroes to the required new blocks (the same behavior which is
* expected for file systems which do not support fallocate() system call).
*/
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
loff_t new_size = 0;
unsigned int max_blocks;
int ret = 0;
int flags;
ext4_lblk_t lblk;
unsigned int blkbits = inode->i_blkbits;
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
return -EOPNOTSUPP;
if (mode & FALLOC_FL_PUNCH_HOLE)
return ext4_punch_hole(inode, offset, len);
ret = ext4_convert_inline_data(inode);
if (ret)
return ret;
/*
* currently supporting (pre)allocate mode for extent-based
* files _only_
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return -EOPNOTSUPP;
if (mode & FALLOC_FL_COLLAPSE_RANGE)
return ext4_collapse_range(inode, offset, len);
if (mode & FALLOC_FL_ZERO_RANGE)
return ext4_zero_range(file, offset, len, mode);
trace_ext4_fallocate_enter(inode, offset, len, mode);
lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks because the range may
* straddle block boundaries; see the worked example below for
* blocksize = 4096, offset = 3072 and len = 2048.
*/
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
- lblk;
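/*
* Worked example for the comment above (illustrative only): with
* blocksize 4096, offset 3072 and len 2048, the byte range [3072, 5120)
* touches blocks 0 and 1, so lblk = 0 and max_blocks =
* (8192 >> blkbits) - 0 = 2, whereas len >> blkbits would yield 0.
*/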
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
mutex_lock(&inode->i_mutex);
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out;
}
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
if (ret)
goto out;
if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
EXT4_I(inode)->i_sync_tid);
}
out:
mutex_unlock(&inode->i_mutex);
trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
return ret;
}
/*
* This function converts a range of blocks to written extents.
* The caller of this function will pass the start offset and the size;
* all unwritten extents within this range will be converted to
* written extents.
*
* This function is called from the direct IO end io callback
* function, to convert the fallocated extents after IO is completed.
* Returns 0 on success.
*/
int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len)
{
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
struct ext4_map_blocks map;
unsigned int credits, blkbits = inode->i_blkbits;
map.m_lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks because the range may
* straddle block boundaries (e.g. blocksize = 4096, offset = 3072, len = 2048)
*/
max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
map.m_lblk);
/*
* This is somewhat ugly but the idea is clear: when a transaction is
* reserved, everything goes into it. Otherwise we'd rather start several
* smaller transactions, converting each extent separately.
*/
if (handle) {
handle = ext4_journal_start_reserved(handle,
EXT4_HT_EXT_CONVERT);
if (IS_ERR(handle))
return PTR_ERR(handle);
credits = 0;
} else {
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
}
while (ret >= 0 && ret < max_blocks) {
map.m_lblk += ret;
map.m_len = (max_blocks -= ret);
if (credits) {
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
}
ret = ext4_map_blocks(handle, inode, &map,
EXT4_GET_BLOCKS_IO_CONVERT_EXT);
if (ret <= 0)
ext4_warning(inode->i_sb,
"inode #%lu: block %u: len %u: "
"ext4_ext_map_blocks returned %d",
inode->i_ino, map.m_lblk,
map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
if (credits)
ret2 = ext4_journal_stop(handle);
if (ret <= 0 || ret2)
break;
}
if (!credits)
ret2 = ext4_journal_stop(handle);
return ret > 0 ? ret2 : ret;
}
/*
* If newes is not an existing extent (newes->es_pblk equals zero), find
* the delayed extent at the start of newes, update newes accordingly, and
* return the start of the next delayed extent.
*
* If newes is an existing extent (newes->es_pblk is not equal to zero),
* return the start of the next delayed extent, or EXT_MAX_BLOCKS if no
* delayed extent is found. Leave newes unmodified.
*/
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes)
{
struct extent_status es;
ext4_lblk_t block, next_del;
if (newes->es_pblk == 0) {
ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
newes->es_lblk + newes->es_len - 1, &es);
/*
* If no extent in the extent tree contains block @newes->es_pblk,
* then the block may lie in 1) a hole or 2) a delayed extent.
*/
if (es.es_len == 0)
/* A hole found. */
return 0;
if (es.es_lblk > newes->es_lblk) {
/* A hole found. */
newes->es_len = min(es.es_lblk - newes->es_lblk,
newes->es_len);
return 0;
}
newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
}
block = newes->es_lblk + newes->es_len;
ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
if (es.es_len == 0)
next_del = EXT_MAX_BLOCKS;
else
next_del = es.es_lblk;
return next_del;
}
/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
static int ext4_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
__u64 physical = 0;
__u64 length;
__u32 flags = FIEMAP_EXTENT_LAST;
int blockbits = inode->i_sb->s_blocksize_bits;
int error = 0;
/* in-inode? */
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
struct ext4_iloc iloc;
int offset; /* offset of xattr in inode */
error = ext4_get_inode_loc(inode, &iloc);
if (error)
return error;
physical = (__u64)iloc.bh->b_blocknr << blockbits;
offset = EXT4_GOOD_OLD_INODE_SIZE +
EXT4_I(inode)->i_extra_isize;
physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
flags |= FIEMAP_EXTENT_DATA_INLINE;
brelse(iloc.bh);
} else { /* external block */
physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
}
if (physical)
error = fiemap_fill_next_extent(fieinfo, 0, physical,
length, flags);
return (error < 0 ? error : 0);
}
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
ext4_lblk_t start_blk;
int error = 0;
if (ext4_has_inline_data(inode)) {
int has_inline = 1;
error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
start, len);
if (has_inline)
return error;
}
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
error = ext4_ext_precache(inode);
if (error)
return error;
}
/* fallback to generic here if not in extents fmt */
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return generic_block_fiemap(inode, fieinfo, start, len,
ext4_get_block);
if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
return -EBADR;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
error = ext4_xattr_fiemap(inode, fieinfo);
} else {
ext4_lblk_t len_blks;
__u64 last_blk;
start_blk = start >> inode->i_sb->s_blocksize_bits;
last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
if (last_blk >= EXT_MAX_BLOCKS)
last_blk = EXT_MAX_BLOCKS-1;
len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
/*
* Walk the extent tree gathering extent information
* and pushing extents back to the user.
*/
error = ext4_fill_fiemap_extents(inode, start_blk,
len_blks, fieinfo);
}
return error;
}
/*
* ext4_access_path:
* Function to access the path buffer for marking it dirty.
* It also checks if there are sufficient credits left in the journal handle
* to update the path.
*/
static int
ext4_access_path(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
{
int credits, err;
if (!ext4_handle_valid(handle))
return 0;
/*
* Check if we need to extend journal credits:
* 3 for leaf, sb, and inode plus 2 (bmap and group
* descriptor) for each block group; assume two block
* groups
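* (3 + 2 * 2 = 7, hence the check against 7 credits below)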
*/
if (handle->h_buffer_credits < 7) {
credits = ext4_writepage_trans_blocks(inode);
err = ext4_ext_truncate_extend_restart(handle, inode, credits);
/* EAGAIN is success */
if (err && err != -EAGAIN)
return err;
}
err = ext4_ext_get_access(handle, inode, path);
return err;
}
/*
* ext4_ext_shift_path_extents:
* Shift the extents of a path structure lying between path[depth].p_ext
* and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift
* from starting block for each extent.
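*
* For example (a sketch): with shift == 2, extents logically starting
* at blocks 10 and 15 are rewritten to start at blocks 8 and 13; only
* the logical start (ee_block) changes, the physical blocks stay put.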
*/
static int
ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
struct inode *inode, handle_t *handle,
ext4_lblk_t *start)
{
int depth, err = 0;
struct ext4_extent *ex_start, *ex_last;
bool update = false;
depth = path->p_depth;
while (depth >= 0) {
if (depth == path->p_depth) {
ex_start = path[depth].p_ext;
if (!ex_start)
return -EIO;
ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
err = ext4_access_path(handle, inode, path + depth);
if (err)
goto out;
if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
update = 1;
*start = le32_to_cpu(ex_last->ee_block) +
ext4_ext_get_actual_len(ex_last);
while (ex_start <= ex_last) {
le32_add_cpu(&ex_start->ee_block, -shift);
/* Try to merge to the left. */
if ((ex_start >
EXT_FIRST_EXTENT(path[depth].p_hdr)) &&
ext4_ext_try_to_merge_right(inode,
path, ex_start - 1))
ex_last--;
else
ex_start++;
}
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto out;
if (--depth < 0 || !update)
break;
}
/* Update index too */
err = ext4_access_path(handle, inode, path + depth);
if (err)
goto out;
le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto out;
/* we are done if current index is not a starting index */
if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
break;
depth--;
}
out:
return err;
}
/*
* ext4_ext_shift_extents:
* All the extents which lie in the range from start to the last allocated
* block for the file are shifted downwards by shift blocks.
* On success, 0 is returned, error otherwise.
*/
static int
ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
ext4_lblk_t start, ext4_lblk_t shift)
{
struct ext4_ext_path *path;
int ret = 0, depth;
struct ext4_extent *extent;
ext4_lblk_t stop_block;
ext4_lblk_t ex_start, ex_end;
/* Let path point to the last extent */
path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (!extent)
goto out;
stop_block = le32_to_cpu(extent->ee_block) +
ext4_ext_get_actual_len(extent);
/* Nothing to shift if the hole is at the end of the file */
if (start >= stop_block)
goto out;
/*
* Don't start shifting extents until we make sure the hole is big
* enough to accommodate the shift.
*/
path = ext4_find_extent(inode, start - 1, &path, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (extent) {
ex_start = le32_to_cpu(extent->ee_block);
ex_end = le32_to_cpu(extent->ee_block) +
ext4_ext_get_actual_len(extent);
} else {
ex_start = 0;
ex_end = 0;
}
if ((start == ex_start && shift > ex_start) ||
(shift > start - ex_end))
return -EINVAL;
/* It's safe to start updating extents */
while (start < stop_block) {
path = ext4_find_extent(inode, start, &path, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = path->p_depth;
extent = path[depth].p_ext;
if (!extent) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) start);
return -EIO;
}
if (start > le32_to_cpu(extent->ee_block)) {
/* Hole, move to the next extent */
if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
path[depth].p_ext++;
} else {
start = ext4_ext_next_allocated_block(path);
continue;
}
}
ret = ext4_ext_shift_path_extents(path, shift, inode,
handle, &start);
if (ret)
break;
}
out:
ext4_ext_drop_refs(path);
kfree(path);
return ret;
}
/*
* ext4_collapse_range:
* This implements fallocate's collapse-range functionality for ext4.
* Returns 0 on success and a non-zero value on error.
*/
int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t punch_start, punch_stop;
handle_t *handle;
unsigned int credits;
loff_t new_size, ioffset;
int ret;
/* Collapse range works only on fs block size aligned offsets. */
if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
len & (EXT4_CLUSTER_SIZE(sb) - 1))
return -EINVAL;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
trace_ext4_collapse_range(inode, offset, len);
punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Need to round down offset to be aligned with page size boundary
* for page size > block size.
*/
ioffset = round_down(offset, PAGE_SIZE);
/* Write out all dirty pages */
ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
LLONG_MAX);
if (ret)
return ret;
/* Take mutex lock */
mutex_lock(&inode->i_mutex);
/*
* The collapse range must not overlap or reach EOF; in that case
* it would effectively be a truncate operation
*/
if (offset + len >= i_size_read(inode)) {
ret = -EINVAL;
goto out_mutex;
}
/* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
truncate_pagecache(inode, ioffset);
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out_dio;
}
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, punch_start,
EXT_MAX_BLOCKS - punch_start);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
ext4_discard_preallocations(inode);
ret = ext4_ext_shift_extents(inode, handle, punch_stop,
punch_stop - punch_start);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
new_size = i_size_read(inode) - len;
i_size_write(inode, new_size);
EXT4_I(inode)->i_disksize = new_size;
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
/**
* ext4_swap_extents - Swap extents between two inodes
*
* @inode1: First inode
* @inode2: Second inode
* @lblk1: Start block for first inode
* @lblk2: Start block for second inode
* @count: Number of blocks to swap
* @unwritten: Mark second inode's extents as unwritten after swap
* @erp: Pointer to save error value
*
* This helper routine does exactly what is promise "swap extents". All other
* stuff such as page-cache locking consistency, bh mapping consistency or
* extent's data copying must be performed by caller.
* Locking:
* i_mutex is held for both inodes
* i_data_sem is locked for write for both inodes
* Assumptions:
* All pages from requested range are locked for both inodes
*/
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
ext4_lblk_t count, int unwritten, int *erp)
{
struct ext4_ext_path *path1 = NULL;
struct ext4_ext_path *path2 = NULL;
int replaced_count = 0;
BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
BUG_ON(!mutex_is_locked(&inode1->i_mutex));
	BUG_ON(!mutex_is_locked(&inode2->i_mutex));
*erp = ext4_es_remove_extent(inode1, lblk1, count);
if (unlikely(*erp))
return 0;
*erp = ext4_es_remove_extent(inode2, lblk2, count);
if (unlikely(*erp))
return 0;
while (count) {
struct ext4_extent *ex1, *ex2, tmp_ex;
ext4_lblk_t e1_blk, e2_blk;
int e1_len, e2_len, len;
int split = 0;
path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
if (unlikely(IS_ERR(path1))) {
*erp = PTR_ERR(path1);
path1 = NULL;
finish:
count = 0;
goto repeat;
}
path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
if (unlikely(IS_ERR(path2))) {
*erp = PTR_ERR(path2);
path2 = NULL;
goto finish;
}
ex1 = path1[path1->p_depth].p_ext;
ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
if (unlikely(!ex2 || !ex1))
goto finish;
e1_blk = le32_to_cpu(ex1->ee_block);
e2_blk = le32_to_cpu(ex2->ee_block);
e1_len = ext4_ext_get_actual_len(ex1);
e2_len = ext4_ext_get_actual_len(ex2);
/* Hole handling */
if (!in_range(lblk1, e1_blk, e1_len) ||
!in_range(lblk2, e2_blk, e2_len)) {
ext4_lblk_t next1, next2;
/* if hole after extent, then go to next extent */
next1 = ext4_ext_next_allocated_block(path1);
next2 = ext4_ext_next_allocated_block(path2);
/* If hole before extent, then shift to that extent */
if (e1_blk > lblk1)
next1 = e1_blk;
if (e2_blk > lblk2)
			next2 = e2_blk;
/* Do we have something to swap */
if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
goto finish;
		/* Move to the rightmost boundary */
len = next1 - lblk1;
if (len < next2 - lblk2)
len = next2 - lblk2;
if (len > count)
len = count;
lblk1 += len;
lblk2 += len;
count -= len;
goto repeat;
}
/* Prepare left boundary */
if (e1_blk < lblk1) {
split = 1;
*erp = ext4_force_split_extent_at(handle, inode1,
&path1, lblk1, 0);
if (unlikely(*erp))
goto finish;
}
if (e2_blk < lblk2) {
split = 1;
*erp = ext4_force_split_extent_at(handle, inode2,
&path2, lblk2, 0);
if (unlikely(*erp))
goto finish;
}
		/* ext4_split_extent_at() may result in a leaf extent split;
		 * the path must be revalidated. */
if (split)
goto repeat;
/* Prepare right boundary */
len = count;
if (len > e1_blk + e1_len - lblk1)
len = e1_blk + e1_len - lblk1;
if (len > e2_blk + e2_len - lblk2)
len = e2_blk + e2_len - lblk2;
if (len != e1_len) {
split = 1;
*erp = ext4_force_split_extent_at(handle, inode1,
&path1, lblk1 + len, 0);
if (unlikely(*erp))
goto finish;
}
if (len != e2_len) {
split = 1;
*erp = ext4_force_split_extent_at(handle, inode2,
&path2, lblk2 + len, 0);
if (*erp)
goto finish;
}
		/* ext4_split_extent_at() may result in a leaf extent split;
		 * the path must be revalidated. */
if (split)
goto repeat;
BUG_ON(e2_len != e1_len);
*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
if (unlikely(*erp))
goto finish;
*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
if (unlikely(*erp))
goto finish;
/* Both extents are fully inside boundaries. Swap it now */
tmp_ex = *ex1;
ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
ex1->ee_len = cpu_to_le16(e2_len);
ex2->ee_len = cpu_to_le16(e1_len);
if (unwritten)
ext4_ext_mark_unwritten(ex2);
if (ext4_ext_is_unwritten(&tmp_ex))
ext4_ext_mark_unwritten(ex1);
ext4_ext_try_to_merge(handle, inode2, path2, ex2);
ext4_ext_try_to_merge(handle, inode1, path1, ex1);
*erp = ext4_ext_dirty(handle, inode2, path2 +
path2->p_depth);
if (unlikely(*erp))
goto finish;
*erp = ext4_ext_dirty(handle, inode1, path1 +
path1->p_depth);
/*
		 * Looks scary, doesn't it? The second inode already points to
		 * the new blocks and was successfully dirtied. Fortunately, an
		 * error here can only come from the journal, in which case the
		 * whole transaction is aborted anyway.
*/
if (unlikely(*erp))
goto finish;
lblk1 += len;
lblk2 += len;
replaced_count += len;
count -= len;
repeat:
ext4_ext_drop_refs(path1);
kfree(path1);
ext4_ext_drop_refs(path2);
kfree(path2);
path1 = path2 = NULL;
}
return replaced_count;
}
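/*
 * Illustrative sketch (assumed caller, hypothetical variable names):
 * under the locking rules documented above, a defragmentation-style
 * user of this helper would invoke it roughly as follows. Note that on
 * error some blocks may already have been swapped, which is why the
 * replaced count is returned separately from *erp.
 *
 *	// both inodes locked, both i_data_sem held for write
 *	replaced = ext4_swap_extents(handle, orig_inode, donor_inode,
 *				     orig_blk, donor_blk, blk_count,
 *				     1, &err);	// 1 == mark donor unwritten
 *	if (err)
 *		goto out;
 */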
| ./CrossVul/dataset_final_sorted/CWE-17/c/bad_1454_0 |
crossvul-cpp_data_good_1499_2 | #include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t skip, copy, left, wanted;
const struct iovec *iov;
char __user *buf;
void *kaddr, *from;
if (unlikely(bytes > i->count))
bytes = i->count;
if (unlikely(!bytes))
return 0;
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
buf = iov->iov_base + skip;
copy = min(bytes, iov->iov_len - skip);
if (!fault_in_pages_writeable(buf, copy)) {
kaddr = kmap_atomic(page);
from = kaddr + offset;
/* first chunk, usually the only one */
left = __copy_to_user_inatomic(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
bytes -= copy;
while (unlikely(!left && bytes)) {
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
left = __copy_to_user_inatomic(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
bytes -= copy;
}
if (likely(!bytes)) {
kunmap_atomic(kaddr);
goto done;
}
offset = from - kaddr;
buf += copy;
kunmap_atomic(kaddr);
copy = min(bytes, iov->iov_len - skip);
}
/* Too bad - revert to non-atomic kmap */
kaddr = kmap(page);
from = kaddr + offset;
left = __copy_to_user(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
bytes -= copy;
while (unlikely(!left && bytes)) {
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
left = __copy_to_user(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
bytes -= copy;
}
kunmap(page);
done:
i->count -= wanted - bytes;
i->nr_segs -= iov - i->iov;
i->iov = iov;
i->iov_offset = skip;
return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
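/*
 * Illustrative sketch (assumed caller, hypothetical variable names): a
 * read path holding an up-to-date page cache page drains it into the
 * user iterator like this; a short return means the user buffer
 * faulted, so the caller should stop early.
 *
 *	size_t want = min(bytes_left, PAGE_SIZE - page_off);
 *	size_t got = copy_page_to_iter(page, page_off, want, iter);
 *	if (got < want)
 *		return copied ? copied : -EFAULT;
 */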
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t skip, copy, left, wanted;
const struct iovec *iov;
char __user *buf;
void *kaddr, *to;
if (unlikely(bytes > i->count))
bytes = i->count;
if (unlikely(!bytes))
return 0;
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
buf = iov->iov_base + skip;
copy = min(bytes, iov->iov_len - skip);
if (!fault_in_pages_readable(buf, copy)) {
kaddr = kmap_atomic(page);
to = kaddr + offset;
/* first chunk, usually the only one */
left = __copy_from_user_inatomic(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
bytes -= copy;
while (unlikely(!left && bytes)) {
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
left = __copy_from_user_inatomic(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
bytes -= copy;
}
if (likely(!bytes)) {
kunmap_atomic(kaddr);
goto done;
}
offset = to - kaddr;
buf += copy;
kunmap_atomic(kaddr);
copy = min(bytes, iov->iov_len - skip);
}
/* Too bad - revert to non-atomic kmap */
kaddr = kmap(page);
to = kaddr + offset;
left = __copy_from_user(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
bytes -= copy;
while (unlikely(!left && bytes)) {
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
left = __copy_from_user(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
bytes -= copy;
}
kunmap(page);
done:
i->count -= wanted - bytes;
i->nr_segs -= iov - i->iov;
i->iov = iov;
i->iov_offset = skip;
return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes)
{
size_t copied = 0, left = 0;
while (bytes) {
char __user *buf = iov->iov_base + base;
int copy = min(bytes, iov->iov_len - base);
base = 0;
left = __copy_from_user_inatomic(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
iov++;
if (unlikely(left))
break;
}
return copied - left;
}
/*
* Copy as much as we can into the page and return the number of bytes which
* were successfully copied. If a fault is encountered then return the number of
* bytes which were copied.
*/
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr;
size_t copied;
kaddr = kmap_atomic(page);
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
i->iov, i->iov_offset, bytes);
}
kunmap_atomic(kaddr);
return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
BUG_ON(i->count < bytes);
if (likely(i->nr_segs == 1)) {
i->iov_offset += bytes;
i->count -= bytes;
} else {
const struct iovec *iov = i->iov;
size_t base = i->iov_offset;
unsigned long nr_segs = i->nr_segs;
/*
* The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
*/
while (bytes || unlikely(i->count && !iov->iov_len)) {
int copy;
copy = min(bytes, iov->iov_len - base);
BUG_ON(!i->count || i->count < copy);
i->count -= copy;
bytes -= copy;
base += copy;
if (iov->iov_len == base) {
iov++;
nr_segs--;
base = 0;
}
}
i->iov = iov;
i->iov_offset = base;
i->nr_segs = nr_segs;
}
}
EXPORT_SYMBOL(iov_iter_advance);
/*
* Fault in the first iovec of the given iov_iter, to a maximum length
* of bytes. Returns 0 on success, or non-zero if the memory could not be
* accessed (ie. because it is an invalid address).
*
* writev-intensive code may want this to prefault several iovecs -- that
* would be possible (callers must not rely on the fact that _only_ the
* first iovec will be faulted with the current implementation).
*/
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
char __user *buf = i->iov->iov_base + i->iov_offset;
bytes = min(bytes, i->iov->iov_len - i->iov_offset);
return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
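/*
 * Illustrative sketch of the canonical buffered-write pattern built on
 * the helpers above (loop body only, names hypothetical): fault the
 * user pages in before entering the atomic section, copy atomically,
 * then advance by however much actually succeeded.
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	iov_iter_advance(i, copied);
 *	// copied == 0 means a fault under kmap_atomic; the caller
 *	// simply loops and faults the pages in again
 */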
/*
* Return the count of just the current iov_iter segment.
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
const struct iovec *iov = i->iov;
if (i->nr_segs == 1)
return i->count;
else
return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
const struct iovec *iov = i->iov;
unsigned long res;
size_t size = i->count;
size_t n;
if (!size)
return 0;
res = (unsigned long)iov->iov_base + i->iov_offset;
n = iov->iov_len - i->iov_offset;
if (n >= size)
return res | size;
size -= n;
res |= n;
while (size > (++iov)->iov_len) {
res |= (unsigned long)iov->iov_base | iov->iov_len;
size -= iov->iov_len;
}
res |= (unsigned long)iov->iov_base | size;
return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
void iov_iter_init(struct iov_iter *i, int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
{
/* It will get better. Eventually... */
if (segment_eq(get_fs(), KERNEL_DS))
direction |= REQ_KERNEL;
i->type = direction;
i->iov = iov;
i->nr_segs = nr_segs;
i->iov_offset = 0;
i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize,
size_t *start)
{
size_t offset = i->iov_offset;
const struct iovec *iov = i->iov;
size_t len;
unsigned long addr;
int n;
int res;
len = iov->iov_len - offset;
if (len > i->count)
len = i->count;
if (len > maxsize)
len = maxsize;
addr = (unsigned long)iov->iov_base + offset;
len += *start = addr & (PAGE_SIZE - 1);
addr &= ~(PAGE_SIZE - 1);
n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
if (unlikely(res < 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
{
size_t offset = i->iov_offset;
const struct iovec *iov = i->iov;
size_t len;
unsigned long addr;
void *p;
int n;
int res;
len = iov->iov_len - offset;
if (len > i->count)
len = i->count;
if (len > maxsize)
len = maxsize;
addr = (unsigned long)iov->iov_base + offset;
len += *start = addr & (PAGE_SIZE - 1);
addr &= ~(PAGE_SIZE - 1);
n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
if (!p)
p = vmalloc(n * sizeof(struct page *));
if (!p)
return -ENOMEM;
res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
if (unlikely(res < 0)) {
kvfree(p);
return res;
}
*pages = p;
return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
size_t offset = i->iov_offset;
size_t size = i->count;
const struct iovec *iov = i->iov;
int npages = 0;
int n;
for (n = 0; size && n < i->nr_segs; n++, iov++) {
unsigned long addr = (unsigned long)iov->iov_base + offset;
size_t len = iov->iov_len - offset;
offset = 0;
if (unlikely(!len)) /* empty segment */
continue;
if (len > size)
len = size;
npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
- addr / PAGE_SIZE;
if (npages >= maxpages) /* don't bother going further */
return maxpages;
size -= len;
offset = 0;
}
return min(npages, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);
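/*
 * Worked example (illustrative numbers): a single segment at user
 * address 0x1ff0 with iov_len 5000 touches bytes 0x1ff0..0x3377, i.e.
 * pages 1, 2 and 3, and the arithmetic above agrees:
 *
 *	(0x1ff0 + 5000 + 4095) / 4096 - 0x1ff0 / 4096 = 4 - 1 = 3
 */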
| ./CrossVul/dataset_final_sorted/CWE-17/c/good_1499_2 |
crossvul-cpp_data_bad_2415_4 | /*
* unicode.c
*
* PURPOSE
* Routines for converting between UTF-8 and OSTA Compressed Unicode.
* Also handles filename mangling
*
* DESCRIPTION
* OSTA Compressed Unicode is explained in the OSTA UDF specification.
* http://www.osta.org/
* UTF-8 is explained in the IETF RFC XXXX.
* ftp://ftp.internic.net/rfc/rfcxxxx.txt
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*/
#include "udfdecl.h"
#include <linux/kernel.h>
#include <linux/string.h> /* for memset */
#include <linux/nls.h>
#include <linux/crc-itu-t.h>
#include <linux/slab.h>
#include "udf_sb.h"
static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
{
if ((!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN - 2))
return 0;
memset(dest, 0, sizeof(struct ustr));
memcpy(dest->u_name, src, strlen);
dest->u_cmpID = 0x08;
dest->u_len = strlen;
return strlen;
}
/*
* udf_build_ustr
*/
int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
{
int usesize;
if (!dest || !ptr || !size)
return -1;
BUG_ON(size < 2);
usesize = min_t(size_t, ptr[size - 1], sizeof(dest->u_name));
usesize = min(usesize, size - 2);
dest->u_cmpID = ptr[0];
dest->u_len = usesize;
memcpy(dest->u_name, ptr + 1, usesize);
memset(dest->u_name + usesize, 0, sizeof(dest->u_name) - usesize);
return 0;
}
/*
* udf_build_ustr_exact
*/
static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
{
if ((!dest) || (!ptr) || (!exactsize))
return -1;
memset(dest, 0, sizeof(struct ustr));
dest->u_cmpID = ptr[0];
dest->u_len = exactsize - 1;
memcpy(dest->u_name, ptr + 1, exactsize - 1);
return 0;
}
/*
* udf_ocu_to_utf8
*
* PURPOSE
* Convert OSTA Compressed Unicode to the UTF-8 equivalent.
*
* PRE-CONDITIONS
* utf Pointer to UTF-8 output buffer.
* ocu Pointer to OSTA Compressed Unicode input buffer
* of size UDF_NAME_LEN bytes.
* both of type "struct ustr *"
*
* POST-CONDITIONS
 * <return> Length of the UTF-8 output; zero on failure.
*
* HISTORY
* November 12, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
{
const uint8_t *ocu;
uint8_t cmp_id, ocu_len;
int i;
ocu_len = ocu_i->u_len;
if (ocu_len == 0) {
memset(utf_o, 0, sizeof(struct ustr));
return 0;
}
cmp_id = ocu_i->u_cmpID;
if (cmp_id != 8 && cmp_id != 16) {
memset(utf_o, 0, sizeof(struct ustr));
pr_err("unknown compression code (%d) stri=%s\n",
cmp_id, ocu_i->u_name);
return 0;
}
ocu = ocu_i->u_name;
utf_o->u_len = 0;
for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
/* Expand OSTA compressed Unicode to Unicode */
uint32_t c = ocu[i++];
if (cmp_id == 16)
c = (c << 8) | ocu[i++];
/* Compress Unicode to UTF-8 */
if (c < 0x80U)
utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
else if (c < 0x800U) {
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xc0 | (c >> 6));
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0x80 | (c & 0x3f));
} else {
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xe0 | (c >> 12));
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0x80 |
((c >> 6) & 0x3f));
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0x80 | (c & 0x3f));
}
}
utf_o->u_cmpID = 8;
return utf_o->u_len;
}
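/*
 * Worked example (illustrative): with cmp_id == 16, the OCU byte pair
 * 0x01 0x61 expands to code point U+0161. Since 0x80 <= 0x161 < 0x800,
 * the loop above emits exactly two UTF-8 bytes:
 *
 *	0xc0 | (0x161 >> 6)   == 0xc5
 *	0x80 | (0x161 & 0x3f) == 0xa1
 */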
/*
*
* udf_utf8_to_ocu
*
* PURPOSE
* Convert UTF-8 to the OSTA Compressed Unicode equivalent.
*
* DESCRIPTION
* This routine is only called by udf_lookup().
*
* PRE-CONDITIONS
* ocu Pointer to OSTA Compressed Unicode output
* buffer of size UDF_NAME_LEN bytes.
* utf Pointer to UTF-8 input buffer.
* utf_len Length of UTF-8 input buffer in bytes.
*
* POST-CONDITIONS
 * <return> Length of the CS0 output in bytes.
*
* HISTORY
* November 12, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
{
unsigned c, i, max_val, utf_char;
int utf_cnt, u_len;
memset(ocu, 0, sizeof(dstring) * length);
ocu[0] = 8;
max_val = 0xffU;
try_again:
u_len = 0U;
utf_char = 0U;
utf_cnt = 0U;
for (i = 0U; i < utf->u_len; i++) {
c = (uint8_t)utf->u_name[i];
/* Complete a multi-byte UTF-8 character */
if (utf_cnt) {
utf_char = (utf_char << 6) | (c & 0x3fU);
if (--utf_cnt)
continue;
} else {
/* Check for a multi-byte UTF-8 character */
if (c & 0x80U) {
/* Start a multi-byte UTF-8 character */
if ((c & 0xe0U) == 0xc0U) {
utf_char = c & 0x1fU;
utf_cnt = 1;
} else if ((c & 0xf0U) == 0xe0U) {
utf_char = c & 0x0fU;
utf_cnt = 2;
} else if ((c & 0xf8U) == 0xf0U) {
utf_char = c & 0x07U;
utf_cnt = 3;
} else if ((c & 0xfcU) == 0xf8U) {
utf_char = c & 0x03U;
utf_cnt = 4;
} else if ((c & 0xfeU) == 0xfcU) {
utf_char = c & 0x01U;
utf_cnt = 5;
} else {
goto error_out;
}
continue;
} else {
/* Single byte UTF-8 character (most common) */
utf_char = c;
}
}
/* Choose no compression if necessary */
if (utf_char > max_val) {
if (max_val == 0xffU) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
goto try_again;
}
goto error_out;
}
if (max_val == 0xffffU)
ocu[++u_len] = (uint8_t)(utf_char >> 8);
ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
}
if (utf_cnt) {
error_out:
ocu[++u_len] = '?';
printk(KERN_DEBUG pr_fmt("bad UTF-8 character\n"));
}
ocu[length - 1] = (uint8_t)u_len + 1;
return u_len + 1;
}
static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
const struct ustr *ocu_i)
{
const uint8_t *ocu;
uint8_t cmp_id, ocu_len;
int i, len;
ocu_len = ocu_i->u_len;
if (ocu_len == 0) {
memset(utf_o, 0, sizeof(struct ustr));
return 0;
}
cmp_id = ocu_i->u_cmpID;
if (cmp_id != 8 && cmp_id != 16) {
memset(utf_o, 0, sizeof(struct ustr));
pr_err("unknown compression code (%d) stri=%s\n",
cmp_id, ocu_i->u_name);
return 0;
}
ocu = ocu_i->u_name;
utf_o->u_len = 0;
for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
/* Expand OSTA compressed Unicode to Unicode */
uint32_t c = ocu[i++];
if (cmp_id == 16)
c = (c << 8) | ocu[i++];
len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
UDF_NAME_LEN - utf_o->u_len);
/* Valid character? */
if (len >= 0)
utf_o->u_len += len;
else
utf_o->u_name[utf_o->u_len++] = '?';
}
utf_o->u_cmpID = 8;
return utf_o->u_len;
}
static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
int length)
{
int len;
unsigned i, max_val;
uint16_t uni_char;
int u_len;
memset(ocu, 0, sizeof(dstring) * length);
ocu[0] = 8;
max_val = 0xffU;
try_again:
u_len = 0U;
for (i = 0U; i < uni->u_len; i++) {
len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
if (!len)
continue;
/* Invalid character, deal with it */
if (len < 0) {
len = 1;
uni_char = '?';
}
if (uni_char > max_val) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
goto try_again;
}
if (max_val == 0xffffU)
ocu[++u_len] = (uint8_t)(uni_char >> 8);
ocu[++u_len] = (uint8_t)(uni_char & 0xffU);
i += len - 1;
}
ocu[length - 1] = (uint8_t)u_len + 1;
return u_len + 1;
}
int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
int flen)
{
struct ustr *filename, *unifilename;
int len = 0;
filename = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!filename)
return 0;
unifilename = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!unifilename)
goto out1;
if (udf_build_ustr_exact(unifilename, sname, flen))
goto out2;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
if (!udf_CS0toUTF8(filename, unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
goto out2;
}
} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, filename,
unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
goto out2;
}
} else
goto out2;
len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
unifilename->u_name, unifilename->u_len);
out2:
kfree(unifilename);
out1:
kfree(filename);
return len;
}
int udf_put_filename(struct super_block *sb, const uint8_t *sname,
uint8_t *dname, int flen)
{
struct ustr unifilename;
int namelen;
if (!udf_char_to_ustr(&unifilename, sname, flen))
return 0;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN);
if (!namelen)
return 0;
} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname,
&unifilename, UDF_NAME_LEN);
if (!namelen)
return 0;
} else
return 0;
return namelen;
}
#define ILLEGAL_CHAR_MARK '_'
#define EXT_MARK '.'
#define CRC_MARK '#'
#define EXT_SIZE 5
static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
int udfLen, uint8_t *fidName,
int fidNameLen)
{
int index, newIndex = 0, needsCRC = 0;
int extIndex = 0, newExtIndex = 0, hasExt = 0;
unsigned short valueCRC;
uint8_t curr;
if (udfName[0] == '.' &&
(udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
needsCRC = 1;
newIndex = udfLen;
memcpy(newName, udfName, udfLen);
} else {
for (index = 0; index < udfLen; index++) {
curr = udfName[index];
if (curr == '/' || curr == 0) {
needsCRC = 1;
curr = ILLEGAL_CHAR_MARK;
while (index + 1 < udfLen &&
(udfName[index + 1] == '/' ||
udfName[index + 1] == 0))
index++;
}
if (curr == EXT_MARK &&
(udfLen - index - 1) <= EXT_SIZE) {
if (udfLen == index + 1)
hasExt = 0;
else {
hasExt = 1;
extIndex = index;
newExtIndex = newIndex;
}
}
if (newIndex < 256)
newName[newIndex++] = curr;
else
needsCRC = 1;
}
}
if (needsCRC) {
uint8_t ext[EXT_SIZE];
int localExtIndex = 0;
if (hasExt) {
int maxFilenameLen;
for (index = 0;
index < EXT_SIZE && extIndex + index + 1 < udfLen;
index++) {
curr = udfName[extIndex + index + 1];
if (curr == '/' || curr == 0) {
needsCRC = 1;
curr = ILLEGAL_CHAR_MARK;
while (extIndex + index + 2 < udfLen &&
(index + 1 < EXT_SIZE &&
(udfName[extIndex + index + 2] == '/' ||
udfName[extIndex + index + 2] == 0)))
index++;
}
ext[localExtIndex++] = curr;
}
maxFilenameLen = 250 - localExtIndex;
if (newIndex > maxFilenameLen)
newIndex = maxFilenameLen;
else
newIndex = newExtIndex;
} else if (newIndex > 250)
newIndex = 250;
newName[newIndex++] = CRC_MARK;
valueCRC = crc_itu_t(0, fidName, fidNameLen);
newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8);
newName[newIndex++] = hex_asc_upper_hi(valueCRC);
newName[newIndex++] = hex_asc_upper_lo(valueCRC);
if (hasExt) {
newName[newIndex++] = EXT_MARK;
for (index = 0; index < localExtIndex; index++)
newName[newIndex++] = ext[index];
}
}
return newIndex;
}
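/*
 * Illustrative example (CRC digits assumed): a UDF name containing an
 * illegal character, say "a/b.txt", comes out as "a_b#XXXX.txt": the
 * '/' becomes ILLEGAL_CHAR_MARK, and needsCRC appends CRC_MARK plus the
 * four upper-case hex digits of crc_itu_t() over the on-disk name,
 * followed by the preserved ".txt" extension.
 */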
| ./CrossVul/dataset_final_sorted/CWE-17/c/bad_2415_4 |
crossvul-cpp_data_good_2306_0 | /*
* linux/fs/file_table.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/atomic.h>
#include "internal.h"
/* sysctl tunables... */
struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
static void file_free_rcu(struct rcu_head *head)
{
struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
put_cred(f->f_cred);
kmem_cache_free(filp_cachep, f);
}
static inline void file_free(struct file *f)
{
percpu_counter_dec(&nr_files);
file_check_state(f);
call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
/*
* Return the total number of open files in the system
*/
static long get_nr_files(void)
{
return percpu_counter_read_positive(&nr_files);
}
/*
* Return the maximum number of open files in the system
*/
unsigned long get_max_files(void)
{
return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);
/*
* Handle nr_files sysctl
*/
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
files_stat.nr_files = get_nr_files();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
#endif
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, ran out of memory, or the operation is not permitted.
*
* Be very careful using this. You are responsible for
* getting write access to any mount that you might assign
* to this filp, if it is opened for write. If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
*/
struct file *get_empty_filp(void)
{
const struct cred *cred = current_cred();
static long old_max;
struct file *f;
int error;
/*
* Privileged users can go above max_files
*/
if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
/*
* percpu_counters are inaccurate. Do an expensive check before
* we go and fail.
*/
if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
goto over;
}
f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
if (unlikely(!f))
return ERR_PTR(-ENOMEM);
percpu_counter_inc(&nr_files);
f->f_cred = get_cred(cred);
error = security_file_alloc(f);
if (unlikely(error)) {
file_free(f);
return ERR_PTR(error);
}
atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
eventpoll_init_file(f);
/* f->f_version: 0 */
return f;
over:
/* Ran out of filps - report that */
if (get_nr_files() > old_max) {
pr_info("VFS: file-max limit %lu reached\n", get_max_files());
old_max = get_nr_files();
}
return ERR_PTR(-ENFILE);
}
/**
* alloc_file - allocate and initialize a 'struct file'
 * @path: the vfsmount and dentry on which the new file will reside
* @mode: the mode with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
*
* Use this instead of get_empty_filp() to get a new
* 'struct file'. Do so because of the same initialization
* pitfalls reasons listed for init_file(). This is a
* preferred interface to using init_file().
*
* If all the callers of init_file() are eliminated, its
* code should be moved into this function.
*/
struct file *alloc_file(struct path *path, fmode_t mode,
const struct file_operations *fop)
{
struct file *file;
file = get_empty_filp();
if (IS_ERR(file))
return file;
file->f_path = *path;
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_mode = mode;
file->f_op = fop;
/*
* These mounts don't really matter in practice
* for r/o bind mounts. They aren't userspace-
* visible. We do this for consistency, and so
* that we can do debugging checks at __fput()
*/
if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
file_take_write(file);
WARN_ON(mnt_clone_write(path->mnt));
}
if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(path->dentry->d_inode);
return file;
}
EXPORT_SYMBOL(alloc_file);
/**
* drop_file_write_access - give up ability to write to a file
* @file: the file to which we will stop writing
*
* This is a central place which will give up the ability
* to write to @file, along with access to write through
* its vfsmount.
*/
static void drop_file_write_access(struct file *file)
{
struct vfsmount *mnt = file->f_path.mnt;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
put_write_access(inode);
if (special_file(inode->i_mode))
return;
if (file_check_writeable(file) != 0)
return;
__mnt_drop_write(mnt);
file_release_write(file);
}
/* the real guts of fput() - releasing the last reference to file
*/
static void __fput(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct vfsmount *mnt = file->f_path.mnt;
struct inode *inode = file->f_inode;
might_sleep();
fsnotify_close(file);
/*
* The function eventpoll_release() should be the first called
* in the file cleanup chain.
*/
eventpoll_release(file);
locks_remove_flock(file);
if (unlikely(file->f_flags & FASYNC)) {
if (file->f_op->fasync)
file->f_op->fasync(-1, file, 0);
}
ima_file_free(file);
if (file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
!(file->f_mode & FMODE_PATH))) {
cdev_put(inode->i_cdev);
}
fops_put(file->f_op);
put_pid(file->f_owner.pid);
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_dec(inode);
if (file->f_mode & FMODE_WRITE)
drop_file_write_access(file);
file->f_path.dentry = NULL;
file->f_path.mnt = NULL;
file->f_inode = NULL;
file_free(file);
dput(dentry);
mntput(mnt);
}
static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
struct llist_node *node = llist_del_all(&delayed_fput_list);
struct llist_node *next;
for (; node; node = next) {
next = llist_next(node);
__fput(llist_entry(node, struct file, f_u.fu_llist));
}
}
static void ____fput(struct callback_head *work)
{
__fput(container_of(work, struct file, f_u.fu_rcuhead));
}
/*
* If kernel thread really needs to have the final fput() it has done
* to complete, call this. The only user right now is the boot - we
* *do* need to make sure our writes to binaries on initramfs has
* not left us with opened struct file waiting for __fput() - execve()
* won't work without that. Please, don't add more callers without
* very good reasons; in particular, never call that with locks
* held and never call that from a thread that might need to do
* some work on any kind of umount.
*/
void flush_delayed_fput(void)
{
delayed_fput(NULL);
}
static DECLARE_WORK(delayed_fput_work, delayed_fput);
void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
init_task_work(&file->f_u.fu_rcuhead, ____fput);
if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
return;
/*
* After this task has run exit_task_work(),
* task_work_add() will fail. Fall through to delayed
* fput to avoid leaking *file.
*/
}
if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
schedule_work(&delayed_fput_work);
}
}
/*
* synchronous analog of fput(); for kernel threads that might be needed
* in some umount() (and thus can't use flush_delayed_fput() without
* risking deadlocks), need to wait for completion of __fput() and know
* for this specific struct file it won't involve anything that would
* need them. Use only if you really need it - at the very least,
* don't blindly convert fput() by kernel thread to that.
*/
void __fput_sync(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
BUG_ON(!(task->flags & PF_KTHREAD));
__fput(file);
}
}
EXPORT_SYMBOL(fput);
void put_filp(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
security_file_free(file);
file_free(file);
}
}
void __init files_init(unsigned long mempages)
{
unsigned long n;
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
/*
* One file with associated inode and dcache is very roughly 1K.
* Per default don't use more than 10% of our memory for files.
*/
n = (mempages * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
files_defer_init();
percpu_counter_init(&nr_files, 0);
}
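/*
 * Worked example (illustrative): on a machine with 4 GiB of RAM and
 * 4 KiB pages, mempages == 1048576, so
 *
 *	n = (1048576 * (4096 / 1024)) / 10 = 419430
 *
 * and files_stat.max_files defaults to roughly 419k open files.
 */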
| ./CrossVul/dataset_final_sorted/CWE-17/c/good_2306_0 |
crossvul-cpp_data_bad_2415_0 | /*
* dir.c
*
* PURPOSE
* Directory handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998-2004 Ben Fennema
*
* HISTORY
*
* 10/05/98 dgb Split directory operations into its own file
* Implemented directory reads via do_udf_readdir
* 10/06/98 Made directory operations work!
* 11/17/98 Rewrote directory to support ICBTAG_FLAG_AD_LONG
* 11/25/98 blf Rewrote directory handling (readdir+lookup) to support reading
* across blocks.
* 12/12/98 Split out the lookup code to namei.c. bulk of directory
* code now in directory.c:udf_fileident_read.
*/
#include "udfdecl.h"
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "udf_i.h"
#include "udf_sb.h"
static int udf_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *dir = file_inode(file);
struct udf_inode_info *iinfo = UDF_I(dir);
struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL};
struct fileIdentDesc *fi = NULL;
struct fileIdentDesc cfi;
int block, iblock;
loff_t nf_pos;
int flen;
unsigned char *fname = NULL;
unsigned char *nameptr;
uint16_t liu;
uint8_t lfi;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
struct buffer_head *tmp, *bha[16];
struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
int i, num, ret = 0;
struct extent_position epos = { NULL, 0, {0, 0} };
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
return 0;
ctx->pos = 1;
}
nf_pos = (ctx->pos - 1) << 2;
if (nf_pos >= size)
goto out;
fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!fname) {
ret = -ENOMEM;
goto out;
}
if (nf_pos == 0)
nf_pos = udf_ext0_offset(dir);
fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset)
!= (EXT_RECORDED_ALLOCATED >> 30)) {
ret = -ENOENT;
goto out;
}
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type ==
ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
} else {
offset = 0;
}
if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
ret = -EIO;
goto out;
}
if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
for (num = 0; i > 0; i--) {
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i);
tmp = udf_tgetblk(dir->i_sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
brelse(tmp);
}
if (num) {
ll_rw_block(READA, num, bha);
for (i = 0; i < num; i++)
brelse(bha[i]);
}
}
}
while (nf_pos < size) {
struct kernel_lb_addr tloc;
ctx->pos = (nf_pos >> 2) + 1;
fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
&elen, &offset);
if (!fi)
goto out;
liu = le16_to_cpu(cfi.lengthOfImpUse);
lfi = cfi.lengthFileIdent;
if (fibh.sbh == fibh.ebh) {
nameptr = fi->fileIdent + liu;
} else {
			int poffset;	/* Unpadded ending offset */
poffset = fibh.soffset + sizeof(struct fileIdentDesc) + liu + lfi;
if (poffset >= lfi) {
nameptr = (char *)(fibh.ebh->b_data + poffset - lfi);
} else {
nameptr = fname;
memcpy(nameptr, fi->fileIdent + liu,
lfi - poffset);
memcpy(nameptr + lfi - poffset,
fibh.ebh->b_data, poffset);
}
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
continue;
}
if (cfi.fileCharacteristics & FID_FILE_CHAR_PARENT) {
if (!dir_emit_dotdot(file, ctx))
goto out;
continue;
}
flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
if (!flen)
continue;
tloc = lelb_to_cpu(cfi.icb.extLocation);
iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
goto out;
} /* end while */
ctx->pos = (nf_pos >> 2) + 1;
out:
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
brelse(epos.bh);
kfree(fname);
return ret;
}
/* readdir and lookup functions */
const struct file_operations udf_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate = udf_readdir,
.unlocked_ioctl = udf_ioctl,
.fsync = generic_file_fsync,
};
| ./CrossVul/dataset_final_sorted/CWE-17/c/bad_2415_0 |
crossvul-cpp_data_good_2415_4 | /*
* unicode.c
*
* PURPOSE
* Routines for converting between UTF-8 and OSTA Compressed Unicode.
* Also handles filename mangling
*
* DESCRIPTION
* OSTA Compressed Unicode is explained in the OSTA UDF specification.
* http://www.osta.org/
* UTF-8 is explained in the IETF RFC XXXX.
* ftp://ftp.internic.net/rfc/rfcxxxx.txt
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*/
#include "udfdecl.h"
#include <linux/kernel.h>
#include <linux/string.h> /* for memset */
#include <linux/nls.h>
#include <linux/crc-itu-t.h>
#include <linux/slab.h>
#include "udf_sb.h"
static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
int);
static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
{
if ((!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN - 2))
return 0;
memset(dest, 0, sizeof(struct ustr));
memcpy(dest->u_name, src, strlen);
dest->u_cmpID = 0x08;
dest->u_len = strlen;
return strlen;
}
/*
* udf_build_ustr
*/
int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
{
int usesize;
if (!dest || !ptr || !size)
return -1;
BUG_ON(size < 2);
usesize = min_t(size_t, ptr[size - 1], sizeof(dest->u_name));
usesize = min(usesize, size - 2);
dest->u_cmpID = ptr[0];
dest->u_len = usesize;
memcpy(dest->u_name, ptr + 1, usesize);
memset(dest->u_name + usesize, 0, sizeof(dest->u_name) - usesize);
return 0;
}
/*
* udf_build_ustr_exact
*/
static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
{
if ((!dest) || (!ptr) || (!exactsize))
return -1;
memset(dest, 0, sizeof(struct ustr));
dest->u_cmpID = ptr[0];
dest->u_len = exactsize - 1;
memcpy(dest->u_name, ptr + 1, exactsize - 1);
return 0;
}
/*
* udf_ocu_to_utf8
*
* PURPOSE
* Convert OSTA Compressed Unicode to the UTF-8 equivalent.
*
* PRE-CONDITIONS
* utf Pointer to UTF-8 output buffer.
* ocu Pointer to OSTA Compressed Unicode input buffer
* of size UDF_NAME_LEN bytes.
* both of type "struct ustr *"
*
* POST-CONDITIONS
 * <return> Length of the UTF-8 output; zero on failure.
*
* HISTORY
* November 12, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
{
const uint8_t *ocu;
uint8_t cmp_id, ocu_len;
int i;
ocu_len = ocu_i->u_len;
if (ocu_len == 0) {
memset(utf_o, 0, sizeof(struct ustr));
return 0;
}
cmp_id = ocu_i->u_cmpID;
if (cmp_id != 8 && cmp_id != 16) {
memset(utf_o, 0, sizeof(struct ustr));
pr_err("unknown compression code (%d) stri=%s\n",
cmp_id, ocu_i->u_name);
return 0;
}
ocu = ocu_i->u_name;
utf_o->u_len = 0;
for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
/* Expand OSTA compressed Unicode to Unicode */
uint32_t c = ocu[i++];
if (cmp_id == 16)
c = (c << 8) | ocu[i++];
/* Compress Unicode to UTF-8 */
if (c < 0x80U)
utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
else if (c < 0x800U) {
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xc0 | (c >> 6));
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0x80 | (c & 0x3f));
} else {
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xe0 | (c >> 12));
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0x80 |
((c >> 6) & 0x3f));
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0x80 | (c & 0x3f));
}
}
utf_o->u_cmpID = 8;
return utf_o->u_len;
}
/*
*
* udf_utf8_to_ocu
*
* PURPOSE
* Convert UTF-8 to the OSTA Compressed Unicode equivalent.
*
* DESCRIPTION
* This routine is only called by udf_lookup().
*
* PRE-CONDITIONS
* ocu Pointer to OSTA Compressed Unicode output
* buffer of size UDF_NAME_LEN bytes.
* utf Pointer to UTF-8 input buffer.
* utf_len Length of UTF-8 input buffer in bytes.
*
* POST-CONDITIONS
* <return> Zero on success.
*
* HISTORY
* November 12, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
{
unsigned c, i, max_val, utf_char;
int utf_cnt, u_len;
memset(ocu, 0, sizeof(dstring) * length);
ocu[0] = 8;
max_val = 0xffU;
try_again:
u_len = 0U;
utf_char = 0U;
utf_cnt = 0U;
for (i = 0U; i < utf->u_len; i++) {
c = (uint8_t)utf->u_name[i];
/* Complete a multi-byte UTF-8 character */
if (utf_cnt) {
utf_char = (utf_char << 6) | (c & 0x3fU);
if (--utf_cnt)
continue;
} else {
/* Check for a multi-byte UTF-8 character */
if (c & 0x80U) {
/* Start a multi-byte UTF-8 character */
if ((c & 0xe0U) == 0xc0U) {
utf_char = c & 0x1fU;
utf_cnt = 1;
} else if ((c & 0xf0U) == 0xe0U) {
utf_char = c & 0x0fU;
utf_cnt = 2;
} else if ((c & 0xf8U) == 0xf0U) {
utf_char = c & 0x07U;
utf_cnt = 3;
} else if ((c & 0xfcU) == 0xf8U) {
utf_char = c & 0x03U;
utf_cnt = 4;
} else if ((c & 0xfeU) == 0xfcU) {
utf_char = c & 0x01U;
utf_cnt = 5;
} else {
goto error_out;
}
continue;
} else {
/* Single byte UTF-8 character (most common) */
utf_char = c;
}
}
/* Choose no compression if necessary */
if (utf_char > max_val) {
if (max_val == 0xffU) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
goto try_again;
}
goto error_out;
}
if (max_val == 0xffffU)
ocu[++u_len] = (uint8_t)(utf_char >> 8);
ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
}
if (utf_cnt) {
error_out:
ocu[++u_len] = '?';
printk(KERN_DEBUG pr_fmt("bad UTF-8 character\n"));
}
ocu[length - 1] = (uint8_t)u_len + 1;
return u_len + 1;
}
static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
const struct ustr *ocu_i)
{
const uint8_t *ocu;
uint8_t cmp_id, ocu_len;
int i, len;
ocu_len = ocu_i->u_len;
if (ocu_len == 0) {
memset(utf_o, 0, sizeof(struct ustr));
return 0;
}
cmp_id = ocu_i->u_cmpID;
if (cmp_id != 8 && cmp_id != 16) {
memset(utf_o, 0, sizeof(struct ustr));
pr_err("unknown compression code (%d) stri=%s\n",
cmp_id, ocu_i->u_name);
return 0;
}
ocu = ocu_i->u_name;
utf_o->u_len = 0;
for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
/* Expand OSTA compressed Unicode to Unicode */
uint32_t c = ocu[i++];
if (cmp_id == 16)
c = (c << 8) | ocu[i++];
len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
UDF_NAME_LEN - utf_o->u_len);
/* Valid character? */
if (len >= 0)
utf_o->u_len += len;
else
utf_o->u_name[utf_o->u_len++] = '?';
}
utf_o->u_cmpID = 8;
return utf_o->u_len;
}
static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
int length)
{
int len;
unsigned i, max_val;
uint16_t uni_char;
int u_len;
memset(ocu, 0, sizeof(dstring) * length);
ocu[0] = 8;
max_val = 0xffU;
try_again:
u_len = 0U;
for (i = 0U; i < uni->u_len; i++) {
len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
if (!len)
continue;
/* Invalid character, deal with it */
if (len < 0) {
len = 1;
uni_char = '?';
}
if (uni_char > max_val) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
goto try_again;
}
if (max_val == 0xffffU)
ocu[++u_len] = (uint8_t)(uni_char >> 8);
ocu[++u_len] = (uint8_t)(uni_char & 0xffU);
i += len - 1;
}
ocu[length - 1] = (uint8_t)u_len + 1;
return u_len + 1;
}
int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
uint8_t *dname, int dlen)
{
struct ustr *filename, *unifilename;
int len = 0;
filename = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!filename)
return 0;
unifilename = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!unifilename)
goto out1;
if (udf_build_ustr_exact(unifilename, sname, slen))
goto out2;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
if (!udf_CS0toUTF8(filename, unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
goto out2;
}
} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, filename,
unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
goto out2;
}
} else
goto out2;
len = udf_translate_to_linux(dname, dlen,
filename->u_name, filename->u_len,
unifilename->u_name, unifilename->u_len);
out2:
kfree(unifilename);
out1:
kfree(filename);
return len;
}
int udf_put_filename(struct super_block *sb, const uint8_t *sname,
uint8_t *dname, int flen)
{
struct ustr unifilename;
int namelen;
if (!udf_char_to_ustr(&unifilename, sname, flen))
return 0;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN);
if (!namelen)
return 0;
} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname,
&unifilename, UDF_NAME_LEN);
if (!namelen)
return 0;
} else
return 0;
return namelen;
}
#define ILLEGAL_CHAR_MARK '_'
#define EXT_MARK '.'
#define CRC_MARK '#'
#define EXT_SIZE 5
/* Number of chars we need to store generated CRC to make filename unique */
#define CRC_LEN 5
static int udf_translate_to_linux(uint8_t *newName, int newLen,
uint8_t *udfName, int udfLen,
uint8_t *fidName, int fidNameLen)
{
int index, newIndex = 0, needsCRC = 0;
int extIndex = 0, newExtIndex = 0, hasExt = 0;
unsigned short valueCRC;
uint8_t curr;
if (udfName[0] == '.' &&
(udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
needsCRC = 1;
newIndex = udfLen;
memcpy(newName, udfName, udfLen);
} else {
for (index = 0; index < udfLen; index++) {
curr = udfName[index];
if (curr == '/' || curr == 0) {
needsCRC = 1;
curr = ILLEGAL_CHAR_MARK;
while (index + 1 < udfLen &&
(udfName[index + 1] == '/' ||
udfName[index + 1] == 0))
index++;
}
if (curr == EXT_MARK &&
(udfLen - index - 1) <= EXT_SIZE) {
if (udfLen == index + 1)
hasExt = 0;
else {
hasExt = 1;
extIndex = index;
newExtIndex = newIndex;
}
}
if (newIndex < newLen)
newName[newIndex++] = curr;
else
needsCRC = 1;
}
}
if (needsCRC) {
uint8_t ext[EXT_SIZE];
int localExtIndex = 0;
if (hasExt) {
int maxFilenameLen;
for (index = 0;
index < EXT_SIZE && extIndex + index + 1 < udfLen;
index++) {
curr = udfName[extIndex + index + 1];
if (curr == '/' || curr == 0) {
needsCRC = 1;
curr = ILLEGAL_CHAR_MARK;
while (extIndex + index + 2 < udfLen &&
(index + 1 < EXT_SIZE &&
(udfName[extIndex + index + 2] == '/' ||
udfName[extIndex + index + 2] == 0)))
index++;
}
ext[localExtIndex++] = curr;
}
maxFilenameLen = newLen - CRC_LEN - localExtIndex;
if (newIndex > maxFilenameLen)
newIndex = maxFilenameLen;
else
newIndex = newExtIndex;
} else if (newIndex > newLen - CRC_LEN)
newIndex = newLen - CRC_LEN;
newName[newIndex++] = CRC_MARK;
valueCRC = crc_itu_t(0, fidName, fidNameLen);
newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8);
newName[newIndex++] = hex_asc_upper_hi(valueCRC);
newName[newIndex++] = hex_asc_upper_lo(valueCRC);
if (hasExt) {
newName[newIndex++] = EXT_MARK;
for (index = 0; index < localExtIndex; index++)
newName[newIndex++] = ext[index];
}
}
return newIndex;
}
| ./CrossVul/dataset_final_sorted/CWE-17/c/good_2415_4 |
crossvul-cpp_data_bad_1482_2 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* ROUTE - implementation of the IP router.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Linus Torvalds, <Linus.Torvalds@helsinki.fi>
* Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Fixes:
* Alan Cox : Verify area fixes.
* Alan Cox : cli() protects routing changes
* Rui Oliveira : ICMP routing table updates
* (rco@di.uminho.pt) Routing table insertion and update
* Linus Torvalds : Rewrote bits to be sensible
* Alan Cox : Added BSD route gw semantics
* Alan Cox : Super /proc >4K
* Alan Cox : MTU in route table
* Alan Cox : MSS actually. Also added the window
* clamper.
* Sam Lantinga : Fixed route matching in rt_del()
* Alan Cox : Routing cache support.
* Alan Cox : Removed compatibility cruft.
* Alan Cox : RTF_REJECT support.
* Alan Cox : TCP irtt support.
* Jonathan Naylor : Added Metric support.
* Miquel van Smoorenburg : BSD API fixes.
* Miquel van Smoorenburg : Metrics.
* Alan Cox : Use __u32 properly
* Alan Cox : Aligned routing errors more closely with BSD
* our system is still very different.
* Alan Cox : Faster /proc handling
* Alexey Kuznetsov : Massive rework to support tree based routing,
* routing caches and better behaviour.
*
* Olaf Erb : irtt wasn't being copied right.
* Bjorn Ekwall : Kerneld route support.
* Alan Cox : Multicast fixed (I hope)
* Pavel Krauz : Limited broadcast fixed
* Mike McLagan : Routing by source
* Alexey Kuznetsov : End of old history. Split to fib.c and
* route.c and rewritten from scratch.
* Andi Kleen : Load-limit warning messages.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Vitaly E. Lavrov : Race condition in ip_route_input_slow.
* Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
* Vladimir V. Ivanov : IP rule info (flowid) is really useful.
* Marc Boucher : routing by fwmark
* Robert Olsson : Added rt_cache statistics
* Arnaldo C. Melo : Convert proc stuff to seq_file
* Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
* Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
* Ilia Sotnikov : Removed TOS from hash calculations
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "IPv4: " fmt
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
#define RT_GC_TIMEOUT (300*HZ)
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
/*
* Interface to generic destination cache.
*/
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
WARN_ON(1);
return NULL;
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr);
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.mtu = ipv4_mtu,
.cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
.redirect = ip_do_redirect,
.local_out = __ip_local_out,
.neigh_lookup = ipv4_neigh_lookup,
};
#define ECN_OR_COST(class) TC_PRIO_##class
const __u8 ip_tos2prio[16] = {
TC_PRIO_BESTEFFORT,
ECN_OR_COST(BESTEFFORT),
TC_PRIO_BESTEFFORT,
ECN_OR_COST(BESTEFFORT),
TC_PRIO_BULK,
ECN_OR_COST(BULK),
TC_PRIO_BULK,
ECN_OR_COST(BULK),
TC_PRIO_INTERACTIVE,
ECN_OR_COST(INTERACTIVE),
TC_PRIO_INTERACTIVE,
ECN_OR_COST(INTERACTIVE),
TC_PRIO_INTERACTIVE_BULK,
ECN_OR_COST(INTERACTIVE_BULK),
TC_PRIO_INTERACTIVE_BULK,
ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
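/*
 * Illustrative example (assuming the rt_tos2priority() helper from
 * include/net/route.h): the table is indexed with
 * (tos & IPTOS_TOS_MASK) >> 1, so IPTOS_LOWDELAY (0x10) yields index 8
 * and maps to TC_PRIO_INTERACTIVE.
 */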
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos)
return NULL;
return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq, "%-127s\n",
"Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
"Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
"HHUptod\tSpecDst");
return 0;
}
static const struct seq_operations rt_cache_seq_ops = {
.start = rt_cache_seq_start,
.next = rt_cache_seq_next,
.stop = rt_cache_seq_stop,
.show = rt_cache_seq_show,
};
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rt_cache_seq_ops);
}
static const struct file_operations rt_cache_seq_fops = {
.owner = THIS_MODULE,
.open = rt_cache_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
return &per_cpu(rt_cache_stat, cpu);
}
return NULL;
}
static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
return &per_cpu(rt_cache_stat, cpu);
}
return NULL;
}
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
struct rt_cache_stat *st = v;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
return 0;
}
seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
" %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
dst_entries_get_slow(&ipv4_dst_ops),
0, /* st->in_hit */
st->in_slow_tot,
st->in_slow_mc,
st->in_no_route,
st->in_brd,
st->in_martian_dst,
st->in_martian_src,
0, /* st->out_hit */
st->out_slow_tot,
st->out_slow_mc,
0, /* st->gc_total */
0, /* st->gc_ignored */
0, /* st->gc_goal_miss */
0, /* st->gc_dst_overflow */
0, /* st->in_hlist_search */
0 /* st->out_hlist_search */
);
return 0;
}
static const struct seq_operations rt_cpu_seq_ops = {
.start = rt_cpu_seq_start,
.next = rt_cpu_seq_next,
.stop = rt_cpu_seq_stop,
.show = rt_cpu_seq_show,
};
static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rt_cpu_seq_ops);
}
static const struct file_operations rt_cpu_seq_fops = {
.owner = THIS_MODULE,
.open = rt_cpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
struct ip_rt_acct *dst, *src;
unsigned int i, j;
dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
if (!dst)
return -ENOMEM;
for_each_possible_cpu(i) {
src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
for (j = 0; j < 256; j++) {
dst[j].o_bytes += src[j].o_bytes;
dst[j].o_packets += src[j].o_packets;
dst[j].i_bytes += src[j].i_bytes;
dst[j].i_packets += src[j].i_packets;
}
}
seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
kfree(dst);
return 0;
}
static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, rt_acct_proc_show, NULL);
}
static const struct file_operations rt_acct_proc_fops = {
.owner = THIS_MODULE,
.open = rt_acct_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
&rt_cache_seq_fops);
if (!pde)
goto err1;
pde = proc_create("rt_cache", S_IRUGO,
net->proc_net_stat, &rt_cpu_seq_fops);
if (!pde)
goto err2;
#ifdef CONFIG_IP_ROUTE_CLASSID
pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
if (!pde)
goto err3;
#endif
return 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
remove_proc_entry("rt_cache", net->proc_net);
err1:
return -ENOMEM;
}
static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
remove_proc_entry("rt_cache", net->proc_net_stat);
remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
remove_proc_entry("rt_acct", net->proc_net);
#endif
}
static struct pernet_operations ip_rt_proc_ops __net_initdata = {
.init = ip_rt_do_proc_init,
.exit = ip_rt_do_proc_exit,
};
static int __init ip_rt_proc_init(void)
{
return register_pernet_subsys(&ip_rt_proc_ops);
}
#else
static inline int ip_rt_proc_init(void)
{
return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}
void rt_cache_flush(struct net *net)
{
rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
const struct rtable *rt;
struct neighbour *n;
rt = (const struct rtable *) dst;
if (rt->rt_gateway)
pkey = (const __be32 *) &rt->rt_gateway;
else if (skb)
pkey = &ip_hdr(skb)->daddr;
n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
if (n)
return n;
return neigh_create(&arp_tbl, pkey, dev);
}
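/* In short, the neighbour key preference above is: the route's gateway if
 * one is set, else the packet's destination, else the daddr the caller
 * passed in. Condensed (a sketch of the same selection):
 *
 *	key = rt->rt_gateway ? rt->rt_gateway :
 *	      skb ? ip_hdr(skb)->daddr : *(const __be32 *)daddr;
 *
 * so on-link destinations resolve directly and gatewayed ones resolve via
 * the next hop.
 */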
#define IP_IDENTS_SZ 2048u
struct ip_ident_bucket {
atomic_t id;
u32 stamp32;
};
static struct ip_ident_bucket *ip_idents __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
u32 old = ACCESS_ONCE(bucket->stamp32);
u32 now = (u32)jiffies;
u32 delta = 0;
if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
delta = prandom_u32_max(now - old);
return atomic_add_return(segs + delta, &bucket->id) - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
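/* A userspace-style sketch of the perturbation above, assuming a plain
 * tick counter in place of jiffies and rand() in place of
 * prandom_u32_max(); illustrative only:
 *
 *	u32 reserve_ids(u32 *id, u32 *stamp, u32 now, int segs)
 *	{
 *		u32 delta = 0;
 *		if (*stamp != now) {		/* bucket idle since 'stamp' */
 *			delta = rand() % (now - *stamp);
 *			*stamp = now;
 *		}
 *		*id += segs + delta;		/* hand out segs ids, skip delta */
 *		return *id - segs;
 *	}
 *
 * A seldom-used bucket takes a random forward jump proportional to its
 * idle time, so an observer cannot count packets sent between two probes.
 */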
void __ip_select_ident(struct iphdr *iph, int segs)
{
static u32 ip_idents_hashrnd __read_mostly;
u32 hash, id;
net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
hash = jhash_3words((__force u32)iph->daddr,
(__force u32)iph->saddr,
iph->protocol,
ip_idents_hashrnd);
id = ip_idents_reserve(hash, segs);
iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
const struct iphdr *iph,
int oif, u8 tos,
u8 prot, u32 mark, int flow_flags)
{
if (sk) {
const struct inet_sock *inet = inet_sk(sk);
oif = sk->sk_bound_dev_if;
mark = sk->sk_mark;
tos = RT_CONN_FLAGS(sk);
prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
}
flowi4_init_output(fl4, oif, mark, tos,
RT_SCOPE_UNIVERSE, prot,
flow_flags,
iph->daddr, iph->saddr, 0, 0);
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
const struct sock *sk)
{
const struct iphdr *iph = ip_hdr(skb);
int oif = skb->dev->ifindex;
u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
u32 mark = skb->mark;
__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
}
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
daddr, inet->inet_saddr, 0, 0);
rcu_read_unlock();
}
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
const struct sk_buff *skb)
{
if (skb)
build_skb_flow_key(fl4, skb, sk);
else
build_sk_flow_key(fl4, sk);
}
static inline void rt_free(struct rtable *rt)
{
call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}
static DEFINE_SPINLOCK(fnhe_lock);
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
struct rtable *rt;
rt = rcu_dereference(fnhe->fnhe_rth_input);
if (rt) {
RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
rt_free(rt);
}
rt = rcu_dereference(fnhe->fnhe_rth_output);
if (rt) {
RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
rt_free(rt);
}
}
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
struct fib_nh_exception *fnhe, *oldest;
oldest = rcu_dereference(hash->chain);
for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
fnhe = rcu_dereference(fnhe->fnhe_next)) {
if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
oldest = fnhe;
}
fnhe_flush_routes(oldest);
return oldest;
}
static inline u32 fnhe_hashfun(__be32 daddr)
{
static u32 fnhe_hashrnd __read_mostly;
u32 hval;
net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
return hash_32(hval, FNHE_HASH_SHIFT);
}
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
rt->rt_pmtu = fnhe->fnhe_pmtu;
rt->dst.expires = fnhe->fnhe_expires;
if (fnhe->fnhe_gw) {
rt->rt_flags |= RTCF_REDIRECTED;
rt->rt_gateway = fnhe->fnhe_gw;
rt->rt_uses_gateway = 1;
}
}
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
u32 pmtu, unsigned long expires)
{
struct fnhe_hash_bucket *hash;
struct fib_nh_exception *fnhe;
struct rtable *rt;
unsigned int i;
int depth;
u32 hval = fnhe_hashfun(daddr);
spin_lock_bh(&fnhe_lock);
hash = rcu_dereference(nh->nh_exceptions);
if (!hash) {
hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
if (!hash)
goto out_unlock;
rcu_assign_pointer(nh->nh_exceptions, hash);
}
hash += hval;
depth = 0;
for (fnhe = rcu_dereference(hash->chain); fnhe;
fnhe = rcu_dereference(fnhe->fnhe_next)) {
if (fnhe->fnhe_daddr == daddr)
break;
depth++;
}
if (fnhe) {
if (gw)
fnhe->fnhe_gw = gw;
if (pmtu) {
fnhe->fnhe_pmtu = pmtu;
fnhe->fnhe_expires = max(1UL, expires);
}
/* Update all cached dsts too */
rt = rcu_dereference(fnhe->fnhe_rth_input);
if (rt)
fill_route_from_fnhe(rt, fnhe);
rt = rcu_dereference(fnhe->fnhe_rth_output);
if (rt)
fill_route_from_fnhe(rt, fnhe);
} else {
if (depth > FNHE_RECLAIM_DEPTH)
fnhe = fnhe_oldest(hash);
else {
fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
if (!fnhe)
goto out_unlock;
fnhe->fnhe_next = hash->chain;
rcu_assign_pointer(hash->chain, fnhe);
}
fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
fnhe->fnhe_daddr = daddr;
fnhe->fnhe_gw = gw;
fnhe->fnhe_pmtu = pmtu;
fnhe->fnhe_expires = expires;
/* Exception created; mark the cached routes for the nexthop
 * stale, so that anyone caching them rechecks whether this
 * exception applies.
 */
rt = rcu_dereference(nh->nh_rth_input);
if (rt)
rt->dst.obsolete = DST_OBSOLETE_KILL;
for_each_possible_cpu(i) {
struct rtable __rcu **prt;
prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
rt = rcu_dereference(*prt);
if (rt)
rt->dst.obsolete = DST_OBSOLETE_KILL;
}
}
fnhe->fnhe_stamp = jiffies;
out_unlock:
spin_unlock_bh(&fnhe_lock);
}
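/* The chain walk above bounds each bucket at roughly FNHE_RECLAIM_DEPTH
 * entries; a sketch of the reuse policy:
 *
 *	if (depth > FNHE_RECLAIM_DEPTH)
 *		fnhe = fnhe_oldest(hash);	/* recycle the stalest entry */
 *	else
 *		fnhe = kzalloc(...);		/* room left: link a new one */
 *
 * so a flood of ICMP-driven exceptions cannot grow a bucket without
 * bound; it can only churn the oldest entries.
 */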
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
bool kill_route)
{
__be32 new_gw = icmp_hdr(skb)->un.gateway;
__be32 old_gw = ip_hdr(skb)->saddr;
struct net_device *dev = skb->dev;
struct in_device *in_dev;
struct fib_result res;
struct neighbour *n;
struct net *net;
switch (icmp_hdr(skb)->code & 7) {
case ICMP_REDIR_NET:
case ICMP_REDIR_NETTOS:
case ICMP_REDIR_HOST:
case ICMP_REDIR_HOSTTOS:
break;
default:
return;
}
if (rt->rt_gateway != old_gw)
return;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
return;
net = dev_net(dev);
if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
ipv4_is_zeronet(new_gw))
goto reject_redirect;
if (!IN_DEV_SHARED_MEDIA(in_dev)) {
if (!inet_addr_onlink(in_dev, new_gw, old_gw))
goto reject_redirect;
if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
goto reject_redirect;
} else {
if (inet_addr_type(net, new_gw) != RTN_UNICAST)
goto reject_redirect;
}
n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
if (!IS_ERR(n)) {
if (!(n->nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
} else {
if (fib_lookup(net, fl4, &res) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, new_gw,
0, 0);
}
if (kill_route)
rt->dst.obsolete = DST_OBSOLETE_KILL;
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
neigh_release(n);
}
return;
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev)) {
const struct iphdr *iph = (const struct iphdr *) skb->data;
__be32 daddr = iph->daddr;
__be32 saddr = iph->saddr;
net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
" Advised path = %pI4 -> %pI4\n",
&old_gw, dev->name, &new_gw,
&saddr, &daddr);
}
#endif
;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
struct rtable *rt;
struct flowi4 fl4;
const struct iphdr *iph = (const struct iphdr *) skb->data;
int oif = skb->dev->ifindex;
u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
u32 mark = skb->mark;
rt = (struct rtable *) dst;
__build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
__ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *)dst;
struct dst_entry *ret = dst;
if (rt) {
if (dst->obsolete > 0) {
ip_rt_put(rt);
ret = NULL;
} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
rt->dst.expires) {
ip_rt_put(rt);
ret = NULL;
}
}
return ret;
}
/*
* Algorithm:
* 1. The first ip_rt_redirect_number redirects are sent
* with exponential backoff, then we stop sending them at all,
* assuming that the host ignores our redirects.
* 2. If we did not see packets requiring redirects
* during ip_rt_redirect_silence, we assume that the host
* forgot redirected route and start to send redirects again.
*
* This algorithm is much cheaper and more intelligent than dumb load limiting
* in icmp.c.
*
* NOTE. Do not forget to inhibit load limiting for redirects (redundant)
* and "frag. need" (breaks PMTU discovery) in icmp.c.
*/
void ip_rt_send_redirect(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct in_device *in_dev;
struct inet_peer *peer;
struct net *net;
int log_martians;
rcu_read_lock();
in_dev = __in_dev_get_rcu(rt->dst.dev);
if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
rcu_read_unlock();
return;
}
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
rcu_read_unlock();
net = dev_net(rt->dst.dev);
peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
if (!peer) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
rt_nexthop(rt, ip_hdr(skb)->daddr));
return;
}
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
*/
if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
peer->rate_tokens = 0;
/* Too many ignored redirects; do not send anything.
	 * Set rate_last to the time of the last seen redirected packet.
	 */
if (peer->rate_tokens >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
goto out_put_peer;
}
/* Check for load limit; set rate_last to the latest sent
* redirect.
*/
if (peer->rate_tokens == 0 ||
time_after(jiffies,
(peer->rate_last +
(ip_rt_redirect_load << peer->rate_tokens)))) {
__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
&ip_hdr(skb)->saddr, inet_iif(skb),
&ip_hdr(skb)->daddr, &gw);
#endif
}
out_put_peer:
inet_putpeer(peer);
}
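/* The send window above doubles with each unanswered redirect; a sketch
 * of the decision, with 'load' standing for ip_rt_redirect_load and
 * 'tokens' for peer->rate_tokens:
 *
 *	bool may_send_redirect(unsigned long now, unsigned long last,
 *			       unsigned long load, unsigned int tokens)
 *	{
 *		return tokens == 0 ||
 *		       time_after(now, last + (load << tokens));
 *	}
 *
 * Once tokens reaches ip_rt_redirect_number, sending stops entirely until
 * the peer has been silent for ip_rt_redirect_silence.
 */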
static int ip_error(struct sk_buff *skb)
{
struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
struct rtable *rt = skb_rtable(skb);
struct inet_peer *peer;
unsigned long now;
struct net *net;
bool send;
int code;
net = dev_net(rt->dst.dev);
if (!IN_DEV_FORWARD(in_dev)) {
switch (rt->dst.error) {
case EHOSTUNREACH:
IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
break;
case ENETUNREACH:
IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
break;
}
goto out;
}
switch (rt->dst.error) {
case EINVAL:
default:
goto out;
case EHOSTUNREACH:
code = ICMP_HOST_UNREACH;
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
code = ICMP_PKT_FILTERED;
break;
}
peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
send = true;
if (peer) {
now = jiffies;
peer->rate_tokens += now - peer->rate_last;
if (peer->rate_tokens > ip_rt_error_burst)
peer->rate_tokens = ip_rt_error_burst;
peer->rate_last = now;
if (peer->rate_tokens >= ip_rt_error_cost)
peer->rate_tokens -= ip_rt_error_cost;
else
send = false;
inet_putpeer(peer);
}
if (send)
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
out: kfree_skb(skb);
return 0;
}
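/* The peer rate limit above is a token bucket: tokens accrue one per
 * jiffy up to ip_rt_error_burst and each ICMP error costs
 * ip_rt_error_cost. The same arithmetic in isolation (a sketch):
 *
 *	bool bucket_take(u32 *tokens, u32 *last, u32 now, u32 burst, u32 cost)
 *	{
 *		*tokens += now - *last;		/* refill: one token per tick */
 *		if (*tokens > burst)
 *			*tokens = burst;	/* cap at the burst size */
 *		*last = now;
 *		if (*tokens < cost)
 *			return false;		/* rate limited */
 *		*tokens -= cost;		/* pay for this error */
 *		return true;
 *	}
 */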
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
struct fib_result res;
if (dst_metric_locked(dst, RTAX_MTU))
return;
if (dst->dev->mtu < mtu)
return;
if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
if (rt->rt_pmtu == mtu &&
time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
return;
rcu_read_lock();
if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
jiffies + ip_rt_mtu_expires);
}
rcu_read_unlock();
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
{
struct rtable *rt = (struct rtable *) dst;
struct flowi4 fl4;
ip_rt_build_flow_key(&fl4, sk, skb);
__ip_rt_update_pmtu(rt, &fl4, mtu);
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
int oif, u32 mark, u8 protocol, int flow_flags)
{
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
if (!mark)
mark = IP4_REPLY_MARK(net, skb->mark);
__build_flow_key(&fl4, NULL, iph, oif,
RT_TOS(iph->tos), protocol, mark, flow_flags);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
ip_rt_put(rt);
}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
if (!fl4.flowi4_mark)
fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
rt = __ip_route_output_key(sock_net(sk), &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
ip_rt_put(rt);
}
}
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
struct dst_entry *odst = NULL;
bool new = false;
bh_lock_sock(sk);
if (!ip_sk_accept_pmtu(sk))
goto out;
odst = sk_dst_get(sk);
if (sock_owned_by_user(sk) || !odst) {
__ipv4_sk_update_pmtu(skb, sk, mtu);
goto out;
}
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
rt = (struct rtable *)odst;
if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
new = true;
}
__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
if (!dst_check(&rt->dst, 0)) {
if (new)
dst_release(&rt->dst);
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
new = true;
}
if (new)
sk_dst_set(sk, &rt->dst);
out:
bh_unlock_sock(sk);
dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
int oif, u32 mark, u8 protocol, int flow_flags)
{
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
__build_flow_key(&fl4, NULL, iph, oif,
RT_TOS(iph->tos), protocol, mark, flow_flags);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_do_redirect(rt, skb, &fl4, false);
ip_rt_put(rt);
}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
rt = __ip_route_output_key(sock_net(sk), &fl4);
if (!IS_ERR(rt)) {
__ip_do_redirect(rt, skb, &fl4, false);
ip_rt_put(rt);
}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
struct rtable *rt = (struct rtable *) dst;
	/* All IPv4 dsts are created with ->obsolete set to
	 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
	 * down into this function.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL, or
	 * to DST_OBSOLETE_DEAD by dst_free().
	 */
if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
return NULL;
return dst;
}
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
rt = skb_rtable(skb);
if (rt)
dst_set_expires(&rt->dst, 0);
}
static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
{
pr_debug("%s: %pI4 -> %pI4, %s\n",
__func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
skb->dev ? skb->dev->name : "?");
kfree_skb(skb);
WARN_ON(1);
return 0;
}
/*
   We do not cache the source address of the outgoing interface,
   because it is used only by the IP RR, TS and SRR options,
   so it is out of the fast path.
   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
__be32 src;
if (rt_is_output_route(rt))
src = ip_hdr(skb)->saddr;
else {
struct fib_result res;
struct flowi4 fl4;
struct iphdr *iph;
iph = ip_hdr(skb);
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = iph->daddr;
fl4.saddr = iph->saddr;
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_oif = rt->dst.dev->ifindex;
fl4.flowi4_iif = skb->dev->ifindex;
fl4.flowi4_mark = skb->mark;
rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
else
src = inet_select_addr(rt->dst.dev,
rt_nexthop(rt, iph->daddr),
RT_SCOPE_UNIVERSE);
rcu_read_unlock();
}
memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
if (!(rt->dst.tclassid & 0xFFFF))
rt->dst.tclassid |= tag & 0xFFFF;
if (!(rt->dst.tclassid & 0xFFFF0000))
rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
if (advmss == 0) {
advmss = max_t(unsigned int, dst->dev->mtu - 40,
ip_rt_min_advmss);
if (advmss > 65535 - 40)
advmss = 65535 - 40;
}
return advmss;
}
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
const struct rtable *rt = (const struct rtable *) dst;
unsigned int mtu = rt->rt_pmtu;
if (!mtu || time_after_eq(jiffies, rt->dst.expires))
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
return mtu;
mtu = dst->dev->mtu;
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
if (rt->rt_uses_gateway && mtu > 576)
mtu = 576;
}
return min_t(unsigned int, mtu, IP_MAX_MTU);
}
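/* MTU selection above prefers, in order: an unexpired learned PMTU, the
 * route's RTAX_MTU metric, and finally the device MTU, which is clamped
 * to the classic 576 for locked-metric gateway routes and capped at
 * IP_MAX_MTU. Condensed:
 *
 *	if (pmtu && !expired)
 *		return pmtu;
 *	if (metric_mtu)
 *		return metric_mtu;
 *	mtu = dev_mtu;
 *	if (locked && uses_gateway && mtu > 576)
 *		mtu = 576;
 *	return min(mtu, IP_MAX_MTU);
 */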
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
struct fib_nh_exception *fnhe;
u32 hval;
if (!hash)
return NULL;
hval = fnhe_hashfun(daddr);
for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
fnhe = rcu_dereference(fnhe->fnhe_next)) {
if (fnhe->fnhe_daddr == daddr)
return fnhe;
}
return NULL;
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
__be32 daddr)
{
bool ret = false;
spin_lock_bh(&fnhe_lock);
if (daddr == fnhe->fnhe_daddr) {
struct rtable __rcu **porig;
struct rtable *orig;
int genid = fnhe_genid(dev_net(rt->dst.dev));
if (rt_is_input_route(rt))
porig = &fnhe->fnhe_rth_input;
else
porig = &fnhe->fnhe_rth_output;
orig = rcu_dereference(*porig);
if (fnhe->fnhe_genid != genid) {
fnhe->fnhe_genid = genid;
fnhe->fnhe_gw = 0;
fnhe->fnhe_pmtu = 0;
fnhe->fnhe_expires = 0;
fnhe_flush_routes(fnhe);
orig = NULL;
}
fill_route_from_fnhe(rt, fnhe);
if (!rt->rt_gateway)
rt->rt_gateway = daddr;
if (!(rt->dst.flags & DST_NOCACHE)) {
rcu_assign_pointer(*porig, rt);
if (orig)
rt_free(orig);
ret = true;
}
fnhe->fnhe_stamp = jiffies;
}
spin_unlock_bh(&fnhe_lock);
return ret;
}
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
struct rtable *orig, *prev, **p;
bool ret = true;
if (rt_is_input_route(rt)) {
p = (struct rtable **)&nh->nh_rth_input;
} else {
p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
}
orig = *p;
prev = cmpxchg(p, orig, rt);
if (prev == orig) {
if (orig)
rt_free(orig);
} else
ret = false;
return ret;
}
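/* Publication above is a single lock-free cmpxchg: the new route is
 * installed only if the slot still holds what we read; otherwise another
 * CPU won the race and the caller leaves this route uncached. The same
 * pattern in miniature:
 *
 *	old = *slot;
 *	if (cmpxchg(slot, old, new) == old)
 *		rt_free(old);		/* we won; retire the old route */
 *	else
 *		return false;		/* we lost; caller marks new uncached */
 */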
static DEFINE_SPINLOCK(rt_uncached_lock);
static LIST_HEAD(rt_uncached_list);
static void rt_add_uncached_list(struct rtable *rt)
{
spin_lock_bh(&rt_uncached_lock);
list_add_tail(&rt->rt_uncached, &rt_uncached_list);
spin_unlock_bh(&rt_uncached_lock);
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *) dst;
if (!list_empty(&rt->rt_uncached)) {
spin_lock_bh(&rt_uncached_lock);
list_del(&rt->rt_uncached);
spin_unlock_bh(&rt_uncached_lock);
}
}
void rt_flush_dev(struct net_device *dev)
{
if (!list_empty(&rt_uncached_list)) {
struct net *net = dev_net(dev);
struct rtable *rt;
spin_lock_bh(&rt_uncached_lock);
list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
if (rt->dst.dev != dev)
continue;
rt->dst.dev = net->loopback_dev;
dev_hold(rt->dst.dev);
dev_put(dev);
}
spin_unlock_bh(&rt_uncached_lock);
}
}
static bool rt_cache_valid(const struct rtable *rt)
{
return rt &&
rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
!rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
const struct fib_result *res,
struct fib_nh_exception *fnhe,
struct fib_info *fi, u16 type, u32 itag)
{
bool cached = false;
if (fi) {
struct fib_nh *nh = &FIB_RES_NH(*res);
if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
rt->rt_gateway = nh->nh_gw;
rt->rt_uses_gateway = 1;
}
dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
if (unlikely(fnhe))
cached = rt_bind_exception(rt, fnhe, daddr);
else if (!(rt->dst.flags & DST_NOCACHE))
cached = rt_cache_route(nh, rt);
if (unlikely(!cached)) {
			/* Routes we intend to cache in the nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route in the cache, we really need to set it.
			 */
rt->dst.flags |= DST_NOCACHE;
if (!rt->rt_gateway)
rt->rt_gateway = daddr;
rt_add_uncached_list(rt);
}
} else
rt_add_uncached_list(rt);
#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
set_class_tag(rt, res->tclassid);
#endif
set_class_tag(rt, itag);
#endif
}
static struct rtable *rt_dst_alloc(struct net_device *dev,
bool nopolicy, bool noxfrm, bool will_cache)
{
return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
(will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
(nopolicy ? DST_NOPOLICY : 0) |
(noxfrm ? DST_NOXFRM : 0));
}
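/* The allocation flags combine three independent knobs; the same
 * expression, unrolled for readability:
 *
 *	int flags = will_cache ? 0 : (DST_HOST | DST_NOCACHE);
 *	if (nopolicy)
 *		flags |= DST_NOPOLICY;
 *	if (noxfrm)
 *		flags |= DST_NOXFRM;
 *
 * Routes that will not be cached are marked DST_NOCACHE up front, so
 * rt_set_nexthop() sends them straight to the uncached list.
 */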
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, int our)
{
struct rtable *rth;
struct in_device *in_dev = __in_dev_get_rcu(dev);
u32 itag = 0;
int err;
/* Primary sanity checks. */
if (in_dev == NULL)
return -EINVAL;
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
skb->protocol != htons(ETH_P_IP))
goto e_inval;
if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
if (ipv4_is_loopback(saddr))
goto e_inval;
if (ipv4_is_zeronet(saddr)) {
if (!ipv4_is_local_multicast(daddr))
goto e_inval;
} else {
err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
in_dev, &itag);
if (err < 0)
goto e_err;
}
rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
if (!rth)
goto e_nobufs;
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->dst.output = ip_rt_bug;
rth->rt_genid = rt_genid_ipv4(dev_net(dev));
rth->rt_flags = RTCF_MULTICAST;
rth->rt_type = RTN_MULTICAST;
	rth->rt_is_input = 1;
rth->rt_iif = 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
if (our) {
		rth->dst.input = ip_local_deliver;
rth->rt_flags |= RTCF_LOCAL;
}
#ifdef CONFIG_IP_MROUTE
if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
rth->dst.input = ip_mr_input;
#endif
RT_CACHE_STAT_INC(in_slow_mc);
skb_dst_set(skb, &rth->dst);
return 0;
e_nobufs:
return -ENOBUFS;
e_inval:
return -EINVAL;
e_err:
return err;
}
static void ip_handle_martian_source(struct net_device *dev,
struct in_device *in_dev,
struct sk_buff *skb,
__be32 daddr,
__be32 saddr)
{
RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
/*
* RFC1812 recommendation, if source is martian,
* the only hint is MAC header.
*/
pr_warn("martian source %pI4 from %pI4, on dev %s\n",
&daddr, &saddr, dev->name);
if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
print_hex_dump(KERN_WARNING, "ll header: ",
DUMP_PREFIX_OFFSET, 16, 1,
skb_mac_header(skb),
dev->hard_header_len, true);
}
}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
const struct fib_result *res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
struct fib_nh_exception *fnhe;
struct rtable *rth;
int err;
struct in_device *out_dev;
unsigned int flags = 0;
bool do_cache;
u32 itag = 0;
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
if (out_dev == NULL) {
net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
}
err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
in_dev->dev, in_dev, &itag);
if (err < 0) {
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
goto cleanup;
}
do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
flags |= RTCF_DOREDIRECT;
do_cache = false;
}
if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back on the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
if (out_dev == in_dev &&
IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
err = -EINVAL;
goto cleanup;
}
}
fnhe = find_exception(&FIB_RES_NH(*res), daddr);
if (do_cache) {
if (fnhe != NULL)
rth = rcu_dereference(fnhe->fnhe_rth_input);
else
rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
goto out;
}
}
rth = rt_dst_alloc(out_dev->dev,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}
rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
rth->rt_flags = flags;
rth->rt_type = res->type;
rth->rt_is_input = 1;
rth->rt_iif = 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
skb_dst_set(skb, &rth->dst);
out:
err = 0;
cleanup:
return err;
}
static int ip_mkroute_input(struct sk_buff *skb,
struct fib_result *res,
const struct flowi4 *fl4,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res->fi && res->fi->fib_nhs > 1)
fib_select_multipath(res);
#endif
/* create a routing cache entry */
return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must already have the correct destination attached by the output routine.
 *
 *	This approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 *	Called with rcu_read_lock().
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
struct fib_result res;
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct flowi4 fl4;
unsigned int flags = 0;
u32 itag = 0;
struct rtable *rth;
int err = -EINVAL;
struct net *net = dev_net(dev);
bool do_cache;
/* IP on this device is disabled. */
if (!in_dev)
goto out;
	/* Check for the most weird martians, which cannot be detected
	   by fib_lookup.
	 */
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
res.fi = NULL;
if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
goto brd_input;
	/* Accept zero addresses only for the limited broadcast;
	 * we do not even know whether to fix this or not. Waiting for complaints :-)
	 */
if (ipv4_is_zeronet(saddr))
goto martian_source;
if (ipv4_is_zeronet(daddr))
goto martian_destination;
	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
	 * more than once, calling it only when daddr and/or saddr is a
	 * loopback address.
	 */
if (ipv4_is_loopback(daddr)) {
if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
goto martian_destination;
} else if (ipv4_is_loopback(saddr)) {
if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
goto martian_source;
}
/*
* Now we are ready to route packet.
*/
fl4.flowi4_oif = 0;
fl4.flowi4_iif = dev->ifindex;
fl4.flowi4_mark = skb->mark;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.daddr = daddr;
fl4.saddr = saddr;
err = fib_lookup(net, &fl4, &res);
if (err != 0) {
if (!IN_DEV_FORWARD(in_dev))
err = -EHOSTUNREACH;
goto no_route;
}
if (res.type == RTN_BROADCAST)
goto brd_input;
if (res.type == RTN_LOCAL) {
err = fib_validate_source(skb, saddr, daddr, tos,
0, dev, in_dev, &itag);
if (err < 0)
goto martian_source_keep_err;
goto local_input;
}
if (!IN_DEV_FORWARD(in_dev)) {
err = -EHOSTUNREACH;
goto no_route;
}
if (res.type != RTN_UNICAST)
goto martian_destination;
err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
out: return err;
brd_input:
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
if (!ipv4_is_zeronet(saddr)) {
err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
in_dev, &itag);
if (err < 0)
goto martian_source_keep_err;
}
flags |= RTCF_BROADCAST;
res.type = RTN_BROADCAST;
RT_CACHE_STAT_INC(in_brd);
local_input:
do_cache = false;
if (res.fi) {
if (!itag) {
rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
err = 0;
goto out;
}
do_cache = true;
}
}
rth = rt_dst_alloc(net->loopback_dev,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
if (!rth)
goto e_nobufs;
	rth->dst.input = ip_local_deliver;
	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->rt_genid = rt_genid_ipv4(net);
rth->rt_flags = flags|RTCF_LOCAL;
rth->rt_type = res.type;
rth->rt_is_input = 1;
rth->rt_iif = 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
RT_CACHE_STAT_INC(in_slow_tot);
if (res.type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
rth->rt_flags &= ~RTCF_LOCAL;
}
if (do_cache) {
if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
rth->dst.flags |= DST_NOCACHE;
rt_add_uncached_list(rth);
}
}
skb_dst_set(skb, &rth->dst);
err = 0;
goto out;
no_route:
RT_CACHE_STAT_INC(in_no_route);
res.type = RTN_UNREACHABLE;
res.fi = NULL;
goto local_input;
/*
* Do not cache martian addresses: they should be logged (RFC1812)
*/
martian_destination:
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev))
net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
&daddr, &saddr, dev->name);
#endif
e_inval:
err = -EINVAL;
goto out;
e_nobufs:
err = -ENOBUFS;
goto out;
martian_source:
err = -EINVAL;
martian_source_keep_err:
ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
goto out;
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
int res;
rcu_read_lock();
	/* Multicast recognition logic was moved from the route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result, a host on a multicast
	   network acquires a lot of useless route cache entries, e.g. for
	   SDR messages from all over the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), this does not result in a slowdown
	   compared with route cache reject entries.
	   Note that multicast routers are not affected, because a
	   route cache entry is created eventually.
	 */
if (ipv4_is_multicast(daddr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
int our = ip_check_mc_rcu(in_dev, daddr, saddr,
ip_hdr(skb)->protocol);
if (our
#ifdef CONFIG_IP_MROUTE
||
(!ipv4_is_local_multicast(daddr) &&
IN_DEV_MFORWARD(in_dev))
#endif
) {
int res = ip_route_input_mc(skb, daddr, saddr,
tos, dev, our);
rcu_read_unlock();
return res;
}
}
rcu_read_unlock();
return -EINVAL;
}
res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
const struct flowi4 *fl4, int orig_oif,
struct net_device *dev_out,
unsigned int flags)
{
struct fib_info *fi = res->fi;
struct fib_nh_exception *fnhe;
struct in_device *in_dev;
u16 type = res->type;
struct rtable *rth;
bool do_cache;
in_dev = __in_dev_get_rcu(dev_out);
if (!in_dev)
return ERR_PTR(-EINVAL);
if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
return ERR_PTR(-EINVAL);
if (ipv4_is_lbcast(fl4->daddr))
type = RTN_BROADCAST;
else if (ipv4_is_multicast(fl4->daddr))
type = RTN_MULTICAST;
else if (ipv4_is_zeronet(fl4->daddr))
return ERR_PTR(-EINVAL);
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
do_cache = true;
if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
fi = NULL;
} else if (type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST | RTCF_LOCAL;
if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
fl4->flowi4_proto))
flags &= ~RTCF_LOCAL;
else
do_cache = false;
		/* If no multicast route exists, use
		 * the default one, but do not use a gateway in this case.
		 * Yes, it is a hack.
		 */
if (fi && res->prefixlen < 4)
fi = NULL;
}
fnhe = NULL;
do_cache &= fi != NULL;
if (do_cache) {
struct rtable __rcu **prth;
struct fib_nh *nh = &FIB_RES_NH(*res);
fnhe = find_exception(nh, fl4->daddr);
if (fnhe)
prth = &fnhe->fnhe_rth_output;
else {
if (unlikely(fl4->flowi4_flags &
FLOWI_FLAG_KNOWN_NH &&
!(nh->nh_gw &&
nh->nh_scope == RT_SCOPE_LINK))) {
do_cache = false;
goto add;
}
prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
}
rth = rcu_dereference(*prth);
if (rt_cache_valid(rth)) {
dst_hold(&rth->dst);
return rth;
}
}
add:
rth = rt_dst_alloc(dev_out,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
IN_DEV_CONF_GET(in_dev, NOXFRM),
do_cache);
if (!rth)
return ERR_PTR(-ENOBUFS);
rth->dst.output = ip_output;
rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
rth->rt_is_input = 0;
rth->rt_iif = orig_oif ? : 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
RT_CACHE_STAT_INC(out_slow_tot);
if (flags & RTCF_LOCAL)
rth->dst.input = ip_local_deliver;
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
rth->dst.output = ip_mc_output;
RT_CACHE_STAT_INC(out_slow_mc);
}
#ifdef CONFIG_IP_MROUTE
if (type == RTN_MULTICAST) {
if (IN_DEV_MFORWARD(in_dev) &&
!ipv4_is_local_multicast(fl4->daddr)) {
rth->dst.input = ip_mr_input;
rth->dst.output = ip_mc_output;
}
}
#endif
}
rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
return rth;
}
/*
* Major route resolver routine.
*/
struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
struct net_device *dev_out = NULL;
__u8 tos = RT_FL_TOS(fl4);
unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
int orig_oif;
res.tclassid = 0;
res.fi = NULL;
res.table = NULL;
orig_oif = fl4->flowi4_oif;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
rcu_read_lock();
if (fl4->saddr) {
rth = ERR_PTR(-EINVAL);
if (ipv4_is_multicast(fl4->saddr) ||
ipv4_is_lbcast(fl4->saddr) ||
ipv4_is_zeronet(fl4->saddr))
goto out;
		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with the saddr
		      of another iface. --ANK
		 */
if (fl4->flowi4_oif == 0 &&
(ipv4_is_multicast(fl4->daddr) ||
ipv4_is_lbcast(fl4->daddr))) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
dev_out = __ip_dev_find(net, fl4->saddr, false);
if (dev_out == NULL)
goto out;
			/* Special hack: the user can direct multicasts
			   and limited broadcast via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are broken,
			   because we are not allowed to build a multicast path
			   with a loopback source addr (the routing cache
			   cannot know that ttl is zero, so the packet
			   will not leave this host and the route is valid).
			   Luckily, this hack is a good workaround.
			 */
fl4->flowi4_oif = dev_out->ifindex;
goto make_route;
}
if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
if (!__ip_dev_find(net, fl4->saddr, false))
goto out;
}
}
if (fl4->flowi4_oif) {
dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
rth = ERR_PTR(-ENODEV);
if (dev_out == NULL)
goto out;
/* RACE: Check return value of inet_select_addr instead. */
if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
rth = ERR_PTR(-ENETUNREACH);
goto out;
}
if (ipv4_is_local_multicast(fl4->daddr) ||
ipv4_is_lbcast(fl4->daddr)) {
if (!fl4->saddr)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
goto make_route;
}
if (!fl4->saddr) {
if (ipv4_is_multicast(fl4->daddr))
fl4->saddr = inet_select_addr(dev_out, 0,
fl4->flowi4_scope);
else if (!fl4->daddr)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_HOST);
}
}
if (!fl4->daddr) {
fl4->daddr = fl4->saddr;
if (!fl4->daddr)
fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
dev_out = net->loopback_dev;
fl4->flowi4_oif = LOOPBACK_IFINDEX;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
}
if (fib_lookup(net, fl4, &res)) {
res.fi = NULL;
res.table = NULL;
if (fl4->flowi4_oif) {
			/* Apparently, the routing tables are wrong. Assume
			   that the destination is on-link.
			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, the routing
			   tables are looked up with only one purpose:
			   to catch whether the destination is gatewayed,
			   rather than direct. Moreover, if MSG_DONTROUTE is set,
			   we send the packet, ignoring both the routing tables
			   and the ifaddr state. --ANK
			   We could do this even if oif is unknown,
			   as IPv6 likely does, but we do not.
			 */
if (fl4->saddr == 0)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
res.type = RTN_UNICAST;
goto make_route;
}
rth = ERR_PTR(-ENETUNREACH);
goto out;
}
if (res.type == RTN_LOCAL) {
if (!fl4->saddr) {
if (res.fi->fib_prefsrc)
fl4->saddr = res.fi->fib_prefsrc;
else
fl4->saddr = fl4->daddr;
}
dev_out = net->loopback_dev;
fl4->flowi4_oif = dev_out->ifindex;
flags |= RTCF_LOCAL;
goto make_route;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
fib_select_multipath(&res);
else
#endif
if (!res.prefixlen &&
res.table->tb_num_default > 1 &&
res.type == RTN_UNICAST && !fl4->flowi4_oif)
fib_select_default(&res);
if (!fl4->saddr)
fl4->saddr = FIB_RES_PREFSRC(net, res);
dev_out = FIB_RES_DEV(res);
fl4->flowi4_oif = dev_out->ifindex;
make_route:
rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
out:
rcu_read_unlock();
return rth;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
return NULL;
}
static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
return mtu ? : dst->dev->mtu;
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
{
}
static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb)
{
}
static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
unsigned long old)
{
return NULL;
}
static struct dst_ops ipv4_dst_blackhole_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.check = ipv4_blackhole_dst_check,
.mtu = ipv4_blackhole_mtu,
.default_advmss = ipv4_default_advmss,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
.redirect = ipv4_rt_blackhole_redirect,
.cow_metrics = ipv4_rt_blackhole_cow_metrics,
.neigh_lookup = ipv4_neigh_lookup,
};
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
struct rtable *ort = (struct rtable *) dst_orig;
struct rtable *rt;
rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
if (rt) {
struct dst_entry *new = &rt->dst;
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard_sk;
new->dev = ort->dst.dev;
if (new->dev)
dev_hold(new->dev);
rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
rt->rt_pmtu = ort->rt_pmtu;
rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
rt->rt_gateway = ort->rt_gateway;
rt->rt_uses_gateway = ort->rt_uses_gateway;
INIT_LIST_HEAD(&rt->rt_uncached);
dst_free(new);
}
dst_release(dst_orig);
return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
struct sock *sk)
{
struct rtable *rt = __ip_route_output_key(net, flp4);
if (IS_ERR(rt))
return rt;
if (flp4->flowi4_proto)
rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
flowi4_to_flowi(flp4),
sk, 0);
return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
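/* Typical in-kernel usage, sketched with error handling trimmed (the
 * flowi4 fields shown are the common minimum, not an exhaustive list):
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= dst_ip,
 *		.saddr		= src_ip,
 *		.flowi4_proto	= IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	... transmit via rt->dst ...
 *	ip_rt_put(rt);
 */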
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
u32 seq, int event, int nowait, unsigned int flags)
{
struct rtable *rt = skb_rtable(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned long expires = 0;
u32 error;
u32 metrics[RTAX_MAX];
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
if (nlh == NULL)
return -EMSGSIZE;
r = nlmsg_data(nlh);
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
r->rtm_tos = fl4->flowi4_tos;
r->rtm_table = RT_TABLE_MAIN;
if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
goto nla_put_failure;
r->rtm_type = rt->rt_type;
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = RTPROT_UNSPEC;
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
if (nla_put_be32(skb, RTA_DST, dst))
goto nla_put_failure;
if (src) {
r->rtm_src_len = 32;
if (nla_put_be32(skb, RTA_SRC, src))
goto nla_put_failure;
}
if (rt->dst.dev &&
nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (rt->dst.tclassid &&
nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
goto nla_put_failure;
#endif
if (!rt_is_input_route(rt) &&
fl4->saddr != src) {
if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
goto nla_put_failure;
}
if (rt->rt_uses_gateway &&
nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
goto nla_put_failure;
expires = rt->dst.expires;
if (expires) {
unsigned long now = jiffies;
if (time_before(now, expires))
expires -= now;
else
expires = 0;
}
memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
if (rt->rt_pmtu && expires)
metrics[RTAX_MTU - 1] = rt->rt_pmtu;
if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;
if (fl4->flowi4_mark &&
nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
goto nla_put_failure;
error = rt->dst.error;
if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
int err = ipmr_get_route(net, skb,
fl4->saddr, fl4->daddr,
r, nowait);
if (err <= 0) {
if (!nowait) {
if (err == 0)
return 0;
goto nla_put_failure;
} else {
if (err == -EMSGSIZE)
goto nla_put_failure;
error = err;
}
}
} else
#endif
if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
goto nla_put_failure;
}
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(in_skb->sk);
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
struct rtable *rt = NULL;
struct flowi4 fl4;
__be32 dst = 0;
__be32 src = 0;
u32 iif;
int err;
int mark;
struct sk_buff *skb;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
if (err < 0)
goto errout;
rtm = nlmsg_data(nlh);
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL) {
err = -ENOBUFS;
goto errout;
}
	/* Reserve room for dummy headers; this skb can pass
	   through a good chunk of the routing engine.
	 */
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
ip_hdr(skb)->protocol = IPPROTO_ICMP;
skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = dst;
fl4.saddr = src;
fl4.flowi4_tos = rtm->rtm_tos;
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
if (iif) {
struct net_device *dev;
dev = __dev_get_by_index(net, iif);
if (dev == NULL) {
err = -ENODEV;
goto errout_free;
}
skb->protocol = htons(ETH_P_IP);
skb->dev = dev;
skb->mark = mark;
local_bh_disable();
err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
local_bh_enable();
rt = skb_rtable(skb);
if (err == 0 && rt->dst.error)
err = -rt->dst.error;
} else {
rt = ip_route_output_key(net, &fl4);
err = 0;
if (IS_ERR(rt))
err = PTR_ERR(rt);
}
if (err)
goto errout_free;
skb_dst_set(skb, &rt->dst);
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
err = rt_fill_info(net, dst, src, &fl4, skb,
NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
RTM_NEWROUTE, 0, 0);
if (err <= 0)
goto errout_free;
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
errout_free:
kfree_skb(skb);
goto errout;
}
void ip_rt_multicast_event(struct in_device *in_dev)
{
rt_cache_flush(dev_net(in_dev->dev));
}
#ifdef CONFIG_SYSCTL
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)__ctl->extra1;
if (write) {
rt_cache_flush(net);
fnhe_genid_bump(net);
return 0;
}
return -EINVAL;
}
static struct ctl_table ipv4_route_table[] = {
{
.procname = "gc_thresh",
.data = &ipv4_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "max_size",
.data = &ip_rt_max_size,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
/* Deprecated. Use gc_min_interval_ms */
.procname = "gc_min_interval",
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_min_interval_ms",
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
{
.procname = "gc_timeout",
.data = &ip_rt_gc_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_interval",
.data = &ip_rt_gc_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "redirect_load",
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "redirect_number",
.data = &ip_rt_redirect_number,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "redirect_silence",
.data = &ip_rt_redirect_silence,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "error_cost",
.data = &ip_rt_error_cost,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "error_burst",
.data = &ip_rt_error_burst,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "gc_elasticity",
.data = &ip_rt_gc_elasticity,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "mtu_expires",
.data = &ip_rt_mtu_expires,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "min_pmtu",
.data = &ip_rt_min_pmtu,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "min_adv_mss",
.data = &ip_rt_min_advmss,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static struct ctl_table ipv4_route_flush_table[] = {
{
.procname = "flush",
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
},
{ },
};
static __net_init int sysctl_route_net_init(struct net *net)
{
struct ctl_table *tbl;
tbl = ipv4_route_flush_table;
if (!net_eq(net, &init_net)) {
tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
if (tbl == NULL)
goto err_dup;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
tbl[0].procname = NULL;
}
tbl[0].extra1 = net;
net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
if (net->ipv4.route_hdr == NULL)
goto err_reg;
return 0;
err_reg:
if (tbl != ipv4_route_flush_table)
kfree(tbl);
err_dup:
return -ENOMEM;
}
static __net_exit void sysctl_route_net_exit(struct net *net)
{
struct ctl_table *tbl;
tbl = net->ipv4.route_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.route_hdr);
BUG_ON(tbl == ipv4_route_flush_table);
kfree(tbl);
}
static __net_initdata struct pernet_operations sysctl_route_ops = {
.init = sysctl_route_net_init,
.exit = sysctl_route_net_exit,
};
#endif
static __net_init int rt_genid_init(struct net *net)
{
atomic_set(&net->ipv4.rt_genid, 0);
atomic_set(&net->fnhe_genid, 0);
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
return 0;
}
static __net_initdata struct pernet_operations rt_genid_ops = {
.init = rt_genid_init,
};
static int __net_init ipv4_inetpeer_init(struct net *net)
{
struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
if (!bp)
return -ENOMEM;
inet_peer_base_init(bp);
net->ipv4.peers = bp;
return 0;
}
static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
struct inet_peer_base *bp = net->ipv4.peers;
net->ipv4.peers = NULL;
inetpeer_invalidate_tree(bp);
kfree(bp);
}
static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
.init = ipv4_inetpeer_init,
.exit = ipv4_inetpeer_exit,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
int rc = 0;
ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
if (!ip_idents)
panic("IP: failed to allocate ip_idents\n");
prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
#ifdef CONFIG_IP_ROUTE_CLASSID
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
if (!ip_rt_acct)
panic("IP: failed to allocate ip_rt_acct\n");
#endif
ipv4_dst_ops.kmem_cachep =
kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
if (dst_entries_init(&ipv4_dst_ops) < 0)
panic("IP: failed to allocate ipv4_dst_ops counter\n");
if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
ipv4_dst_ops.gc_thresh = ~0;
ip_rt_max_size = INT_MAX;
devinet_init();
ip_fib_init();
if (ip_rt_proc_init())
pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
xfrm4_init();
#endif
rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
#ifdef CONFIG_SYSCTL
register_pernet_subsys(&sysctl_route_ops);
#endif
register_pernet_subsys(&rt_genid_ops);
register_pernet_subsys(&ipv4_inetpeer_ops);
return rc;
}
#ifdef CONFIG_SYSCTL
/*
* We really need to sanitize the damn ipv4 init order, then all
* this nonsense will go away.
*/
void __init ip_static_sysctl_init(void)
{
register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-17/c/bad_1482_2 |
crossvul-cpp_data_bad_2306_2 | /*
* linux/fs/open.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/securebits.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/rcupdate.h>
#include <linux/audit.h>
#include <linux/falloc.h>
#include <linux/fs_struct.h>
#include <linux/ima.h>
#include <linux/dnotify.h>
#include <linux/compat.h>
#include "internal.h"
int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
struct file *filp)
{
int ret;
struct iattr newattrs;
/* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
if (length < 0)
return -EINVAL;
newattrs.ia_size = length;
newattrs.ia_valid = ATTR_SIZE | time_attrs;
if (filp) {
newattrs.ia_file = filp;
newattrs.ia_valid |= ATTR_FILE;
}
/* Remove suid/sgid on truncate too */
ret = should_remove_suid(dentry);
if (ret)
newattrs.ia_valid |= ret | ATTR_FORCE;
mutex_lock(&dentry->d_inode->i_mutex);
ret = notify_change(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
return ret;
}
long vfs_truncate(struct path *path, loff_t length)
{
struct inode *inode;
long error;
inode = path->dentry->d_inode;
/* For directories it's -EISDIR, for other non-regulars it's -EINVAL */
if (S_ISDIR(inode->i_mode))
return -EISDIR;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
error = mnt_want_write(path->mnt);
if (error)
goto out;
error = inode_permission(inode, MAY_WRITE);
if (error)
goto mnt_drop_write_and_out;
error = -EPERM;
if (IS_APPEND(inode))
goto mnt_drop_write_and_out;
error = get_write_access(inode);
if (error)
goto mnt_drop_write_and_out;
/*
* Make sure that there are no leases. get_write_access() protects
* against the truncate racing with a lease-granting setlease().
*/
error = break_lease(inode, O_WRONLY);
if (error)
goto put_write_and_out;
error = locks_verify_truncate(inode, NULL, length);
if (!error)
error = security_path_truncate(path);
if (!error)
error = do_truncate(path->dentry, length, 0, NULL);
put_write_and_out:
put_write_access(inode);
mnt_drop_write_and_out:
mnt_drop_write(path->mnt);
out:
return error;
}
EXPORT_SYMBOL_GPL(vfs_truncate);
static long do_sys_truncate(const char __user *pathname, loff_t length)
{
unsigned int lookup_flags = LOOKUP_FOLLOW;
struct path path;
int error;
if (length < 0) /* sorry, but loff_t says... */
return -EINVAL;
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (!error) {
error = vfs_truncate(&path, length);
path_put(&path);
}
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
{
return do_sys_truncate(path, length);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length)
{
return do_sys_truncate(path, length);
}
#endif
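/*
* Illustrative userspace sketch (an assumption for exposition, not part
* of this kernel file): exercising the truncate(2) entry point above.
* The path and length are arbitrary example values.
*
*	#include <stdio.h>
*	#include <unistd.h>
*
*	int main(void)
*	{
*		if (truncate("/tmp/example.dat", 1024) != 0)
*			perror("truncate");
*		return 0;
*	}
*/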
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
struct inode *inode;
struct dentry *dentry;
struct fd f;
int error;
error = -EINVAL;
if (length < 0)
goto out;
error = -EBADF;
f = fdget(fd);
if (!f.file)
goto out;
/* explicitly opened as large or we are on a 64-bit box */
if (f.file->f_flags & O_LARGEFILE)
small = 0;
dentry = f.file->f_path.dentry;
inode = dentry->d_inode;
error = -EINVAL;
if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE))
goto out_putf;
error = -EINVAL;
/* Cannot ftruncate over 2^31 bytes without large file support */
if (small && length > MAX_NON_LFS)
goto out_putf;
error = -EPERM;
if (IS_APPEND(inode))
goto out_putf;
sb_start_write(inode->i_sb);
error = locks_verify_truncate(inode, f.file, length);
if (!error)
error = security_path_truncate(&f.file->f_path);
if (!error)
error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
sb_end_write(inode->i_sb);
out_putf:
fdput(f);
out:
return error;
}
SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
{
return do_sys_ftruncate(fd, length, 1);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
{
return do_sys_ftruncate(fd, length, 1);
}
#endif
/* LFS versions of truncate are only needed on 32 bit machines */
#if BITS_PER_LONG == 32
SYSCALL_DEFINE2(truncate64, const char __user *, path, loff_t, length)
{
return do_sys_truncate(path, length);
}
SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length)
{
return do_sys_ftruncate(fd, length, 0);
}
#endif /* BITS_PER_LONG == 32 */
int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
long ret;
if (offset < 0 || len <= 0)
return -EINVAL;
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
/* Punch hole must have keep size set */
if ((mode & FALLOC_FL_PUNCH_HOLE) &&
!(mode & FALLOC_FL_KEEP_SIZE))
return -EOPNOTSUPP;
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
/* It's not possible to punch a hole in an append-only file */
if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
return -EPERM;
if (IS_IMMUTABLE(inode))
return -EPERM;
/*
* Revalidate the write permissions, in case security policy has
* changed since the files were opened.
*/
ret = security_file_permission(file, MAY_WRITE);
if (ret)
return ret;
if (S_ISFIFO(inode->i_mode))
return -ESPIPE;
/*
* Let individual file system decide if it supports preallocation
* for directories or not.
*/
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return -ENODEV;
/* Check for wrap through zero too */
if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
return -EFBIG;
if (!file->f_op->fallocate)
return -EOPNOTSUPP;
sb_start_write(inode->i_sb);
ret = file->f_op->fallocate(file, mode, offset, len);
sb_end_write(inode->i_sb);
return ret;
}
SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
{
struct fd f = fdget(fd);
int error = -EBADF;
if (f.file) {
error = do_fallocate(f.file, mode, offset, len);
fdput(f);
}
return error;
}
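/*
* Illustrative userspace sketch (not part of this file): punching a hole
* with fallocate(2). As enforced above, FALLOC_FL_PUNCH_HOLE is only
* accepted together with FALLOC_FL_KEEP_SIZE. The offset and length are
* example values.
*
*	#define _GNU_SOURCE
*	#include <fcntl.h>
*	#include <stdio.h>
*
*	static int punch_hole(int fd)
*	{
*		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
*			      4096, 8192) != 0) {
*			perror("fallocate");
*			return -1;
*		}
*		return 0;
*	}
*/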
/*
* access() needs to use the real uid/gid, not the effective uid/gid.
* We do this by temporarily clearing all FS-related capabilities and
* switching the fsuid/fsgid around to the real ones.
*/
SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
{
const struct cred *old_cred;
struct cred *override_cred;
struct path path;
struct inode *inode;
int res;
unsigned int lookup_flags = LOOKUP_FOLLOW;
if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */
return -EINVAL;
override_cred = prepare_creds();
if (!override_cred)
return -ENOMEM;
override_cred->fsuid = override_cred->uid;
override_cred->fsgid = override_cred->gid;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
/* Clear the capabilities if we switch to a non-root user */
kuid_t root_uid = make_kuid(override_cred->user_ns, 0);
if (!uid_eq(override_cred->uid, root_uid))
cap_clear(override_cred->cap_effective);
else
override_cred->cap_effective =
override_cred->cap_permitted;
}
old_cred = override_creds(override_cred);
retry:
res = user_path_at(dfd, filename, lookup_flags, &path);
if (res)
goto out;
inode = path.dentry->d_inode;
if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
/*
* MAY_EXEC on regular files is denied if the fs is mounted
* with the "noexec" flag.
*/
res = -EACCES;
if (path.mnt->mnt_flags & MNT_NOEXEC)
goto out_path_release;
}
res = inode_permission(inode, mode | MAY_ACCESS);
/* SuS v2 requires we report a read only fs too */
if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
goto out_path_release;
/*
* This is a rare case where using __mnt_is_readonly()
* is OK without a mnt_want/drop_write() pair. Since
* no actual write to the fs is performed here, we do
* not need to telegraph that to anyone.
*
* By doing this, we accept that this access is
* inherently racy and know that the fs may change
* state before we even see this result.
*/
if (__mnt_is_readonly(path.mnt))
res = -EROFS;
out_path_release:
path_put(&path);
if (retry_estale(res, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
revert_creds(old_cred);
put_cred(override_cred);
return res;
}
SYSCALL_DEFINE2(access, const char __user *, filename, int, mode)
{
return sys_faccessat(AT_FDCWD, filename, mode);
}
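/*
* Illustrative userspace sketch (not part of this file): access(2) checks
* against the real uid/gid, which is why the implementation above swaps
* the fsuid/fsgid before the permission check. Note the read-only case:
* a file may be mode-writable yet W_OK still fails with EROFS, matching
* the __mnt_is_readonly() check above. The path is an example value.
*
*	#include <stdio.h>
*	#include <unistd.h>
*
*	int main(void)
*	{
*		if (access("/etc/hosts", W_OK) != 0)
*			perror("access");
*		return 0;
*	}
*/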
SYSCALL_DEFINE1(chdir, const char __user *, filename)
{
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
retry:
error = user_path_at(AT_FDCWD, filename, lookup_flags, &path);
if (error)
goto out;
error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
set_fs_pwd(current->fs, &path);
dput_and_out:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
return error;
}
SYSCALL_DEFINE1(fchdir, unsigned int, fd)
{
struct fd f = fdget_raw(fd);
struct inode *inode;
int error = -EBADF;
if (!f.file)
goto out;
inode = file_inode(f.file);
error = -ENOTDIR;
if (!S_ISDIR(inode->i_mode))
goto out_putf;
error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
if (!error)
set_fs_pwd(current->fs, &f.file->f_path);
out_putf:
fdput(f);
out:
return error;
}
SYSCALL_DEFINE1(chroot, const char __user *, filename)
{
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
retry:
error = user_path_at(AT_FDCWD, filename, lookup_flags, &path);
if (error)
goto out;
error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
error = -EPERM;
if (!ns_capable(current_user_ns(), CAP_SYS_CHROOT))
goto dput_and_out;
error = security_path_chroot(&path);
if (error)
goto dput_and_out;
set_fs_root(current->fs, &path);
error = 0;
dput_and_out:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
return error;
}
static int chmod_common(struct path *path, umode_t mode)
{
struct inode *inode = path->dentry->d_inode;
struct iattr newattrs;
int error;
error = mnt_want_write(path->mnt);
if (error)
return error;
mutex_lock(&inode->i_mutex);
error = security_path_chmod(path, mode);
if (error)
goto out_unlock;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
error = notify_change(path->dentry, &newattrs);
out_unlock:
mutex_unlock(&inode->i_mutex);
mnt_drop_write(path->mnt);
return error;
}
SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
{
struct fd f = fdget(fd);
int err = -EBADF;
if (f.file) {
audit_inode(NULL, f.file->f_path.dentry, 0);
err = chmod_common(&f.file->f_path, mode);
fdput(f);
}
return err;
}
SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode)
{
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_FOLLOW;
retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (!error) {
error = chmod_common(&path, mode);
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
}
return error;
}
SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode)
{
return sys_fchmodat(AT_FDCWD, filename, mode);
}
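/*
* Illustrative userspace sketch (not part of this file): chmod(2) and
* fchmodat(2) both funnel into chmod_common() above. Example values only.
*
*	#include <fcntl.h>
*	#include <stdio.h>
*	#include <sys/stat.h>
*
*	int main(void)
*	{
*		// equivalent to chmod("/tmp/example.dat", 0644)
*		if (fchmodat(AT_FDCWD, "/tmp/example.dat", 0644, 0) != 0)
*			perror("fchmodat");
*		return 0;
*	}
*/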
static int chown_common(struct path *path, uid_t user, gid_t group)
{
struct inode *inode = path->dentry->d_inode;
int error;
struct iattr newattrs;
kuid_t uid;
kgid_t gid;
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
if (!uid_valid(uid))
return -EINVAL;
newattrs.ia_valid |= ATTR_UID;
newattrs.ia_uid = uid;
}
if (group != (gid_t) -1) {
if (!gid_valid(gid))
return -EINVAL;
newattrs.ia_valid |= ATTR_GID;
newattrs.ia_gid = gid;
}
if (!S_ISDIR(inode->i_mode))
newattrs.ia_valid |=
ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
mutex_lock(&inode->i_mutex);
error = security_path_chown(path, uid, gid);
if (!error)
error = notify_change(path->dentry, &newattrs);
mutex_unlock(&inode->i_mutex);
return error;
}
SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
gid_t, group, int, flag)
{
struct path path;
int error = -EINVAL;
int lookup_flags;
if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
goto out;
lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
if (flag & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (error)
goto out;
error = mnt_want_write(path.mnt);
if (error)
goto out_release;
error = chown_common(&path, user, group);
mnt_drop_write(path.mnt);
out_release:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
return error;
}
SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
{
return sys_fchownat(AT_FDCWD, filename, user, group, 0);
}
SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group)
{
return sys_fchownat(AT_FDCWD, filename, user, group,
AT_SYMLINK_NOFOLLOW);
}
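/*
* Illustrative userspace sketch (not part of this file): the only
* difference between chown(2) and lchown(2) above is AT_SYMLINK_NOFOLLOW,
* i.e. whether a trailing symlink is followed. Uid/gid 1000 and the path
* are example values.
*
*	#include <stdio.h>
*	#include <unistd.h>
*
*	int main(void)
*	{
*		// changes the owner of the link itself, not its target
*		if (lchown("/tmp/example-link", 1000, 1000) != 0)
*			perror("lchown");
*		return 0;
*	}
*/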
SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
{
struct fd f = fdget(fd);
int error = -EBADF;
if (!f.file)
goto out;
error = mnt_want_write_file(f.file);
if (error)
goto out_fput;
audit_inode(NULL, f.file->f_path.dentry, 0);
error = chown_common(&f.file->f_path, user, group);
mnt_drop_write_file(f.file);
out_fput:
fdput(f);
out:
return error;
}
/*
* You have to be very careful that these write
* counts get cleaned up in error cases and
* upon __fput(). This should probably never
* be called outside of __dentry_open().
*/
static inline int __get_file_write_access(struct inode *inode,
struct vfsmount *mnt)
{
int error;
error = get_write_access(inode);
if (error)
return error;
/*
* Do not take mount writer counts on
* special files since no writes to
* the mount itself will occur.
*/
if (!special_file(inode->i_mode)) {
/*
* Balanced in __fput()
*/
error = __mnt_want_write(mnt);
if (error)
put_write_access(inode);
}
return error;
}
int open_check_o_direct(struct file *f)
{
/* NB: we're sure to have correct a_ops only after f_op->open */
if (f->f_flags & O_DIRECT) {
if (!f->f_mapping->a_ops ||
((!f->f_mapping->a_ops->direct_IO) &&
(!f->f_mapping->a_ops->get_xip_mem))) {
return -EINVAL;
}
}
return 0;
}
static int do_dentry_open(struct file *f,
int (*open)(struct inode *, struct file *),
const struct cred *cred)
{
static const struct file_operations empty_fops = {};
struct inode *inode;
int error;
f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
FMODE_PREAD | FMODE_PWRITE;
if (unlikely(f->f_flags & O_PATH))
f->f_mode = FMODE_PATH;
path_get(&f->f_path);
inode = f->f_inode = f->f_path.dentry->d_inode;
if (f->f_mode & FMODE_WRITE) {
error = __get_file_write_access(inode, f->f_path.mnt);
if (error)
goto cleanup_file;
if (!special_file(inode->i_mode))
file_take_write(f);
}
f->f_mapping = inode->i_mapping;
file_sb_list_add(f, inode->i_sb);
if (unlikely(f->f_mode & FMODE_PATH)) {
f->f_op = &empty_fops;
return 0;
}
f->f_op = fops_get(inode->i_fop);
if (unlikely(WARN_ON(!f->f_op))) {
error = -ENODEV;
goto cleanup_all;
}
error = security_file_open(f, cred);
if (error)
goto cleanup_all;
error = break_lease(inode, f->f_flags);
if (error)
goto cleanup_all;
if (!open)
open = f->f_op->open;
if (open) {
error = open(inode, f);
if (error)
goto cleanup_all;
}
if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(inode);
f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
return 0;
cleanup_all:
fops_put(f->f_op);
file_sb_list_del(f);
if (f->f_mode & FMODE_WRITE) {
put_write_access(inode);
if (!special_file(inode->i_mode)) {
/*
* We don't consider this a real
* mnt_want/drop_write() pair
* because it all happened right
* here, so just reset the state.
*/
file_reset_write(f);
__mnt_drop_write(f->f_path.mnt);
}
}
cleanup_file:
path_put(&f->f_path);
f->f_path.mnt = NULL;
f->f_path.dentry = NULL;
f->f_inode = NULL;
return error;
}
/**
* finish_open - finish opening a file
* @file: file pointer
* @dentry: pointer to dentry
* @open: open callback
* @opened: state of open
*
* This can be used to finish opening a file passed to i_op->atomic_open().
*
* If the open callback is set to NULL, then the standard f_op->open()
* filesystem callback is substituted.
*
* NB: the dentry reference is _not_ consumed. If, for example, the dentry is
* the return value of d_splice_alias(), then the caller needs to perform dput()
* on it after finish_open().
*
* On successful return @file is a fully instantiated open file. After this, if
* an error occurs in ->atomic_open(), it needs to clean up with fput().
*
* Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *),
int *opened)
{
int error;
BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
file->f_path.dentry = dentry;
error = do_dentry_open(file, open, current_cred());
if (!error)
*opened |= FILE_OPENED;
return error;
}
EXPORT_SYMBOL(finish_open);
/**
* finish_no_open - finish ->atomic_open() without opening the file
*
* @file: file pointer
* @dentry: dentry or NULL (as returned from ->lookup())
*
* This can be used to set the result of a successful lookup in ->atomic_open().
*
* NB: unlike finish_open() this function does consume the dentry reference and
* the caller need not dput() it.
*
* Returns "1" which must be the return value of ->atomic_open() after having
* called this function.
*/
int finish_no_open(struct file *file, struct dentry *dentry)
{
file->f_path.dentry = dentry;
return 1;
}
EXPORT_SYMBOL(finish_no_open);
struct file *dentry_open(const struct path *path, int flags,
const struct cred *cred)
{
int error;
struct file *f;
validate_creds(cred);
/* We must always pass in a valid mount pointer. */
BUG_ON(!path->mnt);
f = get_empty_filp();
if (!IS_ERR(f)) {
f->f_flags = flags;
f->f_path = *path;
error = do_dentry_open(f, NULL, cred);
if (!error) {
/* from now on we need fput() to dispose of f */
error = open_check_o_direct(f);
if (error) {
fput(f);
f = ERR_PTR(error);
}
} else {
put_filp(f);
f = ERR_PTR(error);
}
}
return f;
}
EXPORT_SYMBOL(dentry_open);
static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
{
int lookup_flags = 0;
int acc_mode;
if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
op->mode = 0;
/* Must never be set by userspace */
flags &= ~FMODE_NONOTIFY & ~O_CLOEXEC;
/*
* O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
* check for O_DSYNC if they need any syncing at all, we enforce that it
* is always set instead of having to deal with possibly weird behaviour
* from malicious applications setting only __O_SYNC.
*/
if (flags & __O_SYNC)
flags |= O_DSYNC;
if (flags & __O_TMPFILE) {
if ((flags & O_TMPFILE_MASK) != O_TMPFILE)
return -EINVAL;
acc_mode = MAY_OPEN | ACC_MODE(flags);
if (!(acc_mode & MAY_WRITE))
return -EINVAL;
} else if (flags & O_PATH) {
/*
* If O_PATH is set in the open flags, then we
* cannot have anything other than the below set of flags.
*/
flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
acc_mode = 0;
} else {
acc_mode = MAY_OPEN | ACC_MODE(flags);
}
op->open_flag = flags;
/* O_TRUNC implies we need access checks for write permissions */
if (flags & O_TRUNC)
acc_mode |= MAY_WRITE;
/* Allow the LSM permission hook to distinguish append
access from general write access. */
if (flags & O_APPEND)
acc_mode |= MAY_APPEND;
op->acc_mode = acc_mode;
op->intent = flags & O_PATH ? 0 : LOOKUP_OPEN;
if (flags & O_CREAT) {
op->intent |= LOOKUP_CREATE;
if (flags & O_EXCL)
op->intent |= LOOKUP_EXCL;
}
if (flags & O_DIRECTORY)
lookup_flags |= LOOKUP_DIRECTORY;
if (!(flags & O_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
op->lookup_flags = lookup_flags;
return 0;
}
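/*
* Illustrative userspace sketch (not part of this file): two corner cases
* handled by build_open_flags() above. O_TMPFILE must request write
* access, and O_PATH strips everything except O_DIRECTORY and O_NOFOLLOW.
*
*	#define _GNU_SOURCE
*	#include <fcntl.h>
*	#include <unistd.h>
*
*	void examples(void)
*	{
*		// unnamed temporary file; O_TMPFILE | O_RDONLY would get -EINVAL
*		int tmp = open("/tmp", O_TMPFILE | O_RDWR, 0600);
*		// fd usable only for *at() lookups, fstat() and the like
*		int loc = open("/etc", O_PATH | O_DIRECTORY);
*		if (tmp >= 0)
*			close(tmp);
*		if (loc >= 0)
*			close(loc);
*	}
*/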
/**
* file_open_name - open file and return file pointer
*
* @name: struct filename containing path to open
* @flags: open flags as per the open(2) second argument
* @mode: mode for the new file if O_CREAT is set, else ignored
*
* This is the helper to open a file from kernelspace if you really
* have to. But in general you should not do this, so please move
* along, nothing to see here.
*/
struct file *file_open_name(struct filename *name, int flags, umode_t mode)
{
struct open_flags op;
int err = build_open_flags(flags, mode, &op);
return err ? ERR_PTR(err) : do_filp_open(AT_FDCWD, name, &op);
}
/**
* filp_open - open file and return file pointer
*
* @filename: path to open
* @flags: open flags as per the open(2) second argument
* @mode: mode for the new file if O_CREAT is set, else ignored
*
* This is the helper to open a file from kernelspace if you really
* have to. But in general you should not do this, so please move
* along, nothing to see here.
*/
struct file *filp_open(const char *filename, int flags, umode_t mode)
{
struct filename name = {.name = filename};
return file_open_name(&name, flags, mode);
}
EXPORT_SYMBOL(filp_open);
struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
const char *filename, int flags)
{
struct open_flags op;
int err = build_open_flags(flags, 0, &op);
if (err)
return ERR_PTR(err);
if (flags & O_CREAT)
return ERR_PTR(-EINVAL);
if (!filename && (flags & O_DIRECTORY))
if (!dentry->d_inode->i_op->lookup)
return ERR_PTR(-ENOTDIR);
return do_file_open_root(dentry, mnt, filename, &op);
}
EXPORT_SYMBOL(file_open_root);
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_flags op;
int fd = build_open_flags(flags, mode, &op);
struct filename *tmp;
if (fd)
return fd;
tmp = getname(filename);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
fd = get_unused_fd_flags(flags);
if (fd >= 0) {
struct file *f = do_filp_open(dfd, tmp, &op);
if (IS_ERR(f)) {
put_unused_fd(fd);
fd = PTR_ERR(f);
} else {
fsnotify_open(f);
fd_install(fd, f);
}
}
putname(tmp);
return fd;
}
SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
{
if (force_o_largefile())
flags |= O_LARGEFILE;
return do_sys_open(AT_FDCWD, filename, flags, mode);
}
SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
umode_t, mode)
{
if (force_o_largefile())
flags |= O_LARGEFILE;
return do_sys_open(dfd, filename, flags, mode);
}
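/*
* Illustrative userspace sketch (not part of this file): openat(2)
* resolves a relative pathname against the directory referred to by dfd
* rather than the current working directory; AT_FDCWD restores plain
* open(2) behaviour. Paths are example values.
*
*	#define _GNU_SOURCE
*	#include <fcntl.h>
*	#include <stdio.h>
*	#include <unistd.h>
*
*	int main(void)
*	{
*		int dirfd = open("/etc", O_PATH | O_DIRECTORY);
*		int fd = openat(dirfd, "hosts", O_RDONLY);
*		if (fd < 0)
*			perror("openat");
*		else
*			close(fd);
*		if (dirfd >= 0)
*			close(dirfd);
*		return 0;
*	}
*/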
#ifndef __alpha__
/*
* For backward compatibility? Maybe this should be moved
* into arch/i386 instead?
*/
SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode)
{
return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
}
#endif
/*
* "id" is the POSIX thread ID. We use the
* files pointer for this.
*/
int filp_close(struct file *filp, fl_owner_t id)
{
int retval = 0;
if (!file_count(filp)) {
printk(KERN_ERR "VFS: Close: file count is 0\n");
return 0;
}
if (filp->f_op->flush)
retval = filp->f_op->flush(filp, id);
if (likely(!(filp->f_mode & FMODE_PATH))) {
dnotify_flush(filp, id);
locks_remove_posix(filp, id);
}
fput(filp);
return retval;
}
EXPORT_SYMBOL(filp_close);
/*
* Careful here! We test whether the file pointer is NULL before
* releasing the fd. This ensures that one clone task can't release
* an fd while another clone is opening it.
*/
SYSCALL_DEFINE1(close, unsigned int, fd)
{
int retval = __close_fd(current->files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
retval == -ERESTARTNOINTR ||
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
return retval;
}
EXPORT_SYMBOL(sys_close);
/*
* This routine simulates a hangup on the tty, to arrange that users
* are given clean terminals at login time.
*/
SYSCALL_DEFINE0(vhangup)
{
if (capable(CAP_SYS_TTY_CONFIG)) {
tty_vhangup_self();
return 0;
}
return -EPERM;
}
/*
* Called when an inode is about to be opened.
* We use this to disallow opening large files on 32-bit systems if
* the caller didn't specify O_LARGEFILE. On 64-bit systems we force
* this flag on in sys_open.
*/
int generic_file_open(struct inode * inode, struct file * filp)
{
if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
return -EOVERFLOW;
return 0;
}
EXPORT_SYMBOL(generic_file_open);
/*
* This is used by subsystems that don't want seekable
* file descriptors. The function is not supposed to ever fail; the only
* reason it returns an 'int' and not 'void' is so that it can be plugged
* directly into the file_operations structure.
*/
int nonseekable_open(struct inode *inode, struct file *filp)
{
filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
return 0;
}
EXPORT_SYMBOL(nonseekable_open);
| ./CrossVul/dataset_final_sorted/CWE-17/c/bad_2306_2 |
crossvul-cpp_data_bad_1498_0 | /*
* linux/fs/pipe.c
*
* Copyright (C) 1991, 1992, 1999 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include "internal.h"
/*
* The max size that a non-root user is allowed to grow the pipe. Can
* be set by root in /proc/sys/fs/pipe-max-size
*/
unsigned int pipe_max_size = 1048576;
/*
* Minimum pipe size, as required by POSIX
*/
unsigned int pipe_min_size = PAGE_SIZE;
/*
* We use a start+len construction, which provides full use of the
* allocated memory.
* -- Florian Coosmann (FGC)
*
* Reads with count = 0 should always return 0.
* -- Julian Bradfield 1999-06-07.
*
* FIFOs and Pipes now generate SIGIO for both readers and writers.
* -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
*
* pipe_read & write cleanup
* -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
*/
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
if (pipe->files)
mutex_lock_nested(&pipe->mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
/*
* pipe_lock() nests non-pipe inode locks (for writing to a file)
*/
pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
if (pipe->files)
mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}
static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
mutex_unlock(&pipe->mutex);
}
void pipe_double_lock(struct pipe_inode_info *pipe1,
struct pipe_inode_info *pipe2)
{
BUG_ON(pipe1 == pipe2);
if (pipe1 < pipe2) {
pipe_lock_nested(pipe1, I_MUTEX_PARENT);
pipe_lock_nested(pipe2, I_MUTEX_CHILD);
} else {
pipe_lock_nested(pipe2, I_MUTEX_PARENT);
pipe_lock_nested(pipe1, I_MUTEX_CHILD);
}
}
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
DEFINE_WAIT(wait);
/*
* Pipes are system-local resources, so sleeping on them
* is considered a noninteractive wait:
*/
prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
pipe_unlock(pipe);
schedule();
finish_wait(&pipe->wait, &wait);
pipe_lock(pipe);
}
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
int atomic)
{
unsigned long copy;
while (len > 0) {
while (!iov->iov_len)
iov++;
copy = min_t(unsigned long, len, iov->iov_len);
if (atomic) {
if (__copy_from_user_inatomic(to, iov->iov_base, copy))
return -EFAULT;
} else {
if (copy_from_user(to, iov->iov_base, copy))
return -EFAULT;
}
to += copy;
len -= copy;
iov->iov_base += copy;
iov->iov_len -= copy;
}
return 0;
}
static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
int atomic)
{
unsigned long copy;
while (len > 0) {
while (!iov->iov_len)
iov++;
copy = min_t(unsigned long, len, iov->iov_len);
if (atomic) {
if (__copy_to_user_inatomic(iov->iov_base, from, copy))
return -EFAULT;
} else {
if (copy_to_user(iov->iov_base, from, copy))
return -EFAULT;
}
from += copy;
len -= copy;
iov->iov_base += copy;
iov->iov_len -= copy;
}
return 0;
}
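/*
* Note that both copy helpers above advance the caller's iovec in place:
* iov_base and iov_len are updated as each chunk is copied. If a copy
* fails partway through, the segments copied so far have already been
* consumed, so a retry sees the partially-advanced iovec rather than the
* original one.
*/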
/*
* Attempt to pre-fault in the user memory, so we can use atomic copies.
* Returns the number of bytes not faulted in.
*/
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
while (!iov->iov_len)
iov++;
while (len > 0) {
unsigned long this_len;
this_len = min_t(unsigned long, len, iov->iov_len);
if (fault_in_pages_writeable(iov->iov_base, this_len))
break;
len -= this_len;
iov++;
}
return len;
}
/*
* Pre-fault in the user memory, so we can use atomic copies.
*/
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
while (!iov->iov_len)
iov++;
while (len > 0) {
unsigned long this_len;
this_len = min_t(unsigned long, len, iov->iov_len);
fault_in_pages_readable(iov->iov_base, this_len);
len -= this_len;
iov++;
}
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
/*
* If nobody else uses this page, and we don't already have a
* temporary page, let's keep track of it as a one-deep
* allocation cache. (Otherwise just release our reference to it)
*/
if (page_count(page) == 1 && !pipe->tmp_page)
pipe->tmp_page = page;
else
page_cache_release(page);
}
/**
* generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to attempt to steal
*
* Description:
* This function attempts to steal the &struct page attached to
* @buf. If successful, this function returns 0 and returns with
* the page locked. The caller may then reuse the page for whatever
* he wishes; the typical use is insertion into a different file
* page cache.
*/
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
/*
* A reference of one is golden, that means that the owner of this
* page is the only one holding a reference to it. Lock the page
* and return OK.
*/
if (page_count(page) == 1) {
lock_page(page);
return 0;
}
return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);
/**
* generic_pipe_buf_get - get a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
*
* Description:
* This function grabs an extra reference to @buf. It's used in
* the tee() system call, when we duplicate the buffers in one
* pipe into another.
*/
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
* generic_pipe_buf_confirm - verify contents of the pipe buffer
* @info: the pipe that the buffer belongs to
* @buf: the buffer to confirm
*
* Description:
* This function does nothing, because the generic pipe code uses
* pages that are always good when inserted into the pipe.
*/
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
struct pipe_buffer *buf)
{
return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);
/**
* generic_pipe_buf_release - put a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to put a reference to
*
* Description:
* This function releases a reference to @buf.
*/
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
.can_merge = 1,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static const struct pipe_buf_operations packet_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
unsigned long nr_segs, loff_t pos)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
int do_wakeup;
ssize_t ret;
struct iovec *iov = (struct iovec *)_iov;
size_t total_len;
total_len = iov_length(iov, nr_segs);
/* Null read succeeds. */
if (unlikely(total_len == 0))
return 0;
do_wakeup = 0;
ret = 0;
__pipe_lock(pipe);
for (;;) {
int bufs = pipe->nrbufs;
if (bufs) {
int curbuf = pipe->curbuf;
struct pipe_buffer *buf = pipe->bufs + curbuf;
const struct pipe_buf_operations *ops = buf->ops;
void *addr;
size_t chars = buf->len;
int error, atomic;
if (chars > total_len)
chars = total_len;
error = ops->confirm(pipe, buf);
if (error) {
if (!ret)
ret = error;
break;
}
atomic = !iov_fault_in_pages_write(iov, chars);
redo:
if (atomic)
addr = kmap_atomic(buf->page);
else
addr = kmap(buf->page);
error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
if (atomic)
kunmap_atomic(addr);
else
kunmap(buf->page);
if (unlikely(error)) {
/*
* Just retry with the slow path if we failed.
*/
if (atomic) {
atomic = 0;
goto redo;
}
if (!ret)
ret = error;
break;
}
ret += chars;
buf->offset += chars;
buf->len -= chars;
/* Was it a packet buffer? Clean up and exit */
if (buf->flags & PIPE_BUF_FLAG_PACKET) {
total_len = chars;
buf->len = 0;
}
if (!buf->len) {
buf->ops = NULL;
ops->release(pipe, buf);
curbuf = (curbuf + 1) & (pipe->buffers - 1);
pipe->curbuf = curbuf;
pipe->nrbufs = --bufs;
do_wakeup = 1;
}
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
}
if (bufs) /* More to do? */
continue;
if (!pipe->writers)
break;
if (!pipe->waiting_writers) {
/* syscall merging: Usually we must not sleep
* if O_NONBLOCK is set, or if we got some data.
* But if a writer sleeps in kernel space, then
* we can wait for that data without violating POSIX.
*/
if (ret)
break;
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
}
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
pipe_wait(pipe);
}
__pipe_unlock(pipe);
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
file_accessed(filp);
return ret;
}
static inline int is_packetized(struct file *file)
{
return (file->f_flags & O_DIRECT) != 0;
}
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
unsigned long nr_segs, loff_t ppos)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
ssize_t ret;
int do_wakeup;
struct iovec *iov = (struct iovec *)_iov;
size_t total_len;
ssize_t chars;
total_len = iov_length(iov, nr_segs);
/* Null write succeeds. */
if (unlikely(total_len == 0))
return 0;
do_wakeup = 0;
ret = 0;
__pipe_lock(pipe);
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
goto out;
}
/* We try to merge small writes */
chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
if (pipe->nrbufs && chars != 0) {
int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
(pipe->buffers - 1);
struct pipe_buffer *buf = pipe->bufs + lastbuf;
const struct pipe_buf_operations *ops = buf->ops;
int offset = buf->offset + buf->len;
if (ops->can_merge && offset + chars <= PAGE_SIZE) {
int error, atomic = 1;
void *addr;
error = ops->confirm(pipe, buf);
if (error)
goto out;
iov_fault_in_pages_read(iov, chars);
redo1:
if (atomic)
addr = kmap_atomic(buf->page);
else
addr = kmap(buf->page);
error = pipe_iov_copy_from_user(offset + addr, iov,
chars, atomic);
if (atomic)
kunmap_atomic(addr);
else
kunmap(buf->page);
ret = error;
do_wakeup = 1;
if (error) {
if (atomic) {
atomic = 0;
goto redo1;
}
goto out;
}
buf->len += chars;
total_len -= chars;
ret = chars;
if (!total_len)
goto out;
}
}
for (;;) {
int bufs;
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
bufs = pipe->nrbufs;
if (bufs < pipe->buffers) {
int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
struct pipe_buffer *buf = pipe->bufs + newbuf;
struct page *page = pipe->tmp_page;
char *src;
int error, atomic = 1;
if (!page) {
page = alloc_page(GFP_HIGHUSER);
if (unlikely(!page)) {
ret = ret ? : -ENOMEM;
break;
}
pipe->tmp_page = page;
}
/* Always wake up, even if the copy fails. Otherwise
* we lock up (O_NONBLOCK-)readers that sleep due to
* syscall merging.
* FIXME! Is this really true?
*/
do_wakeup = 1;
chars = PAGE_SIZE;
if (chars > total_len)
chars = total_len;
iov_fault_in_pages_read(iov, chars);
redo2:
if (atomic)
src = kmap_atomic(page);
else
src = kmap(page);
error = pipe_iov_copy_from_user(src, iov, chars,
atomic);
if (atomic)
kunmap_atomic(src);
else
kunmap(page);
if (unlikely(error)) {
if (atomic) {
atomic = 0;
goto redo2;
}
if (!ret)
ret = error;
break;
}
ret += chars;
/* Insert it into the buffer array */
buf->page = page;
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
buf->len = chars;
buf->flags = 0;
if (is_packetized(filp)) {
buf->ops = &packet_pipe_buf_ops;
buf->flags = PIPE_BUF_FLAG_PACKET;
}
pipe->nrbufs = ++bufs;
pipe->tmp_page = NULL;
total_len -= chars;
if (!total_len)
break;
}
if (bufs < pipe->buffers)
continue;
if (filp->f_flags & O_NONBLOCK) {
if (!ret)
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
out:
__pipe_unlock(pipe);
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
int err = file_update_time(filp);
if (err)
ret = err;
sb_end_write(file_inode(filp)->i_sb);
}
return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe = filp->private_data;
int count, buf, nrbufs;
switch (cmd) {
case FIONREAD:
__pipe_lock(pipe);
count = 0;
buf = pipe->curbuf;
nrbufs = pipe->nrbufs;
while (--nrbufs >= 0) {
count += pipe->bufs[buf].len;
buf = (buf+1) & (pipe->buffers - 1);
}
__pipe_unlock(pipe);
return put_user(count, (int __user *)arg);
default:
return -ENOIOCTLCMD;
}
}
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
unsigned int mask;
struct pipe_inode_info *pipe = filp->private_data;
int nrbufs;
poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= POLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
/*
* Most Unices do not set POLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
*/
if (!pipe->readers)
mask |= POLLERR;
}
return mask;
}
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
int kill = 0;
spin_lock(&inode->i_lock);
if (!--pipe->files) {
inode->i_pipe = NULL;
kill = 1;
}
spin_unlock(&inode->i_lock);
if (kill)
free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
struct pipe_inode_info *pipe = file->private_data;
__pipe_lock(pipe);
if (file->f_mode & FMODE_READ)
pipe->readers--;
if (file->f_mode & FMODE_WRITE)
pipe->writers--;
if (pipe->readers || pipe->writers) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
__pipe_unlock(pipe);
put_pipe_info(inode, pipe);
return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
struct pipe_inode_info *pipe = filp->private_data;
int retval = 0;
__pipe_lock(pipe);
if (filp->f_mode & FMODE_READ)
retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
if (retval < 0 && (filp->f_mode & FMODE_READ))
/* this can happen only if on == T */
fasync_helper(-1, filp, 0, &pipe->fasync_readers);
}
__pipe_unlock(pipe);
return retval;
}
struct pipe_inode_info *alloc_pipe_info(void)
{
struct pipe_inode_info *pipe;
pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
if (pipe) {
pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
if (pipe->bufs) {
init_waitqueue_head(&pipe->wait);
pipe->r_counter = pipe->w_counter = 1;
pipe->buffers = PIPE_DEF_BUFFERS;
mutex_init(&pipe->mutex);
return pipe;
}
kfree(pipe);
}
return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
int i;
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
buf->ops->release(pipe, buf);
}
if (pipe->tmp_page)
__free_page(pipe->tmp_page);
kfree(pipe->bufs);
kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;
/*
* pipefs_dname() is called from d_path().
*/
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
dentry->d_inode->i_ino);
}
static const struct dentry_operations pipefs_dentry_operations = {
.d_dname = pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
struct pipe_inode_info *pipe;
if (!inode)
goto fail_inode;
inode->i_ino = get_next_ino();
pipe = alloc_pipe_info();
if (!pipe)
goto fail_iput;
inode->i_pipe = pipe;
pipe->files = 2;
pipe->readers = pipe->writers = 1;
inode->i_fop = &pipefifo_fops;
/*
* Mark the inode dirty from the very beginning,
* that way it will never be moved to the dirty
* list because "mark_inode_dirty()" will think
* that it already _is_ on the dirty list.
*/
inode->i_state = I_DIRTY;
inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
return inode;
fail_iput:
iput(inode);
fail_inode:
return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
int err;
struct inode *inode = get_pipe_inode();
struct file *f;
struct path path;
static struct qstr name = { .name = "" };
if (!inode)
return -ENFILE;
err = -ENOMEM;
path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
if (!path.dentry)
goto err_inode;
path.mnt = mntget(pipe_mnt);
d_instantiate(path.dentry, inode);
err = -ENFILE;
f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
if (IS_ERR(f))
goto err_dentry;
f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
f->private_data = inode->i_pipe;
res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
if (IS_ERR(res[0]))
goto err_file;
path_get(&path);
res[0]->private_data = inode->i_pipe;
res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
res[1] = f;
return 0;
err_file:
put_filp(f);
err_dentry:
free_pipe_info(inode->i_pipe);
path_put(&path);
return err;
err_inode:
free_pipe_info(inode->i_pipe);
iput(inode);
return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
int error;
int fdw, fdr;
if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
return -EINVAL;
error = create_pipe_files(files, flags);
if (error)
return error;
error = get_unused_fd_flags(flags);
if (error < 0)
goto err_read_pipe;
fdr = error;
error = get_unused_fd_flags(flags);
if (error < 0)
goto err_fdr;
fdw = error;
audit_fd_pair(fdr, fdw);
fd[0] = fdr;
fd[1] = fdw;
return 0;
err_fdr:
put_unused_fd(fdr);
err_read_pipe:
fput(files[0]);
fput(files[1]);
return error;
}
int do_pipe_flags(int *fd, int flags)
{
struct file *files[2];
int error = __do_pipe_flags(fd, files, flags);
if (!error) {
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
}
return error;
}
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
struct file *files[2];
int fd[2];
int error;
error = __do_pipe_flags(fd, files, flags);
if (!error) {
if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
fput(files[0]);
fput(files[1]);
put_unused_fd(fd[0]);
put_unused_fd(fd[1]);
error = -EFAULT;
} else {
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
}
}
return error;
}
SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
return sys_pipe2(fildes, 0);
}
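/*
* Illustrative userspace sketch (not part of this file): pipe2(2) with
* O_DIRECT creates a "packetized" pipe, handled above by is_packetized()
* and the PIPE_BUF_FLAG_PACKET buffer flag: each write() becomes one
* packet and a read() consumes at most one packet.
*
*	#define _GNU_SOURCE
*	#include <fcntl.h>
*	#include <unistd.h>
*
*	int main(void)
*	{
*		int fds[2];
*		char buf[16];
*		if (pipe2(fds, O_DIRECT) != 0)
*			return 1;
*		write(fds[1], "ab", 2);
*		write(fds[1], "cd", 2);
*		// returns 2 ("ab"), not 4: packet boundaries are preserved
*		read(fds[0], buf, sizeof(buf));
*		close(fds[0]);
*		close(fds[1]);
*		return 0;
*	}
*/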
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
int cur = *cnt;
while (cur == *cnt) {
pipe_wait(pipe);
if (signal_pending(current))
break;
}
return cur == *cnt ? -ERESTARTSYS : 0;
}
static void wake_up_partner(struct pipe_inode_info *pipe)
{
wake_up_interruptible(&pipe->wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
struct pipe_inode_info *pipe;
bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
int ret;
filp->f_version = 0;
spin_lock(&inode->i_lock);
if (inode->i_pipe) {
pipe = inode->i_pipe;
pipe->files++;
spin_unlock(&inode->i_lock);
} else {
spin_unlock(&inode->i_lock);
pipe = alloc_pipe_info();
if (!pipe)
return -ENOMEM;
pipe->files = 1;
spin_lock(&inode->i_lock);
if (unlikely(inode->i_pipe)) {
inode->i_pipe->files++;
spin_unlock(&inode->i_lock);
free_pipe_info(pipe);
pipe = inode->i_pipe;
} else {
inode->i_pipe = pipe;
spin_unlock(&inode->i_lock);
}
}
filp->private_data = pipe;
/* OK, we have a pipe and it's pinned down */
__pipe_lock(pipe);
/* We can only do regular read/write on fifos */
filp->f_mode &= (FMODE_READ | FMODE_WRITE);
switch (filp->f_mode) {
case FMODE_READ:
/*
* O_RDONLY
* POSIX.1 says that O_NONBLOCK means return with the FIFO
* opened, even when there is no process writing the FIFO.
*/
pipe->r_counter++;
if (pipe->readers++ == 0)
wake_up_partner(pipe);
if (!is_pipe && !pipe->writers) {
if ((filp->f_flags & O_NONBLOCK)) {
/* suppress POLLHUP until we have
* seen a writer */
filp->f_version = pipe->w_counter;
} else {
if (wait_for_partner(pipe, &pipe->w_counter))
goto err_rd;
}
}
break;
case FMODE_WRITE:
/*
* O_WRONLY
* POSIX.1 says that O_NONBLOCK means return -1 with
* errno=ENXIO when there is no process reading the FIFO.
*/
ret = -ENXIO;
if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
goto err;
pipe->w_counter++;
if (!pipe->writers++)
wake_up_partner(pipe);
if (!is_pipe && !pipe->readers) {
if (wait_for_partner(pipe, &pipe->r_counter))
goto err_wr;
}
break;
case FMODE_READ | FMODE_WRITE:
/*
* O_RDWR
* POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
* This implementation will NEVER block on an O_RDWR open, since
* the process can at least talk to itself.
*/
pipe->readers++;
pipe->writers++;
pipe->r_counter++;
pipe->w_counter++;
if (pipe->readers == 1 || pipe->writers == 1)
wake_up_partner(pipe);
break;
default:
ret = -EINVAL;
goto err;
}
/* Ok! */
__pipe_unlock(pipe);
return 0;
err_rd:
if (!--pipe->readers)
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
err_wr:
if (!--pipe->writers)
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
err:
__pipe_unlock(pipe);
put_pipe_info(inode, pipe);
return ret;
}
const struct file_operations pipefifo_fops = {
.open = fifo_open,
.llseek = no_llseek,
.read = do_sync_read,
.aio_read = pipe_read,
.write = do_sync_write,
.aio_write = pipe_write,
.poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
};
/*
* Allocate a new array of pipe buffers and copy the info over. Returns the
* pipe size if successful, or -ERROR on error.
*/
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
struct pipe_buffer *bufs;
/*
* We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
* expect a lot of shrink+grow operations, just free and allocate
* again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
if (nr_pages < pipe->nrbufs)
return -EBUSY;
bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!bufs))
return -ENOMEM;
/*
* The pipe array wraps around, so just start the new one at zero
* and adjust the indexes.
*/
if (pipe->nrbufs) {
unsigned int tail;
unsigned int head;
tail = pipe->curbuf + pipe->nrbufs;
if (tail < pipe->buffers)
tail = 0;
else
tail &= (pipe->buffers - 1);
head = pipe->nrbufs - tail;
if (head)
memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
if (tail)
memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
}
pipe->curbuf = 0;
kfree(pipe->bufs);
pipe->bufs = bufs;
pipe->buffers = nr_pages;
return nr_pages * PAGE_SIZE;
}
/*
* Currently we rely on the pipe array holding a power-of-2 number
* of pages.
*/
static inline unsigned int round_pipe_size(unsigned int size)
{
unsigned long nr_pages;
nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
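/*
* Worked example for round_pipe_size(): with 4 KiB pages, a request of
* 100000 bytes needs (100000 + 4095) >> PAGE_SHIFT = 25 pages, which
* rounds up to the next power of two, 32 pages, so the function returns
* 32 << PAGE_SHIFT = 131072 bytes.
*/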
/*
* This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
* will return an error.
*/
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
if (ret < 0 || !write)
return ret;
pipe_max_size = round_pipe_size(pipe_max_size);
return ret;
}
/*
* After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
* location, so checking ->i_pipe is not enough to verify that this is a
* pipe.
*/
struct pipe_inode_info *get_pipe_info(struct file *file)
{
return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe;
long ret;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
__pipe_lock(pipe);
switch (cmd) {
case F_SETPIPE_SZ: {
unsigned int size, nr_pages;
size = round_pipe_size(arg);
nr_pages = size >> PAGE_SHIFT;
ret = -EINVAL;
if (!nr_pages)
goto out;
if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
ret = -EPERM;
goto out;
}
ret = pipe_set_size(pipe, nr_pages);
break;
}
case F_GETPIPE_SZ:
ret = pipe->buffers * PAGE_SIZE;
break;
default:
ret = -EINVAL;
break;
}
out:
__pipe_unlock(pipe);
return ret;
}
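/*
* Illustrative userspace sketch (not part of this file): resizing a pipe
* via the fcntl commands handled above. Because the kernel rounds the
* request up to a power-of-two number of pages, F_GETPIPE_SZ may report
* more than was asked for (see round_pipe_size() above).
*
*	#define _GNU_SOURCE
*	#include <fcntl.h>
*	#include <stdio.h>
*	#include <unistd.h>
*
*	int main(void)
*	{
*		int fds[2];
*		if (pipe(fds) != 0)
*			return 1;
*		if (fcntl(fds[1], F_SETPIPE_SZ, 100000) < 0)
*			perror("F_SETPIPE_SZ");
*		printf("pipe size: %d\n", fcntl(fds[1], F_GETPIPE_SZ));
*		close(fds[0]);
*		close(fds[1]);
*		return 0;
*	}
*/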
static const struct super_operations pipefs_ops = {
.destroy_inode = free_inode_nonrcu,
.statfs = simple_statfs,
};
/*
* pipefs should _never_ be mounted by userland - too much of a security hassle,
* no real gain from having the whole whorehouse mounted. So we don't need
* any operations on the root directory. However, we need a non-trivial
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
&pipefs_dentry_operations, PIPEFS_MAGIC);
}
static struct file_system_type pipe_fs_type = {
.name = "pipefs",
.mount = pipefs_mount,
.kill_sb = kill_anon_super,
};
static int __init init_pipe_fs(void)
{
int err = register_filesystem(&pipe_fs_type);
if (!err) {
pipe_mnt = kern_mount(&pipe_fs_type);
if (IS_ERR(pipe_mnt)) {
err = PTR_ERR(pipe_mnt);
unregister_filesystem(&pipe_fs_type);
}
}
return err;
}
fs_initcall(init_pipe_fs);
| ./CrossVul/dataset_final_sorted/CWE-17/c/bad_1498_0 |