idx
int64 | func
string | target
int64 |
|---|---|---|
230,611
|
// Motion-compensated luma prediction (HEVC luma sample interpolation
// process, spec section 8.5.3.2.2.1).
//
// Writes an nPbW x nPbH block of 16-bit intermediate prediction samples
// into 'out' (stride 'out_stride'), sampled from the reference picture
// 'ref' at the position given by the quarter-pel motion vector
// (mv_x, mv_y) relative to the prediction-unit origin (xP, yP).
// Samples outside the picture are produced by border replication.
void mc_luma(const base_context* ctx,
const seq_parameter_set* sps, int mv_x, int mv_y,
int xP,int yP,
int16_t* out, int out_stride,
const pixel_t* ref, int ref_stride,
int nPbW, int nPbH, int bitDepth_L)
{
// Split the motion vector into fractional (quarter-pel phase, 0..3)
// and integer parts; the integer part is added to the PU origin.
int xFracL = mv_x & 3;
int yFracL = mv_y & 3;
int xIntOffsL = xP + (mv_x>>2);
int yIntOffsL = yP + (mv_y>>2);
// luma sample interpolation process (8.5.3.2.2.1)
//const int shift1 = sps->BitDepth_Y-8;
//const int shift2 = 6;
// shift3 scales full-pel copies up to the 14-bit intermediate precision
// that the interpolation filters would otherwise produce.
const int shift3 = 14 - sps->BitDepth_Y;
int w = sps->pic_width_in_luma_samples;
int h = sps->pic_height_in_luma_samples;
// Scratch buffer for the accelerated qpel routine.
ALIGNED_16(int16_t) mcbuffer[MAX_CU_SIZE * (MAX_CU_SIZE+7)];
// Full-pel motion: no interpolation filter is needed, only a copy/scale.
if (xFracL==0 && yFracL==0) {
// Fast path: the whole block lies inside the picture.
if (xIntOffsL >= 0 && yIntOffsL >= 0 &&
nPbW+xIntOffsL <= w && nPbH+yIntOffsL <= h) {
ctx->acceleration.put_hevc_qpel(out, out_stride,
&ref[yIntOffsL*ref_stride + xIntOffsL],
ref_stride /* sizeof(pixel_t)*/,
nPbW,nPbH, mcbuffer, 0,0, bitDepth_L);
}
else {
// Slow path: clamp every sample coordinate to the picture border
// (edge-pixel replication) and scale each sample individually.
for (int y=0;y<nPbH;y++)
for (int x=0;x<nPbW;x++) {
int xA = Clip3(0,w-1,x + xIntOffsL);
int yA = Clip3(0,h-1,y + yIntOffsL);
out[y*out_stride+x] = ref[ xA + yA*ref_stride ] << shift3;
}
}
#ifdef DE265_LOG_TRACE
logtrace(LogMotion,"---MC luma %d %d = direct---\n",xFracL,yFracL);
for (int y=0;y<nPbH;y++) {
for (int x=0;x<nPbW;x++) {
int xA = Clip3(0,w-1,x + xIntOffsL);
int yA = Clip3(0,h-1,y + yIntOffsL);
logtrace(LogMotion,"%02x ", ref[ xA + yA*ref_stride ]);
}
logtrace(LogMotion,"\n");
}
logtrace(LogMotion," -> \n");
for (int y=0;y<nPbH;y++) {
for (int x=0;x<nPbW;x++) {
logtrace(LogMotion,"%02x ",out[y*out_stride+x] >> 6); // 6 will be used when summing predictions
}
logtrace(LogMotion,"\n");
}
#endif
}
else {
// Fractional motion: the interpolation filter reads extra samples
// around the block; how many depends on the fractional phase.
int extra_left = extra_before[xFracL];
int extra_right = extra_after [xFracL];
int extra_top = extra_before[yFracL];
int extra_bottom = extra_after [yFracL];
//int nPbW_extra = extra_left + nPbW + extra_right;
//int nPbH_extra = extra_top + nPbH + extra_bottom;
// Stack buffer used when the filter window crosses a picture border.
pixel_t padbuf[(MAX_CU_SIZE+16)*(MAX_CU_SIZE+7)];
const pixel_t* src_ptr;
int src_stride;
// If the whole filter window (block plus filter margins) lies inside
// the picture, filter directly from the reference frame.
if (-extra_left + xIntOffsL >= 0 &&
-extra_top + yIntOffsL >= 0 &&
nPbW+extra_right + xIntOffsL < w &&
nPbH+extra_bottom + yIntOffsL < h) {
src_ptr = &ref[xIntOffsL + yIntOffsL*ref_stride];
src_stride = ref_stride;
}
else {
// Otherwise build a border-replicated copy of the filter window
// in padbuf and filter from there.
for (int y=-extra_top;y<nPbH+extra_bottom;y++) {
for (int x=-extra_left;x<nPbW+extra_right;x++) {
int xA = Clip3(0,w-1,x + xIntOffsL);
int yA = Clip3(0,h-1,y + yIntOffsL);
padbuf[x+extra_left + (y+extra_top)*(MAX_CU_SIZE+16)] = ref[ xA + yA*ref_stride ];
}
}
src_ptr = &padbuf[extra_top*(MAX_CU_SIZE+16) + extra_left];
src_stride = MAX_CU_SIZE+16;
}
ctx->acceleration.put_hevc_qpel(out, out_stride,
src_ptr, src_stride /* sizeof(pixel_t) */,
nPbW,nPbH, mcbuffer, xFracL,yFracL, bitDepth_L);
logtrace(LogMotion,"---V---\n");
for (int y=0;y<nPbH;y++) {
for (int x=0;x<nPbW;x++) {
logtrace(LogMotion,"%04x ",out[x+y*out_stride]);
}
logtrace(LogMotion,"\n");
}
}
}
| 0
|
242,935
|
/*
 * Release every resource owned by an SSL transform and wipe its memory.
 *
 * Safe to call with transform == NULL (no-op). The zeroization is done
 * last so that no key material remains in the structure afterwards.
 */
void mbedtls_ssl_transform_free( mbedtls_ssl_transform *transform )
{
    if( transform == NULL )
        return;

#if defined(MBEDTLS_ZLIB_SUPPORT)
    /* Tear down record compression/decompression state. */
    deflateEnd( &transform->ctx_deflate );
    inflateEnd( &transform->ctx_inflate );
#endif

    /* Free the bulk cipher contexts for both directions. */
    mbedtls_cipher_free( &transform->cipher_ctx_enc );
    mbedtls_cipher_free( &transform->cipher_ctx_dec );

#if defined(MBEDTLS_SSL_SOME_MODES_USE_MAC)
    /* Free the MAC contexts for both directions. */
    mbedtls_md_free( &transform->md_ctx_enc );
    mbedtls_md_free( &transform->md_ctx_dec );
#endif

    /* Scrub the whole structure so secrets do not linger in memory. */
    mbedtls_platform_zeroize( transform, sizeof( *transform ) );
}
| 0
|
317,233
|
static int __inode_security_revalidate(struct inode *inode,
				       struct dentry *dentry,
				       bool may_sleep)
{
	struct inode_security_struct *isec = selinux_inode(inode);

	might_sleep_if(may_sleep);

	/* Nothing to do before the policy is loaded or once the label
	 * has reached its final state.
	 */
	if (!selinux_initialized(&selinux_state) ||
	    isec->initialized == LABEL_INITIALIZED)
		return 0;

	/* Reloading the label may block; a non-sleeping caller must
	 * drop out and retry in a context where sleeping is allowed.
	 */
	if (!may_sleep)
		return -ECHILD;

	/*
	 * Try reloading the inode security label. This will fail if
	 * @opt_dentry is NULL and no dentry for this inode can be
	 * found; in that case, continue using the old label.
	 */
	inode_doinit_with_dentry(inode, dentry);
	return 0;
}
| 0
|
361,750
|
int em28xx_tuner_callback(void *ptr, int component, int command, int arg)
{
struct em28xx_i2c_bus *i2c_bus = ptr;
struct em28xx *dev = i2c_bus->dev;
int rc = 0;
if (dev->tuner_type != TUNER_XC2028 && dev->tuner_type != TUNER_XC5000)
return 0;
if (command != XC2028_TUNER_RESET && command != XC5000_TUNER_RESET)
return 0;
rc = em28xx_gpio_set(dev, dev->board.tuner_gpio);
return rc;
}
| 0
|
336,683
|
SPICE_GNUC_VISIBLE void spice_server_set_seamless_migration(SpiceServer *reds, int enable)
{
    /* seamless migration is not supported with multiple clients */
    int effective = enable && !reds->allow_multiple_clients;

    reds->seamless_migration_enabled = effective;
    spice_debug("seamless migration enabled=%d", enable);
}
| 0
|
254,887
|
StringMap<boost::intrusive_ptr<Expression>> DocumentSourceGroup::getIdFields() const {
    // Unnamed case: the entire _id is a single expression.
    if (_idFieldNames.empty()) {
        invariant(_idExpressions.size() == 1);
        return {{"_id", _idExpressions[0]}};
    }

    // Named case: emit one "_id.<field>" entry per sub-expression.
    invariant(_idFieldNames.size() == _idExpressions.size());
    StringMap<boost::intrusive_ptr<Expression>> fields;
    for (std::size_t idx = 0; idx < _idFieldNames.size(); ++idx) {
        fields["_id." + _idFieldNames[idx]] = _idExpressions[idx];
    }
    return fields;
}
| 0
|
225,007
|
/*
 * parseServiceFile: scan a connection-service file for the section
 * "[<service>]" and copy its key=value settings into 'options',
 * without overriding any option the caller already set explicitly.
 *
 * *group_found is set to true if the requested section was seen.
 * Returns 0 on success (including "section not found"), 1 if the file
 * cannot be opened, 2 for an over-long line, and 3 for syntax or
 * lookup errors; details are appended to 'errorMessage'.
 */
parseServiceFile(const char *serviceFile,
const char *service,
PQconninfoOption *options,
PQExpBuffer errorMessage,
bool *group_found)
{
int result = 0,
linenr = 0,
i;
FILE *f;
char *line;
char buf[1024];
*group_found = false;
f = fopen(serviceFile, "r");
if (f == NULL)
{
appendPQExpBuffer(errorMessage, libpq_gettext("service file \"%s\" not found\n"),
serviceFile);
return 1;
}
/* Process the file line by line; 'line' points into 'buf'. */
while ((line = fgets(buf, sizeof(buf), f)) != NULL)
{
int len;
linenr++;
/* A full buffer means the line did not fit -- treat as an error. */
if (strlen(line) >= sizeof(buf) - 1)
{
appendPQExpBuffer(errorMessage,
libpq_gettext("line %d too long in service file \"%s\"\n"),
linenr,
serviceFile);
result = 2;
goto exit;
}
/* ignore whitespace at end of line, especially the newline */
len = strlen(line);
while (len > 0 && isspace((unsigned char) line[len - 1]))
line[--len] = '\0';
/* ignore leading whitespace too */
while (*line && isspace((unsigned char) line[0]))
line++;
/* ignore comments and empty lines */
if (line[0] == '\0' || line[0] == '#')
continue;
/* Check for right groupname */
if (line[0] == '[')
{
if (*group_found)
{
/* end of desired group reached; return success */
goto exit;
}
/* Match "[<service>]" exactly: the service name as a prefix
 * followed immediately by the closing bracket. */
if (strncmp(line + 1, service, strlen(service)) == 0 &&
line[strlen(service) + 1] == ']')
*group_found = true;
else
*group_found = false;
}
else
{
if (*group_found)
{
/*
 * Finally, we are in the right group and can parse the line
 */
char *key,
*val;
bool found_keyword;
#ifdef USE_LDAP
/* An "ldap..." line delegates lookup to LDAP. */
if (strncmp(line, "ldap", 4) == 0)
{
int rc = ldapServiceLookup(line, options, errorMessage);
/* if rc = 2, go on reading for fallback */
switch (rc)
{
case 0:
goto exit;
case 1:
case 3:
result = 3;
goto exit;
case 2:
continue;
}
}
#endif
/* Split "key=value" in place at the first '='. */
key = line;
val = strchr(line, '=');
if (val == NULL)
{
appendPQExpBuffer(errorMessage,
libpq_gettext("syntax error in service file \"%s\", line %d\n"),
serviceFile,
linenr);
result = 3;
goto exit;
}
*val++ = '\0';
/* Recursive service references are rejected. */
if (strcmp(key, "service") == 0)
{
appendPQExpBuffer(errorMessage,
libpq_gettext("nested service specifications not supported in service file \"%s\", line %d\n"),
serviceFile,
linenr);
result = 3;
goto exit;
}
/*
 * Set the parameter --- but don't override any previous
 * explicit setting.
 */
found_keyword = false;
for (i = 0; options[i].keyword; i++)
{
if (strcmp(options[i].keyword, key) == 0)
{
if (options[i].val == NULL)
options[i].val = strdup(val);
if (!options[i].val)
{
appendPQExpBufferStr(errorMessage,
libpq_gettext("out of memory\n"));
result = 3;
goto exit;
}
found_keyword = true;
break;
}
}
/* A key that matches no known option is a syntax error. */
if (!found_keyword)
{
appendPQExpBuffer(errorMessage,
libpq_gettext("syntax error in service file \"%s\", line %d\n"),
serviceFile,
linenr);
result = 3;
goto exit;
}
}
}
}
exit:
fclose(f);
return result;
}
| 0
|
269,941
|
/*
 * ReadMATImage: decode a MATLAB (.mat) file into an image list.
 *
 * Walks the object stream of the MAT file, decompressing zlib-packed
 * objects when available, and converts each miMATRIX object that has a
 * supported numeric class into one image (or several, for 4-D
 * animations with multiple frames).  Rows are read bottom-up and each
 * frame is rotated by 90 degrees at the end to obtain the final
 * orientation.  Empty placeholder images are removed before returning.
 *
 * NOTE(review): this parser relies heavily on gotos, a shadow image
 * (image2) for decompressed data, and exact read ordering; treat any
 * structural change with care.
 */
static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image *image, *image2=NULL,
*rotated_image;
register Quantum *q;
unsigned int status;
MATHeader MATLAB_HDR;
size_t size;
size_t CellType;
QuantumInfo *quantum_info;
ImageInfo *clone_info;
int i;
ssize_t ldblk;
unsigned char *BImgBuff = NULL;
double MinVal, MaxVal;
unsigned z, z2;
unsigned Frames;
int logging;
int sample_size;
MagickOffsetType filepos=0x80;
BlobInfo *blob;
size_t one;
/* Endian-dependent blob readers, selected from the file header below. */
unsigned int (*ReadBlobXXXLong)(Image *image);
unsigned short (*ReadBlobXXXShort)(Image *image);
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter");
/*
Open image file.
*/
image = AcquireImage(image_info,exception);
status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read MATLAB image.
*/
clone_info=CloneImageInfo(image_info);
/* 124-byte descriptive text, then version and 2-byte endian indicator. */
if(ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
MATLAB_HDR.Version = ReadBlobLSBShort(image);
if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
/* "IM" selects little-endian readers, "MI" big-endian ones. */
if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
{
ReadBlobXXXLong = ReadBlobLSBLong;
ReadBlobXXXShort = ReadBlobLSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
image->endian = LSBEndian;
}
else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
{
ReadBlobXXXLong = ReadBlobMSBLong;
ReadBlobXXXShort = ReadBlobMSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
image->endian = MSBEndian;
}
else
goto MATLAB_KO; /* unsupported endian */
if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
MATLAB_KO: ThrowReaderException(CorruptImageError,"ImproperImageHeader");
filepos = TellBlob(image);
while(!EOFBlob(image)) /* object parser loop */
{
Frames = 1;
(void) SeekBlob(image,filepos,SEEK_SET);
/* printf("pos=%X\n",TellBlob(image)); */
MATLAB_HDR.DataType = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
/* Advance file position past this object (type + size + payload). */
filepos += MATLAB_HDR.ObjectSize + 4 + 4;
image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
/* Compressed objects are inflated into a temporary shadow image. */
if(MATLAB_HDR.DataType == miCOMPRESSED)
{
image2 = DecompressBlock(image,MATLAB_HDR.ObjectSize,clone_info,exception);
if(image2==NULL) continue;
MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
}
#endif
if(MATLAB_HDR.DataType!=miMATRIX) continue; /* skip another objects. */
/* Matrix header: flags, structure class, dimensions. */
MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;
MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
if(image!=image2)
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ?? */
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2);
MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeX = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeY = ReadBlobXXXLong(image2);
/* DimFlag encodes the matrix rank: 8 = 2-D, 12 = 3-D, 16 = 4-D. */
switch(MATLAB_HDR.DimFlag)
{
case 8: z2=z=1; break; /* 2D matrix*/
case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/
(void) ReadBlobXXXLong(image2);
if(z!=3) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
break;
case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */
if(z!=3 && z!=1)
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
Frames = ReadBlobXXXLong(image2);
break;
default: ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
}
MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2);
MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2);
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass);
/* Only plain numeric classes are supported. */
if (MATLAB_HDR.StructureClass != mxCHAR_CLASS &&
MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */
MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */
MATLAB_HDR.StructureClass != mxINT8_CLASS &&
MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */
MATLAB_HDR.StructureClass != mxINT16_CLASS &&
MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */
MATLAB_HDR.StructureClass != mxINT32_CLASS &&
MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */
MATLAB_HDR.StructureClass != mxINT64_CLASS &&
MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */
ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix");
/* Skip (or read) the object name, depending on the name flag. */
switch (MATLAB_HDR.NameFlag)
{
case 0:
size = ReadBlobXXXLong(image2); /* Object name string size */
size = 4 * (ssize_t) ((size + 3 + 1) / 4);
(void) SeekBlob(image2, size, SEEK_CUR);
break;
case 1:
case 2:
case 3:
case 4:
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */
break;
default:
goto MATLAB_KO;
}
CellType = ReadBlobXXXLong(image2); /* Additional object type */
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.CellType: %.20g",(double) CellType);
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* data size */
NEXT_FRAME:
/* Map the cell element type to sample depth and bytes per row (ldblk). */
switch (CellType)
{
case miINT8:
case miUINT8:
sample_size = 8;
if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL)
image->depth = 1;
else
image->depth = 8; /* Byte type cell */
ldblk = (ssize_t) MATLAB_HDR.SizeX;
break;
case miINT16:
case miUINT16:
sample_size = 16;
image->depth = 16; /* Word type cell */
ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX);
break;
case miINT32:
case miUINT32:
sample_size = 32;
image->depth = 32; /* Dword type cell */
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miINT64:
case miUINT64:
sample_size = 64;
image->depth = 64; /* Qword type cell */
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
case miSINGLE:
sample_size = 32;
image->depth = 32; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex float type cell */
}
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miDOUBLE:
sample_size = 64;
image->depth = 64; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
DisableMSCWarning(4127)
if (sizeof(double) != 8)
RestoreMSCWarning
ThrowReaderException(CoderError, "IncompatibleSizeOfDouble");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex double type cell */
}
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
default:
ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix");
}
(void) sample_size;
/* Note: columns/rows are swapped back after the 90-degree rotation. */
image->columns = MATLAB_HDR.SizeX;
image->rows = MATLAB_HDR.SizeY;
quantum_info=AcquireQuantumInfo(clone_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
one=1;
image->colors = one << image->depth;
if (image->columns == 0 || image->rows == 0)
goto MATLAB_KO;
/* Image is gray when no complex flag is set and 2D Matrix */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
{
image->type=GrayscaleType;
SetImageColorspace(image,GRAYColorspace,exception);
}
/*
If ping is true, then only set image size and colors without
reading any image data.
*/
if (image_info->ping)
{
size_t temp = image->columns;
image->columns = image->rows;
image->rows = temp;
goto done_reading; /* !!!!!! BAD !!!! */
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/* ----- Load raster data ----- */
BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */
if (BImgBuff == NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
MinVal = 0;
MaxVal = 0;
if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum);
}
/* Main loop for reading all scanlines */
if(z==1) z=0; /* read grey scanlines */
/* else read color scanlines */
do
{
/* Rows are stored bottom-up relative to the final orientation. */
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto done_reading; /* Skip image rotation, when cannot set image pixels */
}
if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL))
{
FixLogical((unsigned char *)BImgBuff,ldblk);
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
{
ImportQuantumPixelsFailed:
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
break;
}
}
else
{
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
goto ImportQuantumPixelsFailed;
if (z<=1 && /* fix only during a last pass z==0 || z==1 */
(CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64))
FixSignedValues(image,q,MATLAB_HDR.SizeX);
}
if (!SyncAuthenticPixels(image,exception))
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
}
} while(z-- >= 2);
quantum_info=DestroyQuantumInfo(quantum_info);
ExitLoop:
/* Read complex part of numbers here */
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* Find Min and Max Values for complex parts of floats */
CellType = ReadBlobXXXLong(image2); /* Additional object type */
i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/
if (CellType==miDOUBLE || CellType==miSINGLE)
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal);
}
if (CellType==miDOUBLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff);
InsertComplexDoubleRow(image, (double *)BImgBuff, i, MinVal, MaxVal,
exception);
}
if (CellType==miSINGLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff);
InsertComplexFloatRow(image,(float *)BImgBuff,i,MinVal,MaxVal,
exception);
}
}
/* Image is gray when no complex flag is set and 2D Matrix AGAIN!!! */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
image->type=GrayscaleType;
if (image->depth == 1)
image->type=BilevelType;
if(image2==image)
image2 = NULL; /* Remove shadow copy to an image before rotation. */
/* Rotate image. */
rotated_image = RotateImage(image, 90.0, exception);
if (rotated_image != (Image *) NULL)
{
/* Remove page offsets added by RotateImage */
rotated_image->page.x=0;
rotated_image->page.y=0;
/* Swap the blob between the original and the rotated image so the
rotated one keeps reading from the input stream. */
blob = rotated_image->blob;
rotated_image->blob = image->blob;
rotated_image->colors = image->colors;
image->blob = blob;
AppendImageToList(&image,rotated_image);
DeleteImageFromList(&image);
}
done_reading:
/* Drop the temporary decompressed image and its backing file, if any. */
if(image2!=NULL)
if(image2!=image)
{
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
if (image->next == (Image *) NULL) break;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
/* row scan buffer is no longer needed */
RelinquishMagickMemory(BImgBuff);
BImgBuff = NULL;
/* More frames of a 4-D animation: reuse the same matrix header. */
if(--Frames>0)
{
z = z2;
if(image2==NULL) image2 = image;
goto NEXT_FRAME;
}
if ((image2!=NULL) && (image2!=image)) /* Does shadow temporary decompressed image exist? */
{
/* CloseBlob(image2); */
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
}
clone_info=DestroyImageInfo(clone_info);
RelinquishMagickMemory(BImgBuff);
CloseBlob(image);
{
Image *p;
ssize_t scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=scene++;
}
if(clone_info != NULL) /* cleanup garbage file from compression */
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
DestroyImageInfo(clone_info);
clone_info = NULL;
}
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return");
if(image==NULL)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
return (image);
}
| 0
|
225,810
|
/* Destructor for a generic sample entry box. */
void gen_sample_entry_box_del(GF_Box *s)
{
	GF_SampleEntryBox *entry = (GF_SampleEntryBox *)s;

	if (!entry)
		return;

	/* Release the fields common to all sample entries, then the box. */
	gf_isom_sample_entry_predestroy(entry);
	gf_free(entry);
}
| 0
|
196,885
|
// Computes the requantization range for a per-channel quantized qint32
// tensor. For each channel (the last input dimension) the maximum
// absolute raw value is scaled by the calibrated float range
// [input_min[i], input_max[i]]; the per-channel ranges are then
// max-reduced into a single output range, clipped to clip_value_max_
// (e.g. 6.0 to support relu6).
//
// Inputs:
//   kInputTensorIndex - qint32 tensor whose last dimension is the depth
//   kInputMinIndex    - float vector of per-channel minima, size = depth
//   kInputMaxIndex    - float vector of per-channel maxima, size = depth
// Outputs:
//   kOutputMinIndex   - scalar float (0 when all raw values are >= 0)
//   kOutputMaxIndex   - scalar float
void Compute(OpKernelContext* ctx) override {
  const Tensor& input = ctx->input(kInputTensorIndex);
  const Tensor& input_min = ctx->input(kInputMinIndex);
  const Tensor& input_max = ctx->input(kInputMaxIndex);
  const size_t depth = input_max.NumElements();
  OP_REQUIRES(
      ctx, input_min.dim_size(0) == depth,
      errors::InvalidArgument("input_min has incorrect size, expected ",
                              depth, " was ", input_min.dim_size(0)));
  OP_REQUIRES(
      ctx, input_max.dim_size(0) == depth,
      errors::InvalidArgument("input_max has incorrect size, expected ",
                              depth, " was ", input_max.dim_size(0)));
  // BUGFIX: validate the input tensor's shape against `depth` before
  // indexing channels. Without these checks, a `depth` larger than the
  // input's channel dimension makes transposed_input.chip<0>(i) read
  // past the end of the buffer (heap out-of-bounds read).
  OP_REQUIRES(ctx, input.dims() >= 1,
              errors::InvalidArgument(
                  "input must be at least rank 1, got rank ", input.dims()));
  OP_REQUIRES(
      ctx, input.dim_size(input.dims() - 1) == depth,
      errors::InvalidArgument(
          "input's last dimension must match the number of channels (",
          depth, "), was ", input.dim_size(input.dims() - 1)));
  const float* input_min_data = input_min.flat<float>().data();
  const float* input_max_data = input_max.flat<float>().data();
  std::vector<float> ranges(depth);
  bool is_non_negative = true;
  Eigen::array<int, 2> shuffling({1, 0});
  auto input_matrix = input.flat_inner_dims<qint32>();
  // TODO: verify performance of not transposing and finding the min max
  // directly from input_matrix vs the one presented below of transposing and
  // using the transposed matrix as the transposing operation in itself might
  // be more costly.
  // Note that this operation is a calibration step for quantization and will
  // cease to exist in the final inference graph(will exist as a const node).
  auto transposed_input = input_matrix.shuffle(shuffling);
  // Find the ranges of each channel in parallel.
  float out_min_max = std::numeric_limits<float>::min();
#ifdef ENABLE_ONEDNN_OPENMP
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for reduction(max : out_min_max)
#endif
#endif  // ENABLE_ONEDNN_OPENMP
  // TODO: Add eigen parallel_for
  // Cast depth once so the loop comparison is between signed types.
  for (int64_t i = 0; i < static_cast<int64_t>(depth); ++i) {
    Eigen::Tensor<qint32, 0, Eigen::RowMajor> min =
        transposed_input.chip<0>(i).minimum();
    Eigen::Tensor<qint32, 0, Eigen::RowMajor> max =
        transposed_input.chip<0>(i).maximum();
    const int32_t min_per_channel = min();
    const int32_t max_per_channel = max();
    const int32_t abs_max =
        std::max(std::abs(min_per_channel), std::abs(max_per_channel));
    float scale =
        std::max(std::abs(input_min_data[i]), std::abs(input_max_data[i]));
    ranges[i] =
        scale * static_cast<float>(abs_max) / static_cast<float>(1L << 31);
    if (min_per_channel < 0) is_non_negative = false;
    // Thread-local out_min_max.
    out_min_max = std::max(out_min_max, ranges[i]);
  }
  // All local out_min_max gets max-reduced into one global out_min_max at
  // the end of the loop by specifying reduction(max:out_min_max) along with
  // omp parallel for.
  // Fixing max to clip_value_max_ (example 6.0 to support relu6)
  if (out_min_max > clip_value_max_) out_min_max = clip_value_max_;
  Tensor* output_min = nullptr;
  Tensor* output_max = nullptr;
  OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMinIndex, {}, &output_min));
  OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMaxIndex, {}, &output_max));
  output_min->flat<float>()(0) = is_non_negative ? 0.0f : -out_min_max;
  output_max->flat<float>()(0) = out_min_max;
}
| 1
|
424,534
|
/* Map a TSMM presentation command code to a short printable name. */
static const char* video_command_name(BYTE cmd)
{
	if (cmd == TSMM_START_PRESENTATION)
		return "start";

	if (cmd == TSMM_STOP_PRESENTATION)
		return "stop";

	return "<unknown>";
}
| 0
|
446,091
|
static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	struct atusb *lp = hw->priv;
	int rssi_base_val;
	int ret;

	/* Channel 0 selects SUB_MODE 0, any other channel SUB_MODE 1. */
	ret = atusb_write_subreg(lp, SR_SUB_MODE, channel == 0 ? 0 : 1);
	if (ret < 0)
		return ret;

	/* Page selects the modulation: 0 = BPSK, otherwise O-QPSK. */
	if (page == 0) {
		ret = atusb_write_subreg(lp, SR_BPSK_QPSK, 0);
		rssi_base_val = -100;
	} else {
		ret = atusb_write_subreg(lp, SR_BPSK_QPSK, 1);
		rssi_base_val = -98;
	}
	if (ret < 0)
		return ret;

	ret = hulusb_set_cca_ed_level(lp, rssi_base_val);
	if (ret < 0)
		return ret;

	/* This sets the symbol_duration according frequency on the 212.
	 * TODO move this handling while set channel and page in cfg802154.
	 * We can do that, this timings are according 802.15.4 standard.
	 * If we do that in cfg802154, this is a more generic calculation.
	 *
	 * This should also protected from ifs_timer. Means cancel timer and
	 * init with a new value. For now, this is okay.
	 */
	if (channel == 0)
		/* BPSK-20 (page 0) or BPSK-40 */
		lp->hw->phy->symbol_duration = page == 0 ? 50 : 25;
	else
		/* OQPSK-100/200/400 (page 0) or OQPSK-250/500/1000 */
		lp->hw->phy->symbol_duration = page == 0 ? 40 : 16;

	lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD *
				   lp->hw->phy->symbol_duration;
	lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD *
				   lp->hw->phy->symbol_duration;

	return atusb_write_subreg(lp, SR_CHANNEL, channel);
}
| 0
|
279,902
|
/*
 * ":oldfiles" command: list the entries of v:oldfiles, numbered, one
 * per line.  With FEAT_BROWSE_CMD and ":browse oldfiles", afterwards
 * prompt for a number and edit the corresponding file.
 */
ex_oldfiles(exarg_T *eap UNUSED)
{
list_T *l = get_vim_var_list(VV_OLDFILES);
listitem_T *li;
int nr = 0;
char_u *fname;
if (l == NULL)
msg(_("No old files"));
else
{
msg_start();
msg_scroll = TRUE;
// Print each entry, honoring :filter and stopping on interrupt.
for (li = l->lv_first; li != NULL && !got_int; li = li->li_next)
{
++nr;
fname = tv_get_string(&li->li_tv);
if (!message_filtered(fname))
{
msg_outnum((long)nr);
msg_puts(": ");
msg_outtrans(fname);
msg_clr_eos();
msg_putchar('\n');
out_flush(); // output one line at a time
ui_breakcheck();
}
}
// Assume "got_int" was set to truncate the listing.
got_int = FALSE;
# ifdef FEAT_BROWSE_CMD
if (cmdmod.cmod_flags & CMOD_BROWSE)
{
quit_more = FALSE;
// Ask which entry to open; 0 or invalid input cancels.
nr = prompt_for_number(FALSE);
msg_starthere();
if (nr > 0)
{
char_u *p = list_find_str(get_vim_var_list(VV_OLDFILES),
(long)nr);
if (p != NULL)
{
// Expand environment variables, then re-dispatch as ":edit".
p = expand_env_save(p);
eap->arg = p;
eap->cmdidx = CMD_edit;
cmdmod.cmod_flags &= ~CMOD_BROWSE;
do_exedit(eap, NULL);
vim_free(p);
}
}
}
# endif
}
}
| 0
|
234,243
|
process_debug_info (struct dwarf_section * section,
void *file,
enum dwarf_section_display_enum abbrev_sec,
bool do_loc,
bool do_types)
{
unsigned char *start = section->start;
unsigned char *end = start + section->size;
unsigned char *section_begin;
unsigned int unit;
unsigned int num_units = 0;
/* First scan the section to get the number of comp units.
Length sanity checks are done here. */
for (section_begin = start, num_units = 0; section_begin < end;
num_units ++)
{
dwarf_vma length;
/* Read the first 4 bytes. For a 32-bit DWARF section, this
will be the length. For a 64-bit DWARF section, it'll be
the escape code 0xffffffff followed by an 8 byte length. */
SAFE_BYTE_GET_AND_INC (length, section_begin, 4, end);
if (length == 0xffffffff)
SAFE_BYTE_GET_AND_INC (length, section_begin, 8, end);
else if (length >= 0xfffffff0 && length < 0xffffffff)
{
warn (_("Reserved length value (0x%s) found in section %s\n"),
dwarf_vmatoa ("x", length), section->name);
return false;
}
/* Negative values are illegal, they may even cause infinite
looping. This can happen if we can't accurately apply
relocations to an object file, or if the file is corrupt. */
if (length > (size_t) (end - section_begin))
{
warn (_("Corrupt unit length (got 0x%s expected at most 0x%s) in section %s\n"),
dwarf_vmatoa ("x", length),
dwarf_vmatoa ("x", end - section_begin),
section->name);
return false;
}
section_begin += length;
}
if (num_units == 0)
{
error (_("No comp units in %s section ?\n"), section->name);
return false;
}
if ((do_loc || do_debug_loc || do_debug_ranges || do_debug_info)
&& num_debug_info_entries == 0
&& ! do_types)
{
/* Then allocate an array to hold the information. */
debug_information = (debug_info *) cmalloc (num_units,
sizeof (* debug_information));
if (debug_information == NULL)
{
error (_("Not enough memory for a debug info array of %u entries\n"),
num_units);
alloc_num_debug_info_entries = num_debug_info_entries = 0;
return false;
}
/* PR 17531: file: 92ca3797.
We cannot rely upon the debug_information array being initialised
before it is used. A corrupt file could easily contain references
to a unit for which information has not been made available. So
we ensure that the array is zeroed here. */
memset (debug_information, 0, num_units * sizeof (*debug_information));
alloc_num_debug_info_entries = num_units;
}
if (!do_loc)
{
load_debug_section_with_follow (str, file);
load_debug_section_with_follow (line_str, file);
load_debug_section_with_follow (str_dwo, file);
load_debug_section_with_follow (str_index, file);
load_debug_section_with_follow (str_index_dwo, file);
load_debug_section_with_follow (debug_addr, file);
}
load_debug_section_with_follow (abbrev_sec, file);
load_debug_section_with_follow (loclists, file);
load_debug_section_with_follow (rnglists, file);
if (debug_displays [abbrev_sec].section.start == NULL)
{
warn (_("Unable to locate %s section!\n"),
debug_displays [abbrev_sec].section.uncompressed_name);
return false;
}
if (!do_loc && dwarf_start_die == 0)
introduce (section, false);
free_all_abbrevs ();
free (cu_abbrev_map);
cu_abbrev_map = NULL;
next_free_abbrev_map_entry = 0;
/* In order to be able to resolve DW_FORM_ref_attr forms we need
to load *all* of the abbrevs for all CUs in this .debug_info
section. This does effectively mean that we (partially) read
every CU header twice. */
for (section_begin = start; start < end;)
{
DWARF2_Internal_CompUnit compunit;
unsigned char * hdrptr;
dwarf_vma abbrev_base;
size_t abbrev_size;
dwarf_vma cu_offset;
unsigned int offset_size;
struct cu_tu_set * this_set;
abbrev_list * list;
unsigned char *end_cu;
hdrptr = start;
cu_offset = start - section_begin;
SAFE_BYTE_GET_AND_INC (compunit.cu_length, hdrptr, 4, end);
if (compunit.cu_length == 0xffffffff)
{
SAFE_BYTE_GET_AND_INC (compunit.cu_length, hdrptr, 8, end);
offset_size = 8;
}
else
offset_size = 4;
end_cu = hdrptr + compunit.cu_length;
SAFE_BYTE_GET_AND_INC (compunit.cu_version, hdrptr, 2, end_cu);
this_set = find_cu_tu_set_v2 (cu_offset, do_types);
if (compunit.cu_version < 5)
{
compunit.cu_unit_type = DW_UT_compile;
/* Initialize it due to a false compiler warning. */
compunit.cu_pointer_size = -1;
}
else
{
SAFE_BYTE_GET_AND_INC (compunit.cu_unit_type, hdrptr, 1, end_cu);
do_types = (compunit.cu_unit_type == DW_UT_type);
SAFE_BYTE_GET_AND_INC (compunit.cu_pointer_size, hdrptr, 1, end_cu);
}
SAFE_BYTE_GET_AND_INC (compunit.cu_abbrev_offset, hdrptr, offset_size,
end_cu);
if (compunit.cu_unit_type == DW_UT_split_compile
|| compunit.cu_unit_type == DW_UT_skeleton)
{
uint64_t dwo_id;
SAFE_BYTE_GET_AND_INC (dwo_id, hdrptr, 8, end_cu);
}
if (this_set == NULL)
{
abbrev_base = 0;
abbrev_size = debug_displays [abbrev_sec].section.size;
}
else
{
abbrev_base = this_set->section_offsets [DW_SECT_ABBREV];
abbrev_size = this_set->section_sizes [DW_SECT_ABBREV];
}
list = find_abbrev_list_by_abbrev_offset (abbrev_base,
compunit.cu_abbrev_offset);
if (list == NULL)
{
unsigned char * next;
list = new_abbrev_list (abbrev_base,
compunit.cu_abbrev_offset);
next = process_abbrev_set (&debug_displays[abbrev_sec].section,
abbrev_base, abbrev_size,
compunit.cu_abbrev_offset, list);
list->start_of_next_abbrevs = next;
}
start = end_cu;
record_abbrev_list_for_cu (cu_offset, start - section_begin, list);
}
for (start = section_begin, unit = 0; start < end; unit++)
{
DWARF2_Internal_CompUnit compunit;
unsigned char *hdrptr;
unsigned char *tags;
int level, last_level, saved_level;
dwarf_vma cu_offset;
unsigned int offset_size;
dwarf_vma signature = 0;
dwarf_vma type_offset = 0;
struct cu_tu_set *this_set;
dwarf_vma abbrev_base;
size_t abbrev_size;
abbrev_list * list = NULL;
unsigned char *end_cu;
hdrptr = start;
cu_offset = start - section_begin;
SAFE_BYTE_GET_AND_INC (compunit.cu_length, hdrptr, 4, end);
if (compunit.cu_length == 0xffffffff)
{
SAFE_BYTE_GET_AND_INC (compunit.cu_length, hdrptr, 8, end);
offset_size = 8;
}
else
offset_size = 4;
end_cu = hdrptr + compunit.cu_length;
SAFE_BYTE_GET_AND_INC (compunit.cu_version, hdrptr, 2, end_cu);
this_set = find_cu_tu_set_v2 (cu_offset, do_types);
if (compunit.cu_version < 5)
{
compunit.cu_unit_type = DW_UT_compile;
/* Initialize it due to a false compiler warning. */
compunit.cu_pointer_size = -1;
}
else
{
SAFE_BYTE_GET_AND_INC (compunit.cu_unit_type, hdrptr, 1, end_cu);
do_types = (compunit.cu_unit_type == DW_UT_type);
SAFE_BYTE_GET_AND_INC (compunit.cu_pointer_size, hdrptr, 1, end_cu);
}
SAFE_BYTE_GET_AND_INC (compunit.cu_abbrev_offset, hdrptr, offset_size, end_cu);
if (this_set == NULL)
{
abbrev_base = 0;
abbrev_size = debug_displays [abbrev_sec].section.size;
}
else
{
abbrev_base = this_set->section_offsets [DW_SECT_ABBREV];
abbrev_size = this_set->section_sizes [DW_SECT_ABBREV];
}
if (compunit.cu_version < 5)
SAFE_BYTE_GET_AND_INC (compunit.cu_pointer_size, hdrptr, 1, end_cu);
bool do_dwo_id = false;
uint64_t dwo_id = 0;
if (compunit.cu_unit_type == DW_UT_split_compile
|| compunit.cu_unit_type == DW_UT_skeleton)
{
SAFE_BYTE_GET_AND_INC (dwo_id, hdrptr, 8, end_cu);
do_dwo_id = true;
}
/* PR 17512: file: 001-108546-0.001:0.1. */
if (compunit.cu_pointer_size < 2 || compunit.cu_pointer_size > 8)
{
warn (_("Invalid pointer size (%d) in compunit header, using %d instead\n"),
compunit.cu_pointer_size, offset_size);
compunit.cu_pointer_size = offset_size;
}
if (do_types)
{
SAFE_BYTE_GET_AND_INC (signature, hdrptr, 8, end_cu);
SAFE_BYTE_GET_AND_INC (type_offset, hdrptr, offset_size, end_cu);
}
if (dwarf_start_die >= (size_t) (end_cu - section_begin))
{
start = end_cu;
continue;
}
if ((do_loc || do_debug_loc || do_debug_ranges || do_debug_info)
&& num_debug_info_entries == 0
&& alloc_num_debug_info_entries > unit
&& ! do_types)
{
debug_information [unit].cu_offset = cu_offset;
debug_information [unit].pointer_size
= compunit.cu_pointer_size;
debug_information [unit].offset_size = offset_size;
debug_information [unit].dwarf_version = compunit.cu_version;
debug_information [unit].base_address = 0;
debug_information [unit].addr_base = DEBUG_INFO_UNAVAILABLE;
debug_information [unit].ranges_base = DEBUG_INFO_UNAVAILABLE;
debug_information [unit].loc_offsets = NULL;
debug_information [unit].have_frame_base = NULL;
debug_information [unit].max_loc_offsets = 0;
debug_information [unit].num_loc_offsets = 0;
debug_information [unit].loclists_base = 0;
debug_information [unit].range_lists = NULL;
debug_information [unit].max_range_lists= 0;
debug_information [unit].num_range_lists = 0;
debug_information [unit].rnglists_base = 0;
debug_information [unit].str_offsets_base = 0;
}
if (!do_loc && dwarf_start_die == 0)
{
printf (_(" Compilation Unit @ offset 0x%s:\n"),
dwarf_vmatoa ("x", cu_offset));
printf (_(" Length: 0x%s (%s)\n"),
dwarf_vmatoa ("x", compunit.cu_length),
offset_size == 8 ? "64-bit" : "32-bit");
printf (_(" Version: %d\n"), compunit.cu_version);
if (compunit.cu_version >= 5)
{
const char *name = get_DW_UT_name (compunit.cu_unit_type);
printf (_(" Unit Type: %s (%x)\n"),
name ? name : "???",
compunit.cu_unit_type);
}
printf (_(" Abbrev Offset: 0x%s\n"),
dwarf_vmatoa ("x", compunit.cu_abbrev_offset));
printf (_(" Pointer Size: %d\n"), compunit.cu_pointer_size);
if (do_types)
{
printf (_(" Signature: 0x%s\n"),
dwarf_vmatoa ("x", signature));
printf (_(" Type Offset: 0x%s\n"),
dwarf_vmatoa ("x", type_offset));
}
if (do_dwo_id)
printf (_(" DWO ID: 0x%s\n"), dwarf_vmatoa ("x", dwo_id));
if (this_set != NULL)
{
dwarf_vma *offsets = this_set->section_offsets;
size_t *sizes = this_set->section_sizes;
printf (_(" Section contributions:\n"));
printf (_(" .debug_abbrev.dwo: 0x%s 0x%s\n"),
dwarf_vmatoa ("x", offsets [DW_SECT_ABBREV]),
dwarf_vmatoa ("x", sizes [DW_SECT_ABBREV]));
printf (_(" .debug_line.dwo: 0x%s 0x%s\n"),
dwarf_vmatoa ("x", offsets [DW_SECT_LINE]),
dwarf_vmatoa ("x", sizes [DW_SECT_LINE]));
printf (_(" .debug_loc.dwo: 0x%s 0x%s\n"),
dwarf_vmatoa ("x", offsets [DW_SECT_LOC]),
dwarf_vmatoa ("x", sizes [DW_SECT_LOC]));
printf (_(" .debug_str_offsets.dwo: 0x%s 0x%s\n"),
dwarf_vmatoa ("x", offsets [DW_SECT_STR_OFFSETS]),
dwarf_vmatoa ("x", sizes [DW_SECT_STR_OFFSETS]));
}
}
tags = hdrptr;
start = end_cu;
if (compunit.cu_version < 2 || compunit.cu_version > 5)
{
warn (_("CU at offset %s contains corrupt or "
"unsupported version number: %d.\n"),
dwarf_vmatoa ("x", cu_offset), compunit.cu_version);
continue;
}
if (compunit.cu_unit_type != DW_UT_compile
&& compunit.cu_unit_type != DW_UT_partial
&& compunit.cu_unit_type != DW_UT_type
&& compunit.cu_unit_type != DW_UT_split_compile
&& compunit.cu_unit_type != DW_UT_skeleton)
{
warn (_("CU at offset %s contains corrupt or "
"unsupported unit type: %d.\n"),
dwarf_vmatoa ("x", cu_offset), compunit.cu_unit_type);
continue;
}
/* Process the abbrevs used by this compilation unit. */
list = find_abbrev_list_by_abbrev_offset (abbrev_base,
compunit.cu_abbrev_offset);
if (list == NULL)
{
unsigned char *next;
list = new_abbrev_list (abbrev_base,
compunit.cu_abbrev_offset);
next = process_abbrev_set (&debug_displays[abbrev_sec].section,
abbrev_base, abbrev_size,
compunit.cu_abbrev_offset, list);
list->start_of_next_abbrevs = next;
}
level = 0;
last_level = level;
saved_level = -1;
while (tags < start)
{
unsigned long abbrev_number;
unsigned long die_offset;
abbrev_entry *entry;
abbrev_attr *attr;
int do_printing = 1;
die_offset = tags - section_begin;
READ_ULEB (abbrev_number, tags, start);
/* A null DIE marks the end of a list of siblings or it may also be
a section padding. */
if (abbrev_number == 0)
{
/* Check if it can be a section padding for the last CU. */
if (level == 0 && start == end)
{
unsigned char *chk;
for (chk = tags; chk < start; chk++)
if (*chk != 0)
break;
if (chk == start)
break;
}
if (!do_loc && die_offset >= dwarf_start_die
&& (dwarf_cutoff_level == -1
|| level < dwarf_cutoff_level))
printf (_(" <%d><%lx>: Abbrev Number: 0\n"),
level, die_offset);
--level;
if (level < 0)
{
static unsigned num_bogus_warns = 0;
if (num_bogus_warns < 3)
{
warn (_("Bogus end-of-siblings marker detected at offset %lx in %s section\n"),
die_offset, section->name);
num_bogus_warns ++;
if (num_bogus_warns == 3)
warn (_("Further warnings about bogus end-of-sibling markers suppressed\n"));
}
}
if (dwarf_start_die != 0 && level < saved_level)
return true;
continue;
}
if (!do_loc)
{
if (dwarf_start_die != 0 && die_offset < dwarf_start_die)
do_printing = 0;
else
{
if (dwarf_start_die != 0 && die_offset == dwarf_start_die)
saved_level = level;
do_printing = (dwarf_cutoff_level == -1
|| level < dwarf_cutoff_level);
if (do_printing)
printf (_(" <%d><%lx>: Abbrev Number: %lu"),
level, die_offset, abbrev_number);
else if (dwarf_cutoff_level == -1
|| last_level < dwarf_cutoff_level)
printf (_(" <%d><%lx>: ...\n"), level, die_offset);
last_level = level;
}
}
/* Scan through the abbreviation list until we reach the
correct entry. */
if (list == NULL)
continue;
for (entry = list->first_abbrev; entry != NULL; entry = entry->next)
if (entry->number == abbrev_number)
break;
if (entry == NULL)
{
if (!do_loc && do_printing)
{
printf ("\n");
fflush (stdout);
}
warn (_("DIE at offset 0x%lx refers to abbreviation number %lu which does not exist\n"),
die_offset, abbrev_number);
return false;
}
if (!do_loc && do_printing)
printf (" (%s)\n", get_TAG_name (entry->tag));
switch (entry->tag)
{
default:
need_base_address = 0;
break;
case DW_TAG_compile_unit:
need_base_address = 1;
need_dwo_info = do_loc;
break;
case DW_TAG_entry_point:
case DW_TAG_subprogram:
need_base_address = 0;
/* Assuming that there is no DW_AT_frame_base. */
have_frame_base = 0;
break;
}
debug_info *debug_info_p =
(debug_information && unit < alloc_num_debug_info_entries)
? debug_information + unit : NULL;
assert (!debug_info_p
|| (debug_info_p->num_loc_offsets
== debug_info_p->num_loc_views));
for (attr = entry->first_attr;
attr && attr->attribute;
attr = attr->next)
{
if (! do_loc && do_printing)
/* Show the offset from where the tag was extracted. */
printf (" <%lx>", (unsigned long)(tags - section_begin));
tags = read_and_display_attr (attr->attribute,
attr->form,
attr->implicit_const,
section_begin,
tags,
start,
cu_offset,
compunit.cu_pointer_size,
offset_size,
compunit.cu_version,
debug_info_p,
do_loc || ! do_printing,
section,
this_set,
level);
}
/* If a locview attribute appears before a location one,
make sure we don't associate it with an earlier
loclist. */
if (debug_info_p)
switch (debug_info_p->num_loc_offsets - debug_info_p->num_loc_views)
{
case 1:
debug_info_p->loc_views [debug_info_p->num_loc_views] = vm1;
debug_info_p->num_loc_views++;
assert (debug_info_p->num_loc_views
== debug_info_p->num_loc_offsets);
break;
case 0:
break;
case -1:
warn(_("DIE has locviews without loclist\n"));
debug_info_p->num_loc_views--;
break;
default:
assert (0);
}
if (entry->children)
++level;
}
}
/* Set num_debug_info_entries here so that it can be used to check if
we need to process .debug_loc and .debug_ranges sections. */
if ((do_loc || do_debug_loc || do_debug_ranges || do_debug_info)
&& num_debug_info_entries == 0
&& ! do_types)
{
if (num_units > alloc_num_debug_info_entries)
num_debug_info_entries = alloc_num_debug_info_entries;
else
num_debug_info_entries = num_units;
}
if (!do_loc)
printf ("\n");
return true;
}
| 0
|
242,609
|
// Enqueues all of this op's input tensors into the underlying Buffer
// as a single tuple.  Fails the op context on any error.
void Compute(OpKernelContext* ctx) override {
  // Look up the buffer resource associated with this op instance.
  Buffer* buffer = nullptr;
  OP_REQUIRES_OK(ctx, GetBuffer(ctx, def(), &buffer));
  core::ScopedUnref unref_on_exit(buffer);

  // Gather every input tensor into one tuple.
  const int num_inputs = ctx->num_inputs();
  Buffer::Tuple tuple;
  tuple.reserve(num_inputs);
  for (int idx = 0; idx < num_inputs; ++idx) {
    tuple.push_back(ctx->input(idx));
  }

  // Hand the tuple to the buffer (may fail, e.g. if the buffer is bounded).
  OP_REQUIRES_OK(ctx, buffer->Put(&tuple));
}
| 0
|
459,400
|
do_tag(
char_u *tag, // tag (pattern) to jump to
int type,
int count,
int forceit, // :ta with !
int verbose) // print "tag not found" message
{
// Jump to a tag, or move around the tag stack (pop, go to newer/older
// entry, select among matching tags), depending on "type" (DT_TAG,
// DT_POP, DT_NEXT, DT_PREV, DT_SELECT, DT_JUMP, ...).
// Returns FALSE on failure; with FEAT_CSCOPE, returns whether a jump
// to a tag actually happened.
taggy_T *tagstack = curwin->w_tagstack;
int tagstackidx = curwin->w_tagstackidx;
int tagstacklen = curwin->w_tagstacklen;
int cur_match = 0;
int cur_fnum = curbuf->b_fnum;
int oldtagstackidx = tagstackidx;
int prevtagstackidx = tagstackidx;
int prev_num_matches;
int new_tag = FALSE;
int i;
int ic;
int no_regexp = FALSE;
int error_cur_match = 0;
int save_pos = FALSE;
fmark_T saved_fmark;
#ifdef FEAT_CSCOPE
int jumped_to_tag = FALSE;
#endif
int new_num_matches;
char_u **new_matches;
int use_tagstack;
int skip_msg = FALSE;
char_u *buf_ffname = curbuf->b_ffname; // name to use for
// priority computation
int use_tfu = 1;
char_u *tofree = NULL;
// remember the matches for the last used tag
static int num_matches = 0;
static int max_num_matches = 0; // limit used for match search
static char_u **matches = NULL;
static int flags;
#ifdef FEAT_EVAL
// Re-entering while a 'tagfunc' is active would corrupt the tag stack.
if (tfu_in_use)
{
emsg(_(e_cannot_modify_tag_stack_within_tagfunc));
return FALSE;
}
#endif
#ifdef EXITFREE
if (type == DT_FREE)
{
// remove the list of matches
FreeWild(num_matches, matches);
# ifdef FEAT_CSCOPE
cs_free_tags();
# endif
num_matches = 0;
return FALSE;
}
#endif
// Help tags are matched literally and never via 'tagfunc'.
if (type == DT_HELP)
{
type = DT_TAG;
no_regexp = TRUE;
use_tfu = 0;
}
prev_num_matches = num_matches;
free_string_option(nofile_fname);
nofile_fname = NULL;
CLEAR_POS(&saved_fmark.mark); // shutup gcc 4.0
saved_fmark.fnum = 0;
/*
 * Don't add a tag to the tagstack if 'tagstack' has been reset.
 */
if ((!p_tgst && *tag != NUL))
{
use_tagstack = FALSE;
new_tag = TRUE;
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
tagstack_clear_entry(&ptag_entry);
if ((ptag_entry.tagname = vim_strsave(tag)) == NULL)
goto end_do_tag;
}
#endif
}
else
{
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
use_tagstack = FALSE;
else
#endif
use_tagstack = TRUE;
// new pattern, add to the tag stack
if (*tag != NUL
&& (type == DT_TAG || type == DT_SELECT || type == DT_JUMP
#ifdef FEAT_QUICKFIX
|| type == DT_LTAG
#endif
#ifdef FEAT_CSCOPE
|| type == DT_CSCOPE
#endif
))
{
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
if (ptag_entry.tagname != NULL
&& STRCMP(ptag_entry.tagname, tag) == 0)
{
// Jumping to same tag: keep the current match, so that
// the CursorHold autocommand example works.
cur_match = ptag_entry.cur_match;
cur_fnum = ptag_entry.cur_fnum;
}
else
{
tagstack_clear_entry(&ptag_entry);
if ((ptag_entry.tagname = vim_strsave(tag)) == NULL)
goto end_do_tag;
}
}
else
#endif
{
/*
 * If the last used entry is not at the top, delete all tag
 * stack entries above it.
 */
while (tagstackidx < tagstacklen)
tagstack_clear_entry(&tagstack[--tagstacklen]);
// if the tagstack is full: remove oldest entry
if (++tagstacklen > TAGSTACKSIZE)
{
tagstacklen = TAGSTACKSIZE;
tagstack_clear_entry(&tagstack[0]);
for (i = 1; i < tagstacklen; ++i)
tagstack[i - 1] = tagstack[i];
--tagstackidx;
}
/*
 * put the tag name in the tag stack
 */
if ((tagstack[tagstackidx].tagname = vim_strsave(tag)) == NULL)
{
curwin->w_tagstacklen = tagstacklen - 1;
goto end_do_tag;
}
curwin->w_tagstacklen = tagstacklen;
save_pos = TRUE; // save the cursor position below
}
new_tag = TRUE;
}
else
{
if (
#if defined(FEAT_QUICKFIX)
g_do_tagpreview != 0 ? ptag_entry.tagname == NULL :
#endif
tagstacklen == 0)
{
// empty stack
emsg(_(e_tag_stack_empty));
goto end_do_tag;
}
if (type == DT_POP) // go to older position
{
#ifdef FEAT_FOLDING
int old_KeyTyped = KeyTyped;
#endif
if ((tagstackidx -= count) < 0)
{
emsg(_(e_at_bottom_of_tag_stack));
if (tagstackidx + count == 0)
{
// We did [num]^T from the bottom of the stack
tagstackidx = 0;
goto end_do_tag;
}
// We weren't at the bottom of the stack, so jump all the
// way to the bottom now.
tagstackidx = 0;
}
else if (tagstackidx >= tagstacklen) // count == 0?
{
emsg(_(e_at_top_of_tag_stack));
goto end_do_tag;
}
// Make a copy of the fmark, autocommands may invalidate the
// tagstack before it's used.
saved_fmark = tagstack[tagstackidx].fmark;
if (saved_fmark.fnum != curbuf->b_fnum)
{
/*
 * Jump to other file. If this fails (e.g. because the
 * file was changed) keep original position in tag stack.
 */
if (buflist_getfile(saved_fmark.fnum, saved_fmark.mark.lnum,
GETF_SETMARK, forceit) == FAIL)
{
tagstackidx = oldtagstackidx; // back to old posn
goto end_do_tag;
}
// An BufReadPost autocommand may jump to the '" mark, but
// we don't want that here.
curwin->w_cursor.lnum = saved_fmark.mark.lnum;
}
else
{
setpcmark();
curwin->w_cursor.lnum = saved_fmark.mark.lnum;
}
curwin->w_cursor.col = saved_fmark.mark.col;
curwin->w_set_curswant = TRUE;
check_cursor();
#ifdef FEAT_FOLDING
if ((fdo_flags & FDO_TAG) && old_KeyTyped)
foldOpenCursor();
#endif
// remove the old list of matches
FreeWild(num_matches, matches);
#ifdef FEAT_CSCOPE
cs_free_tags();
#endif
num_matches = 0;
tag_freematch();
goto end_do_tag;
}
if (type == DT_TAG
#if defined(FEAT_QUICKFIX)
|| type == DT_LTAG
#endif
)
{
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
cur_match = ptag_entry.cur_match;
cur_fnum = ptag_entry.cur_fnum;
}
else
#endif
{
// ":tag" (no argument): go to newer pattern
save_pos = TRUE; // save the cursor position below
if ((tagstackidx += count - 1) >= tagstacklen)
{
/*
 * Beyond the last one, just give an error message and
 * go to the last one. Don't store the cursor
 * position.
 */
tagstackidx = tagstacklen - 1;
emsg(_(e_at_top_of_tag_stack));
save_pos = FALSE;
}
else if (tagstackidx < 0) // must have been count == 0
{
emsg(_(e_at_bottom_of_tag_stack));
tagstackidx = 0;
goto end_do_tag;
}
cur_match = tagstack[tagstackidx].cur_match;
cur_fnum = tagstack[tagstackidx].cur_fnum;
}
new_tag = TRUE;
}
else // go to other matching tag
{
// Save index for when selection is cancelled.
prevtagstackidx = tagstackidx;
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
cur_match = ptag_entry.cur_match;
cur_fnum = ptag_entry.cur_fnum;
}
else
#endif
{
if (--tagstackidx < 0)
tagstackidx = 0;
cur_match = tagstack[tagstackidx].cur_match;
cur_fnum = tagstack[tagstackidx].cur_fnum;
}
// Pick which of the matching tags to jump to, relative to the
// remembered current match.
switch (type)
{
case DT_FIRST: cur_match = count - 1; break;
case DT_SELECT:
case DT_JUMP:
#ifdef FEAT_CSCOPE
case DT_CSCOPE:
#endif
case DT_LAST: cur_match = MAXCOL - 1; break;
case DT_NEXT: cur_match += count; break;
case DT_PREV: cur_match -= count; break;
}
if (cur_match >= MAXCOL)
cur_match = MAXCOL - 1;
else if (cur_match < 0)
{
emsg(_(e_cannot_go_before_first_matching_tag));
skip_msg = TRUE;
cur_match = 0;
cur_fnum = curbuf->b_fnum;
}
}
}
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
if (type != DT_SELECT && type != DT_JUMP)
{
ptag_entry.cur_match = cur_match;
ptag_entry.cur_fnum = cur_fnum;
}
}
else
#endif
{
/*
 * For ":tag [arg]" or ":tselect" remember position before the jump.
 */
saved_fmark = tagstack[tagstackidx].fmark;
if (save_pos)
{
tagstack[tagstackidx].fmark.mark = curwin->w_cursor;
tagstack[tagstackidx].fmark.fnum = curbuf->b_fnum;
}
// Curwin will change in the call to jumpto_tag() if ":stag" was
// used or an autocommand jumps to another window; store value of
// tagstackidx now.
curwin->w_tagstackidx = tagstackidx;
if (type != DT_SELECT && type != DT_JUMP)
{
curwin->w_tagstack[tagstackidx].cur_match = cur_match;
curwin->w_tagstack[tagstackidx].cur_fnum = cur_fnum;
}
}
}
// When not using the current buffer get the name of buffer "cur_fnum".
// Makes sure that the tag order doesn't change when using a remembered
// position for "cur_match".
if (cur_fnum != curbuf->b_fnum)
{
buf_T *buf = buflist_findnr(cur_fnum);
if (buf != NULL)
buf_ffname = buf->b_ffname;
}
/*
 * Repeat searching for tags, when a file has not been found.
 */
for (;;)
{
int other_name;
char_u *name;
/*
 * When desired match not found yet, try to find it (and others).
 */
if (use_tagstack)
{
// make a copy, the tagstack may change in 'tagfunc'
name = vim_strsave(tagstack[tagstackidx].tagname);
vim_free(tofree);
tofree = name;
}
#if defined(FEAT_QUICKFIX)
else if (g_do_tagpreview != 0)
name = ptag_entry.tagname;
#endif
else
name = tag;
// A different tag name than last time invalidates the cached matches.
other_name = (tagmatchname == NULL || STRCMP(tagmatchname, name) != 0);
if (new_tag
|| (cur_match >= num_matches && max_num_matches != MAXCOL)
|| other_name)
{
if (other_name)
{
vim_free(tagmatchname);
tagmatchname = vim_strsave(name);
}
if (type == DT_SELECT || type == DT_JUMP
#if defined(FEAT_QUICKFIX)
|| type == DT_LTAG
#endif
)
cur_match = MAXCOL - 1;
if (type == DT_TAG)
max_num_matches = MAXCOL;
else
max_num_matches = cur_match + 1;
// when the argument starts with '/', use it as a regexp
if (!no_regexp && *name == '/')
{
flags = TAG_REGEXP;
++name;
}
else
flags = TAG_NOIC;
#ifdef FEAT_CSCOPE
if (type == DT_CSCOPE)
flags = TAG_CSCOPE;
#endif
if (verbose)
flags |= TAG_VERBOSE;
if (!use_tfu)
flags |= TAG_NO_TAGFUNC;
if (find_tags(name, &new_num_matches, &new_matches, flags,
max_num_matches, buf_ffname) == OK
&& new_num_matches < max_num_matches)
max_num_matches = MAXCOL; // If less than max_num_matches
// found: all matches found.
// A tag function may do anything, which may cause various
// information to become invalid. At least check for the tagstack
// to still be the same.
if (tagstack != curwin->w_tagstack)
{
emsg(_(e_window_unexpectedly_close_while_searching_for_tags));
FreeWild(new_num_matches, new_matches);
break;
}
// If there already were some matches for the same name, move them
// to the start. Avoids that the order changes when using
// ":tnext" and jumping to another file.
if (!new_tag && !other_name)
{
int j, k;
int idx = 0;
tagptrs_T tagp, tagp2;
// Find the position of each old match in the new list. Need
// to use parse_match() to find the tag line.
for (j = 0; j < num_matches; ++j)
{
parse_match(matches[j], &tagp);
for (i = idx; i < new_num_matches; ++i)
{
parse_match(new_matches[i], &tagp2);
if (STRCMP(tagp.tagname, tagp2.tagname) == 0)
{
char_u *p = new_matches[i];
for (k = i; k > idx; --k)
new_matches[k] = new_matches[k - 1];
new_matches[idx++] = p;
break;
}
}
}
}
FreeWild(num_matches, matches);
num_matches = new_num_matches;
matches = new_matches;
}
if (num_matches <= 0)
{
if (verbose)
semsg(_(e_tag_not_found_str), name);
#if defined(FEAT_QUICKFIX)
g_do_tagpreview = 0;
#endif
}
else
{
int ask_for_selection = FALSE;
#ifdef FEAT_CSCOPE
if (type == DT_CSCOPE && num_matches > 1)
{
cs_print_tags();
ask_for_selection = TRUE;
}
else
#endif
if (type == DT_TAG && *tag != NUL)
// If a count is supplied to the ":tag <name>" command, then
// jump to count'th matching tag.
cur_match = count > 0 ? count - 1 : 0;
else if (type == DT_SELECT || (type == DT_JUMP && num_matches > 1))
{
print_tag_list(new_tag, use_tagstack, num_matches, matches);
ask_for_selection = TRUE;
}
#if defined(FEAT_QUICKFIX) && defined(FEAT_EVAL)
else if (type == DT_LTAG)
{
if (add_llist_tags(tag, num_matches, matches) == FAIL)
goto end_do_tag;
cur_match = 0; // Jump to the first tag
}
#endif
if (ask_for_selection == TRUE)
{
/*
 * Ask to select a tag from the list.
 */
i = prompt_for_number(NULL);
if (i <= 0 || i > num_matches || got_int)
{
// no valid choice: don't change anything
if (use_tagstack)
{
tagstack[tagstackidx].fmark = saved_fmark;
tagstackidx = prevtagstackidx;
}
#ifdef FEAT_CSCOPE
cs_free_tags();
jumped_to_tag = TRUE;
#endif
break;
}
cur_match = i - 1;
}
if (cur_match >= num_matches)
{
// Avoid giving this error when a file wasn't found and we're
// looking for a match in another file, which wasn't found.
// There will be an emsg("file doesn't exist") below then.
if ((type == DT_NEXT || type == DT_FIRST)
&& nofile_fname == NULL)
{
if (num_matches == 1)
emsg(_(e_there_is_only_one_matching_tag));
else
emsg(_(e_cannot_go_beyond_last_matching_tag));
skip_msg = TRUE;
}
cur_match = num_matches - 1;
}
if (use_tagstack)
{
tagptrs_T tagp;
tagstack[tagstackidx].cur_match = cur_match;
tagstack[tagstackidx].cur_fnum = cur_fnum;
// store user-provided data originating from tagfunc
if (use_tfu && parse_match(matches[cur_match], &tagp) == OK
&& tagp.user_data)
{
VIM_CLEAR(tagstack[tagstackidx].user_data);
tagstack[tagstackidx].user_data = vim_strnsave(
tagp.user_data, tagp.user_data_end - tagp.user_data);
}
++tagstackidx;
}
#if defined(FEAT_QUICKFIX)
else if (g_do_tagpreview != 0)
{
ptag_entry.cur_match = cur_match;
ptag_entry.cur_fnum = cur_fnum;
}
#endif
/*
 * Only when going to try the next match, report that the previous
 * file didn't exist. Otherwise an emsg() is given below.
 */
if (nofile_fname != NULL && error_cur_match != cur_match)
smsg(_("File \"%s\" does not exist"), nofile_fname);
// MT_IC_OFF flag: the match was found ignoring case.
ic = (matches[cur_match][0] & MT_IC_OFF);
if (type != DT_TAG && type != DT_SELECT && type != DT_JUMP
#ifdef FEAT_CSCOPE
&& type != DT_CSCOPE
#endif
&& (num_matches > 1 || ic)
&& !skip_msg)
{
// Give an indication of the number of matching tags
sprintf((char *)IObuff, _("tag %d of %d%s"),
cur_match + 1,
num_matches,
max_num_matches != MAXCOL ? _(" or more") : "");
if (ic)
STRCAT(IObuff, _(" Using tag with different case!"));
if ((num_matches > prev_num_matches || new_tag)
&& num_matches > 1)
{
if (ic)
msg_attr((char *)IObuff, HL_ATTR(HLF_W));
else
msg((char *)IObuff);
msg_scroll = TRUE; // don't overwrite this message
}
else
give_warning(IObuff, ic);
if (ic && !msg_scrolled && msg_silent == 0)
{
out_flush();
ui_delay(1007L, TRUE);
}
}
#if defined(FEAT_EVAL)
// Let the SwapExists event know what tag we are jumping to.
vim_snprintf((char *)IObuff, IOSIZE, ":ta %s\r", name);
set_vim_var_string(VV_SWAPCOMMAND, IObuff, -1);
#endif
/*
 * Jump to the desired match.
 */
i = jumpto_tag(matches[cur_match], forceit, type != DT_CSCOPE);
#if defined(FEAT_EVAL)
set_vim_var_string(VV_SWAPCOMMAND, NULL, -1);
#endif
if (i == NOTAGFILE)
{
// File not found: try again with another matching tag
if ((type == DT_PREV && cur_match > 0)
|| ((type == DT_TAG || type == DT_NEXT
|| type == DT_FIRST)
&& (max_num_matches != MAXCOL
|| cur_match < num_matches - 1)))
{
error_cur_match = cur_match;
if (use_tagstack)
--tagstackidx;
if (type == DT_PREV)
--cur_match;
else
{
type = DT_NEXT;
++cur_match;
}
continue;
}
semsg(_(e_file_str_does_not_exist), nofile_fname);
}
else
{
// We may have jumped to another window, check that
// tagstackidx is still valid.
if (use_tagstack && tagstackidx > curwin->w_tagstacklen)
tagstackidx = curwin->w_tagstackidx;
#ifdef FEAT_CSCOPE
jumped_to_tag = TRUE;
#endif
}
}
break;
}
end_do_tag:
// Only store the new index when using the tagstack and it's valid.
if (use_tagstack && tagstackidx <= curwin->w_tagstacklen)
curwin->w_tagstackidx = tagstackidx;
postponed_split = 0; // don't split next time
# ifdef FEAT_QUICKFIX
g_do_tagpreview = 0; // don't do tag preview next time
# endif
vim_free(tofree);
#ifdef FEAT_CSCOPE
return jumped_to_tag;
#else
return FALSE;
#endif
}
| 0
|
437,712
|
/*
 * Convert a duration in nanoseconds to a low pass filter count value.
 * The reference clock runs at CX23888_IR_REFCLK_FREQ Hz; the duration is
 * first rounded to the nearest whole number of reference clock ticks.
 */
static inline u16 ns_to_lpf_count(unsigned int ns)
{
	unsigned int clocks;

	/* (refclk ticks per microsecond * ns) / 1000, rounded to nearest */
	clocks = DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ / 1000000 * ns, 1000);

	return count_to_lpf_count(clocks);
}
| 0
|
312,493
|
win_set_loclist(win_T *wp, qf_info_T *qi)
{
    // Attach location list "qi" to window "wp"; the window takes a
    // reference so the list stays alive while it is in use.
    ++qi->qf_refcount;
    wp->w_llist = qi;
}
| 0
|
317,098
|
/*
 * LSM hook: may the current task change the process group of task "p"?
 * The decision depends only on the two tasks' SIDs; "pgid" itself is
 * not consulted.  Returns 0 when permitted, negative error otherwise.
 */
static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
{
	u32 ssid = current_sid();
	u32 tsid = task_sid_obj(p);

	return avc_has_perm(&selinux_state, ssid, tsid,
			    SECCLASS_PROCESS, PROCESS__SETPGID, NULL);
}
| 0
|
261,925
|
njs_string_split_part_add(njs_vm_t *vm, njs_array_t *array, njs_utf8_t utf8,
    const u_char *start, size_t size)
{
    /* Append one split fragment (start..start+size) to "array" as a
     * string value, computing its character length for the given
     * encoding first. */
    ssize_t  length = njs_string_calc_length(utf8, start, size);

    return njs_array_string_add(vm, array, start, size, length);
}
| 0
|
309,873
|
default_fg(NCURSES_SP_DCL0)
{
    /* Report the screen's default foreground color; fall back to
     * white when no screen pointer is available. */
    if (SP_PARM != 0)
	return SP_PARM->_default_fg;
    return COLOR_WHITE;
}
| 0
|
421,372
|
/* Print a single variable declarator: the name, followed by
 * " = initializer" when an initializer expression is present. */
static void pvar(int d, js_Ast *var)
{
	js_Ast *init = var->b;

	assert(var->type == EXP_VAR);

	pexp(d, var->a);	/* variable name */
	if (init) {
		sp();
		pc('=');
		sp();
		pexp(d, init);	/* initializer expression */
	}
}
| 0
|
221,477
|
add_dconf_key_to_keyfile (GKeyFile *keyfile,
                          DConfClient *client,
                          const char *key,
                          DConfReadFlags flags)
{
  /* Copy one dconf key into "keyfile": "/path/to/key" becomes key "key"
   * in group "path/to".  Does nothing if the key has no value. */
  g_autofree char *group = g_path_get_dirname (key);
  g_autofree char *k = g_path_get_basename (key);
  /* dconf_client_read_full() returns a full reference, or NULL when the
   * key is unset. */
  GVariant *value = dconf_client_read_full (client, key, flags, NULL);

  if (value)
    {
      g_autofree char *val = g_variant_print (value, TRUE);

      /* group + 1 skips the leading '/' of the dirname. */
      g_key_file_set_value (keyfile, group + 1, k, val);

      /* Fix: release the variant; the reference was previously leaked. */
      g_variant_unref (value);
    }
}
| 0
|
242,965
|
int mbedtls_ssl_get_record_expansion( const mbedtls_ssl_context *ssl )
{
/* Return the worst-case number of bytes added to a plaintext payload
 * by the current outgoing transform: record header, MAC, padding,
 * explicit IV and CID as applicable.  Returns a negative error code
 * when the expansion cannot be determined (compression enabled, or an
 * unknown cipher mode). */
size_t transform_expansion = 0;
const mbedtls_ssl_transform *transform = ssl->transform_out;
unsigned block_size;
size_t out_hdr_len = mbedtls_ssl_out_hdr_len( ssl );
/* No transform yet (e.g. before handshake completion): only the
 * record header is added. */
if( transform == NULL )
return( (int) out_hdr_len );
#if defined(MBEDTLS_ZLIB_SUPPORT)
/* With compression the expansion is unbounded. */
if( ssl->session_out->compression != MBEDTLS_SSL_COMPRESS_NULL )
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
#endif
switch( mbedtls_cipher_get_cipher_mode( &transform->cipher_ctx_enc ) )
{
case MBEDTLS_MODE_GCM:
case MBEDTLS_MODE_CCM:
case MBEDTLS_MODE_CHACHAPOLY:
case MBEDTLS_MODE_STREAM:
/* AEAD/stream: minlen covers the per-record overhead (tag/IV). */
transform_expansion = transform->minlen;
break;
case MBEDTLS_MODE_CBC:
block_size = mbedtls_cipher_get_block_size(
&transform->cipher_ctx_enc );
/* Expansion due to the addition of the MAC. */
transform_expansion += transform->maclen;
/* Expansion due to the addition of CBC padding;
 * Theoretically up to 256 bytes, but we never use
 * more than the block size of the underlying cipher. */
transform_expansion += block_size;
/* For TLS 1.1 or higher, an explicit IV is added
 * after the record header. */
#if defined(MBEDTLS_SSL_PROTO_TLS1_1) || defined(MBEDTLS_SSL_PROTO_TLS1_2)
if( ssl->minor_ver >= MBEDTLS_SSL_MINOR_VERSION_2 )
transform_expansion += block_size;
#endif /* MBEDTLS_SSL_PROTO_TLS1_1 || MBEDTLS_SSL_PROTO_TLS1_2 */
break;
default:
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
/* A non-empty outgoing CID adds up to the maximum CID expansion. */
if( transform->out_cid_len != 0 )
transform_expansion += MBEDTLS_SSL_MAX_CID_EXPANSION;
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
return( (int)( out_hdr_len + transform_expansion ) );
}
| 0
|
224,168
|
// Hash a scalar int64 Tensor by hashing its single element value.
std::size_t operator()(const Tensor& key) const {
  const int64_t value = key.scalar<int64_t>()();
  return std::hash<int64_t>()(value);
}
| 0
|
443,707
|
utf16be_mbc_case_fold(OnigCaseFoldType flag,
const UChar** pp, const UChar* end, UChar* fold)
{
/* Case-fold one UTF-16BE character at *pp into "fold", advancing *pp
 * past it.  Returns the number of bytes written to "fold". */
const UChar* p = *pp;
/* Fast path: big-endian ASCII, i.e. high byte is 0 and low byte is an
 * ASCII code. */
if (ONIGENC_IS_ASCII_CODE(*(p+1)) && *p == 0) {
p++;
#ifdef USE_UNICODE_CASE_FOLD_TURKISH_AZERI
if ((flag & ONIGENC_CASE_FOLD_TURKISH_AZERI) != 0) {
/* Turkish/Azeri: 'I' (0x49) folds to U+0131 (dotless small i). */
if (*p == 0x49) {
*fold++ = 0x01;
*fold = 0x31;
(*pp) += 2;
return 2;
}
}
#endif
/* Plain ASCII fold: emit 0x00 then the lower-cased low byte. */
*fold++ = 0;
*fold = ONIGENC_ASCII_CODE_TO_LOWER_CASE(*p);
*pp += 2;
return 2;
}
else
/* Non-ASCII: defer to the generic Unicode folding routine. */
return onigenc_unicode_mbc_case_fold(ONIG_ENCODING_UTF16_BE, flag,
pp, end, fold);
}
| 0
|
437,384
|
recursive_call_check(Node* node)
{
/* Walk the AST below "node" and flag NODE_CALL nodes whose target group
 * is currently being expanded (i.e. the call is recursive).  Returns
 * non-zero if any recursion was found in the subtree.
 * MARK1 marks groups on the active call chain (set by the caller);
 * MARK2 marks memory groups already visited here, so each group is
 * checked at most once. */
int r;
switch (NODE_TYPE(node)) {
case NODE_LIST:
case NODE_ALT:
/* Accumulate results over all siblings. */
r = 0;
do {
r |= recursive_call_check(NODE_CAR(node));
} while (IS_NOT_NULL(node = NODE_CDR(node)));
break;
case NODE_ANCHOR:
if (! ANCHOR_HAS_BODY(ANCHOR_(node))) {
r = 0;
break;
}
/* fall */
case NODE_QUANT:
r = recursive_call_check(NODE_BODY(node));
break;
case NODE_CALL:
r = recursive_call_check(NODE_BODY(node));
if (r != 0) {
/* The called group is on the active chain: mark this call
 * as recursive. */
if (NODE_IS_MARK1(NODE_BODY(node)))
NODE_STATUS_ADD(node, RECURSION);
}
break;
case NODE_ENCLOSURE:
{
EnclosureNode* en = ENCLOSURE_(node);
if (en->type == ENCLOSURE_MEMORY) {
if (NODE_IS_MARK2(node))
return 0;
else if (NODE_IS_MARK1(node))
return 1; /* recursion */
else {
/* Guard against revisiting this group within the walk. */
NODE_STATUS_ADD(node, MARK2);
r = recursive_call_check(NODE_BODY(node));
NODE_STATUS_REMOVE(node, MARK2);
}
}
else if (en->type == ENCLOSURE_IF_ELSE) {
/* Check condition body plus both branches. */
r = 0;
if (IS_NOT_NULL(en->te.Then)) {
r |= recursive_call_check(en->te.Then);
}
if (IS_NOT_NULL(en->te.Else)) {
r |= recursive_call_check(en->te.Else);
}
r |= recursive_call_check(NODE_BODY(node));
}
else {
r = recursive_call_check(NODE_BODY(node));
}
}
break;
default:
/* Leaf nodes (strings, character classes, ...) cannot recurse. */
r = 0;
break;
}
return r;
}
| 0
|
445,889
|
archive_extraction_ready_for_convertion_cb (GObject *source_object,
GAsyncResult *result,
gpointer user_data)
{
/* Second step of converting an archive to another format: the old
 * archive has been extracted into a temporary directory; now add that
 * directory's contents to the new archive.  Continues asynchronously
 * in archive_add_ready_for_conversion_cb. */
ConvertData *cdata = user_data;
FrWindow *window = cdata->window;
GList *list;
GError *error = NULL;
if (! fr_archive_operation_finish (FR_ARCHIVE (source_object), result, &error)) {
/* Extraction failed: report the error and abort the conversion. */
_convertion_completed_with_error (window, FR_ACTION_EXTRACTING_FILES, error);
return;
}
/* Single-element list holding the temporary extraction directory. */
list = g_list_prepend (NULL, cdata->temp_extraction_dir);
fr_archive_add_files (cdata->new_archive,
list,
cdata->temp_extraction_dir,
NULL,
FALSE,
FALSE,
cdata->password,
cdata->encrypt_header,
window->priv->compression,
cdata->volume_size,
window->priv->cancellable,
archive_add_ready_for_conversion_cb,
cdata);
/* Free only the list container; the directory string is owned by
 * "cdata".  NOTE(review): assumes fr_archive_add_files does not keep
 * the list itself — confirm against its contract. */
g_list_free (list);
}
| 0
|
198,239
|
/*
 * Constant-time memory comparison for hash values.  Unlike strncmp(),
 * this does not stop at the first differing (or NUL) byte, so the run
 * time does not leak how many leading bytes of the hash matched.
 * Returns 0 iff the two buffers are equal over "len" bytes.
 */
static int hash_cmp_const(const unsigned char *a, const unsigned char *b,
			  size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];

	return diff;
}

/*
 * check_passwd - verify a cleartext password against the stored hash.
 * @passwd: the candidate password bytes
 * @length: number of bytes in @passwd
 *
 * The stored hash is read from the environment or the built-in default.
 * With CONFIG_PASSWD_CRYPTO_PBKDF2 the stored value is salt+key and the
 * candidate is stretched with PBKDF2-HMAC-SHA1; otherwise a plain digest
 * (PASSWD_SUM) of the candidate is compared.
 *
 * Returns 1 when the password matches, 0 when it does not, or a negative
 * error code on failure.
 */
static int check_passwd(unsigned char *passwd, size_t length)
{
	struct digest *d = NULL;
	unsigned char *passwd1_sum;
	unsigned char *passwd2_sum;
	int ret = 0;
	int hash_len;

	if (IS_ENABLED(CONFIG_PASSWD_CRYPTO_PBKDF2)) {
		hash_len = PBKDF2_LENGTH;
	} else {
		d = digest_alloc(PASSWD_SUM);
		if (!d) {
			pr_err("No such digest: %s\n",
			       PASSWD_SUM ? PASSWD_SUM : "NULL");
			return -ENOENT;
		}
		hash_len = digest_length(d);
	}

	/* One allocation holds both the computed and the stored hash. */
	passwd1_sum = calloc(hash_len * 2, sizeof(unsigned char));
	if (!passwd1_sum) {
		/* Fix: go through the cleanup path so the digest allocated
		 * above is not leaked (free(NULL) is a no-op). */
		ret = -ENOMEM;
		goto err;
	}
	passwd2_sum = passwd1_sum + hash_len;

	/* Load the stored (reference) hash into passwd2_sum. */
	if (is_passwd_env_enable())
		ret = read_env_passwd(passwd2_sum, hash_len);
	else if (is_passwd_default_enable())
		ret = read_default_passwd(passwd2_sum, hash_len);
	else
		ret = -EINVAL;
	if (ret < 0)
		goto err;

	if (IS_ENABLED(CONFIG_PASSWD_CRYPTO_PBKDF2)) {
		/* Stored layout: salt (PBKDF2_SALT_LEN bytes) then key. */
		unsigned char *key = passwd2_sum + PBKDF2_SALT_LEN;
		unsigned char *salt = passwd2_sum;
		int keylen = PBKDF2_LENGTH - PBKDF2_SALT_LEN;

		ret = pkcs5_pbkdf2_hmac_sha1(passwd, length, salt,
				PBKDF2_SALT_LEN, PBKDF2_COUNT, keylen,
				passwd1_sum);
		if (ret)
			goto err;
		/* Fix: compare in constant time instead of strncmp(). */
		if (hash_cmp_const(passwd1_sum, key, keylen) == 0)
			ret = 1;
	} else {
		ret = digest_digest(d, passwd, length, passwd1_sum);
		if (ret)
			goto err;
		/* Fix: compare in constant time instead of strncmp(). */
		if (hash_cmp_const(passwd1_sum, passwd2_sum, hash_len) == 0)
			ret = 1;
	}
err:
	free(passwd1_sum);
	digest_free(d);

	return ret;
}
| 1
|
317,027
|
static int selinux_socket_unix_may_send(struct socket *sock,
					struct socket *other)
{
	/* Ask the AVC whether the sending socket's SID holds SENDTO
	 * permission on the peer socket's SID, auditing the peer socket
	 * on denial. */
	struct sk_security_struct *sender_sec = sock->sk->sk_security;
	struct sk_security_struct *peer_sec = other->sk->sk_security;
	struct lsm_network_audit net = {0,};
	struct common_audit_data ad;

	ad.type = LSM_AUDIT_DATA_NET;
	ad.u.net = &net;
	ad.u.net->sk = other->sk;

	return avc_has_perm(&selinux_state, sender_sec->sid, peer_sec->sid,
			    peer_sec->sclass, SOCKET__SENDTO, &ad);
}
| 0
|
352,942
|
utcTimeValidate(
	Syntax *syntax,
	struct berval *in )
{
	/* Scratch storage for the parsed date/time components; the actual
	 * validation is delegated to the shared time-syntax checker in
	 * UTCTime mode (second argument = 1). */
	int time_parts[9];

	return check_time_syntax(in, 1, time_parts, NULL);
}
| 0
|
234,731
|
/*
 * Remove a device (selected by devid or path) from the filesystem.
 *
 * High-level sequence: validate RAID minimum-device constraints, refuse
 * devices pinned by swapfiles or involved in replace, shrink the device
 * to relocate its chunks, delete its dev item, unhook it from the device
 * lists and superblock count, wipe its superblocks and free it. On
 * shrink/dev-item failure the alloc-list removal is undone (error_undo).
 * Returns 0 or a negative errno / BTRFS_ERROR_* code.
 */
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
u64 devid)
{
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
u64 num_devices;
int ret = 0;
mutex_lock(&uuid_mutex);
num_devices = btrfs_num_devices(fs_info);
/* Would removing one device drop below the profile's minimum? */
ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
if (ret)
goto out;
device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
if (IS_ERR(device)) {
if (PTR_ERR(device) == -ENOENT &&
device_path && strcmp(device_path, "missing") == 0)
ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
else
ret = PTR_ERR(device);
goto out;
}
if (btrfs_pinned_by_swapfile(fs_info, device)) {
btrfs_warn_in_rcu(fs_info,
"cannot remove device %s (devid %llu) due to active swapfile",
rcu_str_deref(device->name), device->devid);
ret = -ETXTBSY;
goto out;
}
if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = BTRFS_ERROR_DEV_TGT_REPLACE;
goto out;
}
/* Cannot remove the last writeable device. */
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
fs_info->fs_devices->rw_devices == 1) {
ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
goto out;
}
/* Stop allocating new chunks on this device before shrinking it. */
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_del_init(&device->dev_alloc_list);
device->fs_devices->rw_devices--;
mutex_unlock(&fs_info->chunk_mutex);
}
/* Shrink can block for a long time; drop uuid_mutex around it. */
mutex_unlock(&uuid_mutex);
ret = btrfs_shrink_device(device, 0);
if (!ret)
btrfs_reada_remove_dev(device);
mutex_lock(&uuid_mutex);
if (ret)
goto error_undo;
/*
 * TODO: the superblock still includes this device in its num_devices
 * counter although write_all_supers() is not locked out. This
 * could give a filesystem state which requires a degraded mount.
 */
ret = btrfs_rm_dev_item(device);
if (ret)
goto error_undo;
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
btrfs_scrub_cancel_dev(device);
/*
 * the device list mutex makes sure that we don't change
 * the device list while someone else is writing out all
 * the device supers. Whoever is writing all supers, should
 * lock the device list mutex before getting the number of
 * devices in the super block (super_copy). Conversely,
 * whoever updates the number of devices in the super block
 * (super_copy) should hold the device list mutex.
 */
/*
 * In normal cases the cur_devices == fs_devices. But in case
 * of deleting a seed device, the cur_devices should point to
 * its own fs_devices listed under the fs_devices->seed.
 */
cur_devices = device->fs_devices;
mutex_lock(&fs_devices->device_list_mutex);
list_del_rcu(&device->dev_list);
cur_devices->num_devices--;
cur_devices->total_devices--;
/* Update total_devices of the parent fs_devices if it's seed */
if (cur_devices != fs_devices)
fs_devices->total_devices--;
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
cur_devices->missing_devices--;
btrfs_assign_next_active_device(device, NULL);
if (device->bdev) {
cur_devices->open_devices--;
/* remove sysfs entry */
btrfs_sysfs_remove_device(device);
}
num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
mutex_unlock(&fs_devices->device_list_mutex);
/*
 * at this point, the device is zero sized and detached from
 * the devices list. All that's left is to zero out the old
 * supers and free the device.
 */
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
btrfs_scratch_superblocks(fs_info, device->bdev,
device->name->str);
btrfs_close_bdev(device);
/* RCU grace period before freeing: readers may still hold dev_list. */
synchronize_rcu();
btrfs_free_device(device);
/* Last open device of a (seed) fs_devices: tear the whole set down. */
if (cur_devices->open_devices == 0) {
list_del_init(&cur_devices->seed_list);
close_fs_devices(cur_devices);
free_fs_devices(cur_devices);
}
out:
mutex_unlock(&uuid_mutex);
return ret;
error_undo:
/* Re-register the device for allocation; shrink/dev-item failed. */
btrfs_reada_undo_remove_dev(device);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_add(&device->dev_alloc_list,
&fs_devices->alloc_list);
device->fs_devices->rw_devices++;
mutex_unlock(&fs_info->chunk_mutex);
}
goto out;
}
| 0
|
310,138
|
/* Re-enable mouse event reporting for the screen after a shell escape,
 * dispatching on whichever mouse driver the screen was using. Only acts
 * when a mouse mask is active (for xterm/GPM); sysmouse and the terminal
 * driver re-activate unconditionally. */
_nc_mouse_resume(SCREEN *sp)
/* re-connect to mouse -- called by doupdate() after shellout */
{
TR(MY_TRACE, ("_nc_mouse_resume() called"));
switch (sp->_mouse_type) {
case M_XTERM:
/* xterm: re-enable reporting */
if (sp->_mouse_mask)
mouse_activate(sp, TRUE);
break;
#if USE_GPM_SUPPORT
case M_GPM:
/* GPM: reclaim our event set */
if (sp->_mouse_mask)
mouse_activate(sp, TRUE);
break;
#endif
#if USE_SYSMOUSE
case M_SYSMOUSE:
mouse_activate(sp, TRUE);
break;
#endif
#ifdef USE_TERM_DRIVER
case M_TERM_DRIVER:
mouse_activate(sp, TRUE);
break;
#endif
case M_NONE:
/* no mouse configured: nothing to resume */
break;
}
}
| 0
|
445,957
|
fr_archive_libarchive_init (FrArchiveLibarchive *self)
{
	FrArchive *archive = FR_ARCHIVE (self);

	self->priv = G_TYPE_INSTANCE_GET_PRIVATE (self, FR_TYPE_ARCHIVE_LIBARCHIVE, FrArchiveLibarchivePrivate);

	/* Advertise the capabilities of the libarchive backend. */
	archive->propAddCanReplace = TRUE;
	archive->propAddCanUpdate = TRUE;
	archive->propAddCanStoreFolders = TRUE;
	archive->propAddCanStoreLinks = TRUE;

	archive->propExtractCanAvoidOverwrite = TRUE;
	archive->propExtractCanSkipOlder = TRUE;
	archive->propExtractCanJunkPaths = TRUE;

	archive->propCanExtractAll = TRUE;
	archive->propCanDeleteNonEmptyFolders = TRUE;
	archive->propCanExtractNonEmptyFolders = TRUE;
}
| 0
|
225,934
|
void payt_box_del(GF_Box *s)
{
	/* Release the payload-name string (when set), then the box itself. */
	GF_PAYTBox *box = (GF_PAYTBox *)s;

	if (box->payloadString)
		gf_free(box->payloadString);
	gf_free(box);
}
| 0
|
432,326
|
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /*
     * The pc may currently have no virtual-to-physical mapping, yet
     * translated blocks for it can still sit in the TB cache. Dropping
     * the whole cache forces any such TB to be re-translated. This is
     * heavyweight, but acceptable since we are debugging anyway.
     */
    tb_flush(cpu);
}
| 0
|
353,209
|
// Construct a glyph cache for one Type 3 font instance. Stores the font
// transform and glyph bounding box, sanity-checks the glyph size, then
// sizes a set-associative cache (cacheSets x cacheAssoc slots of
// glyphSize bytes) to fit within type3FontCacheSize, refusing to
// allocate when a single glyph would demand excessive memory.
T3FontCache::T3FontCache(const Ref *fontIDA, double m11A, double m12A,
double m21A, double m22A,
int glyphXA, int glyphYA, int glyphWA, int glyphHA,
bool validBBoxA, bool aa) {
fontID = *fontIDA;
m11 = m11A;
m12 = m12A;
m21 = m21A;
m22 = m22A;
glyphX = glyphXA;
glyphY = glyphYA;
glyphW = glyphWA;
glyphH = glyphHA;
validBBox = validBBoxA;
// sanity check for excessively large glyphs (which most likely
// indicate an incorrect BBox)
if (glyphW > INT_MAX / glyphH || glyphW <= 0 || glyphH <= 0 || glyphW * glyphH > 100000) {
glyphW = glyphH = 100;
validBBox = false;
}
// anti-aliased glyphs store one byte per pixel; otherwise 1 bit per
// pixel, each row padded to a whole byte
if (aa) {
glyphSize = glyphW * glyphH;
} else {
glyphSize = ((glyphW + 7) >> 3) * glyphH;
}
cacheAssoc = type3FontCacheAssoc;
// halve the number of sets until the total cache fits the budget
for (cacheSets = type3FontCacheMaxSets;
cacheSets > 1 &&
cacheSets * cacheAssoc * glyphSize > type3FontCacheSize;
cacheSets >>= 1) ;
if (glyphSize < 10485760 / cacheAssoc / cacheSets) {
cacheData = (unsigned char *)gmallocn_checkoverflow(cacheSets * cacheAssoc, glyphSize);
} else {
error(errSyntaxWarning, -1, "Not creating cacheData for T3FontCache, it asked for too much memory.\n"
"                           This could teoretically result in wrong rendering,\n"
"                           but most probably the document is bogus.\n"
"                           Please report a bug if you think the rendering may be wrong because of this.");
cacheData = nullptr;
}
if (cacheData != nullptr)
{
cacheTags = (T3FontCacheTag *)gmallocn(cacheSets * cacheAssoc,
sizeof(T3FontCacheTag));
// initialize per-slot MRU counters within each set
for (int i = 0; i < cacheSets * cacheAssoc; ++i) {
cacheTags[i].mru = i & (cacheAssoc - 1);
}
}
else
{
cacheTags = nullptr;
}
}
| 0
|
195,768
|
void Compute(OpKernelContext* context) override {
    // Creates the quantile stream resource if one does not exist already;
    // if one already exists, the freshly built resource is unref'd and the
    // ALREADY_EXISTS status is tolerated. All scalar inputs are validated
    // before the resource is constructed.
    const Tensor* epsilon_t;
    OP_REQUIRES_OK(context, context->input(kEpsilonName, &epsilon_t));
    float epsilon = epsilon_t->scalar<float>()();
    // An epsilon value of zero could cause performance issues and is
    // therefore disallowed.
    OP_REQUIRES(
        context, epsilon > 0,
        errors::InvalidArgument("An epsilon value of zero is not allowed."));

    const Tensor* num_streams_t;
    OP_REQUIRES_OK(context, context->input(kNumStreamsName, &num_streams_t));
    int64_t num_streams = num_streams_t->scalar<int64>()();
    // Fix: a negative stream count was previously forwarded straight into
    // the resource constructor, where it is used for sizing internal
    // storage; reject it up front.
    OP_REQUIRES(context, num_streams >= 0,
                errors::InvalidArgument(
                    "Num_streams input cannot be a negative integer"));

    auto result =
        new QuantileStreamResource(epsilon, max_elements_, num_streams);
    auto status = CreateResource(context, HandleFromInput(context, 0), result);
    if (!status.ok() && status.code() != tensorflow::error::ALREADY_EXISTS) {
      OP_REQUIRES(context, false, status);
    }
  }
| 1
|
349,274
|
static void read_fragment(unsigned int fragment, long long *start_block, int *size)
{
	/* Look up one entry of the in-memory fragment table and hand back
	 * its on-disk location and size through the out parameters. */
	struct squashfs_fragment_entry *entry = &fragment_table[fragment];

	TRACE("read_fragment: reading fragment %d\n", fragment);

	*start_block = entry->start_block;
	*size = entry->size;
}
| 0
|
225,082
|
defaultNoticeReceiver(void *arg, const PGresult *res)
{
	/* Default notice receiver: hand the notice text to the result's
	 * notice processor, when one is installed. */
	(void) arg;					/* not used */

	if (res->noticeHooks.noticeProc == NULL)
		return;
	res->noticeHooks.noticeProc(res->noticeHooks.noticeProcArg,
								PQresultErrorMessage(res));
}
| 0
|
210,555
|
/*
 * Release per-device resources held by the vhost-user backend: guest
 * memory region mappings, the guest page array, the dirty-log mmap,
 * the slave request fd and the postcopy userfaultfd.
 *
 * NOTE(review): depending on the library version, struct virtio_net may
 * also carry inflight-I/O state (an mmap'd region and its fd) that is
 * not released here — verify against the struct definition; leaking it
 * across guest reconnects would exhaust fds/memory over time.
 */
vhost_backend_cleanup(struct virtio_net *dev)
{
if (dev->mem) {
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
}
free(dev->guest_pages);
dev->guest_pages = NULL;
if (dev->log_addr) {
/* unmap the shared dirty-log area and forget it */
munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
dev->log_addr = 0;
}
if (dev->slave_req_fd >= 0) {
close(dev->slave_req_fd);
dev->slave_req_fd = -1;
}
if (dev->postcopy_ufd >= 0) {
close(dev->postcopy_ufd);
dev->postcopy_ufd = -1;
}
dev->postcopy_listening = 0;
}
| 1
|
219,007
|
bool ConstantFolding::ReplaceReductionWithIdentity(NodeDef* node) const {
  // Rewrite a reduction whose result equals its input as an Identity node,
  // which later passes can optimize further.
  DataType output_type;
  if (node->attr().count("T") != 0) {
    // The node carries an explicit dtype attribute.
    output_type = node->attr().at("T").type();
  } else if (IsAny(*node) || IsAll(*node)) {
    // Any/All always produce booleans.
    output_type = DT_BOOL;
  } else {
    // Output type unknown; leave the node untouched.
    return false;
  }

  node->set_op("Identity");
  EraseRegularNodeAttributes(node);
  (*node->mutable_attr())["T"].set_type(output_type);
  // Demote the reduction-axes input to a control dependency.
  *node->mutable_input(1) = AsControlDependency(node->input(1));
  return true;
}
| 0
|
233,941
|
// Report this stage's dependency analysis result. NOT_SUPPORTED tells the
// optimizer it cannot reason about which fields this stage needs.
DepsTracker::State DocumentSourceUnionWith::getDependencies(DepsTracker* deps) const {
// Since the $unionWith stage is a simple passthrough, we *could* report SEE_NEXT here in an
// attempt to get a covered plan for the base collection. The ideal solution would involve
// pushing down any dependencies to the inner pipeline as well.
return DepsTracker::State::NOT_SUPPORTED;
}
| 0
|
237,891
|
lsquic_qeh_cleanup (struct qpack_enc_hdl *qeh)
{
    /* Tear down the QPACK encoder handler and zero the structure; a
     * handler that was never initialized is left untouched. */
    if (!(qeh->qeh_flags & QEH_INITIALIZED))
        return;

    LSQ_DEBUG("cleanup");
    if (qeh->qeh_exp_rec)
        qeh_log_and_clean_exp_rec(qeh);
    lsqpack_enc_cleanup(&qeh->qeh_encoder);
    lsquic_frab_list_cleanup(&qeh->qeh_fral);
    memset(qeh, 0, sizeof(*qeh));
}
| 0
|
345,141
|
/* Set up the watchdog-style debug timer for the graphics controller and
 * invoke the timeout handler once immediately to arm/prime it. */
static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
/* init the timer structure */
debug_timer_priv = priv;
timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
/* NULL argument: the handler falls back to debug_timer_priv set above */
pxa3xx_gcu_debug_timedout(NULL);
}
| 0
|
227,007
|
/*
 * Build a colorized "nick (address)" display string for a message.
 * Shows "nick (address)" when both are given and differ, just "nick"
 * otherwise, and an empty string when nickname is NULL.
 *
 * Returns a pointer to a static buffer: the result is overwritten by
 * the next call and the function is not reentrant/thread-safe.
 */
irc_protocol_nick_address (struct t_irc_server *server,
int server_message,
struct t_irc_nick *nick,
const char *nickname,
const char *address)
{
static char string[1024];
string[0] = '\0';
if (nickname && address && (strcmp (nickname, address) != 0))
{
/* display nick and address if they are different */
snprintf (string, sizeof (string),
"%s%s %s(%s%s%s)%s",
irc_nick_color_for_msg (server, server_message, nick,
nickname),
nickname,
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_CHAT_HOST,
address,
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_RESET);
}
else if (nickname)
{
/* display only nick if no address or if nick == address */
snprintf (string, sizeof (string),
"%s%s%s",
irc_nick_color_for_msg (server, server_message, nick,
nickname),
nickname,
IRC_COLOR_RESET);
}
return string;
}
| 0
|
459,167
|
/* Mark a classifier proto as being destroyed by inserting it into the
 * block's destroy hash table (under proto_destroy_lock), so concurrent
 * lookups can detect in-flight destruction. */
static void tcf_proto_signal_destroying(struct tcf_chain *chain,
struct tcf_proto *tp)
{
struct tcf_block *block = chain->block;
mutex_lock(&block->proto_destroy_lock);
hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
destroy_obj_hashfn(tp));
mutex_unlock(&block->proto_destroy_lock);
}
| 0
|
512,927
|
// Cache item for TIMESTAMP values: binds the cache to the timestamp2
// type handler.
Item_cache_timestamp(THD *thd)
:Item_cache(thd, &type_handler_timestamp2) { }
| 0
|
386,604
|
void DL_Dxf::addBlock(DL_CreationInterface* creationInterface) {
    // Forward a BLOCK entity to the creation interface. Blocks lacking a
    // name (group code 2) are silently ignored.
    std::string blockName = getStringValue(2, "");
    if (blockName.empty()) {
        return;
    }

    DL_BlockData blockData(
        blockName,                 // name
        getIntValue(70, 0),        // flags
        getRealValue(10, 0.0),     // base point x
        getRealValue(20, 0.0),     // base point y
        getRealValue(30, 0.0));    // base point z
    creationInterface->addBlock(blockData);
}
| 0
|
273,096
|
/*
 * Compute a case-insensitive 63-bit hash over the pair "a==b".
 * NULL arguments are treated as empty strings. Returns a fixed
 * stand-in value (999999) when the combined string would not fit in
 * the working buffer.
 *
 * Fixes:
 *  - snprintf() returns the would-be (untruncated) length, so any
 *    result >= sizeof(hashbuf) means truncation; the old check
 *    "ret == sizeof(hashbuf)" missed ret > sizeof(hashbuf).
 *  - tolower() on a plain char that may be negative is undefined
 *    behavior; cast through unsigned char first.
 */
two_str_hash(const char *a, const char *b)
{
  char hashbuf[2048];
  int64_t hash;
  int i;
  int ret;

  ret = snprintf(hashbuf, sizeof(hashbuf), "%s==%s", (a) ? a : "", (b) ? b : "");
  if (ret < 0 || ret >= (int)sizeof(hashbuf))
    {
      DPRINTF(E_LOG, L_MISC, "Buffer too large to calculate hash: '%s==%s'\n", a, b);
      return 999999; // Stand-in hash...
    }

  for (i = 0; hashbuf[i]; i++)
    hashbuf[i] = tolower((unsigned char)hashbuf[i]);

  // Limit hash length to 63 bits, due to signed type in sqlite
  hash = murmur_hash64(hashbuf, strlen(hashbuf), 0) >> 1;

  return hash;
}
| 0
|
225,764
|
GF_Err cprt_box_size(GF_Box *s)
{
	/* Fixed part: 2 bytes (packed language code). Variable part: the
	 * copyright notice plus its NUL terminator, when present. */
	GF_CopyrightBox *cprt = (GF_CopyrightBox *)s;

	cprt->size += 2;
	if (cprt->notice)
		cprt->size += strlen(cprt->notice) + 1;
	return GF_OK;
}
| 0
|
259,209
|
/*
 * Parse a 'meta' atom: scan for an 'hdlr' child, rewind over it and let
 * the default reader handle the whole remaining atom. For still-picture
 * AVIF files, also create the primary-item stream and mark the moov as
 * found (the meta box carries all the information moov normally would).
 *
 * Returns 0 or the result of mov_read_default(); negative AVERROR on
 * failure.
 *
 * Fix: the inner AVIF block declared a second 'int ret' shadowing the
 * outer one (-Wshadow hazard, easy to misread which value the final
 * "return ret" yields). The inner variable is renamed; behavior is
 * unchanged.
 */
static int mov_read_meta(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    while (atom.size > 8) {
        uint32_t tag;
        if (avio_feof(pb))
            return AVERROR_EOF;
        tag = avio_rl32(pb);
        atom.size -= 4;
        if (tag == MKTAG('h','d','l','r')) {
            int ret;
            /* rewind over the size+tag we just consumed */
            avio_seek(pb, -8, SEEK_CUR);
            atom.size += 8;
            if ((ret = mov_read_default(c, pb, atom)) < 0)
                return ret;
            if (c->is_still_picture_avif) {
                int err;
                // Add a stream for the YUV planes (primary item).
                if ((err = avif_add_stream(c, c->primary_item_id)) < 0)
                    return err;
                // For still AVIF images, the meta box contains all the
                // necessary information that would generally be provided by the
                // moov box. So simply mark that we have found the moov box so
                // that parsing can continue.
                c->found_moov = 1;
            }
            return ret;
        }
    }
    return 0;
}
| 0
|
416,357
|
/*
 * Wind down incremental-search ('incsearch') highlighting after the
 * command line is left. Restores the cursor (to the original position
 * on <Esc>, otherwise to the search start, setting the '" mark), resets
 * the view and the search-range/highlight globals, and optionally
 * triggers a redraw.
 */
finish_incsearch_highlighting(
int gotesc,
incsearch_state_T *is_state,
int call_update_screen)
{
if (is_state->did_incsearch)
{
is_state->did_incsearch = FALSE;
if (gotesc)
curwin->w_cursor = is_state->save_cursor;
else
{
if (!EQUAL_POS(is_state->save_cursor, is_state->search_start))
{
// put the '" mark at the original position
curwin->w_cursor = is_state->save_cursor;
setpcmark();
}
curwin->w_cursor = is_state->search_start;
}
restore_viewstate(&is_state->old_viewstate);
highlight_match = FALSE;
// by default search all lines
search_first_line = 0;
search_last_line = MAXLNUM;
magic_overruled = is_state->magic_overruled_save;
validate_cursor();		// needed for TAB
redraw_all_later(UPD_SOME_VALID);
if (call_update_screen)
update_screen(UPD_SOME_VALID);
}
}
| 0
|
269,310
|
/*
 * Decode a non-negative value from the range coder using the given
 * context states: a unary/exponential prefix (each set bit adds the
 * current step 'r', which doubles once log2 > 0) followed by log2
 * explicit tail bits. Inverse of the matching put_symbol2 encoder;
 * log2 may start as low as -4.
 */
static inline int get_symbol2(RangeCoder *c, uint8_t *state, int log2){
int i;
int r= log2>=0 ? 1<<log2 : 1;
int v=0;
av_assert2(log2>=-4);
/* prefix: keep adding the step while the coder yields 1 bits */
while(log2<28 && get_rac(c, state+4+log2)){
v+= r;
log2++;
if(log2>0) r+=r;
}
/* tail: log2 raw bits, most significant first */
for(i=log2-1; i>=0; i--){
v+= get_rac(c, state+31-i)<<i;
}
return v;
}
| 0
|
389,689
|
check_for_opt_buffer_arg(typval_T *args, int idx)
{
    // An optional argument that was not given (VAR_UNKNOWN) is always
    // acceptable; otherwise apply the normal buffer-argument check.
    if (args[idx].v_type == VAR_UNKNOWN)
	return 1;
    return check_for_buffer_arg(args, idx) != 0;
}
| 0
|
256,416
|
/*
 * Build an RTCP-FB Generic NACK packet (RFC 4585, FMT=1) into *buf.
 * Layout: common RTCP-FB header (12 bytes) followed by nack_cnt FCI
 * entries of 4 bytes each (PID + bitmask of following lost packets).
 * On entry *length is the buffer capacity; on success it is set to the
 * number of bytes written. Returns PJ_ETOOSMALL when the buffer cannot
 * hold the packet.
 */
PJ_DEF(pj_status_t) pjmedia_rtcp_fb_build_nack(
pjmedia_rtcp_session *session,
void *buf,
pj_size_t *length,
unsigned nack_cnt,
const pjmedia_rtcp_fb_nack nack[])
{
pjmedia_rtcp_fb_common *hdr;
pj_uint8_t *p;
unsigned len, i;
PJ_ASSERT_RETURN(session && buf && length && nack_cnt && nack, PJ_EINVAL);
/* 3 words of header + one word per FCI entry */
len = (3 + nack_cnt) * 4;
if (len > *length)
return PJ_ETOOSMALL;
/* Build RTCP-FB NACK header */
hdr = (pjmedia_rtcp_fb_common*)buf;
pj_memcpy(hdr, &session->rtcp_fb_com, sizeof(*hdr));
hdr->rtcp_common.pt = RTCP_RTPFB;
hdr->rtcp_common.count = 1; /* FMT = 1 */
hdr->rtcp_common.length = pj_htons((pj_uint16_t)(len/4 - 1));
/* Build RTCP-FB NACK FCI */
p = (pj_uint8_t*)hdr + sizeof(*hdr);
for (i = 0; i < nack_cnt; ++i) {
pj_uint16_t val;
val = pj_htons((pj_uint16_t)nack[i].pid);
pj_memcpy(p, &val, 2);
val = pj_htons(nack[i].blp);
pj_memcpy(p+2, &val, 2);
p += 4;
}
/* Finally */
*length = len;
return PJ_SUCCESS;
}
| 0
|
432,273
|
static void flatview_destroy(FlatView *view)
{
    /* Free the AddressSpaceDispatch (when one was built), then the
     * range array and the view itself. */
    if (view->dispatch != NULL) {
        address_space_dispatch_free(view->dispatch);
    }
    g_free(view->ranges);
    g_free(view);
}
| 0
|
230,380
|
/* Allocate a zero-initialized XML attribute node from the pool. */
static pj_xml_attr *alloc_attr( pj_pool_t *pool )
{
return PJ_POOL_ZALLOC_T(pool, pj_xml_attr);
}
| 0
|
307,841
|
// Resolve the ciMethod referenced by a constant-pool entry, as seen from
// 'accessor'. invokedynamic sites use the resolved MethodHandle invoker
// (or a synthetic invokeBasic placeholder when unresolved); other invokes
// try the preresolved/loaded method first, then a name+signature lookup,
// and fall back to an unloaded-method placeholder when resolution fails.
ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
int index, Bytecodes::Code bc,
ciInstanceKlass* accessor) {
if (bc == Bytecodes::_invokedynamic) {
ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index);
bool is_resolved = !cpce->is_f1_null();
// FIXME: code generation could allow for null (unlinked) call site
// The call site could be made patchable as follows:
//   Load the appendix argument from the constant pool.
//   Test the appendix argument and jump to a known deopt routine if it is null.
//   Jump through a patchable call site, which is initially a deopt routine.
//   Patch the call site to the nmethod entry point of the static compiled lambda form.
//   As with other two-component call sites, both values must be independently verified.
if (is_resolved) {
// Get the invoker Method* from the constant pool.
// (The appendix argument, if any, will be noted in the method's signature.)
Method* adapter = cpce->f1_as_method();
return get_method(adapter);
}
// Fake a method that is equivalent to a declared method.
ciInstanceKlass* holder = get_instance_klass(SystemDictionary::MethodHandle_klass());
ciSymbol* name = ciSymbol::invokeBasic_name();
ciSymbol* signature = get_symbol(cpool->signature_ref_at(index));
return get_unloaded_method(holder, name, signature, accessor);
} else {
const int holder_index = cpool->klass_ref_index_at(index);
bool holder_is_accessible;
ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
// Get the method's name and signature.
Symbol* name_sym = cpool->name_ref_at(index);
Symbol* sig_sym = cpool->signature_ref_at(index);
if (cpool->has_preresolution()
|| (holder == ciEnv::MethodHandle_klass() &&
MethodHandles::is_signature_polymorphic_name(holder->get_Klass(), name_sym))) {
// Short-circuit lookups for JSR 292-related call sites.
// That is, do not rely only on name-based lookups, because they may fail
// if the names are not resolvable in the boot class loader (7056328).
switch (bc) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
{
Method* m = ConstantPool::method_at_if_loaded(cpool, index);
if (m != NULL) {
return get_method(m);
}
}
break;
}
}
if (holder_is_accessible) { // Our declared holder is loaded.
InstanceKlass* lookup = declared_holder->get_instanceKlass();
Method* m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
// Reject methods whose holder is not ready for this call kind:
// invokestatic needs an initialized holder, others a loaded one.
if (m != NULL &&
(bc == Bytecodes::_invokestatic
? m->method_holder()->is_not_initialized()
: !m->method_holder()->is_loaded())) {
m = NULL;
}
#ifdef ASSERT
if (m != NULL && ReplayCompiles && !ciReplay::is_loaded(m)) {
m = NULL;
}
#endif
if (m != NULL) {
// We found the method.
return get_method(m);
}
}
// Either the declared holder was not loaded, or the method could
// not be found. Create a dummy ciMethod to represent the failed
// lookup.
ciSymbol* name      = get_symbol(name_sym);
ciSymbol* signature = get_symbol(sig_sym);
return get_unloaded_method(declared_holder, name, signature, accessor);
}
}
| 0
|
225,971
|
GF_Err ihdr_box_size(GF_Box *s)
{
s->size += 14;
return GF_OK;
| 0
|
513,124
|
/*
 * Register a plugin descriptor in the global plugin_array. A previously
 * freed slot (PLUGIN_IS_FREED) is reused in place; otherwise the
 * descriptor is copied into plugin_mem_root and appended. Returns the
 * array-resident descriptor, or 0 when the append fails.
 */
static st_plugin_int *plugin_insert_or_reuse(struct st_plugin_int *plugin)
{
uint i;
struct st_plugin_int *tmp;
DBUG_ENTER("plugin_insert_or_reuse");
/* first pass: look for a freed slot to recycle */
for (i= 0; i < plugin_array.elements; i++)
{
tmp= *dynamic_element(&plugin_array, i, struct st_plugin_int **);
if (tmp->state == PLUGIN_IS_FREED)
{
memcpy(tmp, plugin, sizeof(struct st_plugin_int));
DBUG_RETURN(tmp);
}
}
/* no free slot: append a copy allocated on the plugin memroot */
if (insert_dynamic(&plugin_array, (uchar*)&plugin))
DBUG_RETURN(0);
tmp= *dynamic_element(&plugin_array, plugin_array.elements - 1,
struct st_plugin_int **)=
(struct st_plugin_int *) memdup_root(&plugin_mem_root, (uchar*)plugin,
sizeof(struct st_plugin_int));
DBUG_RETURN(tmp);
}
| 0
|
387,839
|
bool InstanceKlass::can_be_primary_super_slow() const {
  // Interfaces can never be primary supertypes; everything else defers
  // to the generic Klass check.
  return !is_interface() && Klass::can_be_primary_super_slow();
}
| 0
|
359,500
|
/* CLI handler: "no neighbor <peer> ebgp-multihop" — clears the
 * ebgp-multihop setting for the neighbor named in argv[0]. */
DEFUN (no_neighbor_ebgp_multihop,
no_neighbor_ebgp_multihop_cmd,
NO_NEIGHBOR_CMD2 "ebgp-multihop",
NO_STR
NEIGHBOR_STR
NEIGHBOR_ADDR_STR2
"Allow EBGP neighbors not on directly connected networks\n")
{
return peer_ebgp_multihop_unset_vty (vty, argv[0]);
}
| 0
|
411,910
|
/*
 * Sign 'digest' with 'private_key' and append the base64-encoded
 * signature, wrapped in BEGIN/END SIGNATURE lines, to the NUL-terminated
 * string in buf (capacity buf_len). Returns 0 on success, -1 on signing
 * failure or when the output would not fit.
 */
router_append_dirobj_signature(char *buf, size_t buf_len, const char *digest,
size_t digest_len, crypto_pk_env_t *private_key)
{
char *signature;
size_t i, keysize;
int siglen;
keysize = crypto_pk_keysize(private_key);
/* the raw signature is at most one key-size worth of bytes */
signature = tor_malloc(keysize);
siglen = crypto_pk_private_sign(private_key, signature, keysize,
digest, digest_len);
if (siglen < 0) {
log_warn(LD_BUG,"Couldn't sign digest.");
goto err;
}
if (strlcat(buf, "-----BEGIN SIGNATURE-----\n", buf_len) >= buf_len)
goto truncated;
i = strlen(buf);
if (base64_encode(buf+i, buf_len-i, signature, siglen) < 0) {
log_warn(LD_BUG,"couldn't base64-encode signature");
goto err;
}
if (strlcat(buf, "-----END SIGNATURE-----\n", buf_len) >= buf_len)
goto truncated;
/* success: free the raw signature and report 0 */
tor_free(signature);
return 0;
truncated:
log_warn(LD_BUG,"tried to exceed string length.");
err:
tor_free(signature);
return -1;
}
| 0
|
312,438
|
qf_cmd_get_stack(exarg_T *eap, int print_emsg)
{
    qf_info_T	*qi;

    // Non-location-list commands always use the global quickfix stack.
    if (!is_loclist_cmd(eap->cmdidx))
	return &ql_info;

    // Location-list command: the current window may not have a list.
    qi = GET_LOC_LIST(curwin);
    if (qi == NULL)
    {
	if (print_emsg)
	    emsg(_(e_no_location_list));
	return NULL;
    }
    return qi;
}
| 0
|
90,859
|
// Test helper: kick off an async usage/quota query for origin+type and
// reset the result fields to sentinels so the callback's values are
// distinguishable from stale state.
void GetUsageAndQuota(const GURL& origin, StorageType type) {
quota_status_ = kQuotaStatusUnknown;
usage_ = -1;
quota_ = -1;
quota_manager_->GetUsageAndQuota(origin, type,
callback_factory_.NewCallback(
&QuotaManagerTest::DidGetUsageAndQuota));
}
| 0
|
310,258
|
/*
 * Mark the cached directory documents as needing regeneration. The v2
 * networkstatus is always dirtied; the v1 directory/running-routers
 * stubs are only dirtied when missing or older than the regeneration
 * interval. Timestamps are only set when not already dirty, preserving
 * the time they first became stale.
 */
directory_set_dirty(void)
{
time_t now = time(NULL);
int set_v1_dirty=0;
/* Regenerate stubs only every 8 hours.
 * XXXX It would be nice to generate less often, but these are just
 * stubs: it doesn't matter. */
#define STUB_REGENERATE_INTERVAL (8*60*60)
if (!the_directory || !the_runningrouters.dir)
set_v1_dirty = 1;
else if (the_directory->published < now - STUB_REGENERATE_INTERVAL ||
the_runningrouters.published < now - STUB_REGENERATE_INTERVAL)
set_v1_dirty = 1;
if (set_v1_dirty) {
if (!the_directory_is_dirty)
the_directory_is_dirty = now;
if (!runningrouters_is_dirty)
runningrouters_is_dirty = now;
}
if (!the_v2_networkstatus_is_dirty)
the_v2_networkstatus_is_dirty = now;
}
| 0
|
432,257
|
static void mmio_write_wrapper(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t data, unsigned size)
{
    mmio_cbs *callbacks = (mmio_cbs *)opaque;
    /* Mask the address down to the target's word width — this matters
     * for 32-bit targets where hwaddr is wider than target_ulong. */
    hwaddr masked_addr = addr & ((target_ulong)(-1));

    if (callbacks->write) {
        callbacks->write(uc, masked_addr, size, data, callbacks->user_data_write);
    }
}
| 0
|
473,894
|
/*
 * Return the byte length of the CP949 character starting at p, driven by
 * the DFA in 'trans'. Yields a CHARFOUND length (1 or 2), INVALID for a
 * rejected byte sequence, or NEEDMORE when the buffer ends mid-character.
 */
cp949_mbc_enc_len(const UChar* p, const UChar* e, OnigEncoding enc ARG_UNUSED)
{
int firstbyte = *p++;
state_t s = trans[0][firstbyte];
/* RETURN(n): length n when the DFA accepted, otherwise invalid */
#define RETURN(n) \
return s == ACCEPT ? ONIGENC_CONSTRUCT_MBCLEN_CHARFOUND(n) : \
ONIGENC_CONSTRUCT_MBCLEN_INVALID()
if (s < 0) RETURN(1);
if (p == e) return ONIGENC_CONSTRUCT_MBCLEN_NEEDMORE(EncLen_CP949[firstbyte]-1);
s = trans[s][*p++];
RETURN(2);
#undef RETURN
}
| 0
|
244,179
|
/* Allocate and initialize a Stereo3D ('st3d') box. The 'tmp' variable
 * is declared by the ISOM_DECL_BOX_ALLOC macro. */
GF_Box *st3d_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_Stereo3DBox, GF_ISOM_BOX_TYPE_ST3D);
return (GF_Box *)tmp;
}
| 0
|
473,821
|
/*
 * Collect all case-fold equivalents for the character (and, with
 * multi-char folding, up to two following characters) starting at p.
 * Fills items[] with (byte_len, code_len, code[]) alternatives drawn
 * from the fold/unfold tables and returns the number of entries.
 * Handles the Turkish/Azeri dotted/dotless-i special case when enabled.
 */
onigenc_unicode_get_case_fold_codes_by_str(OnigEncoding enc,
OnigCaseFoldType flag, const OnigUChar* p, const OnigUChar* end,
OnigCaseFoldCodeItem items[])
{
int n, i, j, k, len;
OnigCodePoint code, codes[3];
CodePointList3 *to, *z3;
CodePointList2 *z2;
/* lazily build the global fold tables on first use */
if (CaseFoldInited == 0) init_case_fold_table();
n = 0;
code = ONIGENC_MBC_TO_CODE(enc, p, end);
len = enclen(enc, p, end);
#ifdef USE_UNICODE_CASE_FOLD_TURKISH_AZERI
/* I/i/dotted-I/dotless-i fold pairwise instead of via the tables */
if ((flag & ONIGENC_CASE_FOLD_TURKISH_AZERI) != 0) {
if (code == 0x0049) {
items[0].byte_len = len;
items[0].code_len = 1;
items[0].code[0] = 0x0131;
return 1;
}
else if (code == 0x0130) {
items[0].byte_len = len;
items[0].code_len = 1;
items[0].code[0] = 0x0069;
return 1;
}
else if (code == 0x0131) {
items[0].byte_len = len;
items[0].code_len = 1;
items[0].code[0] = 0x0049;
return 1;
}
else if (code == 0x0069) {
items[0].byte_len = len;
items[0].code_len = 1;
items[0].code[0] = 0x0130;
return 1;
}
}
#endif
if (onig_st_lookup(FoldTable, (st_data_t )code, (void* )&to) != 0) {
if (to->n == 1) {
/* folds to a single code point: emit it plus everything else
 * that unfolds to the same target (except the original) */
OnigCodePoint orig_code = code;
items[0].byte_len = len;
items[0].code_len = 1;
items[0].code[0] = to->code[0];
n++;
code = to->code[0];
if (onig_st_lookup(Unfold1Table, (st_data_t )code, (void* )&to) != 0) {
for (i = 0; i < to->n; i++) {
if (to->code[i] != orig_code) {
items[n].byte_len = len;
items[n].code_len = 1;
items[n].code[0] = to->code[i];
n++;
}
}
}
}
else if ((flag & INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) != 0) {
/* folds to 2 or 3 code points: emit every combination of each
 * component's unfold set, plus single codes unfolding to the
 * whole sequence */
OnigCodePoint cs[3][4];
int fn, ncs[3];
for (fn = 0; fn < to->n; fn++) {
cs[fn][0] = to->code[fn];
if (onig_st_lookup(Unfold1Table, (st_data_t )cs[fn][0],
(void* )&z3) != 0) {
for (i = 0; i < z3->n; i++) {
cs[fn][i+1] = z3->code[i];
}
ncs[fn] = z3->n + 1;
}
else
ncs[fn] = 1;
}
if (fn == 2) {
for (i = 0; i < ncs[0]; i++) {
for (j = 0; j < ncs[1]; j++) {
items[n].byte_len = len;
items[n].code_len = 2;
items[n].code[0] = cs[0][i];
items[n].code[1] = cs[1][j];
n++;
}
}
if (onig_st_lookup(Unfold2Table, (st_data_t )to->code,
(void* )&z2) != 0) {
for (i = 0; i < z2->n; i++) {
if (z2->code[i] == code) continue;
items[n].byte_len = len;
items[n].code_len = 1;
items[n].code[0] = z2->code[i];
n++;
}
}
}
else {
for (i = 0; i < ncs[0]; i++) {
for (j = 0; j < ncs[1]; j++) {
for (k = 0; k < ncs[2]; k++) {
items[n].byte_len = len;
items[n].code_len = 3;
items[n].code[0] = cs[0][i];
items[n].code[1] = cs[1][j];
items[n].code[2] = cs[2][k];
n++;
}
}
}
if (onig_st_lookup(Unfold3Table, (st_data_t )to->code,
(void* )&z2) != 0) {
for (i = 0; i < z2->n; i++) {
if (z2->code[i] == code) continue;
items[n].byte_len = len;
items[n].code_len = 1;
items[n].code[0] = z2->code[i];
n++;
}
}
}
/* multi char folded code is not head of another folded multi char */
flag = 0; /* DISABLE_CASE_FOLD_MULTI_CHAR(flag); */
}
}
else {
/* not a folding character itself: emit the codes that fold to it */
if (onig_st_lookup(Unfold1Table, (st_data_t )code, (void* )&to) != 0) {
for (i = 0; i < to->n; i++) {
items[n].byte_len = len;
items[n].code_len = 1;
items[n].code[0] = to->code[i];
n++;
}
}
}
if ((flag & INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) != 0) {
/* try 2- and 3-character sequences starting at p that fold to a
 * single code point */
p += len;
if (p < end) {
int clen;
codes[0] = code;
code = ONIGENC_MBC_TO_CODE(enc, p, end);
if (onig_st_lookup(FoldTable, (st_data_t )code, (void* )&to) != 0
&& to->n == 1) {
codes[1] = to->code[0];
}
else
codes[1] = code;
clen = enclen(enc, p, end);
len += clen;
if (onig_st_lookup(Unfold2Table, (st_data_t )codes, (void* )&z2) != 0) {
for (i = 0; i < z2->n; i++) {
items[n].byte_len = len;
items[n].code_len = 1;
items[n].code[0] = z2->code[i];
n++;
}
}
p += clen;
if (p < end) {
code = ONIGENC_MBC_TO_CODE(enc, p, end);
if (onig_st_lookup(FoldTable, (st_data_t )code, (void* )&to) != 0
&& to->n == 1) {
codes[2] = to->code[0];
}
else
codes[2] = code;
clen = enclen(enc, p, end);
len += clen;
if (onig_st_lookup(Unfold3Table, (st_data_t )codes,
(void* )&z2) != 0) {
for (i = 0; i < z2->n; i++) {
items[n].byte_len = len;
items[n].code_len = 1;
items[n].code[0] = z2->code[i];
n++;
}
}
}
}
}
return n;
}
| 0
|
448,535
|
static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)
{
iana_afi_t pkt_afi;
afi_t afi;
iana_safi_t pkt_safi;
safi_t safi;
struct stream *s;
struct peer_af *paf;
struct update_group *updgrp;
struct peer *updgrp_peer;
uint8_t subtype;
bool force_update = false;
bgp_size_t msg_length =
size - (BGP_MSG_ROUTE_REFRESH_MIN_SIZE - BGP_HEADER_SIZE);
/* If peer does not have the capability, send notification. */
if (!CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_ADV)) {
flog_err(EC_BGP_NO_CAP,
"%s [Error] BGP route refresh is not enabled",
peer->host);
bgp_notify_send(peer, BGP_NOTIFY_HEADER_ERR,
BGP_NOTIFY_HEADER_BAD_MESTYPE);
return BGP_Stop;
}
/* Status must be Established. */
if (!peer_established(peer)) {
flog_err(
EC_BGP_INVALID_STATUS,
"%s [Error] Route refresh packet received under status %s",
peer->host,
lookup_msg(bgp_status_msg, peer->status, NULL));
bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR,
bgp_fsm_error_subcode(peer->status));
return BGP_Stop;
}
s = peer->curr;
/* Parse packet. */
pkt_afi = stream_getw(s);
subtype = stream_getc(s);
pkt_safi = stream_getc(s);
/* Convert AFI, SAFI to internal values and check. */
if (bgp_map_afi_safi_iana2int(pkt_afi, pkt_safi, &afi, &safi)) {
zlog_info(
"%s REFRESH_REQ for unrecognized afi/safi: %s/%s - ignored",
peer->host, iana_afi2str(pkt_afi),
iana_safi2str(pkt_safi));
return BGP_PACKET_NOOP;
}
if (size != BGP_MSG_ROUTE_REFRESH_MIN_SIZE - BGP_HEADER_SIZE) {
uint8_t *end;
uint8_t when_to_refresh;
uint8_t orf_type;
uint16_t orf_len;
if (subtype) {
/* If the length, excluding the fixed-size message
* header, of the received ROUTE-REFRESH message with
* Message Subtype 1 and 2 is not 4, then the BGP
* speaker MUST send a NOTIFICATION message with the
* Error Code of "ROUTE-REFRESH Message Error" and the
* subcode of "Invalid Message Length".
*/
if (msg_length != 4) {
zlog_err(
"%s Enhanced Route Refresh message length error",
peer->host);
bgp_notify_send(
peer, BGP_NOTIFY_ROUTE_REFRESH_ERR,
BGP_NOTIFY_ROUTE_REFRESH_INVALID_MSG_LEN);
}
/* When the BGP speaker receives a ROUTE-REFRESH message
* with a "Message Subtype" field other than 0, 1, or 2,
* it MUST ignore the received ROUTE-REFRESH message.
*/
if (subtype > 2)
zlog_err(
"%s Enhanced Route Refresh invalid subtype",
peer->host);
}
if (msg_length < 5) {
zlog_info("%s ORF route refresh length error",
peer->host);
bgp_notify_send(peer, BGP_NOTIFY_CEASE,
BGP_NOTIFY_SUBCODE_UNSPECIFIC);
return BGP_Stop;
}
when_to_refresh = stream_getc(s);
end = stream_pnt(s) + (size - 5);
while ((stream_pnt(s) + 2) < end) {
orf_type = stream_getc(s);
orf_len = stream_getw(s);
/* orf_len in bounds? */
if ((stream_pnt(s) + orf_len) > end)
break; /* XXX: Notify instead?? */
if (orf_type == ORF_TYPE_PREFIX
|| orf_type == ORF_TYPE_PREFIX_OLD) {
uint8_t *p_pnt = stream_pnt(s);
uint8_t *p_end = stream_pnt(s) + orf_len;
struct orf_prefix orfp;
uint8_t common = 0;
uint32_t seq;
int psize;
char name[BUFSIZ];
int ret = CMD_SUCCESS;
if (bgp_debug_neighbor_events(peer)) {
zlog_debug(
"%pBP rcvd Prefixlist ORF(%d) length %d",
peer, orf_type, orf_len);
}
/* we're going to read at least 1 byte of common
* ORF header,
* and 7 bytes of ORF Address-filter entry from
* the stream
*/
if (orf_len < 7)
break;
/* ORF prefix-list name */
snprintf(name, sizeof(name), "%s.%d.%d",
peer->host, afi, safi);
while (p_pnt < p_end) {
/* If the ORF entry is malformed, want
* to read as much of it
* as possible without going beyond the
* bounds of the entry,
* to maximise debug information.
*/
int ok;
memset(&orfp, 0, sizeof(orfp));
common = *p_pnt++;
/* after ++: p_pnt <= p_end */
if (common
& ORF_COMMON_PART_REMOVE_ALL) {
if (bgp_debug_neighbor_events(
peer))
zlog_debug(
"%pBP rcvd Remove-All pfxlist ORF request",
peer);
prefix_bgp_orf_remove_all(afi,
name);
break;
}
ok = ((uint32_t)(p_end - p_pnt)
>= sizeof(uint32_t));
if (ok) {
memcpy(&seq, p_pnt,
sizeof(uint32_t));
p_pnt += sizeof(uint32_t);
orfp.seq = ntohl(seq);
} else
p_pnt = p_end;
/* val checked in prefix_bgp_orf_set */
if (p_pnt < p_end)
orfp.ge = *p_pnt++;
/* val checked in prefix_bgp_orf_set */
if (p_pnt < p_end)
orfp.le = *p_pnt++;
if ((ok = (p_pnt < p_end)))
orfp.p.prefixlen = *p_pnt++;
/* afi checked already */
orfp.p.family = afi2family(afi);
/* 0 if not ok */
psize = PSIZE(orfp.p.prefixlen);
/* valid for family ? */
if (psize > prefix_blen(&orfp.p)) {
ok = 0;
psize = prefix_blen(&orfp.p);
}
/* valid for packet ? */
if (psize > (p_end - p_pnt)) {
ok = 0;
psize = p_end - p_pnt;
}
if (psize > 0)
memcpy(&orfp.p.u.prefix, p_pnt,
psize);
p_pnt += psize;
if (bgp_debug_neighbor_events(peer)) {
char buf[INET6_BUFSIZ];
zlog_debug(
"%pBP rcvd %s %s seq %u %s/%d ge %d le %d%s",
peer,
(common & ORF_COMMON_PART_REMOVE
? "Remove"
: "Add"),
(common & ORF_COMMON_PART_DENY
? "deny"
: "permit"),
orfp.seq,
inet_ntop(
orfp.p.family,
&orfp.p.u.prefix,
buf,
INET6_BUFSIZ),
orfp.p.prefixlen,
orfp.ge, orfp.le,
ok ? "" : " MALFORMED");
}
if (ok)
ret = prefix_bgp_orf_set(
name, afi, &orfp,
(common & ORF_COMMON_PART_DENY
? 0
: 1),
(common & ORF_COMMON_PART_REMOVE
? 0
: 1));
if (!ok || (ok && ret != CMD_SUCCESS)) {
zlog_info(
"%pBP Received misformatted prefixlist ORF. Remove All pfxlist",
peer);
prefix_bgp_orf_remove_all(afi,
name);
break;
}
}
peer->orf_plist[afi][safi] =
prefix_bgp_orf_lookup(afi, name);
}
stream_forward_getp(s, orf_len);
}
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP rcvd Refresh %s ORF request", peer,
when_to_refresh == REFRESH_DEFER
? "Defer"
: "Immediate");
if (when_to_refresh == REFRESH_DEFER)
return BGP_PACKET_NOOP;
}
/* First update is deferred until ORF or ROUTE-REFRESH is received */
if (CHECK_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_ORF_WAIT_REFRESH))
UNSET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_ORF_WAIT_REFRESH);
paf = peer_af_find(peer, afi, safi);
if (paf && paf->subgroup) {
if (peer->orf_plist[afi][safi]) {
updgrp = PAF_UPDGRP(paf);
updgrp_peer = UPDGRP_PEER(updgrp);
updgrp_peer->orf_plist[afi][safi] =
peer->orf_plist[afi][safi];
}
/* Avoid supressing duplicate routes later
* when processing in subgroup_announce_table().
*/
force_update = true;
/* If the peer is configured for default-originate clear the
* SUBGRP_STATUS_DEFAULT_ORIGINATE flag so that we will
* re-advertise the
* default
*/
if (CHECK_FLAG(paf->subgroup->sflags,
SUBGRP_STATUS_DEFAULT_ORIGINATE))
UNSET_FLAG(paf->subgroup->sflags,
SUBGRP_STATUS_DEFAULT_ORIGINATE);
}
if (subtype == BGP_ROUTE_REFRESH_BORR) {
/* A BGP speaker that has received the Graceful Restart
* Capability from its neighbor MUST ignore any BoRRs for
* an <AFI, SAFI> from the neighbor before the speaker
* receives the EoR for the given <AFI, SAFI> from the
* neighbor.
*/
if (CHECK_FLAG(peer->cap, PEER_CAP_RESTART_RCV)
&& !CHECK_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_EOR_RECEIVED)) {
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP rcvd route-refresh (BoRR) for %s/%s before EoR",
peer, afi2str(afi), safi2str(safi));
return BGP_PACKET_NOOP;
}
if (peer->t_refresh_stalepath) {
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP rcvd route-refresh (BoRR) for %s/%s, whereas BoRR already received",
peer, afi2str(afi), safi2str(safi));
return BGP_PACKET_NOOP;
}
SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_BORR_RECEIVED);
UNSET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_EORR_RECEIVED);
/* When a BGP speaker receives a BoRR message from
* a peer, it MUST mark all the routes with the given
* Address Family Identifier and Subsequent Address
* Family Identifier, <AFI, SAFI> [RFC2918], from
* that peer as stale.
*/
if (peer_active_nego(peer)) {
SET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_ENHANCED_REFRESH);
bgp_set_stale_route(peer, afi, safi);
}
if (peer_established(peer))
thread_add_timer(bm->master,
bgp_refresh_stalepath_timer_expire,
paf, peer->bgp->stalepath_time,
&peer->t_refresh_stalepath);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP rcvd route-refresh (BoRR) for %s/%s, triggering timer for %u seconds",
peer, afi2str(afi), safi2str(safi),
peer->bgp->stalepath_time);
} else if (subtype == BGP_ROUTE_REFRESH_EORR) {
if (!peer->t_refresh_stalepath) {
zlog_err(
"%pBP rcvd route-refresh (EoRR) for %s/%s, whereas no BoRR received",
peer, afi2str(afi), safi2str(safi));
return BGP_PACKET_NOOP;
}
THREAD_OFF(peer->t_refresh_stalepath);
SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_EORR_RECEIVED);
UNSET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_BORR_RECEIVED);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP rcvd route-refresh (EoRR) for %s/%s, stopping BoRR timer",
peer, afi2str(afi), safi2str(safi));
if (peer->nsf[afi][safi])
bgp_clear_stale_route(peer, afi, safi);
} else {
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP rcvd route-refresh (REQUEST) for %s/%s",
peer, afi2str(afi), safi2str(safi));
/* In response to a "normal route refresh request" from the
* peer, the speaker MUST send a BoRR message.
*/
if (CHECK_FLAG(peer->cap, PEER_CAP_ENHANCED_RR_RCV)) {
/* For a BGP speaker that supports the BGP Graceful
* Restart, it MUST NOT send a BoRR for an <AFI, SAFI>
* to a neighbor before it sends the EoR for the
* <AFI, SAFI> to the neighbor.
*/
if (!CHECK_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_EOR_SEND)) {
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP rcvd route-refresh (REQUEST) for %s/%s before EoR",
peer, afi2str(afi),
safi2str(safi));
return BGP_PACKET_NOOP;
}
bgp_route_refresh_send(peer, afi, safi, 0, 0, 0,
BGP_ROUTE_REFRESH_BORR);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP sending route-refresh (BoRR) for %s/%s",
peer, afi2str(afi), safi2str(safi));
/* Set flag Ready-To-Send to know when we can send EoRR
* message.
*/
SET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_BORR_SEND);
UNSET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_EORR_SEND);
}
}
/* Perform route refreshment to the peer */
bgp_announce_route(peer, afi, safi, force_update);
/* No FSM action necessary */
return BGP_PACKET_NOOP;
}
| 0
|
405,350
|
/*
 * Resolve the policies that apply to flow "fl" and build a transform
 * bundle for them.
 *
 * Return values:
 *  - a real bundle (xfrm_dst) on success;
 *  - NULL when no policy matched, or on -EREMOTE (policy refs dropped,
 *    caller proceeds without transformation);
 *  - ERR_PTR() on failure (policy refs dropped, stats bumped on lookup
 *    errors).
 */
static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
					   const struct flowi *fl,
					   u16 family, u8 dir,
					   struct xfrm_flo *xflo, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int num_pols = 0, num_xfrms = 0, err;
	struct xfrm_dst *xdst;

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	num_pols = 1;
	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
	err = xfrm_expand_policies(fl, family, pols,
				   &num_pols, &num_xfrms);
	if (err < 0)
		goto inc_error;
	if (num_pols == 0)
		return NULL;
	if (num_xfrms <= 0)
		goto make_dummy_bundle;

	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
					      xflo->dst_orig);
	if (IS_ERR(xdst)) {
		err = PTR_ERR(xdst);
		if (err == -EREMOTE) {
			/* Drop the policy references and let the caller
			 * fall back to the original dst. */
			xfrm_pols_put(pols, num_pols);
			return NULL;
		}
		if (err != -EAGAIN)
			goto error;
		goto make_dummy_bundle;
	} else if (xdst == NULL) {
		num_xfrms = 0;
		goto make_dummy_bundle;
	}

	return xdst;

make_dummy_bundle:
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build template (no xfrm_states).*/
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	/* The dummy bundle inherits the policy references taken above. */
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	return xdst;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
| 0
|
257,454
|
// Materialize each shared node once in the output graph, then rewrite its
// inputs: references to non-shared nodes are redirected to their
// replica-0 copies (shared-node inputs keep their original names).
void AutoParallel::AddSharedNodes(GraphDef* graph) {
  const string replica_prefix =
      strings::StrCat(kAutoParallelPrefix, "-Replica-", 0);
  for (const auto& shared_name : shared_nodes_) {
    auto* copy = graph->add_node();
    *copy = *all_nodes_[shared_name];
    const int num_inputs = copy->input_size();
    for (int idx = 0; idx < num_inputs; ++idx) {
      const string& input = copy->input(idx);
      if (!NotSharedNode(NodeName(input))) {
        continue;
      }
      const string prefixed = AddPrefixToNodeName(input, replica_prefix);
      *copy->mutable_input(idx) = prefixed;
    }
  }
}
| 0
|
196,621
|
/*
 * Remove method "mid" from class "c".  MRB_CLASS_ORIGIN(c) first rewires
 * "c" to its origin class, so the deletion targets the real method table.
 * Raises NameError when the method is not present in that table.
 *
 * NOTE(review): no frozen-object check is performed here before mutating
 * the method table — confirm callers (or mt_del) enforce it.
 */
mrb_remove_method(mrb_state *mrb, struct RClass *c, mrb_sym mid)
{
  mt_tbl *h;

  MRB_CLASS_ORIGIN(c);
  h = c->mt;

  /* h may be NULL when the class has no method table yet. */
  if (h && mt_del(mrb, h, mid)) return;
  mrb_name_error(mrb, mid, "method '%n' not defined in %C", mid, c);
}
| 1
|
473,838
|
/*
 * Bump the global hash-collision statistics for the hash type that just
 * collided.
 *
 * Fix: the two string counters were swapped — the case-sensitive string
 * hash (type_strhash) incremented collision.strcase while the
 * case-insensitive hash (type_strcasehash) incremented collision.str.
 * Each counter now matches its hash type.
 */
count_collision(const struct st_hash_type *type)
{
    collision.all++;
    if (type == &type_numhash) {
        collision.num++;
    }
    else if (type == &type_strhash) {
        /* case-sensitive string hash */
        collision.str++;
    }
    else if (type == &type_strcasehash) {
        /* case-insensitive string hash */
        collision.strcase++;
    }
}
| 0
|
252,417
|
/* Append "n" elements from pElements to the end of pArray, growing the
 * array as needed.  Returns MZ_TRUE on success, MZ_FALSE if the resize
 * (allocation) fails; on failure the array is left unchanged.
 *
 * Fix: appending zero elements now returns early.  The old code fell
 * through to memcpy(), which is undefined behavior when pElements is
 * NULL even for a zero-length copy (C11 7.24.1). */
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t orig_size = pArray->m_size;
  if (!n)
    return MZ_TRUE;
  if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
    return MZ_FALSE;
  memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
         pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}
| 0
|
516,243
|
/*
 * Parse an RSS (do_rss == true) or hash-report (do_rss == false)
 * configuration command from the control-queue iovec and load the result
 * into n->rss_data.
 *
 * Returns the number of receive queues to use, or 0 on any validation
 * failure — in which case the error is traced and RSS is disabled.
 *
 * Only change vs. the original: the "Can get key buffer" error message
 * typo is fixed to "Can't get key buffer"; all logic is unchanged.
 */
static uint16_t virtio_net_handle_rss(VirtIONet *n,
                                      struct iovec *iov,
                                      unsigned int iov_cnt,
                                      bool do_rss)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_rss_config cfg;
    size_t s, offset = 0, size_get;
    uint16_t queues, i;
    struct {
        uint16_t us;
        uint8_t b;
    } QEMU_PACKED temp;
    const char *err_msg = "";
    uint32_t err_value = 0;

    /* The matching feature must have been negotiated for this subcommand. */
    if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
        err_msg = "RSS is not negotiated";
        goto error;
    }
    if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) {
        err_msg = "Hash report is not negotiated";
        goto error;
    }

    /* Fixed-size head of the config, up to the variable indirection table. */
    size_get = offsetof(struct virtio_net_rss_config, indirection_table);
    s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
    if (s != size_get) {
        err_msg = "Short command buffer";
        err_value = (uint32_t)s;
        goto error;
    }
    n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
    /* The field carries a mask; table length is mask + 1. */
    n->rss_data.indirections_len =
        virtio_lduw_p(vdev, &cfg.indirection_table_mask);
    n->rss_data.indirections_len++;
    if (!do_rss) {
        n->rss_data.indirections_len = 1;
    }
    if (!is_power_of_2(n->rss_data.indirections_len)) {
        err_msg = "Invalid size of indirection table";
        err_value = n->rss_data.indirections_len;
        goto error;
    }
    if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
        err_msg = "Too large indirection table";
        err_value = n->rss_data.indirections_len;
        goto error;
    }
    n->rss_data.default_queue = do_rss ?
        virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
    if (n->rss_data.default_queue >= n->max_queues) {
        err_msg = "Invalid default queue";
        err_value = n->rss_data.default_queue;
        goto error;
    }

    /* Variable-length indirection table follows the fixed head. */
    offset += size_get;
    size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
    g_free(n->rss_data.indirections_table);
    n->rss_data.indirections_table = g_malloc(size_get);
    if (!n->rss_data.indirections_table) {
        /* g_malloc() aborts on OOM; this check is defensive only. */
        err_msg = "Can't allocate indirections table";
        err_value = n->rss_data.indirections_len;
        goto error;
    }
    s = iov_to_buf(iov, iov_cnt, offset,
                   n->rss_data.indirections_table, size_get);
    if (s != size_get) {
        err_msg = "Short indirection table buffer";
        err_value = (uint32_t)s;
        goto error;
    }
    /* Byte-swap each table entry in place per the device endianness. */
    for (i = 0; i < n->rss_data.indirections_len; ++i) {
        uint16_t val = n->rss_data.indirections_table[i];
        n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
    }

    /* Trailing pair: queue count (uint16) then hash-key length (uint8). */
    offset += size_get;
    size_get = sizeof(temp);
    s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
    if (s != size_get) {
        err_msg = "Can't get queues";
        err_value = (uint32_t)s;
        goto error;
    }
    queues = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queues;
    if (queues == 0 || queues > n->max_queues) {
        err_msg = "Invalid number of queues";
        err_value = queues;
        goto error;
    }
    if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
        err_msg = "Invalid key size";
        err_value = temp.b;
        goto error;
    }
    if (!temp.b && n->rss_data.hash_types) {
        err_msg = "No key provided";
        err_value = 0;
        goto error;
    }
    if (!temp.b && !n->rss_data.hash_types) {
        /* No key and no hash types: the guest is turning RSS off. */
        virtio_net_disable_rss(n);
        return queues;
    }

    /* Finally, the hash key itself. */
    offset += size_get;
    size_get = temp.b;
    s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
    if (s != size_get) {
        err_msg = "Can't get key buffer";
        err_value = (uint32_t)s;
        goto error;
    }
    n->rss_data.enabled = true;

    if (!n->rss_data.populate_hash) {
        if (!virtio_net_attach_epbf_rss(n)) {
            /* EBPF must be loaded for vhost */
            if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
                warn_report("Can't load eBPF RSS for vhost");
                goto error;
            }
            /* fallback to software RSS */
            warn_report("Can't load eBPF RSS - fallback to software RSS");
            n->rss_data.enabled_software_rss = true;
        }
    } else {
        /* use software RSS for hash populating */
        /* and detach eBPF if was loaded before */
        virtio_net_detach_epbf_rss(n);
        n->rss_data.enabled_software_rss = true;
    }

    trace_virtio_net_rss_enable(n->rss_data.hash_types,
                                n->rss_data.indirections_len,
                                temp.b);
    return queues;
error:
    trace_virtio_net_rss_error(err_msg, err_value);
    virtio_net_disable_rss(n);
    return 0;
}
| 0
|
328,928
|
/* Destroy a constant-pool object by dispatching to the type-specific
 * destructor registered in its metadata.  Safe to call with NULL or with
 * partially-initialized metadata. */
static void delete_obj(RBinJavaCPTypeObj *obj) {
	RBinJavaCPTypeMetas *meta;
	if (!obj || !obj->metas) {
		return;
	}
	meta = obj->metas->type_info;
	if (meta && meta->allocs && meta->allocs->delete_obj) {
		meta->allocs->delete_obj (obj);
	}
}
| 0
|
424,532
|
/*
 * Destroy a VideoClientContextPriv and everything it owns: queued frames,
 * the frame queue, its critical section, the current presentation
 * reference, and the surface buffer pool.
 *
 * NOTE(review): priv is dereferenced unconditionally — confirm callers
 * never pass NULL.
 */
static void VideoClientContextPriv_free(VideoClientContextPriv* priv)
{
	/* Drain the queue under its lock so a concurrent producer can't race us. */
	EnterCriticalSection(&priv->framesLock);
	while (Queue_Count(priv->frames))
	{
		VideoFrame* frame = Queue_Dequeue(priv->frames);
		if (frame)
			VideoFrame_free(&frame);
	}
	Queue_Free(priv->frames);
	LeaveCriticalSection(&priv->framesLock);
	/* The lock must be released before it can be deleted. */
	DeleteCriticalSection(&priv->framesLock);
	if (priv->currentPresentation)
		PresentationContext_unref(priv->currentPresentation);
	BufferPool_Free(priv->surfacePool);
	free(priv);
}
| 0
|
225,426
|
/* VIDIOC_STREAMON handler for the loopback device.
 *
 * An OUTPUT stream turns the opener into a writer (allocating buffers on
 * first use) and marks the device capture-ready; a CAPTURE stream turns
 * the opener into a reader, but only once a writer has started (-EIO
 * otherwise).  Any other buffer type is rejected with -EINVAL. */
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct v4l2_loopback_device *dev;
	struct v4l2_loopback_opener *opener;

	MARK();
	dev = v4l2loopback_getdevice(file);
	opener = fh_to_opener(fh);

	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		if (!dev->ready_for_capture) {
			int err = allocate_buffers(dev);
			if (err < 0)
				return err;
		}
		opener->type = WRITER;
		dev->ready_for_output = 0;
		dev->ready_for_capture++;
		return 0;
	}

	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		if (!dev->ready_for_capture)
			return -EIO;
		opener->type = READER;
		return 0;
	}

	return -EINVAL;
}
| 0
|
278,260
|
tabstop_at(colnr_T col, int ts, int *vts)
{
    colnr_T	boundary = 0;	// running end column of each tab stop
    int		count;
    int		idx;

    // Without a variable tab-stop list every tab has the fixed width "ts".
    if (vts == NULL || vts[0] == 0)
	return ts;

    // vts[0] holds the number of entries; widths start at vts[1].
    count = vts[0];
    for (idx = 1; idx <= count; ++idx)
    {
	boundary += vts[idx];
	if (boundary > col)
	    return vts[idx];	// "col" falls inside this stop
    }

    // Past the last explicit stop: its width repeats indefinitely.
    return vts[count];
}
| 0
|
369,945
|
/*
 * Return the mm_struct of @task if the caller may inspect its memory,
 * or an ERR_PTR() (-EINVAL when the task has no mm, -EPERM when access
 * is denied).
 *
 * On success the mm reference taken by get_task_mm() is handed to the
 * caller, who must drop it with mmput().
 */
static struct mm_struct *__check_mem_permission(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = get_task_mm(task);
	if (!mm)
		return ERR_PTR(-EINVAL);

	/*
	 * A task can always look at itself, in case it chooses
	 * to use system calls instead of load instructions.
	 */
	if (task == current)
		return mm;

	/*
	 * If current is actively ptrace'ing, and would also be
	 * permitted to freshly attach with ptrace now, permit it.
	 */
	if (task_is_stopped_or_traced(task)) {
		int match;
		/* ptrace_parent() requires RCU protection. */
		rcu_read_lock();
		match = (ptrace_parent(task) == current);
		rcu_read_unlock();
		if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
			return mm;
	}

	/*
	 * No one else is allowed.
	 */
	mmput(mm);
	return ERR_PTR(-EPERM);
}
| 0
|
438,662
|
/* Release an rpmsg channel: unregister the matching child device from
 * the owning virtio processor's device. */
static int virtio_rpmsg_release_channel(struct rpmsg_device *rpdev,
					struct rpmsg_channel_info *chinfo)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);

	return rpmsg_unregister_device(&vch->vrp->vdev->dev, chinfo);
}
| 0
|
255,081
|
// Compute the direction in which to extend from qnear, blending the random
// direction (toward qrand) with the vector field vf_ evaluated at qnear.
// Returns a vfdim_-dimensional direction vector.
Eigen::VectorXd ompl::geometric::VFRRT::getNewDirection(const base::State *qnear, const base::State *qrand)
{
    // Set vrand to be the normalized vector from qnear to qrand
    Eigen::VectorXd vrand(vfdim_);
    for (unsigned int i = 0; i < vfdim_; i++)
        vrand[i] = *si_->getStateSpace()->getValueAddressAtIndex(qrand, i) -
                   *si_->getStateSpace()->getValueAddressAtIndex(qnear, i);
    // Normalize by the state-space distance between the two states.
    vrand /= si_->distance(qnear, qrand);
    // Get the vector at qnear, and normalize
    Eigen::VectorXd vfield = vf_(qnear);
    const double lambdaScale = vfield.norm();
    // In the case where there is no vector field present, vfield.norm() == 0,
    // return the direction of the random state.
    if (lambdaScale < std::numeric_limits<float>::epsilon())
        return vrand;
    vfield /= lambdaScale;
    // Sample a weight from the distribution
    const double omega = biasedSampling(vrand, vfield, lambdaScale);
    // Determine updated direction
    return computeAlphaBeta(omega, vrand, vfield);
}
| 0
|
253,716
|
/* Convenience wrapper: copy a DM work area into a storage block.  The
 * final "false" selects the to-SB direction in ccp_copy_to_from_sb(). */
static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	const bool from_sb = false;

	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, from_sb);
}
| 0
|
90,233
|
// Out-of-line empty destructor (keeps the vtable/implementation details in
// this translation unit).
CellularNetwork::~CellularNetwork() {
}
| 0
|
507,764
|
/*
 * Produce an ECPKPARAMETERS description of "group": a named curve when the
 * group's ASN.1 flag is set (and the curve NID is known), otherwise
 * explicit ECPARAMETERS.
 *
 * If "params" is NULL a fresh structure is allocated; otherwise "params"
 * is reused after releasing whatever value it previously held.  Returns
 * the filled structure or NULL on error.
 *
 * NOTE(review): on failure the structure is freed even when it was
 * supplied by the caller, so a caller that frees "params" again after a
 * NULL return would double-free — confirm callers expect this contract.
 */
ECPKPARAMETERS *EC_GROUP_get_ecpkparameters(const EC_GROUP *group,
                                            ECPKPARAMETERS *params)
{
    int ok = 1, tmp;
    ECPKPARAMETERS *ret = params;

    if (ret == NULL) {
        if ((ret = ECPKPARAMETERS_new()) == NULL) {
            ECerr(EC_F_EC_GROUP_GET_ECPKPARAMETERS, ERR_R_MALLOC_FAILURE);
            return NULL;
        }
    } else {
        /* Release the previous contents before overwriting. */
        if (ret->type == ECPKPARAMETERS_TYPE_NAMED)
            ASN1_OBJECT_free(ret->value.named_curve);
        else if (ret->type == ECPKPARAMETERS_TYPE_EXPLICIT
                 && ret->value.parameters != NULL)
            ECPARAMETERS_free(ret->value.parameters);
    }

    if (EC_GROUP_get_asn1_flag(group)) {
        /*
         * use the asn1 OID to describe the elliptic curve parameters
         */
        tmp = EC_GROUP_get_curve_name(group);
        if (tmp) {
            ASN1_OBJECT *asn1obj = OBJ_nid2obj(tmp);

            if (asn1obj == NULL || OBJ_length(asn1obj) == 0) {
                ASN1_OBJECT_free(asn1obj);
                ECerr(EC_F_EC_GROUP_GET_ECPKPARAMETERS, EC_R_MISSING_OID);
                ok = 0;
            } else {
                ret->type = ECPKPARAMETERS_TYPE_NAMED;
                ret->value.named_curve = asn1obj;
            }
        } else
            /* we don't know the nid => ERROR */
            ok = 0;
    } else {
        /* use the ECPARAMETERS structure */
        ret->type = ECPKPARAMETERS_TYPE_EXPLICIT;
        if ((ret->value.parameters =
             EC_GROUP_get_ecparameters(group, NULL)) == NULL)
            ok = 0;
    }

    if (!ok) {
        ECPKPARAMETERS_free(ret);
        return NULL;
    }
    return ret;
}
| 0
|
310,267
|
/*
 * Build, sign, and cache a v1 "running-routers" network-status document.
 *
 * Returns a pointer to the cached document (the_runningrouters) on
 * success, or NULL on failure.  The document buffer is owned by the cache
 * once set_cached_dir() has been called.
 *
 * NOTE(review): the "router-status" line body is the empty string here —
 * confirm that is intentional for this code path.
 */
generate_runningrouters(void)
{
  char *s=NULL;
  char digest[DIGEST_LEN];
  char published[ISO_TIME_LEN+1];
  size_t len;
  crypto_pk_env_t *private_key = get_server_identity_key();
  char *identity_pkey; /* Identity key, DER64-encoded. */
  size_t identity_pkey_len;

  if (crypto_pk_write_public_key_to_string(private_key,&identity_pkey,
                                           &identity_pkey_len)<0) {
    log_warn(LD_BUG,"write identity_pkey to string failed!");
    goto err;
  }
  format_iso_time(published, time(NULL));

  len = 2048;
  s = tor_malloc_zero(len);
  tor_snprintf(s, len,
               "network-status\n"
               "published %s\n"
               "router-status %s\n"
               "dir-signing-key\n%s"
               "directory-signature %s\n",
               published, "", identity_pkey,
               get_options()->Nickname);
  /* identity_pkey has been copied into the document; free it now. */
  tor_free(identity_pkey);
  if (router_get_runningrouters_hash(s,digest)) {
    log_warn(LD_BUG,"couldn't compute digest");
    goto err;
  }
  note_crypto_pk_op(SIGN_DIR);
  if (router_append_dirobj_signature(s, len, digest, DIGEST_LEN,
                                     private_key)<0)
    goto err;

  /* Ownership of "s" passes to the cached-dir entry. */
  set_cached_dir(&the_runningrouters, s, time(NULL));
  runningrouters_is_dirty = 0;

  return &the_runningrouters;
 err:
  tor_free(s);
  return NULL;
}
| 0
|
393,462
|
// Push the attribute table of the class at stack position -2.
// Returns 1 (one value pushed) on success, SQ_ERROR on failure.
static SQInteger class_getattributes(HSQUIRRELVM v)
{
    if (SQ_SUCCEEDED(sq_getattributes(v, -2)))
        return 1;
    return SQ_ERROR;
}
| 0
|
353,121
|
// Record that the current font has changed; the flag is presumably
// consumed lazily elsewhere before the next text operation — no work is
// done here.
void SplashOutputDev::updateFont(GfxState * /*state*/) {
  needFontUpdate = true;
}
| 0
|
383,307
|
/*
 * Allocate a truecolor image of sx * sy pixels.
 *
 * Returns the new image, or NULL on failure.  Fixes over the original:
 * - rejects non-positive dimensions and dimensions that would overflow
 *   the sizeof(int *) * sy / sx * sizeof(int) allocation sizes;
 * - checks every gdMalloc/gdCalloc result instead of dereferencing a
 *   potentially NULL pointer, rolling back partial allocations.
 */
gdImageCreateTrueColor (int sx, int sy)
{
  int i;
  gdImagePtr im;

  if (sx <= 0 || sy <= 0)
    return 0;
  /* (size_t)-1 is SIZE_MAX; guard both multiplications below. */
  if ((size_t) sy > (size_t) -1 / sizeof (int *) ||
      (size_t) sx > (size_t) -1 / sizeof (int))
    return 0;

  im = (gdImage *) gdMalloc (sizeof (gdImage));
  if (!im)
    return 0;
  memset (im, 0, sizeof (gdImage));
  im->tpixels = (int **) gdMalloc (sizeof (int *) * sy);
  if (!im->tpixels)
    {
      gdFree (im);
      return 0;
    }
  im->polyInts = 0;
  im->polyAllocated = 0;
  im->brush = 0;
  im->tile = 0;
  im->style = 0;
  for (i = 0; (i < sy); i++)
    {
      im->tpixels[i] = (int *) gdCalloc (
					  sx, sizeof (int));
      if (!im->tpixels[i])
	{
	  /* Roll back the rows allocated so far. */
	  while (--i >= 0)
	    gdFree (im->tpixels[i]);
	  gdFree (im->tpixels);
	  gdFree (im);
	  return 0;
	}
    }
  im->sx = sx;
  im->sy = sy;
  im->transparent = (-1);
  im->interlace = 0;
  im->trueColor = 1;
  /* 2.0.2: alpha blending is now on by default, and saving of alpha is
     off by default. This allows font antialiasing to work as expected
     on the first try in JPEGs -- quite important -- and also allows
     for smaller PNGs when saving of alpha channel is not really
     desired, which it usually isn't! */
  im->saveAlphaFlag = 0;
  im->alphaBlendingFlag = 1;
  im->thick = 1;
  return im;
}
| 0
|
326,600
|
/*
 * Normalize a Windows path in place: convert '\\' separators to '/' and
 * replace characters Windows forbids in file names (':' '*' '?' '"' '<'
 * '>' '|') with '_'.
 *
 * In non-UTF-8 multibyte locales a 0x5C byte can be the trail byte of a
 * multibyte character, so the byte-wise pass must skip such '\\' bytes;
 * a second, wide-character pass then handles the separators the first
 * pass could not safely convert.
 */
cleanup_pathname_win(char *path)
{
	wchar_t wc;
	char *p;
	size_t alen, l;
	int mb, complete, utf8;

	alen = 0;	/* total byte length, needed by the mbtowc pass */
	mb = 0;		/* previous byte was >127 (possible MB lead/trail) */
	complete = 1;	/* no separators were skipped in the first pass */
	utf8 = (strcmp(nl_langinfo(CODESET), "UTF-8") == 0)? 1: 0;
	for (p = path; *p != '\0'; p++) {
		++alen;
		if (*p == '\\') {
			/* If previous byte is smaller than 128,
			 * this is not second byte of multibyte characters,
			 * so we can replace '\' with '/'. */
			if (utf8 || !mb)
				*p = '/';
			else
				complete = 0;/* uncompleted. */
		} else if (*(unsigned char *)p > 127)
			mb = 1;
		else
			mb = 0;
		/* Rewrite the path name if its next character is unusable. */
		if (*p == ':' || *p == '*' || *p == '?' || *p == '"' ||
		    *p == '<' || *p == '>' || *p == '|')
			*p = '_';
	}
	if (complete)
		return;
	/*
	 * Convert path separator in wide-character.
	 */
	p = path;
	while (*p != '\0' && alen) {
		l = mbtowc(&wc, p, alen);
		if (l == (size_t)-1) {
			/* Invalid sequence: fall back to byte-wise
			 * conversion for the rest of the string. */
			while (*p != '\0') {
				if (*p == '\\')
					*p = '/';
				++p;
			}
			break;
		}
		if (l == 1 && wc == L'\\')
			*p = '/';
		p += l;
		alen -= l;
	}
}
| 0
|
247,117
|
GF_Err gf_fs_post_user_task(GF_FilterSession *fsess, Bool (*task_execute) (GF_FilterSession *fsess, void *callback, u32 *reschedule_ms), void *udta_callback, const char *log_name)
{
	GF_UserTask *utask;
	char *name_copy;

	if (!fsess || !task_execute)
		return GF_BAD_PARAM;

	GF_SAFEALLOC(utask, GF_UserTask);
	if (!utask)
		return GF_OUT_OF_MEM;

	utask->fsess = fsess;
	utask->callback = udta_callback;
	utask->task_execute = task_execute;

	/* the scheduler owns its own copy of the log name */
	name_copy = gf_strdup(log_name ? log_name : "user_task");
	gf_fs_post_task(fsess, gf_fs_user_task, NULL, NULL, name_copy, utask);
	return GF_OK;
}
| 0
|
512,354
|
/* Three-way comparison of two signed longlong values:
   -1 if a_val < b_val, 0 if equal, 1 if a_val > b_val. */
static inline int cmp_longs (longlong a_val, longlong b_val)
{
  if (a_val < b_val)
    return -1;
  return (a_val > b_val) ? 1 : 0;
}
| 0
|
336,137
|
/*
 * Create and register a new ip6gre device from netlink attributes.
 *
 * Returns 0 on success, -EEXIST when an identical tunnel already exists,
 * or the negative errno from encap setup / device registration.
 *
 * Cleanup over the original: the encap-setup branch declared an inner
 * "int err" that shadowed the outer one; the outer variable is reused.
 */
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct ip6_tnl *nt;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		err = ip6_tnl_encap_setup(nt, &ipencap);
		if (err < 0)
			return err;
	}

	ip6gre_netlink_parms(data, &nt->parms);

	/* Refuse to create a second tunnel with identical parameters. */
	if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);
	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);

	dev->features |= GRE6_FEATURES;
	dev->hw_features |= GRE6_FEATURES;

	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
		    (nt->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}

	err = register_netdevice(dev);
	if (err)
		goto out;

	/* Hold a device reference for the per-net tunnel list link. */
	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);

out:
	return err;
}
| 0
|
236,191
|
/* Compute the serialized size of a hypertext ('href') box: a fixed
 * 6-byte overhead plus the length of each optional string. */
GF_Err href_box_size(GF_Box *s)
{
	GF_TextHyperTextBox *href = (GF_TextHyperTextBox *) s;

	s->size += 6;
	if (href->URL) {
		s->size += strlen(href->URL);
	}
	if (href->URL_hint) {
		s->size += strlen(href->URL_hint);
	}
	return GF_OK;
}
| 0
|
242,582
|
/*
 * Apply the PE base relocations described by the .reloc section to the
 * loaded image.
 *
 * @context: parsed PE header info (ImageAddress, ImageSize, RelocDir)
 * @Section: the .reloc section header, used to locate the relocation
 *           blocks inside the on-disk image
 * @orig:    the unrelocated (file) image the relocation table is read from
 * @data:    the in-memory image whose words get patched
 *
 * Returns EFI_SUCCESS when all blocks were applied (or the table is
 * empty, or no adjustment is needed), EFI_UNSUPPORTED on any malformed
 * or out-of-bounds relocation.
 */
relocate_coff (PE_COFF_LOADER_IMAGE_CONTEXT *context,
	       EFI_IMAGE_SECTION_HEADER *Section,
	       void *orig, void *data)
{
	EFI_IMAGE_BASE_RELOCATION *RelocBase, *RelocBaseEnd;
	UINT64 Adjust;
	UINT16 *Reloc, *RelocEnd;
	char *Fixup, *FixupBase;
	UINT16 *Fixup16;
	UINT32 *Fixup32;
	UINT64 *Fixup64;
	int size = context->ImageSize;
	void *ImageEnd = (char *)orig + size;
	int n = 0;	/* block index, for diagnostics only */

	/* Alright, so here's how this works:
	 *
	 * context->RelocDir gives us two things:
	 * - the VA the table of base relocation blocks are (maybe) to be
	 *   mapped at (RelocDir->VirtualAddress)
	 * - the virtual size (RelocDir->Size)
	 *
	 * The .reloc section (Section here) gives us some other things:
	 * - the name! kind of. (Section->Name)
	 * - the virtual size (Section->VirtualSize), which should be the same
	 *   as RelocDir->Size
	 * - the virtual address (Section->VirtualAddress)
	 * - the file section size (Section->SizeOfRawData), which is
	 *   a multiple of OptHdr->FileAlignment.  Only useful for image
	 *   validation, not really useful for iteration bounds.
	 * - the file address (Section->PointerToRawData)
	 * - a bunch of stuff we don't use that's 0 in our binaries usually
	 * - Flags (Section->Characteristics)
	 *
	 * and then the thing that's actually at the file address is an array
	 * of EFI_IMAGE_BASE_RELOCATION structs with some values packed behind
	 * them.  The SizeOfBlock field of this structure includes the
	 * structure itself, and adding it to that structure's address will
	 * yield the next entry in the array.
	 */
	RelocBase = ImageAddress(orig, size, Section->PointerToRawData);
	/* RelocBaseEnd here is the address of the first entry /past/ the
	 * table. */
	RelocBaseEnd = ImageAddress(orig, size, Section->PointerToRawData +
				    Section->Misc.VirtualSize);

	/* Both NULL: no relocation table at all — nothing to do. */
	if (!RelocBase && !RelocBaseEnd)
		return EFI_SUCCESS;

	/* Exactly one NULL: the table straddles the end of the binary. */
	if (!RelocBase || !RelocBaseEnd) {
		perror(L"Reloc table overflows binary\n");
		return EFI_UNSUPPORTED;
	}

	Adjust = (UINTN)data - context->ImageAddress;

	/* Image loaded at its preferred base: no fixups needed. */
	if (Adjust == 0)
		return EFI_SUCCESS;

	while (RelocBase < RelocBaseEnd) {
		/* Packed UINT16 fixup entries follow the block header. */
		Reloc = (UINT16 *) ((char *) RelocBase + sizeof (EFI_IMAGE_BASE_RELOCATION));

		if (RelocBase->SizeOfBlock == 0) {
			perror(L"Reloc %d block size 0 is invalid\n", n);
			return EFI_UNSUPPORTED;
		} else if (RelocBase->SizeOfBlock > context->RelocDir->Size) {
			perror(L"Reloc %d block size %d greater than reloc dir"
			       "size %d, which is invalid\n", n,
			       RelocBase->SizeOfBlock,
			       context->RelocDir->Size);
			return EFI_UNSUPPORTED;
		}

		RelocEnd = (UINT16 *) ((char *) RelocBase + RelocBase->SizeOfBlock);
		if ((void *)RelocEnd < orig || (void *)RelocEnd > ImageEnd) {
			perror(L"Reloc %d entry overflows binary\n", n);
			return EFI_UNSUPPORTED;
		}

		FixupBase = ImageAddress(data, size, RelocBase->VirtualAddress);
		if (!FixupBase) {
			perror(L"Reloc %d Invalid fixupbase\n", n);
			return EFI_UNSUPPORTED;
		}

		while (Reloc < RelocEnd) {
			/* Low 12 bits: offset within the page; high 4 bits:
			 * relocation type. */
			Fixup = FixupBase + (*Reloc & 0xFFF);
			switch ((*Reloc) >> 12) {
			case EFI_IMAGE_REL_BASED_ABSOLUTE:
				/* Padding entry — nothing to patch. */
				break;

			case EFI_IMAGE_REL_BASED_HIGH:
				Fixup16 = (UINT16 *) Fixup;
				*Fixup16 = (UINT16) (*Fixup16 + ((UINT16) ((UINT32) Adjust >> 16)));
				break;

			case EFI_IMAGE_REL_BASED_LOW:
				Fixup16 = (UINT16 *) Fixup;
				*Fixup16 = (UINT16) (*Fixup16 + (UINT16) Adjust);
				break;

			case EFI_IMAGE_REL_BASED_HIGHLOW:
				Fixup32 = (UINT32 *) Fixup;
				*Fixup32 = *Fixup32 + (UINT32) Adjust;
				break;

			case EFI_IMAGE_REL_BASED_DIR64:
				Fixup64 = (UINT64 *) Fixup;
				*Fixup64 = *Fixup64 + (UINT64) Adjust;
				break;

			default:
				perror(L"Reloc %d Unknown relocation\n", n);
				return EFI_UNSUPPORTED;
			}
			Reloc += 1;
		}
		RelocBase = (EFI_IMAGE_BASE_RELOCATION *) RelocEnd;
		n++;
	}

	return EFI_SUCCESS;
}
| 0
|
344,821
|
/*
 * Decode a 32-bit unsigned integer stored in network (big-endian) byte
 * order at an arbitrarily aligned address.
 */
get_u32(const void *vp)
{
	const u_char *p = (const u_char *)vp;

	return (((u_int32_t)p[0] << 24) |
	    ((u_int32_t)p[1] << 16) |
	    ((u_int32_t)p[2] << 8) |
	    (u_int32_t)p[3]);
}
| 0
|
216,701
|
/*
 * Read the value array of directory entry "direntry" into freshly
 * allocated memory, capping the element count at "maxcount".
 *
 * On success *value points at the malloc'd data (caller frees with
 * _TIFFfree) and *count holds the number of elements read.  A zero count
 * or unknown type yields *value == NULL with TIFFReadDirEntryErrOk.
 *
 * NOTE(review): both count bounds are checked against 2147483647/size
 * before datasize is computed, so the assert((tmsize_t)datasize>0) below
 * should be unreachable for validated input — confirm the inline-offset
 * copy paths (datasize <= 4 / <= 8) match the classic/BigTIFF entry
 * layouts expected by callers.
 */
static enum TIFFReadDirEntryErr TIFFReadDirEntryArrayWithLimit(
    TIFF* tif, TIFFDirEntry* direntry, uint32* count, uint32 desttypesize,
    void** value, uint64 maxcount)
{
	int typesize;
	uint32 datasize;
	void* data;
	uint64 target_count64;

	typesize=TIFFDataWidth(direntry->tdir_type);

	/* Clamp the on-file count to the caller-imposed limit. */
	target_count64 = (direntry->tdir_count > maxcount) ?
	    maxcount : direntry->tdir_count;

	if ((target_count64==0)||(typesize==0))
	{
		*value=0;
		return(TIFFReadDirEntryErrOk);
	}
	(void) desttypesize;

	/*
	 * As a sanity check, make sure we have no more than a 2GB tag array
	 * in either the current data type or the dest data type.  This also
	 * avoids problems with overflow of tmsize_t on 32bit systems.
	 */
	if ((uint64)(2147483647/typesize)<target_count64)
		return(TIFFReadDirEntryErrSizesan);
	if ((uint64)(2147483647/desttypesize)<target_count64)
		return(TIFFReadDirEntryErrSizesan);

	*count=(uint32)target_count64;
	datasize=(*count)*typesize;
	assert((tmsize_t)datasize>0);
	data=_TIFFCheckMalloc(tif, *count, typesize, "ReadDirEntryArray");
	if (data==0)
		return(TIFFReadDirEntryErrAlloc);
	if (!(tif->tif_flags&TIFF_BIGTIFF))
	{
		/* Classic TIFF: values of <= 4 bytes live inline in the
		 * entry's offset field; larger arrays are read from the
		 * 32-bit file offset stored there. */
		if (datasize<=4)
			_TIFFmemcpy(data,&direntry->tdir_offset,datasize);
		else
		{
			enum TIFFReadDirEntryErr err;
			uint32 offset = direntry->tdir_offset.toff_long;
			if (tif->tif_flags&TIFF_SWAB)
				TIFFSwabLong(&offset);
			err=TIFFReadDirEntryData(tif,(uint64)offset,(tmsize_t)datasize,data);
			if (err!=TIFFReadDirEntryErrOk)
			{
				_TIFFfree(data);
				return(err);
			}
		}
	}
	else
	{
		/* BigTIFF: the inline threshold is 8 bytes and the file
		 * offset is 64-bit. */
		if (datasize<=8)
			_TIFFmemcpy(data,&direntry->tdir_offset,datasize);
		else
		{
			enum TIFFReadDirEntryErr err;
			uint64 offset = direntry->tdir_offset.toff_long8;
			if (tif->tif_flags&TIFF_SWAB)
				TIFFSwabLong8(&offset);
			err=TIFFReadDirEntryData(tif,offset,(tmsize_t)datasize,data);
			if (err!=TIFFReadDirEntryErrOk)
			{
				_TIFFfree(data);
				return(err);
			}
		}
	}
	*value=data;
	return(TIFFReadDirEntryErrOk);
}
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.