idx
int64 | func
string | target
int64 |
|---|---|---|
37,038
|
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
int chan;
for (chan = 0; chan < 4; chan++) {
int i;
tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
(chan * 0x2000) | 0x0200);
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
for (i = 0; i < 6; i++)
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
if (tg3_wait_macro_done(tp))
return -EBUSY;
}
return 0;
}
| 0
|
211,985
|
static MagickBooleanType WritePALMImage(const ImageInfo *image_info,
Image *image)
{
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
currentOffset,
offset,
scene;
MagickSizeType
cc;
PixelPacket
transpix;
QuantizeInfo
*quantize_info;
register IndexPacket
*indexes;
register ssize_t
x;
register PixelPacket
*p;
ssize_t
y;
size_t
count,
bits_per_pixel,
bytes_per_row,
nextDepthOffset,
one;
unsigned char
bit,
byte,
color,
*last_row,
*one_row,
*ptr,
version;
unsigned int
transparentIndex;
unsigned short
color16,
flags;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=AcquireExceptionInfo();
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
quantize_info=AcquireQuantizeInfo(image_info);
flags=0;
currentOffset=0;
transparentIndex=0;
transpix.red=0;
transpix.green=0;
transpix.blue=0;
transpix.opacity=0;
one=1;
version=0;
scene=0;
do
{
(void) TransformImageColorspace(image,sRGBColorspace);
count=GetNumberColors(image,NULL,exception);
for (bits_per_pixel=1; (one << bits_per_pixel) < count; bits_per_pixel*=2) ;
if (bits_per_pixel > 16)
bits_per_pixel=16;
else
if (bits_per_pixel < 16)
(void) TransformImageColorspace(image,image->colorspace);
if (bits_per_pixel < 8)
{
(void) TransformImageColorspace(image,GRAYColorspace);
(void) SetImageType(image,PaletteType);
(void) SortColormapByIntensity(image);
}
if ((image->storage_class == PseudoClass) && (image->colors > 256))
(void) SetImageStorageClass(image,DirectClass);
if (image->storage_class == PseudoClass)
flags|=PALM_HAS_COLORMAP_FLAG;
else
flags|=PALM_IS_DIRECT_COLOR;
(void) WriteBlobMSBShort(image,(unsigned short) image->columns); /* width */
(void) WriteBlobMSBShort(image,(unsigned short) image->rows); /* height */
bytes_per_row=((image->columns+(16/bits_per_pixel-1))/(16/
bits_per_pixel))*2;
(void) WriteBlobMSBShort(image,(unsigned short) bytes_per_row);
if ((image_info->compression == RLECompression) ||
(image_info->compression == FaxCompression))
flags|=PALM_IS_COMPRESSED_FLAG;
(void) WriteBlobMSBShort(image, flags);
(void) WriteBlobByte(image,(unsigned char) bits_per_pixel);
if (bits_per_pixel > 1)
version=1;
if ((image_info->compression == RLECompression) ||
(image_info->compression == FaxCompression))
version=2;
(void) WriteBlobByte(image,version);
(void) WriteBlobMSBShort(image,0); /* nextDepthOffset */
(void) WriteBlobByte(image,(unsigned char) transparentIndex);
if (image_info->compression == RLECompression)
(void) WriteBlobByte(image,PALM_COMPRESSION_RLE);
else
if (image_info->compression == FaxCompression)
(void) WriteBlobByte(image,PALM_COMPRESSION_SCANLINE);
else
(void) WriteBlobByte(image,PALM_COMPRESSION_NONE);
(void) WriteBlobMSBShort(image,0); /* reserved */
offset=16;
if (bits_per_pixel == 16)
{
(void) WriteBlobByte(image,5); /* # of bits of red */
(void) WriteBlobByte(image,6); /* # of bits of green */
(void) WriteBlobByte(image,5); /* # of bits of blue */
(void) WriteBlobByte(image,0); /* reserved by Palm */
(void) WriteBlobMSBLong(image,0); /* no transparent color, YET */
offset+=8;
}
if (bits_per_pixel == 8)
{
if (flags & PALM_HAS_COLORMAP_FLAG) /* Write out colormap */
{
quantize_info->dither=IsPaletteImage(image,&image->exception);
quantize_info->number_colors=image->colors;
(void) QuantizeImage(quantize_info,image);
(void) WriteBlobMSBShort(image,(unsigned short) image->colors);
for (count = 0; count < image->colors; count++)
{
(void) WriteBlobByte(image,(unsigned char) count);
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[count].red));
(void) WriteBlobByte(image,
ScaleQuantumToChar(image->colormap[count].green));
(void) WriteBlobByte(image,
ScaleQuantumToChar(image->colormap[count].blue));
}
offset+=2+count*4;
}
else /* Map colors to Palm standard colormap */
{
Image
*affinity_image;
affinity_image=ConstituteImage(256,1,"RGB",CharPixel,&PalmPalette,
exception);
(void) TransformImageColorspace(affinity_image,
affinity_image->colorspace);
(void) RemapImage(quantize_info,image,affinity_image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetAuthenticPixels(image,0,y,image->columns,1,exception);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,FindColor(&image->colormap[
(ssize_t) GetPixelIndex(indexes+x)]));
}
affinity_image=DestroyImage(affinity_image);
}
}
if (flags & PALM_IS_COMPRESSED_FLAG)
(void) WriteBlobMSBShort(image,0); /* fill in size later */
last_row=(unsigned char *) NULL;
if (image_info->compression == FaxCompression)
{
last_row=(unsigned char *) AcquireQuantumMemory(bytes_per_row,
sizeof(*last_row));
if (last_row == (unsigned char *) NULL)
{
quantize_info=DestroyQuantizeInfo(quantize_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
}
one_row=(unsigned char *) AcquireQuantumMemory(bytes_per_row,
sizeof(*one_row));
if (one_row == (unsigned char *) NULL)
{
quantize_info=DestroyQuantizeInfo(quantize_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
for (y=0; y < (ssize_t) image->rows; y++)
{
ptr=one_row;
(void) ResetMagickMemory(ptr,0,bytes_per_row);
p=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (p == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
if (bits_per_pixel == 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
color16=(unsigned short) ((((31*(size_t) GetPixelRed(p))/
(size_t) QuantumRange) << 11) |
(((63*(size_t) GetPixelGreen(p))/(size_t) QuantumRange) << 5) |
((31*(size_t) GetPixelBlue(p))/(size_t) QuantumRange));
if (GetPixelOpacity(p) == (Quantum) TransparentOpacity)
{
transpix.red=GetPixelRed(p);
transpix.green=GetPixelGreen(p);
transpix.blue=GetPixelBlue(p);
transpix.opacity=GetPixelOpacity(p);
flags|=PALM_HAS_TRANSPARENCY_FLAG;
}
*ptr++=(unsigned char) ((color16 >> 8) & 0xff);
*ptr++=(unsigned char) (color16 & 0xff);
p++;
}
}
else
{
byte=0x00;
bit=(unsigned char) (8-bits_per_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (bits_per_pixel >= 8)
color=(unsigned char) GetPixelIndex(indexes+x);
else
color=(unsigned char) (GetPixelIndex(indexes+x)*
((one << bits_per_pixel)-1)/MagickMax(1*image->colors-1,1));
byte|=color << bit;
if (bit != 0)
bit-=(unsigned char) bits_per_pixel;
else
{
*ptr++=byte;
byte=0x00;
bit=(unsigned char) (8-bits_per_pixel);
}
}
if ((image->columns % (8/bits_per_pixel)) != 0)
*ptr++=byte;
}
if (image_info->compression == RLECompression)
{
x=0;
while (x < (ssize_t) bytes_per_row)
{
byte=one_row[x];
count=1;
while ((one_row[++x] == byte) && (count < 255) &&
(x < (ssize_t) bytes_per_row))
count++;
(void) WriteBlobByte(image,(unsigned char) count);
(void) WriteBlobByte(image,(unsigned char) byte);
}
}
else
if (image_info->compression == FaxCompression)
{
char
tmpbuf[8],
*tptr;
for (x = 0; x < (ssize_t) bytes_per_row; x += 8)
{
tptr = tmpbuf;
for (bit=0, byte=0; bit < (unsigned char) MagickMin(8,(ssize_t) bytes_per_row-x); bit++)
{
if ((y == 0) || (last_row[x + bit] != one_row[x + bit]))
{
byte |= (1 << (7 - bit));
*tptr++ = (char) one_row[x + bit];
}
}
(void) WriteBlobByte(image, byte);
(void) WriteBlob(image,tptr-tmpbuf,(unsigned char *) tmpbuf);
}
(void) CopyMagickMemory(last_row,one_row,bytes_per_row);
}
else
(void) WriteBlob(image,bytes_per_row,one_row);
}
if (flags & PALM_HAS_TRANSPARENCY_FLAG)
{
offset=SeekBlob(image,currentOffset+6,SEEK_SET);
(void) WriteBlobMSBShort(image,flags);
offset=SeekBlob(image,currentOffset+12,SEEK_SET);
(void) WriteBlobByte(image,(unsigned char) transparentIndex); /* trans index */
}
if (bits_per_pixel == 16)
{
offset=SeekBlob(image,currentOffset+20,SEEK_SET);
(void) WriteBlobByte(image,0); /* reserved by Palm */
(void) WriteBlobByte(image,(unsigned char) ((31*transpix.red)/
QuantumRange));
(void) WriteBlobByte(image,(unsigned char) ((63*transpix.green)/
QuantumRange));
(void) WriteBlobByte(image,(unsigned char) ((31*transpix.blue)/
QuantumRange));
}
if (flags & PALM_IS_COMPRESSED_FLAG) /* fill in size now */
{
offset=SeekBlob(image,currentOffset+offset,SEEK_SET);
(void) WriteBlobMSBShort(image,(unsigned short) (GetBlobSize(image)-
currentOffset-offset));
}
if (one_row != (unsigned char *) NULL)
one_row=(unsigned char *) RelinquishMagickMemory(one_row);
if (last_row != (unsigned char *) NULL)
last_row=(unsigned char *) RelinquishMagickMemory(last_row);
if (GetNextImageInList(image) == (Image *) NULL)
break;
/* padding to 4 byte word */
for (cc=(GetBlobSize(image)) % 4; cc > 0; cc--)
(void) WriteBlobByte(image,0);
/* write nextDepthOffset and return to end of image */
(void) SeekBlob(image,currentOffset+10,SEEK_SET);
nextDepthOffset=(size_t) ((GetBlobSize(image)-currentOffset)/4);
(void) WriteBlobMSBShort(image,(unsigned short) nextDepthOffset);
currentOffset=(MagickOffsetType) GetBlobSize(image);
(void) SeekBlob(image,currentOffset,SEEK_SET);
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
quantize_info=DestroyQuantizeInfo(quantize_info);
(void) CloseBlob(image);
(void) DestroyExceptionInfo(exception);
return(MagickTrue);
}
| 0
|
160,178
|
cockpit_session_unref (gpointer data)
{
CockpitSession *session = data;
CockpitCreds *creds;
GObject *object;
session->refs--;
if (session->refs > 0)
return;
cockpit_session_reset (data);
g_free (session->name);
g_free (session->cookie);
if (session->authorize)
json_object_unref (session->authorize);
if (session->transport)
{
if (session->control_sig)
g_signal_handler_disconnect (session->transport, session->control_sig);
if (session->close_sig)
g_signal_handler_disconnect (session->transport, session->close_sig);
g_object_unref (session->transport);
}
if (session->service)
{
creds = cockpit_web_service_get_creds (session->service);
object = G_OBJECT (session->service);
session->service = NULL;
if (creds)
cockpit_creds_poison (creds);
if (session->idling_sig)
g_signal_handler_disconnect (object, session->idling_sig);
if (session->destroy_sig)
g_signal_handler_disconnect (object, session->destroy_sig);
g_object_weak_unref (object, on_web_service_gone, session);
g_object_run_dispose (object);
g_object_unref (object);
}
if (session->timeout_tag)
g_source_remove (session->timeout_tag);
g_free (session);
}
| 0
|
304,165
|
xar_read_header(struct archive_read *a, struct archive_entry *entry)
{
struct xar *xar;
struct xar_file *file;
struct xattr *xattr;
int r;
xar = (struct xar *)(a->format->data);
r = ARCHIVE_OK;
if (xar->offset == 0) {
/* Create a character conversion object. */
if (xar->sconv == NULL) {
xar->sconv = archive_string_conversion_from_charset(
&(a->archive), "UTF-8", 1);
if (xar->sconv == NULL)
return (ARCHIVE_FATAL);
}
/* Read TOC. */
r = read_toc(a);
if (r != ARCHIVE_OK)
return (r);
}
for (;;) {
file = xar->file = heap_get_entry(&(xar->file_queue));
if (file == NULL) {
xar->end_of_file = 1;
return (ARCHIVE_EOF);
}
if ((file->mode & AE_IFMT) != AE_IFDIR)
break;
if (file->has != (HAS_PATHNAME | HAS_TYPE))
break;
/*
* If a file type is a directory and it does not have
* any metadata, do not export.
*/
file_free(file);
}
archive_entry_set_atime(entry, file->atime, 0);
archive_entry_set_ctime(entry, file->ctime, 0);
archive_entry_set_mtime(entry, file->mtime, 0);
archive_entry_set_gid(entry, file->gid);
if (file->gname.length > 0 &&
archive_entry_copy_gname_l(entry, file->gname.s,
archive_strlen(&(file->gname)), xar->sconv) != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Gname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Gname cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(xar->sconv));
r = ARCHIVE_WARN;
}
archive_entry_set_uid(entry, file->uid);
if (file->uname.length > 0 &&
archive_entry_copy_uname_l(entry, file->uname.s,
archive_strlen(&(file->uname)), xar->sconv) != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Uname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Uname cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(xar->sconv));
r = ARCHIVE_WARN;
}
archive_entry_set_mode(entry, file->mode);
if (archive_entry_copy_pathname_l(entry, file->pathname.s,
archive_strlen(&(file->pathname)), xar->sconv) != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Pathname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(xar->sconv));
r = ARCHIVE_WARN;
}
if (file->symlink.length > 0 &&
archive_entry_copy_symlink_l(entry, file->symlink.s,
archive_strlen(&(file->symlink)), xar->sconv) != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Linkname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Linkname cannot be converted from %s to current locale.",
archive_string_conversion_charset_name(xar->sconv));
r = ARCHIVE_WARN;
}
/* Set proper nlink. */
if ((file->mode & AE_IFMT) == AE_IFDIR)
archive_entry_set_nlink(entry, file->subdirs + 2);
else
archive_entry_set_nlink(entry, file->nlink);
archive_entry_set_size(entry, file->size);
if (archive_strlen(&(file->hardlink)) > 0)
archive_entry_set_hardlink(entry, file->hardlink.s);
archive_entry_set_ino64(entry, file->ino64);
if (file->has & HAS_DEV)
archive_entry_set_dev(entry, file->dev);
if (file->has & HAS_DEVMAJOR)
archive_entry_set_devmajor(entry, file->devmajor);
if (file->has & HAS_DEVMINOR)
archive_entry_set_devminor(entry, file->devminor);
if (archive_strlen(&(file->fflags_text)) > 0)
archive_entry_copy_fflags_text(entry, file->fflags_text.s);
xar->entry_init = 1;
xar->entry_total = 0;
xar->entry_remaining = file->length;
xar->entry_size = file->size;
xar->entry_encoding = file->encoding;
xar->entry_a_sum = file->a_sum;
xar->entry_e_sum = file->e_sum;
/*
* Read extended attributes.
*/
xattr = file->xattr_list;
while (xattr != NULL) {
const void *d;
size_t outbytes, used;
r = move_reading_point(a, xattr->offset);
if (r != ARCHIVE_OK)
break;
r = rd_contents_init(a, xattr->encoding,
xattr->a_sum.alg, xattr->e_sum.alg);
if (r != ARCHIVE_OK)
break;
d = NULL;
r = rd_contents(a, &d, &outbytes, &used, xattr->length);
if (r != ARCHIVE_OK)
break;
if (outbytes != xattr->size) {
archive_set_error(&(a->archive), ARCHIVE_ERRNO_MISC,
"Decompressed size error");
r = ARCHIVE_FATAL;
break;
}
r = checksum_final(a,
xattr->a_sum.val, xattr->a_sum.len,
xattr->e_sum.val, xattr->e_sum.len);
if (r != ARCHIVE_OK)
break;
archive_entry_xattr_add_entry(entry,
xattr->name.s, d, outbytes);
xattr = xattr->next;
}
if (r != ARCHIVE_OK) {
file_free(file);
return (r);
}
if (xar->entry_remaining > 0)
/* Move reading point to the beginning of current
* file contents. */
r = move_reading_point(a, file->offset);
else
r = ARCHIVE_OK;
file_free(file);
return (r);
}
| 0
|
60,078
|
~TensorSliceReaderTable() override {
delete table_;
delete file_;
}
| 0
|
200,924
|
find_same_op(const gs_composite_t *composite_action, int my_op, gs_composite_t **ppcte)
{
const gs_pdf14trans_t *pct0 = (gs_pdf14trans_t *)composite_action;
gs_composite_t *pct = *ppcte;
for (;;) {
if (pct->type->comp_id == GX_COMPOSITOR_PDF14_TRANS) {
gs_pdf14trans_t *pct_pdf14 = (gs_pdf14trans_t *)pct;
*ppcte = pct;
if (pct_pdf14->params.pdf14_op != my_op)
return COMP_ENQUEUE;
if (pct_pdf14->params.csel == pct0->params.csel) {
/* If the new parameters completely replace the old ones
then remove the old one from the queu */
if ((pct_pdf14->params.changed & pct0->params.changed) ==
pct_pdf14->params.changed) {
return COMP_REPLACE_CURR;
} else {
return COMP_ENQUEUE;
}
}
} else
return COMP_ENQUEUE;
pct = pct->prev;
if (pct == NULL)
return COMP_ENQUEUE; /* Not in queue. */
}
}
| 0
|
357,853
|
static int empty_dir(struct inode *inode)
{
unsigned int offset;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de, *de1;
struct super_block *sb;
int err = 0;
sb = inode->i_sb;
if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
!(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
if (err)
ext4_error(inode->i_sb, __func__,
"error %d reading directory #%lu offset 0",
err, inode->i_ino);
else
ext4_warning(inode->i_sb, __func__,
"bad directory (dir #%lu) - no data block",
inode->i_ino);
return 1;
}
de = (struct ext4_dir_entry_2 *) bh->b_data;
de1 = ext4_next_entry(de);
if (le32_to_cpu(de->inode) != inode->i_ino ||
!le32_to_cpu(de1->inode) ||
strcmp(".", de->name) ||
strcmp("..", de1->name)) {
ext4_warning(inode->i_sb, "empty_dir",
"bad directory (dir #%lu) - no `.' or `..'",
inode->i_ino);
brelse(bh);
return 1;
}
offset = ext4_rec_len_from_disk(de->rec_len) +
ext4_rec_len_from_disk(de1->rec_len);
de = ext4_next_entry(de1);
while (offset < inode->i_size) {
if (!bh ||
(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
err = 0;
brelse(bh);
bh = ext4_bread(NULL, inode,
offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
if (!bh) {
if (err)
ext4_error(sb, __func__,
"error %d reading directory"
" #%lu offset %u",
err, inode->i_ino, offset);
offset += sb->s_blocksize;
continue;
}
de = (struct ext4_dir_entry_2 *) bh->b_data;
}
if (!ext4_check_dir_entry("empty_dir", inode, de, bh, offset)) {
de = (struct ext4_dir_entry_2 *)(bh->b_data +
sb->s_blocksize);
offset = (offset | (sb->s_blocksize - 1)) + 1;
continue;
}
if (le32_to_cpu(de->inode)) {
brelse(bh);
return 0;
}
offset += ext4_rec_len_from_disk(de->rec_len);
de = ext4_next_entry(de);
}
brelse(bh);
return 1;
}
| 0
|
63,770
|
S_ssc_anything(pTHX_ regnode_ssc *ssc)
{
/* Set the SSC 'ssc' to match an empty string or any code point */
PERL_ARGS_ASSERT_SSC_ANYTHING;
assert(is_ANYOF_SYNTHETIC(ssc));
/* mortalize so won't leak */
ssc->invlist = sv_2mortal(_add_range_to_invlist(NULL, 0, UV_MAX));
ANYOF_FLAGS(ssc) |= SSC_MATCHES_EMPTY_STRING; /* Plus matches empty */
}
| 0
|
93,912
|
briopt_check(win_T *wp)
{
char_u *p;
int bri_shift = 0;
long bri_min = 20;
int bri_sbr = FALSE;
p = wp->w_p_briopt;
while (*p != NUL)
{
if (STRNCMP(p, "shift:", 6) == 0
&& ((p[6] == '-' && VIM_ISDIGIT(p[7])) || VIM_ISDIGIT(p[6])))
{
p += 6;
bri_shift = getdigits(&p);
}
else if (STRNCMP(p, "min:", 4) == 0 && VIM_ISDIGIT(p[4]))
{
p += 4;
bri_min = getdigits(&p);
}
else if (STRNCMP(p, "sbr", 3) == 0)
{
p += 3;
bri_sbr = TRUE;
}
if (*p != ',' && *p != NUL)
return FAIL;
if (*p == ',')
++p;
}
wp->w_p_brishift = bri_shift;
wp->w_p_brimin = bri_min;
wp->w_p_brisbr = bri_sbr;
return OK;
}
| 0
|
102,195
|
static int bin_pe_init_exports(struct PE_(r_bin_pe_obj_t)* bin) {
PE_(image_data_directory) * data_dir_export = &bin->data_directory[PE_IMAGE_DIRECTORY_ENTRY_EXPORT];
PE_DWord export_dir_paddr = bin_pe_rva_to_paddr (bin, data_dir_export->VirtualAddress);
if (!export_dir_paddr) {
// This export-dir-paddr should only appear in DLL files
// bprintf ("Warning: Cannot find the paddr of the export directory\n");
return false;
}
// sdb_setn (DB, "hdr.exports_directory", export_dir_paddr);
// bprintf ("Pexports paddr at 0x%"PFMT64x"\n", export_dir_paddr);
if (!(bin->export_directory = malloc (sizeof(PE_(image_export_directory))))) {
r_sys_perror ("malloc (export directory)");
return false;
}
if (r_buf_read_at (bin->b, export_dir_paddr, (ut8*) bin->export_directory, sizeof (PE_(image_export_directory))) == -1) {
bprintf ("Warning: read (export directory)\n");
free (bin->export_directory);
bin->export_directory = NULL;
return false;
}
return true;
}
| 0
|
370,607
|
htmlParseAttribute(htmlParserCtxtPtr ctxt, xmlChar **value) {
const xmlChar *name;
xmlChar *val = NULL;
*value = NULL;
name = htmlParseHTMLName(ctxt);
if (name == NULL) {
htmlParseErr(ctxt, XML_ERR_NAME_REQUIRED,
"error parsing attribute name\n", NULL, NULL);
return(NULL);
}
/*
* read the value
*/
SKIP_BLANKS;
if (CUR == '=') {
NEXT;
SKIP_BLANKS;
val = htmlParseAttValue(ctxt);
}
*value = val;
return(name);
}
| 0
|
96,798
|
R_API char *cmd_syscall_dostr(RCore *core, int n) {
char *res = NULL;
int i;
char str[64];
if (n == -1) {
n = (int)r_debug_reg_get (core->dbg, "oeax");
if (!n || n == -1) {
const char *a0 = r_reg_get_name (core->anal->reg, R_REG_NAME_SN);
n = (int)r_debug_reg_get (core->dbg, a0);
}
}
RSyscallItem *item = r_syscall_get (core->anal->syscall, n, -1);
if (!item) {
res = r_str_appendf (res, "%d = unknown ()", n);
return res;
}
res = r_str_appendf (res, "%d = %s (", item->num, item->name);
// TODO: move this to r_syscall
//TODO replace the hardcoded CC with the sdb ones
for (i = 0; i < item->args; i++) {
// XXX this is a hack to make syscall args work on x86-32 and x86-64
// we need to shift sn first.. which is bad, but needs to be redesigned
int regidx = i;
if (core->assembler->bits == 32) {
regidx++;
}
ut64 arg = r_debug_arg_get (core->dbg, R_ANAL_CC_TYPE_FASTCALL, regidx);
//r_cons_printf ("(%d:0x%"PFMT64x")\n", i, arg);
if (item->sargs) {
switch (item->sargs[i]) {
case 'p': // pointer
res = r_str_appendf (res, "0x%08" PFMT64x "", arg);
break;
case 'i':
res = r_str_appendf (res, "%" PFMT64d "", arg);
break;
case 'z':
memset (str, 0, sizeof (str));
r_io_read_at (core->io, arg, (ut8 *)str, sizeof (str) - 1);
r_str_filter (str, strlen (str));
res = r_str_appendf (res, "\"%s\"", str);
break;
case 'Z': {
//TODO replace the hardcoded CC with the sdb ones
ut64 len = r_debug_arg_get (core->dbg, R_ANAL_CC_TYPE_FASTCALL, i + 2);
len = R_MIN (len + 1, sizeof (str) - 1);
if (len == 0) {
len = 16; // override default
}
(void)r_io_read_at (core->io, arg, (ut8 *)str, len);
str[len] = 0;
r_str_filter (str, -1);
res = r_str_appendf (res, "\"%s\"", str);
} break;
default:
res = r_str_appendf (res, "0x%08" PFMT64x "", arg);
break;
}
} else {
res = r_str_appendf (res, "0x%08" PFMT64x "", arg);
}
if (i + 1 < item->args) {
res = r_str_appendf (res, ", ");
}
}
r_syscall_item_free (item);
res = r_str_appendf (res, ")");
return res;
}
| 0
|
293,707
|
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
md->interface_ptr = ptr;
}
| 0
|
93,157
|
int sr_select_speed(struct cdrom_device_info *cdi, int speed)
{
Scsi_CD *cd = cdi->handle;
struct packet_command cgc;
if (speed == 0)
speed = 0xffff; /* set to max */
else
speed *= 177; /* Nx to kbyte/s */
memset(&cgc, 0, sizeof(struct packet_command));
cgc.cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */
cgc.cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
cgc.cmd[3] = speed & 0xff; /* LSB */
cgc.data_direction = DMA_NONE;
cgc.timeout = IOCTL_TIMEOUT;
if (sr_do_ioctl(cd, &cgc))
return -EIO;
return 0;
}
| 0
|
116,327
|
void DeepScanLineInputFile::multiPartInitialize(InputPartData* part)
{
_data->_streamData = part->mutex;
_data->memoryMapped = _data->_streamData->is->isMemoryMapped();
_data->version = part->version;
initialize(part->header);
_data->lineOffsets = part->chunkOffsets;
_data->partNumber = part->partNumber;
}
| 0
|
486,590
|
void qemu_input_event_send_impl(QemuConsole *src, InputEvent *evt)
{
QemuInputHandlerState *s;
qemu_input_event_trace(src, evt);
/* pre processing */
if (graphic_rotate && (evt->type == INPUT_EVENT_KIND_ABS)) {
qemu_input_transform_abs_rotate(evt);
}
/* send event */
s = qemu_input_find_handler(1 << evt->type, src);
if (!s) {
return;
}
s->handler->event(s->dev, src, evt);
s->events++;
}
| 0
|
223,337
|
handle_nxt_resume(struct ofconn *ofconn, const struct ofp_header *oh)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofputil_packet_in_private pin;
enum ofperr error;
error = ofputil_decode_packet_in_private(oh, false,
ofproto_get_tun_tab(ofproto),
&ofproto->vl_mff_map, &pin, NULL,
NULL);
if (error) {
return error;
}
error = (ofproto->ofproto_class->nxt_resume
? ofproto->ofproto_class->nxt_resume(ofproto, &pin)
: OFPERR_NXR_NOT_SUPPORTED);
ofputil_packet_in_private_destroy(&pin);
return error;
}
| 0
|
361,399
|
static int generate_key(schema_id schema,
const char *password,
struct pbkdf2_params *kdf_params,
struct pbe_enc_params *enc_params,
gnutls_datum_t * key)
{
opaque rnd[2];
int ret;
/* We should use the flags here to use different
* encryption algorithms etc.
*/
if (schema == PKCS12_ARCFOUR_SHA1)
enc_params->cipher = GNUTLS_CIPHER_ARCFOUR_128;
else if (schema == PKCS12_3DES_SHA1)
enc_params->cipher = GNUTLS_CIPHER_3DES_CBC;
else if (schema == PKCS12_RC2_40_SHA1)
enc_params->cipher = GNUTLS_CIPHER_RC2_40_CBC;
_gnutls_get_random(rnd, 2, GNUTLS_STRONG_RANDOM);
/* generate salt */
if (schema == PBES2)
kdf_params->salt_size =
MIN(sizeof(kdf_params->salt), (uint) (10 + (rnd[1] % 10)));
else
kdf_params->salt_size = 8;
_gnutls_get_random(kdf_params->salt, kdf_params->salt_size,
GNUTLS_STRONG_RANDOM);
kdf_params->iter_count = 256 + rnd[0];
key->size = kdf_params->key_size =
gnutls_cipher_get_key_size(enc_params->cipher);
enc_params->iv_size = _gnutls_cipher_get_iv_size(enc_params->cipher);
key->data = gnutls_secure_malloc(key->size);
if (key->data == NULL) {
gnutls_assert();
return GNUTLS_E_MEMORY_ERROR;
}
/* now generate the key.
*/
if (schema == PBES2) {
ret = gc_pkcs5_pbkdf2_sha1(password, strlen(password),
kdf_params->salt, kdf_params->salt_size,
kdf_params->iter_count,
kdf_params->key_size, key->data);
if (ret != GC_OK) {
gnutls_assert();
return GNUTLS_E_ENCRYPTION_FAILED;
}
if (enc_params->iv_size)
_gnutls_get_random(enc_params->iv, enc_params->iv_size,
GNUTLS_WEAK_RANDOM);
} else { /* PKCS12 schemas */
ret =
_pkcs12_string_to_key(1 /*KEY*/, kdf_params->salt,
kdf_params->salt_size,
kdf_params->iter_count, password,
kdf_params->key_size, key->data);
if (ret < 0) {
gnutls_assert();
return ret;
}
/* Now generate the IV
*/
if (enc_params->iv_size) {
ret =
_pkcs12_string_to_key(2 /*IV*/, kdf_params->salt,
kdf_params->salt_size,
kdf_params->iter_count, password,
enc_params->iv_size, enc_params->iv);
if (ret < 0) {
gnutls_assert();
return ret;
}
}
}
return 0;
}
| 0
|
87,299
|
template<typename t, typename tc>
CImg<T>& draw_axis(const CImg<t>& values_x, const int y,
const tc *const color, const float opacity=1,
const unsigned int pattern=~0U, const unsigned int font_height=13,
const bool allow_zero=true, const float round_x=0) {
if (is_empty()) return *this;
const int yt = (y + 3 + font_height)<_height?y + 3:y - 2 - (int)font_height;
const int siz = (int)values_x.size() - 1;
CImg<charT> txt(32);
CImg<T> a_label;
if (siz<=0) { // Degenerated case
draw_line(0,y,_width - 1,y,color,opacity,pattern);
if (!siz) {
cimg_snprintf(txt,txt._width,"%g",round_x?cimg::round((double)*values_x,round_x):(double)*values_x);
a_label.assign().draw_text(0,0,txt,color,(tc*)0,opacity,font_height);
const int
_xt = (width() - a_label.width())/2,
xt = _xt<3?3:_xt + a_label.width()>=width() - 2?width() - 3 - a_label.width():_xt;
draw_point(width()/2,y - 1,color,opacity).draw_point(width()/2,y + 1,color,opacity);
if (allow_zero || *txt!='0' || txt[1]!=0)
draw_text(xt,yt,txt,color,(tc*)0,opacity,font_height);
}
} else { // Regular case
if (values_x[0]<values_x[siz]) draw_arrow(0,y,_width - 1,y,color,opacity,30,5,pattern);
else draw_arrow(_width - 1,y,0,y,color,opacity,30,5,pattern);
cimg_foroff(values_x,x) {
cimg_snprintf(txt,txt._width,"%g",round_x?cimg::round((double)values_x(x),round_x):(double)values_x(x));
a_label.assign().draw_text(0,0,txt,color,(tc*)0,opacity,font_height);
const int
xi = (int)(x*(_width - 1)/siz),
_xt = xi - a_label.width()/2,
xt = _xt<3?3:_xt + a_label.width()>=width() - 2?width() - 3 - a_label.width():_xt;
draw_point(xi,y - 1,color,opacity).draw_point(xi,y + 1,color,opacity);
if (allow_zero || *txt!='0' || txt[1]!=0)
draw_text(xt,yt,txt,color,(tc*)0,opacity,font_height);
}
}
return *this;
| 0
|
84,956
|
bool imap_code(const char *s)
{
return (cmd_status(s) == IMAP_CMD_OK);
}
| 0
|
318,012
|
static void incomplete_class_unset_property(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */
{
incomplete_class_message(object, E_NOTICE TSRMLS_CC);
}
/* }}} */
| 0
|
517,062
|
int st_select_lex_unit::save_union_explain_part2(Explain_query *output)
{
Explain_union *eu= output->get_union(first_select()->select_number);
if (fake_select_lex)
{
for (SELECT_LEX_UNIT *unit= fake_select_lex->first_inner_unit();
unit; unit= unit->next_unit())
{
if (!(unit->item && unit->item->eliminated))
{
eu->add_child(unit->first_select()->select_number);
}
}
fake_select_lex->join->explain= &eu->fake_select_lex_explain;
}
return 0;
}
| 0
|
435,539
|
static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
struct ql_tx_buf_cb *tx_cb;
int i;
struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
/* Create free list of transmit buffers */
for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
tx_cb = &qdev->tx_buf[i];
tx_cb->skb = NULL;
tx_cb->queue_entry = req_q_curr;
req_q_curr++;
tx_cb->oal = kmalloc(512, GFP_KERNEL);
if (tx_cb->oal == NULL)
return -ENOMEM;
}
return 0;
| 0
|
419,681
|
static void server_cache_boot_id(Server *s) {
sd_id128_t id;
int r;
assert(s);
r = sd_id128_get_boot(&id);
if (r < 0)
return;
sd_id128_to_string(id, stpcpy(s->boot_id_field, "_BOOT_ID="));
}
| 0
|
446,111
|
virDomainChrGetDomainPtrs(const virDomainDef *vmdef,
virDomainChrDeviceType type,
const virDomainChrDef ***arrPtr,
size_t *cntPtr)
{
virDomainChrDef ***arrVar = NULL;
size_t *cntVar = NULL;
/* Cast away const; we add it back in the final assignment. */
if (virDomainChrGetDomainPtrsInternal((virDomainDefPtr) vmdef, type,
&arrVar, &cntVar) < 0) {
*arrPtr = NULL;
*cntPtr = 0;
} else {
*arrPtr = (const virDomainChrDef **) *arrVar;
*cntPtr = *cntVar;
}
}
| 0
|
3,535
|
process(register int code, unsigned char** fill)
{
int incode;
static unsigned char firstchar;
if (code == clear) {
codesize = datasize + 1;
codemask = (1 << codesize) - 1;
avail = clear + 2;
oldcode = -1;
return 1;
}
if (oldcode == -1) {
*(*fill)++ = suffix[code];
firstchar = oldcode = code;
return 1;
}
if (code > avail) {
fprintf(stderr, "code %d too large for %d\n", code, avail);
return 0;
}
incode = code;
if (code == avail) { /* the first code is always < avail */
*stackp++ = firstchar;
code = oldcode;
}
while (code > clear) {
*stackp++ = suffix[code];
code = prefix[code];
}
*stackp++ = firstchar = suffix[code];
prefix[avail] = oldcode;
suffix[avail] = firstchar;
avail++;
if (((avail & codemask) == 0) && (avail < 4096)) {
codesize++;
codemask += avail;
}
oldcode = incode;
do {
*(*fill)++ = *--stackp;
} while (stackp > stack);
return 1;
}
| 1
|
341,561
|
static inline int signed_shift(int i, int shift) {
if (shift > 0)
return i << shift;
return i >> -shift;
}
| 1
|
182,238
|
PHP_FUNCTION(snmpset)
{
php_snmp(INTERNAL_FUNCTION_PARAM_PASSTHRU, SNMP_CMD_SET, SNMP_VERSION_1);
}
| 0
|
57,164
|
void CSteamNetworkConnectionBase::SNP_GatherAckBlocks( SNPAckSerializerHelper &helper, SteamNetworkingMicroseconds usecNow )
{
helper.m_nBlocks = 0;
helper.m_nBlocksNeedToAck = 0;
// Fast case for no packet loss we need to ack, which will (hopefully!) be a common case
int n = len( m_receiverState.m_mapPacketGaps ) - 1;
if ( n <= 0 )
return;
// Let's not just flush the acks that are due right now. Let's flush all of them
// that will be due any time before we have the bandwidth to send the next packet.
// (Assuming that we send the max packet size here.)
SteamNetworkingMicroseconds usecSendAcksDueBefore = usecNow;
SteamNetworkingMicroseconds usecTimeUntilNextPacket = SteamNetworkingMicroseconds( ( m_senderState.m_flTokenBucket - (float)m_cbMTUPacketSize ) / (float)m_senderState.m_n_x * -1e6 );
if ( usecTimeUntilNextPacket > 0 )
usecSendAcksDueBefore += usecTimeUntilNextPacket;
m_receiverState.DebugCheckPackGapMap();
n = std::min( (int)helper.k_nMaxBlocks, n );
auto itNext = m_receiverState.m_mapPacketGaps.begin();
int cbEncodedSize = helper.k_cbHeaderSize;
while ( n > 0 )
{
--n;
auto itCur = itNext;
++itNext;
Assert( itCur->first < itCur->second.m_nEnd );
// Do we need to report on this block now?
bool bNeedToReport = ( itNext->second.m_usecWhenAckPrior <= usecSendAcksDueBefore );
// Should we wait to NACK this?
if ( itCur == m_receiverState.m_itPendingNack )
{
// Wait to NACK this?
if ( !bNeedToReport )
{
if ( usecNow < itCur->second.m_usecWhenOKToNack )
break;
bNeedToReport = true;
}
// Go ahead and NACK it. If the packet arrives, we will use it.
// But our NACK may cause the sender to retransmit.
++m_receiverState.m_itPendingNack;
}
SNPAckSerializerHelper::Block &block = helper.m_arBlocks[ helper.m_nBlocks ];
block.m_nNack = uint32( itCur->second.m_nEnd - itCur->first );
int64 nAckEnd;
SteamNetworkingMicroseconds usecWhenSentLast;
if ( n == 0 )
{
// itNext should be the sentinel
Assert( itNext->first == INT64_MAX );
nAckEnd = m_statsEndToEnd.m_nMaxRecvPktNum+1;
usecWhenSentLast = m_statsEndToEnd.m_usecTimeLastRecvSeq;
}
else
{
nAckEnd = itNext->first;
usecWhenSentLast = itNext->second.m_usecWhenReceivedPktBefore;
}
Assert( itCur->second.m_nEnd < nAckEnd );
block.m_nAck = uint32( nAckEnd - itCur->second.m_nEnd );
block.m_nLatestPktNum = uint32( nAckEnd-1 );
block.m_nEncodedTimeSinceLatestPktNum = SNPAckSerializerHelper::EncodeTimeSince( usecNow, usecWhenSentLast );
// When we encode 7+ blocks, the header grows by one byte
// to store an explicit count
if ( helper.m_nBlocks == 6 )
++cbEncodedSize;
// This block
++cbEncodedSize;
if ( block.m_nAck > 7 )
cbEncodedSize += VarIntSerializedSize( block.m_nAck>>3 );
if ( block.m_nNack > 7 )
cbEncodedSize += VarIntSerializedSize( block.m_nNack>>3 );
block.m_cbTotalEncodedSize = cbEncodedSize;
// FIXME Here if the caller knows they are working with limited space,
// they could tell us how much space they have and we could bail
// if we already know we're over
++helper.m_nBlocks;
// Do we really need to try to flush the ack/nack for that block out now?
if ( bNeedToReport )
helper.m_nBlocksNeedToAck = helper.m_nBlocks;
}
}
| 0
|
437,246
|
static int ttusb_dec_set_interface(struct ttusb_dec *dec,
enum ttusb_dec_interface interface)
{
int result = 0;
u8 b[] = { 0x05 };
if (interface != dec->interface) {
switch (interface) {
case TTUSB_DEC_INTERFACE_INITIAL:
result = usb_set_interface(dec->udev, 0, 0);
break;
case TTUSB_DEC_INTERFACE_IN:
result = ttusb_dec_send_command(dec, 0x80, sizeof(b),
b, NULL, NULL);
if (result)
return result;
result = usb_set_interface(dec->udev, 0, 8);
break;
case TTUSB_DEC_INTERFACE_OUT:
result = usb_set_interface(dec->udev, 0, 1);
break;
}
if (result)
return result;
dec->interface = interface;
}
return 0;
}
| 0
|
28,447
|
int TSHttpTxnBackgroundFillStarted ( TSHttpTxn txnp ) {
sdk_assert ( sdk_sanity_check_txn ( txnp ) == TS_SUCCESS ) ;
HttpSM * s = ( HttpSM * ) txnp ;
return ( s -> background_fill == BACKGROUND_FILL_STARTED ) ;
}
| 0
|
65,098
|
word32 SetBitString(word32 len, byte unusedBits, byte* output)
{
word32 idx = 0;
if (output)
output[idx] = ASN_BIT_STRING;
idx++;
idx += SetLength(len + 1, output ? output + idx : NULL);
if (output)
output[idx] = unusedBits;
idx++;
return idx;
}
| 0
|
492,182
|
deleteDependencyRecordsFor(Oid classId, Oid objectId,
bool skipExtensionDeps)
{
long count = 0;
Relation depRel;
ScanKeyData key[2];
SysScanDesc scan;
HeapTuple tup;
depRel = table_open(DependRelationId, RowExclusiveLock);
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[1],
Anum_pg_depend_objid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(objectId));
scan = systable_beginscan(depRel, DependDependerIndexId, true,
NULL, 2, key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
if (skipExtensionDeps &&
((Form_pg_depend) GETSTRUCT(tup))->deptype == DEPENDENCY_EXTENSION)
continue;
CatalogTupleDelete(depRel, &tup->t_self);
count++;
}
systable_endscan(scan);
table_close(depRel, RowExclusiveLock);
return count;
}
| 0
|
332,357
|
static void adb_mouse_realizefn(DeviceState *dev, Error **errp)
{
MouseState *s = ADB_MOUSE(dev);
ADBMouseClass *amc = ADB_MOUSE_GET_CLASS(dev);
amc->parent_realize(dev, errp);
qemu_add_mouse_event_handler(adb_mouse_event, s, 0, "QEMU ADB Mouse");
}
| 1
|
170,093
|
WebMediaPlayer::NetworkState WebMediaPlayerImpl::GetNetworkState() const {
DCHECK(main_task_runner_->BelongsToCurrentThread());
return network_state_;
}
| 0
|
386,574
|
/* {{{ Timezone Cache functions */
static timelib_tzinfo *php_date_parse_tzfile(char *formal_tzname, const timelib_tzdb *tzdb TSRMLS_DC)
{
timelib_tzinfo *tzi, **ptzi;
if(!DATEG(tzcache)) {
ALLOC_HASHTABLE(DATEG(tzcache));
zend_hash_init(DATEG(tzcache), 4, NULL, _php_date_tzinfo_dtor, 0);
}
if (zend_hash_find(DATEG(tzcache), formal_tzname, strlen(formal_tzname) + 1, (void **) &ptzi) == SUCCESS) {
return *ptzi;
}
tzi = timelib_parse_tzfile(formal_tzname, tzdb);
if (tzi) {
zend_hash_add(DATEG(tzcache), formal_tzname, strlen(formal_tzname) + 1, (void *) &tzi, sizeof(timelib_tzinfo*), NULL);
}
| 0
|
450,514
|
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
struct nfs_open_context *ctx,
struct nfs4_lock_state *lsp,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
.rpc_cred = ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_locku_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
/* Ensure this is an unlock - when canceling a lock, the
* canceled lock is passed in, and it won't be an unlock.
*/
fl->fl_type = F_UNLCK;
if (fl->fl_flags & FL_CLOSE)
set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
if (data == NULL) {
nfs_free_seqid(seqid);
return ERR_PTR(-ENOMEM);
}
nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
msg.rpc_argp = &data->arg;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
return rpc_run_task(&task_setup_data);
}
| 0
|
85,642
|
static void sync_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 last_update_time;
last_update_time = cfs_rq_last_update_time(cfs_rq);
__update_load_avg_blocked_se(last_update_time, se);
}
| 0
|
307,491
|
void UsbChooserContext::OnDeviceRemoved(
device::mojom::UsbDeviceInfoPtr device_info) {
DCHECK(device_info);
for (auto& observer : observer_list_)
observer.OnDeviceRemoved(*device_info);
for (auto& map_entry : ephemeral_devices_)
map_entry.second.erase(device_info->guid);
ephemeral_dicts_.erase(device_info->guid);
}
| 0
|
509,890
|
TIFFReadRGBATileExt(TIFF* tif, uint32 col, uint32 row, uint32 * raster, int stop_on_error )
{
char emsg[1024] = "";
TIFFRGBAImage img;
int ok;
uint32 tile_xsize, tile_ysize;
uint32 read_xsize, read_ysize;
uint32 i_row;
/*
* Verify that our request is legal - on a tile file, and on a
* tile boundary.
*/
if( !TIFFIsTiled( tif ) )
{
TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif),
"Can't use TIFFReadRGBATile() with stripped file.");
return (0);
}
TIFFGetFieldDefaulted(tif, TIFFTAG_TILEWIDTH, &tile_xsize);
TIFFGetFieldDefaulted(tif, TIFFTAG_TILELENGTH, &tile_ysize);
if( (col % tile_xsize) != 0 || (row % tile_ysize) != 0 )
{
TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif),
"Row/col passed to TIFFReadRGBATile() must be top"
"left corner of a tile.");
return (0);
}
/*
* Setup the RGBA reader.
*/
if (!TIFFRGBAImageOK(tif, emsg)
|| !TIFFRGBAImageBegin(&img, tif, stop_on_error, emsg)) {
TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif), "%s", emsg);
return( 0 );
}
/*
* The TIFFRGBAImageGet() function doesn't allow us to get off the
* edge of the image, even to fill an otherwise valid tile. So we
* figure out how much we can read, and fix up the tile buffer to
* a full tile configuration afterwards.
*/
if( row + tile_ysize > img.height )
read_ysize = img.height - row;
else
read_ysize = tile_ysize;
if( col + tile_xsize > img.width )
read_xsize = img.width - col;
else
read_xsize = tile_xsize;
/*
* Read the chunk of imagery.
*/
img.row_offset = row;
img.col_offset = col;
ok = TIFFRGBAImageGet(&img, raster, read_xsize, read_ysize );
TIFFRGBAImageEnd(&img);
/*
* If our read was incomplete we will need to fix up the tile by
* shifting the data around as if a full tile of data is being returned.
*
* This is all the more complicated because the image is organized in
* bottom to top format.
*/
if( read_xsize == tile_xsize && read_ysize == tile_ysize )
return( ok );
for( i_row = 0; i_row < read_ysize; i_row++ ) {
memmove( raster + (tile_ysize - i_row - 1) * tile_xsize,
raster + (read_ysize - i_row - 1) * read_xsize,
read_xsize * sizeof(uint32) );
_TIFFmemset( raster + (tile_ysize - i_row - 1) * tile_xsize+read_xsize,
0, sizeof(uint32) * (tile_xsize - read_xsize) );
}
for( i_row = read_ysize; i_row < tile_ysize; i_row++ ) {
_TIFFmemset( raster + (tile_ysize - i_row - 1) * tile_xsize,
0, sizeof(uint32) * tile_xsize );
}
return (ok);
}
| 0
|
461,045
|
static void gic_dist_writew(void *opaque, hwaddr offset,
uint32_t value, MemTxAttrs attrs)
{
gic_dist_writeb(opaque, offset, value & 0xff, attrs);
gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
}
| 0
|
418,313
|
static void dump_status(struct req_state *s, int status,
const char *status_name)
{
s->formatter->set_status(status, status_name);
try {
RESTFUL_IO(s)->send_status(status, status_name);
} catch (rgw::io::Exception& e) {
ldout(s->cct, 0) << "ERROR: s->cio->send_status() returned err="
<< e.what() << dendl;
}
}
| 0
|
198,146
|
status_t NuMediaExtractor::getSampleMeta(sp<MetaData> *sampleMeta) {
Mutex::Autolock autoLock(mLock);
*sampleMeta = NULL;
ssize_t minIndex = fetchTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
}
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
*sampleMeta = info->mSample->meta_data();
return OK;
}
| 0
|
512,711
|
void PSOutputDev::writeDocSetup(PDFDoc *doc, Catalog *catalog,
int firstPage, int lastPage,
GBool duplexA) {
Page *page;
Dict *resDict;
Annots *annots;
Object obj1, obj2;
int pg, i;
if (mode == psModeForm) {
// swap the form and xpdf dicts
writePS("xpdf end begin dup begin\n");
} else {
writePS("xpdf begin\n");
}
for (pg = firstPage; pg <= lastPage; ++pg) {
page = doc->getPage(pg);
if (!page) {
error(-1, "Failed writing resources for page %d", pg);
continue;
}
if ((resDict = page->getResourceDict())) {
setupResources(resDict);
}
annots = new Annots(xref, catalog, page->getAnnots(&obj1));
obj1.free();
for (i = 0; i < annots->getNumAnnots(); ++i) {
if (annots->getAnnot(i)->getAppearance(&obj1)->isStream()) {
obj1.streamGetDict()->lookup("Resources", &obj2);
if (obj2.isDict()) {
setupResources(obj2.getDict());
}
obj2.free();
}
obj1.free();
}
delete annots;
}
if (mode != psModeForm) {
if (mode != psModeEPS && !manualCtrl) {
writePSFmt("{0:d} {1:d} {2:s} pdfSetup\n",
paperWidth, paperHeight, duplexA ? "true" : "false");
}
#if OPI_SUPPORT
if (globalParams->getPSOPI()) {
writePS("/opiMatrix matrix currentmatrix def\n");
}
#endif
}
}
| 0
|
272,207
|
static PHP_MSHUTDOWN_FUNCTION(session) /* {{{ */
{
UNREGISTER_INI_ENTRIES();
#ifdef HAVE_LIBMM
PHP_MSHUTDOWN(ps_mm) (SHUTDOWN_FUNC_ARGS_PASSTHRU);
#endif
/* reset rfc1867 callbacks */
php_session_rfc1867_orig_callback = NULL;
if (php_rfc1867_callback == php_session_rfc1867_callback) {
php_rfc1867_callback = NULL;
}
ps_serializers[PREDEFINED_SERIALIZERS].name = NULL;
memset(&ps_modules[PREDEFINED_MODULES], 0, (MAX_MODULES-PREDEFINED_MODULES)*sizeof(ps_module *));
return SUCCESS;
}
| 0
|
423,695
|
dns__zone_updatesigs(dns_diff_t *diff, dns_db_t *db, dns_dbversion_t *version,
dst_key_t *zone_keys[], unsigned int nkeys,
dns_zone_t *zone, isc_stdtime_t inception,
isc_stdtime_t expire, isc_stdtime_t keyexpire,
isc_stdtime_t now, bool check_ksk,
bool keyset_kskonly, dns__zonediff_t *zonediff)
{
dns_difftuple_t *tuple;
isc_result_t result;
while ((tuple = ISC_LIST_HEAD(diff->tuples)) != NULL) {
isc_stdtime_t exp = expire;
if (keyexpire != 0 &&
(tuple->rdata.type == dns_rdatatype_dnskey ||
tuple->rdata.type == dns_rdatatype_cdnskey ||
tuple->rdata.type == dns_rdatatype_cds))
{
exp = keyexpire;
}
result = del_sigs(zone, db, version, &tuple->name,
tuple->rdata.type, zonediff,
zone_keys, nkeys, now, false);
if (result != ISC_R_SUCCESS) {
dns_zone_log(zone, ISC_LOG_ERROR,
"dns__zone_updatesigs:del_sigs -> %s",
dns_result_totext(result));
return (result);
}
result = add_sigs(db, version, &tuple->name,
tuple->rdata.type, zonediff->diff,
zone_keys, nkeys, zone->mctx, inception,
exp, check_ksk, keyset_kskonly);
if (result != ISC_R_SUCCESS) {
dns_zone_log(zone, ISC_LOG_ERROR,
"dns__zone_updatesigs:add_sigs -> %s",
dns_result_totext(result));
return (result);
}
/*
* Signature changes for all RRs with name tuple->name and type
* tuple->rdata.type were appended to zonediff->diff. Now we
* remove all the "raw" changes with the same name and type
* from diff (so that they are not processed by this loop
* again) and append them to zonediff so that they get applied.
*/
move_matching_tuples(tuple, diff, zonediff->diff);
}
return (ISC_R_SUCCESS);
}
| 0
|
463,551
|
static void io_wq_submit_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_kiocb *timeout;
int ret = 0;
timeout = io_prep_linked_timeout(req);
if (timeout)
io_queue_linked_timeout(timeout);
if (work->flags & IO_WQ_WORK_CANCEL)
ret = -ECANCELED;
if (!ret) {
do {
ret = io_issue_sqe(req, 0);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
* wait for request slots on the block side.
*/
if (ret != -EAGAIN)
break;
cond_resched();
} while (1);
}
/* avoid locking problems by failing it from a clean context */
if (ret) {
/* io-wq is going to take one down */
refcount_inc(&req->refs);
io_req_task_queue_fail(req, ret);
}
| 0
|
10,619
|
hash_foreach_mangle_dict_of_strings (gpointer key, gpointer val, gpointer user_data)
{
GHashTable *out = (GHashTable*) user_data;
GHashTable *in_dict = (GHashTable *) val;
HashAndString *data = g_new0 (HashAndString, 1);
data->string = (gchar*) key;
data->hash = g_hash_table_new_full (g_str_hash, g_str_equal,
g_free, g_free);
g_hash_table_foreach (in_dict, hash_foreach_prepend_string, data);
g_hash_table_insert(out, g_strdup ((gchar*) key), data->hash);
}
| 1
|
90,815
|
int set_encryption_filter(const char* input)
{
if(input == NULL) return 1;
if(strlen(input) < 3) return 1;
if(strcasecmp(input, "opn") == 0)
G.f_encrypt |= STD_OPN;
if(strcasecmp(input, "wep") == 0)
G.f_encrypt |= STD_WEP;
if(strcasecmp(input, "wpa") == 0)
{
G.f_encrypt |= STD_WPA;
G.f_encrypt |= STD_WPA2;
}
if(strcasecmp(input, "wpa1") == 0)
G.f_encrypt |= STD_WPA;
if(strcasecmp(input, "wpa2") == 0)
G.f_encrypt |= STD_WPA2;
return 0;
}
| 0
|
486,446
|
ossl_cipher_set_iv(VALUE self, VALUE iv)
{
EVP_CIPHER_CTX *ctx;
StringValue(iv);
GetCipher(self, ctx);
if (RSTRING_LEN(iv) < EVP_CIPHER_CTX_iv_length(ctx))
ossl_raise(eCipherError, "iv length too short");
if (EVP_CipherInit_ex(ctx, NULL, NULL, NULL, (unsigned char *)RSTRING_PTR(iv), -1) != 1)
ossl_raise(eCipherError, NULL);
return iv;
}
| 0
|
190,473
|
void UniqueElementData::addAttribute(const QualifiedName& attributeName, const AtomicString& value)
{
m_attributeVector.append(Attribute(attributeName, value));
}
| 0
|
14,098
|
void WebContentsImpl::RunJavaScriptDialog(RenderFrameHost* render_frame_host,
const base::string16& message,
const base::string16& default_prompt,
const GURL& frame_url,
JavaScriptDialogType dialog_type,
IPC::Message* reply_msg) {
bool suppress_this_message =
ShowingInterstitialPage() || !delegate_ ||
delegate_->ShouldSuppressDialogs(this) ||
!delegate_->GetJavaScriptDialogManager(this);
if (!suppress_this_message) {
is_showing_javascript_dialog_ = true;
dialog_manager_ = delegate_->GetJavaScriptDialogManager(this);
dialog_manager_->RunJavaScriptDialog(
this, frame_url, dialog_type, message, default_prompt,
base::Bind(&WebContentsImpl::OnDialogClosed, base::Unretained(this),
render_frame_host->GetProcess()->GetID(),
render_frame_host->GetRoutingID(), reply_msg, false),
&suppress_this_message);
}
if (suppress_this_message) {
OnDialogClosed(render_frame_host->GetProcess()->GetID(),
render_frame_host->GetRoutingID(), reply_msg,
true, false, base::string16());
}
}
| 1
|
21,147
|
int qemuMonitorTextAddPCIHostDevice ( qemuMonitorPtr mon , virDomainDevicePCIAddress * hostAddr , virDomainDevicePCIAddress * guestAddr ) {
char * cmd ;
char * reply = NULL ;
int ret = - 1 ;
memset ( guestAddr , 0 , sizeof ( * guestAddr ) ) ;
if ( virAsprintf ( & cmd , "pci_add pci_addr=auto host host=%.2x:%.2x.%.1x" , hostAddr -> bus , hostAddr -> slot , hostAddr -> function ) < 0 ) {
virReportOOMError ( ) ;
goto cleanup ;
}
if ( qemuMonitorHMPCommand ( mon , cmd , & reply ) < 0 ) {
qemuReportError ( VIR_ERR_OPERATION_FAILED , "%s" , _ ( "cannot attach host pci device" ) ) ;
goto cleanup ;
}
if ( strstr ( reply , "invalid type: host" ) ) {
qemuReportError ( VIR_ERR_OPERATION_INVALID , "%s" , _ ( "PCI device assignment is not supported by this version of qemu" ) ) ;
goto cleanup ;
}
if ( qemuMonitorTextParsePciAddReply ( mon , reply , guestAddr ) < 0 ) {
qemuReportError ( VIR_ERR_OPERATION_FAILED , _ ( "parsing pci_add reply failed: %s" ) , reply ) ;
goto cleanup ;
}
ret = 0 ;
cleanup : VIR_FREE ( cmd ) ;
VIR_FREE ( reply ) ;
return ret ;
}
| 0
|
304,315
|
static bool HHVM_METHOD(Memcache, close) {
auto data = Native::data<MemcacheData>(this_);
memcached_quit(&data->m_memcache);
return true;
}
| 0
|
247,184
|
TransportDIB* BrowserRenderProcessHost::GetTransportDIB(
TransportDIB::Id dib_id) {
if (!TransportDIB::is_valid_id(dib_id))
return NULL;
const std::map<TransportDIB::Id, TransportDIB*>::iterator
i = cached_dibs_.find(dib_id);
if (i != cached_dibs_.end()) {
cached_dibs_cleaner_.Reset();
return i->second;
}
TransportDIB* dib = MapTransportDIB(dib_id);
if (!dib)
return NULL;
if (cached_dibs_.size() >= MAX_MAPPED_TRANSPORT_DIBS) {
std::map<TransportDIB::Id, TransportDIB*>::iterator smallest_iterator;
size_t smallest_size = std::numeric_limits<size_t>::max();
for (std::map<TransportDIB::Id, TransportDIB*>::iterator
i = cached_dibs_.begin(); i != cached_dibs_.end(); ++i) {
if (i->second->size() <= smallest_size) {
smallest_iterator = i;
smallest_size = i->second->size();
}
}
delete smallest_iterator->second;
cached_dibs_.erase(smallest_iterator);
}
cached_dibs_[dib_id] = dib;
cached_dibs_cleaner_.Reset();
return dib;
}
| 0
|
259,582
|
u32 gf_isom_get_track_priority_in_group(GF_ISOFile *the_file, u32 trackNumber)
{
GF_TrackBox *trak;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak) return 0;
return trak->Media->information->sampleTable->trackPriority;
}
| 0
|
468,994
|
static int initialize_tables(server_rec *s, apr_pool_t *ctx)
{
unsigned long idx;
apr_status_t sts;
/* set up client list */
/* Create the shared memory segment */
client_shm = NULL;
client_rmm = NULL;
client_lock = NULL;
opaque_lock = NULL;
client_list = NULL;
/*
* Create a unique filename using our pid. This information is
* stashed in the global variable so the children inherit it.
*/
client_shm_filename = ap_runtime_dir_relative(ctx, "authdigest_shm");
client_shm_filename = ap_append_pid(ctx, client_shm_filename, ".");
/* Use anonymous shm by default, fall back on name-based. */
sts = apr_shm_create(&client_shm, shmem_size, NULL, ctx);
if (APR_STATUS_IS_ENOTIMPL(sts)) {
/* For a name-based segment, remove it first in case of a
* previous unclean shutdown. */
apr_shm_remove(client_shm_filename, ctx);
/* Now create that segment */
sts = apr_shm_create(&client_shm, shmem_size,
client_shm_filename, ctx);
}
if (APR_SUCCESS != sts) {
ap_log_error(APLOG_MARK, APLOG_ERR, sts, s, APLOGNO(01762)
"Failed to create shared memory segment on file %s",
client_shm_filename);
log_error_and_cleanup("failed to initialize shm", sts, s);
return HTTP_INTERNAL_SERVER_ERROR;
}
sts = apr_rmm_init(&client_rmm,
NULL, /* no lock, we'll do the locking ourselves */
apr_shm_baseaddr_get(client_shm),
shmem_size, ctx);
if (sts != APR_SUCCESS) {
log_error_and_cleanup("failed to initialize rmm", sts, s);
return !OK;
}
client_list = rmm_malloc(client_rmm, sizeof(*client_list) +
sizeof(client_entry *) * num_buckets);
if (!client_list) {
log_error_and_cleanup("failed to allocate shared memory", -1, s);
return !OK;
}
client_list->table = (client_entry**) (client_list + 1);
for (idx = 0; idx < num_buckets; idx++) {
client_list->table[idx] = NULL;
}
client_list->tbl_len = num_buckets;
client_list->num_entries = 0;
sts = ap_global_mutex_create(&client_lock, NULL, client_mutex_type, NULL,
s, ctx, 0);
if (sts != APR_SUCCESS) {
log_error_and_cleanup("failed to create lock (client_lock)", sts, s);
return !OK;
}
/* setup opaque */
opaque_cntr = rmm_malloc(client_rmm, sizeof(*opaque_cntr));
if (opaque_cntr == NULL) {
log_error_and_cleanup("failed to allocate shared memory", -1, s);
return !OK;
}
*opaque_cntr = 1UL;
sts = ap_global_mutex_create(&opaque_lock, NULL, opaque_mutex_type, NULL,
s, ctx, 0);
if (sts != APR_SUCCESS) {
log_error_and_cleanup("failed to create lock (opaque_lock)", sts, s);
return !OK;
}
/* setup one-time-nonce counter */
otn_counter = rmm_malloc(client_rmm, sizeof(*otn_counter));
if (otn_counter == NULL) {
log_error_and_cleanup("failed to allocate shared memory", -1, s);
return !OK;
}
*otn_counter = 0;
/* no lock here */
/* success */
return OK;
}
| 0
|
514,995
|
gst_rmdemux_parse_indx_data (GstRMDemux * rmdemux, const guint8 * data,
int length)
{
int i;
int n;
GstRMDemuxIndex *index;
/* The number of index records */
n = length / 14;
if (rmdemux->index_stream == NULL)
return;
/* don't parse the index a second time when operating pull-based and
* reaching the end of the file */
if (rmdemux->index_stream->index_length > 0) {
GST_DEBUG_OBJECT (rmdemux, "Already have an index for this stream");
return;
}
index = g_malloc (sizeof (GstRMDemuxIndex) * n);
rmdemux->index_stream->index = index;
rmdemux->index_stream->index_length = n;
for (i = 0; i < n; i++) {
index[i].timestamp = RMDEMUX_GUINT32_GET (data + 2) * GST_MSECOND;
index[i].offset = RMDEMUX_GUINT32_GET (data + 6);
GST_DEBUG_OBJECT (rmdemux, "Index found for timestamp=%f (at offset=%x)",
gst_guint64_to_gdouble (index[i].timestamp) / GST_SECOND,
index[i].offset);
data += 14;
}
}
| 0
|
91,136
|
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
if (!(file->f_flags & O_NOATIME) &&
!IS_NOATIME(&ip->i_inode)) {
struct gfs2_holder i_gh;
int error;
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
error = gfs2_glock_nq(&i_gh);
if (error == 0) {
file_accessed(file);
gfs2_glock_dq(&i_gh);
}
gfs2_holder_uninit(&i_gh);
if (error)
return error;
}
vma->vm_ops = &gfs2_vm_ops;
vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
| 0
|
361,221
|
void reply_findclose(struct smb_request *req)
{
int dptr_num;
struct smbd_server_connection *sconn = smbd_server_conn;
START_PROFILE(SMBfindclose);
if (req->wct < 1) {
reply_nterror(req, NT_STATUS_INVALID_PARAMETER);
END_PROFILE(SMBfindclose);
return;
}
dptr_num = SVALS(req->vwv+0, 0);
DEBUG(3,("reply_findclose, dptr_num = %d\n", dptr_num));
dptr_close(sconn, &dptr_num);
reply_outbuf(req, 0, 0);
DEBUG(3,("SMBfindclose dptr_num = %d\n", dptr_num));
END_PROFILE(SMBfindclose);
return;
}
| 0
|
331,328
|
static int find_pte64 (mmu_ctx_t *ctx, int h, int rw)
{
return _find_pte(ctx, 1, h, rw);
}
| 0
|
289,965
|
static void estimate_block_intra ( int plane , int block , BLOCK_SIZE plane_bsize , TX_SIZE tx_size , void * arg ) {
struct estimate_block_intra_args * const args = arg ;
VP9_COMP * const cpi = args -> cpi ;
MACROBLOCK * const x = args -> x ;
MACROBLOCKD * const xd = & x -> e_mbd ;
struct macroblock_plane * const p = & x -> plane [ 0 ] ;
struct macroblockd_plane * const pd = & xd -> plane [ 0 ] ;
const BLOCK_SIZE bsize_tx = txsize_to_bsize [ tx_size ] ;
uint8_t * const src_buf_base = p -> src . buf ;
uint8_t * const dst_buf_base = pd -> dst . buf ;
const int src_stride = p -> src . stride ;
const int dst_stride = pd -> dst . stride ;
int i , j ;
int rate ;
int64_t dist ;
unsigned int var_y , sse_y ;
txfrm_block_to_raster_xy ( plane_bsize , tx_size , block , & i , & j ) ;
assert ( plane == 0 ) ;
( void ) plane ;
p -> src . buf = & src_buf_base [ 4 * ( j * src_stride + i ) ] ;
pd -> dst . buf = & dst_buf_base [ 4 * ( j * dst_stride + i ) ] ;
vp9_predict_intra_block ( xd , block >> ( 2 * tx_size ) , b_width_log2 ( plane_bsize ) , tx_size , args -> mode , p -> src . buf , src_stride , pd -> dst . buf , dst_stride , i , j , 0 ) ;
model_rd_for_sb_y ( cpi , bsize_tx , x , xd , & rate , & dist , & var_y , & sse_y ) ;
p -> src . buf = src_buf_base ;
pd -> dst . buf = dst_buf_base ;
args -> rate += rate ;
args -> dist += dist ;
}
| 0
|
327,109
|
static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
int ra, int rb, int rt, int Rc)
{
TCGv t0, t1;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
switch (opc3 & 0x0D) {
case 0x05:
/* macchw - macchw. - macchwo - macchwo. */
/* macchws - macchws. - macchwso - macchwso. */
/* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
/* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
/* mulchw - mulchw. */
tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
tcg_gen_ext16s_tl(t1, t1);
break;
case 0x04:
/* macchwu - macchwu. - macchwuo - macchwuo. */
/* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
/* mulchwu - mulchwu. */
tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
tcg_gen_ext16u_tl(t1, t1);
break;
case 0x01:
/* machhw - machhw. - machhwo - machhwo. */
/* machhws - machhws. - machhwso - machhwso. */
/* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
/* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
/* mulhhw - mulhhw. */
tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
tcg_gen_ext16s_tl(t0, t0);
tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
tcg_gen_ext16s_tl(t1, t1);
break;
case 0x00:
/* machhwu - machhwu. - machhwuo - machhwuo. */
/* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
/* mulhhwu - mulhhwu. */
tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
tcg_gen_ext16u_tl(t0, t0);
tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
tcg_gen_ext16u_tl(t1, t1);
break;
case 0x0D:
/* maclhw - maclhw. - maclhwo - maclhwo. */
/* maclhws - maclhws. - maclhwso - maclhwso. */
/* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
/* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
/* mullhw - mullhw. */
tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
break;
case 0x0C:
/* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
/* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
/* mullhwu - mullhwu. */
tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
break;
}
if (opc2 & 0x04) {
/* (n)multiply-and-accumulate (0x0C / 0x0E) */
tcg_gen_mul_tl(t1, t0, t1);
if (opc2 & 0x02) {
/* nmultiply-and-accumulate (0x0E) */
tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
} else {
/* multiply-and-accumulate (0x0C) */
tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
}
if (opc3 & 0x12) {
/* Check overflow and/or saturate */
int l1 = gen_new_label();
if (opc3 & 0x10) {
/* Start with XER OV disabled, the most likely case */
tcg_gen_movi_tl(cpu_ov, 0);
}
if (opc3 & 0x01) {
/* Signed */
tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
if (opc3 & 0x02) {
/* Saturate */
tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
tcg_gen_xori_tl(t0, t0, 0x7fffffff);
}
} else {
/* Unsigned */
tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
if (opc3 & 0x02) {
/* Saturate */
tcg_gen_movi_tl(t0, UINT32_MAX);
}
}
if (opc3 & 0x10) {
/* Check overflow */
tcg_gen_movi_tl(cpu_ov, 1);
tcg_gen_movi_tl(cpu_so, 1);
}
gen_set_label(l1);
tcg_gen_mov_tl(cpu_gpr[rt], t0);
}
} else {
tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
}
tcg_temp_free(t0);
tcg_temp_free(t1);
if (unlikely(Rc) != 0) {
/* Update Rc0 */
gen_set_Rc0(ctx, cpu_gpr[rt]);
}
}
| 0
|
381,321
|
bool SettingsPage(CWebSock& WebSock, CTemplate& Tmpl) {
Tmpl.SetFile("settings.tmpl");
if (!WebSock.GetParam("submitted").ToUInt()) {
CString sBindHosts, sMotd;
Tmpl["Action"] = "settings";
Tmpl["Title"] = "Settings";
Tmpl["StatusPrefix"] = CZNC::Get().GetStatusPrefix();
Tmpl["MaxBufferSize"] = CString(CZNC::Get().GetMaxBufferSize());
Tmpl["ConnectDelay"] = CString(CZNC::Get().GetConnectDelay());
Tmpl["ServerThrottle"] = CString(CZNC::Get().GetServerThrottle());
Tmpl["AnonIPLimit"] = CString(CZNC::Get().GetAnonIPLimit());
Tmpl["ProtectWebSessions"] = CString(CZNC::Get().GetProtectWebSessions());
const VCString& vsBindHosts = CZNC::Get().GetBindHosts();
for (unsigned int a = 0; a < vsBindHosts.size(); a++) {
CTemplate& l = Tmpl.AddRow("BindHostLoop");
l["BindHost"] = vsBindHosts[a];
}
const VCString& vsMotd = CZNC::Get().GetMotd();
for (unsigned int b = 0; b < vsMotd.size(); b++) {
CTemplate& l = Tmpl.AddRow("MOTDLoop");
l["Line"] = vsMotd[b];
}
const vector<CListener*>& vpListeners = CZNC::Get().GetListeners();
for (unsigned int c = 0; c < vpListeners.size(); c++) {
CListener* pListener = vpListeners[c];
CTemplate& l = Tmpl.AddRow("ListenLoop");
l["Port"] = CString(pListener->GetPort());
l["BindHost"] = pListener->GetBindHost();
l["IsWeb"] = CString(pListener->GetAcceptType() != CListener::ACCEPT_IRC);
l["IsIRC"] = CString(pListener->GetAcceptType() != CListener::ACCEPT_HTTP);
l["URIPrefix"] = pListener->GetURIPrefix() + "/";
// simple protection for user from shooting his own foot
// TODO check also for hosts/families
// such check is only here, user still can forge HTTP request to delete web port
l["SuggestDeletion"] = CString(pListener->GetPort() != WebSock.GetLocalPort());
#ifdef HAVE_LIBSSL
if (pListener->IsSSL()) {
l["IsSSL"] = "true";
}
#endif
#ifdef HAVE_IPV6
switch (pListener->GetAddrType()) {
case ADDR_IPV4ONLY:
l["IsIPV4"] = "true";
break;
case ADDR_IPV6ONLY:
l["IsIPV6"] = "true";
break;
case ADDR_ALL:
l["IsIPV4"] = "true";
l["IsIPV6"] = "true";
break;
}
#else
l["IsIPV4"] = "true";
#endif
}
vector<CString> vDirs;
WebSock.GetAvailSkins(vDirs);
for (unsigned int d = 0; d < vDirs.size(); d++) {
const CString& SubDir = vDirs[d];
CTemplate& l = Tmpl.AddRow("SkinLoop");
l["Name"] = SubDir;
if (SubDir == CZNC::Get().GetSkinName()) {
l["Checked"] = "true";
}
}
set<CModInfo> ssGlobalMods;
CZNC::Get().GetModules().GetAvailableMods(ssGlobalMods, CModInfo::GlobalModule);
for (set<CModInfo>::iterator it = ssGlobalMods.begin(); it != ssGlobalMods.end(); ++it) {
const CModInfo& Info = *it;
CTemplate& l = Tmpl.AddRow("ModuleLoop");
CModule *pModule = CZNC::Get().GetModules().FindModule(Info.GetName());
if (pModule) {
l["Checked"] = "true";
l["Args"] = pModule->GetArgs();
if (CModInfo::GlobalModule == GetType() && Info.GetName() == GetModName()) {
l["Disabled"] = "true";
}
}
l["Name"] = Info.GetName();
l["Description"] = Info.GetDescription();
l["Wiki"] = Info.GetWikiPage();
l["HasArgs"] = CString(Info.GetHasArgs());
l["ArgsHelpText"] = Info.GetArgsHelpText();
}
return true;
}
CString sArg;
sArg = WebSock.GetParam("statusprefix"); CZNC::Get().SetStatusPrefix(sArg);
sArg = WebSock.GetParam("maxbufsize"); CZNC::Get().SetMaxBufferSize(sArg.ToUInt());
sArg = WebSock.GetParam("connectdelay"); CZNC::Get().SetConnectDelay(sArg.ToUInt());
sArg = WebSock.GetParam("serverthrottle"); CZNC::Get().SetServerThrottle(sArg.ToUInt());
sArg = WebSock.GetParam("anoniplimit"); CZNC::Get().SetAnonIPLimit(sArg.ToUInt());
sArg = WebSock.GetParam("protectwebsessions"); CZNC::Get().SetProtectWebSessions(sArg.ToBool());
VCString vsArgs;
WebSock.GetRawParam("motd").Split("\n", vsArgs);
CZNC::Get().ClearMotd();
unsigned int a = 0;
for (a = 0; a < vsArgs.size(); a++) {
CZNC::Get().AddMotd(vsArgs[a].TrimRight_n());
}
WebSock.GetRawParam("bindhosts").Split("\n", vsArgs);
CZNC::Get().ClearBindHosts();
for (a = 0; a < vsArgs.size(); a++) {
CZNC::Get().AddBindHost(vsArgs[a].Trim_n());
}
CZNC::Get().SetSkinName(WebSock.GetParam("skin"));
set<CString> ssArgs;
WebSock.GetParamValues("loadmod", ssArgs);
for (set<CString>::iterator it = ssArgs.begin(); it != ssArgs.end(); ++it) {
CString sModRet;
CString sModName = (*it).TrimRight_n("\r");
CString sModLoadError;
if (!sModName.empty()) {
CString sArgs = WebSock.GetParam("modargs_" + sModName);
CModule *pMod = CZNC::Get().GetModules().FindModule(sModName);
if (!pMod) {
if (!CZNC::Get().GetModules().LoadModule(sModName, sArgs, CModInfo::GlobalModule, NULL, NULL, sModRet)) {
sModLoadError = "Unable to load module [" + sModName + "] [" + sModRet + "]";
}
} else if (pMod->GetArgs() != sArgs) {
if (!CZNC::Get().GetModules().ReloadModule(sModName, sArgs, NULL, NULL, sModRet)) {
sModLoadError = "Unable to reload module [" + sModName + "] [" + sModRet + "]";
}
}
if (!sModLoadError.empty()) {
DEBUG(sModLoadError);
WebSock.GetSession()->AddError(sModLoadError);
}
}
}
const CModules& vCurMods = CZNC::Get().GetModules();
set<CString> ssUnloadMods;
for (a = 0; a < vCurMods.size(); a++) {
CModule* pCurMod = vCurMods[a];
if (ssArgs.find(pCurMod->GetModName()) == ssArgs.end() &&
(CModInfo::GlobalModule != GetType() || pCurMod->GetModName() != GetModName())) {
ssUnloadMods.insert(pCurMod->GetModName());
}
}
for (set<CString>::iterator it2 = ssUnloadMods.begin(); it2 != ssUnloadMods.end(); ++it2) {
CZNC::Get().GetModules().UnloadModule(*it2);
}
if (!CZNC::Get().WriteConfig()) {
WebSock.GetSession()->AddError("Settings changed, but config was not written");
}
WebSock.Redirect(GetWebPath() + "settings");
/* we don't want the template to be printed while we redirect */
return false;
}
| 0
|
343,244
|
static int ffserver_apply_stream_config(AVCodecContext *enc, const AVDictionary *conf, AVDictionary **opts)
{
AVDictionaryEntry *e;
int ret = 0;
/* Return values from ffserver_set_*_param are ignored.
Values are initially parsed and checked before inserting to AVDictionary. */
//video params
if ((e = av_dict_get(conf, "VideoBitRateRangeMin", NULL, 0)))
ffserver_set_int_param(&enc->rc_min_rate, e->value, 1000, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoBitRateRangeMax", NULL, 0)))
ffserver_set_int_param(&enc->rc_max_rate, e->value, 1000, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "Debug", NULL, 0)))
ffserver_set_int_param(&enc->debug, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "Strict", NULL, 0)))
ffserver_set_int_param(&enc->strict_std_compliance, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoBufferSize", NULL, 0)))
ffserver_set_int_param(&enc->rc_buffer_size, e->value, 8*1024, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoBitRateTolerance", NULL, 0)))
ffserver_set_int_param(&enc->bit_rate_tolerance, e->value, 1000, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoBitRate", NULL, 0)))
ffserver_set_int_param(&enc->bit_rate, e->value, 1000, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoSizeWidth", NULL, 0)))
ffserver_set_int_param(&enc->width, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoSizeHeight", NULL, 0)))
ffserver_set_int_param(&enc->height, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "PixelFormat", NULL, 0))) {
int val;
ffserver_set_int_param(&val, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
enc->pix_fmt = val;
}
if ((e = av_dict_get(conf, "VideoGopSize", NULL, 0)))
ffserver_set_int_param(&enc->gop_size, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoFrameRateNum", NULL, 0)))
ffserver_set_int_param(&enc->time_base.num, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoFrameRateDen", NULL, 0)))
ffserver_set_int_param(&enc->time_base.den, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoQDiff", NULL, 0)))
ffserver_set_int_param(&enc->max_qdiff, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoQMax", NULL, 0)))
ffserver_set_int_param(&enc->qmax, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "VideoQMin", NULL, 0)))
ffserver_set_int_param(&enc->qmin, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "LumiMask", NULL, 0)))
ffserver_set_float_param(&enc->lumi_masking, e->value, 0, -FLT_MAX, FLT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "DarkMask", NULL, 0)))
ffserver_set_float_param(&enc->dark_masking, e->value, 0, -FLT_MAX, FLT_MAX, NULL, 0, NULL);
if (av_dict_get(conf, "BitExact", NULL, 0))
enc->flags |= CODEC_FLAG_BITEXACT;
if (av_dict_get(conf, "DctFastint", NULL, 0))
enc->dct_algo = FF_DCT_FASTINT;
if (av_dict_get(conf, "IdctSimple", NULL, 0))
enc->idct_algo = FF_IDCT_SIMPLE;
if (av_dict_get(conf, "VideoHighQuality", NULL, 0))
enc->mb_decision = FF_MB_DECISION_BITS;
if ((e = av_dict_get(conf, "VideoTag", NULL, 0)))
enc->codec_tag = MKTAG(e->value[0], e->value[1], e->value[2], e->value[3]);
if (av_dict_get(conf, "Qscale", NULL, 0)) {
enc->flags |= CODEC_FLAG_QSCALE;
ffserver_set_int_param(&enc->global_quality, e->value, FF_QP2LAMBDA, INT_MIN, INT_MAX, NULL, 0, NULL);
}
if (av_dict_get(conf, "Video4MotionVector", NULL, 0)) {
enc->mb_decision = FF_MB_DECISION_BITS; //FIXME remove
enc->flags |= CODEC_FLAG_4MV;
}
//audio params
if ((e = av_dict_get(conf, "AudioChannels", NULL, 0)))
ffserver_set_int_param(&enc->channels, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "AudioSampleRate", NULL, 0)))
ffserver_set_int_param(&enc->sample_rate, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
if ((e = av_dict_get(conf, "AudioBitRate", NULL, 0)))
ffserver_set_int_param(&enc->bit_rate, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL);
av_opt_set_dict2(enc, opts, AV_OPT_SEARCH_CHILDREN);
e = NULL;
while (e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX)) {
av_log(NULL, AV_LOG_ERROR, "Provided AVOption '%s' doesn't match any existing option.\n", e->key);
ret = AVERROR(EINVAL);
}
return ret;
}
| 0
|
262,794
|
static void ImportBlackQuantum(const Image *image,QuantumInfo *quantum_info,
const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
Quantum *magick_restrict q,ExceptionInfo *exception)
{
QuantumAny
range;
register ssize_t
x;
unsigned int
pixel;
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",image->filename);
return;
}
switch (quantum_info->depth)
{
case 8:
{
unsigned char
pixel;
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushCharPixel(p,&pixel);
SetPixelBlack(image,ScaleCharToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
case 16:
{
unsigned short
pixel;
if (quantum_info->format == FloatingPointQuantumFormat)
{
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushShortPixel(quantum_info->endian,p,&pixel);
SetPixelBlack(image,ClampToQuantum(QuantumRange*
HalfToSinglePrecision(pixel)),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushShortPixel(quantum_info->endian,p,&pixel);
SetPixelBlack(image,ScaleShortToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
case 32:
{
unsigned int
pixel;
if (quantum_info->format == FloatingPointQuantumFormat)
{
float
pixel;
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushFloatPixel(quantum_info,p,&pixel);
SetPixelBlack(image,ClampToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushLongPixel(quantum_info->endian,p,&pixel);
SetPixelBlack(image,ScaleLongToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
case 64:
{
if (quantum_info->format == FloatingPointQuantumFormat)
{
double
pixel;
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushDoublePixel(quantum_info,p,&pixel);
SetPixelBlack(image,ClampToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
}
default:
{
range=GetQuantumRange(quantum_info->depth);
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushQuantumPixel(quantum_info,p,&pixel);
SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
}
}
| 0
|
122,094
|
void btrfs_test_destroy_inode(struct inode *inode)
{
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
| 0
|
67,893
|
raptor_turtle_writer_increase_indent(raptor_turtle_writer *turtle_writer)
{
turtle_writer->depth += turtle_writer->indent;
}
| 0
|
125,678
|
s32 gf_media_hevc_read_sps_ex(char *data, u32 size, HEVCState *hevc, u32 *vui_flag_pos)
{
GF_BitStream *bs;
char *data_without_emulation_bytes = NULL;
u32 data_without_emulation_bytes_size = 0;
s32 sps_id= -1;
u8 layer_id;
if (vui_flag_pos) *vui_flag_pos = 0;
data_without_emulation_bytes_size = avc_emulation_bytes_remove_count(data, size);
if (!data_without_emulation_bytes_size) {
bs = gf_bs_new(data, size, GF_BITSTREAM_READ);
} else {
/*still contains emulation bytes*/
data_without_emulation_bytes = gf_malloc(size*sizeof(char));
data_without_emulation_bytes_size = avc_remove_emulation_bytes(data, data_without_emulation_bytes, size);
bs = gf_bs_new(data_without_emulation_bytes, data_without_emulation_bytes_size, GF_BITSTREAM_READ);
}
if (!bs) goto exit;
if (! hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) goto exit;
sps_id = gf_media_hevc_read_sps_bs(bs, hevc, layer_id, vui_flag_pos);
exit:
if (bs) gf_bs_del(bs);
if (data_without_emulation_bytes) gf_free(data_without_emulation_bytes);
return sps_id;
}
| 0
|
383,461
|
FLAC_API FLAC__bool FLAC__stream_encoder_finish(FLAC__StreamEncoder *encoder)
{
FLAC__bool error = false;
FLAC__ASSERT(0 != encoder);
FLAC__ASSERT(0 != encoder->private_);
FLAC__ASSERT(0 != encoder->protected_);
if(encoder->protected_->state == FLAC__STREAM_ENCODER_UNINITIALIZED)
return true;
if(encoder->protected_->state == FLAC__STREAM_ENCODER_OK && !encoder->private_->is_being_deleted) {
if(encoder->private_->current_sample_number != 0) {
const FLAC__bool is_fractional_block = encoder->protected_->blocksize != encoder->private_->current_sample_number;
encoder->protected_->blocksize = encoder->private_->current_sample_number;
if(!process_frame_(encoder, is_fractional_block, /*is_last_block=*/true))
error = true;
}
}
if(encoder->protected_->do_md5)
FLAC__MD5Final(encoder->private_->streaminfo.data.stream_info.md5sum, &encoder->private_->md5context);
if(!encoder->private_->is_being_deleted) {
if(encoder->protected_->state == FLAC__STREAM_ENCODER_OK) {
if(encoder->private_->seek_callback) {
#if FLAC__HAS_OGG
if(encoder->private_->is_ogg)
update_ogg_metadata_(encoder);
else
#endif
update_metadata_(encoder);
/* check if an error occurred while updating metadata */
if(encoder->protected_->state != FLAC__STREAM_ENCODER_OK)
error = true;
}
if(encoder->private_->metadata_callback)
encoder->private_->metadata_callback(encoder, &encoder->private_->streaminfo, encoder->private_->client_data);
}
if(encoder->protected_->verify && 0 != encoder->private_->verify.decoder && !FLAC__stream_decoder_finish(encoder->private_->verify.decoder)) {
if(!error)
encoder->protected_->state = FLAC__STREAM_ENCODER_VERIFY_MISMATCH_IN_AUDIO_DATA;
error = true;
}
}
if(0 != encoder->private_->file) {
if(encoder->private_->file != stdout)
fclose(encoder->private_->file);
encoder->private_->file = 0;
}
#if FLAC__HAS_OGG
if(encoder->private_->is_ogg)
FLAC__ogg_encoder_aspect_finish(&encoder->protected_->ogg_encoder_aspect);
#endif
free_(encoder);
set_defaults_(encoder);
if(!error)
encoder->protected_->state = FLAC__STREAM_ENCODER_UNINITIALIZED;
return !error;
}
| 0
|
378,043
|
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
struct qdisc_rate_table *rtab;
if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
nla_len(tab) != TC_RTAB_SIZE)
return NULL;
for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
!memcmp(&rtab->data, nla_data(tab), 1024)) {
rtab->refcnt++;
return rtab;
}
}
rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
if (rtab) {
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, nla_data(tab), 1024);
if (r->linklayer == TC_LINKLAYER_UNAWARE)
r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
}
return rtab;
}
| 0
|
233,712
|
void HTMLMediaElement::stopPeriodicTimers() {
m_progressEventTimer.stop();
m_playbackProgressTimer.stop();
m_checkViewportIntersectionTimer.stop();
}
| 0
|
77,333
|
static enum_func_status
php_mysqlnd_read_error_from_line(zend_uchar *buf, size_t buf_len,
char *error, int error_buf_len,
unsigned int *error_no, char *sqlstate TSRMLS_DC)
{
zend_uchar *p = buf;
int error_msg_len= 0;
DBG_ENTER("php_mysqlnd_read_error_from_line");
*error_no = CR_UNKNOWN_ERROR;
memcpy(sqlstate, unknown_sqlstate, MYSQLND_SQLSTATE_LENGTH);
if (buf_len > 2) {
*error_no = uint2korr(p);
p+= 2;
/*
sqlstate is following. No need to check for buf_left_len as we checked > 2 above,
if it was >=2 then we would need a check
*/
if (*p == '#') {
++p;
if ((buf_len - (p - buf)) >= MYSQLND_SQLSTATE_LENGTH) {
memcpy(sqlstate, p, MYSQLND_SQLSTATE_LENGTH);
p+= MYSQLND_SQLSTATE_LENGTH;
} else {
goto end;
}
}
if ((buf_len - (p - buf)) > 0) {
error_msg_len = MIN((int)((buf_len - (p - buf))), (int) (error_buf_len - 1));
memcpy(error, p, error_msg_len);
}
}
end:
sqlstate[MYSQLND_SQLSTATE_LENGTH] = '\0';
error[error_msg_len]= '\0';
DBG_RETURN(FAIL);
| 0
|
515,467
|
void Http2Session::HandleAltSvcFrame(const nghttp2_frame* frame) {
if (!(js_fields_->bitfield & (1 << kSessionHasAltsvcListeners))) return;
Isolate* isolate = env()->isolate();
HandleScope scope(isolate);
Local<Context> context = env()->context();
Context::Scope context_scope(context);
int32_t id = GetFrameID(frame);
nghttp2_extension ext = frame->ext;
nghttp2_ext_altsvc* altsvc = static_cast<nghttp2_ext_altsvc*>(ext.payload);
Debug(this, "handling altsvc frame");
Local<Value> argv[3] = {
Integer::New(isolate, id),
OneByteString(isolate, altsvc->origin, altsvc->origin_len),
OneByteString(isolate, altsvc->field_value, altsvc->field_value_len)
};
MakeCallback(env()->http2session_on_altsvc_function(),
arraysize(argv), argv);
}
| 0
|
415,471
|
int WavInFile::read(float *buffer, int maxElems)
{
unsigned int afterDataRead;
int numBytes;
int numElems;
int bytesPerSample;
assert(buffer);
bytesPerSample = header.format.bits_per_sample / 8;
if ((bytesPerSample < 1) || (bytesPerSample > 4))
{
stringstream ss;
ss << "\nOnly 8/16/24/32 bit sample WAV files supported. Can't open WAV file with ";
ss << (int)header.format.bits_per_sample;
ss << " bit sample format. ";
ST_THROW_RT_ERROR(ss.str().c_str());
}
numBytes = maxElems * bytesPerSample;
afterDataRead = dataRead + numBytes;
if (afterDataRead > header.data.data_len)
{
// Don't read more samples than are marked available in header
numBytes = (int)header.data.data_len - (int)dataRead;
assert(numBytes >= 0);
}
// read raw data into temporary buffer
char *temp = (char*)getConvBuffer(numBytes);
numBytes = (int)fread(temp, 1, numBytes, fptr);
dataRead += numBytes;
numElems = numBytes / bytesPerSample;
// swap byte ordert & convert to float, depending on sample format
switch (bytesPerSample)
{
case 1:
{
unsigned char *temp2 = (unsigned char*)temp;
double conv = 1.0 / 128.0;
for (int i = 0; i < numElems; i ++)
{
buffer[i] = (float)(temp2[i] * conv - 1.0);
}
break;
}
case 2:
{
short *temp2 = (short*)temp;
double conv = 1.0 / 32768.0;
for (int i = 0; i < numElems; i ++)
{
short value = temp2[i];
buffer[i] = (float)(_swap16(value) * conv);
}
break;
}
case 3:
{
char *temp2 = (char *)temp;
double conv = 1.0 / 8388608.0;
for (int i = 0; i < numElems; i ++)
{
int value = *((int*)temp2);
value = _swap32(value) & 0x00ffffff; // take 24 bits
value |= (value & 0x00800000) ? 0xff000000 : 0; // extend minus sign bits
buffer[i] = (float)(value * conv);
temp2 += 3;
}
break;
}
case 4:
{
int *temp2 = (int *)temp;
double conv = 1.0 / 2147483648.0;
assert(sizeof(int) == 4);
for (int i = 0; i < numElems; i ++)
{
int value = temp2[i];
buffer[i] = (float)(_swap32(value) * conv);
}
break;
}
}
return numElems;
}
| 0
|
329,028
|
print_ipc_cmd(int cmd)
{
#define output_cmd(val) \
if( cmd == val ) { \
gemu_log(#val); \
return; \
}
cmd &= 0xff;
/* General IPC commands */
output_cmd( IPC_RMID );
output_cmd( IPC_SET );
output_cmd( IPC_STAT );
output_cmd( IPC_INFO );
/* msgctl() commands */
#ifdef __USER_MISC
output_cmd( MSG_STAT );
output_cmd( MSG_INFO );
#endif
/* shmctl() commands */
output_cmd( SHM_LOCK );
output_cmd( SHM_UNLOCK );
output_cmd( SHM_STAT );
output_cmd( SHM_INFO );
/* semctl() commands */
output_cmd( GETPID );
output_cmd( GETVAL );
output_cmd( GETALL );
output_cmd( GETNCNT );
output_cmd( GETZCNT );
output_cmd( SETVAL );
output_cmd( SETALL );
output_cmd( SEM_STAT );
output_cmd( SEM_INFO );
output_cmd( IPC_RMID );
output_cmd( IPC_RMID );
output_cmd( IPC_RMID );
output_cmd( IPC_RMID );
output_cmd( IPC_RMID );
output_cmd( IPC_RMID );
output_cmd( IPC_RMID );
output_cmd( IPC_RMID );
output_cmd( IPC_RMID );
/* Some value we don't recognize */
gemu_log("%d",cmd);
}
| 1
|
299,019
|
static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
{
struct {
__le32 len;
struct brcmf_bss_info_le bss_le;
} *buf;
u16 capability;
int err;
buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (!buf)
return;
buf->len = cpu_to_le32(WL_BSS_INFO_MAX);
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, buf,
WL_BSS_INFO_MAX);
if (err) {
brcmf_err("Failed to get bss info (%d)\n", err);
return;
}
si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
si->bss_param.dtim_period = buf->bss_le.dtim_period;
capability = le16_to_cpu(buf->bss_le.capability);
if (capability & IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT)
si->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
if (capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
}
| 0
|
212,341
|
void X509_STORE_CTX_set_time(X509_STORE_CTX *ctx, unsigned long flags,
time_t t)
{
X509_VERIFY_PARAM_set_time(ctx->param, t);
}
| 0
|
501,294
|
static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
void* workspace, size_t wkspSize,
const int bmi2)
{
size_t const minGain = ZSTD_minGain(srcSize, strategy);
size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
BYTE* const ostart = (BYTE*)dst;
U32 singleStream = srcSize < 256;
symbolEncodingType_e hType = set_compressed;
size_t cLitSize;
DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
disableLiteralCompression);
/* Prepare nextEntropy assuming reusing the existing table */
memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
if (disableLiteralCompression)
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
/* small ? don't even attempt compression (speed opt) */
# define COMPRESS_LITERALS_SIZE_MIN 63
{ size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
{ HUF_repeat repeat = prevHuf->repeatMode;
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
: HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
if (repeat != HUF_repeat_none) {
/* reused the existing table */
hType = set_repeat;
}
}
if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
if (cLitSize==1) {
memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
}
if (hType == set_compressed) {
/* using a newly constructed table */
nextHuf->repeatMode = HUF_repeat_check;
}
/* Build header */
switch(lhSize)
{
case 3: /* 2 - 2 - 10 - 10 */
{ U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
MEM_writeLE24(ostart, lhc);
break;
}
case 4: /* 2 - 2 - 14 - 14 */
{ U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
MEM_writeLE32(ostart, lhc);
break;
}
case 5: /* 2 - 2 - 18 - 18 */
{ U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
MEM_writeLE32(ostart, lhc);
ostart[4] = (BYTE)(cLitSize >> 10);
break;
}
default: /* not possible : lhSize is {3,4,5} */
assert(0);
}
return lhSize+cLitSize;
}
| 0
|
72,246
|
static struct dentry *vfat_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
}
| 0
|
185,947
|
void SocketStream::set_addresses(const AddressList& addresses) {
addresses_ = addresses;
}
| 0
|
57,805
|
R_API ut64 r_bin_java_code_attr_calc_size(RBinJavaAttrInfo *attr) {
RListIter *iter;
// RListIter *iter_tmp;
ut64 size = 0;
if (attr) {
// attr = r_bin_java_default_attr_new (buffer, sz, buf_offset);
size += 6;
// attr->info.code_attr.max_stack = R_BIN_JAVA_USHORT (buffer, 0);
size += 2;
// attr->info.code_attr.max_locals = R_BIN_JAVA_USHORT (buffer, 2);
size += 2;
// attr->info.code_attr.code_length = R_BIN_JAVA_UINT (buffer, 4);
size += 2;
if (attr->info.code_attr.code) {
size += attr->info.code_attr.code_length;
}
// attr->info.code_attr.exception_table_length = R_BIN_JAVA_USHORT (buffer, offset);
size += 2;
// RBinJavaExceptionEntry *exc_entry;
// r_list_foreach_safe (attr->info.code_attr.exception_table, iter, iter_tmp, exc_entry) {
r_list_foreach_iter (attr->info.code_attr.exception_table, iter) {
// exc_entry->start_pc = R_BIN_JAVA_USHORT (buffer,offset);
size += 2;
// exc_entry->end_pc = R_BIN_JAVA_USHORT (buffer,offset);
size += 2;
// exc_entry->handler_pc = R_BIN_JAVA_USHORT (buffer,offset);
size += 2;
// exc_entry->catch_type = R_BIN_JAVA_USHORT (buffer, offset);
size += 2;
}
// attr->info.code_attr.attributes_count = R_BIN_JAVA_USHORT (buffer, offset);
size += 2;
// RBinJavaAttrInfo *_attr;
if (attr->info.code_attr.attributes_count > 0) {
// r_list_foreach_safe (attr->info.code_attr.attributes, iter, iter_tmp, _attr) {
r_list_foreach_iter (attr->info.code_attr.attributes, iter) {
size += r_bin_java_attr_calc_size (attr);
}
}
}
return size;
}
| 0
|
1,011
|
void mt_init ( mtrand * mt , uint32_t seed ) {
int i ;
mt -> mt_buffer_ [ 0 ] = seed ;
mt -> mt_index_ = MT_LEN ;
for ( i = 1 ;
i < MT_LEN ;
i ++ ) {
mt -> mt_buffer_ [ i ] = ( 1812433253UL * ( mt -> mt_buffer_ [ i - 1 ] ^ ( mt -> mt_buffer_ [ i - 1 ] >> 30 ) ) + i ) ;
}
}
| 1
|
155,419
|
MONGO_EXPORT void gridfile_writer_init( gridfile *gfile, gridfs *gfs,
const char *remote_name, const char *content_type ) {
gfile->gfs = gfs;
bson_oid_gen( &( gfile->id ) );
gfile->chunk_num = 0;
gfile->length = 0;
gfile->pending_len = 0;
gfile->pending_data = NULL;
gfile->remote_name = ( char * )bson_malloc( strlen( remote_name ) + 1 );
strcpy( ( char * )gfile->remote_name, remote_name );
gfile->content_type = ( char * )bson_malloc( strlen( content_type ) + 1 );
strcpy( ( char * )gfile->content_type, content_type );
}
| 0
|
338,652
|
static void rgb24_to_yuvj444p(AVPicture *dst, AVPicture *src,
int width, int height)
{
int src_wrap, x, y;
int r, g, b;
uint8_t *lum, *cb, *cr;
const uint8_t *p;
lum = dst->data[0];
cb = dst->data[1];
cr = dst->data[2];
src_wrap = src->linesize[0] - width * BPP;
p = src->data[0];
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
RGB_IN(r, g, b, p);
lum[0] = RGB_TO_Y(r, g, b);
cb[0] = RGB_TO_U(r, g, b, 0);
cr[0] = RGB_TO_V(r, g, b, 0);
cb++;
cr++;
lum++;
}
p += src_wrap;
lum += dst->linesize[0] - width;
cb += dst->linesize[1] - width;
cr += dst->linesize[2] - width;
}
}
| 0
|
173,233
|
bgp_attr_aggregate_intern (struct bgp *bgp, u_char origin,
struct aspath *aspath,
struct community *community, int as_set)
{
struct attr attr;
struct attr *new;
struct attr_extra *attre;
memset (&attr, 0, sizeof (struct attr));
attre = bgp_attr_extra_get (&attr);
/* Origin attribute. */
attr.origin = origin;
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_ORIGIN);
/* AS path attribute. */
if (aspath)
attr.aspath = aspath_intern (aspath);
else
attr.aspath = aspath_empty ();
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_AS_PATH);
/* Next hop attribute. */
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP);
if (community)
{
attr.community = community;
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_COMMUNITIES);
}
attre->weight = BGP_ATTR_DEFAULT_WEIGHT;
#ifdef HAVE_IPV6
attre->mp_nexthop_len = IPV6_MAX_BYTELEN;
#endif
if (! as_set)
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_ATOMIC_AGGREGATE);
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_AGGREGATOR);
if (CHECK_FLAG (bgp->config, BGP_CONFIG_CONFEDERATION))
attre->aggregator_as = bgp->confed_id;
else
attre->aggregator_as = bgp->as;
attre->aggregator_addr = bgp->router_id;
new = bgp_attr_intern (&attr);
bgp_attr_extra_free (&attr);
aspath_unintern (&new->aspath);
return new;
}
| 0
|
256,289
|
SPL_METHOD(DirectoryIterator, getFilename)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_STRING(intern->u.dir.entry.d_name, 1);
}
| 1
|
494,462
|
start_unicast_message(struct neighbour *neigh, int type, int len)
{
if(unicast_neighbour) {
if(neigh != unicast_neighbour ||
unicast_buffered + len + 2 >=
MIN(UNICAST_BUFSIZE, babel_get_if_nfo(neigh->ifp)->bufsize))
flush_unicast(0);
}
if(!unicast_buffer)
unicast_buffer = malloc(UNICAST_BUFSIZE);
if(!unicast_buffer) {
flog_err(EC_BABEL_MEMORY, "malloc(unicast_buffer): %s",
safe_strerror(errno));
return -1;
}
unicast_neighbour = neigh;
unicast_buffer[unicast_buffered++] = type;
unicast_buffer[unicast_buffered++] = len;
return 1;
}
| 0
|
204,821
|
void RenderThreadImpl::CreateFrame(mojom::CreateFrameParamsPtr params) {
base::debug::SetCrashKeyValue("newframe_routing_id",
base::IntToString(params->routing_id));
base::debug::SetCrashKeyValue("newframe_proxy_id",
base::IntToString(params->proxy_routing_id));
base::debug::SetCrashKeyValue("newframe_opener_id",
base::IntToString(params->opener_routing_id));
base::debug::SetCrashKeyValue("newframe_parent_id",
base::IntToString(params->parent_routing_id));
base::debug::SetCrashKeyValue("newframe_widget_id",
base::IntToString(
params->widget_params->routing_id));
base::debug::SetCrashKeyValue("newframe_widget_hidden",
params->widget_params->hidden ? "yes" : "no");
base::debug::SetCrashKeyValue("newframe_replicated_origin",
params->replication_state.origin.Serialize());
CompositorDependencies* compositor_deps = this;
RenderFrameImpl::CreateFrame(
params->routing_id, std::move(params->interface_provider),
params->proxy_routing_id, params->opener_routing_id,
params->parent_routing_id, params->previous_sibling_routing_id,
params->devtools_frame_token, params->replication_state, compositor_deps,
*params->widget_params, params->frame_owner_properties);
}
| 0
|
380,679
|
varsearch(struct block *l, struct tbl **vpp, const char *vn, uint32_t h)
{
register struct tbl *vp;
if (l) {
varsearch_loop:
if ((vp = ktsearch(&l->vars, vn, h)) != NULL)
goto varsearch_out;
if (l->next != NULL) {
l = l->next;
goto varsearch_loop;
}
}
vp = NULL;
varsearch_out:
*vpp = vp;
return (l);
}
| 0
|
366,704
|
static int cap_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
struct flowi *fl)
{
return 1;
}
| 0
|
389,069
|
static void read_final_goodbye(int f_in, int f_out)
{
int i, iflags, xlen;
uchar fnamecmp_type;
char xname[MAXPATHLEN];
shutting_down = True;
if (protocol_version < 29)
i = read_int(f_in);
else {
i = read_ndx_and_attrs(f_in, f_out, &iflags, &fnamecmp_type, xname, &xlen);
if (protocol_version >= 31 && i == NDX_DONE) {
if (am_sender)
write_ndx(f_out, NDX_DONE);
else {
if (batch_gen_fd >= 0) {
while (read_int(batch_gen_fd) != NDX_DEL_STATS) {}
read_del_stats(batch_gen_fd);
}
write_int(f_out, NDX_DONE);
}
i = read_ndx_and_attrs(f_in, f_out, &iflags, &fnamecmp_type, xname, &xlen);
}
}
if (i != NDX_DONE) {
rprintf(FERROR, "Invalid packet at end of run (%d) [%s]\n",
i, who_am_i());
exit_cleanup(RERR_PROTOCOL);
}
}
| 0
|
24,183
|
static bool check_for_exec ( const_os_ptr op ) {
if ( ! r_has_attr ( op , a_execute ) && ref_type_uses_access ( r_type ( op ) ) && ( r_has_attr ( op , a_executable ) || ! r_has_type ( op , t_dictionary ) ) ) {
return_error ( gs_error_invalidaccess ) ;
}
return 0 ;
}
| 0
|
137,177
|
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
loff_t size;
unsigned long len;
int ret = -EINVAL;
void *fsdata;
struct file *file = vma->vm_file;
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
/*
* Get i_alloc_sem to stop truncates messing with the inode. We cannot
* get i_mutex because we are already holding mmap_sem.
*/
down_read(&inode->i_alloc_sem);
size = i_size_read(inode);
if (page->mapping != mapping || size <= page_offset(page)
|| !PageUptodate(page)) {
/* page got truncated from under us? */
goto out_unlock;
}
ret = 0;
if (PageMappedToDisk(page))
goto out_unlock;
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
lock_page(page);
/*
* return if we have all the buffers mapped. This avoid
* the need to call write_begin/write_end which does a
* journal_start/journal_stop which can block and take
* long time
*/
if (page_has_buffers(page)) {
if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
ext4_bh_unmapped)) {
unlock_page(page);
goto out_unlock;
}
}
unlock_page(page);
/*
* OK, we need to fill the hole... Do write_begin write_end
* to do block allocation/reservation.We are not holding
* inode.i__mutex here. That allow * parallel write_begin,
* write_end call. lock_page prevent this from happening
* on the same page though
*/
ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
if (ret < 0)
goto out_unlock;
ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
len, len, page, fsdata);
if (ret < 0)
goto out_unlock;
ret = 0;
out_unlock:
if (ret)
ret = VM_FAULT_SIGBUS;
up_read(&inode->i_alloc_sem);
return ret;
}
| 0
|
497,744
|
bool Binary::remove(LOAD_COMMAND_TYPES type) {
bool removed = false;
while (LoadCommand* cmd = get(type)) {
removed = remove(*cmd);
}
return removed;
}
| 0
|
104,364
|
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
struct bpf_prog *xdp_prog)
{
struct xdp_buff xdp;
u32 act = XDP_DROP;
void *orig_data;
int hlen, off;
u32 mac_len;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
*/
if (skb_cloned(skb))
return XDP_PASS;
if (skb_linearize(skb))
goto do_drop;
/* The XDP program wants to see the packet starting at the MAC
* header.
*/
mac_len = skb->data - skb_mac_header(skb);
hlen = skb_headlen(skb) + mac_len;
xdp.data = skb->data - mac_len;
xdp.data_end = xdp.data + hlen;
xdp.data_hard_start = skb->data - skb_headroom(skb);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
off = xdp.data - orig_data;
if (off > 0)
__skb_pull(skb, off);
else if (off < 0)
__skb_push(skb, -off);
skb->mac_header += off;
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
__skb_push(skb, mac_len);
/* fall through */
case XDP_PASS:
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fall through */
case XDP_ABORTED:
trace_xdp_exception(skb->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
do_drop:
kfree_skb(skb);
break;
}
return act;
| 0
|
152,486
|
void JavascriptArray::ArraySpliceHelper(JavascriptArray* pnewArr, JavascriptArray* pArr, uint32 start, uint32 deleteLen, Var* insertArgs, uint32 insertLen, ScriptContext *scriptContext)
{
// Skip pnewArr->EnsureHead(): we don't use existing segment at all.
Recycler *recycler = scriptContext->GetRecycler();
SparseArraySegmentBase** prevSeg = &pArr->head; // holds the next pointer of previous
SparseArraySegmentBase** prevPrevSeg = &pArr->head; // this holds the previous pointer to prevSeg dirty trick.
SparseArraySegmentBase* savePrev = nullptr;
Assert(pArr->head); // We should never have a null head.
pArr->EnsureHead<T>();
SparseArraySegment<T>* startSeg = (SparseArraySegment<T>*)pArr->head;
const uint32 limit = start + deleteLen;
uint32 rightLimit;
if (UInt32Math::Add(startSeg->left, startSeg->size, &rightLimit))
{
rightLimit = JavascriptArray::MaxArrayLength;
}
// Find out the segment to start delete
while (startSeg && (rightLimit <= start))
{
savePrev = startSeg;
prevPrevSeg = prevSeg;
prevSeg = &startSeg->next;
startSeg = (SparseArraySegment<T>*)startSeg->next;
if (startSeg)
{
if (UInt32Math::Add(startSeg->left, startSeg->size, &rightLimit))
{
rightLimit = JavascriptArray::MaxArrayLength;
}
}
}
// handle inlined segment
SparseArraySegmentBase* inlineHeadSegment = nullptr;
bool hasInlineSegment = false;
// The following if else set is used to determine whether a shallow or hard copy is needed
if (JavascriptNativeArray::Is(pArr))
{
if (JavascriptNativeFloatArray::Is(pArr))
{
inlineHeadSegment = DetermineInlineHeadSegmentPointer<JavascriptNativeFloatArray, 0, true>((JavascriptNativeFloatArray*)pArr);
}
else if (JavascriptNativeIntArray::Is(pArr))
{
inlineHeadSegment = DetermineInlineHeadSegmentPointer<JavascriptNativeIntArray, 0, true>((JavascriptNativeIntArray*)pArr);
}
Assert(inlineHeadSegment);
hasInlineSegment = (startSeg == (SparseArraySegment<T>*)inlineHeadSegment);
}
else
{
// This will result in false positives. It is used because DetermineInlineHeadSegmentPointer
// does not handle Arrays that change type e.g. from JavascriptNativeIntArray to JavascriptArray
// This conversion in particular is problematic because JavascriptNativeIntArray is larger than JavascriptArray
// so the returned head segment ptr never equals pArr->head. So we will default to using this and deal with
// false positives. It is better than always doing a hard copy.
hasInlineSegment = HasInlineHeadSegment(pArr->head->length);
}
if (startSeg)
{
// Delete Phase
if (startSeg->left <= start && (startSeg->left + startSeg->length) >= limit)
{
// All splice happens in one segment.
SparseArraySegmentBase *nextSeg = startSeg->next;
// Splice the segment first, which might OOM throw but the array would be intact.
JavascriptArray::ArraySegmentSpliceHelper(pnewArr, (SparseArraySegment<T>*)startSeg, (SparseArraySegment<T>**)prevSeg, start, deleteLen, insertArgs, insertLen, recycler);
while (nextSeg)
{
// adjust next segments left
nextSeg->left = nextSeg->left - deleteLen + insertLen;
if (nextSeg->next == nullptr)
{
nextSeg->EnsureSizeInBound();
}
nextSeg = nextSeg->next;
}
if (*prevSeg)
{
(*prevSeg)->EnsureSizeInBound();
}
return;
}
else
{
SparseArraySegment<T>* newHeadSeg = nullptr; // pnewArr->head is null
SparseArraySegmentBase** prevNewHeadSeg = &(pnewArr->head);
// delete till deleteLen and reuse segments for new array if it is possible.
// 3 steps -
//1. delete 1st segment (which may be partial delete)
// 2. delete next n complete segments
// 3. delete last segment (which again may be partial delete)
// Step (1) -- WOOB 1116297: When left >= start, step (1) is skipped, resulting in pNewArr->head->left != 0. We need to touch up pNewArr.
if (startSeg->left < start)
{
if (start < startSeg->left + startSeg->length)
{
uint32 headDeleteLen = startSeg->left + startSeg->length - start;
if (startSeg->next)
{
// We know the new segment will have a next segment, so allocate it as non-leaf.
newHeadSeg = SparseArraySegment<T>::template AllocateSegmentImpl<false>(recycler, 0, headDeleteLen, headDeleteLen, nullptr);
}
else
{
newHeadSeg = SparseArraySegment<T>::AllocateSegment(recycler, 0, headDeleteLen, headDeleteLen, nullptr);
}
newHeadSeg = SparseArraySegment<T>::CopySegment(recycler, newHeadSeg, 0, startSeg, start, headDeleteLen);
newHeadSeg->next = nullptr;
*prevNewHeadSeg = newHeadSeg;
prevNewHeadSeg = &newHeadSeg->next;
startSeg->Truncate(start);
}
savePrev = startSeg;
prevPrevSeg = prevSeg;
prevSeg = &startSeg->next;
startSeg = (SparseArraySegment<T>*)startSeg->next;
}
// Step (2) first we should do a hard copy if we have an inline head Segment
else if (hasInlineSegment && nullptr != startSeg)
{
// start should be in between left and left + length
if (startSeg->left <= start && start < startSeg->left + startSeg->length)
{
uint32 headDeleteLen = startSeg->left + startSeg->length - start;
if (startSeg->next)
{
// We know the new segment will have a next segment, so allocate it as non-leaf.
newHeadSeg = SparseArraySegment<T>::template AllocateSegmentImpl<false>(recycler, 0, headDeleteLen, headDeleteLen, nullptr);
}
else
{
newHeadSeg = SparseArraySegment<T>::AllocateSegment(recycler, 0, headDeleteLen, headDeleteLen, nullptr);
}
newHeadSeg = SparseArraySegment<T>::CopySegment(recycler, newHeadSeg, 0, startSeg, start, headDeleteLen);
*prevNewHeadSeg = newHeadSeg;
prevNewHeadSeg = &newHeadSeg->next;
// Remove the entire segment from the original array
*prevSeg = startSeg->next;
startSeg = (SparseArraySegment<T>*)startSeg->next;
}
// if we have an inline head segment with 0 elements, remove it
else if (startSeg->left == 0 && startSeg->length == 0)
{
Assert(startSeg->size != 0);
*prevSeg = startSeg->next;
startSeg = (SparseArraySegment<T>*)startSeg->next;
}
}
// Step (2) proper
SparseArraySegmentBase *temp = nullptr;
while (startSeg && (startSeg->left + startSeg->length) <= limit)
{
temp = startSeg->next;
// move that entire segment to new array
startSeg->left = startSeg->left - start;
startSeg->next = nullptr;
*prevNewHeadSeg = startSeg;
prevNewHeadSeg = &startSeg->next;
// Remove the entire segment from the original array
*prevSeg = temp;
startSeg = (SparseArraySegment<T>*)temp;
}
// Step(2) above could delete the original head segment entirely, causing current head not
// starting from 0. Then if any of the following throw, we have a corrupted array. Need
// protection here.
bool dummyHeadNodeInserted = false;
if (!savePrev && (!startSeg || startSeg->left != 0))
{
Assert(pArr->head == startSeg);
pArr->EnsureHeadStartsFromZero<T>(recycler);
Assert(pArr->head && pArr->head->next == startSeg);
savePrev = pArr->head;
prevPrevSeg = prevSeg;
prevSeg = &pArr->head->next;
dummyHeadNodeInserted = true;
}
// Step (3)
if (startSeg && (startSeg->left < limit))
{
// copy the first part of the last segment to be deleted to new array
uint32 headDeleteLen = start + deleteLen - startSeg->left ;
newHeadSeg = SparseArraySegment<T>::AllocateSegment(recycler, startSeg->left - start, headDeleteLen, (SparseArraySegmentBase *)nullptr);
newHeadSeg = SparseArraySegment<T>::CopySegment(recycler, newHeadSeg, startSeg->left - start, startSeg, startSeg->left, headDeleteLen);
newHeadSeg->next = nullptr;
*prevNewHeadSeg = newHeadSeg;
prevNewHeadSeg = &newHeadSeg->next;
// move the last segment
memmove(startSeg->elements, startSeg->elements + headDeleteLen, sizeof(T) * (startSeg->length - headDeleteLen));
startSeg->left = startSeg->left + headDeleteLen; // We are moving the left ahead to point to the right index
startSeg->length = startSeg->length - headDeleteLen;
startSeg->Truncate(startSeg->left + startSeg->length);
startSeg->EnsureSizeInBound(); // Just truncated, size might exceed next.left
}
if (startSeg && ((startSeg->left - deleteLen + insertLen) == 0) && dummyHeadNodeInserted)
{
Assert(start + insertLen == 0);
// Remove the dummy head node to preserve array consistency.
pArr->head = startSeg;
savePrev = nullptr;
prevSeg = &pArr->head;
}
while (startSeg)
{
startSeg->left = startSeg->left - deleteLen + insertLen ;
if (startSeg->next == nullptr)
{
startSeg->EnsureSizeInBound();
}
startSeg = (SparseArraySegment<T>*)startSeg->next;
}
}
}
// The size of pnewArr head allocated in above step 1 might exceed next.left concatenated in step 2/3.
pnewArr->head->EnsureSizeInBound();
if (savePrev)
{
savePrev->EnsureSizeInBound();
}
// insert elements
if (insertLen > 0)
{
Assert(!JavascriptNativeIntArray::Is(pArr) && !JavascriptNativeFloatArray::Is(pArr));
// InsertPhase
SparseArraySegment<T> *segInsert = nullptr;
// see if we are just about the right of the previous segment
Assert(!savePrev || savePrev->left <= start);
if (savePrev && (start - savePrev->left < savePrev->size))
{
segInsert = (SparseArraySegment<T>*)savePrev;
uint32 spaceLeft = segInsert->size - (start - segInsert->left);
if(spaceLeft < insertLen)
{
if (!segInsert->next)
{
segInsert = segInsert->GrowByMin(recycler, insertLen - spaceLeft);
}
else
{
segInsert = segInsert->GrowByMinMax(recycler, insertLen - spaceLeft, segInsert->next->left - segInsert->left - segInsert->size);
}
}
*prevPrevSeg = segInsert;
segInsert->length = start + insertLen - segInsert->left;
}
else
{
segInsert = SparseArraySegment<T>::AllocateSegment(recycler, start, insertLen, *prevSeg);
segInsert->next = *prevSeg;
*prevSeg = segInsert;
savePrev = segInsert;
}
uint32 relativeStart = start - segInsert->left;
// inserted elements starts at argument 3 of splice(start, deleteNumber, insertelem1, insertelem2, insertelem3, ...);
js_memcpy_s(segInsert->elements + relativeStart, sizeof(T) * insertLen, insertArgs, sizeof(T) * insertLen);
}
}
| 0
|
337,847
|
static BlockDriver *find_protocol(const char *filename)
{
BlockDriver *drv1;
char protocol[128];
int len;
const char *p;
#ifdef _WIN32
if (is_windows_drive(filename) ||
is_windows_drive_prefix(filename))
return bdrv_find_format("raw");
#endif
p = strchr(filename, ':');
if (!p)
return bdrv_find_format("raw");
len = p - filename;
if (len > sizeof(protocol) - 1)
len = sizeof(protocol) - 1;
memcpy(protocol, filename, len);
protocol[len] = '\0';
QLIST_FOREACH(drv1, &bdrv_drivers, list) {
if (drv1->protocol_name &&
!strcmp(drv1->protocol_name, protocol)) {
return drv1;
}
}
return NULL;
}
| 0
|
454,410
|
const char* ExpressionLn::getOpName() const {
return "$ln";
}
| 0
|
29,175
|
static int selinux_is_sblabel_mnt ( struct super_block * sb ) {
struct superblock_security_struct * sbsec = sb -> s_security ;
return sbsec -> behavior == SECURITY_FS_USE_XATTR || sbsec -> behavior == SECURITY_FS_USE_TRANS || sbsec -> behavior == SECURITY_FS_USE_TASK || sbsec -> behavior == SECURITY_FS_USE_NATIVE || ! strcmp ( sb -> s_type -> name , "sysfs" ) || ! strcmp ( sb -> s_type -> name , "pstore" ) || ! strcmp ( sb -> s_type -> name , "debugfs" ) || ! strcmp ( sb -> s_type -> name , "rootfs" ) ;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.