idx
int64 | func
string | target
int64 |
|---|---|---|
293,031
|
static struct user_ta_elf *find_ta_elf(const TEE_UUID *uuid,
struct user_ta_ctx *utc)
{
struct user_ta_elf *elf;
TAILQ_FOREACH(elf, &utc->elfs, link)
if (!memcmp(&elf->uuid, uuid, sizeof(*uuid)))
return elf;
return NULL;
}
| 0
|
494,496
|
/* Content-decoding writer callback for "Content-Encoding: gzip".
 *
 * Feeds `nbytes` of raw gzip response data from `buf` into the zlib
 * inflate state kept in the writer's parameters. With zlib versions
 * that can parse the gzip wrapper themselves (zlib_init ==
 * ZLIB_INIT_GZIP) the data is handed straight to inflate_stream().
 * Otherwise (OLD_ZLIB_SUPPORT) the gzip header must be skipped by
 * hand, potentially buffering header bytes across several calls.
 *
 * Returns CURLE_OK, or a CURLcode error after tearing down the zlib
 * state via exit_zlib(). */
static CURLcode gzip_unencode_write(struct Curl_easy *data,
                                    struct contenc_writer *writer,
                                    const char *buf, size_t nbytes)
{
  struct zlib_params *zp = (struct zlib_params *) &writer->params;
  z_stream *z = &zp->z;         /* zlib state structure */

  if(zp->zlib_init == ZLIB_INIT_GZIP) {
    /* Let zlib handle the gzip decompression entirely */
    z->next_in = (Bytef *) buf;
    z->avail_in = (uInt) nbytes;
    /* Now uncompress the data */
    return inflate_stream(data, writer, ZLIB_INIT_GZIP);
  }

#ifndef OLD_ZLIB_SUPPORT
  /* Support for old zlib versions is compiled away and we are running with
     an old version, so return an error. */
  return exit_zlib(data, z, &zp->zlib_init, CURLE_WRITE_ERROR);

#else
  /* This next mess is to get around the potential case where there isn't
   * enough data passed in to skip over the gzip header.  If that happens, we
   * malloc a block and copy what we have then wait for the next call.  If
   * there still isn't enough (this is definitely a worst-case scenario), we
   * make the block bigger, copy the next part in and keep waiting.
   *
   * This is only required with zlib versions < 1.2.0.4 as newer versions
   * can handle the gzip header themselves.
   */

  switch(zp->zlib_init) {
  /* Skip over gzip header? */
  case ZLIB_INIT:
  {
    /* Initial call state */
    ssize_t hlen;

    switch(check_gzip_header((unsigned char *) buf, nbytes, &hlen)) {
    case GZIP_OK:
      /* Complete header present; inflate starts right after it. */
      z->next_in = (Bytef *) buf + hlen;
      z->avail_in = (uInt) (nbytes - hlen);
      zp->zlib_init = ZLIB_GZIP_INFLATING; /* Inflating stream state */
      break;

    case GZIP_UNDERFLOW:
      /* We need more data so we can find the end of the gzip header.  It's
       * possible that the memory block we malloc here will never be freed if
       * the transfer abruptly aborts after this point.  Since it's unlikely
       * that circumstances will be right for this code path to be followed in
       * the first place, and it's even more unlikely for a transfer to fail
       * immediately afterwards, it should seldom be a problem.
       */
      z->avail_in = (uInt) nbytes;
      z->next_in = malloc(z->avail_in);
      if(!z->next_in) {
        return exit_zlib(data, z, &zp->zlib_init, CURLE_OUT_OF_MEMORY);
      }
      memcpy(z->next_in, buf, z->avail_in);
      zp->zlib_init = ZLIB_GZIP_HEADER; /* Need more gzip header data state */
      /* We don't have any data to inflate yet */
      return CURLE_OK;

    case GZIP_BAD:
    default:
      return exit_zlib(data, z, &zp->zlib_init, process_zlib_error(data, z));
    }

  }
  break;

  case ZLIB_GZIP_HEADER:
  {
    /* Need more gzip header data state: grow the saved buffer and retry
     * header parsing on the concatenation of old + new bytes. */
    ssize_t hlen;
    z->avail_in += (uInt) nbytes;
    z->next_in = Curl_saferealloc(z->next_in, z->avail_in);
    if(!z->next_in) {
      return exit_zlib(data, z, &zp->zlib_init, CURLE_OUT_OF_MEMORY);
    }
    /* Append the new block of data to the previous one */
    memcpy(z->next_in + z->avail_in - nbytes, buf, nbytes);

    switch(check_gzip_header(z->next_in, z->avail_in, &hlen)) {
    case GZIP_OK:
      /* This is the zlib stream data */
      free(z->next_in);
      /* Don't point into the malloced block since we just freed it.
       * The payload start within the caller's `buf` is recovered from
       * how much of the header fell into this chunk. */
      z->next_in = (Bytef *) buf + hlen + nbytes - z->avail_in;
      z->avail_in = (uInt) (z->avail_in - hlen);
      zp->zlib_init = ZLIB_GZIP_INFLATING;   /* Inflating stream state */
      break;

    case GZIP_UNDERFLOW:
      /* We still don't have any data to inflate! */
      return CURLE_OK;

    case GZIP_BAD:
    default:
      return exit_zlib(data, z, &zp->zlib_init, process_zlib_error(data, z));
    }

  }
  break;

  case ZLIB_EXTERNAL_TRAILER:
    /* Header and stream already consumed; the remaining bytes belong to
     * the gzip trailer. */
    z->next_in = (Bytef *) buf;
    z->avail_in = (uInt) nbytes;
    return process_trailer(data, zp);

  case ZLIB_GZIP_INFLATING:
  default:
    /* Inflating stream state */
    z->next_in = (Bytef *) buf;
    z->avail_in = (uInt) nbytes;
    break;
  }

  if(z->avail_in == 0) {
    /* We don't have any data to inflate; wait until next time */
    return CURLE_OK;
  }

  /* We've parsed the header, now uncompress the data */
  return inflate_stream(data, writer, ZLIB_GZIP_INFLATING);
#endif
}
| 0
|
271,893
|
// Shape-inference function for DepthwiseConv2dNative.
//
// Validates the rank-4 input and filter shapes plus the `strides`,
// `dilations`, `data_format` and `padding` attributes, then derives the
// output shape via GetWindowedOutputSizeFromDimsV2. When
// `supports_explicit_padding` is true the `explicit_paddings` attribute
// is also read and checked (missing attribute falls back to an empty
// list). Returns a non-OK Status on any invalid attribute or shape.
Status DepthwiseConv2DNativeShapeImpl(shape_inference::InferenceContext* c,
                                      bool supports_explicit_padding) {
  ShapeHandle input_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
  ShapeHandle filter_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &filter_shape));

  std::vector<int32> strides;
  TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));

  if (strides.size() != 4) {
    return errors::InvalidArgument(
        "DepthwiseConv2D requires the stride attribute to contain 4 values, "
        "but got: ",
        strides.size());
  }

  std::vector<int32> dilations;
  // `dilations` is optional; default to 1 along every dimension.
  if (!c->GetAttr("dilations", &dilations).ok()) {
    dilations.resize(4, 1);
  }

  if (dilations.size() != 4) {
    return errors::InvalidArgument(
        "DepthwiseConv2D requires the dilations attribute to contain 4 values, "
        "but got: ",
        dilations.size());
  }

  string data_format_str;
  Status s = c->GetAttr("data_format", &data_format_str);
  TensorFormat data_format;
  // Missing or unparsable data_format defaults to NHWC.
  if (!s.ok() || !FormatFromString(data_format_str, &data_format)) {
    data_format = FORMAT_NHWC;
  }
  int32_t stride_rows;
  int32_t stride_cols;
  int32_t dilation_rows;
  int32_t dilation_cols;
  if (data_format == FORMAT_NCHW) {
    // Canonicalize input shape to NHWC so the shape inference code below can
    // process it.
    input_shape =
        c->MakeShape({{c->Dim(input_shape, 0), c->Dim(input_shape, 2),
                       c->Dim(input_shape, 3), c->Dim(input_shape, 1)}});
    stride_rows = strides[2];
    stride_cols = strides[3];
    dilation_rows = dilations[2];
    dilation_cols = dilations[3];
  } else {
    stride_rows = strides[1];
    stride_cols = strides[2];
    dilation_rows = dilations[1];
    dilation_cols = dilations[2];
  }

  // From here on, input_shape is laid out as NHWC.
  DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
  DimensionHandle in_rows_dim = c->Dim(input_shape, 1);
  DimensionHandle in_cols_dim = c->Dim(input_shape, 2);

  // Filter layout: [rows, cols, in_depth, depth_multiplier].
  DimensionHandle filter_rows_dim = c->Dim(filter_shape, 0);
  DimensionHandle filter_cols_dim = c->Dim(filter_shape, 1);
  DimensionHandle input_depth = c->Dim(filter_shape, 2);
  DimensionHandle depth_multiplier = c->Dim(filter_shape, 3);

  // Check that the input depths are compatible.
  TF_RETURN_IF_ERROR(
      c->Merge(c->Dim(input_shape, 3), input_depth, &input_depth));

  // Output depth = input depth * depth multiplier.
  DimensionHandle output_depth;
  TF_RETURN_IF_ERROR(c->Multiply(input_depth, depth_multiplier, &output_depth));

  Padding padding;
  TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));

  std::vector<int64_t> explicit_paddings;
  if (supports_explicit_padding) {
    Status status = c->GetAttr("explicit_paddings", &explicit_paddings);
    // Use the default value, which is an empty list, if the attribute is not
    // found. Otherwise return the error to the caller.
    if (!status.ok() && !errors::IsNotFound(status)) {
      return status;
    }
    TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
                                         /*num_dims=*/4, data_format));
  } else {
    // Ops without the attribute must never request EXPLICIT padding.
    DCHECK(padding != Padding::EXPLICIT);
  }

  // TODO(mrry,shlens): Raise an error if the stride would cause
  // information in the input to be ignored. This will require a change
  // in the kernel implementation.

  DimensionHandle output_rows, output_cols;
  int64_t pad_rows_before = -1, pad_rows_after = -1;
  int64_t pad_cols_before = -1, pad_cols_after = -1;
  if (padding == Padding::EXPLICIT) {
    GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
                             &pad_rows_before, &pad_rows_after);
    GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
                             &pad_cols_before, &pad_cols_after);
  }

  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_rows_dim, filter_rows_dim, dilation_rows, stride_rows, padding,
      pad_rows_before, pad_rows_after, &output_rows));
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_cols_dim, filter_cols_dim, dilation_cols, stride_cols, padding,
      pad_cols_before, pad_cols_after, &output_cols));

  // Emit the output in the caller's requested layout.
  ShapeHandle output_shape;
  if (data_format == FORMAT_NCHW) {
    output_shape =
        c->MakeShape({batch_size_dim, output_depth, output_rows, output_cols});
  } else {
    output_shape =
        c->MakeShape({batch_size_dim, output_rows, output_cols, output_depth});
  }
  c->set_output(0, output_shape);
  return Status::OK();
}
| 0
|
516,535
|
close_mysql_tables(THD *thd)
{
  /* Commit the statement transaction (unless we're running inside a
     sub-statement, where the enclosing statement owns the commit),
     close every table opened by this thread, and release the
     transactional metadata locks that were acquired. */
  if (thd->in_sub_stmt == 0)
    trans_commit_stmt(thd);

  close_thread_tables(thd);
  thd->release_transactional_locks();
}
| 0
|
262,454
|
/* Parse a JVM LineNumberTable attribute from `buffer`.
 *
 * Layout (after the generic 6-byte attribute header): a u2 entry count
 * followed by `count` entries of two u2 values (start_pc, line_number).
 * Parsing stops early when the declared count would run past `sz`.
 * Returns a newly allocated attribute, or NULL on error. */
R_API RBinJavaAttrInfo *r_bin_java_line_number_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	ut32 i = 0;
	ut64 curpos, offset = 0;
	RBinJavaLineNumberAttribute *lnattr;
	if (sz < 6) {
		return NULL;
	}
	RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	if (!attr) {
		return NULL;
	}
	offset += 6;
	attr->type = R_BIN_JAVA_ATTR_TYPE_LINE_NUMBER_TABLE_ATTR;
	attr->info.line_number_table_attr.line_number_table_length = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	attr->info.line_number_table_attr.line_number_table = r_list_newf (free);
	ut32 linenum_len = attr->info.line_number_table_attr.line_number_table_length;
	RList *linenum_list = attr->info.line_number_table_attr.line_number_table;
	for (i = 0; i < linenum_len; i++) {
		curpos = buf_offset + offset;
		/* Each entry reads two ushorts (4 bytes). The previous check
		 * `offset - 2 > sz` still allowed the read to start up to 2
		 * bytes beyond the buffer end, an out-of-bounds read when the
		 * declared table length exceeds the attribute size. Bound the
		 * full 4-byte read, before allocating the entry. */
		if (offset + 4 > sz) {
			break;
		}
		lnattr = R_NEW0 (RBinJavaLineNumberAttribute);
		if (!lnattr) {
			break;
		}
		lnattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lnattr->line_number = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		/* Record where this entry lives in the file for later use. */
		lnattr->file_offset = curpos;
		lnattr->size = 4;
		r_list_append (linenum_list, lnattr);
	}
	attr->size = offset;
	return attr;
}
| 0
|
245,832
|
// Accessor for the iframe's sandbox token list member.
DOMTokenList* HTMLIFrameElement::sandbox() const {
  DOMTokenList* tokens = sandbox_.Get();
  return tokens;
}
| 0
|
382,351
|
StreamConnection(pgsocket server_fd, Port *port)
{
	/* Accept a pending connection on server_fd, record both the remote
	 * and local socket addresses in *port, and configure TCP options
	 * (NODELAY, KEEPALIVE, keepalive tuning, Win32 send buffer) when
	 * the connection is not a Unix-domain socket. Returns STATUS_OK on
	 * success, STATUS_ERROR on any failure. */

	/* accept connection and fill in the client (remote) address */
	port->raddr.salen = sizeof(port->raddr.addr);
	if ((port->sock = accept(server_fd,
							 (struct sockaddr *) & port->raddr.addr,
							 &port->raddr.salen)) == PGINVALID_SOCKET)
	{
		ereport(LOG,
				(errcode_for_socket_access(),
				 errmsg("could not accept new connection: %m")));

		/*
		 * If accept() fails then postmaster.c will still see the server
		 * socket as read-ready, and will immediately try again. To avoid
		 * uselessly sucking lots of CPU, delay a bit before trying again.
		 * (The most likely reason for failure is being out of kernel file
		 * table slots; we can do little except hope some will get freed up.)
		 */
		pg_usleep(100000L);		/* wait 0.1 sec */
		return STATUS_ERROR;
	}

#ifdef SCO_ACCEPT_BUG

	/*
	 * UnixWare 7+ and OpenServer 5.0.4 are known to have this bug, but it
	 * shouldn't hurt to catch it for all versions of those platforms.
	 */
	if (port->raddr.addr.ss_family == 0)
		port->raddr.addr.ss_family = AF_UNIX;
#endif

	/* fill in the server (local) address */
	port->laddr.salen = sizeof(port->laddr.addr);
	if (getsockname(port->sock,
					(struct sockaddr *) & port->laddr.addr,
					&port->laddr.salen) < 0)
	{
		elog(LOG, "getsockname() failed: %m");
		return STATUS_ERROR;
	}

	/* select NODELAY and KEEPALIVE options if it's a TCP connection */
	if (!IS_AF_UNIX(port->laddr.addr.ss_family))
	{
		int			on;

#ifdef	TCP_NODELAY
		on = 1;
		if (setsockopt(port->sock, IPPROTO_TCP, TCP_NODELAY,
					   (char *) &on, sizeof(on)) < 0)
		{
			elog(LOG, "setsockopt(TCP_NODELAY) failed: %m");
			return STATUS_ERROR;
		}
#endif
		on = 1;
		if (setsockopt(port->sock, SOL_SOCKET, SO_KEEPALIVE,
					   (char *) &on, sizeof(on)) < 0)
		{
			elog(LOG, "setsockopt(SO_KEEPALIVE) failed: %m");
			return STATUS_ERROR;
		}

#ifdef WIN32

		/*
		 * This is a Win32 socket optimization.  The ideal size is 32k.
		 * http://support.microsoft.com/kb/823764/EN-US/
		 */
		on = PQ_SEND_BUFFER_SIZE * 4;
		if (setsockopt(port->sock, SOL_SOCKET, SO_SNDBUF, (char *) &on,
					   sizeof(on)) < 0)
		{
			elog(LOG, "setsockopt(SO_SNDBUF) failed: %m");
			return STATUS_ERROR;
		}
#endif

		/*
		 * Also apply the current keepalive parameters.  If we fail to set a
		 * parameter, don't error out, because these aren't universally
		 * supported.  (Note: you might think we need to reset the GUC
		 * variables to 0 in such a case, but it's not necessary because the
		 * show hooks for these variables report the truth anyway.)
		 */
		(void) pq_setkeepalivesidle(tcp_keepalives_idle, port);
		(void) pq_setkeepalivesinterval(tcp_keepalives_interval, port);
		(void) pq_setkeepalivescount(tcp_keepalives_count, port);
	}

	return STATUS_OK;
}
| 0
|
452,845
|
/* Create a pixel iterator covering the full extent of the wand's image.
 *
 * Returns a new PixelIterator, or NULL when the wand holds no image or
 * the cache view cannot be acquired. Fatal wand exceptions are thrown
 * on quantum-depth mismatch and allocation failure.
 *
 * Fix: the ExceptionInfo acquired before AcquireVirtualCacheView() was
 * leaked when the view could not be created; it is now destroyed on
 * that early-return path. */
WandExport PixelIterator *NewPixelIterator(MagickWand *wand)
{
  const char
    *quantum;

  ExceptionInfo
    *exception;

  Image
    *image;

  PixelIterator
    *iterator;

  size_t
    depth;

  CacheView
    *view;

  /* Refuse to run against a library built with a different quantum depth. */
  depth=MAGICKCORE_QUANTUM_DEPTH;
  quantum=GetMagickQuantumDepth(&depth);
  if (depth != MAGICKCORE_QUANTUM_DEPTH)
    ThrowWandFatalException(WandError,"QuantumDepthMismatch",quantum);
  assert(wand != (MagickWand *) NULL);
  image=GetImageFromMagickWand(wand);
  if (image == (Image *) NULL)
    return((PixelIterator *) NULL);
  exception=AcquireExceptionInfo();
  view=AcquireVirtualCacheView(image,exception);
  if (view == (CacheView *) NULL)
    {
      /* Don't leak the exception info on this failure path. */
      exception=DestroyExceptionInfo(exception);
      return((PixelIterator *) NULL);
    }
  iterator=(PixelIterator *) AcquireMagickMemory(sizeof(*iterator));
  if (iterator == (PixelIterator *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(iterator,0,sizeof(*iterator));
  iterator->id=AcquireWandId();
  (void) FormatLocaleString(iterator->name,MagickPathExtent,"%s-%.20g",
    PixelIteratorId,(double) iterator->id);
  iterator->exception=exception;
  iterator->view=view;
  /* Iterate over the whole image by default. */
  SetGeometry(image,&iterator->region);
  iterator->region.width=image->columns;
  iterator->region.height=image->rows;
  iterator->region.x=0;
  iterator->region.y=0;
  iterator->pixel_wands=NewPixelWands(iterator->region.width);
  iterator->y=0;
  iterator->debug=IsEventLogging();
  if (iterator->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",iterator->name);
  iterator->signature=MagickWandSignature;
  return(iterator);
}
| 0
|
23,935
|
/* Finalize and emit the ASF data packet currently being assembled.
 *
 * Writes the optional streaming chunk header, the payload-parsing
 * header, and the (zero-padded) packet payload, then resets the
 * per-packet bookkeeping and re-arms the in-memory packet buffer. */
static void flush_packet(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    int hdr_size, filled_size;

    av_assert0(asf->packet_timestamp_end >= asf->packet_timestamp_start);

    if (asf->is_streamed)
        put_chunk(s, 0x4424, s->packet_size, 0);

    hdr_size = put_payload_parsing_info(s,
                                        asf->packet_timestamp_start,
                                        asf->packet_timestamp_end -
                                            asf->packet_timestamp_start,
                                        asf->packet_nb_payloads,
                                        asf->packet_size_left);

    filled_size = PACKET_SIZE - asf->packet_size_left;
    av_assert0(hdr_size <= asf->packet_size_left);

    /* Zero-fill the unused tail so every packet is exactly packet_size. */
    memset(asf->packet_buf + filled_size, 0, asf->packet_size_left);

    avio_write(s->pb, asf->packet_buf, s->packet_size - hdr_size);
    avio_flush(s->pb);

    /* Reset state for the next packet. */
    asf->nb_packets++;
    asf->packet_nb_payloads     = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end   = -1;
    ffio_init_context(&asf->pb, asf->packet_buf, s->packet_size, 1,
                      NULL, NULL, NULL, NULL);
}
| 0
|
393,100
|
/* Create a schema bucket (import or include/redefine) of the given
 * `type` for `targetNamespace` and register it with the constructor.
 *
 * The first bucket created is forced to XML_SCHEMA_SCHEMA_MAIN and is
 * wired to the main schema; import buckets get their own xmlSchema,
 * include buckets inherit the owning import. Returns NULL on error.
 *
 * Fix: when allocating `ret->locals` failed, the already-created
 * `ret->globals` item list was leaked; it is now freed on that path. */
xmlSchemaBucketCreate(xmlSchemaParserCtxtPtr pctxt,
		      int type, const xmlChar *targetNamespace)
{
    xmlSchemaBucketPtr ret;
    int size;
    xmlSchemaPtr mainSchema;

    if (WXS_CONSTRUCTOR(pctxt)->mainSchema == NULL) {
	PERROR_INT("xmlSchemaBucketCreate",
	    "no main schema on constructor");
	return(NULL);
    }
    mainSchema = WXS_CONSTRUCTOR(pctxt)->mainSchema;
    /* Create the schema bucket; includes/redefines and imports are
     * different structs that share a common header. */
    if (WXS_IS_BUCKET_INCREDEF(type))
	size = sizeof(xmlSchemaInclude);
    else
	size = sizeof(xmlSchemaImport);
    ret = (xmlSchemaBucketPtr) xmlMalloc(size);
    if (ret == NULL) {
	xmlSchemaPErrMemory(NULL, "allocating schema bucket", NULL);
	return(NULL);
    }
    memset(ret, 0, size);
    ret->targetNamespace = targetNamespace;
    ret->type = type;
    ret->globals = xmlSchemaItemListCreate();
    if (ret->globals == NULL) {
	xmlFree(ret);
	return(NULL);
    }
    ret->locals = xmlSchemaItemListCreate();
    if (ret->locals == NULL) {
	/* Free the globals list too, otherwise it leaks. */
	xmlSchemaItemListFree(ret->globals);
	xmlFree(ret);
	return(NULL);
    }
    /*
    * The following will assure that only the first bucket is marked as
    * XML_SCHEMA_SCHEMA_MAIN and it points to the *main* schema.
    * For each following import buckets an xmlSchema will be created.
    * An xmlSchema will be created for every distinct targetNamespace.
    * We assign the targetNamespace to the schemata here.
    */
    if (! WXS_HAS_BUCKETS(pctxt)) {
	if (WXS_IS_BUCKET_INCREDEF(type)) {
	    PERROR_INT("xmlSchemaBucketCreate",
		"first bucket but it's an include or redefine");
	    xmlSchemaBucketFree(ret);
	    return(NULL);
	}
	/* Force the type to be XML_SCHEMA_SCHEMA_MAIN. */
	ret->type = XML_SCHEMA_SCHEMA_MAIN;
	/* Point to the *main* schema. */
	WXS_CONSTRUCTOR(pctxt)->mainBucket = ret;
	WXS_IMPBUCKET(ret)->schema = mainSchema;
	/*
	* Ensure that the main schema gets a targetNamespace.
	*/
	mainSchema->targetNamespace = targetNamespace;
    } else {
	if (type == XML_SCHEMA_SCHEMA_MAIN) {
	    PERROR_INT("xmlSchemaBucketCreate",
		"main bucket but it's not the first one");
	    xmlSchemaBucketFree(ret);
	    return(NULL);
	} else if (type == XML_SCHEMA_SCHEMA_IMPORT) {
	    /*
	    * Create a schema for imports and assign the
	    * targetNamespace.
	    */
	    WXS_IMPBUCKET(ret)->schema = xmlSchemaNewSchema(pctxt);
	    if (WXS_IMPBUCKET(ret)->schema == NULL) {
		xmlSchemaBucketFree(ret);
		return(NULL);
	    }
	    WXS_IMPBUCKET(ret)->schema->targetNamespace = targetNamespace;
	}
    }
    if (WXS_IS_BUCKET_IMPMAIN(type)) {
	int res;
	/*
	* Imports go into the "schemasImports" slot of the main *schema*.
	* Note that we create an import entry for the main schema as well; i.e.,
	* even if there's only one schema, we'll get an import.
	*/
	if (mainSchema->schemasImports == NULL) {
	    mainSchema->schemasImports = xmlHashCreateDict(5,
		WXS_CONSTRUCTOR(pctxt)->dict);
	    if (mainSchema->schemasImports == NULL) {
		xmlSchemaBucketFree(ret);
		return(NULL);
	    }
	}
	if (targetNamespace == NULL)
	    res = xmlHashAddEntry(mainSchema->schemasImports,
		XML_SCHEMAS_NO_NAMESPACE, ret);
	else
	    res = xmlHashAddEntry(mainSchema->schemasImports,
		targetNamespace, ret);
	if (res != 0) {
	    PERROR_INT("xmlSchemaBucketCreate",
		"failed to add the schema bucket to the hash");
	    xmlSchemaBucketFree(ret);
	    return(NULL);
	}
    } else {
	/* Set the @ownerImport of an include bucket. */
	if (WXS_IS_BUCKET_IMPMAIN(WXS_CONSTRUCTOR(pctxt)->bucket->type))
	    WXS_INCBUCKET(ret)->ownerImport =
		WXS_IMPBUCKET(WXS_CONSTRUCTOR(pctxt)->bucket);
	else
	    WXS_INCBUCKET(ret)->ownerImport =
		WXS_INCBUCKET(WXS_CONSTRUCTOR(pctxt)->bucket)->ownerImport;

	/* Includes got into the "includes" slot of the *main* schema. */
	if (mainSchema->includes == NULL) {
	    mainSchema->includes = xmlSchemaItemListCreate();
	    if (mainSchema->includes == NULL) {
		xmlSchemaBucketFree(ret);
		return(NULL);
	    }
	}
	xmlSchemaItemListAdd(mainSchema->includes, ret);
    }
    /*
    * Add to list of all buckets; this is used for lookup
    * during schema construction time only.
    */
    if (xmlSchemaItemListAdd(WXS_CONSTRUCTOR(pctxt)->buckets, ret) == -1)
	return(NULL);
    return(ret);
}
| 0
|
286,219
|
// Depth-first search of the accessibility tree rooted at |node| for the
// node whose AX_ATTR_CHILD_TREE_ID attribute equals |child_tree_id|.
// Returns nullptr when no such node exists in the subtree.
ui::AXNode* FindNodeWithChildTreeId(ui::AXNode* node, int child_tree_id) {
  if (node->data().GetIntAttribute(ui::AX_ATTR_CHILD_TREE_ID) == child_tree_id)
    return node;

  for (int index = 0; index < node->child_count(); ++index) {
    ui::AXNode* match =
        FindNodeWithChildTreeId(node->ChildAtIndex(index), child_tree_id);
    if (match != nullptr)
      return match;
  }

  return nullptr;
}
| 0
|
503,983
|
/* Commit the transaction currently open on the SQLite handle `db`.
 * Simply forwards to _sqlite_exec with a COMMIT statement and returns
 * its result code. */
static int _sqlite_commit_txn(void *db, const sasl_utils_t *utils)
{
    static const char commit_stmt[] = "COMMIT TRANSACTION";

    return _sqlite_exec(db, commit_stmt, NULL, 0, NULL, utils);
}
| 0
|
396,312
|
/* Append one raw bytecode value to the function currently being
 * compiled. JF is a macro expanding to the compiler-state and function
 * parameters (J, F); the value is forwarded unchanged to emitraw(). */
static void emit(JF, int value)
{
	emitraw(J, F, value);
}
| 0
|
156,649
|
/* Advance RAR5 decompression by one compressed block (or resume a
 * partially processed one).
 *
 * Handles volume switching for multivolume archives, parses/validates
 * the block header, loads Huffman tables when the block carries them,
 * and calls do_uncompress_block() to produce output. Returns an
 * ARCHIVE_* status; ARCHIVE_EOF when more input data is unavailable. */
static int process_block(struct archive_read* a) {
	const uint8_t* p;
	struct rar5* rar = get_context(a);
	int ret;

	/* If we don't have any data to be processed, this most probably means
	 * we need to switch to the next volume. */
	if(rar->main.volume && rar->file.bytes_remaining == 0) {
		ret = advance_multivolume(a);
		if(ret != ARCHIVE_OK)
			return ret;
	}

	if(rar->cstate.block_parsing_finished) {
		ssize_t block_size;
		ssize_t to_skip;
		ssize_t cur_block_size;

		/* The header size won't be bigger than 6 bytes. */
		if(!read_ahead(a, 6, &p)) {
			/* Failed to prefetch data block header. */
			return ARCHIVE_EOF;
		}

		/*
		 * Read block_size by parsing block header. Validate the header
		 * by calculating CRC byte stored inside the header. Size of
		 * the header is not constant (block size can be stored either
		 * in 1 or 2 bytes), that's why block size is left out from the
		 * `compressed_block_header` structure and returned by
		 * `parse_block_header` as the second argument. */

		ret = parse_block_header(a, p, &block_size,
		    &rar->last_block_hdr);
		if(ret != ARCHIVE_OK) {
			return ret;
		}

		/* Skip block header. Next data is huffman tables,
		 * if present. */
		to_skip = sizeof(struct compressed_block_header) +
			bf_byte_count(&rar->last_block_hdr) + 1;

		if(ARCHIVE_OK != consume(a, to_skip))
			return ARCHIVE_EOF;
		rar->file.bytes_remaining -= to_skip;

		/* The block size gives information about the whole block size,
		 * but the block could be stored in split form when using
		 * multi-volume archives. In this case, the block size will be
		 * bigger than the actual data stored in this file. Remaining
		 * part of the data will be in another file. */

		cur_block_size =
			rar5_min(rar->file.bytes_remaining, block_size);

		if(block_size > rar->file.bytes_remaining) {
			/* If current blocks' size is bigger than our data
			 * size, this means we have a multivolume archive.
			 * In this case, skip all base headers until the end
			 * of the file, proceed to next "partXXX.rar" volume,
			 * find its signature, skip all headers up to the first
			 * FILE base header, and continue from there.
			 *
			 * Note that `merge_block` will update the `rar`
			 * context structure quite extensively. */

			ret = merge_block(a, block_size, &p);
			if(ret != ARCHIVE_OK) {
				return ret;
			}

			cur_block_size = block_size;

			/* Current stream pointer should be now directly
			 * *after* the block that spanned through multiple
			 * archive files. `p` pointer should have the data of
			 * the *whole* block (merged from partial blocks
			 * stored in multiple archives files). */
		} else {
			rar->cstate.switch_multivolume = 0;

			/* Read the whole block size into memory. This can take
			 * up to  8 megabytes of memory in theoretical cases.
			 * Might be worth to optimize this and use a standard
			 * chunk of 4kb's. */
			if(!read_ahead(a, 4 + cur_block_size, &p)) {
				/* Failed to prefetch block data. */
				return ARCHIVE_EOF;
			}
		}

		/* Record the block and reset the bit reader before parsing
		 * tables / decompressing. */
		rar->cstate.block_buf = p;
		rar->cstate.cur_block_size = cur_block_size;
		rar->cstate.block_parsing_finished = 0;

		rar->bits.in_addr = 0;
		rar->bits.bit_addr = 0;

		if(bf_is_table_present(&rar->last_block_hdr)) {
			/* Load Huffman tables. */
			ret = parse_tables(a, rar, p);
			if(ret != ARCHIVE_OK) {
				/* Error during decompression of Huffman
				 * tables. */
				return ret;
			}
		}
	} else {
		/* Block parsing not finished, reuse previous memory buffer. */
		p = rar->cstate.block_buf;
	}

	/* Uncompress the block, or a part of it, depending on how many bytes
	 * will be generated by uncompressing the block.
	 *
	 * In case too many bytes will be generated, calling this function
	 * again will resume the uncompression operation. */
	ret = do_uncompress_block(a, p);
	if(ret != ARCHIVE_OK) {
		return ret;
	}

	if(rar->cstate.block_parsing_finished &&
	    rar->cstate.switch_multivolume == 0 &&
	    rar->cstate.cur_block_size > 0)
	{
		/* If we're processing a normal block, consume the whole
		 * block. We can do this because we've already read the whole
		 * block to memory. */
		if(ARCHIVE_OK != consume(a, rar->cstate.cur_block_size))
			return ARCHIVE_FATAL;

		rar->file.bytes_remaining -= rar->cstate.cur_block_size;
	} else if(rar->cstate.switch_multivolume) {
		/* Don't consume the block if we're doing multivolume
		 * processing. The volume switching function will consume
		 * the proper count of bytes instead. */
		rar->cstate.switch_multivolume = 0;
	}

	return ARCHIVE_OK;
}
| 0
|
113,348
|
OVS_REQUIRES(ipf->ipf_lock)
{
    /* Return the fragment list in ipf->frag_lists whose key equals *key
     * (searching only the bucket selected by 'hash'), or NULL when no
     * reassembly context exists for it. The OVS_REQUIRES annotation
     * above enforces that the caller holds ipf->ipf_lock.
     * NOTE(review): the function's name/signature line is above this
     * chunk and not visible here. */
    struct ipf_list *ipf_list;

    HMAP_FOR_EACH_WITH_HASH (ipf_list, node, hash, &ipf->frag_lists) {
        if (ipf_list_key_eq(&ipf_list->key, key)) {
            return ipf_list;
        }
    }
    return NULL;
}
| 0
|
161,371
|
ImagingTiffVersion(void)
{
    /* Expose the version banner string of the linked libtiff library. */
    return TIFFGetVersion();
}
| 0
|
301,016
|
/* Account for the 'dmax' box payload when computing its serialized
 * size: 4 bytes on top of the base box size. Never fails. */
GF_Err dmax_box_size(GF_Box *s)
{
	s->size = s->size + 4;  /* 32-bit payload field */
	return GF_OK;
}
| 0
|
173,260
|
/* Propagate the global RT runtime limit to every CPU's rt_rq.
 *
 * Takes the default RT bandwidth lock (IRQ-safe) and, under it, the
 * per-rq runtime lock for each possible CPU while updating rt_runtime
 * to the current global value. Always returns 0. */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		/* Nested lock order: bandwidth lock, then per-rq lock. */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
| 0
|
495,823
|
void gf_av1_reset_state(AV1State *state, Bool is_destroy)
{
GF_List *l1, *l2;
if (state->frame_state.header_obus) {
while (gf_list_count(state->frame_state.header_obus)) {
GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.header_obus);
if (a->obu) gf_free(a->obu);
gf_free(a);
}
}
if (state->frame_state.frame_obus) {
while (gf_list_count(state->frame_state.frame_obus)) {
GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.frame_obus);
if (a->obu) gf_free(a->obu);
gf_free(a);
}
}
l1 = state->frame_state.frame_obus;
l2 = state->frame_state.header_obus;
memset(&state->frame_state, 0, sizeof(AV1StateFrame));
state->frame_state.is_first_frame = GF_TRUE;
if (is_destroy) {
gf_list_del(l1);
gf_list_del(l2);
if (state->bs) {
u32 size, asize=0;
u8 *ptr=NULL;
//detach BS internal buffer
gf_bs_get_content_no_truncate(state->bs, &ptr, &size, &asize);
//avoid double free, cf issue 1893
if (ptr != state->frame_obus) {
gf_free(ptr);
}
if (state->frame_obus) {
gf_free(state->frame_obus);
state->frame_obus = NULL;
state->frame_obus_alloc = 0;
}
gf_bs_del(state->bs);
}
state->bs = NULL;
}
else {
state->frame_state.frame_obus = l1;
state->frame_state.header_obus = l2;
if (state->bs)
gf_bs_seek(state->bs, 0);
}
}
| 0
|
265,546
|
int callback_glewlwyd_check_user_profile_valid (const struct _u_request * request, struct _u_response * response, void * user_data) {
struct config_elements * config = (struct config_elements *)user_data;
char * session_uid;
json_t * j_user;
int ret, res;
if ((session_uid = get_session_id(config, request)) != NULL) {
j_user = get_current_user_for_session(config, session_uid);
if (check_result_value(j_user, G_OK) && json_object_get(json_object_get(j_user, "user"), "enabled") == json_true()) {
if ((res = is_scope_list_valid_for_session(config, config->profile_scope, session_uid)) == G_OK) {
if (ulfius_set_response_shared_data(response, json_deep_copy(json_object_get(j_user, "user")), (void (*)(void *))&json_decref) != U_OK) {
ret = U_CALLBACK_ERROR;
} else {
ret = U_CALLBACK_IGNORE;
}
} else {
if (res == G_ERROR) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_check_user_session - Error is_scope_list_valid_for_session");
}
ret = U_CALLBACK_UNAUTHORIZED;
}
} else {
ret = U_CALLBACK_UNAUTHORIZED;
}
json_decref(j_user);
} else {
ret = U_CALLBACK_UNAUTHORIZED;
}
o_free(session_uid);
return ret;
}
| 0
|
395,193
|
create_info_message_area (const gchar *primary_text,
			  const gchar *secondary_text,
			  EogErrorMessageAreaButtons buttons)
{
	GtkWidget *info_bar;

	/* Build an empty info bar and mark it as an informational message. */
	info_bar = gtk_info_bar_new ();
	gtk_info_bar_set_message_type (GTK_INFO_BAR (info_bar),
				       GTK_MESSAGE_INFO);

	/* Attach whichever button set the caller requested. */
	add_message_area_buttons (info_bar, buttons);

	/* Fill in the icon and the primary/secondary labels. */
	set_message_area_text_and_icon (GTK_INFO_BAR (info_bar),
					"dialog-information",
					primary_text,
					secondary_text);

	return info_bar;
}
| 0
|
478,368
|
//! Interactively select a feature (point/line/box) in the image shown in
//! \p disp, replacing this image in place with the result of
//! get_select(). Thin in-place wrapper; all work is delegated.
CImg<T>& select(CImgDisplay &disp,
                const unsigned int feature_type=2, unsigned int *const XYZ=0,
                const bool exit_on_anykey=false,
                const bool is_deep_selection_default=false) {
  return get_select(disp,feature_type,XYZ,exit_on_anykey,is_deep_selection_default).move_to(*this);
}
| 0
|
69,221
|
// Math-parser opcode cut(val,min,max): clamp the value argument into
// [cmin, cmax]. Arguments are fetched from the parser's argument slots
// 2..4 via _mp_arg. (Closing brace lies outside this chunk.)
static double mp_cut(_cimg_math_parser& mp) {
  double val = _mp_arg(2), cmin = _mp_arg(3), cmax = _mp_arg(4);
  // Below range -> cmin; above range -> cmax; otherwise pass through.
  return val<cmin?cmin:val>cmax?cmax:val;
| 0
|
308,760
|
int tls_construct_server_key_exchange(SSL *s)
{
#ifndef OPENSSL_NO_DH
EVP_PKEY *pkdh = NULL;
int j;
#endif
#ifndef OPENSSL_NO_EC
unsigned char *encodedPoint = NULL;
int encodedlen = 0;
int curve_id = 0;
#endif
EVP_PKEY *pkey;
const EVP_MD *md = NULL;
unsigned char *p, *d;
int al, i;
unsigned long type;
int n;
const BIGNUM *r[4];
int nr[4], kn;
BUF_MEM *buf;
EVP_MD_CTX *md_ctx = EVP_MD_CTX_new();
if (md_ctx == NULL) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
al = SSL_AD_INTERNAL_ERROR;
goto f_err;
}
type = s->s3->tmp.new_cipher->algorithm_mkey;
buf = s->init_buf;
r[0] = r[1] = r[2] = r[3] = NULL;
n = 0;
#ifndef OPENSSL_NO_PSK
if (type & SSL_PSK) {
/*
* reserve size for record length and PSK identity hint
*/
n += 2;
if (s->cert->psk_identity_hint)
n += strlen(s->cert->psk_identity_hint);
}
/* Plain PSK or RSAPSK nothing to do */
if (type & (SSL_kPSK | SSL_kRSAPSK)) {
} else
#endif /* !OPENSSL_NO_PSK */
#ifndef OPENSSL_NO_DH
if (type & (SSL_kDHE | SSL_kDHEPSK)) {
CERT *cert = s->cert;
EVP_PKEY *pkdhp = NULL;
DH *dh;
if (s->cert->dh_tmp_auto) {
DH *dhp = ssl_get_auto_dh(s);
pkdh = EVP_PKEY_new();
if (pkdh == NULL || dhp == NULL) {
DH_free(dhp);
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto f_err;
}
EVP_PKEY_assign_DH(pkdh, dhp);
pkdhp = pkdh;
} else {
pkdhp = cert->dh_tmp;
}
if ((pkdhp == NULL) && (s->cert->dh_tmp_cb != NULL)) {
DH *dhp = s->cert->dh_tmp_cb(s, 0, 1024);
pkdh = ssl_dh_to_pkey(dhp);
if (pkdh == NULL) {
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto f_err;
}
pkdhp = pkdh;
}
if (pkdhp == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
if (!ssl_security(s, SSL_SECOP_TMP_DH,
EVP_PKEY_security_bits(pkdhp), 0, pkdhp)) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
SSL_R_DH_KEY_TOO_SMALL);
goto f_err;
}
if (s->s3->tmp.pkey != NULL) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
s->s3->tmp.pkey = ssl_generate_pkey(pkdhp);
if (s->s3->tmp.pkey == NULL) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE, ERR_R_EVP_LIB);
goto err;
}
dh = EVP_PKEY_get0_DH(s->s3->tmp.pkey);
EVP_PKEY_free(pkdh);
pkdh = NULL;
DH_get0_pqg(dh, &r[0], NULL, &r[1]);
DH_get0_key(dh, &r[2], NULL);
} else
#endif
#ifndef OPENSSL_NO_EC
if (type & (SSL_kECDHE | SSL_kECDHEPSK)) {
int nid;
if (s->s3->tmp.pkey != NULL) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
/* Get NID of appropriate shared curve */
nid = tls1_shared_curve(s, -2);
curve_id = tls1_ec_nid2curve_id(nid);
if (curve_id == 0) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
SSL_R_UNSUPPORTED_ELLIPTIC_CURVE);
goto err;
}
s->s3->tmp.pkey = ssl_generate_pkey_curve(curve_id);
/* Generate a new key for this curve */
if (s->s3->tmp.pkey == NULL) {
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE, ERR_R_EVP_LIB);
goto f_err;
}
/* Encode the public key. */
encodedlen = EVP_PKEY_get1_tls_encodedpoint(s->s3->tmp.pkey,
&encodedPoint);
if (encodedlen == 0) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
/*
* We only support named (not generic) curves in ECDH ephemeral key
* exchanges. In this situation, we need four additional bytes to
* encode the entire ServerECDHParams structure.
*/
n += 4 + encodedlen;
/*
* We'll generate the serverKeyExchange message explicitly so we
* can set these to NULLs
*/
r[0] = NULL;
r[1] = NULL;
r[2] = NULL;
r[3] = NULL;
} else
#endif /* !OPENSSL_NO_EC */
#ifndef OPENSSL_NO_SRP
if (type & SSL_kSRP) {
if ((s->srp_ctx.N == NULL) ||
(s->srp_ctx.g == NULL) ||
(s->srp_ctx.s == NULL) || (s->srp_ctx.B == NULL)) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
SSL_R_MISSING_SRP_PARAM);
goto err;
}
r[0] = s->srp_ctx.N;
r[1] = s->srp_ctx.g;
r[2] = s->srp_ctx.s;
r[3] = s->srp_ctx.B;
} else
#endif
{
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE);
goto f_err;
}
for (i = 0; i < 4 && r[i] != NULL; i++) {
nr[i] = BN_num_bytes(r[i]);
#ifndef OPENSSL_NO_SRP
if ((i == 2) && (type & SSL_kSRP))
n += 1 + nr[i];
else
#endif
#ifndef OPENSSL_NO_DH
/*-
* for interoperability with some versions of the Microsoft TLS
* stack, we need to zero pad the DHE pub key to the same length
* as the prime, so use the length of the prime here
*/
if ((i == 2) && (type & (SSL_kDHE | SSL_kDHEPSK)))
n += 2 + nr[0];
else
#endif
n += 2 + nr[i];
}
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL | SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_PSK)) {
if ((pkey = ssl_get_sign_pkey(s, s->s3->tmp.new_cipher, &md))
== NULL) {
al = SSL_AD_DECODE_ERROR;
goto f_err;
}
kn = EVP_PKEY_size(pkey);
/* Allow space for signature algorithm */
if (SSL_USE_SIGALGS(s))
kn += 2;
/* Allow space for signature length */
kn += 2;
} else {
pkey = NULL;
kn = 0;
}
if (!BUF_MEM_grow_clean(buf, n + SSL_HM_HEADER_LENGTH(s) + kn)) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE, ERR_LIB_BUF);
goto err;
}
d = p = ssl_handshake_start(s);
#ifndef OPENSSL_NO_PSK
if (type & SSL_PSK) {
/* copy PSK identity hint */
if (s->cert->psk_identity_hint) {
size_t len = strlen(s->cert->psk_identity_hint);
if (len > PSK_MAX_IDENTITY_LEN) {
/*
* Should not happen - we already checked this when we set
* the identity hint
*/
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
s2n(len, p);
memcpy(p, s->cert->psk_identity_hint, len);
p += len;
} else {
s2n(0, p);
}
}
#endif
for (i = 0; i < 4 && r[i] != NULL; i++) {
#ifndef OPENSSL_NO_SRP
if ((i == 2) && (type & SSL_kSRP)) {
*p = nr[i];
p++;
} else
#endif
#ifndef OPENSSL_NO_DH
/*-
* for interoperability with some versions of the Microsoft TLS
* stack, we need to zero pad the DHE pub key to the same length
* as the prime
*/
if ((i == 2) && (type & (SSL_kDHE | SSL_kDHEPSK))) {
s2n(nr[0], p);
for (j = 0; j < (nr[0] - nr[2]); ++j) {
*p = 0;
++p;
}
} else
#endif
s2n(nr[i], p);
BN_bn2bin(r[i], p);
p += nr[i];
}
#ifndef OPENSSL_NO_EC
if (type & (SSL_kECDHE | SSL_kECDHEPSK)) {
/*
* XXX: For now, we only support named (not generic) curves. In
* this situation, the serverKeyExchange message has: [1 byte
* CurveType], [2 byte CurveName] [1 byte length of encoded
* point], followed by the actual encoded point itself
*/
*p = NAMED_CURVE_TYPE;
p += 1;
*p = 0;
p += 1;
*p = curve_id;
p += 1;
*p = encodedlen;
p += 1;
memcpy(p, encodedPoint, encodedlen);
OPENSSL_free(encodedPoint);
encodedPoint = NULL;
p += encodedlen;
}
#endif
/* not anonymous */
if (pkey != NULL) {
/*
* n is the length of the params, they start at &(d[4]) and p
* points to the space at the end.
*/
if (md) {
/* send signature algorithm */
if (SSL_USE_SIGALGS(s)) {
if (!tls12_get_sigandhash(p, pkey, md)) {
/* Should never happen */
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto f_err;
}
p += 2;
}
#ifdef SSL_DEBUG
fprintf(stderr, "Using hash %s\n", EVP_MD_name(md));
#endif
if (EVP_SignInit_ex(md_ctx, md, NULL) <= 0
|| EVP_SignUpdate(md_ctx, &(s->s3->client_random[0]),
SSL3_RANDOM_SIZE) <= 0
|| EVP_SignUpdate(md_ctx, &(s->s3->server_random[0]),
SSL3_RANDOM_SIZE) <= 0
|| EVP_SignUpdate(md_ctx, d, n) <= 0
|| EVP_SignFinal(md_ctx, &(p[2]),
(unsigned int *)&i, pkey) <= 0) {
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE, ERR_LIB_EVP);
al = SSL_AD_INTERNAL_ERROR;
goto f_err;
}
s2n(i, p);
n += i + 2;
if (SSL_USE_SIGALGS(s))
n += 2;
} else {
/* Is this error check actually needed? */
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE,
SSL_R_UNKNOWN_PKEY_TYPE);
goto f_err;
}
}
if (!ssl_set_handshake_header(s, SSL3_MT_SERVER_KEY_EXCHANGE, n)) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto f_err;
}
EVP_MD_CTX_free(md_ctx);
return 1;
f_err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
err:
#ifndef OPENSSL_NO_DH
EVP_PKEY_free(pkdh);
#endif
#ifndef OPENSSL_NO_EC
OPENSSL_free(encodedPoint);
#endif
EVP_MD_CTX_free(md_ctx);
ossl_statem_set_error(s);
return 0;
}
| 0
|
328,420
|
static SlirpState *slirp_lookup(Monitor *mon, const char *vlan,
                                const char *stack)
{
    /* Resolve the user-mode network stack a monitor command refers to.
     * With no VLAN argument, fall back to the first registered stack;
     * otherwise look the client up by VLAN id and stack name. */
    if (!vlan) {
        if (TAILQ_EMPTY(&slirp_stacks)) {
            monitor_printf(mon, "user mode network stack not in use\n");
            return NULL;
        }
        return TAILQ_FIRST(&slirp_stacks);
    }

    VLANClientState *vc =
        qemu_find_vlan_client_by_name(mon, strtol(vlan, NULL, 0), stack);
    if (!vc) {
        return NULL;
    }
    /* Only "user" (slirp) clients carry a SlirpState in vc->opaque. */
    if (strcmp(vc->model, "user") != 0) {
        monitor_printf(mon, "invalid device specified\n");
        return NULL;
    }
    return vc->opaque;
}
| 0
|
297,367
|
/* Send a RST control packet in reply to @pkt.
 * Thin wrapper around vmci_transport_reply_control_pkt_fast() with zeroed
 * payload fields and an invalid handle; returns its result unchanged.
 */
static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
{
	return vmci_transport_reply_control_pkt_fast(
					pkt,
					VMCI_TRANSPORT_PACKET_TYPE_RST,
					0, 0, NULL,
					VMCI_INVALID_HANDLE);
}
| 0
|
283,078
|
// Computes the absolute rect covered by a replacement marker inside this
// text box and records it on the document's marker controller.  The tx/ty
// parameters are unused: the rect is mapped to absolute coordinates via
// localToAbsoluteQuad() instead.
void InlineTextBox::computeRectForReplacementMarker(int /*tx*/, int /*ty*/, const DocumentMarker& marker, RenderStyle* style, const Font& font)
{
    int y = selectionTop();
    int h = selectionHeight();
    // NOTE(review): the offsets and m_start are unsigned, so if
    // marker.startOffset < m_start the subtraction wraps and max(...,0) is a
    // no-op — presumably callers guarantee the marker overlaps this box;
    // confirm before relying on this clamp.
    int sPos = max(marker.startOffset - m_start, (unsigned)0);
    int ePos = min(marker.endOffset - m_start, (unsigned)m_len);
    TextRun run(textRenderer()->text()->characters() + m_start, m_len, textRenderer()->allowTabs(), textPos(), m_toAdd, !isLeftToRightDirection(), m_dirOverride || style->visuallyOrdered());
    IntPoint startPoint = IntPoint(m_x, y);
    // Rect of the marked substring, then converted to absolute coordinates.
    IntRect markerRect = enclosingIntRect(font.selectionRectForText(run, startPoint, h, sPos, ePos));
    markerRect = renderer()->localToAbsoluteQuad(FloatRect(markerRect)).enclosingBoundingBox();
    renderer()->document()->markers()->setRenderedRectForMarker(renderer()->node(), marker, markerRect);
}
| 0
|
332,075
|
/* Module init hook: register the COW block driver with the block layer. */
static void bdrv_cow_init(void)
{
    bdrv_register(&bdrv_cow);
}
| 0
|
298,950
|
rs_filter_set_recursive(RSFilter *filter, ...)
{
    /* Set a NULL-terminated list of (property-name, value) varargs on
     * `filter` AND on every filter upstream of it (via ->previous) that
     * declares a writable property of that name.  The value's C type is
     * discovered from the first filter that has the property, using the
     * GType value-table collect format, and is assumed identical across
     * all filters in the chain (a warning is emitted if types diverge).
     * Stops consuming varargs as soon as a property is found nowhere in
     * the chain, since its size can no longer be determined. */
    va_list ap;
    gchar *property_name;
    RSFilter *current_filter;
    GParamSpec *spec;
    RSFilter *first_seen_here = NULL;
    GTypeValueTable *table = NULL;
    GType type = 0;
    /* Raw storage for one collected vararg of any supported class. */
    union CValue {
        gint v_int;
        glong v_long;
        gint64 v_int64;
        gdouble v_double;
        gpointer v_pointer;
    } value;
    g_return_if_fail(RS_IS_FILTER(filter));
    va_start(ap, filter);
    /* Loop through all properties */
    while ((property_name = va_arg(ap, gchar *)))
    {
        /* We set table to NULL for every property to indicate that we (again)
         * have an "unknown" type */
        table = NULL;
        current_filter = filter;
        /* Iterate through all filters previous to filter */
        do {
            if ((spec = g_object_class_find_property(G_OBJECT_GET_CLASS(current_filter), property_name)))
                if (spec->flags & G_PARAM_WRITABLE)
                {
                    /* If we got no GTypeValueTable at this point, we aquire
                     * one. We rely on all filters using the same type for all
                     * properties equally named */
                    if (!table)
                    {
                        first_seen_here = current_filter;
                        type = spec->value_type;
                        table = g_type_value_table_peek(type);
                        /* If we have no valuetable, we're screwed, bail out */
                        if (!table)
                            g_error("No GTypeValueTable found for '%s'", g_type_name(type));
                        /* Pull the vararg exactly once, sized by the
                         * collect format of the discovered type. */
                        switch (table->collect_format[0])
                        {
                            case 'i': value.v_int = va_arg(ap, gint); break;
                            case 'l': value.v_long = va_arg(ap, glong); break;
                            case 'd': value.v_double = va_arg(ap, gdouble); break;
                            case 'p': value.v_pointer = va_arg(ap, gpointer); break;
                            default: g_error("Don't know how to collect for '%s'", g_type_name(type)); break;
                        }
                    }
                    if (table)
                    {
                        /* We try to catch cases where different filters use
                         * the same property name for different types */
                        if (type != spec->value_type)
                            g_warning("Diverging types found for property '%s' (on filter '%s' and '%s')",
                                property_name,
                                RS_FILTER_NAME(first_seen_here),
                                RS_FILTER_NAME(current_filter));
                        switch (table->collect_format[0])
                        {
                            case 'i': g_object_set(current_filter, property_name, value.v_int, NULL); break;
                            case 'l': g_object_set(current_filter, property_name, value.v_long, NULL); break;
                            case 'd': g_object_set(current_filter, property_name, value.v_double, NULL); break;
                            case 'p': g_object_set(current_filter, property_name, value.v_pointer, NULL); break;
                            default: break;
                        }
                    }
                }
        } while (RS_IS_FILTER(current_filter = current_filter->previous));
        if (!table)
        {
            /* Property unknown in the whole chain: we cannot know how wide
             * its vararg is, so further parsing would misalign — stop. */
            // g_warning("Property: %s could not be found in filter chain. Skipping further properties", property_name);
            va_end(ap);
            return;
        }
    }
    va_end(ap);
}
| 0
|
289,293
|
void nautilus_file_operations_copy (GList *files,
                                    GArray *relative_item_points,
                                    GFile *target_dir,
                                    GtkWindow *parent_window,
                                    NautilusCopyCallback done_callback,
                                    gpointer done_callback_data)
{
    CopyMoveJob *job;
    GTask *task;

    /* Build the asynchronous copy job and record where results go. */
    job = op_job_new (CopyMoveJob, parent_window);
    job->desktop_location = nautilus_get_desktop_location ();
    job->done_callback = done_callback;
    job->done_callback_data = done_callback_data;
    job->files = g_list_copy_deep (files, (GCopyFunc) g_object_ref, NULL);
    job->destination = g_object_ref (target_dir);
    nautilus_progress_info_set_destination (((CommonJob *) job)->progress, target_dir);

    /* Remember requested icon placement, if the caller supplied any. */
    if (relative_item_points != NULL && relative_item_points->len > 0) {
        job->icon_positions = g_memdup (relative_item_points->data,
                                        sizeof (GdkPoint) * relative_item_points->len);
        job->n_icon_positions = relative_item_points->len;
    }

    job->debuting_files = g_hash_table_new_full (g_file_hash,
                                                 (GEqualFunc) g_file_equal,
                                                 g_object_unref, NULL);

    inhibit_power_manager ((CommonJob *) job, _("Copying Files"));

    /* Register undo information unless we are replaying an undo/redo. */
    if (!nautilus_file_undo_manager_is_operating ()) {
        GFile *src_dir;

        src_dir = g_file_get_parent (files->data);
        job->common.undo_info =
            nautilus_file_undo_info_ext_new (NAUTILUS_FILE_UNDO_OP_COPY,
                                             g_list_length (files),
                                             src_dir, target_dir);
        g_object_unref (src_dir);
    }

    /* Run the copy on a worker thread; copy_task_done fires when finished. */
    task = g_task_new (NULL, job->common.cancellable, copy_task_done, job);
    g_task_set_task_data (task, job, NULL);
    g_task_run_in_thread (task, copy_task_thread_func);
    g_object_unref (task);
}
| 0
|
444,758
|
// Verifies that two stacked decode-headers filters returning StopAll still
// let the full request body (plus any data the filters add) reach upstream,
// for both a headers+data request and a headers+data+trailers request.
TEST_P(DownstreamProtocolIntegrationTest, TestTwoFiltersDecodeHeadersReturnsStopAll) {
  config_helper_.addFilter(R"EOF(
name: decode-headers-return-stop-all-filter
)EOF");
  config_helper_.addFilter(R"EOF(
name: decode-headers-return-stop-all-filter
)EOF");
  config_helper_.addFilter(R"EOF(
name: passthrough-filter
)EOF");

  initialize();
  codec_client_ = makeHttpConnection(lookupPort("http"));

  // Sends a request with headers and data.
  changeHeadersForStopAllTests(default_request_headers_, false);
  auto encoder_decoder = codec_client_->startRequest(default_request_headers_);
  request_encoder_ = &encoder_decoder.first;
  auto response = std::move(encoder_decoder.second);
  // count_ - 1 chunks without end_stream, then one final chunk with it.
  for (int i = 0; i < count_ - 1; i++) {
    codec_client_->sendData(*request_encoder_, size_, false);
  }
  codec_client_->sendData(*request_encoder_, size_, true);
  waitForNextUpstreamRequest();
  upstream_request_->encodeHeaders(default_response_headers_, true);
  response->waitForEndStream();
  ASSERT_TRUE(response->complete());
  // Upstream must see all downstream data plus whatever the filters injected.
  EXPECT_EQ(count_ * size_ + added_decoded_data_size_, upstream_request_->bodyLength());
  EXPECT_EQ(true, upstream_request_->complete());

  // Sends a request with headers, data, and trailers.
  auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_);
  request_encoder_ = &encoder_decoder_2.first;
  response = std::move(encoder_decoder_2.second);
  for (int i = 0; i < count_; i++) {
    codec_client_->sendData(*request_encoder_, size_, false);
  }
  Http::TestRequestTrailerMapImpl request_trailers{{"trailer", "trailer"}};
  codec_client_->sendTrailers(*request_encoder_, request_trailers);
  waitForNextUpstreamRequest();
  upstream_request_->encodeHeaders(default_response_headers_, true);
  response->waitForEndStream();
  verifyUpStreamRequestAfterStopAllFilter();
}
| 0
|
496,665
|
// Exercises the common permissions-validation scenario with the
// "enable discovery / disable access none" governance document: a matched
// reader/writer pair on the same topic must complete the secure handshake.
TEST_P(Security, BuiltinAuthenticationAndAccessAndCryptoPlugin_PermissionsEnableDiscoveryDisableAccessNone_validation_ok_enable_discovery_enable_access_none)
// *INDENT-ON*
{
    PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
    PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
    std::string governance_file("governance_enable_discovery_disable_access_none.smime");
    BuiltinAuthenticationAndAccessAndCryptoPlugin_Permissions_validation_ok_common(reader, writer, governance_file);
}
| 0
|
311,649
|
int64_t MetricsLog::GetBuildTime() {
  // Lazily cache the build timestamp: base::GetBuildTime() is constant for
  // the lifetime of the binary, so one conversion suffices.
  static int64_t cached_build_time = 0;
  if (cached_build_time == 0)
    cached_build_time = static_cast<int64_t>(base::GetBuildTime().ToTimeT());
  return cached_build_time;
}
| 0
|
400,410
|
/* Signal handler: just raise the global `interrupted` flag so the main loop
 * can stop cleanly; nothing async-signal-unsafe is done here. */
sig_handler endprog(int signal_number MY_ATTRIBUTE((unused)))
{
  interrupted=1;
}
| 0
|
425,348
|
/* PCI quirk for the SNB IOAT (QuickData) device: cross-check the VT-d base
 * address reported by the chipset (config dword 0xb0 of device 00.0) against
 * the DRHD unit DMAR matched for this device.  If they disagree the BIOS
 * DMAR tables are wrong; taint the kernel and mark the device as using a
 * dummy identity domain instead of trusting the bogus IOMMU assignment. */
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;
	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;
	/* we know that the this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
| 0
|
275,121
|
// Constructs a <body> element bound to `document` and initializes its
// script-wrappable state for the bindings layer.
HTMLBodyElement::HTMLBodyElement(Document& document)
    : HTMLElement(bodyTag, document)
{
    ScriptWrappable::init(this);
}
| 0
|
64,203
|
// Return a tile for the given resolution/index/angles/layers in the requested
// compression.  Strategy: (1) look for a ready-made tile in the cache (first
// in the requested compression, then uncompressed); (2) on a miss or a stale
// timestamp, decode a fresh tile from the source image; (3) if only an
// uncompressed tile was cached but a compressed one was asked for, compress a
// copy, insert it into the cache and return it.  Returns the tile by value.
RawTile TileManager::getTile( int resolution, int tile, int xangle, int yangle, int layers, CompressionType ctype ){
  RawTile* rawtile = NULL;
  string tileCompression;
  string compName;
  // Time the tile retrieval
  if( loglevel >= 3 ) tile_timer.start();
  /* Try to get the encoded tile directly from our cache first.
     Otherwise decode one from the source image and add it to the cache
   */
  switch( ctype )
    {
    case JPEG:
      // Prefer an already-JPEG-compressed tile, fall back to uncompressed.
      if( (rawtile = tileCache->getTile( image->getImagePath(), resolution, tile,
					 xangle, yangle, JPEG, compressor->getQuality() )) ) break;
      if( (rawtile = tileCache->getTile( image->getImagePath(), resolution, tile,
					 xangle, yangle, UNCOMPRESSED, 0 )) ) break;
      break;
    case PNG:
      // Prefer an already-PNG-compressed tile, fall back to uncompressed.
      if( (rawtile = tileCache->getTile( image->getImagePath(), resolution, tile,
					 xangle, yangle, PNG, compressor->getQuality() )) ) break;
      if( (rawtile = tileCache->getTile( image->getImagePath(), resolution, tile,
					 xangle, yangle, UNCOMPRESSED, 0 )) ) break;
      break;
    case UNCOMPRESSED:
      if( (rawtile = tileCache->getTile( image->getImagePath(), resolution, tile,
					 xangle, yangle, UNCOMPRESSED, 0 )) ) break;
      break;
    default:
      break;
    }
  if( loglevel >= 3 ){
    // Define our compression names for logging purposes
    switch( ctype ){
      case JPEG: compName = "JPEG"; break;
      case PNG: compName = "PNG"; break;
      case DEFLATE: compName = "DEFLATE"; break;
      case UNCOMPRESSED: compName = "UNCOMPRESSED"; break;
      default: break;
    }
  }
  // If we haven't been able to get a tile, get a raw one
  if( !rawtile || (rawtile && (rawtile->timestamp < image->timestamp)) ){
    // A cached tile older than the source image is treated as stale.
    if( rawtile && (rawtile->timestamp < image->timestamp) ){
      if( loglevel >= 3 ) *logfile << "TileManager :: Tile has old timestamp "
			           << rawtile->timestamp << " - " << image->timestamp
			           << " ... updating" << endl;
    }
    if( loglevel >= 4 ) *logfile << "TileManager :: Cache Miss for resolution: " << resolution
			         << ", tile: " << tile
			         << ", compression: " << compName
			         << ", quality: " << compressor->getQuality() << endl
			         << "TileManager :: Cache Size: " << tileCache->getNumElements()
			         << " tiles, " << tileCache->getMemorySize() << " MB" << endl;
    RawTile newtile = this->getNewTile( resolution, tile, xangle, yangle, layers, ctype );
    if( loglevel >= 3 ) *logfile << "TileManager :: Total Tile Access Time: "
			         << tile_timer.getTime() << " microseconds" << endl;
    return newtile;
  }
  if( loglevel >= 3 ) *logfile << "TileManager :: Cache Hit for resolution: " << resolution
		               << ", tile: " << tile
		               << ", compression: " << compName
		               << ", quality: " << compressor->getQuality() << endl
		               << "TileManager :: Cache Size: "
		               << tileCache->getNumElements() << " tiles, "
		               << tileCache->getMemorySize() << " MB" << endl;
  // Check whether the compression used for out tile matches our requested compression type. If not, we must convert
  // Perform JPEG compression iff we have an 8 bit per channel image and either 1 or 3 bands
  // PNG compression can have 8 or 16 bits and alpha channels
  if( (rawtile->compressionType == UNCOMPRESSED) &&
      ( ( ctype==JPEG && rawtile->bpc==8 && (rawtile->channels==1 || rawtile->channels==3) ) || ctype==PNG ) ){
    // Rawtile is a pointer to the cache data, so we need to create a copy of it in case we compress it
    RawTile ttt( *rawtile );
    // Crop if this is an edge tile
    if( ( (ttt.width != image->getTileWidth()) || (ttt.height != image->getTileHeight()) ) && ttt.padded ){
      if( loglevel >= 5 ) * logfile << "TileManager :: Cropping tile" << endl;
      this->crop( &ttt );
    }
    if( loglevel >=2 ) compression_timer.start();
    unsigned int oldlen = rawtile->dataLength;
    unsigned int newlen = compressor->Compress( ttt );
    if( loglevel >= 3 ) *logfile << "TileManager :: " << compName << " requested, but UNCOMPRESSED compression found in cache." << endl
			         << "TileManager :: " << compName << " Compression Time: "
			         << compression_timer.getTime() << " microseconds" << endl
			         << "TileManager :: Compression Ratio: " << newlen << "/" << oldlen << " = "
			         << ( (float)newlen/(float)oldlen ) << endl;
    // Add our compressed tile to the cache
    if( loglevel >= 3 ) insert_timer.start();
    tileCache->insert( ttt );
    if( loglevel >= 3 ) *logfile << "TileManager :: Tile cache insertion time: " << insert_timer.getTime()
		                 << " microseconds" << endl;
    if( loglevel >= 3 ) *logfile << "TileManager :: Total Tile Access Time: "
		                 << tile_timer.getTime() << " microseconds" << endl;
    return RawTile( ttt );
  }
  if( loglevel >= 3 ) *logfile << "TileManager :: Total Tile Access Time: "
		               << tile_timer.getTime() << " microseconds" << endl;
  // Cached tile already matches the request (or needs no conversion): copy out.
  return RawTile( *rawtile );
}
| 0
|
499,898
|
// Constructs a content provider bound to the page object whose content
// stream it will later supply.
ContentProvider(QPDFObjectHandle from_page) :
    from_page(from_page)
{
}
| 0
|
172,587
|
void socket_register(socket_t *socket, reactor_t *reactor, void *context, socket_cb read_cb, socket_cb write_cb) {
  assert(socket != NULL);

  // Drop any previous registration before installing the new callbacks.
  socket_unregister(socket);

  socket->read_ready = read_cb;
  socket->write_ready = write_cb;
  socket->context = context;

  // Only hook the reactor trampolines for directions the caller wants.
  void (*read_fn)(void *) = NULL;
  void (*write_fn)(void *) = NULL;
  if (read_cb != NULL)
    read_fn = internal_read_ready;
  if (write_cb != NULL)
    write_fn = internal_write_ready;

  socket->reactor_object = reactor_register(reactor, socket->fd, socket, read_fn, write_fn);
}
| 0
|
38,577
|
/* pjsua module callback for incoming requests: under the pjsua lock, hand
 * INVITEs to the call subsystem and report whether the request was consumed.
 * All other methods are left for later modules (returns PJ_FALSE). */
static pj_bool_t mod_pjsua_on_rx_request(pjsip_rx_data *rdata)
{
    pj_bool_t processed = PJ_FALSE;
    PJSUA_LOCK();
    if (rdata->msg_info.msg->line.req.method.id == PJSIP_INVITE_METHOD) {
	processed = pjsua_call_on_incoming(rdata);
    }
    PJSUA_UNLOCK();
    return processed;
}
| 0
|
364,281
|
gif_set_get_frame_info (GifContext *context)
{
        /* Advance the decoder state machine to the frame-info-parsing state. */
        context->state = GIF_GET_FRAME_INFO;
}
| 0
|
247,577
|
// Creates the window for this browser.  App panels get a panel from the
// PanelManager when the --enable-panels switch is present; everything else
// gets a regular BrowserWindow.
BrowserWindow* Browser::CreateBrowserWindow() {
  if (type() == Browser::TYPE_APP_PANEL &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnablePanels))
    return PanelManager::GetInstance()->CreatePanel(this);
  return BrowserWindow::CreateBrowserWindow(this);
}
| 0
|
282,634
|
// Notifies every registered TextureDetachObserver that `texture` has been
// detached from a framebuffer.
void FramebufferManager::OnTextureRefDetached(TextureRef* texture) {
  for (TextureDetachObserverVector::iterator it =
           texture_detach_observers_.begin();
       it != texture_detach_observers_.end();
       ++it) {
    TextureDetachObserver* observer = *it;
    observer->OnTextureRefDetachedFromFramebuffer(texture);
  }
}
| 0
|
470,414
|
/* Destroy a host-name resolver: unlink it from the server's resolver list,
 * tear down its A/AAAA record browsers and pending timeout, drop the cached
 * address record, and free the owned name and the object itself. */
void avahi_s_host_name_resolver_free(AvahiSHostNameResolver *r) {
    assert(r);
    AVAHI_LLIST_REMOVE(AvahiSHostNameResolver, resolver, r->server->host_name_resolvers, r);
    if (r->record_browser_a)
        avahi_s_record_browser_free(r->record_browser_a);
    if (r->record_browser_aaaa)
        avahi_s_record_browser_free(r->record_browser_aaaa);
    if (r->time_event)
        avahi_time_event_free(r->time_event);
    if (r->address_record)
        avahi_record_unref(r->address_record);
    avahi_free(r->host_name);
    avahi_free(r);
}
| 0
|
519,539
|
// Parse `str` (length `length`, charset `cs`) as a double.
// On conversion error (*error set by my_strntod) a data-out-of-range warning
// is pushed and *error is forced to 1; otherwise, when the session checks
// cut fields, EDOM/truncation problems (no digits consumed, trailing junk)
// also set *error.  Returns the parsed value either way.
double Field_real::get_double(const char *str, size_t length, CHARSET_INFO *cs,
                              int *error)
{
  char *end;
  double nr= my_strntod(cs,(char*) str, length, &end, error);
  if (unlikely(*error))
  {
    set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
    *error= 1;
  }
  else if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION &&
           check_edom_and_truncation("double", str == end,
                                     cs, str, length, end))
    *error= 1;
  return nr;
}
| 0
|
138,033
|
/* Called when a vfsmount is going away: drop all fsnotify marks attached
 * to it so no watcher keeps a reference to the dying mount. */
void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
	fsnotify_clear_marks_by_mount(mnt);
}
| 0
|
77,578
|
/* R_API normally comes from r_types.h; default to nothing if absent so the
 * function stays compilable in isolation. */
#ifndef R_API
#define R_API
#endif

/* Split a string into lines in place and return the offset of each line.
 *
 * Every '\n' in `str` is overwritten with '\0', so afterwards `str` holds a
 * sequence of NUL-terminated lines addressable as `str + result[i]`.
 *
 * str    string to split; modified in place.  May be NULL.
 * count  if non-NULL, receives the number of entries in the result: the
 *        number of '\n' characters plus one (a trailing fragment without a
 *        newline counts as a line; so does the empty tail after a final
 *        newline).
 *
 * Returns a heap-allocated offset array the caller must free(), or NULL if
 * `str` is NULL or allocation fails.
 */
R_API int *r_str_split_lines(char *str, int *count) {
	int i;
	int lines = 0;
	if (!str) {
		return NULL;
	}
	// Count newlines to size the index array.
	for (i = 0; str[i]; i++) {
		if (str[i] == '\n') {
			lines++;
		}
	}
	// One slot per newline plus one for the first line.  Note the standard
	// calloc argument order: calloc(nmemb, size).
	int *indexes = calloc (lines + 1, sizeof (int));
	if (!indexes) {
		return NULL;
	}
	int line = 0;
	indexes[line++] = 0;
	// Terminate each line and record where the next one starts.
	for (i = 0; str[i]; i++) {
		if (str[i] == '\n') {
			str[i] = 0;
			indexes[line++] = i + 1;
		}
	}
	if (count) {
		*count = line;
	}
	return indexes;
}
| 0
|
19,825
|
/* Grow `window` downward by `count` rows at the expense of the window line
 * directly below it.  Returns non-zero if a lower line existed and the
 * resize was performed, zero otherwise. */
static int try_grow_lower(MAIN_WINDOW_REC *window, int count)
{
	MAIN_WINDOW_REC *below;
	GSList *grow_line, *shrink_line, *iter;

	below = mainwindows_find_lower(window);
	if (below == NULL)
		return 0;

	grow_line = mainwindows_get_line(below);
	shrink_line = mainwindows_get_line(window);

	/* Everything on the lower line starts `count` rows earlier... */
	for (iter = grow_line; iter != NULL; iter = iter->next) {
		MAIN_WINDOW_REC *rec = iter->data;
		rec->first_line -= count;
	}
	/* ...and the current line gives up its last `count` rows. */
	for (iter = shrink_line; iter != NULL; iter = iter->next) {
		MAIN_WINDOW_REC *rec = iter->data;
		rec->last_line -= count;
	}

	mainwindows_resize_two(grow_line, shrink_line, count);

	g_slist_free(shrink_line);
	g_slist_free(grow_line);
	return 1;
}
| 0
|
221,680
|
/* Sliding-window helper for frame_num gaps: decide whether the least frame
 * to evict is a "gap" (non-existing) frame rather than a real DPB node.
 *
 * ps_dpb_mgr    DPB manager holding the gap ranges and poc/buf-id map.
 * i4_frame_num  frame_num of the candidate real node, or INVALID_FRAME_NUM
 *               when no valid short-term buffer exists.
 * pu1_del_node  out: set to 1 if the caller must delete its real node, 0 if
 *               a gap entry was deleted here instead.
 *
 * Returns OK, or ERROR_DBP_MANAGER_T when no gap start can be found while
 * no real candidate was supplied. */
WORD32 ih264d_delete_gap_frm_sliding(dpb_manager_t *ps_dpb_mgr,
                                     WORD32 i4_frame_num,
                                     UWORD8 *pu1_del_node)
{
    WORD8 i1_gap_idx, i, j, j_min;
    WORD32 *pi4_gaps_start_frm_num, *pi4_gaps_end_frm_num, i4_gap_frame_num;
    WORD32 i4_start_frm_num, i4_end_frm_num;
    WORD32 i4_max_frm_num;
    WORD32 i4_frm_num, i4_gap_frm_num_min;

    /* find the least frame num from gaps and current DPB node */
    /* Delete the least one */
    *pu1_del_node = 1;
    /* No gaps recorded: the caller's real node is the one to delete. */
    if(0 == ps_dpb_mgr->u1_num_gaps)
        return OK;
    pi4_gaps_start_frm_num = ps_dpb_mgr->ai4_gaps_start_frm_num;
    pi4_gaps_end_frm_num = ps_dpb_mgr->ai4_gaps_end_frm_num;
    i4_gap_frame_num = INVALID_FRAME_NUM;
    i4_max_frm_num = ps_dpb_mgr->i4_max_frm_num;

    i1_gap_idx = -1;
    if(INVALID_FRAME_NUM != i4_frame_num)
    {
        /* Find a gap whose start precedes the candidate frame_num, taking
         * modulo-MaxFrameNum wrap-around of gap ranges into account. */
        i4_gap_frame_num = i4_frame_num;
        for(i = 0; i < MAX_FRAMES; i++)
        {
            i4_start_frm_num = pi4_gaps_start_frm_num[i];
            if(INVALID_FRAME_NUM != i4_start_frm_num)
            {
                i4_end_frm_num = pi4_gaps_end_frm_num[i];
                if(i4_end_frm_num < i4_max_frm_num)
                {
                    if(i4_start_frm_num <= i4_gap_frame_num)
                    {
                        i4_gap_frame_num = i4_start_frm_num;
                        i1_gap_idx = i;
                    }
                }
                else
                {
                    /* Gap wraps past MaxFrameNum: compare both unwrapped
                     * interpretations of the candidate. */
                    if(((i4_start_frm_num <= i4_gap_frame_num)
                                    && (i4_gap_frame_num <= i4_max_frm_num))
                                    || ((i4_start_frm_num >= i4_gap_frame_num)
                                                    && ((i4_gap_frame_num
                                                                    + i4_max_frm_num)
                                                                    >= i4_end_frm_num)))
                    {
                        i4_gap_frame_num = i4_start_frm_num;
                        i1_gap_idx = i;
                    }
                }
            }
        }
    }
    else
    {
        /* no valid short term buffers, delete one gap from the least start */
        /* of gap sequence */
        i4_gap_frame_num = pi4_gaps_start_frm_num[0];
        i1_gap_idx = 0;
        for(i = 1; i < MAX_FRAMES; i++)
        {
            if(INVALID_FRAME_NUM != pi4_gaps_start_frm_num[i])
            {
                if(pi4_gaps_start_frm_num[i] < i4_gap_frame_num)
                {
                    i4_gap_frame_num = pi4_gaps_start_frm_num[i];
                    i1_gap_idx = i;
                }
            }
        }
        if(INVALID_FRAME_NUM == i4_gap_frame_num)
        {
            UWORD32 i4_error_code;
            i4_error_code = ERROR_DBP_MANAGER_T;
            return i4_error_code;
        }
    }

    if(-1 != i1_gap_idx)
    {
        /* find least frame_num in the poc_map, which is in this range */
        i4_start_frm_num = pi4_gaps_start_frm_num[i1_gap_idx];
        if(i4_start_frm_num < 0)
            i4_start_frm_num += i4_max_frm_num;
        i4_end_frm_num = pi4_gaps_end_frm_num[i1_gap_idx];
        if(i4_end_frm_num < 0)
            i4_end_frm_num += i4_max_frm_num;

        i4_gap_frm_num_min = 0xfffffff;
        j_min = MAX_FRAMES;
        for(j = 0; j < MAX_FRAMES; j++)
        {
            i4_frm_num = ps_dpb_mgr->ai4_poc_buf_id_map[j][2];
            if((i4_start_frm_num <= i4_frm_num)
                            && (i4_end_frm_num >= i4_frm_num))
            {
                if(i4_frm_num < i4_gap_frm_num_min)
                {
                    j_min = j;
                    i4_gap_frm_num_min = i4_frm_num;
                }
            }
        }

        if(j_min != MAX_FRAMES)
        {
            /* Invalidate the chosen map slot and shrink the gap's
             * bookkeeping; the caller's real node survives. */
            ps_dpb_mgr->ai4_poc_buf_id_map[j_min][0] = -1;
            ps_dpb_mgr->ai4_poc_buf_id_map[j_min][1] = 0x7fffffff;
            ps_dpb_mgr->ai4_poc_buf_id_map[j_min][2] = GAP_FRAME_NUM;
            ps_dpb_mgr->i1_gaps_deleted++;
            ps_dpb_mgr->ai1_gaps_per_seq[i1_gap_idx]--;
            ps_dpb_mgr->u1_num_gaps--;
            *pu1_del_node = 0;
            if(0 == ps_dpb_mgr->ai1_gaps_per_seq[i1_gap_idx])
            {
                ps_dpb_mgr->ai4_gaps_start_frm_num[i1_gap_idx] =
                                INVALID_FRAME_NUM;
                ps_dpb_mgr->ai4_gaps_end_frm_num[i1_gap_idx] = 0;
            }
        }
    }

    return OK;
}
| 0
|
163,587
|
// Destructor: unless the child list is already known to be stale, flag every
// child as having a bad (destroyed) parent so later accesses can be caught.
InlineFlowBox::~InlineFlowBox()
{
    if (!m_hasBadChildList)
        for (InlineBox* child = firstChild(); child; child = child->nextOnLine())
            child->setHasBadParent();
}
| 0
|
439,257
|
// Registers `conn` in the shared connection list under the mutex and returns
// a Guard referencing it — presumably the Guard's destructor removes the
// entry again (RAII unregistration); confirm against Guard's definition.
[[nodiscard]] Guard add(Connection& conn) {
    std::lock_guard lock{mutex};
    connections.push_back(conn);
    return Guard{this, &conn};
}
| 0
|
79,559
|
virDomainGetJobStats(virDomainPtr domain,
                     int *type,
                     virTypedParameterPtr *params,
                     int *nparams,
                     unsigned int flags)
{
    /* Public API entry: fetch statistics of the domain's job as typed
     * parameters.  Validates the domain and the three output pointers,
     * then dispatches to the hypervisor driver's domainGetJobStats
     * callback; reports "unsupported" if the driver lacks one.  Returns
     * the driver's result (>= 0) or -1 with the error dispatched. */
    virConnectPtr conn;

    VIR_DOMAIN_DEBUG(domain, "type=%p, params=%p, nparams=%p, flags=%x",
                     type, params, nparams, flags);

    virResetLastError();

    virCheckDomainReturn(domain, -1);
    virCheckNonNullArgGoto(type, error);
    virCheckNonNullArgGoto(params, error);
    virCheckNonNullArgGoto(nparams, error);

    conn = domain->conn;

    if (conn->driver->domainGetJobStats) {
        int ret;
        ret = conn->driver->domainGetJobStats(domain, type, params,
                                              nparams, flags);
        if (ret < 0)
            goto error;
        return ret;
    }

    virReportUnsupportedError();

 error:
    virDispatchError(domain->conn);
    return -1;
}
| 0
|
389,992
|
// Serializes this handshake header into the output buffer via the
// stream-insertion operator and returns the buffer for chaining.
output_buffer& HandShakeHeader::get(output_buffer& out) const
{
    return out << *this;
}
| 0
|
6,061
|
/* Merge one parsed parameter tree (zdata) into the accumulated params hash.
 *
 * If the key at zdata's current position is new, a fresh entry of shape
 * [value => ..., arguments => []] is created and *current_args is pointed at
 * its arguments table.  If the key already exists, zdata's nested structure
 * is walked in parallel with the existing entry's "value" subtree, grafting
 * the first branch that is missing (or the leaf) into it.  Finally
 * *current_param is left pointing at the innermost value reached, so the
 * caller can append further data there.  Refcounts of grafted zvals are
 * bumped with Z_ADDREF_PP before insertion. */
static void merge_param(HashTable *params, zval *zdata, zval ***current_param, zval ***current_args TSRMLS_DC)
{
	zval **ptr, **zdata_ptr;
	php_http_array_hashkey_t hkey = php_http_array_hashkey_init(0);
#if 0
	{
		zval tmp;
		INIT_PZVAL_ARRAY(&tmp, params);
		fprintf(stderr, "params = ");
		zend_print_zval_r(&tmp, 1 TSRMLS_CC);
		fprintf(stderr, "\n");
	}
#endif
	hkey.type = zend_hash_get_current_key_ex(Z_ARRVAL_P(zdata), &hkey.str, &hkey.len, &hkey.num, hkey.dup, NULL);
	if ((hkey.type == HASH_KEY_IS_STRING && !zend_hash_exists(params, hkey.str, hkey.len))
	||	(hkey.type == HASH_KEY_IS_LONG && !zend_hash_index_exists(params, hkey.num))
	) {
		zval *tmp, *arg, **args;
		/* create the entry if it doesn't exist */
		zend_hash_get_current_data(Z_ARRVAL_P(zdata), (void *) &ptr);
		Z_ADDREF_PP(ptr);
		MAKE_STD_ZVAL(tmp);
		array_init(tmp);
		add_assoc_zval_ex(tmp, ZEND_STRS("value"), *ptr);

		MAKE_STD_ZVAL(arg);
		array_init(arg);
		zend_hash_update(Z_ARRVAL_P(tmp), "arguments", sizeof("arguments"), (void *) &arg, sizeof(zval *), (void *) &args);
		*current_args = args;

		if (hkey.type == HASH_KEY_IS_STRING) {
			zend_hash_update(params, hkey.str, hkey.len, (void *) &tmp, sizeof(zval *), (void *) &ptr);
		} else {
			zend_hash_index_update(params, hkey.num, (void *) &tmp, sizeof(zval *), (void *) &ptr);
		}
	} else {
		/* merge */
		if (hkey.type == HASH_KEY_IS_STRING) {
			zend_hash_find(params, hkey.str, hkey.len, (void *) &ptr);
		} else {
			zend_hash_index_find(params, hkey.num, (void *) &ptr);
		}

		zdata_ptr = &zdata;

		if (Z_TYPE_PP(ptr) == IS_ARRAY
		&&	SUCCESS == zend_hash_find(Z_ARRVAL_PP(ptr), "value", sizeof("value"), (void *) &ptr)
		&&	SUCCESS == zend_hash_get_current_data(Z_ARRVAL_PP(zdata_ptr), (void *) &zdata_ptr)
		) {
			/*
			 * params = [arr => [value => [0 => 1]]]
			 *                            ^- ptr
			 * zdata  = [arr => [0 => NULL]]
			 *                  ^- zdata_ptr
			 */
			zval **test_ptr;

			/* Descend both trees in lock-step until zdata's branch is
			 * missing from params (graft it) or a leaf is reached. */
			while (Z_TYPE_PP(zdata_ptr) == IS_ARRAY
			&&	SUCCESS == zend_hash_get_current_data(Z_ARRVAL_PP(zdata_ptr), (void *) &test_ptr)
			) {
				if (Z_TYPE_PP(test_ptr) == IS_ARRAY) {
					/* now find key in ptr */
					if (HASH_KEY_IS_STRING == zend_hash_get_current_key_ex(Z_ARRVAL_PP(zdata_ptr), &hkey.str, &hkey.len, &hkey.num, hkey.dup, NULL)) {
						if (SUCCESS == zend_hash_find(Z_ARRVAL_PP(ptr), hkey.str, hkey.len, (void *) &ptr)) {
							zdata_ptr = test_ptr;
						} else {
							Z_ADDREF_PP(test_ptr);
							zend_hash_update(Z_ARRVAL_PP(ptr), hkey.str, hkey.len, (void *) test_ptr, sizeof(zval *), (void *) &ptr);
							break;
						}
					} else {
						if (SUCCESS == zend_hash_index_find(Z_ARRVAL_PP(ptr), hkey.num, (void *) &ptr)) {
							zdata_ptr = test_ptr;
						} else if (hkey.num) {
							Z_ADDREF_PP(test_ptr);
							zend_hash_index_update(Z_ARRVAL_PP(ptr), hkey.num, (void *) test_ptr, sizeof(zval *), (void *) &ptr);
							break;
						} else {
							Z_ADDREF_PP(test_ptr);
							zend_hash_next_index_insert(Z_ARRVAL_PP(ptr), (void *) test_ptr, sizeof(zval *), (void *) &ptr);
							break;
						}
					}
				} else {
					/* this is the leaf */
					Z_ADDREF_PP(test_ptr);
					if (Z_TYPE_PP(ptr) != IS_ARRAY) {
						zval_dtor(*ptr);
						array_init(*ptr);
					}
					if (HASH_KEY_IS_STRING == zend_hash_get_current_key_ex(Z_ARRVAL_PP(zdata_ptr), &hkey.str, &hkey.len, &hkey.num, hkey.dup, NULL)) {
						zend_hash_update(Z_ARRVAL_PP(ptr), hkey.str, hkey.len, (void *) test_ptr, sizeof(zval *), (void *) &ptr);
					} else if (hkey.num) {
						zend_hash_index_update(Z_ARRVAL_PP(ptr), hkey.num, (void *) test_ptr, sizeof(zval *), (void *) &ptr);
					} else {
						zend_hash_next_index_insert(Z_ARRVAL_PP(ptr), (void *) test_ptr, sizeof(zval *), (void *) &ptr);
					}
					break;
				}
			}
		}
	}

	/* bubble up */
	while (Z_TYPE_PP(ptr) == IS_ARRAY && SUCCESS == zend_hash_get_current_data(Z_ARRVAL_PP(ptr), (void *) &ptr));
	*current_param = ptr;
}
| 1
|
79,459
|
/* Decide whether a write on this virtio console port would block.
 * Returns false if the guest side is gone (caller should bail out),
 * true if the host is not connected, otherwise — after reclaiming any
 * buffers the host has consumed, under outvq_lock — whether the out
 * virtqueue is still full. */
static bool will_write_block(struct port *port)
{
	bool ret;

	if (!port->guest_connected) {
		/* Port got hot-unplugged. Let's exit. */
		return false;
	}
	if (!port->host_connected)
		return true;

	spin_lock_irq(&port->outvq_lock);
	/*
	 * Check if the Host has consumed any buffers since we last
	 * sent data (this is only applicable for nonblocking ports).
	 */
	reclaim_consumed_buffers(port);
	ret = port->outvq_full;
	spin_unlock_irq(&port->outvq_lock);

	return ret;
}
| 0
|
457,673
|
/* Close all file descriptors in [fd, max_fd] for the current task.
 * With CLOSE_RANGE_UNSHARE the task first unshares its fd table so the
 * closes don't affect other tasks sharing it; the new table is installed
 * at the end and the old one dropped.  Returns 0 or a negative errno. */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	unsigned int cur_max;
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~CLOSE_RANGE_UNSHARE)
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	rcu_read_lock();
	cur_max = files_fdtable(cur_fds)->max_fds;
	rcu_read_unlock();

	/* cap to last valid index into fdtable */
	cur_max--;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the requested range is greater than the current maximum,
		 * we're closing everything so only copy all file descriptors
		 * beneath the lowest file descriptor.
		 */
		if (max_fd >= cur_max)
			max_unshare_fds = fd;

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	max_fd = min(max_fd, cur_max);
	/* Close each descriptor in range; descriptors not open are skipped. */
	while (fd <= max_fd) {
		struct file *file;

		file = pick_file(cur_fds, fd++);
		if (!file)
			continue;

		filp_close(file, cur_fds);
		cond_resched();
	}

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
| 0
|
177,352
|
// Destructor: close the underlying FDK AAC encoder handle and reset the
// component's internal state.
SoftAACEncoder2::~SoftAACEncoder2() {
    aacEncClose(&mAACEncoder);
    onReset();
}
| 0
|
84,332
|
/* In-place 8-point inverse transform over one row of coefficients b[0..7].
 * Three butterfly stages using the W0..W7 weight constants defined in the
 * surrounding file; intermediate cross terms s1/s2 and the final outputs
 * are rounded back down with +128 / +(1<<7) before the >>8 shifts. */
static void wmv2_idct_row(short * b)
{
    int s1, s2;
    int a0, a1, a2, a3, a4, a5, a6, a7;
    /* step 1: weighted sums/differences of paired input coefficients */
    a1 = W1 * b[1] + W7 * b[7];
    a7 = W7 * b[1] - W1 * b[7];
    a5 = W5 * b[5] + W3 * b[3];
    a3 = W3 * b[5] - W5 * b[3];
    a2 = W2 * b[2] + W6 * b[6];
    a6 = W6 * b[2] - W2 * b[6];
    a0 = W0 * b[0] + W0 * b[4];
    a4 = W0 * b[0] - W0 * b[4];
    /* step 2: rotate the odd-coefficient terms (181/256 ~= 1/sqrt(2)) */
    s1 = (181 * (a1 - a5 + a7 - a3) + 128) >> 8; // 1, 3, 5, 7
    s2 = (181 * (a1 - a5 - a7 + a3) + 128) >> 8;
    /* step 3: combine even and odd halves with rounding */
    b[0] = (a0 + a2 + a1 + a5 + (1 << 7)) >> 8;
    b[1] = (a4 + a6 + s1      + (1 << 7)) >> 8;
    b[2] = (a4 - a6 + s2      + (1 << 7)) >> 8;
    b[3] = (a0 - a2 + a7 + a3 + (1 << 7)) >> 8;
    b[4] = (a0 - a2 - a7 - a3 + (1 << 7)) >> 8;
    b[5] = (a4 - a6 - s2      + (1 << 7)) >> 8;
    b[6] = (a4 + a6 - s1      + (1 << 7)) >> 8;
    b[7] = (a0 + a2 - a1 - a5 + (1 << 7)) >> 8;
}
| 1
|
418,274
|
// Handle a "list multipart upload parts" request: resolve the upload's meta
// object, verify it exists, then fetch the part listing into this->parts.
void RGWListMultipart::execute()
{
map<string, bufferlist> xattrs;
string meta_oid;
RGWMPObj mp;
// Parse/validate request parameters (upload_id, max_parts, marker, ...).
op_ret = get_params();
if (op_ret < 0)
return;
// Derive the multipart meta object name from the target object + upload id.
mp.init(s->object.name, upload_id);
meta_oid = mp.get_meta();
// Loading the meta object doubles as an existence check for the upload;
// also fills |policy| used by the response path.
op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs);
if (op_ret < 0)
return;
// Populate |parts| (up to max_parts, starting after |marker|); |truncated|
// tells the caller whether more parts remain.
op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
marker, parts, NULL, &truncated);
}
| 0
|
214,877
|
// Throw |exception| into the given V8 isolate, unless script execution is
// already being terminated (throwing then would be pointless/unsafe).
// Always returns undefined so callers can use it as a return value.
v8::Handle<v8::Value> V8ThrowException::throwException(v8::Handle<v8::Value> exception, v8::Isolate* isolate)
{
if (!v8::V8::IsExecutionTerminating())
isolate->ThrowException(exception);
return v8::Undefined(isolate);
}
| 0
|
429,081
|
/* Remove the HEIC coder's registration from the ImageMagick coder list.
   The UnregisterMagickInfo() result is deliberately discarded. */
ModuleExport void UnregisterHEICImage(void)
{
(void) UnregisterMagickInfo("HEIC");
}
| 0
|
40,478
|
/* Free a PRNG object allocated for this context. A NULL prng is a no-op
   (the guard also avoids passing NULL to the library's iw_free wrapper,
   whose NULL behavior is not visible here). */
void iwpvt_prng_destroy(struct iw_context *ctx, struct iw_prng *prng)
{
if(prng) iw_free(ctx,(void*)prng);
}
| 0
|
19,135
|
/* Store the magenta channel sample into a pixel, using the image's channel
   map to locate the magenta component's offset within the pixel array. */
static inline void SetPixelMagenta ( const Image * restrict image , const Quantum magenta , Quantum * restrict pixel ) {
pixel [ image -> channel_map [ MagentaPixelChannel ] . offset ] = magenta ;
}
| 0
|
342,314
|
/*
 * Find a slot in ctx->registered_frames usable for registering a new CUDA
 * frame with NVENC.
 *
 * If the table is full, reuse the first slot whose frame is not currently
 * mapped, unregistering its stale resource first; otherwise hand out the
 * next never-used slot. Returns the slot index, or AVERROR(ENOMEM) when
 * every slot is mapped.
 */
static int nvenc_find_free_reg_resource(AVCodecContext *avctx)
{
NvencContext *ctx = avctx->priv_data;
NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
int i;
if (ctx->nb_registered_frames == FF_ARRAY_ELEMS(ctx->registered_frames)) {
for (i = 0; i < ctx->nb_registered_frames; i++) {
if (!ctx->registered_frames[i].mapped) {
/* Release the previous registration before the slot is reused. */
if (ctx->registered_frames[i].regptr) {
p_nvenc->nvEncUnregisterResource(ctx->nvencoder,
ctx->registered_frames[i].regptr);
ctx->registered_frames[i].regptr = NULL;
}
return i;
}
}
} else {
/* Table not yet full: grow it by one and use the fresh slot. */
return ctx->nb_registered_frames++;
}
av_log(avctx, AV_LOG_ERROR, "Too many registered CUDA frames\n");
return AVERROR(ENOMEM);
}
| 0
|
206,547
|
// Thin forwarding accessor: delegates to the selection editor, which owns
// the canonicalized (visible) selection state for the DOM tree.
const VisibleSelection& FrameSelection::ComputeVisibleSelectionInDOMTree()
    const {
return selection_editor_->ComputeVisibleSelectionInDOMTree();
}
| 0
|
252,779
|
// Return the tab under |point|, or NULL if none.
// With touch layout active, tabs may overlap: search outward from the
// active tab (leftward first, then rightward) so the visually topmost tab
// wins. Without touch layout, a simple linear hit-test suffices.
Tab* TabStrip::FindTabForEvent(const gfx::Point& point) {
if (touch_layout_.get()) {
int active_tab_index = touch_layout_->active_index();
if (active_tab_index != -1) {
// Scan from the active tab toward index 0, then from active+1 upward.
Tab* tab = FindTabForEventFrom(point, active_tab_index, -1);
if (!tab)
tab = FindTabForEventFrom(point, active_tab_index + 1, 1);
return tab;
} else if (tab_count()) {
// No active tab: scan all tabs left-to-right.
return FindTabForEventFrom(point, 0, 1);
}
} else {
for (int i = 0; i < tab_count(); ++i) {
if (IsPointInTab(tab_at(i), point))
return tab_at(i);
}
}
return NULL;
}
| 0
|
491,005
|
/*
 * Parse one entry of a WASM element section from bin->buf.
 * bound is the last readable offset; index is this entry's position within
 * the section. Returns a freshly allocated entry, or NULL on truncated or
 * invalid input (any partial allocation is freed).
 */
static RBinWasmElementEntry *parse_element_entry(RBinWasmObj *bin, ut64 bound, ut32 index) {
	RBuffer *b = bin->buf;
	RBinWasmElementEntry *elem = R_NEW0 (RBinWasmElementEntry);
	if (elem) {
		elem->sec_i = index;
		elem->file_offset = r_buf_tell (b);
		if (!consume_u32_r (b, bound, &elem->index)) {
			goto beach;
		}
		if (!consume_init_expr_r (b, bound, R_BIN_WASM_END_OF_CODE, NULL)) {
			goto beach;
		}
		if (!consume_u32_r (b, bound, &elem->num_elem)) {
			goto beach;
		}
		/* Fix: increment j each iteration. It was never advanced, so the
		 * "j < elem->num_elem" bound was dead and the loop kept consuming
		 * u32s until the buffer bound was exhausted, reading past the
		 * declared number of elements. */
		ut32 j;
		for (j = 0; r_buf_tell (b) <= bound && j < elem->num_elem; j++) {
			// TODO: allocate space and fill entry
			if (!consume_u32_r (b, bound, NULL)) {
				goto beach;
			}
		}
	}
	return elem;
beach:
	free (elem);
	return NULL;
}
| 0
|
301,965
|
/*
 * sysctl handler for fs.nr-open style reads of the open-file count.
 * Refreshes the cached files_stat.nr_files from the per-cpu counters, then
 * delegates formatting/parsing to the generic unsigned-long handler.
 * NOTE(review): the refresh happens for writes too; harmless since the
 * generic handler overwrites the field afterwards for writable entries.
 */
int proc_nr_files(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
files_stat.nr_files = get_nr_files();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
| 0
|
521,066
|
// Charset to use when sending this item's value over the client protocol;
// delegated to the item's type handler so each type can pick (e.g. binary
// vs. text representation).
CHARSET_INFO *charset_for_protocol(void) const
{
return type_handler()->charset_for_protocol(this);
};
| 0
|
356,745
|
/*
 * JPEG-2000 T.1 encoder: raw (bypass-mode) magnitude refinement pass.
 *
 * Walks the code-block in the standard stripe order (stripes of up to 4
 * rows, columns left to right) and emits one raw refinement bit per
 * already-significant coefficient via rawrefpass_step(). Accumulates the
 * pass's distortion contribution into *nmsedec. If |term| is set, the
 * bitstream is byte-aligned with the 0x2a padding pattern at the end of
 * the pass. Always returns 0.
 */
static int jpc_encrawrefpass(jpc_bitstream_t *out, int bitpos, int vcausalflag, jas_matrix_t *flags,
jas_matrix_t *data, int term, long *nmsedec)
{
int i;
int j;
int k;
int one;
int vscanlen;
int width;
int height;
int frowstep;
int drowstep;
int fstripestep;
int dstripestep;
jpc_fix_t *fstripestart;
jpc_fix_t *dstripestart;
jpc_fix_t *fvscanstart;
jpc_fix_t *dvscanstart;
jpc_fix_t *dp;
jpc_fix_t *fp;
*nmsedec = 0;
width = jas_matrix_numcols(data);
height = jas_matrix_numrows(data);
frowstep = jas_matrix_rowstep(flags);
drowstep = jas_matrix_rowstep(data);
/* A stripe is 4 rows, hence row step << 2. */
fstripestep = frowstep << 2;
dstripestep = drowstep << 2;
/* Bit under refinement, shifted up by the fixed-point guard bits. */
one = 1 << (bitpos + JPC_NUMEXTRABITS);
/* The flags matrix has a 1-cell border; data does not. */
fstripestart = jas_matrix_getref(flags, 1, 1);
dstripestart = jas_matrix_getref(data, 0, 0);
for (i = height; i > 0; i -= 4, fstripestart += fstripestep,
dstripestart += dstripestep) {
fvscanstart = fstripestart;
dvscanstart = dstripestart;
/* Last stripe may be shorter than 4 rows. */
vscanlen = JAS_MIN(i, 4);
for (j = width; j > 0; --j, ++fvscanstart, ++dvscanstart) {
fp = fvscanstart;
dp = dvscanstart;
k = vscanlen;
/* Manually unrolled vertical scan: up to 4 refinement steps,
   bailing out early when the stripe is short. */
rawrefpass_step(fp, dp, bitpos, one, nmsedec,
out, vcausalflag);
if (--k <= 0) {
continue;
}
fp += frowstep;
dp += drowstep;
rawrefpass_step(fp, dp, bitpos, one, nmsedec,
out, vcausalflag);
if (--k <= 0) {
continue;
}
fp += frowstep;
dp += drowstep;
rawrefpass_step(fp, dp, bitpos, one, nmsedec,
out, vcausalflag);
if (--k <= 0) {
continue;
}
fp += frowstep;
dp += drowstep;
rawrefpass_step(fp, dp, bitpos, one, nmsedec,
out, vcausalflag);
}
}
if (term) {
/* Terminate the pass: align to a byte boundary with 0x2a padding. */
jpc_bitstream_outalign(out, 0x2a);
}
return 0;
}
| 0
|
417,924
|
// Install |mgr| as the fallback REST manager, taking ownership. Any
// previously installed default manager is deleted first.
void RGWRESTMgr::register_default_mgr(RGWRESTMgr *mgr)
{
delete default_mgr;
default_mgr = mgr;
}
| 0
|
144,255
|
/* Return the number of messages currently stored in the batch. */
batchNumMsgs(batch_t *pBatch) {
return pBatch->nElem;
}
| 0
|
187,095
|
// IPC reply handler: record the drag operation the web process resolved for
// the last drag controller action, for later use by the UI process.
void WebPageProxy::didPerformDragControllerAction(uint64_t resultOperation)
{
m_currentDragOperation = static_cast<DragOperation>(resultOperation);
}
| 0
|
200,250
|
/**
 * xmlBufCreateSize:
 * @size: initial content capacity to reserve (0 defers allocation)
 *
 * Create a new buffer, reserving @size content bytes plus two extra bytes
 * (the original comment says "+1 for ending null"; the second extra byte is
 * kept as-is for compatibility with the legacy buffer code).
 *
 * Returns the new buffer, or NULL on allocation failure or when @size is so
 * large that the reservation would overflow size_t.
 */
xmlBufCreateSize(size_t size) {
    xmlBufPtr ret;

    /* Fix: reject sizes where size + 2 would wrap; xmlMallocAtomic() below
     * would otherwise be called with a tiny wrapped value while callers
     * believe @size bytes are available. */
    if (size > (size_t) -1 - 2)
        return(NULL);
    ret = (xmlBufPtr) xmlMalloc(sizeof(xmlBuf));
    if (ret == NULL) {
        xmlBufMemoryError(NULL, "creating buffer");
        return(NULL);
    }
    ret->compat_use = 0;
    ret->use = 0;
    ret->error = 0;
    ret->buffer = NULL;
    ret->alloc = xmlBufferAllocScheme;
    ret->size = (size ? size+2 : 0); /* +1 for ending null */
    /* NOTE(review): compat_size narrows size_t to int for the legacy
     * xmlBuffer ABI; sizes above INT_MAX are misrepresented there. */
    ret->compat_size = (int) ret->size;
    if (ret->size){
        ret->content = (xmlChar *) xmlMallocAtomic(ret->size * sizeof(xmlChar));
        if (ret->content == NULL) {
            xmlBufMemoryError(ret, "creating buffer");
            xmlFree(ret);
            return(NULL);
        }
        ret->content[0] = 0;
    } else
        ret->content = NULL;
    ret->contentIO = NULL;
    return(ret);
}
| 0
|
52,114
|
srs_init(srs_t *srs)
{
	/* Start from an all-zero structure, then install the defaults. */
	memset(srs, 0, sizeof(*srs));

	srs->secrets       = NULL;
	srs->numsecrets    = 0;
	srs->separator     = '=';
	srs->maxage        = 21;
	srs->hashlength    = 4;
	srs->hashmin       = srs->hashlength;
	srs->alwaysrewrite = FALSE;
}
| 0
|
284,722
|
// Depth-first search: true if |node| or any of its platform descendants has
// an accessible name attribute exactly equal to |name|.
bool AccessibilityTreeContainsNodeWithName(BrowserAccessibility* node,
const std::string& name) {
if (node->GetStringAttribute(ax::mojom::StringAttribute::kName) == name)
return true;
for (unsigned i = 0; i < node->PlatformChildCount(); i++) {
if (AccessibilityTreeContainsNodeWithName(node->PlatformGetChild(i), name))
return true;
}
return false;
}
| 0
|
376,145
|
/* Remove a fragment queue from its netns LRU list, under the per-netns
 * lru_lock so concurrent LRU walkers see a consistent list. */
static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
spin_lock(&q->net->lru_lock);
list_del(&q->lru_list);
spin_unlock(&q->net->lru_lock);
}
| 0
|
211,643
|
// Passthrough command handler: forward ResumeTransformFeedback straight to
// the underlying GL API; validation is left to the driver.
error::Error GLES2DecoderPassthroughImpl::DoResumeTransformFeedback() {
api()->glResumeTransformFeedbackFn();
return error::kNoError;
}
| 0
|
311,763
|
// Append "scrolled to x,y" for |frame| to |stringBuilder|, optionally
// prefixed with the frame's name. Frames that have not scrolled (offsets
// within a tiny epsilon of zero) produce no output at all.
static void dumpFrameScrollPosition(WKBundleFrameRef frame, StringBuilder& stringBuilder, FrameNamePolicy shouldIncludeFrameName = ShouldNotIncludeFrameName)
{
    double offsetX = numericWindowPropertyValue(frame, "pageXOffset");
    double offsetY = numericWindowPropertyValue(frame, "pageYOffset");
    if (fabs(offsetX) <= 0.00000001 && fabs(offsetY) <= 0.00000001)
        return;

    if (shouldIncludeFrameName) {
        WKRetainPtr<WKStringRef> frameName(AdoptWK, WKBundleFrameCopyName(frame));
        stringBuilder.appendLiteral("frame '");
        stringBuilder.append(toWTFString(frameName));
        stringBuilder.appendLiteral("' ");
    }

    stringBuilder.appendLiteral("scrolled to ");
    stringBuilder.append(WTF::String::number(offsetX));
    stringBuilder.append(',');
    stringBuilder.append(WTF::String::number(offsetY));
    stringBuilder.append('\n');
}
| 0
|
70,571
|
ofpact_hdrs_equal(const struct ofpact_hdrs *a,
                  const struct ofpact_hdrs *b)
{
    /* Two action headers match only when vendor, type, and OpenFlow
     * version all agree. */
    if (a->vendor != b->vendor) {
        return false;
    }
    if (a->type != b->type) {
        return false;
    }
    return a->ofp_version == b->ofp_version;
}
| 0
|
428,926
|
/* INI modify handler for iconv.input_encoding: rejects oversized charset
 * names, warns that the setting is deprecated when changed at activation or
 * runtime, then stores the value via the generic string updater. */
static PHP_INI_MH(OnUpdateInputEncoding)
{
if (ZSTR_LEN(new_value) >= ICONV_CSNMAXLEN) {
return FAILURE;
}
if (stage & (PHP_INI_STAGE_ACTIVATE | PHP_INI_STAGE_RUNTIME)) {
php_error_docref("ref.iconv", E_DEPRECATED, "Use of iconv.input_encoding is deprecated");
}
/* NOTE(review): OnUpdateString's return value is ignored; this handler
 * reports SUCCESS regardless. */
OnUpdateString(entry, new_value, mh_arg1, mh_arg2, mh_arg3, stage);
return SUCCESS;
}
| 0
|
316,506
|
/*
 * Dispatch an ATA command byte written to the bus's command register.
 * Validates the target device and command, marks the drive busy, runs the
 * command handler, and completes the command immediately if the handler
 * reports it finished synchronously.
 */
void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
IDEState *s;
bool complete;
#if defined(DEBUG_IDE)
printf("ide: CMD=%02x\n", val);
#endif
s = idebus_active_if(bus);
/* ignore commands to non existent slave */
if (s != bus->ifs && !s->blk) {
return;
}
/* Only DEVICE RESET is allowed while BSY or/and DRQ are set */
if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET)
return;
if (!ide_cmd_permitted(s, val)) {
/* Unknown/forbidden command: signal an error and raise the IRQ. */
ide_abort_command(s);
ide_set_irq(s->bus);
return;
}
s->status = READY_STAT | BUSY_STAT;
s->error = 0;
s->io_buffer_offset = 0;
/* Handler returns true when the command completed synchronously. */
complete = ide_cmd_table[val].handler(s, val);
if (complete) {
s->status &= ~BUSY_STAT;
/* ERR_STAT and s->error must agree once the command is done. */
assert(!!s->error == !!(s->status & ERR_STAT));
if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
s->status |= SEEK_STAT;
}
ide_cmd_done(s);
ide_set_irq(s->bus);
}
}
| 0
|
395,687
|
void manager_enumerate(Manager *m) {
        UnitType t;

        assert(m);

        /* Ask every unit type backend to load all units it knows about
         * from disk and/or the kernel. */
        for (t = 0; t < _UNIT_TYPE_MAX; t++) {
                if (!unit_type_supported(t)) {
                        log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(t));
                        continue;
                }

                if (unit_vtable[t]->enumerate)
                        unit_vtable[t]->enumerate(m);
        }

        manager_dispatch_load_queue(m);
}
| 0
|
9,907
|
// Builds a ShortcutInfo from the Java-side WebAPK parameters and writes an
// update request proto to |java_update_request_path|, invoking
// |java_callback| with the result.
//
// Fixes:
//  * The converted badge bitmap was previously discarded
//    (CreateSkBitmapFromJavaBitmap's result was never assigned), so
//    |badge_icon| was always empty even when a badge was supplied.
//  * |icon_hashes| was indexed with |info.icon_urls| indices without
//    checking that the two Java arrays have the same length, reading out
//    of bounds on a mismatch.
static void JNI_WebApkUpdateManager_StoreWebApkUpdateRequestToFile(
    JNIEnv* env,
    const JavaParamRef<jstring>& java_update_request_path,
    const JavaParamRef<jstring>& java_start_url,
    const JavaParamRef<jstring>& java_scope,
    const JavaParamRef<jstring>& java_name,
    const JavaParamRef<jstring>& java_short_name,
    const JavaParamRef<jstring>& java_primary_icon_url,
    const JavaParamRef<jobject>& java_primary_icon_bitmap,
    const JavaParamRef<jstring>& java_badge_icon_url,
    const JavaParamRef<jobject>& java_badge_icon_bitmap,
    const JavaParamRef<jobjectArray>& java_icon_urls,
    const JavaParamRef<jobjectArray>& java_icon_hashes,
    jint java_display_mode,
    jint java_orientation,
    jlong java_theme_color,
    jlong java_background_color,
    const JavaParamRef<jstring>& java_web_manifest_url,
    const JavaParamRef<jstring>& java_webapk_package,
    jint java_webapk_version,
    jboolean java_is_manifest_stale,
    jint java_update_reason,
    const JavaParamRef<jobject>& java_callback) {
  DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
  std::string update_request_path =
      ConvertJavaStringToUTF8(env, java_update_request_path);
  ShortcutInfo info(GURL(ConvertJavaStringToUTF8(env, java_start_url)));
  info.scope = GURL(ConvertJavaStringToUTF8(env, java_scope));
  info.name = ConvertJavaStringToUTF16(env, java_name);
  info.short_name = ConvertJavaStringToUTF16(env, java_short_name);
  info.user_title = info.short_name;
  info.display = static_cast<blink::WebDisplayMode>(java_display_mode);
  info.orientation =
      static_cast<blink::WebScreenOrientationLockType>(java_orientation);
  info.theme_color = (int64_t)java_theme_color;
  info.background_color = (int64_t)java_background_color;
  info.best_primary_icon_url =
      GURL(ConvertJavaStringToUTF8(env, java_primary_icon_url));
  info.best_badge_icon_url =
      GURL(ConvertJavaStringToUTF8(env, java_badge_icon_url));
  info.manifest_url = GURL(ConvertJavaStringToUTF8(env, java_web_manifest_url));
  base::android::AppendJavaStringArrayToStringVector(env, java_icon_urls,
                                                     &info.icon_urls);
  std::vector<std::string> icon_hashes;
  base::android::AppendJavaStringArrayToStringVector(env, java_icon_hashes,
                                                     &icon_hashes);
  std::map<std::string, std::string> icon_url_to_murmur2_hash;
  // Only pair URLs with hashes when the Java side sent matching-length
  // arrays; indexing |icon_hashes| by |info.icon_urls| positions would
  // otherwise read out of bounds.
  if (info.icon_urls.size() == icon_hashes.size()) {
    for (size_t i = 0; i < info.icon_urls.size(); ++i)
      icon_url_to_murmur2_hash[info.icon_urls[i]] = icon_hashes[i];
  }
  gfx::JavaBitmap java_primary_icon_bitmap_lock(java_primary_icon_bitmap);
  SkBitmap primary_icon =
      gfx::CreateSkBitmapFromJavaBitmap(java_primary_icon_bitmap_lock);
  primary_icon.setImmutable();
  SkBitmap badge_icon;
  if (!java_badge_icon_bitmap.is_null()) {
    gfx::JavaBitmap java_badge_icon_bitmap_lock(java_badge_icon_bitmap);
    // Assign the conversion result (it was previously dropped on the floor).
    badge_icon =
        gfx::CreateSkBitmapFromJavaBitmap(java_badge_icon_bitmap_lock);
    badge_icon.setImmutable();
  }
  std::string webapk_package;
  ConvertJavaStringToUTF8(env, java_webapk_package, &webapk_package);
  WebApkUpdateReason update_reason =
      static_cast<WebApkUpdateReason>(java_update_reason);
  WebApkInstaller::StoreUpdateRequestToFile(
      base::FilePath(update_request_path), info, primary_icon, badge_icon,
      webapk_package, std::to_string(java_webapk_version),
      icon_url_to_murmur2_hash, java_is_manifest_stale, update_reason,
      base::BindOnce(&base::android::RunBooleanCallbackAndroid,
                     ScopedJavaGlobalRef<jobject>(java_callback)));
}
| 1
|
356,146
|
/*
 * Perform an O_DIRECT write of @count bytes at @pos.
 *
 * Flushes and invalidates the affected page-cache range before and after
 * the direct I/O so buffered readers see the new data, updates i_size on
 * extension, advances *ppos, and syncs metadata for O_SYNC files.
 * Returns bytes written, -EIOCBQUEUED for queued AIO, or a negative errno.
 */
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long *nr_segs, loff_t pos, loff_t *ppos,
size_t count, size_t ocount)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t written;
size_t write_len;
pgoff_t end;
/* Caller trimmed count (e.g. for limits); shrink the iovec to match. */
if (count != ocount)
*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
/*
 * Unmap all mmappings of the file up-front.
 *
 * This will cause any pte dirty bits to be propagated into the
 * pageframes for the subsequent filemap_write_and_wait().
 */
write_len = iov_length(iov, *nr_segs);
end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
if (mapping_mapped(mapping))
unmap_mapping_range(mapping, pos, write_len, 0);
written = filemap_write_and_wait(mapping);
if (written)
goto out;
/*
 * After a write we want buffered reads to be sure to go to disk to get
 * the new data. We invalidate clean cached page from the region we're
 * about to write. We do this *before* the write so that we can return
 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
 */
if (mapping->nrpages) {
written = invalidate_inode_pages2_range(mapping,
pos >> PAGE_CACHE_SHIFT, end);
if (written)
goto out;
}
written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
/*
 * Finally, try again to invalidate clean pages which might have been
 * cached by non-direct readahead, or faulted in by get_user_pages()
 * if the source of the write was an mmap'ed region of the file
 * we're writing. Either one is a pretty crazy thing to do,
 * so we don't support it 100%. If this invalidation
 * fails, tough, the write still worked...
 */
if (mapping->nrpages) {
invalidate_inode_pages2_range(mapping,
pos >> PAGE_CACHE_SHIFT, end);
}
if (written > 0) {
loff_t end = pos + written;
/* Extend i_size if we wrote past EOF (not for block devices). */
if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
i_size_write(inode, end);
mark_inode_dirty(inode);
}
*ppos = end;
}
/*
 * Sync the fs metadata but not the minor inode changes and
 * of course not the data as we did direct DMA for the IO.
 * i_mutex is held, which protects generic_osync_inode() from
 * livelocking. AIO O_DIRECT ops attempt to sync metadata here.
 */
out:
if ((written >= 0 || written == -EIOCBQUEUED) &&
((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
if (err < 0)
written = err;
}
return written;
}
| 0
|
496,378
|
/*
 * Verify that the server certificate matches the connection's host name.
 *
 * Per RFC 6125/2818 semantics: if the certificate carries subjectAltName
 * entries of the relevant type (dNSName for host names, iPAddress for
 * literal IPs), ONLY those are consulted; the subject Common Name is used
 * as a fallback only when no SAN of the right kind exists.
 * Returns CURLE_OK on match, CURLE_PEER_FAILED_VERIFICATION on mismatch,
 * or CURLE_OUT_OF_MEMORY.
 */
CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn,
X509 *server_cert)
{
bool matched = FALSE;
int target = GEN_DNS; /* target type, GEN_DNS or GEN_IPADD */
size_t addrlen = 0;
STACK_OF(GENERAL_NAME) *altnames;
#ifdef ENABLE_IPV6
struct in6_addr addr;
#else
struct in_addr addr;
#endif
CURLcode result = CURLE_OK;
bool dNSName = FALSE; /* if a dNSName field exists in the cert */
bool iPAddress = FALSE; /* if a iPAddress field exists in the cert */
const char * const hostname = SSL_HOST_NAME();
const char * const dispname = SSL_HOST_DISPNAME();
size_t hostlen = strlen(hostname);
/* Decide up front whether we are matching an IP literal or a DNS name. */
#ifdef ENABLE_IPV6
if(conn->bits.ipv6_ip &&
Curl_inet_pton(AF_INET6, hostname, &addr)) {
target = GEN_IPADD;
addrlen = sizeof(struct in6_addr);
}
else
#endif
if(Curl_inet_pton(AF_INET, hostname, &addr)) {
target = GEN_IPADD;
addrlen = sizeof(struct in_addr);
}
/* get a "list" of alternative names */
altnames = X509_get_ext_d2i(server_cert, NID_subject_alt_name, NULL, NULL);
if(altnames) {
#ifdef OPENSSL_IS_BORINGSSL
size_t numalts;
size_t i;
#else
int numalts;
int i;
#endif
bool dnsmatched = FALSE;
bool ipmatched = FALSE;
/* get amount of alternatives, RFC2459 claims there MUST be at least
one, but we don't depend on it... */
numalts = sk_GENERAL_NAME_num(altnames);
/* loop through all alternatives - until a dnsmatch */
for(i = 0; (i < numalts) && !dnsmatched; i++) {
/* get a handle to alternative name number i */
const GENERAL_NAME *check = sk_GENERAL_NAME_value(altnames, i);
/* Remember which SAN kinds exist: their mere presence disables the
   Common-Name fallback for that kind of target. */
if(check->type == GEN_DNS)
dNSName = TRUE;
else if(check->type == GEN_IPADD)
iPAddress = TRUE;
/* only check alternatives of the same type the target is */
if(check->type == target) {
/* get data and length */
const char *altptr = (char *)ASN1_STRING_get0_data(check->d.ia5);
size_t altlen = (size_t) ASN1_STRING_length(check->d.ia5);
switch(target) {
case GEN_DNS: /* name/pattern comparison */
/* The OpenSSL man page explicitly says: "In general it cannot be
assumed that the data returned by ASN1_STRING_data() is null
terminated or does not contain embedded nulls." But also that
"The actual format of the data will depend on the actual string
type itself: for example for an IA5String the data will be ASCII"
It has been however verified that in 0.9.6 and 0.9.7, IA5String
is always null-terminated.
*/
if((altlen == strlen(altptr)) &&
/* if this isn't true, there was an embedded zero in the name
string and we cannot match it. */
subj_alt_hostcheck(data,
altptr,
altlen, hostname, hostlen, dispname)) {
dnsmatched = TRUE;
}
break;
case GEN_IPADD: /* IP address comparison */
/* compare alternative IP address if the data chunk is the same size
our server IP address is */
if((altlen == addrlen) && !memcmp(altptr, &addr, altlen)) {
ipmatched = TRUE;
infof(data,
" subjectAltName: host \"%s\" matched cert's IP address!",
dispname);
}
break;
}
}
}
GENERAL_NAMES_free(altnames);
if(dnsmatched || ipmatched)
matched = TRUE;
}
if(matched)
/* an alternative name matched */
;
else if(dNSName || iPAddress) {
/* SANs of the target's kind existed but none matched: hard failure,
   no CN fallback allowed. */
infof(data, " subjectAltName does not match %s", dispname);
failf(data, "SSL: no alternative certificate subject name matches "
"target host name '%s'", dispname);
result = CURLE_PEER_FAILED_VERIFICATION;
}
else {
/* we have to look to the last occurrence of a commonName in the
distinguished one to get the most significant one. */
int i = -1;
unsigned char *peer_CN = NULL;
int peerlen = 0;
/* The following is done because of a bug in 0.9.6b */
X509_NAME *name = X509_get_subject_name(server_cert);
if(name) {
int j;
while((j = X509_NAME_get_index_by_NID(name, NID_commonName, i)) >= 0)
i = j;
}
/* we have the name entry and we will now convert this to a string
that we can use for comparison. Doing this we support BMPstring,
UTF8, etc. */
if(i >= 0) {
ASN1_STRING *tmp =
X509_NAME_ENTRY_get_data(X509_NAME_get_entry(name, i));
/* In OpenSSL 0.9.7d and earlier, ASN1_STRING_to_UTF8 fails if the input
is already UTF-8 encoded. We check for this case and copy the raw
string manually to avoid the problem. This code can be made
conditional in the future when OpenSSL has been fixed. */
if(tmp) {
if(ASN1_STRING_type(tmp) == V_ASN1_UTF8STRING) {
peerlen = ASN1_STRING_length(tmp);
if(peerlen >= 0) {
peer_CN = OPENSSL_malloc(peerlen + 1);
if(peer_CN) {
memcpy(peer_CN, ASN1_STRING_get0_data(tmp), peerlen);
peer_CN[peerlen] = '\0';
}
else
result = CURLE_OUT_OF_MEMORY;
}
}
else /* not a UTF8 name */
peerlen = ASN1_STRING_to_UTF8(&peer_CN, tmp);
/* Reject CNs with embedded NULs (classic spoofing trick). */
if(peer_CN && (curlx_uztosi(strlen((char *)peer_CN)) != peerlen)) {
/* there was a terminating zero before the end of string, this
cannot match and we return failure! */
failf(data, "SSL: illegal cert name field");
result = CURLE_PEER_FAILED_VERIFICATION;
}
}
}
if(result)
/* error already detected, pass through */
;
else if(!peer_CN) {
failf(data,
"SSL: unable to obtain common name from peer certificate");
result = CURLE_PEER_FAILED_VERIFICATION;
}
else if(!Curl_cert_hostcheck((const char *)peer_CN,
peerlen, hostname, hostlen)) {
failf(data, "SSL: certificate subject name '%s' does not match "
"target host name '%s'", peer_CN, dispname);
result = CURLE_PEER_FAILED_VERIFICATION;
}
else {
infof(data, " common name: %s (matched)", peer_CN);
}
if(peer_CN)
OPENSSL_free(peer_CN);
}
return result;
}
| 0
|
253,310
|
// Copy the initiator tab's render-source name into the preview dialog's
// PrintPreviewUI, so the preview can display where the print came from.
// No-op if the initiator is gone or the dialog has no WebUI yet.
void PrintPreviewDialogController::SaveInitiatorTitle(
WebContents* preview_dialog) {
WebContents* initiator = GetInitiator(preview_dialog);
if (initiator && preview_dialog->GetWebUI()) {
PrintPreviewUI* print_preview_ui = static_cast<PrintPreviewUI*>(
preview_dialog->GetWebUI()->GetController());
print_preview_ui->SetInitiatorTitle(
PrintViewManager::FromWebContents(initiator)->RenderSourceName());
}
}
| 0
|
490,852
|
/*
 * Build and launch the remote rsync command (or the batch/local-server
 * equivalent), returning the child's pid (-1 for batch mode) and the
 * in/out file descriptors through *f_in_p / *f_out_p.
 *
 * For a remote run the remote-shell command string is tokenized in place
 * (honoring single/double quotes), then machine/user/options/remote args
 * are appended into args[]. The quote-parsing loop is order-sensitive:
 * leave it byte-for-byte alone unless you have tests for quoted commands.
 */
static pid_t do_cmd(char *cmd, char *machine, char *user, char **remote_argv, int remote_argc,
int *f_in_p, int *f_out_p)
{
int i, argc = 0;
char *args[MAX_ARGS], *need_to_free = NULL;
pid_t pid;
int dash_l_set = 0;
if (!read_batch && !local_server) {
char *t, *f, in_quote = '\0';
char *rsh_env = getenv(RSYNC_RSH_ENV);
if (!cmd)
cmd = rsh_env;
if (!cmd)
cmd = RSYNC_RSH;
/* Work on a private copy: tokenization writes NULs into the string. */
cmd = need_to_free = strdup(cmd);
for (t = f = cmd; *f; f++) {
if (*f == ' ')
continue;
/* Comparison leaves rooms for server_options(). */
if (argc >= MAX_ARGS - MAX_SERVER_ARGS)
goto arg_overflow;
args[argc++] = t;
/* Copy one word, collapsing quotes; doubled quote chars inside a
 * quoted region produce a literal quote. */
while (*f != ' ' || in_quote) {
if (!*f) {
if (in_quote) {
rprintf(FERROR,
"Missing trailing-%c in remote-shell command.\n",
in_quote);
exit_cleanup(RERR_SYNTAX);
}
f--;
break;
}
if (*f == '\'' || *f == '"') {
if (!in_quote) {
in_quote = *f++;
continue;
}
if (*f == in_quote && *++f != in_quote) {
in_quote = '\0';
continue;
}
}
*t++ = *f++;
}
*t++ = '\0';
}
/* NOTE: must preserve t == start of command name until the end of the args handling! */
if ((t = strrchr(cmd, '/')) != NULL)
t++;
else
t = cmd;
/* Check to see if we've already been given '-l user' in the remote-shell command. */
for (i = 0; i < argc-1; i++) {
if (!strcmp(args[i], "-l") && args[i+1][0] != '-')
dash_l_set = 1;
}
#ifdef HAVE_REMSH
/* remsh (on HPUX) takes the arguments the other way around */
args[argc++] = machine;
if (user && !(daemon_connection && dash_l_set)) {
args[argc++] = "-l";
args[argc++] = user;
}
#else
if (user && !(daemon_connection && dash_l_set)) {
args[argc++] = "-l";
args[argc++] = user;
}
#ifdef AF_INET
if (default_af_hint == AF_INET && strcmp(t, "ssh") == 0)
args[argc++] = "-4"; /* we're using ssh so we can add a -4 option */
#endif
#ifdef AF_INET6
if (default_af_hint == AF_INET6 && strcmp(t, "ssh") == 0)
args[argc++] = "-6"; /* we're using ssh so we can add a -6 option */
#endif
args[argc++] = machine;
#endif
args[argc++] = rsync_path;
if (blocking_io < 0 && (strcmp(t, "rsh") == 0 || strcmp(t, "remsh") == 0))
blocking_io = 1;
if (daemon_connection > 0) {
args[argc++] = "--server";
args[argc++] = "--daemon";
} else
server_options(args, &argc);
if (argc >= MAX_ARGS - 2)
goto arg_overflow;
}
args[argc++] = ".";
/* Append the caller-supplied remote arguments (sanitized). */
if (!daemon_connection) {
while (remote_argc > 0) {
if (argc >= MAX_ARGS - 1) {
arg_overflow:
rprintf(FERROR, "internal: args[] overflowed in do_cmd()\n");
exit_cleanup(RERR_SYNTAX);
}
args[argc++] = safe_arg(NULL, *remote_argv++);
remote_argc--;
}
}
args[argc] = NULL;
if (DEBUG_GTE(CMD, 2)) {
for (i = 0; i < argc; i++)
rprintf(FCLIENT, "cmd[%d]=%s ", i, args[i]);
rprintf(FCLIENT, "\n");
}
if (read_batch) {
/* Batch replay: read from the batch file, feed the generator via a pipe;
 * there is no child process. */
int from_gen_pipe[2];
set_allow_inc_recurse();
if (fd_pair(from_gen_pipe) < 0) {
rsyserr(FERROR, errno, "pipe");
exit_cleanup(RERR_IPC);
}
batch_gen_fd = from_gen_pipe[0];
*f_out_p = from_gen_pipe[1];
*f_in_p = batch_fd;
pid = (pid_t)-1; /* no child pid */
#ifdef ICONV_CONST
setup_iconv();
#endif
trust_sender_filter = 1;
} else if (local_server) {
/* If the user didn't request --[no-]whole-file, force
 * it on, but only if we're not batch processing. */
if (whole_file < 0 && !write_batch)
whole_file = 1;
set_allow_inc_recurse();
pid = local_child(argc, args, f_in_p, f_out_p, child_main);
#ifdef ICONV_CONST
setup_iconv();
#endif
} else {
pid = piped_child(args, f_in_p, f_out_p);
#ifdef ICONV_CONST
setup_iconv();
#endif
if (protect_args && !daemon_connection)
send_protected_args(*f_out_p, args);
}
if (need_to_free)
free(need_to_free);
return pid;
}
| 0
|
164,910
|
// Implements element.insertAdjacentHTML(where, markup): parse |markup| into
// a fragment in this element's context and insert it at the position named
// by |where| ("beforebegin", "afterbegin", "beforeend", "afterend").
void HTMLElement::insertAdjacentHTML(const String& where, const String& markup, ExceptionCode& ec)
{
// Resolves which element serves as the parsing context; sets |ec| and
// returns null for invalid |where| / detached positions.
Element* contextElement = contextElementForInsertion(where, this, ec);
if (!contextElement)
return;
ExceptionCode ignoredEc = 0; // FIXME: We should propagate a syntax error exception out here.
RefPtr<DocumentFragment> fragment = createFragmentForInnerOuterHTML(markup, this, ignoredEc);
if (ignoredEc)
return;
insertAdjacent(where, fragment.get(), ec);
}
| 0
|
402,876
|
// Returns the field's "reference" flag as recorded on the t_field AST node.
bool is_reference(t_field* tfield) { return tfield->get_reference(); }
| 0
|
371,037
|
// Convert decoded raw data (rawdata.*) into the 4-component imgdata.image
// array, applying the user crop box, Fuji rotation, black-level subtraction
// (Fuji path), and the decoder-specific copy strategy (flat Bayer, 4-component,
// or legacy). Returns LIBRAW_SUCCESS; throws LIBRAW_EXCEPTION_BAD_CROP on a
// degenerate crop. The index arithmetic here is order-critical; code is
// left untouched, comments only.
int LibRaw::raw2image_ex(void)
{
CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW);
raw2image_start();
// process cropping
int do_crop = 0;
unsigned save_filters = imgdata.idata.filters;
unsigned save_width = S.width;
// ~0 cropbox entries mean "no crop requested".
if (~O.cropbox[2] && ~O.cropbox[3])
{
int crop[4],c,filt;
for(int c=0;c<4;c++)
{
crop[c] = O.cropbox[c];
if(crop[c]<0)
crop[c]=0;
}
// Fuji sensors: crop origin must be aligned to a multiple of 4.
if(IO.fwidth)
{
crop[0] = (crop[0]/4)*4;
crop[1] = (crop[1]/4)*4;
}
do_crop = 1;
crop[2] = MIN (crop[2], (signed) S.width-crop[0]);
crop[3] = MIN (crop[3], (signed) S.height-crop[1]);
if (crop[2] <= 0 || crop[3] <= 0)
throw LIBRAW_EXCEPTION_BAD_CROP;
// adjust sizes!
S.left_margin+=crop[0];
S.top_margin+=crop[1];
S.width=crop[2];
S.height=crop[3];
S.iheight = (S.height + IO.shrink) >> IO.shrink;
S.iwidth = (S.width + IO.shrink) >> IO.shrink;
// Recompute the Bayer pattern descriptor for the shifted origin.
if(!IO.fwidth && imgdata.idata.filters)
{
for (filt=c=0; c < 16; c++)
filt |= FC((c >> 1)+(crop[1]),
(c & 1)+(crop[0])) << c*2;
imgdata.idata.filters = filt;
}
}
// Fuji rotated-sensor path: data is copied with a 45-degree re-index.
if(IO.fwidth)
{
ushort fiwidth,fiheight;
if(do_crop)
{
IO.fuji_width = S.width >> !libraw_internal_data.unpacker_data.fuji_layout;
IO.fwidth = (S.height >> libraw_internal_data.unpacker_data.fuji_layout) + IO.fuji_width;
IO.fheight = IO.fwidth - 1;
}
fiheight = (IO.fheight + IO.shrink) >> IO.shrink;
fiwidth = (IO.fwidth + IO.shrink) >> IO.shrink;
if(imgdata.image)
{
imgdata.image = (ushort (*)[4])realloc(imgdata.image,fiheight*fiwidth*sizeof (*imgdata.image));
memset(imgdata.image,0,fiheight*fiwidth *sizeof (*imgdata.image));
}
else
imgdata.image = (ushort (*)[4]) calloc (fiheight*fiwidth, sizeof (*imgdata.image));
merror (imgdata.image, "raw2image_ex()");
// Per-channel black = per-channel offset + global black.
int cblk[4],i;
for(i=0;i<4;i++)
cblk[i] = C.cblack[i]+C.black;
ZERO(C.channel_maximum);
int row,col;
for(row=0;row<S.height;row++)
{
for(col=0;col<S.width;col++)
{
int r,c;
if (libraw_internal_data.unpacker_data.fuji_layout) {
r = IO.fuji_width - 1 - col + (row >> 1);
c = col + ((row+1) >> 1);
} else {
r = IO.fuji_width - 1 + row - (col >> 1);
c = row + ((col+1) >> 1);
}
int val = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_width
+(col+S.left_margin)];
int cc = FCF(row,col);
// Subtract black, clamping at zero; track per-channel maximum.
if(val > cblk[cc])
val -= cblk[cc];
else
val = 0;
imgdata.image[((r) >> IO.shrink)*fiwidth + ((c) >> IO.shrink)][cc] = val;
if(C.channel_maximum[cc] < val) C.channel_maximum[cc] = val;
}
}
// Black was already subtracted above, so zero the stored levels.
C.maximum -= C.black;
ZERO(C.cblack);
C.black = 0;
// restore fuji sizes!
S.height = IO.fheight;
S.width = IO.fwidth;
S.iheight = (S.height + IO.shrink) >> IO.shrink;
S.iwidth = (S.width + IO.shrink) >> IO.shrink;
S.raw_height -= 2*S.top_margin;
}
else
{
// Non-Fuji path: allocate/clear the destination, then copy according to
// the decoder's data layout.
if(imgdata.image)
{
imgdata.image = (ushort (*)[4]) realloc (imgdata.image,S.iheight*S.iwidth
*sizeof (*imgdata.image));
memset(imgdata.image,0,S.iheight*S.iwidth *sizeof (*imgdata.image));
}
else
imgdata.image = (ushort (*)[4]) calloc (S.iheight*S.iwidth, sizeof (*imgdata.image));
merror (imgdata.image, "raw2image_ex()");
libraw_decoder_info_t decoder_info;
get_decoder_info(&decoder_info);
if(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD)
{
if(decoder_info.decoder_flags & LIBRAW_DECODER_USEBAYER2)
#if defined(LIBRAW_USE_OPENMP)
#pragma omp parallel for default(shared)
#endif
for(int row = 0; row < S.height; row++)
for(int col = 0; col < S.width; col++)
imgdata.image[(row >> IO.shrink)*S.iwidth + (col>>IO.shrink)][fc(row,col)]
= imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_width
+(col+S.left_margin)];
else
#if defined(LIBRAW_USE_OPENMP)
#pragma omp parallel for default(shared)
#endif
for(int row = 0; row < S.height; row++)
{
// Bayer rows alternate between two colors; cache them per row.
int colors[2];
for (int xx=0;xx<2;xx++)
colors[xx] = COLOR(row,xx);
for(int col = 0; col < S.width; col++)
{
int cc = colors[col&1];
imgdata.image[(row >> IO.shrink)*S.iwidth + (col>>IO.shrink)][cc] =
imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_width
+(col+S.left_margin)];
}
}
}
else if (decoder_info.decoder_flags & LIBRAW_DECODER_4COMPONENT)
{
// FC0 evaluates the pre-crop filter pattern (save_filters).
#define FC0(row,col) (save_filters >> ((((row) << 1 & 14) + ((col) & 1)) << 1) & 3)
if(IO.shrink)
#if defined(LIBRAW_USE_OPENMP)
#pragma omp parallel for default(shared)
#endif
for(int row = 0; row < S.height; row++)
for(int col = 0; col < S.width; col++)
imgdata.image[(row >> IO.shrink)*S.iwidth + (col>>IO.shrink)][FC(row,col)]
= imgdata.rawdata.color_image[(row+S.top_margin)*S.raw_width
+S.left_margin+col]
[FC0(row+S.top_margin,col+S.left_margin)];
#undef FC0
else
#if defined(LIBRAW_USE_OPENMP)
#pragma omp parallel for default(shared)
#endif
for(int row = 0; row < S.height; row++)
memmove(&imgdata.image[row*S.width],
&imgdata.rawdata.color_image[(row+S.top_margin)*S.raw_width+S.left_margin],
S.width*sizeof(*imgdata.image));
}
else if(decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY)
{
if(do_crop)
#if defined(LIBRAW_USE_OPENMP)
#pragma omp parallel for default(shared)
#endif
for(int row = 0; row < S.height; row++)
memmove(&imgdata.image[row*S.width],
&imgdata.rawdata.color_image[(row+S.top_margin)*save_width+S.left_margin],
S.width*sizeof(*imgdata.image));
else
memmove(imgdata.image,imgdata.rawdata.color_image,
S.width*S.height*sizeof(*imgdata.image));
}
if(imgdata.rawdata.use_ph1_correct) // Phase one unpacked!
phase_one_correct();
}
return LIBRAW_SUCCESS;
}
| 0
|
210,877
|
/* Object handler: return a writable pointer to a ZipArchive property.
 * Properties backed by an internal prop handler (hnd != NULL) are virtual
 * and cannot hand out a writable slot, so NULL is returned for them; all
 * other properties fall back to the standard object handler. */
static zval *php_zip_get_property_ptr_ptr(zval *object, zval *member, int type, void **cache_slot) /* {{{ */
{
ze_zip_object *obj;
zval tmp_member;
zval *retval = NULL;
zip_prop_handler *hnd = NULL;
zend_object_handlers *std_hnd;
/* Non-string property names are coerced to string in a temporary. */
if (Z_TYPE_P(member) != IS_STRING) {
ZVAL_COPY(&tmp_member, member);
convert_to_string(&tmp_member);
member = &tmp_member;
cache_slot = NULL;
}
obj = Z_ZIP_P(object);
if (obj->prop_handler != NULL) {
hnd = zend_hash_find_ptr(obj->prop_handler, Z_STR_P(member));
}
if (hnd == NULL) {
std_hnd = zend_get_std_object_handlers();
retval = std_hnd->get_property_ptr_ptr(object, member, type, cache_slot);
}
/* Release the temporary string if one was created above. */
if (member == &tmp_member) {
zval_dtor(member);
}
return retval;
}
/* }}} */
|
49,037
|
// Read the gRPC response byte counter stored in this request's ESP module
// context. NOTE(review): ctx is dereferenced unchecked — callers appear to
// guarantee the module context exists; confirm before reuse elsewhere.
int64_t NgxEspRequest::GetGrpcResponseBytes() {
ngx_esp_request_ctx_t *ctx = ngx_http_esp_get_module_ctx(r_);
return ctx->grpc_response_bytes;
}
| 0
|
125,047
|
/*
 * Apply do_msr() (a get- or set-MSR callback) to each entry in the array,
 * under the kvm srcu read lock. Stops at the first entry the callback
 * rejects and returns the number of entries successfully processed.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
struct kvm_msr_entry *entries,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
int i, idx;
idx = srcu_read_lock(&vcpu->kvm->srcu);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
srcu_read_unlock(&vcpu->kvm->srcu, idx);
return i;
}
| 0
|
381,978
|
/* Boyer-Moore delta2 (good-suffix) search step for a single keyword.
 *
 * *tpp points just past a window whose last two characters already matched
 * gc1/gc2; sp points just past the pattern, len is its length, trans is the
 * optional case-folding table, d1 the delta1 (bad-character) table.
 * On a full match, *tpp is moved to the match start and true is returned.
 * Otherwise *tpp is advanced past the searched region and false is
 * returned so the caller can resume its fast skip loop. */
bm_delta2_search (char const **tpp, char const *ep, char const *sp, int len,
char const *trans, char gc1, char gc2,
unsigned char const *d1, kwset_t kwset)
{
char const *tp = *tpp;
int d = len, skip = 0;
while (true)
{
int i = 2;
/* Second-to-last character matches: extend the comparison leftward. */
if (tr (trans, tp[-2]) == gc2)
{
while (++i <= d)
if (tr (trans, tp[-i]) != tr (trans, sp[-i]))
break;
if (i > d)
{
/* Verify the remainder that earlier shifts already covered. */
for (i = d + skip + 1; i <= len; ++i)
if (tr (trans, tp[-i]) != tr (trans, sp[-i]))
break;
if (i > len)
{
*tpp = tp - len;
return true;
}
}
}
/* Mismatch at offset i-2: shift by the good-suffix delta2 value. */
tp += d = kwset->shift[i - 2];
if (tp > ep)
break;
/* If the new window's last char can't match, apply delta1 and hand
   control back to the caller's skip loop. */
if (tr (trans, tp[-1]) != gc1)
{
if (d1)
tp += d1[U(tp[-1])];
break;
}
skip = i - 1;
}
*tpp = tp;
return false;
}
| 0
|
389,305
|
/* Update the maximum delay deviation ratio used when filtering samples
 * from this NTP source, and log the new value at INFO level. */
NCR_ModifyMaxdelaydevratio(NCR_Instance inst, double new_max_delay_dev_ratio)
{
  inst->max_delay_dev_ratio = new_max_delay_dev_ratio;
  LOG(LOGS_INFO, LOGF_NtpCore, "Source %s new max delay dev ratio %f",
      UTI_IPToString(&inst->remote_addr.ip_addr), new_max_delay_dev_ratio);
}
| 0
|
418,271
|
// Hook called before execution; the default is a no-op that subclasses
// may override.
virtual void pre_exec() {}
| 0
|
324,168
|
/* Read the next media packet from a GXF stream.
 *
 * Skips FLT (index) and non-media packets, validates the media packet
 * header, and returns the payload in @pkt with stream index and dts set.
 *
 * Returns the number of payload bytes read, a negative error code on a
 * failed payload read, -1 on sync loss, or AVERROR(EIO) at end of input.
 */
static int gxf_packet(AVFormatContext *s, AVPacket *pkt) {
    ByteIOContext *pb = s->pb;
    pkt_type_t pkt_type;
    int pkt_len;
    while (!url_feof(pb)) {
        int track_type, track_id, ret;
        int field_nr;
        if (!parse_packet_header(pb, &pkt_type, &pkt_len)) {
            if (!url_feof(pb))
                av_log(s, AV_LOG_ERROR, "GXF: sync lost\n");
            return -1;
        }
        if (pkt_type == PKT_FLT) {
            gxf_read_index(s, pkt_len);
            continue;
        }
        if (pkt_type != PKT_MEDIA) {
            url_fskip(pb, pkt_len);
            continue;
        }
        /* A media packet carries a 16-byte sub-header; anything shorter
         * is malformed and would underflow pkt_len below. */
        if (pkt_len < 16) {
            av_log(s, AV_LOG_ERROR, "GXF: invalid media packet length\n");
            continue;
        }
        pkt_len -= 16;
        track_type = get_byte(pb);
        track_id   = get_byte(pb);
        field_nr   = get_be32(pb);
        get_be32(pb); // field information
        get_be32(pb); // "timeline" field number
        get_byte(pb); // flags
        get_byte(pb); // reserved
        // NOTE: there is also data length information in the
        // field information, it might be better to take this into account
        // as well.
        ret = av_get_packet(pb, pkt, pkt_len);
        /* BUGFIX: propagate read failures instead of populating and
         * returning a packet whose payload was never read. */
        if (ret < 0)
            return ret;
        pkt->stream_index = get_sindex(s, track_id, track_type);
        pkt->dts = field_nr;
        return ret;
    }
    return AVERROR(EIO);
}
| 1
|
318,854
|
/* Test and clear the migration dirty bit for the page at @offset inside
 * @mr.  When the bit was set, the global dirty-page counter is
 * decremented.  Returns whether the page was dirty. */
static inline bool migration_bitmap_test_and_reset_dirty(MemoryRegion *mr,
                                                         ram_addr_t offset)
{
    int page = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
    bool was_dirty = test_and_clear_bit(page, migration_bitmap);

    if (was_dirty) {
        migration_dirty_pages--;
    }
    return was_dirty;
}
| 0
|
59,434
|
// Re-initialize the process-wide PCRE cache with the kind selected by the
// Eval.PCRECacheType runtime option ("static", "lru" or "scalable").
// Unrecognized values warn and fall back to the scalable cache.
void pcre_reinit() {
  auto const& type = RuntimeOption::EvalPCRECacheType;
  auto kind = PCRECache::CacheKind::Scalable;
  if (type == "static") {
    kind = PCRECache::CacheKind::Static;
  } else if (type == "lru") {
    kind = PCRECache::CacheKind::Lru;
  } else if (type != "scalable") {
    Logger::Warning("Eval.PCRECacheType should be either static, "
                    "lru or scalable");
  }
  s_pcreCache.reinit(kind);
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.