idx
int64 | func
string | target
int64 |
|---|---|---|
468,381
|
g_socket_client_init (GSocketClient *client)
{
client->priv = g_socket_client_get_instance_private (client);
client->priv->type = G_SOCKET_TYPE_STREAM;
client->priv->app_proxies = g_hash_table_new_full (g_str_hash,
g_str_equal,
g_free,
NULL);
}
| 0
|
224,762
|
void iref_box_del(GF_Box *s)
{
GF_ItemReferenceBox *ptr = (GF_ItemReferenceBox *)s;
if (ptr == NULL) return;
gf_list_del(ptr->references);
gf_free(ptr);
}
| 0
|
220,928
|
static void mpgviddmx_finalize(GF_Filter *filter)
{
GF_MPGVidDmxCtx *ctx = gf_filter_get_udta(filter);
if (ctx->bs) gf_bs_del(ctx->bs);
if (ctx->vparser) gf_m4v_parser_del_no_bs(ctx->vparser);
if (ctx->indexes) gf_free(ctx->indexes);
if (ctx->hdr_store) gf_free(ctx->hdr_store);
if (ctx->pck_queue) {
while (gf_list_count(ctx->pck_queue)) {
GF_FilterPacket *pck = gf_list_pop_back(ctx->pck_queue);
gf_filter_pck_discard(pck);
}
gf_list_del(ctx->pck_queue);
}
if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
if (ctx->importer) {
GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s Import results: %d VOPs (%d Is - %d Ps - %d Bs)\n", ctx->is_mpg12 ? "MPEG-1/2" : "MPEG-4 (Part 2)", ctx->nb_frames, ctx->nb_i, ctx->nb_p, ctx->nb_b));
if (ctx->nb_b) {
GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("\t%d max consecutive B-frames%s\n", ctx->max_b, ctx->is_packed ? " - packed bitstream" : "" ));
}
if (ctx->is_vfr && ctx->nb_b && ctx->is_packed) {
GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("Warning: Mix of non-coded frames: packed bitstream and encoder skiped - unpredictable timing\n"));
}
}
}
| 0
|
359,368
|
DEFUN (show_bgp_summary,
show_bgp_summary_cmd,
"show bgp summary",
SHOW_STR
BGP_STR
"Summary of BGP neighbor status\n")
{
return bgp_show_summary_vty (vty, NULL, AFI_IP6, SAFI_UNICAST);
}
| 0
|
275,958
|
static uECC_word_t regularize_k(const uECC_word_t * const k,
uECC_word_t *k0,
uECC_word_t *k1,
uECC_Curve curve) {
wordcount_t num_n_words = BITS_TO_WORDS(curve->num_n_bits);
bitcount_t num_n_bits = curve->num_n_bits;
uECC_word_t carry = uECC_vli_add(k0, k, curve->n, num_n_words) ||
(num_n_bits < ((bitcount_t)num_n_words * uECC_WORD_SIZE * 8) &&
uECC_vli_testBit(k0, num_n_bits));
uECC_vli_add(k1, k0, curve->n, num_n_words);
return carry;
}
| 0
|
208,983
|
jas_image_t *jp2_decode(jas_stream_t *in, char *optstr)
{
jp2_box_t *box;
int found;
jas_image_t *image;
jp2_dec_t *dec;
bool samedtype;
int dtype;
unsigned int i;
jp2_cmap_t *cmapd;
jp2_pclr_t *pclrd;
jp2_cdef_t *cdefd;
unsigned int channo;
int newcmptno;
int_fast32_t *lutents;
#if 0
jp2_cdefchan_t *cdefent;
int cmptno;
#endif
jp2_cmapent_t *cmapent;
jas_icchdr_t icchdr;
jas_iccprof_t *iccprof;
dec = 0;
box = 0;
image = 0;
if (!(dec = jp2_dec_create())) {
goto error;
}
/* Get the first box. This should be a JP box. */
if (!(box = jp2_box_get(in))) {
jas_eprintf("error: cannot get box\n");
goto error;
}
if (box->type != JP2_BOX_JP) {
jas_eprintf("error: expecting signature box\n");
goto error;
}
if (box->data.jp.magic != JP2_JP_MAGIC) {
jas_eprintf("incorrect magic number\n");
goto error;
}
jp2_box_destroy(box);
box = 0;
/* Get the second box. This should be a FTYP box. */
if (!(box = jp2_box_get(in))) {
goto error;
}
if (box->type != JP2_BOX_FTYP) {
jas_eprintf("expecting file type box\n");
goto error;
}
jp2_box_destroy(box);
box = 0;
/* Get more boxes... */
found = 0;
while ((box = jp2_box_get(in))) {
if (jas_getdbglevel() >= 1) {
jas_eprintf("box type %s\n", box->info->name);
}
switch (box->type) {
case JP2_BOX_JP2C:
found = 1;
break;
case JP2_BOX_IHDR:
if (!dec->ihdr) {
dec->ihdr = box;
box = 0;
}
break;
case JP2_BOX_BPCC:
if (!dec->bpcc) {
dec->bpcc = box;
box = 0;
}
break;
case JP2_BOX_CDEF:
if (!dec->cdef) {
dec->cdef = box;
box = 0;
}
break;
case JP2_BOX_PCLR:
if (!dec->pclr) {
dec->pclr = box;
box = 0;
}
break;
case JP2_BOX_CMAP:
if (!dec->cmap) {
dec->cmap = box;
box = 0;
}
break;
case JP2_BOX_COLR:
if (!dec->colr) {
dec->colr = box;
box = 0;
}
break;
}
if (box) {
jp2_box_destroy(box);
box = 0;
}
if (found) {
break;
}
}
if (!found) {
jas_eprintf("error: no code stream found\n");
goto error;
}
if (!(dec->image = jpc_decode(in, optstr))) {
jas_eprintf("error: cannot decode code stream\n");
goto error;
}
/* An IHDR box must be present. */
if (!dec->ihdr) {
jas_eprintf("error: missing IHDR box\n");
goto error;
}
/* Does the number of components indicated in the IHDR box match
the value specified in the code stream? */
if (dec->ihdr->data.ihdr.numcmpts != JAS_CAST(uint, jas_image_numcmpts(dec->image))) {
jas_eprintf("warning: number of components mismatch\n");
}
/* At least one component must be present. */
if (!jas_image_numcmpts(dec->image)) {
jas_eprintf("error: no components\n");
goto error;
}
/* Determine if all components have the same data type. */
samedtype = true;
dtype = jas_image_cmptdtype(dec->image, 0);
for (i = 1; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) {
if (jas_image_cmptdtype(dec->image, i) != dtype) {
samedtype = false;
break;
}
}
/* Is the component data type indicated in the IHDR box consistent
with the data in the code stream? */
if ((samedtype && dec->ihdr->data.ihdr.bpc != JP2_DTYPETOBPC(dtype)) ||
(!samedtype && dec->ihdr->data.ihdr.bpc != JP2_IHDR_BPCNULL)) {
jas_eprintf("warning: component data type mismatch\n");
}
/* Is the compression type supported? */
if (dec->ihdr->data.ihdr.comptype != JP2_IHDR_COMPTYPE) {
jas_eprintf("error: unsupported compression type\n");
goto error;
}
if (dec->bpcc) {
/* Is the number of components indicated in the BPCC box
consistent with the code stream data? */
if (dec->bpcc->data.bpcc.numcmpts != JAS_CAST(uint, jas_image_numcmpts(
dec->image))) {
jas_eprintf("warning: number of components mismatch\n");
}
/* Is the component data type information indicated in the BPCC
box consistent with the code stream data? */
if (!samedtype) {
for (i = 0; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) {
if (jas_image_cmptdtype(dec->image, i) != JP2_BPCTODTYPE(dec->bpcc->data.bpcc.bpcs[i])) {
jas_eprintf("warning: component data type mismatch\n");
}
}
} else {
jas_eprintf("warning: superfluous BPCC box\n");
}
}
/* A COLR box must be present. */
if (!dec->colr) {
jas_eprintf("error: no COLR box\n");
goto error;
}
switch (dec->colr->data.colr.method) {
case JP2_COLR_ENUM:
jas_image_setclrspc(dec->image, jp2_getcs(&dec->colr->data.colr));
break;
case JP2_COLR_ICC:
iccprof = jas_iccprof_createfrombuf(dec->colr->data.colr.iccp,
dec->colr->data.colr.iccplen);
assert(iccprof);
jas_iccprof_gethdr(iccprof, &icchdr);
jas_eprintf("ICC Profile CS %08x\n", icchdr.colorspc);
jas_image_setclrspc(dec->image, fromiccpcs(icchdr.colorspc));
dec->image->cmprof_ = jas_cmprof_createfromiccprof(iccprof);
assert(dec->image->cmprof_);
jas_iccprof_destroy(iccprof);
break;
}
/* If a CMAP box is present, a PCLR box must also be present. */
if (dec->cmap && !dec->pclr) {
jas_eprintf("warning: missing PCLR box or superfluous CMAP box\n");
jp2_box_destroy(dec->cmap);
dec->cmap = 0;
}
/* If a CMAP box is not present, a PCLR box must not be present. */
if (!dec->cmap && dec->pclr) {
jas_eprintf("warning: missing CMAP box or superfluous PCLR box\n");
jp2_box_destroy(dec->pclr);
dec->pclr = 0;
}
/* Determine the number of channels (which is essentially the number
of components after any palette mappings have been applied). */
dec->numchans = dec->cmap ? dec->cmap->data.cmap.numchans : JAS_CAST(uint, jas_image_numcmpts(dec->image));
/* Perform a basic sanity check on the CMAP box if present. */
if (dec->cmap) {
for (i = 0; i < dec->numchans; ++i) {
/* Is the component number reasonable? */
if (dec->cmap->data.cmap.ents[i].cmptno >= JAS_CAST(uint, jas_image_numcmpts(dec->image))) {
jas_eprintf("error: invalid component number in CMAP box\n");
goto error;
}
/* Is the LUT index reasonable? */
if (dec->cmap->data.cmap.ents[i].pcol >= dec->pclr->data.pclr.numchans) {
jas_eprintf("error: invalid CMAP LUT index\n");
goto error;
}
}
}
/* Allocate space for the channel-number to component-number LUT. */
if (!(dec->chantocmptlut = jas_malloc(dec->numchans * sizeof(uint_fast16_t)))) {
jas_eprintf("error: no memory\n");
goto error;
}
if (!dec->cmap) {
for (i = 0; i < dec->numchans; ++i) {
dec->chantocmptlut[i] = i;
}
} else {
cmapd = &dec->cmap->data.cmap;
pclrd = &dec->pclr->data.pclr;
cdefd = &dec->cdef->data.cdef;
for (channo = 0; channo < cmapd->numchans; ++channo) {
cmapent = &cmapd->ents[channo];
if (cmapent->map == JP2_CMAP_DIRECT) {
dec->chantocmptlut[channo] = channo;
} else if (cmapent->map == JP2_CMAP_PALETTE) {
lutents = jas_malloc(pclrd->numlutents * sizeof(int_fast32_t));
for (i = 0; i < pclrd->numlutents; ++i) {
lutents[i] = pclrd->lutdata[cmapent->pcol + i * pclrd->numchans];
}
newcmptno = jas_image_numcmpts(dec->image);
jas_image_depalettize(dec->image, cmapent->cmptno, pclrd->numlutents, lutents, JP2_BPCTODTYPE(pclrd->bpc[cmapent->pcol]), newcmptno);
dec->chantocmptlut[channo] = newcmptno;
jas_free(lutents);
#if 0
if (dec->cdef) {
cdefent = jp2_cdef_lookup(cdefd, channo);
if (!cdefent) {
abort();
}
jas_image_setcmpttype(dec->image, newcmptno, jp2_getct(jas_image_clrspc(dec->image), cdefent->type, cdefent->assoc));
} else {
jas_image_setcmpttype(dec->image, newcmptno, jp2_getct(jas_image_clrspc(dec->image), 0, channo + 1));
}
#endif
}
}
}
/* Mark all components as being of unknown type. */
for (i = 0; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) {
jas_image_setcmpttype(dec->image, i, JAS_IMAGE_CT_UNKNOWN);
}
/* Determine the type of each component. */
if (dec->cdef) {
for (i = 0; i < dec->numchans; ++i) {
jas_image_setcmpttype(dec->image,
dec->chantocmptlut[dec->cdef->data.cdef.ents[i].channo],
jp2_getct(jas_image_clrspc(dec->image),
dec->cdef->data.cdef.ents[i].type, dec->cdef->data.cdef.ents[i].assoc));
}
} else {
for (i = 0; i < dec->numchans; ++i) {
jas_image_setcmpttype(dec->image, dec->chantocmptlut[i],
jp2_getct(jas_image_clrspc(dec->image), 0, i + 1));
}
}
/* Delete any components that are not of interest. */
for (i = jas_image_numcmpts(dec->image); i > 0; --i) {
if (jas_image_cmpttype(dec->image, i - 1) == JAS_IMAGE_CT_UNKNOWN) {
jas_image_delcmpt(dec->image, i - 1);
}
}
/* Ensure that some components survived. */
if (!jas_image_numcmpts(dec->image)) {
jas_eprintf("error: no components\n");
goto error;
}
#if 0
jas_eprintf("no of components is %d\n", jas_image_numcmpts(dec->image));
#endif
/* Prevent the image from being destroyed later. */
image = dec->image;
dec->image = 0;
jp2_dec_destroy(dec);
return image;
error:
if (box) {
jp2_box_destroy(box);
}
if (dec) {
jp2_dec_destroy(dec);
}
return 0;
}
| 1
|
264,648
|
GF_Err gf_bifs_flush_command_list(GF_BifsDecoder *codec)
{
GF_BitStream *bs;
GF_Err e;
CommandBufferItem *cbi;
GF_SceneGraph *prev_root = codec->current_graph;
M_QuantizationParameter *prev_qp = codec->ActiveQP;
u32 prev_qp_count = gf_list_count(codec->QPs);
u32 NbPass = gf_list_count(codec->command_buffers);
codec->ActiveQP = NULL;
GF_List *nextPass = gf_list_new();
while (NbPass) {
while (gf_list_count(codec->command_buffers)) {
cbi = (CommandBufferItem *)gf_list_get(codec->command_buffers, 0);
gf_list_rem(codec->command_buffers, 0);
codec->current_graph = gf_node_get_graph(cbi->node);
e = GF_OK;
if (cbi->cb->bufferSize) {
bs = gf_bs_new((char*)cbi->cb->buffer, cbi->cb->bufferSize, GF_BITSTREAM_READ);
gf_bs_set_eos_callback(bs, BM_EndOfStream, codec);
e = BM_ParseCommand(codec, bs, cbi->cb->commandList);
gf_bs_del(bs);
}
if (!e) {
gf_node_unregister(cbi->node, NULL);
gf_free(cbi);
continue;
}
/*this may be an error or a dependency pb - reset coimmand list and move to next pass*/
while (gf_list_count(cbi->cb->commandList)) {
u32 i;
GF_CommandField *cf;
GF_Command *com = (GF_Command *)gf_list_get(cbi->cb->commandList, 0);
gf_list_rem(cbi->cb->commandList, 0);
cf = (GF_CommandField *) gf_list_get(com->command_fields, 0);
if (cf && cf->fieldType==GF_SG_VRML_SFCOMMANDBUFFER) {
for (i=0; i<gf_list_count(codec->command_buffers); i++) {
CommandBufferItem *cbi2 = (CommandBufferItem *)gf_list_get(codec->command_buffers, i);
if (cbi2->cb == cf->field_ptr) {
gf_node_unregister(cbi2->node, NULL);
gf_free(cbi2);
gf_list_rem(codec->command_buffers, i);
i--;
}
}
}
gf_sg_command_del(com);
}
gf_list_add(nextPass, cbi);
}
if (!gf_list_count(nextPass)) break;
/*prepare next pass*/
while (gf_list_count(nextPass)) {
cbi = (CommandBufferItem *)gf_list_get(nextPass, 0);
gf_list_rem(nextPass, 0);
gf_list_add(codec->command_buffers, cbi);
}
NbPass --;
if (NbPass > gf_list_count(codec->command_buffers)) NbPass = gf_list_count(codec->command_buffers);
//restore QP state
while (gf_list_count(codec->QPs) > prev_qp_count) {
gf_list_rem(codec->QPs, 0); //QPs are inserted at head of list
}
codec->ActiveQP = NULL;
codec->LastError = GF_OK;
}
gf_list_del(nextPass);
codec->current_graph = prev_root;
codec->ActiveQP = prev_qp;
return GF_OK;
}
| 0
|
328,911
|
R_API void r_bin_java_print_element_value_summary(RBinJavaElementValue *element_value) {
RBinJavaCPTypeObj *obj;
RBinJavaElementValue *ev_element = NULL;
RListIter *iter = NULL, *iter_tmp = NULL;
char *name;
if (!element_value) {
eprintf ("Attempting to print an invalid RBinJavaElementValuePair *pair.\n");
return;
}
name = ((RBinJavaElementValueMetas *) element_value->metas->type_info)->name;
eprintf ("Element Value information:\n");
eprintf (" EV Pair File Offset: 0x%08"PFMT64x "\n", element_value->file_offset);
eprintf (" EV Value Type (%d): %s\n", element_value->tag, name);
switch (element_value->tag) {
case R_BIN_JAVA_EV_TAG_BYTE:
case R_BIN_JAVA_EV_TAG_CHAR:
case R_BIN_JAVA_EV_TAG_DOUBLE:
case R_BIN_JAVA_EV_TAG_FLOAT:
case R_BIN_JAVA_EV_TAG_INT:
case R_BIN_JAVA_EV_TAG_LONG:
case R_BIN_JAVA_EV_TAG_SHORT:
case R_BIN_JAVA_EV_TAG_BOOLEAN:
case R_BIN_JAVA_EV_TAG_STRING:
eprintf (" EV Value Constant Value index: 0x%02x\n", element_value->value.const_value.const_value_idx);
eprintf (" EV Value Constant Value Information:\n");
obj = element_value->value.const_value.const_value_cp_obj;
if (obj && obj->metas && obj->metas->type_info) {
((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj);
}
break;
case R_BIN_JAVA_EV_TAG_ENUM:
eprintf (" EV Value Enum Constant Value Const Name Index: 0x%02x\n", element_value->value.enum_const_value.const_name_idx);
eprintf (" EV Value Enum Constant Value Type Name Index: 0x%02x\n", element_value->value.enum_const_value.type_name_idx);
eprintf (" EV Value Enum Constant Value Const CP Information:\n");
obj = element_value->value.enum_const_value.const_name_cp_obj;
if (obj && obj->metas && obj->metas->type_info) {
((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj);
}
eprintf (" EV Value Enum Constant Value Type CP Information:\n");
obj = element_value->value.enum_const_value.type_name_cp_obj;
if (obj && obj->metas && obj->metas->type_info) {
((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj);
}
break;
case R_BIN_JAVA_EV_TAG_CLASS:
eprintf (" EV Value Class Info Index: 0x%02x\n", element_value->value.class_value.class_info_idx);
eprintf (" EV Value Class Info CP Information:\n");
obj = element_value->value.class_value.class_info_cp_obj;
if (obj && obj->metas && obj->metas->type_info) {
((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj);
}
break;
case R_BIN_JAVA_EV_TAG_ARRAY:
eprintf (" EV Value Array Value Number of Values: 0x%04x\n", element_value->value.array_value.num_values);
eprintf (" EV Value Array Values\n");
r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) {
r_bin_java_print_element_value_summary (ev_element);
}
break;
case R_BIN_JAVA_EV_TAG_ANNOTATION:
eprintf (" EV Annotation Information:\n");
r_bin_java_print_annotation_summary (&element_value->value.annotation_value);
break;
default:
// eprintf unable to handle tag
break;
}
}
| 0
|
241,056
|
static void trim_key()
{
char *p;
int i;
for (i=0, p=booth_conf->authkey; i < booth_conf->authkey_len; i++, p++)
if (!isascii(*p))
return;
p = booth_conf->authkey;
while (booth_conf->authkey_len > 0 && isspace(*p)) {
p++;
booth_conf->authkey_len--;
}
memmove(booth_conf->authkey, p, booth_conf->authkey_len);
p = booth_conf->authkey + booth_conf->authkey_len - 1;
while (booth_conf->authkey_len > 0 && isspace(*p)) {
booth_conf->authkey_len--;
p--;
}
}
| 0
|
275,492
|
njs_vm_value_string_set(njs_vm_t *vm, njs_value_t *value, const u_char *start,
uint32_t size)
{
return njs_string_set(vm, value, start, size);
}
| 0
|
224,486
|
static GF_Err txtin_webvtt_setup(GF_Filter *filter, GF_TXTIn *ctx)
{
GF_Err e;
u32 ID, OCR_ES_ID, file_size, w, h;
Bool is_srt;
char *ext;
ctx->src = gf_fopen(ctx->file_name, "rb");
if (!ctx->src) return GF_URL_ERROR;
file_size = (u32) gf_fsize(ctx->src);
ctx->unicode_type = gf_text_get_utf_type(ctx->src);
if (ctx->unicode_type<0) {
gf_fclose(ctx->src);
ctx->src = NULL;
GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[TXTIn] Unsupported SRT UTF encoding\n"));
return GF_NOT_SUPPORTED;
}
ext = gf_file_ext_start(ctx->file_name);
is_srt = (ext && !strnicmp(ext, ".srt", 4)) ? GF_TRUE : GF_FALSE;
if (!ctx->timescale) ctx->timescale = 1000;
OCR_ES_ID = ID = 0;
if (!ctx->opid) ctx->opid = gf_filter_pid_new(filter);
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, &PROP_UINT(GF_STREAM_TEXT) );
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, &PROP_UINT(GF_CODECID_WEBVTT) );
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_TIMESCALE, &PROP_UINT(ctx->timescale) );
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DOWN_SIZE, &PROP_LONGUINT(file_size) );
w = ctx->width;
h = ctx->height;
if (!ID) ID = 1;
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_ID, &PROP_UINT(ID) );
if (OCR_ES_ID) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CLOCK_ID, &PROP_UINT(OCR_ES_ID) );
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_WIDTH, &PROP_UINT(w) );
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_HEIGHT, &PROP_UINT(h) );
if (ctx->zorder) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_ZORDER, &PROP_SINT(ctx->zorder) );
if (ctx->lang) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_LANGUAGE, &PROP_STRING( ctx->lang) );
ctx->vttparser = gf_webvtt_parser_new();
e = gf_webvtt_parser_init(ctx->vttparser, ctx->src, ctx->unicode_type, is_srt, ctx, gf_webvtt_import_report, gf_webvtt_flush_sample, gf_webvtt_import_header);
if (e != GF_OK) {
gf_webvtt_parser_del(ctx->vttparser);
ctx->vttparser = NULL;
GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[TXTIn] WebVTT parser init error %s\n", gf_error_to_string(e) ));
}
//get the header
e = gf_webvtt_parser_parse(ctx->vttparser);
txtin_probe_duration(ctx);
return e;
}
| 0
|
233,883
|
*/
static void php_wddx_process_data(void *user_data, const XML_Char *s, int len)
{
st_entry *ent;
wddx_stack *stack = (wddx_stack *)user_data;
if (!wddx_stack_is_empty(stack) && !stack->done) {
wddx_stack_top(stack, (void**)&ent);
switch (ent->type) {
case ST_BINARY:
case ST_STRING:
if (Z_STRLEN(ent->data) == 0) {
zval_ptr_dtor(&ent->data);
ZVAL_STRINGL(&ent->data, (char *)s, len);
} else {
Z_STR(ent->data) = zend_string_extend(Z_STR(ent->data), Z_STRLEN(ent->data) + len, 0);
memcpy(Z_STRVAL(ent->data) + Z_STRLEN(ent->data) - len, (char *)s, len);
Z_STRVAL(ent->data)[Z_STRLEN(ent->data)] = '\0';
}
break;
case ST_NUMBER:
ZVAL_STRINGL(&ent->data, (char *)s, len);
convert_scalar_to_number(&ent->data);
break;
case ST_BOOLEAN:
if (!strcmp((char *)s, "true")) {
ZVAL_TRUE(&ent->data);
} else if (!strcmp((char *)s, "false")) {
ZVAL_FALSE(&ent->data);
} else {
zval_ptr_dtor(&ent->data);
if (ent->varname) {
efree(ent->varname);
ent->varname = NULL;
}
ZVAL_UNDEF(&ent->data);
}
break;
case ST_DATETIME: {
char *tmp;
tmp = emalloc(len + 1);
memcpy(tmp, (char *)s, len);
tmp[len] = '\0';
Z_LVAL(ent->data) = php_parse_date(tmp, NULL);
/* date out of range < 1969 or > 2038 */
if (Z_LVAL(ent->data) == -1) {
ZVAL_STRINGL(&ent->data, (char *)s, len);
}
efree(tmp);
}
break;
default:
break;
}
}
| 0
|
317,209
|
static int smack_msg_msg_alloc_security(struct msg_msg *msg)
{
struct smack_known **blob = smack_msg_msg(msg);
*blob = smk_of_current();
return 0;
}
| 0
|
265,538
|
void *mempool_getbuffer(MemoryPoolHandle handle, size_t expected_buffer_size) {
int rc;
int bufs_to_allocate;
int bufs_that_can_be_allocated = 0;
struct memory_pool_element *pool_item = NULL;
struct mempool *pool = (struct mempool *)handle;
char *log_msg_fmt =
"mempool(%p): mempool_getbuffer called for invalid "
"expected_buffer_size(%zu), current pool manages only "
"mempool_item_size(%zu)";
char log_msg[300];
if (pool == NULL) {
return NULL;
}
if (pool->mempool_item_size != expected_buffer_size) {
// This should never happen unless mempool is used wrongly.
if (pool->log_callback_func) {
snprintf(log_msg, sizeof(log_msg), log_msg_fmt, (void *)pool,
expected_buffer_size, pool->mempool_item_size);
pool->log_callback_func(MEMPOOL_LOG_FATAL, log_msg);
return NULL;
}
}
if ((pool->flags & ENABLE_LOCKING) != 0) {
pthread_mutex_lock(&pool->lock);
}
/* If the free list is empty then expand the pool's free list */
if (pool->free_bufs_in_pool == 0) {
bufs_to_allocate = pool->expandable_size / pool->mempool_item_size;
bufs_that_can_be_allocated = pool_can_expand_by(pool);
if (bufs_that_can_be_allocated > 0) {
/* We can at least allocate
min(bufs_that_can_be_allocated, bufs_to_allocate) */
bufs_to_allocate = ((bufs_to_allocate > bufs_that_can_be_allocated)
? bufs_that_can_be_allocated
: bufs_to_allocate);
rc = freelist_allocate(pool, bufs_to_allocate);
if (rc != 0) {
if ((pool->flags & ENABLE_LOCKING) != 0) {
pthread_mutex_unlock(&pool->lock);
}
return NULL;
}
} else {
/* We cannot allocate any more buffers, reached max threshold */
if ((pool->flags & ENABLE_LOCKING) != 0) {
pthread_mutex_unlock(&pool->lock);
}
return NULL;
}
}
/* Done with expansion of pool in case of pre allocated pools */
/* Logic of allocation from free list */
/* If there is an item on the pool's free list, then take that... */
if (pool->free_list != NULL) {
pool_item = pool->free_list;
pool->free_list = pool_item->next;
pool_item->next = (struct memory_pool_element *)NULL;
pool->free_bufs_in_pool--;
}
if (pool_item) {
pool->number_of_bufs_shared++;
}
if ((pool->flags & ENABLE_LOCKING) != 0) {
pthread_mutex_unlock(&pool->lock);
}
return (void *)pool_item;
}
| 0
|
331,783
|
QPaintEngineEx::QPaintEngineEx()
: QPaintEngine(*new QPaintEngineExPrivate, AllFeatures)
{
extended = true;
}
| 0
|
206,815
|
static MagickBooleanType SetGrayscaleImage(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
*colormap;
register ssize_t
i;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace,exception);
if (image->storage_class == PseudoClass)
colormap_index=(ssize_t *) AcquireQuantumMemory(image->colors+1,
sizeof(*colormap_index));
else
colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize+1,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
(void) memset(colormap_index,(-1),MaxColormapSize*
sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(image,q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=(double)
GetPixelRed(image,q);
image->colormap[image->colors].green=(double)
GetPixelGreen(image,q);
image->colormap[image->colors].blue=(double)
GetPixelBlue(image,q);
image->colors++;
}
}
SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
IntensityCompare);
colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
if (colormap == (PixelInfo *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].alpha]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
GetPixelIndex(image,q))],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
| 1
|
307,868
|
uint ciEnv::compile_id() {
if (task() == NULL) return 0;
return task()->compile_id();
}
| 0
|
385,875
|
static int path_lookupat(int dfd, const char *name,
unsigned int flags, struct nameidata *nd)
{
struct file *base = NULL;
struct path path;
int err;
/*
* Path walking is largely split up into 2 different synchronisation
* schemes, rcu-walk and ref-walk (explained in
* Documentation/filesystems/path-lookup.txt). These share much of the
* path walk code, but some things particularly setup, cleanup, and
* following mounts are sufficiently divergent that functions are
* duplicated. Typically there is a function foo(), and its RCU
* analogue, foo_rcu().
*
* -ECHILD is the error number of choice (just to avoid clashes) that
* is returned if some aspect of an rcu-walk fails. Such an error must
* be handled by restarting a traditional ref-walk (which will always
* be able to complete).
*/
err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
if (unlikely(err))
return err;
current->total_link_count = 0;
err = link_path_walk(name, nd);
if (!err && !(flags & LOOKUP_PARENT)) {
err = lookup_last(nd, &path);
while (err > 0) {
void *cookie;
struct path link = path;
err = may_follow_link(&link, nd);
if (unlikely(err))
break;
nd->flags |= LOOKUP_PARENT;
err = follow_link(&link, nd, &cookie);
if (err)
break;
err = lookup_last(nd, &path);
put_link(nd, &link, cookie);
}
}
if (!err)
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!can_lookup(nd->inode)) {
path_put(&nd->path);
err = -ENOTDIR;
}
}
if (base)
fput(base);
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
path_put(&nd->root);
nd->root.mnt = NULL;
}
return err;
}
| 0
|
264,261
|
void buffer_reset(Buffer *buffer)
{
buffer->offset = 0;
}
| 0
|
292,227
|
inbound_next_nick (session *sess, char *nick, int error,
const message_tags_data *tags_data)
{
char *newnick;
server *serv = sess->server;
ircnet *net;
serv->nickcount++;
switch (serv->nickcount)
{
case 2:
newnick = prefs.hex_irc_nick2;
net = serv->network;
/* use network specific "Second choice"? */
if (net && !(net->flags & FLAG_USE_GLOBAL) && net->nick2)
{
newnick = net->nick2;
}
serv->p_change_nick (serv, newnick);
if (error)
{
EMIT_SIGNAL_TIMESTAMP (XP_TE_NICKERROR, sess, nick, newnick, NULL, NULL,
0, tags_data->timestamp);
}
else
{
EMIT_SIGNAL_TIMESTAMP (XP_TE_NICKCLASH, sess, nick, newnick, NULL, NULL,
0, tags_data->timestamp);
}
break;
case 3:
serv->p_change_nick (serv, prefs.hex_irc_nick3);
if (error)
{
EMIT_SIGNAL_TIMESTAMP (XP_TE_NICKERROR, sess, nick, prefs.hex_irc_nick3,
NULL, NULL, 0, tags_data->timestamp);
}
else
{
EMIT_SIGNAL_TIMESTAMP (XP_TE_NICKCLASH, sess, nick, prefs.hex_irc_nick3,
NULL, NULL, 0, tags_data->timestamp);
}
break;
default:
EMIT_SIGNAL_TIMESTAMP (XP_TE_NICKFAIL, sess, NULL, NULL, NULL, NULL, 0,
tags_data->timestamp);
}
}
| 0
|
226,235
|
GF_Err stdp_box_size(GF_Box *s)
{
GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s;
ptr->size += (2 * ptr->nb_entries);
return GF_OK;
}
| 0
|
512,556
|
Item_hex_string(THD *thd, const char *str, size_t str_length):
Item_hex_constant(thd, str, str_length) {}
| 0
|
313,850
|
get_visual_text(
cmdarg_T *cap,
char_u **pp, // return: start of selected text
int *lenp) // return: length of selected text
{
if (VIsual_mode != 'V')
unadjust_for_sel();
if (VIsual.lnum != curwin->w_cursor.lnum)
{
if (cap != NULL)
clearopbeep(cap->oap);
return FAIL;
}
if (VIsual_mode == 'V')
{
*pp = ml_get_curline();
*lenp = (int)STRLEN(*pp);
}
else
{
if (LT_POS(curwin->w_cursor, VIsual))
{
*pp = ml_get_pos(&curwin->w_cursor);
*lenp = VIsual.col - curwin->w_cursor.col + 1;
}
else
{
*pp = ml_get_pos(&VIsual);
*lenp = curwin->w_cursor.col - VIsual.col + 1;
}
if (**pp == NUL)
*lenp = 0;
if (*lenp > 0)
{
if (has_mbyte)
// Correct the length to include all bytes of the last
// character.
*lenp += (*mb_ptr2len)(*pp + (*lenp - 1)) - 1;
else if ((*pp)[*lenp - 1] == NUL)
// Do not include a trailing NUL.
*lenp -= 1;
}
}
reset_VIsual_and_resel();
return OK;
}
| 0
|
443,693
|
init(void)
{
#ifdef USE_CALLOUT
int id;
OnigEncoding enc;
char* name;
unsigned int args[4];
OnigValue opts[4];
enc = ONIG_ENCODING_UTF16_LE;
name = "F\000A\000I\000L\000\000\000"; BC0_P(name, fail);
name = "M\000I\000S\000M\000A\000T\000C\000H\000\000\000"; BC0_P(name, mismatch);
name = "M\000A\000X\000\000\000";
args[0] = ONIG_TYPE_TAG | ONIG_TYPE_LONG;
args[1] = ONIG_TYPE_CHAR;
opts[0].c = 'X';
BC_B_O(name, max, 2, args, 1, opts);
name = "E\000R\000R\000O\000R\000\000\000";
args[0] = ONIG_TYPE_LONG; opts[0].l = ONIG_ABORT;
BC_P_O(name, error, 1, args, 1, opts);
name = "C\000O\000U\000N\000T\000\000\000";
args[0] = ONIG_TYPE_CHAR; opts[0].c = '>';
BC_B_O(name, count, 1, args, 1, opts);
name = "T\000O\000T\000A\000L\000_\000C\000O\000U\000N\000T\000\000\000";
args[0] = ONIG_TYPE_CHAR; opts[0].c = '>';
BC_B_O(name, total_count, 1, args, 1, opts);
name = "C\000M\000P\000\000\000";
args[0] = ONIG_TYPE_TAG | ONIG_TYPE_LONG;
args[1] = ONIG_TYPE_STRING;
args[2] = ONIG_TYPE_TAG | ONIG_TYPE_LONG;
BC_P(name, cmp, 3, args);
#endif /* USE_CALLOUT */
return ONIG_NORMAL;
}
| 0
|
236,151
|
GF_Box *hclr_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_TextHighlightColorBox, GF_ISOM_BOX_TYPE_HCLR);
return (GF_Box *) tmp;
}
| 0
|
312,399
|
call_qftf_func(qf_list_T *qfl, int qf_winid, long start_idx, long end_idx)
{
callback_T *cb = &qftf_cb;
list_T *qftf_list = NULL;
static int recursive = FALSE;
if (recursive)
return NULL; // this doesn't work properly recursively
recursive = TRUE;
// If 'quickfixtextfunc' is set, then use the user-supplied function to get
// the text to display. Use the local value of 'quickfixtextfunc' if it is
// set.
if (qfl->qf_qftf_cb.cb_name != NULL)
cb = &qfl->qf_qftf_cb;
if (cb->cb_name != NULL)
{
typval_T args[1];
dict_T *d;
typval_T rettv;
// create the dict argument
if ((d = dict_alloc_lock(VAR_FIXED)) == NULL)
{
recursive = FALSE;
return NULL;
}
dict_add_number(d, "quickfix", (long)IS_QF_LIST(qfl));
dict_add_number(d, "winid", (long)qf_winid);
dict_add_number(d, "id", (long)qfl->qf_id);
dict_add_number(d, "start_idx", start_idx);
dict_add_number(d, "end_idx", end_idx);
++d->dv_refcount;
args[0].v_type = VAR_DICT;
args[0].vval.v_dict = d;
qftf_list = NULL;
if (call_callback(cb, 0, &rettv, 1, args) != FAIL)
{
if (rettv.v_type == VAR_LIST)
{
qftf_list = rettv.vval.v_list;
qftf_list->lv_refcount++;
}
clear_tv(&rettv);
}
dict_unref(d);
}
recursive = FALSE;
return qftf_list;
}
| 0
|
328,942
|
R_API ut64 r_bin_java_element_value_calc_size(RBinJavaElementValue *element_value) {
RListIter *iter, *iter_tmp;
RBinJavaElementValue *ev_element;
RBinJavaElementValuePair *evps;
ut64 sz = 0;
if (!element_value) {
return sz;
}
// tag
sz += 1;
switch (element_value->tag) {
case R_BIN_JAVA_EV_TAG_BYTE:
case R_BIN_JAVA_EV_TAG_CHAR:
case R_BIN_JAVA_EV_TAG_DOUBLE:
case R_BIN_JAVA_EV_TAG_FLOAT:
case R_BIN_JAVA_EV_TAG_INT:
case R_BIN_JAVA_EV_TAG_LONG:
case R_BIN_JAVA_EV_TAG_SHORT:
case R_BIN_JAVA_EV_TAG_BOOLEAN:
case R_BIN_JAVA_EV_TAG_STRING:
// look up value in bin->cp_list
// (ut16) read and set const_value.const_value_idx
// element_value->value.const_value.const_value_idx = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
break;
case R_BIN_JAVA_EV_TAG_ENUM:
// (ut16) read and set enum_const_value.type_name_idx
// element_value->value.enum_const_value.type_name_idx = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
// (ut16) read and set enum_const_value.const_name_idx
// element_value->value.enum_const_value.const_name_idx = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
break;
case R_BIN_JAVA_EV_TAG_CLASS:
// (ut16) read and set class_value.class_info_idx
// element_value->value.class_value.class_info_idx = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
break;
case R_BIN_JAVA_EV_TAG_ARRAY:
// (ut16) read and set array_value.num_values
// element_value->value.array_value.num_values = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) {
if (ev_element) {
sz += r_bin_java_element_value_calc_size (ev_element);
}
}
break;
case R_BIN_JAVA_EV_TAG_ANNOTATION:
// annotation new is not used here.
// (ut16) read and set annotation_value.type_idx;
// element_value->value.annotation_value.type_idx = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
// (ut16) read and set annotation_value.num_element_value_pairs;
// element_value->value.annotation_value.num_element_value_pairs = r_bin_java_read_short(bin, bin->b->cur);
sz += 2;
element_value->value.annotation_value.element_value_pairs = r_list_newf (r_bin_java_element_pair_free);
r_list_foreach_safe (element_value->value.annotation_value.element_value_pairs, iter, iter_tmp, evps) {
if (evps) {
sz += r_bin_java_element_pair_calc_size (evps);
}
}
break;
default:
// eprintf unable to handle tag
break;
}
return sz;
}
| 0
|
308,192
|
static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
struct device *dev = ctx->fl->sctx->dev;
int i, err;
for (i = 0; i < ctx->nscalars; ++i) {
/* Make sure reserved field is set to 0 */
if (ctx->args[i].reserved)
return -EINVAL;
if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
ctx->args[i].length == 0)
continue;
err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
ctx->args[i].length, &ctx->maps[i]);
if (err) {
dev_err(dev, "Error Creating map %d\n", err);
return -EINVAL;
}
}
return 0;
}
| 0
|
225,547
|
size_t TfLiteIntArrayGetSizeInBytes(int size) {
static TfLiteIntArray dummy;
size_t computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
// Context for why this is needed is in http://b/189926408#comment21
computed_size -= sizeof(dummy.data[0]);
#endif
return computed_size;
}
| 0
|
516,257
|
static int virtio_net_max_tx_queue_size(VirtIONet *n)
{
NetClientState *peer = n->nic_conf.peers.ncs[0];
/*
* Backends other than vhost-user don't support max queue size.
*/
if (!peer) {
return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
}
if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
}
return VIRTQUEUE_MAX_SIZE;
}
| 0
|
343,167
|
static inline int esp_remove_trailer(struct sk_buff *skb)
{
struct xfrm_state *x = xfrm_input_state(skb);
struct xfrm_offload *xo = xfrm_offload(skb);
struct crypto_aead *aead = x->data;
int alen, hlen, elen;
int padlen, trimlen;
__wsum csumdiff;
u8 nexthdr[2];
int ret;
alen = crypto_aead_authsize(aead);
hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
elen = skb->len - hlen;
if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
ret = xo->proto;
goto out;
}
if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
BUG();
ret = -EINVAL;
padlen = nexthdr[0];
if (padlen + 2 + alen >= elen) {
net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
padlen + 2, elen - alen);
goto out;
}
trimlen = alen + padlen + 2;
if (skb->ip_summed == CHECKSUM_COMPLETE) {
csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
pskb_trim(skb, skb->len - trimlen);
ret = nexthdr[1];
out:
return ret;
}
| 0
|
369,302
|
__must_hold(&ctx->completion_lock)
{
u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
struct io_kiocb *req, *tmp;
spin_lock_irq(&ctx->timeout_lock);
list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
u32 events_needed, events_got;
if (io_is_timeout_noseq(req))
break;
/*
* Since seq can easily wrap around over time, subtract
* the last seq at which timeouts were flushed before comparing.
* Assuming not more than 2^31-1 events have happened since,
* these subtractions won't have wrapped, so we can check if
* target is in [last_seq, current_seq] by comparing the two.
*/
events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
events_got = seq - ctx->cq_last_tm_flush;
if (events_got < events_needed)
break;
io_kill_timeout(req, 0);
}
ctx->cq_last_tm_flush = seq;
spin_unlock_irq(&ctx->timeout_lock);
}
| 0
|
269,511
|
static MagickBooleanType ReadProfile(Image *image,const char *name,
const unsigned char *datum,ssize_t length,ExceptionInfo *exception)
{
MagickBooleanType
status;
StringInfo
*profile;
if (length < 4)
return(MagickFalse);
profile=BlobToStringInfo(datum,(size_t) length);
if (profile == (StringInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=SetImageProfile(image,name,profile,exception);
profile=DestroyStringInfo(profile);
return(status);
}
| 0
|
234,807
|
static int read_one_dev(struct extent_buffer *leaf,
struct btrfs_dev_item *dev_item)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
u64 devid;
int ret;
u8 fs_uuid[BTRFS_FSID_SIZE];
u8 dev_uuid[BTRFS_UUID_SIZE];
devid = btrfs_device_id(leaf, dev_item);
read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
BTRFS_UUID_SIZE);
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
BTRFS_FSID_SIZE);
if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
fs_devices = open_seed_devices(fs_info, fs_uuid);
if (IS_ERR(fs_devices))
return PTR_ERR(fs_devices);
}
device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
fs_uuid);
if (!device) {
if (!btrfs_test_opt(fs_info, DEGRADED)) {
btrfs_report_missing_device(fs_info, devid,
dev_uuid, true);
return -ENOENT;
}
device = add_missing_dev(fs_devices, devid, dev_uuid);
if (IS_ERR(device)) {
btrfs_err(fs_info,
"failed to add missing dev %llu: %ld",
devid, PTR_ERR(device));
return PTR_ERR(device);
}
btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
} else {
if (!device->bdev) {
if (!btrfs_test_opt(fs_info, DEGRADED)) {
btrfs_report_missing_device(fs_info,
devid, dev_uuid, true);
return -ENOENT;
}
btrfs_report_missing_device(fs_info, devid,
dev_uuid, false);
}
if (!device->bdev &&
!test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
/*
* this happens when a device that was properly setup
* in the device info lists suddenly goes bad.
* device->bdev is NULL, and so we have to set
* device->missing to one here
*/
device->fs_devices->missing_devices++;
set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
}
/* Move the device to its own fs_devices */
if (device->fs_devices != fs_devices) {
ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
&device->dev_state));
list_move(&device->dev_list, &fs_devices->devices);
device->fs_devices->num_devices--;
fs_devices->num_devices++;
device->fs_devices->missing_devices--;
fs_devices->missing_devices++;
device->fs_devices = fs_devices;
}
}
if (device->fs_devices != fs_info->fs_devices) {
BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
if (device->generation !=
btrfs_device_generation(leaf, dev_item))
return -EINVAL;
}
fill_device_from_item(leaf, dev_item, device);
if (device->bdev) {
u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
if (device->total_bytes > max_total_bytes) {
btrfs_err(fs_info,
"device total_bytes should be at most %llu but found %llu",
max_total_bytes, device->total_bytes);
return -EINVAL;
}
}
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
device->fs_devices->total_rw_bytes += device->total_bytes;
atomic64_add(device->total_bytes - device->bytes_used,
&fs_info->free_chunk_space);
}
ret = 0;
return ret;
}
| 0
|
326,100
|
regrepeat(
char_u *p,
long maxcount) // maximum number of matches allowed
{
long count = 0;
char_u *scan;
char_u *opnd;
int mask;
int testval = 0;
scan = rex.input; // Make local copy of rex.input for speed.
opnd = OPERAND(p);
switch (OP(p))
{
case ANY:
case ANY + ADD_NL:
while (count < maxcount)
{
// Matching anything means we continue until end-of-line (or
// end-of-file for ANY + ADD_NL), only limited by maxcount.
while (*scan != NUL && count < maxcount)
{
++count;
MB_PTR_ADV(scan);
}
if (!REG_MULTI || !WITH_NL(OP(p)) || rex.lnum > rex.reg_maxline
|| rex.reg_line_lbr || count == maxcount)
break;
++count; // count the line-break
reg_nextline();
scan = rex.input;
if (got_int)
break;
}
break;
case IDENT:
case IDENT + ADD_NL:
testval = TRUE;
// FALLTHROUGH
case SIDENT:
case SIDENT + ADD_NL:
while (count < maxcount)
{
if (vim_isIDc(PTR2CHAR(scan)) && (testval || !VIM_ISDIGIT(*scan)))
{
MB_PTR_ADV(scan);
}
else if (*scan == NUL)
{
if (!REG_MULTI || !WITH_NL(OP(p)) || rex.lnum > rex.reg_maxline
|| rex.reg_line_lbr)
break;
reg_nextline();
scan = rex.input;
if (got_int)
break;
}
else if (rex.reg_line_lbr && *scan == '\n' && WITH_NL(OP(p)))
++scan;
else
break;
++count;
}
break;
case KWORD:
case KWORD + ADD_NL:
testval = TRUE;
// FALLTHROUGH
case SKWORD:
case SKWORD + ADD_NL:
while (count < maxcount)
{
if (vim_iswordp_buf(scan, rex.reg_buf)
&& (testval || !VIM_ISDIGIT(*scan)))
{
MB_PTR_ADV(scan);
}
else if (*scan == NUL)
{
if (!REG_MULTI || !WITH_NL(OP(p)) || rex.lnum > rex.reg_maxline
|| rex.reg_line_lbr)
break;
reg_nextline();
scan = rex.input;
if (got_int)
break;
}
else if (rex.reg_line_lbr && *scan == '\n' && WITH_NL(OP(p)))
++scan;
else
break;
++count;
}
break;
case FNAME:
case FNAME + ADD_NL:
testval = TRUE;
// FALLTHROUGH
case SFNAME:
case SFNAME + ADD_NL:
while (count < maxcount)
{
if (vim_isfilec(PTR2CHAR(scan)) && (testval || !VIM_ISDIGIT(*scan)))
{
MB_PTR_ADV(scan);
}
else if (*scan == NUL)
{
if (!REG_MULTI || !WITH_NL(OP(p)) || rex.lnum > rex.reg_maxline
|| rex.reg_line_lbr)
break;
reg_nextline();
scan = rex.input;
if (got_int)
break;
}
else if (rex.reg_line_lbr && *scan == '\n' && WITH_NL(OP(p)))
++scan;
else
break;
++count;
}
break;
case PRINT:
case PRINT + ADD_NL:
testval = TRUE;
// FALLTHROUGH
case SPRINT:
case SPRINT + ADD_NL:
while (count < maxcount)
{
if (*scan == NUL)
{
if (!REG_MULTI || !WITH_NL(OP(p)) || rex.lnum > rex.reg_maxline
|| rex.reg_line_lbr)
break;
reg_nextline();
scan = rex.input;
if (got_int)
break;
}
else if (vim_isprintc(PTR2CHAR(scan)) == 1
&& (testval || !VIM_ISDIGIT(*scan)))
{
MB_PTR_ADV(scan);
}
else if (rex.reg_line_lbr && *scan == '\n' && WITH_NL(OP(p)))
++scan;
else
break;
++count;
}
break;
case WHITE:
case WHITE + ADD_NL:
testval = mask = RI_WHITE;
do_class:
while (count < maxcount)
{
int l;
if (*scan == NUL)
{
if (!REG_MULTI || !WITH_NL(OP(p)) || rex.lnum > rex.reg_maxline
|| rex.reg_line_lbr)
break;
reg_nextline();
scan = rex.input;
if (got_int)
break;
}
else if (has_mbyte && (l = (*mb_ptr2len)(scan)) > 1)
{
if (testval != 0)
break;
scan += l;
}
else if ((class_tab[*scan] & mask) == testval)
++scan;
else if (rex.reg_line_lbr && *scan == '\n' && WITH_NL(OP(p)))
++scan;
else
break;
++count;
}
break;
case NWHITE:
case NWHITE + ADD_NL:
mask = RI_WHITE;
goto do_class;
case DIGIT:
case DIGIT + ADD_NL:
testval = mask = RI_DIGIT;
goto do_class;
case NDIGIT:
case NDIGIT + ADD_NL:
mask = RI_DIGIT;
goto do_class;
case HEX:
case HEX + ADD_NL:
testval = mask = RI_HEX;
goto do_class;
case NHEX:
case NHEX + ADD_NL:
mask = RI_HEX;
goto do_class;
case OCTAL:
case OCTAL + ADD_NL:
testval = mask = RI_OCTAL;
goto do_class;
case NOCTAL:
case NOCTAL + ADD_NL:
mask = RI_OCTAL;
goto do_class;
case WORD:
case WORD + ADD_NL:
testval = mask = RI_WORD;
goto do_class;
case NWORD:
case NWORD + ADD_NL:
mask = RI_WORD;
goto do_class;
case HEAD:
case HEAD + ADD_NL:
testval = mask = RI_HEAD;
goto do_class;
case NHEAD:
case NHEAD + ADD_NL:
mask = RI_HEAD;
goto do_class;
case ALPHA:
case ALPHA + ADD_NL:
testval = mask = RI_ALPHA;
goto do_class;
case NALPHA:
case NALPHA + ADD_NL:
mask = RI_ALPHA;
goto do_class;
case LOWER:
case LOWER + ADD_NL:
testval = mask = RI_LOWER;
goto do_class;
case NLOWER:
case NLOWER + ADD_NL:
mask = RI_LOWER;
goto do_class;
case UPPER:
case UPPER + ADD_NL:
testval = mask = RI_UPPER;
goto do_class;
case NUPPER:
case NUPPER + ADD_NL:
mask = RI_UPPER;
goto do_class;
case EXACTLY:
{
int cu, cl;
// This doesn't do a multi-byte character, because a MULTIBYTECODE
// would have been used for it. It does handle single-byte
// characters, such as latin1.
if (rex.reg_ic)
{
cu = MB_TOUPPER(*opnd);
cl = MB_TOLOWER(*opnd);
while (count < maxcount && (*scan == cu || *scan == cl))
{
count++;
scan++;
}
}
else
{
cu = *opnd;
while (count < maxcount && *scan == cu)
{
count++;
scan++;
}
}
break;
}
case MULTIBYTECODE:
{
int i, len, cf = 0;
// Safety check (just in case 'encoding' was changed since
// compiling the program).
if ((len = (*mb_ptr2len)(opnd)) > 1)
{
if (rex.reg_ic && enc_utf8)
cf = utf_fold(utf_ptr2char(opnd));
while (count < maxcount && (*mb_ptr2len)(scan) >= len)
{
for (i = 0; i < len; ++i)
if (opnd[i] != scan[i])
break;
if (i < len && (!rex.reg_ic || !enc_utf8
|| utf_fold(utf_ptr2char(scan)) != cf))
break;
scan += len;
++count;
}
}
}
break;
case ANYOF:
case ANYOF + ADD_NL:
testval = TRUE;
// FALLTHROUGH
case ANYBUT:
case ANYBUT + ADD_NL:
while (count < maxcount)
{
int len;
if (*scan == NUL)
{
if (!REG_MULTI || !WITH_NL(OP(p)) || rex.lnum > rex.reg_maxline
|| rex.reg_line_lbr)
break;
reg_nextline();
scan = rex.input;
if (got_int)
break;
}
else if (rex.reg_line_lbr && *scan == '\n' && WITH_NL(OP(p)))
++scan;
else if (has_mbyte && (len = (*mb_ptr2len)(scan)) > 1)
{
if ((cstrchr(opnd, (*mb_ptr2char)(scan)) == NULL) == testval)
break;
scan += len;
}
else
{
if ((cstrchr(opnd, *scan) == NULL) == testval)
break;
++scan;
}
++count;
}
break;
case NEWL:
while (count < maxcount
&& ((*scan == NUL && rex.lnum <= rex.reg_maxline
&& !rex.reg_line_lbr && REG_MULTI)
|| (*scan == '\n' && rex.reg_line_lbr)))
{
count++;
if (rex.reg_line_lbr)
ADVANCE_REGINPUT();
else
reg_nextline();
scan = rex.input;
if (got_int)
break;
}
break;
default: // Oh dear. Called inappropriately.
iemsg(_(e_corrupted_regexp_program));
#ifdef DEBUG
printf("Called regrepeat with op code %d\n", OP(p));
#endif
break;
}
rex.input = scan;
return (int)count;
}
| 0
|
224,186
|
bool operator()(const Tensor& lhs, const Tensor& rhs) const {
return std::equal_to<int64_t>{}(lhs.scalar<int64_t>()(),
rhs.scalar<int64_t>()());
}
| 0
|
401,498
|
static int crng_slow_load(const char *cp, size_t len)
{
unsigned long flags;
static unsigned char lfsr = 1;
unsigned char tmp;
unsigned i, max = CHACHA_KEY_SIZE;
const char * src_buf = cp;
char * dest_buf = (char *) &primary_crng.state[4];
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
if (crng_init != 0) {
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
if (len > max)
max = len;
for (i = 0; i < max ; i++) {
tmp = lfsr;
lfsr >>= 1;
if (tmp & 1)
lfsr ^= 0xE1;
tmp = dest_buf[i % CHACHA_KEY_SIZE];
dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
lfsr += (tmp << 3) | (tmp >> 5);
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 1;
}
| 0
|
286,744
|
size_t SWTPM_NVRAM_FileKey_Size(void)
{
return filekey.symkey.userKeyLength;
}
| 0
|
230,992
|
prepare_tagged_break(mrb_state *mrb, uint32_t tag, const struct RProc *proc, mrb_value val)
{
if (break_tag_p((struct RBreak*)mrb->exc, tag)) {
mrb_break_tag_set((struct RBreak*)mrb->exc, tag);
}
else {
mrb->exc = (struct RObject*)break_new(mrb, tag, proc, val);
}
}
| 0
|
234,142
|
get_TAG_name (unsigned long tag)
{
const char *name = get_DW_TAG_name ((unsigned int) tag);
if (name == NULL)
{
static char buffer[100];
if (tag >= DW_TAG_lo_user && tag <= DW_TAG_hi_user)
snprintf (buffer, sizeof (buffer), _("User TAG value: %#lx"), tag);
else
snprintf (buffer, sizeof (buffer), _("Unknown TAG value: %#lx"), tag);
return buffer;
}
return name;
}
| 0
|
432,246
|
static bool prepare_mmio_access(MemoryRegion *mr)
{
return true;
}
| 0
|
508,874
|
bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
{
SELECT_LEX_UNIT *next_unit= NULL;
for (SELECT_LEX_UNIT *un= first_inner_unit();
un;
un= next_unit ? next_unit : un->next_unit())
{
Item_subselect *subquery_predicate= un->item;
next_unit= NULL;
if (subquery_predicate)
{
if (!subquery_predicate->fixed)
{
/*
This subquery was excluded as part of some expression so it is
invisible from all prepared expression.
*/
next_unit= un->next_unit();
un->exclude_level();
if (next_unit)
continue;
break;
}
if (subquery_predicate->substype() == Item_subselect::IN_SUBS)
{
Item_in_subselect *in_subs= (Item_in_subselect*) subquery_predicate;
if (in_subs->is_jtbm_merged)
continue;
}
if (const_only && !subquery_predicate->const_item())
{
/* Skip non-constant subqueries if the caller asked so. */
continue;
}
bool empty_union_result= true;
bool is_correlated_unit= false;
bool first= true;
bool union_plan_saved= false;
/*
If the subquery is a UNION, optimize all the subqueries in the UNION. If
there is no UNION, then the loop will execute once for the subquery.
*/
for (SELECT_LEX *sl= un->first_select(); sl; sl= sl->next_select())
{
JOIN *inner_join= sl->join;
if (first)
first= false;
else
{
if (!union_plan_saved)
{
union_plan_saved= true;
if (un->save_union_explain(un->thd->lex->explain))
return true; /* Failure */
}
}
if (!inner_join)
continue;
SELECT_LEX *save_select= un->thd->lex->current_select;
ulonglong save_options;
int res;
/* We need only 1 row to determine existence */
un->set_limit(un->global_parameters());
un->thd->lex->current_select= sl;
save_options= inner_join->select_options;
if (options & SELECT_DESCRIBE)
{
/* Optimize the subquery in the context of EXPLAIN. */
sl->set_explain_type(FALSE);
sl->options|= SELECT_DESCRIBE;
inner_join->select_options|= SELECT_DESCRIBE;
}
if ((res= inner_join->optimize()))
return TRUE;
if (!inner_join->cleaned)
sl->update_used_tables();
sl->update_correlated_cache();
is_correlated_unit|= sl->is_correlated;
inner_join->select_options= save_options;
un->thd->lex->current_select= save_select;
Explain_query *eq;
if ((eq= inner_join->thd->lex->explain))
{
Explain_select *expl_sel;
if ((expl_sel= eq->get_select(inner_join->select_lex->select_number)))
{
sl->set_explain_type(TRUE);
expl_sel->select_type= sl->type;
}
}
if (empty_union_result)
{
/*
If at least one subquery in a union is non-empty, the UNION result
is non-empty. If there is no UNION, the only subquery is non-empy.
*/
empty_union_result= inner_join->empty_result();
}
if (res)
return TRUE;
}
if (empty_union_result)
subquery_predicate->no_rows_in_result();
if (is_correlated_unit)
{
/*
Some parts of UNION are not correlated. This means we will need to
re-execute the whole UNION every time. Mark all parts of the UNION
as correlated so that they are prepared to be executed multiple
times (if we don't do that, some part of the UNION may free its
execution data at the end of first execution and crash on the second
execution)
*/
for (SELECT_LEX *sl= un->first_select(); sl; sl= sl->next_select())
sl->uncacheable |= UNCACHEABLE_DEPENDENT;
}
else
un->uncacheable&= ~UNCACHEABLE_DEPENDENT;
subquery_predicate->is_correlated= is_correlated_unit;
}
}
return FALSE;
}
| 0
|
384,792
|
getvcol_nolist(pos_T *posp)
{
int list_save = curwin->w_p_list;
colnr_T vcol;
curwin->w_p_list = FALSE;
if (posp->coladd)
getvvcol(curwin, posp, NULL, &vcol, NULL);
else
getvcol(curwin, posp, NULL, &vcol, NULL);
curwin->w_p_list = list_save;
return vcol;
}
| 0
|
292,163
|
void LinkResolver::runtime_resolve_special_method(CallInfo& result,
const LinkInfo& link_info,
const methodHandle& resolved_method,
Handle recv, TRAPS) {
Klass* resolved_klass = link_info.resolved_klass();
// resolved method is selected method unless we have an old-style lookup
// for a superclass method
// Invokespecial for a superinterface, resolved method is selected method,
// no checks for shadowing
methodHandle sel_method(THREAD, resolved_method());
if (link_info.check_access() &&
// check if the method is not <init>
resolved_method->name() != vmSymbols::object_initializer_name()) {
// check if this is an old-style super call and do a new lookup if so
// a) check if ACC_SUPER flag is set for the current class
Klass* current_klass = link_info.current_klass();
if ((current_klass->is_super() || !AllowNonVirtualCalls) &&
// b) check if the class of the resolved_klass is a superclass
// (not supertype in order to exclude interface classes) of the current class.
// This check is not performed for super.invoke for interface methods
// in super interfaces.
current_klass->is_subclass_of(resolved_klass) &&
current_klass != resolved_klass
) {
// Lookup super method
Klass* super_klass = current_klass->super();
sel_method = lookup_instance_method_in_klasses(super_klass,
resolved_method->name(),
resolved_method->signature(),
Klass::find_private, CHECK);
// check if found
if (sel_method.is_null()) {
ResourceMark rm(THREAD);
stringStream ss;
ss.print("'");
resolved_method->print_external_name(&ss);
ss.print("'");
THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
// check loader constraints if found a different method
} else if (sel_method() != resolved_method()) {
check_method_loader_constraints(link_info, sel_method, "method", CHECK);
}
}
// Check that the class of objectref (the receiver) is the current class or interface,
// or a subtype of the current class or interface (the sender), otherwise invokespecial
// throws IllegalAccessError.
// The verifier checks that the sender is a subtype of the class in the I/MR operand.
// The verifier also checks that the receiver is a subtype of the sender, if the sender is
// a class. If the sender is an interface, the check has to be performed at runtime.
InstanceKlass* sender = InstanceKlass::cast(current_klass);
sender = sender->is_anonymous() ? sender->host_klass() : sender;
if (sender->is_interface() && recv.not_null()) {
Klass* receiver_klass = recv->klass();
if (!receiver_klass->is_subtype_of(sender)) {
ResourceMark rm(THREAD);
char buf[500];
jio_snprintf(buf, sizeof(buf),
"Receiver class %s must be the current class or a subtype of interface %s",
receiver_klass->external_name(),
sender->external_name());
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), buf);
}
}
}
// check if not static
if (sel_method->is_static()) {
ResourceMark rm(THREAD);
stringStream ss;
ss.print("Expecting non-static method '");
resolved_method->print_external_name(&ss);
ss.print("'");
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
// check if abstract
if (sel_method->is_abstract()) {
ResourceMark rm(THREAD);
stringStream ss;
ss.print("'");
Method::print_external_name(&ss, resolved_klass, sel_method->name(), sel_method->signature());
ss.print("'");
THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
}
if (log_develop_is_enabled(Trace, itables)) {
trace_method_resolution("invokespecial selected method: resolved-class:",
resolved_klass, resolved_klass, sel_method, true);
}
// setup result
result.set_static(resolved_klass, sel_method, CHECK);
}
| 0
|
244,312
|
static u32 sgpd_size_entry(u32 grouping_type, void *entry)
{
switch (grouping_type) {
case GF_ISOM_SAMPLE_GROUP_ROLL:
case GF_ISOM_SAMPLE_GROUP_PROL:
return 2;
case GF_ISOM_SAMPLE_GROUP_TELE:
case GF_ISOM_SAMPLE_GROUP_RAP:
case GF_ISOM_SAMPLE_GROUP_SAP:
case GF_ISOM_SAMPLE_GROUP_SYNC:
return 1;
case GF_ISOM_SAMPLE_GROUP_TSCL:
return 20;
case GF_ISOM_SAMPLE_GROUP_LBLI:
return 2;
case GF_ISOM_SAMPLE_GROUP_TSAS:
case GF_ISOM_SAMPLE_GROUP_STSA:
return 0;
case GF_ISOM_SAMPLE_GROUP_SEIG:
{
GF_CENCSampleEncryptionGroupEntry *seig = (GF_CENCSampleEncryptionGroupEntry *)entry;
Bool use_mkey = seig->key_info[0] ? GF_TRUE : GF_FALSE;
if (use_mkey) {
return 3 + seig->key_info_size-1;
}
return seig->key_info_size; //== 3 + (seig->key_info_size-3);
}
case GF_ISOM_SAMPLE_GROUP_OINF:
return gf_isom_oinf_size_entry(entry);
case GF_ISOM_SAMPLE_GROUP_LINF:
return gf_isom_linf_size_entry(entry);
case GF_ISOM_SAMPLE_GROUP_SPOR:
{
GF_SubpictureOrderEntry *spor = (GF_SubpictureOrderEntry *)entry;
u32 s = 2 + 2*spor->num_subpic_ref_idx;
if (spor->subpic_id_info_flag) {
s += 3;
}
return s;
}
case GF_ISOM_SAMPLE_GROUP_SULM:
{
GF_SubpictureLayoutMapEntry *sulm = (GF_SubpictureLayoutMapEntry *) entry;
return 6 + 2*sulm->nb_entries;
}
default:
return ((GF_DefaultSampleGroupDescriptionEntry *)entry)->length;
}
}
| 0
|
226,344
|
GF_Err tmin_box_size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
| 0
|
301,486
|
similar_chars(slang_T *slang, int c1, int c2)
{
int m1, m2;
char_u buf[MB_MAXBYTES + 1];
hashitem_T *hi;
if (c1 >= 256)
{
buf[mb_char2bytes(c1, buf)] = 0;
hi = hash_find(&slang->sl_map_hash, buf);
if (HASHITEM_EMPTY(hi))
m1 = 0;
else
m1 = mb_ptr2char(hi->hi_key + STRLEN(hi->hi_key) + 1);
}
else
m1 = slang->sl_map_array[c1];
if (m1 == 0)
return FALSE;
if (c2 >= 256)
{
buf[mb_char2bytes(c2, buf)] = 0;
hi = hash_find(&slang->sl_map_hash, buf);
if (HASHITEM_EMPTY(hi))
m2 = 0;
else
m2 = mb_ptr2char(hi->hi_key + STRLEN(hi->hi_key) + 1);
}
else
m2 = slang->sl_map_array[c2];
return m1 == m2;
}
| 0
|
380,946
|
ins_del(void)
{
int temp;
if (stop_arrow() == FAIL)
return;
if (gchar_cursor() == NUL) // delete newline
{
temp = curwin->w_cursor.col;
if (!can_bs(BS_EOL) // only if "eol" included
|| do_join(2, FALSE, TRUE, FALSE, FALSE) == FAIL)
vim_beep(BO_BS);
else
{
curwin->w_cursor.col = temp;
// Adjust orig_line_count in case more lines have been deleted than
// have been added. That makes sure, that open_line() later
// can access all buffer lines correctly
if (State & VREPLACE_FLAG &&
orig_line_count > curbuf->b_ml.ml_line_count)
orig_line_count = curbuf->b_ml.ml_line_count;
}
}
else if (del_char(FALSE) == FAIL) // delete char under cursor
vim_beep(BO_BS);
did_ai = FALSE;
did_si = FALSE;
can_si = FALSE;
can_si_back = FALSE;
AppendCharToRedobuff(K_DEL);
}
| 0
|
246,699
|
u32 parse_store_mode(char *arg_val, u32 opt)
{
do_save = GF_TRUE;
if ((opt == 0) || (opt == 1)) {
interleaving_time = atof(arg_val) / 1000;
if (!interleaving_time) do_flat = 2;
open_edit = GF_TRUE;
no_inplace = GF_TRUE;
if (opt==1) old_interleave = 1;
} else if (opt==2) {
interleaving_time = atof(arg_val);
do_frag = GF_TRUE;
} else {
force_new = 2;
interleaving_time = 0.5;
do_flat = 1;
}
return 0;
}
| 0
|
473,906
|
strhash(st_data_t arg)
{
register const char *string = (const char *)arg;
return st_hash(string, strlen(string), FNV1_32A_INIT);
}
| 0
|
353,176
|
static void setSat(unsigned char rIn, unsigned char gIn, unsigned char bIn, int sat,
unsigned char *rOut, unsigned char *gOut, unsigned char *bOut) {
int rgbMin, rgbMid, rgbMax;
unsigned char *minOut, *midOut, *maxOut;
if (rIn < gIn) {
rgbMin = rIn; minOut = rOut;
rgbMid = gIn; midOut = gOut;
} else {
rgbMin = gIn; minOut = gOut;
rgbMid = rIn; midOut = rOut;
}
if (bIn > rgbMid) {
rgbMax = bIn; maxOut = bOut;
} else if (bIn > rgbMin) {
rgbMax = rgbMid; maxOut = midOut;
rgbMid = bIn; midOut = bOut;
} else {
rgbMax = rgbMid; maxOut = midOut;
rgbMid = rgbMin; midOut = minOut;
rgbMin = bIn; minOut = bOut;
}
if (rgbMax > rgbMin) {
*midOut = (unsigned char)((rgbMid - rgbMin) * sat) / (rgbMax - rgbMin);
*maxOut = (unsigned char)sat;
} else {
*midOut = *maxOut = 0;
}
*minOut = 0;
}
| 0
|
369,914
|
static int proc_fd_permission(struct inode *inode, int mask)
{
int rv = generic_permission(inode, mask);
if (rv == 0)
return 0;
if (task_pid(current) == proc_pid(inode))
rv = 0;
return rv;
}
| 0
|
224,210
|
~TopicTree() {
delete root;
}
| 0
|
233,948
|
void DocumentSourceUnionWith::reattachToOperationContext(OperationContext* opCtx) {
// We have a pipeline we're going to be executing across multiple calls to getNext(), so we
// use Pipeline::reattachToOperationContext() to take care of updating the Pipeline's
// ExpressionContext.
if (_pipeline) {
_pipeline->reattachToOperationContext(opCtx);
}
}
| 0
|
312,443
|
qf_free_all(win_T *wp)
{
int i;
qf_info_T *qi = &ql_info;
if (wp != NULL)
{
// location list
ll_free_all(&wp->w_llist);
ll_free_all(&wp->w_llist_ref);
}
else
// quickfix list
for (i = 0; i < qi->qf_listcount; ++i)
qf_free(qf_get_list(qi, i));
}
| 0
|
473,946
|
get_case_fold_codes_by_str(OnigCaseFoldType flag,
const OnigUChar* p, const OnigUChar* end, OnigCaseFoldCodeItem items[],
OnigEncoding enc)
{
return onigenc_unicode_get_case_fold_codes_by_str(enc, flag, p, end, items);
}
| 0
|
229,302
|
void cql_server::response::write_short_bytes(bytes b)
{
write_short(cast_if_fits<uint16_t>(b.size()));
_body.write(b);
}
| 0
|
436,134
|
*/
static void io_uring_del_tctx_node(unsigned long index)
{
struct io_uring_task *tctx = current->io_uring;
struct io_tctx_node *node;
if (!tctx)
return;
node = xa_erase(&tctx->xa, index);
if (!node)
return;
WARN_ON_ONCE(current != node->task);
WARN_ON_ONCE(list_empty(&node->ctx_node));
mutex_lock(&node->ctx->uring_lock);
list_del(&node->ctx_node);
mutex_unlock(&node->ctx->uring_lock);
if (tctx->last == node->ctx)
tctx->last = NULL;
kfree(node);
| 0
|
294,397
|
date_s__jisx0301(int argc, VALUE *argv, VALUE klass)
{
VALUE str, opt;
rb_scan_args(argc, argv, "1:", &str, &opt);
check_limit(str, opt);
return date__jisx0301(str);
}
| 0
|
421,400
|
static void nl(void)
{
if (minify < 2)
putchar('\n');
}
| 0
|
279,920
|
append_redir(
char_u *buf,
int buflen,
char_u *opt,
char_u *fname)
{
char_u *p;
char_u *end;
end = buf + STRLEN(buf);
// find "%s"
for (p = opt; (p = vim_strchr(p, '%')) != NULL; ++p)
{
if (p[1] == 's') // found %s
break;
if (p[1] == '%') // skip %%
++p;
}
if (p != NULL)
{
#ifdef MSWIN
*end++ = ' '; // not really needed? Not with sh, ksh or bash
#endif
vim_snprintf((char *)end, (size_t)(buflen - (end - buf)),
(char *)opt, (char *)fname);
}
else
vim_snprintf((char *)end, (size_t)(buflen - (end - buf)),
#ifdef FEAT_QUICKFIX
" %s %s",
#else
" %s%s", // " > %s" causes problems on Amiga
#endif
(char *)opt, (char *)fname);
}
| 0
|
244,350
|
GF_Err fpar_box_size(GF_Box *s)
{
FilePartitionBox *ptr = (FilePartitionBox *)s;
ptr->size += 13 + (ptr->version ? 8 : 4);
if (ptr->scheme_specific_info)
ptr->size += strlen(ptr->scheme_specific_info);
ptr->size+= ptr->nb_entries * 6;
return GF_OK;
}
| 0
|
207,754
|
static size_t push_pipe(struct iov_iter *i, size_t size,
int *iter_headp, size_t *offp)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_tail = pipe->tail;
unsigned int p_mask = pipe->ring_size - 1;
unsigned int iter_head;
size_t off;
ssize_t left;
if (unlikely(size > i->count))
size = i->count;
if (unlikely(!size))
return 0;
left = size;
data_start(i, &iter_head, &off);
*iter_headp = iter_head;
*offp = off;
if (off) {
left -= PAGE_SIZE - off;
if (left <= 0) {
pipe->bufs[iter_head & p_mask].len += size;
return size;
}
pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
iter_head++;
}
while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
struct page *page = alloc_page(GFP_USER);
if (!page)
break;
buf->ops = &default_pipe_buf_ops;
buf->page = page;
buf->offset = 0;
buf->len = min_t(ssize_t, left, PAGE_SIZE);
left -= buf->len;
iter_head++;
pipe->head = iter_head;
if (left == 0)
return size;
}
return size - left;
}
| 1
|
275,937
|
unsigned uECC_curve_num_bytes(uECC_Curve curve) {
return curve->num_bytes;
}
| 0
|
503,864
|
SCM_DEFINE (scm_umask, "umask", 0, 1, 0,
(SCM mode),
"If @var{mode} is omitted, returns a decimal number representing the current\n"
"file creation mask. Otherwise the file creation mask is set to\n"
"@var{mode} and the previous value is returned.\n\n"
"E.g., @code{(umask #o022)} sets the mask to octal 22, decimal 18.")
#define FUNC_NAME s_scm_umask
{
mode_t mask;
if (SCM_UNBNDP (mode))
{
mask = umask (0);
umask (mask);
}
else
{
mask = umask (scm_to_uint (mode));
}
return scm_from_uint (mask);
}
| 0
|
430,401
|
static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
u64 *key_attrs, bool inner,
const struct nlattr **a, bool is_mask,
bool log)
{
int err;
const struct nlattr *encap;
if (!is_mask)
err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
a, log);
else
err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
a, log);
if (err <= 0)
return err;
err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
if (err)
return err;
*key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
*key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
*key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
encap = a[OVS_KEY_ATTR_ENCAP];
if (!is_mask)
err = parse_flow_nlattrs(encap, a, key_attrs, log);
else
err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);
return err;
}
| 0
|
434,094
|
alist_new(void)
{
curwin->w_alist = ALLOC_ONE(alist_T);
if (curwin->w_alist == NULL)
{
curwin->w_alist = &global_alist;
++global_alist.al_refcount;
}
else
{
curwin->w_alist->al_refcount = 1;
curwin->w_alist->id = ++max_alist_id;
alist_init(curwin->w_alist);
}
}
| 0
|
247,692
|
const std::string& clientSession() const { return client_session_; }
| 0
|
221,463
|
check_parental_controls (FlatpakDecomposed *app_ref,
FlatpakDeploy *deploy,
GCancellable *cancellable,
GError **error)
{
#ifdef HAVE_LIBMALCONTENT
g_autoptr(MctManager) manager = NULL;
g_autoptr(MctAppFilter) app_filter = NULL;
g_autoptr(GDBusConnection) system_bus = NULL;
g_autoptr(GError) local_error = NULL;
g_autoptr(GDesktopAppInfo) app_info = NULL;
gboolean allowed = FALSE;
system_bus = g_bus_get_sync (G_BUS_TYPE_SYSTEM, NULL, error);
if (system_bus == NULL)
return FALSE;
manager = mct_manager_new (system_bus);
app_filter = mct_manager_get_app_filter (manager, getuid (),
MCT_GET_APP_FILTER_FLAGS_INTERACTIVE,
cancellable, &local_error);
if (g_error_matches (local_error, MCT_APP_FILTER_ERROR, MCT_APP_FILTER_ERROR_DISABLED))
{
g_debug ("Skipping parental controls check for %s since parental "
"controls are disabled globally", flatpak_decomposed_get_ref (app_ref));
return TRUE;
}
else if (g_error_matches (local_error, G_DBUS_ERROR, G_DBUS_ERROR_SERVICE_UNKNOWN) ||
g_error_matches (local_error, G_DBUS_ERROR, G_DBUS_ERROR_NAME_HAS_NO_OWNER))
{
g_debug ("Skipping parental controls check for %s since a required "
"service was not found", flatpak_decomposed_get_ref (app_ref));
return TRUE;
}
else if (local_error != NULL)
{
g_propagate_error (error, g_steal_pointer (&local_error));
return FALSE;
}
/* Always filter by app ID. Additionally, filter by app info (which runs
* multiple checks, including whether the app ID, executable path and
* content types are allowed) if available. If the flatpak contains
* multiple .desktop files, we use the main one. The app ID check is
* always done, as the binary executed by `flatpak run` isn’t necessarily
* extracted from a .desktop file. */
allowed = mct_app_filter_is_flatpak_ref_allowed (app_filter, flatpak_decomposed_get_ref (app_ref));
/* Look up the app’s main .desktop file. */
if (deploy != NULL && allowed)
{
g_autoptr(GFile) deploy_dir = NULL;
const char *deploy_path;
g_autofree char *desktop_file_name = NULL;
g_autofree char *desktop_file_path = NULL;
g_autofree char *app_id = flatpak_decomposed_dup_id (app_ref);
deploy_dir = flatpak_deploy_get_dir (deploy);
deploy_path = flatpak_file_get_path_cached (deploy_dir);
desktop_file_name = g_strconcat (app_id, ".desktop", NULL);
desktop_file_path = g_build_path (G_DIR_SEPARATOR_S,
deploy_path,
"export",
"share",
"applications",
desktop_file_name,
NULL);
app_info = g_desktop_app_info_new_from_filename (desktop_file_path);
}
if (app_info != NULL)
allowed = allowed && mct_app_filter_is_appinfo_allowed (app_filter,
G_APP_INFO (app_info));
if (!allowed)
return flatpak_fail_error (error, FLATPAK_ERROR_PERMISSION_DENIED,
/* Translators: The placeholder is for an app ref. */
_("Running %s is not allowed by the policy set by your administrator"),
flatpak_decomposed_get_ref (app_ref));
#endif /* HAVE_LIBMALCONTENT */
return TRUE;
}
| 0
|
230,625
|
void fill_luma_motion_vector_predictors(base_context* ctx,
const slice_segment_header* shdr,
de265_image* img,
int xC,int yC,int nCS,int xP,int yP,
int nPbW,int nPbH, int l,
int refIdx, int partIdx,
MotionVector out_mvpList[2])
{
// 8.5.3.1.6: derive two spatial vector predictors A (0) and B (1)
uint8_t availableFlagLXN[2];
MotionVector mvLXN[2];
derive_spatial_luma_vector_prediction(ctx, img, shdr, xC,yC, nCS, xP,yP,
nPbW,nPbH, l, refIdx, partIdx,
availableFlagLXN, mvLXN);
// 8.5.3.1.7: if we only have one spatial vector or both spatial vectors are the same,
// derive a temporal predictor
uint8_t availableFlagLXCol;
MotionVector mvLXCol;
if (availableFlagLXN[0] &&
availableFlagLXN[1] &&
(mvLXN[0].x != mvLXN[1].x || mvLXN[0].y != mvLXN[1].y)) {
availableFlagLXCol = 0;
}
else {
derive_temporal_luma_vector_prediction(ctx, img, shdr,
xP,yP, nPbW,nPbH, refIdx,l,
&mvLXCol, &availableFlagLXCol);
}
// --- build candidate vector list with exactly two entries ---
int numMVPCandLX=0;
// spatial predictor A
if (availableFlagLXN[0])
{
out_mvpList[numMVPCandLX++] = mvLXN[0];
}
// spatial predictor B (if not same as A)
if (availableFlagLXN[1] &&
(!availableFlagLXN[0] || // in case A in not available, but mvLXA initialized to same as mvLXB
(mvLXN[0].x != mvLXN[1].x || mvLXN[0].y != mvLXN[1].y)))
{
out_mvpList[numMVPCandLX++] = mvLXN[1];
}
// temporal predictor
if (availableFlagLXCol)
{
out_mvpList[numMVPCandLX++] = mvLXCol;
}
// fill with zero predictors
while (numMVPCandLX<2) {
out_mvpList[numMVPCandLX].x = 0;
out_mvpList[numMVPCandLX].y = 0;
numMVPCandLX++;
}
assert(numMVPCandLX==2);
}
| 0
|
337,853
|
struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
const struct sctp_association *asoc,
struct sock *sk, gfp_t gfp)
{
struct sctp_chunk *retval;
retval = kmem_cache_zalloc(sctp_chunk_cachep, gfp);
if (!retval)
goto nodata;
if (!sk)
pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb);
INIT_LIST_HEAD(&retval->list);
retval->skb = skb;
retval->asoc = (struct sctp_association *)asoc;
retval->singleton = 1;
retval->fast_retransmit = SCTP_CAN_FRTX;
/* Polish the bead hole. */
INIT_LIST_HEAD(&retval->transmitted_list);
INIT_LIST_HEAD(&retval->frag_list);
SCTP_DBG_OBJCNT_INC(chunk);
refcount_set(&retval->refcnt, 1);
nodata:
return retval;
}
| 0
|
488,408
|
static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pud_t *pud;
unsigned long next;
int err;
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
if (err)
break;
} while (pud++, addr = next, addr != end);
return err;
}
| 0
|
195,231
|
s32 gf_avc_parse_nalu(GF_BitStream *bs, AVCState *avc)
{
u8 idr_flag;
s32 slice, ret;
u32 nal_hdr;
AVCSliceInfo n_state;
gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
nal_hdr = gf_bs_read_u8(bs);
slice = 0;
memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo));
avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F;
n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3;
idr_flag = 0;
switch (n_state.nal_unit_type) {
case GF_AVC_NALU_ACCESS_UNIT:
case GF_AVC_NALU_END_OF_SEQ:
case GF_AVC_NALU_END_OF_STREAM:
ret = 1;
break;
case GF_AVC_NALU_SVC_SLICE:
SVC_ReadNal_header_extension(bs, &n_state.NalHeader);
// slice buffer - read the info and compare.
/*ret = */svc_parse_slice(bs, avc, &n_state);
if (avc->s_info.nal_ref_idc) {
n_state.poc_lsb_prev = avc->s_info.poc_lsb;
n_state.poc_msb_prev = avc->s_info.poc_msb;
}
avc_compute_poc(&n_state);
if (avc->s_info.poc != n_state.poc) {
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return 1;
}
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return 0;
case GF_AVC_NALU_SVC_PREFIX_NALU:
SVC_ReadNal_header_extension(bs, &n_state.NalHeader);
return 0;
case GF_AVC_NALU_IDR_SLICE:
case GF_AVC_NALU_NON_IDR_SLICE:
case GF_AVC_NALU_DP_A_SLICE:
case GF_AVC_NALU_DP_B_SLICE:
case GF_AVC_NALU_DP_C_SLICE:
slice = 1;
/* slice buffer - read the info and compare.*/
ret = avc_parse_slice(bs, avc, idr_flag, &n_state);
if (ret < 0) return ret;
ret = 0;
if (
((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE))
&& (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE)
) {
break;
}
if (avc->s_info.frame_num != n_state.frame_num) {
ret = 1;
break;
}
if (avc->s_info.field_pic_flag != n_state.field_pic_flag) {
ret = 1;
break;
}
if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) &&
(!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) {
ret = 1;
break;
}
assert(avc->s_info.sps);
if (avc->s_info.sps->poc_type == n_state.sps->poc_type) {
if (!avc->s_info.sps->poc_type) {
if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) {
ret = 1;
break;
}
if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) {
ret = 1;
break;
}
}
else if (avc->s_info.sps->poc_type == 1) {
if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) {
ret = 1;
break;
}
if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) {
ret = 1;
break;
}
}
}
if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) {
if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/
ret = 1;
break;
}
else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/
ret = 1;
break;
}
}
break;
case GF_AVC_NALU_SEQ_PARAM:
avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_PIC_PARAM:
avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SEQ_PARAM_EXT:
avc->last_ps_idx = (s32) gf_bs_read_ue(bs);
if (avc->last_ps_idx < 0) return -1;
return 0;
case GF_AVC_NALU_SEI:
case GF_AVC_NALU_FILLER_DATA:
return 0;
default:
if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1;
//To detect change of AU when multiple sps and pps in stream
else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE)
ret = 1;
else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE)
ret = 1;
else
ret = 0;
break;
}
/* save _prev values */
if (ret && avc->s_info.sps) {
n_state.frame_num_offset_prev = avc->s_info.frame_num_offset;
if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0))
n_state.frame_num_prev = avc->s_info.frame_num;
if (avc->s_info.nal_ref_idc) {
n_state.poc_lsb_prev = avc->s_info.poc_lsb;
n_state.poc_msb_prev = avc->s_info.poc_msb;
}
}
if (slice)
avc_compute_poc(&n_state);
memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo));
return ret;
}
| 1
|
195,092
|
Literal *hermes::evalUnaryOperator(
UnaryOperatorInst::OpKind kind,
IRBuilder &builder,
Literal *operand) {
switch (kind) {
case UnaryOperatorInst::OpKind::MinusKind:
// Negate constant integers.
switch (operand->getKind()) {
case ValueKind::LiteralNumberKind:
if (auto *literalNum = llvh::dyn_cast<LiteralNumber>(operand)) {
auto V = -literalNum->getValue();
return builder.getLiteralNumber(V);
}
break;
case ValueKind::LiteralUndefinedKind:
return builder.getLiteralNaN();
case ValueKind::LiteralBoolKind:
if (evalIsTrue(builder, operand)) {
return builder.getLiteralNumber(-1);
} else { // evalIsFalse(operand)
return builder.getLiteralNegativeZero();
}
case ValueKind::LiteralNullKind:
return builder.getLiteralNegativeZero();
default:
break;
}
break;
case UnaryOperatorInst::OpKind::TypeofKind:
switch (operand->getKind()) {
case ValueKind::GlobalObjectKind:
case ValueKind::LiteralNullKind:
return builder.getLiteralString("object");
case ValueKind::LiteralUndefinedKind:
return builder.getLiteralString("undefined");
case ValueKind::LiteralBoolKind:
return builder.getLiteralString("boolean");
case ValueKind::LiteralNumberKind:
return builder.getLiteralString("number");
case ValueKind::LiteralStringKind:
return builder.getLiteralString("string");
default:
llvm_unreachable("Invalid literal kind.");
}
break;
case UnaryOperatorInst::OpKind::BangKind:
if (evalIsTrue(builder, operand)) {
return builder.getLiteralBool(false);
}
if (evalIsFalse(builder, operand)) {
return builder.getLiteralBool(true);
}
break;
case UnaryOperatorInst::OpKind::VoidKind:
return builder.getLiteralUndefined();
default:
break;
}
return nullptr;
}
| 1
|
338,112
|
void WasmBinaryWriter::writeGlobals() {
if (importInfo->getNumDefinedGlobals() == 0) {
return;
}
BYN_TRACE("== writeglobals\n");
auto start = startSection(BinaryConsts::Section::Global);
// Count and emit the total number of globals after tuple globals have been
// expanded into their constituent parts.
Index num = 0;
ModuleUtils::iterDefinedGlobals(
*wasm, [&num](Global* global) { num += global->type.size(); });
o << U32LEB(num);
ModuleUtils::iterDefinedGlobals(*wasm, [&](Global* global) {
BYN_TRACE("write one\n");
size_t i = 0;
for (const auto& t : global->type) {
writeType(t);
o << U32LEB(global->mutable_);
if (global->type.size() == 1) {
writeExpression(global->init);
} else {
writeExpression(global->init->cast<TupleMake>()->operands[i]);
}
o << int8_t(BinaryConsts::End);
++i;
}
});
finishSection(start);
}
| 0
|
244,092
|
static void ctrn_write_ctso(GF_TrackFragmentRunBox *ctrn, GF_BitStream *bs, u32 ctso, u32 field_size)
{
if (!field_size) return;
if (ctrn->ctso_multiplier) {
gf_bs_write_int(bs, ctso / ctrn->ctso_multiplier, field_size);
} else {
gf_bs_write_int(bs, ctso, field_size);
}
}
| 0
|
487,656
|
asmlinkage long sys_times(struct tms __user * tbuf)
{
/*
* In the SMP world we might just be unlucky and have one of
* the times increment as we use it. Since the value is an
* atomically safe type this is just fine. Conceptually its
* as if the syscall took an instant longer to occur.
*/
if (tbuf) {
struct tms tmp;
struct task_struct *tsk = current;
struct task_struct *t;
cputime_t utime, stime, cutime, cstime;
spin_lock_irq(&tsk->sighand->siglock);
utime = tsk->signal->utime;
stime = tsk->signal->stime;
t = tsk;
do {
utime = cputime_add(utime, t->utime);
stime = cputime_add(stime, t->stime);
t = next_thread(t);
} while (t != tsk);
cutime = tsk->signal->cutime;
cstime = tsk->signal->cstime;
spin_unlock_irq(&tsk->sighand->siglock);
tmp.tms_utime = cputime_to_clock_t(utime);
tmp.tms_stime = cputime_to_clock_t(stime);
tmp.tms_cutime = cputime_to_clock_t(cutime);
tmp.tms_cstime = cputime_to_clock_t(cstime);
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
| 0
|
379,686
|
R_API RAnalVar *r_anal_var_get_dst_var(RAnalVar *var) {
r_return_val_if_fail (var, NULL);
RAnalVarAccess *acc;
r_vector_foreach (&var->accesses, acc) {
if (!(acc->type & R_ANAL_VAR_ACCESS_TYPE_READ)) {
continue;
}
ut64 addr = var->fcn->addr + acc->offset;
RPVector *used_vars = r_anal_function_get_vars_used_at (var->fcn, addr);
void **it;
r_pvector_foreach (used_vars, it) {
RAnalVar *used_var = *it;
if (used_var == var) {
continue;
}
RAnalVarAccess *other_acc = r_anal_var_get_access_at (used_var, addr);
if (other_acc && other_acc->type & R_ANAL_VAR_ACCESS_TYPE_WRITE) {
return used_var;
}
}
}
return NULL;
}
| 0
|
293,739
|
static RList *entries(RBinFile *bf) {
RList *ret;
RBinObject *obj = bf ? bf->o : NULL;
if (!obj || !obj->bin_obj || !(ret = r_list_newf (free))) {
return NULL;
}
RKernelCacheObj *kobj = (RKernelCacheObj*) obj->bin_obj;
ut64 entry_vaddr = kobj->mach0->entry;
if (kobj->pa2va_exec <= entry_vaddr) {
ut64 entry_paddr = entry_vaddr - kobj->pa2va_exec;
RBinAddr *ba = newEntry (entry_paddr, entry_vaddr, 0);
if (ba) {
r_list_append (ret, ba);
}
}
process_constructors (kobj, kobj->mach0, ret, 0, true, R_K_CONSTRUCTOR_TO_ENTRY, NULL);
return ret;
}
| 0
|
384,873
|
pathcmp(const char *p, const char *q, int maxlen)
{
int i, j;
int c1, c2;
const char *s = NULL;
for (i = 0, j = 0; maxlen < 0 || (i < maxlen && j < maxlen);)
{
c1 = PTR2CHAR((char_u *)p + i);
c2 = PTR2CHAR((char_u *)q + j);
// End of "p": check if "q" also ends or just has a slash.
if (c1 == NUL)
{
if (c2 == NUL) // full match
return 0;
s = q;
i = j;
break;
}
// End of "q": check if "p" just has a slash.
if (c2 == NUL)
{
s = p;
break;
}
if ((p_fic ? MB_TOUPPER(c1) != MB_TOUPPER(c2) : c1 != c2)
#ifdef BACKSLASH_IN_FILENAME
// consider '/' and '\\' to be equal
&& !((c1 == '/' && c2 == '\\')
|| (c1 == '\\' && c2 == '/'))
#endif
)
{
if (vim_ispathsep(c1))
return -1;
if (vim_ispathsep(c2))
return 1;
return p_fic ? MB_TOUPPER(c1) - MB_TOUPPER(c2)
: c1 - c2; // no match
}
i += mb_ptr2len((char_u *)p + i);
j += mb_ptr2len((char_u *)q + j);
}
if (s == NULL) // "i" or "j" ran into "maxlen"
return 0;
c1 = PTR2CHAR((char_u *)s + i);
c2 = PTR2CHAR((char_u *)s + i + mb_ptr2len((char_u *)s + i));
// ignore a trailing slash, but not "//" or ":/"
if (c2 == NUL
&& i > 0
&& !after_pathsep((char_u *)s, (char_u *)s + i)
#ifdef BACKSLASH_IN_FILENAME
&& (c1 == '/' || c1 == '\\')
#else
&& c1 == '/'
#endif
)
return 0; // match with trailing slash
if (s == q)
return -1; // no match
return 1;
}
| 0
|
459,148
|
static void tcf_block_offload_init(struct flow_block_offload *bo,
struct net_device *dev, struct Qdisc *sch,
enum flow_block_command command,
enum flow_block_binder_type binder_type,
struct flow_block *flow_block,
bool shared, struct netlink_ext_ack *extack)
{
bo->net = dev_net(dev);
bo->command = command;
bo->binder_type = binder_type;
bo->block = flow_block;
bo->block_shared = shared;
bo->extack = extack;
bo->sch = sch;
bo->cb_list_head = &flow_block->cb_list;
INIT_LIST_HEAD(&bo->cb_list);
}
| 0
|
455,283
|
subshell_exit (s)
int s;
{
fflush (stdout);
fflush (stderr);
/* Do trap[0] if defined. Allow it to override the exit status
passed to us. */
if (signal_is_trapped (0))
s = run_exit_trap ();
sh_exit (s);
}
| 0
|
220,396
|
mrb_ary_aget(mrb_state *mrb, mrb_value self)
{
struct RArray *a = mrb_ary_ptr(self);
mrb_int i;
mrb_int len, alen;
mrb_value index;
if (mrb_get_argc(mrb) == 1) {
index = mrb_get_arg1(mrb);
switch (mrb_type(index)) {
/* a[n..m] */
case MRB_TT_RANGE:
if (mrb_range_beg_len(mrb, index, &i, &len, ARY_LEN(a), TRUE) == MRB_RANGE_OK) {
return ary_subseq(mrb, a, i, len);
}
else {
return mrb_nil_value();
}
case MRB_TT_INTEGER:
return mrb_ary_ref(mrb, self, mrb_integer(index));
default:
return mrb_ary_ref(mrb, self, aget_index(mrb, index));
}
}
mrb_get_args(mrb, "oi", &index, &len);
i = aget_index(mrb, index);
alen = ARY_LEN(a);
if (i < 0) i += alen;
if (i < 0 || alen < i) return mrb_nil_value();
if (len < 0) return mrb_nil_value();
if (alen == i) return mrb_ary_new(mrb);
if (len > alen - i) len = alen - i;
return ary_subseq(mrb, a, i, len);
}
| 0
|
386,566
|
void DL_Dxf::addDimAngular(DL_CreationInterface* creationInterface) {
DL_DimensionData d = getDimData();
// angular dimension:
DL_DimAngular2LData da(
// definition point 1
getRealValue(13, 0.0),
getRealValue(23, 0.0),
getRealValue(33, 0.0),
// definition point 2
getRealValue(14, 0.0),
getRealValue(24, 0.0),
getRealValue(34, 0.0),
// definition point 3
getRealValue(15, 0.0),
getRealValue(25, 0.0),
getRealValue(35, 0.0),
// definition point 4
getRealValue(16, 0.0),
getRealValue(26, 0.0),
getRealValue(36, 0.0));
creationInterface->addDimAngular(d, da);
}
| 0
|
409,504
|
termgui_mch_get_color(char_u *name)
{
return gui_get_color_cmn(name);
}
| 0
|
270,370
|
static inline void ok_png_premultiply(uint8_t *dst) {
const uint8_t a = dst[3];
if (a == 0) {
dst[0] = 0;
dst[1] = 0;
dst[2] = 0;
} else if (a < 255) {
dst[0] = (a * dst[0] + 127) / 255;
dst[1] = (a * dst[1] + 127) / 255;
dst[2] = (a * dst[2] + 127) / 255;
}
}
| 0
|
346,445
|
estack_init(void)
{
estack_T *entry;
if (ga_grow(&exestack, 10) == FAIL)
mch_exit(0);
entry = ((estack_T *)exestack.ga_data) + exestack.ga_len;
entry->es_type = ETYPE_TOP;
entry->es_name = NULL;
entry->es_lnum = 0;
#ifdef FEAT_EVAL
entry->es_info.ufunc = NULL;
#endif
++exestack.ga_len;
}
| 0
|
308,162
|
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
int size = 0;
size = (sizeof(struct fastrpc_remote_arg) +
sizeof(struct fastrpc_invoke_buf) +
sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
sizeof(u64) * FASTRPC_MAX_FDLIST +
sizeof(u32) * FASTRPC_MAX_CRCLIST;
return size;
}
| 0
|
366,221
|
static int flags_to_propagation_type(int ms_flags)
{
int type = ms_flags & ~(MS_REC | MS_SILENT);
/* Fail if any non-propagation flags are set */
if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
return 0;
/* Only one propagation flag should be set */
if (!is_power_of_2(type))
return 0;
return type;
}
| 0
|
139,224
|
ui::Layer* OverlayWindowViews::GetResizeHandleLayer() {
return resize_handle_view_->layer();
}
| 0
|
346,466
|
load_start_packages(void)
{
did_source_packages = TRUE;
do_in_path(p_pp, (char_u *)"pack/*/start/*", DIP_ALL + DIP_DIR,
add_pack_plugin, &APP_LOAD);
}
| 0
|
384,809
|
getftypest(stat_T *st)
{
char *t;
if (S_ISREG(st->st_mode))
t = "file";
else if (S_ISDIR(st->st_mode))
t = "dir";
else if (S_ISLNK(st->st_mode))
t = "link";
else if (S_ISBLK(st->st_mode))
t = "bdev";
else if (S_ISCHR(st->st_mode))
t = "cdev";
else if (S_ISFIFO(st->st_mode))
t = "fifo";
else if (S_ISSOCK(st->st_mode))
t = "socket";
else
t = "other";
return (char_u*)t;
}
| 0
|
200,379
|
RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) {
int i;
if (!bin) {
return NULL;
}
RList *segments = r_list_newf (free);
for (i = 0; i < bin->ne_header->SegCount; i++) {
RBinSection *bs = R_NEW0 (RBinSection);
if (!bs) {
return segments;
}
NE_image_segment_entry *se = &bin->segment_entries[i];
bs->size = se->length;
bs->vsize = se->minAllocSz ? se->minAllocSz : 64000;
bs->bits = R_SYS_BITS_16;
bs->is_data = se->flags & IS_DATA;
bs->perm = __translate_perms (se->flags);
bs->paddr = (ut64)se->offset * bin->alignment;
bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr);
bs->is_segment = true;
r_list_append (segments, bs);
}
bin->segments = segments;
return segments;
}
| 1
|
292,199
|
inbound_identified (server *serv) /* 'MODE +e MYSELF' on freenode */
{
if (serv->joindelay_tag)
{
/* stop waiting, just auto JOIN now */
fe_timeout_remove (serv->joindelay_tag);
serv->joindelay_tag = 0;
check_autojoin_channels (serv);
}
}
| 0
|
317,097
|
static int smack_socket_socketpair(struct socket *socka,
struct socket *sockb)
{
struct socket_smack *asp = socka->sk->sk_security;
struct socket_smack *bsp = sockb->sk->sk_security;
asp->smk_packet = bsp->smk_out;
bsp->smk_packet = asp->smk_out;
return 0;
}
| 0
|
421,395
|
static void pblock(int d, js_Ast *block)
{
assert(block->type == STM_BLOCK);
pc('{'); nl();
pstmlist(d, block->a);
in(d); pc('}');
}
| 0
|
455,278
|
set_shell_name (argv0)
char *argv0;
{
/* Here's a hack. If the name of this shell is "sh", then don't do
any startup files; just try to be more like /bin/sh. */
shell_name = argv0 ? base_pathname (argv0) : PROGRAM;
if (argv0 && *argv0 == '-')
{
if (*shell_name == '-')
shell_name++;
login_shell = 1;
}
if (shell_name[0] == 's' && shell_name[1] == 'h' && shell_name[2] == '\0')
act_like_sh++;
if (shell_name[0] == 's' && shell_name[1] == 'u' && shell_name[2] == '\0')
su_shell++;
shell_name = argv0 ? argv0 : PROGRAM;
FREE (dollar_vars[0]);
dollar_vars[0] = savestring (shell_name);
/* A program may start an interactive shell with
"execl ("/bin/bash", "-", NULL)".
If so, default the name of this shell to our name. */
if (!shell_name || !*shell_name || (shell_name[0] == '-' && !shell_name[1]))
shell_name = PROGRAM;
}
| 0
|
267,846
|
vm_spread_operation (vm_frame_ctx_t *frame_ctx_p) /**< frame context */
{
JERRY_ASSERT (frame_ctx_p->byte_code_p[0] == CBC_EXT_OPCODE);
uint8_t opcode = frame_ctx_p->byte_code_p[1];
ecma_value_t completion_value;
ecma_value_t collection = *(--frame_ctx_p->stack_top_p);
ecma_collection_t *collection_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, collection);
ecma_value_t func_value = *(--frame_ctx_p->stack_top_p);
bool is_call_prop = opcode >= CBC_EXT_SPREAD_CALL_PROP;
if (frame_ctx_p->byte_code_p[1] == CBC_EXT_SPREAD_NEW)
{
const char *constructor_message_p = ecma_check_constructor (func_value);
if (constructor_message_p != ECMA_IS_VALID_CONSTRUCTOR)
{
completion_value = ecma_raise_type_error (constructor_message_p);
}
else
{
ecma_object_t *constructor_obj_p = ecma_get_object_from_value (func_value);
completion_value = ecma_op_function_construct (constructor_obj_p,
constructor_obj_p,
collection_p->buffer_p,
collection_p->item_count);
}
}
else
{
ecma_value_t this_value = is_call_prop ? frame_ctx_p->stack_top_p[-2] : ECMA_VALUE_UNDEFINED;
if (!ecma_is_value_object (func_value)
|| !ecma_op_object_is_callable (ecma_get_object_from_value (func_value)))
{
completion_value = ecma_raise_type_error (ECMA_ERR_MSG (ecma_error_expected_a_function));
}
else
{
ecma_object_t *func_obj_p = ecma_get_object_from_value (func_value);
completion_value = ecma_op_function_call (func_obj_p,
this_value,
collection_p->buffer_p,
collection_p->item_count);
}
if (is_call_prop)
{
ecma_free_value (*(--frame_ctx_p->stack_top_p));
ecma_free_value (*(--frame_ctx_p->stack_top_p));
}
}
ecma_collection_free (collection_p);
ecma_free_value (func_value);
if (JERRY_UNLIKELY (ECMA_IS_VALUE_ERROR (completion_value)))
{
#if JERRY_DEBUGGER
JERRY_CONTEXT (debugger_exception_byte_code_p) = frame_ctx_p->byte_code_p;
#endif /* JERRY_DEBUGGER */
frame_ctx_p->byte_code_p = (uint8_t *) vm_error_byte_code_p;
}
else
{
uint32_t opcode_data = vm_decode_table[(CBC_END + 1) + opcode];
if (!(opcode_data & (VM_OC_PUT_STACK | VM_OC_PUT_BLOCK)))
{
ecma_fast_free_value (completion_value);
}
else if (opcode_data & VM_OC_PUT_STACK)
{
*frame_ctx_p->stack_top_p++ = completion_value;
}
else
{
ecma_fast_free_value (VM_GET_REGISTER (frame_ctx_p, 0));
VM_GET_REGISTERS (frame_ctx_p)[0] = completion_value;
}
/* EXT_OPCODE, SPREAD_OPCODE, BYTE_ARG */
frame_ctx_p->byte_code_p += 3;
}
} /* vm_spread_operation */
| 0
|
336,490
|
SPICE_GNUC_VISIBLE void spice_server_destroy(SpiceServer *reds)
{
/* remove the server from the list of servers so that we don't attempt to
* free it again at exit */
pthread_mutex_lock(&global_reds_lock);
servers = g_list_remove(servers, reds);
pthread_mutex_unlock(&global_reds_lock);
for (auto qxl: reds->qxl_instances) {
red_qxl_destroy(qxl);
}
if (reds->inputs_channel) {
reds->inputs_channel->destroy();
}
/* This requires a bit of explanation on how reference counting is
* not enough. The full reply is in docs/spice_threading_model.txt,
* mainly the RedChannels are owned by both RedsState and
* RedChannelClient so we need both to get destroyed. This call
* remove RedChannelClients */
if (reds->main_channel) {
reds->main_channel->destroy();
}
red_timer_remove(reds->mig_timer);
if (reds->ctx) {
SSL_CTX_free(reds->ctx);
}
reds->main_dispatcher.reset();
reds_cleanup_net(reds);
reds->agent_dev.reset();
// NOTE: don't replace with g_list_free_full as this function that passed callback
// don't change the list while unreferencing in this case will change it.
reds->char_devices.clear();
spice_buffer_free(&reds->client_monitors_config);
red_record_unref(reds->record);
reds_cleanup(reds);
#ifdef RED_STATISTICS
stat_file_free(reds->stat_file);
#endif
reds_config_free(reds->config);
delete reds;
}
| 0
|
241,310
|
mrb_class_inherited(mrb_state *mrb, struct RClass *super, struct RClass *klass)
{
mrb_value s;
mrb_sym mid;
if (!super)
super = mrb->object_class;
super->flags |= MRB_FL_CLASS_IS_INHERITED;
s = mrb_obj_value(super);
mrb_mc_clear_by_class(mrb, klass);
mid = MRB_SYM(inherited);
if (!mrb_func_basic_p(mrb, s, mid, mrb_do_nothing)) {
mrb_value c = mrb_obj_value(klass);
mrb_funcall_argv(mrb, s, mid, 1, &c);
}
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.