idx
int64 | func
string | target
int64 |
|---|---|---|
225,831
|
/* Destructor for the 'vmhd' (video media header) box: frees the box itself.
   No nested resources to release. */
void vmhd_box_del(GF_Box *s)
{
GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s;
if (ptr == NULL) return;	/* tolerate NULL, matching the other box destructors */
gf_free(ptr);
| 0
|
226,426
|
// Iterator over a SparseTensor-backed dataset: iterates groups along
// dimension 0 of the stored sparse tensor.
explicit Iterator(const typename Iterator::Params& params)
: DatasetIterator<Dataset<T>>(params),
num_elements_(params.dataset->sparse_tensor_.shape()[0]),
dense_shape_(DT_INT64, {params.dataset->sparse_tensor_.dims() - 1}),
group_iterable_(params.dataset->sparse_tensor_.group({0})),
iter_(group_iterable_.begin()) {
// dense_shape_ is the per-element shape: the source tensor's shape with
// the leading (grouping) dimension dropped.
for (size_t i = 0; i < dense_shape_.NumElements(); ++i) {
dense_shape_.vec<int64_t>()(i) =
params.dataset->sparse_tensor_.shape()[i + 1];
}
}
| 0
|
230,293
|
njs_array_handler_includes(njs_vm_t *vm, njs_iterator_args_t *args,
    njs_value_t *entry, int64_t n)
{
    njs_value_t  *probe;

    /* Array holes compare as "undefined" for Array.prototype.includes(). */
    probe = njs_is_valid(entry) ? entry
                                : njs_value_arg(&njs_value_undefined);

    if (!njs_values_same_zero(args->argument, probe)) {
        /* No match at this index; keep iterating. */
        return NJS_OK;
    }

    /* Match found (SameValueZero): record true and stop the iteration. */
    njs_set_true(&vm->retval);
    return NJS_DONE;
}
| 0
|
317,243
|
/* Initialize the security blob of a freshly created SysV IPC object:
 * record its object class and label it with the creating task's SID. */
static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
{
	isec->sid = current_sid();
	isec->sclass = sclass;
}
| 0
|
512,401
|
/* Construct a hexadecimal literal item (e.g. 0x616263, x'616263').
   All parsing and string/charset setup is delegated to hex_string_init(). */
Item_hex_constant(THD *thd, const char *str, size_t str_length):
Item_literal(thd)
{
hex_string_init(thd, str, str_length);
}
| 0
|
230,299
|
/*
 * Setter for Array "length".  Converts the assigned value to a uint32
 * array length (RangeError when the number is not a valid length), and
 * when the array shrinks, deletes all indexed own properties >= the new
 * length before redefining "length".
 *
 * Returns NJS_OK, NJS_DECLINED (receiver is not an Array), or NJS_ERROR.
 */
njs_array_length_set(njs_vm_t *vm, njs_value_t *value,
    njs_object_prop_t *prev, njs_value_t *setval)
{
    double       num, idx;
    int64_t      prev_length;
    uint32_t     i, length;
    njs_int_t    ret;
    njs_array_t  *array, *keys;

    array = njs_object_proto_lookup(njs_object(value), NJS_ARRAY, njs_array_t);
    if (njs_slow_path(array == NULL)) {
        return NJS_DECLINED;
    }

    ret = njs_value_to_number(vm, setval, &num);
    if (njs_slow_path(ret != NJS_OK)) {
        return ret;
    }

    /* A valid length must round-trip through ToUint32 unchanged. */
    length = (uint32_t) njs_number_to_length(num);
    if ((double) length != num) {
        njs_range_error(vm, "Invalid array length");
        return NJS_ERROR;
    }

    ret = njs_value_to_length(vm, &prev->value, &prev_length);
    if (njs_slow_path(ret != NJS_OK)) {
        return ret;
    }

    keys = NULL;

    if (length < prev_length) {
        /* Shrinking: delete indexed properties at or beyond the new length,
         * iterating from the highest index down. */
        keys = njs_array_indices(vm, value);
        if (njs_slow_path(keys == NULL)) {
            return NJS_ERROR;
        }

        if (keys->length != 0) {
            i = keys->length - 1;

            do {
                idx = njs_string_to_index(&keys->start[i]);
                if (idx >= length) {
                    ret = njs_value_property_delete(vm, value, &keys->start[i],
                                                    NULL);
                    if (njs_slow_path(ret == NJS_ERROR)) {
                        goto done;
                    }
                }
            } while (i-- != 0);
        }
    }

    ret = njs_array_length_redefine(vm, value, length);
    if (njs_slow_path(ret != NJS_OK)) {
        /* BUGFIX: was "return ret", which leaked "keys" when shrinking. */
        goto done;
    }

    ret = NJS_OK;

done:

    if (keys != NULL) {
        njs_array_destroy(vm, keys);
    }

    return ret;
}
| 0
|
432,691
|
/*
 * Configure the drawing wand's stroke or fill color/pattern (selected by
 * brush_apply) from the WMF device-context brush.  Unsupported brush
 * styles and raster operations are reported on stdout and otherwise
 * ignored.
 */
static void util_set_brush(wmfAPI *API, wmfDC *dc,const BrushApply brush_apply)
{
wmf_magick_t
*ddata = WMF_MAGICK_GetData(API);
wmfBrush
*brush = WMF_DC_BRUSH(dc);
/* Set polygon fill rule */
switch (WMF_DC_POLYFILL(dc)) /* Is this correct ?? */
{
case WINDING:
DrawSetClipRule(WmfDrawingWand,NonZeroRule);
break;
case ALTERNATE:
default:
DrawSetClipRule(WmfDrawingWand,EvenOddRule);
break;
}
switch (WMF_BRUSH_STYLE(brush))
{
case BS_SOLID /* 0 */:
/* WMF_BRUSH_COLOR specifies brush color, WMF_BRUSH_HATCH
ignored */
{
if ( brush_apply == BrushApplyStroke )
draw_stroke_color_rgb(API,WMF_BRUSH_COLOR(brush));
else
draw_fill_color_rgb(API,WMF_BRUSH_COLOR(brush));
break;
}
case BS_HOLLOW /* 1 */: /* BS_HOLLOW & BS_NULL share enum */
/* WMF_BRUSH_COLOR and WMF_BRUSH_HATCH ignored */
{
if ( brush_apply == BrushApplyStroke )
draw_stroke_color_string(WmfDrawingWand,"none");
else
draw_fill_color_string(WmfDrawingWand,"none");
break;
}
case BS_HATCHED /* 2 */:
/* WMF_BRUSH_COLOR specifies the hatch color, WMF_BRUSH_HATCH
specifies the hatch brush style. If WMF_DC_OPAQUE, then
WMF_DC_BACKGROUND specifies hatch background color. */
{
/* Build an 8x8 tiled pattern drawing the requested hatch lines. */
DrawPushDefs(WmfDrawingWand);
draw_pattern_push(API, ddata->pattern_id, 8, 8);
(void) PushDrawingWand(WmfDrawingWand);
if (WMF_DC_OPAQUE(dc))
{
/* Opaque background: fill the tile with the DC background color first. */
if ( brush_apply == BrushApplyStroke )
draw_stroke_color_rgb(API,WMF_DC_BACKGROUND(dc));
else
draw_fill_color_rgb(API,WMF_DC_BACKGROUND(dc));
DrawRectangle(WmfDrawingWand, 0, 0, 7, 7 );
}
DrawSetStrokeAntialias(WmfDrawingWand, MagickFalse);
DrawSetStrokeWidth(WmfDrawingWand, 1);
draw_stroke_color_rgb(API,WMF_BRUSH_COLOR(brush));
switch ((unsigned int) WMF_BRUSH_HATCH(brush))
{
case HS_HORIZONTAL: /* ----- */
{
DrawLine(WmfDrawingWand, 0, 3, 7,3);
break;
}
case HS_VERTICAL: /* ||||| */
{
DrawLine(WmfDrawingWand, 3, 0, 3, 7);
break;
}
case HS_FDIAGONAL: /* \\\\\ */
{
DrawLine(WmfDrawingWand, 0, 0, 7, 7);
break;
}
case HS_BDIAGONAL: /* / */
{
DrawLine(WmfDrawingWand, 0, 7, 7, 0 );
break;
}
case HS_CROSS: /* +++++ */
{
DrawLine(WmfDrawingWand, 0, 3, 7, 3 );
DrawLine(WmfDrawingWand, 3, 0, 3, 7 );
break;
}
case HS_DIAGCROSS: /* xxxxx */
{
DrawLine(WmfDrawingWand, 0, 0, 7, 7 );
DrawLine(WmfDrawingWand, 0, 7, 7, 0 );
break;
}
default:
{
printf("util_set_brush: unexpected brush hatch enumeration %u\n",
(unsigned int)WMF_BRUSH_HATCH(brush));
}
}
(void) PopDrawingWand(WmfDrawingWand);
(void) DrawPopPattern(WmfDrawingWand);
DrawPopDefs(WmfDrawingWand);
{
/* Reference the just-defined pattern by URL, then bump the id so the
   next pattern definition gets a unique name. */
char
pattern_id[MagickPathExtent];
(void) FormatLocaleString(pattern_id,MagickPathExtent,"#brush_%lu",
ddata->pattern_id);
if (brush_apply == BrushApplyStroke )
(void) DrawSetStrokePatternURL(WmfDrawingWand,pattern_id);
else
(void) DrawSetFillPatternURL(WmfDrawingWand,pattern_id);
++ddata->pattern_id;
}
break;
}
case BS_PATTERN /* 3 */:
/* WMF_BRUSH_COLOR ignored, WMF_BRUSH_HATCH provides handle to
bitmap */
{
printf("util_set_brush: BS_PATTERN not supported\n");
break;
}
case BS_INDEXED /* 4 */:
{
printf("util_set_brush: BS_INDEXED not supported\n");
break;
}
case BS_DIBPATTERN /* 5 */:
{
/* Brush is a DIB: composite the bitmap into a tiled pattern. */
wmfBMP
*brush_bmp = WMF_BRUSH_BITMAP(brush);
if (brush_bmp && brush_bmp->data != 0)
{
CompositeOperator
mode;
const Image
*image;
MagickWand
*magick_wand;
image = (Image*)brush_bmp->data;
mode = CopyCompositeOp; /* Default is copy */
switch (WMF_DC_ROP(dc))
{
/* Binary raster ops */
case R2_BLACK:
printf("util_set_brush: R2_BLACK ROP2 mode not supported!\n");
break;
case R2_NOTMERGEPEN:
printf("util_set_brush: R2_NOTMERGEPEN ROP2 mode not supported!\n");
break;
case R2_MASKNOTPEN:
printf("util_set_brush R2_MASKNOTPEN ROP2 mode not supported!\n");
break;
case R2_NOTCOPYPEN:
printf("util_set_brush: R2_NOTCOPYPEN ROP2 mode not supported!\n");
break;
case R2_MASKPENNOT:
printf("util_set_brush: R2_MASKPENNOT ROP2 mode not supported!\n");
break;
case R2_NOT:
printf("util_set_brush: R2_NOT ROP2 mode not supported!\n");
break;
case R2_XORPEN:
printf("util_set_brush: R2_XORPEN ROP2 mode not supported!\n");
break;
case R2_NOTMASKPEN:
printf("util_set_brush: R2_NOTMASKPEN ROP2 mode not supported!\n");
break;
case R2_MASKPEN:
printf("util_set_brush: R2_MASKPEN ROP2 mode not supported!\n");
break;
case R2_NOTXORPEN:
printf("util_set_brush: R2_NOTXORPEN ROP2 mode not supported!\n");
break;
case R2_NOP:
printf("util_set_brush: R2_NOP ROP2 mode not supported!\n");
break;
case R2_MERGENOTPEN:
printf("util_set_brush: R2_MERGENOTPEN ROP2 mode not supported!\n");
break;
case R2_COPYPEN:
mode = CopyCompositeOp;
break;
case R2_MERGEPENNOT:
printf("util_set_brush: R2_MERGEPENNOT ROP2 mode not supported!\n");
break;
case R2_MERGEPEN:
printf("util_set_brush: R2_MERGEPEN ROP2 mode not supported!\n");
break;
case R2_WHITE:
printf("util_set_brush: R2_WHITE ROP2 mode not supported!\n");
break;
default:
{
printf("util_set_brush: unexpected ROP2 enumeration %u!\n",
(unsigned int)WMF_DC_ROP(dc));
}
}
DrawPushDefs(WmfDrawingWand);
draw_pattern_push(API, ddata->pattern_id, brush_bmp->width,
brush_bmp->height);
magick_wand=NewMagickWandFromImage(image);
(void) DrawComposite(WmfDrawingWand,mode, 0, 0, brush_bmp->width,
brush_bmp->height, magick_wand);
magick_wand=DestroyMagickWand(magick_wand);
(void) DrawPopPattern(WmfDrawingWand);
DrawPopDefs(WmfDrawingWand);
{
/* Reference the bitmap pattern by URL, as in the hatched case. */
char
pattern_id[MagickPathExtent];
(void) FormatLocaleString(pattern_id,MagickPathExtent,"#brush_%lu",
ddata->pattern_id);
if ( brush_apply == BrushApplyStroke )
(void) DrawSetStrokePatternURL(WmfDrawingWand,pattern_id);
else
(void) DrawSetFillPatternURL(WmfDrawingWand,pattern_id);
++ddata->pattern_id;
}
}
else
printf("util_set_brush: no BMP image data!\n");
break;
}
case BS_DIBPATTERNPT /* 6 */:
/* WMF_BRUSH_COLOR ignored, WMF_BRUSH_HATCH provides pointer to
DIB */
{
printf("util_set_brush: BS_DIBPATTERNPT not supported\n");
break;
}
case BS_PATTERN8X8 /* 7 */:
{
printf("util_set_brush: BS_PATTERN8X8 not supported\n");
break;
}
case BS_DIBPATTERN8X8 /* 8 */:
{
printf("util_set_brush: BS_DIBPATTERN8X8 not supported\n");
break;
}
default:
{
/* Unknown brush style: silently ignored. */
}
}
}
| 0
|
432,243
|
/* Insert @range at index @pos of @view, growing the backing array on
 * demand.  Entries at and after @pos are shifted up by one slot. */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        /* Geometric growth, starting at 10 entries for small views. */
        unsigned new_cap = MAX(2 * view->nr, 10);

        view->ranges = g_realloc(view->ranges,
                                 new_cap * sizeof(*view->ranges));
        view->nr_allocated = new_cap;
    }
    /* Open a hole at @pos by moving the tail up one element. */
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    view->nr++;
}
| 0
|
242,949
|
/* Reset the DTLS anti-replay window to its initial empty state. */
void mbedtls_ssl_dtls_replay_reset( mbedtls_ssl_context *ssl )
{
    ssl->in_window = 0;      /* no record sequence numbers seen yet */
    ssl->in_window_top = 0;  /* window positioned at sequence number zero */
}
| 0
|
346,464
|
/*
 * Add the pack directory "fname" (a rtp/pack/name/start/name directory)
 * to the 'runtimepath' option: insert it after the runtimepath entry it
 * lives under (or before the first "after" entry when no match is found),
 * and also add its "after" subdirectory when that exists on disk.
 * Returns OK on success, FAIL on allocation failure or bad input.
 */
add_pack_dir_to_rtp(char_u *fname)
{
char_u *p4, *p3, *p2, *p1, *p;
char_u *entry;
char_u *insp = NULL;
int c;
char_u *new_rtp;
int keep;
size_t oldlen;
size_t addlen;
size_t new_rtp_len;
char_u *afterdir = NULL;
size_t afterlen = 0;
char_u *after_insp = NULL;
char_u *ffname = NULL;
size_t fname_len;
char_u *buf = NULL;
char_u *rtp_ffname;
int match;
int retval = FAIL;
// Walk the path once, remembering the last four path separators.
p4 = p3 = p2 = p1 = get_past_head(fname);
for (p = p1; *p; MB_PTR_ADV(p))
if (vim_ispathsep_nocolon(*p))
{
p4 = p3; p3 = p2; p2 = p1; p1 = p;
}
// now we have:
// rtp/pack/name/start/name
// p4 p3 p2 p1
//
// find the part up to "pack" in 'runtimepath'
c = *++p4; // append pathsep in order to expand symlink
*p4 = NUL;
ffname = fix_fname(fname);
*p4 = c;	// restore the byte temporarily overwritten with NUL
if (ffname == NULL)
return FAIL;
// Find "ffname" in "p_rtp", ignoring '/' vs '\' differences.
// Also stop at the first "after" directory.
fname_len = STRLEN(ffname);
buf = alloc(MAXPATHL);
if (buf == NULL)
goto theend;
for (entry = p_rtp; *entry != NUL; )
{
char_u *cur_entry = entry;
copy_option_part(&entry, buf, MAXPATHL, ",");
if (insp == NULL)
{
add_pathsep(buf);
rtp_ffname = fix_fname(buf);	// expand symlinks for the comparison
if (rtp_ffname == NULL)
goto theend;
match = vim_fnamencmp(rtp_ffname, ffname, fname_len) == 0;
vim_free(rtp_ffname);
if (match)
// Insert "ffname" after this entry (and comma).
insp = entry;
}
// Detect an entry whose last path component is "after".
if ((p = (char_u *)strstr((char *)buf, "after")) != NULL
&& p > buf
&& vim_ispathsep(p[-1])
&& (vim_ispathsep(p[5]) || p[5] == NUL || p[5] == ','))
{
if (insp == NULL)
// Did not find "ffname" before the first "after" directory,
// insert it before this entry.
insp = cur_entry;
after_insp = cur_entry;
break;
}
}
if (insp == NULL)
// Both "fname" and "after" not found, append at the end.
insp = p_rtp + STRLEN(p_rtp);
// check if rtp/pack/name/start/name/after exists
afterdir = concat_fnames(fname, (char_u *)"after", TRUE);
if (afterdir != NULL && mch_isdir(afterdir))
afterlen = STRLEN(afterdir) + 1; // add one for comma
oldlen = STRLEN(p_rtp);
addlen = STRLEN(fname) + 1; // add one for comma
new_rtp = alloc(oldlen + addlen + afterlen + 1); // add one for NUL
if (new_rtp == NULL)
goto theend;
// We now have 'rtp' parts: {keep}{keep_after}{rest}.
// Create new_rtp, first: {keep},{fname}
keep = (int)(insp - p_rtp);
mch_memmove(new_rtp, p_rtp, keep);
new_rtp_len = keep;
if (*insp == NUL)
new_rtp[new_rtp_len++] = ','; // add comma before
mch_memmove(new_rtp + new_rtp_len, fname, addlen - 1);
new_rtp_len += addlen - 1;
if (*insp != NUL)
new_rtp[new_rtp_len++] = ','; // add comma after
if (afterlen > 0 && after_insp != NULL)
{
int keep_after = (int)(after_insp - p_rtp);
// Add to new_rtp: {keep},{fname}{keep_after},{afterdir}
mch_memmove(new_rtp + new_rtp_len, p_rtp + keep,
keep_after - keep);
new_rtp_len += keep_after - keep;
mch_memmove(new_rtp + new_rtp_len, afterdir, afterlen - 1);
new_rtp_len += afterlen - 1;
new_rtp[new_rtp_len++] = ',';
keep = keep_after;
}
if (p_rtp[keep] != NUL)
// Append rest: {keep},{fname}{keep_after},{afterdir}{rest}
mch_memmove(new_rtp + new_rtp_len, p_rtp + keep, oldlen - keep + 1);
else
new_rtp[new_rtp_len] = NUL;
if (afterlen > 0 && after_insp == NULL)
{
// Append afterdir when "after" was not found:
// {keep},{fname}{rest},{afterdir}
STRCAT(new_rtp, ",");
STRCAT(new_rtp, afterdir);
}
set_option_value_give_err((char_u *)"rtp", 0L, new_rtp, 0);
vim_free(new_rtp);
retval = OK;
theend:
vim_free(buf);
vim_free(ffname);
vim_free(afterdir);
return retval;
}
| 0
|
294,447
|
/* Truncate a day-count toward zero.  Stores the fractional part in *fr
 * and returns the integral part. */
d_trunc(VALUE d, VALUE *fr)
{
    VALUE rd;

    if (!wholenum_p(d)) {
        /* Fractional: integral quotient and remainder modulo one day. */
        rd = f_idiv(d, INT2FIX(1));
        *fr = f_mod(d, INT2FIX(1));
    }
    else {
        /* Already whole: fraction is exactly zero. */
        rd = to_integer(d);
        *fr = INT2FIX(0);
    }
    return rd;
}
| 0
|
234,870
|
/*
 * Look up the chunk mapping that covers @logical/@length.
 *
 * On success returns the extent_map with an elevated reference; the
 * caller must drop it with free_extent_map().  Returns ERR_PTR(-EINVAL)
 * when no mapping exists or the mapping found does not contain @logical.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
u64 logical, u64 length)
{
struct extent_map_tree *em_tree;
struct extent_map *em;
em_tree = &fs_info->mapping_tree;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, length);
read_unlock(&em_tree->lock);
if (!em) {
btrfs_crit(fs_info, "unable to find logical %llu length %llu",
logical, length);
return ERR_PTR(-EINVAL);
}
/* Sanity check: the returned mapping must actually contain @logical. */
if (em->start > logical || em->start + em->len < logical) {
btrfs_crit(fs_info,
"found a bad mapping, wanted %llu-%llu, found %llu-%llu",
logical, length, em->start, em->start + em->len);
free_extent_map(em);
return ERR_PTR(-EINVAL);
}
/* callers are responsible for dropping em's ref. */
return em;
}
| 0
|
337,801
|
/*
 * Process an inbound ASCONF chunk (RFC 5061 / ADDIP): walk its TLV
 * parameters, apply each address-configuration request, and build the
 * corresponding ASCONF-ACK chunk.  Returns the ASCONF-ACK (with an extra
 * hold), or NULL when the ACK could not be allocated.
 *
 * NOTE(review): addr_param's length is taken from the wire without a
 * visible bounds check here; presumably the chunk was validated earlier
 * (sctp_verify_asconf) before this is called -- confirm at the call site.
 */
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf)
{
union sctp_addr_param *addr_param;
struct sctp_addip_chunk *addip;
struct sctp_chunk *asconf_ack;
bool all_param_pass = true;
struct sctp_addiphdr *hdr;
int length = 0, chunk_len;
union sctp_params param;
__be16 err_code;
__u32 serial;
addip = (struct sctp_addip_chunk *)asconf->chunk_hdr;
chunk_len = ntohs(asconf->chunk_hdr->length) -
sizeof(struct sctp_chunkhdr);
hdr = (struct sctp_addiphdr *)asconf->skb->data;
serial = ntohl(hdr->serial);
/* Skip the addiphdr and store a pointer to address parameter. */
length = sizeof(*hdr);
addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
chunk_len -= length;
/* Skip the address parameter and store a pointer to the first
 * asconf parameter.
 */
length = ntohs(addr_param->p.length);
chunk_len -= length;
/* create an ASCONF_ACK chunk.
 * Based on the definitions of parameters, we know that the size of
 * ASCONF_ACK parameters are less than or equal to the fourfold of ASCONF
 * parameters.
 */
asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4);
if (!asconf_ack)
goto done;
/* Process the TLVs contained within the ASCONF chunk. */
sctp_walk_params(param, addip, addip_hdr.params) {
/* Skip preceeding address parameters. */
if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
param.p->type == SCTP_PARAM_IPV6_ADDRESS)
continue;
err_code = sctp_process_asconf_param(asoc, asconf,
param.addip);
/* ADDIP 4.1 A7)
 * If an error response is received for a TLV parameter,
 * all TLVs with no response before the failed TLV are
 * considered successful if not reported. All TLVs after
 * the failed response are considered unsuccessful unless
 * a specific success indication is present for the parameter.
 */
if (err_code != SCTP_ERROR_NO_ERROR)
all_param_pass = false;
if (!all_param_pass)
sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
err_code, param.addip);
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
 * an IP address sends an 'Out of Resource' in its response, it
 * MUST also fail any subsequent add or delete requests bundled
 * in the ASCONF.
 */
if (err_code == SCTP_ERROR_RSRC_LOW)
goto done;
}
done:
/* The serial advances even when no ACK could be built. */
asoc->peer.addip_serial++;
/* If we are sending a new ASCONF_ACK hold a reference to it in assoc
 * after freeing the reference to old asconf ack if any.
 */
if (asconf_ack) {
sctp_chunk_hold(asconf_ack);
list_add_tail(&asconf_ack->transmitted_list,
&asoc->asconf_ack_list);
}
return asconf_ack;
}
| 0
|
477,288
|
/*
 * Take a reference on an AEAD key under RCU protection.
 * Returns NULL when the key pointer is unset or the key is already on
 * its way to destruction (refcount has dropped to zero); otherwise
 * returns the key with its refcount bumped.
 */
static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
struct tipc_aead *tmp;
rcu_read_lock();
tmp = rcu_dereference(aead);
/* refcount_inc_not_zero() fails on a dying key: treat as absent. */
if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
tmp = NULL;
rcu_read_unlock();
return tmp;
}
| 0
|
244,346
|
/* Allocate a new 'sbgp' (sample-to-group) box.  The ISOM_DECL_BOX_ALLOC
   macro declares "tmp" and handles the allocation-failure path. */
GF_Box *sbgp_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_SampleGroupBox, GF_ISOM_BOX_TYPE_SBGP);
return (GF_Box *)tmp;
}
| 0
|
226,253
|
/* Allocate a new 'mfra' (movie fragment random access) box along with
   its list of child 'tfra' boxes.
   NOTE(review): gf_list_new() result is not checked here; on OOM
   tfra_list stays NULL -- confirm callers/readers tolerate that. */
GF_Box *mfra_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_MFRA);
tmp->tfra_list = gf_list_new();
return (GF_Box *)tmp;
}
| 0
|
473,972
|
/* Case-fold one character at *pp into "lower", advancing *pp past it.
 * ASCII bytes are lowercased; multi-byte characters are copied through
 * unchanged.  Returns the byte length written to "lower". */
mbc_case_fold(OnigCaseFoldType flag,
    const UChar** pp, const UChar* end, UChar* lower,
    OnigEncoding enc)
{
  const UChar* p = *pp;

  if (ONIGENC_IS_MBC_ASCII(p)) {
    /* Single ASCII byte: fold it directly. */
    *lower = ONIGENC_ASCII_CODE_TO_LOWER_CASE(*p);
    (*pp)++;
    return 1;
  }
  else {
    /* Multi-byte character: this encoding defines no folding, so the
     * bytes pass through verbatim. */
    int i;
    int len = mbc_enc_len(p, end, enc);

    for (i = 0; i < len; i++) {
      lower[i] = p[i];
    }
    *pp += len;
    return len; /* byte length of the (unchanged) character */
  }
}
| 0
|
226,353
|
/* Find the user-data map matching box_type in ptr->recordList.
   For UUID boxes the 16-byte uuid must also match, unless uuid is NULL
   in which case the first UUID map is returned.  Returns NULL when the
   box (or no matching map) is found. */
GF_UserDataMap *udta_getEntry(GF_UserDataBox *ptr, u32 box_type, bin128 *uuid)
{
u32 i;
GF_UserDataMap *map;
if (ptr == NULL) return NULL;
i=0;
while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) {
if (map->boxType == box_type) {
if ((box_type != GF_ISOM_BOX_TYPE_UUID) || !uuid) return map;
if (!memcmp(map->uuid, *uuid, 16)) return map;
}
}
return NULL;
| 0
|
398,550
|
/* Read a DWARF "initial length" field: a 32-bit length, or -- when the
 * 32-bit value equals the DWARF_INIT_LEN_64 escape (0xffffffff) -- a
 * following 64-bit length.  Sets *is_64bit accordingly; the READ macros
 * advance *buf.
 * NOTE(review): big_endian and buf_end are not referenced directly in
 * this body; presumably the READ32/READ64 macros use them -- confirm
 * their definitions. */
static inline ut64 dwarf_read_initial_length(RZ_OUT bool *is_64bit, bool big_endian, const ut8 **buf, const ut8 *buf_end) {
ut64 r = READ32(*buf);
if (r == DWARF_INIT_LEN_64) {
r = READ64(*buf);
*is_64bit = true;
} else {
*is_64bit = false;
}
return r;
}
| 0
|
219,920
|
/* Return the payload string of the index-th 'payt' box inside the hint
 * track's 'hinf' user-data box, optionally writing the payload code to
 * *payID.  index is 1-based.  Returns NULL when the track is not an RTP
 * hint track or the requested entry does not exist. */
const char *gf_isom_get_payt_info(GF_ISOFile *the_file, u32 trackNumber, u32 index, u32 *payID)
{
u32 i, count;
GF_TrackBox *trak;
GF_UserDataMap *map;
GF_HintInfoBox *hinf;
GF_PAYTBox *payt;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak || !index) return NULL;
if (!CheckHintFormat(trak, GF_ISOM_HINT_RTP)) return NULL;
map = udta_getEntry(trak->udta, GF_ISOM_BOX_TYPE_HINF, NULL);
if (!map) return NULL;
/* exactly one 'hinf' box is expected in the map */
if (gf_list_count(map->boxes) != 1) return NULL;
hinf = (GF_HintInfoBox *)gf_list_get(map->boxes, 0);
count = 0;
i = 0;
while ((payt = (GF_PAYTBox*)gf_list_enum(hinf->child_boxes, &i))) {
if (payt->type == GF_ISOM_BOX_TYPE_PAYT) {
count++;
if (count == index) {
if (payID) *payID=payt->payloadCode;
return payt->payloadString;
}
}
}
return NULL;
}
| 0
|
338,055
|
// Emit `size` raw bytes to the output stream, one at a time, each
// reinterpreted as a signed 8-bit value (the stream's byte insert type).
void WasmBinaryWriter::writeData(const char* data, size_t size) {
  const char* end = data + size;
  for (const char* p = data; p != end; ++p) {
    o << int8_t(*p);
  }
}
| 0
|
333,055
|
/*
 * Return 1 when the NFA starting at "start" is anchored at the start of
 * a line/file on every alternative, 0 otherwise.  Recursion through
 * NFA_SPLIT is bounded at depth 4 to keep the cost of this heuristic low.
 */
nfa_get_reganch(nfa_state_T *start, int depth)
{
nfa_state_T *p = start;
if (depth > 4)
return 0;
while (p != NULL)
{
switch (p->c)
{
case NFA_BOL:
case NFA_BOF:
return 1; // yes!
// Zero-width states don't affect anchoring: look past them.
case NFA_ZSTART:
case NFA_ZEND:
case NFA_CURSOR:
case NFA_VISUAL:
case NFA_MOPEN:
case NFA_MOPEN1:
case NFA_MOPEN2:
case NFA_MOPEN3:
case NFA_MOPEN4:
case NFA_MOPEN5:
case NFA_MOPEN6:
case NFA_MOPEN7:
case NFA_MOPEN8:
case NFA_MOPEN9:
case NFA_NOPEN:
#ifdef FEAT_SYN_HL
case NFA_ZOPEN:
case NFA_ZOPEN1:
case NFA_ZOPEN2:
case NFA_ZOPEN3:
case NFA_ZOPEN4:
case NFA_ZOPEN5:
case NFA_ZOPEN6:
case NFA_ZOPEN7:
case NFA_ZOPEN8:
case NFA_ZOPEN9:
#endif
p = p->out;
break;
case NFA_SPLIT:
// Both branches must be anchored for the whole NFA to be.
return nfa_get_reganch(p->out, depth + 1)
&& nfa_get_reganch(p->out1, depth + 1);
default:
return 0; // noooo
}
}
return 0;
}
| 0
|
459,185
|
/* Replay a single classifier offload callback during re-offload.
 * On success the in-hardware counters are updated.  A driver error is
 * only propagated when adding a filter that must be in hardware
 * (skip_sw); otherwise the failure is tolerated and 0 is returned. */
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (!err) {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
		return 0;
	}

	/* Driver refused: fatal only for an add of a skip_sw filter. */
	if (add && tc_skip_sw(*flags))
		return err;

	return 0;
}
| 0
|
224,987
|
/*
 * pqDropConnection
 *
 * Close any physical connection to the server and reset transient
 * per-connection security state (SSL, GSSAPI, SSPI, SASL).  Optionally
 * discards buffered unread input; unsent output is always discarded.
 * Safe to call when no connection is open.  Does not free the PGconn.
 */
pqDropConnection(PGconn *conn, bool flushInput)
{
/* Drop any SSL state */
pqsecure_close(conn);
/* Close the socket itself */
if (conn->sock != PGINVALID_SOCKET)
closesocket(conn->sock);
conn->sock = PGINVALID_SOCKET;
/* Optionally discard any unread data */
if (flushInput)
conn->inStart = conn->inCursor = conn->inEnd = 0;
/* Always discard any unsent data */
conn->outCount = 0;
/* Free authentication/encryption state */
#ifdef ENABLE_GSS
{
OM_uint32 min_s;
if (conn->gcred != GSS_C_NO_CREDENTIAL)
{
gss_release_cred(&min_s, &conn->gcred);
conn->gcred = GSS_C_NO_CREDENTIAL;
}
if (conn->gctx)
gss_delete_sec_context(&min_s, &conn->gctx, GSS_C_NO_BUFFER);
if (conn->gtarg_nam)
gss_release_name(&min_s, &conn->gtarg_nam);
if (conn->gss_SendBuffer)
{
free(conn->gss_SendBuffer);
conn->gss_SendBuffer = NULL;
}
if (conn->gss_RecvBuffer)
{
free(conn->gss_RecvBuffer);
conn->gss_RecvBuffer = NULL;
}
if (conn->gss_ResultBuffer)
{
free(conn->gss_ResultBuffer);
conn->gss_ResultBuffer = NULL;
}
conn->gssenc = false;
}
#endif
#ifdef ENABLE_SSPI
if (conn->sspitarget)
{
free(conn->sspitarget);
conn->sspitarget = NULL;
}
if (conn->sspicred)
{
FreeCredentialsHandle(conn->sspicred);
free(conn->sspicred);
conn->sspicred = NULL;
}
if (conn->sspictx)
{
DeleteSecurityContext(conn->sspictx);
free(conn->sspictx);
conn->sspictx = NULL;
}
conn->usesspi = 0;
#endif
if (conn->sasl_state)
{
conn->sasl->free(conn->sasl_state);
conn->sasl_state = NULL;
}
}
| 0
|
369,344
|
/*
 * io_uring IORING_OP_RECVMSG issue path: receive a message on the
 * request's socket, handling provided buffers (REQ_F_BUFFER_SELECT),
 * nonblocking retries (-EAGAIN under IO_URING_F_NONBLOCK), and partial
 * receives that should be retried (io_net_retry).
 * Always completes the request (or arms async retry); returns 0.
 */
static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr iomsg, *kmsg;
struct io_sr_msg *sr = &req->sr_msg;
struct socket *sock;
struct io_buffer *kbuf;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
/* Use prepared async msghdr if one exists, else build one on-stack. */
if (req_has_async_data(req)) {
kmsg = req->async_data;
} else {
ret = io_recvmsg_copy_hdr(req, &iomsg);
if (ret)
return ret;
kmsg = &iomsg;
}
if (req->flags & REQ_F_BUFFER_SELECT) {
/* Provided-buffer mode: point the iov at the selected buffer. */
kbuf = io_recv_buffer_select(req, issue_flags);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
kmsg->fast_iov[0].iov_len = req->sr_msg.len;
iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
1, req->sr_msg.len);
}
flags = req->sr_msg.msg_flags;
if (force_nonblock)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
kmsg->uaddr, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock)
return io_setup_async_msg(req, kmsg);
if (ret == -ERESTARTSYS)
ret = -EINTR;
if (ret > 0 && io_net_retry(sock, flags)) {
/* Short receive on a stream: remember progress and retry. */
sr->done_io += ret;
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_msg(req, kmsg);
}
req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
req_set_fail(req);
}
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov)
kfree(kmsg->free_iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
/* Fold previously-completed bytes into the final result. */
if (ret >= 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
__io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
return 0;
}
| 0
|
369,377
|
*/
/* IORING_OP_TIMEOUT_REMOVE: cancel a pending timeout, or -- with
 * IORING_TIMEOUT_UPDATE -- re-arm it (linked or plain) with a new
 * expiration.  Completes the request with the cancel/update result;
 * always returns 0. */
static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_timeout_rem *tr = &req->timeout_rem;
struct io_ring_ctx *ctx = req->ctx;
int ret;
if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
/* Plain removal: needs both locks to pluck the timeout safely. */
spin_lock(&ctx->completion_lock);
spin_lock_irq(&ctx->timeout_lock);
ret = io_timeout_cancel(ctx, tr->addr);
spin_unlock_irq(&ctx->timeout_lock);
spin_unlock(&ctx->completion_lock);
} else {
enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
spin_lock_irq(&ctx->timeout_lock);
if (tr->ltimeout)
ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
else
ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
spin_unlock_irq(&ctx->timeout_lock);
}
if (ret < 0)
req_set_fail(req);
io_req_complete_post(req, ret, 0);
return 0;
| 0
|
393,510
|
/* Squirrel thread.wakeup(): resume a suspended coroutine thread.
 * An optional second argument becomes the return value of the thread's
 * suspend call.  Raises an error for idle/running threads or a non-thread
 * receiver; on success pushes the value the thread yields back. */
static SQInteger thread_wakeup(HSQUIRRELVM v)
{
SQObjectPtr o = stack_get(v,1);
if(sq_type(o) == OT_THREAD) {
SQVM *thread = _thread(o);
SQInteger state = sq_getvmstate(thread);
if(state != SQ_VMSTATE_SUSPENDED) {
switch(state) {
case SQ_VMSTATE_IDLE:
return sq_throwerror(v,_SC("cannot wakeup a idle thread"));
break;
case SQ_VMSTATE_RUNNING:
return sq_throwerror(v,_SC("cannot wakeup a running thread"));
break;
}
}
/* A second stack argument, if present, is passed through to the thread. */
SQInteger wakeupret = sq_gettop(v)>1?SQTrue:SQFalse;
if(wakeupret) {
sq_move(thread,v,2);
}
if(SQ_SUCCEEDED(sq_wakeupvm(thread,wakeupret,SQTrue,SQTrue,SQFalse))) {
sq_move(v,thread,-1);
sq_pop(thread,1); //pop retval
if(sq_getvmstate(thread) == SQ_VMSTATE_IDLE) {
sq_settop(thread,1); //pop roottable
}
return 1;
}
/* Wakeup failed: surface the thread's error in this VM. */
sq_settop(thread,1);
v->_lasterror = thread->_lasterror;
return SQ_ERROR;
}
return sq_throwerror(v,_SC("wrong parameter"));
}
| 0
|
359,369
|
/* CLI: "no neighbor X attribute-unchanged as-path (next-hop|med)".
 * Clears the as-path attribute-unchanged flag plus whichever second
 * attribute was named.  strncmp with length 1 deliberately accepts
 * abbreviated keywords; it is unambiguous because the alternatives
 * differ in their first character ('n' vs 'm'). */
DEFUN (no_neighbor_attr_unchanged2,
no_neighbor_attr_unchanged2_cmd,
NO_NEIGHBOR_CMD2 "attribute-unchanged as-path (next-hop|med)",
NO_STR
NEIGHBOR_STR
NEIGHBOR_ADDR_STR2
"BGP attribute is propagated unchanged to this neighbor\n"
"As-path attribute\n"
"Nexthop attribute\n"
"Med attribute\n")
{
u_int16_t flags = PEER_FLAG_AS_PATH_UNCHANGED;
if (strncmp (argv[1], "next-hop", 1) == 0)
SET_FLAG (flags, PEER_FLAG_NEXTHOP_UNCHANGED);
else if (strncmp (argv[1], "med", 1) == 0)
SET_FLAG (flags, PEER_FLAG_MED_UNCHANGED);
return peer_af_flag_unset_vty (vty, argv[0], bgp_node_afi (vty),
bgp_node_safi (vty), flags);
}
| 0
|
274,891
|
// EQUAL on two int8-quantized 1x2x2x1 tensors: only the element pair
// (7, 7) compares equal after quantization.
TEST(QuantizedComparisonsTest, EqualInt8Quantized) {
const float kMin = -127.f;
const float kMax = 127.f;
ComparisonOpModel model({TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_INT8, BuiltinOperator_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {1, -9, 7, 3});
model.QuantizeAndPopulate<int8_t>(model.input2(), {-1, 2, 7, 5});
model.Invoke();
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
}
| 0
|
273,883
|
/* FTP QUIT command: acknowledge with 221, then stop this session's
 * event loop, which tears down the control connection. */
static void handle_QUIT(ctrl_t *ctrl, char *arg)
{
send_msg(ctrl->sd, "221 Goodbye.\r\n");
uev_exit(ctrl->ctx);
}
| 0
|
225,965
|
/* Parse the payload of a 'dmlp' (TrueHD configuration) box. */
GF_Err dmlp_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_TrueHDConfigBox *ptr = (GF_TrueHDConfigBox *)s;
ISOM_DECREASE_SIZE(ptr, 10)	/* payload is 10 bytes; errors if smaller */
ptr->format_info = gf_bs_read_u32(bs);
ptr->peak_data_rate = gf_bs_read_int(bs, 15);
gf_bs_read_int(bs, 1);	/* reserved bit, discarded */
gf_bs_read_u32(bs);	/* reserved 32 bits, discarded */
return GF_OK;
| 0
|
427,211
|
/*
** Mark the function under compilation as vararg and emit OP_VARARGPREP,
** which adjusts the stack for its 'nparams' fixed parameters at run time.
*/
static void setvararg (FuncState *fs, int nparams) {
fs->f->is_vararg = 1;
luaK_codeABC(fs, OP_VARARGPREP, nparams, 0, 0);
}
| 0
|
359,663
|
/* CLI: "bgp router-id A.B.C.D" -- manually override the automatically
 * chosen BGP router identifier for the current BGP instance. */
DEFUN (bgp_router_id,
bgp_router_id_cmd,
"bgp router-id A.B.C.D",
BGP_STR
"Override configured router identifier\n"
"Manually configured router identifier\n")
{
int ret;
struct in_addr id;
struct bgp *bgp;
bgp = vty->index;
ret = inet_aton (argv[0], &id);
if (! ret)
{
vty_out (vty, "%% Malformed bgp router identifier%s", VTY_NEWLINE);
return CMD_WARNING;
}
/* Remember it as statically configured, then apply it. */
bgp->router_id_static = id;
bgp_router_id_set (bgp, &id);
return CMD_SUCCESS;
}
| 0
|
509,570
|
/* Flush (purge) the Aria transaction log.  Returns true on error,
   false on success, per the handlerton flush_logs convention.
   NOTE(review): the hton argument is unused here. */
bool maria_flush_logs(handlerton *hton)
{
return MY_TEST(translog_purge_at_flush());
}
| 0
|
257,457
|
// Builds a small training graph (variable + queue-fed gradient step),
// runs AutoParallel with 2 replicas, and verifies the exact set of
// replicated/shared nodes and the control-fetch rewiring.
TEST_F(AutoParallelTest, SimpleParallel) {
tensorflow::Scope s = tensorflow::Scope::DisabledShapeInferenceScope();
Output constant_a = ops::Const(s.WithOpName("constant_a"), 1.0f, {1});
Output constant_b = ops::Const(s.WithOpName("constant_b"), 1, {1});
Output var = ops::Variable(s.WithOpName("var"), {1}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign"), {var}, {constant_a});
Output identity = ops::Identity(s.WithOpName("identity"), {var});
Output fifo_queue = ops::FIFOQueue(s.WithOpName("fifo_queue"), {DT_FLOAT});
auto dequeue = ops::QueueDequeueMany(s.WithOpName("dequeue"), {fifo_queue},
{constant_b}, {DT_FLOAT});
Output add = ops::AddN(s.WithOpName("add"), {constant_a, dequeue[0]});
Output learning_rate = ops::Const(s.WithOpName("learning_rate"), 0.01f, {1});
Output apply_gradient = ops::ApplyGradientDescent(
s.WithOpName("apply_gradient"), {var}, {learning_rate}, {add});
GrapplerItem item;
item.init_ops.push_back("assign");
item.fetch.push_back("apply_gradient");
// NOTE(review): duplicate of the push_back above -- likely unintended;
// verify whether AutoParallel deduplicates init_ops.
item.init_ops.push_back("assign");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AutoParallel parallel(2);
GraphDef output;
Status status = parallel.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
// 5 shared nodes + 7 per replica * 2 + control-fetch + gradient wrapper.
EXPECT_EQ(21, output.node_size());
const NodeDef& node_assign = output.node(0);
EXPECT_EQ("assign", node_assign.name());
EXPECT_EQ("AutoParallel-Replica-0/constant_a", node_assign.input(1));
const NodeDef& node_constant_b = output.node(1);
EXPECT_EQ("constant_b", node_constant_b.name());
const NodeDef& node_fifo_queue = output.node(2);
EXPECT_EQ("fifo_queue", node_fifo_queue.name());
const NodeDef& node_identity = output.node(3);
EXPECT_EQ("identity", node_identity.name());
EXPECT_EQ("var", node_identity.input(0));
const NodeDef& node_var = output.node(4);
EXPECT_EQ("var", node_var.name());
const NodeDef& node_div_const0 = output.node(5);
EXPECT_EQ("AutoParallel-Replica-0/AutoParallel-Div-Const",
node_div_const0.name());
const NodeDef& node_div0 = output.node(6);
EXPECT_EQ("AutoParallel-Replica-0/AutoParallel-Div-apply_gradient",
node_div0.name());
const NodeDef& node_add0 = output.node(7);
EXPECT_EQ("AutoParallel-Replica-0/add", node_add0.name());
const NodeDef& node_gradient0 = output.node(8);
EXPECT_EQ("AutoParallel-Replica-0/apply_gradient", node_gradient0.name());
const NodeDef& node_constant_a0 = output.node(9);
EXPECT_EQ("AutoParallel-Replica-0/constant_a", node_constant_a0.name());
const NodeDef& node_dequeue0 = output.node(10);
EXPECT_EQ("AutoParallel-Replica-0/dequeue", node_dequeue0.name());
const NodeDef& node_learning_rate0 = output.node(11);
EXPECT_EQ("AutoParallel-Replica-0/learning_rate", node_learning_rate0.name());
const NodeDef& node_div_const1 = output.node(12);
EXPECT_EQ("AutoParallel-Replica-1/AutoParallel-Div-Const",
node_div_const1.name());
const NodeDef& node_div1 = output.node(13);
EXPECT_EQ("AutoParallel-Replica-1/AutoParallel-Div-apply_gradient",
node_div1.name());
const NodeDef& node_add1 = output.node(14);
EXPECT_EQ("AutoParallel-Replica-1/add", node_add1.name());
const NodeDef& node_gradient1 = output.node(15);
EXPECT_EQ("AutoParallel-Replica-1/apply_gradient", node_gradient1.name());
const NodeDef& node_constant_a1 = output.node(16);
EXPECT_EQ("AutoParallel-Replica-1/constant_a", node_constant_a1.name());
const NodeDef& node_dequeue1 = output.node(17);
EXPECT_EQ("AutoParallel-Replica-1/dequeue", node_dequeue1.name());
const NodeDef& node_learning_rate1 = output.node(18);
EXPECT_EQ("AutoParallel-Replica-1/learning_rate", node_learning_rate1.name());
const NodeDef& node_fetch = output.node(19);
EXPECT_EQ("AutoParallel-Control-Fetch", node_fetch.name());
EXPECT_EQ("^AutoParallel-Replica-0/apply_gradient", node_fetch.input(0));
EXPECT_EQ("^AutoParallel-Replica-1/apply_gradient", node_fetch.input(1));
const NodeDef& node_gradient = output.node(20);
EXPECT_EQ("apply_gradient", node_gradient.name());
EXPECT_EQ("^AutoParallel-Control-Fetch", node_gradient.input(0));
}
| 0
|
472,373
|
// Report whether JVMTI state has changed since this compilation began:
// classes were redefined, or a capability we sampled as "off" at compile
// start has since been enabled.  Any such change invalidates assumptions
// baked into the compile.
bool ciEnv::jvmti_state_changed() const {
  // Some classes were redefined
  if (_jvmti_redefinition_count != JvmtiExport::redefinition_count()) {
    return true;
  }

  // Each capability matters only when it was off at compile start and is
  // on now; short-circuit evaluation preserves the original query order.
  return (!_jvmti_can_access_local_variables &&
          JvmtiExport::can_access_local_variables()) ||
         (!_jvmti_can_hotswap_or_post_breakpoint &&
          JvmtiExport::can_hotswap_or_post_breakpoint()) ||
         (!_jvmti_can_post_on_exceptions &&
          JvmtiExport::can_post_on_exceptions()) ||
         (!_jvmti_can_pop_frame &&
          JvmtiExport::can_pop_frame()) ||
         (!_jvmti_can_get_owned_monitor_info &&
          JvmtiExport::can_get_owned_monitor_info()) ||
         (!_jvmti_can_walk_any_space &&
          JvmtiExport::can_walk_any_space());
}
| 0
|
198,146
|
void Compute(OpKernelContext* const context) override {
    // For each node in [node_id_first, node_id_last), find the best split
    // (feature dimension, bucket, split type) by gain and emit per-node
    // split description tensors.
    // node_id_range
    const Tensor* node_id_range_t;
    OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
    // Validate before indexing: a malformed input with fewer than two
    // elements would otherwise cause an out-of-bounds read below.
    OP_REQUIRES(
        context, node_id_range_t->NumElements() == 2,
        errors::InvalidArgument("node_id_range must contain exactly 2 "
                                "elements, got ",
                                node_id_range_t->NumElements()));
    const auto node_id_range = node_id_range_t->vec<int32>();
    const int32_t node_id_first = node_id_range(0);  // inclusive
    const int32_t node_id_last = node_id_range(1);   // exclusive
    const Tensor* stats_summary_t;
    OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t));
    TTypes<float, 4>::ConstTensor stats_summary =
        stats_summary_t->tensor<float, 4>();
    const int32_t feature_dims = stats_summary_t->dim_size(1);
    // The last bucket is for default/missing value.
    const int32_t num_buckets = stats_summary_t->dim_size(2) - 1;
    const int32_t logits_dim = logits_dim_;
    const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim;
    DCHECK_GT(hessian_dim, 0);
    DCHECK_LE(hessian_dim, logits_dim * logits_dim);
    const Tensor* l1_t;
    OP_REQUIRES_OK(context, context->input("l1", &l1_t));
    const auto l1 = l1_t->scalar<float>()();
    DCHECK_GE(l1, 0);
    if (logits_dim_ > 1) {
      // Multi-class L1 regularization not supported yet.
      DCHECK_EQ(l1, 0);
    }
    const Tensor* l2_t;
    OP_REQUIRES_OK(context, context->input("l2", &l2_t));
    const auto l2 = l2_t->scalar<float>()();
    DCHECK_GE(l2, 0);
    const Tensor* tree_complexity_t;
    OP_REQUIRES_OK(context,
                   context->input("tree_complexity", &tree_complexity_t));
    const auto tree_complexity = tree_complexity_t->scalar<float>()();
    const Tensor* min_node_weight_t;
    OP_REQUIRES_OK(context,
                   context->input("min_node_weight", &min_node_weight_t));
    const auto min_node_weight = min_node_weight_t->scalar<float>()();
    std::vector<int32> output_node_ids;
    std::vector<float> output_gains;
    std::vector<int32> output_feature_dimensions;
    std::vector<int32> output_thresholds;
    std::vector<Eigen::VectorXf> output_left_node_contribs;
    std::vector<Eigen::VectorXf> output_right_node_contribs;
    std::vector<string> output_split_types;
    // TODO(tanzheny) parallelize the computation.
    // Iterate each node and find the best gain per node.
    for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) {
      float best_gain = std::numeric_limits<float>::lowest();
      int32_t best_bucket = 0;
      int32_t best_f_dim = 0;
      string best_split_type;
      Eigen::VectorXf best_contrib_for_left(logits_dim);
      Eigen::VectorXf best_contrib_for_right(logits_dim);
      float parent_gain;
      // Including default bucket.
      ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0),
                               num_buckets + 1, logits_dim + hessian_dim);
      const Eigen::VectorXf total_grad =
          stats_mat.leftCols(logits_dim).colwise().sum();
      const Eigen::VectorXf total_hess =
          stats_mat.rightCols(hessian_dim).colwise().sum();
      // Skip nodes whose total hessian weight is below the threshold.
      if (total_hess.norm() < min_node_weight) {
        continue;
      }
      Eigen::VectorXf parent_weight(logits_dim);
      CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight,
                               &parent_gain);
      if (split_type_ == "inequality") {
        CalculateBestInequalitySplit(
            stats_summary, node_id, feature_dims, logits_dim, hessian_dim,
            num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket,
            &best_f_dim, &best_split_type, &best_contrib_for_left,
            &best_contrib_for_right);
      } else {
        CalculateBestEqualitySplit(
            stats_summary, total_grad, total_hess, node_id, feature_dims,
            logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain,
            &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left,
            &best_contrib_for_right);
      }
      if (best_gain == std::numeric_limits<float>::lowest()) {
        // Do not add the node if not split if found.
        continue;
      }
      output_node_ids.push_back(node_id);
      // Remove the parent gain for the parent node.
      output_gains.push_back(best_gain - parent_gain);
      output_feature_dimensions.push_back(best_f_dim);
      // default direction is fixed for dense splits.
      // TODO(tanzheny) account for default values.
      output_split_types.push_back(best_split_type);
      output_thresholds.push_back(best_bucket);
      output_left_node_contribs.push_back(best_contrib_for_left);
      output_right_node_contribs.push_back(best_contrib_for_right);
    }  // for node id
    const int num_nodes = output_node_ids.size();
    // output_node_ids
    Tensor* output_node_ids_t = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes},
                                                     &output_node_ids_t));
    auto output_node_ids_vec = output_node_ids_t->vec<int32>();
    // output_gains
    Tensor* output_gains_t;
    OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes},
                                                     &output_gains_t));
    auto output_gains_vec = output_gains_t->vec<float>();
    // output_feature_dimensions
    Tensor* output_feature_dimension_t;
    OP_REQUIRES_OK(context,
                   context->allocate_output("feature_dimensions", {num_nodes},
                                            &output_feature_dimension_t));
    auto output_feature_dimensions_vec =
        output_feature_dimension_t->vec<int32>();
    // output_thresholds
    Tensor* output_thresholds_t;
    OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes},
                                                     &output_thresholds_t));
    auto output_thresholds_vec = output_thresholds_t->vec<int32>();
    // output_left_node_contribs
    Tensor* output_left_node_contribs_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "left_node_contribs", {num_nodes, logits_dim},
                                &output_left_node_contribs_t));
    auto output_left_node_contribs_matrix =
        output_left_node_contribs_t->matrix<float>();
    // output_right_node_contribs
    Tensor* output_right_node_contribs_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "right_node_contribs", {num_nodes, logits_dim},
                                &output_right_node_contribs_t));
    auto output_right_node_contribs_matrix =
        output_right_node_contribs_t->matrix<float>();
    // split type
    Tensor* output_split_types_t;
    OP_REQUIRES_OK(
        context, context->allocate_output("split_with_default_directions",
                                          {num_nodes}, &output_split_types_t));
    auto output_split_types_vec = output_split_types_t->vec<tstring>();
    // Sets output tensors from vectors.
    for (int i = 0; i < num_nodes; ++i) {
      output_node_ids_vec(i) = output_node_ids[i];
      // Adjust the gains to penalize by tree complexity.
      output_gains_vec(i) = output_gains[i] - tree_complexity;
      output_feature_dimensions_vec(i) = output_feature_dimensions[i];
      output_thresholds_vec(i) = output_thresholds[i];
      for (int j = 0; j < logits_dim; ++j) {
        output_left_node_contribs_matrix(i, j) =
            output_left_node_contribs[i][j];
        output_right_node_contribs_matrix(i, j) =
            output_right_node_contribs[i][j];
      }
      output_split_types_vec(i) = output_split_types[i];
    }
  }
| 1
|
439,503
|
/*
 * Read and decode one squashfs-3.x inode located at (start_block, offset)
 * inside the inode table.  Returns a pointer to a STATIC struct inode that
 * is overwritten on every call — callers must copy what they need before
 * calling again.  Exits the program on lookup failure or unknown inode type.
 */
static struct inode *read_inode(unsigned int start_block, unsigned int offset)
{
	static union squashfs_inode_header_3 header;
	long long start = sBlk.s.inode_table_start + start_block;
	int bytes = lookup_entry(inode_table_hash, start);
	char *block_ptr = inode_table + bytes + offset;
	static struct inode i;
	TRACE("read_inode: reading inode [%d:%d]\n", start_block, offset);
	if(bytes == -1)
		EXIT_UNSQUASH("read_inode: inode table block %lld not found\n",
			start);
	/* Decode the common base header first (byte-swapping if the image was
	 * created with the opposite endianness). */
	if(swap) {
		squashfs_base_inode_header_3 sinode;
		memcpy(&sinode, block_ptr, sizeof(header.base));
		SQUASHFS_SWAP_BASE_INODE_HEADER_3(&header.base, &sinode,
			sizeof(squashfs_base_inode_header_3));
	} else
		memcpy(&header.base, block_ptr, sizeof(header.base));
	i.xattr = SQUASHFS_INVALID_XATTR;
	/* NOTE(review): header.base.uid/guid are used to index uid_table and
	 * guid_table without a range check here — presumably validated at
	 * superblock read time; confirm against caller. */
	i.uid = (uid_t) uid_table[header.base.uid];
	i.gid = header.base.guid == SQUASHFS_GUIDS ? i.uid :
		(uid_t) guid_table[header.base.guid];
	i.mode = lookup_type[header.base.inode_type] | header.base.mode;
	i.type = header.base.inode_type;
	i.time = header.base.mtime;
	i.inode_number = header.base.inode_number;
	/* Decode the type-specific remainder of the header. */
	switch(header.base.inode_type) {
		case SQUASHFS_DIR_TYPE: {
			squashfs_dir_inode_header_3 *inode = &header.dir;
			if(swap) {
				squashfs_dir_inode_header_3 sinode;
				memcpy(&sinode, block_ptr, sizeof(header.dir));
				SQUASHFS_SWAP_DIR_INODE_HEADER_3(&header.dir,
					&sinode);
			} else
				memcpy(&header.dir, block_ptr,
					sizeof(header.dir));
			i.data = inode->file_size;
			i.offset = inode->offset;
			i.start = inode->start_block;
			break;
		}
		case SQUASHFS_LDIR_TYPE: {
			squashfs_ldir_inode_header_3 *inode = &header.ldir;
			if(swap) {
				squashfs_ldir_inode_header_3 sinode;
				memcpy(&sinode, block_ptr, sizeof(header.ldir));
				SQUASHFS_SWAP_LDIR_INODE_HEADER_3(&header.ldir,
					&sinode);
			} else
				memcpy(&header.ldir, block_ptr,
					sizeof(header.ldir));
			i.data = inode->file_size;
			i.offset = inode->offset;
			i.start = inode->start_block;
			break;
		}
		case SQUASHFS_FILE_TYPE: {
			squashfs_reg_inode_header_3 *inode = &header.reg;
			if(swap) {
				squashfs_reg_inode_header_3 sinode;
				memcpy(&sinode, block_ptr, sizeof(sinode));
				SQUASHFS_SWAP_REG_INODE_HEADER_3(inode,
					&sinode);
			} else
				memcpy(inode, block_ptr, sizeof(*inode));
			i.data = inode->file_size;
			/* A file with no fragment has no tail bytes; otherwise
			 * the tail is file_size modulo the block size. */
			i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG
				?  0 : inode->file_size % sBlk.s.block_size;
			i.fragment = inode->fragment;
			i.offset = inode->offset;
			i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ?
				(i.data + sBlk.s.block_size - 1) >>
				sBlk.s.block_log :
				i.data >> sBlk.s.block_log;
			i.start = inode->start_block;
			i.sparse = 1;
			/* Block list immediately follows the inode header. */
			i.block_ptr = block_ptr + sizeof(*inode);
			break;
		}
		case SQUASHFS_LREG_TYPE: {
			squashfs_lreg_inode_header_3 *inode = &header.lreg;
			if(swap) {
				squashfs_lreg_inode_header_3 sinode;
				memcpy(&sinode, block_ptr, sizeof(sinode));
				SQUASHFS_SWAP_LREG_INODE_HEADER_3(inode,
					&sinode);
			} else
				memcpy(inode, block_ptr, sizeof(*inode));
			i.data = inode->file_size;
			i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG
				?  0 : inode->file_size % sBlk.s.block_size;
			i.fragment = inode->fragment;
			i.offset = inode->offset;
			i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ?
				(inode->file_size + sBlk.s.block_size - 1) >>
				sBlk.s.block_log :
				inode->file_size >> sBlk.s.block_log;
			i.start = inode->start_block;
			i.sparse = 1;
			i.block_ptr = block_ptr + sizeof(*inode);
			break;
		}
		case SQUASHFS_SYMLINK_TYPE: {
			squashfs_symlink_inode_header_3 *inodep =
				&header.symlink;
			if(swap) {
				squashfs_symlink_inode_header_3 sinodep;
				memcpy(&sinodep, block_ptr, sizeof(sinodep));
				SQUASHFS_SWAP_SYMLINK_INODE_HEADER_3(inodep,
					&sinodep);
			} else
				memcpy(inodep, block_ptr, sizeof(*inodep));
			/* Target string follows the header; copy and
			 * NUL-terminate it (allocated, owned by i.symlink). */
			i.symlink = malloc(inodep->symlink_size + 1);
			if(i.symlink == NULL)
				EXIT_UNSQUASH("read_inode: failed to malloc "
					"symlink data\n");
			strncpy(i.symlink, block_ptr +
				sizeof(squashfs_symlink_inode_header_3),
				inodep->symlink_size);
			i.symlink[inodep->symlink_size] = '\0';
			i.data = inodep->symlink_size;
			break;
		}
		case SQUASHFS_BLKDEV_TYPE:
		case SQUASHFS_CHRDEV_TYPE: {
			squashfs_dev_inode_header_3 *inodep = &header.dev;
			if(swap) {
				squashfs_dev_inode_header_3 sinodep;
				memcpy(&sinodep, block_ptr, sizeof(sinodep));
				SQUASHFS_SWAP_DEV_INODE_HEADER_3(inodep,
					&sinodep);
			} else
				memcpy(inodep, block_ptr, sizeof(*inodep));
			i.data = inodep->rdev;
			break;
		}
		case SQUASHFS_FIFO_TYPE:
		case SQUASHFS_SOCKET_TYPE:
			i.data = 0;
			break;
		default:
			EXIT_UNSQUASH("Unknown inode type %d in read_inode!\n",
				header.base.inode_type);
	}
	return &i;
}
| 0
|
230,286
|
njs_array_handler_map(njs_vm_t *vm, njs_iterator_args_t *args,
    njs_value_t *entry, int64_t n)
{
    njs_int_t    ret;
    njs_array_t  *retval;
    njs_value_t  this;

    /* Per-element Array.prototype.map() step: invoke the callback on a valid
     * entry and store its result at index n of the result array (args->data).
     * Holes (invalid entries) are preserved as holes in the result. */
    retval = args->data;

    if (retval->object.fast_array) {
        /* Mark the slot as a hole first so it stays a hole when the source
         * entry is itself a hole. */
        njs_set_invalid(&retval->start[n]);
    }

    if (njs_is_valid(entry)) {
        ret = njs_array_iterator_call(vm, args, entry, n);
        if (njs_slow_path(ret != NJS_OK)) {
            return ret;
        }

        if (njs_is_valid(&vm->retval)) {
            if (retval->object.fast_array) {
                /* Fast path: direct slot assignment. */
                retval->start[n] = vm->retval;

            } else {
                /* Slow path: go through the generic property setter. */
                njs_set_array(&this, retval);

                ret = njs_value_property_i64_set(vm, &this, n, &vm->retval);
                if (njs_slow_path(ret != NJS_OK)) {
                    return ret;
                }
            }
        }
    }

    return NJS_OK;
}
| 0
|
294,370
|
/* Return non-zero when x is an instance of Numeric (or a subclass). */
k_numeric_p(VALUE x)
{
    return f_kind_of_p(x, rb_cNumeric);
}
| 0
|
508,817
|
void st_select_lex_node::fast_exclude()
{
  // Unlink this node from its sibling chain, if it is currently linked.
  if (link_prev)
  {
    *link_prev= link_next;
    if (link_next)
      link_next->link_prev= link_prev;
  }

  // Recursively exclude every slave; note the member pointer itself is
  // advanced, leaving `slave` NULL afterwards (as in the original).
  while (slave)
  {
    slave->fast_exclude();
    slave= slave->next;
  }
}
| 0
|
243,995
|
/* Allocate a new Projection Header ('prhd') box.
 * ISOM_DECL_BOX_ALLOC declares and zero-allocates `tmp`; returns NULL on
 * allocation failure via the macro's own error path. */
GF_Box *prhd_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_ProjectionHeaderBox, GF_ISOM_BOX_TYPE_PRHD);
	return (GF_Box *)tmp;
}
| 0
|
369,387
|
/*
 * Release the task's batch of cached io_uring references: subtract them from
 * the per-task inflight counter and drop the corresponding task refs.
 * No-op when nothing is cached.
 */
static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		percpu_counter_sub(&tctx->inflight, refs);
		put_task_struct_many(task, refs);
	}
}
| 0
|
278,281
|
/*
 * Give the current line an indent of "size" screen columns, copying as much
 * whitespace as possible from "src" and filling the remainder with tabs
 * and/or spaces according to 'expandtab' and 'tabstop'.  The rest of the
 * current line is appended after the new indent and the cursor is placed
 * just past the indent.  Returns TRUE on success, FALSE when out of memory.
 */
    copy_indent(int size, char_u *src)
{
    char_u	*p = NULL;
    char_u	*line = NULL;
    char_u	*s;
    int		todo;
    int		ind_len;
    int		line_len = 0;
    int		tab_pad;
    int		ind_done;
    int		round;
#ifdef FEAT_VARTABS
    int		ind_col;
#endif

    // Round 1: compute the number of characters needed for the indent
    // Round 2: copy the characters.
    for (round = 1; round <= 2; ++round)
    {
	todo = size;
	ind_len = 0;
	ind_done = 0;
#ifdef FEAT_VARTABS
	ind_col = 0;
#endif
	s = src;

	// Count/copy the usable portion of the source line
	while (todo > 0 && VIM_ISWHITE(*s))
	{
	    if (*s == TAB)
	    {
#ifdef FEAT_VARTABS
		tab_pad = tabstop_padding(ind_done, curbuf->b_p_ts,
							curbuf->b_p_vts_array);
#else
		tab_pad = (int)curbuf->b_p_ts
					   - (ind_done % (int)curbuf->b_p_ts);
#endif
		// Stop if this tab will overshoot the target
		if (todo < tab_pad)
		    break;
		todo -= tab_pad;
		ind_done += tab_pad;
#ifdef FEAT_VARTABS
		ind_col += tab_pad;
#endif
	    }
	    else
	    {
		--todo;
		++ind_done;
#ifdef FEAT_VARTABS
		++ind_col;
#endif
	    }
	    ++ind_len;
	    if (p != NULL)
		*p++ = *s;
	    ++s;
	}

	// Fill to next tabstop with a tab, if possible
#ifdef FEAT_VARTABS
	tab_pad = tabstop_padding(ind_done, curbuf->b_p_ts,
						curbuf->b_p_vts_array);
#else
	tab_pad = (int)curbuf->b_p_ts - (ind_done % (int)curbuf->b_p_ts);
#endif
	if (todo >= tab_pad && !curbuf->b_p_et)
	{
	    todo -= tab_pad;
	    ++ind_len;
#ifdef FEAT_VARTABS
	    ind_col += tab_pad;
#endif
	    if (p != NULL)
		*p++ = TAB;
	}

	// Add tabs required for indent
	if (!curbuf->b_p_et)
	{
#ifdef FEAT_VARTABS
	    for (;;)
	    {
		tab_pad = tabstop_padding(ind_col, curbuf->b_p_ts,
							curbuf->b_p_vts_array);
		if (todo < tab_pad)
		    break;
		todo -= tab_pad;
		++ind_len;
		ind_col += tab_pad;
		if (p != NULL)
		    *p++ = TAB;
	    }
#else
	    while (todo >= (int)curbuf->b_p_ts)
	    {
		todo -= (int)curbuf->b_p_ts;
		++ind_len;
		if (p != NULL)
		    *p++ = TAB;
	    }
#endif
	}

	// Count/add spaces required for indent
	while (todo > 0)
	{
	    --todo;
	    ++ind_len;
	    if (p != NULL)
		*p++ = ' ';
	}

	// After round 1 `p` is still NULL: allocate the buffer that round 2
	// will fill, sized for the new indent plus the rest of the line.
	if (p == NULL)
	{
	    // Allocate memory for the result: the copied indent, new indent
	    // and the rest of the line.
	    line_len = (int)STRLEN(ml_get_curline()) + 1;
	    line = alloc(ind_len + line_len);
	    if (line == NULL)
		return FALSE;
	    p = line;
	}
    }

    // Append the original line
    mch_memmove(p, ml_get_curline(), (size_t)line_len);

    // Replace the line
    ml_replace(curwin->w_cursor.lnum, line, FALSE);

    // Put the cursor after the indent.
    curwin->w_cursor.col = ind_len;
    return TRUE;
}
| 0
|
238,796
|
/*
 * Fuzzy-match the pattern "pat_arg" against "str".  Unless "matchseq" is
 * set, the pattern is split on whitespace and every word must match
 * somewhere in "str".  The cumulative score is stored in "*outScore" and
 * the matched character positions in "matches" (at most maxMatches).
 * Returns TRUE when all words matched, FALSE otherwise (or on alloc
 * failure).  "pat_arg" itself is not modified; a private copy is used.
 */
    fuzzy_match(
	char_u		*str,
	char_u		*pat_arg,
	int		matchseq,
	int		*outScore,
	int_u		*matches,
	int		maxMatches)
{
    int		recursionCount = 0;
    int		len = MB_CHARLEN(str);
    char_u	*save_pat;
    char_u	*pat;
    char_u	*p;
    int		complete = FALSE;
    int		score = 0;
    int		numMatches = 0;
    int		matchCount;

    *outScore = 0;

    // Work on a private copy because the word loop writes NULs into it.
    save_pat = vim_strsave(pat_arg);
    if (save_pat == NULL)
	return FALSE;
    pat = save_pat;
    p = pat;

    // Try matching each word in 'pat_arg' in 'str'
    while (TRUE)
    {
	if (matchseq)
	    complete = TRUE;
	else
	{
	    // Extract one word from the pattern (separated by space)
	    p = skipwhite(p);
	    if (*p == NUL)
		break;
	    pat = p;
	    while (*p != NUL && !VIM_ISWHITE(PTR2CHAR(p)))
	    {
		if (has_mbyte)
		    MB_PTR_ADV(p);
		else
		    ++p;
	    }
	    if (*p == NUL)		// processed all the words
		complete = TRUE;
	    *p = NUL;
	}

	score = 0;
	recursionCount = 0;
	matchCount = fuzzy_match_recursive(pat, str, 0, &score, str, len, NULL,
				matches + numMatches, maxMatches - numMatches,
				0, &recursionCount);
	// One word failing to match makes the whole pattern fail.
	if (matchCount == 0)
	{
	    numMatches = 0;
	    break;
	}

	// Accumulate the match score and the number of matches
	*outScore += score;
	numMatches += matchCount;

	if (complete)
	    break;

	// try matching the next word
	++p;
    }

    vim_free(save_pat);
    return numMatches != 0;
}
| 0
|
364,737
|
/*
 * Handle end-of-file while reading an Emacs tags file.  When the file was
 * reached via an "include" directive, pop the include stack and resume the
 * parent tags file, returning TRUE.  Returns FALSE at the end of the
 * outermost file (stop processing).
 */
    emacs_tags_file_eof(findtags_state_T *st)
{
    if (!incstack_idx)	// reached end of file. stop processing.
	return FALSE;

    // reached the end of an included tags file. pop it.
    --incstack_idx;
    fclose(st->fp);	// end of this file ...
    st->fp = incstack[incstack_idx].fp;
    STRCPY(st->tag_fname, incstack[incstack_idx].etag_fname);
    vim_free(incstack[incstack_idx].etag_fname);
    st->is_etag = TRUE;	// (only etags can include)

    return TRUE;
}
| 0
|
219,025
|
// Lowest representable value of the given quantized DataType, as a float.
// Non-quantized types map to 0.0f.
float QuantizedTypeMinAsFloat(DataType data_type) {
  if (data_type == DT_QINT8) {
    return Eigen::NumTraits<qint8>::lowest();
  }
  if (data_type == DT_QUINT8) {
    return Eigen::NumTraits<quint8>::lowest();
  }
  if (data_type == DT_QINT16) {
    return Eigen::NumTraits<qint16>::lowest();
  }
  if (data_type == DT_QUINT16) {
    return Eigen::NumTraits<quint16>::lowest();
  }
  if (data_type == DT_QINT32) {
    return Eigen::NumTraits<qint32>::lowest();
  }
  return 0.0f;
}
| 0
|
263,306
|
/*
 * Read one line from fp into str: up to size-1 bytes, stopping after a
 * newline (which is kept) or at EOF.  The result is always NUL-terminated.
 *
 * Returns str on success, or NULL when no byte was stored (EOF or error
 * before any character, size too small, or a NULL argument).
 */
char *_q_fgets(char *str, size_t size, FILE *fp)
{
    int c;
    char *ptr;

    /* Original wrote the terminating '\0' even for size == 0, overflowing
     * a zero-length buffer; guard the degenerate arguments instead. */
    if (str == NULL || fp == NULL || size == 0) return NULL;

    for (ptr = str; size > 1; size--) {
        c = fgetc(fp);
        if (c == EOF) break;
        *ptr++ = (char)c;
        if (c == '\n') break;
    }

    *ptr = '\0';

    /* ptr == str means nothing was stored.  (Unlike strlen(), this also
     * reports success when the first byte read happens to be a NUL.) */
    if (ptr == str) return NULL;
    return str;
}
| 0
|
244,023
|
/* Allocate a new Track Extension Properties ('trep') box and its (initially
 * empty) child-box list.  ISOM_DECL_BOX_ALLOC declares and zero-allocates
 * `tmp`, returning NULL on allocation failure via the macro's error path. */
GF_Box *trep_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_TrackExtensionPropertiesBox, GF_ISOM_BOX_TYPE_TREP);
	tmp->child_boxes = gf_list_new();
	return (GF_Box *)tmp;
}
| 0
|
345,132
|
/*
 * Dump the PXA3xx GCU state to the kernel log for debugging: running/idle
 * flag, interrupt status, ring-buffer base/length and the execute/head/tail
 * positions expressed as word offsets from the ring-buffer base.
 */
dump_whole_state(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_shared *sh = priv->shared;
	u32 base = gc_readl(priv, REG_GCRBBR);

	QDUMP("DUMP");

	printk(KERN_DEBUG "== PXA3XX-GCU DUMP ==\n"
		"%s, STATUS 0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, T %5ld\n",
		sh->hw_running ? "running" : "idle   ",
		gc_readl(priv, REG_GCISCR),
		gc_readl(priv, REG_GCRBBR),
		gc_readl(priv, REG_GCRBLR),
		(gc_readl(priv, REG_GCRBEXHR) - base) / 4,
		(gc_readl(priv, REG_GCRBHR) - base) / 4,
		(gc_readl(priv, REG_GCRBTR) - base) / 4);
}
| 0
|
432,352
|
/*
 * Handle VHOST_USER_GET_INFLIGHT_FD: allocate (or replace) the shared
 * inflight-tracking memory region for this device, initialize it for
 * packed rings if needed, and reply to the frontend with the region's
 * fd, size and offset.
 *
 * Returns RTE_VHOST_MSG_RESULT_REPLY on success (reply carries the fd in
 * ctx->fds[0]), RTE_VHOST_MSG_RESULT_ERR on validation or allocation
 * failure.
 */
vhost_user_get_inflight_fd(struct virtio_net **pdev,
			struct vhu_msg_context *ctx,
			int main_fd __rte_unused)
{
	struct rte_vhost_inflight_info_packed *inflight_packed;
	uint64_t pervq_inflight_size, mmap_size;
	uint16_t num_queues, queue_size;
	struct virtio_net *dev = *pdev;
	int fd, i, j;
	int numa_node = SOCKET_ID_ANY;
	void *addr;

	if (validate_msg_fds(dev, ctx, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	if (ctx->msg.size != sizeof(ctx->msg.payload.inflight)) {
		VHOST_LOG_CONFIG(ERR, "(%s) invalid get_inflight_fd message size is %d\n",
			dev->ifname, ctx->msg.size);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	/*
	 * If VQ 0 has already been allocated, try to allocate on the same
	 * NUMA node. It can be reallocated later in numa_realloc().
	 */
	if (dev->nr_vring > 0)
		numa_node = dev->virtqueue[0]->numa_node;

	/* Lazily create the per-device inflight bookkeeping structure. */
	if (dev->inflight_info == NULL) {
		dev->inflight_info = rte_zmalloc_socket("inflight_info",
				sizeof(struct inflight_mem_info), 0, numa_node);
		if (!dev->inflight_info) {
			VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n",
				dev->ifname);
			return RTE_VHOST_MSG_RESULT_ERR;
		}
		dev->inflight_info->fd = -1;
	}

	/* NOTE(review): num_queues/queue_size come straight from the
	 * frontend message and size the mmap below — presumably bounded by
	 * the transport; confirm range validation upstream. */
	num_queues = ctx->msg.payload.inflight.num_queues;
	queue_size = ctx->msg.payload.inflight.queue_size;

	VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd num_queues: %u\n",
		dev->ifname, ctx->msg.payload.inflight.num_queues);
	VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd queue_size: %u\n",
		dev->ifname, ctx->msg.payload.inflight.queue_size);

	if (vq_is_packed(dev))
		pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
	else
		pervq_inflight_size = get_pervq_shm_size_split(queue_size);

	mmap_size = num_queues * pervq_inflight_size;
	addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd);
	if (!addr) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc vhost inflight area\n", dev->ifname);
		ctx->msg.payload.inflight.mmap_size = 0;
		return RTE_VHOST_MSG_RESULT_ERR;
	}
	memset(addr, 0, mmap_size);

	/* Release any previously shared inflight region before replacing it. */
	if (dev->inflight_info->addr) {
		munmap(dev->inflight_info->addr, dev->inflight_info->size);
		dev->inflight_info->addr = NULL;
	}

	if (dev->inflight_info->fd >= 0) {
		close(dev->inflight_info->fd);
		dev->inflight_info->fd = -1;
	}

	dev->inflight_info->addr = addr;
	dev->inflight_info->size = ctx->msg.payload.inflight.mmap_size = mmap_size;
	dev->inflight_info->fd = ctx->fds[0] = fd;
	ctx->msg.payload.inflight.mmap_offset = 0;
	ctx->fd_num = 1;

	/* Packed rings need wrap counters and the descriptor free list
	 * pre-initialized per queue. */
	if (vq_is_packed(dev)) {
		for (i = 0; i < num_queues; i++) {
			inflight_packed =
				(struct rte_vhost_inflight_info_packed *)addr;
			inflight_packed->used_wrap_counter = 1;
			inflight_packed->old_used_wrap_counter = 1;
			for (j = 0; j < queue_size; j++)
				inflight_packed->desc[j].next = j + 1;
			addr = (void *)((char *)addr + pervq_inflight_size);
		}
	}

	VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_size: %"PRIu64"\n",
		dev->ifname, ctx->msg.payload.inflight.mmap_size);
	VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_offset: %"PRIu64"\n",
		dev->ifname, ctx->msg.payload.inflight.mmap_offset);
	VHOST_LOG_CONFIG(INFO, "(%s) send inflight fd: %d\n", dev->ifname, ctx->fds[0]);
	return RTE_VHOST_MSG_RESULT_REPLY;
}
| 0
|
361,307
|
stl_add_facet(stl_file *stl, stl_facet *new_facet) {
  if (stl->error) return;
  stl->stats.facets_added += 1;

  /* Grow both arrays in chunks of 256 facets when full.  Use temporaries so
   * a failed realloc neither leaks the original buffer nor leaves a NULL
   * pointer to be dereferenced below (the original overwrote the pointers
   * directly and continued after perror). */
  if(stl->stats.facets_malloced < stl->stats.number_of_facets + 1) {
    stl_facet *facets = (stl_facet*)realloc(stl->facet_start,
                        (sizeof(stl_facet) * (stl->stats.facets_malloced + 256)));
    if(facets == NULL) {
      perror("stl_add_facet");
      stl->error = 1;
      return;
    }
    stl->facet_start = facets;

    stl_neighbors *neighbors = (stl_neighbors*)realloc(stl->neighbors_start,
                               (sizeof(stl_neighbors) * (stl->stats.facets_malloced + 256)));
    if(neighbors == NULL) {
      perror("stl_add_facet");
      stl->error = 1;
      return;
    }
    stl->neighbors_start = neighbors;
    stl->stats.facets_malloced += 256;
  }

  stl->facet_start[stl->stats.number_of_facets] = *new_facet;

  /* note that the normal vector is not set here, just initialized to 0 */
  stl->facet_start[stl->stats.number_of_facets].normal.x = 0.0;
  stl->facet_start[stl->stats.number_of_facets].normal.y = 0.0;
  stl->facet_start[stl->stats.number_of_facets].normal.z = 0.0;

  /* New facet starts with no known neighbors. */
  stl->neighbors_start[stl->stats.number_of_facets].neighbor[0] = -1;
  stl->neighbors_start[stl->stats.number_of_facets].neighbor[1] = -1;
  stl->neighbors_start[stl->stats.number_of_facets].neighbor[2] = -1;
  stl->stats.number_of_facets += 1;
}
| 0
|
247,679
|
// Session tickets must only be resumed when the two server contexts are
// equivalent: identical SAN-matching configs resume, differing configs
// (exact vs. prefix matcher) must not.
TEST_P(SslSocketTest, TicketSessionResumptionDifferentMatchSAN) {
  const std::string server_ctx_yaml1 = R"EOF(
  session_ticket_keys:
    keys:
      filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a"
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem"
    validation_context:
      trusted_ca:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
      match_subject_alt_names:
        - exact: "spiffe://lyft.com/test-team"
)EOF";
  // NOTE(review): the closing delimiter below is `")EOF"` — the extra `"`
  // becomes part of the YAML content.  Looks unintentional; confirm whether
  // the second config is meant to end with a stray quote line.
  const std::string server_ctx_yaml2 = R"EOF(
  session_ticket_keys:
    keys:
      filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a"
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem"
    validation_context:
      trusted_ca:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
      match_subject_alt_names:
        - prefix: "spiffe://lyft.com/test-team"
")EOF";
  const std::string client_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem"
)EOF";

  // Same config on both servers: resumption expected (true).
  testTicketSessionResumption(server_ctx_yaml1, {}, server_ctx_yaml1, {}, client_ctx_yaml, true,
                              GetParam());
  // Different SAN matcher kinds: resumption must fail (false).
  testTicketSessionResumption(server_ctx_yaml1, {}, server_ctx_yaml2, {}, client_ctx_yaml, false,
                              GetParam());
}
| 0
|
312,589
|
/*
 * Fire the QuickFixCmdPre autocommand for a :cexpr-family command.
 * Returns FAIL when an autocommand caused an abort, OK otherwise
 * (including when no autocommand name applies or none were executed).
 */
    trigger_cexpr_autocmd(int cmdidx)
{
    char_u	*au_name = cexpr_get_auname(cmdidx);

    if (au_name == NULL)
	return OK;
    if (!apply_autocmds(EVENT_QUICKFIXCMDPRE, au_name,
						curbuf->b_fname, TRUE, curbuf))
	return OK;
    if (aborting())
	return FAIL;
    return OK;
}
| 0
|
264,712
|
// Copies node `n` into `constant_graph`, records the copy in `node_map`, and
// reconnects its non-control input edges to the previously copied sources.
// CHECK-fails if an input's source has not been added to `node_map` yet, so
// callers must add nodes in topological order.
void AddNodeToConstantGraph(
    Node* n, std::unordered_map<Node*, std::vector<Node*>>* node_map,
    Graph* constant_graph) {
  std::vector<Node*>& added = (*node_map)[n];
  added.push_back(constant_graph->CopyNode(n));
  for (const Edge* in_edge : n->in_edges()) {
    // Don't copy control edges to the constant graph.
    if (!in_edge->IsControlEdge()) {
      Node* in = in_edge->src();
      auto it = node_map->find(in);
      CHECK(it != node_map->end())
          << n->DebugString() << " <-" << in->DebugString();
      if (it->second.size() == 1) {
        constant_graph->AddEdge(it->second[0], in_edge->src_output(), added[0],
                                in_edge->dst_input());
      } else {
        // The original source node had multiple outputs and was replaced by a
        // vector of constants, so the edge comes from the 0th output of the kth
        // added constant, rather than the kth output of the added node as in
        // the standard case above.
        constant_graph->AddEdge(it->second[in_edge->src_output()], 0, added[0],
                                in_edge->dst_input());
      }
    }
  }
}
|
230,631
|
// Delegates the motion-vector lookup at pixel (x,y) to the wrapped image.
const PBMotion& get_mv_info(int x,int y) const override { return img->get_mv_info(x,y); }
| 0
|
308,167
|
/*
 * ioctl dispatcher for the fastrpc character device: routes each supported
 * command to its handler with the per-open fastrpc_user context.
 * Returns the handler's result, or -ENOTTY for unknown commands.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
| 0
|
312,511
|
/*
 * Open a new window above the quickfix window to display a file from the
 * quickfix/location list.  When "ll_ref" is non-NULL the new window inherits
 * that location list.  Returns OK on success, FAIL when the split fails.
 */
    qf_open_new_file_win(qf_info_T *ll_ref)
{
    int		flags;

    flags = WSP_ABOVE;
    if (ll_ref != NULL)
	flags |= WSP_NEWLOC;
    if (win_split(0, flags) == FAIL)
	return FAIL;		// not enough room for window
    // Prevent further splitting for subsequent jumps.
    p_swb = empty_option;	// don't split again
    swb_flags = 0;
    RESET_BINDING(curwin);
    if (ll_ref != NULL)
	// The new window should use the location list from the
	// location list window
	win_set_loclist(curwin, ll_ref);
    return OK;
}
|
273,067
|
quality_is_equal(struct media_quality *a, struct media_quality *b)
{
  // Two media qualities are equal only when all four parameters match.
  return a->sample_rate == b->sample_rate
         && a->bits_per_sample == b->bits_per_sample
         && a->channels == b->channels
         && a->bit_rate == b->bit_rate;
}
| 0
|
195,399
|
// Returns true iff `node` is an Identity (or single-input IdentityN) whose
// data input comes from a Switch node.  Control inputs and missing input
// nodes yield false.
bool IsIdentityConsumingSwitch(const MutableGraphView& graph,
                               const NodeDef& node) {
  if ((IsIdentity(node) || IsIdentityNSingleInput(node)) &&
      node.input_size() > 0) {
    TensorId tensor_id = ParseTensorName(node.input(0));
    if (IsTensorIdControlling(tensor_id)) {
      return false;
    }

    // The input may name a node that is not present in the graph (e.g. a
    // malformed or partially-rewritten GraphDef); guard against
    // dereferencing a null lookup result.
    NodeDef* input_node = graph.GetNode(tensor_id.node());
    if (input_node == nullptr) {
      return false;
    }
    return IsSwitch(*input_node);
  }
  return false;
}
| 1
|
247,543
|
// Applies the configured client-certificate checks in order: SAN string list,
// SAN matchers, then certificate hash/SPKI pins.  Any failing check returns
// Failed immediately (with the matching stats counter bumped); if at least
// one check ran and passed, returns Validated, otherwise NotValidated.
Envoy::Ssl::ClientValidationStatus DefaultCertValidator::verifyCertificate(
    X509* cert, const std::vector<std::string>& verify_san_list,
    const std::vector<SanMatcherPtr>& subject_alt_name_matchers) {
  Envoy::Ssl::ClientValidationStatus validated = Envoy::Ssl::ClientValidationStatus::NotValidated;
  if (!verify_san_list.empty()) {
    if (!verifySubjectAltName(cert, verify_san_list)) {
      stats_.fail_verify_san_.inc();
      return Envoy::Ssl::ClientValidationStatus::Failed;
    }
    validated = Envoy::Ssl::ClientValidationStatus::Validated;
  }

  if (!subject_alt_name_matchers.empty()) {
    if (!matchSubjectAltName(cert, subject_alt_name_matchers)) {
      stats_.fail_verify_san_.inc();
      return Envoy::Ssl::ClientValidationStatus::Failed;
    }
    validated = Envoy::Ssl::ClientValidationStatus::Validated;
  }

  if (!verify_certificate_hash_list_.empty() || !verify_certificate_spki_list_.empty()) {
    // Either pin list is sufficient: the certificate passes when it matches
    // at least one configured hash OR one configured SPKI.
    const bool valid_certificate_hash =
        !verify_certificate_hash_list_.empty() &&
        verifyCertificateHashList(cert, verify_certificate_hash_list_);
    const bool valid_certificate_spki =
        !verify_certificate_spki_list_.empty() &&
        verifyCertificateSpkiList(cert, verify_certificate_spki_list_);

    if (!valid_certificate_hash && !valid_certificate_spki) {
      stats_.fail_verify_cert_hash_.inc();
      return Envoy::Ssl::ClientValidationStatus::Failed;
    }

    validated = Envoy::Ssl::ClientValidationStatus::Validated;
  }

  return validated;
}
| 0
|
466,165
|
/*
 * Decode an immediate operand of `size` bytes (1, 2 or 4) from the
 * instruction stream at ctxt->_eip into `op`.  The fetch is sign-extended;
 * when `sign_extension` is false the value is masked back to its width.
 * Returns X86EMUL_CONTINUE on success.  Note: the insn_fetch() macro jumps
 * to the `done` label on fetch failure, which is why the label exists.
 */
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	}
	/* Undo the sign extension when the caller wants a zero-extended
	 * immediate. */
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
| 0
|
294,569
|
/* Return the month (1..12) of a Date value, computing the civil date fields
 * on demand.  Simple dates use the `s` member, complex dates the `c`
 * member; with USE_PACK the month is extracted from the packed civil
 * representation. */
m_mon(union DateData *x)
{
    if (simple_dat_p(x)) {
	get_s_civil(x);
#ifndef USE_PACK
	return x->s.mon;
#else
	return EX_MON(x->s.pc);
#endif
    }
    else {
	get_c_civil(x);
#ifndef USE_PACK
	return x->c.mon;
#else
	return EX_MON(x->c.pc);
#endif
    }
}
| 0
|
208,430
|
/*
 * Read HID debug events from the per-reader ring buffer into userspace.
 * Blocks (unless O_NONBLOCK) while the ring is empty; wakes on new events,
 * signals, or device removal.  Copies the readable region in one or two
 * chunks depending on whether the data wraps around the buffer end.
 *
 * NOTE(review): head/tail are shared with the event producer; this function
 * holds only list->read_mutex — presumably the producer side provides the
 * needed synchronization; confirm against the writer path.
 */
static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
		size_t count, loff_t *ppos)
{
	struct hid_debug_list *list = file->private_data;
	int ret = 0, len;
	DECLARE_WAITQUEUE(wait, current);

	mutex_lock(&list->read_mutex);
	while (ret == 0) {
		if (list->head == list->tail) {
			/* Ring empty: sleep until the producer adds data. */
			add_wait_queue(&list->hdev->debug_wait, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			while (list->head == list->tail) {
				if (file->f_flags & O_NONBLOCK) {
					ret = -EAGAIN;
					break;
				}
				if (signal_pending(current)) {
					ret = -ERESTARTSYS;
					break;
				}

				if (!list->hdev || !list->hdev->debug) {
					ret = -EIO;
					set_current_state(TASK_RUNNING);
					goto out;
				}

				/* allow O_NONBLOCK from other threads */
				mutex_unlock(&list->read_mutex);
				schedule();
				mutex_lock(&list->read_mutex);
				set_current_state(TASK_INTERRUPTIBLE);
			}

			set_current_state(TASK_RUNNING);
			remove_wait_queue(&list->hdev->debug_wait, &wait);
		}

		if (ret)
			goto out;

		/* pass the ringbuffer contents to userspace */
copy_rest:
		if (list->tail == list->head)
			goto out;
		if (list->tail > list->head) {
			/* Contiguous region: head..tail. */
			len = list->tail - list->head;

			if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
				ret = -EFAULT;
				goto out;
			}
			ret += len;
			list->head += len;
		} else {
			/* Wrapped: copy head..end, then loop to copy 0..tail.
			 * NOTE(review): neither branch is bounded by `count`;
			 * looks like the caller's buffer must be at least
			 * HID_DEBUG_BUFSIZE — confirm against callers. */
			len = HID_DEBUG_BUFSIZE - list->head;

			if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
				ret = -EFAULT;
				goto out;
			}
			list->head = 0;
			ret += len;
			goto copy_rest;
		}

	}
out:
	mutex_unlock(&list->read_mutex);
	return ret;
}
| 1
|
463,120
|
/*
 * Fetch the "/partition" annotation for a mailbox: emits the mailbox's
 * partition name via output_entryatt().  Produces nothing for remote
 * mailboxes or when the caller lacks LOOKUP rights (and is not an admin).
 */
static void annotation_get_partition(annotate_state_t *state,
			             struct annotate_entry_list *entry)
{
    struct buf value = BUF_INITIALIZER;
    int r;

    assert(state);
    assert(state->which == ANNOTATION_SCOPE_MAILBOX);
    r = annotate_state_need_mbentry(state);
    assert(r == 0);

    /* Make sure its a local mailbox */
    if (state->mbentry->server) goto out;

    /* Check ACL */
    if (!state->isadmin &&
	(!state->mbentry->acl ||
	 !(cyrus_acl_myrights(state->auth_state, state->mbentry->acl) & ACL_LOOKUP)))
	goto out;

    buf_appendcstr(&value, state->mbentry->partition);
    output_entryatt(state, entry->name, "", &value);

out:
    buf_free(&value);
}
| 0
|
231,690
|
// Processes a UDP datagram received while the server connection is in the
// Open state. On the very first datagram it bootstraps the read codec and
// initial ciphers; then it iterates over the (possibly coalesced) QUIC
// packets in the datagram, dispatching every frame to the appropriate
// handler. Throws QuicTransportException on protocol violations.
void onServerReadDataFromOpen(
    QuicServerConnectionState& conn,
    ServerEvents::ReadData& readData) {
  CHECK_EQ(conn.state, ServerState::Open);
  // Don't bother parsing if the data is empty.
  if (!readData.networkData.data ||
      readData.networkData.data->computeChainDataLength() == 0) {
    return;
  }
  // readCodec is only unset before the first packet: set up connection ids,
  // the handshake layer's transport parameters, and the initial ciphers.
  if (!conn.readCodec) {
    // First packet from the peer
    folly::io::Cursor cursor(readData.networkData.data.get());
    auto initialByte = cursor.readBE<uint8_t>();
    auto parsedLongHeader = parseLongHeaderInvariant(initialByte, cursor);
    if (!parsedLongHeader) {
      VLOG(4) << "Could not parse initial packet header";
      if (conn.qLogger) {
        conn.qLogger->addPacketDrop(
            0,
            QuicTransportStatsCallback::toString(
                PacketDropReason::PARSE_ERROR));
      }
      QUIC_STATS(
          conn.statsCallback, onPacketDropped, PacketDropReason::PARSE_ERROR);
      return;
    }
    QuicVersion version = parsedLongHeader->invariant.version;
    // A server must never receive a Version Negotiation packet; drop it.
    if (version == QuicVersion::VERSION_NEGOTIATION) {
      VLOG(4) << "Server droppiong VN packet";
      if (conn.qLogger) {
        conn.qLogger->addPacketDrop(
            0,
            QuicTransportStatsCallback::toString(
                PacketDropReason::INVALID_PACKET));
      }
      QUIC_STATS(
          conn.statsCallback,
          onPacketDropped,
          PacketDropReason::INVALID_PACKET);
      return;
    }
    const auto& clientConnectionId = parsedLongHeader->invariant.srcConnId;
    const auto& initialDestinationConnectionId =
        parsedLongHeader->invariant.dstConnId;
    if (initialDestinationConnectionId.size() < kDefaultConnectionIdSize) {
      VLOG(4) << "Initial connectionid too small";
      if (conn.qLogger) {
        conn.qLogger->addPacketDrop(
            0,
            QuicTransportStatsCallback::toString(
                PacketDropReason::INITIAL_CONNID_SMALL));
      }
      QUIC_STATS(
          conn.statsCallback,
          onPacketDropped,
          PacketDropReason::INITIAL_CONNID_SMALL);
      return;
    }
    CHECK(conn.connIdAlgo) << "ConnectionIdAlgo is not set.";
    CHECK(!conn.serverConnectionId.has_value());
    // serverConnIdParams must be set by the QuicServerTransport
    CHECK(conn.serverConnIdParams);
    auto newServerConnIdData = conn.createAndAddNewSelfConnId();
    CHECK(newServerConnIdData.has_value());
    conn.serverConnectionId = newServerConnIdData->connId;
    QUIC_STATS(conn.statsCallback, onStatelessReset);
    // Advertise our transport parameters to the handshake layer so they are
    // included in the server's handshake flight.
    conn.serverHandshakeLayer->accept(
        std::make_shared<ServerTransportParametersExtension>(
            version,
            conn.transportSettings.advertisedInitialConnectionWindowSize,
            conn.transportSettings.advertisedInitialBidiLocalStreamWindowSize,
            conn.transportSettings.advertisedInitialBidiRemoteStreamWindowSize,
            conn.transportSettings.advertisedInitialUniStreamWindowSize,
            conn.transportSettings.advertisedInitialMaxStreamsBidi,
            conn.transportSettings.advertisedInitialMaxStreamsUni,
            conn.transportSettings.idleTimeout,
            conn.transportSettings.ackDelayExponent,
            conn.transportSettings.maxRecvPacketSize,
            conn.transportSettings.partialReliabilityEnabled,
            *newServerConnIdData->token,
            conn.serverConnectionId.value(),
            initialDestinationConnectionId));
    conn.transportParametersEncoded = true;
    // Derive the Initial read/write ciphers from the client's destination
    // connection id, per the QUIC key-derivation scheme.
    const CryptoFactory& cryptoFactory =
        conn.serverHandshakeLayer->getCryptoFactory();
    conn.readCodec = std::make_unique<QuicReadCodec>(QuicNodeType::Server);
    conn.readCodec->setInitialReadCipher(cryptoFactory.getClientInitialCipher(
        initialDestinationConnectionId, version));
    conn.readCodec->setClientConnectionId(clientConnectionId);
    conn.readCodec->setServerConnectionId(*conn.serverConnectionId);
    if (conn.qLogger) {
      conn.qLogger->setScid(conn.serverConnectionId);
      conn.qLogger->setDcid(initialDestinationConnectionId);
    }
    conn.readCodec->setCodecParameters(
        CodecParameters(conn.peerAckDelayExponent, version));
    conn.initialWriteCipher = cryptoFactory.getServerInitialCipher(
        initialDestinationConnectionId, version);
    conn.readCodec->setInitialHeaderCipher(
        cryptoFactory.makeClientInitialHeaderCipher(
            initialDestinationConnectionId, version));
    conn.initialHeaderCipher = cryptoFactory.makeServerInitialHeaderCipher(
        initialDestinationConnectionId, version);
    conn.peerAddress = conn.originalPeerAddress;
  }
  // A single UDP datagram may carry several coalesced QUIC packets; process
  // up to kMaxNumCoalescedPackets of them.
  BufQueue udpData;
  udpData.append(std::move(readData.networkData.data));
  for (uint16_t processedPackets = 0;
       !udpData.empty() && processedPackets < kMaxNumCoalescedPackets;
       processedPackets++) {
    size_t dataSize = udpData.chainLength();
    auto parsedPacket = conn.readCodec->parsePacket(udpData, conn.ackStates);
    // parsePacket consumes bytes from udpData; the difference is this
    // packet's wire size (used for logging/stats below).
    size_t packetSize = dataSize - udpData.chainLength();
    switch (parsedPacket.type()) {
      case CodecResult::Type::CIPHER_UNAVAILABLE: {
        handleCipherUnavailable(
            parsedPacket.cipherUnavailable(), conn, packetSize, readData);
        break;
      }
      case CodecResult::Type::RETRY: {
        VLOG(10) << "drop because the server is not supposed to "
                 << "receive a retry " << conn;
        if (conn.qLogger) {
          conn.qLogger->addPacketDrop(packetSize, kRetry);
        }
        QUIC_TRACE(packet_drop, conn, "retry");
        break;
      }
      case CodecResult::Type::STATELESS_RESET: {
        VLOG(10) << "drop because reset " << conn;
        if (conn.qLogger) {
          conn.qLogger->addPacketDrop(packetSize, kReset);
        }
        QUIC_TRACE(packet_drop, conn, "reset");
        break;
      }
      case CodecResult::Type::NOTHING: {
        VLOG(10) << "drop cipher unavailable, no data " << conn;
        if (conn.qLogger) {
          conn.qLogger->addPacketDrop(packetSize, kCipherUnavailable);
        }
        QUIC_TRACE(packet_drop, conn, "cipher_unavailable");
        break;
      }
      case CodecResult::Type::REGULAR_PACKET:
        break;
    }
    RegularQuicPacket* regularOptional = parsedPacket.regularPacket();
    if (!regularOptional) {
      // We were unable to parse the packet, drop for now. All the drop reasons
      // should have already been logged into QLogger and QuicTrace inside the
      // previous switch-case block. We just need to update QUIC_STATS here.
      VLOG(10) << "Not able to parse QUIC packet " << conn;
      QUIC_STATS(
          conn.statsCallback, onPacketDropped, PacketDropReason::PARSE_ERROR);
      continue;
    }
    auto protectionLevel = regularOptional->header.getProtectionType();
    auto encryptionLevel = protectionTypeToEncryptionLevel(protectionLevel);
    auto packetNum = regularOptional->header.getPacketSequenceNum();
    auto packetNumberSpace = regularOptional->header.getPacketNumberSpace();
    // TODO: enforce constraints on other protection levels.
    auto& regularPacket = *regularOptional;
    // Unprotected (handshake-level) packets may only contain a restricted
    // set of frame types; anything else is a protocol violation.
    bool isProtectedPacket = protectionLevel == ProtectionType::ZeroRtt ||
        protectionLevel == ProtectionType::KeyPhaseZero ||
        protectionLevel == ProtectionType::KeyPhaseOne;
    if (!isProtectedPacket) {
      for (auto& quicFrame : regularPacket.frames) {
        auto isPadding = quicFrame.asPaddingFrame();
        auto isAck = quicFrame.asReadAckFrame();
        auto isClose = quicFrame.asConnectionCloseFrame();
        auto isCrypto = quicFrame.asReadCryptoFrame();
        auto isPing = quicFrame.asPingFrame();
        // TODO: add path challenge and response
        if (!isPadding && !isAck && !isClose && !isCrypto && !isPing) {
          QUIC_STATS(
              conn.statsCallback,
              onPacketDropped,
              PacketDropReason::PROTOCOL_VIOLATION);
          if (conn.qLogger) {
            conn.qLogger->addPacketDrop(
                packetSize,
                QuicTransportStatsCallback::toString(
                    PacketDropReason::PROTOCOL_VIOLATION));
          }
          throw QuicTransportException(
              "Invalid frame", TransportErrorCode::PROTOCOL_VIOLATION);
        }
      }
    }
    CHECK(conn.clientConnectionId);
    if (conn.qLogger) {
      conn.qLogger->addPacket(regularPacket, packetSize);
    }
    // We assume that the higher layer takes care of validating that the version
    // is supported.
    if (!conn.version) {
      LongHeader* longHeader = regularPacket.header.asLong();
      if (!longHeader) {
        throw QuicTransportException(
            "Invalid packet type", TransportErrorCode::PROTOCOL_VIOLATION);
      }
      conn.version = longHeader->getVersion();
      if (conn.version == QuicVersion::MVFST_EXPERIMENTAL) {
        setExperimentalSettings(conn);
      }
    }
    // Reject peer address changes during the handshake or when migration is
    // disabled; post-handshake changes are handled after frame processing.
    if (conn.peerAddress != readData.peer) {
      if (encryptionLevel != EncryptionLevel::AppData) {
        if (conn.qLogger) {
          conn.qLogger->addPacketDrop(
              packetSize,
              QuicTransportStatsCallback::toString(
                  PacketDropReason::PEER_ADDRESS_CHANGE));
        }
        QUIC_STATS(
            conn.statsCallback,
            onPacketDropped,
            PacketDropReason::PEER_ADDRESS_CHANGE);
        throw QuicTransportException(
            "Migration not allowed during handshake",
            TransportErrorCode::INVALID_MIGRATION);
      }
      if (conn.transportSettings.disableMigration) {
        if (conn.qLogger) {
          conn.qLogger->addPacketDrop(
              packetSize,
              QuicTransportStatsCallback::toString(
                  PacketDropReason::PEER_ADDRESS_CHANGE));
        }
        QUIC_STATS(
            conn.statsCallback,
            onPacketDropped,
            PacketDropReason::PEER_ADDRESS_CHANGE);
        throw QuicTransportException(
            "Migration disabled", TransportErrorCode::INVALID_MIGRATION);
      }
    }
    auto& ackState = getAckState(conn, packetNumberSpace);
    bool outOfOrder = updateLargestReceivedPacketNum(
        ackState, packetNum, readData.networkData.receiveTimePoint);
    if (outOfOrder) {
      QUIC_STATS(conn.statsCallback, onOutOfOrderPacketReceived);
    }
    DCHECK(hasReceivedPackets(conn));
    bool pktHasRetransmittableData = false;
    bool pktHasCryptoData = false;
    bool isNonProbingPacket = false;
    bool handshakeConfirmedThisLoop = false;
    // TODO: possibly drop the packet here, but rolling back state of
    // what we've already processed is difficult.
    for (auto& quicFrame : regularPacket.frames) {
      switch (quicFrame.type()) {
        case QuicFrame::Type::ReadAckFrame: {
          VLOG(10) << "Server received ack frame packet=" << packetNum << " "
                   << conn;
          isNonProbingPacket = true;
          ReadAckFrame& ackFrame = *quicFrame.asReadAckFrame();
          // The lambda is invoked once per acked outstanding frame so each
          // frame type can run its ack-specific state machine.
          processAckFrame(
              conn,
              packetNumberSpace,
              ackFrame,
              [&](const OutstandingPacket& packet,
                  const QuicWriteFrame& packetFrame,
                  const ReadAckFrame&) {
                switch (packetFrame.type()) {
                  case QuicWriteFrame::Type::WriteStreamFrame: {
                    const WriteStreamFrame& frame =
                        *packetFrame.asWriteStreamFrame();
                    VLOG(4)
                        << "Server received ack for stream=" << frame.streamId
                        << " offset=" << frame.offset << " fin=" << frame.fin
                        << " len=" << frame.len << " " << conn;
                    auto ackedStream =
                        conn.streamManager->getStream(frame.streamId);
                    if (ackedStream) {
                      sendAckSMHandler(*ackedStream, frame);
                    }
                    break;
                  }
                  case QuicWriteFrame::Type::WriteCryptoFrame: {
                    const WriteCryptoFrame& frame =
                        *packetFrame.asWriteCryptoFrame();
                    auto cryptoStream =
                        getCryptoStream(*conn.cryptoState, encryptionLevel);
                    processCryptoStreamAck(
                        *cryptoStream, frame.offset, frame.len);
                    break;
                  }
                  case QuicWriteFrame::Type::RstStreamFrame: {
                    const RstStreamFrame& frame =
                        *packetFrame.asRstStreamFrame();
                    VLOG(4) << "Server received ack for reset stream="
                            << frame.streamId << " " << conn;
                    auto stream = conn.streamManager->getStream(frame.streamId);
                    if (stream) {
                      sendRstAckSMHandler(*stream);
                    }
                    break;
                  }
                  case QuicWriteFrame::Type::WriteAckFrame: {
                    const WriteAckFrame& frame = *packetFrame.asWriteAckFrame();
                    DCHECK(!frame.ackBlocks.empty());
                    VLOG(4) << "Server received ack for largestAcked="
                            << frame.ackBlocks.front().end << " " << conn;
                    commonAckVisitorForAckFrame(ackState, frame);
                    break;
                  }
                  case QuicWriteFrame::Type::PingFrame:
                    if (!packet.metadata.isD6DProbe) {
                      conn.pendingEvents.cancelPingTimeout = true;
                    }
                    return;
                  case QuicWriteFrame::Type::QuicSimpleFrame: {
                    const QuicSimpleFrame& frame =
                        *packetFrame.asQuicSimpleFrame();
                    // ACK of HandshakeDone is a server-specific behavior.
                    if (frame.asHandshakeDoneFrame() &&
                        conn.version != QuicVersion::MVFST_D24) {
                      // Call handshakeConfirmed outside of the packet
                      // processing loop to avoid a re-entrancy.
                      handshakeConfirmedThisLoop = true;
                    }
                    break;
                  }
                  default: {
                    break;
                  }
                }
              },
              markPacketLoss,
              readData.networkData.receiveTimePoint);
          break;
        }
        case QuicFrame::Type::RstStreamFrame: {
          RstStreamFrame& frame = *quicFrame.asRstStreamFrame();
          VLOG(10) << "Server received reset stream=" << frame.streamId << " "
                   << conn;
          pktHasRetransmittableData = true;
          isNonProbingPacket = true;
          auto stream = conn.streamManager->getStream(frame.streamId);
          if (!stream) {
            break;
          }
          receiveRstStreamSMHandler(*stream, frame);
          break;
        }
        case QuicFrame::Type::ReadCryptoFrame: {
          pktHasRetransmittableData = true;
          pktHasCryptoData = true;
          isNonProbingPacket = true;
          ReadCryptoFrame& cryptoFrame = *quicFrame.asReadCryptoFrame();
          VLOG(10) << "Server received crypto data offset="
                   << cryptoFrame.offset
                   << " len=" << cryptoFrame.data->computeChainDataLength()
                   << " currentReadOffset="
                   << getCryptoStream(*conn.cryptoState, encryptionLevel)
                          ->currentReadOffset
                   << " " << conn;
          appendDataToReadBuffer(
              *getCryptoStream(*conn.cryptoState, encryptionLevel),
              StreamBuffer(
                  std::move(cryptoFrame.data), cryptoFrame.offset, false));
          break;
        }
        case QuicFrame::Type::ReadStreamFrame: {
          ReadStreamFrame& frame = *quicFrame.asReadStreamFrame();
          VLOG(10) << "Server received stream data for stream="
                   << frame.streamId << ", offset=" << frame.offset
                   << " len=" << frame.data->computeChainDataLength()
                   << " fin=" << frame.fin << " " << conn;
          pktHasRetransmittableData = true;
          isNonProbingPacket = true;
          auto stream = conn.streamManager->getStream(frame.streamId);
          // Ignore data from closed streams that we don't have the
          // state for any more.
          if (stream) {
            receiveReadStreamFrameSMHandler(*stream, std::move(frame));
          }
          break;
        }
        case QuicFrame::Type::MaxDataFrame: {
          MaxDataFrame& connWindowUpdate = *quicFrame.asMaxDataFrame();
          VLOG(10) << "Server received max data offset="
                   << connWindowUpdate.maximumData << " " << conn;
          pktHasRetransmittableData = true;
          isNonProbingPacket = true;
          handleConnWindowUpdate(conn, connWindowUpdate, packetNum);
          break;
        }
        case QuicFrame::Type::MaxStreamDataFrame: {
          MaxStreamDataFrame& streamWindowUpdate =
              *quicFrame.asMaxStreamDataFrame();
          VLOG(10) << "Server received max stream data stream="
                   << streamWindowUpdate.streamId
                   << " offset=" << streamWindowUpdate.maximumData << " "
                   << conn;
          if (isReceivingStream(conn.nodeType, streamWindowUpdate.streamId)) {
            throw QuicTransportException(
                "Received MaxStreamDataFrame for receiving stream.",
                TransportErrorCode::STREAM_STATE_ERROR);
          }
          pktHasRetransmittableData = true;
          isNonProbingPacket = true;
          auto stream =
              conn.streamManager->getStream(streamWindowUpdate.streamId);
          if (stream) {
            handleStreamWindowUpdate(
                *stream, streamWindowUpdate.maximumData, packetNum);
          }
          break;
        }
        case QuicFrame::Type::DataBlockedFrame: {
          VLOG(10) << "Server received blocked " << conn;
          pktHasRetransmittableData = true;
          isNonProbingPacket = true;
          handleConnBlocked(conn);
          break;
        }
        case QuicFrame::Type::StreamDataBlockedFrame: {
          StreamDataBlockedFrame& blocked =
              *quicFrame.asStreamDataBlockedFrame();
          VLOG(10) << "Server received blocked stream=" << blocked.streamId
                   << " " << conn;
          pktHasRetransmittableData = true;
          isNonProbingPacket = true;
          auto stream = conn.streamManager->getStream(blocked.streamId);
          if (stream) {
            handleStreamBlocked(*stream);
          }
          break;
        }
        case QuicFrame::Type::StreamsBlockedFrame: {
          StreamsBlockedFrame& blocked = *quicFrame.asStreamsBlockedFrame();
          // peer wishes to open a stream, but is unable to due to the maximum
          // stream limit set by us
          // TODO implement the handler
          isNonProbingPacket = true;
          VLOG(10) << "Server received streams blocked limit="
                   << blocked.streamLimit << ", " << conn;
          break;
        }
        case QuicFrame::Type::ConnectionCloseFrame: {
          isNonProbingPacket = true;
          ConnectionCloseFrame& connFrame = *quicFrame.asConnectionCloseFrame();
          auto errMsg = folly::to<std::string>(
              "Server closed by peer reason=", connFrame.reasonPhrase);
          VLOG(4) << errMsg << " " << conn;
          // we want to deliver app callbacks with the peer supplied error,
          // but send a NO_ERROR to the peer.
          QUIC_TRACE(recvd_close, conn, errMsg.c_str());
          if (conn.qLogger) {
            conn.qLogger->addTransportStateUpdate(getPeerClose(errMsg));
          }
          conn.peerConnectionError = std::make_pair(
              QuicErrorCode(connFrame.errorCode), std::move(errMsg));
          if (getSendConnFlowControlBytesWire(conn) == 0 &&
              conn.flowControlState.sumCurStreamBufferLen) {
            VLOG(2) << "Client gives up a flow control blocked connection";
          }
          throw QuicTransportException(
              "Peer closed", TransportErrorCode::NO_ERROR);
          break;
        }
        case QuicFrame::Type::PingFrame:
          isNonProbingPacket = true;
          // Ping isn't retransmittable data. But we would like to ack them
          // early.
          pktHasRetransmittableData = true;
          break;
        case QuicFrame::Type::PaddingFrame:
          break;
        case QuicFrame::Type::QuicSimpleFrame: {
          pktHasRetransmittableData = true;
          QuicSimpleFrame& simpleFrame = *quicFrame.asQuicSimpleFrame();
          isNonProbingPacket |= updateSimpleFrameOnPacketReceived(
              conn, simpleFrame, packetNum, readData.peer != conn.peerAddress);
          break;
        }
        default: {
          break;
        }
      }
    }
    // Deferred from the ack-processing lambda above to avoid re-entrancy.
    if (handshakeConfirmedThisLoop) {
      handshakeConfirmed(conn);
    }
    // Update writable limit before processing the handshake data. This is so
    // that if we haven't decided whether or not to validate the peer, we won't
    // increase the limit.
    updateWritableByteLimitOnRecvPacket(conn);
    // Post-handshake peer address change: migrate only on a non-probing
    // packet that is the newest we have seen.
    if (conn.peerAddress != readData.peer) {
      // TODO use new conn id, make sure the other endpoint has new conn id
      if (isNonProbingPacket) {
        if (packetNum == ackState.largestReceivedPacketNum) {
          ShortHeader* shortHeader = regularPacket.header.asShort();
          bool intentionalMigration = false;
          if (shortHeader &&
              shortHeader->getConnectionId() != conn.serverConnectionId) {
            intentionalMigration = true;
          }
          onConnectionMigration(conn, readData.peer, intentionalMigration);
        }
      } else {
        // Server will need to response with PathResponse to the new address
        // while not updating peerAddress to new address
        if (conn.qLogger) {
          conn.qLogger->addPacketDrop(
              packetSize,
              QuicTransportStatsCallback::toString(
                  PacketDropReason::PEER_ADDRESS_CHANGE));
        }
        QUIC_STATS(
            conn.statsCallback,
            onPacketDropped,
            PacketDropReason::PEER_ADDRESS_CHANGE);
        throw QuicTransportException(
            "Probing not supported yet", TransportErrorCode::INVALID_MIGRATION);
      }
    }
    // Try reading bytes off of crypto, and performing a handshake.
    auto data = readDataFromCryptoStream(
        *getCryptoStream(*conn.cryptoState, encryptionLevel));
    if (data) {
      conn.serverHandshakeLayer->doHandshake(std::move(data), encryptionLevel);
      try {
        updateHandshakeState(conn);
      } catch (...) {
        if (conn.qLogger) {
          conn.qLogger->addPacketDrop(
              packetSize,
              QuicTransportStatsCallback::toString(
                  PacketDropReason::TRANSPORT_PARAMETER_ERROR));
        }
        QUIC_STATS(
            conn.statsCallback,
            onPacketDropped,
            QuicTransportStatsCallback::PacketDropReason::
                TRANSPORT_PARAMETER_ERROR);
        throw;
      }
    }
    updateAckSendStateOnRecvPacket(
        conn,
        ackState,
        outOfOrder,
        pktHasRetransmittableData,
        pktHasCryptoData);
    // Once Handshake keys are in use the Initial keys must be discarded
    // (non-D24 versions); implicitly ack outstanding Initial crypto data.
    if (encryptionLevel == EncryptionLevel::Handshake &&
        conn.version != QuicVersion::MVFST_D24 && conn.initialWriteCipher) {
      conn.initialWriteCipher.reset();
      conn.initialHeaderCipher.reset();
      conn.readCodec->setInitialReadCipher(nullptr);
      conn.readCodec->setInitialHeaderCipher(nullptr);
      implicitAckCryptoStream(conn, EncryptionLevel::Initial);
    }
    QUIC_STATS(conn.statsCallback, onPacketProcessed);
  }
  VLOG_IF(4, !udpData.empty())
      << "Leaving " << udpData.chainLength()
      << " bytes unprocessed after attempting to process "
      << kMaxNumCoalescedPackets << " packets.";
}
| 0
|
443,700
|
/* Validates that [s, end) is a well-formed UTF-32LE byte sequence by
 * delegating to the generic per-character length checker. */
is_valid_mbc_string(const UChar* s, const UChar* end)
{
  return onigenc_length_check_is_valid_mbc_string(ONIG_ENCODING_UTF32_LE, s, end);
}
| 0
|
459,203
|
/* Removes the chain-0 head-change callback registered for @ei from @block.
 * If chain 0 currently exists, the callback is first invoked with NULL so
 * the owner can drop its reference to the old head. The item is unlinked
 * under block->lock but freed outside it. WARN_ON fires if no matching
 * registration is found (unbalanced add/del).
 */
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		/* A NULL callback/priv pair matches the first entry;
		 * otherwise both the callback and its priv must match. */
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);
			/* kfree after unlock: no need to hold the mutex. */
			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}
| 0
|
234,859
|
/* Reads the @index-th u64 counter from a dev-stats item stored in extent
 * buffer @eb. @ptr is the item's offset within the buffer (an offset cast
 * to a pointer, as is conventional for extent-buffer item accessors), so
 * the read position is item offset + offsetof(values) + index * 8.
 */
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}
| 0
|
220,026
|
// Retrieves N SparseTensors from the shared SparseTensorsMap by handle,
// prepends a new leading dimension to each (so tensor i becomes row i of
// the result), pads all shapes to a common per-dimension maximum, and
// concatenates them along dimension 0. Outputs: (0) indices, (1) values,
// (2) dense shape. The retrieved handles are cleared from the map.
void Compute(OpKernelContext* context) override {
  SparseTensorsMap* map = nullptr;
  OP_REQUIRES_OK(context, GetMap(context, false /* is_writing */, &map));
  const Tensor& sparse_handles = context->input(0);
  OP_REQUIRES(context, TensorShapeUtils::IsVector(sparse_handles.shape()),
              errors::InvalidArgument(
                  "sparse_handles should be a vector but received shape ",
                  sparse_handles.shape().DebugString()));
  int64_t N = sparse_handles.shape().dim_size(0);
  OP_REQUIRES(
      context, N > 0,
      errors::InvalidArgument("Must have at least 1 serialized SparseTensor, "
                              "but input matrix has 0 rows"));
  std::vector<Tensor> indices_to_concat;
  std::vector<Tensor> values_to_concat;
  std::vector<TensorShape> shapes_to_concat;
  const auto& sparse_handles_t = sparse_handles.vec<int64_t>();
  std::vector<SparseTensor> sparse_tensors;
  // RetrieveAndClearSparseTensors removes the handles from the map, so each
  // handle can only be consumed once.
  OP_REQUIRES_OK(context, map->RetrieveAndClearSparseTensors(
                              context, sparse_handles_t, &sparse_tensors));
  for (int64_t i = 0; i < N; ++i) {
    const SparseTensor& st = sparse_tensors[i];
    const Tensor& output_indices = st.indices();
    const Tensor& output_values = st.values();
    const auto output_shape = st.shape();
    // Validate the stored tensor's components before using them: shape of
    // indices/values, dtype, and agreement between entry counts and rank.
    OP_REQUIRES(context, TensorShapeUtils::IsMatrix(output_indices.shape()),
                errors::InvalidArgument(
                    "Expected sparse_handles[", i,
                    "] to represent an index matrix but received shape ",
                    output_indices.shape().DebugString()));
    OP_REQUIRES(context, TensorShapeUtils::IsVector(output_values.shape()),
                errors::InvalidArgument(
                    "Expected sparse_handles[", i,
                    "] to represent a values vector but received shape ",
                    output_values.shape().DebugString()));
    OP_REQUIRES(
        context, DataTypeToEnum<T>::value == output_values.dtype(),
        errors::InvalidArgument(
            "Requested SparseTensor of type ",
            DataTypeString(DataTypeToEnum<T>::value), " but SparseTensor[", i,
            "].values.dtype() == ", DataTypeString(output_values.dtype())));
    int64_t num_entries = output_indices.dim_size(0);
    OP_REQUIRES(context, num_entries == output_values.dim_size(0),
                errors::InvalidArgument(
                    "Expected row counts of SparseTensor[", i,
                    "].indices and SparseTensor[", i,
                    "].values to match but they do not: ", num_entries,
                    " vs. ", output_values.dim_size(0)));
    int rank = output_indices.dim_size(1);
    OP_REQUIRES(
        context, rank == output_shape.size(),
        errors::InvalidArgument("Expected column counts of SparseTensor[", i,
                                "].indices to match size of SparseTensor[", i,
                                "].shape "
                                "but they do not: ",
                                rank, " vs. ", output_shape.size()));
    // Now we expand each SparseTensors' indices and shape by
    // prefixing a dimension
    Tensor expanded_indices(
        DT_INT64, TensorShape({num_entries, 1 + output_indices.dim_size(1)}));
    Tensor expanded_shape(DT_INT64, TensorShape({1 + rank}));
    const auto& output_indices_t = output_indices.matrix<int64_t>();
    auto expanded_indices_t = expanded_indices.matrix<int64_t>();
    auto expanded_shape_t = expanded_shape.vec<int64_t>();
    // New leading index is always 0 within each tensor; concat handles the
    // actual row offsets.
    expanded_indices_t.chip<1>(0).setZero();
    Eigen::DSizes<Eigen::DenseIndex, 2> indices_start(0, 1);
    Eigen::DSizes<Eigen::DenseIndex, 2> indices_sizes(num_entries, rank);
    expanded_indices_t.slice(indices_start, indices_sizes) = output_indices_t;
    expanded_shape_t(0) = 1;
    // TODO: copy shape from TensorShape to &expanded_shape_t(1)
    // std::copy_n(&output_shape_t(0), rank, &expanded_shape_t(1));
    for (int i = 0; i < rank; ++i) {
      expanded_shape_t(i + 1) = output_shape[i];
    }
    TensorShape expanded_tensor_shape(expanded_shape_t);
    indices_to_concat.push_back(std::move(expanded_indices));
    values_to_concat.push_back(output_values);
    shapes_to_concat.push_back(std::move(expanded_tensor_shape));
  }
  // All expanded tensors must share the same rank before concatenation.
  int rank = -1;
  for (int i = 0; i < N; ++i) {
    if (rank < 0) rank = shapes_to_concat[i].dims();
    OP_REQUIRES(context, rank == shapes_to_concat[i].dims(),
                errors::InvalidArgument(
                    "Inconsistent rank across SparseTensors: rank prior to "
                    "SparseTensor[",
                    i, "] was: ", rank, " but rank of SparseTensor[", i,
                    "] is: ", shapes_to_concat[i].dims()));
  }
  // SparseTensor::Concat requires consistent shape for all but the
  // primary order dimension (dimension 0 in this case). So we get
  // the maximum value across all the input SparseTensors for each
  // dimension and use that.
  TensorShape preconcat_shape(shapes_to_concat[0]);
  for (int i = 0; i < N; ++i) {
    for (int d = 0; d < rank; ++d) {
      preconcat_shape.set_dim(d, std::max(preconcat_shape.dim_size(d),
                                          shapes_to_concat[i].dim_size(d)));
    }
  }
  // Dimension 0 is the primary dimension.
  gtl::InlinedVector<int64_t, 8> std_order(rank);
  std::iota(std_order.begin(), std_order.end(), 0);
  std::vector<SparseTensor> tensors_to_concat;
  tensors_to_concat.reserve(N);
  for (int i = 0; i < N; ++i) {
    SparseTensor tensor;
    OP_REQUIRES_OK(context,
                   SparseTensor::Create(std::move(indices_to_concat[i]),
                                        std::move(values_to_concat[i]),
                                        preconcat_shape, std_order, &tensor));
    tensors_to_concat.push_back(std::move(tensor));
  }
  auto output = SparseTensor::Concat<T>(tensors_to_concat);
  Tensor final_output_shape(DT_INT64, TensorShape({output.dims()}));
  std::copy_n(output.shape().data(), output.dims(),
              final_output_shape.vec<int64_t>().data());
  context->set_output(0, output.indices());
  context->set_output(1, output.values());
  context->set_output(2, final_output_shape);
}
| 0
|
230,311
|
njs_is_concat_spreadable(njs_vm_t *vm, njs_value_t *value)
{
    njs_int_t    rc;
    njs_value_t  spreadable;

    static const njs_value_t  prop_key =
        njs_wellknown_symbol(NJS_SYMBOL_IS_CONCAT_SPREADABLE);

    /* Per ECMA-262 IsConcatSpreadable(): non-objects never spread. */
    if (njs_slow_path(!njs_is_object(value))) {
        return NJS_DECLINED;
    }

    /* Consult the object's @@isConcatSpreadable property first. */
    rc = njs_value_property(vm, value, njs_value_arg(&prop_key), &spreadable);
    if (njs_slow_path(rc == NJS_ERROR)) {
        return NJS_ERROR;
    }

    if (njs_is_defined(&spreadable)) {
        if (njs_bool(&spreadable)) {
            return NJS_OK;
        }

        return NJS_DECLINED;
    }

    /* Property absent: fall back to the default Array check. */
    if (njs_is_array(value)) {
        return NJS_OK;
    }

    return NJS_DECLINED;
}
| 0
|
247,092
|
/* Copies the session capabilities of the filter into *caps.
 * With a NULL filter, *caps is zeroed; with a NULL caps, nothing happens. */
void gf_filter_get_session_caps(GF_Filter *filter, GF_FilterSessionCaps *caps)
{
	if (!caps)
		return;

	if (!filter) {
		/* No filter to query: report an all-zero capability set. */
		memset(caps, 0, sizeof(GF_FilterSessionCaps));
		return;
	}

	*caps = filter->session->caps;
}
| 0
|
232,926
|
/* Initializes the terminal (client) content-encoding writer.
 * The client writer must sit at the end of the chain, so a non-NULL
 * downstream pointer here indicates an internal setup error. */
static CURLcode client_init_writer(struct Curl_easy *data,
                                   struct contenc_writer *writer)
{
  (void) data;

  if(writer->downstream)
    return CURLE_WRITE_ERROR;

  return CURLE_OK;
}
| 0
|
212,834
|
/* Processes a single received character (at **buff) through the TCP syslog
 * framing state machine (RFC 6587: octet-counted or LF/“octet-stuffed”
 * framing). Complete messages are submitted via doSubmitMsg() and counted
 * in *pnMsgs. In octet-counted mode, *buff may be advanced so the caller's
 * per-character loop skips the bytes consumed here.
 *
 * SECURITY FIX (CVE-2022-24903 class of bug): the octet count used to be
 * accumulated as iOctetsRemain = iOctetsRemain * 10 + digit with no bound,
 * so a long run of digits overflowed the signed int into a negative value.
 * A negative iOctetsRemain later made the memcpy() length computation wrap
 * to a huge size_t, corrupting the heap. We now stop accumulating once the
 * count is already far beyond any permitted message size; the existing
 * oversize-message truncation path handles the rest.
 */
processDataRcvd(ptcpsess_t *const __restrict__ pThis,
	char **buff,
	const int buffLen,
	struct syslogTime *stTime,
	const time_t ttGenTime,
	multi_submit_t *pMultiSub,
	unsigned *const __restrict__ pnMsgs)
{
	DEFiRet;
	char c = **buff;
	int octatesToCopy, octatesToDiscard;

	if(pThis->inputState == eAtStrtFram) {
		if(pThis->bSuppOctetFram && isdigit((int) c)) {
			/* leading digit starts an octet-counted frame */
			pThis->inputState = eInOctetCnt;
			pThis->iOctetsRemain = 0;
			pThis->eFraming = TCP_FRAMING_OCTET_COUNTING;
		} else if(pThis->bSPFramingFix && c == ' ') {
			/* Cisco very occasionally sends a SP after a LF, which
			 * thrashes framing if not taken special care of. Here,
			 * we permit space *in front of the next frame* and
			 * ignore it.
			 */
			FINALIZE;
		} else {
			pThis->inputState = eInMsg;
			pThis->eFraming = TCP_FRAMING_OCTET_STUFFING;
		}
	}

	if(pThis->inputState == eInOctetCnt) {
		if(isdigit(c)) {
			/* Clamp the accumulation: once the count exceeds
			 * 200,000,000 any further digit could overflow the
			 * signed int and turn iOctetsRemain negative, which
			 * previously led to a heap buffer overflow in the
			 * memcpy() below. Additional digits are ignored; the
			 * oversize check below will truncate the message.
			 */
			if(pThis->iOctetsRemain <= 200000000) {
				pThis->iOctetsRemain = pThis->iOctetsRemain * 10 + c - '0';
			}
		} else { /* done with the octet count, so this must be the SP terminator */
			DBGPRINTF("TCP Message with octet-counter, size %d.\n", pThis->iOctetsRemain);
			if(c != ' ') {
				errmsg.LogError(0, NO_ERRCODE, "Framing Error in received TCP message: "
						"delimiter is not SP but has ASCII value %d.", c);
			}
			if(pThis->iOctetsRemain < 1) {
				/* TODO: handle the case where the octet count is 0! */
				DBGPRINTF("Framing Error: invalid octet count\n");
				errmsg.LogError(0, NO_ERRCODE, "Framing Error in received TCP message: "
						"invalid octet count %d.", pThis->iOctetsRemain);
			} else if(pThis->iOctetsRemain > iMaxLine) {
				/* while we can not do anything against it, we can at least log an indication
				 * that something went wrong) -- rgerhards, 2008-03-14
				 */
				DBGPRINTF("truncating message with %d octets - max msg size is %d\n",
					  pThis->iOctetsRemain, iMaxLine);
				errmsg.LogError(0, NO_ERRCODE, "received oversize message: size is %d bytes, "
						"max msg size is %d, truncating...", pThis->iOctetsRemain, iMaxLine);
			}
			pThis->inputState = eInMsg;
		}
	} else {
		assert(pThis->inputState == eInMsg);
		if (pThis->eFraming == TCP_FRAMING_OCTET_STUFFING) {
			if(pThis->iMsg >= iMaxLine) {
				/* emergency, we now need to flush, no matter if we are at end of message or not... */
				int i = 1;
				char currBuffChar;
				while(i < buffLen && ((currBuffChar = (*buff)[i]) != '\n'
					&& (pThis->pLstn->pSrv->iAddtlFrameDelim == TCPSRV_NO_ADDTL_DELIMITER
						|| currBuffChar != pThis->pLstn->pSrv->iAddtlFrameDelim))) {
					i++;
				}
				LogError(0, NO_ERRCODE, "error: message received is at least %d byte larger than max msg"
					" size; message will be split starting at: \"%.*s\"\n", i, (i < 32) ? i : 32, *buff);
				doSubmitMsg(pThis, stTime, ttGenTime, pMultiSub);
				++(*pnMsgs);
				/* we might think if it is better to ignore the rest of the
				 * message than to treat it as a new one. Maybe this is a good
				 * candidate for a configuration parameter...
				 * rgerhards, 2006-12-04
				 */
			}
			if ((c == '\n')
			   || ((pThis->pLstn->pSrv->iAddtlFrameDelim != TCPSRV_NO_ADDTL_DELIMITER)
				   && (c == pThis->pLstn->pSrv->iAddtlFrameDelim))
			   ) { /* record delimiter? */
				doSubmitMsg(pThis, stTime, ttGenTime, pMultiSub);
				++(*pnMsgs);
				pThis->inputState = eAtStrtFram;
			} else {
				/* IMPORTANT: here we copy the actual frame content to the message - for BOTH framing modes!
				 * If we have a message that is larger than the max msg size, we truncate it. This is the best
				 * we can do in light of what the engine supports. -- rgerhards, 2008-03-14
				 */
				if(pThis->iMsg < iMaxLine) {
					*(pThis->pMsg + pThis->iMsg++) = c;
				}
			}
		} else {
			assert(pThis->eFraming == TCP_FRAMING_OCTET_COUNTING);
			/* Copy up to min(iOctetsRemain, buffLen) bytes into the
			 * message buffer, discarding any overflow past iMaxLine.
			 * iOctetsRemain is guaranteed non-negative by the clamped
			 * accumulation above, so all lengths below stay sane.
			 */
			octatesToCopy = pThis->iOctetsRemain;
			octatesToDiscard = 0;
			if (buffLen < octatesToCopy) {
				octatesToCopy = buffLen;
			}
			if (octatesToCopy + pThis->iMsg > iMaxLine) {
				octatesToDiscard = octatesToCopy - (iMaxLine - pThis->iMsg);
				octatesToCopy = iMaxLine - pThis->iMsg;
			}

			memcpy(pThis->pMsg + pThis->iMsg, *buff, octatesToCopy);
			pThis->iMsg += octatesToCopy;
			pThis->iOctetsRemain -= (octatesToCopy + octatesToDiscard);
			/* -1 compensates the caller's own per-character increment */
			*buff += (octatesToCopy + octatesToDiscard - 1);
			if (pThis->iOctetsRemain == 0) {
				/* we have end of frame! */
				doSubmitMsg(pThis, stTime, ttGenTime, pMultiSub);
				++(*pnMsgs);
				pThis->inputState = eAtStrtFram;
			}
		}
	}

finalize_it:
	RETiRet;
}
| 1
|
229,352
|
// Returns the device's name, or a stable "<unspecified>" placeholder when
// the device pointer is null. The placeholder string is leaked on purpose
// so the returned reference remains valid for the process lifetime.
const string& DeviceNameOrUnspecified(Device* device) {
  static string* const kUnspecified = new string("<unspecified>");
  if (device == nullptr) {
    return *kUnspecified;
  }
  return device->name();
}
| 0
|
513,115
|
// Validates and clamps a long-typed plugin system-variable value.
// The raw value is read from `value`, coerced into the option's declared
// min/max/block-size limits (signed or unsigned depending on the variable's
// PLUGIN_VAR_UNSIGNED flag), stored into *save, and a bounds warning is
// raised if any adjustment was necessary. Returns the result of
// throw_bounds_warning() (non-zero aborts the SET in strict mode).
static int check_func_long(THD *thd, struct st_mysql_sys_var *var,
                           void *save, st_mysql_value *value)
{
  my_bool fixed1, fixed2;
  long long orig, val;
  struct my_option options;
  value->val_int(value, &orig);
  val= orig;
  plugin_opt_set_limits(&options, var);

  if (var->flags & PLUGIN_VAR_UNSIGNED)
  {
    // A negative signed input cannot represent an unsigned value: clamp to 0.
    if ((fixed1= (!value->is_unsigned(value) && val < 0)))
      val=0;
    *(ulong *)save= (ulong) getopt_ull_limit_value((ulonglong) val, &options,
                                                   &fixed2);
  }
  else
  {
    // An unsigned input that appears negative here has overflowed the signed
    // range: clamp to LONGLONG_MAX before applying the option limits.
    if ((fixed1= (value->is_unsigned(value) && val < 0)))
      val=LONGLONG_MAX;
    *(long *)save= (long) getopt_ll_limit_value(val, &options, &fixed2);
  }

  return throw_bounds_warning(thd, var->name, fixed1 || fixed2,
                              value->is_unsigned(value), (longlong) orig);
}
| 0
|
238,594
|
/* Clears the pseudo src_reg marker of every 64-bit immediate load
 * (BPF_LD | BPF_IMM | BPF_DW) in the program, leaving BPF_PSEUDO_FUNC
 * markers intact for later passes.
 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
	const int insn_cnt = env->prog->len;
	struct bpf_insn *insn;
	int i;

	for (i = 0; i < insn_cnt; i++) {
		insn = &env->prog->insnsi[i];
		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;
		if (insn->src_reg == BPF_PSEUDO_FUNC)
			continue;
		insn->src_reg = 0;
	}
}
| 0
|
427,230
|
/* Compiles the body of a 'for' statement (numeric or generic, selected by
** 'isgen'). 'base' is the first control register, 'nvars' the number of
** declared loop variables, and 'line' the line used to attribute the loop
** instructions. Emits FORPREP/TFORPREP before the body and
** FORLOOP/TFORLOOP (plus TFORCALL for the generic form) after it, then
** patches both jumps to each other.
*/
static void forbody (LexState *ls, int base, int line, int nvars, int isgen) {
  /* forbody -> DO block */
  static const OpCode forprep[2] = {OP_FORPREP, OP_TFORPREP};
  static const OpCode forloop[2] = {OP_FORLOOP, OP_TFORLOOP};
  BlockCnt bl;
  FuncState *fs = ls->fs;
  int prep, endfor;
  checknext(ls, TK_DO);
  prep = luaK_codeABx(fs, forprep[isgen], base, 0);
  enterblock(fs, &bl, 0);  /* scope for declared variables */
  adjustlocalvars(ls, nvars);
  luaK_reserveregs(fs, nvars);
  block(ls);
  leaveblock(fs);  /* end of scope for declared variables */
  fixforjump(fs, prep, luaK_getlabel(fs), 0);  /* FORPREP jumps past the body */
  if (isgen) {  /* generic for? */
    luaK_codeABC(fs, OP_TFORCALL, base, 0, nvars);
    luaK_fixline(fs, line);
  }
  endfor = luaK_codeABx(fs, forloop[isgen], base, 0);
  fixforjump(fs, endfor, prep + 1, 1);  /* loop jumps back to first body insn */
  luaK_fixline(fs, line);
}
| 0
|
221,489
|
/* For each usr-merged top-level directory (/bin, /lib, ...) that exists in
 * the runtime's files tree, appends a bwrap "--symlink usr/<dir> <dir>"
 * argument so the sandbox exposes the merged layout. When @sysroot is
 * non-NULL, the link is created under that prefix instead of at the root.
 * Does nothing if @runtime_files is NULL.
 */
flatpak_run_setup_usr_links (FlatpakBwrap *bwrap,
                             GFile        *runtime_files,
                             const char   *sysroot)
{
  int i;

  if (runtime_files == NULL)
    return;

  for (i = 0; flatpak_abs_usrmerged_dirs[i] != NULL; i++)
    {
      const char *subdir = flatpak_abs_usrmerged_dirs[i];
      g_autoptr(GFile) runtime_subdir = NULL;

      g_assert (subdir[0] == '/');
      /* Skip the '/' when using as a subdirectory of the runtime */
      runtime_subdir = g_file_get_child (runtime_files, subdir + 1);
      if (g_file_query_exists (runtime_subdir, NULL))
        {
          /* Link target is relative ("usr/bin"); link name is absolute. */
          g_autofree char *link = g_strconcat ("usr", subdir, NULL);
          g_autofree char *create = NULL;

          if (sysroot != NULL)
            create = g_strconcat (sysroot, subdir, NULL);
          else
            create = g_strdup (subdir);

          flatpak_bwrap_add_args (bwrap,
                                  "--symlink", link, create,
                                  NULL);
        }
      else
        {
          /* Runtime lacks this dir (e.g. no /lib64): nothing to link. */
          g_debug ("%s does not exist",
                   flatpak_file_get_path_cached (runtime_subdir));
        }
    }
}
| 0
|
259,154
|
/*
 * Read the next packet from the MOV/MP4 demuxer.
 *
 * Selects the stream whose next sample should be emitted, seeks to it,
 * reads the payload and fills in timestamps, duration, flags and side
 * data (palette, per-sample encryption, sdtp disposability).
 *
 * Returns 0 on success, AVERROR_EOF at end of stream, or another
 * negative AVERROR code on failure.
 */
static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MOVContext *mov = s->priv_data;
    MOVStreamContext *sc;
    AVIndexEntry *sample;
    AVStream *st = NULL;
    int64_t current_index;
    int ret;
    mov->fc = s;
 retry:
    sample = mov_find_next_sample(s, &st);
    if (!sample || (mov->next_root_atom && sample->pos > mov->next_root_atom)) {
        // No usable sample in the current root atom: stop at EOF, or
        // switch to the next root atom and try again.
        if (!mov->next_root_atom)
            return AVERROR_EOF;
        if ((ret = mov_switch_root(s, mov->next_root_atom, -1)) < 0)
            return ret;
        goto retry;
    }
    sc = st->priv_data;
    /* must be done just before reading, to avoid infinite loop on sample */
    current_index = sc->current_index;
    mov_current_sample_inc(sc);
    if (mov->next_root_atom) {
        // Clamp the sample so we never read past the next root atom.
        sample->pos = FFMIN(sample->pos, mov->next_root_atom);
        sample->size = FFMIN(sample->size, (mov->next_root_atom - sample->pos));
    }
    if (st->discard != AVDISCARD_ALL) {
        int64_t ret64 = avio_seek(sc->pb, sample->pos, SEEK_SET);
        if (ret64 != sample->pos) {
            av_log(mov->fc, AV_LOG_ERROR, "stream %d, offset 0x%"PRIx64": partial file\n",
                   sc->ffindex, sample->pos);
            if (should_retry(sc->pb, ret64)) {
                // Transient I/O condition: undo the sample advance so the
                // same sample is attempted again on the next call.
                mov_current_sample_dec(sc);
            } else if (ret64 < 0) {
                return (int)ret64;
            }
            return AVERROR_INVALIDDATA;
        }
        if (st->discard == AVDISCARD_NONKEY && !(sample->flags & AVINDEX_KEYFRAME)) {
            av_log(mov->fc, AV_LOG_DEBUG, "Nonkey frame from stream %d discarded due to AVDISCARD_NONKEY\n", sc->ffindex);
            goto retry;
        }
        if (st->codecpar->codec_id == AV_CODEC_ID_EIA_608 && sample->size > 8)
            ret = get_eia608_packet(sc->pb, pkt, sample->size);
        else
            ret = av_get_packet(sc->pb, pkt, sample->size);
        if (ret < 0) {
            if (should_retry(sc->pb, ret)) {
                mov_current_sample_dec(sc);
            }
            return ret;
        }
#if CONFIG_DV_DEMUXER
        if (mov->dv_demux && sc->dv_audio_container) {
            // DV-in-MOV: run the raw payload through the DV demuxer and
            // replace the packet with the extracted audio packet.
            ret = avpriv_dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size, pkt->pos);
            av_packet_unref(pkt);
            if (ret < 0)
                return ret;
            ret = avpriv_dv_get_packet(mov->dv_demux, pkt);
            if (ret < 0)
                return ret;
        }
#endif
        if (sc->has_palette) {
            uint8_t *pal;
            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
            if (!pal) {
                av_log(mov->fc, AV_LOG_ERROR, "Cannot append palette to packet\n");
            } else {
                memcpy(pal, sc->palette, AVPALETTE_SIZE);
                sc->has_palette = 0;
            }
        }
        if (st->codecpar->codec_id == AV_CODEC_ID_MP3 && !ffstream(st)->need_parsing && pkt->size > 4) {
            // Enable full parsing if the payload does not start with a
            // valid MPEG audio header (e.g. garbage/ID3 prefix).
            if (ff_mpa_check_header(AV_RB32(pkt->data)) < 0)
                ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
        }
    }
    pkt->stream_index = sc->ffindex;
    pkt->dts = sample->timestamp;
    if (sample->flags & AVINDEX_DISCARD_FRAME) {
        pkt->flags |= AV_PKT_FLAG_DISCARD;
    }
    if (sc->ctts_data && sc->ctts_index < sc->ctts_count) {
        // Apply the composition time offset from the ctts table.
        pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration;
        /* update ctts context */
        sc->ctts_sample++;
        if (sc->ctts_index < sc->ctts_count &&
            sc->ctts_data[sc->ctts_index].count == sc->ctts_sample) {
            sc->ctts_index++;
            sc->ctts_sample = 0;
        }
    } else {
        // No ctts: derive the duration from the next index entry (or the
        // stream duration for the final sample); pts equals dts.
        int64_t next_dts = (sc->current_sample < ffstream(st)->nb_index_entries) ?
            ffstream(st)->index_entries[sc->current_sample].timestamp : st->duration;
        if (next_dts >= pkt->dts)
            pkt->duration = next_dts - pkt->dts;
        pkt->pts = pkt->dts;
    }
    if (st->discard == AVDISCARD_ALL)
        goto retry;
    if (sc->sdtp_data && sc->current_sample <= sc->sdtp_count) {
        // sdtp "sample is depended on" bits: mark droppable frames.
        uint8_t sample_flags = sc->sdtp_data[sc->current_sample - 1];
        uint8_t sample_is_depended_on = (sample_flags >> 2) & 0x3;
        pkt->flags |= sample_is_depended_on == MOV_SAMPLE_DEPENDENCY_NO ? AV_PKT_FLAG_DISPOSABLE : 0;
    }
    pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? AV_PKT_FLAG_KEY : 0;
    pkt->pos = sample->pos;
    /* Multiple stsd handling. */
    if (sc->stsc_data) {
        if (sc->stsc_data[sc->stsc_index].id > 0 &&
            sc->stsc_data[sc->stsc_index].id - 1 < sc->stsd_count &&
            sc->stsc_data[sc->stsc_index].id - 1 != sc->last_stsd_index) {
            ret = mov_change_extradata(sc, pkt);
            if (ret < 0)
                return ret;
        }
        /* Update the stsc index for the next sample */
        sc->stsc_sample++;
        if (mov_stsc_index_valid(sc->stsc_index, sc->stsc_count) &&
            mov_get_stsc_samples(sc, sc->stsc_index) == sc->stsc_sample) {
            sc->stsc_index++;
            sc->stsc_sample = 0;
        }
    }
    if (mov->aax_mode)
        aax_filter(pkt->data, pkt->size, mov);
    ret = cenc_filter(mov, st, sc, pkt, current_index);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
| 0
|
279,948
|
/*
 * ":copy" / ":t" — copy the lines "line1" to "line2" (inclusive) to below
 * line "n".  Updates the change marks (unless 'lockmarks' is in effect),
 * adjusts the Visual area if active, and reports the number of lines copied.
 */
    ex_copy(linenr_T line1, linenr_T line2, linenr_T n)
{
    linenr_T	count;
    char_u	*p;
    count = line2 - line1 + 1;
    if ((cmdmod.cmod_flags & CMOD_LOCKMARKS) == 0)
    {
	curbuf->b_op_start.lnum = n + 1;
	curbuf->b_op_end.lnum = n + count;
	curbuf->b_op_start.col = curbuf->b_op_end.col = 0;
    }
    /*
     * there are three situations:
     * 1. destination is above line1
     * 2. destination is between line1 and line2
     * 3. destination is below line2
     *
     * n = destination (when starting)
     * curwin->w_cursor.lnum = destination (while copying)
     * line1 = start of source (while copying)
     * line2 = end of source (while copying)
     */
    if (u_save(n, n + 1) == FAIL)
	return;
    curwin->w_cursor.lnum = n;
    while (line1 <= line2)
    {
	// need to use vim_strsave() because the line will be unlocked within
	// ml_append()
	p = vim_strsave(ml_get(line1));
	if (p != NULL)
	{
	    ml_append(curwin->w_cursor.lnum, p, (colnr_T)0, FALSE);
	    vim_free(p);
	}
	// situation 2: skip already copied lines
	if (line1 == n)
	    line1 = curwin->w_cursor.lnum;
	++line1;
	// the insertion shifted the source range down: follow it
	if (curwin->w_cursor.lnum < line1)
	    ++line1;
	if (curwin->w_cursor.lnum < line2)
	    ++line2;
	++curwin->w_cursor.lnum;
    }
    // mark the inserted lines as changed and keep Visual inside the buffer
    appended_lines_mark(n, count);
    if (VIsual_active)
	check_pos(curbuf, &VIsual);
    msgmore((long)count);
}
| 0
|
294,650
|
/*
 * Date#ld — return the Lilian day number of this date (days counted from
 * 1582-10-15, the first day of the Gregorian calendar).
 * Computed as the local Julian day number minus the fixed offset 2299160.
 */
d_lite_ld(VALUE self)
{
    get_d1(self); /* project macro: unpacks the receiver into `dat` */
    return f_sub(m_real_local_jd(dat), INT2FIX(2299160));
}
| 0
|
252,354
|
// Huffman-compress `nRaw` 16-bit symbols from `raw` into `compressed`.
// Output layout: a 20-byte header (min symbol, max symbol, packed-table
// length, encoded bit count, reserved word), the packed encoding table,
// then the bit-packed data.  Returns the total number of bytes written,
// or 0 when there is no input.
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  // Histogram of symbol occurrences over the full encoding alphabet.
  std::vector<long long> histogram(HUF_ENCSIZE);
  countFrequencies(histogram, raw, nRaw);

  // Build the code table; minSym/maxSym bound the symbols actually used.
  int minSym = 0;
  int maxSym = 0;
  hufBuildEncTable(histogram.data(), &minSym, &maxSym);

  // The packed table starts right after the fixed 20-byte header.
  char *tableBegin = compressed + 20;
  char *cursor = tableBegin;
  hufPackEncTable(histogram.data(), minSym, maxSym, &cursor);
  const int packedTableBytes = cursor - tableBegin;

  // The bit-packed symbol stream follows the table immediately.
  char *payload = cursor;
  const int payloadBits = hufEncode(histogram.data(), raw, nRaw, maxSym, payload);
  const int payloadBytes = (payloadBits + 7) / 8;

  // Fill in the header fields.
  writeUInt(compressed, minSym);
  writeUInt(compressed + 4, maxSym);
  writeUInt(compressed + 8, packedTableBytes);
  writeUInt(compressed + 12, payloadBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return payload + payloadBytes - compressed;
}
| 0
|
275,975
|
/* Return the size in bytes of the curve order n (bit count rounded up
 * to whole bytes via the BITS_TO_BYTES macro). */
unsigned uECC_curve_num_n_bytes(uECC_Curve curve) {
    return BITS_TO_BYTES(curve->num_n_bits);
}
| 0
|
221,395
|
/*
 * Decide whether an MSR-access exit taken while running an L2 guest must be
 * forwarded to the L1 hypervisor (NESTED_EXIT_DONE) or handled by the host
 * (NESTED_EXIT_HOST), based on L1's MSR permission bitmap.
 */
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	/* L1 does not intercept MSR accesses at all: host handles it. */
	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	/* exit_info_1 bit 0 distinguishes WRMSR (1) from RDMSR (0). */
	write  = svm->vmcb->control.exit_info_1 & 1;
	/* Each MSR uses two adjacent bits (read, write) in the bitmap. */
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	/* MSR not covered by the permission map: reflect to L1. */
	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	/* Failure to read L1's bitmap is treated as "intercepted". */
	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
| 0
|
411,798
|
/*
 * GIOChannel watch callback fired when the SIGTERM self-pipe becomes
 * readable: stop the launcher's main loop so the process can shut down.
 * Returns FALSE so the watch is removed after firing once.
 */
on_sigterm_pipe (GIOChannel *channel,
                 GIOCondition condition,
                 gpointer data)
{
  A11yBusLauncher *launcher = (A11yBusLauncher *) data;

  g_main_loop_quit (launcher->loop);

  return FALSE;
}
| 0
|
387,759
|
// Delegate to this klass's dependency context: mark every nmethod whose
// recorded dependencies are invalidated by `changes`, returning the count.
int InstanceKlass::mark_dependent_nmethods(KlassDepChange& changes) {
  return dependencies().mark_dependent_nmethods(changes);
}
| 0
|
274,880
|
// Element-wise GREATER on two int32 tensors of shape {1,1,1,4}:
// expects input1[i] > input2[i] per element, with the output shape
// matching the (identical) input shapes.
TEST(ComparisonsTest, GreaterInt) {
  ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
                          BuiltinOperator_GREATER);
  model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
  model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
  model.Invoke();
  // -1>1:F, 9>2:T, 7>7:F (strict), 3>5:F
  EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, false));
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
| 0
|
252,410
|
/*
 * Locate a file in the archive's central directory by name (and optional
 * comment).  Flags: MZ_ZIP_FLAG_IGNORE_PATH matches on the basename only;
 * MZ_ZIP_FLAG_CASE_SENSITIVE makes the comparison case sensitive.
 * Returns the file index, or -1 if not found / on invalid arguments.
 */
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  /* Fast path: exact, case-insensitive, full-path lookups can use the
   * pre-sorted central directory and binary search. */
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF) return -1;  /* ZIP name fields are 16-bit */
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF) return -1;
  /* Slow path: linear scan over every central directory header. */
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len) continue;
    if (comment_len) {
      /* The comment follows the filename and extra field in the header. */
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      /* Strip everything up to the last '/', '\\' or ':' separator. */
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}
| 0
|
259,161
|
/*
 * Return nonzero while `index` refers to an stsc entry that is NOT the
 * last one, i.e. there is a following entry that bounds the current run.
 * (When count == 0 the unsigned wrap of count - 1 is intentional and
 * matches the original expression.)
 */
static inline int mov_stsc_index_valid(unsigned int index, unsigned int count)
{
    unsigned int last = count - 1;

    return index < last;
}
| 0
|
244,340
|
/*
 * Compute the serialized size of a Progressive Download Info ('pdin') box:
 * every entry adds two 32-bit fields (rate + initial delay) = 8 bytes.
 */
GF_Err pdin_box_size(GF_Box *s)
{
	GF_ProgressiveDownloadBox *pdin = (GF_ProgressiveDownloadBox *)s;

	pdin->size += pdin->count * 8;
	return GF_OK;
}
| 0
|
418,786
|
/*
 * Decode a terminal mouse escape sequence found in the typeahead buffer.
 *
 * "tp" points at the received termcode and "*slen" is the number of bytes
 * consumed so far; it is advanced past any extra bytes read here.
 * "key_name" holds the two-byte termcode key and is overwritten with the
 * pseudo mouse key event.  "modifiers_start" points at already-parsed
 * modifier digits (URXVT/SGR encodings), "idx" is the termcode table index
 * and "*modifiers" collects MOD_MASK_* flags.  The decoded position is
 * stored in the mouse_row/mouse_col globals.
 *
 * Returns -1 when the sequence is incomplete or malformed, 0 on success.
 */
    check_termcode_mouse(
	char_u	*tp,
	int	*slen,
	char_u	*key_name,
	char_u	*modifiers_start,
	int	idx,
	int	*modifiers)
{
    int		j;
    char_u	*p;
# if !defined(UNIX) || defined(FEAT_MOUSE_XTERM) || defined(FEAT_GUI) \
	|| defined(FEAT_MOUSE_GPM) || defined(FEAT_SYSMOUSE)
    char_u	bytes[6];
    int		num_bytes;
# endif
    int		mouse_code = 0;	    // init for GCC
    int		is_click, is_drag;
    int		is_release, release_is_ambiguous;
    int		wheel_code = 0;
    int		current_button;
    static int	orig_num_clicks = 1;
    static int	orig_mouse_code = 0x0;
# ifdef CHECK_DOUBLE_CLICK
    static int	orig_mouse_col = 0;
    static int	orig_mouse_row = 0;
    static struct timeval orig_mouse_time = {0, 0};
				    // time of previous mouse click
    struct timeval mouse_time;	    // time of current mouse click
    long	timediff;	    // elapsed time in msec
# endif

    is_click = is_drag = is_release = release_is_ambiguous = FALSE;

# if !defined(UNIX) || defined(FEAT_MOUSE_XTERM) || defined(FEAT_GUI) \
	|| defined(FEAT_MOUSE_GPM) || defined(FEAT_SYSMOUSE)
    if (key_name[0] == KS_MOUSE
# ifdef FEAT_MOUSE_GPM
	    || key_name[0] == KS_GPM_MOUSE
# endif
	)
    {
	/*
	 * For xterm we get "<t_mouse>scr", where s == encoded button state:
	 * 0x20 = left button down
	 * 0x21 = middle button down
	 * 0x22 = right button down
	 * 0x23 = any button release
	 * 0x60 = button 4 down (scroll wheel down)
	 * 0x61 = button 5 down (scroll wheel up)
	 * add 0x04 for SHIFT
	 * add 0x08 for ALT
	 * add 0x10 for CTRL
	 * add 0x20 for mouse drag (0x40 is drag with left button)
	 * add 0x40 for mouse move (0x80 is move, 0x81 too)
	 * 0x43 (drag + release) is also move
	 * c == column + ' ' + 1 == column + 33
	 * r == row + ' ' + 1 == row + 33
	 *
	 * The coordinates are passed on through global variables. Ugly, but
	 * this avoids trouble with mouse clicks at an unexpected moment and
	 * allows for mapping them.
	 */
	for (;;)
	{
# ifdef FEAT_GUI
	    if (gui.in_use)
	    {
		// GUI uses more bits for columns > 223
		num_bytes = get_bytes_from_buf(tp + *slen, bytes, 5);
		if (num_bytes == -1)	// not enough coordinates
		    return -1;
		mouse_code = bytes[0];
		mouse_col = 128 * (bytes[1] - ' ' - 1)
		    + bytes[2] - ' ' - 1;
		mouse_row = 128 * (bytes[3] - ' ' - 1)
		    + bytes[4] - ' ' - 1;
	    }
	    else
# endif
	    {
		num_bytes = get_bytes_from_buf(tp + *slen, bytes, 3);
		if (num_bytes == -1)	// not enough coordinates
		    return -1;
		mouse_code = bytes[0];
		mouse_col = bytes[1] - ' ' - 1;
		mouse_row = bytes[2] - ' ' - 1;
	    }
	    *slen += num_bytes;
	    // If the following bytes is also a mouse code and it has the same
	    // code, dump this one and get the next. This makes dragging a
	    // whole lot faster.
# ifdef FEAT_GUI
	    if (gui.in_use)
		j = 3;
	    else
# endif
		j = get_termcode_len(idx);
	    if (STRNCMP(tp, tp + *slen, (size_t)j) == 0
		    && tp[*slen + j] == mouse_code
		    && tp[*slen + j + 1] != NUL
		    && tp[*slen + j + 2] != NUL
# ifdef FEAT_GUI
		    && (!gui.in_use
			|| (tp[*slen + j + 3] != NUL
			    && tp[*slen + j + 4] != NUL))
# endif
	       )
		*slen += j;
	    else
		break;
	}
    }
    if (key_name[0] == KS_URXVT_MOUSE
	    || key_name[0] == KS_SGR_MOUSE
	    || key_name[0] == KS_SGR_MOUSE_RELEASE)
    {
	// URXVT 1015 mouse reporting mode:
	// Almost identical to xterm mouse mode, except the values are decimal
	// instead of bytes.
	//
	// \033[%d;%d;%dM
	//	     ^-- row
	//	  ^----- column
	//	^-------- code
	//
	// SGR 1006 mouse reporting mode:
	// Almost identical to xterm mouse mode, except the values are decimal
	// instead of bytes.
	//
	// \033[<%d;%d;%dM
	//	      ^-- row
	//	   ^----- column
	//	 ^-------- code
	//
	// \033[<%d;%d;%dm	: mouse release event
	//	      ^-- row
	//	   ^----- column
	//	 ^-------- code
	p = modifiers_start;
	if (p == NULL)
	    return -1;
	mouse_code = getdigits(&p);
	if (*p++ != ';')
	    return -1;
	// when mouse reporting is SGR, add 32 to mouse code
	if (key_name[0] == KS_SGR_MOUSE
		|| key_name[0] == KS_SGR_MOUSE_RELEASE)
	    mouse_code += 32;
	mouse_col = getdigits(&p) - 1;
	if (*p++ != ';')
	    return -1;
	mouse_row = getdigits(&p) - 1;
	// The modifiers were the mouse coordinates, not the modifier keys
	// (alt/shift/ctrl/meta) state.
	*modifiers = 0;
    }
    // SGR tells press and release apart via the final character; all other
    // protocols report a release ambiguously (any button, 0x23).
    if (key_name[0] == KS_SGR_MOUSE
	    || key_name[0] == KS_SGR_MOUSE_RELEASE)
    {
	if (key_name[0] == KS_SGR_MOUSE_RELEASE)
	{
	    is_release = TRUE;
	    // This is used below to set held_button.
	    mouse_code |= MOUSE_RELEASE;
	}
    }
    else
    {
	release_is_ambiguous = TRUE;
	if ((mouse_code & MOUSE_RELEASE) == MOUSE_RELEASE)
	    is_release = TRUE;
    }
    if (key_name[0] == KS_MOUSE
# ifdef FEAT_MOUSE_GPM
	    || key_name[0] == KS_GPM_MOUSE
# endif
# ifdef FEAT_MOUSE_URXVT
	    || key_name[0] == KS_URXVT_MOUSE
# endif
	    || key_name[0] == KS_SGR_MOUSE
	    || key_name[0] == KS_SGR_MOUSE_RELEASE)
    {
# if !defined(MSWIN)
	/*
	 * Handle old style mouse events.
	 * Recognize the xterm mouse wheel, but not in the GUI, the
	 * Linux console with GPM and the MS-DOS or Win32 console
	 * (multi-clicks use >= 0x60).
	 */
	if (mouse_code >= MOUSEWHEEL_LOW
# ifdef FEAT_GUI
		&& !gui.in_use
# endif
# ifdef FEAT_MOUSE_GPM
		&& key_name[0] != KS_GPM_MOUSE
# endif
	   )
	{
# if defined(UNIX)
	    if (use_xterm_mouse() > 1 && mouse_code >= 0x80)
		// mouse-move event, using MOUSE_DRAG works
		mouse_code = MOUSE_DRAG;
	    else
# endif
		// Keep the mouse_code before it's changed, so that we
		// remember that it was a mouse wheel click.
		wheel_code = mouse_code;
	}
# ifdef FEAT_MOUSE_XTERM
	else if (held_button == MOUSE_RELEASE
# ifdef FEAT_GUI
		&& !gui.in_use
# endif
		&& (mouse_code == 0x23 || mouse_code == 0x24
		    || mouse_code == 0x40 || mouse_code == 0x41))
	{
	    // Apparently 0x23 and 0x24 are used by rxvt scroll wheel.
	    // And 0x40 and 0x41 are used by some xterm emulator.
	    wheel_code = mouse_code - (mouse_code >= 0x40 ? 0x40 : 0x23)
		+ MOUSEWHEEL_LOW;
	}
# endif
# if defined(UNIX)
	else if (use_xterm_mouse() > 1)
	{
	    if (mouse_code & MOUSE_DRAG_XTERM)
		mouse_code |= MOUSE_DRAG;
	}
# endif
# ifdef FEAT_XCLIPBOARD
	else if (!(mouse_code & MOUSE_DRAG & ~MOUSE_CLICK_MASK))
	{
	    if (is_release)
		stop_xterm_trace();
	    else
		start_xterm_trace(mouse_code);
	}
# endif
# endif
    }
# endif // !UNIX || FEAT_MOUSE_XTERM
# ifdef FEAT_MOUSE_NET
    if (key_name[0] == KS_NETTERM_MOUSE)
    {
	int mc, mr;
	// expect a rather limited sequence like: balancing {
	// \033}6,45\r
	// '6' is the row, 45 is the column
	p = tp + *slen;
	mr = getdigits(&p);
	if (*p++ != ',')
	    return -1;
	mc = getdigits(&p);
	if (*p++ != '\r')
	    return -1;
	mouse_col = mc - 1;
	mouse_row = mr - 1;
	mouse_code = MOUSE_LEFT;
	*slen += (int)(p - (tp + *slen));
    }
# endif // FEAT_MOUSE_NET
# ifdef FEAT_MOUSE_JSB
    if (key_name[0] == KS_JSBTERM_MOUSE)
    {
	int mult, val, iter, button, status;
	/*
	 * JSBTERM Input Model
	 * \033[0~zw uniq escape sequence
	 * (L-x)  Left button pressed - not pressed x not reporting
	 * (M-x)  Middle button pressed - not pressed x not reporting
	 * (R-x)  Right button pressed - not pressed x not reporting
	 * (SDmdu)  Single , Double click, m: mouse move, d: button down,
	 *						   u: button up
	 *  ###    X cursor position padded to 3 digits
	 *  ###    Y cursor position padded to 3 digits
	 * (s-x)  SHIFT key pressed - not pressed x not reporting
	 * (c-x)  CTRL key pressed - not pressed x not reporting
	 * \033\\ terminating sequence
	 */
	p = tp + *slen;
	button = mouse_code = 0;
	switch (*p++)
	{
	    case 'L': button = 1; break;
	    case '-': break;
	    case 'x': break;	    // ignore sequence
	    default:  return -1;    // Unknown Result
	}
	switch (*p++)
	{
	    case 'M': button |= 2; break;
	    case '-': break;
	    case 'x': break;	    // ignore sequence
	    default:  return -1;    // Unknown Result
	}
	switch (*p++)
	{
	    case 'R': button |= 4; break;
	    case '-': break;
	    case 'x': break;	    // ignore sequence
	    default:  return -1;    // Unknown Result
	}
	status = *p++;
	// read two 3-digit decimal coordinates (column, then row)
	for (val = 0, mult = 100, iter = 0; iter < 3; iter++,
		mult /= 10, p++)
	    if (*p >= '0' && *p <= '9')
		val += (*p - '0') * mult;
	    else
		return -1;
	mouse_col = val;
	for (val = 0, mult = 100, iter = 0; iter < 3; iter++,
		mult /= 10, p++)
	    if (*p >= '0' && *p <= '9')
		val += (*p - '0') * mult;
	    else
		return -1;
	mouse_row = val;
	switch (*p++)
	{
	    case 's': button |= 8; break;   // SHIFT key Pressed
	    case '-': break;		    // Not Pressed
	    case 'x': break;		    // Not Reporting
	    default:  return -1;	    // Unknown Result
	}
	switch (*p++)
	{
	    case 'c': button |= 16; break;  // CTRL key Pressed
	    case '-': break;		    // Not Pressed
	    case 'x': break;		    // Not Reporting
	    default:  return -1;	    // Unknown Result
	}
	if (*p++ != '\033')
	    return -1;
	if (*p++ != '\\')
	    return -1;
	switch (status)
	{
	    case 'D': // Double Click
	    case 'S': // Single Click
		if (button & 1) mouse_code |= MOUSE_LEFT;
		if (button & 2) mouse_code |= MOUSE_MIDDLE;
		if (button & 4) mouse_code |= MOUSE_RIGHT;
		if (button & 8) mouse_code |= MOUSE_SHIFT;
		if (button & 16) mouse_code |= MOUSE_CTRL;
		break;
	    case 'm': // Mouse move
		if (button & 1) mouse_code |= MOUSE_LEFT;
		if (button & 2) mouse_code |= MOUSE_MIDDLE;
		if (button & 4) mouse_code |= MOUSE_RIGHT;
		if (button & 8) mouse_code |= MOUSE_SHIFT;
		if (button & 16) mouse_code |= MOUSE_CTRL;
		if ((button & 7) != 0)
		{
		    held_button = mouse_code;
		    mouse_code |= MOUSE_DRAG;
		}
		is_drag = TRUE;
		showmode();
		break;
	    case 'd': // Button Down
		if (button & 1) mouse_code |= MOUSE_LEFT;
		if (button & 2) mouse_code |= MOUSE_MIDDLE;
		if (button & 4) mouse_code |= MOUSE_RIGHT;
		if (button & 8) mouse_code |= MOUSE_SHIFT;
		if (button & 16) mouse_code |= MOUSE_CTRL;
		break;
	    case 'u': // Button Up
		is_release = TRUE;
		if (button & 1)
		    mouse_code |= MOUSE_LEFT;
		if (button & 2)
		    mouse_code |= MOUSE_MIDDLE;
		if (button & 4)
		    mouse_code |= MOUSE_RIGHT;
		if (button & 8)
		    mouse_code |= MOUSE_SHIFT;
		if (button & 16)
		    mouse_code |= MOUSE_CTRL;
		break;
	    default: return -1; // Unknown Result
	}
	*slen += (p - (tp + *slen));
    }
# endif // FEAT_MOUSE_JSB
# ifdef FEAT_MOUSE_DEC
    if (key_name[0] == KS_DEC_MOUSE)
    {
	/*
	 * The DEC Locator Input Model
	 * Netterm delivers the code sequence:
	 *  \033[2;4;24;80&w  (left button down)
	 *  \033[3;0;24;80&w  (left button up)
	 *  \033[6;1;24;80&w  (right button down)
	 *  \033[7;0;24;80&w  (right button up)
	 * CSI Pe ; Pb ; Pr ; Pc ; Pp & w
	 * Pe is the event code
	 * Pb is the button code
	 * Pr is the row coordinate
	 * Pc is the column coordinate
	 * Pp is the third coordinate (page number)
	 * Pe, the event code indicates what event caused this report
	 *    The following event codes are defined:
	 *    0 - request, the terminal received an explicit request for a
	 *	  locator report, but the locator is unavailable
	 *    1 - request, the terminal received an explicit request for a
	 *	  locator report
	 *    2 - left button down
	 *    3 - left button up
	 *    4 - middle button down
	 *    5 - middle button up
	 *    6 - right button down
	 *    7 - right button up
	 *    8 - fourth button down
	 *    9 - fourth button up
	 *    10 - locator outside filter rectangle
	 * Pb, the button code, ASCII decimal 0-15 indicating which buttons are
	 *    down if any. The state of the four buttons on the locator
	 *    correspond to the low four bits of the decimal value, "1" means
	 *    button depressed
	 *    0 - no buttons down,
	 *    1 - right,
	 *    2 - middle,
	 *    4 - left,
	 *    8 - fourth
	 * Pr is the row coordinate of the locator position in the page,
	 *    encoded as an ASCII decimal value. If Pr is omitted, the locator
	 *    position is undefined (outside the terminal window for example).
	 * Pc is the column coordinate of the locator position in the page,
	 *    encoded as an ASCII decimal value. If Pc is omitted, the locator
	 *    position is undefined (outside the terminal window for example).
	 * Pp is the page coordinate of the locator position encoded as an
	 *    ASCII decimal value. The page coordinate may be omitted if the
	 *    locator is on page one (the default). We ignore it anyway.
	 */
	int Pe, Pb, Pr, Pc;
	p = tp + *slen;
	// get event status
	Pe = getdigits(&p);
	if (*p++ != ';')
	    return -1;
	// get button status
	Pb = getdigits(&p);
	if (*p++ != ';')
	    return -1;
	// get row status
	Pr = getdigits(&p);
	if (*p++ != ';')
	    return -1;
	// get column status
	Pc = getdigits(&p);
	// the page parameter is optional
	if (*p == ';')
	{
	    p++;
	    (void)getdigits(&p);
	}
	if (*p++ != '&')
	    return -1;
	if (*p++ != 'w')
	    return -1;
	mouse_code = 0;
	switch (Pe)
	{
	    case  0: return -1; // position request while unavailable
	    case  1: // a response to a locator position request includes
		     //	the status of all buttons
		     Pb &= 7;   // mask off and ignore fourth button
		     if (Pb & 4)
			 mouse_code = MOUSE_LEFT;
		     if (Pb & 2)
			 mouse_code = MOUSE_MIDDLE;
		     if (Pb & 1)
			 mouse_code = MOUSE_RIGHT;
		     if (Pb)
		     {
			 held_button = mouse_code;
			 mouse_code |= MOUSE_DRAG;
			 WantQueryMouse = TRUE;
		     }
		     is_drag = TRUE;
		     showmode();
		     break;
	    case  2: mouse_code = MOUSE_LEFT;
		     WantQueryMouse = TRUE;
		     break;
	    case  3: mouse_code = MOUSE_LEFT;
		     is_release = TRUE;
		     break;
	    case  4: mouse_code = MOUSE_MIDDLE;
		     WantQueryMouse = TRUE;
		     break;
	    case  5: mouse_code = MOUSE_MIDDLE;
		     is_release = TRUE;
		     break;
	    case  6: mouse_code = MOUSE_RIGHT;
		     WantQueryMouse = TRUE;
		     break;
	    case  7: mouse_code = MOUSE_RIGHT;
		     is_release = TRUE;
		     break;
	    case  8: return -1; // fourth button down
	    case  9: return -1; // fourth button up
	    case 10: return -1; // mouse outside of filter rectangle
	    default: return -1; // should never occur
	}
	mouse_col = Pc - 1;
	mouse_row = Pr - 1;
	*slen += (int)(p - (tp + *slen));
    }
# endif // FEAT_MOUSE_DEC
# ifdef FEAT_MOUSE_PTERM
    if (key_name[0] == KS_PTERM_MOUSE)
    {
	int button, num_clicks, action;
	p = tp + *slen;
	action = getdigits(&p);
	if (*p++ != ';')
	    return -1;
	mouse_row = getdigits(&p);
	if (*p++ != ';')
	    return -1;
	mouse_col = getdigits(&p);
	if (*p++ != ';')
	    return -1;
	button = getdigits(&p);
	mouse_code = 0;
	switch (button)
	{
	    case 4: mouse_code = MOUSE_LEFT; break;
	    case 1: mouse_code = MOUSE_RIGHT; break;
	    case 2: mouse_code = MOUSE_MIDDLE; break;
	    default: return -1;
	}
	switch (action)
	{
	    case 31: // Initial press
		if (*p++ != ';')
		    return -1;
		num_clicks = getdigits(&p); // Not used
		break;
	    case 32: // Release
		is_release = TRUE;
		break;
	    case 33: // Drag
		held_button = mouse_code;
		mouse_code |= MOUSE_DRAG;
		break;
	    default:
		return -1;
	}
	if (*p++ != 't')
	    return -1;
	*slen += (p - (tp + *slen));
    }
# endif // FEAT_MOUSE_PTERM

    // Interpret the mouse code
    current_button = (mouse_code & MOUSE_CLICK_MASK);
    if (is_release)
	current_button |= MOUSE_RELEASE;
    if (current_button == MOUSE_RELEASE
# ifdef FEAT_MOUSE_XTERM
	    && wheel_code == 0
# endif
       )
    {
	/*
	 * If we get a mouse drag or release event when there is no mouse
	 * button held down (held_button == MOUSE_RELEASE), produce a K_IGNORE
	 * below.
	 * (can happen when you hold down two buttons and then let them go, or
	 * click in the menu bar, but not on a menu, and drag into the text).
	 */
	if ((mouse_code & MOUSE_DRAG) == MOUSE_DRAG)
	    is_drag = TRUE;
	current_button = held_button;
    }
    else
    {
	if (wheel_code == 0)
	{
# ifdef CHECK_DOUBLE_CLICK
# ifdef FEAT_MOUSE_GPM
	    /*
	     * Only for Unix, when GUI not active, we handle multi-clicks here, but
	     * not for GPM mouse events.
	     */
# ifdef FEAT_GUI
	    if (key_name[0] != KS_GPM_MOUSE && !gui.in_use)
# else
	    if (key_name[0] != KS_GPM_MOUSE)
# endif
# else
# ifdef FEAT_GUI
	    if (!gui.in_use)
# endif
# endif
	    {
		/*
		 * Compute the time elapsed since the previous mouse click.
		 */
		gettimeofday(&mouse_time, NULL);
		if (orig_mouse_time.tv_sec == 0)
		{
		    /*
		     * Avoid computing the difference between mouse_time
		     * and orig_mouse_time for the first click, as the
		     * difference would be huge and would cause
		     * multiplication overflow.
		     */
		    timediff = p_mouset;
		}
		else
		    timediff = time_diff_ms(&orig_mouse_time, &mouse_time);
		orig_mouse_time = mouse_time;
		// Same button, within 'mousetime', at the same position (or
		// in the tab line): count it as a multi-click.
		if (mouse_code == orig_mouse_code
			&& timediff < p_mouset
			&& orig_num_clicks != 4
			&& orig_mouse_col == mouse_col
			&& orig_mouse_row == mouse_row
			&& (is_mouse_topline(curwin)
			    // Double click in tab pages line also works
			    // when window contents changes.
			    || (mouse_row == 0 && firstwin->w_winrow > 0))
		   )
		    ++orig_num_clicks;
		else
		    orig_num_clicks = 1;
		orig_mouse_col = mouse_col;
		orig_mouse_row = mouse_row;
		set_mouse_topline(curwin);
	    }
# if defined(FEAT_GUI) || defined(FEAT_MOUSE_GPM)
	    else
		orig_num_clicks = NUM_MOUSE_CLICKS(mouse_code);
# endif
# else
	    orig_num_clicks = NUM_MOUSE_CLICKS(mouse_code);
# endif
	    is_click = TRUE;
	}
	orig_mouse_code = mouse_code;
    }
    if (!is_drag)
	held_button = mouse_code & MOUSE_CLICK_MASK;

    /*
     * Translate the actual mouse event into a pseudo mouse event.
     * First work out what modifiers are to be used.
     */
    if (orig_mouse_code & MOUSE_SHIFT)
	*modifiers |= MOD_MASK_SHIFT;
    if (orig_mouse_code & MOUSE_CTRL)
	*modifiers |= MOD_MASK_CTRL;
    if (orig_mouse_code & MOUSE_ALT)
	*modifiers |= MOD_MASK_ALT;
    if (orig_num_clicks == 2)
	*modifiers |= MOD_MASK_2CLICK;
    else if (orig_num_clicks == 3)
	*modifiers |= MOD_MASK_3CLICK;
    else if (orig_num_clicks == 4)
	*modifiers |= MOD_MASK_4CLICK;

    // Work out our pseudo mouse event. Note that MOUSE_RELEASE gets added,
    // then it's not mouse up/down.
    key_name[0] = KS_EXTRA;
    if (wheel_code != 0 && (!is_release || release_is_ambiguous))
    {
	// Wheel events: map the two low bits to the four scroll directions.
	if (wheel_code & MOUSE_CTRL)
	    *modifiers |= MOD_MASK_CTRL;
	if (wheel_code & MOUSE_ALT)
	    *modifiers |= MOD_MASK_ALT;
	if (wheel_code & 1 && wheel_code & 2)
	    key_name[1] = (int)KE_MOUSELEFT;
	else if (wheel_code & 2)
	    key_name[1] = (int)KE_MOUSERIGHT;
	else if (wheel_code & 1)
	    key_name[1] = (int)KE_MOUSEUP;
	else
	    key_name[1] = (int)KE_MOUSEDOWN;
	held_button = MOUSE_RELEASE;
    }
    else
	key_name[1] = get_pseudo_mouse_code(current_button, is_click, is_drag);

    // Make sure the mouse position is valid.  Some terminals may return weird
    // values.
    if (mouse_col >= Columns)
	mouse_col = Columns - 1;
    if (mouse_row >= Rows)
	mouse_row = Rows - 1;
    return 0;
}
| 0
|
349,249
|
/*
 * Read the squashfs-3 fragment table: first the table of metadata-block
 * indexes, then each (possibly byte-swapped) metadata block into the
 * global fragment_table.  On success *table_start is set to the start of
 * the first index block and TRUE is returned; FALSE indicates a corrupt
 * superblock or read failure.
 */
static int read_fragment_table(long long *table_start)
{
	/*
	 * Note on overflow limits:
	 * Size of SBlk.s.fragments is 2^32 (unsigned int)
	 * Max size of bytes is 2^32*16 or 2^36
	 * Max indexes is (2^32*16)/8K or 2^23
	 * Max length is ((2^32*16)/8K)*8 or 2^26 or 64M
	 */
	int res, i;
	long long bytes = SQUASHFS_FRAGMENT_BYTES_3((long long) sBlk.s.fragments);
	int indexes = SQUASHFS_FRAGMENT_INDEXES_3((long long) sBlk.s.fragments);
	int length = SQUASHFS_FRAGMENT_INDEX_BYTES_3((long long) sBlk.s.fragments);
	long long *fragment_table_index;

	/*
	 * The size of the index table (length bytes) should match the
	 * table start and end points
	 */
	if(length != (*table_start - sBlk.s.fragment_table_start)) {
		ERROR("read_fragment_table: Bad fragment count in super block\n");
		return FALSE;
	}

	TRACE("read_fragment_table: %d fragments, reading %d fragment indexes "
		"from 0x%llx\n", sBlk.s.fragments, indexes,
		sBlk.s.fragment_table_start);

	fragment_table_index = alloc_index_table(indexes);
	fragment_table = malloc(bytes);
	if(fragment_table == NULL)
		EXIT_UNSQUASH("read_fragment_table: failed to allocate "
			"fragment table\n");

	if(swap) {
		/* Filesystem has opposite endianness: read into a scratch
		 * buffer and byte-swap the indexes into place. */
		long long *sfragment_table_index = salloc_index_table(indexes);
		res = read_fs_bytes(fd, sBlk.s.fragment_table_start,
			length, sfragment_table_index);
		if(res == FALSE) {
			ERROR("read_fragment_table: failed to read fragment "
				"table index\n");
			return FALSE;
		}
		SQUASHFS_SWAP_FRAGMENT_INDEXES_3(fragment_table_index,
			sfragment_table_index, indexes);
	} else {
		res = read_fs_bytes(fd, sBlk.s.fragment_table_start,
			length, fragment_table_index);
		if(res == FALSE) {
			ERROR("read_fragment_table: failed to read fragment "
				"table index\n");
			return FALSE;
		}
	}

	for(i = 0; i < indexes; i++) {
		/* Every block is a full metadata block except possibly the
		 * last, which holds the remainder. */
		int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
			bytes & (SQUASHFS_METADATA_SIZE - 1);
		int length = read_block(fd, fragment_table_index[i], NULL,
			expected, ((char *) fragment_table) + ((long long) i *
			SQUASHFS_METADATA_SIZE));
		TRACE("Read fragment table block %d, from 0x%llx, length %d\n",
			i, fragment_table_index[i], length);
		if(length == FALSE) {
			ERROR("read_fragment_table: failed to read fragment "
				"table block\n");
			return FALSE;
		}
	}

	if(swap) {
		/* Byte-swap each fragment entry in place. */
		squashfs_fragment_entry_3 sfragment;
		for(i = 0; i < sBlk.s.fragments; i++) {
			SQUASHFS_SWAP_FRAGMENT_ENTRY_3((&sfragment),
				(&fragment_table[i]));
			memcpy((char *) &fragment_table[i], (char *) &sfragment,
				sizeof(squashfs_fragment_entry_3));
		}
	}

	*table_start = fragment_table_index[0];
	return TRUE;
}
| 0
|
336,515
|
/*
 * Intentionally empty global-initialization stub.
 * NOTE(review): presumably this variant is selected when the linked
 * OpenSSL initializes itself automatically (1.1.0+) — confirm against the
 * surrounding version guards.
 */
static inline void openssl_global_init(void)
{
}
| 0
|
220,208
|
// Default constructor: creates a Node in the "uninitialized" state —
// invalid ids (-1), NC_UNINITIALIZED class, no properties, device name
// index 0, and no enclosing while-loop context.
Node::Node()
    : id_(-1),
      cost_id_(-1),
      class_(NC_UNINITIALIZED),
      props_(nullptr),
      assigned_device_name_index_(0),
      while_ctx_(nullptr) {}
| 0
|
328,897
|
R_API char *r_bin_java_get_desc_from_cp_item_list(RList *cp_list, ut64 idx) {
	/*
	Given a constant pool object FieldRef, MethodRef, or InterfaceMethodRef
	return the actual descriptor string.
	@rvalue ut8* (user frees) or NULL
	*/
	RBinJavaCPTypeObj *obj;
	// Fix: validate cp_list BEFORE using it.  The original called
	// r_bin_java_get_item_from_cp_item_list (cp_list, idx) first and only
	// then checked for NULL, so a NULL list reached the lookup helper.
	if (!cp_list) {
		return NULL;
	}
	obj = r_bin_java_get_item_from_cp_item_list (cp_list, idx);
	return r_bin_java_get_item_desc_from_cp_item_list (cp_list, obj, MAX_CPITEMS);
}
| 0
|
498,116
|
/*
 * Return the URL scheme prefix for the current request, based on the
 * HTTPS environment variable the web server sets ("on" means TLS).
 */
const char *cgit_httpscheme(void)
{
	int is_https = ctx.env.https && !strcmp(ctx.env.https, "on");

	return is_https ? "https://" : "http://";
}
| 0
|
317,252
|
/*
 * LSM hook: check whether the current task may mmap at a low address.
 * Mappings below CONFIG_LSM_MMAP_MIN_ADDR require the process to hold
 * memprotect:mmap_zero permission on itself; higher addresses are allowed
 * without a check.  Returns 0 if permitted, negative errno otherwise.
 */
static int selinux_mmap_addr(unsigned long addr)
{
	int rc = 0;

	if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
		u32 sid = current_sid();

		rc = avc_has_perm(&selinux_state,
				  sid, sid, SECCLASS_MEMPROTECT,
				  MEMPROTECT__MMAP_ZERO, NULL);
	}
	return rc;
}
| 0
|
310,093
|
/*
 * Curses putp(): output a terminfo capability string.
 * Equivalent to tputs() with an affected-line count of 1, routing each
 * character through the library's default output function.
 */
putp(const char *string)
{
    return (tputs(string, 1, _nc_outch));
}
| 0
|
195,549
|
bool JSON_parser(Variant &z, const char *p, int length, bool const assoc,
int depth, int64_t options) {
// No GC safepoints during JSON parsing, please. Code is not re-entrant.
NoHandleSurpriseScope no_surprise(SafepointFlags);
json_parser *json = s_json_parser.get(); /* the parser state */
// Clear and reuse the thread-local string buffers. They are only freed if
// they exceed kMaxPersistentStringBufferCapacity at exit or if the thread
// is explicitly flushed (e.g., due to being idle).
json->initSb(length);
SCOPE_EXIT {
constexpr int kMaxPersistentStringBufferCapacity = 256 * 1024;
if (json->sb_cap > kMaxPersistentStringBufferCapacity) json->flushSb();
};
// SimpleParser only handles the most common set of options. Also, only use it
// if its array nesting depth check is *more* restrictive than what the user
// asks for, to ensure that the precise semantics of the general case is
// applied for all nesting overflows.
if (assoc &&
options == (options & (k_JSON_FB_LOOSE |
k_JSON_FB_DARRAYS |
k_JSON_FB_DARRAYS_AND_VARRAYS |
k_JSON_FB_HACK_ARRAYS |
k_JSON_FB_THRIFT_SIMPLE_JSON |
k_JSON_FB_LEGACY_HACK_ARRAYS)) &&
depth >= SimpleParser::kMaxArrayDepth &&
length <= RuntimeOption::EvalSimpleJsonMaxLength &&
SimpleParser::TryParse(p, length, json->tl_buffer.tv, z,
get_container_type_from_options(options),
options & k_JSON_FB_THRIFT_SIMPLE_JSON)) {
return true;
}
int b; /* the next character */
int c; /* the next character class */
int s; /* the next state */
int state = 0;
/*<fb>*/
bool const loose = options & k_JSON_FB_LOOSE;
JSONContainerType const container_type =
get_container_type_from_options(options);
int qchr = 0;
int8_t const *byte_class;
int8_t const (*next_state_table)[32];
if (loose) {
byte_class = loose_ascii_class;
next_state_table = loose_state_transition_table;
} else {
byte_class = ascii_class;
next_state_table = state_transition_table;
}
/*</fb>*/
UncheckedBuffer *buf = &json->sb_buf;
UncheckedBuffer *key = &json->sb_key;
DataType type = kInvalidDataType;
unsigned short escaped_bytes = 0;
auto reset_type = [&] { type = kInvalidDataType; };
json->depth = depth;
// Since the stack is maintainined on a per request basis, for performance
// reasons, it only makes sense to expand if necessary and cycles are wasted
// contracting. Calls with a depth other than default should be rare.
if (depth > json->stack.size()) {
json->stack.resize(depth);
}
SCOPE_EXIT {
if (json->stack.empty()) return;
for (int i = 0; i <= json->mark; i++) {
json->stack[i].key.reset();
json->stack[i].val.unset();
}
json->mark = -1;
};
json->mark = json->top = -1;
push(json, Mode::DONE);
UTF8To16Decoder decoder(p, length, loose);
for (;;) {
b = decoder.decode();
// Fast-case most common transition: append a simple string character.
if (state == 3 && type == KindOfString) {
while (b != '\"' && b != '\\' && b != '\'' && b <= 127 && b >= ' ') {
buf->append((char)b);
b = decoder.decode();
}
}
if (b == UTF8_END) break; // UTF-8 decoding finishes successfully.
if (b == UTF8_ERROR) {
s_json_parser->error_code = JSON_ERROR_UTF8;
return false;
}
assertx(b >= 0);
if ((b & 127) == b) {
/*<fb>*/
c = byte_class[b];
/*</fb>*/
if (c <= S_ERR) {
s_json_parser->error_code = JSON_ERROR_CTRL_CHAR;
return false;
}
} else {
c = S_ETC;
}
/*
Get the next state from the transition table.
*/
/*<fb>*/
s = next_state_table[state][c];
if (s == -4) {
if (b != qchr) {
s = 3;
} else {
qchr = 0;
}
}
/*</fb>*/
if (s < 0) {
/*
Perform one of the predefined actions.
*/
switch (s) {
/*
empty }
*/
case -9:
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key, assoc, container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::KEY)) {
return false;
}
state = 9;
break;
/*
{
*/
case -8:
if (!push(json, Mode::KEY)) {
s_json_parser->error_code = JSON_ERROR_DEPTH;
return false;
}
state = 1;
if (json->top > 0) {
Variant &top = json->stack[json->top].val;
/*<fb>*/
if (container_type == JSONContainerType::COLLECTIONS) {
// stable_maps is meaningless
top = req::make<c_Map>();
} else {
/*</fb>*/
if (!assoc) {
top = SystemLib::AllocStdClassObject();
/* <fb> */
} else if (container_type == JSONContainerType::HACK_ARRAYS) {
top = Array::CreateDict();
} else if (container_type == JSONContainerType::DARRAYS ||
container_type == JSONContainerType::DARRAYS_AND_VARRAYS)
{
top = Array::CreateDArray();
/* </fb> */
} else if (
container_type == JSONContainerType::LEGACY_HACK_ARRAYS) {
auto arr = staticEmptyDictArray()->copy();
arr->setLegacyArray(true);
top = arr;
} else {
top = Array::CreateDArray();
}
/*<fb>*/
}
/*</fb>*/
json->stack[json->top].key = copy_and_clear(*key);
reset_type();
}
break;
/*
}
*/
case -7:
/*** BEGIN Facebook: json_utf8_loose ***/
/*
If this is a trailing comma in an object definition,
we're in Mode::KEY. In that case, throw that off the
stack and restore Mode::OBJECT so that we pretend the
trailing comma just didn't happen.
*/
if (loose) {
if (pop(json, Mode::KEY)) {
push(json, Mode::OBJECT);
}
}
/*** END Facebook: json_utf8_loose ***/
if (type != kInvalidDataType &&
json->stack[json->top].mode == Mode::OBJECT) {
Variant mval;
json_create_zval(mval, *buf, type, options);
Variant &top = json->stack[json->top].val;
object_set(json, top, copy_and_clear(*key),
mval, assoc, container_type);
buf->clear();
reset_type();
}
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key,
assoc, container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::OBJECT)) {
s_json_parser->error_code = JSON_ERROR_STATE_MISMATCH;
return false;
}
state = 9;
break;
/*
[
*/
case -6:
if (!push(json, Mode::ARRAY)) {
s_json_parser->error_code = JSON_ERROR_DEPTH;
return false;
}
state = 2;
if (json->top > 0) {
Variant &top = json->stack[json->top].val;
/*<fb>*/
if (container_type == JSONContainerType::COLLECTIONS) {
top = req::make<c_Vector>();
} else if (container_type == JSONContainerType::HACK_ARRAYS) {
top = Array::CreateVec();
} else if (container_type == JSONContainerType::DARRAYS_AND_VARRAYS) {
top = Array::CreateVArray();
} else if (container_type == JSONContainerType::DARRAYS) {
top = Array::CreateDArray();
} else if (container_type == JSONContainerType::LEGACY_HACK_ARRAYS) {
auto arr = staticEmptyVecArray()->copy();
arr->setLegacyArray(true);
top = arr;
} else {
top = Array::CreateDArray();
}
/*</fb>*/
json->stack[json->top].key = copy_and_clear(*key);
reset_type();
}
break;
/*
]
*/
case -5:
{
if (type != kInvalidDataType &&
json->stack[json->top].mode == Mode::ARRAY) {
Variant mval;
json_create_zval(mval, *buf, type, options);
auto& top = json->stack[json->top].val;
if (container_type == JSONContainerType::COLLECTIONS) {
collections::append(top.getObjectData(), mval.asTypedValue());
} else {
top.asArrRef().append(mval);
}
buf->clear();
reset_type();
}
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key, assoc,
container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::ARRAY)) {
s_json_parser->error_code = JSON_ERROR_STATE_MISMATCH;
return false;
}
state = 9;
}
break;
/*
"
*/
case -4:
switch (json->stack[json->top].mode) {
case Mode::KEY:
state = 27;
std::swap(buf, key);
reset_type();
break;
case Mode::ARRAY:
case Mode::OBJECT:
state = 9;
break;
case Mode::DONE:
if (type == KindOfString) {
z = copy_and_clear(*buf);
state = 9;
break;
}
/* fall through if not KindOfString */
default:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
break;
/*
,
*/
case -3:
{
Variant mval;
if (type != kInvalidDataType &&
(json->stack[json->top].mode == Mode::OBJECT ||
json->stack[json->top].mode == Mode::ARRAY)) {
json_create_zval(mval, *buf, type, options);
}
switch (json->stack[json->top].mode) {
case Mode::OBJECT:
if (pop(json, Mode::OBJECT) &&
push(json, Mode::KEY)) {
if (type != kInvalidDataType) {
Variant &top = json->stack[json->top].val;
object_set(
json,
top,
copy_and_clear(*key),
mval,
assoc,
container_type
);
}
state = 29;
}
break;
case Mode::ARRAY:
if (type != kInvalidDataType) {
auto& top = json->stack[json->top].val;
if (container_type == JSONContainerType::COLLECTIONS) {
collections::append(top.getObjectData(), mval.asTypedValue());
} else {
top.asArrRef().append(mval);
}
}
state = 28;
break;
default:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
buf->clear();
reset_type();
check_non_safepoint_surprise();
}
break;
/*<fb>*/
/*
: (after unquoted string)
*/
case -10:
if (json->stack[json->top].mode == Mode::KEY) {
state = 27;
std::swap(buf, key);
reset_type();
s = -2;
} else {
s = 3;
break;
}
/*</fb>*/
/*
:
*/
case -2:
if (pop(json, Mode::KEY) && push(json, Mode::OBJECT)) {
state = 28;
break;
}
/*
syntax error
*/
case -1:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
} else {
/*
Change the state and iterate.
*/
bool is_tsimplejson = options & k_JSON_FB_THRIFT_SIMPLE_JSON;
if (type == KindOfString) {
if (/*<fb>*/(/*</fb>*/s == 3/*<fb>*/ || s == 30)/*</fb>*/ &&
state != 8) {
if (state != 4) {
utf16_to_utf8(*buf, b);
} else {
switch (b) {
case 'b': buf->append('\b'); break;
case 't': buf->append('\t'); break;
case 'n': buf->append('\n'); break;
case 'f': buf->append('\f'); break;
case 'r': buf->append('\r'); break;
default:
utf16_to_utf8(*buf, b);
break;
}
}
} else if (s == 6) {
if (UNLIKELY(is_tsimplejson)) {
if (UNLIKELY(b != '0')) {
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
escaped_bytes = 0;
} else {
escaped_bytes = dehexchar(b) << 12;
}
} else if (s == 7) {
if (UNLIKELY(is_tsimplejson)) {
if (UNLIKELY(b != '0')) {
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
} else {
escaped_bytes += dehexchar(b) << 8;
}
} else if (s == 8) {
escaped_bytes += dehexchar(b) << 4;
} else if (s == 3 && state == 8) {
escaped_bytes += dehexchar(b);
if (UNLIKELY(is_tsimplejson)) {
buf->append((char)escaped_bytes);
} else {
utf16_to_utf8(*buf, escaped_bytes);
}
}
} else if ((type == kInvalidDataType || type == KindOfNull) &&
(c == S_DIG || c == S_ZER)) {
type = KindOfInt64;
buf->append((char)b);
} else if (type == KindOfInt64 && s == 24) {
type = KindOfDouble;
buf->append((char)b);
} else if ((type == kInvalidDataType || type == KindOfNull ||
type == KindOfInt64) &&
c == S_DOT) {
type = KindOfDouble;
buf->append((char)b);
} else if (type != KindOfString && c == S_QUO) {
type = KindOfString;
/*<fb>*/qchr = b;/*</fb>*/
} else if ((type == kInvalidDataType || type == KindOfNull ||
type == KindOfInt64 || type == KindOfDouble) &&
((state == 12 && s == 9) ||
(state == 16 && s == 9))) {
type = KindOfBoolean;
} else if (type == kInvalidDataType && state == 19 && s == 9) {
type = KindOfNull;
} else if (type != KindOfString && c > S_WSP) {
utf16_to_utf8(*buf, b);
}
state = s;
}
}
if (state == 9 && pop(json, Mode::DONE)) {
s_json_parser->error_code = JSON_ERROR_NONE;
return true;
}
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.