idx
int64 | func
string | target
int64 |
|---|---|---|
398,534
|
// Release a DWARF line-program file-name cache: free each per-file entry
// (one per hdr->file_names_count), then the cache array itself.
// A NULL cache is a no-op.
RZ_API void rz_bin_dwarf_line_header_free_file_cache(const RzBinDwarfLineHeader *hdr, RzBinDwarfLineFileCache fnc) {
	if (!fnc) {
		return;
	}
	size_t n = hdr->file_names_count;
	size_t i = 0;
	while (i < n) {
		free(fnc[i]);
		i++;
	}
	free(fnc);
}
| 0
|
291,793
|
int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
{
enum rtrs_clt_state old_state;
int err = -EBUSY;
bool changed;
changed = rtrs_clt_change_state_get_old(clt_path,
RTRS_CLT_RECONNECTING,
&old_state);
if (changed) {
clt_path->reconnect_attempts = 0;
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
}
if (changed || old_state == RTRS_CLT_RECONNECTING) {
/*
* flush_delayed_work() queues pending work for immediate
* execution, so do the flush if we have queued something
* right now or work is pending.
*/
flush_delayed_work(&clt_path->reconnect_dwork);
err = (READ_ONCE(clt_path->state) ==
RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
}
return err;
}
| 0
|
512,481
|
/*
  Bitwise AND of the first two arguments, evaluated as unsigned integers.

  NULL-propagating: if an argument evaluates to SQL NULL, null_value is
  set and 0 is returned without evaluating the remaining argument
  (matching the original short-circuit order).
*/
longlong Item_func_bit_and::val_int()
{
  DBUG_ASSERT(fixed == 1);
  ulonglong acc= ~(ulonglong) 0;
  for (int i= 0; i < 2; i++)
  {
    ulonglong v= (ulonglong) args[i]->val_int();
    if (args[i]->null_value)
    {
      null_value=1; /* purecov: inspected */
      return 0; /* purecov: inspected */
    }
    acc&= v;
  }
  null_value=0;
  return (longlong) acc;
}
| 0
|
404,713
|
/*
 * dup3() system call entry point: duplicate oldfd onto newfd, applying
 * flags (e.g. O_CLOEXEC).  Thin wrapper; all work is in ksys_dup3().
 */
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}
| 0
|
196,726
|
/*
 * Array.prototype.sort(comparefn) implementation.
 *
 * Collects the object's defined, non-undefined elements into a temporary
 * "slots" array, sorts the slots with njs_qsort() (user comparator when
 * given), then writes sorted values back, appends the collected
 * undefineds, and (slow path) deletes any remaining trailing indices.
 * On success vm->retval is the object itself; returns NJS_OK/NJS_ERROR.
 */
njs_array_prototype_sort(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
    njs_index_t unused)
{
    int64_t i, und, len, nlen, length;
    njs_int_t ret, fast_path;
    njs_array_t *array;
    njs_value_t *this, *comparefn, *start, *strings;
    njs_array_sort_ctx_t ctx;
    njs_array_sort_slot_t *p, *end, *slots, *nslots;

    /* The comparator, when provided, must be callable. */
    comparefn = njs_arg(args, nargs, 1);

    if (njs_is_defined(comparefn)) {
        if (njs_slow_path(!njs_is_function(comparefn))) {
            njs_type_error(vm, "comparefn must be callable or undefined");
            return NJS_ERROR;
        }

        ctx.function = njs_function(comparefn);

    } else {
        ctx.function = NULL;
    }

    this = njs_argument(args, 0);

    ret = njs_value_to_object(vm, this);
    if (njs_slow_path(ret != NJS_OK)) {
        return ret;
    }

    ret = njs_value_length(vm, this, &length);
    if (njs_slow_path(ret != NJS_OK)) {
        return ret;
    }

    /* Zero- and one-element objects are already sorted. */
    if (njs_slow_path(length < 2)) {
        vm->retval = *this;
        return NJS_OK;
    }

    slots = NULL;
    ctx.vm = vm;
    ctx.strings.separate = 0;
    ctx.strings.pointer = 0;
    ctx.exception = 0;

    fast_path = njs_is_fast_array(this);

    if (njs_fast_path(fast_path)) {
        /* Fast array: copy values straight out of the backing store. */
        array = njs_array(this);
        start = array->start;

        slots = njs_mp_alloc(vm->mem_pool,
                             sizeof(njs_array_sort_slot_t) * length);
        if (njs_slow_path(slots == NULL)) {
            return NJS_ERROR;
        }

        und = 0;
        p = slots;

        for (i = 0; i < length; i++) {
            if (njs_slow_path(!njs_is_valid(&start[i]))) {
                /* Hit a hole: fall back to the generic property walk. */
                fast_path = 0;
                njs_mp_free(vm->mem_pool, slots);
                slots = NULL;
                goto slow_path;
            }

            if (njs_slow_path(njs_is_undefined(&start[i]))) {
                und++;
                continue;
            }

            p->value = start[i];
            p->pos = i;
            p->str = NULL;
            p++;
        }

        len = p - slots;

    } else {
slow_path:
        und = 0;
        p = NULL;
        end = NULL;

        for (i = 0; i < length; i++) {
            if (p >= end) {
                /* Grow the slots array geometrically (minimum 8 slots). */
                nlen = njs_min(njs_max((p - slots) * 2, 8), length);
                nslots = njs_mp_alloc(vm->mem_pool,
                                      sizeof(njs_array_sort_slot_t) * nlen);
                if (njs_slow_path(nslots == NULL)) {
                    njs_memory_error(vm);
                    return NJS_ERROR;
                }

                if (slots != NULL) {
                    p = (void *) njs_cpymem(nslots, slots,
                                  sizeof(njs_array_sort_slot_t) * (p - slots));
                    njs_mp_free(vm->mem_pool, slots);

                } else {
                    p = nslots;
                }

                slots = nslots;
                end = slots + nlen;
            }

            /* May invoke a getter with arbitrary side effects. */
            ret = njs_value_property_i64(vm, this, i, &p->value);
            if (njs_slow_path(ret == NJS_ERROR)) {
                ret = NJS_ERROR;
                goto exception;
            }

            if (ret == NJS_DECLINED) {
                continue;
            }

            if (njs_is_undefined(&p->value)) {
                und++;
                continue;
            }

            p->pos = i;
            p->str = NULL;
            p++;
        }

        len = p - slots;
    }

    /* Scratch strings used by the comparator for string comparison. */
    strings = njs_arr_init(vm->mem_pool, &ctx.strings, NULL, len + 1,
                           sizeof(njs_value_t));
    if (njs_slow_path(strings == NULL)) {
        ret = NJS_ERROR;
        goto exception;
    }

    njs_qsort(slots, len, sizeof(njs_array_sort_slot_t), njs_array_compare,
              &ctx);

    if (ctx.exception) {
        ret = NJS_ERROR;
        goto exception;
    }

    if (njs_fast_path(fast_path)) {
        /*
         * NOTE(review): the backing store is re-read here, but a user
         * comparator run inside njs_qsort() can mutate or shrink the
         * array; confirm start/length are still valid at this point.
         */
        array = njs_array(this);
        start = array->start;

        for (i = 0; i < len; i++) {
            start[i] = slots[i].value;
        }

        /* Undefined elements sort to the end. */
        for (i = len; und-- > 0; i++) {
            start[i] = njs_value_undefined;
        }

    } else {
        for (i = 0; i < len; i++) {
            if (slots[i].pos != i) {
                ret = njs_value_property_i64_set(vm, this, i, &slots[i].value);
                if (njs_slow_path(ret == NJS_ERROR)) {
                    goto exception;
                }
            }
        }

        /* Write back the collected undefineds... */
        for (i = len; und-- > 0; i++) {
            ret = njs_value_property_i64_set(vm, this, i,
                                          njs_value_arg(&njs_value_undefined));
            if (njs_slow_path(ret == NJS_ERROR)) {
                goto exception;
            }
        }

        /* ...and delete whatever trailing indices remain (former holes). */
        for (; i < length; i++) {
            ret = njs_value_property_i64_delete(vm, this, i, NULL);
            if (njs_slow_path(ret == NJS_ERROR)) {
                goto exception;
            }
        }
    }

    vm->retval = *this;

    ret = NJS_OK;

exception:

    if (slots != NULL) {
        njs_mp_free(vm->mem_pool, slots);
    }

    njs_arr_destroy(&ctx.strings);

    return ret;
}
| 1
|
233,851
|
// Handle an embedded Apple property list slice.  By default it is decoded
// with the "plist" module; when the "extractplist" option is set, it is
// extracted to an auxiliary output file instead (named via fi when given,
// otherwise "plist").
void fmtutil_handle_plist(deark *c, dbuf *f, i64 pos, i64 len,
	de_finfo *fi, unsigned int flags)
{
	if(!de_get_ext_option_bool(c, "extractplist", 0)) {
		de_run_module_by_id_on_slice(c, "plist", NULL, f, pos, len);
		return;
	}
	dbuf_create_file_from_slice(f, pos, len,
		fi?NULL:"plist", fi, DE_CREATEFLAG_IS_AUX);
}
| 0
|
473,833
|
/* Length of the US-ASCII character at p: one byte when the high bit is
 * clear, otherwise an invalid-sequence marker. */
us_ascii_mbc_enc_len(const UChar* p, const UChar* e, OnigEncoding enc)
{
  return (*p & 0x80) ? ONIGENC_CONSTRUCT_MBCLEN_INVALID()
                     : ONIGENC_CONSTRUCT_MBCLEN_CHARFOUND(1);
}
| 0
|
244,248
|
/* Parse an 'hnti' (hint info) box: it is a pure container, so simply
 * read its child boxes. */
GF_Err hnti_box_read(GF_Box *s, GF_BitStream *bs)
{
	return gf_isom_box_array_read(s, bs);
}
| 0
|
446,410
|
// Free a v1 dyld rebase-info record together with its owned toc and
// entries buffers.  NULL is accepted and ignored.
static void rebase_info1_free(RzDyldRebaseInfo1 *rebase_info) {
	if (rebase_info) {
		free(rebase_info->toc);
		free(rebase_info->entries);
		free(rebase_info);
	}
}
| 0
|
459,029
|
/*
 * Decide whether header index u of fm is filtered for mode bits `how`.
 * Headers explicitly marked HDF_FILTER always are; pseudo-headers below
 * HTTP_HDR_FIRST never are; otherwise the header name (text up to the
 * colon) is looked up in the flag table and its flags tested.
 */
http_isfiltered(const struct http *fm, unsigned u, unsigned how)
{
	const char *colon;
	const struct http_hdrflg *flg;

	if (fm->hdf[u] & HDF_FILTER)
		return (1);
	if (u < HTTP_HDR_FIRST)
		return (0);
	colon = strchr(fm->hd[u].b, ':');
	if (colon == NULL)
		return (0);
	flg = http_hdr_flags(fm->hd[u].b, colon);
	if (flg == NULL)
		return (0);
	return ((flg->flag & how) != 0);
}
| 0
|
273,932
|
/* FTP MLSD command handler: machine-readable directory listing of `arg`
 * (mode 3 selects the MLSD output format in the shared list() helper). */
static void handle_MLSD(ctrl_t *ctrl, char *arg)
{
	list(ctrl, arg, 3);
}
| 0
|
273,106
|
/*
 * Emulation of POSIX clock_gettime() on top of Mach clock services.
 * Supports CLOCK_REALTIME (calendar clock) and CLOCK_MONOTONIC.
 * Returns 0 on success, -1 for a NULL tp or unknown clock id, otherwise
 * the non-zero Mach error from clock_get_time().
 */
clock_gettime(clockid_t clock_id, struct timespec *tp)
{
    static int clock_init = 0;
    static clock_serv_t clock;
    mach_timespec_t mts;
    int ret;

    /* One-time lookup of the calendar clock service.
     * NOTE(review): not thread-safe (unsynchronized static init), and
     * clock_init is set before the service call is known to succeed. */
    if (! clock_init) {
        clock_init = 1;
        if (host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &clock))
            abort(); /* unlikely */
    }

    if(! tp)
        return -1;

    switch (clock_id) {
    case CLOCK_REALTIME:
        /* query mach for calendar time */
        ret = clock_get_time(clock, &mts);
        if (! ret) {
            tp->tv_sec = mts.tv_sec;
            tp->tv_nsec = mts.tv_nsec;
        }
        break;
    case CLOCK_MONOTONIC:
        /* query mach for monotinic time */
        /* NOTE(review): `clock_port` is defined elsewhere -- presumably
         * the SYSTEM_CLOCK service; confirm it is initialized before
         * this path can run. */
        ret = clock_get_time(clock_port, &mts);
        if (! ret) {
            tp->tv_sec = mts.tv_sec;
            tp->tv_nsec = mts.tv_nsec;
        }
        break;
    default:
        ret = -1;
        break;
    }
    return ret;
}
| 0
|
333,086
|
/*
 * Parse the lowest level expression -- an atom -- and emit its postfix
 * NFA code with EMIT().  An atom is an ordinary character, a character
 * class, a collection "[...]", a "\%..." item, a back reference, etc.
 *
 * Returns OK on success, FAIL on error (an error message has then been
 * given, or rc_did_emsg set).
 *
 * Fix: the three get_char_class()/get_equi_class()/get_coll_element()
 * calls below had their "&regparse" argument mangled by an HTML-entity
 * round-trip ("&reg" decoded to the registered-trademark sign), which
 * does not compile; the correct "&regparse" is restored.
 */
nfa_regatom(void)
{
    int		c;
    int		charclass;
    int		equiclass;
    int		collclass;
    int		got_coll_char;
    char_u	*p;
    char_u	*endp;
    char_u	*old_regparse = regparse;
    int		extra = 0;
    int		emit_range;
    int		negated;
    int		result;
    int		startc = -1;
    int		save_prev_at_start = prev_at_start;

    c = getchr();
    switch (c)
    {
	case NUL:
	    EMSG_RET_FAIL(_(e_nul_found));

	case Magic('^'):
	    EMIT(NFA_BOL);
	    break;

	case Magic('$'):
	    EMIT(NFA_EOL);
#if defined(FEAT_SYN_HL) || defined(PROTO)
	    had_eol = TRUE;
#endif
	    break;

	case Magic('<'):
	    EMIT(NFA_BOW);
	    break;

	case Magic('>'):
	    EMIT(NFA_EOW);
	    break;

	case Magic('_'):
	    c = no_Magic(getchr());
	    if (c == NUL)
		EMSG_RET_FAIL(_(e_nul_found));

	    if (c == '^')	// "\_^" is start-of-line
	    {
		EMIT(NFA_BOL);
		break;
	    }
	    if (c == '$')	// "\_$" is end-of-line
	    {
		EMIT(NFA_EOL);
#if defined(FEAT_SYN_HL) || defined(PROTO)
		had_eol = TRUE;
#endif
		break;
	    }

	    extra = NFA_ADD_NL;

	    // "\_[" is collection plus newline
	    if (c == '[')
		goto collection;

	    // "\_x" is character class plus newline
	    // FALLTHROUGH

	/*
	 * Character classes.
	 */
	case Magic('.'):
	case Magic('i'):
	case Magic('I'):
	case Magic('k'):
	case Magic('K'):
	case Magic('f'):
	case Magic('F'):
	case Magic('p'):
	case Magic('P'):
	case Magic('s'):
	case Magic('S'):
	case Magic('d'):
	case Magic('D'):
	case Magic('x'):
	case Magic('X'):
	case Magic('o'):
	case Magic('O'):
	case Magic('w'):
	case Magic('W'):
	case Magic('h'):
	case Magic('H'):
	case Magic('a'):
	case Magic('A'):
	case Magic('l'):
	case Magic('L'):
	case Magic('u'):
	case Magic('U'):
	    p = vim_strchr(classchars, no_Magic(c));
	    if (p == NULL)
	    {
		if (extra == NFA_ADD_NL)
		{
		    semsg(_(e_ill_char_class), c);
		    rc_did_emsg = TRUE;
		    return FAIL;
		}
		siemsg("INTERNAL: Unknown character class char: %d", c);
		return FAIL;
	    }

	    // When '.' is followed by a composing char ignore the dot, so that
	    // the composing char is matched here.
	    if (enc_utf8 && c == Magic('.') && utf_iscomposing(peekchr()))
	    {
		old_regparse = regparse;
		c = getchr();
		goto nfa_do_multibyte;
	    }
	    EMIT(nfa_classcodes[p - classchars]);
	    if (extra == NFA_ADD_NL)
	    {
		EMIT(NFA_NEWL);
		EMIT(NFA_OR);
		regflags |= RF_HASNL;
	    }
	    break;

	case Magic('n'):
	    if (reg_string)
		// In a string "\n" matches a newline character.
		EMIT(NL);
	    else
	    {
		// In buffer text "\n" matches the end of a line.
		EMIT(NFA_NEWL);
		regflags |= RF_HASNL;
	    }
	    break;

	case Magic('('):
	    if (nfa_reg(REG_PAREN) == FAIL)
		return FAIL;	    // cascaded error
	    break;

	case Magic('|'):
	case Magic('&'):
	case Magic(')'):
	    semsg(_(e_misplaced), no_Magic(c));
	    return FAIL;

	case Magic('='):
	case Magic('?'):
	case Magic('+'):
	case Magic('@'):
	case Magic('*'):
	case Magic('{'):
	    // these should follow an atom, not form an atom
	    semsg(_(e_misplaced), no_Magic(c));
	    return FAIL;

	case Magic('~'):
	    {
		char_u	*lp;

		// Previous substitute pattern.
		// Generated as "\%(pattern\)".
		if (reg_prev_sub == NULL)
		{
		    emsg(_(e_no_previous_substitute_regular_expression));
		    return FAIL;
		}
		for (lp = reg_prev_sub; *lp != NUL; MB_CPTR_ADV(lp))
		{
		    EMIT(PTR2CHAR(lp));
		    if (lp != reg_prev_sub)
			EMIT(NFA_CONCAT);
		}
		EMIT(NFA_NOPEN);
		break;
	    }

	case Magic('1'):
	case Magic('2'):
	case Magic('3'):
	case Magic('4'):
	case Magic('5'):
	case Magic('6'):
	case Magic('7'):
	case Magic('8'):
	case Magic('9'):
	    {
		int refnum = no_Magic(c) - '1';

		if (!seen_endbrace(refnum + 1))
		    return FAIL;
		EMIT(NFA_BACKREF1 + refnum);
		rex.nfa_has_backref = TRUE;
	    }
	    break;

	case Magic('z'):
	    c = no_Magic(getchr());
	    switch (c)
	    {
		case 's':
		    EMIT(NFA_ZSTART);
		    if (re_mult_next("\\zs") == FAIL)
			return FAIL;
		    break;
		case 'e':
		    EMIT(NFA_ZEND);
		    rex.nfa_has_zend = TRUE;
		    if (re_mult_next("\\ze") == FAIL)
			return FAIL;
		    break;
#ifdef FEAT_SYN_HL
		case '1':
		case '2':
		case '3':
		case '4':
		case '5':
		case '6':
		case '7':
		case '8':
		case '9':
		    // \z1...\z9
		    if ((reg_do_extmatch & REX_USE) == 0)
			EMSG_RET_FAIL(_(e_z1_not_allowed));
		    EMIT(NFA_ZREF1 + (no_Magic(c) - '1'));
		    // No need to set rex.nfa_has_backref, the sub-matches don't
		    // change when \z1 .. \z9 matches or not.
		    re_has_z = REX_USE;
		    break;
		case '(':
		    // \z(
		    if ((reg_do_extmatch & REX_SET) == 0)
			EMSG_RET_FAIL(_(e_z_not_allowed));
		    if (nfa_reg(REG_ZPAREN) == FAIL)
			return FAIL;	    // cascaded error
		    re_has_z = REX_SET;
		    break;
#endif
		default:
		    semsg(_("E867: (NFA) Unknown operator '\\z%c'"),
								 no_Magic(c));
		    return FAIL;
	    }
	    break;

	case Magic('%'):
	    c = no_Magic(getchr());
	    switch (c)
	    {
		// () without a back reference
		case '(':
		    if (nfa_reg(REG_NPAREN) == FAIL)
			return FAIL;
		    EMIT(NFA_NOPEN);
		    break;

		case 'd':   // %d123 decimal
		case 'o':   // %o123 octal
		case 'x':   // %xab hex 2
		case 'u':   // %uabcd hex 4
		case 'U':   // %U1234abcd hex 8
		    {
			long nr;

			switch (c)
			{
			    case 'd': nr = getdecchrs(); break;
			    case 'o': nr = getoctchrs(); break;
			    case 'x': nr = gethexchrs(2); break;
			    case 'u': nr = gethexchrs(4); break;
			    case 'U': nr = gethexchrs(8); break;
			    default:  nr = -1; break;
			}

			if (nr < 0 || nr > INT_MAX)
			    EMSG2_RET_FAIL(
			       _("E678: Invalid character after %s%%[dxouU]"),
				    reg_magic == MAGIC_ALL);
			// A NUL is stored in the text as NL
			// TODO: what if a composing character follows?
			EMIT(nr == 0 ? 0x0a : nr);
		    }
		    break;

		// Catch \%^ and \%$ regardless of where they appear in the
		// pattern -- regardless of whether or not it makes sense.
		case '^':
		    EMIT(NFA_BOF);
		    break;

		case '$':
		    EMIT(NFA_EOF);
		    break;

		case '#':
		    EMIT(NFA_CURSOR);
		    break;

		case 'V':
		    EMIT(NFA_VISUAL);
		    break;

		case 'C':
		    EMIT(NFA_ANY_COMPOSING);
		    break;

		case '[':
		    {
			int	    n;

			// \%[abc]
			for (n = 0; (c = peekchr()) != ']'; ++n)
			{
			    if (c == NUL)
				EMSG2_RET_FAIL(_(e_missing_sb),
						      reg_magic == MAGIC_ALL);
			    // recursive call!
			    if (nfa_regatom() == FAIL)
				return FAIL;
			}
			getchr();	    // get the ]
			if (n == 0)
			    EMSG2_RET_FAIL(_(e_empty_sb),
						      reg_magic == MAGIC_ALL);
			EMIT(NFA_OPT_CHARS);
			EMIT(n);

			// Emit as "\%(\%[abc]\)" to be able to handle
			// "\%[abc]*" which would cause the empty string to be
			// matched an unlimited number of times. NFA_NOPEN is
			// added only once at a position, while NFA_SPLIT is
			// added multiple times.  This is more efficient than
			// not allowing NFA_SPLIT multiple times, it is used
			// a lot.
			EMIT(NFA_NOPEN);
			break;
		    }

		default:
		    {
			long_u	n = 0;
			int	cmp = c;
			int	cur = FALSE;

			if (c == '<' || c == '>')
			    c = getchr();
			if (no_Magic(c) == '.')
			{
			    cur = TRUE;
			    c = getchr();
			}
			while (VIM_ISDIGIT(c))
			{
			    long_u tmp;

			    if (cur)
				semsg(_(e_regexp_number_after_dot_pos_search),
								 no_Magic(c));
			    tmp = n * 10 + (c - '0');

			    if (tmp < n)
			    {
				// overflow.
				emsg(_(e_value_too_large));
				return FAIL;
			    }
			    n = tmp;
			    c = getchr();
			}
			if (c == 'l' || c == 'c' || c == 'v')
			{
			    long_u limit = INT_MAX;

			    if (c == 'l')
			    {
				if (cur)
				    n = curwin->w_cursor.lnum;
				// \%{n}l  \%{n}<l  \%{n}>l
				EMIT(cmp == '<' ? NFA_LNUM_LT :
				     cmp == '>' ? NFA_LNUM_GT : NFA_LNUM);
				if (save_prev_at_start)
				    at_start = TRUE;
			    }
			    else if (c == 'c')
			    {
				if (cur)
				{
				    n = curwin->w_cursor.col;
				    n++;
				}
				// \%{n}c  \%{n}<c  \%{n}>c
				EMIT(cmp == '<' ? NFA_COL_LT :
				     cmp == '>' ? NFA_COL_GT : NFA_COL);
			    }
			    else
			    {
				if (cur)
				{
				    colnr_T vcol = 0;

				    getvvcol(curwin, &curwin->w_cursor,
							    NULL, NULL, &vcol);
				    n = ++vcol;
				}
				// \%{n}v  \%{n}<v  \%{n}>v
				EMIT(cmp == '<' ? NFA_VCOL_LT :
				     cmp == '>' ? NFA_VCOL_GT : NFA_VCOL);
				limit = INT_MAX / MB_MAXBYTES;
			    }
			    if (n >= limit)
			    {
				emsg(_(e_value_too_large));
				return FAIL;
			    }
			    EMIT((int)n);
			    break;
			}
			else if (c == '\'' && n == 0)
			{
			    // \%'m  \%<'m  \%>'m
			    EMIT(cmp == '<' ? NFA_MARK_LT :
				 cmp == '>' ? NFA_MARK_GT : NFA_MARK);
			    EMIT(getchr());
			    break;
			}
		    }
		    semsg(_("E867: (NFA) Unknown operator '\\%%%c'"),
								 no_Magic(c));
		    return FAIL;
	    }
	    break;

	case Magic('['):
collection:
	    /*
	     * [abc]  uses NFA_START_COLL - NFA_END_COLL
	     * [^abc] uses NFA_START_NEG_COLL - NFA_END_NEG_COLL
	     * Each character is produced as a regular state, using
	     * NFA_CONCAT to bind them together.
	     * Besides normal characters there can be:
	     * - character classes  NFA_CLASS_*
	     * - ranges, two characters followed by NFA_RANGE.
	     */
	    p = regparse;
	    endp = skip_anyof(p);
	    if (*endp == ']')
	    {
		/*
		 * Try to reverse engineer character classes. For example,
		 * recognize that [0-9] stands for \d and [A-Za-z_] for \h,
		 * and perform the necessary substitutions in the NFA.
		 */
		result = nfa_recognize_char_class(regparse, endp,
							 extra == NFA_ADD_NL);
		if (result != FAIL)
		{
		    if (result >= NFA_FIRST_NL && result <= NFA_LAST_NL)
		    {
			EMIT(result - NFA_ADD_NL);
			EMIT(NFA_NEWL);
			EMIT(NFA_OR);
		    }
		    else
			EMIT(result);
		    regparse = endp;
		    MB_PTR_ADV(regparse);
		    return OK;
		}
		/*
		 * Failed to recognize a character class. Use the simple
		 * version that turns [abc] into 'a' OR 'b' OR 'c'
		 */
		startc = -1;
		negated = FALSE;
		if (*regparse == '^')			// negated range
		{
		    negated = TRUE;
		    MB_PTR_ADV(regparse);
		    EMIT(NFA_START_NEG_COLL);
		}
		else
		    EMIT(NFA_START_COLL);
		if (*regparse == '-')
		{
		    startc = '-';
		    EMIT(startc);
		    EMIT(NFA_CONCAT);
		    MB_PTR_ADV(regparse);
		}
		// Emit the OR branches for each character in the []
		emit_range = FALSE;
		while (regparse < endp)
		{
		    int	oldstartc = startc;

		    startc = -1;
		    got_coll_char = FALSE;
		    if (*regparse == '[')
		    {
			// Check for [: :], [= =], [. .]
			equiclass = collclass = 0;
			charclass = get_char_class(&regparse);
			if (charclass == CLASS_NONE)
			{
			    equiclass = get_equi_class(&regparse);
			    if (equiclass == 0)
				collclass = get_coll_element(&regparse);
			}

			// Character class like [:alpha:]
			if (charclass != CLASS_NONE)
			{
			    switch (charclass)
			    {
				case CLASS_ALNUM:
				    EMIT(NFA_CLASS_ALNUM);
				    break;
				case CLASS_ALPHA:
				    EMIT(NFA_CLASS_ALPHA);
				    break;
				case CLASS_BLANK:
				    EMIT(NFA_CLASS_BLANK);
				    break;
				case CLASS_CNTRL:
				    EMIT(NFA_CLASS_CNTRL);
				    break;
				case CLASS_DIGIT:
				    EMIT(NFA_CLASS_DIGIT);
				    break;
				case CLASS_GRAPH:
				    EMIT(NFA_CLASS_GRAPH);
				    break;
				case CLASS_LOWER:
				    wants_nfa = TRUE;
				    EMIT(NFA_CLASS_LOWER);
				    break;
				case CLASS_PRINT:
				    EMIT(NFA_CLASS_PRINT);
				    break;
				case CLASS_PUNCT:
				    EMIT(NFA_CLASS_PUNCT);
				    break;
				case CLASS_SPACE:
				    EMIT(NFA_CLASS_SPACE);
				    break;
				case CLASS_UPPER:
				    wants_nfa = TRUE;
				    EMIT(NFA_CLASS_UPPER);
				    break;
				case CLASS_XDIGIT:
				    EMIT(NFA_CLASS_XDIGIT);
				    break;
				case CLASS_TAB:
				    EMIT(NFA_CLASS_TAB);
				    break;
				case CLASS_RETURN:
				    EMIT(NFA_CLASS_RETURN);
				    break;
				case CLASS_BACKSPACE:
				    EMIT(NFA_CLASS_BACKSPACE);
				    break;
				case CLASS_ESCAPE:
				    EMIT(NFA_CLASS_ESCAPE);
				    break;
				case CLASS_IDENT:
				    EMIT(NFA_CLASS_IDENT);
				    break;
				case CLASS_KEYWORD:
				    EMIT(NFA_CLASS_KEYWORD);
				    break;
				case CLASS_FNAME:
				    EMIT(NFA_CLASS_FNAME);
				    break;
			    }
			    EMIT(NFA_CONCAT);
			    continue;
			}
			// Try equivalence class [=a=] and the like
			if (equiclass != 0)
			{
			    result = nfa_emit_equi_class(equiclass);
			    if (result == FAIL)
			    {
				// should never happen
				EMSG_RET_FAIL(_("E868: Error building NFA with equivalence class!"));
			    }
			    continue;
			}
			// Try collating class like [. .]
			if (collclass != 0)
			{
			    startc = collclass;	 // allow [.a.]-x as a range
			    // Will emit the proper atom at the end of the
			    // while loop.
			}
		    }
		    // Try a range like 'a-x' or '\t-z'. Also allows '-' as a
		    // start character.
		    if (*regparse == '-' && oldstartc != -1)
		    {
			emit_range = TRUE;
			startc = oldstartc;
			MB_PTR_ADV(regparse);
			continue;	    // reading the end of the range
		    }

		    // Now handle simple and escaped characters.
		    // Only "\]", "\^", "\]" and "\\" are special in Vi.  Vim
		    // accepts "\t", "\e", etc., but only when the 'l' flag in
		    // 'cpoptions' is not included.
		    // Posix doesn't recognize backslash at all.
		    if (*regparse == '\\'
			    && !reg_cpo_bsl
			    && regparse + 1 <= endp
			    && (vim_strchr(REGEXP_INRANGE, regparse[1]) != NULL
				|| (!reg_cpo_lit
				    && vim_strchr(REGEXP_ABBR, regparse[1])
								      != NULL)
			    )
			)
		    {
			MB_PTR_ADV(regparse);

			if (*regparse == 'n')
			    startc = (reg_string || emit_range
					|| regparse[1] == '-') ? NL : NFA_NEWL;
			else if (*regparse == 'd'
				    || *regparse == 'o'
				    || *regparse == 'x'
				    || *regparse == 'u'
				    || *regparse == 'U'
				)
			{
			    // TODO(RE) This needs more testing
			    startc = coll_get_char();
			    got_coll_char = TRUE;
			    MB_PTR_BACK(old_regparse, regparse);
			}
			else
			{
			    // \r,\t,\e,\b
			    startc = backslash_trans(*regparse);
			}
		    }

		    // Normal printable char
		    if (startc == -1)
			startc = PTR2CHAR(regparse);

		    // Previous char was '-', so this char is end of range.
		    if (emit_range)
		    {
			int	endc = startc;

			startc = oldstartc;
			if (startc > endc)
			    EMSG_RET_FAIL(_(e_reverse_range));

			if (endc > startc + 2)
			{
			    // Emit a range instead of the sequence of
			    // individual characters.
			    if (startc == 0)
				// \x00 is translated to \x0a, start at \x01.
				EMIT(1);
			    else
				--post_ptr;	    // remove NFA_CONCAT
			    EMIT(endc);
			    EMIT(NFA_RANGE);
			    EMIT(NFA_CONCAT);
			}
			else if (has_mbyte && ((*mb_char2len)(startc) > 1
				    || (*mb_char2len)(endc) > 1))
			{
			    // Emit the characters in the range.
			    // "startc" was already emitted, so skip it.
			    //
			    for (c = startc + 1; c <= endc; c++)
			    {
				EMIT(c);
				EMIT(NFA_CONCAT);
			    }
			}
			else
			{
#ifdef EBCDIC
			    int	alpha_only = FALSE;

			    // for alphabetical range skip the gaps
			    // 'i'-'j', 'r'-'s', 'I'-'J' and 'R'-'S'.
			    if (isalpha(startc) && isalpha(endc))
				alpha_only = TRUE;
#endif
			    // Emit the range. "startc" was already emitted, so
			    // skip it.
			    for (c = startc + 1; c <= endc; c++)
#ifdef EBCDIC
				if (!alpha_only || isalpha(startc))
#endif
				{
				    EMIT(c);
				    EMIT(NFA_CONCAT);
				}
			}
			emit_range = FALSE;
			startc = -1;
		    }
		    else
		    {
			// This char (startc) is not part of a range. Just
			// emit it.
			// Normally, simply emit startc. But if we get char
			// code=0 from a collating char, then replace it with
			// 0x0a.
			// This is needed to completely mimic the behaviour of
			// the backtracking engine.
			if (startc == NFA_NEWL)
			{
			    // Line break can't be matched as part of the
			    // collection, add an OR below. But not for negated
			    // range.
			    if (!negated)
				extra = NFA_ADD_NL;
			}
			else
			{
			    if (got_coll_char == TRUE && startc == 0)
				EMIT(0x0a);
			    else
				EMIT(startc);
			    EMIT(NFA_CONCAT);
			}
		    }

		    MB_PTR_ADV(regparse);
		} // while (p < endp)

		MB_PTR_BACK(old_regparse, regparse);
		if (*regparse == '-')	    // if last, '-' is just a char
		{
		    EMIT('-');
		    EMIT(NFA_CONCAT);
		}

		// skip the trailing ]
		regparse = endp;
		MB_PTR_ADV(regparse);

		// Mark end of the collection.
		if (negated == TRUE)
		    EMIT(NFA_END_NEG_COLL);
		else
		    EMIT(NFA_END_COLL);

		// \_[] also matches \n but it's not negated
		if (extra == NFA_ADD_NL)
		{
		    EMIT(reg_string ? NL : NFA_NEWL);
		    EMIT(NFA_OR);
		}

		return OK;
	    } // if exists closing ]

	    if (reg_strict)
		EMSG_RET_FAIL(_(e_missingbracket));
	    // FALLTHROUGH

	default:
	    {
		int	plen;

nfa_do_multibyte:
		// plen is length of current char with composing chars
		if (enc_utf8 && ((*mb_char2len)(c)
			    != (plen = utfc_ptr2len(old_regparse))
						       || utf_iscomposing(c)))
		{
		    int i = 0;

		    // A base character plus composing characters, or just one
		    // or more composing characters.
		    // This requires creating a separate atom as if enclosing
		    // the characters in (), where NFA_COMPOSING is the ( and
		    // NFA_END_COMPOSING is the ). Note that right now we are
		    // building the postfix form, not the NFA itself;
		    // a composing char could be: a, b, c, NFA_COMPOSING
		    // where 'b' and 'c' are chars with codes > 256.
		    for (;;)
		    {
			EMIT(c);
			if (i > 0)
			    EMIT(NFA_CONCAT);
			if ((i += utf_char2len(c)) >= plen)
			    break;
			c = utf_ptr2char(old_regparse + i);
		    }
		    EMIT(NFA_COMPOSING);
		    regparse = old_regparse + plen;
		}
		else
		{
		    c = no_Magic(c);
		    EMIT(c);
		}
		return OK;
	    }
    }

    return OK;
}
| 0
|
387,581
|
/*
 * ioctl glue for listing control elements: copy the request structure in
 * from user space, run snd_ctl_elem_list(), and copy the updated
 * structure back.  Returns 0 on success, a negative error from
 * snd_ctl_elem_list(), or -EFAULT on a bad user pointer.
 */
static int snd_ctl_elem_list_user(struct snd_card *card,
				  struct snd_ctl_elem_list __user *_list)
{
	struct snd_ctl_elem_list list;
	int err;

	if (copy_from_user(&list, _list, sizeof(list)))
		return -EFAULT;
	err = snd_ctl_elem_list(card, &list);
	if (!err && copy_to_user(_list, &list, sizeof(list)))
		err = -EFAULT;
	return err;
}
| 0
|
292,218
|
/*
 * Handle an IRCv3 "CAP ACK" reply: record on `serv` which capabilities
 * the server accepted and, when SASL was acked, immediately begin
 * authentication with the appropriate mechanism.
 * Note: matching is substring-based (strstr) over the raw extensions
 * list; flags are only ever set here, never cleared.
 */
inbound_cap_ack (server *serv, char *nick, char *extensions,
					  const message_tags_data *tags_data)
{
	EMIT_SIGNAL_TIMESTAMP (XP_TE_CAPACK, serv->server_session, nick, extensions,
								  NULL, NULL, 0, tags_data->timestamp);

	if (strstr (extensions, "identify-msg") != NULL)
	{
		serv->have_idmsg = TRUE;
	}

	if (strstr (extensions, "multi-prefix") != NULL)
	{
		serv->have_namesx = TRUE;
	}

	if (strstr (extensions, "away-notify") != NULL)
	{
		serv->have_awaynotify = TRUE;
	}

	if (strstr (extensions, "account-notify") != NULL)
	{
		serv->have_accnotify = TRUE;
	}

	if (strstr (extensions, "extended-join") != NULL)
	{
		serv->have_extjoin = TRUE;
	}

	if (strstr (extensions, "userhost-in-names") != NULL)
	{
		serv->have_uhnames = TRUE;
	}

	if (strstr (extensions, "server-time") != NULL)
	{
		serv->have_server_time = TRUE;
	}

	if (strstr (extensions, "sasl") != NULL)
	{
		serv->have_sasl = TRUE;
		serv->sent_saslauth = FALSE;

#ifdef USE_OPENSSL
		if (serv->loginmethod == LOGIN_SASLEXTERNAL)
		{
			serv->sasl_mech = MECH_EXTERNAL;
			tcp_send_len (serv, "AUTHENTICATE EXTERNAL\r\n", 23);
		}
		else
		{
			/* default to most secure, it will fallback if not supported */
			serv->sasl_mech = MECH_AES;
			tcp_send_len (serv, "AUTHENTICATE DH-AES\r\n", 21);
		}
#else
		serv->sasl_mech = MECH_PLAIN;
		tcp_send_len (serv, "AUTHENTICATE PLAIN\r\n", 20);
#endif
	}
}
| 0
|
235,251
|
/*
 * Verify that buf[0..len) equals the pseudo-random byte stream produced
 * by srandom(seed) followed by successive random() calls.  On the first
 * mismatch the torture test is failed with the offending offset/values
 * and false is returned; otherwise true.
 */
static bool check_buffer(struct torture_context *tctx,
	uint8_t *buf, unsigned int seed, int len, const char *location)
{
	int ofs;

	srandom(seed);
	for (ofs = 0; ofs < len; ofs++) {
		uint8_t expected = random();

		if (buf[ofs] == expected) {
			continue;
		}
		torture_fail(tctx, talloc_asprintf(tctx, "Buffer incorrect at %s! ofs=%d buf=0x%x correct=0x%x\n",
						   location, ofs, buf[ofs], expected));
		return false;
	}
	return true;
}
| 0
|
391,670
|
/*
 * Turn the client-requested access mask into the concrete mask to use:
 * map GENERIC_* bits to specific rights, expand MAXIMUM_ALLOWED_ACCESS,
 * and reject any bits not permitted by the share's access mask.
 * Returns NT_STATUS_OK with *access_mask_out set, or an error status
 * (NT_STATUS_ACCESS_DENIED when share access forbids requested bits).
 */
NTSTATUS smbd_calculate_access_mask(connection_struct *conn,
			const struct smb_filename *smb_fname,
			bool use_privs,
			uint32_t access_mask,
			uint32_t *access_mask_out)
{
	NTSTATUS status;
	uint32_t requested = access_mask;
	uint32_t denied_bits;

	/*
	 * Convert GENERIC bits to specific bits.
	 */
	se_map_generic(&access_mask, &file_generic_mapping);

	/* Calculate MAXIMUM_ALLOWED_ACCESS if requested. */
	if (access_mask & MAXIMUM_ALLOWED_ACCESS) {
		status = smbd_calculate_maximum_allowed_access(
			conn, smb_fname, use_privs, &access_mask);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
		access_mask &= conn->share_access;
	}

	denied_bits = access_mask & ~(conn->share_access);
	if (denied_bits != 0) {
		DEBUG(10, ("smbd_calculate_access_mask: Access denied on "
			"file %s: rejected by share access mask[0x%08X] "
			"orig[0x%08X] mapped[0x%08X] reject[0x%08X]\n",
			smb_fname_str_dbg(smb_fname),
			conn->share_access,
			requested, access_mask,
			denied_bits));
		return NT_STATUS_ACCESS_DENIED;
	}

	*access_mask_out = access_mask;
	return NT_STATUS_OK;
}
| 0
|
233,838
|
/*
 * Read a Macintosh (QuickDraw) ColorTable at `pos` in `f` into bi->pal.
 * Each entry is 8 bytes: a 16-bit palette index followed by 16-bit
 * R, G, B components (high byte used).  Sets bi->num_pal_entries and
 * *bytes_used (header + entries).  Always returns 1.
 */
int fmtutil_macbitmap_read_colortable(deark *c, dbuf *f,
	struct fmtutil_macbitmap_info *bi, i64 pos, i64 *bytes_used)
{
	i64 ct_id;
	u32 ct_flags;
	i64 ct_size;
	i64 k, z;
	u32 s[4];
	u8 cr, cg, cb;
	u32 clr;
	char tmps[64];

	*bytes_used = 0;
	de_dbg(c, "color table at %"I64_FMT, pos);
	de_dbg_indent(c, 1);
	ct_id = dbuf_getu32be(f, pos);
	ct_flags = (u32)dbuf_getu16be(f, pos+4); // a.k.a. transIndex
	ct_size = dbuf_getu16be(f, pos+6);
	// ctSize field is the number of entries minus one.
	bi->num_pal_entries = ct_size+1;
	de_dbg(c, "color table id=0x%08x, flags=0x%04x, colors=%d", (unsigned int)ct_id,
		(unsigned int)ct_flags, (int)bi->num_pal_entries);

	for(k=0; k<bi->num_pal_entries; k++) {
		// s[0]=index, s[1..3]=16-bit R,G,B
		for(z=0; z<4; z++) {
			s[z] = (u32)dbuf_getu16be(f, pos+8+8*k+2*z);
		}
		cr = (u8)(s[1]>>8);
		cg = (u8)(s[2]>>8);
		cb = (u8)(s[3]>>8);
		clr = DE_MAKE_RGB(cr,cg,cb);
		de_snprintf(tmps, sizeof(tmps), "(%5d,%5d,%5d,idx=%3d) "DE_CHAR_RIGHTARROW" ",
			(int)s[1], (int)s[2], (int)s[3], (int)s[0]);
		de_dbg_pal_entry2(c, k, clr, tmps, NULL, NULL);

		// Some files don't have the palette indices set. Most PICT decoders ignore
		// the indices if the "device" flag of ct_flags is set, and that seems to
		// work (though it's not clearly documented).
		if(ct_flags & 0x8000U) {
			s[0] = (u32)k;
		}
		// Guard against out-of-range indices writing past pal[].
		if(s[0]<=255) {
			bi->pal[s[0]] = clr;
		}
	}
	de_dbg_indent(c, -1);
	*bytes_used = 8 + 8*bi->num_pal_entries;
	return 1;
}
| 0
|
338,238
|
uint16_t WasmBinaryBuilder::getInt16() {
BYN_TRACE("<==\n");
auto ret = uint16_t(getInt8());
ret |= uint16_t(getInt8()) << 8;
BYN_TRACE("getInt16: " << ret << "/0x" << std::hex << ret << std::dec
<< " ==>\n");
return ret;
}
| 0
|
247,130
|
/*
 * Credential-request callback for the filter session: forwards a
 * GF_EVENT_AUTHORIZATION event carrying the site URL and the caller's
 * user/password buffers.  Returns the event-forwarding result
 * (GF_TRUE if the event was consumed).
 */
static Bool gf_fsess_get_user_pass(void *usr_cbk, const char *site_url, char *usr_name, char *password)
{
	GF_Event evt;
	GF_FilterSession *fsess = (GF_FilterSession *)usr_cbk;
	evt.type = GF_EVENT_AUTHORIZATION;
	evt.auth.site_url = site_url;
	evt.auth.user = usr_name;
	evt.auth.password = password;
	return gf_fs_forward_gf_event(fsess, &evt, GF_FALSE, GF_FALSE);
}
| 0
|
386,569
|
// Build a linear (horizontal / vertical / rotated) dimension entity from
// the currently collected DXF group codes and hand it to the creation
// interface.
void DL_Dxf::addDimLinear(DL_CreationInterface* creationInterface) {
    DL_DimensionData d = getDimData();

    // definition point 1 (codes 13/23/33)
    double dx1 = getRealValue(13, 0.0);
    double dy1 = getRealValue(23, 0.0);
    double dz1 = getRealValue(33, 0.0);
    // definition point 2 (codes 14/24/34)
    double dx2 = getRealValue(14, 0.0);
    double dy2 = getRealValue(24, 0.0);
    double dz2 = getRealValue(34, 0.0);
    double angle = getRealValue(50, 0.0);    // rotation angle (code 50)
    double oblique = getRealValue(52, 0.0);  // oblique angle (code 52)

    DL_DimLinearData dl(dx1, dy1, dz1, dx2, dy2, dz2, angle, oblique);
    creationInterface->addDimLinear(d, dl);
}
| 0
|
90,874
|
// Read-only accessor for the least-recently-used origin tracked by this object.
const GURL& lru_origin() const { return lru_origin_; }
| 0
|
455,317
|
/*
 * Choose where the shell reads its input from, based on whether it is
 * interactive and whether line editing / buffered input are in use:
 * readline for interactive shells with editing, the buffered input
 * stream for non-interactive shells (when compiled with BUFFERED_INPUT),
 * or a plain stdio stream otherwise.  Also clears no-delay mode on the
 * input fd first so reads block normally.
 */
set_bash_input ()
{
  /* Make sure the fd from which we are reading input is not in
     no-delay mode. */
#if defined (BUFFERED_INPUT)
  if (interactive == 0)
    sh_unset_nodelay_mode (default_buffered_input);
  else
#endif /* !BUFFERED_INPUT */
    sh_unset_nodelay_mode (fileno (stdin));

  /* with_input_from_stdin really means `with_input_from_readline' */
  if (interactive && no_line_editing == 0)
    with_input_from_stdin ();
#if defined (BUFFERED_INPUT)
  else if (interactive == 0)
    with_input_from_buffered_stream (default_buffered_input, dollar_vars[0]);
#endif /* BUFFERED_INPUT */
  else
    with_input_from_stream (default_input, dollar_vars[0]);
}
| 0
|
275,527
|
/*
 * Bind `value` to the VM's global object under the name `var_name`.
 * When `shared` is set, the binding goes into the shared hash (visible
 * to cloned VMs), otherwise into the per-VM hash.  An existing binding
 * with the same name is replaced.  Returns NJS_OK or NJS_ERROR.
 */
njs_vm_bind(njs_vm_t *vm, const njs_str_t *var_name, const njs_value_t *value,
    njs_bool_t shared)
{
    njs_int_t ret;
    njs_object_t *global;
    njs_lvlhsh_t *hash;
    njs_object_prop_t *prop;
    njs_lvlhsh_query_t lhq;

    /* Property with an as-yet-unnamed key holding `value`. */
    prop = njs_object_prop_alloc(vm, &njs_value_undefined, value, 1);
    if (njs_slow_path(prop == NULL)) {
        return NJS_ERROR;
    }

    ret = njs_string_new(vm, &prop->name, var_name->start, var_name->length, 0);
    if (njs_slow_path(ret != NJS_OK)) {
        return NJS_ERROR;
    }

    /* Insert (or replace) the property in the chosen global hash. */
    lhq.proto = &njs_object_hash_proto;
    lhq.pool = vm->mem_pool;
    lhq.replace = 1;
    lhq.key = *var_name;
    lhq.key_hash = njs_djb_hash(lhq.key.start, lhq.key.length);
    lhq.value = prop;

    global = &vm->global_object;
    if (shared) {
        hash = &global->shared_hash;
    } else {
        hash = &global->hash;
    }

    ret = njs_lvlhsh_insert(hash, &lhq);
    if (njs_slow_path(ret != NJS_OK)) {
        njs_internal_error(vm, "lvlhsh insert failed");
        return ret;
    }

    return NJS_OK;
}
| 0
|
463,200
|
/*
 * Look up the requested annotation entry in the annotation database and
 * emit its attribute/value pairs via the rw_cb callback.  For an entry
 * named without '%'/'*' wildcards, any requested attributes that the
 * database did not return are emitted as explicit empty values, then
 * the accumulated pairs are flushed.
 */
static void annotation_get_fromdb(annotate_state_t *state,
                                  struct annotate_entry_list *entry)
{
    const char *mboxname = (state->mailbox ? state->mailbox->name : "");

    /* rw_cb records which attributes it saw in state->found. */
    state->found = 0;
    annotatemore_findall(mboxname, state->uid, entry->name, 0, &rw_cb, state, 0);
    if (state->found != state->attribs &&
        (!strchr(entry->name, '%') && !strchr(entry->name, '*'))) {
        /* some results not found for an explicitly specified entry,
         * make sure we emit explicit NILs */
        struct buf empty = BUF_INITIALIZER;
        if (!(state->found & (ATTRIB_VALUE_PRIV|ATTRIB_SIZE_PRIV)) &&
            (state->attribs & (ATTRIB_VALUE_PRIV|ATTRIB_SIZE_PRIV))) {
            /* store up value.priv and/or size.priv */
            output_entryatt(state, entry->name, state->userid, &empty);
        }
        if (!(state->found & (ATTRIB_VALUE_SHARED|ATTRIB_SIZE_SHARED)) &&
            (state->attribs & (ATTRIB_VALUE_SHARED|ATTRIB_SIZE_SHARED))) {
            /* store up value.shared and/or size.shared */
            output_entryatt(state, entry->name, "", &empty);
        }
        /* flush any stored attribute-value pairs */
        flush_entryatt(state);
    }
}
| 0
|
236,128
|
/* Parse a 'styl' (text style) box: a 16-bit record count followed by
 * that many fixed-size (GPP_STYLE_SIZE) style records.  The count is
 * validated against the remaining box payload before allocating. */
GF_Err styl_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_TextStyleBox *ptr = (GF_TextStyleBox *)s;
	u32 i, count;

	ISOM_DECREASE_SIZE(ptr, 2);
	count = gf_bs_read_u16(bs);
	ptr->entry_count = count;

	/* Reject counts that cannot fit in the remaining payload. */
	if (ptr->size / GPP_STYLE_SIZE < ptr->entry_count)
		return GF_ISOM_INVALID_FILE;

	if (!ptr->entry_count)
		return GF_OK;

	ptr->styles = (GF_StyleRecord *)gf_malloc(sizeof(GF_StyleRecord) * ptr->entry_count);
	if (!ptr->styles)
		return GF_OUT_OF_MEM;
	for (i = 0; i < ptr->entry_count; i++) {
		ISOM_DECREASE_SIZE(ptr, GPP_STYLE_SIZE);
		gpp_read_style(bs, &ptr->styles[i]);
	}
	return GF_OK;
}
| 0
|
294,548
|
/* Date#friday? -- true when the receiver's day-of-week index is 5. */
d_lite_friday_p(VALUE self)
{
    get_d1(self);
    return f_boolcast(m_wday(dat) == 5);
}
| 0
|
430,396
|
/*
 * Serialize an IP tunnel key into OVS_TUNNEL_KEY_ATTR_* netlink
 * attributes on skb.  Attributes are emitted conditionally on the key's
 * flags/values; tun_proto selects IPv4 or IPv6 address attributes, and
 * bridge-mode info is emitted as a lone flag.  Returns 0 or -EMSGSIZE
 * when the skb runs out of room.  Note: attribute emission order is
 * part of the wire format expected by userspace.
 */
static int __ip_tun_to_nlattr(struct sk_buff *skb,
			      const struct ip_tunnel_key *output,
			      const void *tun_opts, int swkey_tun_opts_len,
			      unsigned short tun_proto, u8 mode)
{
	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
			 OVS_TUNNEL_KEY_ATTR_PAD))
		return -EMSGSIZE;

	/* Bridge-mode tunnel info carries only the bridge flag. */
	if (mode & IP_TUNNEL_INFO_BRIDGE)
		return nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE)
		       ? -EMSGSIZE : 0;

	switch (tun_proto) {
	case AF_INET:
		if (output->u.ipv4.src &&
		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
				    output->u.ipv4.src))
			return -EMSGSIZE;
		if (output->u.ipv4.dst &&
		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
				    output->u.ipv4.dst))
			return -EMSGSIZE;
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&output->u.ipv6.src) &&
		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
				     &output->u.ipv6.src))
			return -EMSGSIZE;
		if (!ipv6_addr_any(&output->u.ipv6.dst) &&
		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
				     &output->u.ipv6.dst))
			return -EMSGSIZE;
		break;
	}
	if (output->tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
		return -EMSGSIZE;
	/* TTL is always emitted. */
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (output->tp_src &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
		return -EMSGSIZE;
	if (output->tp_dst &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	/* Option payload format depends on which option flag is set. */
	if (swkey_tun_opts_len) {
		if (output->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    swkey_tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
			 nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
				 swkey_tun_opts_len, tun_opts))
			return -EMSGSIZE;
	}

	return 0;
}
| 0
|
246,238
|
// Pseudo-random float uniformly spread over [min, max], driven by rand().
inline float randRange(float min, float max) {
    const float unit = (float)rand() / (float)RAND_MAX;  // in [0, 1]
    return min + unit * (max - min);
}
| 0
|
264,718
|
// Collects the nodes of `graph` that are constant-foldable under `opts`,
// together with their constant control dependencies and shape-based
// replacements.  If only leaf-level nodes were selected, the outputs are
// cleared, since folding leaves alone accomplishes nothing.
void FindConstantFoldableNodes(
    const Graph* graph, const ConstantFoldingOptions& opts,
    std::vector<Node*>* nodes,
    std::unordered_map<const Node*, gtl::FlatSet<Node*>>* constant_control_deps,
    std::unordered_map<const Node*, std::vector<Tensor>>*
        shape_replacement_map) {
  bool found_internal_node = false;
  // Visit producers before consumers by walking in data-flow order.
  auto consider = [nodes, constant_control_deps, shape_replacement_map,
                   &found_internal_node, &opts](Node* n) {
    ConsiderConstantFoldableNode(n, opts, nodes, constant_control_deps,
                                 shape_replacement_map, &found_internal_node);
  };
  ReverseDFS(*graph, nullptr, consider, NodeComparatorName());
  // Only leaf nodes selected: nothing worth folding, so reset the outputs.
  if (!found_internal_node) {
    nodes->clear();
    constant_control_deps->clear();
  }
}
| 0
|
462,261
|
/*
 * Parse a STUN UNKNOWN-ATTRIBUTES attribute from the wire buffer.
 * Each listed attribute type is one 16-bit network-order value; the
 * count is capped at PJ_STUN_MAX_ATTR to protect the fixed attrs[]
 * array in pj_stun_unknown_attr.
 */
static pj_status_t decode_unknown_attr(pj_pool_t *pool,
				       const pj_uint8_t *buf,
				       const pj_stun_msg_hdr *msghdr,
				       void **p_attr)
{
    pj_stun_unknown_attr *attr;
    const pj_uint16_t *src;
    unsigned idx;

    PJ_UNUSED_ARG(msghdr);

    attr = PJ_POOL_ZALLOC_T(pool, pj_stun_unknown_attr);
    GETATTRHDR(buf, &attr->hdr);

    /* hdr.length is in bytes; each entry occupies two bytes. */
    attr->attr_count = (attr->hdr.length >> 1);
    if (attr->attr_count > PJ_STUN_MAX_ATTR)
	return PJ_ETOOMANY;

    src = (const pj_uint16_t*)(buf + ATTR_HDR_LEN);
    for (idx = 0; idx < attr->attr_count; ++idx)
	attr->attrs[idx] = pj_ntohs(src[idx]);

    /* Done */
    *p_attr = attr;
    return PJ_SUCCESS;
}
| 0
|
294,619
|
/* Returns nonzero when `sg` is a usable calendar-reform start value:
 * NaN is rejected, infinities are accepted (pure Julian/Gregorian),
 * and finite values must lie within [REFORM_BEGIN_JD, REFORM_END_JD]. */
c_valid_start_p(double sg)
{
    if (isnan(sg))
	return 0;           /* NaN can never be a valid start */
    if (isinf(sg))
	return 1;           /* +/-inf: always-one-calendar modes */
    return (sg >= REFORM_BEGIN_JD && sg <= REFORM_END_JD) ? 1 : 0;
}
| 0
|
498,135
|
/* Emit "<input type='hidden' name='NAME' value='VALUE'/>", passing both
 * the name and the value through HTML attribute escaping so untrusted
 * strings cannot break out of the attribute context. */
void html_hidden(const char *name, const char *value)
{
	html("<input type='hidden' name='");
	html_attr(name);
	html("' value='");
	html_attr(value);
	html("'/>");
}
| 0
|
308,181
|
static struct fastrpc_session_ctx *fastrpc_session_alloc(
struct fastrpc_channel_ctx *cctx)
{
struct fastrpc_session_ctx *session = NULL;
unsigned long flags;
int i;
spin_lock_irqsave(&cctx->lock, flags);
for (i = 0; i < cctx->sesscount; i++) {
if (!cctx->session[i].used && cctx->session[i].valid) {
cctx->session[i].used = true;
session = &cctx->session[i];
break;
}
}
spin_unlock_irqrestore(&cctx->lock, flags);
return session;
}
| 0
|
512,752
|
// Store this item into `field_arg` by delegating to
// save_in_field_ignore_value(); `no_conversions` is intentionally unused
// because no conversion is needed for an ignored value.
int save_in_field(Field *field_arg, bool no_conversions)
{
  return field_arg->save_in_field_ignore_value(false);
}
| 0
|
256,999
|
/* Bind (cl != 0) or unbind a class to the route4 filter's result, but
 * only when the filter exists and its stored classid matches the one
 * being (un)bound. */
static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			      unsigned long base)
{
	struct route4_filter *filter = fh;

	if (!filter || filter->res.classid != classid)
		return;

	if (cl)
		__tcf_bind_filter(q, &filter->res, base);
	else
		__tcf_unbind_filter(q, &filter->res);
}
| 0
|
225,780
|
/* Allocate a new FDItemInformation ('fiin') box; the ISOM_DECL_BOX_ALLOC
 * macro declares and zero-initializes `tmp` with the given box type.
 * NOTE(review): the closing brace of this function is cut off in this
 * excerpt. */
GF_Box *fiin_box_new()
{
	ISOM_DECL_BOX_ALLOC(FDItemInformationBox, GF_ISOM_BOX_TYPE_FIIN);
	return (GF_Box *)tmp;
| 0
|
329,922
|
/* Composite a run of glyphs by first accumulating their coverage into a
 * single mask image, then compositing the source through that mask onto
 * the destination in one pass.  The mask starts as a8 and is promoted to
 * a8r8g8b8 (component alpha) the first time a color glyph is encountered.
 * A small stack buffer backs the mask when the extents fit; otherwise
 * pixman allocates.  Returns a cairo status; all pixman images acquired
 * here (white, mask) are released on every exit path. */
composite_glyphs_via_mask (void *_dst,
			   cairo_operator_t op,
			   cairo_surface_t *_src,
			   int src_x,
			   int src_y,
			   int dst_x,
			   int dst_y,
			   cairo_composite_glyphs_info_t *info)
{
    cairo_scaled_glyph_t *glyph_cache[64];
    pixman_image_t *white = _pixman_image_for_color (CAIRO_COLOR_WHITE);
    cairo_scaled_glyph_t *scaled_glyph;
    uint8_t buf[2048];
    pixman_image_t *mask;
    pixman_format_code_t format;
    cairo_status_t status;
    int i;

    TRACE ((stderr, "%s\n", __FUNCTION__));

    if (unlikely (white == NULL))
	return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    /* XXX convert the glyphs to common formats a8/a8r8g8b8 to hit
     * optimised paths through pixman. Should we increase the bit
     * depth of the target surface, we should reconsider the appropriate
     * mask formats.
     */

    /* Look up the first glyph eagerly: its surface decides the initial
     * mask format (a8 vs a8r8g8b8) before the mask is created. */
    status = _cairo_scaled_glyph_lookup (info->font,
					 info->glyphs[0].index,
					 CAIRO_SCALED_GLYPH_INFO_SURFACE,
					 &scaled_glyph);
    if (unlikely (status)) {
	pixman_image_unref (white);
	return status;
    }

    /* Seed the direct-mapped glyph cache with the first glyph. */
    memset (glyph_cache, 0, sizeof (glyph_cache));
    glyph_cache[info->glyphs[0].index % ARRAY_LENGTH (glyph_cache)] = scaled_glyph;

    /* `i` doubles here as the mask stride in bytes (a8 rows padded to 4). */
    format = PIXMAN_a8;
    i = (info->extents.width + 3) & ~3;
    if (scaled_glyph->surface->base.content & CAIRO_CONTENT_COLOR) {
	format = PIXMAN_a8r8g8b8;
	i = info->extents.width * 4;
    }

    /* Use the on-stack buffer when the whole mask fits in it. */
    if (i * info->extents.height > (int) sizeof (buf)) {
	mask = pixman_image_create_bits (format,
					info->extents.width,
					info->extents.height,
					NULL, 0);
    } else {
	memset (buf, 0, i * info->extents.height);
	mask = pixman_image_create_bits (format,
					info->extents.width,
					info->extents.height,
					(uint32_t *)buf, i);
    }
    if (unlikely (mask == NULL)) {
	pixman_image_unref (white);
	return _cairo_error (CAIRO_STATUS_NO_MEMORY);
    }

    status = CAIRO_STATUS_SUCCESS;
    for (i = 0; i < info->num_glyphs; i++) {
	unsigned long glyph_index = info->glyphs[i].index;
	int cache_index = glyph_index % ARRAY_LENGTH (glyph_cache);
	cairo_image_surface_t *glyph_surface;
	int x, y;

	/* Direct-mapped cache: re-lookup only on a miss or collision. */
	scaled_glyph = glyph_cache[cache_index];
	if (scaled_glyph == NULL ||
	    _cairo_scaled_glyph_index (scaled_glyph) != glyph_index)
	{
	    status = _cairo_scaled_glyph_lookup (info->font, glyph_index,
						 CAIRO_SCALED_GLYPH_INFO_SURFACE,
						 &scaled_glyph);

	    if (unlikely (status)) {
		pixman_image_unref (mask);
		pixman_image_unref (white);
		return status;
	    }

	    glyph_cache[cache_index] = scaled_glyph;
	}

	glyph_surface = scaled_glyph->surface;
	if (glyph_surface->width && glyph_surface->height) {
	    /* First color glyph after an a8 mask: promote the mask to
	     * a8r8g8b8, copying the coverage accumulated so far. */
	    if (glyph_surface->base.content & CAIRO_CONTENT_COLOR &&
		format == PIXMAN_a8) {
		pixman_image_t *ca_mask;

		format = PIXMAN_a8r8g8b8;
		ca_mask = pixman_image_create_bits (format,
						    info->extents.width,
						    info->extents.height,
						    NULL, 0);
		if (unlikely (ca_mask == NULL)) {
		    pixman_image_unref (mask);
		    pixman_image_unref (white);
		    return _cairo_error (CAIRO_STATUS_NO_MEMORY);
		}

		pixman_image_composite32 (PIXMAN_OP_SRC,
					  white, mask, ca_mask,
					  0, 0,
					  0, 0,
					  0, 0,
					  info->extents.width,
					  info->extents.height);
		pixman_image_unref (mask);
		mask = ca_mask;
	    }

	    /* round glyph locations to the nearest pixel */
	    /* XXX: FRAGILE: We're ignoring device_transform scaling here. A bug? */
	    x = _cairo_lround (info->glyphs[i].x -
			       glyph_surface->base.device_transform.x0);
	    y = _cairo_lround (info->glyphs[i].y -
			       glyph_surface->base.device_transform.y0);

	    /* Matching formats ADD directly; otherwise ADD white through
	     * the glyph as a mask to expand a8 coverage into the argb mask. */
	    if (glyph_surface->pixman_format == format) {
		pixman_image_composite32 (PIXMAN_OP_ADD,
					  glyph_surface->pixman_image, NULL, mask,
					  0, 0,
					  0, 0,
					  x - info->extents.x, y - info->extents.y,
					  glyph_surface->width,
					  glyph_surface->height);
	    } else {
		pixman_image_composite32 (PIXMAN_OP_ADD,
					  white, glyph_surface->pixman_image, mask,
					  0, 0,
					  0, 0,
					  x - info->extents.x, y - info->extents.y,
					  glyph_surface->width,
					  glyph_surface->height);
	    }
	}
    }

    /* Color glyphs require per-component alpha when applying the mask. */
    if (format == PIXMAN_a8r8g8b8)
	pixman_image_set_component_alpha (mask, TRUE);

    pixman_image_composite32 (_pixman_operator (op),
			      ((cairo_image_source_t *)_src)->pixman_image,
			      mask,
			      to_pixman_image (_dst),
			      info->extents.x + src_x, info->extents.y + src_y,
			      0, 0,
			      info->extents.x - dst_x, info->extents.y - dst_y,
			      info->extents.width, info->extents.height);
    pixman_image_unref (mask);
    pixman_image_unref (white);

    return CAIRO_STATUS_SUCCESS;
}
| 0
|
401,573
|
/*
 * Seed the entropy pool with boot-time data: the current wall-clock
 * time, hardware-RNG output for the whole pool (falling back to the
 * cycle counter when no arch RNG is available), and the utsname
 * identification strings.  This only diversifies the pool contents;
 * no entropy is credited.
 */
static void __init init_std_data(struct entropy_store *r)
{
	ktime_t boot_time = ktime_get_real();
	unsigned long seed;
	int remaining;

	mix_pool_bytes(r, &boot_time, sizeof(boot_time));
	for (remaining = r->poolinfo->poolbytes; remaining > 0;
	     remaining -= sizeof(seed)) {
		if (!arch_get_random_seed_long(&seed) &&
		    !arch_get_random_long(&seed))
			seed = random_get_entropy();
		mix_pool_bytes(r, &seed, sizeof(seed));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}
| 0
|
222,910
|
// Looks up the per-node context for `node`; returns nullptr when the
// node has not been registered in node_to_context_.
NodeContext* GetNodeContext(const NodeDef* node) {
  const auto it = node_to_context_.find(node);
  return it == node_to_context_.end() ? nullptr : &it->second;
}
| 0
|
437,281
|
/* Install a map-based optimization into the compiled regex: copy the
 * character map, record the min/max distance, and, when the minimum
 * distance is finite, derive the shortest subject length worth
 * searching (threshold_len). */
set_optimize_map(regex_t* reg, OptMap* m)
{
  int idx;

  for (idx = 0; idx < ONIG_CHAR_TABLE_SIZE; idx++)
    reg->map[idx] = m->map[idx];

  reg->optimize = OPTIMIZE_MAP;
  reg->dmin = m->mmd.min;
  reg->dmax = m->mmd.max;

  if (reg->dmin != INFINITE_LEN)
    reg->threshold_len = reg->dmin + 1;
}
| 0
|
247,549
|
// Server certificate carries only DNS SANs: the handshake should succeed
// and the peer's URI SAN accessor should yield "" (exercised via testUtil
// with the expected serial number of the DNS-SAN cert).
TEST_P(SslSocketTest, GetNoUriWithDnsSan) {
  const std::string client_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem"
)EOF";

  const std::string server_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem"
    validation_context:
      trusted_ca:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
)EOF";

  // The SAN field only has DNS, expect "" for uriSanPeerCertificate().
  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());
  testUtil(test_options.setExpectedSerialNumber(TEST_SAN_DNS_CERT_SERIAL));
}
| 0
|
474,001
|
/* Case-fold one ISO-8859-1 character at *pp into `lower`, advancing *pp.
 * When multi-character folding is enabled, sharp-s (0xdf) expands to the
 * two-byte sequence "ss"; every other byte folds to its single
 * lower-case form.  Returns the number of bytes written to `lower`. */
mbc_case_fold(OnigCaseFoldType flag, const UChar** pp, const UChar* end ARG_UNUSED,
              UChar* lower, OnigEncoding enc ARG_UNUSED)
{
  const UChar* src = *pp;

  if (*src == 0xdf && (flag & INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) != 0) {
    lower[0] = 's';
    lower[1] = 's';
    (*pp)++;
    return 2;
  }

  *lower = ONIGENC_ISO_8859_1_TO_LOWER_CASE(*src);
  (*pp)++;
  return 1;
}
| 0
|
455,307
|
/* Readline completion generator over words from the history list.
 * On the first call (state == 0) it (re)builds the history completion
 * array and caches the hint text; each subsequent call returns the next
 * matching entry, or NULL when exhausted.  Returned strings are
 * savestring() copies owned by the caller. */
history_completion_generator (hint_text, state)
     const char *hint_text;
     int state;
{
  static int local_index, len;
  static const char *text;

  /* If this is the first call to the generator, then initialize the
     list of strings to complete over. */
  if (state == 0)
    {
      if (dabbrev_expand_active)	/* This is kind of messy */
	rl_completion_suppress_append = 1;
      local_index = 0;
      build_history_completion_array ();
      text = hint_text;
      len = strlen (text);
    }

  while (history_completion_array && history_completion_array[local_index])
    {
      /* XXX - should this use completion-ignore-case? */
      if (strncmp (text, history_completion_array[local_index++], len) == 0)
	return (savestring (history_completion_array[local_index - 1]));
    }
  return ((char *)NULL);
}
| 0
|
513,368
|
/*
 * Compute the size of the GROUP BY key buffer: for every group item, add
 * the packed key length of its tmp-table field (or a type-derived length
 * when no field exists yet), plus one byte per nullable item.  Stores the
 * totals into join->tmp_table_param (group_length / group_parts /
 * group_null_parts) and sets join->group when a GROUP BY list is present.
 */
calc_group_buffer(JOIN *join,ORDER *group)
{
  uint key_length=0, parts=0, null_parts=0;

  if (group)
    join->group= 1;
  for (; group ; group=group->next)
  {
    Item *group_item= *group->item;
    Field *field= group_item->get_tmp_table_field();
    if (field)
    {
      enum_field_types type;
      if ((type= field->type()) == MYSQL_TYPE_BLOB)
	key_length+=MAX_BLOB_WIDTH;		// Can't be used as a key
      else if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_VAR_STRING)
	key_length+= field->field_length + HA_KEY_BLOB_LENGTH;
      else if (type == MYSQL_TYPE_BIT)
      {
        /* Bit is usually stored as a longlong key for group fields */
        key_length+= 8;                         // Big enough
      }
      else
	key_length+= field->pack_length();
    }
    else
    {
      /* No materialized field yet: size the key from the item's
         comparison type instead. */
      switch (group_item->cmp_type()) {
      case REAL_RESULT:
        key_length+= sizeof(double);
        break;
      case INT_RESULT:
        key_length+= sizeof(longlong);
        break;
      case DECIMAL_RESULT:
        key_length+= my_decimal_get_binary_size(group_item->max_length - 
                                                (group_item->decimals ? 1 : 0),
                                                group_item->decimals);
        break;
      case TIME_RESULT:
      {
        /*
          As items represented as DATE/TIME fields in the group buffer
          have STRING_RESULT result type, we increase the length 
          by 8 as maximum pack length of such fields.
        */
        key_length+= 8;
        break;
      }
      case STRING_RESULT:
      {
        enum enum_field_types type= group_item->field_type();
        if (type == MYSQL_TYPE_BLOB)
          key_length+= MAX_BLOB_WIDTH;		// Can't be used as a key
        else
        {
          /*
            Group strings are taken as varstrings and require an length field.
            A field is not yet created by create_tmp_field()
            and the sizes should match up.
          */
          key_length+= group_item->max_length + HA_KEY_BLOB_LENGTH;
        }
        break;
      }
      default:
        /* This case should never be choosen */
        DBUG_ASSERT(0);
        my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
      }
    }
    parts++;
    if (group_item->maybe_null)
      null_parts++;
  }
  join->tmp_table_param.group_length=key_length+null_parts;
  join->tmp_table_param.group_parts=parts;
  join->tmp_table_param.group_null_parts=null_parts;
}
| 0
|
259,290
|
/* Pick the best available timestamp for a fragment stream, in order of
 * preference: the sidx pts, then the first tfra pts, and finally the
 * tfdt dts (which may itself be AV_NOPTS_VALUE). */
static int64_t get_stream_info_time(MOVFragmentStreamInfo * fi)
{
    av_assert0(fi);

    if (fi->sidx_pts != AV_NOPTS_VALUE)
        return fi->sidx_pts;
    if (fi->first_tfra_pts != AV_NOPTS_VALUE)
        return fi->first_tfra_pts;
    return fi->tfdt_dts;
}
| 0
|
474,047
|
/*
 * Scan the next token of the regex pattern at *src (up to `end`) into
 * `tok`, honoring the syntax/options in `env`.  Advances *src past the
 * consumed text and returns the token type (negative on error).
 * Comments and extended-mode whitespace are skipped by looping back to
 * `start:`.  Escaped and unescaped characters are dispatched through
 * two large switches; anything not recognized falls through as a plain
 * string token.
 */
fetch_token(OnigToken* tok, UChar** src, UChar* end, ScanEnv* env)
{
  int r, num;
  OnigCodePoint c;
  OnigEncoding enc = env->enc;
  const OnigSyntaxType* syn = env->syntax;
  UChar* prev;
  UChar* p = *src;
  PFETCH_READY;

 start:
  if (PEND) {
    tok->type = TK_EOT;
    return tok->type;
  }

  tok->type  = TK_STRING;
  tok->base  = 0;
  tok->backp = p;

  PFETCH(c);
  /* --- escaped token: interpret the character after the escape --- */
  if (IS_MC_ESC_CODE(c, syn)) {
    if (PEND) return ONIGERR_END_PATTERN_AT_ESCAPE;

    tok->backp = p;
    PFETCH(c);

    tok->u.c = c;
    tok->escaped = 1;
    switch (c) {
    case '*':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_ASTERISK_ZERO_INF)) break;
      tok->type = TK_OP_REPEAT;
      tok->u.repeat.lower = 0;
      tok->u.repeat.upper = REPEAT_INFINITE;
      goto greedy_check;
      break;

    case '+':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_PLUS_ONE_INF)) break;
      tok->type = TK_OP_REPEAT;
      tok->u.repeat.lower = 1;
      tok->u.repeat.upper = REPEAT_INFINITE;
      goto greedy_check;
      break;

    case '?':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_QMARK_ZERO_ONE)) break;
      tok->type = TK_OP_REPEAT;
      tok->u.repeat.lower = 0;
      tok->u.repeat.upper = 1;
      /* Shared tail for all repeat operators: consume a trailing '?'
         (non-greedy) or '+' (possessive) when the syntax allows it. */
    greedy_check:
      if (!PEND && PPEEK_IS('?') &&
	  IS_SYNTAX_OP(syn, ONIG_SYN_OP_QMARK_NON_GREEDY)) {
	PFETCH(c);
	tok->u.repeat.greedy     = 0;
	tok->u.repeat.possessive = 0;
      }
      else {
      possessive_check:
	if (!PEND && PPEEK_IS('+') &&
	    ((IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_PLUS_POSSESSIVE_REPEAT) &&
	      tok->type != TK_INTERVAL)  ||
	     (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_PLUS_POSSESSIVE_INTERVAL) &&
	      tok->type == TK_INTERVAL))) {
	  PFETCH(c);
	  tok->u.repeat.greedy     = 1;
	  tok->u.repeat.possessive = 1;
	}
	else {
	  tok->u.repeat.greedy     = 1;
	  tok->u.repeat.possessive = 0;
	}
      }
      break;

    case '{':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_BRACE_INTERVAL)) break;
      r = fetch_range_quantifier(&p, end, tok, env);
      if (r < 0) return r;  /* error */
      if (r == 0) goto greedy_check;
      else if (r == 2) { /* {n} */
	if (IS_SYNTAX_BV(syn, ONIG_SYN_FIXED_INTERVAL_IS_GREEDY_ONLY))
	  goto possessive_check;

	goto greedy_check;
      }
      /* r == 1 : normal char */
      break;

    case '|':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_VBAR_ALT)) break;
      tok->type = TK_ALT;
      break;

    case '(':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_LPAREN_SUBEXP)) break;
      tok->type = TK_SUBEXP_OPEN;
      break;

    case ')':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_LPAREN_SUBEXP)) break;
      tok->type = TK_SUBEXP_CLOSE;
      break;

    case 'w':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_W_WORD)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_W;
      tok->u.prop.not   = 0;
      break;

    case 'W':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_W_WORD)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_W;
      tok->u.prop.not   = 1;
      break;

    case 'b':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_B_WORD_BOUND)) break;
      tok->type = TK_ANCHOR;
      tok->u.anchor = ANCHOR_WORD_BOUND;
      break;

    case 'B':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_B_WORD_BOUND)) break;
      tok->type = TK_ANCHOR;
      tok->u.anchor = ANCHOR_NOT_WORD_BOUND;
      break;

#ifdef USE_WORD_BEGIN_END
    case '<':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_LTGT_WORD_BEGIN_END)) break;
      tok->type = TK_ANCHOR;
      tok->u.anchor = ANCHOR_WORD_BEGIN;
      break;

    case '>':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_LTGT_WORD_BEGIN_END)) break;
      tok->type = TK_ANCHOR;
      tok->u.anchor = ANCHOR_WORD_END;
      break;
#endif

    case 's':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_S_WHITE_SPACE)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_S;
      tok->u.prop.not   = 0;
      break;

    case 'S':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_S_WHITE_SPACE)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_S;
      tok->u.prop.not   = 1;
      break;

    case 'd':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_D_DIGIT)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_D;
      tok->u.prop.not   = 0;
      break;

    case 'D':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_D_DIGIT)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_D;
      tok->u.prop.not   = 1;
      break;

    case 'h':
      if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_H_XDIGIT)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_XDIGIT;
      tok->u.prop.not   = 0;
      break;

    case 'H':
      if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_H_XDIGIT)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_XDIGIT;
      tok->u.prop.not   = 1;
      break;

    case 'A':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_AZ_BUF_ANCHOR)) break;
    begin_buf:
      tok->type = TK_ANCHOR;
      tok->u.subtype = ANCHOR_BEGIN_BUF;
      break;

    case 'Z':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_AZ_BUF_ANCHOR)) break;
      tok->type = TK_ANCHOR;
      tok->u.subtype = ANCHOR_SEMI_END_BUF;
      break;

    case 'z':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_AZ_BUF_ANCHOR)) break;
    end_buf:
      tok->type = TK_ANCHOR;
      tok->u.subtype = ANCHOR_END_BUF;
      break;

    case 'G':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_CAPITAL_G_BEGIN_ANCHOR)) break;
      tok->type = TK_ANCHOR;
      tok->u.subtype = ANCHOR_BEGIN_POSITION;
      break;

    case '`':
      if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_GNU_BUF_ANCHOR)) break;
      goto begin_buf;
      break;

    case '\'':
      if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_GNU_BUF_ANCHOR)) break;
      goto end_buf;
      break;

    case 'x':
      if (PEND) break;

      prev = p;
      if (PPEEK_IS('{') && IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_X_BRACE_HEX8)) {
	PINC;
	num = scan_unsigned_hexadecimal_number(&p, end, 8, enc);
	if (num < 0) return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE;
	if (!PEND) {
	  if (ONIGENC_IS_CODE_XDIGIT(enc, PPEEK))
	    return ONIGERR_TOO_LONG_WIDE_CHAR_VALUE;
	}

	if ((p > prev + enclen(enc, prev, end)) && !PEND && PPEEK_IS('}')) {
	  PINC;
	  tok->type   = TK_CODE_POINT;
	  tok->u.code = (OnigCodePoint )num;
	}
	else {
	  /* can't read nothing or invalid format */
	  p = prev;
	}
      }
      else if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_X_HEX2)) {
	num = scan_unsigned_hexadecimal_number(&p, end, 2, enc);
	if (num < 0) return ONIGERR_TOO_BIG_NUMBER;
	if (p == prev) {  /* can't read nothing. */
	  num = 0; /* but, it's not error */
	}
	tok->type = TK_RAW_BYTE;
	tok->base = 16;
	tok->u.c  = num;
      }
      break;

    case 'u':
      if (PEND) break;

      prev = p;
      if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_U_HEX4)) {
	num = scan_unsigned_hexadecimal_number(&p, end, 4, enc);
	if (num < 0) return ONIGERR_TOO_BIG_NUMBER;
	if (p == prev) {  /* can't read nothing. */
	  num = 0; /* but, it's not error */
	}
	tok->type   = TK_CODE_POINT;
	tok->base   = 16;
	tok->u.code = (OnigCodePoint )num;
      }
      break;

    case '1': case '2': case '3': case '4':
    case '5': case '6': case '7': case '8': case '9':
      PUNFETCH;
      prev = p;
      num = onig_scan_unsigned_number(&p, end, enc);
      if (num < 0 || num > ONIG_MAX_BACKREF_NUM) {
	goto skip_backref;
      }

      if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_DECIMAL_BACKREF) &&
	  (num <= env->num_mem || num <= 9)) { /* This spec. from GNU regex */
	if (IS_SYNTAX_BV(syn, ONIG_SYN_STRICT_CHECK_BACKREF)) {
	  if (num > env->num_mem || IS_NULL(SCANENV_MEM_NODES(env)[num]))
	    return ONIGERR_INVALID_BACKREF;
	}

	tok->type = TK_BACKREF;
	tok->u.backref.num     = 1;
	tok->u.backref.ref1    = num;
	tok->u.backref.by_name = 0;
#ifdef USE_BACKREF_WITH_LEVEL
	tok->u.backref.exist_level = 0;
#endif
	break;
      }

    skip_backref:
      if (c == '8' || c == '9') {
	/* normal char */
	p = prev; PINC;
	break;
      }

      p = prev;
      /* fall through */
    case '0':
      if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_OCTAL3)) {
	prev = p;
	num = scan_unsigned_octal_number(&p, end, (c == '0' ? 2:3), enc);
	if (num < 0) return ONIGERR_TOO_BIG_NUMBER;
	if (p == prev) {  /* can't read nothing. */
	  num = 0; /* but, it's not error */
	}
	tok->type = TK_RAW_BYTE;
	tok->base = 8;
	tok->u.c  = num;
      }
      else if (c != '0') {
	PINC;
      }
      break;

#ifdef USE_NAMED_GROUP
    case 'k':
      if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_K_NAMED_BACKREF)) {
	PFETCH(c);
	if (c == '<' || c == '\'') {
	  UChar* name_end;
	  int* backs;
	  int back_num;

	  prev = p;

#ifdef USE_BACKREF_WITH_LEVEL
	  name_end = NULL_UCHARP; /* no need. escape gcc warning. */
	  r = fetch_name_with_level((OnigCodePoint )c, &p, end, &name_end,
				    env, &back_num, &tok->u.backref.level);
	  if (r == 1) tok->u.backref.exist_level = 1;
	  else        tok->u.backref.exist_level = 0;
#else
	  r = fetch_name(&p, end, &name_end, env, &back_num, 1);
#endif
	  if (r < 0) return r;

	  if (back_num != 0) {
	    if (back_num < 0) {
	      back_num = BACKREF_REL_TO_ABS(back_num, env);
	      if (back_num <= 0)
		return ONIGERR_INVALID_BACKREF;
	    }

	    if (IS_SYNTAX_BV(syn, ONIG_SYN_STRICT_CHECK_BACKREF)) {
	      if (back_num > env->num_mem ||
		  IS_NULL(SCANENV_MEM_NODES(env)[back_num]))
		return ONIGERR_INVALID_BACKREF;
	    }
	    tok->type = TK_BACKREF;
	    tok->u.backref.by_name = 0;
	    tok->u.backref.num  = 1;
	    tok->u.backref.ref1 = back_num;
	  }
	  else {
	    num = onig_name_to_group_numbers(env->reg, prev, name_end, &backs);
	    if (num <= 0) {
	      onig_scan_env_set_error_string(env,
			     ONIGERR_UNDEFINED_NAME_REFERENCE, prev, name_end);
	      return ONIGERR_UNDEFINED_NAME_REFERENCE;
	    }
	    if (IS_SYNTAX_BV(syn, ONIG_SYN_STRICT_CHECK_BACKREF)) {
	      int i;
	      for (i = 0; i < num; i++) {
		if (backs[i] > env->num_mem ||
		    IS_NULL(SCANENV_MEM_NODES(env)[backs[i]]))
		  return ONIGERR_INVALID_BACKREF;
	      }
	    }

	    tok->type = TK_BACKREF;
	    tok->u.backref.by_name = 1;
	    if (num == 1) {
	      tok->u.backref.num  = 1;
	      tok->u.backref.ref1 = backs[0];
	    }
	    else {
	      tok->u.backref.num  = num;
	      tok->u.backref.refs = backs;
	    }
	  }
	}
	else {
	  PUNFETCH;
	  onig_syntax_warn(env, "invalid back reference");
	}
      }
      break;
#endif

#ifdef USE_SUBEXP_CALL
    case 'g':
      if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_G_SUBEXP_CALL)) {
	PFETCH(c);
	if (c == '<' || c == '\'') {
	  int gnum;
	  UChar* name_end;

	  prev = p;
	  r = fetch_name((OnigCodePoint )c, &p, end, &name_end, env, &gnum, 1);
	  if (r < 0) return r;

	  tok->type = TK_CALL;
	  tok->u.call.name     = prev;
	  tok->u.call.name_end = name_end;
	  tok->u.call.gnum     = gnum;
	}
	else {
	  onig_syntax_warn(env, "invalid subexp call");
	  PUNFETCH;
	}
      }
      break;
#endif

    case 'Q':
      if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_CAPITAL_Q_QUOTE)) {
	tok->type = TK_QUOTE_OPEN;
      }
      break;

    case 'p':
    case 'P':
      if (PPEEK_IS('{') &&
	  IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_P_BRACE_CHAR_PROPERTY)) {
	PINC;
	tok->type = TK_CHAR_PROPERTY;
	tok->u.prop.not = (c == 'P' ? 1 : 0);

	if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_P_BRACE_CIRCUMFLEX_NOT)) {
	  PFETCH(c);
	  if (c == '^') {
	    tok->u.prop.not = (tok->u.prop.not == 0 ? 1 : 0);
	  }
	  else
	    PUNFETCH;
	}
      }
      else {
	onig_syntax_warn(env, "invalid Unicode Property \\%c", c);
      }
      break;

    default:
      PUNFETCH;
      num = fetch_escaped_value(&p, end, env);
      if (num < 0) return num;
      /* set_raw: */
      if (tok->u.c != num) {
	tok->type = TK_CODE_POINT;
	tok->u.code = (OnigCodePoint )num;
      }
      else { /* string */
	p = tok->backp + enclen(enc, tok->backp, end);
      }
      break;
    }
  }
  else {
    /* --- unescaped token --- */
    tok->u.c = c;
    tok->escaped = 0;

#ifdef USE_VARIABLE_META_CHARS
    if ((c != ONIG_INEFFECTIVE_META_CHAR) &&
	IS_SYNTAX_OP(syn, ONIG_SYN_OP_VARIABLE_META_CHARACTERS)) {
      if (c == MC_ANYCHAR(syn))
	goto any_char;
      else if (c == MC_ANYTIME(syn))
	goto anytime;
      else if (c == MC_ZERO_OR_ONE_TIME(syn))
	goto zero_or_one_time;
      else if (c == MC_ONE_OR_MORE_TIME(syn))
	goto one_or_more_time;
      else if (c == MC_ANYCHAR_ANYTIME(syn)) {
	tok->type = TK_ANYCHAR_ANYTIME;
	goto out;
      }
    }
#endif

    switch (c) {
    case '.':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_DOT_ANYCHAR)) break;
#ifdef USE_VARIABLE_META_CHARS
    any_char:
#endif
      tok->type = TK_ANYCHAR;
      break;

    case '*':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ASTERISK_ZERO_INF)) break;
#ifdef USE_VARIABLE_META_CHARS
    anytime:
#endif
      tok->type = TK_OP_REPEAT;
      tok->u.repeat.lower = 0;
      tok->u.repeat.upper = REPEAT_INFINITE;
      goto greedy_check;
      break;

    case '+':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_PLUS_ONE_INF)) break;
#ifdef USE_VARIABLE_META_CHARS
    one_or_more_time:
#endif
      tok->type = TK_OP_REPEAT;
      tok->u.repeat.lower = 1;
      tok->u.repeat.upper = REPEAT_INFINITE;
      goto greedy_check;
      break;

    case '?':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_QMARK_ZERO_ONE)) break;
#ifdef USE_VARIABLE_META_CHARS
    zero_or_one_time:
#endif
      tok->type = TK_OP_REPEAT;
      tok->u.repeat.lower = 0;
      tok->u.repeat.upper = 1;
      goto greedy_check;
      break;

    case '{':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_BRACE_INTERVAL)) break;
      r = fetch_range_quantifier(&p, end, tok, env);
      if (r < 0) return r;  /* error */
      if (r == 0) goto greedy_check;
      else if (r == 2) { /* {n} */
	if (IS_SYNTAX_BV(syn, ONIG_SYN_FIXED_INTERVAL_IS_GREEDY_ONLY))
	  goto possessive_check;

	goto greedy_check;
      }
      /* r == 1 : normal char */
      break;

    case '|':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_VBAR_ALT)) break;
      tok->type = TK_ALT;
      break;

    case '(':
      if (PPEEK_IS('?') &&
          IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_QMARK_GROUP_EFFECT)) {
	PINC;
	if (PPEEK_IS('#')) {
	  PFETCH(c);
	  while (1) {
	    if (PEND) return ONIGERR_END_PATTERN_IN_GROUP;
	    PFETCH(c);
	    if (c == MC_ESC(syn)) {
	      if (!PEND) PFETCH(c);
	    }
	    else {
	      if (c == ')') break;
	    }
	  }
	  goto start;
	}
	PUNFETCH;
      }

      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_LPAREN_SUBEXP)) break;
      tok->type = TK_SUBEXP_OPEN;
      break;

    case ')':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_LPAREN_SUBEXP)) break;
      tok->type = TK_SUBEXP_CLOSE;
      break;

    case '^':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_LINE_ANCHOR)) break;
      tok->type = TK_ANCHOR;
      tok->u.subtype = (IS_SINGLELINE(env->option)
			? ANCHOR_BEGIN_BUF : ANCHOR_BEGIN_LINE);
      break;

    case '$':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_LINE_ANCHOR)) break;
      tok->type = TK_ANCHOR;
      tok->u.subtype = (IS_SINGLELINE(env->option)
			? ANCHOR_SEMI_END_BUF : ANCHOR_END_LINE);
      break;

    case '[':
      if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_BRACKET_CC)) break;
      tok->type = TK_CC_OPEN;
      break;

    case ']':
      if (*src > env->pattern)   /* /].../ is allowed. */
	CLOSE_BRACKET_WITHOUT_ESC_WARN(env, (UChar* )"]");
      break;

    case '#':
      if (IS_EXTEND(env->option)) {
	while (!PEND) {
	  PFETCH(c);
	  if (ONIGENC_IS_CODE_NEWLINE(enc, c))
	    break;
	}
	goto start;
	break;
      }
      break;

    case ' ': case '\t': case '\n': case '\r': case '\f':
      if (IS_EXTEND(env->option))
	goto start;
      break;

    default:
      /* string */
      break;
    }
  }

#ifdef USE_VARIABLE_META_CHARS
 out:
#endif
  *src = p;
  return tok->type;
}
| 0
|
487,636
|
/*
 * Set the uid used for filesystem access checks (pre-cred era kernel).
 * The change is permitted when the requested uid matches any of the
 * caller's uids (real/effective/saved/fs) or the caller has CAP_SETUID.
 * Always returns the previous fsuid so callers can restore it; an LSM
 * veto leaves the fsuid untouched.
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;
	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;
	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			/* id transition: apply the suid-dumpable policy and
			 * publish the flag before the id change (smp_wmb). */
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}
	/* Propagate the (possibly unchanged) fsuid to keyring/proc/LSM. */
	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);
	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
	return old_fsuid;
}
| 0
|
359,347
|
/*
 * CLI: "clear bgp (A.B.C.D|X:X::X:X) in prefix-filter" — soft-clear the
 * inbound session of one peer (argv[0] is the peer address) in the IPv6
 * unicast AFI/SAFI, pushing the existing ORF prefix-list out to it.
 */
DEFUN (clear_bgp_peer_in_prefix_filter,
       clear_bgp_peer_in_prefix_filter_cmd,
       "clear bgp (A.B.C.D|X:X::X:X) in prefix-filter",
       CLEAR_STR
       BGP_STR
       "BGP neighbor address to clear\n"
       "BGP IPv6 neighbor to clear\n"
       "Soft reconfig inbound update\n"
       "Push out the existing ORF prefix-list\n")
{
  return bgp_clear_vty (vty, NULL, AFI_IP6, SAFI_UNICAST, clear_peer,
                        BGP_CLEAR_SOFT_IN_ORF_PREFIX, argv[0]);
}
| 0
|
335,424
|
/*
 * Expand one cmdline special variable ("%", "#", "<cword>", "<afile>",
 * "<sfile>", "<slnum>", etc.) found at "src".  Sets *usedlen to the
 * number of characters consumed and returns an allocated copy of the
 * expansion, or NULL when there is no match or on error (in which case
 * *errormsg is set; "" means an error already reported/empty result).
 */
eval_vars(
    char_u	*src,		// pointer into commandline
    char_u	*srcstart,	// beginning of valid memory for src
    int		*usedlen,	// characters after src that are used
    linenr_T	*lnump,		// line number for :e command, or NULL
    char	**errormsg,	// pointer to error message
    int		*escaped,	// return value has escaped white space (can
				// be NULL)
    int		empty_is_error)	// empty result is considered an error
{
    int		i;
    char_u	*s;
    char_u	*result;
    char_u	*resultbuf = NULL;
    int		resultlen;
    buf_T	*buf;
    int		valid = VALID_HEAD + VALID_PATH;    // assume valid result
    int		spec_idx;
    int		tilde_file = FALSE;
    int		skip_mod = FALSE;
    char_u	strbuf[30];

    *errormsg = NULL;
    if (escaped != NULL)
	*escaped = FALSE;

    /*
     * Check if there is something to do.
     */
    spec_idx = find_cmdline_var(src, usedlen);
    if (spec_idx < 0)	// no match
    {
	*usedlen = 1;
	return NULL;
    }

    /*
     * Skip when preceded with a backslash "\%" and "\#".
     * Note: In "\\%" the % is also not recognized!
     */
    if (src > srcstart && src[-1] == '\\')
    {
	*usedlen = 0;
	STRMOVE(src - 1, src);	// remove backslash
	return NULL;
    }

    /*
     * word or WORD under cursor
     */
    if (spec_idx == SPEC_CWORD || spec_idx == SPEC_CCWORD
						     || spec_idx == SPEC_CEXPR)
    {
	resultlen = find_ident_under_cursor(&result,
		spec_idx == SPEC_CWORD ? (FIND_IDENT | FIND_STRING)
	      : spec_idx == SPEC_CEXPR ? (FIND_IDENT | FIND_STRING | FIND_EVAL)
	      : FIND_STRING);
	if (resultlen == 0)
	{
	    *errormsg = "";
	    return NULL;
	}
    }

    /*
     * '#': Alternate file name
     * '%': Current file name
     *	    File name under the cursor
     *	    File name for autocommand
     *	and following modifiers
     */
    else
    {
	int off = 0;

	switch (spec_idx)
	{
	case SPEC_PERC:
#ifdef FEAT_EVAL
		if (!in_vim9script() || src[1] != '%')
#endif
		{
		    // '%': current file
		    if (curbuf->b_fname == NULL)
		    {
			result = (char_u *)"";
			valid = 0;	    // Must have ":p:h" to be valid
		    }
		    else
		    {
			result = curbuf->b_fname;
			tilde_file = STRCMP(result, "~") == 0;
		    }
		    break;
		}
#ifdef FEAT_EVAL
		// "%%" alternate file
		off = 1;
#endif
		// FALLTHROUGH
	case SPEC_HASH:		// '#' or "#99": alternate file
		if (off == 0 ? src[1] == '#' : src[2] == '%')
		{
		    // "##" or "%%%": the argument list
		    result = arg_all();
		    resultbuf = result;
		    *usedlen = off + 2;
		    if (escaped != NULL)
			*escaped = TRUE;
		    skip_mod = TRUE;
		    break;
		}
		s = src + off + 1;
		if (*s == '<')		// "#<99" uses v:oldfiles
		    ++s;
		i = (int)getdigits(&s);
		if (s == src + off + 2 && src[off + 1] == '-')
		    // just a minus sign, don't skip over it
		    s--;
		*usedlen = (int)(s - src); // length of what we expand

		if (src[off + 1] == '<' && i != 0)
		{
		    if (*usedlen < off + 2)
		    {
			// Should we give an error message for #<text?
			*usedlen = off + 1;
			return NULL;
		    }
#ifdef FEAT_EVAL
		    result = list_find_str(get_vim_var_list(VV_OLDFILES),
								     (long)i);
		    if (result == NULL)
		    {
			*errormsg = "";
			return NULL;
		    }
#else
		    *errormsg = _(e_hashsmall_is_not_available_without_the_eval_feature);
		    return NULL;
#endif
		}
		else
		{
		    if (i == 0 && src[off + 1] == '<' && *usedlen > off + 1)
			*usedlen = off + 1;
		    buf = buflist_findnr(i);
		    if (buf == NULL)
		    {
			*errormsg = _(e_no_alternate_file_name_to_substitute_for_hash);
			return NULL;
		    }
		    if (lnump != NULL)
			*lnump = ECMD_LAST;
		    if (buf->b_fname == NULL)
		    {
			result = (char_u *)"";
			valid = 0;	    // Must have ":p:h" to be valid
		    }
		    else
		    {
			result = buf->b_fname;
			tilde_file = STRCMP(result, "~") == 0;
		    }
		}
		break;

#ifdef FEAT_SEARCHPATH
	case SPEC_CFILE:	// file name under cursor
		result = file_name_at_cursor(FNAME_MESS|FNAME_HYP, 1L, NULL);
		if (result == NULL)
		{
		    *errormsg = "";
		    return NULL;
		}
		resultbuf = result;	// remember allocated string
		break;
#endif

	case SPEC_AFILE:	// file name for autocommand
		result = autocmd_fname;
		if (result != NULL && !autocmd_fname_full)
		{
		    // Still need to turn the fname into a full path.  It is
		    // postponed to avoid a delay when <afile> is not used.
		    autocmd_fname_full = TRUE;
		    result = FullName_save(autocmd_fname, FALSE);
		    vim_free(autocmd_fname);
		    autocmd_fname = result;
		}
		if (result == NULL)
		{
		    *errormsg = _(e_no_autocommand_file_name_to_substitute_for_afile);
		    return NULL;
		}
		result = shorten_fname1(result);
		break;

	case SPEC_ABUF:		// buffer number for autocommand
		if (autocmd_bufnr <= 0)
		{
		    *errormsg = _(e_no_autocommand_buffer_name_to_substitute_for_abuf);
		    return NULL;
		}
		sprintf((char *)strbuf, "%d", autocmd_bufnr);
		result = strbuf;
		break;

	case SPEC_AMATCH:	// match name for autocommand
		result = autocmd_match;
		if (result == NULL)
		{
		    *errormsg = _(e_no_autocommand_match_name_to_substitute_for_amatch);
		    return NULL;
		}
		break;

	case SPEC_SFILE:	// file name for ":so" command
		result = estack_sfile(ESTACK_SFILE);
		if (result == NULL)
		{
		    *errormsg = _(e_no_source_file_name_to_substitute_for_sfile);
		    return NULL;
		}
		resultbuf = result;	// remember allocated string
		break;

	case SPEC_STACK:	// call stack
		result = estack_sfile(ESTACK_STACK);
		if (result == NULL)
		{
		    *errormsg = _(e_no_call_stack_to_substitute_for_stack);
		    return NULL;
		}
		resultbuf = result;	// remember allocated string
		break;

	case SPEC_SCRIPT:	// script file name
		result = estack_sfile(ESTACK_SCRIPT);
		if (result == NULL)
		{
		    *errormsg = _(e_no_script_file_name_to_substitute_for_script);
		    return NULL;
		}
		resultbuf = result;	// remember allocated string
		break;

	case SPEC_SLNUM:	// line in file for ":so" command
		if (SOURCING_NAME == NULL || SOURCING_LNUM == 0)
		{
		    *errormsg = _(e_no_line_number_to_use_for_slnum);
		    return NULL;
		}
		sprintf((char *)strbuf, "%ld", SOURCING_LNUM);
		result = strbuf;
		break;

#ifdef FEAT_EVAL
	case SPEC_SFLNUM:	// line in script file
		if (current_sctx.sc_lnum + SOURCING_LNUM == 0)
		{
		    *errormsg = _(e_no_line_number_to_use_for_sflnum);
		    return NULL;
		}
		sprintf((char *)strbuf, "%ld",
				 (long)(current_sctx.sc_lnum + SOURCING_LNUM));
		result = strbuf;
		break;

	case SPEC_SID:
		if (current_sctx.sc_sid <= 0)
		{
		    *errormsg = _(e_using_sid_not_in_script_context);
		    return NULL;
		}
		sprintf((char *)strbuf, "<SNR>%d_", current_sctx.sc_sid);
		result = strbuf;
		break;
#endif

#ifdef FEAT_CLIENTSERVER
	case SPEC_CLIENT:	// Source of last submitted input
		sprintf((char *)strbuf, PRINTF_HEX_LONG_U,
							(long_u)clientWindow);
		result = strbuf;
		break;
#endif

	default:
		result = (char_u *)"";	// avoid gcc warning
		break;
	}

	resultlen = (int)STRLEN(result);	// length of new string
	if (src[*usedlen] == '<')	// remove the file name extension
	{
	    ++*usedlen;
	    if ((s = vim_strrchr(result, '.')) != NULL && s >= gettail(result))
		resultlen = (int)(s - result);
	}
	else if (!skip_mod)
	{
	    valid |= modify_fname(src, tilde_file, usedlen, &result, &resultbuf,
								  &resultlen);
	    if (result == NULL)
	    {
		*errormsg = "";
		return NULL;
	    }
	}
    }

    // An empty or path-less result is only acceptable with ":p:h" style
    // modifiers; otherwise report (when empty_is_error) and return NULL.
    if (resultlen == 0 || valid != VALID_HEAD + VALID_PATH)
    {
	if (empty_is_error)
	{
	    if (valid != VALID_HEAD + VALID_PATH)
		*errormsg = _(e_empty_file_name_for_percent_or_hash_only_works_with_ph);
	    else
		*errormsg = _(e_evaluates_to_an_empty_string);
	}
	result = NULL;
    }
    else
	result = vim_strnsave(result, resultlen);
    vim_free(resultbuf);
    return result;
}
| 0
|
454,759
|
/*
 * ismt_access() - process an SMBus transaction on the Intel iSMT adapter
 * @adap:       i2c host adapter
 * @addr:       7-bit slave address
 * @flags:      protocol flags (e.g. I2C_CLIENT_PEC)
 * @read_write: I2C_SMBUS_READ or I2C_SMBUS_WRITE
 * @command:    SMBus command byte
 * @size:       SMBus transaction type (I2C_SMBUS_QUICK, ...)
 * @data:       payload in/out
 *
 * Builds a hardware descriptor for the requested transaction, maps the
 * shared DMA buffer when a payload is involved, submits the descriptor,
 * and waits up to one second for the completion interrupt.
 *
 * Fix: the I2C_SMBUS_BLOCK_DATA write path previously used the
 * caller-supplied length byte data->block[0] to size the memcpy() into
 * the fixed-size DMA bounce buffer without validating it, allowing an
 * out-of-bounds write for lengths outside [1, I2C_SMBUS_BLOCK_MAX].
 * The same validation already performed for I2C_SMBUS_BLOCK_PROC_CALL
 * is now applied here as well.
 */
static int ismt_access(struct i2c_adapter *adap, u16 addr,
		       unsigned short flags, char read_write, u8 command,
		       int size, union i2c_smbus_data *data)
{
	int ret;
	unsigned long time_left;
	dma_addr_t dma_addr = 0; /* address of the data buffer */
	u8 dma_size = 0;
	enum dma_data_direction dma_direction = 0;
	struct ismt_desc *desc;
	struct ismt_priv *priv = i2c_get_adapdata(adap);
	struct device *dev = &priv->pci_dev->dev;
	u8 *dma_buffer = PTR_ALIGN(&priv->buffer[0], 16);

	desc = &priv->hw[priv->head];

	/* Initialize the DMA buffer */
	memset(priv->buffer, 0, sizeof(priv->buffer));

	/* Initialize the descriptor */
	memset(desc, 0, sizeof(struct ismt_desc));
	desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);

	/* Always clear the log entries */
	memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));

	/* Initialize common control bits */
	if (likely(pci_dev_msi_enabled(priv->pci_dev)))
		desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
	else
		desc->control = ISMT_DESC_FAIR;

	if ((flags & I2C_CLIENT_PEC) && (size != I2C_SMBUS_QUICK)
	    && (size != I2C_SMBUS_I2C_BLOCK_DATA))
		desc->control |= ISMT_DESC_PEC;

	switch (size) {
	case I2C_SMBUS_QUICK:
		dev_dbg(dev, "I2C_SMBUS_QUICK\n");
		break;

	case I2C_SMBUS_BYTE:
		if (read_write == I2C_SMBUS_WRITE) {
			/*
			 * Send Byte
			 * The command field contains the write data
			 */
			dev_dbg(dev, "I2C_SMBUS_BYTE: WRITE\n");
			desc->control |= ISMT_DESC_CWRL;
			desc->wr_len_cmd = command;
		} else {
			/* Receive Byte */
			dev_dbg(dev, "I2C_SMBUS_BYTE: READ\n");
			dma_size = 1;
			dma_direction = DMA_FROM_DEVICE;
			desc->rd_len = 1;
		}
		break;

	case I2C_SMBUS_BYTE_DATA:
		if (read_write == I2C_SMBUS_WRITE) {
			/*
			 * Write Byte
			 * Command plus 1 data byte
			 */
			dev_dbg(dev, "I2C_SMBUS_BYTE_DATA: WRITE\n");
			desc->wr_len_cmd = 2;
			dma_size = 2;
			dma_direction = DMA_TO_DEVICE;
			dma_buffer[0] = command;
			dma_buffer[1] = data->byte;
		} else {
			/* Read Byte */
			dev_dbg(dev, "I2C_SMBUS_BYTE_DATA: READ\n");
			desc->control |= ISMT_DESC_CWRL;
			desc->wr_len_cmd = command;
			desc->rd_len = 1;
			dma_size = 1;
			dma_direction = DMA_FROM_DEVICE;
		}
		break;

	case I2C_SMBUS_WORD_DATA:
		if (read_write == I2C_SMBUS_WRITE) {
			/* Write Word */
			dev_dbg(dev, "I2C_SMBUS_WORD_DATA: WRITE\n");
			desc->wr_len_cmd = 3;
			dma_size = 3;
			dma_direction = DMA_TO_DEVICE;
			dma_buffer[0] = command;
			dma_buffer[1] = data->word & 0xff;
			dma_buffer[2] = data->word >> 8;
		} else {
			/* Read Word */
			dev_dbg(dev, "I2C_SMBUS_WORD_DATA: READ\n");
			desc->wr_len_cmd = command;
			desc->control |= ISMT_DESC_CWRL;
			desc->rd_len = 2;
			dma_size = 2;
			dma_direction = DMA_FROM_DEVICE;
		}
		break;

	case I2C_SMBUS_PROC_CALL:
		dev_dbg(dev, "I2C_SMBUS_PROC_CALL\n");
		desc->wr_len_cmd = 3;
		desc->rd_len = 2;
		dma_size = 3;
		dma_direction = DMA_BIDIRECTIONAL;
		dma_buffer[0] = command;
		dma_buffer[1] = data->word & 0xff;
		dma_buffer[2] = data->word >> 8;
		break;

	case I2C_SMBUS_BLOCK_DATA:
		if (read_write == I2C_SMBUS_WRITE) {
			/* Block Write */
			dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n");
			/*
			 * Reject out-of-range block lengths before they are
			 * used to size the memcpy() into the bounce buffer.
			 */
			if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
				return -EINVAL;
			dma_size = data->block[0] + 1;
			dma_direction = DMA_TO_DEVICE;
			desc->wr_len_cmd = dma_size;
			desc->control |= ISMT_DESC_BLK;
			dma_buffer[0] = command;
			memcpy(&dma_buffer[1], &data->block[1], dma_size - 1);
		} else {
			/* Block Read */
			dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n");
			dma_size = I2C_SMBUS_BLOCK_MAX;
			dma_direction = DMA_FROM_DEVICE;
			desc->rd_len = dma_size;
			desc->wr_len_cmd = command;
			desc->control |= (ISMT_DESC_BLK | ISMT_DESC_CWRL);
		}
		break;

	case I2C_SMBUS_BLOCK_PROC_CALL:
		dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n");
		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
			return -EINVAL;
		dma_size = I2C_SMBUS_BLOCK_MAX;
		desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1);
		desc->wr_len_cmd = data->block[0] + 1;
		desc->rd_len = dma_size;
		desc->control |= ISMT_DESC_BLK;
		dma_direction = DMA_BIDIRECTIONAL;
		dma_buffer[0] = command;
		memcpy(&dma_buffer[1], &data->block[1], data->block[0]);
		break;

	case I2C_SMBUS_I2C_BLOCK_DATA:
		/* Make sure the length is valid */
		if (data->block[0] < 1)
			data->block[0] = 1;
		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
			data->block[0] = I2C_SMBUS_BLOCK_MAX;
		if (read_write == I2C_SMBUS_WRITE) {
			/* i2c Block Write */
			dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: WRITE\n");
			dma_size = data->block[0] + 1;
			dma_direction = DMA_TO_DEVICE;
			desc->wr_len_cmd = dma_size;
			desc->control |= ISMT_DESC_I2C;
			dma_buffer[0] = command;
			memcpy(&dma_buffer[1], &data->block[1], dma_size - 1);
		} else {
			/* i2c Block Read */
			dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n");
			dma_size = data->block[0];
			dma_direction = DMA_FROM_DEVICE;
			desc->rd_len = dma_size;
			desc->wr_len_cmd = command;
			desc->control |= (ISMT_DESC_I2C | ISMT_DESC_CWRL);
			/*
			 * Per the "Table 15-15. I2C Commands",
			 * in the External Design Specification (EDS),
			 * (Document Number: 508084, Revision: 2.0),
			 * the _rw bit must be 0
			 */
			desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 0);
		}
		break;

	default:
		dev_err(dev, "Unsupported transaction %d\n",
			size);
		return -EOPNOTSUPP;
	}

	/* map the data buffer */
	if (dma_size != 0) {
		dev_dbg(dev, " dev=%p\n", dev);
		dev_dbg(dev, " data=%p\n", data);
		dev_dbg(dev, " dma_buffer=%p\n", dma_buffer);
		dev_dbg(dev, " dma_size=%d\n", dma_size);
		dev_dbg(dev, " dma_direction=%d\n", dma_direction);

		dma_addr = dma_map_single(dev,
					  dma_buffer,
					  dma_size,
					  dma_direction);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Error in mapping dma buffer %p\n",
				dma_buffer);
			return -EIO;
		}

		dev_dbg(dev, " dma_addr = %pad\n", &dma_addr);

		desc->dptr_low = lower_32_bits(dma_addr);
		desc->dptr_high = upper_32_bits(dma_addr);
	}

	reinit_completion(&priv->cmp);

	/* Add the descriptor */
	ismt_submit_desc(priv);

	/* Now we wait for interrupt completion, 1s */
	time_left = wait_for_completion_timeout(&priv->cmp, HZ*1);

	/* unmap the data buffer */
	if (dma_size != 0)
		dma_unmap_single(dev, dma_addr, dma_size, dma_direction);

	if (unlikely(!time_left)) {
		dev_err(dev, "completion wait timed out\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* do any post processing of the descriptor here */
	ret = ismt_process_desc(desc, data, priv, size, read_write);

out:
	/* Update the ring pointer */
	priv->head++;
	priv->head %= ISMT_DESC_ENTRIES;

	return ret;
}
| 0
|
508,784
|
/*
  Read the next row of an index scan.

  Advances the storage-engine cursor via ha_index_next(); on a non-zero
  engine status, translates it through rr_handle_error() before
  returning it to the caller. Returns 0 on success.
*/
static int rr_index(READ_RECORD *info)
{
  int error= info->table->file->ha_index_next(info->record);
  return error ? rr_handle_error(info, error) : error;
}
| 0
|
210,273
|
// Attempt to build a storage-engine random-cursor PlanExecutor for $sample.
// Returns {executor, isOptimized}; a {nullptr, false} pair means the caller
// should fall back to the non-optimized (top-k sort) sampling plan. For
// sharded and time-series collections a TrialStage is placed at the root so
// the optimized plan can be abandoned at runtime if its assumptions (owned
// document ratio / bucket fullness) turn out not to hold.
createRandomCursorExecutor(const CollectionPtr& coll,
                           const boost::intrusive_ptr<ExpressionContext>& expCtx,
                           long long sampleSize,
                           long long numRecords,
                           boost::optional<BucketUnpacker> bucketUnpacker) {
    OperationContext* opCtx = expCtx->opCtx;
    // Verify that we are already under a collection lock. We avoid taking locks ourselves in this
    // function because double-locking forces any PlanExecutor we create to adopt a NO_YIELD policy.
    invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IS));
    static const double kMaxSampleRatioForRandCursor = 0.05;
    // The random-cursor plan only pays off when the sample is a small fraction
    // of the collection; otherwise decline the optimization up front.
    if (!expCtx->ns.isTimeseriesBucketsCollection()) {
        if (sampleSize > numRecords * kMaxSampleRatioForRandCursor || numRecords <= 100) {
            return std::pair{nullptr, false};
        }
    } else {
        // Suppose that a time-series bucket collection is observed to contain 200 buckets, and the
        // 'gTimeseriesBucketMaxCount' parameter is set to 1000. If all buckets are full, then the
        // maximum possible measurment count would be 200 * 1000 = 200,000. While the
        // 'SampleFromTimeseriesBucket' plan is more efficient when the sample size is small
        // relative to the total number of measurements in the time-series collection, for larger
        // sample sizes the top-k sort based sample is faster. Experiments have approximated that
        // the tipping point is roughly when the requested sample size is greater than 1% of the
        // maximum possible number of measurements in the collection (i.e. numBuckets *
        // maxMeasurementsPerBucket).
        static const double kCoefficient = 0.01;
        if (sampleSize > kCoefficient * numRecords * gTimeseriesBucketMaxCount) {
            return std::pair{nullptr, false};
        }
    }
    // Attempt to get a random cursor from the RecordStore.
    auto rsRandCursor = coll->getRecordStore()->getRandomCursor(opCtx);
    if (!rsRandCursor) {
        // The storage engine has no random cursor support.
        return std::pair{nullptr, false};
    }
    // Build a MultiIteratorStage and pass it the random-sampling RecordCursor.
    auto ws = std::make_unique<WorkingSet>();
    std::unique_ptr<PlanStage> root =
        std::make_unique<MultiIteratorStage>(expCtx.get(), ws.get(), coll);
    static_cast<MultiIteratorStage*>(root.get())->addIterator(std::move(rsRandCursor));
    // If the incoming operation is sharded, use the CSS to infer the filtering metadata for the
    // collection, otherwise treat it as unsharded
    auto collectionFilter =
        CollectionShardingState::get(opCtx, coll->ns())
            ->getOwnershipFilter(
                opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
    TrialStage* trialStage = nullptr;
    // Because 'numRecords' includes orphan documents, our initial decision to optimize the $sample
    // cursor may have been mistaken. For sharded collections, build a TRIAL plan that will switch
    // to a collection scan if the ratio of orphaned to owned documents encountered over the first
    // 100 works() is such that we would have chosen not to optimize.
    static const size_t kMaxPresampleSize = 100;
    if (collectionFilter.isSharded() && !expCtx->ns.isTimeseriesBucketsCollection()) {
        // The ratio of owned to orphaned documents must be at least equal to the ratio between the
        // requested sampleSize and the maximum permitted sampleSize for the original constraints to
        // be satisfied. For instance, if there are 200 documents and the sampleSize is 5, then at
        // least (5 / (200*0.05)) = (5/10) = 50% of those documents must be owned. If less than 5%
        // of the documents in the collection are owned, we default to the backup plan.
        const auto minAdvancedToWorkRatio = std::max(
            sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor);
        // The trial plan is SHARDING_FILTER-MULTI_ITERATOR.
        auto randomCursorPlan = std::make_unique<ShardFilterStage>(
            expCtx.get(), collectionFilter, ws.get(), std::move(root));
        // The backup plan is SHARDING_FILTER-COLLSCAN.
        std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
            expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
        collScanPlan = std::make_unique<ShardFilterStage>(
            expCtx.get(), collectionFilter, ws.get(), std::move(collScanPlan));
        // Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans.
        root = std::make_unique<TrialStage>(expCtx.get(),
                                            ws.get(),
                                            std::move(randomCursorPlan),
                                            std::move(collScanPlan),
                                            kMaxPresampleSize,
                                            minAdvancedToWorkRatio);
        trialStage = static_cast<TrialStage*>(root.get());
    } else if (expCtx->ns.isTimeseriesBucketsCollection()) {
        // We can't take ARHASH optimization path for a direct $sample on the system.buckets
        // collection because data is in compressed form. If we did have a direct $sample on the
        // system.buckets collection, then the 'bucketUnpacker' would not be set up properly. We
        // also should bail out early if a $sample is made against a time series collection that is
        // empty. If we don't the 'minAdvancedToWorkRatio' can be nan/-nan depending on the
        // architecture.
        if (!(bucketUnpacker && numRecords)) {
            return std::pair{nullptr, false};
        }
        // Use a 'TrialStage' to run a trial between 'SampleFromTimeseriesBucket' and
        // 'UnpackTimeseriesBucket' with $sample left in the pipeline in-place. If the buckets are
        // not sufficiently full, or the 'SampleFromTimeseriesBucket' plan draws too many
        // duplicates, then we will fall back to the 'TrialStage' backup plan. This backup plan uses
        // the top-k sort sampling approach.
        //
        // Suppose the 'gTimeseriesBucketMaxCount' is 1000, but each bucket only contains 500
        // documents on average. The observed trial advanced/work ratio approximates the average
        // bucket fullness, noted here as "abf". In this example, abf = 500 / 1000 = 0.5.
        // Experiments have shown that the optimized 'SampleFromTimeseriesBucket' algorithm performs
        // better than backup plan when
        //
        //     sampleSize < 0.02 * abf * numRecords * gTimeseriesBucketMaxCount
        //
        // This inequality can be rewritten as
        //
        //     abf > sampleSize / (0.02 * numRecords * gTimeseriesBucketMaxCount)
        //
        // Therefore, if the advanced/work ratio exceeds this threshold, we will use the
        // 'SampleFromTimeseriesBucket' plan. Note that as the sample size requested by the user
        // becomes larger with respect to the number of buckets, we require a higher advanced/work
        // ratio in order to justify using 'SampleFromTimeseriesBucket'.
        //
        // Additionally, we require the 'TrialStage' to approximate the abf as at least 0.25. When
        // buckets are mostly empty, the 'SampleFromTimeseriesBucket' will be inefficient due to a
        // lot of sampling "misses".
        static const auto kCoefficient = 0.02;
        static const auto kMinBucketFullness = 0.25;
        const auto minAdvancedToWorkRatio = std::max(
            std::min(sampleSize / (kCoefficient * numRecords * gTimeseriesBucketMaxCount), 1.0),
            kMinBucketFullness);
        auto arhashPlan = std::make_unique<SampleFromTimeseriesBucket>(
            expCtx.get(),
            ws.get(),
            std::move(root),
            *bucketUnpacker,
            // By using a quantity slightly higher than 'kMaxPresampleSize', we ensure that the
            // 'SampleFromTimeseriesBucket' stage won't fail due to too many consecutive sampling
            // attempts during the 'TrialStage's trial period.
            kMaxPresampleSize + 5,
            sampleSize,
            gTimeseriesBucketMaxCount);
        std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
            expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
        auto topkSortPlan = std::make_unique<UnpackTimeseriesBucket>(
            expCtx.get(), ws.get(), std::move(collScanPlan), *bucketUnpacker);
        root = std::make_unique<TrialStage>(expCtx.get(),
                                            ws.get(),
                                            std::move(arhashPlan),
                                            std::move(topkSortPlan),
                                            kMaxPresampleSize,
                                            minAdvancedToWorkRatio);
        trialStage = static_cast<TrialStage*>(root.get());
    }
    auto execStatus = plan_executor_factory::make(expCtx,
                                                  std::move(ws),
                                                  std::move(root),
                                                  &coll,
                                                  opCtx->inMultiDocumentTransaction()
                                                      ? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY
                                                      : PlanYieldPolicy::YieldPolicy::YIELD_AUTO,
                                                  QueryPlannerParams::RETURN_OWNED_DATA);
    if (!execStatus.isOK()) {
        return execStatus.getStatus();
    }
    // For sharded collections, the root of the plan tree is a TrialStage that may have chosen
    // either a random-sampling cursor trial plan or a COLLSCAN backup plan. We can only optimize
    // the $sample aggregation stage if the trial plan was chosen.
    return std::pair{std::move(execStatus.getValue()),
                     !trialStage || !trialStage->pickedBackupPlan()};
}
| 1
|
446,401
|
/**
 * Append the local (unexported) symbols of a dyld cache binary image to \p symbols.
 *
 * Reads the image's slice of the cache's local-symbol nlist table, resolves
 * each entry's name from the cache string pool, and appends an RzBinSymbol
 * per unique address. \p hash is used to deduplicate addresses across calls;
 * entries already present are skipped.
 *
 * Returns silently (no symbols added) when the cache has no local-symbol
 * info, the image's nlist range is malformed, or I/O fails.
 */
RZ_API void rz_dyldcache_symbols_from_locsym(RzDyldCache *cache, RzDyldBinImage *bin, RzList *symbols, SetU *hash) {
	RzDyldLocSym *locsym = cache->locsym;
	if (!locsym) {
		return;
	}
	/* Range-check the image's slice of the nlist table before reading it. */
	if (bin->nlist_start_index >= locsym->nlists_count ||
		bin->nlist_start_index + bin->nlist_count > locsym->nlists_count) {
		RZ_LOG_ERROR("dyldcache: malformed local symbol entry\n");
		return;
	}
	ut64 nlists_size = sizeof(struct MACH0_(nlist)) * bin->nlist_count;
	struct MACH0_(nlist) *nlists = RZ_NEWS0(struct MACH0_(nlist), bin->nlist_count);
	if (!nlists) {
		return;
	}
	ut64 nlists_offset = locsym->local_symbols_offset + locsym->nlists_offset +
		bin->nlist_start_index * sizeof(struct MACH0_(nlist));
	/* "iccsl" describes the nlist field layout for endian-aware reading. */
	if (rz_buf_fread_at(cache->buf, nlists_offset, (ut8 *)nlists, "iccsl", bin->nlist_count) != nlists_size) {
		free(nlists);
		return;
	}
	ut32 j;
	for (j = 0; j != bin->nlist_count; j++) {
		struct MACH0_(nlist) *nlist = &nlists[j];
		/* Skip addresses already emitted (deduplicated via the caller's set). */
		if (set_u_contains(hash, (ut64)nlist->n_value)) {
			continue;
		}
		set_u_add(hash, (ut64)nlist->n_value);
		/* Guard against string-table offsets past the end of the pool. */
		if (nlist->n_strx >= locsym->strings_size) {
			continue;
		}
		RzBinSymbol *sym = RZ_NEW0(RzBinSymbol);
		if (!sym) {
			break;
		}
		sym->type = "LOCAL";
		sym->vaddr = nlist->n_value;
		ut64 slide = rz_dyldcache_get_slide(cache);
		sym->paddr = va2pa(nlist->n_value, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL);
		char *symstr = rz_buf_get_string(cache->buf, locsym->local_symbols_offset + locsym->strings_offset + nlist->n_strx);
		if (symstr) {
			sym->name = symstr;
		} else {
			/* Fall back to a synthetic unique name when the string is unreadable. */
			static ut32 k = 0;
			sym->name = rz_str_newf("unk_local%d", k++);
		}
		rz_list_append(symbols, sym);
	}
	free(nlists);
}
| 0
|
274,872
|
// Verifies the GREATER comparison op on quantized uint8 inputs whose two
// tensors use different quantization ranges ([0,1] vs [0,2]); the comparison
// must be performed on the dequantized values, not the raw uint8 codes.
TEST(ComparisonsTest, GreaterQuantizedSmallRange) {
  ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, 0.0, 1.0},
                          {TensorType_UINT8, {1, 2, 2, 1}, 0.0, 2.0},
                          TensorType_UINT8, BuiltinOperator_GREATER);
  model.QuantizeAndPopulate<uint8_t>(model.input1(), {1.0, 0.5, 0.35, 0.1});
  model.QuantizeAndPopulate<uint8_t>(model.input2(), {1.01, 0.25, 0.3, 0.4});
  model.Invoke();
  // Element-wise: 1.0>1.01 F, 0.5>0.25 T, 0.35>0.3 T, 0.1>0.4 F.
  EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
}
| 0
|
436,125
|
*/
static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_timeout_rem *tr = &req->timeout_rem;
struct io_ring_ctx *ctx = req->ctx;
int ret;
spin_lock_irq(&ctx->completion_lock);
if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
ret = io_timeout_cancel(ctx, tr->addr);
else
ret = io_timeout_update(ctx, tr->addr, &tr->ts,
io_translate_timeout_mode(tr->flags));
io_cqring_fill_event(ctx, req->user_data, ret, 0);
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
if (ret < 0)
req_set_fail(req);
io_put_req(req);
return 0;
| 0
|
359,513
|
/* vty command handler: "no bgp log-neighbor-changes".
 * Clears the per-instance flag that enables logging of neighbor
 * up/down transitions and reset reasons. */
DEFUN (no_bgp_log_neighbor_changes,
       no_bgp_log_neighbor_changes_cmd,
       "no bgp log-neighbor-changes",
       NO_STR
       "BGP specific commands\n"
       "Log neighbor up/down and reset reason\n")
{
  struct bgp *bgp;
  /* The BGP instance this vty session is configuring. */
  bgp = vty->index;
  bgp_flag_unset (bgp, BGP_FLAG_LOG_NEIGHBOR_CHANGES);
  return CMD_SUCCESS;
}
| 0
|
512,408
|
/* Delegate result-set metadata (Send_field) construction to the wrapped
   item, keeping this wrapper transparent to the client protocol layer. */
virtual void make_send_field(THD *thd, Send_field *field)
{ orig_item->make_send_field(thd, field); }
| 0
|
450,386
|
/* Begin a ZRLE-encoded update: reset the ZRLE staging buffer and swap it in
 * as the connection's output buffer so the encoded data can be compressed
 * afterwards. The real output buffer is stashed in zrle->tmp and restored
 * by the matching stop routine (outside this view — confirm). */
static void vnc_zrle_start(VncState *vs)
{
    buffer_reset(&vs->zrle->zrle);

    /* make the output buffer be the zlib buffer, so we can compress it later */
    vs->zrle->tmp = vs->output;
    vs->output = vs->zrle->zrle;
}
| 0
|
229,309
|
/// Build a CQL ERROR response for an Unavailable exception: error code,
/// message, the consistency level that could not be met, the number of
/// replicas required, and the number currently alive — in the field order
/// mandated by the CQL binary protocol.
std::unique_ptr<cql_server::response> cql_server::connection::make_unavailable_error(int16_t stream, exceptions::exception_code err, sstring msg, db::consistency_level cl, int32_t required, int32_t alive, const tracing::trace_state_ptr& tr_state) const
{
    auto response = std::make_unique<cql_server::response>(stream, cql_binary_opcode::ERROR, tr_state);
    response->write_int(static_cast<int32_t>(err));
    response->write_string(msg);
    response->write_consistency(cl);
    response->write_int(required);
    response->write_int(alive);
    return response;
}
| 0
|
462,574
|
// Mark every item of the feed at position `pos` as read, both in the local
// cache and (for non-query feeds) at the remote API. Out-of-range positions
// are ignored. Note the lock order: feeds_mutex is held for the whole
// operation, the feed's item_mutex only for the final in-memory update.
void controller::mark_all_read(unsigned int pos) {
	if (pos < feeds.size()) {
		scope_measure m("controller::mark_all_read");
		std::lock_guard<std::mutex> feedslock(feeds_mutex);
		std::shared_ptr<rss_feed> feed = feeds[pos];
		// "query:" feeds are virtual; they are only marked read in the cache.
		if (feed->rssurl().substr(0,6) == "query:") {
			rsscache->mark_all_read(feed);
		} else {
			rsscache->mark_all_read(feed->rssurl());
			if (api) {
				api->mark_all_read(feed->rssurl());
			}
		}
		m.stopover("after rsscache->mark_all_read, before iteration over items");
		std::lock_guard<std::mutex> lock(feed->item_mutex);
		std::vector<std::shared_ptr<rss_item>>& items = feed->items();
		if (items.size() > 0) {
			// Items belonging to a different feed URL (aggregated/query view)
			// need their original feed notified of the state change.
			bool notify = items[0]->feedurl() != feed->rssurl();
			LOG(level::DEBUG, "controller::mark_all_read: notify = %s", notify ? "yes" : "no");
			for (auto item : items) {
				item->set_unread_nowrite_notify(false, notify);
			}
		}
	}
}
| 0
|
384,770
|
/*
 * "screenpos({winid}, {lnum}, {col})" function.
 * Returns a Dict with the screen position (row, col, curscol, endcol) of the
 * given text position in the given window; the dict stays empty (zeroes are
 * never added) when the window is not found or argument checks fail.
 */
    f_screenpos(typval_T *argvars UNUSED, typval_T *rettv)
{
    dict_T	*dict;
    win_T	*wp;
    pos_T	pos;
    int		row = 0;
    int		scol = 0, ccol = 0, ecol = 0;

    if (rettv_dict_alloc(rettv) != OK)
	return;
    dict = rettv->vval.v_dict;

    // In Vim9 script all three arguments must be numbers.
    if (in_vim9script()
	    && (check_for_number_arg(argvars, 0) == FAIL
		|| check_for_number_arg(argvars, 1) == FAIL
		|| check_for_number_arg(argvars, 2) == FAIL))
	return;

    wp = find_win_by_nr_or_id(&argvars[0]);
    if (wp == NULL)
	return;

    pos.lnum = tv_get_number(&argvars[1]);
    // User columns are 1-based; internal columns are 0-based.
    pos.col = tv_get_number(&argvars[2]) - 1;
    pos.coladd = 0;
    textpos2screenpos(wp, &pos, &row, &scol, &ccol, &ecol);

    dict_add_number(dict, "row", row);
    dict_add_number(dict, "col", scol);
    dict_add_number(dict, "curscol", ccol);
    dict_add_number(dict, "endcol", ecol);
}
| 0
|
353,162
|
// Finish rendering a Type 3 glyph. If the glyph was being rendered into a
// cache slot, copy the rendered bitmap into the cache, restore the original
// bitmap/splash/CTM that were swapped out when the glyph started, and draw
// the now-cached glyph. Finally pop the Type 3 glyph stack entry.
void SplashOutputDev::endType3Char(GfxState *state) {
  T3GlyphStack *t3gs;

  if (t3GlyphStack->cacheTag) {
    --nestCount;
    // Store the freshly rendered glyph bitmap into its cache slot.
    memcpy(t3GlyphStack->cacheData, bitmap->getDataPtr(),
	   t3GlyphStack->cache->glyphSize);
    // Restore the rendering targets that beginType3Char swapped out
    // (presumably — the swap happens outside this view; confirm).
    delete bitmap;
    delete splash;
    bitmap = t3GlyphStack->origBitmap;
    splash = t3GlyphStack->origSplash;
    const double *ctm = state->getCTM();
    state->setCTM(ctm[0], ctm[1], ctm[2], ctm[3],
		  t3GlyphStack->origCTM4, t3GlyphStack->origCTM5);
    updateCTM(state, 0, 0, 0, 0, 0, 0);
    drawType3Glyph(state, t3GlyphStack->cache,
		   t3GlyphStack->cacheTag, t3GlyphStack->cacheData);
  }
  // Pop the glyph stack.
  t3gs = t3GlyphStack;
  t3GlyphStack = t3gs->next;
  delete t3gs;
}
| 0
|
427,186
|
/*
** Search the active local variables of function 'fs' for a variable named
** 'n', scanning from innermost to outermost so inner declarations shadow
** outer ones. On success initializes 'var' (as VCONST for compile-time
** constants, as a regular local otherwise) and returns its expression
** kind; returns -1 if not found.
*/
static int searchvar (FuncState *fs, TString *n, expdesc *var) {
  int i;
  for (i = cast_int(fs->nactvar) - 1; i >= 0; i--) {
    Vardesc *vd = getlocalvardesc(fs, i);
    if (eqstr(n, vd->vd.name)) {  /* found? */
      if (vd->vd.kind == RDKCTC)  /* compile-time constant? */
        init_exp(var, VCONST, fs->firstlocal + i);
      else  /* real variable */
        init_var(fs, var, i);
      return var->k;
    }
  }
  return -1;  /* not found */
}
| 0
|
218,799
|
/* Draw a matte widget with a beveled border: first the bevel frame,
   then the matte fill on top of it. */
static void XDrawBeveledMatte(Display *display,const XWindowInfo *window_info,
  const XWidgetInfo *matte_info)
{
  /*
    Draw matte.
  */
  XDrawBevel(display,window_info,matte_info);
  XDrawMatte(display,window_info,matte_info);
}
| 0
|
447,073
|
// Read up to rcount bytes from the open file into buf.
// Returns the number of bytes actually read; returns 0 if the stream
// cannot be switched to read mode (callers cannot distinguish this from
// a genuine zero-byte read).
long FileIo::read(byte* buf, long rcount)
{
    assert(p_->fp_ != 0);
    if (p_->switchMode(Impl::opRead) != 0) return 0;
    return (long)std::fread(buf, 1, rcount, p_->fp_);
}
| 0
|
254,720
|
/*
 * TypedArray.prototype.byteOffset / DataView.prototype.byteOffset getter.
 * Sets vm->retval to the view's byte offset into its underlying buffer.
 * Per spec: a TypedArray over a detached buffer reports 0, while a
 * DataView over a detached buffer raises a TypeError.
 */
njs_typed_array_prototype_byte_offset(njs_vm_t *vm, njs_value_t *args,
    njs_uint_t nargs, njs_index_t unused)
{
    size_t             byte_offset;
    njs_value_t        *this;
    njs_typed_array_t  *array;

    this = njs_argument(args, 0);
    if (!njs_is_typed_array(this) && !njs_is_data_view(this)) {
        njs_type_error(vm, "Method TypedArray.prototype.byteOffset called "
                       "on incompatible receiver");
        return NJS_ERROR;
    }

    array = njs_typed_array(this);
    byte_offset = njs_typed_array_offset(array);

    if (njs_slow_path(njs_is_detached_buffer(array->buffer))) {
        if (njs_is_data_view(this)) {
            njs_type_error(vm, "detached buffer");
            return NJS_ERROR;
        }

        byte_offset = 0;
    }

    njs_set_number(&vm->retval, byte_offset);

    return NJS_OK;
}
| 0
|
346,432
|
/*
 * Return TRUE if the command "eap" is being read from a sourced script,
 * i.e. its line-fetch function is getsourceline().
 */
    sourcing_a_script(exarg_T *eap)
{
    return (getline_equal(eap->getline, eap->cookie, getsourceline));
}
| 0
|
384,294
|
/* Allocate a byte array of num_elements * elt_size bytes from the heap.
 * The size is computed in ulong and rejected (returns 0) if it does not
 * fit in a uint, guarding against multiplication overflow before the
 * narrowing cast passed to gs_heap_alloc_bytes(). */
gs_heap_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
                         client_name_t cname)
{
    ulong lsize = (ulong) num_elements * elt_size;

    if (lsize != (uint) lsize)
        return 0;
    return gs_heap_alloc_bytes(mem, (uint) lsize, cname);
}
| 0
|
369,283
|
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)
#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
BUILD_BUG_SQE_ELEM(0, __u8, opcode);
BUILD_BUG_SQE_ELEM(1, __u8, flags);
BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
BUILD_BUG_SQE_ELEM(4, __s32, fd);
BUILD_BUG_SQE_ELEM(8, __u64, off);
BUILD_BUG_SQE_ELEM(8, __u64, addr2);
BUILD_BUG_SQE_ELEM(16, __u64, addr);
BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
BUILD_BUG_SQE_ELEM(24, __u32, len);
BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
BUILD_BUG_SQE_ELEM(32, __u64, user_data);
BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
BUILD_BUG_SQE_ELEM(42, __u16, personality);
BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
BUILD_BUG_SQE_ELEM(44, __u32, file_index);
BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
sizeof(struct io_uring_rsrc_update));
BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
sizeof(struct io_uring_rsrc_update2));
/* ->buf_index is u16 */
BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
/* should fit into one byte */
BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
SLAB_ACCOUNT);
return 0;
| 0
|
270,395
|
// Release an inflater and every buffer it owns via the caller-supplied
// allocator. The allocator struct is copied out first because the inflater
// itself is freed last. Safe to call with NULL.
void ok_inflater_free(ok_inflater *inflater) {
    if (inflater) {
        ok_png_allocator allocator = inflater->allocator;
        void *allocator_user_data = inflater->allocator_user_data;
        allocator.free(allocator_user_data, inflater->buffer);
        allocator.free(allocator_user_data, inflater->code_length_huffman);
        allocator.free(allocator_user_data, inflater->literal_huffman);
        allocator.free(allocator_user_data, inflater->distance_huffman);
        allocator.free(allocator_user_data, inflater->fixed_literal_huffman);
        allocator.free(allocator_user_data, inflater->fixed_distance_huffman);
        allocator.free(allocator_user_data, inflater);
    }
}
| 0
|
281,621
|
// Resample the image vertically or horizontally so its pixel aspect ratio
// becomes 1:1 (no-op when it already is). Rows (aspect < 1) or columns
// (aspect > 1) are rebuilt by linear interpolation between the two nearest
// source lines; the old image buffer is replaced in place.
void CLASS stretch()
{
  ushort newdim, (*img)[4], *pix0, *pix1;
  int row, col, c;
  double rc, frac;

  if (pixel_aspect == 1) return;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2);
#endif
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Stretching the image...\n"));
#endif
  if (pixel_aspect < 1) {
    // Pixels are wider than tall: grow the row count.
    newdim = height / pixel_aspect + 0.5;
    img = (ushort (*)[4]) calloc (width, newdim*sizeof *img);
    merror (img, "stretch()");
    for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
      // c = integer source row, frac = interpolation weight toward row c+1.
      frac = rc - (c = rc);
      pix0 = pix1 = image[c*width];
      if (c+1 < height) pix1 += width*4;
      for (col=0; col < width; col++, pix0+=4, pix1+=4)
	FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
    }
    height = newdim;
  } else {
    // Pixels are taller than wide: grow the column count.
    newdim = width * pixel_aspect + 0.5;
    img = (ushort (*)[4]) calloc (height, newdim*sizeof *img);
    merror (img, "stretch()");
    for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
      frac = rc - (c = rc);
      pix0 = pix1 = image[c];
      if (c+1 < width) pix1 += 4;
      for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
	FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
    }
    width = newdim;
  }
  free (image);
  image = img;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2);
#endif
}
| 0
|
337,796
|
/* Append 'len' bytes of parameter data to an SCTP chunk's skb: copies
 * 'data' when non-NULL, otherwise zero-fills. Updates the chunk header's
 * length field and the chunk-end pointer. Returns a pointer to the
 * appended region. The caller must ensure the skb has 'len' bytes of
 * tailroom (skb_put() would BUG otherwise). */
static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
			      const void *data)
{
	int chunklen = ntohs(chunk->chunk_hdr->length);
	void *target;

	target = skb_put(chunk->skb, len);

	if (data)
		memcpy(target, data, len);
	else
		memset(target, 0, len);

	/* Adjust the chunk length field.  */
	chunk->chunk_hdr->length = htons(chunklen + len);
	chunk->chunk_end = skb_tail_pointer(chunk->skb);

	return target;
}
| 0
|
455,323
|
/* Sanity-check that /tmp exists and is a directory on Cygwin; emit a
   non-fatal warning otherwise (programs that rely on /tmp would fail
   later with harder-to-diagnose errors). */
_cygwin32_check_tmp ()
{
  struct stat sb;

  if (stat ("/tmp", &sb) < 0)
    internal_warning (_("could not find /tmp, please create!"));
  else
    {
      if (S_ISDIR (sb.st_mode) == 0)
	internal_warning (_("/tmp must be a valid directory name"));
    }
}
| 0
|
246,709
|
/* Parse the argument of one of MP4Box's -split* options and set the global
 * split parameters (split_duration, split_size, split_start, ...).
 * 'opt' selects which option variant is being parsed (see the case labels).
 * Returns 0 on success, 2 on a malformed chunk-extraction argument. */
u32 parse_split(char *arg_val, u32 opt)
{
	switch (opt) {
	case 0://-split
		split_duration = atof(arg_val);
		if (split_duration < 0) split_duration = 0;
		split_size = 0;
		break;
	case 1: //-split-rap, -splitr
		/* Split at random-access points: both sentinels set to -1. */
		split_duration = -1;
		split_size = -1;
		break;
	case 2: //-split-size, -splits
		split_size = (u32)atoi(arg_val);
		split_duration = 0;
		break;
	case 3: //-split-chunk, -splitx
	case 4: //-splitz
	case 5: //-splitg
	case 6: //-splitf
		/* Encode which -split* variant was used (0..3) for later
		 * boundary adjustment. */
		adjust_split_end = opt-3;
		if (!strstr(arg_val, ":") && !strstr(arg_val, "-")) {
			M4_LOG(GF_LOG_ERROR, ("Chunk extraction usage: \"-split* start:end\" expressed in seconds\n"));
			return 2;
		}
		if (strstr(arg_val, "end")) {
			/* "start:end" or "start:end-D": negative duration values
			 * below are sentinels meaning "until the end [minus D]". */
			if (strstr(arg_val, "end-")) {
				Double dur_end=0;
				sscanf(arg_val, "%lf:end-%lf", &split_start, &dur_end);
				split_duration = -2 - dur_end;
			} else {
				sscanf(arg_val, "%lf:end", &split_start);
				split_duration = -2;
			}
		} else {
			split_range_str = arg_val;
		}
		split_size = 0;
		break;
	}
	return 0;
}
| 0
|
222,573
|
// Returns the node's name for output 0, otherwise "name:<output_index>".
string Name(int node_index, int output_index) const {
  return output_index == 0
             ? Name(node_index)
             : strings::StrCat(Name(node_index), ":", output_index);
}
| 0
|
289,328
|
/* Constrain the hw parameter 'var' to be at most 'val' (with sub-unit
 * direction 'dir': <0 means strictly less, >0 shifts the bound up by one
 * and makes it open). Applies to both mask- and interval-valued params.
 * On any change, marks the parameter in cmask/rmask so dependent params
 * are re-refined. Returns the refine result, or -EINVAL on a bad 'var'
 * or an empty mask. */
static int _snd_pcm_hw_param_max(struct snd_pcm_hw_params *params,
				 snd_pcm_hw_param_t var, unsigned int val,
				 int dir)
{
	int changed;
	int open = 0;

	if (dir) {
		if (dir < 0) {
			open = 1;
		} else if (dir > 0) {
			open = 1;
			val++;
		}
	}
	if (hw_is_mask(var)) {
		/* An open bound of 0 would exclude every value. */
		if (val == 0 && open) {
			snd_mask_none(hw_param_mask(params, var));
			changed = -EINVAL;
		} else
			changed = snd_mask_refine_max(hw_param_mask(params, var),
						      val - !!open);
	} else if (hw_is_interval(var))
		changed = snd_interval_refine_max(hw_param_interval(params, var),
						  val, open);
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}
| 0
|
450,426
|
/* Encode and send one sub-rectangle of a Tight-encoded framebuffer update.
 * Counts the rectangle's color palette, then chooses between JPEG (lossy,
 * when built with CONFIG_VNC_JPEG and the client's quality setting and the
 * region's update frequency permit) and the lossless palette/gradient
 * encoders. Returns the sub-encoder's result. */
static int send_sub_rect(VncState *vs, int x, int y, int w, int h)
{
    uint32_t bg = 0, fg = 0;
    int colors;
    int ret = 0;
#ifdef CONFIG_VNC_JPEG
    bool force_jpeg = false;
    bool allow_jpeg = true;
#endif

    /* Lazily allocate the shared palette scratch buffer; freed at exit. */
    if (!color_count_palette) {
        color_count_palette = g_malloc(sizeof(VncPalette));
        vnc_tight_cleanup_notifier.notify = vnc_tight_cleanup;
        qemu_thread_atexit_add(&vnc_tight_cleanup_notifier);
    }

    vnc_framebuffer_update(vs, x, y, w, h, vs->tight->type);

    /* Capture the raw pixels of this rect into the tight staging buffer. */
    vnc_tight_start(vs);
    vnc_raw_send_framebuffer_update(vs, x, y, w, h);
    vnc_tight_stop(vs);

#ifdef CONFIG_VNC_JPEG
    if (!vs->vd->non_adaptive && vs->tight->quality != (uint8_t)-1) {
        double freq = vnc_update_freq(vs, x, y, w, h);

        /* Rarely-updated regions stay lossless; rapidly-updated ones are
         * forced to JPEG and remembered as lossy for later refresh. */
        if (freq < tight_jpeg_conf[vs->tight->quality].jpeg_freq_min) {
            allow_jpeg = false;
        }
        if (freq >= tight_jpeg_conf[vs->tight->quality].jpeg_freq_threshold) {
            force_jpeg = true;
            vnc_sent_lossy_rect(vs, x, y, w, h);
        }
    }
#endif

    colors = tight_fill_palette(vs, x, y, w * h, &bg, &fg, color_count_palette);

#ifdef CONFIG_VNC_JPEG
    if (allow_jpeg && vs->tight->quality != (uint8_t)-1) {
        ret = send_sub_rect_jpeg(vs, x, y, w, h, bg, fg, colors,
                                 color_count_palette, force_jpeg);
    } else {
        ret = send_sub_rect_nojpeg(vs, x, y, w, h, bg, fg, colors,
                                   color_count_palette);
    }
#else
    ret = send_sub_rect_nojpeg(vs, x, y, w, h, bg, fg, colors,
                               color_count_palette);
#endif

    return ret;
}
| 0
|
231,738
|
// Test hook: install a FakeServerHandshake on the server connection so
// tests can drive the handshake without a real fizz crypto exchange.
// NOTE(review): 'fakeHandshake' appears to be a raw member whose ownership
// is transferred elsewhere — confirm who deletes it.
virtual void initializeServerHandshake() {
  fakeHandshake = new FakeServerHandshake(
      server->getNonConstConn(), getFizzServerContext());
}
| 0
|
439,164
|
/*
 * ReadAAIImage() reads an AAI Dune image file (little-endian width/height
 * header followed by rows of B,G,R,A bytes) and returns the decoded image
 * list, or NULL (with `exception` set) on failure.
 *
 * Fix: on AcquireNextImage() failure the original returned NULL without
 * destroying the list already built, leaking every previously decoded
 * frame.  The list is now destroyed before returning.
 */
static Image *ReadAAIImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    x;

  register PixelPacket
    *q;

  register unsigned char
    *p;

  size_t
    height,
    length,
    width;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read AAI Dune image.
  */
  width=ReadBlobLSBLong(image);
  height=ReadBlobLSBLong(image);
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if ((width == 0UL) || (height == 0UL))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  do
  {
    /*
      Convert AAI raster image to pixel packets.
    */
    image->columns=width;
    image->rows=height;
    image->depth=8;
    if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
      if (image->scene >= (image_info->scene+image_info->number_scenes-1))
        break;
    status=SetImageExtent(image,image->columns,image->rows);
    if (status == MagickFalse)
      {
        InheritException(exception,&image->exception);
        return(DestroyImageList(image));
      }
    pixels=(unsigned char *) AcquireQuantumMemory(image->columns,
      4*sizeof(*pixels));
    if (pixels == (unsigned char *) NULL)
      ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
    length=(size_t) 4*image->columns;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      count=ReadBlob(image,length,pixels);
      if ((size_t) count != length)
        {
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          ThrowReaderException(CorruptImageError,"UnableToReadImageData");
        }
      p=pixels;
      q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        /* AAI stores pixels as B,G,R,A. */
        SetPixelBlue(q,ScaleCharToQuantum(*p++));
        SetPixelGreen(q,ScaleCharToQuantum(*p++));
        SetPixelRed(q,ScaleCharToQuantum(*p++));
        /* Alpha 254 is promoted to fully opaque (format quirk). */
        if (*p == 254)
          *p=255;
        SetPixelAlpha(q,ScaleCharToQuantum(*p++));
        if (q->opacity != OpaqueOpacity)
          image->matte=MagickTrue;
        q++;
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        break;
      if (image->previous == (Image *) NULL)
        {
          status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
            image->rows);
          if (status == MagickFalse)
            break;
        }
    }
    pixels=(unsigned char *) RelinquishMagickMemory(pixels);
    if (EOFBlob(image) != MagickFalse)
      {
        ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
          image->filename);
        break;
      }
    /*
      Proceed to next image.
    */
    if (image_info->number_scenes != 0)
      if (image->scene >= (image_info->scene+image_info->number_scenes-1))
        break;
    width=ReadBlobLSBLong(image);
    height=ReadBlobLSBLong(image);
    if ((width != 0UL) && (height != 0UL))
      {
        /*
          Allocate next image structure.
        */
        AcquireNextImage(image_info,image);
        if (GetNextImageInList(image) == (Image *) NULL)
          {
            /*
              Destroy the whole list before bailing out; returning NULL
              without freeing leaked every previously decoded frame.
            */
            image=DestroyImageList(image);
            return((Image *) NULL);
          }
        image=SyncNextImageInList(image);
        status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
          GetBlobSize(image));
        if (status == MagickFalse)
          break;
      }
  } while ((width != 0UL) && (height != 0UL));
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
| 0
|
234,250
|
/* Print VALUE as a DWARF location "view" number ("v<hex> "), padded to the
   column width implied by NUM_BYTES (2 hex digits per byte, default 4 when
   NUM_BYTES is 0).  Zero views print as blanks unless FORCE is set.  */
print_dwarf_view (dwarf_vma value, unsigned num_bytes, int force)
{
  int len;
  if (!num_bytes)
    len = 4;
  else
    len = num_bytes * 2;
  /* Views are expected to fit an unsigned long; the cast below truncates
     otherwise, so fail loudly in debug builds.  */
  assert (value == (unsigned long) value);
  if (value || force)
    printf ("v%0*lx ", len - 1, (unsigned long) value);
  else
    /* Same column width as the printed form above, plus trailing space.  */
    printf ("%*s", len + 1, "");
}
| 0
|
245,178
|
/* Run SHOW ENGINE INNODB STATUS on `connection` and scan its text output
   for the "Log flushed up to <lsn>" line, recording the LSN into the
   global backup_redo_log_flushed_lsn.  Result column layout is the usual
   3-column SHOW ENGINE output; column 2 holds the status text.  */
parse_show_engine_innodb_status(MYSQL *connection)
{
	MYSQL_RES *mysql_result;
	MYSQL_ROW row;
	mysql_result = xb_mysql_query(connection, "SHOW ENGINE INNODB STATUS",
				      true);
	ut_ad(mysql_num_fields(mysql_result) == 3);
	if ((row = mysql_fetch_row(mysql_result))) {
		std::stringstream data(row[2]);
		std::string line;
		/* The status blob is multi-line; scan each line for the
		   flushed-LSN marker. */
		while (std::getline(data, line)) {
			lsn_t lsn;
			if (sscanf(line.c_str(), "Log flushed up to " LSN_PF,
				   &lsn) == 1) {
				backup_redo_log_flushed_lsn = lsn;
			}
		}
	}
	mysql_free_result(mysql_result);
}
| 0
|
276,442
|
// Emits the bookkeeping state of a boosted-trees ensemble resource as five
// scalar/vector outputs: stamp token, tree count, finalized-tree count,
// attempted-layer count, and the [start, end) node range of the last layer.
void Compute(OpKernelContext* context) override {
  // Looks up the resource.
  core::RefCountPtr<BoostedTreesEnsembleResource> tree_ensemble_resource;
  OP_REQUIRES_OK(context, LookupResource(context, HandleFromInput(context, 0),
                                         &tree_ensemble_resource));
  // Shared (read) lock: this op only inspects the ensemble.
  tf_shared_lock l(*tree_ensemble_resource->get_mutex());
  // Sets the outputs.
  const int num_trees = tree_ensemble_resource->num_trees();
  // The last tree only counts as finalized if its final layer was built.
  const int num_finalized_trees =
      (num_trees <= 0 ||
       tree_ensemble_resource->IsTreeFinalized(num_trees - 1))
          ? num_trees
          : num_trees - 1;
  const int num_attempted_layers =
      tree_ensemble_resource->GetNumLayersAttempted();
  // growing_metadata
  Tensor* output_stamp_token_t = nullptr;
  Tensor* output_num_trees_t = nullptr;
  Tensor* output_num_finalized_trees_t = nullptr;
  Tensor* output_num_attempted_layers_t = nullptr;
  Tensor* output_last_layer_nodes_range_t = nullptr;
  OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape(),
                                                   &output_stamp_token_t));
  OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape(),
                                                   &output_num_trees_t));
  OP_REQUIRES_OK(context,
                 context->allocate_output(2, TensorShape(),
                                          &output_num_finalized_trees_t));
  OP_REQUIRES_OK(context,
                 context->allocate_output(3, TensorShape(),
                                          &output_num_attempted_layers_t));
  OP_REQUIRES_OK(context, context->allocate_output(
                              4, {2}, &output_last_layer_nodes_range_t));
  output_stamp_token_t->scalar<int64>()() = tree_ensemble_resource->stamp();
  output_num_trees_t->scalar<int32>()() = num_trees;
  output_num_finalized_trees_t->scalar<int32>()() = num_finalized_trees;
  output_num_attempted_layers_t->scalar<int32>()() = num_attempted_layers;
  int32_t range_start;
  int32_t range_end;
  tree_ensemble_resource->GetLastLayerNodesRange(&range_start, &range_end);
  output_last_layer_nodes_range_t->vec<int32>()(0) = range_start;
  // For a completely empty ensemble, this will be 0. To make it a valid range
  // we add this max cond.
  output_last_layer_nodes_range_t->vec<int32>()(1) = std::max(1, range_end);
}
| 0
|
294,612
|
/* Return the number of days in month M (1..12) of Gregorian year Y;
   monthtab row 1 is the leap-year table, row 0 the common-year table.  */
c_gregorian_last_day_of_month(int y, int m)
{
	assert(m >= 1 && m <= 12);
	return monthtab[c_gregorian_leap_p(y) ? 1 : 0][m];
}
| 0
|
477,946
|
/* Decompress `out_bytes` bytes from a Quantum-compressed stream.
 *
 * Decoding is arithmetic-coded (state in H/L/C) over a sliding window;
 * selectors 0-3 emit literals from four context models, selectors 4-6 emit
 * LZ matches of fixed length 3, fixed length 4, or variable length.  The
 * state is saved back into `qtm` so the call can be resumed.  Returns a
 * CL_* status; errors are sticky via qtm->error.
 */
int qtm_decompress(struct qtm_stream *qtm, off_t out_bytes) {
	unsigned int frame_start, frame_end, window_posn, match_offset, range;
	unsigned char *window, *i_ptr, *i_end, *runsrc, *rundest;
	int i, j, selector, extra, sym, match_length, ret;
	unsigned short H, L, C, symf;
	register unsigned int bit_buffer;
	register unsigned char bits_left;
	unsigned char bits_needed, bit_run;
	/* easy answers */
	if (!qtm || (out_bytes < 0)) return CL_ENULLARG;
	if (qtm->error) return qtm->error;
	/* flush out any stored-up bytes before we begin */
	i = qtm->o_end - qtm->o_ptr;
	if ((off_t) i > out_bytes) i = (int) out_bytes;
	if (i) {
		if (qtm->wflag && (ret = mspack_write(qtm->ofd, qtm->o_ptr, i, qtm->file)) != CL_SUCCESS) {
			return qtm->error = ret;
		}
		qtm->o_ptr += i;
		out_bytes -= i;
	}
	if (out_bytes == 0) return CL_SUCCESS;
	/* restore local state */
	QTM_RESTORE_BITS;
	window = qtm->window;
	window_posn = qtm->window_posn;
	frame_start = qtm->frame_start;
	H = qtm->H;
	L = qtm->L;
	C = qtm->C;
	/* while we do not have enough decoded bytes in reserve: */
	while ((qtm->o_end - qtm->o_ptr) < out_bytes) {
		/* read header if necessary. Initialises H, L and C */
		if (!qtm->header_read) {
			H = 0xFFFF; L = 0; QTM_READ_BITS(C, 16);
			qtm->header_read = 1;
		}
		/* decode more, at most up to to frame boundary */
		frame_end = window_posn + (out_bytes - (qtm->o_end - qtm->o_ptr));
		if ((frame_start + QTM_FRAME_SIZE) < frame_end) {
			frame_end = frame_start + QTM_FRAME_SIZE;
		}
		while (window_posn < frame_end) {
			/* model7 selects which of the 7 decode paths to take */
			QTM_GET_SYMBOL(qtm->model7, selector);
			if (selector < 4) {
				/* literal byte from one of 4 context models */
				struct qtm_model *mdl = (selector == 0) ? &qtm->model0 :
					((selector == 1) ? &qtm->model1 :
					 ((selector == 2) ? &qtm->model2 :
					  &qtm->model3));
				QTM_GET_SYMBOL((*mdl), sym);
				window[window_posn++] = sym;
			}
			else {
				switch (selector) {
				case 4: /* selector 4 = fixed length match (3 bytes) */
					QTM_GET_SYMBOL(qtm->model4, sym);
					QTM_READ_BITS(extra, qtm->extra_bits[sym]);
					match_offset = qtm->position_base[sym] + extra + 1;
					match_length = 3;
					break;
				case 5: /* selector 5 = fixed length match (4 bytes) */
					QTM_GET_SYMBOL(qtm->model5, sym);
					QTM_READ_BITS(extra, qtm->extra_bits[sym]);
					match_offset = qtm->position_base[sym] + extra + 1;
					match_length = 4;
					break;
				case 6: /* selector 6 = variable length match */
					QTM_GET_SYMBOL(qtm->model6len, sym);
					QTM_READ_BITS(extra, qtm->length_extra[sym]);
					match_length = qtm->length_base[sym] + extra + 5;
					QTM_GET_SYMBOL(qtm->model6, sym);
					QTM_READ_BITS(extra, qtm->extra_bits[sym]);
					match_offset = qtm->position_base[sym] + extra + 1;
					break;
				default:
					/* should be impossible, model7 can only return 0-6 */
					return qtm->error = CL_EFORMAT;
				}
				/* bounds-check the copy destination before writing */
				if (window_posn + match_length > qtm->window_size) {
					cli_dbgmsg("qtm_decompress: match ran over window wrap\n");
					return qtm->error = CL_EFORMAT;
				}
				rundest = &window[window_posn];
				i = match_length;
				/* does match offset wrap the window? */
				if (match_offset > window_posn) {
					/* j = length from match offset to end of window */
					j = match_offset - window_posn;
					if (j > (int) qtm->window_size) {
						cli_dbgmsg("qtm_decompress: match offset beyond window boundaries\n");
						return qtm->error = CL_EFORMAT;
					}
					runsrc = &window[qtm->window_size - j];
					if (j < i) {
						/* if match goes over the window edge, do two copy runs */
						i -= j; while (j-- > 0) *rundest++ = *runsrc++;
						runsrc = window;
					}
					while (i-- > 0) *rundest++ = *runsrc++;
				}
				else {
					runsrc = rundest - match_offset;
					if(i > (int) (qtm->window_size - window_posn))
						i = qtm->window_size - window_posn;
					while (i-- > 0) *rundest++ = *runsrc++;
				}
				window_posn += match_length;
			}
		} /* while (window_posn < frame_end) */
		qtm->o_end = &window[window_posn];
		/* another frame completed? */
		if ((window_posn - frame_start) >= QTM_FRAME_SIZE) {
			if ((window_posn - frame_start) != QTM_FRAME_SIZE) {
				cli_dbgmsg("qtm_decompress: overshot frame alignment\n");
				return qtm->error = CL_EFORMAT;
			}
			/* re-align input */
			if (bits_left & 7) QTM_REMOVE_BITS(bits_left & 7);
			do { QTM_READ_BITS(i, 8); } while (i != 0xFF);
			qtm->header_read = 0;
			/* window wrap? */
			if (window_posn == qtm->window_size) {
				/* flush all currently stored data */
				i = (qtm->o_end - qtm->o_ptr);
				if (qtm->wflag && (ret = mspack_write(qtm->ofd, qtm->o_ptr, i, qtm->file)) != CL_SUCCESS) {
					return qtm->error = ret;
				}
				out_bytes -= i;
				qtm->o_ptr = &window[0];
				qtm->o_end = &window[0];
				window_posn = 0;
			}
			frame_start = window_posn;
		}
	} /* while (more bytes needed) */
	if (out_bytes) {
		i = (int) out_bytes;
		if (qtm->wflag && (ret = mspack_write(qtm->ofd, qtm->o_ptr, i, qtm->file)) != CL_SUCCESS) {
			return qtm->error = ret;
		}
		qtm->o_ptr += i;
	}
	/* store local state */
	QTM_STORE_BITS;
	qtm->window_posn = window_posn;
	qtm->frame_start = frame_start;
	qtm->H = H;
	qtm->L = L;
	qtm->C = C;
	return CL_SUCCESS;
}
| 0
|
317,260
|
/* LSM hook: report the Smack secid of task `p` when it is being acted on
 * as an object (uses the objective task label, not the subjective one). */
static void smack_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
	struct smack_known *skp = smk_of_task_struct_obj(p);
	*secid = skp->smk_secid;
}
| 0
|
226,957
|
/*
 * Callback for the IRC "ERROR" message: prints the error text in the
 * server buffer and, for "Closing Link" errors, flags the server so the
 * disconnect is handled (with reconnect only if we were connected).
 */
IRC_PROTOCOL_CALLBACK(error)
{
    char *ptr_args;
    IRC_PROTOCOL_MIN_ARGS(2);
    /* strip the leading ':' of a trailing parameter, if present */
    ptr_args = (argv_eol[1][0] == ':') ? argv_eol[1] + 1 : argv_eol[1];
    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, NULL),
        date,
        irc_protocol_tags (command, NULL, NULL, NULL),
        "%s%s",
        weechat_prefix ("error"),
        ptr_args);
    if (strncmp (ptr_args, "Closing Link", 12) == 0)
    {
        irc_server_disconnect (server, !server->is_connected, 1);
    }
    return WEECHAT_RC_OK;
}
| 0
|
436,091
|
/*
 * Initialise and submit one SQE, honouring link semantics: linked requests
 * are chained behind link->head and only queued once the chain closes.
 * On init/prep failure the whole pending link chain is failed as well.
 * NOTE(review): the function body appears truncated in this extract (no
 * closing brace visible); the tail lies outside this chunk.
 */
static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
			 const struct io_uring_sqe *sqe)
{
	struct io_submit_link *link = &ctx->submit_state.link;
	int ret;
	ret = io_init_req(ctx, req, sqe);
	if (unlikely(ret)) {
fail_req:
		if (link->head) {
			/* fail even hard links since we don't submit */
			req_set_fail(link->head);
			io_req_complete_failed(link->head, -ECANCELED);
			link->head = NULL;
		}
		io_req_complete_failed(req, ret);
		return ret;
	}
	ret = io_req_prep(req, sqe);
	if (unlikely(ret))
		goto fail_req;
	/* don't need @sqe from now on */
	trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
				  req->flags, true,
				  ctx->flags & IORING_SETUP_SQPOLL);
	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (link->head) {
		struct io_kiocb *head = link->head;
		/* linked requests may run async later; prep for that now */
		ret = io_req_prep_async(req);
		if (unlikely(ret))
			goto fail_req;
		trace_io_uring_link(ctx, req, head);
		link->last->link = req;
		link->last = req;
		/* last request of a link, enqueue the link */
		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
			link->head = NULL;
			io_queue_sqe(head);
		}
	} else {
		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			link->head = req;
			link->last = req;
		} else {
			io_queue_sqe(req);
		}
	}
	return 0;
| 0
|
516,250
|
/*
 * Device reset for virtio-net: restore filter/announce/queue state to the
 * pre-driver defaults and flush or purge any in-flight TX packets so no
 * stale async transmissions survive the reset.
 */
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;
    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    /* cancel any pending self-announcement */
    timer_del(n->announce_timer.tm);
    n->announce_timer.round = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;
    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    /* restore the configured MAC address */
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
    /* Flush any async TX */
    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);
        if (nc->peer) {
            qemu_flush_or_purge_queued_packets(nc->peer, true);
            assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
        }
    }
}
| 0
|
476,139
|
/*
 * Fill `buf` with Microsoft OS "Extended Compat ID" descriptor function
 * records for every interface in configuration `c`.  Each record is 24
 * bytes: interface number, 0x01, 16-byte compat id, 6 reserved bytes.
 * Returns the total descriptor length written (16-byte header included).
 */
static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
{
	int i, count;
	/* skip the 16-byte header the caller fills in */
	count = 16;
	buf += 16;
	for (i = 0; i < c->next_interface_id; ++i) {
		struct usb_function *f;
		int j;
		f = c->interface[i];
		for (j = 0; j < f->os_desc_n; ++j) {
			struct usb_os_desc *d;
			if (i != f->os_desc_table[j].if_id)
				continue;
			d = f->os_desc_table[j].os_desc;
			if (d && d->ext_compat_id) {
				*buf++ = i;
				*buf++ = 0x01;
				memcpy(buf, d->ext_compat_id, 16);
				/* 16-byte id + 6 reserved bytes */
				buf += 22;
			} else {
				/* empty compat id: iface byte left 0 */
				++buf;
				*buf = 0x01;
				buf += 23;
			}
			count += 24;
			/* stop before the next record would overflow the
			 * fixed-size EP0 OS-descriptor buffer */
			if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
				return count;
		}
	}
	return count;
}
| 0
|
312,591
|
/* Mark the quickfix list as in use (reference count), preventing it from
 * being freed while autocommands or list operations are running.  */
incr_quickfix_busy(void)
{
    quickfix_busy++;
}
| 0
|
455,311
|
/* Tear down the default input source.  With CHECK_ZERO non-zero, fd 0 is
   also eligible for closing; otherwise only fds > 0 are closed (so stdin
   is left alone).  Works on the buffered fd or the stdio stream depending
   on how bash was built.  */
unset_bash_input (check_zero)
     int check_zero;
{
#if defined (BUFFERED_INPUT)
  if ((check_zero && default_buffered_input >= 0) ||
      (check_zero == 0 && default_buffered_input > 0))
    {
      close_buffered_fd (default_buffered_input);
      default_buffered_input = bash_input.location.buffered_fd = -1;
      bash_input.type = st_none;		/* XXX */
    }
#else /* !BUFFERED_INPUT */
  if (default_input)
    {
      fclose (default_input);
      default_input = (FILE *)NULL;
    }
#endif /* !BUFFERED_INPUT */
}
| 0
|
421,381
|
/* Print an AST_LIST chain as a bracketed, space-separated s-expression,
 * rendering each element at depth d. */
static void slist(int d, js_Ast *list)
{
	js_Ast *node;

	pc('[');
	for (node = list; node; node = node->b) {
		assert(node->type == AST_LIST);
		snode(d, node->a);
		if (node->b)
			pc(' ');
	}
	pc(']');
}
| 0
|
247,339
|
/*
 * Parse the algorithm-specific MPI parameters of an OpenPGP signature
 * packet.  `p` points at the first MPI within the packet body [h, h+hlen).
 * On full success (all expected MPIs consumed, exactly at packet end) the
 * signature algorithm object is attached to `sigp`; otherwise it is freed.
 * Returns 0 on success, 1 on failure.
 */
static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo,
		const uint8_t *p, const uint8_t *h, size_t hlen,
		pgpDigParams sigp)
{
    int rc = 1; /* assume failure */
    const uint8_t * pend = h + hlen;
    int i;
    pgpDigAlg sigalg = pgpSignatureNew(pubkey_algo);
    /* each MPI needs at least its 2-byte bit-length prefix */
    for (i = 0; i < sigalg->mpis && pend - p >= 2; i++) {
	int mpil = pgpMpiLen(p);
	if (pend - p < mpil)
	    break;
	if (sigalg->setmpi(sigalg, i, p))
	    break;
	p += mpil;
    }
    /* Does the size and number of MPI's match our expectations? */
    if (p == pend && i == sigalg->mpis)
	rc = 0;
    /* We can't handle more than one sig at a time */
    if (rc == 0 && sigp->alg == NULL && sigp->tag == PGPTAG_SIGNATURE)
	sigp->alg = sigalg;
    else
	pgpDigAlgFree(sigalg);
    return rc;
}
| 0
|
245,693
|
/*
 * Array.prototype.unshift(): prepend the arguments to `this`, shifting
 * existing elements up by nargs-1, and return the new length.  Fast path
 * grows a fast array in place; the generic path moves properties one by
 * one (sparse objects are handled via their own index list to avoid
 * touching holes).
 */
njs_array_prototype_unshift(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
    njs_index_t unused)
{
    double        idx;
    int64_t       from, to, length;
    njs_int_t     ret;
    njs_uint_t    n;
    njs_array_t   *array, *keys;
    njs_value_t   *this, entry;

    this = njs_argument(args, 0);
    length = 0;
    /* number of values to prepend (arguments after `this`) */
    n = nargs - 1;

    ret = njs_value_to_object(vm, this);
    if (njs_slow_path(ret != NJS_OK)) {
        return ret;
    }

    if (njs_fast_path(njs_is_fast_array(this))) {
        array = njs_array(this);

        if (n != 0) {
            /* reserve n slots in front of the existing elements */
            ret = njs_array_expand(vm, array, n, 0);
            if (njs_slow_path(ret != NJS_OK)) {
                return ret;
            }

            array->length += n;
            n = nargs;

            /* copy args in reverse so args[1] ends up first */
            do {
                n--;
                /* GC: njs_retain(&args[n]); */
                array->start--;
                array->start[0] = args[n];
            } while (n > 1);
        }

        njs_set_number(&vm->retval, array->length);
        return NJS_OK;
    }

    ret = njs_object_length(vm, this, &length);
    if (njs_slow_path(ret == NJS_ERROR)) {
        return ret;
    }

    if (n == 0) {
        goto done;
    }

    if (njs_slow_path((length + n) > NJS_MAX_LENGTH)) {
        njs_type_error(vm, "Invalid length");
        return NJS_ERROR;
    }

    if (!njs_fast_object(length)) {
        /* sparse object: shift only the indices that actually exist */
        keys = njs_array_indices(vm, this);
        if (njs_slow_path(keys == NULL)) {
            return NJS_ERROR;
        }

        from = keys->length;

        while (from > 0) {
            ret = njs_value_property_delete(vm, this, &keys->start[--from],
                                            &entry, 1);
            if (njs_slow_path(ret == NJS_ERROR)) {
                njs_array_destroy(vm, keys);
                return ret;
            }

            if (ret == NJS_OK) {
                /* re-insert the property at its index shifted up by n */
                idx = njs_string_to_index(&keys->start[from]) + n;
                ret = njs_value_property_i64_set(vm, this, idx, &entry);
                if (njs_slow_path(ret == NJS_ERROR)) {
                    njs_array_destroy(vm, keys);
                    return ret;
                }
            }
        }

        njs_array_destroy(vm, keys);

        length += n;

        goto copy;
    }

    /* dense object: move every element up by n, highest index first */
    from = length;
    length += n;
    to = length;

    while (from > 0) {
        ret = njs_value_property_i64_delete(vm, this, --from, &entry);
        if (njs_slow_path(ret == NJS_ERROR)) {
            return ret;
        }

        to--;

        if (ret == NJS_OK) {
            ret = njs_value_property_i64_set(vm, this, to, &entry);
            if (njs_slow_path(ret == NJS_ERROR)) {
                return ret;
            }
        }
    }

copy:

    /* write the new values into slots 0..n-1 */
    for (n = 1; n < nargs; n++) {
        ret = njs_value_property_i64_set(vm, this, n - 1, &args[n]);
        if (njs_slow_path(ret == NJS_ERROR)) {
            return ret;
        }
    }

done:

    ret = njs_object_length_set(vm, this, length);
    if (njs_slow_path(ret == NJS_ERROR)) {
        return ret;
    }

    njs_set_number(&vm->retval, length);

    return NJS_OK;
}
| 0
|
261,188
|
/*
 * Send an MQTT PINGREQ and wait for the PINGRESP.  The `ping` object
 * carries the state machine (MQTT_MSG_BEGIN -> MQTT_MSG_WAIT) so the
 * call can be resumed in non-blocking mode (MQTT_CODE_CONTINUE).
 * Returns an MQTT_CODE_* result; state is reset to BEGIN on completion.
 */
int MqttClient_Ping_ex(MqttClient *client, MqttPing* ping)
{
    int rc, len;

    /* Validate required arguments */
    if (client == NULL || ping == NULL) {
        return MQTT_CODE_ERROR_BAD_ARG;
    }

    if (ping->stat == MQTT_MSG_BEGIN) {
    #ifdef WOLFMQTT_MULTITHREAD
        /* Lock send socket mutex */
        rc = wm_SemLock(&client->lockSend);
        if (rc != 0) {
            return rc;
        }
    #endif

        /* Encode the subscribe packet */
        rc = MqttEncode_Ping(client->tx_buf, client->tx_buf_len, ping);
    #ifdef WOLFMQTT_DEBUG_CLIENT
        PRINTF("MqttClient_EncodePacket: Len %d, Type %s (%d), ID %d, QoS %d",
            rc, MqttPacket_TypeDesc(MQTT_PACKET_TYPE_PING_REQ),
            MQTT_PACKET_TYPE_PING_REQ, 0, 0);
    #endif
        if (rc <= 0) {
        #ifdef WOLFMQTT_MULTITHREAD
            wm_SemUnlock(&client->lockSend);
        #endif
            return rc;
        }
        len = rc;

    #ifdef WOLFMQTT_MULTITHREAD
        rc = wm_SemLock(&client->lockClient);
        if (rc == 0) {
            /* inform other threads of expected response */
            rc = MqttClient_RespList_Add(client, MQTT_PACKET_TYPE_PING_RESP, 0,
                &ping->pendResp, ping);
            wm_SemUnlock(&client->lockClient);
        }
        if (rc != 0) {
            wm_SemUnlock(&client->lockSend);
            return rc; /* Error locking client */
        }
    #endif

        /* Send ping req packet */
        rc = MqttPacket_Write(client, client->tx_buf, len);
    #ifdef WOLFMQTT_MULTITHREAD
        wm_SemUnlock(&client->lockSend);
    #endif
        if (rc != len) {
            /* short write: drop the pending-response registration */
        #ifdef WOLFMQTT_MULTITHREAD
            if (wm_SemLock(&client->lockClient) == 0) {
                MqttClient_RespList_Remove(client, &ping->pendResp);
                wm_SemUnlock(&client->lockClient);
            }
        #endif
            return rc;
        }

        ping->stat = MQTT_MSG_WAIT;
    }

    /* Wait for ping resp packet */
    rc = MqttClient_WaitType(client, ping, MQTT_PACKET_TYPE_PING_RESP, 0,
        client->cmd_timeout_ms);
#ifdef WOLFMQTT_NONBLOCK
    /* would block: caller must re-invoke with state preserved */
    if (rc == MQTT_CODE_CONTINUE)
        return rc;
#endif

#ifdef WOLFMQTT_MULTITHREAD
    if (wm_SemLock(&client->lockClient) == 0) {
        MqttClient_RespList_Remove(client, &ping->pendResp);
        wm_SemUnlock(&client->lockClient);
    }
#endif

    /* reset state */
    ping->stat = MQTT_MSG_BEGIN;

    return rc;
}
| 0
|
312,571
|
/* Match one error-format pattern against a line of compiler output and
 * extract the quickfix fields (file, lnum, col, message, ...).  Returns
 * QF_OK via qf_parse_match() on a regexp match, QF_FAIL otherwise.  In
 * multi-scan mode only "%O/%P/%Q"-prefixed formats are considered.  */
    static int
qf_parse_get_fields(
	char_u		*linebuf,
	int		linelen,
	efm_T		*fmt_ptr,
	qffields_T	*fields,
	int		qf_multiline,
	int		qf_multiscan,
	char_u		**tail)
{
    regmatch_T	regmatch;
    int		status = QF_FAIL;
    int		r;

    if (qf_multiscan &&
	    vim_strchr((char_u *)"OPQ", fmt_ptr->prefix) == NULL)
	return QF_FAIL;

    // Reset all output fields before parsing.
    fields->namebuf[0] = NUL;
    fields->module[0] = NUL;
    fields->pattern[0] = NUL;
    if (!qf_multiscan)
	fields->errmsg[0] = NUL;
    fields->lnum = 0;
    fields->end_lnum = 0;
    fields->col = 0;
    fields->end_col = 0;
    fields->use_viscol = FALSE;
    fields->enr = -1;
    fields->type = 0;
    *tail = NULL;

    // Always ignore case when looking for a matching error.
    regmatch.rm_ic = TRUE;
    regmatch.regprog = fmt_ptr->prog;
    r = vim_regexec(&regmatch, linebuf, (colnr_T)0);
    // vim_regexec() may reallocate the program; store it back.
    fmt_ptr->prog = regmatch.regprog;
    if (r)
	status = qf_parse_match(linebuf, linelen, fmt_ptr, &regmatch,
		fields, qf_multiline, qf_multiscan, tail);
    return status;
}
| 0
|
326,604
|
/*
 * Finish restoring the current entry: pad or truncate the file to its
 * declared size, then restore metadata in a carefully ordered sequence
 * (ownership -> mode -> xattrs -> fflags -> times -> mac metadata -> ACLs),
 * close the fd, commit any temporary file via rename, and release the
 * entry.  Returns the worst ARCHIVE_* status encountered.
 */
_archive_write_disk_finish_entry(struct archive *_a)
{
	struct archive_write_disk *a = (struct archive_write_disk *)_a;
	int ret = ARCHIVE_OK;

	archive_check_magic(&a->archive, ARCHIVE_WRITE_DISK_MAGIC,
	    ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA,
	    "archive_write_finish_entry");
	if (a->archive.state & ARCHIVE_STATE_HEADER)
		return (ARCHIVE_OK);
	archive_clear_error(&a->archive);

	/* Pad or truncate file to the right size. */
	if (a->fd < 0) {
		/* There's no file. */
	} else if (a->filesize < 0) {
		/* File size is unknown, so we can't set the size. */
	} else if (a->fd_offset == a->filesize) {
		/* Last write ended at exactly the filesize; we're done. */
		/* Hopefully, this is the common case. */
#if defined(__APPLE__) && defined(UF_COMPRESSED) && defined(HAVE_ZLIB_H)
	} else if (a->todo & TODO_HFS_COMPRESSION) {
		char null_d[1024];
		ssize_t r;

		/* HFS compression must see every byte; pad with zeros
		 * through the compressor rather than truncating. */
		if (a->file_remaining_bytes)
			memset(null_d, 0, sizeof(null_d));
		while (a->file_remaining_bytes) {
			if (a->file_remaining_bytes > sizeof(null_d))
				r = hfs_write_data_block(
				    a, null_d, sizeof(null_d));
			else
				r = hfs_write_data_block(
				    a, null_d, a->file_remaining_bytes);
			if (r < 0)
				return ((int)r);
		}
#endif
	} else {
#if HAVE_FTRUNCATE
		if (ftruncate(a->fd, a->filesize) == -1 &&
		    a->filesize == 0) {
			archive_set_error(&a->archive, errno,
			    "File size could not be restored");
			return (ARCHIVE_FAILED);
		}
#endif
		/*
		 * Not all platforms implement the XSI option to
		 * extend files via ftruncate.  Stat() the file again
		 * to see what happened.
		 */
		a->pst = NULL;
		if ((ret = lazy_stat(a)) != ARCHIVE_OK)
			return (ret);
		/* We can use lseek()/write() to extend the file if
		 * ftruncate didn't work or isn't available. */
		if (a->st.st_size < a->filesize) {
			const char nul = '\0';
			if (lseek(a->fd, a->filesize - 1, SEEK_SET) < 0) {
				archive_set_error(&a->archive, errno,
				    "Seek failed");
				return (ARCHIVE_FATAL);
			}
			if (write(a->fd, &nul, 1) < 0) {
				archive_set_error(&a->archive, errno,
				    "Write to restore size failed");
				return (ARCHIVE_FATAL);
			}
			a->pst = NULL;
		}
	}

	/* Restore metadata. */

	/*
	 * This is specific to Mac OS X.
	 * If the current file is an AppleDouble file, it should be
	 * linked with the data fork file and remove it.
	 */
	if (a->todo & TODO_APPLEDOUBLE) {
		int r2 = fixup_appledouble(a, a->name);
		if (r2 == ARCHIVE_EOF) {
			/* The current file has been successfully linked
			 * with the data fork file and removed. So there
			 * is nothing to do on the current file.  */
			goto finish_metadata;
		}
		if (r2 < ret) ret = r2;
	}

	/*
	 * Look up the "real" UID only if we're going to need it.
	 * TODO: the TODO_SGID condition can be dropped here, can't it?
	 */
	if (a->todo & (TODO_OWNER | TODO_SUID | TODO_SGID)) {
		a->uid = archive_write_disk_uid(&a->archive,
		    archive_entry_uname(a->entry),
		    archive_entry_uid(a->entry));
	}
	/* Look up the "real" GID only if we're going to need it. */
	/* TODO: the TODO_SUID condition can be dropped here, can't it? */
	if (a->todo & (TODO_OWNER | TODO_SGID | TODO_SUID)) {
		a->gid = archive_write_disk_gid(&a->archive,
		    archive_entry_gname(a->entry),
		    archive_entry_gid(a->entry));
	}

	/*
	 * Restore ownership before set_mode tries to restore suid/sgid
	 * bits.  If we set the owner, we know what it is and can skip
	 * a stat() call to examine the ownership of the file on disk.
	 */
	if (a->todo & TODO_OWNER) {
		int r2 = set_ownership(a);
		if (r2 < ret) ret = r2;
	}

	/*
	 * HYPOTHESIS:
	 * If we're not root, we won't be setting any security
	 * attributes that may be wiped by the set_mode() routine
	 * below.  We also can't set xattr on non-owner-writable files,
	 * which may be the state after set_mode(). Perform
	 * set_xattrs() first based on these constraints.
	 */
	if (a->user_uid != 0 &&
	    (a->todo & TODO_XATTR)) {
		int r2 = set_xattrs(a);
		if (r2 < ret) ret = r2;
	}

	/*
	 * set_mode must precede ACLs on systems such as Solaris and
	 * FreeBSD where setting the mode implicitly clears extended ACLs
	 */
	if (a->todo & TODO_MODE) {
		int r2 = set_mode(a, a->mode);
		if (r2 < ret) ret = r2;
	}

	/*
	 * Security-related extended attributes (such as
	 * security.capability on Linux) have to be restored last,
	 * since they're implicitly removed by other file changes.
	 * We do this last only when root.
	 */
	if (a->user_uid == 0 &&
	    (a->todo & TODO_XATTR)) {
		int r2 = set_xattrs(a);
		if (r2 < ret) ret = r2;
	}

	/*
	 * Some flags prevent file modification; they must be restored after
	 * file contents are written.
	 */
	if (a->todo & TODO_FFLAGS) {
		int r2 = set_fflags(a);
		if (r2 < ret) ret = r2;
	}

	/*
	 * Time must follow most other metadata;
	 * otherwise atime will get changed.
	 */
	if (a->todo & TODO_TIMES) {
		int r2 = set_times_from_entry(a);
		if (r2 < ret) ret = r2;
	}

	/*
	 * Mac extended metadata includes ACLs.
	 */
	if (a->todo & TODO_MAC_METADATA) {
		const void *metadata;
		size_t metadata_size;
		metadata = archive_entry_mac_metadata(a->entry, &metadata_size);
		if (metadata != NULL && metadata_size > 0) {
			int r2 = set_mac_metadata(a, archive_entry_pathname(
			    a->entry), metadata, metadata_size);
			if (r2 < ret) ret = r2;
		}
	}

	/*
	 * ACLs must be restored after timestamps because there are
	 * ACLs that prevent attribute changes (including time).
	 */
	if (a->todo & TODO_ACLS) {
		int r2;
		r2 = archive_write_disk_set_acls(&a->archive, a->fd,
				  archive_entry_pathname(a->entry),
				  archive_entry_acl(a->entry),
				  archive_entry_mode(a->entry));
		if (r2 < ret) ret = r2;
	}

finish_metadata:
	/* If there's an fd, we can close it now. */
	if (a->fd >= 0) {
		close(a->fd);
		a->fd = -1;
		if (a->tmpname) {
			/* Atomically publish the temp file at its final
			 * name; on failure remove the temp file. */
			if (rename(a->tmpname, a->name) == -1) {
				archive_set_error(&a->archive, errno,
				    "Failed to rename temporary file");
				ret = ARCHIVE_FAILED;
				unlink(a->tmpname);
			}
			a->tmpname = NULL;
		}
	}
	/* If there's an entry, we can release it now. */
	archive_entry_free(a->entry);
	a->entry = NULL;
	a->archive.state = ARCHIVE_STATE_HEADER;
	return (ret);
}
| 0
|
337,780
|
/* Allocate an ERROR chunk with room for `size` bytes of cause data beyond
 * the error header.  If `chunk` is the packet being replied to, the reply
 * is pinned to the same transport it arrived on.  Returns NULL on
 * allocation failure.  */
static struct sctp_chunk *sctp_make_op_error_space(
					const struct sctp_association *asoc,
					const struct sctp_chunk *chunk,
					size_t size)
{
	struct sctp_chunk *retval;

	retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
				   sizeof(struct sctp_errhdr) + size,
				   GFP_ATOMIC);
	if (!retval)
		goto nodata;

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, etc.) to the same destination transport
	 * address from which it received the DATA or control chunk
	 * to which it is replying.
	 *
	 */
	if (chunk)
		retval->transport = chunk->transport;

nodata:
	return retval;
}
| 0
|
195,039
|
// Grouped 2D convolution on CPU (NHWC): shuffles input/filter so that the
// group index leads, runs an independent SpatialConvolution per group, and
// shuffles the result back into the caller's output tensor.
void operator()(OpKernelContext* ctx, const Tensor& input,
                const Tensor& filter, int row_stride, int col_stride,
                int row_dilation, int col_dilation, const Padding& padding,
                const std::vector<int64_t>& explicit_paddings, Tensor* output,
                TensorFormat data_format) {
  DCHECK(data_format == FORMAT_NHWC)
      << "Grouped conv implementation only "
         "supports NHWC tensor format for now.";
  const int64_t in_depth = input.dim_size(3);
  const int64_t patch_depth = filter.dim_size(2);
  // Defensive guard: a zero filter depth would divide by zero below, and a
  // non-divisible depth would produce inconsistent shuffle shapes.  Proper
  // shape validation belongs in the kernel's OP_REQUIRES checks; bail out
  // here rather than crash if invalid shapes slip through.
  DCHECK(patch_depth > 0 && in_depth % patch_depth == 0)
      << "Input depth must be a positive multiple of filter depth.";
  if (patch_depth <= 0 || in_depth % patch_depth != 0) {
    return;
  }
  const int64_t num_groups = in_depth / patch_depth;
  // Shuffle input/filter tensors to have group as a leading dimension.
  std::array<int64_t, 5> shuffle({3, 0, 1, 2, 4});
  // Compute pre shuffle dimemnsions.
  auto pre_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
    return {tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2),
            num_groups, tensor.dim_size(3) / num_groups};
  };
  // Compute post shuffle dimemnsions.
  auto post_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> {
    return {num_groups, tensor.dim_size(0), tensor.dim_size(1),
            tensor.dim_size(2), tensor.dim_size(3) / num_groups};
  };
  auto& device = ctx->eigen_device<CPUDevice>();
  absl::BlockingCounter shuffles_completed(2);
  auto on_shuffled = [&]() { shuffles_completed.DecrementCount(); };
  // Shuffle input into temporary tensor.
  Tensor input_shuffled(input.dtype(), TensorShape(post_shuffle(input)));
  input_shuffled.tensor<T, 5>().device(device, on_shuffled) =
      input.shaped<T, 5>(pre_shuffle(input)).shuffle(shuffle);
  // Shuffle filter into temporary tensor.
  Tensor filter_shuffled(filter.dtype(), TensorShape(post_shuffle(filter)));
  filter_shuffled.tensor<T, 5>().device(device, on_shuffled) =
      filter.shaped<T, 5>(pre_shuffle(filter)).shuffle(shuffle);
  // Wait for the completion of input/filter shuffles.
  shuffles_completed.Wait();
  // Write group convolution results into temporary output tensor.
  Tensor output_shuffled(output->dtype(), TensorShape(post_shuffle(*output)));
  for (int64_t i = 0; i < num_groups; ++i) {
    // TODO(ezhulenev): Run this loop using `parallelFor` (regular parallelFor
    // will lead to deadlock, SpatialConvolution has to use async Eigen
    // assignment). This requires small changes to Eigen to support async
    // exeuction for tensor chipping operation.
    // TODO(ezhulenev): Grouped convolution should also support 1x1 filter
    // optimization.
    auto input_slice = input_shuffled.tensor<T, 5>().template chip<0>(i);
    auto filter_slice = filter_shuffled.tensor<T, 5>().template chip<0>(i);
    auto output_slice = output_shuffled.tensor<T, 5>().template chip<0>(i);
    if (padding == EXPLICIT) {
      functor::SpatialConvolution<CPUDevice, T>()(
          ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
          filter_slice, row_stride, col_stride, row_dilation, col_dilation,
          static_cast<int>(explicit_paddings[2]),
          static_cast<int>(explicit_paddings[3]),
          static_cast<int>(explicit_paddings[4]),
          static_cast<int>(explicit_paddings[5]));
    } else {
      functor::SpatialConvolution<CPUDevice, T>()(
          ctx->eigen_device<CPUDevice>(), output_slice, input_slice,
          filter_slice, row_stride, col_stride, row_dilation, col_dilation,
          BrainPadding2EigenPadding(padding));
    }
  }
  // Shuffle temporary output back into pre-shuffled shape.
  std::array<int64_t, 5> rev_shuffle({1, 2, 3, 0, 4});
  output->shaped<T, 5>(pre_shuffle(*output)).device(device) =
      output_shuffled.tensor<T, 5>().shuffle(rev_shuffle);
}
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.