idx
int64 | func
string | target
int64 |
|---|---|---|
261,920
|
/*
 * Collects every match of "pattern" in the subject string args[0] into a
 * newly allocated array stored in vm->retval (retval stays null when the
 * regex is not valid for the detected string type).
 * Returns NJS_OK on success, NJS_ERROR on allocation or match failure.
 */
njs_string_match_multiple(njs_vm_t *vm, njs_value_t *args,
njs_regexp_pattern_t *pattern)
{
size_t c0, c1;
int32_t size, length;
njs_int_t ret;
njs_utf8_t utf8;
njs_array_t *array;
const u_char *p, *start, *end;
njs_regexp_utf8_t type;
njs_string_prop_t string;
/* A global match always restarts from the beginning of the subject. */
njs_set_number(&args[1].data.u.regexp->last_index, 0);
vm->retval = njs_value_null;
(void) njs_string_prop(&string, &args[0]);
utf8 = NJS_STRING_BYTE;
type = NJS_REGEXP_BYTE;
if (string.length != 0) {
utf8 = NJS_STRING_ASCII;
type = NJS_REGEXP_UTF8;
/* length != size means multi-byte UTF-8 characters are present. */
if (string.length != string.size) {
utf8 = NJS_STRING_UTF8;
}
}
if (njs_regex_is_valid(&pattern->regex[type])) {
array = njs_array_alloc(vm, 0, 0, NJS_ARRAY_SPARE);
if (njs_slow_path(array == NULL)) {
return NJS_ERROR;
}
p = string.start;
end = p + string.size;
do {
ret = njs_regexp_match(vm, &pattern->regex[type], p, 0, string.size,
vm->single_match_data);
if (ret < 0) {
/* NJS_DECLINED means "no more matches", not an error. */
if (njs_fast_path(ret == NJS_DECLINED)) {
break;
}
njs_internal_error(vm, "njs_regexp_match() failed");
return NJS_ERROR;
}
ret = njs_array_expand(vm, array, 0, 1);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
/* Captures 0 and 1 delimit the whole match as byte offsets from p. */
c0 = njs_regex_capture(vm->single_match_data, 0);
c1 = njs_regex_capture(vm->single_match_data, 1);
start = p + c0;
if (c1 == 0) {
/* Empty match: advance by one character to avoid looping forever. */
if (start < end) {
p = (utf8 != NJS_STRING_BYTE) ? njs_utf8_next(start, end)
: start + 1;
string.size = end - p;
} else {
/* To exit the loop. */
p++;
}
size = 0;
length = 0;
} else {
p += c1;
string.size -= c1;
size = c1 - c0;
length = njs_string_calc_length(utf8, start, size);
}
ret = njs_string_new(vm, &array->start[array->length],
start, size, length);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
array->length++;
} while (p <= end);
njs_set_array(&vm->retval, array);
}
return NJS_OK;
}
| 0
|
272,344
|
/*
 * Encodes "str" (len bytes; presumably already BMP/UCS-2 data — confirm
 * with callers) as an ASN.1 SpcString into *ssp, allocating from the cms
 * arena.  Returns 0 on success, -1 (via cnreterr) on allocation or
 * encoding failure.
 */
generate_spc_string(cms_context *cms, SECItem *ssp, char *str, int len)
{
SpcString ss;
memset(&ss, '\0', sizeof (ss));
SECITEM_AllocItem(cms->arena, &ss.unicode, len);
if (len != 0) {
/* Only a non-empty string needs backing storage to have been allocated. */
if (!ss.unicode.data)
cnreterr(-1, cms, "could not allocate memory");
memcpy(ss.unicode.data, str, len);
}
ss.unicode.type = siBMPString;
if (SEC_ASN1EncodeItem(cms->arena, ssp, &ss, SpcStringTemplate) == NULL)
cnreterr(-1, cms, "could not encode SpcString");
return 0;
}
| 0
|
313,830
|
/*
 * Normal-mode "Q" command: enter Ex mode, unless Visual mode is active
 * (then only beep) or there is a pending operator to clear.
 */
nv_exmode(cmdarg_T *cap)
{
// Ignore 'Q' in Visual mode, just give a beep.
if (VIsual_active)
vim_beep(BO_EX);
else if (!checkclearop(cap->oap))
do_exmode(FALSE);
}
| 0
|
261,944
|
njs_decode_hex_length(const njs_str_t *src, size_t *out_size)
{
    /*
     * Reports the decoded byte length of a hex string: two input
     * characters per output byte.  Always succeeds.
     */
    if (out_size == NULL) {
        return 0;
    }

    *out_size = src->length / 2;

    return 0;
}
| 0
|
273,877
|
/* FTP NOOP command handler: always replies 200; "arg" is unused. */
static void handle_NOOP(ctrl_t *ctrl, char *arg)
{
send_msg(ctrl->sd, "200 NOOP OK.\r\n");
}
| 0
|
274,661
|
/*
 * GTK menu callback: zoom the rendered display in one step.  The menu
 * item and user_data arguments are unused; the zero arguments select the
 * render_zoom_display defaults.
 */
callbacks_zoom_in_activate (GtkMenuItem *menuitem,
gpointer user_data)
{
render_zoom_display (ZOOM_IN, 0, 0, 0);
}
| 0
|
427,228
|
/*
 * Compiles the main function of a Lua chunk: a vararg function whose
 * single initial upvalue is the environment (_ENV); parses statements
 * until end-of-stream.
 */
static void mainfunc (LexState *ls, FuncState *fs) {
BlockCnt bl;
Upvaldesc *env;
open_func(ls, fs, &bl);
setvararg(fs, 0); /* main function is always declared vararg */
env = allocupvalue(fs); /* ...set environment upvalue */
env->instack = 1;
env->idx = 0;
env->kind = VDKREG;
env->name = ls->envn;
/* the prototype now references the name string: notify the GC */
luaC_objbarrier(ls->L, fs->f, env->name);
luaX_next(ls); /* read first token */
statlist(ls); /* parse main body */
check(ls, TK_EOS);
close_func(ls);
}
| 0
|
238,785
|
/*
 * Saves search pattern "pat" into the spats[] slot "idx" (taking a copy),
 * recording the current 'magic' and smartcase state, and schedules a
 * redraw when 'hlsearch' is set.  Does nothing when the slot already
 * holds this exact pointer.
 */
save_re_pat(int idx, char_u *pat, int magic)
{
if (spats[idx].pat != pat)
{
vim_free(spats[idx].pat);
spats[idx].pat = vim_strsave(pat);
spats[idx].magic = magic;
spats[idx].no_scs = no_smartcase;
last_idx = idx;
#ifdef FEAT_SEARCH_EXTRA
// If 'hlsearch' set and search pat changed: need redraw.
if (p_hls)
redraw_all_later(SOME_VALID);
set_no_hlsearch(FALSE);
#endif
}
}
| 0
|
225,649
|
/*
 * Attaches a parsed child box to the MediaBox: mdhd, hdlr and minf are
 * stored into the matching fields (BOX_FIELD_ASSIGN presumably also
 * handles the is_rem removal case — verify against the macro).  Any
 * other child type is silently ignored.
 */
GF_Err mdia_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
GF_MediaBox *ptr = (GF_MediaBox *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_MDHD:
BOX_FIELD_ASSIGN(mediaHeader, GF_MediaHeaderBox)
return GF_OK;
case GF_ISOM_BOX_TYPE_HDLR:
BOX_FIELD_ASSIGN(handler, GF_HandlerBox)
return GF_OK;
case GF_ISOM_BOX_TYPE_MINF:
BOX_FIELD_ASSIGN(information, GF_MediaInformationBox)
return GF_OK;
}
return GF_OK;
}
| 0
|
482,540
|
findRuleName(const CharsString *name, const TranslationTableHeader *table) {
    /*
     * Looks up a named rule in the table's linked list.  Returns the
     * rule's offset when an entry with the same length and characters is
     * found, otherwise 0.
     */
    const RuleName *entry;
    for (entry = table->ruleNames; entry != NULL; entry = entry->next) {
        if (name->length != entry->length) continue;
        if (memcmp(&name->chars[0], entry->name, CHARSIZE * name->length) == 0)
            return entry->ruleOffset;
    }
    return 0;
}
| 0
|
436,144
|
/*
 * Hands out one io_kiocb from the per-ring submission cache, refilling
 * the cache first when it is empty (from the completion-side cache, or
 * via a bulk slab allocation with a single-object fallback).
 * Returns NULL only when every allocation path fails.
 */
static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
struct io_submit_state *state = &ctx->submit_state;
BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
if (!state->free_reqs) {
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
int ret, i;
/* Completed requests parked on the completion side refill us for free. */
if (io_flush_cached_reqs(ctx))
goto got_req;
ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
state->reqs);
/*
 * Bulk alloc is all-or-nothing. If we fail to get a batch,
 * retry single alloc to be on the safe side.
 */
if (unlikely(ret <= 0)) {
state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
if (!state->reqs[0])
return NULL;
ret = 1;
}
/*
 * Don't initialise the fields below on every allocation, but
 * do that in advance and keep valid on free.
 */
for (i = 0; i < ret; i++) {
struct io_kiocb *req = state->reqs[i];
req->ctx = ctx;
req->link = NULL;
req->async_data = NULL;
/* not necessary, but safer to zero */
req->result = 0;
}
state->free_reqs = ret;
}
got_req:
/* Pop from the top of the cache stack. */
state->free_reqs--;
return state->reqs[state->free_reqs];
}
| 0
|
489,149
|
/*
 * Handles a chunk that violates the SCTP protocol: with no association
 * yet, defer to the generic violation handler; otherwise abort the
 * association with a descriptive error string.
 */
static sctp_disposition_t sctp_sf_violation_chunk(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[]="The following chunk violates protocol:";
if (!asoc)
return sctp_sf_violation(ep, asoc, type, arg, commands);
/* sizeof(err_str) includes the terminating NUL. */
return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
| 0
|
225,680
|
void dmed_box_del(GF_Box *s)
{
	/* The dmed box owns no nested allocations; free the box itself. */
	GF_DMEDBox *ptr = (GF_DMEDBox *)s;
	gf_free(ptr);
}
| 0
|
294,387
|
/*
 * DateTime.commercial(year, cweek, cwday, hour, minute, second, offset,
 * start): builds a DateTime from an ISO commercial (week-based) date and
 * time of day.  Arguments are optional; the switch falls through so each
 * supplied argument overrides its default.  Raises DateError on an
 * invalid date or time.
 */
datetime_s_commercial(int argc, VALUE *argv, VALUE klass)
{
VALUE vy, vw, vd, vh, vmin, vs, vof, vsg, y, fr, fr2, ret;
int w, d, h, min, s, rof;
double sg;
rb_scan_args(argc, argv, "08", &vy, &vw, &vd, &vh, &vmin, &vs, &vof, &vsg);
/* Defaults: Julian Day 0 equivalents and UTC. */
y = INT2FIX(-4712);
w = 1;
d = 1;
h = min = s = 0;
fr2 = INT2FIX(0);
rof = 0;
sg = DEFAULT_SG;
switch (argc) {
case 8:
val2sg(vsg, sg);
/* fall through */
case 7:
val2off(vof, rof);
/* fall through */
case 6:
check_numeric(vs, "second");
num2int_with_frac(s, positive_inf);
/* fall through */
case 5:
check_numeric(vmin, "minute");
num2int_with_frac(min, 5);
/* fall through */
case 4:
check_numeric(vh, "hour");
num2int_with_frac(h, 4);
/* fall through */
case 3:
check_numeric(vd, "cwday");
num2int_with_frac(d, 3);
/* fall through */
case 2:
check_numeric(vw, "cweek");
w = NUM2INT(vw);
/* fall through */
case 1:
check_numeric(vy, "year");
y = vy;
}
{
VALUE nth;
int ry, rw, rd, rh, rmin, rs, rjd, rjd2, ns;
if (!valid_commercial_p(y, w, d, sg,
&nth, &ry,
&rw, &rd, &rjd,
&ns))
rb_raise(eDateError, "invalid date");
if (!c_valid_time_p(h, min, s, &rh, &rmin, &rs))
rb_raise(eDateError, "invalid date");
canon24oc();
/* Convert the local civil time to a UTC Julian day number. */
rjd2 = jd_local_to_utc(rjd,
time_to_df(rh, rmin, rs),
rof);
ret = d_complex_new_internal(klass,
nth, rjd2,
0, INT2FIX(0),
rof, sg,
0, 0, 0,
rh, rmin, rs,
HAVE_JD | HAVE_TIME);
}
add_frac();
return ret;
}
| 0
|
259,215
|
/*
 * Parses an ISO BMFF 'stsz' / 'stz2' sample-size atom: reads either a
 * constant sample size (stsz only) or a per-sample table packed at
 * field_size bits per entry into sc->sample_sizes.
 */
static int mov_read_stsz(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
MOVStreamContext *sc;
unsigned int i, entries, sample_size, field_size, num_bytes;
GetBitContext gb;
unsigned char* buf;
int ret;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
sc = st->priv_data;
avio_r8(pb); /* version */
avio_rb24(pb); /* flags */
if (atom.type == MKTAG('s','t','s','z')) {
sample_size = avio_rb32(pb);
if (!sc->sample_size) /* do not overwrite value computed in stsd */
sc->sample_size = sample_size;
sc->stsz_sample_size = sample_size;
field_size = 32;
} else {
/* 'stz2': per-sample sizes with an explicit field width. */
sample_size = 0;
avio_rb24(pb); /* reserved */
field_size = avio_r8(pb);
}
entries = avio_rb32(pb);
av_log(c->fc, AV_LOG_TRACE, "sample_size = %u sample_count = %u\n", sc->sample_size, entries);
sc->sample_count = entries;
/* Constant sample size: no table follows. */
if (sample_size)
return 0;
if (field_size != 4 && field_size != 8 && field_size != 16 && field_size != 32) {
av_log(c->fc, AV_LOG_ERROR, "Invalid sample field size %u\n", field_size);
return AVERROR_INVALIDDATA;
}
if (!entries)
return 0;
/* Reject tables whose bit-packed size would overflow the read buffer. */
if (entries >= (INT_MAX - 4 - 8 * AV_INPUT_BUFFER_PADDING_SIZE) / field_size)
return AVERROR_INVALIDDATA;
if (sc->sample_sizes)
av_log(c->fc, AV_LOG_WARNING, "Duplicated STSZ atom\n");
av_free(sc->sample_sizes);
sc->sample_count = 0;
sc->sample_sizes = av_malloc_array(entries, sizeof(*sc->sample_sizes));
if (!sc->sample_sizes)
return AVERROR(ENOMEM);
/* Round the packed bit count up to whole bytes (+4 bits of slack). */
num_bytes = (entries*field_size+4)>>3;
buf = av_malloc(num_bytes+AV_INPUT_BUFFER_PADDING_SIZE);
if (!buf) {
av_freep(&sc->sample_sizes);
return AVERROR(ENOMEM);
}
ret = ffio_read_size(pb, buf, num_bytes);
if (ret < 0) {
av_freep(&sc->sample_sizes);
av_free(buf);
av_log(c->fc, AV_LOG_WARNING, "STSZ atom truncated\n");
return 0;
}
init_get_bits(&gb, buf, 8*num_bytes);
for (i = 0; i < entries; i++) {
sc->sample_sizes[i] = get_bits_long(&gb, field_size);
if (sc->sample_sizes[i] < 0) {
av_free(buf);
av_log(c->fc, AV_LOG_ERROR, "Invalid sample size %d\n", sc->sample_sizes[i]);
return AVERROR_INVALIDDATA;
}
sc->data_size += sc->sample_sizes[i];
}
sc->sample_count = i;
av_free(buf);
return 0;
}
| 0
|
369,957
|
/*
 * Lookup handler for /proc/<pid>/map_files/<start>-<end>: resolves the
 * dentry name to a VMA address range and instantiates an entry backed by
 * that VMA's file.  Requires CAP_SYS_ADMIN and a tracer lock on the task.
 * Returns the instantiated dentry result or an ERR_PTR.
 */
static struct dentry *proc_map_files_lookup(struct inode *dir,
struct dentry *dentry, struct nameidata *nd)
{
unsigned long vm_start, vm_end;
struct vm_area_struct *vma;
struct task_struct *task;
struct dentry *result;
struct mm_struct *mm;
result = ERR_PTR(-EACCES);
if (!capable(CAP_SYS_ADMIN))
goto out;
result = ERR_PTR(-ENOENT);
task = get_proc_task(dir);
if (!task)
goto out;
result = ERR_PTR(-EACCES);
if (lock_trace(task))
goto out_put_task;
result = ERR_PTR(-ENOENT);
/* Parse "<start>-<end>" from the dentry name. */
if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
goto out_unlock;
mm = get_task_mm(task);
if (!mm)
goto out_unlock;
down_read(&mm->mmap_sem);
/* The VMA must still span exactly the named range. */
vma = find_exact_vma(mm, vm_start, vm_end);
if (!vma)
goto out_no_vma;
result = proc_map_files_instantiate(dir, dentry, task, vma->vm_file);
out_no_vma:
up_read(&mm->mmap_sem);
mmput(mm);
out_unlock:
unlock_trace(task);
out_put_task:
put_task_struct(task);
out:
return result;
}
| 0
|
226,438
|
// Emits one (indices, values, dense_shape) triple per position along the
// first dimension of the wrapped SparseTensor, producing empty tensors
// for positions with no entries.  Thread-safe via mu_.
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (i_ == num_elements_) {
*end_of_sequence = true;
return Status::OK();
}
out_tensors->clear();
out_tensors->reserve(3);
const int rank = Iterator::dataset()->sparse_tensor_.dims();
if (i_ > next_non_empty_i_ && iter_ != group_iterable_.end()) {
// We still have elements to consume from `group_iterable_`
// and we have emitted all elements up to and including the
// current position.
sparse::Group group = *iter_;
const auto indices = group.indices();
const auto values = group.values<T>();
const int64_t num_entries = values.size();
// indices(0, 0) is the first dimension of this group: the next
// position that has data.
next_non_empty_i_ = indices(0, 0);
next_indices_ = Tensor(DT_INT64, {num_entries, rank - 1});
next_values_ = Tensor(DataTypeToEnum<T>::value, {num_entries});
auto next_indices_t = next_indices_.matrix<int64_t>();
auto next_values_t = next_values_.vec<T>();
for (int64_t i = 0; i < num_entries; ++i) {
// Drop dimension 0 (the position being iterated over).
for (int d = 1; d < rank; ++d) {
next_indices_t(i, d - 1) = indices(i, d);
}
next_values_t(i) = values(i);
}
++iter_;
}
if (i_ == next_non_empty_i_) {
// The current position is non-empty in the input
// `SparseTensor`, and we have already read the value from the
// `GroupIterable`.
out_tensors->push_back(std::move(next_indices_));
out_tensors->push_back(std::move(next_values_));
out_tensors->push_back(dense_shape_);
next_non_empty_i_ = kNextNonEmptyUnknown;
} else {
DCHECK(i_ < next_non_empty_i_ || iter_ == group_iterable_.end());
// The current position is empty in the input `SparseTensor`,
// so emit empty indices and values.
out_tensors->push_back(Tensor(DT_INT64, TensorShape({0, rank - 1})));
out_tensors->push_back(Tensor(DataTypeToEnum<T>::value, {0}));
out_tensors->push_back(dense_shape_);
}
++i_;
*end_of_sequence = false;
return Status::OK();
}
| 0
|
359,211
|
/*
 * Returns the page offset of a record header, measured from the start of
 * the bpf_ringbuf structure itself.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
struct bpf_ringbuf_hdr *hdr)
{
return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}
| 0
|
369,901
|
/*
 * dcache revalidation for /proc/<pid>/fd/<n> entries: the dentry stays
 * valid only while the task exists and still holds that fd open, in
 * which case the inode's ownership is refreshed from the task's
 * credentials (or reset to root when the task is not dumpable).
 * Returns 1 when valid, 0 (after d_drop) otherwise.
 */
static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode;
struct task_struct *task;
int fd;
struct files_struct *files;
const struct cred *cred;
/* Cannot take references or sleep in RCU-walk mode. */
if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
task = get_proc_task(inode);
fd = proc_fd(inode);
if (task) {
files = get_files_struct(task);
if (files) {
rcu_read_lock();
if (fcheck_files(files, fd)) {
rcu_read_unlock();
put_files_struct(files);
if (task_dumpable(task)) {
rcu_read_lock();
cred = __task_cred(task);
inode->i_uid = cred->euid;
inode->i_gid = cred->egid;
rcu_read_unlock();
} else {
/* Non-dumpable tasks expose their fds as root-owned. */
inode->i_uid = 0;
inode->i_gid = 0;
}
inode->i_mode &= ~(S_ISUID | S_ISGID);
security_task_to_inode(task, inode);
put_task_struct(task);
return 1;
}
rcu_read_unlock();
put_files_struct(files);
}
put_task_struct(task);
}
d_drop(dentry);
return 0;
}
| 0
|
261,438
|
/*
 * Decodes one HEVC significant_coeff_flag CABAC bin for coefficient
 * (xC,yC).  The context increment (sigCtx) is derived per the spec from
 * the transform size (sbWidth = width in 4x4 sub-blocks), the position
 * within the sub-block, the coded_sub_block_flag of the right/lower
 * neighbours, the component (cIdx) and the scan order.
 */
static int decode_significant_coeff_flag(thread_context* tctx,
int xC,int yC,
const uint8_t* coded_sub_block_flag,
int sbWidth,
int cIdx,
int scanIdx)
{
logtrace(LogSlice,"# significant_coeff_flag (xC:%d yC:%d sbWidth:%d cIdx:%d scanIdx:%d)\n",
xC,yC,sbWidth,cIdx,scanIdx);
int sigCtx;
// if log2TrafoSize==2
if (sbWidth==1) {
/* 4x4 transform: fixed position-to-context lookup table. */
sigCtx = ctxIdxMap[(yC<<2) + xC];
}
else if (xC+yC==0) {
/* DC coefficient always uses context 0. */
sigCtx = 0;
}
else {
int xS = xC>>2;
int yS = yC>>2;
int prevCsbf = 0;
/* Bit 0: right neighbour sub-block coded, bit 1: lower neighbour. */
if (xS < sbWidth-1) { prevCsbf += coded_sub_block_flag[xS+1 +yS*sbWidth]; }
if (yS < sbWidth-1) { prevCsbf += coded_sub_block_flag[xS+(1+yS)*sbWidth]<<1; }
int xP = xC & 3;
int yP = yC & 3;
logtrace(LogSlice,"posInSubset: %d,%d\n",xP,yP);
logtrace(LogSlice,"prevCsbf: %d\n",prevCsbf);
//printf("%d | %d %d\n",prevCsbf,xP,yP);
switch (prevCsbf) {
case 0:
//sigCtx = (xP+yP==0) ? 2 : (xP+yP<3) ? 1 : 0;
sigCtx = (xP+yP>=3) ? 0 : (xP+yP>0) ? 1 : 2;
break;
case 1:
sigCtx = (yP==0) ? 2 : (yP==1) ? 1 : 0;
break;
case 2:
sigCtx = (xP==0) ? 2 : (xP==1) ? 1 : 0;
break;
default:
sigCtx = 2;
break;
}
logtrace(LogSlice,"a) sigCtx=%d\n",sigCtx);
if (cIdx==0) {
/* Luma: extra offsets for non-DC sub-blocks and transform size. */
if (xS+yS > 0) sigCtx+=3;
logtrace(LogSlice,"b) sigCtx=%d\n",sigCtx);
// if log2TrafoSize==3
if (sbWidth==2) {
sigCtx += (scanIdx==0) ? 9 : 15;
} else {
sigCtx += 21;
}
logtrace(LogSlice,"c) sigCtx=%d\n",sigCtx);
}
else {
// if log2TrafoSize==3
if (sbWidth==2) {
sigCtx+=9;
}
else {
sigCtx+=12;
}
}
}
int ctxIdxInc;
/* Chroma contexts start at offset 27. */
if (cIdx==0) { ctxIdxInc=sigCtx; }
else { ctxIdxInc=27+sigCtx; }
int context = tctx->shdr->initType*42 + ctxIdxInc;
logtrace(LogSlice,"context: %d\n",context);
int bit = decode_CABAC_bit(&tctx->cabac_decoder,
&tctx->ctx_model[CONTEXT_MODEL_SIGNIFICANT_COEFF_FLAG + context]);
return bit;
}
| 0
|
218,826
|
static void XSetMatteColor(Display *display,const XWindowInfo *window_info,
  const MagickStatusType raised)
{
  unsigned long
    pixel;

  /*
    Select the foreground pixel for a raised or sunken matte effect:
    pure white/black on monochrome displays, otherwise the window's
    matte or depth color.
  */
  if (window_info->depth == 1)
    pixel=raised ? XWhitePixel(display,window_info->screen) :
      XBlackPixel(display,window_info->screen);
  else
    pixel=raised ? window_info->pixel_info->matte_color.pixel :
      window_info->pixel_info->depth_color.pixel;
  (void) XSetForeground(display,window_info->widget_context,pixel);
}
| 0
|
204,138
|
/*
 * Queues the 2-byte (status, message) response for a completed command:
 * delivered through the DMA memory-write callback when available,
 * deferred to the PDMA callback otherwise, or left in the FIFO for PIO
 * readout when DMA is off.
 */
static void write_response(ESPState *s)
{
uint32_t n;
trace_esp_write_response(s->status);
fifo8_reset(&s->fifo);
esp_fifo_push(s, s->status);
esp_fifo_push(s, 0);
if (s->dma) {
if (s->dma_memory_write) {
/* NOTE(review): assumes the two bytes just pushed come back as one
 * contiguous chunk from fifo8_pop_buf (n == 2) — confirm against the
 * fifo8 API before relying on this. */
s->dma_memory_write(s->dma_opaque,
(uint8_t *)fifo8_pop_buf(&s->fifo, 2, &n), 2);
s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
s->rregs[ESP_RSEQ] = SEQ_CD;
} else {
s->pdma_cb = write_response_pdma_cb;
esp_raise_drq(s);
return;
}
} else {
s->ti_size = 2;
s->rregs[ESP_RFLAGS] = 2;
}
esp_raise_irq(s);
}
| 1
|
405,384
|
/*
 * Finds the xfrm policy matching "sel" for migration: first the exact
 * hash chain, then the inexact list, keeping whichever candidate has
 * the better (lower-valued) priority.  Returns the policy with a held
 * reference, or NULL.
 */
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
u8 dir, u8 type, struct net *net, u32 if_id)
{
struct xfrm_policy *pol, *ret = NULL;
struct hlist_head *chain;
u32 priority = ~0U;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
hlist_for_each_entry(pol, chain, bydst) {
/* if_id == 0 means "match any interface id". */
if ((if_id == 0 || pol->if_id == if_id) &&
xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
ret = pol;
priority = ret->priority;
break;
}
}
chain = &net->xfrm.policy_inexact[dir];
hlist_for_each_entry(pol, chain, bydst_inexact_list) {
/* Stop once no inexact entry can beat the exact-chain hit. */
if ((pol->priority >= priority) && ret)
break;
if ((if_id == 0 || pol->if_id == if_id) &&
xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
ret = pol;
break;
}
}
xfrm_pol_hold(ret);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return ret;
}
| 0
|
313,865
|
/*
 * Handle the Normal-mode commands that act on the word under the cursor
 * (or the Visual selection): "*", "#", "K", "]", CTRL-] and their "g"
 * variants.  Builds the search / tag / 'keywordprg' command into an
 * allocated buffer, escapes the identifier as needed, then executes it.
 */
nv_ident(cmdarg_T *cap)
{
char_u *ptr = NULL;
char_u *buf;
unsigned buflen;
char_u *newbuf;
char_u *p;
char_u *kp; // value of 'keywordprg'
int kp_help; // 'keywordprg' is ":he"
int kp_ex; // 'keywordprg' starts with ":"
int n = 0; // init for GCC
int cmdchar;
int g_cmd; // "g" command
int tag_cmd = FALSE;
char_u *aux_ptr;
if (cap->cmdchar == 'g') // "g*", "g#", "g]" and "gCTRL-]"
{
cmdchar = cap->nchar;
g_cmd = TRUE;
}
else
{
cmdchar = cap->cmdchar;
g_cmd = FALSE;
}
if (cmdchar == POUND) // the pound sign, '#' for English keyboards
cmdchar = '#';
// The "]", "CTRL-]" and "K" commands accept an argument in Visual mode.
if (cmdchar == ']' || cmdchar == Ctrl_RSB || cmdchar == 'K')
{
if (VIsual_active && get_visual_text(cap, &ptr, &n) == FAIL)
return;
if (checkclearopq(cap->oap))
return;
}
// Without a Visual selection, take the identifier under the cursor.
if (ptr == NULL && (n = find_ident_under_cursor(&ptr,
(cmdchar == '*' || cmdchar == '#')
? FIND_IDENT|FIND_STRING : FIND_IDENT)) == 0)
{
clearop(cap->oap);
return;
}
// Allocate buffer to put the command in. Inserting backslashes can
// double the length of the word. p_kp / curbuf->b_p_kp could be added
// and some numbers.
kp = (*curbuf->b_p_kp == NUL ? p_kp : curbuf->b_p_kp);
kp_help = (*kp == NUL || STRCMP(kp, ":he") == 0
|| STRCMP(kp, ":help") == 0);
if (kp_help && *skipwhite(ptr) == NUL)
{
emsg(_(e_no_identifier_under_cursor)); // found white space only
return;
}
kp_ex = (*kp == ':');
buflen = (unsigned)(n * 2 + 30 + STRLEN(kp));
buf = alloc(buflen);
if (buf == NULL)
return;
buf[0] = NUL;
// Build the command prefix for the specific command character.
switch (cmdchar)
{
case '*':
case '#':
// Put cursor at start of word, makes search skip the word
// under the cursor.
// Call setpcmark() first, so "*``" puts the cursor back where
// it was.
setpcmark();
curwin->w_cursor.col = (colnr_T) (ptr - ml_get_curline());
if (!g_cmd && vim_iswordp(ptr))
STRCPY(buf, "\\<");
no_smartcase = TRUE; // don't use 'smartcase' now
break;
case 'K':
n = nv_K_getcmd(cap, kp, kp_help, kp_ex, &ptr, n, buf, buflen);
if (n == 0)
return;
break;
case ']':
tag_cmd = TRUE;
#ifdef FEAT_CSCOPE
if (p_cst)
STRCPY(buf, "cstag ");
else
#endif
STRCPY(buf, "ts ");
break;
default:
tag_cmd = TRUE;
if (curbuf->b_help)
STRCPY(buf, "he! ");
else
{
if (g_cmd)
STRCPY(buf, "tj ");
else if (cap->count0 == 0)
STRCPY(buf, "ta ");
else
sprintf((char *)buf, ":%ldta ", cap->count0);
}
}
// Now grab the chars in the identifier
if (cmdchar == 'K' && !kp_help)
{
ptr = vim_strnsave(ptr, n);
if (kp_ex)
// Escape the argument properly for an Ex command
p = vim_strsave_fnameescape(ptr, VSE_NONE);
else
// Escape the argument properly for a shell command
p = vim_strsave_shellescape(ptr, TRUE, TRUE);
vim_free(ptr);
if (p == NULL)
{
vim_free(buf);
return;
}
newbuf = vim_realloc(buf, STRLEN(buf) + STRLEN(p) + 1);
if (newbuf == NULL)
{
vim_free(buf);
vim_free(p);
return;
}
buf = newbuf;
STRCAT(buf, p);
vim_free(p);
}
else
{
// Pick the set of characters that must be backslash-escaped.
if (cmdchar == '*')
aux_ptr = (char_u *)(magic_isset() ? "/.*~[^$\\" : "/^$\\");
else if (cmdchar == '#')
aux_ptr = (char_u *)(magic_isset() ? "/?.*~[^$\\" : "/?^$\\");
else if (tag_cmd)
{
if (curbuf->b_help)
// ":help" handles unescaped argument
aux_ptr = (char_u *)"";
else
aux_ptr = (char_u *)"\\|\"\n[";
}
else
aux_ptr = (char_u *)"\\|\"\n*?[";
p = buf + STRLEN(buf);
while (n-- > 0)
{
// put a backslash before \ and some others
if (vim_strchr(aux_ptr, *ptr) != NULL)
*p++ = '\\';
// When current byte is a part of multibyte character, copy all
// bytes of that character.
if (has_mbyte)
{
int i;
int len = (*mb_ptr2len)(ptr) - 1;
for (i = 0; i < len && n >= 1; ++i, --n)
*p++ = *ptr++;
}
*p++ = *ptr++;
}
*p = NUL;
}
// Execute the command.
if (cmdchar == '*' || cmdchar == '#')
{
if (!g_cmd && (has_mbyte
? vim_iswordp(mb_prevptr(ml_get_curline(), ptr))
: vim_iswordc(ptr[-1])))
STRCAT(buf, "\\>");
// put pattern in search history
init_history();
add_to_history(HIST_SEARCH, buf, TRUE, NUL);
(void)normal_search(cap, cmdchar == '*' ? '/' : '?', buf, 0, NULL);
}
else
{
g_tag_at_cursor = TRUE;
do_cmdline_cmd(buf);
g_tag_at_cursor = FALSE;
}
vim_free(buf);
}
| 0
|
299,895
|
/*
 * Parses one "name = list" configuration line for a named list of type
 * "tname", inserting a node into the tree at *anchorp and assigning the
 * next list number.  An optional "_cache" suffix on the directive forces
 * caching even when the list contains expansions.  All parse errors are
 * fatal (log_write with LOG_PANIC_DIE).
 */
read_named_list(tree_node **anchorp, int *numberp, int max, uschar *s,
uschar *tname)
{
BOOL forcecache = FALSE;
uschar *ss;
tree_node *t;
namedlist_block *nb = store_get(sizeof(namedlist_block));
if (Ustrncmp(s, "_cache", 6) == 0)
{
forcecache = TRUE;
s += 6;
}
if (!isspace(*s))
log_write(0, LOG_PANIC_DIE|LOG_CONFIG_IN, "unrecognized configuration line");
if (*numberp >= max)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG_IN, "too many named %ss (max is %d)\n",
tname, max);
while (isspace(*s)) s++;
ss = s;
/* List names are alphanumeric/underscore only. */
while (isalnum(*s) || *s == '_') s++;
t = store_get(sizeof(tree_node) + s-ss);
Ustrncpy(t->name, ss, s-ss);
t->name[s-ss] = 0;
while (isspace(*s)) s++;
if (!tree_insertnode(anchorp, t))
log_write(0, LOG_PANIC_DIE|LOG_CONFIG_IN,
"duplicate name \"%s\" for a named %s", t->name, tname);
t->data.ptr = nb;
nb->number = *numberp;
*numberp += 1;
if (*s++ != '=') log_write(0, LOG_PANIC_DIE|LOG_CONFIG_IN,
"missing '=' after \"%s\"", t->name);
while (isspace(*s)) s++;
nb->string = read_string(s, t->name);
nb->cache_data = NULL;
/* Check the string for any expansions; if any are found, mark this list
uncacheable unless the user has explicited forced caching. */
if (!forcecache && Ustrchr(nb->string, '$') != NULL) nb->number = -1;
}
| 0
|
473,933
|
/*
 * Generic multi-byte helper: reports whether the character at *pp is
 * case-ambiguous, advancing *pp past it.  Only single-byte ASCII
 * characters can be ambiguous here; any multi-byte character is skipped
 * and reported FALSE.  "p" keeps the original position, so reading *p
 * after (*pp)++ is still the first byte.
 */
onigenc_mbn_is_mbc_ambiguous(OnigEncoding enc, OnigCaseFoldType flag,
const UChar** pp ARG_UNUSED, const UChar* end ARG_UNUSED)
{
const UChar* p = *pp;
if (ONIGENC_IS_MBC_ASCII(p)) {
(*pp)++;
return ONIGENC_IS_ASCII_CODE_CASE_AMBIG(*p);
}
(*pp) += enclen(enc, p);
return FALSE;
}
| 0
|
436,111
|
/*
 * Moves cached completed requests from the completion-side free list into
 * the submission-side reqs[] array (capped at its capacity).  Returns
 * true when at least one request is now available for reuse.
 */
static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
struct io_submit_state *state = &ctx->submit_state;
struct io_comp_state *cs = &state->comp;
int nr;
/*
 * If we have more than a batch's worth of requests in our IRQ side
 * locked cache, grab the lock and move them over to our submission
 * side cache.
 */
if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
io_flush_cached_locked_reqs(ctx, cs);
nr = state->free_reqs;
while (!list_empty(&cs->free_list)) {
struct io_kiocb *req = list_first_entry(&cs->free_list,
struct io_kiocb, compl.list);
list_del(&req->compl.list);
state->reqs[nr++] = req;
/* Stop once the submission cache is full; the rest stays queued. */
if (nr == ARRAY_SIZE(state->reqs))
break;
}
state->free_reqs = nr;
return nr != 0;
}
| 0
|
393,481
|
/*
 * Squirrel stdlib binding: installs the closure on top of the stack as
 * the VM's error handler.  Returns 0 values to the script.
 */
static SQInteger base_seterrorhandler(HSQUIRRELVM v)
{
sq_seterrorhandler(v);
return 0;
}
| 0
|
226,142
|
void rely_box_del(GF_Box *s)
{
	/* The 'rely' hint box owns no nested data; free the box itself. */
	gf_free((GF_RelyHintBox *)s);
}
| 0
|
261,431
|
/*
 * Decodes the HEVC merge candidate index as a truncated-unary code with
 * cMax = MaxNumMergeCand-1: one context-coded bin followed by bypass
 * bins.  Returns 0 immediately when only one candidate exists.
 */
static int decode_merge_idx(thread_context* tctx)
{
logtrace(LogSlice,"# merge_idx\n");
if (tctx->shdr->MaxNumMergeCand <= 1) {
logtrace(LogSymbols,"$1 merge_idx=%d\n",0);
return 0;
}
// TU coding, first bin is CABAC, remaining are bypass.
// cMax = MaxNumMergeCand-1
int idx = decode_CABAC_bit(&tctx->cabac_decoder,
&tctx->ctx_model[CONTEXT_MODEL_MERGE_IDX]);
if (idx==0) {
// nothing
}
else {
/* Count further bypass 1-bins up to the truncation limit. */
idx=1;
while (idx<tctx->shdr->MaxNumMergeCand-1) {
if (decode_CABAC_bypass(&tctx->cabac_decoder)) {
idx++;
}
else {
break;
}
}
}
logtrace(LogSlice,"> merge_idx = %d\n",idx);
logtrace(LogSymbols,"$1 merge_idx=%d\n",idx);
return idx;
}
| 0
|
321,739
|
/*
 * ioctl handler for the ASPEED LPC host-interface control device.
 * GET_SIZE reports the reserved-memory window size; MAP validates the
 * requested window and programs HICR7/HICR8 to expose flash or reserved
 * memory to the host LPC firmware space, then enables FWH cycles.
 */
static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
unsigned long param)
{
struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
struct device *dev = file->private_data;
void __user *p = (void __user *)param;
struct aspeed_lpc_ctrl_mapping map;
u32 addr;
u32 size;
long rc;
if (copy_from_user(&map, p, sizeof(map)))
return -EFAULT;
/* No flags are defined; reject anything set for forward compatibility. */
if (map.flags != 0)
return -EINVAL;
switch (cmd) {
case ASPEED_LPC_CTRL_IOCTL_GET_SIZE:
/* The flash windows don't report their size */
if (map.window_type != ASPEED_LPC_CTRL_WINDOW_MEMORY)
return -EINVAL;
/* Support more than one window id in the future */
if (map.window_id != 0)
return -EINVAL;
/* If memory-region is not described in device tree */
if (!lpc_ctrl->mem_size) {
dev_dbg(dev, "Didn't find reserved memory\n");
return -ENXIO;
}
map.size = lpc_ctrl->mem_size;
return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
case ASPEED_LPC_CTRL_IOCTL_MAP:
/*
 * The top half of HICR7 is the MSB of the BMC address of the
 * mapping.
 * The bottom half of HICR7 is the MSB of the HOST LPC
 * firmware space address of the mapping.
 *
 * The 1 bits in the top of half of HICR8 represent the bits
 * (in the requested address) that should be ignored and
 * replaced with those from the top half of HICR7.
 * The 1 bits in the bottom half of HICR8 represent the bits
 * (in the requested address) that should be kept and pass
 * into the BMC address space.
 */
/*
 * It doesn't make sense to talk about a size or offset with
 * low 16 bits set. Both HICR7 and HICR8 talk about the top 16
 * bits of addresses and sizes.
 */
if ((map.size & 0x0000ffff) || (map.offset & 0x0000ffff))
return -EINVAL;
/*
 * Because of the way the masks work in HICR8 offset has to
 * be a multiple of size.
 */
if (map.offset & (map.size - 1))
return -EINVAL;
if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) {
if (!lpc_ctrl->pnor_size) {
dev_dbg(dev, "Didn't find host pnor flash\n");
return -ENXIO;
}
addr = lpc_ctrl->pnor_base;
size = lpc_ctrl->pnor_size;
} else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) {
/* If memory-region is not described in device tree */
if (!lpc_ctrl->mem_size) {
dev_dbg(dev, "Didn't find reserved memory\n");
return -ENXIO;
}
addr = lpc_ctrl->mem_base;
size = lpc_ctrl->mem_size;
} else {
return -EINVAL;
}
/* Check overflow first! */
if (map.offset + map.size < map.offset ||
map.offset + map.size > size)
return -EINVAL;
if (map.size == 0 || map.size > size)
return -EINVAL;
addr += map.offset;
/*
 * addr (host lpc address) is safe regardless of values. This
 * simply changes the address the host has to request on its
 * side of the LPC bus. This cannot impact the hosts own
 * memory space by surprise as LPC specific accessors are
 * required. The only strange thing that could be done is
 * setting the lower 16 bits but the shift takes care of that.
 */
rc = regmap_write(lpc_ctrl->regmap, HICR7,
(addr | (map.addr >> 16)));
if (rc)
return rc;
rc = regmap_write(lpc_ctrl->regmap, HICR8,
(~(map.size - 1)) | ((map.size >> 16) - 1));
if (rc)
return rc;
/*
 * Switch to FWH2AHB mode, AST2600 only.
 *
 * The other bits in this register are interrupt status bits
 * that are cleared by writing 1. As we don't want to clear
 * them, set only the bit of interest.
 */
if (lpc_ctrl->fwh2ahb)
regmap_write(lpc_ctrl->regmap, HICR6, SW_FWH2AHB);
/*
 * Enable LPC FHW cycles. This is required for the host to
 * access the regions specified.
 */
return regmap_update_bits(lpc_ctrl->regmap, HICR5,
HICR5_ENFWH | HICR5_ENL2H,
HICR5_ENFWH | HICR5_ENL2H);
}
return -EINVAL;
}
| 0
|
313,742
|
check_visual_highlight(void)
{
    // Warn once per session when the terminal cannot highlight the
    // Visual area; only meaningful when running full-screen.
    static int	did_check = FALSE;

    if (!full_screen)
	return;
    if (!did_check && HL_ATTR(HLF_V) == 0)
	msg(_("Warning: terminal cannot highlight"));
    did_check = TRUE;
}
| 0
|
417,054
|
// Default constructor: no mixer attenuation, master volume at 256
// (the scale's full value as set here), and a cleared peak tracker.
PeakAutoAdjustFilter() :
mixerShift(0),
masterVolume(256),
lastPeakValue(0)
{
}
| 0
|
253,619
|
/*
 * Server-side copy of "len" bytes from srcfile to trgtfile using
 * FSCTL_SRV_COPYCHUNK_WRITE, one chunk per request, adapting the chunk
 * size once if the server reports its limits via -EINVAL.  Returns the
 * total bytes written, or a negative error code.
 */
smb2_copychunk_range(const unsigned int xid,
struct cifsFileInfo *srcfile,
struct cifsFileInfo *trgtfile, u64 src_off,
u64 len, u64 dest_off)
{
int rc;
unsigned int ret_data_len;
struct copychunk_ioctl *pcchunk;
struct copychunk_ioctl_rsp *retbuf = NULL;
struct cifs_tcon *tcon;
int chunks_copied = 0;
bool chunk_sizes_updated = false;
ssize_t bytes_written, total_bytes_written = 0;
pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
if (pcchunk == NULL)
return -ENOMEM;
cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
/* Request a key from the server to identify the source of the copy */
rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
srcfile->fid.persistent_fid,
srcfile->fid.volatile_fid, pcchunk);
/* Note: request_res_key sets res_key null only if rc !=0 */
if (rc)
goto cchunk_out;
/* For now array only one chunk long, will make more flexible later */
pcchunk->ChunkCount = cpu_to_le32(1);
pcchunk->Reserved = 0;
pcchunk->Reserved2 = 0;
tcon = tlink_tcon(trgtfile->tlink);
while (len > 0) {
pcchunk->SourceOffset = cpu_to_le64(src_off);
pcchunk->TargetOffset = cpu_to_le64(dest_off);
pcchunk->Length =
cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
/* Request server copy to target from src identified by key */
kfree(retbuf);
retbuf = NULL;
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
true /* is_fsctl */, (char *)pcchunk,
sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
(char **)&retbuf, &ret_data_len);
if (rc == 0) {
/* Validate the response before trusting its counters. */
if (ret_data_len !=
sizeof(struct copychunk_ioctl_rsp)) {
cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
rc = -EIO;
goto cchunk_out;
}
if (retbuf->TotalBytesWritten == 0) {
cifs_dbg(FYI, "no bytes copied\n");
rc = -EIO;
goto cchunk_out;
}
/*
 * Check if server claimed to write more than we asked
 */
if (le32_to_cpu(retbuf->TotalBytesWritten) >
le32_to_cpu(pcchunk->Length)) {
cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
rc = -EIO;
goto cchunk_out;
}
if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
rc = -EIO;
goto cchunk_out;
}
chunks_copied++;
bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
src_off += bytes_written;
dest_off += bytes_written;
len -= bytes_written;
total_bytes_written += bytes_written;
cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
le32_to_cpu(retbuf->ChunksWritten),
le32_to_cpu(retbuf->ChunkBytesWritten),
bytes_written);
} else if (rc == -EINVAL) {
if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
goto cchunk_out;
cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
le32_to_cpu(retbuf->ChunksWritten),
le32_to_cpu(retbuf->ChunkBytesWritten),
le32_to_cpu(retbuf->TotalBytesWritten));
/*
 * Check if this is the first request using these sizes,
 * (ie check if copy succeed once with original sizes
 * and check if the server gave us different sizes after
 * we already updated max sizes on previous request).
 * if not then why is the server returning an error now
 */
if ((chunks_copied != 0) || chunk_sizes_updated)
goto cchunk_out;
/* Check that server is not asking us to grow size */
if (le32_to_cpu(retbuf->ChunkBytesWritten) <
tcon->max_bytes_chunk)
tcon->max_bytes_chunk =
le32_to_cpu(retbuf->ChunkBytesWritten);
else
goto cchunk_out; /* server gave us bogus size */
/* No need to change MaxChunks since already set to 1 */
chunk_sizes_updated = true;
} else
goto cchunk_out;
}
cchunk_out:
kfree(pcchunk);
kfree(retbuf);
if (rc)
return rc;
else
return total_bytes_written;
}
| 0
|
205,630
|
/*
 * Prepares the request's kiocb for a read or write against its file:
 * validates the file mode, caches the file's nowait-support flags,
 * applies the user's RWF_* flags, decides whether the request may block,
 * and selects the completion callback (iopoll vs. normal).
 * Returns 0 or a negative errno.
 */
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
struct kiocb *kiocb = &req->rw.kiocb;
struct io_ring_ctx *ctx = req->ctx;
struct file *file = req->file;
int ret;
if (unlikely(!file || !(file->f_mode & mode)))
return -EBADF;
/* Cache the file's flags once; REQ_F_SUPPORT_NOWAIT_BIT marks that. */
if (!io_req_ffs_set(req))
req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
kiocb->ki_flags = iocb_flags(file);
ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
if (unlikely(ret))
return ret;
/*
 * If the file is marked O_NONBLOCK, still allow retry for it if it
 * supports async. Otherwise it's impossible to use O_NONBLOCK files
 * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
 */
if ((kiocb->ki_flags & IOCB_NOWAIT) ||
((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
req->flags |= REQ_F_NOWAIT;
if (ctx->flags & IORING_SETUP_IOPOLL) {
/* iopoll rings require direct I/O on a file that implements ->iopoll. */
if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
return -EOPNOTSUPP;
kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
kiocb->ki_complete = io_complete_rw_iopoll;
req->iopoll_completed = 0;
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
kiocb->ki_complete = io_complete_rw;
}
return 0;
}
| 1
|
486,790
|
/*
 * Return the DMA buffer address held in an RX descriptor.  Bits [1:0] of
 * the first word are status bits and are masked off; when 64-bit DMA
 * addressing is enabled in GEM_DMACFG, word 2 supplies the upper half.
 */
static inline uint64_t rx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
{
    uint64_t addr = desc[0] & ~0x3UL;

    if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
        addr |= (uint64_t)desc[2] << 32;
    }
    return addr;
}
| 0
|
343,320
|
/*
 * Return 1 when @addr is a usable unicast IPv4/IPv6 address, 0 otherwise
 * (NULL, unknown family, multicast, unspecified, INADDR_ANY/NONE/BROADCAST).
 */
static int checkvalidaddr(const struct sockaddr_storage * const addr)
{
    if (addr == NULL) {
        return 0;
    }
    /* Some versions of MacOS X have broken IN* macros */
#ifdef __APPLE_CC__
    return 1;
#endif
    switch (STORAGE_FAMILY(*addr)) {
    case AF_INET6:
        if (IN6_IS_ADDR_MULTICAST(&STORAGE_SIN_ADDR6_NF_CONST(*addr)) ||
            IN6_IS_ADDR_UNSPECIFIED(&STORAGE_SIN_ADDR6_NF_CONST(*addr))) {
            return 0;
        }
        return 1;
    case AF_INET: {
        const uint32_t ip = ntohl(STORAGE_SIN_ADDR_CONST(*addr));

        if (ip == INADDR_ANY || ip == INADDR_NONE ||
            ip == INADDR_BROADCAST || IN_MULTICAST(ip)) {
            return 0;
        }
        return 1;
    }
    default:
        return 0;
    }
}
| 0
|
440,872
|
/*
 * Build the log file name by substituting @idstring into the printf-style
 * @fname pattern.  If @backup is a non-empty pattern, rotate an existing
 * regular file at that name to "<name><backup-suffix>"; otherwise remove
 * any old log (a missing file is not an error).  Returns a heap-allocated
 * name owned by the caller; every failure path aborts via FatalError().
 */
LogFilePrep(const char *fname, const char *backup, const char *idstring)
{
    char *logFileName = NULL;

    /* the format string below is controlled by the user,
       this code should never be called with elevated privileges */
    if (asprintf(&logFileName, fname, idstring) == -1)
        FatalError("Cannot allocate space for the log file name\n");

    if (backup && *backup) {
        struct stat buf;

        /* Only rotate an existing *regular* file. */
        if (!stat(logFileName, &buf) && S_ISREG(buf.st_mode)) {
            char *suffix;
            char *oldLog;

            if ((asprintf(&suffix, backup, idstring) == -1) ||
                (asprintf(&oldLog, "%s%s", logFileName, suffix) == -1)) {
                FatalError("Cannot allocate space for the log file name\n");
            }
            free(suffix);
            if (rename(logFileName, oldLog) == -1) {
                FatalError("Cannot move old log file \"%s\" to \"%s\"\n",
                           logFileName, oldLog);
            }
            free(oldLog);
        }
    }
    else {
        /* No backup requested: remove the previous log, tolerating ENOENT. */
        if (remove(logFileName) != 0 && errno != ENOENT) {
            FatalError("Cannot remove old log file \"%s\": %s\n",
                       logFileName, strerror(errno));
        }
    }
    return logFileName;
}
| 0
|
310,302
|
/*
 * Fill @descs_out with descriptor cache entries selected by the URL-style
 * @key:
 *   /tor/server/all        - every router we know about
 *   /tor/server/authority  - this server's own descriptor
 *   /tor/server/d/<hex>+   - lookup by descriptor digest(s)
 *   /tor/server/fp/<hex>+  - lookup by identity fingerprint(s)
 * Returns 0 on success; on failure returns -1 and points *msg at a static
 * error string ("Key not recognized" / "Servers unavailable").
 */
dirserv_get_routerdescs(smartlist_t *descs_out, const char *key,
                        const char **msg)
{
  *msg = NULL;

  if (!strcmp(key, "/tor/server/all")) {
    routerlist_t *rl = router_get_routerlist();
    SMARTLIST_FOREACH(rl->routers, routerinfo_t *, r,
                      smartlist_add(descs_out, &(r->cache_info)));
  } else if (!strcmp(key, "/tor/server/authority")) {
    routerinfo_t *ri = router_get_my_routerinfo();
    if (ri)
      smartlist_add(descs_out, &(ri->cache_info));
  } else if (!strcmpstart(key, "/tor/server/d/")) {
    smartlist_t *digests = smartlist_create();
    key += strlen("/tor/server/d/");
    dir_split_resource_into_fingerprints(key, digests, NULL,
                                         DSR_HEX|DSR_SORT_UNIQ);
    SMARTLIST_FOREACH(digests, const char *, d,
      {
        signed_descriptor_t *sd = router_get_by_descriptor_digest(d);
        if (sd)
          smartlist_add(descs_out,sd);
      });
    /* The fingerprint list owns its strings; free them before the list. */
    SMARTLIST_FOREACH(digests, char *, d, tor_free(d));
    smartlist_free(digests);
  } else if (!strcmpstart(key, "/tor/server/fp/")) {
    smartlist_t *digests = smartlist_create();
    time_t cutoff = time(NULL) - ROUTER_MAX_AGE_TO_PUBLISH;
    key += strlen("/tor/server/fp/");
    dir_split_resource_into_fingerprints(key, digests, NULL,
                                         DSR_HEX|DSR_SORT_UNIQ);
    SMARTLIST_FOREACH(digests, const char *, d,
      {
        if (router_digest_is_me(d)) {
          /* make sure desc_routerinfo exists */
          routerinfo_t *ri = router_get_my_routerinfo();
          if (ri)
            smartlist_add(descs_out, &(ri->cache_info));
        } else {
          routerinfo_t *ri = router_get_by_digest(d);
          /* Don't actually serve a descriptor that everyone will think is
           * expired. This is an (ugly) workaround to keep buggy 0.1.1.10
           * Tors from downloading descriptors that they will throw away.
           */
          if (ri && ri->cache_info.published_on > cutoff)
            smartlist_add(descs_out, &(ri->cache_info));
        }
      });
    SMARTLIST_FOREACH(digests, char *, d, tor_free(d));
    smartlist_free(digests);
  } else {
    *msg = "Key not recognized";
    return -1;
  }

  if (!smartlist_len(descs_out)) {
    *msg = "Servers unavailable";
    return -1;
  }
  return 0;
}
| 0
|
301,370
|
/*
 * VFS backend rename.  Alternate data streams cannot be renamed through
 * the POSIX rename() call, so stream names are rejected with ENOENT.
 * Returns rename()'s result (0 or -1 with errno set).
 */
static int vfswrap_rename(vfs_handle_struct *handle,
			  const struct smb_filename *smb_fname_src,
			  const struct smb_filename *smb_fname_dst)
{
	int result;

	START_PROFILE(syscall_rename);

	if (smb_fname_src->stream_name != NULL ||
	    smb_fname_dst->stream_name != NULL) {
		errno = ENOENT;
		result = -1;
	} else {
		result = rename(smb_fname_src->base_name,
				smb_fname_dst->base_name);
	}

	END_PROFILE(syscall_rename);
	return result;
}
| 0
|
238,487
|
/*
 * Compute the padding needed to bring log position @pos up to the next
 * BPF log alignment boundary, minus one (presumably leaving room for a
 * terminating character -- confirm against the caller).
 */
static inline u32 vlog_alignment(u32 pos)
{
	u32 target = max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT);
	u32 aligned = round_up(target, BPF_LOG_MIN_ALIGNMENT);

	return aligned - pos - 1;
}
| 0
|
281,106
|
/*
 * Build a "dummy" bundle wrapping xflo->dst_orig so packets can be queued
 * via xdst_queue_output() while IPsec states for the flow resolve.  When
 * queueing is not wanted (XFRM_LOOKUP_QUEUE unset, larval-drop sysctl set,
 * or num_xfrms <= 0) the bare xdst from xfrm_alloc_dst() is returned.
 * Returns an ERR_PTR on allocation failure or when the device is missing.
 */
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	/* First reference is owned by xdst->route ... */
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	/* ... second reference is owned by dst1->child. */
	dst_hold(dst);
	dst1->child = dst;
	dst1->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}
| 0
|
398,532
|
/*
 * Map a DWARF attribute code to its printable name, or NULL when unknown.
 * Dense low-numbered codes come straight from the lookup array; the sparse
 * vendor/user codes are resolved through a small code/name table instead
 * of wasting memory on a huge array.
 */
RZ_API const char *rz_bin_dwarf_get_attr_name(ut64 attr_code) {
	if (attr_code < RZ_ARRAY_SIZE(dwarf_attr_encodings)) {
		return dwarf_attr_encodings[attr_code];
	}
	static const struct {
		ut64 code;
		const char *name;
	} sparse_attrs[] = {
		{ DW_AT_lo_user, "DW_AT_lo_user" },
		{ DW_AT_MIPS_linkage_name, "DW_AT_MIPS_linkage_name" },
		{ DW_AT_GNU_call_site_value, "DW_AT_GNU_call_site_value" },
		{ DW_AT_GNU_call_site_data_value, "DW_AT_GNU_call_site_data_value" },
		{ DW_AT_GNU_call_site_target, "DW_AT_GNU_call_site_target" },
		{ DW_AT_GNU_call_site_target_clobbered, "DW_AT_GNU_call_site_target_clobbered" },
		{ DW_AT_GNU_tail_call, "DW_AT_GNU_tail_call" },
		{ DW_AT_GNU_all_tail_call_sites, "DW_AT_GNU_all_tail_call_sites" },
		{ DW_AT_GNU_all_call_sites, "DW_AT_GNU_all_call_sites" },
		{ DW_AT_GNU_all_source_call_sites, "DW_AT_GNU_all_source_call_sites" },
		{ DW_AT_GNU_macros, "DW_AT_GNU_macros" },
		{ DW_AT_GNU_deleted, "DW_AT_GNU_deleted" },
		{ DW_AT_GNU_dwo_name, "DW_AT_GNU_dwo_name" },
		{ DW_AT_GNU_dwo_id, "DW_AT_GNU_dwo_id" },
		{ DW_AT_GNU_ranges_base, "DW_AT_GNU_ranges_base" },
		{ DW_AT_GNU_addr_base, "DW_AT_GNU_addr_base" },
		{ DW_AT_GNU_pubnames, "DW_AT_GNU_pubnames" },
		{ DW_AT_GNU_pubtypes, "DW_AT_GNU_pubtypes" },
		{ DW_AT_hi_user, "DW_AT_hi_user" },
	};
	for (size_t i = 0; i < RZ_ARRAY_SIZE(sparse_attrs); i++) {
		if (sparse_attrs[i].code == attr_code) {
			return sparse_attrs[i].name;
		}
	}
	return NULL;
}
| 0
|
387,582
|
/*
 * Fill an enumerated control's info: set type/count/items, clamp the
 * requested item index into range, then copy that item's name (warning
 * if it would be truncated).  Always returns 0.
 */
int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
		      unsigned int items, const char *const names[])
{
	unsigned int idx;

	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	info->count = channels;
	info->value.enumerated.items = items;
	if (items == 0)
		return 0;

	/* Clamp before indexing names[] below. */
	if (info->value.enumerated.item >= items)
		info->value.enumerated.item = items - 1;
	idx = info->value.enumerated.item;

	WARN(strlen(names[idx]) >= sizeof(info->value.enumerated.name),
	     "ALSA: too long item name '%s'\n", names[idx]);
	strscpy(info->value.enumerated.name, names[idx],
		sizeof(info->value.enumerated.name));
	return 0;
}
| 0
|
270,364
|
/*
 * Parse the 13-byte IHDR chunk: read width/height/bit depth/color type,
 * validate compression, filter and interlace methods, reject width values
 * whose row stride would overflow 32 bits, and verify the color-type /
 * bit-depth combination against the PNG specification.  On success stores
 * the results in decoder/png and returns true; otherwise records an error
 * on png and returns false.
 */
static bool ok_png_read_header(ok_png_decoder *decoder, uint32_t chunk_length) {
    ok_png *png = decoder->png;
    if (chunk_length != 13) {
        ok_png_error(png, OK_PNG_ERROR_INVALID, "Invalid IHDR chunk length");
        return false;
    }
    uint8_t chunk_data[13];
    if (!ok_read(decoder, chunk_data, sizeof(chunk_data))) {
        return false;
    }
    png->width = readBE32(chunk_data);
    png->height = readBE32(chunk_data + 4);
    png->bpp = 4; // Always decoding to 32-bit color
    decoder->bit_depth = chunk_data[8];
    decoder->color_type = chunk_data[9];
    uint8_t compression_method = chunk_data[10];
    uint8_t filter_method = chunk_data[11];
    decoder->interlace_method = chunk_data[12];
    // 64-bit product so a huge width cannot overflow before the range check.
    uint64_t stride = (uint64_t)png->width * png->bpp;
    if (compression_method != 0) {
        ok_png_error(png, OK_PNG_ERROR_INVALID, "Invalid compression method");
        return false;
    } else if (filter_method != 0) {
        ok_png_error(png, OK_PNG_ERROR_INVALID, "Invalid filter method");
        return false;
    } else if (decoder->interlace_method != 0 && decoder->interlace_method != 1) {
        ok_png_error(png, OK_PNG_ERROR_INVALID, "Invalid interlace method");
        return false;
    } else if (stride > UINT32_MAX) {
        ok_png_error(png, OK_PNG_ERROR_UNSUPPORTED, "Width too large");
        return false;
    }
    // Allowed bit depths per color type, as defined by the PNG spec.
    const int c = decoder->color_type;
    const int b = decoder->bit_depth;
    const bool valid =
        (c == OK_PNG_COLOR_TYPE_GRAYSCALE && (b == 1 || b == 2 || b == 4 || b == 8 || b == 16)) ||
        (c == OK_PNG_COLOR_TYPE_RGB && (b == 8 || b == 16)) ||
        (c == OK_PNG_COLOR_TYPE_PALETTE && (b == 1 || b == 2 || b == 4 || b == 8)) ||
        (c == OK_PNG_COLOR_TYPE_GRAYSCALE_WITH_ALPHA && (b == 8 || b == 16)) ||
        (c == OK_PNG_COLOR_TYPE_RGB_WITH_ALPHA && (b == 8 || b == 16));
    if (!valid) {
        ok_png_error(png, OK_PNG_ERROR_INVALID, "Invalid combination of color type and bit depth");
        return false;
    }
    png->stride = (uint32_t)stride;
    png->has_alpha = (c == OK_PNG_COLOR_TYPE_GRAYSCALE_WITH_ALPHA ||
                      c == OK_PNG_COLOR_TYPE_RGB_WITH_ALPHA);
    decoder->interlace_pass = 0;
    decoder->ready_for_next_interlace_pass = true;
    return true;
}
| 0
|
277,493
|
/**
 * Parse a single INDX record.
 *
 * If the record carries a TAGX section (i.e. it is the index header
 * record and no totals have been recorded yet), its TAGX metadata plus
 * optional ORDT tables and index name are read into @tagx, @ordt and
 * @indx.  Otherwise the record is treated as a data record: its IDXT
 * offset table is parsed and each index entry appended to @indx->entries.
 *
 * Returns MOBI_SUCCESS, or MOBI_INIT_FAILED / MOBI_MALLOC_FAILED /
 * MOBI_DATA_CORRUPT on error; the working buffer is always freed.
 */
MOBI_RET mobi_parse_indx(const MOBIPdbRecord *indx_record, MOBIIndx *indx, MOBITagx *tagx, MOBIOrdt *ordt) {
    if (indx_record == NULL || indx == NULL || tagx == NULL || ordt == NULL) {
        debug_print("%s", "index structure not initialized\n");
        return MOBI_INIT_FAILED;
    }
    MOBI_RET ret = MOBI_SUCCESS;
    MOBIBuffer *buf = mobi_buffer_init_null(indx_record->data, indx_record->size);
    if (buf == NULL) {
        debug_print("%s\n", "Memory allocation failed");
        return MOBI_MALLOC_FAILED;
    }
    char indx_magic[5];
    mobi_buffer_getstring(indx_magic, buf, 4); /* 0: INDX magic */
    const uint32_t header_length = mobi_buffer_get32(buf); /* 4: header length */
    if (strncmp(indx_magic, INDX_MAGIC, 4) != 0 ||
        header_length == 0 || header_length > indx_record->size) {
        debug_print("INDX wrong magic: %s or header length: %u\n", indx_magic, header_length);
        mobi_buffer_free_null(buf);
        return MOBI_DATA_CORRUPT;
    }
    mobi_buffer_seek(buf, 4); /* 8: unk, usually zeroes */
    const uint32_t type = mobi_buffer_get32(buf); /* 12: 0 - normal, 2 - inflection */
    mobi_buffer_seek(buf, 4); /* 16: unk */
    const uint32_t idxt_offset = mobi_buffer_get32(buf); /* 20: IDXT offset */
    const uint32_t entries_count = mobi_buffer_get32(buf); /* 24: entries count */
    if (entries_count > INDX_RECORD_MAXCNT) {
        debug_print("Too many index entries (%u)\n", entries_count);
        mobi_buffer_free_null(buf);
        return MOBI_DATA_CORRUPT;
    }
    /* if record contains TAGX section, read it (and ORDT) and return */
    if (mobi_buffer_match_magic_offset(buf, TAGX_MAGIC, header_length) && indx->total_entries_count == 0) {
        buf->maxlen = header_length;
        /* TAGX metadata */
        uint32_t encoding = mobi_buffer_get32(buf); /* 28: encoding */
        if (encoding == MOBI_NOTSET) { encoding = MOBI_CP1252; }
        mobi_buffer_seek(buf, 4); /* 32 */
        const uint32_t total_entries_count = mobi_buffer_get32(buf); /* 36: total entries count */
        if (total_entries_count > INDX_TOTAL_MAXCNT) {
            debug_print("Too many total index entries (%u)\n", total_entries_count);
            mobi_buffer_free_null(buf);
            return MOBI_DATA_CORRUPT;
        }
        uint32_t ordt_offset = mobi_buffer_get32(buf); /* 40: ORDT offset; currently not used */
        if (ordt_offset + ORDT_RECORD_MAXCNT + 4 > indx_record->size) {
            ordt_offset = 0;
        }
        uint32_t ligt_offset = mobi_buffer_get32(buf); /* 44: LIGT offset; currently static table used instead */
        uint32_t ligt_entries_count = mobi_buffer_get32(buf); /* 48: LIGT entries count */
        if (ligt_offset + 4 * ligt_entries_count + 4 > indx_record->size) {
            ligt_offset = 0;
            ligt_entries_count = 0;
        }
        const uint32_t cncx_records_count = mobi_buffer_get32(buf); /* 52: CNCX entries count */
        if (cncx_records_count > CNCX_RECORD_MAXCNT) {
            debug_print("Too many CNCX records (%u)\n", cncx_records_count);
            mobi_buffer_free_null(buf);
            return MOBI_DATA_CORRUPT;
        }
        /* 56: unk count */
        /* 60-148: phonetizer */
        uint32_t ordt_type = 0;
        uint32_t ordt_entries_count = 0;
        uint32_t ordt1_offset = 0;
        uint32_t ordt2_offset = 0;
        uint32_t index_name_offset = 0;
        uint32_t index_name_length = 0;
        if (header_length >= 180) {
            mobi_buffer_setpos(buf, 164);
            ordt_type = mobi_buffer_get32(buf); /* 164: ORDT type */
            ordt_entries_count = mobi_buffer_get32(buf); /* 168: ORDT entries count */
            ordt1_offset = mobi_buffer_get32(buf); /* 172: ORDT1 offset; currently not used */
            ordt2_offset = mobi_buffer_get32(buf); /* 176: ORDT2 offset */
            const size_t entry_size = (ordt_type == 0) ? 1 : 2;
            /* drop ORDT tables whose extents fall outside this record */
            if (ordt1_offset + entry_size * ordt_entries_count > indx_record->size
                || ordt2_offset + 2 * ordt_entries_count > indx_record->size) {
                ordt1_offset = 0;
                ordt2_offset = 0;
                ordt_entries_count = 0;
            }
            index_name_offset = mobi_buffer_get32(buf); /* 180: Index name offset */
            index_name_length = mobi_buffer_get32(buf); /* 184: Index name length */
        }
        buf->maxlen = indx_record->size;
        mobi_buffer_setpos(buf, header_length);
        ret = mobi_parse_tagx(buf, tagx);
        if (ret != MOBI_SUCCESS) {
            mobi_buffer_free_null(buf);
            return ret;
        }
        if (ordt_entries_count > 0) {
            /* parse ORDT sections */
            ordt->offsets_count = ordt_entries_count;
            ordt->type = ordt_type;
            ordt->ordt1_pos = ordt1_offset;
            ordt->ordt2_pos = ordt2_offset;
            ret = mobi_parse_ordt(buf, ordt);
            debug_print("ORDT: %u, %u, %u, %u\n", ordt_type, ordt_entries_count, ordt1_offset, ordt2_offset);
            if (ret != MOBI_SUCCESS) {
                mobi_buffer_free_null(buf);
                return ret;
            }
        }
        if (index_name_offset > 0 && index_name_length > 0) {
            if (index_name_length <= header_length - index_name_offset && index_name_length < INDX_NAME_SIZEMAX) {
                mobi_buffer_setpos(buf, index_name_offset);
                char *name = malloc(index_name_length + 1);
                if (name == NULL) {
                    debug_print("%s", "Memory allocation failed\n");
                    mobi_buffer_free_null(buf);
                    return MOBI_MALLOC_FAILED;
                }
                mobi_buffer_getstring(name, buf, index_name_length);
                /* ownership of name transfers to indx */
                indx->orth_index_name = name;
                debug_print("Orth index name: %s\n", name);
            }
        }
        indx->encoding = encoding;
        indx->type = type;
        indx->entries_count = entries_count;
        indx->total_entries_count = total_entries_count;
        if (ligt_entries_count != 0 && !mobi_buffer_match_magic_offset(buf, LIGT_MAGIC, ligt_offset)) {
            ligt_offset = 0;
            ligt_entries_count = 0;
        }
        indx->ligt_offset = ligt_offset;
        indx->ligt_entries_count = ligt_entries_count;
        indx->ordt_offset = ordt_offset;
        indx->cncx_records_count = cncx_records_count;
    } else {
        /* else parse IDXT entries offsets */
        if (idxt_offset == 0) {
            debug_print("%s", "Missing IDXT offset\n");
            mobi_buffer_free_null(buf);
            return MOBI_DATA_CORRUPT;
        }
        if (idxt_offset + 2 * entries_count + 4 > indx_record->size ) {
            debug_print("IDXT entries beyond record end%s", "\n");
            mobi_buffer_free_null(buf);
            return MOBI_DATA_CORRUPT;
        }
        mobi_buffer_setpos(buf, idxt_offset);
        MOBIIdxt idxt;
        uint32_t *offsets = malloc((entries_count + 1) * sizeof(uint32_t));
        if (offsets == NULL) {
            mobi_buffer_free_null(buf);
            debug_print("%s\n", "Memory allocation failed");
            return MOBI_MALLOC_FAILED;
        }
        idxt.offsets = offsets;
        ret = mobi_parse_idxt(buf, &idxt, entries_count);
        if (ret != MOBI_SUCCESS) {
            debug_print("%s", "IDXT parsing failed\n");
            mobi_buffer_free_null(buf);
            free(offsets);
            return ret;
        }
        /* parse entries */
        if (entries_count > 0) {
            if (indx->entries == NULL) {
                indx->entries = malloc(indx->total_entries_count * sizeof(MOBIIndexEntry));
                if (indx->entries == NULL) {
                    mobi_buffer_free_null(buf);
                    free(offsets);
                    debug_print("%s\n", "Memory allocation failed");
                    return MOBI_MALLOC_FAILED;
                }
            }
            size_t i = 0;
            while (i < entries_count) {
                ret = mobi_parse_index_entry(indx, idxt, tagx, ordt, buf, i++);
                if (ret != MOBI_SUCCESS) {
                    mobi_buffer_free_null(buf);
                    free(offsets);
                    return ret;
                }
            }
            indx->entries_count += entries_count;
        }
        free(offsets);
    }
    mobi_buffer_free_null(buf);
    return MOBI_SUCCESS;
}
| 0
|
234,132
|
/* Return a pointer into .debug_str at OFFSET, or a translated placeholder
   string when the section is absent, the offset is out of range, or the
   tail of the section lacks a NUL terminator.  */
fetch_indirect_string (dwarf_vma offset)
{
  struct dwarf_section *section = &debug_displays [str].section;
  const unsigned char *str_start;

  if (section->start == NULL)
    return (const unsigned char *) _("<no .debug_str section>");

  if (offset >= section->size)
    {
      warn (_("DW_FORM_strp offset too big: 0x%s\n"),
	    dwarf_vmatoa ("x", offset));
      return (const unsigned char *) _("<offset is too big>");
    }

  str_start = section->start + offset;
  /* Unfortunately we cannot rely upon the .debug_str section ending with a
     NUL byte.  Since our caller is expecting to receive a well formed C
     string we test for the lack of a terminating byte here.  */
  if (strnlen ((const char *) str_start, section->size - offset)
      == section->size - offset)
    return (const unsigned char *)
      _("<no NUL byte at end of .debug_str section>");

  return str_start;
}
| 0
|
90,205
|
// Whether the cellular device is currently connecting; false when no
// cellular device is present.
virtual bool cellular_connecting() const {
    if (!cellular_) {
        return false;
    }
    return cellular_->connecting();
}
| 0
|
261,758
|
// Locate the digest inside a 764-byte C1 digest block and return a copy.
// Layout of the block:
//   offset:      4 bytes
//   random-data: (offset) bytes
//   digest-data: 32 bytes
//   random-data: (764 - 4 - offset - 32) bytes
// *digestPos is set to the digest's position inside the buffer.
string RtmpProtocol::get_C1_digest(const uint8_t *ptr, char **digestPos) {
    int offset = 0;
    for (int idx = 0; idx < C1_OFFSET_SIZE; ++idx) {
        offset += ptr[idx];
    }
    // Keep the digest inside the schema block.
    offset %= (C1_SCHEMA_SIZE - C1_DIGEST_SIZE - C1_OFFSET_SIZE);

    char *pos = (char *) ptr + C1_OFFSET_SIZE + offset;
    *digestPos = pos;
    //DebugL << "digest offset:" << offset << ",digest:" << hexdump(digest.data(),digest.size());
    return string(pos, C1_DIGEST_SIZE);
}
| 0
|
222,523
|
// Returns the name of the gradient function registered for `func`, or the
// empty string when no gradient has been registered.
string FunctionLibraryDefinition::FindGradientHelper(const string& func) const {
  return gtl::FindWithDefault(func_grad_, func, "");
}
| 0
|
208,654
|
/*
 * Module init for ext/snmp: initialises the net-snmp library (with its
 * logging disabled in favour of error-status tracking), installs custom
 * object handlers, registers the SNMP class with its properties and
 * constants, and registers the SNMPException class.
 */
PHP_MINIT_FUNCTION(snmp)
{
	netsnmp_log_handler *logh;
	zend_class_entry ce, cex;

	le_snmp_session = zend_register_list_destructors_ex(php_snmp_session_destructor, NULL, PHP_SNMP_SESSION_RES_NAME, module_number);

	init_snmp("snmpapp");

#ifdef NETSNMP_DS_LIB_DONT_PERSIST_STATE
	/* Prevent update of the snmpapp.conf file */
	netsnmp_ds_set_boolean(NETSNMP_DS_LIBRARY_ID, NETSNMP_DS_LIB_DONT_PERSIST_STATE, 1);
#endif

	/* Disable logging, use exit status'es and related variabled to detect errors */
	shutdown_snmp_logging();
	logh = netsnmp_register_loghandler(NETSNMP_LOGHANDLER_NONE, LOG_ERR);
	if (logh) {
		logh->pri_max = LOG_ERR;
	}

	/* Custom object handlers give the SNMP class its virtual properties. */
	memcpy(&php_snmp_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
	php_snmp_object_handlers.read_property = php_snmp_read_property;
	php_snmp_object_handlers.write_property = php_snmp_write_property;
	php_snmp_object_handlers.has_property = php_snmp_has_property;
	php_snmp_object_handlers.get_properties = php_snmp_get_properties;

	/* Register SNMP Class */
	INIT_CLASS_ENTRY(ce, "SNMP", php_snmp_class_methods);
	ce.create_object = php_snmp_object_new;
	php_snmp_object_handlers.clone_obj = NULL;
	php_snmp_ce = zend_register_internal_class(&ce TSRMLS_CC);

	/* Register SNMP Class properties */
	zend_hash_init(&php_snmp_properties, 0, NULL, NULL, 1);
	PHP_SNMP_ADD_PROPERTIES(&php_snmp_properties, php_snmp_property_entries);

	REGISTER_LONG_CONSTANT("SNMP_OID_OUTPUT_SUFFIX", NETSNMP_OID_OUTPUT_SUFFIX, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_OID_OUTPUT_MODULE", NETSNMP_OID_OUTPUT_MODULE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_OID_OUTPUT_FULL", NETSNMP_OID_OUTPUT_FULL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_OID_OUTPUT_NUMERIC", NETSNMP_OID_OUTPUT_NUMERIC, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_OID_OUTPUT_UCD", NETSNMP_OID_OUTPUT_UCD, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_OID_OUTPUT_NONE", NETSNMP_OID_OUTPUT_NONE, CONST_CS | CONST_PERSISTENT);

	REGISTER_LONG_CONSTANT("SNMP_VALUE_LIBRARY", SNMP_VALUE_LIBRARY, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_VALUE_PLAIN", SNMP_VALUE_PLAIN, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_VALUE_OBJECT", SNMP_VALUE_OBJECT, CONST_CS | CONST_PERSISTENT);

	REGISTER_LONG_CONSTANT("SNMP_BIT_STR", ASN_BIT_STR, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_OCTET_STR", ASN_OCTET_STR, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_OPAQUE", ASN_OPAQUE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_NULL", ASN_NULL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_OBJECT_ID", ASN_OBJECT_ID, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_IPADDRESS", ASN_IPADDRESS, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_COUNTER", ASN_GAUGE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_UNSIGNED", ASN_UNSIGNED, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_TIMETICKS", ASN_TIMETICKS, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_UINTEGER", ASN_UINTEGER, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_INTEGER", ASN_INTEGER, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("SNMP_COUNTER64", ASN_COUNTER64, CONST_CS | CONST_PERSISTENT);

	REGISTER_SNMP_CLASS_CONST_LONG("VERSION_1", SNMP_VERSION_1);
	REGISTER_SNMP_CLASS_CONST_LONG("VERSION_2c", SNMP_VERSION_2c);
	REGISTER_SNMP_CLASS_CONST_LONG("VERSION_2C", SNMP_VERSION_2c);
	REGISTER_SNMP_CLASS_CONST_LONG("VERSION_3", SNMP_VERSION_3);

	REGISTER_SNMP_CLASS_CONST_LONG("ERRNO_NOERROR", PHP_SNMP_ERRNO_NOERROR);
	REGISTER_SNMP_CLASS_CONST_LONG("ERRNO_ANY", PHP_SNMP_ERRNO_ANY);
	REGISTER_SNMP_CLASS_CONST_LONG("ERRNO_GENERIC", PHP_SNMP_ERRNO_GENERIC);
	REGISTER_SNMP_CLASS_CONST_LONG("ERRNO_TIMEOUT", PHP_SNMP_ERRNO_TIMEOUT);
	REGISTER_SNMP_CLASS_CONST_LONG("ERRNO_ERROR_IN_REPLY", PHP_SNMP_ERRNO_ERROR_IN_REPLY);
	REGISTER_SNMP_CLASS_CONST_LONG("ERRNO_OID_NOT_INCREASING", PHP_SNMP_ERRNO_OID_NOT_INCREASING);
	REGISTER_SNMP_CLASS_CONST_LONG("ERRNO_OID_PARSING_ERROR", PHP_SNMP_ERRNO_OID_PARSING_ERROR);
	REGISTER_SNMP_CLASS_CONST_LONG("ERRNO_MULTIPLE_SET_QUERIES", PHP_SNMP_ERRNO_MULTIPLE_SET_QUERIES);

	/* Register SNMPException class */
	INIT_CLASS_ENTRY(cex, "SNMPException", NULL);
#ifdef HAVE_SPL
	php_snmp_exception_ce = zend_register_internal_class_ex(&cex, spl_ce_RuntimeException, NULL TSRMLS_CC);
#else
	php_snmp_exception_ce = zend_register_internal_class_ex(&cex, zend_exception_get_default(TSRMLS_C), NULL TSRMLS_CC);
#endif

	return SUCCESS;
}
| 1
|
234,157
|
/* Clamp an attribute block length UVALUE so it cannot run past END.
   A warning is emitted when the value had to be truncated.  */
check_uvalue (const unsigned char * start,
	      dwarf_vma uvalue,
	      const unsigned char * end)
{
  dwarf_vma limit = end - start;

  /* See PR 17512: file: 008-103549-0.001:0.1.
     and PR 24829 for examples of where these tests are triggered.  */
  if (uvalue <= limit)
    return uvalue;

  warn (_("Corrupt attribute block length: %lx\n"), (long) uvalue);
  return limit;
}
| 0
|
462,563
|
// Persist a non-empty feed to the cache, then re-read it back (applying
// ignore rules only when ignore-mode == "display"), and splice the
// refreshed feed into slot `pos`, preserving the slot's display order.
// Empty feeds are left untouched.
void controller::save_feed(std::shared_ptr<rss_feed> feed, unsigned int pos) {
	if (!feed->is_empty()) {
		LOG(level::DEBUG, "controller::save_feed: feed is nonempty, saving");
		rsscache->externalize_rssfeed(feed, ign.matches_resetunread(feed->rssurl()));
		LOG(level::DEBUG, "controller::save_feed: after externalize_rssfeed");

		bool ignore_disp = (cfg.get_configvalue("ignore-mode") == "display");
		feed = rsscache->internalize_rssfeed(feed->rssurl(), ignore_disp ? &ign : nullptr);
		LOG(level::DEBUG, "controller::save_feed: after internalize_rssfeed");

		feed->set_tags(urlcfg->get_tags(feed->rssurl()));
		{
			// Clear the old slot under its item lock and carry the
			// display order over to the refreshed feed.
			unsigned int order = feeds[pos]->get_order();
			std::lock_guard<std::mutex> itemlock(feeds[pos]->item_mutex);
			feeds[pos]->clear_items();
			feed->set_order(order);
		}
		feeds[pos] = feed;
		v->notify_itemlist_change(feeds[pos]);
	} else {
		LOG(level::DEBUG, "controller::save_feed: feed is empty, not saving");
	}
}
| 0
|
247,706
|
// Verifies that, with default (enabled) expiration verification, a client
// presenting an expired certificate fails the handshake and the transport
// failure reason carries the CERTIFICATE_EXPIRED alert.
TEST_P(SslSocketTest, FailedClientCertificateDefaultExpirationVerification) {
  envoy::config::listener::v3::Listener listener;
  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;
  configureServerAndExpiredClientCertificate(listener, client, /*server_config=*/{});

  TestUtilOptionsV2 test_options(listener, client, false, GetParam());
  testUtilV2(test_options.setExpectedClientCertUri("spiffe://lyft.com/test-team")
                 .setExpectedTransportFailureReasonContains("SSLV3_ALERT_CERTIFICATE_EXPIRED"));
}
| 0
|
231,790
|
// Verifies that a packet arriving from a changed client address is rejected:
// the stream delivery throws, a ConnectionClose frame is written, and the
// qlog records exactly one PacketDrop with reason PEER_ADDRESS_CHANGE.
TEST_F(QuicServerTransportTest, TestClientAddressChanges) {
  auto qLogger = std::make_shared<FileQLogger>(VantagePoint::Server);
  server->getNonConstConn().qLogger = qLogger;
  StreamId streamId = 4;
  // Simulate the peer migrating to a different source address.
  clientAddr = folly::SocketAddress("127.0.0.1", 2000);
  auto data = IOBuf::copyBuffer("data");
  EXPECT_THROW(
      recvEncryptedStream(streamId, *data, 0, true), std::runtime_error);
  EXPECT_TRUE(verifyFramePresent(
      serverWrites,
      *makeClientEncryptedCodec(),
      QuicFrame::Type::ConnectionCloseFrame));

  std::vector<int> indices =
      getQLogEventIndices(QLogEventType::PacketDrop, qLogger);
  EXPECT_EQ(indices.size(), 1);
  auto tmp = std::move(qLogger->logs[indices[0]]);
  auto event = dynamic_cast<QLogPacketDropEvent*>(tmp.get());
  EXPECT_EQ(event->packetSize, 29);
  EXPECT_EQ(
      event->dropReason,
      QuicTransportStatsCallback::toString(
          PacketDropReason::PEER_ADDRESS_CHANGE));
}
| 0
|
366,335
|
/* Release the mount's ID back to the mnt_id IDA allocator. */
static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}
| 0
|
355,634
|
/*
 * Convert "tv" to a newly allocated string (caller frees).
 * When "convert" is TRUE a List becomes its items joined with "\n" (plus a
 * trailing NL when non-empty) and a Float is formatted with "%g"; any other
 * value goes through tv_get_string().
 */
typval2string(typval_T *tv, int convert)
{
    garray_T	ga;
    char_u	*retval;
#ifdef FEAT_FLOAT
    char_u	numbuf[NUMBUFLEN];
#endif

    if (convert && tv->v_type == VAR_LIST)
    {
	ga_init2(&ga, sizeof(char), 80);
	if (tv->vval.v_list != NULL)
	{
	    list_join(&ga, tv->vval.v_list, (char_u *)"\n", TRUE, FALSE, 0);
	    /* Non-empty list gets a trailing newline. */
	    if (tv->vval.v_list->lv_len > 0)
		ga_append(&ga, NL);
	}
	ga_append(&ga, NUL);
	retval = (char_u *)ga.ga_data;
    }
#ifdef FEAT_FLOAT
    else if (convert && tv->v_type == VAR_FLOAT)
    {
	vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float);
	retval = vim_strsave(numbuf);
    }
#endif
    else
	retval = vim_strsave(tv_get_string(tv));
    return retval;
}
| 0
|
405,325
|
/*
 * Insert (or find) the inexact-policy tree node for @addr/@prefixlen in
 * @root.  An existing node covering the address at an equal-or-smaller
 * prefix is returned as-is.  Nodes that turn out to be subnets of the new,
 * shorter prefix are removed and merged into a single re-inserted node.
 * Returns the node, or NULL on allocation failure.
 */
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}
| 0
|
463,122
|
/*
 * Annotation getter: report the free space of the partition with the most
 * available room as "<avail>;<total>".  The lookup's return value is
 * deliberately ignored; avail/total keep their zero defaults on failure.
 */
static void annotation_get_freespace_percent_most(annotate_state_t *state,
                                                  struct annotate_entry_list *entry)
{
    uint64_t avail = 0;
    uint64_t total = 0;
    struct buf value = BUF_INITIALIZER;

    (void) partlist_local_find_freespace_most(1, &avail, &total, NULL, NULL);
    buf_printf(&value, "%" PRIuMAX ";%" PRIuMAX, (uintmax_t)avail, (uintmax_t)total);
    output_entryatt(state, entry->name, "", &value);
    buf_free(&value);
}
| 0
|
247,710
|
// Accessor for the configured list of expected local URIs.
const std::vector<std::string>& expectedLocalUri() const { return expected_local_uri_; }
| 0
|
238,643
|
/*
 * Tighten the verifier's tracked value range for R0 after a helper call
 * whose integer return is bounded by a buffer-size argument (the
 * get_stack / get_task_stack / probe_read_*_str family): success returns
 * at most meta->msize_max_value, failure a negative errno >= -MAX_ERRNO.
 *
 * Fix: the SOURCE text had a mojibake-corrupted token `®s[BPF_REG_0]`
 * (an encoding-damaged `&regs[BPF_REG_0]`); restored the address-of
 * expression so the function compiles and refines the correct register.
 */
static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
				   int func_id,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];

	if (ret_type != RET_INTEGER ||
	    (func_id != BPF_FUNC_get_stack &&
	     func_id != BPF_FUNC_get_task_stack &&
	     func_id != BPF_FUNC_probe_read_str &&
	     func_id != BPF_FUNC_probe_read_kernel_str &&
	     func_id != BPF_FUNC_probe_read_user_str))
		return;

	ret_reg->smax_value = meta->msize_max_value;
	ret_reg->s32_max_value = meta->msize_max_value;
	ret_reg->smin_value = -MAX_ERRNO;
	ret_reg->s32_min_value = -MAX_ERRNO;
	__reg_deduce_bounds(ret_reg);
	__reg_bound_offset(ret_reg);
	__update_reg_bounds(ret_reg);
}
| 0
|
450,826
|
/*
 * Initialise the secure-element bookkeeping for this HCI device: the
 * request completion, the BWI and SE-activation timers (both inactive),
 * and the pipe/error counters; the wait timeout starts at the value
 * derived from the default ATR BWI.
 */
void st21nfca_se_init(struct nfc_hci_dev *hdev)
{
	struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);

	init_completion(&info->se_info.req_completion);
	/* initialize timers */
	timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
	info->se_info.bwi_active = false;

	timer_setup(&info->se_info.se_active_timer,
		    st21nfca_se_activation_timeout, 0);
	info->se_info.se_active = false;

	info->se_info.count_pipes = 0;
	info->se_info.expected_pipes = 0;

	info->se_info.xch_error = false;

	info->se_info.wt_timeout =
		ST21NFCA_BWI_TO_TIMEOUT(ST21NFCA_ATR_DEFAULT_BWI);
}
| 0
|
517,432
|
/*
 * Render the alert-recipient list @s as HTML table rows into the response
 * buffer: one "Alert mail to" row per recipient, followed by an "Alert on"
 * row listing the subscribed event names, and an optional reminder row.
 */
static void print_alerts(HttpResponse res, Mail_T s) {
        for (Mail_T r = s; r; r = r->next) {
                StringBuffer_append(res->outputbuffer,
                                    "<tr class='stripe'><td>Alert mail to</td>"
                                    "<td>%s</td></tr>", r->to ? r->to : "");
                StringBuffer_append(res->outputbuffer, "<tr><td>Alert on</td><td>");
                /* Event_Null and Event_All are exact sentinel values; anything
                 * else is treated as a bitmask and each set bit is spelled out. */
                if (r->events == Event_Null) {
                        StringBuffer_append(res->outputbuffer, "No events");
                } else if (r->events == Event_All) {
                        StringBuffer_append(res->outputbuffer, "All events");
                } else {
                        if (IS_EVENT_SET(r->events, Event_Action))
                                StringBuffer_append(res->outputbuffer, "Action ");
                        if (IS_EVENT_SET(r->events, Event_ByteIn))
                                StringBuffer_append(res->outputbuffer, "ByteIn ");
                        if (IS_EVENT_SET(r->events, Event_ByteOut))
                                StringBuffer_append(res->outputbuffer, "ByteOut ");
                        if (IS_EVENT_SET(r->events, Event_Checksum))
                                StringBuffer_append(res->outputbuffer, "Checksum ");
                        if (IS_EVENT_SET(r->events, Event_Connection))
                                StringBuffer_append(res->outputbuffer, "Connection ");
                        if (IS_EVENT_SET(r->events, Event_Content))
                                StringBuffer_append(res->outputbuffer, "Content ");
                        if (IS_EVENT_SET(r->events, Event_Data))
                                StringBuffer_append(res->outputbuffer, "Data ");
                        if (IS_EVENT_SET(r->events, Event_Exec))
                                StringBuffer_append(res->outputbuffer, "Exec ");
                        if (IS_EVENT_SET(r->events, Event_Exist))
                                StringBuffer_append(res->outputbuffer, "Exist ");
                        if (IS_EVENT_SET(r->events, Event_FsFlag))
                                StringBuffer_append(res->outputbuffer, "Fsflags ");
                        if (IS_EVENT_SET(r->events, Event_Gid))
                                StringBuffer_append(res->outputbuffer, "Gid ");
                        if (IS_EVENT_SET(r->events, Event_Instance))
                                StringBuffer_append(res->outputbuffer, "Instance ");
                        if (IS_EVENT_SET(r->events, Event_Invalid))
                                StringBuffer_append(res->outputbuffer, "Invalid ");
                        if (IS_EVENT_SET(r->events, Event_Link))
                                StringBuffer_append(res->outputbuffer, "Link ");
                        if (IS_EVENT_SET(r->events, Event_NonExist))
                                StringBuffer_append(res->outputbuffer, "Nonexist ");
                        if (IS_EVENT_SET(r->events, Event_Permission))
                                StringBuffer_append(res->outputbuffer, "Permission ");
                        if (IS_EVENT_SET(r->events, Event_PacketIn))
                                StringBuffer_append(res->outputbuffer, "PacketIn ");
                        if (IS_EVENT_SET(r->events, Event_PacketOut))
                                StringBuffer_append(res->outputbuffer, "PacketOut ");
                        if (IS_EVENT_SET(r->events, Event_Pid))
                                StringBuffer_append(res->outputbuffer, "PID ");
                        if (IS_EVENT_SET(r->events, Event_Icmp))
                                StringBuffer_append(res->outputbuffer, "Ping ");
                        if (IS_EVENT_SET(r->events, Event_PPid))
                                StringBuffer_append(res->outputbuffer, "PPID ");
                        if (IS_EVENT_SET(r->events, Event_Resource))
                                StringBuffer_append(res->outputbuffer, "Resource ");
                        if (IS_EVENT_SET(r->events, Event_Saturation))
                                StringBuffer_append(res->outputbuffer, "Saturation ");
                        if (IS_EVENT_SET(r->events, Event_Size))
                                StringBuffer_append(res->outputbuffer, "Size ");
                        if (IS_EVENT_SET(r->events, Event_Speed))
                                StringBuffer_append(res->outputbuffer, "Speed ");
                        if (IS_EVENT_SET(r->events, Event_Status))
                                StringBuffer_append(res->outputbuffer, "Status ");
                        if (IS_EVENT_SET(r->events, Event_Timeout))
                                StringBuffer_append(res->outputbuffer, "Timeout ");
                        if (IS_EVENT_SET(r->events, Event_Timestamp))
                                StringBuffer_append(res->outputbuffer, "Timestamp ");
                        if (IS_EVENT_SET(r->events, Event_Uid))
                                StringBuffer_append(res->outputbuffer, "Uid ");
                        if (IS_EVENT_SET(r->events, Event_Uptime))
                                StringBuffer_append(res->outputbuffer, "Uptime ");
                }
                StringBuffer_append(res->outputbuffer, "</td></tr>");
                if (r->reminder) {
                        StringBuffer_append(res->outputbuffer,
                                            "<tr><td>Alert reminder</td><td>%u cycles</td></tr>",
                                            r->reminder);
                }
        }
}
| 0
|
238,328
|
/*
 * Look up the registered digest implementation for @algo, preferring the
 * one with the highest priority when several implementations of the same
 * algorithm are registered.  Returns NULL if none matches.
 */
static struct digest_algo *digest_algo_get_by_algo(enum hash_algo algo)
{
	struct digest_algo *d = NULL;
	struct digest_algo *tmp;
	int priority = -1;

	list_for_each_entry(tmp, &digests, list) {
		if (tmp->base.algo != algo)
			continue;
		/* Keep the current winner when priority is not strictly higher. */
		if (tmp->base.priority <= priority)
			continue;
		d = tmp;
		priority = tmp->base.priority;
	}
	return d;
}
| 0
|
436,069
|
/*
 * io_uring registration op: set the CPU affinity mask of the ring's io-wq
 * worker pool from a user-supplied mask of @len bytes.  The user length is
 * clamped to the kernel's cpumask size, so oversized input is truncated
 * rather than rejected.  Returns 0 or a negative errno.
 */
static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
				unsigned len)
{
	struct io_uring_task *tctx = current->io_uring;
	cpumask_var_t new_mask;
	int ret;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;
	/* Start from an empty mask so bytes beyond @len stay clear. */
	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();
	if (copy_from_user(new_mask, arg, len)) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}
	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
	free_cpumask_var(new_mask);
	return ret;
| 0
|
206,676
|
update_topline(void)
{
    // Recompute curwin->w_topline (and w_topfill with diff support) so the
    // cursor line is visible with 'scrolloff' lines of context above and
    // below, scrolling or centering as needed and requesting a redraw when
    // the top line changed.
    long	line_count;
    int		halfheight;
    int		n;
    linenr_T	old_topline;
#ifdef FEAT_DIFF
    int		old_topfill;
#endif
#ifdef FEAT_FOLDING
    linenr_T	lnum;
#endif
    int		check_topline = FALSE;
    int		check_botline = FALSE;
    long	*so_ptr = curwin->w_p_so >= 0 ? &curwin->w_p_so : &p_so;
    // NOTE(review): save_so is an int but *so_ptr is a long; a 'scrolloff'
    // value wider than int would be truncated when restored below — confirm
    // against upstream (vim later changed this to long).
    int		save_so = *so_ptr;

    // If there is no valid screen and when the window height is zero just use
    // the cursor line.
    if (!screen_valid(TRUE) || curwin->w_height == 0)
    {
	check_cursor_lnum();
	curwin->w_topline = curwin->w_cursor.lnum;
	curwin->w_botline = curwin->w_topline;
	curwin->w_valid |= VALID_BOTLINE|VALID_BOTLINE_AP;
	curwin->w_scbind_pos = 1;
	return;
    }

    check_cursor_moved(curwin);
    if (curwin->w_valid & VALID_TOPLINE)
	return;

    // When dragging with the mouse, don't scroll that quickly
    if (mouse_dragging > 0)
	*so_ptr = mouse_dragging - 1;

    old_topline = curwin->w_topline;
#ifdef FEAT_DIFF
    old_topfill = curwin->w_topfill;
#endif

    /*
     * If the buffer is empty, always set topline to 1.
     */
    if (BUFEMPTY())		// special case - file is empty
    {
	if (curwin->w_topline != 1)
	    redraw_later(NOT_VALID);
	curwin->w_topline = 1;
	curwin->w_botline = 2;
	curwin->w_valid |= VALID_BOTLINE|VALID_BOTLINE_AP;
	curwin->w_scbind_pos = 1;
    }

    /*
     * If the cursor is above or near the top of the window, scroll the window
     * to show the line the cursor is in, with 'scrolloff' context.
     */
    else
    {
	if (curwin->w_topline > 1)
	{
	    // If the cursor is above topline, scrolling is always needed.
	    // If the cursor is far below topline and there is no folding,
	    // scrolling down is never needed.
	    if (curwin->w_cursor.lnum < curwin->w_topline)
		check_topline = TRUE;
	    else if (check_top_offset())
		check_topline = TRUE;
	}
#ifdef FEAT_DIFF
	// Check if there are more filler lines than allowed.
	if (!check_topline && curwin->w_topfill > diff_check_fill(curwin,
							   curwin->w_topline))
	    check_topline = TRUE;
#endif

	if (check_topline)
	{
	    halfheight = curwin->w_height / 2 - 1;
	    if (halfheight < 2)
		halfheight = 2;

#ifdef FEAT_FOLDING
	    if (hasAnyFolding(curwin))
	    {
		// Count the number of logical lines between the cursor and
		// topline + scrolloff (approximation of how much will be
		// scrolled).
		n = 0;
		for (lnum = curwin->w_cursor.lnum;
				  lnum < curwin->w_topline + *so_ptr; ++lnum)
		{
		    ++n;
		    // stop at end of file or when we know we are far off
		    if (lnum >= curbuf->b_ml.ml_line_count || n >= halfheight)
			break;
		    (void)hasFolding(lnum, NULL, &lnum);
		}
	    }
	    else
#endif
		n = curwin->w_topline + *so_ptr - curwin->w_cursor.lnum;

	    // If we weren't very close to begin with, we scroll to put the
	    // cursor in the middle of the window.  Otherwise put the cursor
	    // near the top of the window.
	    if (n >= halfheight)
		scroll_cursor_halfway(FALSE);
	    else
	    {
		scroll_cursor_top(scrolljump_value(), FALSE);
		check_botline = TRUE;
	    }
	}

	else
	{
#ifdef FEAT_FOLDING
	    // Make sure topline is the first line of a fold.
	    (void)hasFolding(curwin->w_topline, &curwin->w_topline, NULL);
#endif
	    check_botline = TRUE;
	}
    }

    /*
     * If the cursor is below the bottom of the window, scroll the window
     * to put the cursor on the window.
     * When w_botline is invalid, recompute it first, to avoid a redraw later.
     * If w_botline was approximated, we might need a redraw later in a few
     * cases, but we don't want to spend (a lot of) time recomputing w_botline
     * for every small change.
     */
    if (check_botline)
    {
	if (!(curwin->w_valid & VALID_BOTLINE_AP))
	    validate_botline();

	if (curwin->w_botline <= curbuf->b_ml.ml_line_count)
	{
	    if (curwin->w_cursor.lnum < curwin->w_botline)
	    {
		if (((long)curwin->w_cursor.lnum
					     >= (long)curwin->w_botline - *so_ptr
#ifdef FEAT_FOLDING
			|| hasAnyFolding(curwin)
#endif
			))
		{
		    lineoff_T	loff;

		    // Cursor is (a few lines) above botline, check if there are
		    // 'scrolloff' window lines below the cursor.  If not, need to
		    // scroll.
		    n = curwin->w_empty_rows;
		    loff.lnum = curwin->w_cursor.lnum;
#ifdef FEAT_FOLDING
		    // In a fold go to its last line.
		    (void)hasFolding(loff.lnum, NULL, &loff.lnum);
#endif
#ifdef FEAT_DIFF
		    loff.fill = 0;
		    n += curwin->w_filler_rows;
#endif
		    loff.height = 0;
		    while (loff.lnum < curwin->w_botline
#ifdef FEAT_DIFF
			    && (loff.lnum + 1 < curwin->w_botline || loff.fill == 0)
#endif
			    )
		    {
			n += loff.height;
			if (n >= *so_ptr)
			    break;
			botline_forw(&loff);
		    }
		    if (n >= *so_ptr)
			// sufficient context, no need to scroll
			check_botline = FALSE;
		}
		else
		    // sufficient context, no need to scroll
		    check_botline = FALSE;
	    }
	    if (check_botline)
	    {
#ifdef FEAT_FOLDING
		if (hasAnyFolding(curwin))
		{
		    // Count the number of logical lines between the cursor and
		    // botline - scrolloff (approximation of how much will be
		    // scrolled).
		    line_count = 0;
		    for (lnum = curwin->w_cursor.lnum;
				   lnum >= curwin->w_botline - *so_ptr; --lnum)
		    {
			++line_count;
			// stop at end of file or when we know we are far off
			if (lnum <= 0 || line_count > curwin->w_height + 1)
			    break;
			(void)hasFolding(lnum, &lnum, NULL);
		    }
		}
		else
#endif
		    line_count = curwin->w_cursor.lnum - curwin->w_botline
								   + 1 + *so_ptr;
		if (line_count <= curwin->w_height + 1)
		    scroll_cursor_bot(scrolljump_value(), FALSE);
		else
		    scroll_cursor_halfway(FALSE);
	    }
	}
    }
    curwin->w_valid |= VALID_TOPLINE;

    /*
     * Need to redraw when topline changed.
     */
    if (curwin->w_topline != old_topline
#ifdef FEAT_DIFF
	    || curwin->w_topfill != old_topfill
#endif
	    )
    {
	dollar_vcol = -1;
	if (curwin->w_skipcol != 0)
	{
	    curwin->w_skipcol = 0;
	    redraw_later(NOT_VALID);
	}
	else
	    redraw_later(VALID);
	// May need to set w_skipcol when cursor in w_topline.
	if (curwin->w_cursor.lnum == curwin->w_topline)
	    validate_cursor();
    }

    *so_ptr = save_so;
}
| 1
|
445,956
|
fr_window_unmap (GtkWidget *widget)
{
	/* On unmap, persist the file list's current sort column and order to
	 * GSettings (if a sort is active), then chain up to the parent class. */
	FrWindow    *window = FR_WINDOW (widget);
	GtkSortType  order;
	int          column_id;

	if (gtk_tree_sortable_get_sort_column_id (GTK_TREE_SORTABLE (window->priv->list_store),
						  &column_id,
						  &order))
	{
		g_settings_set_enum (window->priv->settings_listing, PREF_LISTING_SORT_METHOD, column_id);
		g_settings_set_enum (window->priv->settings_listing, PREF_LISTING_SORT_TYPE, order);
	}

	GTK_WIDGET_CLASS (fr_window_parent_class)->unmap (widget);
}
| 0
|
498,155
|
const char *cgit_loginurl(void)
{
static const char *login_url;
if (!login_url)
login_url = fmtalloc("%s?p=login", cgit_rooturl());
return login_url;
}
| 0
|
265,459
|
/*
 * Fill @finfo from an extended ("lreg") regular-file inode and, when the
 * file tail lives in a fragment, resolve the fragment block entry into
 * @fentry (finfo->comp is set from the lookup result in that case).
 *
 * Returns the number of full data blocks belonging to the file (the
 * fragment tail, if any, is not counted), or a negative errno:
 *   -EINVAL for a corrupt inode/fragment entry, zero block size, or a
 *           block count too large to represent/allocate,
 *   -ENOMEM if the per-block size table cannot be allocated.
 *
 * On success finfo->blk_sizes is malloc()ed and owned by the caller.
 */
static int sqfs_get_lregfile_info(struct squashfs_lreg_inode *lreg,
				  struct squashfs_file_info *finfo,
				  struct squashfs_fragment_block_entry *fentry,
				  __le32 blksz)
{
	u32 block_size = le32_to_cpu(blksz);
	u64 datablk_count;
	int ret;

	finfo->size = get_unaligned_le64(&lreg->file_size);
	finfo->offset = get_unaligned_le32(&lreg->offset);
	finfo->start = get_unaligned_le64(&lreg->start_block);
	finfo->frag = SQFS_IS_FRAGMENTED(get_unaligned_le32(&lreg->fragment));

	/* On-disk data is untrusted: reject a zero block size before it is
	 * used as a divisor below (the original code divided unchecked). */
	if (block_size == 0)
		return -EINVAL;

	if (finfo->frag && finfo->offset == 0xFFFFFFFF)
		return -EINVAL;

	if (finfo->size < 1 || finfo->start == 0x7FFFFFFF)
		return -EINVAL;

	if (finfo->frag) {
		/* Tail is in a fragment: only whole blocks count as data blocks. */
		datablk_count = finfo->size / block_size;
		ret = sqfs_frag_lookup(get_unaligned_le32(&lreg->fragment),
				       fentry);
		if (ret < 0)
			return -EINVAL;
		finfo->comp = ret;
		if (fentry->size < 1 || fentry->start == 0x7FFFFFFF)
			return -EINVAL;
	} else {
		datablk_count = DIV_ROUND_UP(finfo->size, (u64)block_size);
	}

	/* file_size is attacker-controlled: make sure the count fits the int
	 * return value and that the allocation size cannot wrap around
	 * (previously "count * sizeof(u32)" could overflow). */
	if (datablk_count > INT_MAX ||
	    datablk_count > (u64)(SIZE_MAX / sizeof(u32)))
		return -EINVAL;

	finfo->blk_sizes = malloc(datablk_count * sizeof(u32));
	if (!finfo->blk_sizes)
		return -ENOMEM;

	return (int)datablk_count;
}
| 0
|
229,177
|
/*
 * QOM class initializer for the virtio-serial device: wires up the device
 * properties, the VirtioDeviceClass callbacks (realize, config, status,
 * reset, migration save/load) and the hotplug handlers for serial ports.
 */
static void virtio_serial_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    /* Global registry of virtio-serial devices, initialized here once. */
    QLIST_INIT(&vserdevices.devices);

    dc->props = virtio_serial_properties;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    vdc->realize = virtio_serial_device_realize;
    vdc->unrealize = virtio_serial_device_unrealize;
    vdc->get_features = get_features;
    vdc->get_config = get_config;
    vdc->set_status = set_status;
    vdc->reset = vser_reset;
    vdc->save = virtio_serial_save_device;
    vdc->load = virtio_serial_load_device;
    hc->plug = virtser_port_device_plug;
    hc->unplug = qdev_simple_device_unplug_cb;
}
| 0
|
256,169
|
// Process 128 output floats of a 3-way multiply-accumulate: out += a1*inp1 +
// a2*inp2 + a3*inp3, advancing the input and output cursors as it goes.
// When a packet holds 8 operands the 16-float FourMulAdd3Way step is unrolled
// four times; otherwise the generic MulAdd3Way step is unrolled to cover the
// same 128 elements.
ALWAYS_INLINE void MulAdd3Way128(const Packet a1, const Packet a2,
                                 const Packet a3, const float** inp1,
                                 const float** inp2, const float** inp3,
                                 float** out) {
  if (kNumOperands == 8) {
    FourMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out);
    FourMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out);
    FourMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out);
    FourMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out);
  } else {
    // 128 must be an exact multiple of the 4-packet stride below.
    DCHECK_LE(4 * kNumOperands, 128);
    for (int i = 0; i < 128 / (4 * kNumOperands); ++i) {
      MulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out);
      MulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out);
      MulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out);
      MulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out);
    }
  }
}
| 0
|
294,697
|
test_unit_v2v(VALUE i,
	      VALUE (* conv1)(VALUE),
	      VALUE (* conv2)(VALUE))
{
    /* Round-trip check: convert i with conv1, convert the result back with
     * conv2, and return (as a ruby boolean VALUE) whether the round-tripped
     * value equals the original. */
    VALUE forward, restored;

    forward = (*conv1)(i);
    restored = (*conv2)(forward);
    return f_eqeq_p(restored, i);
}
| 0
|
225,681
|
#ifndef GPAC_DISABLE_ISOM_WRITE
/*
 * Return the serialized size in bytes of one sample-group description
 * entry for the given grouping type; unknown types fall back to the
 * length stored in the generic default entry.
 */
static u32 sgpd_size_entry(u32 grouping_type, void *entry)
{
	switch (grouping_type) {
	case GF_ISOM_SAMPLE_GROUP_ROLL:
	case GF_ISOM_SAMPLE_GROUP_PROL:
		return 2;
	case GF_ISOM_SAMPLE_GROUP_TELE:
	case GF_ISOM_SAMPLE_GROUP_RAP:
	case GF_ISOM_SAMPLE_GROUP_SAP:
	case GF_ISOM_SAMPLE_GROUP_SYNC:
		return 1;
	case GF_ISOM_SAMPLE_GROUP_TSCL:
		return 20;
	case GF_ISOM_SAMPLE_GROUP_LBLI:
		return 2;
	case GF_ISOM_SAMPLE_GROUP_TSAS:
	case GF_ISOM_SAMPLE_GROUP_STSA:
		return 0;
	case GF_ISOM_SAMPLE_GROUP_SEIG:
	{
		GF_CENCSampleEncryptionGroupEntry *seig = (GF_CENCSampleEncryptionGroupEntry *)entry;
		/* First key_info byte flags multi-key layout. */
		Bool use_mkey = seig->key_info[0] ? GF_TRUE : GF_FALSE;
		if (use_mkey) {
			return 3 + seig->key_info_size-1;
		}
		return seig->key_info_size; //== 3 + (seig->key_info_size-3);
	}
	case GF_ISOM_SAMPLE_GROUP_OINF:
		return gf_isom_oinf_size_entry(entry);
	case GF_ISOM_SAMPLE_GROUP_LINF:
		return gf_isom_linf_size_entry(entry);
	case GF_ISOM_SAMPLE_GROUP_SPOR:
	{
		GF_SubpictureOrderEntry *spor = (GF_SubpictureOrderEntry *)entry;
		u32 s = 2 + 2*spor->num_subpic_ref_idx;
		if (spor->subpic_id_info_flag) {
			s += 3;
		}
		return s;
	}
	case GF_ISOM_SAMPLE_GROUP_SULM:
	{
		GF_SubpictureLayoutMapEntry *sulm = (GF_SubpictureLayoutMapEntry *) entry;
		return 6 + 2*sulm->nb_entries;
	}
	default:
		return ((GF_DefaultSampleGroupDescriptionEntry *)entry)->length;
	}
| 0
|
359,456
|
bgp_write_rsclient_summary (struct vty *vty, struct peer *rsclient,
        afi_t afi, safi_t safi)
{
  /* Print one summary line for a route-server client (host, AS, export and
   * import route-map names, uptime, status).  For a peer-group, recurse over
   * its members and return the number of lines written; otherwise return 1. */
  char timebuf[BGP_UPTIME_LEN];
  char rmbuf[14];
  const char *rmname;
  struct peer *peer;
  struct listnode *node, *nnode;
  int len;
  int count = 0;

  if (CHECK_FLAG (rsclient->sflags, PEER_STATUS_GROUP))
    {
      for (ALL_LIST_ELEMENTS (rsclient->group->peer, node, nnode, peer))
        {
          count++;
          bgp_write_rsclient_summary (vty, peer, afi, safi);
        }
      return count;
    }

  /* Pad the host name to a 16-character column. */
  len = vty_out (vty, "%s", rsclient->host);
  len = 16 - len;

  if (len < 1)
    vty_out (vty, "%s%*s", VTY_NEWLINE, 16, " ");
  else
    vty_out (vty, "%*s", len, " ");

  vty_out (vty, "4 ");

  vty_out (vty, "%5d ", rsclient->as);

  /* Long route-map names: sprintf pre-fills rmbuf with "...", then strncpy
   * overwrites the first 10 bytes with the name so the result reads
   * "<first-10-chars>..." and stays NUL-terminated at rmbuf[13]. */
  rmname = ROUTE_MAP_EXPORT_NAME(&rsclient->filter[afi][safi]);
  if ( rmname && strlen (rmname) > 13 )
    {
      sprintf (rmbuf, "%13s", "...");
      rmname = strncpy (rmbuf, rmname, 10);
    }
  else if (! rmname)
    rmname = "<none>";
  vty_out (vty, " %13s ", rmname);

  rmname = ROUTE_MAP_IMPORT_NAME(&rsclient->filter[afi][safi]);
  if ( rmname && strlen (rmname) > 13 )
    {
      sprintf (rmbuf, "%13s", "...");
      rmname = strncpy (rmbuf, rmname, 10);
    }
  else if (! rmname)
    rmname = "<none>";
  vty_out (vty, " %13s ", rmname);

  vty_out (vty, "%8s", peer_uptime (rsclient->uptime, timebuf, BGP_UPTIME_LEN));

  if (CHECK_FLAG (rsclient->flags, PEER_FLAG_SHUTDOWN))
    vty_out (vty, " Idle (Admin)");
  else if (CHECK_FLAG (rsclient->sflags, PEER_STATUS_PREFIX_OVERFLOW))
    vty_out (vty, " Idle (PfxCt)");
  else
    vty_out (vty, " %-11s", LOOKUP(bgp_status_msg, rsclient->status));

  vty_out (vty, "%s", VTY_NEWLINE);

  return 1;
}
| 0
|
252,315
|
/*
 * Build the Huffman decoding table from the canonical code lengths/codes in
 * hcode[im..iM].  Codes no longer than HUF_DECBITS become "short" primary
 * entries replicated across all table slots sharing the prefix; longer codes
 * append the symbol index to a per-slot secondary list.  Returns false on a
 * malformed table (code wider than its declared length, or a slot collision).
 *
 * NOTE(review): im/iM and the code values are assumed to have been validated
 * by the caller so that the computed table indices stay inside HUF_DECSIZE —
 * confirm, since hcode comes from untrusted input in EXR decoding.
 */
static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        // Grow the secondary list by one (simple realloc-by-copy).
        int *p = pl->p;

        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}
| 0
|
336,659
|
/*
 * One-shot late init for a RedsState: create the display channel for every
 * registered streaming character device.  Guarded by a flag so repeated
 * calls are no-ops.
 */
static void reds_late_initialization(RedsState *reds)
{
    // do only once
    if (reds->late_initialization_done) {
        return;
    }

    // create stream channels for streaming devices
    for (auto dev: reds->char_devices) {
        // Only StreamDevice instances need a channel; other char devices
        // fail the dynamic_cast and are skipped.
        auto stream_dev = dynamic_cast<StreamDevice*>(dev.get());
        if (stream_dev) {
            stream_dev->create_channel();
        }
    }
    reds->late_initialization_done = true;
}
| 0
|
220,100
|
nfs4_file_open(struct inode *inode, struct file *filp)
{
	/* ->open for NFSv4 files reached through a cached positive dentry:
	 * re-run the NFSv4 OPEN against the server and, if the server's view
	 * no longer matches the cached dentry, drop it and return
	 * -EOPENSTALE so the VFS retries the full lookup/open path. */
	struct nfs_open_context *ctx;
	struct dentry *dentry = file_dentry(filp);
	struct dentry *parent = NULL;
	struct inode *dir;
	unsigned openflags = filp->f_flags;
	struct iattr attr;
	int err;

	/*
	 * If no cached dentry exists or if it's negative, NFSv4 handled the
	 * opens in ->lookup() or ->create().
	 *
	 * We only get this far for a cached positive dentry. We skipped
	 * revalidation, so handle it here by dropping the dentry and returning
	 * -EOPENSTALE. The VFS will retry the lookup/create/open.
	 */

	dprintk("NFS: open file(%pd2)\n", dentry);

	err = nfs_check_flags(openflags);
	if (err)
		return err;

	/* O_ACCMODE value 3 (read|write special form) maps to O_RDWR. */
	if ((openflags & O_ACCMODE) == 3)
		openflags--;

	/* We can't create new files here */
	openflags &= ~(O_CREAT|O_EXCL);

	parent = dget_parent(dentry);
	dir = d_inode(parent);

	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
	err = PTR_ERR(ctx);
	if (IS_ERR(ctx))
		goto out;

	attr.ia_valid = ATTR_OPEN;
	if (openflags & O_TRUNC) {
		attr.ia_valid |= ATTR_SIZE;
		attr.ia_size = 0;
		/* Flush dirty pages before the server-side truncate. */
		filemap_write_and_wait(inode->i_mapping);
	}

	inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		switch (err) {
		default:
			goto out_put_ctx;
		case -ENOENT:
		case -ESTALE:
		case -EISDIR:
		case -ENOTDIR:
		case -ELOOP:
			goto out_drop;
		}
	}
	/* Server returned a different inode than the cached dentry's: stale. */
	if (inode != d_inode(dentry))
		goto out_drop;

	nfs_file_set_open_context(filp, ctx);
	nfs_fscache_open_file(inode, filp);
	err = 0;

out_put_ctx:
	put_nfs_open_context(ctx);
out:
	dput(parent);
	return err;

out_drop:
	d_drop(dentry);
	err = -EOPENSTALE;
	goto out_put_ctx;
}
| 0
|
244,145
|
/* Constructor for a JPEG-2000 header ('jp2h') box; the allocation macro
 * declares and zero-initializes `tmp` with the given type tag. */
GF_Box *jp2h_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_J2KHeaderBox, GF_ISOM_BOX_TYPE_JP2H);
	return (GF_Box *)tmp;
}
| 0
|
366,267
|
/*
 * Record a mount-tree change for @ns: bump the namespace's event counter
 * to the current global value and wake any pollers, but only when the
 * namespace exists and has not already observed this event.
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (!ns)
		return;
	if (ns->event == event)
		return;

	ns->event = event;
	wake_up_interruptible(&ns->poll);
}
| 0
|
226,042
|
/* Destructor for a DataReferenceBox: frees the box structure itself
 * (child boxes are released by the generic box machinery, not here). */
void dref_box_del(GF_Box *s)
{
	GF_DataReferenceBox *dref = (GF_DataReferenceBox *) s;

	if (!dref)
		return;
	gf_free(dref);
}
| 0
|
274,870
|
// GREATER op with 2-D broadcasting: a {1,1,2,4} lhs compared against a
// {1,1,1,4} rhs; the rhs row is broadcast across both lhs rows and the
// output keeps the lhs shape.
TEST(ComparisonsTest, GreaterBroadcastTwoD) {
  ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
                          BuiltinOperator_GREATER);
  model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
  model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
  model.Invoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAre(false, true, true, false, false, true, false, true));
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
| 0
|
459,207
|
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	/* Iterator step over a chain's classifier list: fetch the successor
	 * of @tp (the first proto when @tp is NULL) and drop the reference
	 * the caller held on @tp. */
	struct tcf_proto *next = __tcf_get_next_proto(chain, tp);

	if (tp != NULL)
		tcf_proto_put(tp, true, NULL);

	return next;
}
| 0
|
313,745
|
nv_home(cmdarg_T *cap)
{
    // Normal-mode <Home>: move to the first column of the line (like "|"
    // with count 1); CTRL-HOME jumps to the first line like "gg".
    // CTRL-HOME is like "gg"
    if (mod_mask & MOD_MASK_CTRL)
	nv_goto(cap);
    else
    {
	cap->count0 = 1;
	nv_pipe(cap);
    }
    ins_at_eol = FALSE;	    // Don't move cursor past eol (only necessary in a
			    // one-character line).
}
| 0
|
488,376
|
/*
 * Unmap the PUD-level slice of [addr, end) under @pgd, delegating each
 * present PUD to zap_pmd_range().  *zap_work is a shared work budget:
 * iteration stops early when it is exhausted, and the address reached is
 * returned so the caller can resume.
 */
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			/* Nothing mapped here; charge a token unit of work. */
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}
| 0
|
231,526
|
update_map (char * const mapping, const char * const map_file)
{
  /* Write the NUL-terminated `mapping` string into `map_file`
     (e.g. /proc/<pid>/uid_map).  The x-wrappers abort on failure, so no
     explicit error handling is needed here.  */
  const int map_fd = xopen (map_file, O_WRONLY, 0);

  xwrite (map_fd, mapping, strlen (mapping));
  xclose (map_fd);
}
| 0
|
261,963
|
njs_decode_utf8(njs_str_t *dst, const njs_str_t *src)
{
    /* Decode src as a UTF-8 byte stream into dst->start; the final two
     * arguments (last=1, fatal=0) mean the stream is complete and invalid
     * sequences are replaced rather than rejected.  dst->start must be
     * large enough for the encoded result (caller's responsibility). */
    njs_unicode_decode_t  ctx;

    njs_utf8_decode_init(&ctx);

    (void) njs_utf8_stream_encode(&ctx, src->start, src->start + src->length,
                                  dst->start, 1, 0);
}
| 0
|
226,436
|
const DataTypeVector& output_dtypes() const override { return dtypes_; }
| 0
|
242,667
|
dissect_header_lens_v1(tvbuff_t *tvb, int offset, proto_tree *tree, int encoding, int * const *hf_indexes)
{
    /* Dissect the fixed-size per-parameter length array that precedes a
     * sysdig event's parameters.  The NULL-terminated hf_indexes array
     * determines the parameter count; one length item is added per
     * parameter.  Returns the number of bytes consumed. */
    int param_count;
    proto_item *ti;
    proto_tree *len_tree;

    /* Count parameters by walking to the NULL terminator. */
    for (param_count = 0; hf_indexes[param_count]; param_count++);

    ti = proto_tree_add_item(tree, hf_se_param_lens, tvb, offset, param_count * SYSDIG_PARAM_SIZE, ENC_NA);
    len_tree = proto_item_add_subtree(ti, ett_sysdig_parm_lens);

    for (param_count = 0; hf_indexes[param_count]; param_count++) {
        proto_tree_add_item(len_tree, hf_se_param_len, tvb, offset + (param_count * SYSDIG_PARAM_SIZE), SYSDIG_PARAM_SIZE, encoding);
    }

    proto_item_set_len(ti, param_count * SYSDIG_PARAM_SIZE);
    return param_count * SYSDIG_PARAM_SIZE;
}
| 0
|
386,578
|
/**
 * Writes one polyline vertex to the DXF stream.  For DXF 2000 the vertex is
 * emitted inline as LWPOLYLINE coordinate codes; for older versions a full
 * VERTEX entity is written on the layer remembered from the enclosing
 * polyline.  A bulge code (42) is written only when the bulge is non-zero
 * (beyond a small epsilon).
 */
void DL_Dxf::writeVertex(DL_WriterA& dw,
                         const DL_VertexData& data) {
    if (version==DL_VERSION_2000) {
        dw.dxfReal(10, data.x);
        dw.dxfReal(20, data.y);
        if (fabs(data.bulge)>1.0e-10) {
            dw.dxfReal(42, data.bulge);
        }
    } else {
        dw.entity("VERTEX");
        //dw.entityAttributes(attrib);
        dw.dxfString(8, polylineLayer);
        dw.coord(DL_VERTEX_COORD_CODE, data.x, data.y, data.z);
        if (fabs(data.bulge)>1.0e-10) {
            dw.dxfReal(42, data.bulge);
        }
    }
}
| 0
|
512,843
|
/*
 * Merge this multiple-equality into the first compatible Item_equal found
 * in *list; once merged, also fold any later list entries that intersect
 * the merged result (removing them from the list).  When nothing in the
 * list intersects and only_intersected is false, this object itself is
 * appended to the list.
 */
void Item_equal::merge_into_list(THD *thd, List<Item_equal> *list,
                                 bool save_merged,
                                 bool only_intersected)
{
  Item_equal *item;
  List_iterator<Item_equal> it(*list);
  Item_equal *merge_into= NULL;
  while((item= it++))
  {
    if (!merge_into)
    {
      /* Still looking for the first entry that shares a member with us. */
      if (item->merge_with_check(thd, this, save_merged))
        merge_into= item;
    }
    else
    {
      /* Already merged: absorb any further intersecting entries. */
      if (merge_into->merge_with_check(thd, item, false))
        it.remove();
    }
  }
  if (!only_intersected && !merge_into)
    list->push_back(this, thd->mem_root);
}
| 0
|
462,275
|
/*
 * Convenience helper: create a 64-bit unsigned STUN attribute of type
 * @attr_type holding *@value (allocated from @pool) and append it to @msg.
 * Returns PJ_SUCCESS or the error from creation/append.
 */
PJ_DEF(pj_status_t) pj_stun_msg_add_uint64_attr(pj_pool_t *pool,
					        pj_stun_msg *msg,
					        int attr_type,
					        const pj_timestamp *value)
{
    pj_stun_uint64_attr *attr = NULL;
    pj_status_t status;

    status = pj_stun_uint64_attr_create(pool, attr_type, value, &attr);
    if (status != PJ_SUCCESS)
	return status;

    return pj_stun_msg_add_attr(msg, &attr->hdr);
}
| 0
|
326,121
|
regmbc(int c)
{
    // Emit (or size-count) the multi-byte character "c" into the regexp
    // program.  During the sizing pass (regcode == JUST_CALC_SIZE) only
    // the byte length is accumulated; otherwise the encoded bytes are
    // written and regcode advanced.
    if (!has_mbyte && c > 0xff)
	return;
    if (regcode == JUST_CALC_SIZE)
	regsize += (*mb_char2len)(c);
    else
	regcode += (*mb_char2bytes)(c, regcode);
}
| 0
|
413,837
|
// Appends any nest-host resolution errors recorded on the referencing and/or
// selected class to the access-error message being built in *ss, formatted as
// ", (err1, err2)" with absent errors omitted.  No-op when neither class has
// a recorded nest-host error.
static void print_nest_host_error_on(stringStream* ss, Klass* ref_klass, Klass* sel_klass) {
  assert(ref_klass->is_instance_klass(), "must be");
  assert(sel_klass->is_instance_klass(), "must be");
  InstanceKlass* ref_ik = InstanceKlass::cast(ref_klass);
  InstanceKlass* sel_ik = InstanceKlass::cast(sel_klass);
  const char* nest_host_error_1 = ref_ik->nest_host_error();
  const char* nest_host_error_2 = sel_ik->nest_host_error();
  if (nest_host_error_1 != NULL || nest_host_error_2 != NULL) {
    ss->print(", (%s%s%s)",
              (nest_host_error_1 != NULL) ? nest_host_error_1 : "",
              (nest_host_error_1 != NULL && nest_host_error_2 != NULL) ? ", " : "",
              (nest_host_error_2 != NULL) ? nest_host_error_2 : "");
  }
}
| 0
|
222,915
|
// Refines a node's inferred output shapes using the shapes annotated on the
// node (attr _output_shapes, gated by _same_output_for_iterations).  Annotated
// shapes are adopted only where inference left the shape unknown and the
// annotation is compatible; an incompatible annotation is logged and flags
// the context, but the inferred shape is kept.
Status UpdateOutputShapesUsingAnnotatedInformation(const NodeDef& node,
                                                   NodeContext* c) const {
  const auto& attr = node.attr();
  if (attr.count(kOutputSame) == 0 || !attr.at(kOutputSame).b() ||
      attr.count(kOutputShapes) == 0)
    return Status::OK();

  InferenceContext* ic = c->inference_context.get();
  int output_size = attr.at(kOutputShapes).list().shape_size();

  for (int i = 0; i < ic->num_outputs(); i++) {
    // Annotated Switch node has only one output. Propagate the shape to all
    // the outputs.
    int shape_index = IsSwitch(node) ? 0 : i;
    if (shape_index >= output_size) {
      LOG(WARNING)
          << "UpdateOutputShapesUsingAnnotatedInformation() -- node: "
          << node.name() << ", inferred output shape size "
          << ic->num_outputs() << ", annotated output shape size "
          << output_size;
      break;
    }

    const TensorShapeProto& shape =
        attr.at(kOutputShapes).list().shape(shape_index);
    if (shape.dim().empty()) continue;

    ShapeHandle output_shape;
    TF_RETURN_IF_ERROR(ic->MakeShapeFromShapeProto(shape, &output_shape));

    // Check if annotated shapes are incompatible with inferred shapes.
    if ((ic->FullyDefined(ic->output(i)) &&
         !SameShapes(ic->output(i), output_shape)) ||
        (!ic->FullyDefined(ic->output(i)) &&
         !CompatibleShapes(ic->output(i), output_shape))) {
      LOG(WARNING)
          << "UpdateOutputShapesUsingAnnotatedInformation() -- node: "
          << node.name() << ", inferred output shape "
          << "doesn't match for i=" << i << ": "
          << "ic->output(k): " << ic->DebugString(ic->output(i))
          << ", annotated output shape: " << ic->DebugString(output_shape)
          << " -- " << node.DebugString();
      c->shape_incompatible = true;
    }

    // Only use annotated shapes if the inference shape is unknown and
    // compatible with annotated shapes.
    if (!ic->FullyDefined(ic->output(i)) &&
        CompatibleShapes(ic->output(i), output_shape)) {
      VLOG(3) << "UpdateOutputShapesUsingAnnotatedInformation() -- node: "
              << node.name() << ", inferred output shape " << i << ": "
              << "ic->output(i): " << ic->DebugString(ic->output(i))
              << ", annotated output shape: " << ic->DebugString(output_shape)
              << " -- " << node.ShortDebugString();
      ic->set_output(i, output_shape);
    }
  }

  return Status::OK();
}
| 0
|
413,703
|
/*
 * Human-readable printer for one analysis-hint node: emits each recorded
 * hint as a " key=value" token on the current console line.  Arch/bits
 * nodes print "RESET" when the hint clears the setting instead of
 * assigning a value.
 */
static void print_hint_h_format(HintNode *node) {
	switch (node->type) {
	case HINT_NODE_ADDR: {
		const RAnalAddrHintRecord *record;
		r_vector_foreach (node->addr_hints, record) {
			switch (record->type) {
			case R_ANAL_ADDR_HINT_TYPE_IMMBASE:
				r_cons_printf (" immbase=%d", record->immbase);
				break;
			case R_ANAL_ADDR_HINT_TYPE_JUMP:
				r_cons_printf (" jump=0x%08"PFMT64x, record->jump);
				break;
			case R_ANAL_ADDR_HINT_TYPE_FAIL:
				r_cons_printf (" fail=0x%08"PFMT64x, record->fail);
				break;
			case R_ANAL_ADDR_HINT_TYPE_STACKFRAME:
				r_cons_printf (" stackframe=0x%"PFMT64x, record->stackframe);
				break;
			case R_ANAL_ADDR_HINT_TYPE_PTR:
				r_cons_printf (" ptr=0x%"PFMT64x, record->ptr);
				break;
			case R_ANAL_ADDR_HINT_TYPE_NWORD:
				r_cons_printf (" nword=%d", record->nword);
				break;
			case R_ANAL_ADDR_HINT_TYPE_RET:
				r_cons_printf (" ret=0x%08"PFMT64x, record->retval);
				break;
			case R_ANAL_ADDR_HINT_TYPE_NEW_BITS:
				r_cons_printf (" newbits=%d", record->newbits);
				break;
			case R_ANAL_ADDR_HINT_TYPE_SIZE:
				r_cons_printf (" size=%"PFMT64u, record->size);
				break;
			case R_ANAL_ADDR_HINT_TYPE_SYNTAX:
				r_cons_printf (" syntax='%s'", record->syntax);
				break;
			case R_ANAL_ADDR_HINT_TYPE_OPTYPE: {
				/* Optype is printed only when it maps to a known name. */
				const char *type = r_anal_optype_to_string (record->optype);
				if (type) {
					r_cons_printf (" type='%s'", type);
				}
				break;
			}
			case R_ANAL_ADDR_HINT_TYPE_OPCODE:
				r_cons_printf (" opcode='%s'", record->opcode);
				break;
			case R_ANAL_ADDR_HINT_TYPE_TYPE_OFFSET:
				r_cons_printf (" offset='%s'", record->type_offset);
				break;
			case R_ANAL_ADDR_HINT_TYPE_ESIL:
				r_cons_printf (" esil='%s'", record->esil);
				break;
			case R_ANAL_ADDR_HINT_TYPE_HIGH:
				r_cons_printf (" high=true");
				break;
			case R_ANAL_ADDR_HINT_TYPE_VAL:
				r_cons_printf (" val=0x%08"PFMT64x, record->val);
				break;
			}
		}
		break;
	}
	case HINT_NODE_ARCH:
		if (node->arch) {
			r_cons_printf (" arch='%s'", node->arch);
		} else {
			r_cons_print (" arch=RESET");
		}
		break;
	case HINT_NODE_BITS:
		if (node->bits) {
			r_cons_printf (" bits=%d", node->bits);
		} else {
			r_cons_print (" bits=RESET");
		}
		break;
	}
}
| 0
|
387,840
|
// Returns the class-initialization lock object stored in this klass's
// java.lang.Class mirror.  A NULL lock is only legal once the class has
// reached a terminal state (fully initialized or in_error); the loadload
// barrier keeps the lock read ordered before later init-state reads.
oop InstanceKlass::init_lock() const {
  // return the init lock from the mirror
  oop lock = java_lang_Class::init_lock(java_mirror());
  // Prevent reordering with any access of initialization state
  OrderAccess::loadload();
  assert((oop)lock != NULL || !is_not_initialized(),  // initialized or in_error state
         "only fully initialized state can have a null lock");
  return lock;
}
| 0
|
346,470
|
ex_scriptencoding(exarg_T *eap)
{
    // ":scriptencoding" — set up conversion from the named encoding to
    // 'encoding' for the remainder of the currently sourced script.  Only
    // valid while sourcing; an empty argument resets the conversion.
    source_cookie_T	*sp;
    char_u		*name;

    if (!sourcing_a_script(eap))
    {
	emsg(_(e_scriptencoding_used_outside_of_sourced_file));
	return;
    }

    if (*eap->arg != NUL)
    {
	name = enc_canonize(eap->arg);
	if (name == NULL)	// out of memory
	    return;
    }
    else
	name = eap->arg;

    // Setup for conversion from the specified encoding to 'encoding'.
    sp = (source_cookie_T *)getline_cookie(eap->getline, eap->cookie);
    convert_setup(&sp->conv, name, p_enc);

    if (name != eap->arg)
	vim_free(name);
}
| 0
|
225,487
|
// Validates that `fanin` names a regular (non-control) tensor id; any
// violation is reported through `handler`, which produces the Status.
Status CheckFaninIsRegular(const TensorId& fanin, ErrorHandler handler) {
  if (IsTensorIdRegular(fanin)) {
    return Status::OK();
  }
  return handler(absl::Substitute("fanin '$0' must be a regular tensor id",
                                  fanin.ToString()));
}
| 0
|
417,064
|
// Forward the "jump to last pattern" request to the wrapped player engine,
// doing nothing when no engine is attached.
void PlayerGeneric::lastPattern()
{
	if (!player)
		return;
	player->lastPattern();
}
| 0
|
513,094
|
Item_func_nullif::int_op()
{
  /* Integer evaluation of NULLIF(a, b): when the two compared arguments
     are equal the result is SQL NULL (null_value set, 0 returned);
     otherwise the cached third argument (a copy of the first) is
     evaluated and its value/nullness propagated. */
  DBUG_ASSERT(fixed == 1);
  longlong value;
  if (!compare())
  {
    null_value=1;
    return 0;
  }
  value= args[2]->val_int();
  null_value= args[2]->null_value;
  return value;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.