idx
int64 | func
string | target
int64 |
|---|---|---|
259,175
|
/* Parse a PrimaryItemBox ('pitm'): remember which item id is the
 * primary item of the file. Returns the atom size as bytes consumed. */
static int mov_read_pitm(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    unsigned item_id;

    avio_rb32(pb); /* FullBox header: 8-bit version + 24-bit flags, unused here */
    item_id = avio_rb16(pb);
    c->primary_item_id = item_id;
    return atom.size;
}
| 0
|
483,494
|
/*
 * Scan a flattened-device-tree node for every UEFI property listed in
 * params[], copying each value into info->params at the declared
 * offset and width.
 *
 * Returns 1 when all EFI_FDT_PARAMS_SIZE properties were found,
 * 0 on the first missing property (its name is recorded in
 * info->missing so the caller can report it).
 */
static int __init __find_uefi_params(unsigned long node,
struct param_info *info,
struct params *params)
{
const void *prop;
void *dest;
u64 val;
int i, len;
for (i = 0; i < EFI_FDT_PARAMS_SIZE; i++) {
prop = of_get_flat_dt_prop(node, params[i].propname, &len);
if (!prop) {
/* remember which property was absent */
info->missing = params[i].name;
return 0;
}
dest = info->params + params[i].offset;
info->found++;
/* FDT property values are big-endian u32 cells */
val = of_read_number(prop, len / sizeof(u32));
/* store with the width the destination field expects */
if (params[i].size == sizeof(u32))
*(u32 *)dest = val;
else
*(u64 *)dest = val;
if (efi_enabled(EFI_DBG))
pr_info(" %s: 0x%0*llx\n", params[i].name,
params[i].size * 2, val);
}
return 1;
}
| 0
|
512,334
|
/*
  Called when this IN predicate is found to be part of a top-level
  AND-ed condition.

  On first call only (guarded by transform_into_subq_checked), decide
  whether this IN list should be rewritten into an IN subquery, and if
  so register the item in the current SELECT's in_funcs list. The
  decision and the list allocation are made under the statement arena
  (if one is active) so they survive re-execution of a prepared
  statement. Finally, remember the embedding join nest.
*/
void Item_func_in::mark_as_condition_AND_part(TABLE_LIST *embedding)
{
THD *thd= current_thd;
Query_arena *arena, backup;
/* switch to the statement arena when currently in an execution arena */
arena= thd->activate_stmt_arena_if_needed(&backup);
if (!transform_into_subq_checked)
{
if ((transform_into_subq= to_be_transformed_into_in_subq(thd)))
thd->lex->current_select->in_funcs.push_back(this, thd->mem_root);
transform_into_subq_checked= true;
}
if (arena)
thd->restore_active_arena(arena, &backup);
emb_on_expr_nest= embedding;
}
| 0
|
366,195
|
/*
 * Move the mount at old_path onto the mountpoint at new_path.
 *
 * Validates that the destination is in the caller's namespace, that
 * the source is a mounted root (either one of ours or the root of an
 * anonymous namespace), that it is not locked, that file/dir kinds
 * match, that propagation rules allow the move, and that the move
 * would not create a loop; then reparents the tree via
 * attach_recursive_mnt(). Returns 0 or a negative errno.
 */
static int do_move_mount(struct path *old_path, struct path *new_path)
{
struct mnt_namespace *ns;
struct mount *p;
struct mount *old;
struct mount *parent;
struct mountpoint *mp, *old_mp;
int err;
bool attached;
mp = lock_mount(new_path);
if (IS_ERR(mp))
return PTR_ERR(mp);
old = real_mount(old_path->mnt);
p = real_mount(new_path->mnt);
parent = old->mnt_parent;
attached = mnt_has_parent(old);
old_mp = old->mnt_mp;
ns = old->mnt_ns;
err = -EINVAL;
/* The mountpoint must be in our namespace. */
if (!check_mnt(p))
goto out;
/* The thing moved must be mounted... */
if (!is_mounted(&old->mnt))
goto out;
/* ... and either ours or the root of anon namespace */
if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
goto out;
/* a mount locked in place (e.g. by a less privileged ns) may not move */
if (old->mnt.mnt_flags & MNT_LOCKED)
goto out;
/* only whole mounts can be moved, not arbitrary subtrees */
if (old_path->dentry != old_path->mnt->mnt_root)
goto out;
/* directory mounts go on directories, file mounts on files */
if (d_is_dir(new_path->dentry) !=
d_is_dir(old_path->dentry))
goto out;
/*
* Don't move a mount residing in a shared parent.
*/
if (attached && IS_MNT_SHARED(parent))
goto out;
/*
* Don't move a mount tree containing unbindable mounts to a destination
* mount which is shared.
*/
if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
goto out;
err = -ELOOP;
if (!check_for_nsfs_mounts(old))
goto out;
/* refuse to move a mount underneath itself */
for (; mnt_has_parent(p); p = p->mnt_parent)
if (p == old)
goto out;
err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
attached);
if (err)
goto out;
/* if the mount is moved, it should no longer be expire
* automatically */
list_del_init(&old->mnt_expire);
if (attached)
put_mountpoint(old_mp);
out:
unlock_mount(mp);
if (!err) {
/* on success drop the reference the source held on its old anchor */
if (attached)
mntput_no_expire(parent);
else
free_mnt_ns(ns);
}
return err;
}
| 0
|
238,414
|
/*
 * Reset @reg to a completely unknown scalar: clear type/id/off and
 * the map_ptr/range union, mark it SCALAR_VALUE with a fully unknown
 * tnum, and make its numeric bounds unbounded. Precision tracking is
 * forced on when the program has multiple subprogs or lacks
 * CAP_BPF-level capability, where the backtracking heuristic alone
 * is not trusted.
 */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{
/*
* Clear type, id, off, and union(map_ptr, range) and
* padding between 'type' and union
*/
memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
reg->type = SCALAR_VALUE;
reg->var_off = tnum_unknown;
reg->frameno = 0;
reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
__mark_reg_unbounded(reg);
}
| 0
|
355,648
|
/*
 * Evaluate "arg" as an expression and return the allocated result of
 * eval_to_string(), with the evaluation shielded from side effects:
 * optionally inside the sandbox, with textwinlock held, garbage
 * collection disabled, and a fresh function-call context.
 * Unless "keep_script_version" is set, evaluation runs with legacy
 * script version 1. All saved state is restored before returning,
 * in reverse order of how it was set.
 */
eval_to_string_safe(
char_u *arg,
int use_sandbox,
int keep_script_version)
{
char_u *retval;
funccal_entry_T funccal_entry;
int save_sc_version = current_sctx.sc_version;
int save_garbage = may_garbage_collect;
if (!keep_script_version)
current_sctx.sc_version = 1;
save_funccal(&funccal_entry);
if (use_sandbox)
++sandbox;
++textwinlock;
may_garbage_collect = FALSE;
retval = eval_to_string(arg, FALSE);
if (use_sandbox)
--sandbox;
--textwinlock;
may_garbage_collect = save_garbage;
restore_funccal();
current_sctx.sc_version = save_sc_version;
return retval;
}
| 0
|
413,636
|
/* Search for a path from core->offset to addr, up to "depth" steps.
 * Forward search first; if that fails, recurse backwards through every
 * CALL xref to the enclosing function, temporarily rebasing
 * core->offset onto each call site (restored afterwards). Addresses in
 * "avoid" are excluded. Returns a newly allocated RList describing the
 * path, or NULL when no path exists or allocation fails. */
static RList *anal_graph_to(RCore *core, ut64 addr, int depth, HtUP *avoid) {
RAnalFunction *cur_fcn = r_anal_get_fcn_in (core->anal, core->offset, 0);
RList *list = r_list_new ();
HtUP *state = ht_up_new0 ();
if (!list || !state || !cur_fcn) {
r_list_free (list);
ht_up_free (state);
return NULL;
}
// forward search
if (anal_path_exists (core, core->offset, addr, list, depth - 1, state, avoid)) {
ht_up_free (state);
return list;
}
// backward search
RList *xrefs = r_anal_xrefs_get (core->anal, cur_fcn->addr);
if (xrefs) {
RListIter *iter;
RAnalRef *xref = NULL;
r_list_foreach (xrefs, iter, xref) {
if (xref->type == R_ANAL_REF_TYPE_CALL) {
// retry from the caller's call site; core->offset is restored below
ut64 offset = core->offset;
core->offset = xref->addr;
r_list_free (list);
list = anal_graph_to (core, addr, depth - 1, avoid);
core->offset = offset;
if (list && r_list_length (list)) {
r_list_free (xrefs);
ht_up_free (state);
return list;
}
}
}
}
r_list_free (xrefs);
ht_up_free (state);
r_list_free (list);
return NULL;
}
| 0
|
344,239
|
/*
** Floating-point modulo used by the VM: delegates to the
** port-specific luai_nummod macro, which stores m %% n into its
** last argument.
*/
lua_Number luaV_modf (lua_State *L, lua_Number m, lua_Number n) {
  lua_Number result;
  luai_nummod(L, m, n, result);
  return result;
}
| 0
|
221,646
|
/// Determine whether \p operand is statically known to be falsy.
/// \return true only when the operand folds (via evalToBoolean) to the
/// boolean literal false; a non-foldable operand yields false, i.e.
/// "not provably false".
bool hermes::evalIsFalse(IRBuilder &builder, Literal *operand) {
  auto *boolLit = evalToBoolean(builder, operand);
  return boolLit ? !boolLit->getValue() : false;
}
| 0
|
448,911
|
/* Diagnostic helper: report how many entries of the internal code
 * table have been consumed for the current Huffman tables.
 * Returns (unsigned long)-1 when the stream state is invalid.
 * K&R-style definition kept to match zlib's house style. */
unsigned long ZEXPORT inflateCodesUsed(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (inflateStateCheck(strm)) return (unsigned long)-1;
state = (struct inflate_state FAR *)strm->state;
return (unsigned long)(state->next - state->codes);
}
| 0
|
231,669
|
// Raise the pre-validation writable-bytes limit when a packet arrives
// from the peer. Rationale (unchanged): a peer could obtain the same
// extra send budget anyway by opening a fresh connection, so each
// received packet earns another limitedCwndInMss * udpSendPacketLen.
void updateWritableByteLimitOnRecvPacket(QuicServerConnectionState& conn) {
  if (!conn.writableBytesLimit) {
    return;
  }
  const auto increment =
      conn.transportSettings.limitedCwndInMss * conn.udpSendPacketLen;
  conn.writableBytesLimit = *conn.writableBytesLimit + increment;
}
| 0
|
221,394
|
/*
 * Validate the control-register state in a nested guest's VMCB save
 * area, mirroring the architectural consistency checks performed on
 * VMRUN: long-mode paging requires PAE and protected mode with a
 * legal CR3, and CR4 must hold only bits valid for this vCPU.
 * Returns true when the state is legal.
 */
static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
struct vmcb_save_area *save)
{
/*
* These checks are also performed by KVM_SET_SREGS,
* except that EFER.LMA is not checked by SVM against
* CR0.PG && EFER.LME.
*/
if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
if (CC(!(save->cr4 & X86_CR4_PAE)) ||
CC(!(save->cr0 & X86_CR0_PE)) ||
CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
return false;
}
if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
return false;
return true;
}
| 0
|
473,851
|
/* Allocate and initialize an st_table sized for roughly "size"
 * entries: the bin count is rounded up to a prime, bins are zeroed,
 * and the packed (inline, no-bin) representation is enabled for small
 * numeric-hash tables. The HASH_LOG machinery registers an atexit
 * statistics dump exactly once. */
st_init_table_with_size(const struct st_hash_type *type, st_index_t size)
{
st_table *tbl;
#ifdef HASH_LOG
# if HASH_LOG+0 < 0
{
const char *e = getenv("ST_HASH_LOG");
if (!e || !*e) init_st = 1;
}
# endif
if (init_st == 0) {
init_st = 1;
atexit(stat_col);
}
#endif
size = new_size(size); /* round up to prime number */
tbl = alloc(st_table);
tbl->type = type;
tbl->num_entries = 0;
/* small numeric tables store entries inline instead of in bins */
tbl->entries_packed = type == &type_numhash && size/2 <= MAX_PACKED_NUMHASH;
tbl->num_bins = size;
tbl->bins = (st_table_entry **)Calloc(size, sizeof(st_table_entry*));
tbl->head = 0;
tbl->tail = 0;
return tbl;
}
| 0
|
229,293
|
// Encode a schema-change result message onto the wire: write the
// result kind (0x0005 = SCHEMA_CHANGE) followed by the serialized
// schema_change event for the negotiated protocol version. Any other
// event type reaching this visitor is a programming error (assert).
virtual void visit(const messages::result_message::schema_change& m) override {
auto change = m.get_change();
switch (change->type) {
case event::event_type::SCHEMA_CHANGE: {
auto sc = static_pointer_cast<event::schema_change>(change);
_response.write_int(0x0005);
_response.serialize(*sc, _version);
break;
}
default:
assert(0);
}
}
| 0
|
445,888
|
/* Async completion callback for extracting files that the user asked
 * to open: finish the archive operation, report its outcome to the
 * window, and open the extracted files only when no error occurred.
 * The ref/unref pair keeps odata alive across
 * _archive_operation_completed, which may release other references. */
open_files_extract_ready_cb (GObject *source_object,
GAsyncResult *result,
gpointer user_data)
{
OpenFilesData *odata = user_data;
GError *error = NULL;
open_files_data_ref (odata);
fr_archive_operation_finish (FR_ARCHIVE (source_object), result, &error);
_archive_operation_completed (odata->window, FR_ACTION_EXTRACTING_FILES, error);
if (error == NULL)
fr_window_open_extracted_files (odata);
open_files_data_unref (odata);
_g_error_free (error);
}
| 0
|
257,698
|
/* Validate a WebSocket upgrade request.
 * Returns the parsed Sec-WebSocket-Version (0 when the header is
 * absent), or -1 with r->http_status = 400 on an invalid version or
 * missing Host header, or -1 when the Origin check rejects the
 * request. */
static int wstunnel_check_request(request_st * const r, handler_ctx * const hctx) {
const buffer * const vers =
http_header_request_get(r, HTTP_HEADER_OTHER, CONST_STR_LEN("Sec-WebSocket-Version"));
/* -1 marks a header present but not starting with a digit */
const long hybivers = (NULL != vers)
? light_isdigit(*vers->ptr) ? strtol(vers->ptr, NULL, 10) : -1
: 0;
if (hybivers < 0 || hybivers > INT_MAX) {
DEBUG_LOG_ERR("%s", "invalid Sec-WebSocket-Version");
r->http_status = 400; /* Bad Request */
return -1;
}
/*(redundant since HTTP/1.1 required in mod_wstunnel_check_extension())*/
if (!r->http_host || buffer_is_blank(r->http_host)) {
DEBUG_LOG_ERR("%s", "Host header does not exist");
r->http_status = 400; /* Bad Request */
return -1;
}
if (!wstunnel_is_allowed_origin(r, hctx)) {
return -1;
}
return (int)hybivers;
}
| 0
|
473,961
|
/* Number of bytes needed to encode `code` in (extended, up to 6-byte)
 * UTF-8. The encoding argument is unused. Returns
 * ONIGERR_TOO_BIG_WIDE_CHAR_VALUE for values outside the 31-bit range
 * (unless they match a special invalid-code sentinel). */
code_to_mbclen(OnigCodePoint code, OnigEncoding enc ARG_UNUSED)
{
  if (code < 0x80) return 1;           /*  7 significant bits */
  else if (code < 0x800) return 2;     /* 11 bits */
  else if (code < 0x10000) return 3;   /* 16 bits */
  else if (code < 0x200000) return 4;  /* 21 bits */
  else if (code < 0x4000000) return 5; /* 26 bits */
  else if ((code & 0x80000000) == 0) return 6; /* 31 bits */
#ifdef USE_INVALID_CODE_SCHEME
  else if (code == INVALID_CODE_FE) return 1;
  else if (code == INVALID_CODE_FF) return 1;
#endif
  else
    return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE;
}
| 0
|
411,940
|
/* Decide whether a TGS request is a true anonymous request: either
 * the TGT itself is anonymous, or the request-anonymous KDC option is
 * set without the constrained-delegation markers described below. */
is_anon_tgs_request_p(const KDC_REQ_BODY *b,
const EncTicketPart *tgt)
{
KDCOptions f = b->kdc_options;
/*
* Versions of Heimdal from 1.0 to 7.6, inclusive, send both the
* request-anonymous and cname-in-addl-tkt flags for constrained
* delegation requests. A true anonymous TGS request will only
* have the request-anonymous flag set. (A corollary of this is
* that it is not possible to support anonymous constrained
* delegation requests, although they would be of limited utility.)
*/
return tgt->flags.anonymous ||
(f.request_anonymous && !f.cname_in_addl_tkt && !b->additional_tickets);
}
| 0
|
401,532
|
/*
 * Mix timing entropy from an input-device event (keyboard, mouse, ...)
 * into the entropy pool, skipping obviously repeated values.
 *
 * NOTE(review): last_value is unsigned char while value is unsigned
 * int, so only the low 8 bits are remembered between calls; events
 * whose values differ only above bit 7 may be misclassified as
 * repeats (or never filtered) — confirm whether this truncation is
 * intentional.
 */
void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value)
{
static unsigned char last_value;
/* ignore autorepeat and the like */
if (value == last_value)
return;
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
| 0
|
329,927
|
/* Drop the process-wide pixman glyph cache (if any) under its mutex so
 * it will be lazily recreated on next use. Safe to call repeatedly. */
_cairo_image_compositor_reset_static_data (void)
{
    CAIRO_MUTEX_LOCK (_cairo_glyph_cache_mutex);

    if (global_glyph_cache != NULL) {
	pixman_glyph_cache_destroy (global_glyph_cache);
	global_glyph_cache = NULL;
    }

    CAIRO_MUTEX_UNLOCK (_cairo_glyph_cache_mutex);
}
| 0
|
513,165
|
/* Free all heap resources attached to a plugin dl descriptor.
 * Order matters: the saved symbol-pointer backups must be restored
 * before the shared object is unloaded. The descriptor struct itself
 * is not freed here. */
static void free_plugin_mem(struct st_plugin_dl *p)
{
#ifdef HAVE_DLOPEN
if (p->ptr_backup)
{
DBUG_ASSERT(p->nbackups);
DBUG_ASSERT(p->handle);
restore_ptr_backup(p->nbackups, p->ptr_backup);
my_free(p->ptr_backup);
}
if (p->handle)
dlclose(p->handle);
#endif
my_free(p->dl.str);
/* the declaration array is freed only when we allocated a copy */
if (p->allocated)
my_free(p->plugins);
}
| 0
|
310,291
|
/** Authority-side handling of an uploaded router descriptor <b>ri</b>.
 * Rejects oversized descriptors, drops uploads whose differences from
 * the descriptor we already hold are purely cosmetic, and otherwise
 * hands the descriptor to router_add_to_routerlist(), emitting
 * REJECTED/DROPPED/ACCEPTED control events along the way. On return
 * *<b>msg</b> points to a status string (or stays NULL). <b>ri</b> is
 * consumed on every path (freed here or stored in the routerlist). */
dirserv_add_descriptor(routerinfo_t *ri, const char **msg, const char *source)
{
was_router_added_t r;
routerinfo_t *ri_old;
char *desc, *nickname;
size_t desclen = 0;
*msg = NULL;
/* If it's too big, refuse it now. Otherwise we'll cache it all over the
* network and it'll clog everything up. */
if (ri->cache_info.signed_descriptor_len > MAX_DESCRIPTOR_UPLOAD_SIZE) {
log_notice(LD_DIR, "Somebody attempted to publish a router descriptor '%s'"
" (source: %s) with size %d. Either this is an attack, or the "
"MAX_DESCRIPTOR_UPLOAD_SIZE (%d) constant is too low.",
ri->nickname, source, (int)ri->cache_info.signed_descriptor_len,
MAX_DESCRIPTOR_UPLOAD_SIZE);
*msg = "Router descriptor was too large";
control_event_or_authdir_new_descriptor("REJECTED",
ri->cache_info.signed_descriptor_body,
ri->cache_info.signed_descriptor_len, *msg);
routerinfo_free(ri);
return ROUTER_AUTHDIR_REJECTS;
}
/* Check whether this descriptor is semantically identical to the last one
* from this server. (We do this here and not in router_add_to_routerlist
* because we want to be able to accept the newest router descriptor that
* another authority has, so we all converge on the same one.) */
ri_old = router_get_by_digest(ri->cache_info.identity_digest);
if (ri_old && ri_old->cache_info.published_on < ri->cache_info.published_on
&& router_differences_are_cosmetic(ri_old, ri)
&& !router_is_me(ri)) {
log_info(LD_DIRSERV,
"Not replacing descriptor from %s (source: %s); "
"differences are cosmetic.",
router_describe(ri), source);
*msg = "Not replacing router descriptor; no information has changed since "
"the last one with this identity.";
control_event_or_authdir_new_descriptor("DROPPED",
ri->cache_info.signed_descriptor_body,
ri->cache_info.signed_descriptor_len, *msg);
routerinfo_free(ri);
return ROUTER_WAS_NOT_NEW;
}
/* Make a copy of desc, since router_add_to_routerlist might free
* ri and its associated signed_descriptor_t. */
desclen = ri->cache_info.signed_descriptor_len;
desc = tor_strndup(ri->cache_info.signed_descriptor_body, desclen);
nickname = tor_strdup(ri->nickname);
/* Tell if we're about to need to launch a test if we add this. */
ri->needs_retest_if_added =
dirserv_should_launch_reachability_test(ri, ri_old);
r = router_add_to_routerlist(ri, msg, 0, 0);
if (!WRA_WAS_ADDED(r)) {
/* unless the routerinfo was fine, just out-of-date */
if (WRA_WAS_REJECTED(r))
control_event_or_authdir_new_descriptor("REJECTED", desc, desclen, *msg);
log_info(LD_DIRSERV,
"Did not add descriptor from '%s' (source: %s): %s.",
nickname, source, *msg ? *msg : "(no message)");
} else {
smartlist_t *changed;
control_event_or_authdir_new_descriptor("ACCEPTED", desc, desclen, *msg);
changed = smartlist_create();
smartlist_add(changed, ri);
routerlist_descriptors_added(changed, 0);
smartlist_free(changed);
if (!*msg) {
*msg = ri->is_valid ? "Descriptor for valid server accepted" :
"Descriptor for invalid server accepted";
}
log_info(LD_DIRSERV,
"Added descriptor from '%s' (source: %s): %s.",
nickname, source, *msg);
}
tor_free(desc);
tor_free(nickname);
return r;
}
| 0
|
238,466
|
/* sort/bsearch comparator: order kfunc descriptors by func_id,
 * breaking ties by offset. The subtractions cannot overflow because
 * func_id is bounded by BTF_MAX_TYPE. */
static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
{
	const struct bpf_kfunc_desc *da = a;
	const struct bpf_kfunc_desc *db = b;
	int by_id = da->func_id - db->func_id;

	if (by_id)
		return by_id;
	return da->offset - db->offset;
}
| 0
|
253,616
|
/* Build an encrypted copy of a compound request: duplicate every page
 * of the original requests into freshly allocated pages (the iovs are
 * shared), prepend a transform header sized for the original payload,
 * and encrypt the result in place. Returns 0 or a negative errno;
 * on failure the partially built copy is torn down.
 *
 * NOTE(review): pages[] comes from kmalloc_array (uninitialized); if
 * alloc_page() fails partway, err_free runs with entries beyond the
 * failure point still uninitialized — confirm that
 * smb3_free_compound_rqst() tolerates this. */
smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
struct smb_rqst *new_rq, struct smb_rqst *old_rq)
{
struct page **pages;
struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
unsigned int npages;
unsigned int orig_len = 0;
int i, j;
int rc = -ENOMEM;
for (i = 1; i < num_rqst; i++) {
npages = old_rq[i - 1].rq_npages;
pages = kmalloc_array(npages, sizeof(struct page *),
GFP_KERNEL);
if (!pages)
goto err_free;
new_rq[i].rq_pages = pages;
new_rq[i].rq_npages = npages;
new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
/* iovs are shared with the original request, not copied */
new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
orig_len += smb_rqst_len(server, &old_rq[i - 1]);
for (j = 0; j < npages; j++) {
pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
if (!pages[j])
goto err_free;
}
/* copy pages form the old */
for (j = 0; j < npages; j++) {
char *dst, *src;
unsigned int offset, len;
rqst_page_get_length(&new_rq[i], j, &len, &offset);
dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
memcpy(dst, src, len);
kunmap(new_rq[i].rq_pages[j]);
kunmap(old_rq[i - 1].rq_pages[j]);
}
}
/* fill the 1st iov with a transform header */
fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
rc = crypt_message(server, num_rqst, new_rq, 1);
cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
if (rc)
goto err_free;
return rc;
err_free:
smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
return rc;
}
| 0
|
225,095
|
// Linear scan over an op's declared input args; returns the first one
// whose name equals `name`, or nullptr if the op has no such input.
const OpDef::ArgDef* FindInputArg(StringPiece name, const OpDef& op_def) {
  const int num_args = op_def.input_arg_size();
  for (int i = 0; i < num_args; ++i) {
    const auto& arg = op_def.input_arg(i);
    if (arg.name() == name) {
      return &arg;
    }
  }
  return nullptr;
}
| 0
|
277,669
|
/* Read one scanline of a raw (binary) PGM file whose maxval needs two
 * bytes per sample: each sample is read MSB-first and mapped through
 * the precomputed rescale[] table into JSAMPLE range.
 * Always returns 1 row; aborts via ERREXIT on premature EOF. */
get_word_gray_row (j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
/* This version is for reading raw-word-format PGM files with any maxval */
{
ppm_source_ptr source = (ppm_source_ptr) sinfo;
register JSAMPROW ptr;
register U_CHAR * bufferptr;
register JSAMPLE *rescale = source->rescale;
JDIMENSION col;
if (! ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width))
ERREXIT(cinfo, JERR_INPUT_EOF);
ptr = source->pub.buffer[0];
bufferptr = source->iobuffer;
for (col = cinfo->image_width; col > 0; col--) {
register int temp;
/* big-endian 16-bit sample: high byte first */
temp = UCH(*bufferptr++) << 8;
temp |= UCH(*bufferptr++);
*ptr++ = rescale[temp];
}
return 1;
}
| 0
|
508,298
|
/*
  Scan a linked list of open TABLE objects for the one whose table
  cache key matches (db, table_name).

  @return the matching TABLE object, or NULL when none matches.
*/
TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name)
{
  char key[MAX_DBKEY_LENGTH];
  uint key_length= tdc_create_key(key, db, table_name);
  TABLE *table= list;

  while (table)
  {
    if (table->s->table_cache_key.length == key_length &&
        memcmp(table->s->table_cache_key.str, key, key_length) == 0)
      return table;
    table= table->next;
  }
  return NULL;
}
| 0
|
294,635
|
/*
 * Ruby-level accessor: returns the (local) Julian day number of this
 * Date object, as computed by m_real_local_jd on the wrapped data
 * extracted by get_d1.
 */
d_lite_jd(VALUE self)
{
get_d1(self);
return m_real_local_jd(dat);
}
| 0
|
476,099
|
/*
 * Configure endpoint @_ep of function @f for the gadget's current
 * connection speed and alternate setting @alt: pick the descriptor
 * set for the speed (falling back to lower speeds when a set is
 * missing), find the interface descriptor with the requested alt
 * setting, find the endpoint descriptor matching _ep->address, and
 * commit maxpacket/mult/maxburst (plus the SuperSpeed companion
 * descriptor when one is required). Returns 0 on success, -EIO when
 * arguments are missing or no matching descriptor exists.
 */
int config_ep_by_speed_and_alt(struct usb_gadget *g,
struct usb_function *f,
struct usb_ep *_ep,
u8 alt)
{
struct usb_endpoint_descriptor *chosen_desc = NULL;
struct usb_interface_descriptor *int_desc = NULL;
struct usb_descriptor_header **speed_desc = NULL;
struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
int want_comp_desc = 0;
struct usb_descriptor_header **d_spd; /* cursor for speed desc */
struct usb_composite_dev *cdev;
bool incomplete_desc = false;
if (!g || !f || !_ep)
return -EIO;
/* select desired speed; each case falls through to the next slower
* speed when the function supplies no descriptors for it */
switch (g->speed) {
case USB_SPEED_SUPER_PLUS:
if (gadget_is_superspeed_plus(g)) {
if (f->ssp_descriptors) {
speed_desc = f->ssp_descriptors;
want_comp_desc = 1;
break;
}
incomplete_desc = true;
}
fallthrough;
case USB_SPEED_SUPER:
if (gadget_is_superspeed(g)) {
if (f->ss_descriptors) {
speed_desc = f->ss_descriptors;
want_comp_desc = 1;
break;
}
incomplete_desc = true;
}
fallthrough;
case USB_SPEED_HIGH:
if (gadget_is_dualspeed(g)) {
if (f->hs_descriptors) {
speed_desc = f->hs_descriptors;
break;
}
incomplete_desc = true;
}
fallthrough;
default:
speed_desc = f->fs_descriptors;
}
cdev = get_gadget_data(g);
if (incomplete_desc)
WARNING(cdev,
"%s doesn't hold the descriptors for current speed\n",
f->name);
/* find correct alternate setting descriptor */
for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) {
int_desc = (struct usb_interface_descriptor *)*d_spd;
if (int_desc->bAlternateSetting == alt) {
speed_desc = d_spd;
goto intf_found;
}
}
return -EIO;
intf_found:
/* find descriptors */
for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) {
chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
if (chosen_desc->bEndpointAddress == _ep->address)
goto ep_found;
}
return -EIO;
ep_found:
/* commit results */
_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
_ep->desc = chosen_desc;
_ep->comp_desc = NULL;
_ep->maxburst = 0;
_ep->mult = 1;
/* high-speed periodic endpoints encode extra transactions in maxp */
if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
usb_endpoint_xfer_int(_ep->desc)))
_ep->mult = usb_endpoint_maxp_mult(_ep->desc);
if (!want_comp_desc)
return 0;
/*
* Companion descriptor should follow EP descriptor
* USB 3.0 spec, #9.6.7
*/
comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd);
if (!comp_desc ||
(comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP))
return -EIO;
_ep->comp_desc = comp_desc;
if (g->speed >= USB_SPEED_SUPER) {
switch (usb_endpoint_type(_ep->desc)) {
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
_ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
fallthrough;
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
_ep->maxburst = comp_desc->bMaxBurst + 1;
break;
default:
if (comp_desc->bMaxBurst != 0)
ERROR(cdev, "ep0 bMaxBurst must be 0\n");
_ep->maxburst = 1;
break;
}
}
return 0;
}
| 0
|
238,454
|
/*
 * For unprivileged (Spectre-v1 mitigated) programs, verify that the
 * destination pointer produced by pointer arithmetic still lands
 * inside its object (stack slot or map value) so later speculative
 * sanitization can rely on the offset being in bounds.
 * Returns 0 when acceptable (or when mitigation is bypassed),
 * -EACCES otherwise.
 */
static int sanitize_check_bounds(struct bpf_verifier_env *env,
const struct bpf_insn *insn,
const struct bpf_reg_state *dst_reg)
{
u32 dst = insn->dst_reg;
/* For unprivileged we require that resulting offset must be in bounds
* in order to be able to sanitize access later on.
*/
if (env->bypass_spec_v1)
return 0;
switch (dst_reg->type) {
case PTR_TO_STACK:
if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
dst_reg->off + dst_reg->var_off.value))
return -EACCES;
break;
case PTR_TO_MAP_VALUE:
if (check_map_access(env, dst, dst_reg->off, 1, false)) {
verbose(env, "R%d pointer arithmetic of map value goes out of range, "
"prohibited for !root\n", dst);
return -EACCES;
}
break;
default:
break;
}
return 0;
}
| 0
|
512,500
|
/*
  Element-wise equality of this argument list against "other",
  comparing each pair with Item::eq (optionally binary collation).
  Assumes the caller has already established that both lists have the
  same arg_count.
*/
bool eq(const Item_args *other, bool binary_cmp) const
{
  uint i= 0;
  while (i < arg_count)
  {
    if (!args[i]->eq(other->args[i], binary_cmp))
      return false;
    i++;
  }
  return true;
}
| 0
|
139,222
|
// Widget-delegate override: upper bound on the overlay window size,
// returned from the cached |max_size_| (computed elsewhere).
gfx::Size OverlayWindowViews::GetMaximumSize() const {
return max_size_;
}
| 0
|
247,528
|
// Verifies client-certificate validation pinned by SPKI: the server
// trusts ca_cert and additionally lists two acceptable SPKI hashes;
// the client presents the san_uri certificate and the handshake is
// expected to succeed, including after client-initiated renegotiation.
TEST_P(SslSocketTest, ClientCertificateSpkiVerification) {
envoy::config::listener::v3::Listener listener;
envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();
envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;
envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =
tls_context.mutable_common_tls_context()->add_tls_certificates();
server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"));
server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem"));
envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*
server_validation_ctx =
tls_context.mutable_common_tls_context()->mutable_validation_context();
server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"));
// Either of these SPKI pins is acceptable to the server.
server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);
server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);
updateFilterChain(tls_context, *filter_chain);
envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;
envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =
client.mutable_common_tls_context()->add_tls_certificates();
client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem"));
client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem"));
TestUtilOptionsV2 test_options(listener, client, true, GetParam());
testUtilV2(test_options.setExpectedClientCertUri("spiffe://lyft.com/test-team")
.setExpectedServerCertDigest(TEST_SAN_DNS_CERT_256_HASH));
// Works even with client renegotiation.
client.set_allow_renegotiation(true);
testUtilV2(test_options);
}
| 0
|
238,544
|
/*
 * Track a register/immediate write to a fixed offset within a BPF
 * stack frame. Responsibilities: grow the tracked stack area, refuse
 * partial overwrites of spilled pointers for unprivileged programs,
 * record Spectre-v4 sanitization marks, handle spills of bounded
 * scalars and pointers (with precision backtracking where needed),
 * and mark remaining slots as MISC or ZERO. Returns 0 or -errno.
 */
static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
/* stack frame we're writing to */
struct bpf_func_state *state,
int off, int size, int value_regno,
int insn_idx)
{
struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
struct bpf_reg_state *reg = NULL;
err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (!env->allow_ptr_leaks &&
state->stack[spi].slot_type[0] == STACK_SPILL &&
size != BPF_REG_SIZE) {
verbose(env, "attempt to corrupt spilled pointer on stack\n");
return -EACCES;
}
cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0)
reg = &cur->regs[value_regno];
if (!env->bypass_spec_v4) {
/* a spill over previously uninitialized bytes may need store
* sanitization against speculative store bypass */
bool sanitize = reg && is_spillable_regtype(reg->type);
for (i = 0; i < size; i++) {
if (state->stack[spi].slot_type[i] == STACK_INVALID) {
sanitize = true;
break;
}
}
if (sanitize)
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
}
mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
if (dst_reg != BPF_REG_FP) {
/* The backtracking logic can only recognize explicit
* stack slot address like [fp - 8]. Other spill of
* scalar via different register has to be conservative.
* Backtrack from here and mark all registers as precise
* that contributed into 'reg' being a constant.
*/
err = mark_chain_precision(env, value_regno);
if (err)
return err;
}
save_register_state(state, spi, reg, size);
} else if (reg && is_spillable_regtype(reg->type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
verbose_linfo(env, insn_idx, "; ");
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
if (state != cur && reg->type == PTR_TO_STACK) {
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
save_register_state(state, spi, reg, size);
} else {
u8 type = STACK_MISC;
/* regular write of data into stack destroys any spilled ptr */
state->stack[spi].spilled_ptr.type = NOT_INIT;
/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
if (is_spilled_reg(&state->stack[spi]))
for (i = 0; i < BPF_REG_SIZE; i++)
scrub_spilled_slot(&state->stack[spi].slot_type[i]);
/* only mark the slot as written if all 8 bytes were written
* otherwise read propagation may incorrectly stop too soon
* when stack slots are partially written.
* This heuristic means that read propagation will be
* conservative, since it will add reg_live_read marks
* to stack slots all the way to first state when programs
* writes+reads less than 8 bytes
*/
if (size == BPF_REG_SIZE)
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
/* when we zero initialize stack slots mark them as such */
if (reg && register_is_null(reg)) {
/* backtracking doesn't work for STACK_ZERO yet. */
err = mark_chain_precision(env, value_regno);
if (err)
return err;
type = STACK_ZERO;
}
/* Mark slots affected by this stack write. */
for (i = 0; i < size; i++)
state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
type;
}
return 0;
}
| 0
|
445,994
|
/* Common failure path for an encrypt operation: stop the activity
 * indicator and let the error handler run. If the handler opened a
 * dialog, leave the progress dialog in place; otherwise close it and
 * abort any batch operation in progress. */
_encrypt_operation_completed_with_error (FrWindow *window,
FrAction action,
GError *error)
{
gboolean opens_dialog;
g_return_if_fail (error != NULL);
#ifdef DEBUG
debug (DEBUG_INFO, "%s [DONE] (FR::Window)\n", action_names[action]);
#endif
_fr_window_stop_activity_mode (window);
_handle_archive_operation_error (window, window->archive, action, error, NULL, &opens_dialog);
if (opens_dialog)
return;
close_progress_dialog (window, FALSE);
fr_window_stop_batch (window);
}
| 0
|
261,452
|
/* Release the ctxIdx lookup table, which is allocated as one flat
 * block whose base pointer lives in the [0][0][0][0] slot, and clear
 * the stored pointer so a repeated call is harmless. */
void free_significant_coeff_ctxIdx_lookupTable()
{
  void* base = ctxIdxLookup[0][0][0][0];
  free(base);
  ctxIdxLookup[0][0][0][0] = NULL;
}
| 0
|
369,309
|
/*
 * Prepare an IORING_OP_CONNECT request: reject IOPOLL rings and any
 * SQE fields that connect(2) does not use, then capture the
 * user-space sockaddr pointer (sqe->addr) and its length (sqe->addr2)
 * into the per-request connect state. Returns 0 or -EINVAL.
 */
static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_connect *conn = &req->connect;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
sqe->splice_fd_in)
return -EINVAL;
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
conn->addr_len = READ_ONCE(sqe->addr2);
return 0;
}
| 0
|
484,723
|
/**
 * Free the MOBIBuffer structure itself without releasing the data it
 * may point to (only the struct is freed here).
 *
 * @param buf Buffer to free; NULL is accepted and ignored.
 */
void mobi_buffer_free_null(MOBIBuffer *buf) {
    /* free(NULL) is a no-op per the C standard, so no guard is needed */
    free(buf);
}
| 0
|
220,430
|
/*
 * Prepend "item" to the array and return the array.
 *
 * Fast path: when the array shares a heap buffer that only this array
 * references and there is spare room before the current start, simply
 * step the element pointer back one slot. Otherwise take a private,
 * writable copy (ary_modify), grow capacity if needed, and shift the
 * existing elements up by one. A GC write barrier records the new
 * reference in either case.
 */
mrb_ary_unshift(mrb_state *mrb, mrb_value self, mrb_value item)
{
struct RArray *a = mrb_ary_ptr(self);
mrb_int len = ARY_LEN(a);
if (ARY_SHARED_P(a)
&& a->as.heap.aux.shared->refcnt == 1 /* shared only referenced from this array */
&& a->as.heap.ptr - a->as.heap.aux.shared->ptr >= 1) /* there's room for unshifted item */ {
a->as.heap.ptr--;
a->as.heap.ptr[0] = item;
}
else {
mrb_value *ptr;
ary_modify(mrb, a);
if (ARY_CAPA(a) < len + 1)
ary_expand_capa(mrb, a, len + 1);
ptr = ARY_PTR(a);
value_move(ptr + 1, ptr, len);
ptr[0] = item;
}
ARY_SET_LEN(a, len+1);
mrb_field_write_barrier_value(mrb, (struct RBasic*)a, item);
return self;
}
| 0
|
225,440
|
static const struct v4l2l_format *format_by_fourcc(int fourcc)
{
unsigned int i;
for (i = 0; i < FORMATS; i++) {
if (formats[i].fourcc == fourcc)
return formats + i;
}
dprintk("unsupported format '%c%c%c%c'\n", (fourcc >> 0) & 0xFF,
(fourcc >> 8) & 0xFF, (fourcc >> 16) & 0xFF,
(fourcc >> 24) & 0xFF);
return NULL;
}
| 0
|
468,337
|
/* Test driver: initialize the GLib test harness, register the
 * happy-eyeballs socket-client tests, and run them. */
main (int argc, char *argv[])
{
g_test_init (&argc, &argv, NULL);
g_test_add_func ("/socket-client/happy-eyeballs/slow", test_happy_eyeballs);
g_test_add_func ("/socket-client/happy-eyeballs/cancellation", test_happy_eyeballs_cancel);
return g_test_run ();
}
| 0
|
220,031
|
// Overridden human-readable description of this resource.
string DebugString() const override { return "A SparseTensorsMap"; }
| 0
|
244,047
|
/* Box destructor: frees only the box structure itself; no owned
 * sub-fields are released here. */
void proj_type_box_del(GF_Box *s)
{
gf_free(s);
}
| 0
|
218,974
|
// Replace `node` with constant node(s) holding its precomputed
// value(s). Merge nodes take a dedicated path. When evaluation yields
// a single constant, `node` is rewritten in place as a Const;
// otherwise fresh Const nodes are added to `output_graph` and the
// fanout of `node` is rewired onto them (control edges are forwarded
// or preserved). `*result_too_large` reports when evaluation produced
// an oversized result.
Status ConstantFolding::FoldNode(NodeDef* node, GraphDef* output_graph,
bool* result_too_large) {
*result_too_large = false;
if (IsMerge(*node)) {
return FoldMergeNode(node, output_graph);
}
std::vector<NodeDef> const_nodes;
TF_RETURN_IF_ERROR(
EvaluateOneFoldable(*node, &const_nodes, result_too_large));
VLOG(2) << "Folded node: " << SummarizeNodeDef(*node);
NodeDef* constant_output = nullptr;
for (int i = 0, end = const_nodes.size(); i < end; i++) {
NodeDef* const_node = &const_nodes[i];
VLOG(3) << "Generated constant node: " << SummarizeNodeDef(*const_node);
if (const_node->name().empty()) {
// Dead output: we can't create a constant to encode its value, so we'll
// just skip it. We'll preserve the edges that originate from that
// output below to preserve the overall behavior of the graph wrt dead
// edges.
continue;
}
// Returns `true` iff `const_node` already has control input named `input`.
const auto is_duplicate_control_input = [&](const string& input) -> bool {
auto it = absl::c_find(const_node->input(), input);
return it != const_node->input().end();
};
// Forward control dependencies.
for (const string& input : node->input()) {
// Forward control dependencies from folded node.
if (IsControlInput(input)) {
if (!is_duplicate_control_input(input)) {
*const_node->add_input() = input;
}
}
// Forward control dependencies from constant inputs to folded node.
if (!IsControlInput(input)) {
NodeDef* input_node = node_map_->GetNode(input);
for (const string& fanin_of_input : input_node->input()) {
if (!is_duplicate_control_input(fanin_of_input)) {
*const_node->add_input() = fanin_of_input;
}
}
}
}
// We rewrite the existing node if it only has a single output, and
// create new nodes otherwise.
if (const_nodes.size() == 1) {
node->set_op("Const");
// Note we need to clear the inputs in NodeMap before we clear the inputs
// in the node, otherwise NodeMap would see empty inputs and effectively
// does nothing.
node_map_->RemoveInputs(node->name());
node->clear_input();
*node->mutable_input() = const_node->input();
for (const auto& input : node->input()) {
node_map_->AddOutput(NodeName(input), node->name());
}
*node->mutable_attr() = const_node->attr();
break;
} else {
if (node_map_->GetNode(const_node->name())) {
// Intended name already exists.
return errors::AlreadyExists(strings::StrCat(
const_node->name(), " already present in the graph"));
}
NodeDef* added_node = output_graph->add_node();
*added_node = *const_node;
added_node->set_device(node->device());
node_map_->AddNode(added_node->name(), added_node);
for (const auto& input : added_node->input()) {
node_map_->AddOutput(NodeName(input), added_node->name());
}
// All the constant nodes encoding output values have the same control
// dependencies (since these are the control dependencies of the node
// we're trying to fold). Record one such constant node.
constant_output = added_node;
}
}
if (const_nodes.size() > 1) {
// We make a copy because we mutate the nodes.
auto outputs = node_map_->GetOutputs(node->name());
for (NodeDef* output : outputs) {
for (int i = 0; i < output->input_size(); i++) {
int port;
string node_name = ParseNodeName(output->input(i), &port);
if (node_name == node->name()) {
if (port < 0) {
// Propagate control dependencies if possible. If not, we'll just
// preserve the existing control dependencies.
if (constant_output != nullptr) {
node_map_->UpdateInput(node_name, NodeName(output->input(i)),
constant_output->name());
*output->mutable_input(i) = AsControlDependency(*constant_output);
}
} else if (port < static_cast<int>(const_nodes.size()) &&
!const_nodes[port].name().empty()) {
// Replace alive outputs with the corresponding constant.
node_map_->UpdateInput(output->name(), NodeName(output->input(i)),
const_nodes[port].name());
*output->mutable_input(i) = const_nodes[port].name();
} else {
// Leave this edge alone.
VLOG(3) << "Preserving edge from " << node->name() << ":" << port
<< "[" << node->op() << "] to " << output->name() << ":"
<< i << "[" << output->op() << "]";
}
}
}
}
// Drop the folded node entirely when nothing consumes it and it is
// not fetched or otherwise preserved.
outputs = node_map_->GetOutputs(node->name());
if (outputs.empty() && has_fetch_ &&
nodes_to_preserve_.find(node->name()) == nodes_to_preserve_.end()) {
node_map_->RemoveInputs(node->name());
node->clear_input();
}
}
return Status::OK();
}
| 0
|
224,174
|
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (index >= dtypes_.size()) {
return Status(errors::InvalidArgument(
"Index '", index, "' for key '", key.scalar<int64_t>()(),
"' was out of bounds '", dtypes_.size(), "'."));
}
return Status::OK();
}
| 0
|
421,399
|
/* Print a comma-separated list of variable declarations at depth d. */
static void pvarlist(int d, js_Ast *list)
{
	for (; list; list = list->b) {
		assert(list->type == AST_LIST);
		pvar(d, list->a);
		/* separate items, but no trailing comma */
		if (list->b)
			comma();
	}
}
| 0
|
242,666
|
/* Dissect the per-parameter length array of a v2 sysdig event header.
 * Returns the number of bytes consumed. */
dissect_header_lens_v2(tvbuff_t *tvb, wtap_syscall_header* syscall_header, int offset, proto_tree *tree, int encoding)
{
    guint32 idx;
    proto_item *lens_item;
    proto_tree *lens_tree;
    const int total_len = syscall_header->nparams * SYSDIG_PARAM_SIZE_V2;

    /* One subtree that holds every parameter-length field. */
    lens_item = proto_tree_add_item(tree, hf_se_param_lens, tvb, offset, total_len, ENC_NA);
    lens_tree = proto_item_add_subtree(lens_item, ett_sysdig_parm_lens);

    for (idx = 0; idx < syscall_header->nparams; idx++) {
        proto_tree_add_item(lens_tree, hf_se_param_len, tvb, offset + (idx * SYSDIG_PARAM_SIZE_V2), SYSDIG_PARAM_SIZE_V2, encoding);
    }

    proto_item_set_len(lens_item, total_len);
    return total_len;
}
| 0
|
310,008
|
/*
 * Terminal-driver capability check for the terminfo driver: load the
 * terminal description for 'tname' (database/termcap first, then the
 * compiled-in fallback list), reject entries this driver cannot usefully
 * drive (generic or hardcopy), and finish initializing the TERMINAL.
 *
 * NOTE(review): the ret_error0/ret_error1 macros do not fall through --
 * they record a status (via errret) and leave this function early, so
 * 'result' is only returned on the success path; confirm against the
 * macro definitions.
 */
drv_CanHandle(TERMINAL_CONTROL_BLOCK * TCB, const char *tname, int *errret)
{
    bool result = FALSE;
    int status;
    TERMINAL *termp;
    SCREEN *sp;

    START_TRACE();
    T((T_CALLED("tinfo::drv_CanHandle(%p)"), (void *) TCB));

    assert(TCB != 0 && tname != 0);

    termp = (TERMINAL *) TCB;
    sp = TCB->csp;
    TCB->magic = TCBMAGIC;

#if (NCURSES_USE_DATABASE || NCURSES_USE_TERMCAP)
    status = _nc_setup_tinfo(tname, &TerminalType(termp));
    T(("_nc_setup_tinfo returns %d", status));
#else
    T(("no database available"));
    status = TGETENT_NO;
#endif

    /* try fallback list if entry on disk */
    if (status != TGETENT_YES) {
	const TERMTYPE2 *fallback = _nc_fallback2(tname);

	if (fallback) {
	    T(("found fallback entry"));
	    TerminalType(termp) = *fallback;
	    status = TGETENT_YES;
	}
    }

    /* neither database nor fallback has the terminal: report why */
    if (status != TGETENT_YES) {
	NCURSES_SP_NAME(del_curterm) (NCURSES_SP_ARGx termp);
	if (status == TGETENT_ERR) {
	    ret_error0(status, "terminals database is inaccessible\n");
	} else if (status == TGETENT_NO) {
	    ret_error1(status, "unknown terminal type.\n",
		       tname, NO_COPY);
	} else {
	    ret_error0(status, "unexpected return-code\n");
	}
    }
    result = TRUE;

#if NCURSES_EXT_NUMBERS
    _nc_export_termtype2(&termp->type, &TerminalType(termp));
#endif
#if !USE_REENTRANT
    save_ttytype(termp);
#endif

    if (command_character)
	_nc_tinfo_cmdch(termp, *command_character);

    /*
     * If an application calls setupterm() rather than initscr() or
     * newterm(), we will not have the def_prog_mode() call in
     * _nc_setupscreen().  Do it now anyway, so we can initialize the
     * baudrate.
     */
    if (sp == 0 && NC_ISATTY(termp->Filedes)) {
	get_baudrate(termp);
    }
#if NCURSES_EXT_NUMBERS
#define cleanup_termtype() \
    _nc_free_termtype2(&TerminalType(termp)); \
    _nc_free_termtype(&termp->type)
#else
#define cleanup_termtype() \
    _nc_free_termtype2(&TerminalType(termp))
#endif

    if (generic_type) {
	/*
	 * BSD 4.3's termcap contains mis-typed "gn" for wy99.  Do a sanity
	 * check before giving up.
	 */
	if ((VALID_STRING(cursor_address)
	     || (VALID_STRING(cursor_down) && VALID_STRING(cursor_home)))
	    && VALID_STRING(clear_screen)) {
	    cleanup_termtype();
	    ret_error1(TGETENT_YES, "terminal is not really generic.\n",
		       tname, NO_COPY);
	} else {
	    cleanup_termtype();
	    ret_error1(TGETENT_NO, "I need something more specific.\n",
		       tname, NO_COPY);
	}
    }
    if (hard_copy) {
	cleanup_termtype();
	ret_error1(TGETENT_YES, "I can't handle hardcopy terminals.\n",
		   tname, NO_COPY);
    }
    returnBool(result);
}
| 0
|
224,496
|
/* Fill a GF_StyleRecord from a TTXT <Style> XML node's attributes.
 * Unrecognized attributes are ignored. */
static void ttxt_parse_text_style(GF_TXTIn *ctx, GF_XMLNode *n, GF_StyleRecord *style)
{
	u32 idx = 0;
	GF_XMLAttribute *attr;

	/* defaults: font #1, importer font size, opaque white text */
	memset(style, 0, sizeof(GF_StyleRecord));
	style->fontID = 1;
	style->font_size = ctx->fontsize;
	style->text_color = 0xFFFFFFFF;

	while ((attr = (GF_XMLAttribute *)gf_list_enum(n->attributes, &idx))) {
		if (!stricmp(attr->name, "fromChar")) {
			style->startCharOffset = atoi(attr->value);
		} else if (!stricmp(attr->name, "toChar")) {
			style->endCharOffset = atoi(attr->value);
		} else if (!stricmp(attr->name, "fontID")) {
			style->fontID = atoi(attr->value);
		} else if (!stricmp(attr->name, "fontSize")) {
			style->font_size = atoi(attr->value);
		} else if (!stricmp(attr->name, "color")) {
			style->text_color = ttxt_get_color(attr->value);
		} else if (!stricmp(attr->name, "styles")) {
			/* presence of each keyword sets the matching flag */
			if (strstr(attr->value, "Bold")) style->style_flags |= GF_TXT_STYLE_BOLD;
			if (strstr(attr->value, "Italic")) style->style_flags |= GF_TXT_STYLE_ITALIC;
			if (strstr(attr->value, "Underlined")) style->style_flags |= GF_TXT_STYLE_UNDERLINED;
			if (strstr(attr->value, "Strikethrough")) style->style_flags |= GF_TXT_STYLE_STRIKETHROUGH;
		}
	}
}
| 0
|
405,711
|
/**
 * xemaclite_mdio_write - Write to a given MII management register
 * @bus:	the mii_bus struct
 * @phy_id:	the phy address
 * @reg:	register number to write to
 * @val:	value to write into the register
 *
 * Waits for the MDIO interface to become free, then programs the address
 * register (OP bit cleared selects a write), the write-data register, and
 * finally sets the status bit in the control register to kick off the
 * transaction.  The register write order matters for the hardware.
 *
 * Return: 0 on success, -ETIMEDOUT if the MDIO bus never became free.
 */
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
				u16 val)
{
	struct net_local *lp = bus->priv;
	u32 ctrl_reg;

	dev_dbg(&lp->ndev->dev,
		"%s(phy_id=%i, reg=%x, val=%x)\n", __func__,
		phy_id, reg, val);

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	/* Write the PHY address, register number and clear the OP bit in the
	 * MDIO Address register and then write the value into the MDIO Write
	 * Data register. Finally, set the Status bit in the MDIO Control
	 * register to start a MDIO write transaction.
	 */
	ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
	xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
			 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
			 lp->base_addr + XEL_MDIOADDR_OFFSET);
	xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
	xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
			 lp->base_addr + XEL_MDIOCTRL_OFFSET);

	return 0;
}
| 0
|
196,829
|
// Stores the input tensor in the per-session TensorStore and produces a
// handle (DT_RESOURCE or legacy string, depending on the expected output
// dtype) that later ops can use to fetch it back.
void Compute(OpKernelContext* ctx) override {
  const Tensor& val = ctx->input(0);
  // BUGFIX: session_state() is null when the op runs outside a session
  // (e.g. invoked directly as a raw op); dereferencing it crashed the
  // process.  Fail gracefully instead.
  OP_REQUIRES(ctx, ctx->session_state() != nullptr,
              errors::FailedPrecondition(
                  "GetSessionHandle called on null session state"));
  int64 id = ctx->session_state()->GetNewId();
  TensorStore::TensorAndKey tk{val, id, requested_device()};
  OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk));

  Tensor* handle = nullptr;
  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle));
  if (ctx->expected_output_dtype(0) == DT_RESOURCE) {
    ResourceHandle resource_handle = MakeResourceHandle<Tensor>(
        ctx, SessionState::kTensorHandleResourceTypeName,
        tk.GetHandle(name()));
    resource_handle.set_maybe_type_name(
        SessionState::kTensorHandleResourceTypeName);
    handle->scalar<ResourceHandle>()() = resource_handle;
  } else {
    // Legacy behavior in V1: emit the handle as a string scalar.
    handle->flat<tstring>().setConstant(tk.GetHandle(name()));
  }
}
|
317,318
|
/**
 * smack_set_mnt_opts - apply Smack mount options to a superblock
 * @sb: the superblock being mounted
 * @mnt_opts: parsed Smack options (struct smack_mnt_opts *), may be NULL
 * @kern_flags: unused here
 * @set_kern_flags: unused here
 *
 * Record the fsdefault/fsfloor/fshat/fsroot/fstransmute labels in the
 * superblock's security blob and label the root inode.  Unprivileged
 * (no CAP_MAC_ADMIN) mounts may not pass options and inherit the
 * caller's label for root and default.
 *
 * Returns 0 on success or a negative errno.
 */
static int smack_set_mnt_opts(struct super_block *sb,
		void *mnt_opts,
		unsigned long kern_flags,
		unsigned long *set_kern_flags)
{
	struct dentry *root = sb->s_root;
	struct inode *inode = d_backing_inode(root);
	struct superblock_smack *sp = smack_superblock(sb);
	struct inode_smack *isp;
	struct smack_known *skp;
	struct smack_mnt_opts *opts = mnt_opts;
	bool transmute = false;

	/* Only initialize each superblock once. */
	if (sp->smk_flags & SMK_SB_INITIALIZED)
		return 0;

	/* The root inode may not have a security blob allocated yet. */
	if (inode->i_security == NULL) {
		int rc = lsm_inode_alloc(inode);

		if (rc)
			return rc;
	}

	if (!smack_privileged(CAP_MAC_ADMIN)) {
		/*
		 * Unprivileged mounts don't get to specify Smack values.
		 */
		if (opts)
			return -EPERM;
		/*
		 * Unprivileged mounts get root and default from the caller.
		 */
		skp = smk_of_current();
		sp->smk_root = skp;
		sp->smk_default = skp;
		/*
		 * For a handful of fs types with no user-controlled
		 * backing store it's okay to trust security labels
		 * in the filesystem. The rest are untrusted.
		 */
		if (sb->s_user_ns != &init_user_ns &&
		    sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC &&
		    sb->s_magic != RAMFS_MAGIC) {
			transmute = true;
			sp->smk_flags |= SMK_SB_UNTRUSTED;
		}
	}

	sp->smk_flags |= SMK_SB_INITIALIZED;

	if (opts) {
		if (opts->fsdefault) {
			skp = smk_import_entry(opts->fsdefault, 0);
			if (IS_ERR(skp))
				return PTR_ERR(skp);
			sp->smk_default = skp;
		}
		if (opts->fsfloor) {
			skp = smk_import_entry(opts->fsfloor, 0);
			if (IS_ERR(skp))
				return PTR_ERR(skp);
			sp->smk_floor = skp;
		}
		if (opts->fshat) {
			skp = smk_import_entry(opts->fshat, 0);
			if (IS_ERR(skp))
				return PTR_ERR(skp);
			sp->smk_hat = skp;
		}
		if (opts->fsroot) {
			skp = smk_import_entry(opts->fsroot, 0);
			if (IS_ERR(skp))
				return PTR_ERR(skp);
			sp->smk_root = skp;
		}
		if (opts->fstransmute) {
			skp = smk_import_entry(opts->fstransmute, 0);
			if (IS_ERR(skp))
				return PTR_ERR(skp);
			/* fstransmute also sets the root label */
			sp->smk_root = skp;
			transmute = true;
		}
	}

	/*
	 * Initialize the root inode.
	 */
	init_inode_smack(inode, sp->smk_root);

	if (transmute) {
		isp = smack_inode(inode);
		isp->smk_flags |= SMK_INODE_TRANSMUTE;
	}

	return 0;
}
| 0
|
328,978
|
/* Render an unknown constant-pool entry as "<ord>.0x<offset>.<type-name>"
 * in a newly allocated string (via r_str_newf). */
R_API char *r_bin_java_print_unknown_cp_stringify(RBinJavaCPTypeObj *obj) {
	return r_str_newf ("%d.0x%04"PFMT64x ".%s", obj->metas->ord,
		obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name);
}
| 0
|
512,682
|
/*
  Populate the predicant-to-values comparator for IN.

  args[0] becomes the predicant; every remaining argument is added as a
  value, with NULL literals skipped and recorded in have_null (per
  add_value_skip_null).  Afterwards found_types receives the set of
  comparison types found and arg_types_compatible is set when at most one
  comparison type is needed (type_cnt < 2).

  @retval true   on error (already reported)
  @retval false  on success
*/
bool Item_func_in::prepare_predicant_and_values(THD *thd, uint *found_types)
{
  uint type_cnt;
  have_null= false;

  add_predicant(this, 0);
  for (uint i= 1 ; i < arg_count; i++)
  {
    if (add_value_skip_null(Item_func_in::func_name(), this, i, &have_null))
      return true;
  }
  all_values_added(&m_comparator, &type_cnt, found_types);
  arg_types_compatible= type_cnt < 2;

#ifndef DBUG_OFF
  Predicant_to_list_comparator::debug_print(thd);
#endif
  return false;
}
| 0
|
197,998
|
// UnsortedSegmentJoin: joins (concatenating with separator_) the strings
// of `input` grouped by `segment_id` into `num_segments` output buckets.
void Compute(OpKernelContext* context) override {
  const Tensor& input = context->input(0);
  const TensorShape& input_shape = input.shape();
  const int32 input_dims = input_shape.dims();

  const Tensor& segment_id = context->input(1);
  const TensorShape& segment_id_shape = segment_id.shape();
  const int32 segment_dims = segment_id_shape.dims();

  const Tensor& num_segments_tensor = context->input(2);
  // BUGFIX: reading scalar<>() from an empty tensor touches memory out of
  // bounds; reject an empty num_segments tensor before dereferencing it,
  // and reject negative segment counts before sizing the output.
  OP_REQUIRES(context, num_segments_tensor.NumElements() != 0,
              errors::InvalidArgument("Number of segments cannot be empty."));
  auto num_segments = num_segments_tensor.scalar<NUM_SEGMENTS_TYPE>()();
  OP_REQUIRES(context, num_segments >= 0,
              errors::InvalidArgument(
                  "Number of segments must be non-negative but got ",
                  num_segments));

  OP_REQUIRES(context, segment_dims != 0,
              errors::InvalidArgument("Segment_id cannot have rank 0"));

  OP_REQUIRES(
      context, segment_dims <= input_dims,
      errors::OutOfRange("Invalid segment_id rank ", segment_dims,
                         " for input with ", input_dims, " dimension(s)"));
  // segment_id must be a prefix of the input shape.
  for (auto i = 0; i < segment_dims; i++) {
    OP_REQUIRES(
        context, segment_id_shape.dim_size(i) == input_shape.dim_size(i),
        errors::InvalidArgument(
            "Segment dimension is ", segment_id_shape.dim_size(i),
            " while input dimension is ", input_dims, " in rank ", i));
  }

  // Making output tensor.
  Tensor* output_tensor = nullptr;
  TensorShape output_shape =
      GetOutputShape(input_shape, segment_id_shape, num_segments);
  OP_REQUIRES_OK(context, context->allocate_output("output", output_shape,
                                                   &output_tensor));

  // Preparing flat tensors.
  auto output_flat = output_tensor->flat<tstring>();
  auto flat_segment_id = segment_id.flat<INDICES_TYPE>();
  auto flat_input = input.flat<tstring>();

  // Validate every segment id before any output writes.
  for (int i = 0; i < flat_segment_id.size(); i++) {
    OP_REQUIRES(
        context,
        ((flat_segment_id(i) < num_segments) && (flat_segment_id(i) >= 0)),
        errors::InvalidArgument(
            "segment_ids are not allowed to exceed num_segments or"
            " to have negative values."));
  }

  int64 big_stride;
  int64 small_stride;
  std::tie(big_stride, small_stride) =
      GetStrides<INDICES_TYPE>(input_shape, segment_id_shape);
  auto relative_offset_set =
      GetFlattenedRelativeOffsets<INDICES_TYPE>(small_stride, big_stride);
  for (auto start_offset = 0; start_offset < big_stride; start_offset++) {
    for (auto i = 0; i < relative_offset_set.size(); i++) {
      auto output_index = start_offset + flat_segment_id(i) * big_stride;
      auto offset = start_offset + relative_offset_set[i];
      if (output_flat(output_index).length() != 0)
        output_flat(output_index).append(separator_.c_str());
      output_flat(output_index).append(flat_input(offset));
    }
  }
}
| 1
|
310,163
|
/* Public (non-SP) entry point: forward to the screen-pointer variant of
 * init_extended_pair() using the current screen. */
init_extended_pair(int pair, int f, int b)
{
    return NCURSES_SP_NAME(init_extended_pair) (CURRENT_SCREEN, pair, f, b);
}
| 0
|
473,910
|
/* Character-class parser: state transition on seeing a class item (e.g. a
 * POSIX bracket or shorthand class).
 *
 * A class item may not terminate a range (CCS_RANGE -> error).  If a
 * plain value is still buffered (CCS_VALUE with a non-class type), commit
 * it first: single-byte values go into the bitset, code points into the
 * code-range buffer.  On success the state becomes CCS_VALUE with value
 * type CCV_CLASS.
 *
 * Returns 0 on success or a negative ONIGERR_* code. */
next_state_class(CClassNode* cc, OnigCodePoint* vs, enum CCVALTYPE* type,
		 enum CCSTATE* state, ScanEnv* env)
{
  int r;

  if (*state == CCS_RANGE)
    return ONIGERR_CHAR_CLASS_VALUE_AT_END_OF_RANGE;

  if (*state == CCS_VALUE && *type != CCV_CLASS) {
    if (*type == CCV_SB)
      BITSET_SET_BIT_CHKDUP(cc->bs, (int )(*vs));
    else if (*type == CCV_CODE_POINT) {
      r = add_code_range(&(cc->mbuf), env, *vs, *vs);
      if (r < 0) return r;
    }
  }

  *state = CCS_VALUE;
  *type  = CCV_CLASS;
  return 0;
}
| 0
|
455,291
|
/* Back end for the --wordexp interface: parse WORDS as a simple command,
   expand the resulting words (without performing variable assignments),
   and print the word count, byte count, then one expanded word per line.
   Returns a shell-style status: 0 on success, 126 for unusable input,
   127/1 propagated from non-local parser/expander exits. */
run_wordexp (words)
     char *words;
{
  int code, nw, nb;
  WORD_LIST *wl, *tl, *result;

  /* Catch non-local exits (errors, EOF, 'exit') thrown to top_level by
     the parser or expander. */
  code = setjmp_nosigs (top_level);

  if (code != NOT_JUMPED)
    {
      switch (code)
	{
	  /* Some kind of throw to top_level has occurred. */
	case FORCE_EOF:
	  return last_command_exit_value = 127;
	case ERREXIT:
	case EXITPROG:
	  return last_command_exit_value;
	case DISCARD:
	  return last_command_exit_value = 1;
	default:
	  command_error ("run_wordexp", CMDERR_BADJUMP, code, 0);
	}
    }

  /* Run it through the parser to get a list of words and expand them */
  if (words && *words)
    {
      with_input_from_string (words, "--wordexp");
      if (parse_command () != 0)
	return (126);
      if (global_command == 0)
	{
	  printf ("0\n0\n");
	  return (0);
	}
      /* Only a single simple command is acceptable input. */
      if (global_command->type != cm_simple)
	return (126);
      wl = global_command->value.Simple->words;
      if (protected_mode)
	for (tl = wl; tl; tl = tl->next)
	  tl->word->flags |= W_NOCOMSUB|W_NOPROCSUB;
      result = wl ? expand_words_no_vars (wl) : (WORD_LIST *)0;
    }
  else
    result = (WORD_LIST *)0;

  last_command_exit_value = 0;

  if (result == 0)
    {
      printf ("0\n0\n");
      return (0);
    }

  /* Count up the number of words and bytes, and print them.  Don't count
     the trailing NUL byte. */
  for (nw = nb = 0, wl = result; wl; wl = wl->next)
    {
      nw++;
      nb += strlen (wl->word->word);
    }
  printf ("%u\n%u\n", nw, nb);

  /* Print each word on a separate line.  This will have to be changed when
     the interface to glibc is completed. */
  for (wl = result; wl; wl = wl->next)
    printf ("%s\n", wl->word->word);

  return (0);
}
| 0
|
405,394
|
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* XFRM bundles are created with dst->obsolete set to
	 * DST_OBSOLETE_FORCE_CHK (negative) so every use is revalidated
	 * here: a parent route referenced by an XFRM dst can be obsoleted
	 * without the referencing bundles being told, and walking all of
	 * them at that point would be too much work.
	 *
	 * Example: XFRM dst A --> IPv4 dst X, where X is A's "xdst->route"
	 * (and "dst->path").  If X is marked obsolete, A never notices on
	 * its own -- stale_bundle() is what detects it.
	 *
	 * When a dst is removed from the fib tree it gets marked
	 * DST_OBSOLETE_DEAD, which forces stale_bundle() to fail for any
	 * bundle with that dst linked in.
	 */
	if (dst->obsolete >= 0 || stale_bundle(dst))
		return NULL;

	return dst;
}
| 0
|
344,778
|
/* Enable TCP_NODELAY on fd unless it is already set; failures are only
 * logged, never fatal. */
set_nodelay(int fd)
{
	int flag, current;
	socklen_t len = sizeof current;

	if (getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &current, &len) == -1) {
		debug("getsockopt TCP_NODELAY: %.100s", strerror(errno));
		return;
	}
	if (current == 1) {
		debug2("fd %d is TCP_NODELAY", fd);
		return;
	}
	flag = 1;
	debug2("fd %d setting TCP_NODELAY", fd);
	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof flag) == -1)
		error("setsockopt TCP_NODELAY: %.100s", strerror(errno));
}
| 0
|
512,339
|
// Always true: this item type represents a boolean literal.
bool is_bool_literal() const { return true; }
| 0
|
464,942
|
/*
 * Big5 (and CP950) byte stream to wide-character conversion filter.
 *
 * Single bytes <= 0x80 pass through; a byte in the DBCS lead range starts
 * a two-byte sequence whose second byte is mapped via big5_ucs_table (with
 * CP950 private-use-area handling).  Bytes that do not form a valid
 * sequence are emitted tagged MBFL_WCSGROUP_THROUGH.
 */
mbfl_filt_conv_big5_wchar(int c, mbfl_convert_filter *filter)
{
	int k;
	int c1, w, c2;

	switch (filter->status) {
	case 0:
		/* CP950 accepts a wider lead-byte range than plain Big5. */
		if (filter->from->no_encoding == mbfl_no_encoding_cp950) {
			c1 = 0x80;
		} else {
			c1 = 0xa0;
		}
		if (c >= 0 && c <= 0x80) {	/* latin */
			CK((*filter->output_function)(c, filter->data));
		} else if (c == 0xff) {
			CK((*filter->output_function)(0xf8f8, filter->data));
		} else if (c > c1 && c < 0xff) {	/* dbcs lead byte */
			filter->status = 1;
			filter->cache = c;
		} else {
			w = c & MBFL_WCSGROUP_MASK;
			w |= MBFL_WCSGROUP_THROUGH;
			CK((*filter->output_function)(w, filter->data));
		}
		break;

	case 1:	/* dbcs second byte */
		filter->status = 0;
		c1 = filter->cache;
		/* BUGFIX: use logical || -- the two trail-byte sub-ranges are
		 * alternatives; the old bitwise | happened to give the same
		 * truth value but obscured the intent. */
		if ((c > 0x39 && c < 0x7f) || (c > 0xa0 && c < 0xff)) {
			if (c < 0x7f){
				w = (c1 - 0xa1)*157 + (c - 0x40);
			} else {
				w = (c1 - 0xa1)*157 + (c - 0xa1) + 0x3f;
			}
			if (w >= 0 && w < big5_ucs_table_size) {
				w = big5_ucs_table[w];
			} else {
				w = 0;
			}

			if (filter->from->no_encoding == mbfl_no_encoding_cp950) {
				/* PUA for CP950 */
				if (w <= 0 && is_in_cp950_pua(c1, c)) {
					c2 = c1 << 8 | c;

					for (k = 0; k < sizeof(cp950_pua_tbl)/(sizeof(unsigned short)*4); k++) {
						if (c2 >= cp950_pua_tbl[k][2] && c2 <= cp950_pua_tbl[k][3]) {
							break;
						}
					}

					if ((cp950_pua_tbl[k][2] & 0xff) == 0x40) {
						w = 157*(c1 - (cp950_pua_tbl[k][2]>>8)) + c - (c >= 0xa1 ? 0x62 : 0x40)
							+ cp950_pua_tbl[k][0];
					} else {
						w = c2 - cp950_pua_tbl[k][2] + cp950_pua_tbl[k][0];
					}
				}
			}

			if (w <= 0) {
				/* no mapping: pass the raw pair through, tagged */
				w = (c1 << 8) | c;
				w &= MBFL_WCSPLANE_MASK;
				w |= MBFL_WCSPLANE_BIG5;
			}
			CK((*filter->output_function)(w, filter->data));
		} else if ((c >= 0 && c < 0x21) || c == 0x7f) {	/* CTLs */
			CK((*filter->output_function)(c, filter->data));
		} else {
			w = (c1 << 8) | c;
			w &= MBFL_WCSGROUP_MASK;
			w |= MBFL_WCSGROUP_THROUGH;
			CK((*filter->output_function)(w, filter->data));
		}
		break;

	default:
		filter->status = 0;
		break;
	}

	return c;
}
| 0
|
336,582
|
/* Toggle playback compression; always succeeds and returns 0. */
SPICE_GNUC_VISIBLE int spice_server_set_playback_compression(SpiceServer *reds, int enable)
{
    const int on = enable ? 1 : 0;

    reds->config->playback_compression = on;
    snd_set_playback_compression(enable);
    return 0;
}
| 0
|
225,909
|
/* Destructor for the CodingConstraints ('ccst') box. */
void ccst_box_del(GF_Box *s)
{
	GF_CodingConstraintsBox *cstr = (GF_CodingConstraintsBox *)s;
	if (!cstr) return;
	gf_free(cstr);
}
| 0
|
277,474
|
/* Look up tag value tag_arr[1] of tag id tag_arr[0] in an INDX entry.
 * On success *tagvalue is set and MOBI_SUCCESS returned; otherwise the
 * entry is left untouched and MOBI_DATA_CORRUPT (or MOBI_INIT_FAILED for
 * a NULL entry) is returned. */
MOBI_RET mobi_get_indxentry_tagvalue(uint32_t *tagvalue, const MOBIIndexEntry *entry, const unsigned tag_arr[]) {
    if (entry == NULL) {
        debug_print("%s", "INDX entry not initialized\n");
        return MOBI_INIT_FAILED;
    }
    for (size_t j = 0; j < entry->tags_count; j++) {
        if (entry->tags[j].tagid != tag_arr[0]) {
            continue;
        }
        /* first matching tag id decides; bail out if the value index
           is out of range */
        if (entry->tags[j].tagvalues_count > tag_arr[1]) {
            *tagvalue = entry->tags[j].tagvalues[tag_arr[1]];
            return MOBI_SUCCESS;
        }
        break;
    }
    return MOBI_DATA_CORRUPT;
}
| 0
|
273,060
|
/* Minimal clock_getres() shim: advertise a fixed 1000 ns resolution for
 * every clock id.  Returns 0, or -1 when res is NULL. */
clock_getres(clockid_t clock_id, struct timespec *res)
{
	if (res == NULL)
		return -1;

	res->tv_nsec = 1000;
	res->tv_sec = 0;
	return 0;
}
| 0
|
244,296
|
/* Parse the payload of an AC-3/E-AC-3 config box: delegate to the ODF
 * parser, with cfg.is_ec3 selecting the enhanced (E-AC-3) variant. */
GF_Err dac3_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s;
	if (ptr == NULL) return GF_BAD_PARAM;
	return gf_odf_ac3_config_parse_bs(bs, ptr->cfg.is_ec3, &ptr->cfg);
}
| 0
|
317,185
|
/* Check whether subject `sid` may receive this file descriptor when it
 * refers to a BPF map or program; other file types always pass. */
static int bpf_fd_pass(struct file *file, u32 sid)
{
	struct bpf_security_struct *bpfsec;

	if (file->f_op == &bpf_map_fops) {
		struct bpf_map *map = file->private_data;

		bpfsec = map->security;
		return avc_has_perm(&selinux_state,
				    sid, bpfsec->sid, SECCLASS_BPF,
				    bpf_map_fmode_to_av(file->f_mode), NULL);
	}
	if (file->f_op == &bpf_prog_fops) {
		struct bpf_prog *prog = file->private_data;

		bpfsec = prog->aux->security;
		return avc_has_perm(&selinux_state,
				    sid, bpfsec->sid, SECCLASS_BPF,
				    BPF__PROG_RUN, NULL);
	}
	return 0;
}
| 0
|
513,040
|
/*
  Prepare evaluation of this multiple equality: pick the type handler of
  the first member item and create the cmp_item used to compare members.

  NOTE(review): using the first item presumes all members share a
  comparison type -- confirm against Item_equal construction.

  @returns false on success, true if the cmp_item could not be created
*/
bool Item_equal::fix_length_and_dec()
{
  Item *item= get_first(NO_PARTICULAR_TAB, NULL);
  const Type_handler *handler= item->type_handler();
  eval_item= handler->make_cmp_item(current_thd, item->collation.collation);
  return eval_item == NULL;
}
| 0
|
90,863
|
// Forward an "origin no longer in use" notification to the quota manager.
void NotifyOriginNoLongerInUse(const GURL& origin) {
  quota_manager_->NotifyOriginNoLongerInUse(origin);
}
| 0
|
312,467
|
/*
 * Read an error file into a quickfix/location list and return OK/FAIL.
 * With a window pointer the window's location list is used (allocating it
 * if needed); otherwise the global quickfix list.
 */
qf_init(win_T *wp,
	char_u *efile,
	char_u *errorformat,
	int newlist,			// TRUE: start a new error list
	char_u *qf_title,
	char_u *enc)
{
    qf_info_T *qi;

    if (wp == NULL)
	qi = &ql_info;
    else
    {
	qi = ll_get_or_alloc_list(wp);
	if (qi == NULL)
	    return FAIL;
    }

    return qf_init_ext(qi, qi->qf_curlist, efile, curbuf, NULL, errorformat,
	    newlist, (linenr_T)0, (linenr_T)0, qf_title, enc);
}
| 0
|
180,235
|
// Create a general Error with `message` and throw it on `isolate`,
// returning the thrown value.
v8::Handle<v8::Value> V8ThrowException::throwGeneralError(v8::Isolate* isolate, const String& message)
{
    return V8ThrowException::throwException(
        V8ThrowException::createGeneralError(isolate, message), isolate);
}
|
250,690
|
// Persist the uploaded file under `fileName`; delegates to the pimpl.
// NOTE(review): return value semantics come from HttpFileImpl::saveAs --
// presumably 0 on success; confirm there.
int HttpFile::saveAs(const std::string &fileName) const
{
    return implPtr_->saveAs(fileName);
}
| 0
|
205,806
|
/*
 * Open the body of procedure 'pi' in an external editor and read the
 * edited text back into pi->data.s.body.
 *
 * The editor is $EDITOR, then $VISUAL, defaulting to "vi"; only
 * LANG_SINGULAR procedures can be edited.  The body is exchanged through
 * a private temporary file which is removed afterwards.
 */
void sdb_edit(procinfo *pi)
{
  /* BUGFIX: mkstemp() creates the file with an unpredictable name and
   * O_EXCL, avoiding the symlink race of the old pid-based /tmp name. */
  char filename[] = "/tmp/sdXXXXXX";
  int fd = mkstemp(filename);
  if (fd == -1)
  {
    Print("cannot open %s\n",filename);
    return;
  }
  FILE *fp=fdopen(fd,"w");
  if (fp==NULL)
  {
    Print("cannot open %s\n",filename);
    close(fd);
    si_unlink(filename);
    return;
  }
  if (pi->language!= LANG_SINGULAR)
  {
    Print("cannot edit type %d\n",pi->language);
    fclose(fp);
    fp=NULL;
  }
  else
  {
    const char *editor=getenv("EDITOR");
    if (editor==NULL)
      editor=getenv("VISUAL");
    if (editor==NULL)
      editor="vi";
    editor=omStrDup(editor);

    /* load the procedure body from its library if not yet present */
    if (pi->data.s.body==NULL)
    {
      iiGetLibProcBuffer(pi);
      if (pi->data.s.body==NULL)
      {
        PrintS("cannot get the procedure body\n");
        fclose(fp);
        si_unlink(filename);
        return;
      }
    }
    fwrite(pi->data.s.body,1,strlen(pi->data.s.body),fp);
    fclose(fp);

    int pid=fork();
    if (pid==-1)
    {
      /* BUGFIX: fork failure was previously caught by the pid!=0 branch
       * and waited on; the "cannot fork" message was unreachable. */
      PrintS("cannot fork\n");
    }
    else if (pid==0)
    {
      /* child: run the editor on the temporary file */
      if (strchr(editor,' ')==NULL)
      {
        execlp(editor,editor,filename,NULL);
        Print("cannot exec %s\n",editor);
      }
      else
      {
        char *p=(char *)omAlloc(strlen(editor)+strlen(filename)+2);
        sprintf(p,"%s %s",editor,filename);
        system(p);
      }
      exit(0);
    }
    else
    {
      /* parent: wait until the editor finishes */
      si_wait(&pid);
    }

    /* read the (possibly modified) file back into the procedure body */
    fp=fopen(filename,"r");
    if (fp==NULL)
    {
      Print("cannot read from %s\n",filename);
    }
    else
    {
      fseek(fp,0L,SEEK_END);
      long len=ftell(fp);
      fseek(fp,0L,SEEK_SET);

      omFree((ADDRESS)pi->data.s.body);
      pi->data.s.body=(char *)omAlloc((int)len+1);
      myfread( pi->data.s.body, len, 1, fp);
      pi->data.s.body[len]='\0';
      fclose(fp);
    }
  }
  si_unlink(filename);
}
| 1
|
384,124
|
/*
 * Write the start of 'element' to the writer's iostream: "<name", any
 * namespace declarations not already in scope, xml:lang, and the
 * attributes -- followed by '>' unless auto_empty is set (the caller then
 * chooses between "/>" and ">").
 *
 * Returns 0 on success, non-zero on allocation failure.
 */
raptor_xml_writer_start_element_common(raptor_xml_writer* xml_writer,
                                       raptor_xml_element* element,
                                       int auto_empty)
{
  raptor_iostream* iostr = xml_writer->iostr;
  raptor_namespace_stack *nstack = xml_writer->nstack;
  int depth = xml_writer->depth;
  int auto_indent = XML_WRITER_AUTO_INDENT(xml_writer);
  struct nsd *nspace_declarations = NULL;
  size_t nspace_declarations_count = 0;
  unsigned int i;

  if(nstack) {
    /* Worst case: one declaration per attribute name and value, plus the
     * element's own namespace, explicitly declared namespaces and
     * xml:lang. */
    int nspace_max_count = element->attribute_count * 2; /* attr and value */
    if(element->name->nspace)
      nspace_max_count++;
    if(element->declared_nspaces)
      nspace_max_count += raptor_sequence_size(element->declared_nspaces);
    if(element->xml_language)
      nspace_max_count++;

    nspace_declarations = RAPTOR_CALLOC(struct nsd*, nspace_max_count,
                                        sizeof(struct nsd));
    if(!nspace_declarations)
      return 1;
  }

  if(element->name->nspace) {
    if(nstack && !raptor_namespaces_namespace_in_scope(nstack, element->name->nspace)) {
      nspace_declarations[0].declaration=
        raptor_namespace_format_as_xml(element->name->nspace,
                                       &nspace_declarations[0].length);
      if(!nspace_declarations[0].declaration)
        goto error;
      nspace_declarations[0].nspace = element->name->nspace;
      nspace_declarations_count++;
    }
  }

  if(nstack && element->attributes) {
    for(i = 0; i < element->attribute_count; i++) {
      /* qname */
      if(element->attributes[i]->nspace) {
        /* Check if we need a namespace declaration attribute */
        if(nstack &&
           !raptor_namespaces_namespace_in_scope(nstack, element->attributes[i]->nspace) && element->attributes[i]->nspace != element->name->nspace) {
          /* not in scope and not same as element (so already going to be declared)*/
          unsigned int j;
          int declare_me = 1;

          /* check it wasn't an earlier declaration too */
          for(j = 0; j < nspace_declarations_count; j++)
            /* BUGFIX: compare against the CURRENT attribute's namespace;
             * the old code indexed attributes[] with j, which reads past
             * the attribute array and defeats the duplicate check. */
            if(nspace_declarations[j].nspace == element->attributes[i]->nspace) {
              declare_me = 0;
              break;
            }

          if(declare_me) {
            nspace_declarations[nspace_declarations_count].declaration=
              raptor_namespace_format_as_xml(element->attributes[i]->nspace,
                                             &nspace_declarations[nspace_declarations_count].length);
            if(!nspace_declarations[nspace_declarations_count].declaration)
              goto error;
            nspace_declarations[nspace_declarations_count].nspace = element->attributes[i]->nspace;
            nspace_declarations_count++;
          }
        }
      }

      /* Add the attribute's value */
      nspace_declarations[nspace_declarations_count].declaration=
        raptor_qname_format_as_xml(element->attributes[i],
                                   &nspace_declarations[nspace_declarations_count].length);
      if(!nspace_declarations[nspace_declarations_count].declaration)
        goto error;
      nspace_declarations[nspace_declarations_count].nspace = NULL;
      nspace_declarations_count++;
    }
  }

  if(nstack && element->declared_nspaces &&
     raptor_sequence_size(element->declared_nspaces) > 0) {
    for(i = 0; i< (unsigned int)raptor_sequence_size(element->declared_nspaces); i++) {
      raptor_namespace* nspace = (raptor_namespace*)raptor_sequence_get_at(element->declared_nspaces, i);
      unsigned int j;
      int declare_me = 1;

      /* check it wasn't an earlier declaration too */
      for(j = 0; j < nspace_declarations_count; j++)
        if(nspace_declarations[j].nspace == nspace) {
          declare_me = 0;
          break;
        }

      if(declare_me) {
        nspace_declarations[nspace_declarations_count].declaration=
          raptor_namespace_format_as_xml(nspace,
                                         &nspace_declarations[nspace_declarations_count].length);
        if(!nspace_declarations[nspace_declarations_count].declaration)
          goto error;
        nspace_declarations[nspace_declarations_count].nspace = nspace;
        nspace_declarations_count++;
      }
    }
  }

  if(nstack && element->xml_language) {
    size_t lang_len = strlen(RAPTOR_GOOD_CAST(char*, element->xml_language));
#define XML_LANG_PREFIX_LEN 10
    size_t buf_length = XML_LANG_PREFIX_LEN + lang_len + 1;
    unsigned char* buffer = RAPTOR_MALLOC(unsigned char*, buf_length + 1);
    const char quote = '\"';
    unsigned char* p;

    /* BUGFIX: this allocation was previously used unchecked */
    if(!buffer)
      goto error;

    memcpy(buffer, "xml:lang=\"", XML_LANG_PREFIX_LEN);
    p = buffer + XML_LANG_PREFIX_LEN;
    p += raptor_xml_escape_string(xml_writer->world,
                                  element->xml_language, lang_len,
                                  p, buf_length, quote);
    *p++ = quote;
    *p = '\0';

    nspace_declarations[nspace_declarations_count].declaration = buffer;
    nspace_declarations[nspace_declarations_count].length = buf_length;
    nspace_declarations[nspace_declarations_count].nspace = NULL;
    nspace_declarations_count++;
  }

  raptor_iostream_write_byte('<', iostr);

  if(element->name->nspace && element->name->nspace->prefix_length > 0) {
    raptor_iostream_counted_string_write((const char*)element->name->nspace->prefix,
                                         element->name->nspace->prefix_length,
                                         iostr);
    raptor_iostream_write_byte(':', iostr);
  }
  raptor_iostream_counted_string_write((const char*)element->name->local_name,
                                       element->name->local_name_length,
                                       iostr);

  /* declare namespaces and attributes */
  if(nspace_declarations_count) {
    int need_indent = 0;

    /* sort them into the canonical order */
    qsort((void*)nspace_declarations,
          nspace_declarations_count, sizeof(struct nsd),
          raptor_xml_writer_nsd_compare);

    /* declare namespaces first */
    for(i = 0; i < nspace_declarations_count; i++) {
      if(!nspace_declarations[i].nspace)
        continue;

      if(auto_indent && need_indent) {
        /* indent attributes */
        raptor_xml_writer_newline(xml_writer);
        xml_writer->depth++;
        raptor_xml_writer_indent(xml_writer);
        xml_writer->depth--;
      }
      raptor_iostream_write_byte(' ', iostr);
      raptor_iostream_counted_string_write((const char*)nspace_declarations[i].declaration,
                                           nspace_declarations[i].length,
                                           iostr);
      RAPTOR_FREE(char*, nspace_declarations[i].declaration);
      nspace_declarations[i].declaration = NULL;
      need_indent = 1;

      if(raptor_namespace_stack_start_namespace(nstack,
                                                (raptor_namespace*)nspace_declarations[i].nspace,
                                                depth))
        goto error;
    }

    /* declare attributes */
    for(i = 0; i < nspace_declarations_count; i++) {
      if(nspace_declarations[i].nspace)
        continue;

      if(auto_indent && need_indent) {
        /* indent attributes */
        raptor_xml_writer_newline(xml_writer);
        xml_writer->depth++;
        raptor_xml_writer_indent(xml_writer);
        xml_writer->depth--;
      }
      raptor_iostream_write_byte(' ', iostr);
      raptor_iostream_counted_string_write((const char*)nspace_declarations[i].declaration,
                                           nspace_declarations[i].length,
                                           iostr);
      need_indent = 1;

      RAPTOR_FREE(char*, nspace_declarations[i].declaration);
      nspace_declarations[i].declaration = NULL;
    }
  }

  if(!auto_empty)
    raptor_iostream_write_byte('>', iostr);

  if(nstack)
    RAPTOR_FREE(stringarray, nspace_declarations);

  return 0;

  /* Clean up nspace_declarations on error */
  error:
  for(i = 0; i < nspace_declarations_count; i++) {
    if(nspace_declarations[i].declaration)
      RAPTOR_FREE(char*, nspace_declarations[i].declaration);
  }
  RAPTOR_FREE(stringarray, nspace_declarations);

  return 1;
}
| 0
|
387,866
|
// Return the method whose *original* idnum is 'idnum', or NULL if none.
// Fast path: a method usually still sits at the slot matching its
// original idnum; obsolete (redefined) methods may have moved, so fall
// back to a linear scan.
Method* InstanceKlass::method_with_orig_idnum(int idnum) {
  if (idnum >= methods()->length()) {
    return NULL;
  }
  Method* candidate = methods()->at(idnum);
  if (candidate != NULL && candidate->orig_method_idnum() == idnum) {
    return candidate;
  }
  // Slot does not match the original idnum: scan all methods.
  for (int slot = 0; slot < methods()->length(); ++slot) {
    Method* cur = methods()->at(slot);
    if (cur->orig_method_idnum() == idnum) {
      return cur;
    }
  }
  // None found, return null for the caller to handle.
  return NULL;
}
| 0
|
343,314
|
void dopass(char *password)
{
static unsigned int tapping;
char *hd;
#if !defined(MINIMAL) && defined(HAVE_GETGROUPS) && defined(DISPLAY_GROUPS)
gid_t *groups = NULL;
int ngroups;
# if defined(NGROUPS_MAX) && NGROUPS_MAX > 0
int ngroups_max = NGROUPS_MAX; /* Use the compile time value */
# else
int ngroups_max = 1; /* use a sane default */
# endif
#endif
if (loggedin != 0) {
if (guest != 0) {
addreply_noformat(230, MSG_NO_PASSWORD_NEEDED);
#ifdef LOG_ANON_EMAIL
snprintf(account, sizeof account, "ftp: <%s> ", password);
#endif
} else {
addreply_noformat(530, MSG_CANT_DO_TWICE);
}
return;
}
if (*account == 0) {
addreply_noformat(530, MSG_WHOAREYOU);
return;
}
if (strlen(password) >= MAX_PASSWORD_LEN) {
addreply_noformat(530, MSG_LINE_TOO_LONG);
return;
}
authresult = pw_check(account, password, &ctrlconn, &peer);
pure_memzero(password, strlen(password));
if (authresult.auth_ok != 1) {
tapping++;
randomsleep(tapping);
addreply_noformat(530, MSG_AUTH_FAILED);
doreply();
if (tapping > MAX_PASSWD_TRIES) {
logfile(LOG_ERR, MSG_AUTH_TOOMANY);
_EXIT(EXIT_FAILURE);
}
logfile(LOG_WARNING, MSG_AUTH_FAILED_LOG, account);
return;
}
if (authresult.uid < useruid) {
logfile(LOG_WARNING, MSG_ACCOUNT_DISABLED " (uid < %lu)",
account, (unsigned long) useruid);
randomsleep(tapping);
if (tapping >= MAX_PASSWD_TRIES) {
addreply_noformat(530, MSG_AUTH_FAILED);
doreply();
_EXIT(EXIT_FAILURE);
}
addreply_noformat(530, MSG_NOTRUST);
doreply();
return;
}
#ifdef PER_USER_LIMITS
if (per_user_max > 0U && ftpwho_read_count(account) >= per_user_max) {
addreply(421, MSG_PERUSER_MAX, (unsigned long) per_user_max);
doreply();
_EXIT(1);
}
#endif
/* Add username and primary group to the uid/gid cache */
(void) getname(authresult.uid);
(void) getgroup(authresult.gid);
if (
#if defined(WITH_LDAP) || defined(WITH_MYSQL) || defined(WITH_PGSQL) || defined(WITH_PUREDB) || defined(WITH_EXTAUTH)
doinitsupgroups(NULL, authresult.uid, authresult.gid) != 0
#else
doinitsupgroups(account, (uid_t) -1, authresult.gid) != 0
#endif
) {
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
(void) 0;
#else
die(421, LOG_WARNING, MSG_NOTRUST);
#endif
}
/* handle /home/user/./public_html form */
if ((root_directory = strdup(authresult.dir)) == NULL) {
die_mem();
}
hd = strstr(root_directory, "/./");
if (hd != NULL) {
if (chrooted != 0) {
die(421, LOG_DEBUG, MSG_CANT_DO_TWICE);
}
if (create_home_and_chdir(root_directory)) {
die(421, LOG_ERR, MSG_NO_HOMEDIR);
}
*++hd = 0;
hd++;
if (chroot(root_directory) || chdir(hd)) {
die(421, LOG_ERR, MSG_NO_HOMEDIR);
}
chrooted = 1;
#ifdef RATIOS
if (ratio_for_non_anon == 0) {
ratio_upload = ratio_download = 0U;
}
if (check_trustedgroup(authresult.uid, authresult.gid) != 0) {
dot_write_ok = dot_read_ok = 1;
ratio_upload = ratio_download = 0U;
keepallfiles = 0;
}
#endif
} else {
(void) free(root_directory);
root_directory = (char *) "/";
if (create_home_and_chdir(authresult.dir)) {
die(421, LOG_ERR, MSG_NO_HOMEDIR);
}
}
if (getcwd(wd, sizeof wd - (size_t) 1U) == NULL) {
wd[0] = '/';
wd[1] = 0;
}
#ifndef NON_ROOT_FTP
if (setgid(authresult.gid) || setegid(authresult.gid)) {
_EXIT(EXIT_FAILURE);
}
#endif
if (check_trustedgroup(authresult.uid, authresult.gid) != 0) {
userchroot = 0;
dot_write_ok = dot_read_ok = 1;
keepallfiles = 0;
#ifdef RATIOS
ratio_upload = ratio_download = 0U;
#endif
#ifdef QUOTAS
user_quota_files = user_quota_size = ULONG_LONG_MAX;
#endif
}
#ifdef QUOTAS
if (hasquota() == 0) {
userchroot = 1;
}
#endif
if (loggedin == 0) {
candownload = 1; /* real users can always download */
}
#ifdef THROTTLING
if ((throttling == 2) || (guest != 0 && throttling == 1)) {
addreply_noformat(0, MSG_BANDWIDTH_RESTRICTED);
(void) nice(NICE_VALUE);
} else {
throttling_delay = throttling_bandwidth_dl =
throttling_bandwidth_ul = 0UL;
}
#endif
#if !defined(MINIMAL) && defined(HAVE_GETGROUPS) && defined(DISPLAY_GROUPS)
# ifdef SAFE_GETGROUPS_0
ngroups = getgroups(0, NULL);
if (ngroups > ngroups_max) {
ngroups_max = ngroups;
}
# elif defined(_SC_NGROUPS_MAX)
/* get the run time value */
ngroups = (int) sysconf(_SC_NGROUPS_MAX);
if (ngroups > ngroups_max) {
ngroups_max = ngroups;
}
# endif
if ((groups = malloc(sizeof(GETGROUPS_T) * ngroups_max)) == NULL) {
die_mem();
}
ngroups = getgroups(ngroups_max, groups);
if (guest == 0 && ngroups > 0) {
char reply[80 + MAX_USER_LENGTH];
const char *q;
size_t p;
if (SNCHECK(snprintf(reply, sizeof reply,
MSG_USER_GROUP_ACCESS ": ", account),
sizeof reply)) {
_EXIT(EXIT_FAILURE);
}
p = strlen(reply);
do {
ngroups--;
if ((ngroups != 0 && groups[ngroups] == groups[0]) ||
(q = getgroup(groups[ngroups])) == NULL) {
continue;
}
if (p + strlen(q) > 75) {
reply[p] = 0;
addreply(0, "%s", reply);
*reply = 0;
p = (size_t) 0U;
}
reply[p++] = ' ';
while (*q != 0 && p < sizeof reply - (size_t) 1U) {
reply[p++] = *q++;
}
} while (ngroups > 0);
reply[p] = 0;
addreply(0, "%s", reply);
}
free(groups);
#endif
if (guest == 0 && allowfxp == 1) {
addreply_noformat(0, MSG_FXP_SUPPORT);
}
#ifdef RATIOS
if (ratio_for_non_anon != 0 && ratio_upload > 0) {
addreply(0, MSG_RATIO, ratio_upload, ratio_download);
}
#endif
if (userchroot != 0 && chrooted == 0) {
if (chdir(wd) || chroot(wd)) { /* should never fail */
die(421, LOG_ERR, MSG_CHROOT_FAILED);
}
chrooted = 1;
#ifdef RATIOS
if (ratio_for_non_anon == 0) {
ratio_upload = ratio_download = 0U;
}
#endif
{
const size_t rd_len = strlen(wd) + sizeof "/";
if ((root_directory = malloc(rd_len)) == NULL) {
die_mem();
}
snprintf(root_directory, rd_len, "%s/", wd);
}
wd[0] = '/';
wd[1] = 0;
if (chdir(wd)) {
_EXIT(EXIT_FAILURE);
}
addreply(230, MSG_CURRENT_RESTRICTED_DIR_IS, wd);
} else {
addreply(230, MSG_CURRENT_DIR_IS, wd);
}
#ifndef NON_ROOT_FTP
disablesignals();
# ifndef WITHOUT_PRIVSEP
if (setuid(authresult.uid) != 0 || seteuid(authresult.uid) != 0) {
_EXIT(EXIT_FAILURE);
}
# else
if (seteuid(authresult.uid) != 0) {
_EXIT(EXIT_FAILURE);
}
# ifdef USE_CAPABILITIES
drop_login_caps();
# endif
# endif
enablesignals();
#endif
logfile(LOG_INFO, MSG_IS_NOW_LOGGED_IN, account);
#ifdef FTPWHO
if (shm_data_cur != NULL) {
ftpwho_lock();
strncpy(shm_data_cur->account, account,
sizeof shm_data_cur->account - (size_t) 1U);
shm_data_cur->account[sizeof shm_data_cur->account - 1U] = 0;
ftpwho_unlock();
state_needs_update = 1;
}
#endif
loggedin = 1;
if (getcwd(wd, sizeof wd - (size_t) 1U) == NULL) {
wd[0] = '/';
wd[1] = 0;
}
#ifndef MINIMAL
dobanner(0);
#endif
#ifdef QUOTAS
displayquota(NULL);
#endif
#ifdef WITH_BONJOUR
refreshManager();
#endif
}
| 0
|
384,823
|
f_browsedir(typval_T *argvars UNUSED, typval_T *rettv)
{
# ifdef FEAT_BROWSE
    char_u	buf[NUMBUFLEN];
    char_u	*title;
    char_u	*initdir;

    // In Vim9 script both arguments must be strings; bail out early
    // (NOTE: rettv->v_type is intentionally left untouched on this path,
    // matching the original behavior).
    if (in_vim9script()
	    && (check_for_string_arg(argvars, 0) == FAIL
		|| check_for_string_arg(argvars, 1) == FAIL))
	return;

    title = tv_get_string_chk(&argvars[0]);
    initdir = tv_get_string_buf_chk(&argvars[1], buf);

    // NULL when either argument could not be converted to a string.
    rettv->vval.v_string = (title == NULL || initdir == NULL)
	? NULL
	: do_browse(BROWSE_DIR, title, NULL, NULL, initdir, NULL, curbuf);
# else
    // Without the browse feature the result is always an empty string value.
    rettv->vval.v_string = NULL;
# endif
    rettv->v_type = VAR_STRING;
}
| 0
|
333,091
|
report_state(char *action,
	regsub_T *sub,
	nfa_state_T *state,
	int lid,
	nfa_pim_T *pim)
{
    int start_col;

    // Start column of the first submatch; -1 when the sub is not in use.
    if (sub->in_use <= 0)
	start_col = -1;
    else
	start_col = REG_MULTI
	    ? sub->list.multi[0].start_col
	    : (int)(sub->list.line[0].start - rex.line);

    nfa_set_code(state->c);
    fprintf(log_fd, "> %s state %d to list %d. char %d: %s (start col %d)%s\n",
	    action, abs(state->id), lid, state->c, code, start_col,
	    pim_info(pim));
}
| 0
|
236,207
|
void dims_box_del(GF_Box *s)
{
	// Tear down the shared sample-entry fields, then free the box itself.
	GF_SampleEntryBox *entry = (GF_SampleEntryBox *)s;

	gf_isom_sample_entry_predestroy(entry);
	gf_free(entry);
}
| 0
|
430,344
|
struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos)
{
	struct hlist_node *node;

	/* Count down to the requested position while walking the list. */
	hlist_for_each(node, head) {
		if (pos == 0)
			return node;
		pos--;
	}
	/* Fewer than pos+1 entries in the list. */
	return NULL;
}
| 0
|
261,885
|
njs_string_hex(njs_vm_t *vm, njs_value_t *value, const njs_str_t *src)
{
    size_t     length;
    njs_str_t  dst;

    /* Character length of the result; byte length is stored in dst.length. */
    length = njs_encode_hex_length(src, &dst.length);

    dst.start = njs_string_alloc(vm, value, dst.length, length);
    if (dst.start == NULL) {
        return NJS_ERROR;
    }

    njs_encode_hex(&dst, src);

    return NJS_OK;
}
| 0
|
294,544
|
rt__valid_commercial_p(VALUE y, VALUE w, VALUE d, VALUE sg)
{
    VALUE nth, jd;
    int ry, rw, rd, rjd, ns;

    /* Invalid commercial (ISO week) dates yield nil. */
    if (!valid_commercial_p(y, NUM2INT(w), NUM2INT(d), NUM2DBL(sg),
			    &nth, &ry, &rw, &rd, &rjd, &ns))
	return Qnil;

    /* Otherwise encode the chronological Julian day number. */
    encode_jd(nth, rjd, &jd);
    return jd;
}
| 0
|
220,407
|
mrb_ary_splat(mrb_state *mrb, mrb_value v)
{
  struct RArray *dup;
  mrb_value converted;

  /* Already an array: return a shallow copy. */
  if (mrb_array_p(v)) {
    dup = ary_dup(mrb, mrb_ary_ptr(v));
    return mrb_obj_value(dup);
  }

  /* Not convertible: wrap the value in a one-element array. */
  if (!mrb_respond_to(mrb, v, MRB_SYM(to_a))) {
    return mrb_ary_new_from_values(mrb, 1, &v);
  }

  converted = mrb_funcall_id(mrb, v, MRB_SYM(to_a), 0);
  if (mrb_nil_p(converted)) {
    /* to_a declined (returned nil): treat it like a plain value. */
    return mrb_ary_new_from_values(mrb, 1, &v);
  }

  mrb_ensure_array_type(mrb, converted);
  dup = ary_dup(mrb, mrb_ary_ptr(converted));
  return mrb_obj_value(dup);
}
| 0
|
222,528
|
string FunctionLibraryRuntime::ExecutorType(const InstantiateOptions& options,
                                            AttrSlice attrs) {
  // Explicit instantiation option takes precedence over the function
  // attribute; an empty string means "no executor type specified".
  if (!options.executor_type.empty()) {
    return options.executor_type;
  }
  const AttrValue* executor_attr = attrs.Find(kExecutorAttr);
  if (executor_attr != nullptr) {
    return executor_attr->s();
  }
  return string();
}
| 0
|
301,501
|
suggest_try_soundalike_prep(void)
{
    int		lpi;

    // For every language that supports sound folding and has a .sug file
    // loaded, prepare the hashtable used by add_sound_suggest().
    for (lpi = 0; lpi < curwin->w_s->b_langp.ga_len; ++lpi)
    {
	langp_T	*lp = LANGP_ENTRY(curwin->w_s->b_langp, lpi);
	slang_T	*slang = lp->lp_slang;

	if (slang->sl_sal.ga_len > 0 && slang->sl_sbyts != NULL)
	    hash_init(&slang->sl_sounddone);
    }
}
| 0
|
256,394
|
/*
 * Copy the data held in the bio's pages into the user iov_iter.
 * Note that @iter is taken by value, so the caller's iterator state is
 * not advanced.  Returns 0 on success, -EFAULT on a short copy.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, iter_all) {
ssize_t ret;
ret = copy_page_to_iter(bvec->bv_page,
bvec->bv_offset,
bvec->bv_len,
&iter);
/* Destination exhausted: stop copying, not an error. */
if (!iov_iter_count(&iter))
break;
/* Short copy with room left means a fault in the destination. */
if (ret < bvec->bv_len)
return -EFAULT;
}
return 0;
}
| 0
|
301,411
|
static int vfswrap_stat(vfs_handle_struct *handle,
			struct smb_filename *smb_fname)
{
	int result = -1;

	START_PROFILE(syscall_stat);

	if (smb_fname->stream_name) {
		/* A plain POSIX stat cannot address a named stream. */
		errno = ENOENT;
	} else {
		result = sys_stat(smb_fname->base_name, &smb_fname->st,
				  lp_fake_dir_create_times(SNUM(handle->conn)));
	}

	END_PROFILE(syscall_stat);
	return result;
}
| 0
|
234,133
|
/* Look up entry FILEIDX in the file-name table of the DWARF line-number
   program header at LINE_OFFSET in the .debug_line section.  Returns a
   pointer to the file name inside the section, and stores the associated
   directory name (or NULL when the directory index is 0) in *DIR_NAME.
   Returns NULL for any malformed, truncated or out-of-range input.
   Only DWARF versions 2-4 are handled (v5 uses a different layout).  */
get_line_filename_and_dirname (dwarf_vma line_offset,
dwarf_vma fileidx,
unsigned char **dir_name)
{
struct dwarf_section *section = &debug_displays [line].section;
unsigned char *hdrptr, *dirtable, *file_name;
unsigned int offset_size;
unsigned int version, opcode_base;
dwarf_vma length, diridx;
const unsigned char * end;
*dir_name = NULL;
if (section->start == NULL
|| line_offset >= section->size
|| fileidx == 0)
return NULL;
hdrptr = section->start + line_offset;
end = section->start + section->size;
SAFE_BYTE_GET_AND_INC (length, hdrptr, 4, end);
if (length == 0xffffffff)
{
/* This section is 64-bit DWARF 3. */
SAFE_BYTE_GET_AND_INC (length, hdrptr, 8, end);
offset_size = 8;
}
else
offset_size = 4;
/* Reject lengths that overrun the section or are too small to hold
   the fixed part of the header.  */
if (length > (size_t) (end - hdrptr)
|| length < 2 + offset_size + 1 + 3 + 1)
return NULL;
end = hdrptr + length;
SAFE_BYTE_GET_AND_INC (version, hdrptr, 2, end);
if (version != 2 && version != 3 && version != 4)
return NULL;
hdrptr += offset_size + 1;/* Skip prologue_length and min_insn_length. */
if (version >= 4)
hdrptr++; /* Skip max_ops_per_insn. */
hdrptr += 3; /* Skip default_is_stmt, line_base, line_range. */
SAFE_BYTE_GET_AND_INC (opcode_base, hdrptr, 1, end);
if (opcode_base == 0
|| opcode_base - 1 >= (size_t) (end - hdrptr))
return NULL;
hdrptr += opcode_base - 1;
dirtable = hdrptr;
/* Skip over dirname table. */
while (*hdrptr != '\0')
{
hdrptr += strnlen ((char *) hdrptr, end - hdrptr);
if (hdrptr < end)
hdrptr++;
if (hdrptr >= end)
return NULL;
}
hdrptr++; /* Skip the NUL at the end of the table. */
/* Now skip over preceding filename table entries.  Each entry is a
   NUL-terminated name followed by three ULEBs (dir index, mtime, size).  */
for (; hdrptr < end && *hdrptr != '\0' && fileidx > 1; fileidx--)
{
hdrptr += strnlen ((char *) hdrptr, end - hdrptr);
if (hdrptr < end)
hdrptr++;
SKIP_ULEB (hdrptr, end);
SKIP_ULEB (hdrptr, end);
SKIP_ULEB (hdrptr, end);
}
if (hdrptr >= end || *hdrptr == '\0')
return NULL;
file_name = hdrptr;
hdrptr += strnlen ((char *) hdrptr, end - hdrptr);
if (hdrptr < end)
hdrptr++;
if (hdrptr >= end)
return NULL;
/* The file's directory index follows its name.  */
READ_ULEB (diridx, hdrptr, end);
if (diridx == 0)
return file_name;
/* Walk the directory table to the diridx'th entry.  */
for (; dirtable < end && *dirtable != '\0' && diridx > 1; diridx--)
{
dirtable += strnlen ((char *) dirtable, end - dirtable);
if (dirtable < end)
dirtable++;
}
if (dirtable >= end || *dirtable == '\0')
return NULL;
*dir_name = dirtable;
return file_name;
}
| 0
|
381,857
|
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
				    struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	/* Invalidate previously cached extent */
	__udf_clear_extent_cache(inode);
	if (pos->bh)
		get_bh(pos->bh);
	memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
	iinfo->cached_extent.lstart = estart;
	/* Rewind the offset to the start of the descriptor just parsed. */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
	spin_unlock(&iinfo->i_extent_cache_lock);
}
| 0
|
386,556
|
void DL_Dxf::writeArc(DL_WriterA& dw,
                      const DL_ArcData& data,
                      const DL_Attributes& attrib) {
    // DXF 2000 requires subclass markers around each record group.
    const bool dxf2000 = (version == DL_VERSION_2000);

    dw.entity("ARC");
    if (dxf2000) {
        dw.dxfString(100, "AcDbEntity");
    }
    dw.entityAttributes(attrib);
    if (dxf2000) {
        dw.dxfString(100, "AcDbCircle");
    }
    dw.coord(10, data.cx, data.cy, data.cz);
    dw.dxfReal(40, data.radius);
    if (dxf2000) {
        dw.dxfString(100, "AcDbArc");
    }
    dw.dxfReal(50, data.angle1);
    dw.dxfReal(51, data.angle2);
}
| 0
|
222,900
|
// Sentinel meaning "value not known"; callers compare against this.
static int64_t Unknown() {
  return -1;
}
| 0
|
263,512
|
/*
 * Release a SCO socket: close the channel, optionally honour SO_LINGER
 * by waiting for BT_CLOSED, then detach the sock from the socket and
 * drop the final reference.
 */
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sco_sock_close(sk);
/* Linger only when requested and the process is not already exiting. */
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
!(current->flags & PF_EXITING)) {
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
release_sock(sk);
}
/* Detach from the owning socket, then let sco_sock_kill() free it. */
sock_orphan(sk);
sco_sock_kill(sk);
return err;
}
| 0
|
222,568
|
string FunctionLibraryDefinition::FindGradient(const string& func) const {
  // Returns the registered gradient function name for `func`,
  // or the empty string when no gradient has been registered.
  tf_shared_lock lock(mu_);
  return gtl::FindWithDefault(func_grad_, func, "");
}
| 0
|
359,631
|
DEFUN (clear_ip_bgp_peer_ipv4_in_prefix_filter,
       clear_ip_bgp_peer_ipv4_in_prefix_filter_cmd,
       "clear ip bgp A.B.C.D ipv4 (unicast|multicast) in prefix-filter",
       CLEAR_STR
       IP_STR
       BGP_STR
       "BGP neighbor address to clear\n"
       "Address family\n"
       "Address Family modifier\n"
       "Address Family modifier\n"
       "Soft reconfig inbound update\n"
       "Push out the existing ORF prefix-list\n")
{
  /* argv[1] selects the SAFI; anything starting with 'm' is multicast. */
  safi_t safi = (strncmp (argv[1], "m", 1) == 0)
    ? SAFI_MULTICAST : SAFI_UNICAST;

  return bgp_clear_vty (vty, NULL, AFI_IP, safi, clear_peer,
                        BGP_CLEAR_SOFT_IN_ORF_PREFIX, argv[0]);
}
| 0
|
512,947
|
double val_real_from_item(Item *item)
{
  DBUG_ASSERT(is_fixed());
  /* Evaluate the item and propagate its NULL flag alongside the value. */
  double result= item->val_real();
  null_value= item->null_value;
  return result;
}
| 0
|
359,651
|
DEFUN (clear_bgp_all_soft_in,
       clear_bgp_all_soft_in_cmd,
       "clear bgp * soft in",
       CLEAR_STR
       BGP_STR
       "Clear all peers\n"
       "Soft reconfig\n"
       "Soft reconfig inbound update\n")
{
  /* With one argument (argc == 1) a view name was given; clear inside it. */
  const char *view_name = (argc == 1) ? argv[0] : NULL;

  return bgp_clear_vty (vty, view_name, AFI_IP6, SAFI_UNICAST, clear_all,
                        BGP_CLEAR_SOFT_IN, NULL);
}
| 0
|
225,765
|
/*
 * Dispatch a child box (or its removal, when is_rem is set) to the right
 * slot of the moov box.  BOX_FIELD_ASSIGN handles both the assign and the
 * remove case for single-instance children; trak boxes are kept in a list.
 */
GF_Err moov_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
GF_MovieBox *ptr = (GF_MovieBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_IODS:
BOX_FIELD_ASSIGN(iods, GF_ObjectDescriptorBox)
//if no IOD, delete the box
if (ptr->iods && !ptr->iods->descriptor) {
ptr->iods = NULL;
gf_isom_box_del_parent(&s->child_boxes, a);
}
return GF_OK;
case GF_ISOM_BOX_TYPE_MVHD:
BOX_FIELD_ASSIGN(mvhd, GF_MovieHeaderBox)
return GF_OK;
case GF_ISOM_BOX_TYPE_UDTA:
BOX_FIELD_ASSIGN(udta, GF_UserDataBox)
return GF_OK;
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
case GF_ISOM_BOX_TYPE_MVEX:
BOX_FIELD_ASSIGN(mvex, GF_MovieExtendsBox)
/* the mvex needs a back-pointer to the movie for fragment handling */
if (ptr->mvex)
ptr->mvex->mov = ptr->mov;
return GF_OK;
#endif
case GF_ISOM_BOX_TYPE_META:
BOX_FIELD_ASSIGN(meta, GF_MetaBox)
return GF_OK;
case GF_ISOM_BOX_TYPE_TRAK:
if (is_rem) {
gf_list_del_item(ptr->trackList, a);
return GF_OK;
}
{
GF_TrackBox *tk = (GF_TrackBox *)a;
//set our pointer to this obj
tk->moov = ptr;
tk->index = gf_list_count(ptr->trackList);
/* cache the number of base-track references for layered media */
if (tk->References) {
GF_TrackReferenceTypeBox *dpnd=NULL;
Track_FindRef(tk, GF_ISOM_REF_BASE, &dpnd);
if (dpnd)
tk->nb_base_refs = dpnd->trackIDCount;
}
}
return gf_list_add(ptr->trackList, a);
}
/* unknown child boxes are silently ignored */
return GF_OK;
}
| 0
|
447,047
|
// Map the file contents into memory and return a pointer to the mapped
// area.  Any previous mapping is released first.  When isWriteable is
// set, the file is reopened for writing and the mapping is writable.
// Three implementations: POSIX mmap, Win32 file mapping, and a fallback
// that simply reads the whole file into a heap buffer.
// Throws (W)Error on any failure.
byte* FileIo::mmap(bool isWriteable)
{
assert(p_->fp_ != 0);
// Drop any existing mapping before creating a new one.
if (munmap() != 0) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(2, wpath(), strError().c_str(), "munmap");
}
else
#endif
{
throw Error(2, path(), strError(), "munmap");
}
}
p_->mappedLength_ = size();
p_->isWriteable_ = isWriteable;
// A writable mapping requires the underlying stream in write mode.
if (p_->isWriteable_ && p_->switchMode(Impl::opWrite) != 0) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(16, wpath(), strError().c_str());
}
else
#endif
{
throw Error(16, path(), strError());
}
}
#if defined EXV_HAVE_MMAP && defined EXV_HAVE_MUNMAP
// POSIX path: shared mapping so writes (if enabled) reach the file.
int prot = PROT_READ;
if (p_->isWriteable_) {
prot |= PROT_WRITE;
}
void* rc = ::mmap(0, p_->mappedLength_, prot, MAP_SHARED, fileno(p_->fp_), 0);
if (MAP_FAILED == rc) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(2, wpath(), strError().c_str(), "mmap");
}
else
#endif
{
throw Error(2, path(), strError(), "mmap");
}
}
p_->pMappedArea_ = static_cast<byte*>(rc);
#elif defined WIN32 && !defined __CYGWIN__
// Windows implementation
// TODO: An attempt to map a file with a length of 0 (zero) fails with
// an error code of ERROR_FILE_INVALID.
// Applications should test for files with a length of 0 (zero) and
// reject those files.
DWORD dwAccess = FILE_MAP_READ;
DWORD flProtect = PAGE_READONLY;
if (isWriteable) {
dwAccess = FILE_MAP_WRITE;
flProtect = PAGE_READWRITE;
}
HANDLE hPh = GetCurrentProcess();
HANDLE hFd = (HANDLE)_get_osfhandle(fileno(p_->fp_));
if (hFd == INVALID_HANDLE_VALUE) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(2, wpath(), "MSG1", "_get_osfhandle");
}
else
#endif
{
throw Error(2, path(), "MSG1", "_get_osfhandle");
}
}
// Duplicate the CRT handle so the mapping owns its own reference.
if (!DuplicateHandle(hPh, hFd, hPh, &p_->hFile_, 0, false, DUPLICATE_SAME_ACCESS)) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(2, wpath(), "MSG2", "DuplicateHandle");
}
else
#endif
{
throw Error(2, path(), "MSG2", "DuplicateHandle");
}
}
p_->hMap_ = CreateFileMapping(p_->hFile_, 0, flProtect, 0, (DWORD) p_->mappedLength_, 0);
if (p_->hMap_ == 0 ) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(2, wpath(), "MSG3", "CreateFileMapping");
}
else
#endif
{
throw Error(2, path(), "MSG3", "CreateFileMapping");
}
}
void* rc = MapViewOfFile(p_->hMap_, dwAccess, 0, 0, 0);
if (rc == 0) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(2, wpath(), "MSG4", "CreateFileMapping");
}
else
#endif
{
throw Error(2, path(), "MSG4", "CreateFileMapping");
}
}
p_->pMappedArea_ = static_cast<byte*>(rc);
#else
// Workaround for platforms without mmap: Read the file into memory
// (isMalloced_ tells munmap() to free the buffer instead of unmapping).
DataBuf buf(static_cast<long>(p_->mappedLength_));
if (read(buf.pData_, buf.size_) != buf.size_) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(2, wpath(), strError().c_str(), "FileIo::read");
}
else
#endif
{
throw Error(2, path(), strError(), "FileIo::read");
}
}
if (error()) {
#ifdef EXV_UNICODE_PATH
if (p_->wpMode_ == Impl::wpUnicode) {
throw WError(2, wpath(), strError().c_str(), "FileIo::mmap");
}
else
#endif
{
throw Error(2, path(), strError(), "FileIo::mmap");
}
}
p_->pMappedArea_ = buf.release().first;
p_->isMalloced_ = true;
#endif
return p_->pMappedArea_;
}
| 0
|
243,003
|
/*
 * Return 1 if data is already buffered inside the SSL context and can be
 * processed without reading from the underlying transport, 0 otherwise.
 * Callers use this to decide whether select()/poll() on the transport
 * would miss pending records.
 */
int mbedtls_ssl_check_pending( const mbedtls_ssl_context *ssl )
{
/*
 * Case A: We're currently holding back
 * a message for further processing.
 */
if( ssl->keep_current_message == 1 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ssl_check_pending: record held back for processing" ) );
return( 1 );
}
/*
 * Case B: Further records are pending in the current datagram.
 */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
ssl->in_left > ssl->next_record_offset )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ssl_check_pending: more records within current datagram" ) );
return( 1 );
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
/*
 * Case C: A handshake message is being processed.
 */
if( ssl->in_hslen > 0 && ssl->in_hslen < ssl->in_msglen )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ssl_check_pending: more handshake messages within current record" ) );
return( 1 );
}
/*
 * Case D: An application data message is being processed
 */
if( ssl->in_offt != NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ssl_check_pending: application data record is being processed" ) );
return( 1 );
}
/*
 * In all other cases, the rest of the message can be dropped.
 * As in ssl_get_next_record, this needs to be adapted if
 * we implement support for multiple alerts in single records.
 */
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ssl_check_pending: nothing pending" ) );
return( 0 );
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.