idx
int64
func
string
target
int64
246,445
// Parse all entries of one WASM section into a cached RPVector.
// Dispatches on sec->id to select the per-entry parser and, where entries own
// heap data, a matching free callback; the result is memoized in the bin->g_*
// slot for that section.  The payload is bounds-checked against the buffer
// size before seeking, and parse_vec() does the actual entry loop.
// Returns the (cached) vector, or NULL for unknown section ids, out-of-range
// payloads, or seek failure.
// NOTE(review): `bound = offset + len - 1` wraps when len == 0 (and on
// offset+len overflow); the wrapped value trips the `bound >= r_buf_size()`
// check so it is rejected, but confirm section parsing guarantees len > 0.
static RPVector *parse_sub_section_vec(RBinWasmObj *bin, RBinWasmSection *sec) { RPVector **cache = NULL; RPVectorFree pfree = (RPVectorFree)free; ParseEntryFcn parser; switch (sec->id) { case R_BIN_WASM_SECTION_TYPE: parser = (ParseEntryFcn)parse_type_entry; pfree = (RPVectorFree)free_type_entry; cache = &bin->g_types; break; case R_BIN_WASM_SECTION_IMPORT: parser = (ParseEntryFcn)parse_import_entry; pfree = (RPVectorFree)free_import_entry; cache = &bin->g_imports; break; case R_BIN_WASM_SECTION_FUNCTION: parser = (ParseEntryFcn)parse_function_entry; cache = &bin->g_funcs; break; case R_BIN_WASM_SECTION_TABLE: parser = (ParseEntryFcn)parse_table_entry; cache = &bin->g_tables; break; case R_BIN_WASM_SECTION_MEMORY: parser = (ParseEntryFcn)parse_memory_entry; cache = &bin->g_memories; break; case R_BIN_WASM_SECTION_GLOBAL: parser = (ParseEntryFcn)parse_global_entry; cache = &bin->g_globals; break; case R_BIN_WASM_SECTION_EXPORT: parser = (ParseEntryFcn)parse_export_entry; pfree = (RPVectorFree)free_export_entry; cache = &bin->g_exports; break; case R_BIN_WASM_SECTION_ELEMENT: parser = (ParseEntryFcn)parse_element_entry; cache = &bin->g_elements; break; case R_BIN_WASM_SECTION_CODE: parser = (ParseEntryFcn)parse_code_entry; pfree = (RPVectorFree)free_code_entry; cache = &bin->g_codes; break; case R_BIN_WASM_SECTION_DATA: parser = (ParseEntryFcn)parse_data_entry; cache = &bin->g_datas; break; default: return NULL; } RBuffer *buf = bin->buf; ut64 offset = sec->payload_data; ut64 len = sec->payload_len; ut64 bound = offset + len - 1; if (bound >= r_buf_size (buf)) { r_warn_if_reached (); // section parsing should prevent this eprintf ("[wasm] End of %s section data is beyond file end\n", sec->name); return NULL; } if (r_buf_seek (buf, offset, R_BUF_SET) != offset) { return NULL; } *cache = parse_vec (bin, bound, parser, pfree); return *cache; }
0
512,480
/// Address of the i-th argument slot, or NULL when this item has no
/// arguments at all.  When arg_count is non-zero the caller must keep
/// `i` within range; no bounds check is performed here.
Item** addr(uint i)
{
  if (!arg_count)
    return NULL;
  return args + i;
}
0
244,303
/* Compute the serialized size of an FDpacket box: 5 fixed bytes, then per
 * header extension 1 type byte plus either 3 fixed content bytes (extension
 * types > 127) or a 1-byte length followed by data_length payload bytes.
 * NOTE(review): assumes headers[] holds at least header_ext_count entries
 * and that data_length matches the stored data -- confirm at parse time. */
GF_Err fdpa_box_size(GF_Box *s) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; ptr->size += 5; for (i=0; i<ptr->header_ext_count; i++) { ptr->size += 1; if (ptr->headers[i].header_extension_type > 127) { ptr->size += 3; } else { ptr->size += 1 + ptr->headers[i].data_length; } } return GF_OK; }
0
441,826
SProcXkbSetIndicatorMap(ClientPtr client) { REQUEST(xkbSetIndicatorMapReq); swaps(&stuff->length); REQUEST_AT_LEAST_SIZE(xkbSetIndicatorMapReq); swaps(&stuff->deviceSpec); swapl(&stuff->which); return ProcXkbSetIndicatorMap(client); }
0
389,674
/*
 * Coerce a typval to a boolean/number without an error-flag out-parameter;
 * conversion failures are reported through the usual error message path.
 * NOTE(review): the TRUE argument presumably selects boolean semantics in
 * tv_get_bool_or_number_chk() -- confirm against that function.
 */
tv_get_bool(typval_T *varp) { return tv_get_bool_or_number_chk(varp, NULL, TRUE); }
0
240,595
// Apply a sparse scatter update `op` to a resource variable.
//   input 0: variable handle, input 1: indices, input 2: updates.
// Validates that updates is either a scalar or shaped
// indices.shape + params.shape[1:], that indices/params row counts fit the
// Index type, and (DIV on CPU only) that updates contains no zero.
// Dispatches to ScatterScalarFunctor (scalar broadcast) or ScatterFunctor
// (row-per-index); a non-negative bad_i from the functor reports the first
// out-of-range index back to the caller as InvalidArgument.
// NOTE(review): no exclusive lock on the variable is visible here --
// confirm callers serialize concurrent updates to the same variable.
void DoCompute(OpKernelContext* c) { core::RefCountPtr<Var> v; OP_REQUIRES_OK(c, LookupResource(c, HandleFromInput(c, 0), &v)); Tensor* params = v->tensor(); const Tensor& indices = c->input(1); const Tensor& updates = c->input(2); // Check that rank(updates.shape) = rank(indices.shape + params.shape[1:]) OP_REQUIRES(c, updates.dims() == 0 || updates.dims() == indices.dims() + params->dims() - 1, errors::InvalidArgument( "Must have updates.shape = indices.shape + " "params.shape[1:] or updates.shape = [], got ", "updates.shape ", updates.shape().DebugString(), ", indices.shape ", indices.shape().DebugString(), ", params.shape ", params->shape().DebugString())); // Check that we have enough index space const int64_t N_big = indices.NumElements(); OP_REQUIRES( c, N_big <= std::numeric_limits<Index>::max(), errors::InvalidArgument("indices has too many elements for ", DataTypeString(DataTypeToEnum<Index>::v()), " indexing: ", N_big, " > ", std::numeric_limits<Index>::max())); const Index N = static_cast<Index>(N_big); OP_REQUIRES( c, params->dim_size(0) <= std::numeric_limits<Index>::max(), errors::InvalidArgument("params.shape[0] too large for ", DataTypeString(DataTypeToEnum<Index>::v()), " indexing: ", params->dim_size(0), " > ", std::numeric_limits<Index>::max())); // Prevent division by 0 if (isCPUDevice<Device>() && op == tensorflow::scatter_op::UpdateOp::DIV) { OP_REQUIRES(c, ValidateInput<T>(updates), errors::InvalidArgument("updates must not contain 0")); } if (N > 0) { auto indices_flat = indices.flat<Index>(); auto params_flat = params->flat_outer_dims<T>(); if (TensorShapeUtils::IsScalar(updates.shape())) { const auto update = updates.scalar<T>(); functor::ScatterScalarFunctor<Device, T, Index, op> functor; const Index bad_i = functor(c, c->template eigen_device<Device>(), params_flat, update, indices_flat); OP_REQUIRES(c, bad_i < 0, errors::InvalidArgument( "indices", SliceDebugString(indices.shape(), bad_i), " = ", indices_flat(bad_i), " is not in [0, ", 
params->dim_size(0), ")")); } else { int64_t num_updates = updates.NumElements(); OP_REQUIRES( c, TensorShapeUtils::StartsWith(updates.shape(), indices.shape()), errors::InvalidArgument( "The shape of indices (", indices.shape().DebugString(), ") must be a prefix of the shape of updates (", updates.shape().DebugString(), ")")); auto updates_flat = updates.shaped<T, 2>({N, num_updates / N}); functor::ScatterFunctor<Device, T, Index, op> functor; const Index bad_i = functor(c, c->template eigen_device<Device>(), params_flat, updates_flat, indices_flat); OP_REQUIRES(c, bad_i < 0, errors::InvalidArgument( "indices", SliceDebugString(indices.shape(), bad_i), " = ", indices_flat(bad_i), " is not in [0, ", params->dim_size(0), ")")); } } }
0
401,526
/* Boot-time (__init) setup of the per-base expiry_lock spinlock used to
 * serialize timer expiry against deletion.  A no-op stub exists for
 * configurations where the expiry-lock mechanism is compiled out. */
static __init void timer_base_init_expiry_lock(struct timer_base *base) { spin_lock_init(&base->expiry_lock); }
0
463,118
/* Re-target an annotation state at a new scope.  Exactly one case applies:
 *   mbentry  -> mailbox scope (opening the local mailbox unless remote),
 *   uid      -> message scope (mailbox must be supplied),
 *   mailbox  -> mailbox scope,
 *   none     -> server scope.
 * The old DB reference is deliberately held across the switch so that
 * re-resolving to the same DB avoids a cyrusdb close/open cycle; it is
 * released at the end regardless of outcome.  Returns 0 or an error from
 * mailbox_open_iwl()/_annotate_getdb(). */
static int annotate_state_set_scope(annotate_state_t *state, const mbentry_t *mbentry, struct mailbox *mailbox, unsigned int uid) { int r = 0; annotate_db_t *oldd = NULL; int oldwhich = state->which; init_internal(); /* Carefully preserve the reference on the old DB just in case it * turns out to be the same as the new DB, so we avoid the overhead * of an unnecessary cyrusdb_open/close pair. */ oldd = state->d; state->d = NULL; annotate_state_unset_scope(state); if (mbentry) { assert(!mailbox); assert(!uid); if (!mbentry->server) { /* local mailbox */ r = mailbox_open_iwl(mbentry->name, &mailbox); if (r) goto out; state->ourmailbox = mailbox; } state->mbentry = mbentry; state->which = ANNOTATION_SCOPE_MAILBOX; } else if (uid) { assert(mailbox); state->which = ANNOTATION_SCOPE_MESSAGE; } else if (mailbox) { assert(!uid); state->which = ANNOTATION_SCOPE_MAILBOX; } else { assert(!mailbox); assert(!uid); state->which = ANNOTATION_SCOPE_SERVER; } assert(oldwhich == ANNOTATION_SCOPE_UNKNOWN || oldwhich == state->which); state->mailbox = mailbox; state->uid = uid; r = _annotate_getdb(mailbox ? mailbox->name : NULL, uid, CYRUSDB_CREATE, &state->d); out: annotate_putdb(&oldd); return r; }
0
405,336
/* Resolve the route (dst_entry) for an xfrm state's endpoints.  When the
 * transform type uses care-of addresses (mobile IPv6 style), the state's
 * coaddr replaces the local or remote endpoint as appropriate.  On success
 * the addresses actually used are copied back into prev_saddr/prev_daddr so
 * the caller's bundle construction continues from the real endpoints. */
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, int oif, xfrm_address_t *prev_saddr, xfrm_address_t *prev_daddr, int family, u32 mark) { struct net *net = xs_net(x); xfrm_address_t *saddr = &x->props.saddr; xfrm_address_t *daddr = &x->id.daddr; struct dst_entry *dst; if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) { saddr = x->coaddr; daddr = prev_daddr; } if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) { saddr = prev_saddr; daddr = x->coaddr; } dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark); if (!IS_ERR(dst)) { if (prev_saddr != saddr) memcpy(prev_saddr, saddr, sizeof(*prev_saddr)); if (prev_daddr != daddr) memcpy(prev_daddr, daddr, sizeof(*prev_daddr)); } return dst; }
0
220,466
// Assemble XlaCompiler::Options from the compilation cache, the function
// library, and platform info.  Device ordinal comes from the stream when one
// exists; CPU custom calls are permitted only on the host platform; shape
// determination functions are taken from XLA-device metadata when present.
// Passthrough parameters are aliased (no copy) only when the graph has no
// reference variables AND we are not already on an XLA device.
XlaCompiler::Options GenerateCompilerOptions( const XlaCompilationCache& cache, const FunctionLibraryRuntime& function_library, DeviceBase* device, se::Stream* stream, const XlaPlatformInfo& platform_info, bool has_ref_vars) { XlaCompiler::Options options; options.client = static_cast<xla::LocalClient*>(cache.client()); if (stream != nullptr) { options.device_ordinal = stream->parent()->device_ordinal(); } options.device_type = cache.device_type(); options.flib_def = function_library.GetFunctionLibraryDefinition(); options.graph_def_version = function_library.graph_def_version(); options.allow_cpu_custom_calls = (platform_info.platform_id() == se::host::kHostPlatformId); options.device_allocator = GetAllocator(device, stream, platform_info); if (platform_info.xla_device_metadata()) { options.shape_determination_fns = platform_info.xla_device_metadata()->default_shape_determination_fns(); } // If reference variables are not present in the graph, we can safely alias // passthrough parameters without performing a copy. options.alias_passthrough_params = !has_ref_vars && !platform_info.is_on_xla_device(); return options; }
0
385,851
/* Complete an open begun by an ->atomic_open() instance: attach the dentry
 * to the file and run do_dentry_open() under current credentials, recording
 * FILE_OPENED in *opened on success.  The BUG_ON guards against finishing
 * the same struct file twice.  Returns 0 or the do_dentry_open() error. */
int finish_open(struct file *file, struct dentry *dentry, int (*open)(struct inode *, struct file *), int *opened) { int error; BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ file->f_path.dentry = dentry; error = do_dentry_open(file, open, current_cred()); if (!error) *opened |= FILE_OPENED; return error; }
0
198,010
// Scan [from, to) of bf for printable strings of at least `min` runes and
// append them to `list` (or print / JSON-dump them when list is NULL).
// Detects ASCII / UTF-8 / UTF-16LE / UTF-32LE by peeking at byte patterns,
// optionally re-encodes through the RABIN2_CHARSET environment charset,
// escapes control characters into the temp buffer, filters improbable wide
// strings via unicode-block statistics (retrying as ASCII-only when ASCII
// dominates), and fixes up paddr/vaddr using the containing section.
// Returns the number of strings found, or -1 on invalid arguments.
// NOTE(review): on a too-short ASCII run, `needle -= 2` backs up to retry as
// a wide string; confirm needle cannot drop below `from` here, otherwise
// `buf + needle - from` would index before the scan buffer.
// NOTE(review): the charset path computes `int outlen = len * 4`; the
// earlier ST32_MAX bound on len should keep this from overflowing int, but
// verify for len near ST32_MAX.
static int string_scan_range(RList *list, RBinFile *bf, int min, const ut64 from, const ut64 to, int type, int raw, RBinSection *section) { RBin *bin = bf->rbin; ut8 tmp[R_STRING_SCAN_BUFFER_SIZE]; ut64 str_start, needle = from; int count = 0, i, rc, runes; int str_type = R_STRING_TYPE_DETECT; // if list is null it means its gonna dump r_return_val_if_fail (bf, -1); if (type == -1) { type = R_STRING_TYPE_DETECT; } if (from == to) { return 0; } if (from > to) { eprintf ("Invalid range to find strings 0x%"PFMT64x" .. 0x%"PFMT64x"\n", from, to); return -1; } st64 len = (st64)(to - from); if (len < 1 || len > ST32_MAX) { eprintf ("String scan range is invalid (%"PFMT64d" bytes)\n", len); return -1; } ut8 *buf = calloc (len, 1); if (!buf || !min) { free (buf); return -1; } st64 vdelta = 0, pdelta = 0; RBinSection *s = NULL; bool ascii_only = false; PJ *pj = NULL; if (bf->strmode == R_MODE_JSON && !list) { pj = pj_new (); if (pj) { pj_a (pj); } } r_buf_read_at (bf->buf, from, buf, len); char *charset = r_sys_getenv ("RABIN2_CHARSET"); if (!R_STR_ISEMPTY (charset)) { RCharset *ch = r_charset_new (); if (r_charset_use (ch, charset)) { int outlen = len * 4; ut8 *out = calloc (len, 4); if (out) { int res = r_charset_encode_str (ch, out, outlen, buf, len); int i; // TODO unknown chars should be translated to null bytes for (i = 0; i < res; i++) { if (out[i] == '?') { out[i] = 0; } } len = res; free (buf); buf = out; } else { eprintf ("Cannot allocate\n"); } } else { eprintf ("Invalid value for RABIN2_CHARSET.\n"); } r_charset_free (ch); } free (charset); RConsIsBreaked is_breaked = (bin && bin->consb.is_breaked)? 
bin->consb.is_breaked: NULL; // may oobread while (needle < to) { if (is_breaked && is_breaked ()) { break; } // smol optimization if (needle + 4 < to) { ut32 n1 = r_read_le32 (buf + needle - from); if (!n1) { needle += 4; continue; } } rc = r_utf8_decode (buf + needle - from, to - needle, NULL); if (!rc) { needle++; continue; } bool addr_aligned = !(needle % 4); if (type == R_STRING_TYPE_DETECT) { char *w = (char *)buf + needle + rc - from; if (((to - needle) > 8 + rc)) { // TODO: support le and be bool is_wide32le = (needle + rc + 2 < to) && (!w[0] && !w[1] && !w[2] && w[3] && !w[4]); // reduce false positives if (is_wide32le) { if (!w[5] && !w[6] && w[7] && w[8]) { is_wide32le = false; } } if (!addr_aligned) { is_wide32le = false; } ///is_wide32be &= (n1 < 0xff && n11 < 0xff); // false; // n11 < 0xff; if (is_wide32le && addr_aligned) { str_type = R_STRING_TYPE_WIDE32; // asume big endian,is there little endian w32? } else { // bool is_wide = (n1 && n2 && n1 < 0xff && (!n2 || n2 < 0xff)); bool is_wide = needle + rc + 4 < to && !w[0] && w[1] && !w[2] && w[3] && !w[4]; str_type = is_wide? R_STRING_TYPE_WIDE: R_STRING_TYPE_ASCII; } } else { if (rc > 1) { str_type = R_STRING_TYPE_UTF8; // could be charset if set :? 
} else { str_type = R_STRING_TYPE_ASCII; } } } else if (type == R_STRING_TYPE_UTF8) { str_type = R_STRING_TYPE_ASCII; // initial assumption } else { str_type = type; } runes = 0; str_start = needle; /* Eat a whole C string */ for (i = 0; i < sizeof (tmp) - 4 && needle < to; i += rc) { RRune r = {0}; if (str_type == R_STRING_TYPE_WIDE32) { rc = r_utf32le_decode (buf + needle - from, to - needle, &r); if (rc) { rc = 4; } } else if (str_type == R_STRING_TYPE_WIDE) { rc = r_utf16le_decode (buf + needle - from, to - needle, &r); if (rc == 1) { rc = 2; } } else { rc = r_utf8_decode (buf + needle - from, to - needle, &r); if (rc > 1) { str_type = R_STRING_TYPE_UTF8; } } /* Invalid sequence detected */ if (!rc || (ascii_only && r > 0x7f)) { needle++; break; } needle += rc; if (r_isprint (r) && r != '\\') { if (str_type == R_STRING_TYPE_WIDE32) { if (r == 0xff) { r = 0; } } rc = r_utf8_encode (tmp + i, r); runes++; /* Print the escape code */ } else if (r && r < 0x100 && strchr ("\b\v\f\n\r\t\a\033\\", (char)r)) { if ((i + 32) < sizeof (tmp) && r < 93) { tmp[i + 0] = '\\'; tmp[i + 1] = " abtnvfr e " " " " " " \\"[r]; } else { // string too long break; } rc = 2; runes++; } else { /* \0 marks the end of C-strings */ break; } } tmp[i++] = '\0'; if (runes < min && runes >= 2 && str_type == R_STRING_TYPE_ASCII && needle < to) { // back up past the \0 to the last char just in case it starts a wide string needle -= 2; } if (runes >= min) { // reduce false positives int j, num_blocks, *block_list; int *freq_list = NULL, expected_ascii, actual_ascii, num_chars; if (str_type == R_STRING_TYPE_ASCII) { for (j = 0; j < i; j++) { char ch = tmp[j]; if (ch != '\n' && ch != '\r' && ch != '\t') { if (!IS_PRINTABLE (tmp[j])) { continue; } } } } switch (str_type) { case R_STRING_TYPE_UTF8: case R_STRING_TYPE_WIDE: case R_STRING_TYPE_WIDE32: num_blocks = 0; block_list = r_utf_block_list ((const ut8*)tmp, i - 1, str_type == R_STRING_TYPE_WIDE? 
&freq_list: NULL); if (block_list) { for (j = 0; block_list[j] != -1; j++) { num_blocks++; } } if (freq_list) { num_chars = 0; actual_ascii = 0; for (j = 0; freq_list[j] != -1; j++) { num_chars += freq_list[j]; if (!block_list[j]) { // ASCII actual_ascii = freq_list[j]; } } free (freq_list); expected_ascii = num_blocks ? num_chars / num_blocks : 0; if (actual_ascii > expected_ascii) { ascii_only = true; needle = str_start; free (block_list); continue; } } free (block_list); if (num_blocks > R_STRING_MAX_UNI_BLOCKS) { needle++; continue; } } RBinString *bs = R_NEW0 (RBinString); if (!bs) { break; } bs->type = str_type; bs->length = runes; bs->size = needle - str_start; bs->ordinal = count++; // TODO: move into adjust_offset switch (str_type) { case R_STRING_TYPE_WIDE: if (str_start - from > 1) { const ut8 *p = buf + str_start - 2 - from; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 2; // \xff\xfe } } break; case R_STRING_TYPE_WIDE32: if (str_start - from > 3) { const ut8 *p = buf + str_start - 4 - from; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 4; // \xff\xfe\x00\x00 } } break; } if (!s) { if (section) { s = section; } else if (bf->o) { s = r_bin_get_section_at (bf->o, str_start, false); } if (s) { vdelta = s->vaddr; pdelta = s->paddr; } } ut64 baddr = bf->loadaddr && bf->o? bf->o->baddr: bf->loadaddr; bs->paddr = str_start + baddr; bs->vaddr = str_start - pdelta + vdelta + baddr; bs->string = r_str_ndup ((const char *)tmp, i); if (list) { r_list_append (list, bs); if (bf->o) { ht_up_insert (bf->o->strings_db, bs->vaddr, bs); } } else { print_string (bf, bs, raw, pj); r_bin_string_free (bs); } if (from == 0 && to == bf->size) { /* force lookup section at the next one */ s = NULL; } } ascii_only = false; } free (buf); if (pj) { pj_end (pj); if (bin) { RIO *io = bin->iob.io; if (io) { io->cb_printf ("%s", pj_string (pj)); } } pj_free (pj); } return count; }
1
317,331
/* Read an inode's SELinux security xattr and convert it to a SID.
 * Starts with a 255-byte buffer; on -ERANGE queries the exact size and
 * retries once.  A missing xattr (-ENODATA) silently falls back to
 * def_sid; any other getxattr error is logged and returned.  Unparseable
 * contexts are logged (rate-limited for -EINVAL) while
 * security_context_to_sid_default() supplies the default mapping. */
static int inode_doinit_use_xattr(struct inode *inode, struct dentry *dentry, u32 def_sid, u32 *sid) { #define INITCONTEXTLEN 255 char *context; unsigned int len; int rc; len = INITCONTEXTLEN; context = kmalloc(len + 1, GFP_NOFS); if (!context) return -ENOMEM; context[len] = '\0'; rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len); if (rc == -ERANGE) { kfree(context); /* Need a larger buffer. Query for the right size. */ rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, NULL, 0); if (rc < 0) return rc; len = rc; context = kmalloc(len + 1, GFP_NOFS); if (!context) return -ENOMEM; context[len] = '\0'; rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len); } if (rc < 0) { kfree(context); if (rc != -ENODATA) { pr_warn("SELinux: %s: getxattr returned %d for dev=%s ino=%ld\n", __func__, -rc, inode->i_sb->s_id, inode->i_ino); return rc; } *sid = def_sid; return 0; } rc = security_context_to_sid_default(&selinux_state, context, rc, sid, def_sid, GFP_NOFS); if (rc) { char *dev = inode->i_sb->s_id; unsigned long ino = inode->i_ino; if (rc == -EINVAL) { pr_notice_ratelimited("SELinux: inode=%lu on dev=%s was found to have an invalid context=%s. This indicates you may need to relabel the inode or the filesystem in question.\n", ino, dev, context); } else { pr_warn("SELinux: %s: context_to_sid(%s) returned %d for dev=%s ino=%ld\n", __func__, context, -rc, dev, ino); } } kfree(context); return 0; }
0
221,385
/* Read one PDPTE (index 0..3) from the nested guest's nested_cr3 page for
 * PAE shadow-paging walks.  Returns 0 -- i.e. a non-present entry -- when
 * the guest page cannot be read. */
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) { struct vcpu_svm *svm = to_svm(vcpu); u64 cr3 = svm->nested.ctl.nested_cr3; u64 pdpte; int ret; ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte, offset_in_page(cr3) + index * 8, 8); if (ret) return 0; return pdpte; }
0
225,678
/*
 * Compute the serialized size of an Xtra (Windows Media metadata) box:
 * for each tag, 18 fixed bytes plus the tag name and property payload.
 *
 * BUG FIX: the original text was truncated -- the function's closing
 * brace was missing, so the translation unit did not compile.
 */
GF_Err xtra_box_size(GF_Box *s)
{
	GF_XtraBox *ptr = (GF_XtraBox *)s;
	u32 i, count = gf_list_count(ptr->tags);
	for (i = 0; i < count; i++) {
		GF_XtraTag *tag = gf_list_get(ptr->tags, i);
		/* 18 = fixed per-tag header fields preceding name + property */
		ptr->size += 18 + (u32) strlen(tag->name) + tag->prop_size;
	}
	return GF_OK;
}
0
273,925
/* FTP RNTO (rename-to) handler: intentionally a no-op stub -- the command
 * is dispatched but performs no action.
 * NOTE(review): confirm whether an explicit 5xx "not implemented" reply
 * should be sent instead of silently ignoring the command. */
static void handle_RNTO(ctrl_t *ctrl, char *arg) { }
0
90,123
/// Link-state query for this object: it is always considered connected,
/// so the answer is unconditionally true.
bool Connected() const
{
  return true;
}
0
262,092
// Kernel constructor: loads the "logits_dimension" attr (used for
// multi-class splits) and the "split_type" attr (used for equality splits)
// into members; OP_REQUIRES_OK aborts construction on a missing/invalid
// attribute.
explicit BoostedTreesSparseCalculateBestFeatureSplitOp( OpKernelConstruction* const context) : OpKernel(context) { // TODO(crawles): Using logits_dim_ for multi-class split. OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); // TODO(tanzheny): Using this for equality split. OP_REQUIRES_OK(context, context->GetAttr("split_type", &split_type_)); }
0
294,509
/*
 * Date#next_year([n = 1]) -> date shifted forward by n years,
 * implemented as a month right-shift by n*12 (d_lite_rshift).
 */
d_lite_next_year(int argc, VALUE *argv, VALUE self) { VALUE n; rb_scan_args(argc, argv, "01", &n); if (argc < 1) n = INT2FIX(1); return d_lite_rshift(self, f_mul(n, INT2FIX(12))); }
0
255,768
/* Prime a lookup state for resolving PATH's access rights.  If PATH merely
 * extends the previously resolved parent path, reuse that parent's node
 * list and rights and return the still-unmatched suffix of PATH; otherwise
 * restart at ROOT -- folding in ROOT's "any variable segment" pattern
 * child, since var-segment rules also match the empty segment -- and
 * return PATH unchanged.
 * NOTE(review): "lockup" in the name looks like a historical typo for
 * "lookup"; renaming would touch all callers, so it is kept as-is. */
init_lockup_state(lookup_state_t *state, node_t *root, const char *path) { apr_size_t len = strlen(path); if ( (len > state->parent_path->len) && state->parent_path->len && (path[state->parent_path->len] == '/') && !memcmp(path, state->parent_path->data, state->parent_path->len)) { /* The PARENT_PATH of the previous lookup is actually a parent path * of PATH. The CURRENT node list already matches the parent path * and we only have to set the correct rights info. */ state->rights = state->parent_rights; /* Tell the caller where to proceed. */ return path + state->parent_path->len; } /* Start lookup at ROOT for the full PATH. */ state->rights = root->rights; state->parent_rights = root->rights; apr_array_clear(state->next); apr_array_clear(state->current); APR_ARRAY_PUSH(state->current, node_t *) = root; /* Var-segment rules match empty segments as well */ if (root->pattern_sub_nodes && root->pattern_sub_nodes->any_var) { node_t *node = root->pattern_sub_nodes->any_var; /* This is non-recursive due to ACL normalization. */ combine_access(&state->rights, &node->rights); combine_right_limits(&state->rights, &node->rights); APR_ARRAY_PUSH(state->current, node_t *) = node; } svn_stringbuf_setempty(state->parent_path); svn_stringbuf_setempty(state->scratch_pad); return path; }
0
366,242
/* Attach an automounted vfsmount M at PATH.  Rejects a trivial self-loop
 * (mounting a filesystem on its own root).  Open-codes lock_mount():
 * if the dentry has become unmountable, or something already overmounts
 * it, we quietly drop M rather than stacking on top.  On any failure path
 * M is removed from whatever expiration list it sits on, and both of the
 * references handed to us are dropped -- hence the double mntput() in the
 * discard path (only one is dropped on success, since the mount tree now
 * owns the other). */
int finish_automount(struct vfsmount *m, struct path *path) { struct dentry *dentry = path->dentry; struct mountpoint *mp; struct mount *mnt; int err; if (!m) return 0; if (IS_ERR(m)) return PTR_ERR(m); mnt = real_mount(m); /* The new mount record should have at least 2 refs to prevent it being * expired before we get a chance to add it */ BUG_ON(mnt_get_count(mnt) < 2); if (m->mnt_sb == path->mnt->mnt_sb && m->mnt_root == dentry) { err = -ELOOP; goto discard; } /* * we don't want to use lock_mount() - in this case finding something * that overmounts our mountpoint to be means "quitely drop what we've * got", not "try to mount it on top". */ inode_lock(dentry->d_inode); namespace_lock(); if (unlikely(cant_mount(dentry))) { err = -ENOENT; goto discard_locked; } rcu_read_lock(); if (unlikely(__lookup_mnt(path->mnt, dentry))) { rcu_read_unlock(); err = 0; goto discard_locked; } rcu_read_unlock(); mp = get_mountpoint(dentry); if (IS_ERR(mp)) { err = PTR_ERR(mp); goto discard_locked; } err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE); unlock_mount(mp); if (unlikely(err)) goto discard; mntput(m); return 0; discard_locked: namespace_unlock(); inode_unlock(dentry->d_inode); discard: /* remove m from any expiration list it may be on */ if (!list_empty(&mnt->mnt_expire)) { namespace_lock(); list_del_init(&mnt->mnt_expire); namespace_unlock(); } mntput(m); mntput(m); return err; }
0
387,880
// Whether this klass carries a stored class-file fingerprint: only in
// AOT-enabled builds, where shared classes and classes flagged by
// should_store_fingerprint() qualify; always false otherwise.
bool InstanceKlass::has_stored_fingerprint() const { #if INCLUDE_AOT return should_store_fingerprint() || is_shared(); #else return false; #endif }
0
226,341
/* Serialize a PMAX box: standard box header followed by the 32-bit maximum
 * packet size.  Propagates header-write errors; GF_BAD_PARAM on NULL box. */
GF_Err pmax_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_PMAXBox *ptr = (GF_PMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxSize); return GF_OK; }
0
401,556
/* Stub for configurations where timer expiry-lock serialization is
 * compiled out: nothing to initialize. */
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
0
326,629
/* Linux implementation of file-flag (chattr-style) restore.  Reads the
 * current flags via FS_IOC_GETFLAGS, applies set/clear, and writes them
 * back; if that fails with EPERM, retries with the root-only bits
 * (immutable / append-only / journal-data, collected in sf_mask) reverted
 * to their on-disk values.  Opens the file itself (O_NOFOLLOW + CLOEXEC)
 * when no fd was supplied and closes that private fd on exit.  Only
 * regular files and directories are attempted; every "cannot even try"
 * case returns ARCHIVE_OK, while a genuine ioctl failure reaches the
 * `fail` label and returns ARCHIVE_WARN. */
set_fflags_platform(struct archive_write_disk *a, int fd, const char *name, mode_t mode, unsigned long set, unsigned long clear) { int ret; int myfd = fd; int newflags, oldflags; /* * Linux has no define for the flags that are only settable by * the root user. This code may seem a little complex, but * there seem to be some Linux systems that lack these * defines. (?) The code below degrades reasonably gracefully * if sf_mask is incomplete. */ const int sf_mask = 0 #if defined(FS_IMMUTABLE_FL) | FS_IMMUTABLE_FL #elif defined(EXT2_IMMUTABLE_FL) | EXT2_IMMUTABLE_FL #endif #if defined(FS_APPEND_FL) | FS_APPEND_FL #elif defined(EXT2_APPEND_FL) | EXT2_APPEND_FL #endif #if defined(FS_JOURNAL_DATA_FL) | FS_JOURNAL_DATA_FL #endif ; if (set == 0 && clear == 0) return (ARCHIVE_OK); /* Only regular files and dirs can have flags. */ if (!S_ISREG(mode) && !S_ISDIR(mode)) return (ARCHIVE_OK); /* If we weren't given an fd, open it ourselves. */ if (myfd < 0) { myfd = open(name, O_RDONLY | O_NONBLOCK | O_BINARY | O_CLOEXEC | O_NOFOLLOW); __archive_ensure_cloexec_flag(myfd); } if (myfd < 0) return (ARCHIVE_OK); /* * XXX As above, this would be way simpler if we didn't have * to read the current flags from disk. XXX */ ret = ARCHIVE_OK; /* Read the current file flags. */ if (ioctl(myfd, #ifdef FS_IOC_GETFLAGS FS_IOC_GETFLAGS, #else EXT2_IOC_GETFLAGS, #endif &oldflags) < 0) goto fail; /* Try setting the flags as given. */ newflags = (oldflags & ~clear) | set; if (ioctl(myfd, #ifdef FS_IOC_SETFLAGS FS_IOC_SETFLAGS, #else EXT2_IOC_SETFLAGS, #endif &newflags) >= 0) goto cleanup; if (errno != EPERM) goto fail; /* If we couldn't set all the flags, try again with a subset. */ newflags &= ~sf_mask; oldflags &= sf_mask; newflags |= oldflags; if (ioctl(myfd, #ifdef FS_IOC_SETFLAGS FS_IOC_SETFLAGS, #else EXT2_IOC_SETFLAGS, #endif &newflags) >= 0) goto cleanup; /* We couldn't set the flags, so report the failure. 
*/ fail: archive_set_error(&a->archive, errno, "Failed to set file flags"); ret = ARCHIVE_WARN; cleanup: if (fd < 0) close(myfd); return (ret); }
0
227,004
/*
 * IRC numeric 729 (end of channel quiet list): mark the channel's quiet
 * modelist as fully received -- discarding stale items if no 728 entries
 * arrived before this terminator -- and print the message to the channel
 * buffer (or the server buffer when the channel is unknown/not joined).
 */
IRC_PROTOCOL_CALLBACK(729) { char *pos_args; struct t_irc_channel *ptr_channel; struct t_gui_buffer *ptr_buffer; struct t_irc_modelist *ptr_modelist; IRC_PROTOCOL_MIN_ARGS(5); pos_args = (argc > 5) ? ((argv_eol[5][0] == ':') ? argv_eol[5] + 1 : argv_eol[5]) : NULL; ptr_channel = irc_channel_search (server, argv[3]); ptr_buffer = (ptr_channel && ptr_channel->nicks) ? ptr_channel->buffer : server->buffer; ptr_modelist = irc_modelist_search (ptr_channel, argv[4][0]); if (ptr_modelist) { if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING) { /* * remove all items if no quiet was received before * the end of quiet list */ irc_modelist_item_free_all (ptr_modelist); } ptr_modelist->state = IRC_MODELIST_STATE_RECEIVED; } weechat_printf_date_tags ( irc_msgbuffer_get_target_buffer ( server, NULL, command, "quietlist", ptr_buffer), date, irc_protocol_tags (command, "irc_numeric", NULL, NULL), "%s%s[%s%s%s]%s%s%s", weechat_prefix ("network"), IRC_COLOR_CHAT_DELIMITERS, IRC_COLOR_CHAT_CHANNEL, argv[3], IRC_COLOR_CHAT_DELIMITERS, IRC_COLOR_RESET, (pos_args) ? " " : "", (pos_args) ? pos_args : ""); return WEECHAT_RC_OK; }
0
199,836
/*
 * Fetch one character from the scanner and advance curptr, optionally
 * skipping trailing whitespace when skip_ws is set.
 *
 * BUG FIX: the original detected end-of-input with a NUL sentinel
 * (`if (!chr)`), so a buffer that is not NUL-terminated could be read
 * past its bounds, and the post-increment whitespace peek dereferenced
 * one byte past the end of the input.  Use the scanner's explicit `end`
 * pointer for both checks instead, as in upstream pjlib hardening.
 *
 * Returns the character read, or 0 after raising a syntax error when the
 * scanner is already at end-of-input.
 */
PJ_DEF(int) pj_scan_get_char( pj_scanner *scanner )
{
    int chr;

    if (scanner->curptr >= scanner->end) {
	pj_scan_syntax_err(scanner);
	return 0;
    }

    chr = *scanner->curptr;
    ++scanner->curptr;

    if (scanner->curptr < scanner->end &&
	PJ_SCAN_IS_PROBABLY_SPACE(*scanner->curptr) && scanner->skip_ws)
    {
	pj_scan_skip_whitespace(scanner);
    }
    return chr;
}
1
226,245
/*
 * Read a 'tref' (track reference) box by delegating to the generic
 * child-box array reader, keyed on the parent box type.
 *
 * BUG FIX: the original text was truncated -- the function's closing
 * brace was missing, so the translation unit did not compile.
 */
GF_Err tref_box_read(GF_Box *s, GF_BitStream *bs)
{
	return gf_isom_box_array_read_ex(s, bs, s->type);
}
0
317,363
/* Return the Smack secid associated with a cred's task label.  The RCU
 * read lock brackets the smk_of_task() access, matching the locking style
 * of the other cred accessors in this file. */
static void smack_cred_getsecid(const struct cred *cred, u32 *secid) { struct smack_known *skp; rcu_read_lock(); skp = smk_of_task(smack_cred(cred)); *secid = skp->smk_secid; rcu_read_unlock(); }
0
442,797
/* curl tool entry point: zero-initialize the global Configurable, default
 * error output to stderr, make sure fds 0-2 exist (checkfds), run
 * operate() with the command line, then free the config.  NetWare builds
 * pause before exit; VMS builds translate the curl exit code (clamped to
 * CURL_LAST) into a VMS condition value; everywhere else the code is
 * returned directly. */
int main(int argc, char *argv[]) { int res; struct Configurable config; memset(&config, 0, sizeof(struct Configurable)); config.errors = stderr; /* default errors to stderr */ checkfds(); res = operate(&config, argc, argv); free_config_fields(&config); #ifdef __NOVELL_LIBC__ pressanykey(); #endif #ifdef VMS if (res > CURL_LAST) res = CURL_LAST; /* If CURL_LAST exceeded then */ return (vms_cond[res]|vms_show); /* curlmsg.h is out of sync. */ #else return res; #endif }
0
430,391
/* Overwrite the payload of every attribute in a (pre-validated) nested
 * nlattr stream with VAL, recursing into nested attributes per TBL (which
 * falls back to itself when no sub-table is given).  The conntrack state
 * attribute is additionally masked to CT_SUPPORTED_MASK so the wildcard
 * fill never advertises unsupported CT state bits. */
static void nlattr_set(struct nlattr *attr, u8 val, const struct ovs_len_tbl *tbl) { struct nlattr *nla; int rem; /* The nlattr stream should already have been validated */ nla_for_each_nested(nla, attr, rem) { if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl); else memset(nla_data(nla), val, nla_len(nla)); if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE) *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK; } }
0
238,649
/* Tighten a verifier register's 32-bit bounds by transferring information
 * between the signed and unsigned domains: when the signed range cannot
 * cross the sign boundary the two domains are intersected outright;
 * otherwise whichever unsigned endpoint pins the sign lets us tighten one
 * side safely (see the inline case comments). */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { reg->s32_min_value = reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); reg->s32_max_value = reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s32)reg->u32_max_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->s32_min_value = reg->u32_min_value; reg->s32_max_value = reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); } else if ((s32)reg->u32_min_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. */ reg->s32_min_value = reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); reg->s32_max_value = reg->u32_max_value; } }
0
352,991
/* Normalize an LDAP generalizedTime value to "YYYYmmddHHMMSS[.frac]Z".
 * check_time_syntax() parses the input into parts[] and an optional
 * fraction; month and day are stored zero-based there, hence the +1 on
 * output.  When a fraction is present it is spliced in at the seconds
 * boundary before the trailing "Z" is appended. */
generalizedTimeNormalize( slap_mask_t usage, Syntax *syntax, MatchingRule *mr, struct berval *val, struct berval *normalized, void *ctx ) { int parts[9], rc; unsigned int len; struct berval fraction; rc = check_time_syntax(val, 0, parts, &fraction); if (rc != LDAP_SUCCESS) { return rc; } len = STRLENOF("YYYYmmddHHMMSSZ") + fraction.bv_len; normalized->bv_val = slap_sl_malloc( len + 1, ctx ); if ( BER_BVISNULL( normalized ) ) { return LBER_ERROR_MEMORY; } sprintf( normalized->bv_val, "%02d%02d%02d%02d%02d%02d%02d", parts[0], parts[1], parts[2] + 1, parts[3] + 1, parts[4], parts[5], parts[6] ); if ( !BER_BVISEMPTY( &fraction ) ) { memcpy( normalized->bv_val + STRLENOF("YYYYmmddHHMMSSZ")-1, fraction.bv_val, fraction.bv_len ); normalized->bv_val[STRLENOF("YYYYmmddHHMMSSZ")-1] = '.'; } strcpy( normalized->bv_val + len-1, "Z" ); normalized->bv_len = len; return LDAP_SUCCESS; }
0
281,162
/* One-time boot-time initialisation of the IPsec transform (XFRM)
 * subsystem: flow-cache CPU-hotplug setup, per-network-namespace ops
 * registration, the seqcount guarding policy hash-table regeneration,
 * and the XFRM input path.  NOTE(review): the call order is preserved as
 * found; flow_cache_hp_init() running before register_pernet_subsys()
 * looks intentional -- confirm before reordering.
 */
void __init xfrm_init(void)
{
	flow_cache_hp_init();
	register_pernet_subsys(&xfrm_net_ops);
	seqcount_init(&xfrm_policy_hash_generation);
	xfrm_input_init();
}
0
512,458
my_decimal *Item_func_coalesce::decimal_op(my_decimal *decimal_value)
{
  /*
    COALESCE(a1, a2, ...) for the DECIMAL result type: evaluate the
    arguments left to right and return the first non-NULL value; if every
    argument is NULL, set null_value and return NULL.
  */
  DBUG_ASSERT(fixed == 1);
  null_value= 0;
  uint idx= 0;
  while (idx < arg_count)
  {
    my_decimal *value= args[idx]->val_decimal(decimal_value);
    if (!args[idx]->null_value)
      return value;                       // first non-NULL argument wins
    idx++;
  }
  null_value= 1;                          // every argument was NULL
  return 0;
}
0
384,677
/* Run the fixed-newstyle NBD option-haggling phase for the current
 * connection: read up to MAX_NR_OPTIONS client options, answer each one
 * (LIST, STARTTLS, INFO/GO, STRUCTURED_REPLY, *_META_CONTEXT, ...), and
 * stop when the client selects an export via NBD_OPT_EXPORT_NAME or
 * NBD_OPT_GO.  Returns 0 on success, -1 to drop the connection.
 */
negotiate_handshake_newstyle_options (void)
{
  GET_CONN;
  struct nbd_new_option new_option;
  size_t nr_options;               /* countdown guard against option floods */
  bool list_seen = false;          /* NBD_OPT_LIST may only succeed once */
  uint64_t version;
  uint32_t option;
  uint32_t optlen;
  struct nbd_export_name_option_reply handshake_finish;
  const char *optname;
  uint64_t exportsize;
  struct backend *b;

  for (nr_options = MAX_NR_OPTIONS; nr_options > 0; --nr_options) {
    CLEANUP_FREE char *data = NULL;

    if (conn_recv_full (&new_option, sizeof new_option,
                        "reading option: conn->recv: %m") == -1)
      return -1;

    version = be64toh (new_option.version);
    if (version != NBD_NEW_VERSION) {
      nbdkit_error ("unknown option version %" PRIx64
                    ", expecting %" PRIx64,
                    version, NBD_NEW_VERSION);
      return -1;
    }

    /* There is a maximum option length we will accept, regardless
     * of the option type.
     */
    optlen = be32toh (new_option.optlen);
    if (optlen > MAX_REQUEST_SIZE) {
      nbdkit_error ("client option data too long (%" PRIu32 ")", optlen);
      return -1;
    }
    data = malloc (optlen + 1); /* Allowing a trailing NUL helps some uses */
    if (data == NULL) {
      nbdkit_error ("malloc: %m");
      return -1;
    }

    option = be32toh (new_option.option);
    optname = name_of_nbd_opt (option);

    /* If the client lacks fixed newstyle support, it should only send
     * NBD_OPT_EXPORT_NAME.
     */
    if (!(conn->cflags & NBD_FLAG_FIXED_NEWSTYLE) &&
        option != NBD_OPT_EXPORT_NAME) {
      if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID))
        return -1;
      continue;
    }

    /* In --tls=require / FORCEDTLS mode the only options allowed
     * before TLS negotiation are NBD_OPT_ABORT and NBD_OPT_STARTTLS.
     */
    if (tls == 2 && !conn->using_tls &&
        !(option == NBD_OPT_ABORT || option == NBD_OPT_STARTTLS)) {
      if (send_newstyle_option_reply (option, NBD_REP_ERR_TLS_REQD))
        return -1;
      continue;
    }

    switch (option) {
    case NBD_OPT_EXPORT_NAME:
      if (conn_recv_full (data, optlen, "read: %s: %m",
                          name_of_nbd_opt (option)) == -1)
        return -1;
      if (check_export_name (option, data, optlen, optlen) == -1)
        return -1;
      /* We have to finish the handshake by sending handshake_finish.
       * On failure, we have to disconnect. */
      if (finish_newstyle_options (&exportsize, data, optlen) == -1)
        return -1;
      memset (&handshake_finish, 0, sizeof handshake_finish);
      handshake_finish.exportsize = htobe64 (exportsize);
      handshake_finish.eflags = htobe16 (conn->eflags);
      /* Old-style clients may negotiate away the trailing zero padding. */
      if (conn->send (&handshake_finish,
                      (conn->cflags & NBD_FLAG_NO_ZEROES)
                      ? offsetof (struct nbd_export_name_option_reply, zeroes)
                      : sizeof handshake_finish, 0) == -1) {
        nbdkit_error ("write: %s: %m", optname);
        return -1;
      }
      break;

    case NBD_OPT_ABORT:
      if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
        return -1;
      debug ("client sent %s to abort the connection",
             name_of_nbd_opt (option));
      return -1;

    case NBD_OPT_LIST:
      if (optlen != 0) {
        /* Payload is forbidden; drain it after rejecting. */
        if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
            == -1)
          return -1;
        if (conn_recv_full (data, optlen, "read: %s: %m",
                            name_of_nbd_opt (option)) == -1)
          return -1;
        continue;
      }
      if (list_seen) {
        debug ("newstyle negotiation: %s: export list already advertised",
               name_of_nbd_opt (option));
        if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID) == -1)
          return -1;
        continue;
      }
      else {
        /* Send back the exportname list. */
        debug ("newstyle negotiation: %s: advertising exports",
               name_of_nbd_opt (option));
        if (send_newstyle_option_reply_exportnames (option, &nr_options)
            == -1)
          return -1;
        list_seen = true;
      }
      break;

    case NBD_OPT_STARTTLS:
      if (optlen != 0) {
        if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
            == -1)
          return -1;
        if (conn_recv_full (data, optlen, "read: %s: %m",
                            name_of_nbd_opt (option)) == -1)
          return -1;
        continue;
      }
      if (tls == 0) {           /* --tls=off (NOTLS mode). */
#ifdef HAVE_GNUTLS
#define NO_TLS_REPLY NBD_REP_ERR_POLICY
#else
#define NO_TLS_REPLY NBD_REP_ERR_UNSUP
#endif
        if (send_newstyle_option_reply (option, NO_TLS_REPLY) == -1)
          return -1;
      }
      else /* --tls=on or --tls=require */ {
        /* We can't upgrade to TLS twice on the same connection. */
        if (conn->using_tls) {
          if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID) == -1)
            return -1;
          continue;
        }

        /* We have to send the (unencrypted) reply before starting
         * the handshake.
         */
        if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
          return -1;

        /* Upgrade the connection to TLS.  Also performs access control. */
        if (crypto_negotiate_tls (conn->sockin, conn->sockout) == -1)
          return -1;
        conn->using_tls = true;
        debug ("using TLS on this connection");

        /* Wipe out any cached state negotiated before encryption, so a
         * MITM cannot influence the post-TLS session. */
        conn->structured_replies = false;
        free (conn->exportname_from_set_meta_context);
        conn->exportname_from_set_meta_context = NULL;
        conn->meta_context_base_allocation = false;
        for_each_backend (b) {
          free (conn->default_exportname[b->i]);
          conn->default_exportname[b->i] = NULL;
        }
      }
      break;

    case NBD_OPT_INFO:
    case NBD_OPT_GO:
      if (conn_recv_full (data, optlen, "read: %s: %m", optname) == -1)
        return -1;

      if (optlen < 6) { /* 32 bit export length + 16 bit nr info */
        debug ("newstyle negotiation: %s option length < 6", optname);
        if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
            == -1)
          return -1;
        continue;
      }

      {
        uint32_t exportnamelen;
        uint16_t nrinfos;
        uint16_t info;
        size_t i;

        /* Validate the name length and number of INFO requests. */
        memcpy (&exportnamelen, &data[0], 4);
        exportnamelen = be32toh (exportnamelen);
        if (exportnamelen > optlen-6 /* NB optlen >= 6, see above */) {
          debug ("newstyle negotiation: %s: export name too long", optname);
          if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
              == -1)
            return -1;
          continue;
        }
        memcpy (&nrinfos, &data[exportnamelen+4], 2);
        nrinfos = be16toh (nrinfos);
        if (optlen != 4 + exportnamelen + 2 + 2*nrinfos) {
          debug ("newstyle negotiation: %s: "
                 "number of information requests incorrect", optname);
          if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
              == -1)
            return -1;
          continue;
        }

        /* As with NBD_OPT_EXPORT_NAME we print the export name and
         * save it in the connection.  If an earlier
         * NBD_OPT_SET_META_CONTEXT used an export name, it must match
         * or else we drop the support for that context.
         */
        if (check_export_name (option, &data[4], exportnamelen,
                               optlen - 6) == -1) {
          if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
              == -1)
            return -1;
          continue;
        }

        /* The spec is confusing, but it is required that we send back
         * NBD_INFO_EXPORT, even if the client did not request it!
         * qemu client in particular does not request this, but will
         * fail if we don't send it.  Note that if .open fails, but we
         * succeed at .close, then we merely return an error to the
         * client and let them try another NBD_OPT, rather than
         * disconnecting. */
        if (finish_newstyle_options (&exportsize,
                                     &data[4], exportnamelen) == -1) {
          if (conn->top_context) {
            if (backend_finalize (conn->top_context) == -1)
              return -1;
            backend_close (conn->top_context);
            conn->top_context = NULL;
          }
          if (send_newstyle_option_reply (option, NBD_REP_ERR_UNKNOWN) == -1)
            return -1;
          continue;
        }

        if (send_newstyle_option_reply_info_export (option,
                                                    NBD_REP_INFO,
                                                    NBD_INFO_EXPORT,
                                                    exportsize) == -1)
          return -1;

        /* For now we send NBD_INFO_NAME and NBD_INFO_DESCRIPTION if
         * requested, and ignore all other info requests (including
         * NBD_INFO_EXPORT if it was requested, because we replied
         * already above). */
        for (i = 0; i < nrinfos; ++i) {
          memcpy (&info, &data[4 + exportnamelen + 2 + i*2], 2);
          info = be16toh (info);
          switch (info) {
          case NBD_INFO_EXPORT: /* ignore - reply sent above */
            break;
          case NBD_INFO_NAME:
            {
              const char *name = &data[4];
              size_t namelen = exportnamelen;

              if (exportnamelen == 0) {
                name = backend_default_export (top, read_only);
                if (!name) {
                  debug ("newstyle negotiation: %s: "
                         "NBD_INFO_NAME: no name to send", optname);
                  break;
                }
                namelen = -1;   /* tells helper to use strlen (name) */
              }
              if (send_newstyle_option_reply_info_str (option,
                                                       NBD_REP_INFO,
                                                       NBD_INFO_NAME,
                                                       name, namelen) == -1)
                return -1;
            }
            break;
          case NBD_INFO_DESCRIPTION:
            {
              const char *desc =
                backend_export_description (conn->top_context);

              if (!desc) {
                debug ("newstyle negotiation: %s: "
                       "NBD_INFO_DESCRIPTION: no description to send",
                       optname);
                break;
              }
              if (send_newstyle_option_reply_info_str (option,
                                                       NBD_REP_INFO,
                                                       NBD_INFO_DESCRIPTION,
                                                       desc, -1) == -1)
                return -1;
            }
            break;
          default:
            debug ("newstyle negotiation: %s: "
                   "ignoring NBD_INFO_* request %u (%s)",
                   optname, (unsigned) info, name_of_nbd_info (info));
            break;
          }
        }
      }

      /* Unlike NBD_OPT_EXPORT_NAME, NBD_OPT_GO sends back an ACK
       * or ERROR packet.  If this was NBD_OPT_INFO, call .close.
       */
      if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
        return -1;
      if (option == NBD_OPT_INFO) {
        if (backend_finalize (conn->top_context) == -1)
          return -1;
        backend_close (conn->top_context);
        conn->top_context = NULL;
      }
      break;

    case NBD_OPT_STRUCTURED_REPLY:
      if (optlen != 0) {
        if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
            == -1)
          return -1;
        if (conn_recv_full (data, optlen, "read: %s: %m",
                            name_of_nbd_opt (option)) == -1)
          return -1;
        continue;
      }
      debug ("newstyle negotiation: %s: client requested structured replies",
             name_of_nbd_opt (option));
      if (no_sr) {
        /* Must fail with ERR_UNSUP for qemu 4.2 to remain happy;
         * but failing with ERR_POLICY would have been nicer.
         */
        if (send_newstyle_option_reply (option, NBD_REP_ERR_UNSUP) == -1)
          return -1;
        debug ("newstyle negotiation: %s: structured replies are disabled",
               name_of_nbd_opt (option));
        break;
      }
      if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
        return -1;
      conn->structured_replies = true;
      break;

    case NBD_OPT_LIST_META_CONTEXT:
    case NBD_OPT_SET_META_CONTEXT:
      {
        uint32_t opt_index;
        uint32_t exportnamelen;
        uint32_t nr_queries;
        uint32_t querylen;
        const char *what;

        if (conn_recv_full (data, optlen, "read: %s: %m", optname) == -1)
          return -1;

        /* Note that we support base:allocation whether or not the plugin
         * supports can_extents.
         */
        if (!conn->structured_replies) {
          if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
              == -1)
            return -1;
          continue;
        }

        /* Minimum length of the option payload is:
         *   32 bit export name length followed by empty export name
         * + 32 bit number of queries followed by no queries
         * = 8 bytes.
         */
        what = "optlen < 8";
        if (optlen < 8) {
        opt_meta_invalid_option_len:
          debug ("newstyle negotiation: %s: invalid option length: %s",
                 optname, what);
          if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
              == -1)
            return -1;
          continue;
        }

        memcpy (&exportnamelen, &data[0], 4);
        exportnamelen = be32toh (exportnamelen);
        what = "validating export name";
        if (check_export_name (option, &data[4], exportnamelen,
                               optlen - 8) == -1)
          goto opt_meta_invalid_option_len;

        /* Remember the export name: the NBD spec says that if the client
         * later uses NBD_OPT_GO on a different export, then the context
         * returned here is not usable.
         */
        if (option == NBD_OPT_SET_META_CONTEXT) {
          conn->exportname_from_set_meta_context =
            strndup (&data[4], exportnamelen);
          if (conn->exportname_from_set_meta_context == NULL) {
            nbdkit_error ("malloc: %m");
            return -1;
          }
        }

        opt_index = 4 + exportnamelen;

        /* Read the number of queries. */
        what = "reading number of queries";
        if (opt_index+4 > optlen)
          goto opt_meta_invalid_option_len;
        memcpy (&nr_queries, &data[opt_index], 4);
        nr_queries = be32toh (nr_queries);
        opt_index += 4;

        /* for LIST: nr_queries == 0 means return all meta contexts
         * for SET: nr_queries == 0 means reset all contexts
         */
        debug ("newstyle negotiation: %s: %s count: %d", optname,
               option == NBD_OPT_LIST_META_CONTEXT ? "query" : "set",
               nr_queries);
        if (option == NBD_OPT_SET_META_CONTEXT)
          conn->meta_context_base_allocation = false;
        if (nr_queries == 0) {
          if (option == NBD_OPT_LIST_META_CONTEXT) {
            if (send_newstyle_option_reply_meta_context
                (option, NBD_REP_META_CONTEXT,
                 0, "base:allocation") == -1)
              return -1;
          }
          if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
            return -1;
        }
        else {
          /* Read and answer each query. */
          while (nr_queries > 0) {
            what = "reading query string length";
            if (opt_index+4 > optlen)
              goto opt_meta_invalid_option_len;
            memcpy (&querylen, &data[opt_index], 4);
            querylen = be32toh (querylen);
            opt_index += 4;
            what = "reading query string";
            if (check_string (option, &data[opt_index], querylen,
                              optlen - opt_index, "meta context query") == -1)
              goto opt_meta_invalid_option_len;

            debug ("newstyle negotiation: %s: %s %.*s",
                   optname,
                   option == NBD_OPT_LIST_META_CONTEXT ? "query" : "set",
                   (int) querylen, &data[opt_index]);

            /* For LIST, "base:" returns all supported contexts in the
             * base namespace.  We only support "base:allocation".
             */
            if (option == NBD_OPT_LIST_META_CONTEXT &&
                querylen == 5 &&
                strncmp (&data[opt_index], "base:", 5) == 0) {
              if (send_newstyle_option_reply_meta_context
                  (option, NBD_REP_META_CONTEXT,
                   0, "base:allocation") == -1)
                return -1;
            }
            /* "base:allocation" requested by name. */
            else if (querylen == 15 &&
                     strncmp (&data[opt_index], "base:allocation",
                              15) == 0) {
              if (send_newstyle_option_reply_meta_context
                  (option, NBD_REP_META_CONTEXT,
                   option == NBD_OPT_SET_META_CONTEXT
                   ? base_allocation_id : 0,
                   "base:allocation") == -1)
                return -1;
              if (option == NBD_OPT_SET_META_CONTEXT)
                conn->meta_context_base_allocation = true;
            }
            /* Every other query must be ignored. */

            opt_index += querylen;
            nr_queries--;
          }
          if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
            return -1;
        }
        debug ("newstyle negotiation: %s: reply complete", optname);
      }
      break;

    default:
      /* Unknown option. */
      if (send_newstyle_option_reply (option, NBD_REP_ERR_UNSUP) == -1)
        return -1;
      if (conn_recv_full (data, optlen,
                          "reading unknown option data: conn->recv: %m")
          == -1)
        return -1;
    }

    /* Note, since it's not very clear from the protocol doc, that the
     * client must send NBD_OPT_EXPORT_NAME or NBD_OPT_GO last, and
     * that ends option negotiation.
     */
    if (option == NBD_OPT_EXPORT_NAME || option == NBD_OPT_GO)
      break;
  }

  if (nr_options == 0) {
    nbdkit_error ("client spent too much time negotiating without selecting "
                  "an export");
    return -1;
  }

  /* In --tls=require / FORCEDTLS mode, we must have upgraded to TLS
   * by the time we finish option negotiation.  If not, give up.
   */
  if (tls == 2 && !conn->using_tls) {
    nbdkit_error ("non-TLS client tried to connect in --tls=require mode");
    return -1;
  }

  return 0;
}
0
245,192
xb_mysql_numrows(MYSQL *connection, const char *query, bool die_on_error) { my_ulonglong rows_count = 0; MYSQL_RES *result = xb_mysql_query(connection, query, true, die_on_error); if (result) { rows_count = mysql_num_rows(result); mysql_free_result(result); } return rows_count; }
0
484,805
/* (Re)connect the xen-netfront device to its backend: read backend
 * features from xenstore, build the per-queue rings and event channels,
 * then publish the frontend's keys in a single xenbus transaction
 * (retried on -EAGAIN).  Returns 0 on success; on failure unwinds via
 * the labelled goto chain below and unregisters the device.
 */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	unsigned int feature_split_evtchn;
	unsigned int i = 0;
	unsigned int max_queues = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = 1;
	u8 addr[ETH_ALEN];

	info->netdev->irq = 0;

	/* Check if backend is trusted. */
	info->bounce = !xennet_trusted ||
		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);

	/* Check if backend supports multiple queues */
	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
					  "multi-queue-max-queues", 1);
	num_queues = min(max_queues, xennet_max_queues);

	/* Check feature-split-event-channels */
	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
					"feature-split-event-channels", 0);

	/* Read mac addr. */
	err = xen_net_read_mac(dev, addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out_unlocked;
	}
	eth_hw_addr_set(info->netdev, addr);

	info->netback_has_xdp_headroom =
		xenbus_read_unsigned(info->xbdev->otherend,
				     "feature-xdp-headroom", 0);
	if (info->netback_has_xdp_headroom) {
		/* set the current xen-netfront xdp state */
		err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
					  NETBACK_XDP_HEADROOM_ENABLE :
					  NETBACK_XDP_HEADROOM_DISABLE);
		if (err)
			goto out_unlocked;
	}

	/* Queue teardown/creation is done under rtnl_lock. */
	rtnl_lock();
	if (info->queues)
		xennet_destroy_queues(info);

	/* For the case of a reconnect reset the "broken" indicator. */
	info->broken = false;

	err = xennet_create_queues(info, &num_queues);
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "creating queues");
		kfree(info->queues);
		info->queues = NULL;
		goto out;
	}
	rtnl_unlock();

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err)
			goto destroy_ring;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	if (xenbus_exists(XBT_NIL,
			  info->xbdev->otherend, "multi-queue-max-queues")) {
		/* Write the number of queues */
		err = xenbus_printf(xbt, dev->nodename,
				    "multi-queue-num-queues", "%u",
				    num_queues);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction_no_dev_fatal;
		}
	}

	if (num_queues == 1) {
		err = write_queue_xenstore_keys(&info->queues[0],
						&xbt, 0); /* flat */
		if (err)
			goto abort_transaction_no_dev_fatal;
	} else {
		/* Write the keys for each queue */
		for (i = 0; i < num_queues; ++i) {
			queue = &info->queues[i];
			err = write_queue_xenstore_keys(queue, &xbt,
							1); /* hierarchical */
			if (err)
				goto abort_transaction_no_dev_fatal;
		}
	}

	/* The remaining keys are not queue-specific */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}
	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}
	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;	/* xenstore asked us to retry */
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
 abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);	/* 1 = abort */
 destroy_ring:
	xennet_disconnect_backend(info);
	rtnl_lock();
	xennet_destroy_queues(info);
 out:
	rtnl_unlock();
 out_unlocked:
	device_unregister(&dev->dev);
	return err;
}
0
483,485
/* Populate the generic efivars operations table from the firmware's EFI
 * runtime services and register it, exposing the variables under the EFI
 * kobject.  Returns the result of efivars_register().
 */
static int generic_ops_register(void)
{
	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.set_variable = efi.set_variable;
	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	generic_ops.query_variable_store = efi_query_variable_store;
	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}
0
401,577
/* Fill @buf with @nbytes of random data from the kernel CRNG.
 * warn_unseeded_randomness() is given the address of a per-call-site
 * static cookie -- presumably so a warning about use before the CRNG is
 * seeded is emitted once per caller; confirm against its definition.
 * The actual extraction is delegated to _get_random_bytes().
 */
void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
0
232,830
void Compute(OpKernelContext* const context) override {
    // Bucketize each float feature against its per-feature boundary
    // vector, producing one int32 bucket id per input value.  Features
    // are processed in parallel via Shard() on the CPU thread pool.

    // Read float features list;
    OpInputList float_features_list;
    OP_REQUIRES_OK(
        context, context->input_list(kFloatFeaturesName,
                                     &float_features_list));
    OpInputList bucket_boundaries_list;
    OP_REQUIRES_OK(context, context->input_list(kBucketBoundariesName,
                                                &bucket_boundaries_list));
    OP_REQUIRES(context,
                tensorflow::TensorShapeUtils::IsVector(
                    bucket_boundaries_list[0].shape()),
                errors::InvalidArgument(
                    strings::Printf("Buckets should be flat vectors.")));
    OpOutputList buckets_list;
    OP_REQUIRES_OK(context,
                   context->output_list(kBucketsName, &buckets_list));

    auto do_quantile_get_quantiles = [&](const int64_t begin,
                                         const int64_t end) {
      // Iterating over all resources
      for (int64_t feature_idx = begin; feature_idx < end; feature_idx++) {
        const Tensor& values_tensor = float_features_list[feature_idx];
        const int64_t num_values = values_tensor.dim_size(0);
        // One output bucket id per input value.
        Tensor* output_t = nullptr;
        OP_REQUIRES_OK(context,
                       buckets_list.allocate(feature_idx,
                                             TensorShape({num_values}),
                                             &output_t));
        auto output = output_t->flat<int32>();
        const std::vector<float>& bucket_boundaries_vector =
            GetBuckets(feature_idx, bucket_boundaries_list);
        auto flat_values = values_tensor.flat<float>();
        const auto& iter_begin = bucket_boundaries_vector.begin();
        const auto& iter_end = bucket_boundaries_vector.end();
        for (int64_t instance = 0; instance < num_values; instance++) {
          if (iter_begin == iter_end) {
            // No boundaries for this feature: everything falls in bucket 0.
            output(instance) = 0;
            continue;
          }
          const float value = flat_values(instance);
          // First boundary >= value; clamp past-the-end to the last bucket.
          auto bucket_iter = std::lower_bound(iter_begin, iter_end, value);
          if (bucket_iter == iter_end) {
            --bucket_iter;
          }
          const int32_t bucket =
              static_cast<int32>(bucket_iter - iter_begin);
          // Bucket id.
          output(instance) = bucket;
        }
      }
    };

    // TODO(tanzheny): comment on the magic number.
    const int64_t kCostPerUnit = 500 * num_features_;
    const DeviceBase::CpuWorkerThreads& worker_threads =
        *context->device()->tensorflow_cpu_worker_threads();
    Shard(worker_threads.num_threads, worker_threads.workers, num_features_,
          kCostPerUnit, do_quantile_get_quantiles);
}
0
231,799
TEST_F(QuicServerTransportTest, DestroyWithoutClosing) {
  // Destroying the server transport without an explicit close must not
  // fire connection-level callbacks, but must cancel the pending delivery
  // callback and error out the registered read callback.
  StreamId streamId = server->createBidirectionalStream().value();

  MockReadCallback readCb;
  server->setReadCallback(streamId, &readCb);

  // No connection callbacks may fire from plain destruction.
  EXPECT_CALL(connCallback, onConnectionError(_)).Times(0);
  EXPECT_CALL(connCallback, onConnectionEnd()).Times(0);

  MockDeliveryCallback deliveryCallback;
  auto write = IOBuf::copyBuffer("no");
  // EOF write with a delivery callback that will never be satisfied.
  server->writeChain(streamId, write->clone(), true, &deliveryCallback);

  // Teardown must cancel undelivered data and error the pending read.
  EXPECT_CALL(deliveryCallback, onCanceled(_, _));
  EXPECT_CALL(readCb, readError(_, _));

  server.reset();
}
0
274,632
/*
 * 'ctcompare()' - Compare two strings without leaking the position of a
 *                 mismatch through timing: every differing byte is OR'd
 *                 into the accumulator instead of short-circuiting.
 *
 * Fix: the definition lacked a return type (implicit int is invalid in
 * C99/C++); declare it explicitly.
 */
static int				/* O - 0 on match, non-zero otherwise */
ctcompare(const char *a,		/* I - First string */
          const char *b)		/* I - Second string */
{
  int	result = 0;			/* Result */


  while (*a && *b)
  {
    result |= *a ^ *b;
    a ++;
    b ++;
  }

  // either both *a and *b == '\0', or one points inside a string,
  // so factor that in.
  result |= (*a ^ *b);

  return (result);
}
0
432,248
/* Look up the vCPU with the given index.  Unicorn carries exactly one
 * vCPU per uc_struct, so this reduces to checking that single CPU's
 * index; returns NULL when it does not match.
 */
CPUState *qemu_get_cpu(struct uc_struct *uc, int index)
{
    CPUState *cpu = uc->cpu;

    return (cpu->cpu_index == index) ? cpu : NULL;
}
0
269,325
// Precompute the bookkeeping for a sparse reduction over `sp`: normalize
// the requested axes, derive the complementary group-by axes, the
// dimension permutation a subsequent Reorder() needs, and the output
// shape (with singleton dims kept or dropped per `keep_dims`).
ReduceDetails SparseTensorReduceHelper(const SparseTensor &sp,
                                       gtl::ArraySlice<int32> axes_slice,
                                       bool keep_dims) {
  ReduceDetails reduction;

  // Wrap negative axis indices into [0, ndims) and sort them.
  std::vector<int32> reduction_axes(axes_slice.begin(), axes_slice.end());
  int ndims = sp.dims();
  for (int64_t i = 0; i < reduction_axes.size(); ++i) {
    reduction_axes[i] = (reduction_axes[i] + ndims) % ndims;
  }
  std::sort(reduction_axes.begin(), reduction_axes.end());

  // (0) Calculate the grouping dimensions:
  // group_by_dims == {0, .., NDIMS-1} \ reduction_axes.
  std::vector<int64> perm(ndims);
  std::iota(perm.begin(), perm.end(), 0);

  // Requires perm and reduction_axes_ be sorted; group_by_dims will be
  // sorted as well.
  std::set_difference(
      perm.begin(), perm.end(), reduction_axes.begin(), reduction_axes.end(),
      std::inserter(reduction.group_by_dims,
                    reduction.group_by_dims.begin()));

  // Now append the rest of the axes (the complement of group_by_dims_);
  // result is used by Reorder().
  reduction.reorder_dims = reduction.group_by_dims;
  std::set_difference(perm.begin(), perm.end(),
                      reduction.group_by_dims.begin(),
                      reduction.group_by_dims.end(),
                      std::back_inserter(reduction.reorder_dims));

  // (1) Calculate the shape after reduction.
  auto sp_shape = sp.shape();
  std::vector<int64> out_dim_sizes;
  if (keep_dims) {
    out_dim_sizes.reserve(ndims);
    auto beg = reduction.group_by_dims.begin();
    auto end = reduction.group_by_dims.end();
    for (int d = 0; d < ndims; ++d) {
      if (std::find(beg, end, d) == end) {
        out_dim_sizes.push_back(1);  // A reduced axis.
      } else {
        out_dim_sizes.push_back(sp_shape[d]);
      }
    }
  } else {
    // Keep only the surviving (group-by) dimensions.
    out_dim_sizes = sp.PickDims(reduction.group_by_dims);
  }

  reduction.reduced_shape = TensorShape(out_dim_sizes);
  return reduction;
}
0
289,296
/* Fix up the OSS-emulation capture delay: query the ALSA delay and, if
 * an overrun left more captured data pending than the ring buffer holds,
 * skip whole periods forward (mirroring the OSS/Linux driver) until the
 * remaining delay fits in the buffer.  On success *delay holds the
 * corrected value; returns 0 or a negative ioctl error code.
 */
static int snd_pcm_oss_capture_position_fixup(struct snd_pcm_substream *substream, snd_pcm_sframes_t *delay)
{
	struct snd_pcm_runtime *runtime;
	snd_pcm_uframes_t frames;
	int err = 0;

	while (1) {
		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, delay);
		if (err < 0)
			break;
		runtime = substream->runtime;
		if (*delay <= (snd_pcm_sframes_t)runtime->buffer_size)
			break;
		/* in case of overrun, skip whole periods like OSS/Linux driver does */
		/* until avail(delay) <= buffer_size */
		/* Round the excess up to a whole number of periods. */
		frames = (*delay - runtime->buffer_size) + runtime->period_size - 1;
		frames /= runtime->period_size;
		frames *= runtime->period_size;
		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_FORWARD, &frames);
		if (err < 0)
			break;
	}
	return err;
}
0
512,572
Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
                           const Tmp_field_param *param)
{
  /*
    Create the temporary-table field for this item from its declared
    field type; the field is forced nullable only when the item is a
    literal NULL.

    We can get to here when using a CURSOR for a query with NAME_CONST():
      DECLARE c CURSOR FOR SELECT NAME_CONST('x','y') FROM t1;
      OPEN c;
  */
  return tmp_table_field_from_field_type_maybe_null(table, src, param,
                                            type() == Item::NULL_ITEM);
}
0
409,493
/*
 * Emit the terminal's T_VS and T_CVS escape sequences, but only when both
 * termcap entries are defined.  After sending them the cursor position is
 * no longer known, so the cached screen position is invalidated.
 */
scroll_start(void)
{
    if (*T_VS != NUL && *T_CVS != NUL)
    {
	MAY_WANT_TO_LOG_THIS;
	out_str(T_VS);
	out_str(T_CVS);
	screen_start();		// don't know where cursor is now
    }
}
0
450,429
/* Emit a Tight "fill" sub-rectangle: one pixel value that tiles the whole
 * rect.  24-bit-capable clients get a packed 3-byte sample, everyone else
 * gets client_pf.bytes_per_pixel raw bytes.  Always returns 1 (one
 * rectangle written).
 */
static int send_solid_rect(VncState *vs)
{
    size_t sample_len;

    vnc_write_u8(vs, VNC_TIGHT_FILL << 4); /* no flushing, no filter */

    if (!vs->tight->pixel24) {
        sample_len = vs->client_pf.bytes_per_pixel;
    } else {
        tight_pack24(vs, vs->tight->tight.buffer, 1, &vs->tight->tight.offset);
        sample_len = 3;
    }

    vnc_write(vs, vs->tight->tight.buffer, sample_len);
    return 1;
}
0
226,345
/* Serialize a TrackExtends ('trex') box to the bitstream: full-box
 * header, track ID, default sample description index / duration / size /
 * flags.  Returns GF_BAD_PARAM on a NULL box, any error from the header
 * write, else GF_OK.
 *
 * Fix: the function body was missing its closing brace, leaving the
 * definition unbalanced.
 */
GF_Err trex_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *) s;
	if (!s) return GF_BAD_PARAM;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_u32(bs, ptr->trackID);
	//we always write 1 in trex default sample desc as using 0 breaks chrome/opera/...
	gf_bs_write_u32(bs, ptr->def_sample_desc_index ? ptr->def_sample_desc_index : 1);
	gf_bs_write_u32(bs, ptr->def_sample_duration);
	gf_bs_write_u32(bs, ptr->def_sample_size);
	gf_bs_write_u32(bs, ptr->def_sample_flags);
	return GF_OK;
}
0
503,871
/* Guile primitive (lstat STR): like stat(2) but does not follow symbolic
 * links, so it reports on the link itself.  On failure a system error is
 * raised carrying the strerror text and the offending filename.
 */
SCM_DEFINE (scm_lstat, "lstat", 1, 0, 0,
            (SCM str),
	    "Similar to @code{stat}, but does not follow symbolic links, i.e.,\n"
	    "it will return information about a symbolic link itself, not the\n"
	    "file it points to. @var{str} must be a string.")
#define FUNC_NAME s_scm_lstat
{
  int rv;
  struct stat_or_stat64 stat_temp;   /* 64-bit variant where available */

  /* STRING_SYSCALL converts STR to a C string and runs the syscall. */
  STRING_SYSCALL (str, c_str, rv = lstat_or_lstat64 (c_str, &stat_temp));
  if (rv != 0)
    {
      int en = errno;   /* capture errno before anything can clobber it */

      SCM_SYSERROR_MSG ("~A: ~S",
			scm_list_2 (scm_strerror (scm_from_int (en)), str),
			en);
    }
  return scm_stat2scm (&stat_temp);
}
0
261,217
/* Wait up to timeout_ms for an MQTT-SN message on the client's default
 * message object; thin wrapper over SN_Client_WaitMessage_ex. */
int SN_Client_WaitMessage(MqttClient *client, int timeout_ms)
{
    return (client == NULL)
        ? MQTT_CODE_ERROR_BAD_ARG
        : SN_Client_WaitMessage_ex(client, &client->msgSN, timeout_ms);
}
0
224,527
// Shape-inference function for Conv3D.
//
// Validates that input and filter are rank-5, that "strides" and
// "dilations" each hold 5 values, that the channel counts of input and
// filter agree (including grouped convolution), and then computes the
// output shape for the requested padding in either NDHWC or NCDHW layout.
//
// Fix: dilation_rows/dilation_cols were read from swapped "dilations"
// indices in both layout branches (rows took the W dilation and cols the
// H dilation), so asymmetric dilations produced wrong output H/W dims.
// Strides already used the correct indices; dilations now match them.
Status Conv3DShape(shape_inference::InferenceContext* c) {
  ShapeHandle input_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 5, &input_shape));
  ShapeHandle filter_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 5, &filter_shape));

  string data_format;
  Status s = c->GetAttr("data_format", &data_format);

  std::vector<int32> dilations;
  TF_RETURN_IF_ERROR(c->GetAttr("dilations", &dilations));
  if (dilations.size() != 5) {
    return errors::InvalidArgument(
        "Conv3D requires the dilation attribute to contain 5 values, but got: ",
        dilations.size());
  }

  std::vector<int32> strides;
  TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
  if (strides.size() != 5) {
    return errors::InvalidArgument(
        "Conv3D requires the stride attribute to contain 5 values, but got: ",
        strides.size());
  }

  int32_t stride_planes, stride_rows, stride_cols;
  int32_t dilation_planes, dilation_rows, dilation_cols;
  if (s.ok() && data_format == "NCDHW") {
    // Convert input_shape to NDHWC.
    auto dim = [&](char dimension) {
      return c->Dim(input_shape, GetTensorDimIndex<3>(FORMAT_NCHW, dimension));
    };
    input_shape =
        c->MakeShape({{dim('N'), dim('0'), dim('1'), dim('2'), dim('C')}});
    // NCDHW layout: index 2 = D (planes), 3 = H (rows), 4 = W (cols).
    stride_planes = strides[2];
    stride_rows = strides[3];
    stride_cols = strides[4];
    dilation_planes = dilations[2];
    dilation_rows = dilations[3];  // was dilations[4] (swapped with cols)
    dilation_cols = dilations[4];  // was dilations[3]
  } else {
    // NDHWC layout: index 1 = D (planes), 2 = H (rows), 3 = W (cols).
    stride_planes = strides[1];
    stride_rows = strides[2];
    stride_cols = strides[3];
    dilation_planes = dilations[1];
    dilation_rows = dilations[2];  // was dilations[3] (swapped with cols)
    dilation_cols = dilations[3];  // was dilations[2]
  }

  DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
  DimensionHandle in_planes_dim = c->Dim(input_shape, 1);
  DimensionHandle in_rows_dim = c->Dim(input_shape, 2);
  DimensionHandle in_cols_dim = c->Dim(input_shape, 3);
  DimensionHandle input_depth_dim = c->Dim(input_shape, 4);

  DimensionHandle filter_planes_dim = c->Dim(filter_shape, 0);
  DimensionHandle filter_rows_dim = c->Dim(filter_shape, 1);
  DimensionHandle filter_cols_dim = c->Dim(filter_shape, 2);
  DimensionHandle filter_input_depth_dim = c->Dim(filter_shape, 3);
  DimensionHandle output_depth_dim = c->Dim(filter_shape, 4);

  // Check that the input tensor and the filter tensor agree on the channel
  // count.
  if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
    int64_t input_depth_value = c->Value(input_depth_dim),
            filter_input_depth_value = c->Value(filter_input_depth_dim);
    if (filter_input_depth_value == 0)
      return errors::InvalidArgument("Depth of filter must not be 0");
    if (input_depth_value % filter_input_depth_value != 0)
      return errors::InvalidArgument(
          "Depth of input (", input_depth_value,
          ") is not a multiple of input depth of filter (",
          filter_input_depth_value, ")");
    if (input_depth_value != filter_input_depth_value) {
      // Grouped convolution: the output channel count must split evenly
      // over the groups.
      int64_t num_groups = input_depth_value / filter_input_depth_value;
      if (c->ValueKnown(output_depth_dim)) {
        int64_t output_depth_value = c->Value(output_depth_dim);
        if (num_groups == 0)
          return errors::InvalidArgument("Number of groups must not be 0");
        if (output_depth_value % num_groups != 0)
          return errors::InvalidArgument(
              "Depth of output (", output_depth_value,
              ") is not a multiple of the number of groups (", num_groups, ")");
      }
    }
  }

  Padding padding;
  TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
  DimensionHandle output_planes, output_rows, output_cols;

  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_planes_dim, filter_planes_dim, dilation_planes, stride_planes,
      padding, -1, -1, &output_planes));
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_rows_dim, filter_rows_dim, dilation_rows, stride_rows, padding, -1,
      -1, &output_rows));
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_cols_dim, filter_cols_dim, dilation_cols, stride_cols, padding, -1,
      -1, &output_cols));

  ShapeHandle output_shape;
  if (data_format == "NCDHW") {
    output_shape = c->MakeShape({batch_size_dim, output_depth_dim,
                                 output_planes, output_rows, output_cols});
  } else {
    output_shape = c->MakeShape({batch_size_dim, output_planes, output_rows,
                                 output_cols, output_depth_dim});
  }
  c->set_output(0, output_shape);
  return Status::OK();
}
0
333,068
/*
 * Move *colp forward to the first occurrence of character "c" in the
 * current regexec line.  Returns OK and updates *colp on a match,
 * FAIL when "c" does not occur.
 */
    skip_to_start(int c, colnr_T *colp)
{
    char_u	*match;

    // Called often; pick the cheap byte search when it is valid.
    if (rex.reg_ic || has_mbyte)
	match = cstrchr(rex.line + *colp, c);
    else
	match = vim_strbyte(rex.line + *colp, c);

    if (match == NULL)
	return FAIL;
    *colp = (int)(match - rex.line);
    return OK;
}
0
279,950
/*
 * Filter lines "line1" to "line2" of the current buffer through the shell
 * command "cmd".  Depending on "do_in"/"do_out" the lines are written to a
 * temp file (or pipe, with FEAT_FILTERPIPE) as the command's stdin, and the
 * command's stdout is read back into the buffer, replacing the filtered
 * range.  "eap" carries forced 'ff'/'fenc' for the temp-file I/O.
 */
    do_filter(
    linenr_T	line1,
    linenr_T	line2,
    exarg_T	*eap,		// for forced 'ff' and 'fenc'
    char_u	*cmd,
    int		do_in,
    int		do_out)
{
    char_u	*itmp = NULL;		// stdin temp file, when used
    char_u	*otmp = NULL;		// stdout temp file, when used
    linenr_T	linecount;		// number of lines being filtered
    linenr_T	read_linecount;
    pos_T	cursor_save;
    char_u	*cmd_buf;
    buf_T	*old_curbuf = curbuf;	// detect autocmds switching buffers
    int		shell_flags = 0;
    pos_T	orig_start = curbuf->b_op_start;
    pos_T	orig_end = curbuf->b_op_end;
    int		save_cmod_flags = cmdmod.cmod_flags;
#ifdef FEAT_FILTERPIPE
    int		stmp = p_stmp;
#endif

    if (*cmd == NUL)	    // no filter command
	return;

    // Temporarily disable lockmarks since that's needed to propagate changed
    // regions of the buffer for foldUpdate(), linecount, etc.
    cmdmod.cmod_flags &= ~CMOD_LOCKMARKS;

    cursor_save = curwin->w_cursor;
    linecount = line2 - line1 + 1;
    curwin->w_cursor.lnum = line1;
    curwin->w_cursor.col = 0;
    changed_line_abv_curs();
    invalidate_botline();

    /*
     * When using temp files:
     * 1. * Form temp file names
     * 2. * Write the lines to a temp file
     * 3.   Run the filter command on the temp file
     * 4. * Read the output of the command into the buffer
     * 5. * Delete the original lines to be filtered
     * 6. * Remove the temp files
     *
     * When writing the input with a pipe or when catching the output with a
     * pipe only need to do 3.
     */

    if (do_out)
	shell_flags |= SHELL_DOOUT;

#ifdef FEAT_FILTERPIPE
# ifdef VIMDLL
    if (!gui.in_use && !gui.starting)
	stmp = 1;   // Console mode doesn't support filterpipe.
# endif
    if (!do_in && do_out && !stmp)
    {
	// Use a pipe to fetch stdout of the command, do not use a temp file.
	shell_flags |= SHELL_READ;
	curwin->w_cursor.lnum = line2;
    }
    else if (do_in && !do_out && !stmp)
    {
	// Use a pipe to write stdin of the command, do not use a temp file.
	shell_flags |= SHELL_WRITE;
	curbuf->b_op_start.lnum = line1;
	curbuf->b_op_end.lnum = line2;
    }
    else if (do_in && do_out && !stmp)
    {
	// Use a pipe to write stdin and fetch stdout of the command, do not
	// use a temp file.
	shell_flags |= SHELL_READ|SHELL_WRITE;
	curbuf->b_op_start.lnum = line1;
	curbuf->b_op_end.lnum = line2;
	curwin->w_cursor.lnum = line2;
    }
    else
#endif
	if ((do_in && (itmp = vim_tempname('i', FALSE)) == NULL)
		|| (do_out && (otmp = vim_tempname('o', FALSE)) == NULL))
	{
	    emsg(_(e_cant_get_temp_file_name));
	    goto filterend;
	}

    /*
     * The writing and reading of temp files will not be shown.
     * Vi also doesn't do this and the messages are not very informative.
     */
    ++no_wait_return;		// don't call wait_return() while busy
    if (itmp != NULL && buf_write(curbuf, itmp, NULL, line1, line2, eap,
					  FALSE, FALSE, FALSE, TRUE) == FAIL)
    {
	msg_putchar('\n');		// keep message from buf_write()
	--no_wait_return;
#if defined(FEAT_EVAL)
	if (!aborting())
#endif
	    (void)semsg(_(e_cant_create_file_str), itmp); // will call wait_return
	goto filterend;
    }
    if (curbuf != old_curbuf)	// autocmd changed buffer: abort
	goto filterend;

    if (!do_out)
	msg_putchar('\n');

    // Create the shell command in allocated memory.
    cmd_buf = make_filter_cmd(cmd, itmp, otmp);
    if (cmd_buf == NULL)
	goto filterend;

    windgoto((int)Rows - 1, 0);
    cursor_on();

    /*
     * When not redirecting the output the command can write anything to the
     * screen. If 'shellredir' is equal to ">", screen may be messed up by
     * stderr output of external command. Clear the screen later.
     * If do_in is FALSE, this could be something like ":r !cat", which may
     * also mess up the screen, clear it later.
     */
    if (!do_out || STRCMP(p_srr, ">") == 0 || !do_in)
	redraw_later_clear();

    if (do_out)
    {
	// save for undo before the output replaces the range
	if (u_save((linenr_T)(line2), (linenr_T)(line2 + 1)) == FAIL)
	{
	    vim_free(cmd_buf);
	    goto error;
	}
	redraw_curbuf_later(VALID);
    }
    read_linecount = curbuf->b_ml.ml_line_count;

    /*
     * When call_shell() fails wait_return() is called to give the user a
     * chance to read the error messages. Otherwise errors are ignored, so you
     * can see the error messages from the command that appear on stdout; use
     * 'u' to fix the text
     * Switch to cooked mode when not redirecting stdin, avoids that something
     * like ":r !cat" hangs.
     * Pass on the SHELL_DOOUT flag when the output is being redirected.
     */
    if (call_shell(cmd_buf, SHELL_FILTER | SHELL_COOKED | shell_flags))
    {
	redraw_later_clear();
	wait_return(FALSE);
    }
    vim_free(cmd_buf);

    did_check_timestamps = FALSE;
    need_check_timestamps = TRUE;

    // When interrupting the shell command, it may still have produced some
    // useful output. Reset got_int here, so that readfile() won't cancel
    // reading.
    ui_breakcheck();
    got_int = FALSE;

    if (do_out)
    {
	if (otmp != NULL)
	{
	    if (readfile(otmp, NULL, line2, (linenr_T)0, (linenr_T)MAXLNUM,
						    eap, READ_FILTER) != OK)
	    {
#if defined(FEAT_EVAL)
		if (!aborting())
#endif
		{
		    msg_putchar('\n');
		    semsg(_(e_cant_read_file_str), otmp);
		}
		goto error;
	    }
	    if (curbuf != old_curbuf)
		goto filterend;
	}

	read_linecount = curbuf->b_ml.ml_line_count - read_linecount;

	if (shell_flags & SHELL_READ)
	{
	    curbuf->b_op_start.lnum = line2 + 1;
	    curbuf->b_op_end.lnum = curwin->w_cursor.lnum;
	    appended_lines_mark(line2, read_linecount);
	}

	if (do_in)
	{
	    if ((cmdmod.cmod_flags & CMOD_KEEPMARKS)
				     || vim_strchr(p_cpo, CPO_REMMARK) == NULL)
	    {
		if (read_linecount >= linecount)
		    // move all marks from old lines to new lines
		    mark_adjust(line1, line2, linecount, 0L);
		else if (save_cmod_flags & CMOD_LOCKMARKS)
		{
		    // Move marks from the lines below the new lines down by
		    // the number of lines lost.
		    // Move marks from the lines that will be deleted to the
		    // new lines and below.
		    mark_adjust(line2 + 1, (linenr_T)MAXLNUM,
					       linecount - read_linecount, 0L);
		    mark_adjust(line1, line2, linecount, 0L);
		}
		else
		{
		    // move marks from old lines to new lines, delete marks
		    // that are in deleted lines
		    mark_adjust(line1, line1 + read_linecount - 1,
								linecount, 0L);
		    mark_adjust(line1 + read_linecount, line2, MAXLNUM, 0L);
		}
	    }

	    /*
	     * Put cursor on first filtered line for ":range!cmd".
	     * Adjust '[ and '] (set by buf_write()).
	     */
	    curwin->w_cursor.lnum = line1;
	    del_lines(linecount, TRUE);
	    curbuf->b_op_start.lnum -= linecount;	// adjust '[
	    curbuf->b_op_end.lnum -= linecount;		// adjust ']
	    write_lnum_adjust(-linecount);		// adjust last line
							// for next write
#ifdef FEAT_FOLDING
	    foldUpdate(curwin, curbuf->b_op_start.lnum, curbuf->b_op_end.lnum);
#endif
	}
	else
	{
	    /*
	     * Put cursor on last new line for ":r !cmd".
	     */
	    linecount = curbuf->b_op_end.lnum - curbuf->b_op_start.lnum + 1;
	    curwin->w_cursor.lnum = curbuf->b_op_end.lnum;
	}

	beginline(BL_WHITE | BL_FIX);	    // cursor on first non-blank
	--no_wait_return;

	if (linecount > p_report)
	{
	    if (do_in)
	    {
		vim_snprintf(msg_buf, sizeof(msg_buf),
				    _("%ld lines filtered"), (long)linecount);
		if (msg(msg_buf) && !msg_scroll)
		    // save message to display it after redraw
		    set_keep_msg((char_u *)msg_buf, 0);
	    }
	    else
		msgmore((long)linecount);
	}
    }
    else
    {
error:
	// put cursor back in same position for ":w !cmd"
	curwin->w_cursor = cursor_save;
	--no_wait_return;
	wait_return(FALSE);
    }

filterend:
    cmdmod.cmod_flags = save_cmod_flags;
    if (curbuf != old_curbuf)
    {
	--no_wait_return;
	emsg(_(e_filter_autocommands_must_not_change_current_buffer));
    }
    else if (cmdmod.cmod_flags & CMOD_LOCKMARKS)
    {
	curbuf->b_op_start = orig_start;
	curbuf->b_op_end = orig_end;
    }
    if (itmp != NULL)
	mch_remove(itmp);
    if (otmp != NULL)
	mch_remove(otmp);
    vim_free(itmp);
    vim_free(otmp);
}
0
240,609
// Copies the contents of tensor *t into output "output_idx" of "ctx".
// DT_VARIANT tensors are allocated on the host and copied element-wise;
// otherwise, when a device context is available the copy is done
// asynchronously on-device (blocking until completion), and as a last
// resort a dtype-dispatched flat copy is performed on the host.
Status CopyVariable(int output_idx, OpKernelContext* ctx, const Tensor* t) {
  Tensor* output;
  Notification n;       // signals completion of the async device copy
  Status status;
  AllocatorAttributes attr;
  if (t->dtype() == DT_VARIANT) {
    // Variants must live in host memory.
    attr.set_on_host(true);
  }
  TF_RETURN_IF_ERROR(
      ctx->allocate_output(output_idx, t->shape(), &output, attr));
  if (t->dtype() == DT_VARIANT) {
    output->flat<Variant>() = t->flat<Variant>();
  } else if (ctx->op_device_context() != nullptr) {
    // TODO(apassos): remove the down_cast by just returning Device* from
    // OpKernelContext
    Device* device = down_cast<Device*>(ctx->device());
    ctx->op_device_context()->CopyTensorInSameDevice(
        t, device, output, [&n, &status](const Status& s) {
          status = s;
          n.Notify();
        });
    // Block until the device copy finishes, then report its status.
    n.WaitForNotification();
    return status;
  } else {
    // Host fallback: element-wise copy dispatched on the dtype.
    switch (t->dtype()) {
#define HANDLER(type)                       \
  case DataTypeToEnum<type>::value:         \
    output->flat<type>() = t->flat<type>(); \
    break;
      TF_CALL_ALL_TYPES(HANDLER);
#undef HANDLER
      default:
        return errors::Internal("Unsupported dtype", t->dtype());
    }
  }
  return Status::OK();
}
0
246,491
/* Return (and cache in bin->g_names) the entries of the first custom
 * section named "name".  When no such section exists, a fresh empty list
 * is returned instead. */
RList *r_bin_wasm_get_custom_names (RBinWasmObj *bin) {
	r_return_val_if_fail (bin && bin->g_sections, NULL);
	if (bin->g_names) {
		return bin->g_names;
	}
	RList *customs = r_bin_wasm_get_sections_by_id (bin->g_sections,
		R_BIN_WASM_SECTION_CUSTOM);
	if (!customs) {
		return r_list_new ();
	}
	// support for multiple "name" sections against spec
	RBinWasmSection *cust = (RBinWasmSection *)r_list_first (customs);
	if (!cust || !cust->name || strcmp (cust->name, "name")) {
		r_list_free (customs);
		return r_list_new ();
	}
	bin->g_names = r_bin_wasm_get_custom_name_entries (bin, cust);
	r_list_free (customs);
	return bin->g_names;
}
0
211,839
/*
 * Implements buffer commands: locate a buffer according to "start"
 * (DOBUF_FIRST/LAST/MOD or current), "dir" and "count", then either
 * unload/delete/wipe it (DOBUF_UNLOAD/DEL/WIPE/WIPE_REUSE), split a window
 * for it (DOBUF_SPLIT), or simply make it the current buffer.  When the
 * buffer being deleted is the current one, another buffer is chosen to
 * become current.  Returns OK or FAIL.
 */
    do_buffer_ext(
    int		action,
    int		start,
    int		dir,		// FORWARD or BACKWARD
    int		count,		// buffer number or number of buffers
    int		flags)		// DOBUF_FORCEIT etc.
{
    buf_T	*buf;
    buf_T	*bp;
    int		unload = (action == DOBUF_UNLOAD || action == DOBUF_DEL
			|| action == DOBUF_WIPE || action == DOBUF_WIPE_REUSE);

    // Pick the buffer where the search starts.
    switch (start)
    {
	case DOBUF_FIRST:   buf = firstbuf; break;
	case DOBUF_LAST:    buf = lastbuf;  break;
	default:	    buf = curbuf;   break;
    }
    if (start == DOBUF_MOD)	    // find next modified buffer
    {
	while (count-- > 0)
	{
	    do
	    {
		buf = buf->b_next;
		if (buf == NULL)
		    buf = firstbuf;
	    } while (buf != curbuf && !bufIsChanged(buf));
	}
	if (!bufIsChanged(buf))
	{
	    emsg(_(e_no_modified_buffer_found));
	    return FAIL;
	}
    }
    else if (start == DOBUF_FIRST && count) // find specified buffer number
    {
	while (buf != NULL && buf->b_fnum != count)
	    buf = buf->b_next;
    }
    else
    {
	// Walk "count" (listed) buffers in direction "dir", wrapping around.
	bp = NULL;
	while (count > 0 || (!unload && !buf->b_p_bl && bp != buf))
	{
	    // remember the buffer where we start, we come back there when all
	    // buffers are unlisted.
	    if (bp == NULL)
		bp = buf;
	    if (dir == FORWARD)
	    {
		buf = buf->b_next;
		if (buf == NULL)
		    buf = firstbuf;
	    }
	    else
	    {
		buf = buf->b_prev;
		if (buf == NULL)
		    buf = lastbuf;
	    }
	    // don't count unlisted buffers
	    if (unload || buf->b_p_bl)
	    {
		--count;
		bp = NULL;	// use this buffer as new starting point
	    }
	    if (bp == buf)
	    {
		// back where we started, didn't find anything.
		emsg(_(e_there_is_no_listed_buffer));
		return FAIL;
	    }
	}
    }

    if (buf == NULL)	    // could not find it
    {
	if (start == DOBUF_FIRST)
	{
	    // don't warn when deleting
	    if (!unload)
		semsg(_(e_buffer_nr_does_not_exist), count);
	}
	else if (dir == FORWARD)
	    emsg(_(e_cannot_go_beyond_last_buffer));
	else
	    emsg(_(e_cannot_go_before_first_buffer));
	return FAIL;
    }
#ifdef FEAT_PROP_POPUP
    // Silently refuse to switch to a popup buffer when asked to skip them.
    if ((flags & DOBUF_NOPOPUP) && bt_popup(buf)
# ifdef FEAT_TERMINAL
	    && !bt_terminal(buf)
#endif
       )
	return OK;
#endif
#ifdef FEAT_GUI
    need_mouse_correct = TRUE;
#endif

    /*
     * delete buffer "buf" from memory and/or the list
     */
    if (unload)
    {
	int	forward;
	bufref_T bufref;

	if (!can_unload_buffer(buf))
	    return FAIL;

	set_bufref(&bufref, buf);

	// When unloading or deleting a buffer that's already unloaded and
	// unlisted: fail silently.
	if (action != DOBUF_WIPE && action != DOBUF_WIPE_REUSE
		&& buf->b_ml.ml_mfp == NULL && !buf->b_p_bl)
	    return FAIL;

	if ((flags & DOBUF_FORCEIT) == 0 && bufIsChanged(buf))
	{
#if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG)
	    if ((p_confirm || (cmdmod.cmod_flags & CMOD_CONFIRM)) && p_write)
	    {
		dialog_changed(buf, FALSE);
		if (!bufref_valid(&bufref))
		    // Autocommand deleted buffer, oops! It's not changed
		    // now.
		    return FAIL;
		// If it's still changed fail silently, the dialog already
		// mentioned why it fails.
		if (bufIsChanged(buf))
		    return FAIL;
	    }
	    else
#endif
	    {
		semsg(_(e_no_write_since_last_change_for_buffer_nr_add_bang_to_override),
								 buf->b_fnum);
		return FAIL;
	    }
	}

	// When closing the current buffer stop Visual mode.
	if (buf == curbuf && VIsual_active)
	    end_visual_mode();

	// If deleting the last (listed) buffer, make it empty.
	// The last (listed) buffer cannot be unloaded.
	FOR_ALL_BUFFERS(bp)
	    if (bp->b_p_bl && bp != buf)
		break;
	if (bp == NULL && buf == curbuf)
	    return empty_curbuf(TRUE, (flags & DOBUF_FORCEIT), action);

	// If the deleted buffer is the current one, close the current window
	// (unless it's the only window). Repeat this so long as we end up in
	// a window with this buffer.
	while (buf == curbuf
		   && !(curwin->w_closing || curwin->w_buffer->b_locked > 0)
		   && (!ONE_WINDOW || first_tabpage->tp_next != NULL))
	{
	    if (win_close(curwin, FALSE) == FAIL)
		break;
	}

	// If the buffer to be deleted is not the current one, delete it here.
	if (buf != curbuf)
	{
	    close_windows(buf, FALSE);
	    if (buf != curbuf && bufref_valid(&bufref) && buf->b_nwindows <= 0)
		close_buffer(NULL, buf, action, FALSE, FALSE);
	    return OK;
	}

	/*
	 * Deleting the current buffer: Need to find another buffer to go to.
	 * There should be another, otherwise it would have been handled
	 * above. However, autocommands may have deleted all buffers.
	 * First use au_new_curbuf.br_buf, if it is valid.
	 * Then prefer the buffer we most recently visited.
	 * Else try to find one that is loaded, after the current buffer,
	 * then before the current buffer.
	 * Finally use any buffer.
	 */
	buf = NULL;	// selected buffer
	bp = NULL;	// used when no loaded buffer found

	if (au_new_curbuf.br_buf != NULL && bufref_valid(&au_new_curbuf))
	    buf = au_new_curbuf.br_buf;
	else if (curwin->w_jumplistlen > 0)
	{
	    // Walk the jump list backwards looking for a usable buffer.
	    int jumpidx;

	    jumpidx = curwin->w_jumplistidx - 1;
	    if (jumpidx < 0)
		jumpidx = curwin->w_jumplistlen - 1;

	    forward = jumpidx;
	    while (jumpidx != curwin->w_jumplistidx)
	    {
		buf = buflist_findnr(curwin->w_jumplist[jumpidx].fmark.fnum);
		if (buf != NULL)
		{
		    if (buf == curbuf || !buf->b_p_bl)
			buf = NULL;	// skip current and unlisted bufs
		    else if (buf->b_ml.ml_mfp == NULL)
		    {
			// skip unloaded buf, but may keep it for later
			if (bp == NULL)
			    bp = buf;
			buf = NULL;
		    }
		}
		if (buf != NULL)   // found a valid buffer: stop searching
		    break;
		// advance to older entry in jump list
		if (!jumpidx && curwin->w_jumplistidx == curwin->w_jumplistlen)
		    break;
		if (--jumpidx < 0)
		    jumpidx = curwin->w_jumplistlen - 1;
		if (jumpidx == forward)		// List exhausted for sure
		    break;
	    }
	}

	if (buf == NULL)	// No previous buffer, Try 2'nd approach
	{
	    // Scan forward from curbuf, then backward, preferring loaded
	    // listed buffers of the same kind (help vs non-help).
	    forward = TRUE;
	    buf = curbuf->b_next;
	    for (;;)
	    {
		if (buf == NULL)
		{
		    if (!forward)	// tried both directions
			break;
		    buf = curbuf->b_prev;
		    forward = FALSE;
		    continue;
		}
		// in non-help buffer, try to skip help buffers, and vv
		if (buf->b_help == curbuf->b_help && buf->b_p_bl)
		{
		    if (buf->b_ml.ml_mfp != NULL)   // found loaded buffer
			break;
		    if (bp == NULL)	// remember unloaded buf for later
			bp = buf;
		}
		if (forward)
		    buf = buf->b_next;
		else
		    buf = buf->b_prev;
	    }
	}
	if (buf == NULL)	// No loaded buffer, use unloaded one
	    buf = bp;
	if (buf == NULL)	// No loaded buffer, find listed one
	{
	    FOR_ALL_BUFFERS(buf)
		if (buf->b_p_bl && buf != curbuf)
		    break;
	}
	if (buf == NULL)	// Still no buffer, just take one
	{
	    if (curbuf->b_next != NULL)
		buf = curbuf->b_next;
	    else
		buf = curbuf->b_prev;
	}
    }

    if (buf == NULL)
    {
	// Autocommands must have wiped out all other buffers. Only option
	// now is to make the current buffer empty.
	return empty_curbuf(FALSE, (flags & DOBUF_FORCEIT), action);
    }

    /*
     * make "buf" the current buffer
     */
    if (action == DOBUF_SPLIT)	    // split window first
    {
	// If 'switchbuf' contains "useopen": jump to first window containing
	// "buf" if one exists
	if ((swb_flags & SWB_USEOPEN) && buf_jump_open_win(buf))
	    return OK;
	// If 'switchbuf' contains "usetab": jump to first window in any tab
	// page containing "buf" if one exists
	if ((swb_flags & SWB_USETAB) && buf_jump_open_tab(buf))
	    return OK;
	if (win_split(0, 0) == FAIL)
	    return FAIL;
    }

    // go to current buffer - nothing to do
    if (buf == curbuf)
	return OK;

    // Check if the current buffer may be abandoned.
    if (action == DOBUF_GOTO && !can_abandon(curbuf, (flags & DOBUF_FORCEIT)))
    {
#if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG)
	if ((p_confirm || (cmdmod.cmod_flags & CMOD_CONFIRM)) && p_write)
	{
	    bufref_T bufref;

	    set_bufref(&bufref, buf);
	    dialog_changed(curbuf, FALSE);
	    if (!bufref_valid(&bufref))
		// Autocommand deleted buffer, oops!
		return FAIL;
	}
	if (bufIsChanged(curbuf))
#endif
	{
	    no_write_message();
	    return FAIL;
	}
    }

    // Go to the other buffer.
    set_curbuf(buf, action);

    if (action == DOBUF_SPLIT)
	RESET_BINDING(curwin);	// reset 'scrollbind' and 'cursorbind'

#if defined(FEAT_EVAL)
    if (aborting())	    // autocmds may abort script processing
	return FAIL;
#endif

    return OK;
}
1
459,104
/* Look up a chain by index on the block's RCU-protected chain list.
 * Returns the matching chain or NULL when the index is not present. */
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *pos;

	list_for_each_entry_rcu(pos, &block->chain_list, list) {
		if (pos->index == chain_index)
			return pos;
	}

	return NULL;
}
0
336,579
// Forward an agent-data read buffer to the client's main channel.
// "opaque" carries the RedClient; "msg" is known to be a RedVDIReadBuf.
void RedCharDeviceVDIPort::send_msg_to_client(RedPipeItem *msg,
                                              RedCharDeviceClientOpaque *opaque)
{
    auto buf = static_cast<RedVDIReadBuf*>(msg);
    auto client = (RedClient *) opaque;

    client->get_main()->push_agent_data(
        red::shared_ptr<RedAgentDataPipeItem>(buf));
}
0
238,444
/* Decide whether a conditional jump on "reg" vs immediate "val" is known
 * at verification time: 1 = always taken, 0 = never taken, -1 = unknown. */
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
			   bool is_jmp32)
{
	if (__is_pointer_value(false, reg)) {
		if (!reg_type_not_null(reg->type))
			return -1;

		/* If pointer is valid tests against zero will fail so we can
		 * use this to direct branch taken.
		 */
		if (val != 0)
			return -1;

		if (opcode == BPF_JEQ)
			return 0;
		if (opcode == BPF_JNE)
			return 1;
		return -1;
	}

	return is_jmp32 ? is_branch32_taken(reg, val, opcode)
			: is_branch64_taken(reg, val, opcode);
}
0
418,796
/*
 * Return TRUE when the first character of 'mousemodel' is 'p'
 * (presumably the "popup" models — verify against option values).
 */
    mouse_model_popup(void)
{
    return *p_mousem == 'p';
}
0
294,607
/*
 * Week number for date "x" under week-start flavour "f": delegates to
 * c_jd_to_weeknum and returns only the week component.
 */
m_wnumx(union DateData *x, int f)
{
    int year, week, day;

    c_jd_to_weeknum(m_local_jd(x), f, m_virtual_sg(x), /* !=m_sg() */
		    &year, &week, &day);
    return week;
}
0
261,942
/*
 * Decode "src" as base64 (or base64url when "url" is set) into a new
 * string stored in "value".  A zero decoded length yields the shared
 * empty string.  Returns NJS_OK, or NJS_ERROR on allocation failure.
 */
njs_string_decode_base64_core(njs_vm_t *vm, njs_value_t *value,
    const njs_str_t *src, njs_bool_t url)
{
    size_t        chars;
    njs_str_t     out;
    const u_char  *table;

    table = url ? njs_basis64url : njs_basis64;

    chars = njs_decode_base64_length_core(src, table, &out.length);

    if (njs_slow_path(out.length == 0)) {
        vm->retval = njs_string_empty;
        return NJS_OK;
    }

    out.start = njs_string_alloc(vm, value, out.length, chars);
    if (njs_slow_path(out.start == NULL)) {
        return NJS_ERROR;
    }

    njs_decode_base64_core(&out, src, table);

    return NJS_OK;
}
0
439,096
static Image *ReadGIFImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define BitSet(byte,bit) (((byte) & (bit)) == (bit)) #define LSBFirstOrder(x,y) (((y) << 8) | (x)) #define ThrowGIFException(exception,message) \ { \ if (profiles != (LinkedListInfo *) NULL) \ profiles=DestroyLinkedList(profiles,DestroyGIFProfile); \ if (global_colormap != (unsigned char *) NULL) \ global_colormap=(unsigned char *) RelinquishMagickMemory(global_colormap); \ if (meta_image != (Image *) NULL) \ meta_image=DestroyImage(meta_image); \ ThrowReaderException((exception),(message)); \ } Image *image, *meta_image; LinkedListInfo *profiles; MagickBooleanType status; register ssize_t i; register unsigned char *p; size_t duration, global_colors, image_count, local_colors, one; ssize_t count, opacity; unsigned char background, buffer[257], c, flag, *global_colormap, magick[12]; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Determine if this a GIF file. 
*/ count=ReadBlob(image,6,magick); if ((count != 6) || ((LocaleNCompare((char *) magick,"GIF87",5) != 0) && (LocaleNCompare((char *) magick,"GIF89",5) != 0))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) memset(buffer,0,sizeof(buffer)); meta_image=AcquireImage(image_info); /* metadata container */ meta_image->page.width=ReadBlobLSBShort(image); meta_image->page.height=ReadBlobLSBShort(image); flag=(unsigned char) ReadBlobByte(image); background=(unsigned char) ReadBlobByte(image); c=(unsigned char) ReadBlobByte(image); /* reserved */ profiles=(LinkedListInfo *) NULL; one=1; global_colors=one << (((size_t) flag & 0x07)+1); global_colormap=(unsigned char *) AcquireQuantumMemory((size_t) MagickMax(global_colors,256),3UL*sizeof(*global_colormap)); if (global_colormap == (unsigned char *) NULL) ThrowGIFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(global_colormap,0,3*MagickMax(global_colors,256)* sizeof(*global_colormap)); if (BitSet((int) flag,0x80) != 0) { count=ReadBlob(image,(size_t) (3*global_colors),global_colormap); if (count != (ssize_t) (3*global_colors)) ThrowGIFException(CorruptImageError,"InsufficientImageDataInFile"); } duration=0; opacity=(-1); image_count=0; for ( ; ; ) { count=ReadBlob(image,1,&c); if (count != 1) break; if (c == (unsigned char) ';') break; /* terminator */ if (c == (unsigned char) '!') { /* GIF Extension block. */ count=ReadBlob(image,1,&c); if (count != 1) ThrowGIFException(CorruptImageError,"UnableToReadExtensionBlock"); (void) memset(buffer,0,sizeof(buffer)); switch (c) { case 0xf9: { /* Read graphics control extension. */ while (ReadBlobBlock(image,buffer) != 0) ; meta_image->dispose=(DisposeType) ((buffer[0] >> 2) & 0x07); meta_image->delay=((size_t) buffer[2] << 8) | buffer[1]; if ((ssize_t) (buffer[0] & 0x01) == 0x01) opacity=(ssize_t) buffer[3]; break; } case 0xfe: { char *comments; size_t length; /* Read comment extension. 
*/ comments=AcquireString((char *) NULL); for (length=0; ; length+=count) { count=ReadBlobBlock(image,buffer); if (count == 0) break; buffer[count]='\0'; (void) ConcatenateString(&comments,(const char *) buffer); } (void) SetImageProperty(meta_image,"comment",comments); comments=DestroyString(comments); break; } case 0xff: { MagickBooleanType loop; /* Read Netscape Loop extension. */ loop=MagickFalse; if (ReadBlobBlock(image,buffer) != 0) loop=LocaleNCompare((char *) buffer,"NETSCAPE2.0",11) == 0 ? MagickTrue : MagickFalse; if (loop != MagickFalse) while (ReadBlobBlock(image,buffer) != 0) { meta_image->iterations=((size_t) buffer[2] << 8) | buffer[1]; if (meta_image->iterations != 0) meta_image->iterations++; } else { char name[MaxTextExtent]; int block_length, info_length, reserved_length; MagickBooleanType i8bim, icc, iptc, magick; StringInfo *profile; unsigned char *info; /* Store GIF application extension as a generic profile. */ icc=LocaleNCompare((char *) buffer,"ICCRGBG1012",11) == 0 ? MagickTrue : MagickFalse; magick=LocaleNCompare((char *) buffer,"ImageMagick",11) == 0 ? MagickTrue : MagickFalse; i8bim=LocaleNCompare((char *) buffer,"MGK8BIM0000",11) == 0 ? MagickTrue : MagickFalse; iptc=LocaleNCompare((char *) buffer,"MGKIPTC0000",11) == 0 ? 
MagickTrue : MagickFalse; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading GIF application extension"); info=(unsigned char *) AcquireQuantumMemory(255UL, sizeof(*info)); if (info == (unsigned char *) NULL) ThrowGIFException(ResourceLimitError, "MemoryAllocationFailed"); (void) memset(info,0,255UL*sizeof(*info)); reserved_length=255; for (info_length=0; ; ) { block_length=(int) ReadBlobBlock(image,&info[info_length]); if (block_length == 0) break; info_length+=block_length; if (info_length > (reserved_length-255)) { reserved_length+=4096; info=(unsigned char *) ResizeQuantumMemory(info,(size_t) reserved_length,sizeof(*info)); if (info == (unsigned char *) NULL) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowGIFException(ResourceLimitError, "MemoryAllocationFailed"); } } } profile=BlobToStringInfo(info,(size_t) info_length); if (profile == (StringInfo *) NULL) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowGIFException(ResourceLimitError, "MemoryAllocationFailed"); } if (i8bim != MagickFalse) (void) CopyMagickString(name,"8bim",sizeof(name)); else if (icc != MagickFalse) (void) CopyMagickString(name,"icc",sizeof(name)); else if (iptc != MagickFalse) (void) CopyMagickString(name,"iptc",sizeof(name)); else if (magick != MagickFalse) { (void) CopyMagickString(name,"magick",sizeof(name)); meta_image->gamma=StringToDouble((char *) info+6, (char **) NULL); } else (void) FormatLocaleString(name,sizeof(name),"gif:%.11s", buffer); info=(unsigned char *) RelinquishMagickMemory(info); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " profile name=%s",name); if (magick != MagickFalse) profile=DestroyStringInfo(profile); else { if (profiles == (LinkedListInfo *) NULL) profiles=NewLinkedList(0); SetStringInfoName(profile,name); (void) AppendValueToLinkedList(profiles,profile); } } break; } default: { while (ReadBlobBlock(image,buffer) != 0) ; break; } } } if (c != (unsigned char) ',') continue; if (image_count != 0) { /* Allocate next 
image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); } image_count++; /* Read image attributes. */ meta_image->page.x=(ssize_t) ReadBlobLSBShort(image); meta_image->page.y=(ssize_t) ReadBlobLSBShort(image); meta_image->scene=image->scene; (void) CloneImageProperties(image,meta_image); DestroyImageProperties(meta_image); image->storage_class=PseudoClass; image->compression=LZWCompression; image->columns=ReadBlobLSBShort(image); image->rows=ReadBlobLSBShort(image); image->depth=8; flag=(unsigned char) ReadBlobByte(image); image->interlace=BitSet((int) flag,0x40) != 0 ? GIFInterlace : NoInterlace; local_colors=BitSet((int) flag,0x80) == 0 ? global_colors : one << ((size_t) (flag & 0x07)+1); image->colors=local_colors; if (opacity >= (ssize_t) image->colors) image->colors=(size_t) (opacity+1); image->ticks_per_second=100; image->matte=opacity >= 0 ? MagickTrue : MagickFalse; if ((image->columns == 0) || (image->rows == 0)) ThrowGIFException(CorruptImageError,"NegativeOrZeroImageSize"); /* Inititialize colormap. */ if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowGIFException(ResourceLimitError,"MemoryAllocationFailed"); if (BitSet((int) flag,0x80) == 0) { /* Use global colormap. */ p=global_colormap; for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(*p++); image->colormap[i].green=ScaleCharToQuantum(*p++); image->colormap[i].blue=ScaleCharToQuantum(*p++); if (i == opacity) { image->colormap[i].opacity=(Quantum) TransparentOpacity; image->transparent_color=image->colormap[opacity]; } } image->background_color=image->colormap[MagickMin((ssize_t) background, (ssize_t) image->colors-1)]; } else { unsigned char *colormap; /* Read local colormap. 
*/ colormap=(unsigned char *) AcquireQuantumMemory((size_t) MagickMax(local_colors,256),3UL*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) ThrowGIFException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(colormap,0,3*MagickMax(local_colors,256)* sizeof(*colormap)); count=ReadBlob(image,(3*local_colors)*sizeof(*colormap),colormap); if (count != (ssize_t) (3*local_colors)) { colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowGIFException(CorruptImageError,"InsufficientImageDataInFile"); } p=colormap; for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(*p++); image->colormap[i].green=ScaleCharToQuantum(*p++); image->colormap[i].blue=ScaleCharToQuantum(*p++); if (i == opacity) image->colormap[i].opacity=(Quantum) TransparentOpacity; } colormap=(unsigned char *) RelinquishMagickMemory(colormap); } if (image->gamma == 1.0) { for (i=0; i < (ssize_t) image->colors; i++) if (IsGrayPixel(image->colormap+i) == MagickFalse) break; (void) SetImageColorspace(image,i == (ssize_t) image->colors ? LinearGRAYColorspace : RGBColorspace); } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { if (profiles != (LinkedListInfo *) NULL) profiles=DestroyLinkedList(profiles,DestroyGIFProfile); global_colormap=(unsigned char *) RelinquishMagickMemory( global_colormap); meta_image=DestroyImage(meta_image); InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Decode image. 
*/ if (image_info->ping != MagickFalse) status=PingGIFImage(image); else status=DecodeImage(image,opacity); InheritException(exception,&image->exception); if ((image_info->ping == MagickFalse) && (status == MagickFalse)) ThrowGIFException(CorruptImageError,"CorruptImage"); if (profiles != (LinkedListInfo *) NULL) { StringInfo *profile; /* Set image profiles. */ ResetLinkedListIterator(profiles); profile=(StringInfo *) GetNextValueInLinkedList(profiles); while (profile != (StringInfo *) NULL) { (void) SetImageProfile(image,GetStringInfoName(profile),profile); profile=(StringInfo *) GetNextValueInLinkedList(profiles); } profiles=DestroyLinkedList(profiles,DestroyGIFProfile); } duration+=image->delay*image->iterations; if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; opacity=(-1); status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) image->scene- 1,image->scene); if (status == MagickFalse) break; } image->duration=duration; if (profiles != (LinkedListInfo *) NULL) profiles=DestroyLinkedList(profiles,DestroyGIFProfile); meta_image=DestroyImage(meta_image); global_colormap=(unsigned char *) RelinquishMagickMemory(global_colormap); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); (void) CloseBlob(image); if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); }
0
314,759
cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timeval ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } }
0
413,610
static void cccb(void *u) { esil_anal_stop = true; eprintf ("^C\n"); }
0
512,603
bool Item_equal::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); Item_equal_fields_iterator it(*this); Item *item; Field *first_equal_field= NULL; Field *last_equal_field= NULL; Field *prev_equal_field= NULL; not_null_tables_cache= used_tables_cache= 0; const_item_cache= 0; while ((item= it++)) { table_map tmp_table_map; used_tables_cache|= item->used_tables(); tmp_table_map= item->not_null_tables(); not_null_tables_cache|= tmp_table_map; DBUG_ASSERT(!item->with_sum_func() && !item->with_subquery()); if (item->maybe_null) maybe_null= 1; if (!item->get_item_equal()) item->set_item_equal(this); if (link_equal_fields && item->real_item()->type() == FIELD_ITEM) { last_equal_field= ((Item_field *) (item->real_item()))->field; if (!prev_equal_field) first_equal_field= last_equal_field; else prev_equal_field->next_equal_field= last_equal_field; prev_equal_field= last_equal_field; } } if (prev_equal_field && last_equal_field != first_equal_field) last_equal_field->next_equal_field= first_equal_field; if (fix_length_and_dec()) return TRUE; fixed= 1; return FALSE; }
0
273,403
void Compute(OpKernelContext* ctx) override { const Tensor* seq_len_max_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->input("seq_len_max", &seq_len_max_tensor)); const Tensor* x; OP_REQUIRES_OK(ctx, ctx->input("x", &x)); OP_REQUIRES(ctx, x->dims() == 3, errors::InvalidArgument("x must be 3D")); const int64_t timelen = x->dim_size(0); const int64_t batch_size = x->dim_size(1); const int64_t input_size = x->dim_size(2); const Tensor* cs_prev_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->input("cs_prev", &cs_prev_tensor)); OP_REQUIRES(ctx, cs_prev_tensor->dims() == 2, errors::InvalidArgument("cs_prev must be 2D")); OP_REQUIRES(ctx, cs_prev_tensor->dim_size(0) == batch_size, errors::InvalidArgument("cs_prev.dims(0) != batch_size: ", cs_prev_tensor->dim_size(0), " vs. ", batch_size)); const int64_t cell_size = cs_prev_tensor->dim_size(1); if (batch_size * input_size % 2 == 1) { LOG(WARNING) << "BlockLSTMOp is inefficient when both batch_size and " << "input_size are odd. You are using: batch_size=" << batch_size << ", input_size=" << input_size; } if (batch_size * cell_size % 2 == 1) { LOG(WARNING) << "BlockLSTMOp is inefficient when both batch_size and " << "cell_size are odd. You are using: batch_size=" << batch_size << ", cell_size=" << cell_size; } const Tensor* h_prev_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->input("h_prev", &h_prev_tensor)); OP_REQUIRES(ctx, h_prev_tensor->dims() == 2, errors::InvalidArgument("h_prev must be 2D")); OP_REQUIRES(ctx, h_prev_tensor->dim_size(0) == batch_size, errors::InvalidArgument("h_prev.dims(0) != batch_size: ", h_prev_tensor->dim_size(0), " vs. ", batch_size)); OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size, errors::InvalidArgument( "h_prev.dims(1) != cell_size: ", h_prev_tensor->dim_size(1), " vs. 
", cell_size)); const Tensor* w_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->input("w", &w_tensor)); OP_REQUIRES(ctx, w_tensor->dims() == 2, errors::InvalidArgument("w must be 2D")); OP_REQUIRES(ctx, w_tensor->dim_size(0) == input_size + cell_size, errors::InvalidArgument( "w.dim_size(0) != input_size + cell_size: ", w_tensor->dim_size(0), " vs. ", input_size + cell_size)); OP_REQUIRES(ctx, w_tensor->dim_size(1) == cell_size * 4, errors::InvalidArgument( "w.dim_size(1) != cell_size * 4: ", w_tensor->dim_size(1), " vs. ", cell_size * 4)); const Tensor* wci_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->input("wci", &wci_tensor)); OP_REQUIRES(ctx, wci_tensor->dims() == 1, errors::InvalidArgument("wci must be 1D")); OP_REQUIRES(ctx, wci_tensor->dim_size(0) == cell_size, errors::InvalidArgument( "wci.dim_size(0) != cell_size: ", wci_tensor->dim_size(0), " vs. ", cell_size)); const Tensor* wcf_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->input("wcf", &wcf_tensor)); OP_REQUIRES(ctx, wcf_tensor->dims() == 1, errors::InvalidArgument("wcf must be 1D")); OP_REQUIRES(ctx, wcf_tensor->dim_size(0) == cell_size, errors::InvalidArgument( "wcf.dim_size(0) != cell_size: ", wcf_tensor->dim_size(0), " vs. ", cell_size)); const Tensor* wco_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->input("wco", &wco_tensor)); OP_REQUIRES(ctx, wco_tensor->dims() == 1, errors::InvalidArgument("wco must be 1D")); OP_REQUIRES(ctx, wco_tensor->dim_size(0) == cell_size, errors::InvalidArgument( "wco.dim_size(0) != cell_size: ", wco_tensor->dim_size(0), " vs. ", cell_size)); const Tensor* b_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->input("b", &b_tensor)); OP_REQUIRES(ctx, b_tensor->dims() == 1, errors::InvalidArgument("b must be 1D")); OP_REQUIRES(ctx, b_tensor->dim_size(0) == cell_size * 4, errors::InvalidArgument( "b.dim_size(0) != cell_size * 4: ", b_tensor->dim_size(0), " vs. 
", cell_size * 4)); TensorShape batch_cell_shape({timelen, batch_size, cell_size}); Tensor* i_out; OP_REQUIRES_OK(ctx, ctx->allocate_output("i", batch_cell_shape, &i_out)); Tensor* cs_out; OP_REQUIRES_OK(ctx, ctx->allocate_output("cs", batch_cell_shape, &cs_out)); Tensor* f_out; OP_REQUIRES_OK(ctx, ctx->allocate_output("f", batch_cell_shape, &f_out)); Tensor* o_out; OP_REQUIRES_OK(ctx, ctx->allocate_output("o", batch_cell_shape, &o_out)); Tensor* ci_out; OP_REQUIRES_OK(ctx, ctx->allocate_output("ci", batch_cell_shape, &ci_out)); Tensor* co_out; OP_REQUIRES_OK(ctx, ctx->allocate_output("co", batch_cell_shape, &co_out)); Tensor* h_out; OP_REQUIRES_OK(ctx, ctx->allocate_output("h", batch_cell_shape, &h_out)); Tensor xh_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp( DataTypeToEnum<T>::v(), TensorShape({batch_size, input_size + cell_size}), &xh_tensor)); Tensor gates_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(), TensorShape({batch_size, cell_size * 4}), &gates_tensor)); const Device& device = ctx->eigen_device<Device>(); const int64_t seq_len_max = seq_len_max_tensor->scalar<int64_t>()(); SliceHelper<Device, T> slicer(ctx); for (int64_t t = 0; t < seq_len_max; ++t) { const Tensor x_tensor = slicer.InputSlice(*x, t, "x"); const Tensor& cs_prev_tensor2 = t == 0 ? *cs_prev_tensor : slicer.OutputSlice(cs_out, t - 1, "cs_prev"); const Tensor& h_prev_tensor2 = t == 0 ? 
*h_prev_tensor : slicer.OutputSlice(h_out, t - 1, "h_prev"); Tensor i_tensor = slicer.OutputSlice(i_out, t, "i_out"); Tensor cs_tensor = slicer.OutputSlice(cs_out, t, "cs_out"); Tensor f_tensor = slicer.OutputSlice(f_out, t, "f_out"); Tensor o_tensor = slicer.OutputSlice(o_out, t, "o_out"); Tensor ci_tensor = slicer.OutputSlice(ci_out, t, "ci_out"); Tensor co_tensor = slicer.OutputSlice(co_out, t, "co_out"); Tensor h_tensor = slicer.OutputSlice(h_out, t, "h_out"); functor::LSTMBlockCellFprop<Device, T, USE_CUBLAS, gate_layout>( batch_size, input_size, cell_size)( ctx, device, forget_bias_, cell_clip_, use_peephole_, x_tensor.matrix<T>(), cs_prev_tensor2.matrix<T>(), h_prev_tensor2.matrix<T>(), w_tensor->matrix<T>(), wci_tensor->vec<T>(), wcf_tensor->vec<T>(), wco_tensor->vec<T>(), b_tensor->vec<T>(), xh_tensor.matrix<T>(), i_tensor.matrix<T>(), cs_tensor.matrix<T>(), f_tensor.matrix<T>(), o_tensor.matrix<T>(), ci_tensor.matrix<T>(), co_tensor.matrix<T>(), gates_tensor.matrix<T>(), h_tensor.matrix<T>()); slicer.FinishTimeStep(); } if (seq_len_max < timelen) { Tensor cs_tensor = cs_out->Slice(seq_len_max, timelen); Tensor h_tensor = h_out->Slice(seq_len_max, timelen); functor::TensorUnalignedZero<Device, T>()(device, cs_tensor.unaligned_flat<T>()); functor::TensorUnalignedZero<Device, T>()(device, h_tensor.unaligned_flat<T>()); } }
0
301,470
badword_captype(char_u *word, char_u *end) { int flags = captype(word, end); int c; int l, u; int first; char_u *p; if (flags & WF_KEEPCAP) { // Count the number of UPPER and lower case letters. l = u = 0; first = FALSE; for (p = word; p < end; MB_PTR_ADV(p)) { c = PTR2CHAR(p); if (SPELL_ISUPPER(c)) { ++u; if (p == word) first = TRUE; } else ++l; } // If there are more UPPER than lower case letters suggest an // ALLCAP word. Otherwise, if the first letter is UPPER then // suggest ONECAP. Exception: "ALl" most likely should be "All", // require three upper case letters. if (u > l && u > 2) flags |= WF_ALLCAP; else if (first) flags |= WF_ONECAP; if (u >= 2 && l >= 2) // maCARONI maCAroni flags |= WF_MIXCAP; } return flags; }
0
512,501
void Item_equal::merge(THD *thd, Item_equal *item) { Item *c= item->get_const(); if (c) item->equal_items.pop(); equal_items.append(&item->equal_items); if (c) { /* The flag cond_false will be set to TRUE after this if the multiple equality already contains a constant and its value is not equal to the value of c. */ add_const(thd, c); } cond_false|= item->cond_false; }
0
222,569
Status FunctionLibraryDefinition::RemoveFunctionHelper(const string& func) { const auto& i = function_defs_.find(func); if (i == function_defs_.end()) { return errors::InvalidArgument("Tried to remove non-existent function '", func, "'."); } function_defs_.erase(i); return Status::OK(); }
0
372,350
int sdb_checkline(char f) { int i; char ff=f>>1; for(i=0;i<7;i++) { if((ff & 1) && (yylineno==sdb_lines[i])) return i+1; ff>>=1; if (ff==0) return 0; } return 0; }
0
231,674
TEST_F( QuicUnencryptedServerTransportTest, IncreaseLimitAfterReceivingNewPacket) { auto qLogger = std::make_shared<FileQLogger>(VantagePoint::Server); server->getNonConstConn().qLogger = qLogger; getFakeHandshakeLayer()->allowZeroRttKeys(); server->getNonConstConn().transportSettings.zeroRttSourceTokenMatchingPolicy = ZeroRttSourceTokenMatchingPolicy::LIMIT_IF_NO_EXACT_MATCH; auto originalUdpSize = server->getConn().udpSendPacketLen; setupClientReadCodec(); recvClientHello(); EXPECT_EQ( *server->getNonConstConn().writableBytesLimit, server->getConn().transportSettings.limitedCwndInMss * originalUdpSize); recvClientHello(); // in tests the udp packet length changes auto expectedLen = server->getConn().transportSettings.limitedCwndInMss * originalUdpSize + server->getConn().transportSettings.limitedCwndInMss * server->getConn().udpSendPacketLen; EXPECT_NE(originalUdpSize, server->getConn().udpSendPacketLen); EXPECT_EQ(*server->getNonConstConn().writableBytesLimit, expectedLen); std::vector<int> indices = getQLogEventIndices(QLogEventType::TransportStateUpdate, qLogger); EXPECT_EQ(indices.size(), 3); std::array<::std::string, 3> updateArray = { kDerivedZeroRttReadCipher, kDerivedOneRttWriteCipher, kTransportReady}; for (int i = 0; i < 3; ++i) { auto tmp = std::move(qLogger->logs[indices[i]]); auto event = dynamic_cast<QLogTransportStateUpdateEvent*>(tmp.get()); EXPECT_EQ(event->update, updateArray[i]); } }
0
369,438
static void io_req_task_queue(struct io_kiocb *req) { req->io_task_work.func = io_req_task_submit; io_req_task_work_add(req, false); }
0
450,319
static void vnc_listen_io(QIONetListener *listener, QIOChannelSocket *cioc, void *opaque) { VncDisplay *vd = opaque; bool isWebsock = listener == vd->wslistener; qio_channel_set_name(QIO_CHANNEL(cioc), isWebsock ? "vnc-ws-server" : "vnc-server"); qio_channel_set_delay(QIO_CHANNEL(cioc), false); vnc_connect(vd, cioc, false, isWebsock); }
0
364,773
free_tag_stuff(void) { ga_clear_strings(&tag_fnames); if (curwin != NULL) do_tag(NULL, DT_FREE, 0, 0, 0); tag_freematch(); # if defined(FEAT_QUICKFIX) tagstack_clear_entry(&ptag_entry); # endif }
0
274,768
LCN ntfs_attr_vcn_to_lcn(ntfs_attr *na, const VCN vcn) { LCN lcn; BOOL is_retry = FALSE; if (!na || !NAttrNonResident(na) || vcn < 0) return (LCN)LCN_EINVAL; ntfs_log_trace("Entering for inode 0x%llx, attr 0x%x.\n", (unsigned long long)na->ni->mft_no, le32_to_cpu(na->type)); retry: /* Convert vcn to lcn. If that fails map the runlist and retry once. */ lcn = ntfs_rl_vcn_to_lcn(na->rl, vcn); if (lcn >= 0) return lcn; if (!is_retry && !ntfs_attr_map_runlist(na, vcn)) { is_retry = TRUE; goto retry; } /* * If the attempt to map the runlist failed, or we are getting * LCN_RL_NOT_MAPPED despite having mapped the attribute extent * successfully, something is really badly wrong... */ if (!is_retry || lcn == (LCN)LCN_RL_NOT_MAPPED) return (LCN)LCN_EIO; /* lcn contains the appropriate error code. */ return lcn; }
0
473,978
utf32le_is_mbc_newline(const UChar* p, const UChar* end, OnigEncoding enc ARG_UNUSED) { if (p + 3 < end) { if (*p == 0x0a && *(p+1) == 0 && *(p+2) == 0 && *(p+3) == 0) return 1; #ifdef USE_UNICODE_ALL_LINE_TERMINATORS if (( #ifndef USE_CRNL_AS_LINE_TERMINATOR *p == 0x0d || #endif *p == 0x85) && *(p+1) == 0x00 && (p+2) == 0x00 && *(p+3) == 0x00) return 1; if (*(p+1) == 0x20 && (*p == 0x29 || *p == 0x28) && *(p+2) == 0x00 && *(p+3) == 0x00) return 1; #endif } return 0; }
0
195,293
gen_hash(codegen_scope *s, node *tree, int val, int limit) { int slimit = GEN_VAL_STACK_MAX; if (cursp() >= GEN_LIT_ARY_MAX) slimit = INT16_MAX; int len = 0; mrb_bool update = FALSE; while (tree) { if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) { if (len > 0) { pop_n(len*2); if (!update) { genop_2(s, OP_HASH, cursp(), len); } else { pop(); genop_2(s, OP_HASHADD, cursp(), len); } push(); } codegen(s, tree->car->cdr, val); if (len > 0 || update) { pop(); pop(); genop_1(s, OP_HASHCAT, cursp()); push(); } update = TRUE; len = 0; } else { codegen(s, tree->car->car, val); codegen(s, tree->car->cdr, val); len++; } tree = tree->cdr; if (val && cursp() >= slimit) { pop_n(len*2); if (!update) { genop_2(s, OP_HASH, cursp(), len); } else { pop(); genop_2(s, OP_HASHADD, cursp(), len); } push(); update = TRUE; len = 0; } } if (update) { if (val && len > 0) { pop_n(len*2+1); genop_2(s, OP_HASHADD, cursp(), len); push(); } return -1; /* variable length */ } return len; }
1
195,216
Status BuildInputArgIndex(const OpDef::ArgDef& arg_def, AttrSlice attr_values, const FunctionDef::ArgAttrs* arg_attrs, bool ints_on_device, int64_t resource_arg_unique_id) { bool is_type_list; DataTypeVector dtypes; TF_RETURN_IF_ERROR( ArgNumType(attr_values, arg_def, &is_type_list, &dtypes)); CHECK_GE(dtypes.size(), size_t{1}); int arg_index = result_.nodes.size(); TF_RETURN_IF_ERROR( AddItem(arg_def.name(), {true, arg_index, 0, is_type_list, dtypes})); // Creates dtypes.size() nodes in the graph. for (size_t i = 0; i < dtypes.size(); ++i) { TF_RETURN_IF_ERROR(AddItem(strings::StrCat(arg_def.name(), ":", i), {true, arg_index, 0, false, {dtypes[i]}})); DCHECK_EQ(arg_index, result_.nodes.size()); string name = arg_def.name(); if (dtypes.size() > 1) { strings::StrAppend(&name, "_", i); } NodeDef* gnode = AddNode(name); if (ints_on_device && dtypes[i] == DataType::DT_INT32) { gnode->set_op(FunctionLibraryDefinition::kDeviceArgOp); } else { gnode->set_op(FunctionLibraryDefinition::kArgOp); } DataType dtype = arg_def.is_ref() ? MakeRefType(dtypes[i]) : dtypes[i]; AddAttr("T", dtype, gnode); AddAttr("index", arg_index, gnode); if (resource_arg_unique_id >= 0) { AddAttr("_resource_arg_unique_id", resource_arg_unique_id, gnode); } if (arg_attrs) { for (const auto& arg_attr : arg_attrs->attr()) { AddAttr(arg_attr.first, arg_attr.second, gnode->mutable_attr()); } } result_.arg_types.push_back(dtypes[i]); ++arg_index; } return Status::OK(); }
1
344,762
opt_dequote(const char **sp, const char **errstrp) { const char *s = *sp; char *ret; size_t i; *errstrp = NULL; if (*s != '"') { *errstrp = "missing start quote"; return NULL; } s++; if ((ret = malloc(strlen((s)) + 1)) == NULL) { *errstrp = "memory allocation failed"; return NULL; } for (i = 0; *s != '\0' && *s != '"';) { if (s[0] == '\\' && s[1] == '"') s++; ret[i++] = *s++; } if (*s == '\0') { *errstrp = "missing end quote"; free(ret); return NULL; } ret[i] = '\0'; s++; *sp = s; return ret; }
0
246,716
static void PrintSplitUsage() { u32 i=0; gf_sys_format_help(helpout, help_flags, " \n" "# File splitting\n" "MP4Box can split input files by size, duration or extract a given part of the file to new IsoMedia file(s).\n" "This requires that at most one track in the input file has non random-access points (typically one video track at most).\n" "Splitting will ignore all MPEG-4 Systems tracks and hint tracks, but will try to split private media tracks.\n" "The input file must have enough random access points in order to be split. If this is not the case, you will have to re-encode the content.\n" "You can add media to a file and split it in the same pass. In this case, the destination file (the one which would be obtained without splitting) will not be stored.\n" " \n" "Time ranges are specified as follows:\n" "- `S-E`: `S` start and `E` end times, formatted as `HH:MM:SS.ms`, `MM:SS.ms` or time in seconds (int, double, fraction)\n" "- `S:E`: `S` start time and `E` end times in seconds (int, double, fraction)\n" "- `S:end` or `S:end-N`: `S` start time in seconds (int, double), `N` number of seconds (int, double) before the end\n" " \n" "MP4Box splitting runs a filter session using the `reframer` filter as follows:\n" "- `splitrange` option of the reframer is always set\n" "- source is demuxed with `alltk` option set\n" "- start and end ranges are passed to `xs` and `xe` options of the reframer\n" "- for `-splitz`, options `xadjust` and `xround=after` are enforced\n" "- for `-splitg`, options `xadjust` and `xround=before` are enforced\n" "- for `-splitf`, option `xround=seek` is enforced and `propbe_ref`set if not specified at prompt\n" "- for `-splitx`, option `xround=closest` and `propbe_ref` are enforced if not specified at prompt\n" " \n" "The output file(s) storage mode can be specified using -flat, -newfs, -inter and -frag\n" " \n" ); i=0; while (m4b_split_args[i].name) { GF_GPACArg *arg = (GF_GPACArg *) &m4b_split_args[i]; i++; gf_sys_print_arg(helpout, 
help_flags, arg, "mp4box-split"); } }
0
294,701
ns_to_sec(VALUE n) { if (FIXNUM_P(n)) return rb_rational_new2(n, INT2FIX(SECOND_IN_NANOSECONDS)); return f_quo(n, INT2FIX(SECOND_IN_NANOSECONDS)); }
0
263,314
char *_q_makeword(char *str, char stop) { char *word; int len, i; for (len = 0; ((str[len] != stop) && (str[len])); len++); word = (char *)malloc(sizeof(char) * (len + 1)); for (i = 0; i < len; i++) word[i] = str[i]; word[i] = '\0'; if (str[len])len++; for (i = len; str[i]; i++) str[i - len] = str[i]; str[i - len] = '\0'; return word; }
0
244,249
void lsr1_box_del(GF_Box *s) { GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); gf_free(ptr); }
0
336,676
SPICE_GNUC_VISIBLE int spice_server_set_ticket(SpiceServer *reds, const char *passwd, int lifetime, int fail_if_connected, int disconnect_if_connected) { if (reds_main_channel_connected(reds)) { if (fail_if_connected) { return -1; } if (disconnect_if_connected) { reds_disconnect(reds); } } on_activating_ticketing(reds); reds->config->ticketing_enabled = TRUE; if (lifetime == 0) { reds->config->taTicket.expiration_time = INT_MAX; } else { time_t now = spice_get_monotonic_time_ns() / NSEC_PER_SEC; reds->config->taTicket.expiration_time = now + lifetime; } if (passwd != NULL) { if (strlen(passwd) > SPICE_MAX_PASSWORD_LENGTH) return -1; g_strlcpy(reds->config->taTicket.password, passwd, sizeof(reds->config->taTicket.password)); } else { memset(reds->config->taTicket.password, 0, sizeof(reds->config->taTicket.password)); reds->config->taTicket.expiration_time = 0; } return 0; }
0
261,393
static enum InterPredIdc decode_inter_pred_idc(thread_context* tctx, int x0, int y0, int nPbW, int nPbH, int ctDepth) { logtrace(LogSlice,"# inter_pred_idc\n"); int value; context_model* model = &tctx->ctx_model[CONTEXT_MODEL_INTER_PRED_IDC]; if (nPbW+nPbH==12) { value = decode_CABAC_bit(&tctx->cabac_decoder, &model[4]); } else { int bit0 = decode_CABAC_bit(&tctx->cabac_decoder, &model[ctDepth]); if (bit0==0) { value = decode_CABAC_bit(&tctx->cabac_decoder, &model[4]); } else { value = 2; } } logtrace(LogSlice,"> inter_pred_idc = %d (%s)\n",value, value==0 ? "L0" : (value==1 ? "L1" : "BI")); logtrace(LogSymbols,"$1 decode_inter_pred_idx=%d\n",value+1); return (enum InterPredIdc) (value+1); }
0
466,110
static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; save_state_to_tss16(ctxt, &tss_seg); ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; } return load_state_from_tss16(ctxt, &tss_seg); }
0
226,208
void nump_box_del(GF_Box *s) { gf_free((GF_NUMPBox *)s); }
0
513,341
/* Top-level driver for executing one SELECT: prepare (name resolution),
 * optimize, and execute the JOIN for `select_lex`.
 *
 * If the SELECT_LEX already owns a JOIN (subquery re-execution, derived
 * tables), that JOIN is reused and possibly re-initialized; otherwise a new
 * JOIN is allocated on thd's mem_root.  `free_join` tracks whether this call
 * is responsible for cleaning up the select at the end (EXPLAIN keeps the
 * join alive so EXPLAIN EXTENDED warnings can be produced later).
 * Returns FALSE/0 on success, TRUE/non-zero on error. */
mysql_select(THD *thd, TABLE_LIST *tables, uint wild_num, List<Item> &fields,
             COND *conds, uint og_num, ORDER *order, ORDER *group,
             Item *having, ORDER *proc_param, ulonglong select_options,
             select_result *result, SELECT_LEX_UNIT *unit,
             SELECT_LEX *select_lex)
{
  int err= 0;
  bool free_join= 1;
  DBUG_ENTER("mysql_select");

  select_lex->context.resolve_in_select_list= TRUE;
  JOIN *join;
  if (select_lex->join != 0)
  {
    /* reuse the existing JOIN (e.g. repeated subselect execution) */
    join= select_lex->join;
    /* is it single SELECT in derived table, called in derived table creation */
    if (select_lex->linkage != DERIVED_TABLE_TYPE ||
        (select_options & SELECT_DESCRIBE))
    {
      if (select_lex->linkage != GLOBAL_OPTIONS_TYPE)
      {
        /* Original join tabs might be overwritten at first subselect execution. So we need to restore them. */
        Item_subselect *subselect= select_lex->master_unit()->item;
        if (subselect && subselect->is_uncacheable() && join->reinit())
          DBUG_RETURN(TRUE);
      }
      else
      {
        if ((err= join->prepare( tables, wild_num, conds, og_num, order,
                                 false, group, having, proc_param,
                                 select_lex, unit)))
        {
          goto err;
        }
      }
    }
    free_join= 0;
    join->select_options= select_options;
  }
  else
  {
    if (thd->lex->describe)
      select_options|= SELECT_DESCRIBE;
    /* When in EXPLAIN, delay deleting the joins so that they are still available when we're producing EXPLAIN EXTENDED warning text. */
    if (select_options & SELECT_DESCRIBE)
      free_join= 0;
    if (!(join= new (thd->mem_root) JOIN(thd, fields, select_options, result)))
      DBUG_RETURN(TRUE);
    THD_STAGE_INFO(thd, stage_init);
    thd->lex->used_tables=0;
    if ((err= join->prepare(tables, wild_num, conds, og_num, order, false,
                            group, having, proc_param, select_lex, unit)))
    {
      goto err;
    }
  }
  if ((err= join->optimize()))
  {
    goto err;					// 1
  }
  /* EXPLAIN EXTENDED needs the pre-execution condition trees preserved */
  if (thd->lex->describe & DESCRIBE_EXTENDED)
  {
    join->conds_history= join->conds;
    join->having_history= (join->having?join->having:join->tmp_having);
  }
  if (thd->is_error())
    goto err;

  join->exec();

  if (thd->lex->describe & DESCRIBE_EXTENDED)
  {
    select_lex->where= join->conds_history;
    select_lex->having= join->having_history;
  }

err:
  if (free_join)
  {
    THD_STAGE_INFO(thd, stage_end);
    err|= select_lex->cleanup();
    DBUG_RETURN(err || thd->is_error());
  }
  DBUG_RETURN(join->error ? join->error: err);
}
0
211,126
/* Parse a single entry of an INDX record into indx->entries.
 *
 * @param indx         destination index structure (entries array preallocated)
 * @param idxt         IDXT offsets delimiting each entry inside the record
 * @param tagx         TAGX tag descriptors controlling how tag values decode
 * @param ordt         optional ORDT tables used to decode the entry label
 * @param buf          buffer positioned over the raw INDX record
 * @param curr_number  index of the entry within this record
 * @return MOBI_SUCCESS, or an error code on corrupt data / allocation failure
 *
 * The entry layout is: 1-byte label length, label bytes, control bytes,
 * then variable-length tag values as described by TAGX. */
static MOBI_RET mobi_parse_index_entry(MOBIIndx *indx, const MOBIIdxt idxt, const MOBITagx *tagx, const MOBIOrdt *ordt, MOBIBuffer *buf, const size_t curr_number) {
    if (indx == NULL) {
        debug_print("%s", "INDX structure not initialized\n");
        return MOBI_INIT_FAILED;
    }
    const size_t entry_offset = indx->entries_count;
    const size_t entry_length = idxt.offsets[curr_number + 1] - idxt.offsets[curr_number];
    mobi_buffer_setpos(buf, idxt.offsets[curr_number]);
    size_t entry_number = curr_number + entry_offset;
    if (entry_number >= indx->total_entries_count) {
        debug_print("Entry number beyond array: %zu\n", entry_number);
        return MOBI_DATA_CORRUPT;
    }
    /* save original record maxlen, and clamp reads to this entry */
    const size_t buf_maxlen = buf->maxlen;
    if (buf->offset + entry_length >= buf_maxlen) {
        debug_print("Entry length too long: %zu\n", entry_length);
        return MOBI_DATA_CORRUPT;
    }
    buf->maxlen = buf->offset + entry_length;
    size_t label_length = mobi_buffer_get8(buf);
    /* fix: label_length comes from untrusted data and is copied into the
       fixed-size stack buffer `text` below; it must be bounded by
       INDX_LABEL_SIZEMAX as well as by the entry length, otherwise the
       label decoders can overflow the stack buffer */
    if (label_length > entry_length || label_length >= INDX_LABEL_SIZEMAX) {
        debug_print("Label length too long: %zu\n", label_length);
        return MOBI_DATA_CORRUPT;
    }
    char text[INDX_LABEL_SIZEMAX];
    /* FIXME: what is ORDT1 for? */
    if (ordt->ordt2) {
        label_length = mobi_getstring_ordt(ordt, buf, (unsigned char*) text, label_length);
    } else {
        label_length = mobi_indx_get_label((unsigned char*) text, buf, label_length, indx->ligt_entries_count);
    }
    indx->entries[entry_number].label = malloc(label_length + 1);
    if (indx->entries[entry_number].label == NULL) {
        debug_print("Memory allocation failed (%zu bytes)\n", label_length);
        return MOBI_MALLOC_FAILED;
    }
    strncpy(indx->entries[entry_number].label, text, label_length + 1);
    //debug_print("tag label[%zu]: %s\n", entry_number, indx->entries[entry_number].label);
    unsigned char *control_bytes;
    control_bytes = buf->data + buf->offset;
    mobi_buffer_seek(buf, (int) tagx->control_byte_count);
    indx->entries[entry_number].tags_count = 0;
    indx->entries[entry_number].tags = NULL;
    if (tagx->tags_count > 0) {
        /* first pass: resolve, per TAGX tag, how many values follow and
           whether the count is explicit or byte-length delimited */
        typedef struct {
            uint8_t tag;
            uint8_t tag_value_count;
            uint32_t value_count;
            uint32_t value_bytes;
        } MOBIPtagx;
        MOBIPtagx *ptagx = malloc(tagx->tags_count * sizeof(MOBIPtagx));
        if (ptagx == NULL) {
            debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIPtagx));
            return MOBI_MALLOC_FAILED;
        }
        uint32_t ptagx_count = 0;
        size_t len;
        size_t i = 0;
        while (i < tagx->tags_count) {
            if (tagx->tags[i].control_byte == 1) {
                /* control byte boundary marker: advance to next control byte */
                control_bytes++;
                i++;
                continue;
            }
            uint32_t value = control_bytes[0] & tagx->tags[i].bitmask;
            if (value != 0) {
                /* FIXME: is it safe to use MOBI_NOTSET? */
                uint32_t value_count = MOBI_NOTSET;
                uint32_t value_bytes = MOBI_NOTSET;
                /* all bits of masked value are set */
                if (value == tagx->tags[i].bitmask) {
                    /* more than 1 bit set */
                    if (mobi_bitcount(tagx->tags[i].bitmask) > 1) {
                        /* read value bytes from entry */
                        len = 0;
                        value_bytes = mobi_buffer_get_varlen(buf, &len);
                    } else {
                        value_count = 1;
                    }
                } else {
                    /* shift the masked value down to get the count */
                    uint8_t mask = tagx->tags[i].bitmask;
                    while ((mask & 1) == 0) {
                        mask >>= 1;
                        value >>= 1;
                    }
                    value_count = value;
                }
                ptagx[ptagx_count].tag = tagx->tags[i].tag;
                ptagx[ptagx_count].tag_value_count = tagx->tags[i].values_count;
                ptagx[ptagx_count].value_count = value_count;
                ptagx[ptagx_count].value_bytes = value_bytes;
                ptagx_count++;
            }
            i++;
        }
        indx->entries[entry_number].tags = malloc(tagx->tags_count * sizeof(MOBIIndexTag));
        if (indx->entries[entry_number].tags == NULL) {
            debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIIndexTag));
            free(ptagx);
            return MOBI_MALLOC_FAILED;
        }
        /* second pass: actually read the varlen tag values */
        i = 0;
        while (i < ptagx_count) {
            uint32_t tagvalues_count = 0;
            /* FIXME: is it safe to use MOBI_NOTSET? */
            /* value count is set */
            uint32_t tagvalues[INDX_TAGVALUES_MAX];
            if (ptagx[i].value_count != MOBI_NOTSET) {
                size_t count = ptagx[i].value_count * ptagx[i].tag_value_count;
                while (count-- && tagvalues_count < INDX_TAGVALUES_MAX) {
                    len = 0;
                    const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len);
                    tagvalues[tagvalues_count++] = value_bytes;
                }
            /* value count is not set */
            } else {
                /* read value_bytes bytes */
                len = 0;
                while (len < ptagx[i].value_bytes && tagvalues_count < INDX_TAGVALUES_MAX) {
                    const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len);
                    tagvalues[tagvalues_count++] = value_bytes;
                }
            }
            if (tagvalues_count) {
                const size_t arr_size = tagvalues_count * sizeof(*indx->entries[entry_number].tags[i].tagvalues);
                indx->entries[entry_number].tags[i].tagvalues = malloc(arr_size);
                if (indx->entries[entry_number].tags[i].tagvalues == NULL) {
                    debug_print("Memory allocation failed (%zu bytes)\n", arr_size);
                    free(ptagx);
                    return MOBI_MALLOC_FAILED;
                }
                memcpy(indx->entries[entry_number].tags[i].tagvalues, tagvalues, arr_size);
            } else {
                indx->entries[entry_number].tags[i].tagvalues = NULL;
            }
            indx->entries[entry_number].tags[i].tagid = ptagx[i].tag;
            indx->entries[entry_number].tags[i].tagvalues_count = tagvalues_count;
            indx->entries[entry_number].tags_count++;
            i++;
        }
        free(ptagx);
    }
    /* restore buffer maxlen */
    buf->maxlen = buf_maxlen;
    return MOBI_SUCCESS;
}
1
225,607
GF_Box *stdp_box_new() { ISOM_DECL_BOX_ALLOC(GF_DegradationPriorityBox, GF_ISOM_BOX_TYPE_STDP); return (GF_Box *)tmp; }
0
463,476
static int ax25_release(struct socket *sock) { struct sock *sk = sock->sk; ax25_cb *ax25; ax25_dev *ax25_dev; if (sk == NULL) return 0; sock_hold(sk); lock_sock(sk); sock_orphan(sk); ax25 = sk_to_ax25(sk); ax25_dev = ax25->ax25_dev; if (sk->sk_type == SOCK_SEQPACKET) { switch (ax25->state) { case AX25_STATE_0: release_sock(sk); ax25_disconnect(ax25, 0); lock_sock(sk); ax25_destroy_socket(ax25); break; case AX25_STATE_1: case AX25_STATE_2: ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); release_sock(sk); ax25_disconnect(ax25, 0); lock_sock(sk); if (!sock_flag(ax25->sk, SOCK_DESTROY)) ax25_destroy_socket(ax25); break; case AX25_STATE_3: case AX25_STATE_4: ax25_clear_queues(ax25); ax25->n2count = 0; switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: case AX25_PROTO_STD_DUPLEX: ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_stop_t2timer(ax25); ax25_stop_t3timer(ax25); ax25_stop_idletimer(ax25); break; #ifdef CONFIG_AX25_DAMA_SLAVE case AX25_PROTO_DAMA_SLAVE: ax25_stop_t3timer(ax25); ax25_stop_idletimer(ax25); break; #endif } ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); ax25->state = AX25_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } } else { sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); ax25_destroy_socket(ax25); } if (ax25_dev) { del_timer_sync(&ax25->timer); del_timer_sync(&ax25->t1timer); del_timer_sync(&ax25->t2timer); del_timer_sync(&ax25->t3timer); del_timer_sync(&ax25->idletimer); dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker); ax25_dev_put(ax25_dev); } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; }
0
210,223
/* Execute a blit between two context resources.
 *
 * Looks up source and destination by handle, then either forwards the blit
 * to glCopyImageSubData (when it is a plain 1:1 region copy with compatible
 * formats and no render-state work) or falls back to the generic
 * vrend_renderer_blit_int path.  Render-condition is paused around the blit
 * when the blit itself is not render-condition gated.
 * NOTE(review): the fast-path eligibility test trusts the guest-supplied
 * box/level fields — verify that level/box bounds are validated against the
 * actual resource dimensions before the copy. */
void vrend_renderer_blit(struct vrend_context *ctx,
                         uint32_t dst_handle, uint32_t src_handle,
                         const struct pipe_blit_info *info)
{
   struct vrend_resource *src_res, *dst_res;
   src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
   dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);

   if (!src_res) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle);
      return;
   }

   if (!dst_res) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
      return;
   }

   if (ctx->in_error)
      return;

   /* suspend conditional rendering unless the blit is itself conditioned */
   if (info->render_condition_enable == false)
      vrend_pause_render_condition(ctx, true);

   VREND_DEBUG(dbg_blit, ctx, "BLIT: rc:%d scissor:%d filter:%d alpha:%d mask:0x%x\n"
               "  From %s(%s) ms:%d [%d, %d, %d]+[%d, %d, %d] lvl:%d\n"
               "  To   %s(%s) ms:%d [%d, %d, %d]+[%d, %d, %d] lvl:%d\n",
               info->render_condition_enable, info->scissor_enable,
               info->filter, info->alpha_blend, info->mask,
               util_format_name(src_res->base.format),
               util_format_name(info->src.format),
               src_res->base.nr_samples,
               info->src.box.x, info->src.box.y, info->src.box.z,
               info->src.box.width, info->src.box.height, info->src.box.depth,
               info->src.level,
               util_format_name(dst_res->base.format),
               util_format_name(info->dst.format),
               dst_res->base.nr_samples,
               info->dst.box.x, info->dst.box.y, info->dst.box.z,
               info->dst.box.width, info->dst.box.height, info->dst.box.depth,
               info->dst.level);

   /* The Gallium blit function can be called for a general blit that may
    * scale, convert the data, and apply some rander states, or it is called via
    * glCopyImageSubData. If the src or the dst image are equal, or the two
    * images formats are the same, then Galliums such calles are redirected
    * to resource_copy_region, in this case and if no render states etx need
    * to be applied, forward the call to glCopyImageSubData, otherwise do a
    * normal blit. */
   if (has_feature(feat_copy_image) &&
       (!info->render_condition_enable || !ctx->sub->cond_render_gl_mode) &&
       format_is_copy_compatible(info->src.format,info->dst.format, false) &&
       !info->scissor_enable && (info->filter == PIPE_TEX_FILTER_NEAREST) &&
       !info->alpha_blend && (info->mask == PIPE_MASK_RGBA) &&
       src_res->base.nr_samples == dst_res->base.nr_samples &&
       info->src.box.width == info->dst.box.width &&
       info->src.box.height == info->dst.box.height &&
       info->src.box.depth == info->dst.box.depth) {
      VREND_DEBUG(dbg_blit, ctx,  "  Use glCopyImageSubData\n");
      vrend_copy_sub_image(src_res, dst_res, info->src.level, &info->src.box,
                           info->dst.level, info->dst.box.x, info->dst.box.y,
                           info->dst.box.z);
   } else {
      VREND_DEBUG(dbg_blit, ctx, "  Use blit_int\n");
      vrend_renderer_blit_int(ctx, src_res, dst_res, info);
   }

   if (info->render_condition_enable == false)
      vrend_pause_render_condition(ctx, false);
}
1
508,902
int my_wc_mb_utf8_escape_single_quote(CHARSET_INFO *cs, my_wc_t wc, uchar *str, uchar *end) { return my_wc_mb_utf8_escape(cs, wc, str, end, '\'', 0); }
0
384,803
/*
 * Recursively expand one wildcard component of "path" on Unix and append all
 * matching file names to the grow-array "gap".
 *
 * "wildoff" is the offset into "path" before which backslashes protect
 * wildcard characters; "flags" are the EW_* expansion flags; "didstar" is
 * TRUE when a leading "**" was already expanded once (prevents infinite
 * recursion together with the stardepth limit of 100).
 * Returns the number of matches added.
 */
unix_expandpath(
    garray_T	*gap,
    char_u	*path,
    int		wildoff,
    int		flags,		// EW_* flags
    int		didstar)	// expanded "**" once already
{
    char_u	*buf;
    char_u	*path_end;
    char_u	*p, *s, *e;
    int		start_len = gap->ga_len;
    char_u	*pat;
    regmatch_T	regmatch;
    int		starts_with_dot;
    int		matches;
    int		len;
    int		starstar = FALSE;
    static int	stardepth = 0;	    // depth for "**" expansion

    DIR		*dirp;
    struct dirent *dp;

    // Expanding "**" may take a long time, check for CTRL-C.
    if (stardepth > 0)
    {
	ui_breakcheck();
	if (got_int)
	    return 0;
    }

    // make room for file name
    buf = alloc(STRLEN(path) + BASENAMELEN + 5);
    if (buf == NULL)
	return 0;

    /*
     * Find the first part in the path name that contains a wildcard.
     * When EW_ICASE is set every letter is considered to be a wildcard.
     * Copy it into "buf", including the preceding characters.
     */
    p = buf;
    s = buf;
    e = NULL;
    path_end = path;
    while (*path_end != NUL)
    {
	// May ignore a wildcard that has a backslash before it; it will
	// be removed by rem_backslash() or file_pat_to_reg_pat() below.
	if (path_end >= path + wildoff && rem_backslash(path_end))
	    *p++ = *path_end++;
	else if (*path_end == '/')
	{
	    if (e != NULL)
		break;
	    s = p + 1;
	}
	else if (path_end >= path + wildoff
			 && (vim_strchr((char_u *)"*?[{~$", *path_end) != NULL
			     || (!p_fic && (flags & EW_ICASE)
					  && vim_isalpha(PTR2CHAR(path_end)))))
	    e = p;
	if (has_mbyte)
	{
	    len = (*mb_ptr2len)(path_end);
	    STRNCPY(p, path_end, len);
	    p += len;
	    path_end += len;
	}
	else
	    *p++ = *path_end++;
    }
    e = p;
    *e = NUL;

    // Now we have one wildcard component between "s" and "e".
    // Remove backslashes between "wildoff" and the start of the wildcard
    // component.
    for (p = buf + wildoff; p < s; ++p)
	if (rem_backslash(p))
	{
	    STRMOVE(p, p + 1);
	    --e;
	    --s;
	}

    // Check for "**" between "s" and "e".
    for (p = s; p < e; ++p)
	if (p[0] == '*' && p[1] == '*')
	    starstar = TRUE;

    // convert the file pattern to a regexp pattern
    starts_with_dot = *s == '.';
    pat = file_pat_to_reg_pat(s, e, NULL, FALSE);
    if (pat == NULL)
    {
	vim_free(buf);
	return 0;
    }

    // compile the regexp into a program
    if (flags & EW_ICASE)
	regmatch.rm_ic = TRUE;		// 'wildignorecase' set
    else
	regmatch.rm_ic = p_fic;	// ignore case when 'fileignorecase' is set
    if (flags & (EW_NOERROR | EW_NOTWILD))
	++emsg_silent;
    regmatch.regprog = vim_regcomp(pat, RE_MAGIC);
    if (flags & (EW_NOERROR | EW_NOTWILD))
	--emsg_silent;
    vim_free(pat);

    if (regmatch.regprog == NULL && (flags & EW_NOTWILD) == 0)
    {
	vim_free(buf);
	return 0;
    }

    // If "**" is by itself, this is the first time we encounter it and more
    // is following then find matches without any directory.
    if (!didstar && stardepth < 100 && starstar && e - s == 2
							  && *path_end == '/')
    {
	STRCPY(s, path_end + 1);
	++stardepth;
	(void)unix_expandpath(gap, buf, (int)(s - buf), flags, TRUE);
	--stardepth;
    }

    // open the directory for scanning
    *s = NUL;
    dirp = opendir(*buf == NUL ? "." : (char *)buf);

    // Find all matching entries
    if (dirp != NULL)
    {
	for (;;)
	{
	    dp = readdir(dirp);
	    if (dp == NULL)
		break;
	    if ((dp->d_name[0] != '.' || starts_with_dot
			|| ((flags & EW_DODOT)
			    && dp->d_name[1] != NUL
			    && (dp->d_name[1] != '.' || dp->d_name[2] != NUL)))
		 && ((regmatch.regprog != NULL && vim_regexec(&regmatch,
					     (char_u *)dp->d_name, (colnr_T)0))
		   || ((flags & EW_NOTWILD)
		     && fnamencmp(path + (s - buf), dp->d_name, e - s) == 0)))
	    {
		STRCPY(s, dp->d_name);
		len = STRLEN(buf);

		if (starstar && stardepth < 100)
		{
		    // For "**" in the pattern first go deeper in the tree to
		    // find matches.
		    STRCPY(buf + len, "/**");
		    STRCPY(buf + len + 3, path_end);
		    ++stardepth;
		    (void)unix_expandpath(gap, buf, len + 1, flags, TRUE);
		    --stardepth;
		}

		STRCPY(buf + len, path_end);
		if (mch_has_exp_wildcard(path_end)) // handle more wildcards
		{
		    // need to expand another component of the path
		    // remove backslashes for the remaining components only
		    (void)unix_expandpath(gap, buf, len + 1, flags, FALSE);
		}
		else
		{
		    stat_T  sb;

		    // no more wildcards, check if there is a match
		    // remove backslashes for the remaining components only
		    if (*path_end != NUL)
			backslash_halve(buf + len + 1);
		    // add existing file or symbolic link
		    if ((flags & EW_ALLLINKS) ? mch_lstat((char *)buf, &sb) >= 0
						      : mch_getperm(buf) >= 0)
		    {
#ifdef MACOS_CONVERT
			size_t precomp_len = STRLEN(buf)+1;
			char_u *precomp_buf =
			    mac_precompose_path(buf, precomp_len, &precomp_len);

			if (precomp_buf)
			{
			    mch_memmove(buf, precomp_buf, precomp_len);
			    vim_free(precomp_buf);
			}
#endif
			addfile(gap, buf, flags);
		    }
		}
	    }
	}

	closedir(dirp);
    }

    vim_free(buf);
    vim_regfree(regmatch.regprog);

    // sort the new entries so the result is deterministic
    matches = gap->ga_len - start_len;
    if (matches > 0)
	qsort(((char_u **)gap->ga_data) + start_len, matches,
						   sizeof(char_u *), pstrcmp);
    return matches;
}
0
212,857
/*
 * Fill the quickfix window buffer "buf" with the entries of list "qfl",
 * one line per error.  When "old_last" is NULL the buffer is emptied and
 * rebuilt from scratch (and must be curbuf); otherwise only entries after
 * "old_last" are appended.  The optional 'quickfixtextfunc' result
 * (via call_qftf_func) overrides the default line text.
 * NOTE(review): BufReadPost/BufWinEnter autocommands run at the end with
 * curbuf_lock held — verify callers guard against the qf list being freed
 * by those autocommands.
 */
qf_fill_buffer(qf_list_T *qfl, buf_T *buf, qfline_T *old_last, int qf_winid)
{
    linenr_T	lnum;
    qfline_T	*qfp;
    int		old_KeyTyped = KeyTyped;
    list_T	*qftf_list = NULL;
    listitem_T	*qftf_li = NULL;

    if (old_last == NULL)
    {
	if (buf != curbuf)
	{
	    internal_error("qf_fill_buffer()");
	    return;
	}

	// delete all existing lines
	while ((curbuf->b_ml.ml_flags & ML_EMPTY) == 0)
	    (void)ml_delete((linenr_T)1);
    }

    // Check if there is anything to display
    if (qfl != NULL)
    {
	char_u		dirname[MAXPATHL];
	int		invalid_val = FALSE;
	int		prev_bufnr = -1;

	*dirname = NUL;

	// Add one line for each error
	if (old_last == NULL)
	{
	    qfp = qfl->qf_start;
	    lnum = 0;
	}
	else
	{
	    // appending: continue after the previously last entry
	    if (old_last->qf_next != NULL)
		qfp = old_last->qf_next;
	    else
		qfp = old_last;
	    lnum = buf->b_ml.ml_line_count;
	}

	// fetch user-formatted line texts, if a 'quickfixtextfunc' is set
	qftf_list = call_qftf_func(qfl, qf_winid, (long)(lnum + 1),
							(long)qfl->qf_count);
	if (qftf_list != NULL)
	    qftf_li = qftf_list->lv_first;

	while (lnum < qfl->qf_count)
	{
	    char_u	*qftf_str = NULL;

	    // Use the text supplied by the user defined function (if any).
	    // If the returned value is not string, then ignore the rest
	    // of the returned values and use the default.
	    if (qftf_li != NULL && !invalid_val)
	    {
		qftf_str = tv_get_string_chk(&qftf_li->li_tv);
		if (qftf_str == NULL)
		    invalid_val = TRUE;
	    }

	    if (qf_buf_add_line(buf, lnum, qfp, dirname,
				prev_bufnr != qfp->qf_fnum,
				qftf_str) == FAIL)
		break;

	    prev_bufnr = qfp->qf_fnum;
	    ++lnum;
	    qfp = qfp->qf_next;
	    if (qfp == NULL)
		break;

	    if (qftf_li != NULL)
		qftf_li = qftf_li->li_next;
	}

	if (old_last == NULL)
	    // Delete the empty line which is now at the end
	    (void)ml_delete(lnum + 1);
    }

    // correct cursor position
    check_lnums(TRUE);

    if (old_last == NULL)
    {
	// Set the 'filetype' to "qf" each time after filling the buffer.
	// This resembles reading a file into a buffer, it's more logical when
	// using autocommands.
	++curbuf_lock;
	set_option_value_give_err((char_u *)"ft",
						0L, (char_u *)"qf", OPT_LOCAL);
	curbuf->b_p_ma = FALSE;

	keep_filetype = TRUE;		// don't detect 'filetype'
	apply_autocmds(EVENT_BUFREADPOST, (char_u *)"quickfix", NULL,
							       FALSE, curbuf);
	apply_autocmds(EVENT_BUFWINENTER, (char_u *)"quickfix", NULL,
							       FALSE, curbuf);
	keep_filetype = FALSE;
	--curbuf_lock;

	// make sure it will be redrawn
	redraw_curbuf_later(UPD_NOT_VALID);
    }

    // Restore KeyTyped, setting 'filetype' may reset it.
    KeyTyped = old_KeyTyped;
}
1
438,669
static void virtio_rpmsg_release_device(struct device *dev) { struct rpmsg_device *rpdev = to_rpmsg_device(dev); struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev); kfree(vch); }
0
473,915
add_code_range_to_buf0(BBuf** pbuf, ScanEnv* env, OnigCodePoint from, OnigCodePoint to, int checkdup) { int r, inc_n, pos; int low, high, bound, x; OnigCodePoint n, *data; BBuf* bbuf; if (from > to) { n = from; from = to; to = n; } if (IS_NULL(*pbuf)) { r = new_code_range(pbuf); if (r) return r; bbuf = *pbuf; n = 0; } else { bbuf = *pbuf; GET_CODE_POINT(n, bbuf->p); } data = (OnigCodePoint* )(bbuf->p); data++; for (low = 0, bound = n; low < bound; ) { x = (low + bound) >> 1; if (from > data[x*2 + 1]) low = x + 1; else bound = x; } for (high = low, bound = n; high < bound; ) { x = (high + bound) >> 1; if (to >= data[x*2] - 1) high = x + 1; else bound = x; } inc_n = low + 1 - high; if (n + inc_n > ONIG_MAX_MULTI_BYTE_RANGES_NUM) return ONIGERR_TOO_MANY_MULTI_BYTE_RANGES; if (inc_n != 1) { if (checkdup && to >= data[low*2]) CC_DUP_WARN(env); if (from > data[low*2]) from = data[low*2]; if (to < data[(high - 1)*2 + 1]) to = data[(high - 1)*2 + 1]; } if (inc_n != 0 && (OnigCodePoint )high < n) { int from_pos = SIZE_CODE_POINT * (1 + high * 2); int to_pos = SIZE_CODE_POINT * (1 + (low + 1) * 2); int size = (n - high) * 2 * SIZE_CODE_POINT; if (inc_n > 0) { BBUF_MOVE_RIGHT(bbuf, from_pos, to_pos, size); } else { BBUF_MOVE_LEFT_REDUCE(bbuf, from_pos, to_pos); } } pos = SIZE_CODE_POINT * (1 + low * 2); BBUF_ENSURE_SIZE(bbuf, pos + SIZE_CODE_POINT * 2); BBUF_WRITE_CODE_POINT(bbuf, pos, from); BBUF_WRITE_CODE_POINT(bbuf, pos + SIZE_CODE_POINT, to); n += inc_n; BBUF_WRITE_CODE_POINT(bbuf, 0, n); return 0; }
0