idx
int64
func
string
target
int64
309,821
/* ncurses terminal-driver hook: switch the terminal's keypad transmit mode.
 * flag != 0 sends keypad_xmit and (first time only) builds the keytry tree
 * via _nc_init_keytry(); flag == 0 sends keypad_local if the terminfo
 * capability exists. Returns OK when a SCREEN is attached, ERR otherwise.
 * NOTE(review): the "!flag" in the else-if is redundant (we only get there
 * when flag is false). */
drv_kpad(TERMINAL_CONTROL_BLOCK * TCB, int flag) { int ret = ERR; SCREEN *sp; AssertTCB(); sp = TCB->csp; if (sp) { if (flag) { (void) __nc_putp_flush(sp, "keypad_xmit", keypad_xmit); } else if (!flag && keypad_local) { (void) __nc_putp_flush(sp, "keypad_local", keypad_local); } if (flag && !sp->_tried) { _nc_init_keytry(sp); sp->_tried = TRUE; } ret = OK; } return ret; }
0
488,404
/* Linux mm: free all PMD ranges under one PGD entry for [addr, end), then
 * clear and free the PUD page itself -- but only if the span, clamped by
 * floor/ceiling (PGDIR_MASK-aligned), covers the whole PGD entry. The
 * floor/ceiling checks guard against freeing page-table pages still shared
 * with neighbouring VMAs. Statement order is load-bearing; do not reorder. */
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pud_t *pud; unsigned long next; unsigned long start; start = addr; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; free_pmd_range(tlb, pud, addr, next, floor, ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) return; if (ceiling) { ceiling &= PGDIR_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pud = pud_offset(pgd, start); pgd_clear(pgd); pud_free_tlb(tlb, pud); }
0
317,187
/* Smack LSM: derive the Smack label for an incoming skb from its NetLabel
 * security attributes. Returns the matching smack_known entry or NULL when
 * netlbl_skbuff_getattr() finds no attributes. Cacheable results are pushed
 * into the NetLabel cache to speed up subsequent packets. The secattr is
 * always destroyed before returning (init/destroy bracket all paths). */
static struct smack_known *smack_from_netlbl(const struct sock *sk, u16 family, struct sk_buff *skb) { struct netlbl_lsm_secattr secattr; struct socket_smack *ssp = NULL; struct smack_known *skp = NULL; netlbl_secattr_init(&secattr); if (sk) ssp = sk->sk_security; if (netlbl_skbuff_getattr(skb, family, &secattr) == 0) { skp = smack_from_secattr(&secattr, ssp); if (secattr.flags & NETLBL_SECATTR_CACHEABLE) netlbl_cache_add(skb, family, &skp->smk_netlabel); } netlbl_secattr_destroy(&secattr); return skp; }
0
231,693
// Mutable accessor for the drain-timeout member; returns a reference so the
// caller can both read and reassign it (fluent-config style).
auto& drainTimeout() { return drainTimeout_; }
0
326,626
/* libarchive: destroy an archive_write_disk object. Closes the writer first
 * (its status becomes the return value unless cleanup fails harder), detaches
 * the user/group lookup callbacks, frees all owned strings/buffers, and on
 * macOS tears down the zlib stream used for HFS+ compression -- deflateEnd()
 * failure downgrades the result to ARCHIVE_FATAL. NULL input returns
 * ARCHIVE_OK (free(NULL)-style no-op). */
_archive_write_disk_free(struct archive *_a) { struct archive_write_disk *a; int ret; if (_a == NULL) return (ARCHIVE_OK); archive_check_magic(_a, ARCHIVE_WRITE_DISK_MAGIC, ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_write_disk_free"); a = (struct archive_write_disk *)_a; ret = _archive_write_disk_close(&a->archive); archive_write_disk_set_group_lookup(&a->archive, NULL, NULL, NULL); archive_write_disk_set_user_lookup(&a->archive, NULL, NULL, NULL); archive_entry_free(a->entry); archive_string_free(&a->_name_data); archive_string_free(&a->_tmpname_data); archive_string_free(&a->archive.error_string); archive_string_free(&a->path_safe); a->archive.magic = 0; __archive_clean(&a->archive); free(a->decmpfs_header_p); free(a->resource_fork); free(a->compressed_buffer); free(a->uncompressed_buffer); #if defined(__APPLE__) && defined(UF_COMPRESSED) && defined(HAVE_SYS_XATTR_H)\ && defined(HAVE_ZLIB_H) if (a->stream_valid) { switch (deflateEnd(&a->stream)) { case Z_OK: break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to clean up compressor"); ret = ARCHIVE_FATAL; break; } } #endif free(a); return (ret); }
0
317,357
/* Smack LSM hook for shmctl(): map the shm command to the access mode it
 * requires (read for STAT variants, read-write for SET/LOCK/UNLOCK/RMID),
 * allow system-level info commands unconditionally, reject unknown commands
 * with -EINVAL, then delegate the actual access check to smk_curacc_shm(). */
static int smack_shm_shmctl(struct kern_ipc_perm *isp, int cmd) { int may; switch (cmd) { case IPC_STAT: case SHM_STAT: case SHM_STAT_ANY: may = MAY_READ; break; case IPC_SET: case SHM_LOCK: case SHM_UNLOCK: case IPC_RMID: may = MAY_READWRITE; break; case IPC_INFO: case SHM_INFO: /* * System level information. */ return 0; default: return -EINVAL; } return smk_curacc_shm(isp, may); }
0
220,182
// Generate a graph-unique node name: "<prefix>/_<counter>", where the counter
// is post-incremented per call so successive calls never collide.
std::string Graph::NewName(StringPiece prefix) { return strings::StrCat(prefix, "/_", name_counter_++); }
0
245,701
/* tinyproxy: parse one "Name: value" header line into the ordered map.
 * Strips the trailing CR/LF via chomp(), splits at the first ':', then
 * NUL-fills the colon plus any following spaces/tabs so `header` is the
 * bare name and `sep` points at the value. Invalid headers (no colon) are
 * silently skipped (returns 0).
 * NOTE(review): the final "len -= sep - header - 1;" is a dead store --
 * len is never read afterwards; candidate for removal upstream. */
add_header_to_connection (orderedmap hashofheaders, char *header, size_t len) { char *sep; /* Get rid of the new line and return at the end */ len -= chomp (header, len); sep = strchr (header, ':'); if (!sep) return 0; /* just skip invalid header, do not give error */ /* Blank out colons, spaces, and tabs. */ while (*sep == ':' || *sep == ' ' || *sep == '\t') *sep++ = '\0'; /* Calculate the new length of just the data */ len -= sep - header - 1; return orderedmap_append (hashofheaders, header, sep); }
0
227,003
/* WeeChat IRC: handle a KILL message. For every channel on the server,
 * print a "You were killed by ..." line (with or without the kill comment,
 * argv_eol[3] stripped of a leading ':'). If the killed nick is our own,
 * free all nicks on the channel and mark modelists/bar items stale (channel
 * is no longer active); otherwise remove only the killed nick from the
 * channel's nick list. Always returns WEECHAT_RC_OK. */
IRC_PROTOCOL_CALLBACK(kill) { char *pos_comment; struct t_irc_channel *ptr_channel; struct t_irc_nick *ptr_nick, *ptr_nick_killed; IRC_PROTOCOL_MIN_ARGS(3); IRC_PROTOCOL_CHECK_HOST; pos_comment = (argc > 3) ? ((argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3]) : NULL; for (ptr_channel = server->channels; ptr_channel; ptr_channel = ptr_channel->next_channel) { ptr_nick = irc_nick_search (server, ptr_channel, nick); ptr_nick_killed = irc_nick_search (server, ptr_channel, argv[2]); if (pos_comment) { weechat_printf_date_tags ( irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, ptr_channel->buffer), date, irc_protocol_tags (command, NULL, NULL, address), _("%s%sYou were killed by %s%s%s %s(%s%s%s)"), weechat_prefix ("quit"), IRC_COLOR_MESSAGE_KICK, irc_nick_color_for_msg (server, 1, ptr_nick, nick), nick, IRC_COLOR_MESSAGE_KICK, IRC_COLOR_CHAT_DELIMITERS, IRC_COLOR_REASON_KICK, pos_comment, IRC_COLOR_CHAT_DELIMITERS); } else { weechat_printf_date_tags ( irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, ptr_channel->buffer), date, irc_protocol_tags (command, NULL, NULL, address), _("%s%sYou were killed by %s%s%s"), weechat_prefix ("quit"), IRC_COLOR_MESSAGE_KICK, irc_nick_color_for_msg (server, 1, ptr_nick, nick), nick, IRC_COLOR_MESSAGE_KICK); } if (irc_server_strcasecmp (server, argv[2], server->nick) == 0) { /* * my nick was killed => free all nicks, channel is not active any * more */ irc_nick_free_all (server, ptr_channel); irc_channel_modelist_set_state (ptr_channel, IRC_MODELIST_STATE_MODIFIED); irc_bar_item_update_channel (); } else { /* * someone was killed on channel (but not me) => remove only this * nick */ if (ptr_nick_killed) irc_nick_free (server, ptr_channel, ptr_nick_killed); } } return WEECHAT_RC_OK; }
0
226,354
/* GPAC: allocate an FD session-group ('segr') box via the standard
 * ISOM_DECL_BOX_ALLOC boilerplate (declares and initializes `tmp`).
 * NOTE(review): the closing '}' is missing in this extract -- the snippet
 * appears truncated by the dataset, not by the original source. */
GF_Box *segr_box_new() { ISOM_DECL_BOX_ALLOC(FDSessionGroupBox, GF_ISOM_BOX_TYPE_SEGR); return (GF_Box *)tmp;
0
225,676
/* GPAC: allocate a channel-layout ('chnl') box via ISOM_DECL_BOX_ALLOC
 * (declares and initializes `tmp`).
 * NOTE(review): closing '}' missing in this extract -- snippet truncated. */
GF_Box *chnl_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChannelLayoutBox, GF_ISOM_BOX_TYPE_CHNL); return (GF_Box *)tmp;
0
474,431
/* Mark the given object slot as no longer in use by clearing its
 * occupancy flag; the rest of the object's state is left untouched. */
ObjectFlush(
    OBJECT *object
)
{
    object->attributes.occupied = CLEAR;
}
0
492,675
/* VTE: handle the DC (delete character) control sequence. If the cursor row
 * exists in the ring, remove the cell at the cursor column, re-fill the tail
 * of the row with the current fill defaults when the background differs from
 * the default, and invalidate the repainted span. Always records that text
 * was deleted so the accessibility/text layers can refresh. */
vte_sequence_handler_dc (VteTerminal *terminal, GValueArray *params) { VteScreen *screen; VteRowData *rowdata; long col; screen = terminal->pvt->screen; if (_vte_ring_next(screen->row_data) > screen->cursor_current.row) { long len; /* Get the data for the row which the cursor points to. */ rowdata = _vte_ring_index_writable (screen->row_data, screen->cursor_current.row); g_assert(rowdata != NULL); col = screen->cursor_current.col; len = _vte_row_data_length (rowdata); /* Remove the column. */ if (col < len) { _vte_row_data_remove (rowdata, col); if (screen->fill_defaults.attr.back != VTE_DEF_BG) { _vte_row_data_fill (rowdata, &screen->fill_defaults, terminal->column_count); len = terminal->column_count; } /* Repaint this row. */ _vte_invalidate_cells(terminal, col, len - col, screen->cursor_current.row, 1); } } /* We've modified the display. Make a note of it. */ terminal->pvt->text_deleted_flag = TRUE; }
0
294,593
/* Ruby date ext: format `self` with strftime-style `fmt`. `func` extracts the
 * tmx fields from the receiver; date_strftime_alloc() may heap-allocate a
 * buffer larger than SMALLBUF, which is freed after building the result
 * string. RB_GC_GUARD keeps `self` alive across the allocation -- its
 * placement is deliberate; do not move it. Returns a US-ASCII Ruby string. */
strftimev(const char *fmt, VALUE self, void (*func)(VALUE, struct tmx *)) { char buffer[SMALLBUF], *buf = buffer; struct tmx tmx; long len; VALUE str; (*func)(self, &tmx); len = date_strftime_alloc(&buf, fmt, &tmx); RB_GC_GUARD(self); str = rb_usascii_str_new(buf, len); if (buf != buffer) xfree(buf); return str; }
0
442,564
/* Wipe all memslot state: zero every MemSlot entry in every slot group
 * tracked by @info. Group/slot counts themselves are left unchanged. */
void memslot_info_reset(RedMemSlotInfo *info)
{
    uint32_t group;

    for (group = 0; group < info->num_memslots_groups; group++) {
        MemSlot *slots = info->mem_slots[group];
        memset(slots, 0, info->num_memslots * sizeof(MemSlot));
    }
}
0
240,294
/* Vim: return the contents of register `regname` as an allocated string (or,
 * with GREG_LIST, as a list_T of lines cast to char_u*). Handles the special
 * cases first: '=' expression register (optionally refused or returned as
 * source via flags), '@' aliasing the unnamed register, clipboard selection
 * (FEAT_CLIPBOARD), and special registers via get_spec_reg(). For ordinary
 * yank registers it concatenates y_array lines, inserting '\n' between lines
 * and after the last line when y_type is MLINE. Two passes over y_array:
 * first to size the allocation, then to copy -- keep them in sync. Returns
 * NULL for invalid/empty registers or allocation failure. */
get_reg_contents(int regname, int flags) { long i; char_u *retval; int allocated; long len; // Don't allow using an expression register inside an expression if (regname == '=') { if (flags & GREG_NO_EXPR) return NULL; if (flags & GREG_EXPR_SRC) return getreg_wrap_one_line(get_expr_line_src(), flags); return getreg_wrap_one_line(get_expr_line(), flags); } if (regname == '@') // "@@" is used for unnamed register regname = '"'; // check for valid regname if (regname != NUL && !valid_yank_reg(regname, FALSE)) return NULL; # ifdef FEAT_CLIPBOARD regname = may_get_selection(regname); # endif if (get_spec_reg(regname, &retval, &allocated, FALSE)) { if (retval == NULL) return NULL; if (allocated) return getreg_wrap_one_line(retval, flags); return getreg_wrap_one_line(vim_strsave(retval), flags); } get_yank_register(regname, FALSE); if (y_current->y_array == NULL) return NULL; if (flags & GREG_LIST) { list_T *list = list_alloc(); int error = FALSE; if (list == NULL) return NULL; for (i = 0; i < y_current->y_size; ++i) if (list_append_string(list, y_current->y_array[i], -1) == FAIL) error = TRUE; if (error) { list_free(list); return NULL; } return (char_u *)list; } // Compute length of resulting string. len = 0; for (i = 0; i < y_current->y_size; ++i) { len += (long)STRLEN(y_current->y_array[i]); // Insert a newline between lines and after last line if // y_type is MLINE. if (y_current->y_type == MLINE || i < y_current->y_size - 1) ++len; } retval = alloc(len + 1); // Copy the lines of the yank register into the string. if (retval != NULL) { len = 0; for (i = 0; i < y_current->y_size; ++i) { STRCPY(retval + len, y_current->y_array[i]); len += (long)STRLEN(retval + len); // Insert a NL between lines and after the last line if y_type is // MLINE. if (y_current->y_type == MLINE || i < y_current->y_size - 1) retval[len++] = '\n'; } retval[len] = NUL; } return retval; }
0
247,749
// Envoy TLS test: with trust_chain_verification=ACCEPT_UNTRUSTED the server
// must complete the handshake even though the client presents no certificate
// and the configured verify hash can never match. Expects the
// "ssl.no_certificate" stat and no client cert/chain on the connection.
TEST_P(SslSocketTest, NoCertUntrustedPermitted) { const std::string client_ctx_yaml = R"EOF( common_tls_context: )EOF"; const std::string server_ctx_yaml = R"EOF( common_tls_context: tls_certificates: certificate_chain: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" private_key: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" validation_context: trusted_ca: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/fake_ca_cert.pem" trust_chain_verification: ACCEPT_UNTRUSTED verify_certificate_hash: "0000000000000000000000000000000000000000000000000000000000000000" )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); testUtil(test_options.setExpectedServerStats("ssl.no_certificate") .setExpectNoCert() .setExpectNoCertChain()); }
0
512,338
// MariaDB: print NULLIF(a,b). If the original representation is requested
// (QT_ITEM_ORIGINAL_FUNC_NULLIF), or args[0]/args[2] still alias the same
// item, emit "NULLIF(a,b)". After equal-field propagation the two copies of
// "a" may differ, in which case the only faithful rendering is the expanded
// CASE form "(case when a = b then NULL else a' end)". The long comments
// below explain why both representations must exist -- keep them.
void Item_func_nullif::print(String *str, enum_query_type query_type) { /* NULLIF(a,b) is implemented according to the SQL standard as a short for CASE WHEN a=b THEN NULL ELSE a END The constructor of Item_func_nullif sets args[0] and args[2] to the same item "a", and sets args[1] to "b". If "this" is a part of a WHERE or ON condition, then: - the left "a" is a subject to equal field propagation with ANY_SUBST. - the right "a" is a subject to equal field propagation with IDENTITY_SUBST. Therefore, after equal field propagation args[0] and args[2] can point to different items. */ if ((query_type & QT_ITEM_ORIGINAL_FUNC_NULLIF) || (arg_count == 2) || (args[0] == args[2])) { /* If QT_ITEM_ORIGINAL_FUNC_NULLIF is requested, that means we want the original NULLIF() representation, e.g. when we are in: SHOW CREATE {VIEW|FUNCTION|PROCEDURE} The original representation is possible only if args[0] and args[2] still point to the same Item. The caller must never pass call print() with QT_ITEM_ORIGINAL_FUNC_NULLIF if an expression has undergone some optimization (e.g. equal field propagation done in optimize_cond()) already and NULLIF() potentially has two different representations of "a": - one "a" for comparison - another "a" for the returned value! */ DBUG_ASSERT(arg_count == 2 || args[0] == args[2] || current_thd->lex->context_analysis_only); str->append(func_name()); str->append('('); if (arg_count == 2) args[0]->print(str, query_type); else args[2]->print(str, query_type); str->append(','); args[1]->print(str, query_type); str->append(')'); } else { /* args[0] and args[2] are different items. This is possible after WHERE optimization (equal fields propagation etc), e.g. in EXPLAIN EXTENDED or EXPLAIN FORMAT=JSON. As it's not possible to print as a function with 2 arguments any more, do it in the CASE style. 
*/ str->append(STRING_WITH_LEN("(case when ")); args[0]->print(str, query_type); str->append(STRING_WITH_LEN(" = ")); args[1]->print(str, query_type); str->append(STRING_WITH_LEN(" then NULL else ")); args[2]->print(str, query_type); str->append(STRING_WITH_LEN(" end)")); } }
0
413,845
// HotSpot: link-time resolution for invokevirtual. Resolves via the shared
// resolve_method() path, then enforces the JVMS rules the verifier cannot:
// a private interface method must be called with invokespecial (ICCE), and
// the resolved method must not be static (ICCE). Constructor/clinit names
// were already rejected by the verifier (asserts). Throws
// IncompatibleClassChangeError on violation, otherwise returns the Method*.
Method* LinkResolver::linktime_resolve_virtual_method(const LinkInfo& link_info, TRAPS) { // normal method resolution Method* resolved_method = resolve_method(link_info, Bytecodes::_invokevirtual, CHECK_NULL); assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier"); assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier"); // check if private interface method Klass* resolved_klass = link_info.resolved_klass(); Klass* current_klass = link_info.current_klass(); // This is impossible, if resolve_klass is an interface, we've thrown icce in resolve_method if (resolved_klass->is_interface() && resolved_method->is_private()) { ResourceMark rm(THREAD); stringStream ss; ss.print("private interface method requires invokespecial, not invokevirtual: method '"); resolved_method->print_external_name(&ss); ss.print("', caller-class: %s", (current_klass == NULL ? "<null>" : current_klass->internal_name())); THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string()); } // check if not static if (resolved_method->is_static()) { ResourceMark rm(THREAD); stringStream ss; ss.print("Expecting non-static method '"); resolved_method->print_external_name(&ss); ss.print("'"); THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string()); } if (log_develop_is_enabled(Trace, vtables)) { trace_method_resolution("invokevirtual resolved method: caller-class:", current_klass, resolved_klass, resolved_method, false); } return resolved_method; }
0
482,520
free_macro(const Macro *macro) { if (macro) { free((char *)macro->name); free((char *)macro->definition); free((int *)macro->substitutions); free((Macro *)macro); } }
0
274,854
// TFLite test: NOT_EQUAL on string tensors with broadcasting -- a {1,1,1,4}
// input compared against a scalar-shaped {1,1,1,1} input. Skipped under
// NNAPI (string comparisons unsupported there). Verifies elementwise result
// and that the output keeps the broadcast shape.
TEST(ComparisonsTest, NotEqualBroadcastString) { if (SingleOpModel::GetForceUseNnapi()) { return; } ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_STRING, BuiltinOperator_NOT_EQUAL); model.PopulateTensor<std::string>(model.input1(), {"A", "B", "A", "B"}); model.PopulateTensor<std::string>(model.input2(), {"A"}); model.Invoke(); EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, true)); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4)); }
0
275,924
/* Accessor: bit length of the curve order n for the given uECC curve. */
unsigned uECC_curve_num_n_bits(uECC_Curve curve)
{
    return curve->num_n_bits;
}
0
237,886
/* lsquic: one-time initialization of the QPACK encoder handler for `conn`.
 * Sets up the frame-buffer list (0x400-byte chunks), pre-inits the encoder,
 * records the max header-block prefix size, and -- if the peer's decoder
 * stream already arrived -- starts reading it. Asserts against double init. */
lsquic_qeh_init (struct qpack_enc_hdl *qeh, struct lsquic_conn *conn) { assert(!(qeh->qeh_flags & QEH_INITIALIZED)); qeh->qeh_conn = conn; lsquic_frab_list_init(&qeh->qeh_fral, 0x400, NULL, NULL, NULL); lsqpack_enc_preinit(&qeh->qeh_encoder, (void *) conn); qeh->qeh_flags |= QEH_INITIALIZED; qeh->qeh_max_prefix_size = lsqpack_enc_header_block_prefix_size(&qeh->qeh_encoder); if (qeh->qeh_dec_sm_in) lsquic_stream_wantread(qeh->qeh_dec_sm_in, 1); LSQ_DEBUG("initialized"); }
0
232,321
/* Convenience wrapper around gf_isom_box_parse_ex(): parse a single box
 * from the bitstream with default options (no parent type, strict mode,
 * no size hint). */
GF_Err gf_isom_box_parse(GF_Box **outBox, GF_BitStream *bs)
{
    return gf_isom_box_parse_ex(outBox, bs, 0, GF_FALSE, 0);
}
0
261,196
/* Release the semaphore by signalling the underlying GCD dispatch
 * semaphore. Always reports success (0). */
int wm_SemUnlock(wm_Sem *s)
{
    dispatch_semaphore_signal(*s);
    return 0;
}
0
402,629
/* pesign: DER-encode the AlgorithmIdentifier for OID `tag` into `der`,
 * allocating from the cms arena. Propagates generate_algorithm_id()
 * failures; on encode failure, cmsreterr (project macro -- presumably sets
 * the cms error and returns -1; confirm against its definition) reports the
 * error. Returns 0 on success. */
encode_algorithm_id(cms_context *cms, SECItem *der, SECOidTag tag) { SECAlgorithmID id; int rc = generate_algorithm_id(cms, &id, tag); if (rc < 0) return rc; void *ret; ret = SEC_ASN1EncodeItem(cms->arena, der, &id, SECOID_AlgorithmIDTemplate); if (ret == NULL) cmsreterr(-1, cms, "could not encode Algorithm ID"); return 0; }
0
321,741
/* Recover the aspeed_lpc_ctrl instance from an open misc-device file:
 * file->private_data points at the embedded miscdevice, so container_of
 * walks back to the enclosing driver struct. */
static struct aspeed_lpc_ctrl *file_aspeed_lpc_ctrl(struct file *file) { return container_of(file->private_data, struct aspeed_lpc_ctrl, miscdev); }
0
432,268
/* Unicorn: tear down a mapped MemoryRegion. If a vCPU exists, flush every
 * TLB page covering the region first so stale translations cannot outlive
 * the mapping; then detach the subregion and remove the region from the
 * mapped_blocks array, compacting the array with memmove before running the
 * region's destructor and freeing it. */
void memory_unmap(struct uc_struct *uc, MemoryRegion *mr) { int i; hwaddr addr; // Make sure all pages associated with the MemoryRegion are flushed // Only need to do this if we are in a running state if (uc->cpu) { for (addr = mr->addr; addr < mr->end; addr += uc->target_page_size) { tlb_flush_page(uc->cpu, addr); } } memory_region_del_subregion(uc->system_memory, mr); for (i = 0; i < uc->mapped_block_count; i++) { if (uc->mapped_blocks[i] == mr) { uc->mapped_block_count--; //shift remainder of array down over deleted pointer memmove(&uc->mapped_blocks[i], &uc->mapped_blocks[i + 1], sizeof(MemoryRegion*) * (uc->mapped_block_count - i)); mr->destructor(mr); g_free(mr); break; } } }
0
328,938
/* radare2: look up constant-pool entry `idx` in cp_list and return its name
 * string (caller frees), or NULL when the entry or list is missing.
 * NOTE(review): cp_list is passed to the lookup helper before the NULL check
 * in the `if` -- harmless only if the helper tolerates NULL lists; consider
 * checking cp_list first upstream. */
R_API char *r_bin_java_get_name_from_cp_item_list(RList *cp_list, ut64 idx) { /* Given a constant pool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue ut8* (user frees) or NULL */ RBinJavaCPTypeObj *obj = r_bin_java_get_item_from_cp_item_list ( cp_list, idx); if (obj && cp_list) { return r_bin_java_get_item_name_from_cp_item_list ( cp_list, obj, MAX_CPITEMS); } return NULL; }
0
220,106
/* NFSv4.2 remap (CLONE) implementation. Rejects dedup and unsupported flags,
 * refuses swapfiles, and enforces server clone_blksize alignment (a tail
 * shorter than the block size is allowed only when it ends exactly at the
 * source EOF). Locks both inodes in a canonical parent/child order by
 * pointer value (same-inode case locks once) to avoid ABBA deadlock,
 * flushes dirty pages on both sides so the server sees current data, issues
 * the CLONE, then truncates the destination page-cache range so later reads
 * refetch from the server. Returns `count` on success, negative errno on
 * failure. Unlock order mirrors lock order -- do not reorder. */
static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off, struct file *dst_file, loff_t dst_off, loff_t count, unsigned int remap_flags) { struct inode *dst_inode = file_inode(dst_file); struct nfs_server *server = NFS_SERVER(dst_inode); struct inode *src_inode = file_inode(src_file); unsigned int bs = server->clone_blksize; bool same_inode = false; int ret; /* NFS does not support deduplication. */ if (remap_flags & REMAP_FILE_DEDUP) return -EOPNOTSUPP; if (remap_flags & ~REMAP_FILE_ADVISORY) return -EINVAL; if (IS_SWAPFILE(dst_inode) || IS_SWAPFILE(src_inode)) return -ETXTBSY; /* check alignment w.r.t. clone_blksize */ ret = -EINVAL; if (bs) { if (!IS_ALIGNED(src_off, bs) || !IS_ALIGNED(dst_off, bs)) goto out; if (!IS_ALIGNED(count, bs) && i_size_read(src_inode) != (src_off + count)) goto out; } if (src_inode == dst_inode) same_inode = true; /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */ if (same_inode) { inode_lock(src_inode); } else if (dst_inode < src_inode) { inode_lock_nested(dst_inode, I_MUTEX_PARENT); inode_lock_nested(src_inode, I_MUTEX_CHILD); } else { inode_lock_nested(src_inode, I_MUTEX_PARENT); inode_lock_nested(dst_inode, I_MUTEX_CHILD); } /* flush all pending writes on both src and dst so that server * has the latest data */ ret = nfs_sync_inode(src_inode); if (ret) goto out_unlock; ret = nfs_sync_inode(dst_inode); if (ret) goto out_unlock; ret = nfs42_proc_clone(src_file, dst_file, src_off, dst_off, count); /* truncate inode page cache of the dst range so that future reads can fetch * new data from server */ if (!ret) truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1); out_unlock: if (same_inode) { inode_unlock(src_inode); } else if (dst_inode < src_inode) { inode_unlock(src_inode); inode_unlock(dst_inode); } else { inode_unlock(dst_inode); inode_unlock(src_inode); } out: return ret < 0 ? ret : count; }
0
175,701
// True when an ethernet device exists and reports an in-progress
// connection attempt; false when no device is present.
virtual bool ethernet_connecting() const {
  if (!ethernet_)
    return false;
  return ethernet_->connecting();
}
0
294,505
/* Ruby date ext: DateTime.now implementation. Reads the wall clock
 * (clock_gettime with nanoseconds when available, else gettimeofday with
 * microseconds), converts to local broken-down time, and determines the UTC
 * offset through a cascade of platform fallbacks: tm_gmtoff, timezone/
 * altzone (with a DST mktime correction), timegm, or a gmtime/mktime
 * round-trip. Leap-second 60 is clamped to 59. Offsets outside one day are
 * discarded with a warning. Builds the DateTime via
 * d_complex_new_internal() with the civil date, sub-second `sf`, offset
 * `of`, and Day-of-Calendar-Reform `sg` (optional arg, DEFAULT_SG). */
datetime_s_now(int argc, VALUE *argv, VALUE klass) { VALUE vsg, nth, ret; double sg; #ifdef HAVE_CLOCK_GETTIME struct timespec ts; #else struct timeval tv; #endif time_t sec; struct tm tm; long sf, of; int y, ry, m, d, h, min, s; rb_scan_args(argc, argv, "01", &vsg); if (argc < 1) sg = DEFAULT_SG; else sg = NUM2DBL(vsg); #ifdef HAVE_CLOCK_GETTIME if (clock_gettime(CLOCK_REALTIME, &ts) == -1) rb_sys_fail("clock_gettime"); sec = ts.tv_sec; #else if (gettimeofday(&tv, NULL) == -1) rb_sys_fail("gettimeofday"); sec = tv.tv_sec; #endif tzset(); if (!localtime_r(&sec, &tm)) rb_sys_fail("localtime"); y = tm.tm_year + 1900; m = tm.tm_mon + 1; d = tm.tm_mday; h = tm.tm_hour; min = tm.tm_min; s = tm.tm_sec; if (s == 60) s = 59; #ifdef HAVE_STRUCT_TM_TM_GMTOFF of = tm.tm_gmtoff; #elif defined(HAVE_TIMEZONE) #if defined(HAVE_ALTZONE) && !defined(_AIX) of = (long)-((tm.tm_isdst > 0) ? altzone : timezone); #else of = (long)-timezone; if (tm.tm_isdst) { time_t sec2; tm.tm_isdst = 0; sec2 = mktime(&tm); of += (long)difftime(sec2, sec); } #endif #elif defined(HAVE_TIMEGM) { time_t sec2; sec2 = timegm(&tm); of = (long)difftime(sec2, sec); } #else { struct tm tm2; time_t sec2; if (!gmtime_r(&sec, &tm2)) rb_sys_fail("gmtime"); tm2.tm_isdst = tm.tm_isdst; sec2 = mktime(&tm2); of = (long)difftime(sec, sec2); } #endif #ifdef HAVE_CLOCK_GETTIME sf = ts.tv_nsec; #else sf = tv.tv_usec * 1000; #endif if (of < -DAY_IN_SECONDS || of > DAY_IN_SECONDS) { of = 0; rb_warning("invalid offset is ignored"); } decode_year(INT2FIX(y), -1, &nth, &ry); ret = d_complex_new_internal(klass, nth, 0, 0, LONG2NUM(sf), (int)of, GREGORIAN, ry, m, d, h, min, s, HAVE_CIVIL | HAVE_TIME); { get_d1(ret); set_sg(dat, sg); } return ret; }
0
466,183
/* KVM x86 emulator: SUB instruction handler. emulate_2op_SrcV expands to the
 * generic two-operand dispatch that performs dst -= src and updates the
 * guest flags per the "sub" mnemonic. Always continues emulation. */
static int em_sub(struct x86_emulate_ctxt *ctxt) { emulate_2op_SrcV(ctxt, "sub"); return X86EMUL_CONTINUE; }
0
498,096
/* cgit: emit an HTML <a> for a log-page link. repolink() writes the URL up
 * to (and returns) the next query-string delimiter; each optional parameter
 * (id, qt/q grep pair, ofs, showmsg, follow) is appended URL-escaped, with
 * `delim` switching to "&amp;" after the first one. The link text `name`
 * is HTML-escaped. */
void cgit_log_link(const char *name, const char *title, const char *class, const char *head, const char *rev, const char *path, int ofs, const char *grep, const char *pattern, int showmsg, int follow) { char *delim; delim = repolink(title, class, "log", head, path); if (rev && ctx.qry.head && strcmp(rev, ctx.qry.head)) { html(delim); html("id="); html_url_arg(rev); delim = "&amp;"; } if (grep && pattern) { html(delim); html("qt="); html_url_arg(grep); delim = "&amp;"; html(delim); html("q="); html_url_arg(pattern); } if (ofs > 0) { html(delim); html("ofs="); htmlf("%d", ofs); delim = "&amp;"; } if (showmsg) { html(delim); html("showmsg=1"); delim = "&amp;"; } if (follow) { html(delim); html("follow=1"); } html("'>"); html_txt(name); html("</a>"); }
0
229,151
/* QEMU virtio-serial: how many bytes the guest can currently accept on this
 * port's input queue. Returns 0 if the queue isn't ready, the driver isn't
 * OK, the queue is empty, or (in multiport mode) the guest side isn't
 * connected; otherwise queries available buffer space capped at 4096. */
size_t virtio_serial_guest_ready(VirtIOSerialPort *port) { VirtIODevice *vdev = VIRTIO_DEVICE(port->vser); VirtQueue *vq = port->ivq; unsigned int bytes; if (!virtio_queue_ready(vq) || !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || virtio_queue_empty(vq)) { return 0; } if (use_multiport(port->vser) && !port->guest_connected) { return 0; } virtqueue_get_avail_bytes(vq, &bytes, NULL, 4096, 0); return bytes; }
0
221,518
/* Flatpak: configure X11 access for the sandbox. Always masks
 * /tmp/.X11-unix with a tmpfs (see the long in-code comment for the symlink
 * / shared-/tmp attack reasoning). If X11 is not allowed, just unsets
 * DISPLAY. Otherwise, for a local ":N" display, bind-mounts the host socket
 * as X99 inside the sandbox, rewrites DISPLAY to :99.0, and (with
 * ENABLE_XAUTH) writes a filtered Xauthority into an anonymous tmpfile that
 * is exposed read-only at /run/flatpak/Xauthority with XAUTHORITY pointing
 * at it. Non-local displays fall through to unsetting DISPLAY. */
flatpak_run_add_x11_args (FlatpakBwrap *bwrap, gboolean allowed) { g_autofree char *x11_socket = NULL; const char *display; /* Always cover /tmp/.X11-unix, that way we never see the host one in case * we have access to the host /tmp. If you request X access we'll put the right * thing in this anyway. * * We need to be a bit careful here, because there are two situations in * which potentially hostile processes have access to /tmp and could * create symlinks, which in principle could cause us to create the * directory and mount the tmpfs at the target of the symlink instead * of in the intended place: * * - With --filesystem=/tmp, it's the host /tmp - but because of the * special historical status of /tmp/.X11-unix, we can assume that * it is pre-created by the host system before user code gets to run. * * - When /tmp is shared between all instances of the same app ID, * in principle the app has control over what's in /tmp, but in * practice it can't interfere with /tmp/.X11-unix, because we do * this unconditionally - therefore by the time app code runs, * /tmp/.X11-unix is already a mount point, meaning the app cannot * rename or delete it. 
*/ flatpak_bwrap_add_args (bwrap, "--tmpfs", "/tmp/.X11-unix", NULL); if (!allowed) { flatpak_bwrap_unset_env (bwrap, "DISPLAY"); return; } g_debug ("Allowing x11 access"); display = g_getenv ("DISPLAY"); if (display && display[0] == ':' && g_ascii_isdigit (display[1])) { const char *display_nr = &display[1]; const char *display_nr_end = display_nr; g_autofree char *d = NULL; while (g_ascii_isdigit (*display_nr_end)) display_nr_end++; d = g_strndup (display_nr, display_nr_end - display_nr); x11_socket = g_strdup_printf ("/tmp/.X11-unix/X%s", d); flatpak_bwrap_add_args (bwrap, "--ro-bind", x11_socket, "/tmp/.X11-unix/X99", NULL); flatpak_bwrap_set_env (bwrap, "DISPLAY", ":99.0", TRUE); #ifdef ENABLE_XAUTH g_auto(GLnxTmpfile) xauth_tmpf = { 0, }; if (glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &xauth_tmpf, NULL)) { FILE *output = fdopen (xauth_tmpf.fd, "wb"); if (output != NULL) { /* fd is now owned by output, steal it from the tmpfile */ int tmp_fd = dup (glnx_steal_fd (&xauth_tmpf.fd)); if (tmp_fd != -1) { static const char dest[] = "/run/flatpak/Xauthority"; write_xauth (d, output); flatpak_bwrap_add_args_data_fd (bwrap, "--ro-bind-data", tmp_fd, dest); flatpak_bwrap_set_env (bwrap, "XAUTHORITY", dest, TRUE); } fclose (output); if (tmp_fd != -1) lseek (tmp_fd, 0, SEEK_SET); } } #endif } else { flatpak_bwrap_unset_env (bwrap, "DISPLAY"); } }
0
384,773
/* Vim: ensure wp->w_virtcol is up to date for window `wp`. Recomputes the
 * virtual column via getvvcol() only when the cached VALID_VIRTCOL flag is
 * unset (after checking for cursor movement), and schedules a redraw when
 * 'cursorcolumn' is set and no popup menu is showing. */
validate_virtcol_win(win_T *wp) { check_cursor_moved(wp); if (!(wp->w_valid & VALID_VIRTCOL)) { getvvcol(wp, &wp->w_cursor, NULL, &(wp->w_virtcol), NULL); wp->w_valid |= VALID_VIRTCOL; #ifdef FEAT_SYN_HL if (wp->w_p_cuc && !pum_visible()) redraw_win_later(wp, SOME_VALID); #endif } }
0
436,117
/* io_uring: task-work callback for an armed async poll. Re-checks the poll
 * condition under completion_lock (io_poll_rewait may re-arm and return
 * true, in which case we just unlock and bail), otherwise removes the
 * request from the cancel hash and its double-poll entry, then either
 * resubmits the request or completes it with -ECANCELED if the poll was
 * cancelled.
 * NOTE(review): closing '}' missing in this extract -- snippet truncated by
 * the dataset. */
static void io_async_task_func(struct io_kiocb *req) { struct async_poll *apoll = req->apoll; struct io_ring_ctx *ctx = req->ctx; trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data); if (io_poll_rewait(req, &apoll->poll)) { spin_unlock_irq(&ctx->completion_lock); return; } hash_del(&req->hash_node); io_poll_remove_double(req); spin_unlock_irq(&ctx->completion_lock); if (!READ_ONCE(apoll->poll.canceled)) io_req_task_submit(req); else io_req_complete_failed(req, -ECANCELED);
0
328,925
/* radare2: build an RBinJavaInterfaceInfo from a 2-byte constant-pool index
 * in `buffer`. Resolves the CP class entry and its name when possible;
 * falls back to the literal string "NULL" (allocated) when the entry or the
 * buffer is missing. Returns NULL only on allocation failure. */
R_API RBinJavaInterfaceInfo *r_bin_java_interface_new(RBinJavaObj *bin, const ut8 *buffer, ut64 sz) { IFDBG eprintf ("Parsing RBinJavaInterfaceInfo\n"); RBinJavaInterfaceInfo *ifobj = R_NEW0 (RBinJavaInterfaceInfo); if (ifobj) { if (buffer) { ifobj->class_info_idx = R_BIN_JAVA_USHORT (buffer, 0); ifobj->cp_class = r_bin_java_get_item_from_bin_cp_list (bin, ifobj->class_info_idx); if (ifobj->cp_class) { ifobj->name = r_bin_java_get_item_name_from_bin_cp_list (bin, ifobj->cp_class); } else { ifobj->name = r_str_dup (NULL, "NULL"); } ifobj->size = 2; } else { ifobj->class_info_idx = 0; ifobj->name = r_str_dup (NULL, "NULL"); } } return ifobj; }
0
366,290
/* Linux VFS: apply user-settable mount flags to `mnt`, preserving the
 * kernel-internal flags (those outside MNT_USER_SETTABLE_MASK), and bump
 * the namespace event counter so pollers see the change. */
static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) { mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; mnt->mnt.mnt_flags = mnt_flags; touch_mnt_namespace(mnt->mnt_ns); }
0
415,220
/* scdaemon: Assuan READCERT command handler. Opens the card if needed,
 * copies `line` (the cert id) because app_readcert may clobber the Assuan
 * line buffer, reads the certificate, and streams it back as Assuan data.
 * `cert` is freed after sending; TEST_CARD_REMOVAL may rewrite rc if the
 * card was pulled mid-operation. */
cmd_readcert (assuan_context_t ctx, char *line) { ctrl_t ctrl = assuan_get_pointer (ctx); int rc; unsigned char *cert; size_t ncert; if ((rc = open_card (ctrl, NULL))) return rc; line = xstrdup (line); /* Need a copy of the line. */ rc = app_readcert (ctrl->app_ctx, line, &cert, &ncert); if (rc) log_error ("app_readcert failed: %s\n", gpg_strerror (rc)); xfree (line); line = NULL; if (!rc) { rc = assuan_send_data (ctx, cert, ncert); xfree (cert); if (rc) return rc; } TEST_CARD_REMOVAL (ctrl, rc); return rc; }
0
208,464
/* AF_PACKET: install or tear down a TX/RX ring (setsockopt PACKET_*_RING and
 * mmap teardown path when closing). Validates block/frame geometry (page
 * alignment, frame size vs header+reserve minimum, frame/block arithmetic
 * overflow), allocates the page vector and -- for <= TPACKET_V2 -- the
 * rx_owner_map bitmap. The socket is unhooked from the protocol while the
 * rings are swapped under pg_vec_lock, then re-registered. The swap only
 * happens when closing or when nothing is mmapped. Cleanup is goto-based;
 * the retire-block timer is shut down for V3 RX rings on teardown.
 * NOTE(review): this dataset labels this revision as vulnerable (target=1);
 * ordering between unregister/swap/register and the mapped checks is
 * exactly where historical races lived -- do not restructure casually. */
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring) { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); unsigned long *rx_owner_map = NULL; int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; __be16 num; int err; /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req; rb = tx_ring ? &po->tx_ring : &po->rx_ring; rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; err = -EBUSY; if (!closing) { if (atomic_read(&po->mapped)) goto out; if (packet_read_pending(rb)) goto out; } if (req->tp_block_nr) { unsigned int min_frame_size; /* Sanity tests and some calculations */ err = -EBUSY; if (unlikely(rb->pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V1: po->tp_hdrlen = TPACKET_HDRLEN; break; case TPACKET_V2: po->tp_hdrlen = TPACKET2_HDRLEN; break; case TPACKET_V3: po->tp_hdrlen = TPACKET3_HDRLEN; break; } err = -EINVAL; if (unlikely((int)req->tp_block_size <= 0)) goto out; if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; min_frame_size = po->tp_hdrlen + po->tp_reserve; if (po->tp_version >= TPACKET_V3 && req->tp_block_size < BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size) goto out; if (unlikely(req->tp_frame_size < min_frame_size)) goto out; if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) goto out; rb->frames_per_block = req->tp_block_size / req->tp_frame_size; if (unlikely(rb->frames_per_block == 0)) goto out; if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) goto out; err = -ENOMEM; order = get_order(req->tp_block_size); pg_vec = alloc_pg_vec(req, order); if (unlikely(!pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V3: /* Block transmit is not supported yet */ if (!tx_ring) { init_prb_bdqc(po, rb, pg_vec, req_u); } else { struct tpacket_req3 *req3 = &req_u->req3; if 
(req3->tp_retire_blk_tov || req3->tp_sizeof_priv || req3->tp_feature_req_word) { err = -EINVAL; goto out_free_pg_vec; } } break; default: if (!tx_ring) { rx_owner_map = bitmap_alloc(req->tp_frame_nr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); if (!rx_owner_map) goto out_free_pg_vec; } break; } } /* Done */ else { err = -EINVAL; if (unlikely(req->tp_frame_nr)) goto out; } /* Detach socket from network */ spin_lock(&po->bind_lock); was_running = po->running; num = po->num; if (was_running) { WRITE_ONCE(po->num, 0); __unregister_prot_hook(sk, false); } spin_unlock(&po->bind_lock); synchronize_net(); err = -EBUSY; mutex_lock(&po->pg_vec_lock); if (closing || atomic_read(&po->mapped) == 0) { err = 0; spin_lock_bh(&rb_queue->lock); swap(rb->pg_vec, pg_vec); if (po->tp_version <= TPACKET_V2) swap(rb->rx_owner_map, rx_owner_map); rb->frame_max = (req->tp_frame_nr - 1); rb->head = 0; rb->frame_size = req->tp_frame_size; spin_unlock_bh(&rb_queue->lock); swap(rb->pg_vec_order, order); swap(rb->pg_vec_len, req->tp_block_nr); rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; po->prot_hook.func = (po->rx_ring.pg_vec) ? tpacket_rcv : packet_rcv; skb_queue_purge(rb_queue); if (atomic_read(&po->mapped)) pr_err("packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); } mutex_unlock(&po->pg_vec_lock); spin_lock(&po->bind_lock); if (was_running) { WRITE_ONCE(po->num, num); register_prot_hook(sk); } spin_unlock(&po->bind_lock); if (pg_vec && (po->tp_version > TPACKET_V2)) { /* Because we don't support block-based V3 on tx-ring */ if (!tx_ring) prb_shutdown_retire_blk_timer(po, rb_queue); } out_free_pg_vec: bitmap_free(rx_owner_map); if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: return err; }
1
226,331
/* GPAC: read a LASeR sample-entry box -- parse the common sample-entry
 * header, account for the 8 consumed bytes against the box size, then read
 * the child box array.
 * NOTE(review): closing '}' missing in this extract -- snippet truncated. */
GF_Err lsr1_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox*)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); return gf_isom_box_array_read(s, bs);
0
312,458
/* Vim quickfix: copy the %m (message) regex submatch into fields->errmsg,
 * growing the buffer (with NUL room) via vim_realloc when needed. Returns
 * QF_FAIL for a missing match, QF_NOMEM on allocation failure, QF_OK on
 * success. vim_strncpy NUL-terminates at len. */
qf_parse_fmt_m(regmatch_T *rmp, int midx, qffields_T *fields) { char_u *p; int len; if (rmp->startp[midx] == NULL || rmp->endp[midx] == NULL) return QF_FAIL; len = (int)(rmp->endp[midx] - rmp->startp[midx]); if (len >= fields->errmsglen) { // len + null terminator if ((p = vim_realloc(fields->errmsg, len + 1)) == NULL) return QF_NOMEM; fields->errmsg = p; fields->errmsglen = len + 1; } vim_strncpy(fields->errmsg, rmp->startp[midx], len); return QF_OK; }
0
253,711
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; if (wa->sg_used == sg_dma_len(wa->dma_sg)) { /* Advance to the next DMA scatterlist entry */ wa->dma_sg = sg_next(wa->dma_sg); /* In the case that the DMA mapped scatterlist has entries * that have been merged, the non-DMA mapped scatterlist * must be advanced multiple times for each merged entry. * This ensures that the current non-DMA mapped entry * corresponds to the current DMA mapped entry. */ do { sg_combined_len += wa->sg->length; wa->sg = sg_next(wa->sg); } while (wa->sg_used > sg_combined_len); wa->sg_used = 0; } }
0
462,236
static void* clone_binary_attr(pj_pool_t *pool, const void *src) { const pj_stun_binary_attr *asrc = (const pj_stun_binary_attr*)src; pj_stun_binary_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_binary_attr); pj_memcpy(dst, src, sizeof(pj_stun_binary_attr)); if (asrc->length) { dst->data = (pj_uint8_t*) pj_pool_alloc(pool, asrc->length); pj_memcpy(dst->data, asrc->data, asrc->length); } return (void*)dst; }
0
384,918
skipwhite(char_u *q) { char_u *p = q; while (VIM_ISWHITE(*p)) ++p; return p; }
0
512,418
bool Item_cond::excl_dep_on_table(table_map tab_map) { if (used_tables() & OUTER_REF_TABLE_BIT) return false; if (!(used_tables() & ~tab_map)) return true; List_iterator_fast<Item> li(list); Item *item; while ((item= li++)) { if (!item->excl_dep_on_table(tab_map)) return false; } return true; }
0
508,301
bool get_key_map_from_key_list(key_map *map, TABLE *table, List<String> *index_list) { List_iterator_fast<String> it(*index_list); String *name; uint pos; map->clear_all(); while ((name=it++)) { if (table->s->keynames.type_names == 0 || (pos= find_type(&table->s->keynames, name->ptr(), name->length(), 1)) <= 0) { my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), name->c_ptr(), table->pos_in_table_list->alias); map->set_all(); return 1; } map->set_bit(pos-1); } return 0; }
0
436,047
static bool io_register_op_must_quiesce(int op) { switch (op) { case IORING_REGISTER_BUFFERS: case IORING_UNREGISTER_BUFFERS: case IORING_REGISTER_FILES: case IORING_UNREGISTER_FILES: case IORING_REGISTER_FILES_UPDATE: case IORING_REGISTER_PROBE: case IORING_REGISTER_PERSONALITY: case IORING_UNREGISTER_PERSONALITY: case IORING_REGISTER_FILES2: case IORING_REGISTER_FILES_UPDATE2: case IORING_REGISTER_BUFFERS2: case IORING_REGISTER_BUFFERS_UPDATE: case IORING_REGISTER_IOWQ_AFF: case IORING_UNREGISTER_IOWQ_AFF: return false; default: return true; }
0
338,232
bool WasmBinaryBuilder::maybeVisitI31New(Expression*& out, uint32_t code) { if (code != BinaryConsts::I31New) { return false; } auto* curr = allocator.alloc<I31New>(); curr->value = popNonVoidExpression(); curr->finalize(); out = curr; return true; }
0
234,240
add_dwo_name (const char * name, dwarf_vma cu_offset) { add_dwo_info (name, cu_offset, DWO_NAME); }
0
477,811
int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) { struct rtas_token_definition *d; struct rtas_args args; rtas_arg_t *orig_rets; gpa_t args_phys; int rc; /* * r4 contains the guest physical address of the RTAS args * Mask off the top 4 bits since this is a guest real address */ args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM; vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args)); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (rc) goto fail; /* * args->rets is a pointer into args->args. Now that we've * copied args we need to fix it up to point into our copy, * not the guest args. We also need to save the original * value so we can restore it on the way out. */ orig_rets = args.rets; if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) { /* * Don't overflow our args array: ensure there is room for * at least rets[0] (even if the call specifies 0 nret). * * Each handler must then check for the correct nargs and nret * values, but they may always return failure in rets[0]. */ rc = -EINVAL; goto fail; } args.rets = &args.args[be32_to_cpu(args.nargs)]; mutex_lock(&vcpu->kvm->arch.rtas_token_lock); rc = -ENOENT; list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { if (d->token == be32_to_cpu(args.token)) { d->handler->handler(vcpu, &args); rc = 0; break; } } mutex_unlock(&vcpu->kvm->arch.rtas_token_lock); if (rc == 0) { args.rets = orig_rets; rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); if (rc) goto fail; } return rc; fail: /* * We only get here if the guest has called RTAS with a bogus * args pointer or nargs/nret values that would overflow the * array. That means we can't get to the args, and so we can't * fail the RTAS call. So fail right out to userspace, which * should kill the guest. 
* * SLOF should actually pass the hcall return value from the * rtas handler call in r3, so enter_rtas could be modified to * return a failure indication in r3 and we could return such * errors to the guest rather than failing to host userspace. * However old guests that don't test for failure could then * continue silently after errors, so for now we won't do this. */ return rc; }
0
282,978
LJ_NOINLINE static void unwindstack(lua_State *L, TValue *top) { lj_func_closeuv(L, top); if (top < L->top-1) { copyTV(L, top, L->top-1); L->top = top+1; } lj_state_relimitstack(L); }
0
459,514
static __u64 count_kernel_ip(struct perf_callchain_entry *trace) { __u64 nr_kernel = 0; while (nr_kernel < trace->nr) { if (trace->ip[nr_kernel] == PERF_CONTEXT_USER) break; nr_kernel++; } return nr_kernel; }
0
317,249
static int selinux_inet_sys_rcv_skb(struct net *ns, int ifindex, char *addrp, u16 family, u32 peer_sid, struct common_audit_data *ad) { int err; u32 if_sid; u32 node_sid; err = sel_netif_sid(ns, ifindex, &if_sid); if (err) return err; err = avc_has_perm(&selinux_state, peer_sid, if_sid, SECCLASS_NETIF, NETIF__INGRESS, ad); if (err) return err; err = sel_netnode_sid(addrp, family, &node_sid); if (err) return err; return avc_has_perm(&selinux_state, peer_sid, node_sid, SECCLASS_NODE, NODE__RECVFROM, ad); }
0
294,688
df_to_time(int df, int *h, int *min, int *s) { *h = df / HOUR_IN_SECONDS; df %= HOUR_IN_SECONDS; *min = df / MINUTE_IN_SECONDS; *s = df % MINUTE_IN_SECONDS; }
0
273,071
log_fatal_null(int domain, const char *func, int line) { DPRINTF(E_FATAL, domain, "%s returned NULL at line %d\n", func, line); abort(); }
0
489,213
int hfsplus_rename_cat(u32 cnid, struct inode *src_dir, struct qstr *src_name, struct inode *dst_dir, struct qstr *dst_name) { struct super_block *sb; struct hfs_find_data src_fd, dst_fd; hfsplus_cat_entry entry; int entry_size, type; int err = 0; dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name, dst_dir->i_ino, dst_name->name); sb = src_dir->i_sb; hfs_find_init(HFSPLUS_SB(sb).cat_tree, &src_fd); dst_fd = src_fd; /* find the old dir entry and read the data */ hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); err = hfs_brec_find(&src_fd); if (err) goto out; hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, src_fd.entrylength); /* create new dir entry with the data from the old entry */ hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); err = hfs_brec_find(&dst_fd); if (err != -ENOENT) { if (!err) err = -EEXIST; goto out; } err = hfs_brec_insert(&dst_fd, &entry, src_fd.entrylength); if (err) goto out; dst_dir->i_size++; dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(dst_dir); /* finally remove the old entry */ hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); err = hfs_brec_find(&src_fd); if (err) goto out; err = hfs_brec_remove(&src_fd); if (err) goto out; src_dir->i_size--; src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(src_dir); /* remove old thread entry */ hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL); err = hfs_brec_find(&src_fd); if (err) goto out; type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset); err = hfs_brec_remove(&src_fd); if (err) goto out; /* create new thread entry */ hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL); entry_size = hfsplus_fill_cat_thread(sb, &entry, type, dst_dir->i_ino, dst_name); err = hfs_brec_find(&dst_fd); if (err != -ENOENT) { if (!err) err = -EEXIST; goto out; } err = hfs_brec_insert(&dst_fd, &entry, entry_size); out: 
hfs_bnode_put(dst_fd.bnode); hfs_find_exit(&src_fd); return err; }
0
238,509
static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) { const struct btf_type *func, *func_proto; struct bpf_kfunc_btf_tab *btf_tab; struct bpf_kfunc_desc_tab *tab; struct bpf_prog_aux *prog_aux; struct bpf_kfunc_desc *desc; const char *func_name; struct btf *desc_btf; unsigned long addr; int err; prog_aux = env->prog->aux; tab = prog_aux->kfunc_tab; btf_tab = prog_aux->kfunc_btf_tab; if (!tab) { if (!btf_vmlinux) { verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); return -ENOTSUPP; } if (!env->prog->jit_requested) { verbose(env, "JIT is required for calling kernel function\n"); return -ENOTSUPP; } if (!bpf_jit_supports_kfunc_call()) { verbose(env, "JIT does not support calling kernel function\n"); return -ENOTSUPP; } if (!env->prog->gpl_compatible) { verbose(env, "cannot call kernel function from non-GPL compatible program\n"); return -EINVAL; } tab = kzalloc(sizeof(*tab), GFP_KERNEL); if (!tab) return -ENOMEM; prog_aux->kfunc_tab = tab; } /* func_id == 0 is always invalid, but instead of returning an error, be * conservative and wait until the code elimination pass before returning * error, so that invalid calls that get pruned out can be in BPF programs * loaded from userspace. It is also required that offset be untouched * for such calls. 
*/ if (!func_id && !offset) return 0; if (!btf_tab && offset) { btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); if (!btf_tab) return -ENOMEM; prog_aux->kfunc_btf_tab = btf_tab; } desc_btf = find_kfunc_desc_btf(env, func_id, offset, NULL); if (IS_ERR(desc_btf)) { verbose(env, "failed to find BTF for kernel function\n"); return PTR_ERR(desc_btf); } if (find_kfunc_desc(env->prog, func_id, offset)) return 0; if (tab->nr_descs == MAX_KFUNC_DESCS) { verbose(env, "too many different kernel function calls\n"); return -E2BIG; } func = btf_type_by_id(desc_btf, func_id); if (!func || !btf_type_is_func(func)) { verbose(env, "kernel btf_id %u is not a function\n", func_id); return -EINVAL; } func_proto = btf_type_by_id(desc_btf, func->type); if (!func_proto || !btf_type_is_func_proto(func_proto)) { verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", func_id); return -EINVAL; } func_name = btf_name_by_offset(desc_btf, func->name_off); addr = kallsyms_lookup_name(func_name); if (!addr) { verbose(env, "cannot find address for kernel function %s\n", func_name); return -EINVAL; } desc = &tab->descs[tab->nr_descs++]; desc->func_id = func_id; desc->imm = BPF_CALL_IMM(addr); desc->offset = offset; err = btf_distill_func_proto(&env->log, desc_btf, func_proto, func_name, &desc->func_model); if (!err) sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off, NULL); return err; }
0
453,016
static int nft_immediate_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_immediate_expr *priv = nft_expr_priv(expr); struct nft_data_desc desc; int err; if (tb[NFTA_IMMEDIATE_DREG] == NULL || tb[NFTA_IMMEDIATE_DATA] == NULL) return -EINVAL; err = nft_data_init(ctx, &priv->data, sizeof(priv->data), &desc, tb[NFTA_IMMEDIATE_DATA]); if (err < 0) return err; priv->dlen = desc.len; err = nft_parse_register_store(ctx, tb[NFTA_IMMEDIATE_DREG], &priv->dreg, &priv->data, desc.type, desc.len); if (err < 0) goto err1; if (priv->dreg == NFT_REG_VERDICT) { struct nft_chain *chain = priv->data.verdict.chain; switch (priv->data.verdict.code) { case NFT_JUMP: case NFT_GOTO: if (nft_chain_is_bound(chain)) { err = -EBUSY; goto err1; } chain->bound = true; break; default: break; } } return 0; err1: nft_data_release(&priv->data, desc.type); return err; }
0
221,634
bool isKnownBroadcastable(ShapeComponentAnalysis& analysis, ValueRange bcasted_shapes, Value output_shape) { auto output_shape_dims = analysis.GetValueInfo(output_shape); if (!output_shape_dims) return false; for (Value shape : bcasted_shapes) { auto shape_dims = analysis.GetValueInfo(shape); if (!shape_dims) return false; // Iterate backwards over the smallest input shape. for (auto zip : llvm::zip(llvm::reverse(*output_shape_dims), llvm::reverse(*shape_dims))) { const auto& first = std::get<0>(zip); const auto& second = std::get<1>(zip); // TODO(ezhulenev): What to do with dimensions statically known to be // zero? // Numpy can only broadcast [0] with [1], however Tensorflow can broadcast // [0] with any dimension size, and produces dimension of size [0]. // Currently we'll conservatively return failure and will not proceed with // a rewrite. if (first.isConstant(0) || second.isConstant(0)) return false; // If either shape has a static one dimension the broadcast will always // succeed. if (first.isConstant(1) || second.isConstant(1)) continue; // Otherwise dims have to be equal. if (first != second) return false; } } return true; }
0
450,418
int vnc_zywrle_send_framebuffer_update(VncState *vs, int x, int y, int w, int h) { vs->zrle->type = VNC_ENCODING_ZYWRLE; return zrle_send_framebuffer_update(vs, x, y, w, h); }
0
314,773
cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); }
0
220,163
void DecodeImageV2Op::DecodeBMP(const uint8* input, const int row_size, uint8* const output, const int width, const int height, const int output_channels, const int input_channels, bool top_down) { for (int i = 0; i < height; i++) { int src_pos; int dst_pos; for (int j = 0; j < width; j++) { if (!top_down) { src_pos = ((height - 1 - i) * row_size) + j * input_channels; } else { src_pos = i * row_size + j * input_channels; } dst_pos = (i * width + j) * output_channels; switch (input_channels) { case 1: output[dst_pos] = input[src_pos]; // Set 2nd and 3rd channels if user requested for 3 or 4 channels. // Repeat 1st channel's value. if (output_channels == 3 || output_channels == 4) { output[dst_pos + 1] = input[src_pos]; output[dst_pos + 2] = input[src_pos]; } // Set 4th channel (alpha) to maximum value if user requested for // 4 channels. if (output_channels == 4) { output[dst_pos + 3] = UINT8_MAX; } break; case 3: // BGR -> RGB output[dst_pos] = input[src_pos + 2]; output[dst_pos + 1] = input[src_pos + 1]; output[dst_pos + 2] = input[src_pos]; // Set 4th channel (alpha) to maximum value if the user requested for // 4 channels and the input image has 3 channels only. if (output_channels == 4) { output[dst_pos + 3] = UINT8_MAX; } break; case 4: // BGRA -> RGBA output[dst_pos] = input[src_pos + 2]; output[dst_pos + 1] = input[src_pos + 1]; output[dst_pos + 2] = input[src_pos]; // Set 4th channel only if the user requested for 4 channels. If not, // then user requested 3 channels; skip this step. if (output_channels == 4) { output[dst_pos + 3] = input[src_pos + 3]; } break; default: LOG(FATAL) << "Unexpected number of channels: " << input_channels; break; } } } }
0
509,562
int ha_maria::index_last(uchar * buf) { DBUG_ASSERT(inited == INDEX); register_handler(file); int error= maria_rlast(file, buf, active_index); return error; }
0
242,995
int mbedtls_ssl_check_timer( mbedtls_ssl_context *ssl ) { if( ssl->f_get_timer == NULL ) return( 0 ); if( ssl->f_get_timer( ssl->p_timer ) == 2 ) { MBEDTLS_SSL_DEBUG_MSG( 3, ( "timer expired" ) ); return( -1 ); } return( 0 ); }
0
459,020
http_Unset(struct http *hp, hdr_t hdr) { uint16_t u, v; for (v = u = HTTP_HDR_FIRST; u < hp->nhd; u++) { Tcheck(hp->hd[u]); if (http_IsHdr(&hp->hd[u], hdr)) { http_VSLH_del(hp, u); continue; } if (v != u) { memcpy(&hp->hd[v], &hp->hd[u], sizeof *hp->hd); memcpy(&hp->hdf[v], &hp->hdf[u], sizeof *hp->hdf); } v++; } hp->nhd = v; }
0
348,430
static void mkiss_write_wakeup(struct tty_struct *tty) { struct mkiss *ax = mkiss_get(tty); int actual; if (!ax) return; if (ax->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); netif_wake_queue(ax->dev); goto out; } actual = tty->ops->write(tty, ax->xhead, ax->xleft); ax->xleft -= actual; ax->xhead += actual; out: mkiss_put(ax); }
0
210,420
fill_threshhold_buffer(byte *dest_strip, byte *src_strip, int src_width, int left_offset, int left_width, int num_tiles, int right_width) { byte *ptr_out_temp = dest_strip; int ii; /* Left part */ memcpy(dest_strip, src_strip + left_offset, left_width); ptr_out_temp += left_width; /* Now the full parts */ for (ii = 0; ii < num_tiles; ii++){ memcpy(ptr_out_temp, src_strip, src_width); ptr_out_temp += src_width; } /* Now the remainder */ memcpy(ptr_out_temp, src_strip, right_width); #ifdef PACIFY_VALGRIND ptr_out_temp += right_width; ii = (dest_strip-ptr_out_temp) % (LAND_BITS-1); if (ii > 0) memset(ptr_out_temp, 0, ii); #endif }
1
234,871
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) { struct btrfs_fs_devices *fs_devices; lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); /* * in case of fs with no seed, srcdev->fs_devices will point * to fs_devices of fs_info. However when the dev being replaced is * a seed dev it will point to the seed's local fs_devices. In short * srcdev will have its correct fs_devices in both the cases. */ fs_devices = srcdev->fs_devices; list_del_rcu(&srcdev->dev_list); list_del(&srcdev->dev_alloc_list); fs_devices->num_devices--; if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) fs_devices->missing_devices--; if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) fs_devices->rw_devices--; if (srcdev->bdev) fs_devices->open_devices--; }
0
349,257
int read_super_2(squashfs_operations **s_ops, void *s) { squashfs_super_block_3 *sBlk_3 = s; if(sBlk_3->s_magic != SQUASHFS_MAGIC || sBlk_3->s_major != 2 || sBlk_3->s_minor > 1) return -1; sBlk.s.s_magic = sBlk_3->s_magic; sBlk.s.inodes = sBlk_3->inodes; sBlk.s.mkfs_time = sBlk_3->mkfs_time; sBlk.s.block_size = sBlk_3->block_size; sBlk.s.fragments = sBlk_3->fragments; sBlk.s.block_log = sBlk_3->block_log; sBlk.s.flags = sBlk_3->flags; sBlk.s.s_major = sBlk_3->s_major; sBlk.s.s_minor = sBlk_3->s_minor; sBlk.s.root_inode = sBlk_3->root_inode; sBlk.s.bytes_used = sBlk_3->bytes_used_2; sBlk.s.inode_table_start = sBlk_3->inode_table_start; sBlk.s.directory_table_start = sBlk_3->directory_table_start_2; sBlk.s.fragment_table_start = sBlk_3->fragment_table_start_2; sBlk.s.inode_table_start = sBlk_3->inode_table_start_2; sBlk.no_uids = sBlk_3->no_uids; sBlk.no_guids = sBlk_3->no_guids; sBlk.uid_start = sBlk_3->uid_start_2; sBlk.guid_start = sBlk_3->guid_start_2; sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK; *s_ops = &ops; /* * 2.x filesystems use gzip compression. */ comp = lookup_compressor("gzip"); if(sBlk_3->s_minor == 0) needs_sorting = TRUE; return TRUE; }
0
232,342
Bool gf_isom_get_subsample_types(GF_ISOFile *movie, u32 track, u32 subs_index, u32 *flags) { GF_SubSampleInformationBox *sub_samples=NULL; GF_TrackBox *trak = gf_isom_get_track_from_file(movie, track); if (!track || !subs_index) return GF_FALSE; if (!trak->Media || !trak->Media->information->sampleTable || !trak->Media->information->sampleTable->sub_samples) return GF_FALSE; sub_samples = gf_list_get(trak->Media->information->sampleTable->sub_samples, subs_index-1); if (!sub_samples) return GF_FALSE; *flags = sub_samples->flags; return GF_TRUE; }
0
430,340
void seq_pad(struct seq_file *m, char c) { int size = m->pad_until - m->count; if (size > 0) { if (size + m->count > m->size) { seq_set_overflow(m); return; } memset(m->buf + m->count, ' ', size); m->count += size; } if (c) seq_putc(m, c); }
0
254,068
void clear() { key_value_pairs_.clear(); url_.clear(); }
0
259,177
static MOVFragmentStreamInfo * get_current_frag_stream_info( MOVFragmentIndex *frag_index) { MOVFragmentIndexItem *item; if (frag_index->current < 0 || frag_index->current >= frag_index->nb_items) return NULL; item = &frag_index->item[frag_index->current]; if (item->current >= 0 && item->current < item->nb_stream_info) return &item->stream_info[item->current]; // This shouldn't happen return NULL; }
0
508,371
bool is_locked_view(THD *thd, TABLE_LIST *t) { DBUG_ENTER("check_locked_view"); /* Is this table a view and not a base table? (it is work around to allow to open view with locked tables, real fix will be made after definition cache will be made) Since opening of view which was not explicitly locked by LOCK TABLES breaks metadata locking protocol (potentially can lead to deadlocks) it should be disallowed. */ if (thd->mdl_context.is_lock_owner(MDL_key::TABLE, t->db, t->table_name, MDL_SHARED)) { char path[FN_REFLEN + 1]; build_table_filename(path, sizeof(path) - 1, t->db, t->table_name, reg_ext, 0); /* Note that we can't be 100% sure that it is a view since it's possible that we either simply have not found unused TABLE instance in THD::open_tables list or were unable to open table during prelocking process (in this case in theory we still should hold shared metadata lock on it). */ if (dd_frm_is_view(thd, path)) { /* If parent_l of the table_list is non null then a merge table has this view as child table, which is not supported. */ if (t->parent_l) { my_error(ER_WRONG_MRG_TABLE, MYF(0)); DBUG_RETURN(FALSE); } if (!tdc_open_view(thd, t, CHECK_METADATA_VERSION)) { DBUG_ASSERT(t->view != 0); DBUG_RETURN(TRUE); // VIEW } } } DBUG_RETURN(FALSE); }
0
432,692
static void ipa_functions(wmfAPI *API) { wmf_magick_t *ddata = 0; wmfFunctionReference *FR = (wmfFunctionReference *) API->function_reference; /* IPA function reference links */ FR->device_open = ipa_device_open; FR->device_close = ipa_device_close; FR->device_begin = ipa_device_begin; FR->device_end = ipa_device_end; FR->flood_interior = ipa_flood_interior; FR->flood_exterior = ipa_flood_exterior; FR->draw_pixel = ipa_draw_pixel; FR->draw_pie = ipa_draw_pie; FR->draw_chord = ipa_draw_chord; FR->draw_arc = ipa_draw_arc; FR->draw_ellipse = ipa_draw_ellipse; FR->draw_line = ipa_draw_line; FR->poly_line = ipa_poly_line; FR->draw_polygon = ipa_draw_polygon; #if defined(MAGICKCORE_WMF_DELEGATE) FR->draw_polypolygon = ipa_draw_polypolygon; #endif FR->draw_rectangle = ipa_draw_rectangle; FR->rop_draw = ipa_rop_draw; FR->bmp_draw = ipa_bmp_draw; FR->bmp_read = ipa_bmp_read; FR->bmp_free = ipa_bmp_free; FR->draw_text = ipa_draw_text; FR->udata_init = ipa_udata_init; FR->udata_copy = ipa_udata_copy; FR->udata_set = ipa_udata_set; FR->udata_free = ipa_udata_free; FR->region_frame = ipa_region_frame; FR->region_paint = ipa_region_paint; FR->region_clip = ipa_region_clip; /* Allocate device data structure */ ddata = (wmf_magick_t *) wmf_malloc(API, sizeof(wmf_magick_t)); if (ERR(API)) return; (void) ResetMagickMemory((void *) ddata, 0, sizeof(wmf_magick_t)); API->device_data = (void *) ddata; /* Device data defaults */ ddata->image = 0; }
0
312,441
make_get_fullcmd(char_u *makecmd, char_u *fname) { char_u *cmd; unsigned len; len = (unsigned)STRLEN(p_shq) * 2 + (unsigned)STRLEN(makecmd) + 1; if (*p_sp != NUL) len += (unsigned)STRLEN(p_sp) + (unsigned)STRLEN(fname) + 3; cmd = alloc_id(len, aid_qf_makecmd); if (cmd == NULL) return NULL; sprintf((char *)cmd, "%s%s%s", (char *)p_shq, (char *)makecmd, (char *)p_shq); // If 'shellpipe' empty: don't redirect to 'errorfile'. if (*p_sp != NUL) append_redir(cmd, len, p_sp, fname); // Display the fully formed command. Output a newline if there's something // else than the :make command that was typed (in which case the cursor is // in column 0). if (msg_col == 0) msg_didout = FALSE; msg_start(); msg_puts(":!"); msg_outtrans(cmd); // show what we are doing return cmd; }
0
252,415
static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); }
0
424,524
static BOOL yuv_to_rgb(PresentationContext* presentation, BYTE* dest) { const BYTE* pYUVPoint[3]; H264_CONTEXT* h264 = presentation->h264; BYTE** ppYUVData; ppYUVData = h264->pYUVData; pYUVPoint[0] = ppYUVData[0]; pYUVPoint[1] = ppYUVData[1]; pYUVPoint[2] = ppYUVData[2]; if (!yuv_context_decode(presentation->yuv, pYUVPoint, h264->iStride, PIXEL_FORMAT_BGRX32, dest, h264->width * 4)) { WLog_ERR(TAG, "error in yuv_to_rgb conversion"); return FALSE; } return TRUE; }
0
243,999
GF_Box *stri_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubTrackInformationBox, GF_ISOM_BOX_TYPE_STRI); return (GF_Box *)tmp; }
0
294,614
div_day(VALUE d, VALUE *f) { if (f) *f = f_mod(d, INT2FIX(1)); return f_floor(d); }
0
337,826
bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, bool addr_param_needed, struct sctp_paramhdr **errp) { struct sctp_addip_chunk *addip; bool addr_param_seen = false; union sctp_params param; addip = (struct sctp_addip_chunk *)chunk->chunk_hdr; sctp_walk_params(param, addip, addip_hdr.params) { size_t length = ntohs(param.p->length); *errp = param.p; switch (param.p->type) { case SCTP_PARAM_ERR_CAUSE: break; case SCTP_PARAM_IPV4_ADDRESS: if (length != sizeof(struct sctp_ipv4addr_param)) return false; /* ensure there is only one addr param and it's in the * beginning of addip_hdr params, or we reject it. */ if (param.v != addip->addip_hdr.params) return false; addr_param_seen = true; break; case SCTP_PARAM_IPV6_ADDRESS: if (length != sizeof(struct sctp_ipv6addr_param)) return false; if (param.v != addip->addip_hdr.params) return false; addr_param_seen = true; break; case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: /* In ASCONF chunks, these need to be first. */ if (addr_param_needed && !addr_param_seen) return false; length = ntohs(param.addip->param_hdr.length); if (length < sizeof(struct sctp_addip_param) + sizeof(**errp)) return false; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(struct sctp_addip_param)) return false; break; default: /* This is unknown to us, reject! */ return false; } } /* Remaining sanity checks. */ if (addr_param_needed && !addr_param_seen) return false; if (!addr_param_needed && addr_param_seen) return false; if (param.v != chunk->chunk_end) return false; return true; }
0
293,758
static void create_initterm_syms(RKext *kext, RList *ret, int type, ut64 *pointers) { int i = 0; int count = 0; for (; pointers[i]; i++) { ut64 func_vaddr = pointers[i]; ut64 text_start = kext->vaddr; ut64 text_end = text_start + kext->text_range.size; if (text_start == text_end) { continue; } if (text_start > func_vaddr || func_vaddr >= text_end) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("%s.%s.%d", kext_short_name (kext), (type == R_BIN_ENTRY_TYPE_INIT) ? "init" : "fini", count++); sym->vaddr = func_vaddr; sym->paddr = func_vaddr - kext->pa2va_exec; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "FUNC"; r_list_append (ret, sym); } }
0
504,607
static enum TIFFReadDirEntryErr TIFFReadDirEntryDataAndRealloc( TIFF* tif, uint64 offset, tmsize_t size, void** pdest) { #if SIZEOF_VOIDP == 8 || SIZEOF_SIZE_T == 8 tmsize_t threshold = INITIAL_THRESHOLD; #endif tmsize_t already_read = 0; assert( !isMapped(tif) ); if (!SeekOK(tif,offset)) return(TIFFReadDirEntryErrIo); /* On 64 bit processes, read first a maximum of 1 MB, then 10 MB, etc */ /* so as to avoid allocating too much memory in case the file is too */ /* short. We could ask for the file size, but this might be */ /* expensive with some I/O layers (think of reading a gzipped file) */ /* Restrict to 64 bit processes, so as to avoid reallocs() */ /* on 32 bit processes where virtual memory is scarce. */ while( already_read < size ) { void* new_dest; tmsize_t bytes_read; tmsize_t to_read = size - already_read; #if SIZEOF_VOIDP == 8 || SIZEOF_SIZE_T == 8 if( to_read >= threshold && threshold < MAX_THRESHOLD ) { to_read = threshold; threshold *= THRESHOLD_MULTIPLIER; } #endif new_dest = (uint8*) _TIFFrealloc( *pdest, already_read + to_read); if( new_dest == NULL ) { TIFFErrorExt(tif->tif_clientdata, tif->tif_name, "Failed to allocate memory for %s " "(%ld elements of %ld bytes each)", "TIFFReadDirEntryArray", (long) 1, (long) already_read + to_read); return TIFFReadDirEntryErrAlloc; } *pdest = new_dest; bytes_read = TIFFReadFile(tif, (char*)*pdest + already_read, to_read); already_read += bytes_read; if (bytes_read != to_read) { return TIFFReadDirEntryErrIo; } } return TIFFReadDirEntryErrOk; }
0
513,324
AGGR_OP::end_send() { enum_nested_loop_state rc= NESTED_LOOP_OK; TABLE *table= join_tab->table; JOIN *join= join_tab->join; // All records were stored, send them further int tmp, new_errno= 0; if ((rc= put_record(true)) < NESTED_LOOP_OK) return rc; if ((tmp= table->file->extra(HA_EXTRA_NO_CACHE))) { DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed")); new_errno= tmp; } if ((tmp= table->file->ha_index_or_rnd_end())) { DBUG_PRINT("error",("ha_index_or_rnd_end() failed")); new_errno= tmp; } if (new_errno) { table->file->print_error(new_errno,MYF(0)); return NESTED_LOOP_ERROR; } // Update ref array join_tab->join->set_items_ref_array(*join_tab->ref_array); bool keep_last_filesort_result = join_tab->filesort ? false : true; if (join_tab->window_funcs_step) { if (join_tab->window_funcs_step->exec(join, keep_last_filesort_result)) return NESTED_LOOP_ERROR; } table->reginfo.lock_type= TL_UNLOCK; bool in_first_read= true; /* Reset the counter before copying rows from internal temporary table to INSERT table. */ join_tab->join->thd->get_stmt_da()->reset_current_row_for_warning(); while (rc == NESTED_LOOP_OK) { int error; if (in_first_read) { in_first_read= false; error= join_init_read_record(join_tab); } else error= join_tab->read_record.read_record(&join_tab->read_record); if (error > 0 || (join->thd->is_error())) // Fatal error rc= NESTED_LOOP_ERROR; else if (error < 0) break; else if (join->thd->killed) // Aborted by user { join->thd->send_kill_message(); rc= NESTED_LOOP_KILLED; } else { rc= evaluate_join_record(join, join_tab, 0); } } if (keep_last_filesort_result) { delete join_tab->filesort_result; join_tab->filesort_result= NULL; } // Finish rnd scn after sending records if (join_tab->table->file->inited) join_tab->table->file->ha_rnd_end(); return rc; }
0
232,832
void Compute(OpKernelContext* const context) override { // Read float features list; OpInputList float_features_list; OP_REQUIRES_OK( context, context->input_list(kFloatFeaturesName, &float_features_list)); // Parse example weights and get batch size. const Tensor* example_weights_t; OP_REQUIRES_OK(context, context->input(kExampleWeightsName, &example_weights_t)); DCHECK(float_features_list.size() > 0) << "Got empty feature list"; auto example_weights = example_weights_t->flat<float>(); const int64_t weight_size = example_weights.size(); const int64_t batch_size = float_features_list[0].flat<float>().size(); OP_REQUIRES( context, weight_size == 1 || weight_size == batch_size, errors::InvalidArgument(strings::Printf( "Weights should be a single value or same size as features."))); const Tensor* epsilon_t; OP_REQUIRES_OK(context, context->input(kEpsilonName, &epsilon_t)); float epsilon = epsilon_t->scalar<float>()(); OpOutputList summaries_output_list; OP_REQUIRES_OK( context, context->output_list(kSummariesName, &summaries_output_list)); auto do_quantile_summary_gen = [&](const int64_t begin, const int64_t end) { // Iterating features. for (int64_t index = begin; index < end; index++) { const auto feature_values = float_features_list[index].flat<float>(); QuantileStream stream(epsilon, batch_size + 1); // Run quantile summary generation. for (int64_t j = 0; j < batch_size; j++) { stream.PushEntry(feature_values(j), (weight_size > 1) ? 
example_weights(j) : example_weights(0)); } stream.Finalize(); const auto summary_entry_list = stream.GetFinalSummary().GetEntryList(); Tensor* output_t; OP_REQUIRES_OK( context, summaries_output_list.allocate( index, TensorShape({static_cast<int64>(summary_entry_list.size()), 4}), &output_t)); auto output = output_t->matrix<float>(); for (auto row = 0; row < summary_entry_list.size(); row++) { const auto& entry = summary_entry_list[row]; output(row, 0) = entry.value; output(row, 1) = entry.weight; output(row, 2) = entry.min_rank; output(row, 3) = entry.max_rank; } } }; // TODO(tanzheny): comment on the magic number. const int64_t kCostPerUnit = 500 * batch_size; const DeviceBase::CpuWorkerThreads& worker_threads = *context->device()->tensorflow_cpu_worker_threads(); Shard(worker_threads.num_threads, worker_threads.workers, num_features_, kCostPerUnit, do_quantile_summary_gen); }
0
281,096
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family) { int err = 0; if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo))) return -EAFNOSUPPORT; spin_lock(&xfrm_policy_afinfo_lock); if (unlikely(xfrm_policy_afinfo[family] != NULL)) err = -EEXIST; else { struct dst_ops *dst_ops = afinfo->dst_ops; if (likely(dst_ops->kmem_cachep == NULL)) dst_ops->kmem_cachep = xfrm_dst_cache; if (likely(dst_ops->check == NULL)) dst_ops->check = xfrm_dst_check; if (likely(dst_ops->default_advmss == NULL)) dst_ops->default_advmss = xfrm_default_advmss; if (likely(dst_ops->mtu == NULL)) dst_ops->mtu = xfrm_mtu; if (likely(dst_ops->negative_advice == NULL)) dst_ops->negative_advice = xfrm_negative_advice; if (likely(dst_ops->link_failure == NULL)) dst_ops->link_failure = xfrm_link_failure; if (likely(dst_ops->neigh_lookup == NULL)) dst_ops->neigh_lookup = xfrm_neigh_lookup; if (likely(!dst_ops->confirm_neigh)) dst_ops->confirm_neigh = xfrm_confirm_neigh; rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo); } spin_unlock(&xfrm_policy_afinfo_lock); return err; }
0
513,043
Item *in_double::create_item(THD *thd) { return new (thd->mem_root) Item_float(thd, 0.0, 0); }
0
326,911
static void vidtv_s302m_alloc_au(struct vidtv_encoder *e) { struct vidtv_access_unit *sync_au = NULL; struct vidtv_access_unit *temp = NULL; if (e->sync && e->sync->is_video_encoder) { sync_au = e->sync->access_units; while (sync_au) { temp = vidtv_s302m_access_unit_init(e->access_units); if (!e->access_units) e->access_units = temp; sync_au = sync_au->next; } return; } e->access_units = vidtv_s302m_access_unit_init(NULL); }
0
229,167
static void virtser_port_device_realize(DeviceState *dev, Error **errp) { VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev); VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); VirtIOSerialBus *bus = VIRTIO_SERIAL_BUS(qdev_get_parent_bus(dev)); int max_nr_ports; bool plugging_port0; Error *err = NULL; port->vser = bus->vser; port->bh = qemu_bh_new(flush_queued_data_bh, port); assert(vsc->have_data); /* * Is the first console port we're seeing? If so, put it up at * location 0. This is done for backward compatibility (old * kernel, new qemu). */ plugging_port0 = vsc->is_console && !find_port_by_id(port->vser, 0); if (find_port_by_id(port->vser, port->id)) { error_setg(errp, "virtio-serial-bus: A port already exists at id %u", port->id); return; } if (port->name != NULL && find_port_by_name(port->name)) { error_setg(errp, "virtio-serial-bus: A port already exists by name %s", port->name); return; } if (port->id == VIRTIO_CONSOLE_BAD_ID) { if (plugging_port0) { port->id = 0; } else { port->id = find_free_port_id(port->vser); if (port->id == VIRTIO_CONSOLE_BAD_ID) { error_setg(errp, "virtio-serial-bus: Maximum port limit for " "this device reached"); return; } } } max_nr_ports = port->vser->serial.max_virtserial_ports; if (port->id >= max_nr_ports) { error_setg(errp, "virtio-serial-bus: Out-of-range port id specified, " "max. allowed: %u", max_nr_ports - 1); return; } vsc->realize(dev, &err); if (err != NULL) { error_propagate(errp, err); return; } port->elem.out_num = 0; }
0
513,327
join_read_prev(READ_RECORD *info) { int error; if ((error= info->table->file->ha_index_prev(info->record))) return report_error(info->table, error); return 0; }
0
318,779
drill_parse_coordinate(gerb_file_t *fd, char firstchar, gerbv_image_t *image, drill_state_t *state, ssize_t file_line) { int read; gerbv_drill_stats_t *stats = image->drill_stats; if(state->coordinate_mode == DRILL_MODE_ABSOLUTE) { if (firstchar == 'X') { state->curr_x = read_double(fd, state->number_format, image->format->omit_zeros, state->decimals); if ((read = (char)gerb_fgetc(fd)) == 'Y') { state->curr_y = read_double(fd, state->number_format, image->format->omit_zeros, state->decimals); } else { gerb_ungetc(fd); } } else if (firstchar == 'Y') { state->curr_y = read_double(fd, state->number_format, image->format->omit_zeros, state->decimals); } } else if(state->coordinate_mode == DRILL_MODE_INCREMENTAL) { if (firstchar == 'X') { state->curr_x += read_double(fd, state->number_format, image->format->omit_zeros, state->decimals); if((read = (char)gerb_fgetc(fd)) == 'Y') { state->curr_y += read_double(fd, state->number_format, image->format->omit_zeros, state->decimals); } else { gerb_ungetc(fd); } } else if (firstchar == 'Y') { state->curr_y += read_double(fd, state->number_format, image->format->omit_zeros, state->decimals); } } else { gerbv_stats_printf(stats->error_list, GERBV_MESSAGE_ERROR, -1, _("Coordinate mode is not absolute and not incremental " "at line %ld in file \"%s\""), file_line, fd->filename); } } /* drill_parse_coordinate */
0
90,191
void UpdateNetworkStatus(const char* path, const char* key, const Value* value) { if (key == NULL || value == NULL) return; if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) { BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, NewRunnableMethod(this, &NetworkLibraryImpl::UpdateNetworkStatus, path, key, value)); return; } bool boolval = false; int intval = 0; std::string stringval; Network* network; if (ethernet_->service_path() == path) { network = ethernet_; } else { CellularNetwork* cellular = GetWirelessNetworkByPath(cellular_networks_, path); WifiNetwork* wifi = GetWirelessNetworkByPath(wifi_networks_, path); if (cellular == NULL && wifi == NULL) return; WirelessNetwork* wireless; if (wifi != NULL) wireless = static_cast<WirelessNetwork*>(wifi); else wireless = static_cast<WirelessNetwork*>(cellular); if (strcmp(key, kSignalStrengthProperty) == 0) { if (value->GetAsInteger(&intval)) wireless->set_strength(intval); } else if (cellular != NULL) { if (strcmp(key, kRestrictedPoolProperty) == 0) { if (value->GetAsBoolean(&boolval)) cellular->set_restricted_pool(boolval); } else if (strcmp(key, kActivationStateProperty) == 0) { if (value->GetAsString(&stringval)) cellular->set_activation_state(ParseActivationState(stringval)); } else if (strcmp(key, kPaymentURLProperty) == 0) { if (value->GetAsString(&stringval)) cellular->set_payment_url(stringval); } else if (strcmp(key, kNetworkTechnologyProperty) == 0) { if (value->GetAsString(&stringval)) cellular->set_network_technology( ParseNetworkTechnology(stringval)); } else if (strcmp(key, kRoamingStateProperty) == 0) { if (value->GetAsString(&stringval)) cellular->set_roaming_state(ParseRoamingState(stringval)); } } network = wireless; } if (strcmp(key, kIsActiveProperty) == 0) { if (value->GetAsBoolean(&boolval)) network->set_active(boolval); } else if (strcmp(key, kStateProperty) == 0) { if (value->GetAsString(&stringval)) network->set_state(ParseState(stringval)); } NotifyNetworkChanged(network); }
0
477,351
R_API RBinJavaAttrInfo *r_bin_java_code_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaAttrInfo *attr = NULL, *_attr = NULL; ut32 k = 0, curpos; ut64 offset = 0; attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } if (sz < 16 || sz > buf_offset) {// sz > buf_offset) { free (attr); return NULL; } offset += 6; attr->type = R_BIN_JAVA_ATTR_TYPE_CODE_ATTR; attr->info.code_attr.max_stack = attr->is_attr_in_old_format ? buffer[offset] : R_BIN_JAVA_USHORT (buffer, offset); offset += attr->is_attr_in_old_format ? 1 : 2; attr->info.code_attr.max_locals = attr->is_attr_in_old_format ? buffer[offset] : R_BIN_JAVA_USHORT (buffer, offset); offset += attr->is_attr_in_old_format ? 1 : 2; attr->info.code_attr.code_length = attr->is_attr_in_old_format ? R_BIN_JAVA_USHORT(buffer, offset) : R_BIN_JAVA_UINT (buffer, offset); offset += attr->is_attr_in_old_format ? 2 : 4; // BUG: possible unsigned integer overflow here attr->info.code_attr.code_offset = buf_offset + offset; attr->info.code_attr.code = (ut8 *) malloc (attr->info.code_attr.code_length); if (!attr->info.code_attr.code) { eprintf ("Handling Code Attributes: Unable to allocate memory " "(%u bytes) for a code.\n", attr->info.code_attr.code_length); return attr; } R_BIN_JAVA_GLOBAL_BIN->current_code_attr = attr; { int len = attr->info.code_attr.code_length; memset (attr->info.code_attr.code, 0, len); if (offset + len >= sz) { return attr; } memcpy (attr->info.code_attr.code, buffer + offset, len); offset += len; } attr->info.code_attr.exception_table_length = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.code_attr.exception_table = r_list_newf (free); for (k = 0; k < attr->info.code_attr.exception_table_length; k++) { curpos = buf_offset + offset; if (curpos + 8 > sz) { return attr; } RBinJavaExceptionEntry *e = R_NEW0 (RBinJavaExceptionEntry); if (!e) { free (attr); return NULL; } e->file_offset = curpos; e->start_pc = R_BIN_JAVA_USHORT 
(buffer, offset); offset += 2; e->end_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; e->handler_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; e->catch_type = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; r_list_append (attr->info.code_attr.exception_table, e); e->size = 8; } attr->info.code_attr.attributes_count = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // IFDBG eprintf (" code Attributes_count: %d\n", attr->info.code_attr.attributes_count); // XXX - attr->info.code_attr.attributes is not freed because one of the code attributes is improperly parsed. attr->info.code_attr.attributes = r_list_newf (r_bin_java_attribute_free); if (attr->info.code_attr.attributes_count > 0) { for (k = 0; k < attr->info.code_attr.attributes_count; k++) { int size = (offset < sz) ? sz - offset : 0; if (size > sz || size <= 0) { break; } _attr = r_bin_java_read_next_attr_from_buffer (bin, buffer + offset, size, buf_offset + offset); if (!_attr) { eprintf ("[X] r_bin_java_code_attr_new: Error unable to parse remainder of classfile after Method's Code Attribute: %d.\n", k); break; } IFDBG eprintf("Parsing @ 0x%"PFMT64x " (%s) = 0x%"PFMT64x " bytes, %p\n", _attr->file_offset, _attr->name, _attr->size, _attr); offset += _attr->size; r_list_append (attr->info.code_attr.attributes, _attr); if (_attr->type == R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR) { IFDBG eprintf("Parsed the LocalVariableTable, preparing the implicit mthod frame.\n"); // r_bin_java_print_attr_summary(_attr); attr->info.code_attr.implicit_frame = r_bin_java_build_stack_frame_from_local_variable_table (R_BIN_JAVA_GLOBAL_BIN, _attr); attr->info.code_attr.implicit_frame->file_offset = buf_offset; IFDBG r_bin_java_print_stack_map_frame_summary(attr->info.code_attr.implicit_frame); // r_list_append (attr->info.code_attr.attributes, attr->info.code_attr.implicit_frame); } // if (offset > sz) { // eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Attribute: %d.\n", k); // 
break; // } } } if (attr->info.code_attr.implicit_frame == NULL) { // build a default implicit_frame attr->info.code_attr.implicit_frame = r_bin_java_default_stack_frame (); // r_list_append (attr->info.code_attr.attributes, attr->info.code_attr.implicit_frame); } attr->size = offset; return attr; }
0
359,841
static Image *SparseColorOption(const Image *image, const SparseColorMethod method,const char *arguments,ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p; double *sparse_arguments; Image *sparse_image; PixelInfo color; MagickBooleanType error; size_t x; size_t number_arguments, number_colors; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Limit channels according to image add up number of values needed per color. */ number_colors=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) number_colors++; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) number_colors++; /* Read string, to determine number of arguments needed, */ p=arguments; x=0; while( *p != '\0' ) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') continue; if ( isalpha((int) ((unsigned char) *token)) || *token == '#' ) x += number_colors; /* color argument found */ else x++; /* floating point argument */ } /* control points and color values */ if ((x % (2+number_colors)) != 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","'%s': %s", "sparse-color", "Invalid number of Arguments"); return( (Image *) NULL); } error=MagickFalse; number_arguments=x; /* Allocate and fill in the floating point arguments */ sparse_arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*sparse_arguments)); if (sparse_arguments == (double *) NULL) { (void) 
ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","%s","SparseColorOption"); return( (Image *) NULL); } (void) memset(sparse_arguments,0,number_arguments* sizeof(*sparse_arguments)); p=arguments; x=0; while ((*p != '\0') && (x < number_arguments)) { /* X coordinate */ *token=','; while (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '\0') break; if ( isalpha((int) ((unsigned char) *token)) || *token == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of X-coord"); error=MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* Y coordinate */ *token=','; while (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '\0') break; if ( isalpha((int) ((unsigned char) *token)) || *token == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of Y-coord"); error=MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* color name or function given in string argument */ *token=','; while (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '\0') break; if ( isalpha((int) ((unsigned char) *token)) || *token == '#' ) { /* Color string given */ (void) QueryColorCompliance(token,AllCompliance,&color, exception); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.blue; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) sparse_arguments[x++] = QuantumScale*color.black; if (((GetPixelAlphaTraits(image) & 
UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) sparse_arguments[x++] = QuantumScale*color.alpha; } else { /* Colors given as a set of floating point values - experimental */ /* NB: token contains the first floating point value to use! */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { while (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if ((*token == '\0') || isalpha((int) ((unsigned char) *token)) || *token == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); *token=','; /* used this token - get another */ } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { while (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if ((*token == '\0') || isalpha((int) ((unsigned char) *token)) || *token == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); *token=','; /* used this token - get another */ } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { while (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if ((*token == '\0') || isalpha((int) ((unsigned char) *token)) || *token == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); *token = ','; /* used this token - get another */ } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { while (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if ((*token == '\0') || isalpha((int) ((unsigned char) *token)) || *token == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); *token=','; /* used this token - get another */ } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) { while (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if ((*token == '\0') || isalpha((int) ((unsigned char) *token)) || *token == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); *token = ','; /* used this token - get 
another */ } } } if (error != MagickFalse) { sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return((Image *) NULL); } if (number_arguments != x) { sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error"); return((Image *) NULL); } /* Call the Sparse Color Interpolation function with the parsed arguments */ sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments, exception); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( sparse_image ); }
0
489,153
int sctp_process_asconf_ack(struct sctp_association *asoc, struct sctp_chunk *asconf_ack) { struct sctp_chunk *asconf = asoc->addip_last_asconf; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; int length = 0; int asconf_len = asconf->skb->len; int all_param_pass = 0; int no_err = 1; int retval = 0; __be16 err_code = SCTP_ERROR_NO_ERROR; /* Skip the chunkhdr and addiphdr from the last asconf sent and store * a pointer to address parameter. */ length = sizeof(sctp_addip_chunk_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); asconf_len -= length; /* Skip the address parameter in the last asconf sent and store a * pointer to the first asconf parameter. */ length = ntohs(addr_param->v4.param_hdr.length); asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); asconf_len -= length; /* ADDIP 4.1 * A8) If there is no response(s) to specific TLV parameter(s), and no * failures are indicated, then all request(s) are considered * successful. */ if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) all_param_pass = 1; /* Process the TLVs contained in the last sent ASCONF chunk. */ while (asconf_len > 0) { if (all_param_pass) err_code = SCTP_ERROR_NO_ERROR; else { err_code = sctp_get_asconf_response(asconf_ack, asconf_param, no_err); if (no_err && (SCTP_ERROR_NO_ERROR != err_code)) no_err = 0; } switch (err_code) { case SCTP_ERROR_NO_ERROR: retval = sctp_asconf_param_success(asoc, asconf_param); break; case SCTP_ERROR_RSRC_LOW: retval = 1; break; case SCTP_ERROR_INV_PARAM: /* Disable sending this type of asconf parameter in * future. */ asoc->peer.addip_disabled_mask |= asconf_param->param_hdr.type; break; case SCTP_ERROR_REQ_REFUSED: case SCTP_ERROR_DEL_LAST_IP: case SCTP_ERROR_DEL_SRC_IP: default: break; } /* Skip the processed asconf parameter and move to the next * one. 
*/ length = ntohs(asconf_param->param_hdr.length); asconf_param = (sctp_addip_param_t *)((void *)asconf_param + length); asconf_len -= length; } /* Free the cached last sent asconf chunk. */ list_del_init(&asconf->transmitted_list); sctp_chunk_free(asconf); asoc->addip_last_asconf = NULL; /* Send the next asconf chunk from the addip chunk queue. */ if (!list_empty(&asoc->addip_chunk_list)) { struct list_head *entry = asoc->addip_chunk_list.next; asconf = list_entry(entry, struct sctp_chunk, list); list_del_init(entry); /* Hold the chunk until an ASCONF_ACK is received. */ sctp_chunk_hold(asconf); if (sctp_primitive_ASCONF(asoc, asconf)) sctp_chunk_free(asconf); else asoc->addip_last_asconf = asconf; } return retval; }
0
313,567
static void rose_remove_neigh(struct rose_neigh *rose_neigh) { struct rose_neigh *s; del_timer_sync(&rose_neigh->ftimer); del_timer_sync(&rose_neigh->t0timer); skb_queue_purge(&rose_neigh->queue); if ((s = rose_neigh_list) == rose_neigh) { rose_neigh_list = rose_neigh->next; if (rose_neigh->ax25) ax25_cb_put(rose_neigh->ax25); kfree(rose_neigh->digipeat); kfree(rose_neigh); return; } while (s != NULL && s->next != NULL) { if (s->next == rose_neigh) { s->next = rose_neigh->next; if (rose_neigh->ax25) ax25_cb_put(rose_neigh->ax25); kfree(rose_neigh->digipeat); kfree(rose_neigh); return; } s = s->next; } }
0
229,311
cql_server::connection::make_topology_change_event(const event::topology_change& event) const { auto response = std::make_unique<cql_server::response>(-1, cql_binary_opcode::EVENT, tracing::trace_state_ptr()); response->write_string("TOPOLOGY_CHANGE"); response->write_string(to_string(event.change)); response->write_inet(event.node); return response; }
0