idx
int64
func
string
target
int64
317,285
/*
 * SELinux hook: decide whether the current task may read the given perf
 * event.  Returns 0 when permitted, otherwise the negative value produced
 * by the access-vector-cache check.
 */
static int selinux_perf_event_read(struct perf_event *event)
{
	struct perf_event_security_struct *perfsec = event->security;

	/* Query the AVC for PERF_EVENT__READ on the event's SID. */
	return avc_has_perm(&selinux_state, current_sid(), perfsec->sid,
			    SECCLASS_PERF_EVENT, PERF_EVENT__READ, NULL);
}
0
359,533
/* CLI handler for "no neighbor <peer> filter-list WORD (in|out)".
 * Removes an AS-path access-list filter from a BGP neighbor in the given
 * direction; argv[0] is the peer address/tag and argv[2] the direction
 * keyword, with the AFI/SAFI taken from the current vty node. */
DEFUN (no_neighbor_filter_list, no_neighbor_filter_list_cmd, NO_NEIGHBOR_CMD2 "filter-list WORD (in|out)", NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 "Establish BGP filters\n" "AS path access-list name\n" "Filter incoming routes\n" "Filter outgoing routes\n") { return peer_aslist_unset_vty (vty, argv[0], bgp_node_afi (vty), bgp_node_safi (vty), argv[2]); }
0
247,556
// Record the TLS protocol version this test expects to see negotiated.
// Returns *this so option setters can be chained fluently.
TestUtilOptionsV2& setExpectedProtocolVersion(const std::string& expected_protocol_version) {
  expected_protocol_version_ = expected_protocol_version;
  return *this;
}
0
484,795
static int netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; netdev = xennet_create_dev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); #ifdef CONFIG_SYSFS info->netdev->sysfs_groups[0] = &xennet_dev_group; #endif return 0; }
0
383,324
/*
 * Draw one font glyph rotated 90 degrees so the character reads
 * bottom-to-top: glyph rows map to screen columns and glyph columns map
 * to screen rows.  Characters outside the font's range are ignored.
 */
void
gdImageCharUp (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
  int col, row;			/* position inside the glyph bitmap */
  int sx, sy;			/* position on the destination image */
  int glyph;			/* offset of this glyph's bitmap data */
  col = 0;
  row = 0;
#ifdef CHARSET_EBCDIC
  c = ASC (c);
#endif /*CHARSET_EBCDIC */
  /* Reject characters the font does not cover. */
  if ((c < f->offset) || (c >= (f->offset + f->nchars)))
    {
      return;
    }
  glyph = (c - f->offset) * f->h * f->w;
  for (sy = y; sy > (y - f->w); sy--)
    {
      for (sx = x; sx < (x + f->h); sx++)
	{
	  if (f->data[glyph + row * f->w + col])
	    {
	      gdImageSetPixel (im, sx, sy, color);
	    }
	  row++;
	}
      row = 0;
      col++;
    }
}
0
247,657
// Return true if any subject-alternative-name entry of |cert| satisfies at
// least one configured SAN matcher.  A certificate without a SAN extension
// never matches.
bool DefaultCertValidator::matchSubjectAltName(
    X509* cert, const std::vector<SanMatcherPtr>& subject_alt_name_matchers) {
  bssl::UniquePtr<GENERAL_NAMES> sans(static_cast<GENERAL_NAMES*>(
      X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)));
  if (sans == nullptr) {
    // No SAN extension present: nothing can match.
    return false;
  }
  for (const auto& matcher : subject_alt_name_matchers) {
    for (const GENERAL_NAME* san : sans.get()) {
      if (matcher->match(san)) {
        return true;
      }
    }
  }
  return false;
}
0
328,837
/*
 * Render a NameAndType constant-pool entry as a newly allocated string of
 * the form "<ord>.0x<offset>.<type-name>.<name_idx>.<descriptor_idx>".
 * Returns NULL on allocation failure; the caller owns and must free the
 * returned buffer.
 */
R_API char *r_bin_java_print_name_and_type_cp_stringify(RBinJavaCPTypeObj *obj) {
	ut32 size = 255;
	int consumed;
	char *value = malloc (size);
	if (!value) {
		return NULL;
	}
	memset (value, 0, size);
	consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d",
		obj->metas->ord, obj->file_offset + obj->loadaddr,
		((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
		obj->info.cp_name_and_type.name_idx,
		obj->info.cp_name_and_type.descriptor_idx);
	if (consumed >= (int) size - 1) {
		/* Truncated.  snprintf's return value is the full required
		 * length, so allocate exactly that instead of the old
		 * size + size/2 guess, which could still truncate for very
		 * long type names. */
		free (value);
		size = (ut32) consumed + 1;
		value = malloc (size);
		if (value) {
			memset (value, 0, size);
			(void) snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d",
				obj->metas->ord, obj->file_offset + obj->loadaddr,
				((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
				obj->info.cp_name_and_type.name_idx,
				obj->info.cp_name_and_type.descriptor_idx);
		}
	}
	return value;
}
0
242,265
/*
 * PAM conversation callback: relays PAM prompts over the UA socket stored
 * in appdata_ptr and collects the operator's replies.  One pam_response is
 * calloc'ed per message; prompt-style messages wait for an answer, while
 * info/error messages are only forwarded.  On any socket or protocol
 * failure all collected responses are wiped, freed and PAM_CONV_ERR is
 * returned; otherwise the response array is handed to PAM via *response
 * and PAM_SUCCESS is returned.
 */
static int PamConversationCallback(int num_msg, #if defined(__sun) struct pam_message** msgm, #else const struct pam_message** msgm, #endif struct pam_response** response, void* appdata_ptr) { if (!appdata_ptr) { Dmsg0(debuglevel, "pam_conv_callback pointer error\n"); return PAM_BUF_ERR; } if ((num_msg <= 0) || (num_msg > PAM_MAX_NUM_MSG)) { Dmsg0(debuglevel, "pam_conv_callback wrong number of messages\n"); return (PAM_CONV_ERR); } struct pam_response* resp = static_cast<pam_response*>( calloc(num_msg, sizeof(struct pam_response))); if (!resp) { Dmsg0(debuglevel, "pam_conv_callback memory error\n"); return PAM_BUF_ERR; } PamData* pam_data = static_cast<PamData*>(appdata_ptr); bool error = false; int i = 0; for (; i < num_msg && !error; i++) { switch (msgm[i]->msg_style) { case PAM_PROMPT_ECHO_OFF: case PAM_PROMPT_ECHO_ON: if (!PamConvSendMessage(pam_data->UA_sock_, msgm[i]->msg, msgm[i]->msg_style)) { error = true; break; } if (pam_data->UA_sock_->IsStop() || pam_data->UA_sock_->IsError()) { error = true; break; } if (pam_data->UA_sock_->recv()) { resp[i].resp = strdup(pam_data->UA_sock_->msg); resp[i].resp_retcode = 0; } if (pam_data->UA_sock_->IsStop() || pam_data->UA_sock_->IsError()) { error = true; break; } break; case PAM_ERROR_MSG: case PAM_TEXT_INFO: if (!PamConvSendMessage(pam_data->UA_sock_, msgm[i]->msg, PAM_PROMPT_ECHO_ON)) { error = true; } break; default: Dmsg3(debuglevel, "message[%d]: pam error type: %d error: \"%s\"\n", 1, msgm[i]->msg_style, msgm[i]->msg); error = true; break; } /* switch (msgm[i]->msg_style) { */ } /* for( ; i < num_msg ..) */ if (error) { for (int i = 0; i < num_msg; ++i) { if (resp[i].resp) { memset(resp[i].resp, 0, strlen(resp[i].resp)); free(resp[i].resp); } } memset(resp, 0, num_msg * sizeof *resp); free(resp); *response = nullptr; return PAM_CONV_ERR; } *response = resp; return PAM_SUCCESS; }
0
222,668
/*
 * Build the filesystem path of the session socket for a token.  Slashes
 * and dots in the token are rewritten to '=' so a crafted token cannot
 * escape the sessions directory.  Returns a malloc'd path the caller
 * must free.
 */
char *get_socket_path(const char *_token)
{
	char *sanitized = xstrdup(_token);
	char *path;
	char *p;

	for (p = sanitized; *p; p++) {
		if (*p == '/' || *p == '.')
			*p = '=';
	}

	xasprintf(&path, TMATE_WORKDIR "/sessions/%s", sanitized);
	free(sanitized);
	return path;
}
0
333,054
/*
 * Compare two regexp submatch sets for equality.  In multi-line mode
 * (REG_MULTI) the start line/column of each entry is compared, and the
 * end positions too when the pattern contains back-references
 * (rex.nfa_has_backref); in single-line mode the raw start/end pointers
 * are compared instead.  Entries beyond a set's in_use count are treated
 * as unset (-1 line / NULL pointer).  Returns TRUE when the sets match.
 */
sub_equal(regsub_T *sub1, regsub_T *sub2) { int i; int todo; linenr_T s1; linenr_T s2; char_u *sp1; char_u *sp2; todo = sub1->in_use > sub2->in_use ? sub1->in_use : sub2->in_use; if (REG_MULTI) { for (i = 0; i < todo; ++i) { if (i < sub1->in_use) s1 = sub1->list.multi[i].start_lnum; else s1 = -1; if (i < sub2->in_use) s2 = sub2->list.multi[i].start_lnum; else s2 = -1; if (s1 != s2) return FALSE; if (s1 != -1 && sub1->list.multi[i].start_col != sub2->list.multi[i].start_col) return FALSE; if (rex.nfa_has_backref) { if (i < sub1->in_use) s1 = sub1->list.multi[i].end_lnum; else s1 = -1; if (i < sub2->in_use) s2 = sub2->list.multi[i].end_lnum; else s2 = -1; if (s1 != s2) return FALSE; if (s1 != -1 && sub1->list.multi[i].end_col != sub2->list.multi[i].end_col) return FALSE; } } } else { for (i = 0; i < todo; ++i) { if (i < sub1->in_use) sp1 = sub1->list.line[i].start; else sp1 = NULL; if (i < sub2->in_use) sp2 = sub2->list.line[i].start; else sp2 = NULL; if (sp1 != sp2) return FALSE; if (rex.nfa_has_backref) { if (i < sub1->in_use) sp1 = sub1->list.line[i].end; else sp1 = NULL; if (i < sub2->in_use) sp2 = sub2->list.line[i].end; else sp2 = NULL; if (sp1 != sp2) return FALSE; } } } return TRUE; }
0
231,736
// Verifies that delivering the ZERO_PMTU_BLACKHOLE_DETECTION transport knob
// with value 1 disables D6D blackhole detection on the server connection.
TEST_F(QuicServerTransportTest, TestRegisterPMTUZeroBlackholeDetection) { server->handleKnobParams( {{static_cast<uint64_t>( TransportKnobParamId::ZERO_PMTU_BLACKHOLE_DETECTION), 1}}); EXPECT_TRUE(server->getConn().d6d.noBlackholeDetection); }
0
450,328
/*
 * Periodic display refresh callback.  Scans the server surface for dirty
 * regions, pushes updates to every connected client, and adapts the
 * polling interval: refresh faster while there is activity, back off
 * toward the maximum interval when idle or when no client is connected.
 */
static void vnc_refresh(DisplayChangeListener *dcl)
{
    VncDisplay *vd = container_of(dcl, VncDisplay, dcl);
    VncState *client, *next_client;
    int dirty;
    int sent_rects = 0;

    if (QTAILQ_EMPTY(&vd->clients)) {
        /* Nobody is watching: poll as slowly as allowed. */
        update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_MAX);
        return;
    }

    graphic_hw_update(vd->dcl.con);

    if (vnc_trylock_display(vd)) {
        /* Display is busy right now; retry soon at the base interval. */
        update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_BASE);
        return;
    }

    dirty = vnc_refresh_server_surface(vd);
    vnc_unlock_display(vd);

    QTAILQ_FOREACH_SAFE(client, &vd->clients, next, next_client) {
        sent_rects += vnc_update_client(client, dirty);
        /* client might be free()ed here */
    }

    if (dirty && sent_rects) {
        /* Activity: halve the interval, clamped at the base rate. */
        vd->dcl.update_interval /= 2;
        if (vd->dcl.update_interval < VNC_REFRESH_INTERVAL_BASE) {
            vd->dcl.update_interval = VNC_REFRESH_INTERVAL_BASE;
        }
    } else {
        /* Idle: back off gradually up to the maximum interval. */
        vd->dcl.update_interval += VNC_REFRESH_INTERVAL_INC;
        if (vd->dcl.update_interval > VNC_REFRESH_INTERVAL_MAX) {
            vd->dcl.update_interval = VNC_REFRESH_INTERVAL_MAX;
        }
    }
}
0
261,923
/*
 * String.prototype.split() implementation.  If the separator object has a
 * @@split method it is delegated to; otherwise the subject is converted to
 * a string and split on the separator into at most `limit` parts, which
 * are pushed into a new array stored in vm->retval.  An undefined
 * separator or an empty subject yields a single-element array.
 * NOTE(review): the njs_type_error format string reads "...\"%s\"to
 * object" (missing space) - looks like a message typo; confirm upstream.
 */
njs_string_prototype_split(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { size_t size; uint32_t limit; njs_int_t ret; njs_utf8_t utf8; njs_bool_t undefined; njs_value_t *this, *separator, *value; njs_value_t separator_lvalue, limit_lvalue, splitter; njs_array_t *array; const u_char *p, *start, *next, *last, *end; njs_string_prop_t string, split; njs_value_t arguments[3]; static const njs_value_t split_key = njs_wellknown_symbol(NJS_SYMBOL_SPLIT); this = njs_argument(args, 0); if (njs_slow_path(njs_is_null_or_undefined(this))) { njs_type_error(vm, "cannot convert \"%s\"to object", njs_type_string(this->type)); return NJS_ERROR; } separator = njs_lvalue_arg(&separator_lvalue, args, nargs, 1); value = njs_lvalue_arg(&limit_lvalue, args, nargs, 2); if (!njs_is_null_or_undefined(separator)) { ret = njs_value_method(vm, separator, njs_value_arg(&split_key), &splitter); if (njs_slow_path(ret != NJS_OK)) { return ret; } if (njs_is_defined(&splitter)) { arguments[0] = *this; arguments[1] = *value; return njs_function_call(vm, njs_function(&splitter), separator, arguments, 2, &vm->retval); } } ret = njs_value_to_string(vm, this, this); if (njs_slow_path(ret != NJS_OK)) { return ret; } array = njs_array_alloc(vm, 0, 0, NJS_ARRAY_SPARE); if (njs_slow_path(array == NULL)) { return NJS_ERROR; } limit = UINT32_MAX; if (njs_is_defined(value)) { ret = njs_value_to_uint32(vm, value, &limit); if (njs_slow_path(ret != NJS_OK)) { return ret; } } undefined = njs_is_undefined(separator); ret = njs_value_to_string(vm, separator, separator); if (njs_slow_path(ret != NJS_OK)) { return ret; } if (njs_slow_path(limit == 0)) { goto done; } if (njs_slow_path(undefined)) { goto single; } (void) njs_string_prop(&string, this); (void) njs_string_prop(&split, separator); if (njs_slow_path(string.size == 0)) { if (split.size != 0) { goto single; } goto done; } utf8 = NJS_STRING_BYTE; if (string.length != 0) { utf8 = NJS_STRING_ASCII; if (string.length != string.size) { utf8 
= NJS_STRING_UTF8; } } start = string.start; end = string.start + string.size; last = end - split.size; do { for (p = start; p <= last; p++) { if (memcmp(p, split.start, split.size) == 0) { goto found; } } p = end; found: next = p + split.size; /* Empty split string. */ if (p == next) { p = (utf8 != NJS_STRING_BYTE) ? njs_utf8_next(p, end) : p + 1; next = p; } size = p - start; ret = njs_string_split_part_add(vm, array, utf8, start, size); if (njs_slow_path(ret != NJS_OK)) { return ret; } start = next; limit--; } while (limit != 0 && p < end); goto done; single: value = njs_array_push(vm, array); if (njs_slow_path(value == NULL)) { return NJS_ERROR; } *value = *this; done: njs_set_array(&vm->retval, array); return NJS_OK; }
0
232,311
GF_ISOFile *gf_isom_new_movie() { GF_ISOFile *mov = (GF_ISOFile*)gf_malloc(sizeof(GF_ISOFile)); if (mov == NULL) { gf_isom_set_last_error(NULL, GF_OUT_OF_MEM); return NULL; } memset(mov, 0, sizeof(GF_ISOFile)); /*init the boxes*/ mov->TopBoxes = gf_list_new(); if (!mov->TopBoxes) { gf_isom_set_last_error(NULL, GF_OUT_OF_MEM); gf_free(mov); return NULL; } /*default storage mode is flat*/ mov->storageMode = GF_ISOM_STORE_FLAT; mov->es_id_default_sync = -1; return mov; }
0
213,076
/*
 * PCRE2 JIT code generator for an extended character class (XCLASS).
 * First scans the class to collect min/max code points and which Unicode
 * features are referenced (properties, scripts, script extensions,
 * boolean properties, bidi classes), then emits sljit comparison code for
 * the bitmap, single characters, ranges and property tests, jumping to
 * `found` or `backtracks` depending on whether the class is negated.
 * NOTE(review): this revision is flagged vulnerable in the dataset; PCRE2
 * shipped JIT fixes for XCLASS/Unicode-property matching (2022-era CVEs),
 * so this function should be diffed against current upstream
 * pcre2_jit_compile.c rather than patched piecemeal here.
 */
static void compile_xclass_matchingpath(compiler_common *common, PCRE2_SPTR cc, jump_list **backtracks) { DEFINE_COMPILER; jump_list *found = NULL; jump_list **list = (cc[0] & XCL_NOT) == 0 ? &found : backtracks; sljit_uw c, charoffset, max = 256, min = READ_CHAR_MAX; struct sljit_jump *jump = NULL; PCRE2_SPTR ccbegin; int compares, invertcmp, numberofcmps; #if defined SUPPORT_UNICODE && (PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16) BOOL utf = common->utf; #endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == [8|16] */ #ifdef SUPPORT_UNICODE sljit_u32 unicode_status = 0; int typereg = TMP1; const sljit_u32 *other_cases; sljit_uw typeoffset; #endif /* SUPPORT_UNICODE */ /* Scanning the necessary info. */ cc++; ccbegin = cc; compares = 0; if (cc[-1] & XCL_MAP) { min = 0; cc += 32 / sizeof(PCRE2_UCHAR); } while (*cc != XCL_END) { compares++; if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); if (c > max) max = c; if (c < min) min = c; #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_SAVE_CHAR; #endif /* SUPPORT_UNICODE */ } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); if (c < min) min = c; GETCHARINCTEST(c, cc); if (c > max) max = c; #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_SAVE_CHAR; #endif /* SUPPORT_UNICODE */ } #ifdef SUPPORT_UNICODE else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_CLIST) { other_cases = PRIV(ucd_caseless_sets) + cc[1]; while (*other_cases != NOTACHAR) { if (*other_cases > max) max = *other_cases; if (*other_cases < min) min = *other_cases; other_cases++; } } else { max = READ_CHAR_MAX; min = 0; } switch(*cc) { case PT_ANY: /* Any either accepts everything or ignored. 
*/ if (cc[-1] == XCL_PROP) { compile_char1_matchingpath(common, OP_ALLANY, cc, backtracks, FALSE); if (list == backtracks) add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); return; } break; case PT_LAMP: case PT_GC: case PT_PC: case PT_ALNUM: unicode_status |= XCLASS_HAS_TYPE; break; case PT_SCX: unicode_status |= XCLASS_HAS_SCRIPT_EXTENSION; if (cc[-1] == XCL_NOTPROP) { unicode_status |= XCLASS_SCRIPT_EXTENSION_NOTPROP; break; } compares++; /* Fall through */ case PT_SC: unicode_status |= XCLASS_HAS_SCRIPT; break; case PT_SPACE: case PT_PXSPACE: case PT_WORD: case PT_PXGRAPH: case PT_PXPRINT: case PT_PXPUNCT: unicode_status |= XCLASS_SAVE_CHAR | XCLASS_HAS_TYPE; break; case PT_CLIST: case PT_UCNC: unicode_status |= XCLASS_SAVE_CHAR; break; case PT_BOOL: unicode_status |= XCLASS_HAS_BOOL; break; case PT_BIDICL: unicode_status |= XCLASS_HAS_BIDICL; break; default: SLJIT_UNREACHABLE(); break; } cc += 2; } #endif /* SUPPORT_UNICODE */ } SLJIT_ASSERT(compares > 0); /* We are not necessary in utf mode even in 8 bit mode. */ cc = ccbegin; if ((cc[-1] & XCL_NOT) != 0) read_char(common, min, max, backtracks, READ_CHAR_UPDATE_STR_PTR); else { #ifdef SUPPORT_UNICODE read_char(common, min, max, (unicode_status & XCLASS_NEEDS_UCD) ? 
backtracks : NULL, 0); #else /* !SUPPORT_UNICODE */ read_char(common, min, max, NULL, 0); #endif /* SUPPORT_UNICODE */ } if ((cc[-1] & XCL_HASPROP) == 0) { if ((cc[-1] & XCL_MAP) != 0) { jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); if (!optimize_class(common, (const sljit_u8 *)cc, (((const sljit_u8 *)cc)[31] & 0x80) != 0, TRUE, &found)) { OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0); add_jump(compiler, &found, JUMP(SLJIT_NOT_ZERO)); } add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); JUMPHERE(jump); cc += 32 / sizeof(PCRE2_UCHAR); } else { OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, min); add_jump(compiler, (cc[-1] & XCL_NOT) == 0 ? backtracks : &found, CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, max - min)); } } else if ((cc[-1] & XCL_MAP) != 0) { OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_CHAR_SAVED; #endif /* SUPPORT_UNICODE */ if (!optimize_class(common, (const sljit_u8 *)cc, FALSE, TRUE, list)) { #if PCRE2_CODE_UNIT_WIDTH == 8 jump = NULL; if (common->utf) #endif /* PCRE2_CODE_UNIT_WIDTH == 8 */ jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0); add_jump(compiler, list, JUMP(SLJIT_NOT_ZERO)); #if PCRE2_CODE_UNIT_WIDTH == 8 if (common->utf) #endif /* PCRE2_CODE_UNIT_WIDTH == 8 */ JUMPHERE(jump); } OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0); cc += 32 / sizeof(PCRE2_UCHAR); } #ifdef SUPPORT_UNICODE if (unicode_status & XCLASS_NEEDS_UCD) { if ((unicode_status & (XCLASS_SAVE_CHAR | XCLASS_CHAR_SAVED)) == XCLASS_SAVE_CHAR) OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); 
#if PCRE2_CODE_UNIT_WIDTH == 32 if (!common->utf) { jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, MAX_UTF_CODE_POINT + 1); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, UNASSIGNED_UTF_CHAR); JUMPHERE(jump); } #endif /* PCRE2_CODE_UNIT_WIDTH == 32 */ OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 1); OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_stage2)); OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1); OP2(SLJIT_SHL, TMP1, 0, TMP2, 0, SLJIT_IMM, 3); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 2); OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0); ccbegin = cc; if (unicode_status & XCLASS_HAS_BIDICL) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass)); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BIDICLASS_SHIFT); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_BIDICL) { compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]); add_jump(compiler, compares > 0 ? 
list : backtracks, jump); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_BOOL) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, bprops)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BPROPS_MASK); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_BOOL) { compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_boolprop_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f)); add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp)); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_SCRIPT) { OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; switch (*cc) { case PT_SCX: if (cc[-1] == XCL_NOTPROP) break; /* Fall through */ case PT_SC: compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; add_jump(compiler, compares > 0 ? 
list : backtracks, CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1])); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_SCRIPT_EXTENSION) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_SCRIPTX_MASK); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2); if (unicode_status & XCLASS_SCRIPT_EXTENSION_NOTPROP) { if (unicode_status & XCLASS_HAS_TYPE) { if (unicode_status & XCLASS_SAVE_CHAR) { OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, TMP2, 0); unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0; } else { OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP2, 0); unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR; } } OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); } while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_SCX) { compares--; invertcmp = (compares == 0 && list != backtracks); jump = NULL; if (cc[-1] == XCL_NOTPROP) { jump = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, (int)cc[1]); if (invertcmp) { add_jump(compiler, backtracks, jump); jump = NULL; } invertcmp ^= 0x1; } OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_script_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f)); add_jump(compiler, compares > 0 ? 
list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp)); if (jump != NULL) JUMPHERE(jump); } cc += 2; } } if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0) OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); else if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR) OP1(SLJIT_MOV, TMP2, 0, RETURN_ADDR, 0); cc = ccbegin; } if (unicode_status & XCLASS_SAVE_CHAR) OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0); if (unicode_status & XCLASS_HAS_TYPE) { if (unicode_status & XCLASS_SAVE_CHAR) typereg = RETURN_ADDR; OP1(SLJIT_MOV_U8, typereg, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype)); } } #endif /* SUPPORT_UNICODE */ /* Generating code. */ charoffset = 0; numberofcmps = 0; #ifdef SUPPORT_UNICODE typeoffset = 0; #endif /* SUPPORT_UNICODE */ while (*cc != XCL_END) { compares--; invertcmp = (compares == 0 && list != backtracks); jump = NULL; if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE)) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_EQUAL); numberofcmps++; } else if (numberofcmps > 0) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); numberofcmps = 0; } else { jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); numberofcmps = 0; } } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); SET_CHAR_OFFSET(c); GETCHARINCTEST(c, cc); if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE)) { OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(numberofcmps == 0 ? 
SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); numberofcmps++; } else if (numberofcmps > 0) { OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); numberofcmps = 0; } else { jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); numberofcmps = 0; } } #ifdef SUPPORT_UNICODE else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); if (*cc == XCL_NOTPROP) invertcmp ^= 0x1; cc++; switch(*cc) { case PT_ANY: if (!invertcmp) jump = JUMP(SLJIT_JUMP); break; case PT_LAMP: OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_GC: c = PRIV(ucp_typerange)[(int)cc[1] * 2]; SET_TYPE_OFFSET(c); jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, PRIV(ucp_typerange)[(int)cc[1] * 2 + 1] - c); break; case PT_PC: jump = CMP(SLJIT_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, (int)cc[1] - typeoffset); break; case PT_SC: case PT_SCX: case PT_BOOL: case PT_BIDICL: compares++; /* Do nothing. 
*/ break; case PT_SPACE: case PT_PXSPACE: SET_CHAR_OFFSET(9); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0xd - 0x9); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x85 - 0x9); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x9); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); SET_TYPE_OFFSET(ucp_Zl); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_WORD: OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); /* Fall through. */ case PT_ALNUM: SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); SET_TYPE_OFFSET(ucp_Nd); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_CLIST: other_cases = PRIV(ucd_caseless_sets) + cc[1]; /* At least three characters are required. Otherwise this case would be handled by the normal code path. */ SLJIT_ASSERT(other_cases[0] != NOTACHAR && other_cases[1] != NOTACHAR && other_cases[2] != NOTACHAR); SLJIT_ASSERT(other_cases[0] < other_cases[1] && other_cases[1] < other_cases[2]); /* Optimizing character pairs, if their difference is power of 2. 
*/ if (is_powerof2(other_cases[1] ^ other_cases[0])) { if (charoffset == 0) OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); else { OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); } OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[1]); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); other_cases += 2; } else if (is_powerof2(other_cases[2] ^ other_cases[1])) { if (charoffset == 0) OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[2] ^ other_cases[1]); else { OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); } OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[2]); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset)); OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL); other_cases += 3; } else { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); } while (*other_cases != NOTACHAR) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset)); OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? 
SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL); } jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_UCNC: OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); SET_CHAR_OFFSET(0xa0); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); SET_CHAR_OFFSET(0); OP2U(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000 - 0); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_GREATER_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_PXGRAPH: /* C and Z groups are the farthest two groups. */ SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER); jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); /* In case of ucp_Cf, we overwrite the result. */ SET_CHAR_OFFSET(0x2066); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); JUMPHERE(jump); jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); break; case PT_PXPRINT: /* C and Z groups are the farthest two groups. 
*/ SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll); OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_NOT_EQUAL); jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); /* In case of ucp_Cf, we overwrite the result. */ SET_CHAR_OFFSET(0x2066); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); JUMPHERE(jump); jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); break; case PT_PXPUNCT: SET_TYPE_OFFSET(ucp_Sc); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); SET_CHAR_OFFSET(0); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x7f); OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_LESS_EQUAL); SET_TYPE_OFFSET(ucp_Pc); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; default: SLJIT_UNREACHABLE(); break; } cc += 2; } #endif /* SUPPORT_UNICODE */ if (jump != NULL) add_jump(compiler, compares > 0 ? list : backtracks, jump); } if (found != NULL) set_jumps(found, LABEL()); }
1
384,181
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, u8 policy, u32 flags, struct netlink_ext_ack *extack) { const struct nlattr * const *nla = ctx->nla; struct nft_stats __percpu *stats = NULL; struct nft_table *table = ctx->table; struct nft_base_chain *basechain; struct net *net = ctx->net; char name[NFT_NAME_MAXLEN]; struct nft_rule_blob *blob; struct nft_trans *trans; struct nft_chain *chain; unsigned int data_size; int err; if (table->use == UINT_MAX) return -EOVERFLOW; if (nla[NFTA_CHAIN_HOOK]) { struct nft_chain_hook hook; if (flags & NFT_CHAIN_BINDING) return -EOPNOTSUPP; err = nft_chain_parse_hook(net, nla, &hook, family, extack, true); if (err < 0) return err; basechain = kzalloc(sizeof(*basechain), GFP_KERNEL_ACCOUNT); if (basechain == NULL) { nft_chain_release_hook(&hook); return -ENOMEM; } chain = &basechain->chain; if (nla[NFTA_CHAIN_COUNTERS]) { stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); if (IS_ERR(stats)) { nft_chain_release_hook(&hook); kfree(basechain); return PTR_ERR(stats); } rcu_assign_pointer(basechain->stats, stats); } err = nft_basechain_init(basechain, family, &hook, flags); if (err < 0) { nft_chain_release_hook(&hook); kfree(basechain); return err; } } else { if (flags & NFT_CHAIN_BASE) return -EINVAL; if (flags & NFT_CHAIN_HW_OFFLOAD) return -EOPNOTSUPP; chain = kzalloc(sizeof(*chain), GFP_KERNEL_ACCOUNT); if (chain == NULL) return -ENOMEM; chain->flags = flags; } ctx->chain = chain; INIT_LIST_HEAD(&chain->rules); chain->handle = nf_tables_alloc_handle(table); chain->table = table; if (nla[NFTA_CHAIN_NAME]) { chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL_ACCOUNT); } else { if (!(flags & NFT_CHAIN_BINDING)) { err = -EINVAL; goto err_destroy_chain; } snprintf(name, sizeof(name), "__chain%llu", ++chain_id); chain->name = kstrdup(name, GFP_KERNEL_ACCOUNT); } if (!chain->name) { err = -ENOMEM; goto err_destroy_chain; } if (nla[NFTA_CHAIN_USERDATA]) { chain->udata = nla_memdup(nla[NFTA_CHAIN_USERDATA], 
GFP_KERNEL_ACCOUNT); if (chain->udata == NULL) { err = -ENOMEM; goto err_destroy_chain; } chain->udlen = nla_len(nla[NFTA_CHAIN_USERDATA]); } data_size = offsetof(struct nft_rule_dp, data); /* last rule */ blob = nf_tables_chain_alloc_rules(data_size); if (!blob) { err = -ENOMEM; goto err_destroy_chain; } RCU_INIT_POINTER(chain->blob_gen_0, blob); RCU_INIT_POINTER(chain->blob_gen_1, blob); err = nf_tables_register_hook(net, table, chain); if (err < 0) goto err_destroy_chain; trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto err_unregister_hook; } nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET; if (nft_is_base_chain(chain)) nft_trans_chain_policy(trans) = policy; err = nft_chain_add(table, chain); if (err < 0) { nft_trans_destroy(trans); goto err_unregister_hook; } if (stats) static_branch_inc(&nft_counters_enabled); table->use++; return 0; err_unregister_hook: nf_tables_unregister_hook(net, table, chain); err_destroy_chain: nf_tables_chain_destroy(ctx); return err; }
0
223,471
static void do_utfreadnewline_invalid(compiler_common *common) { /* Slow decoding a UTF-8 character, specialized for newlines. TMP1 contains the first byte of the character (>= 0xc0). Return char value in TMP1. */ DEFINE_COMPILER; struct sljit_label *loop; struct sljit_label *skip_start; struct sljit_label *three_byte_exit; struct sljit_jump *jump[5]; sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); if (common->nltype != NLTYPE_ANY) { SLJIT_ASSERT(common->nltype != NLTYPE_FIXED || common->newline < 128); /* All newlines are ascii, just skip intermediate octets. */ jump[0] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); loop = LABEL(); if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)) == SLJIT_SUCCESS) sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); else { OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); } OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xc0); CMPTO(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, 0x80, loop); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); JUMPHERE(jump[0]); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR); OP_SRC(SLJIT_FAST_RETURN, RETURN_ADDR, 0); return; } jump[0] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); jump[1] = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0xc2); jump[2] = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0xe2); skip_start = LABEL(); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xc0); jump[3] = CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0x80); /* Skip intermediate octets. 
*/ loop = LABEL(); jump[4] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xc0); CMPTO(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, 0x80, loop); JUMPHERE(jump[3]); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); three_byte_exit = LABEL(); JUMPHERE(jump[0]); JUMPHERE(jump[4]); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR); OP_SRC(SLJIT_FAST_RETURN, RETURN_ADDR, 0); /* Two byte long newline: 0x85. */ JUMPHERE(jump[1]); CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0x85, skip_start); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0x85); OP_SRC(SLJIT_FAST_RETURN, RETURN_ADDR, 0); /* Three byte long newlines: 0x2028 and 0x2029. */ JUMPHERE(jump[2]); CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0x80, skip_start); CMPTO(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0, three_byte_exit); OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); OP2(SLJIT_SUB, TMP1, 0, TMP2, 0, SLJIT_IMM, 0x80); CMPTO(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0x40, skip_start); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, 0x2000); OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); OP_SRC(SLJIT_FAST_RETURN, RETURN_ADDR, 0); }
0
234,190
display_debug_info (struct dwarf_section *section, void *file) { return process_debug_info (section, file, section->abbrev_sec, false, false); }
0
401,500
static void timers_update_migration(void) { if (sysctl_timer_migration && tick_nohz_active) static_branch_enable(&timers_migration_enabled); else static_branch_disable(&timers_migration_enabled); }
0
476,126
static int len_ext_prop(struct usb_configuration *c, int interface) { struct usb_function *f; struct usb_os_desc *d; int j, res; res = 10; /* header length */ f = c->interface[interface]; for (j = 0; j < f->os_desc_n; ++j) { if (interface != f->os_desc_table[j].if_id) continue; d = f->os_desc_table[j].os_desc; if (d) return min(res + d->ext_prop_len, 4096); } return res; }
0
216,654
auth_request_get_var_expand_table_full(const struct auth_request *auth_request, auth_request_escape_func_t *escape_func, unsigned int *count) { const unsigned int auth_count = N_ELEMENTS(auth_request_var_expand_static_tab); struct var_expand_table *tab, *ret_tab; const char *orig_user, *auth_user; if (escape_func == NULL) escape_func = escape_none; /* keep the extra fields at the beginning. the last static_tab field contains the ending NULL-fields. */ tab = ret_tab = t_malloc((*count + auth_count) * sizeof(*tab)); memset(tab, 0, *count * sizeof(*tab)); tab += *count; *count += auth_count; memcpy(tab, auth_request_var_expand_static_tab, auth_count * sizeof(*tab)); tab[0].value = escape_func(auth_request->user, auth_request); tab[1].value = escape_func(t_strcut(auth_request->user, '@'), auth_request); tab[2].value = strchr(auth_request->user, '@'); if (tab[2].value != NULL) tab[2].value = escape_func(tab[2].value+1, auth_request); tab[3].value = escape_func(auth_request->service, auth_request); /* tab[4] = we have no home dir */ if (auth_request->local_ip.family != 0) tab[5].value = net_ip2addr(&auth_request->local_ip); if (auth_request->remote_ip.family != 0) tab[6].value = net_ip2addr(&auth_request->remote_ip); tab[7].value = dec2str(auth_request->client_pid); if (auth_request->mech_password != NULL) { tab[8].value = escape_func(auth_request->mech_password, auth_request); } if (auth_request->userdb_lookup) { tab[9].value = auth_request->userdb == NULL ? "" : dec2str(auth_request->userdb->userdb->id); } else { tab[9].value = auth_request->passdb == NULL ? "" : dec2str(auth_request->passdb->passdb->id); } tab[10].value = auth_request->mech_name == NULL ? "" : escape_func(auth_request->mech_name, auth_request); tab[11].value = auth_request->secured ? "secured" : ""; tab[12].value = dec2str(auth_request->local_port); tab[13].value = dec2str(auth_request->remote_port); tab[14].value = auth_request->valid_client_cert ? 
"valid" : ""; if (auth_request->requested_login_user != NULL) { const char *login_user = auth_request->requested_login_user; tab[15].value = escape_func(login_user, auth_request); tab[16].value = escape_func(t_strcut(login_user, '@'), auth_request); tab[17].value = strchr(login_user, '@'); if (tab[17].value != NULL) { tab[17].value = escape_func(tab[17].value+1, auth_request); } } tab[18].value = auth_request->session_id == NULL ? NULL : escape_func(auth_request->session_id, auth_request); if (auth_request->real_local_ip.family != 0) tab[19].value = net_ip2addr(&auth_request->real_local_ip); if (auth_request->real_remote_ip.family != 0) tab[20].value = net_ip2addr(&auth_request->real_remote_ip); tab[21].value = dec2str(auth_request->real_local_port); tab[22].value = dec2str(auth_request->real_remote_port); tab[23].value = strchr(auth_request->user, '@'); if (tab[23].value != NULL) { tab[23].value = escape_func(t_strcut(tab[23].value+1, '@'), auth_request); } tab[24].value = strrchr(auth_request->user, '@'); if (tab[24].value != NULL) tab[24].value = escape_func(tab[24].value+1, auth_request); tab[25].value = auth_request->master_user == NULL ? NULL : escape_func(auth_request->master_user, auth_request); tab[26].value = auth_request->session_pid == (pid_t)-1 ? NULL : dec2str(auth_request->session_pid); orig_user = auth_request->original_username != NULL ? 
auth_request->original_username : auth_request->user; tab[27].value = escape_func(orig_user, auth_request); tab[28].value = escape_func(t_strcut(orig_user, '@'), auth_request); tab[29].value = strchr(orig_user, '@'); if (tab[29].value != NULL) tab[29].value = escape_func(tab[29].value+1, auth_request); if (auth_request->master_user != NULL) auth_user = auth_request->master_user; else auth_user = orig_user; tab[30].value = escape_func(auth_user, auth_request); tab[31].value = escape_func(t_strcut(auth_user, '@'), auth_request); tab[32].value = strchr(auth_user, '@'); if (tab[32].value != NULL) tab[32].value = escape_func(tab[32].value+1, auth_request); if (auth_request->local_name != NULL) tab[33].value = escape_func(auth_request->local_name, auth_request); else tab[33].value = ""; return ret_tab; }
1
455,389
xfs_iget( xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, uint flags, uint lock_flags, xfs_inode_t **ipp) { xfs_inode_t *ip; int error; xfs_perag_t *pag; xfs_agino_t agino; /* * xfs_reclaim_inode() uses the ILOCK to ensure an inode * doesn't get freed while it's being referenced during a * radix tree traversal here. It assumes this function * aqcuires only the ILOCK (and therefore it has no need to * involve the IOLOCK in this synchronization). */ ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); /* reject inode numbers outside existing AGs */ if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) return -EINVAL; XFS_STATS_INC(mp, xs_ig_attempts); /* get the perag structure and ensure that it's inode capable */ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); agino = XFS_INO_TO_AGINO(mp, ino); again: error = 0; rcu_read_lock(); ip = radix_tree_lookup(&pag->pag_ici_root, agino); if (ip) { error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); if (error) goto out_error_or_again; } else { rcu_read_unlock(); if (flags & XFS_IGET_INCORE) { error = -ENODATA; goto out_error_or_again; } XFS_STATS_INC(mp, xs_ig_missed); error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, flags, lock_flags); if (error) goto out_error_or_again; } xfs_perag_put(pag); *ipp = ip; /* * If we have a real type for an on-disk inode, we can setup the inode * now. If it's a new inode being created, xfs_ialloc will handle it. */ if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) xfs_setup_existing_inode(ip); return 0; out_error_or_again: if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) { delay(1); goto again; } xfs_perag_put(pag); return error; }
0
250,680
string_view HttpFile::getFileExtension() const { return implPtr_->getFileExtension(); }
0
432,323
static RAMBlock *qemu_get_ram_block(struct uc_struct *uc, ram_addr_t addr) { RAMBlock *block; block = uc->ram_list.mru_block; if (block && addr - block->offset < block->max_length) { return block; } RAMBLOCK_FOREACH(block) { if (addr - block->offset < block->max_length) { goto found; } } fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); abort(); found: uc->ram_list.mru_block = block; return block; }
0
275,933
static void apply_z(uECC_word_t * X1, uECC_word_t * Y1, const uECC_word_t * const Z, uECC_Curve curve) { uECC_word_t t1[uECC_MAX_WORDS]; uECC_vli_modSquare_fast(t1, Z, curve); /* z^2 */ uECC_vli_modMult_fast(X1, X1, t1, curve); /* x1 * z^2 */ uECC_vli_modMult_fast(t1, t1, Z, curve); /* z^3 */ uECC_vli_modMult_fast(Y1, Y1, t1, curve); /* y1 * z^3 */ }
0
200,305
pcx_write_rle(const byte * from, const byte * end, int step, gp_file * file) { /* * The PCX format theoretically allows encoding runs of 63 * identical bytes, but some readers can't handle repetition * counts greater than 15. */ #define MAX_RUN_COUNT 15 int max_run = step * MAX_RUN_COUNT; while (from < end) { byte data = *from; from += step; if (data != *from || from == end) { if (data >= 0xc0) gp_fputc(0xc1, file); } else { const byte *start = from; while ((from < end) && (*from == data)) from += step; /* Now (from - start) / step + 1 is the run length. */ while (from - start >= max_run) { gp_fputc(0xc0 + MAX_RUN_COUNT, file); gp_fputc(data, file); start += max_run; } if (from > start || data >= 0xc0) gp_fputc((from - start) / step + 0xc1, file); } gp_fputc(data, file); } #undef MAX_RUN_COUNT }
1
349,888
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, struct hw_atl_utils_fw_rpc **rpc) { struct aq_hw_atl_utils_fw_rpc_tid_s sw; struct aq_hw_atl_utils_fw_rpc_tid_s fw; int err = 0; do { sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); self->rpc_tid = sw.tid; err = readx_poll_timeout_atomic(hw_atl_utils_rpc_state_get, self, fw.val, sw.tid == fw.tid, 1000U, 100000U); if (err < 0) goto err_exit; err = aq_hw_err_from_flags(self); if (err < 0) goto err_exit; if (fw.len == 0xFFFFU) { if (sw.len > sizeof(self->rpc)) { printk(KERN_INFO "Invalid sw len: %x\n", sw.len); err = -EINVAL; goto err_exit; } err = hw_atl_utils_fw_rpc_call(self, sw.len); if (err < 0) goto err_exit; } } while (sw.tid != fw.tid || 0xFFFFU == fw.len); if (rpc) { if (fw.len) { if (fw.len > sizeof(self->rpc)) { printk(KERN_INFO "Invalid fw len: %x\n", fw.len); err = -EINVAL; goto err_exit; } err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr, (u32 *)(void *) &self->rpc, (fw.len + sizeof(u32) - sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; } *rpc = &self->rpc; } err_exit: return err; }
0
247,095
GF_Err gf_fs_set_separators(GF_FilterSession *session, const char *separator_set) { if (!session) return GF_BAD_PARAM; if (separator_set && (strlen(separator_set)<5)) return GF_BAD_PARAM; if (separator_set) { session->sep_args = separator_set[0]; session->sep_name = separator_set[1]; session->sep_frag = separator_set[2]; session->sep_list = separator_set[3]; session->sep_neg = separator_set[4]; } else { session->sep_args = ':'; session->sep_name = '='; session->sep_frag = '#'; session->sep_list = ','; session->sep_neg = '!'; } return GF_OK; }
0
207,804
void update_process_times(int user_tick) { struct task_struct *p = current; /* Note: this timer irq context must be accounted for as well. */ account_process_tick(p, user_tick); run_local_timers(); rcu_sched_clock_irq(user_tick); #ifdef CONFIG_IRQ_WORK if (in_irq()) irq_work_tick(); #endif scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(); }
1
338,209
void WasmBinaryBuilder::visitGlobalSet(GlobalSet* curr) { BYN_TRACE("zz node: GlobalSet\n"); auto index = getU32LEB(); if (index < globalImports.size()) { auto* import = globalImports[index]; curr->name = import->name; } else { Index adjustedIndex = index - globalImports.size(); if (adjustedIndex >= globals.size()) { throwError("invalid global index"); } curr->name = globals[adjustedIndex]->name; } curr->value = popNonVoidExpression(); globalRefs[index].push_back(curr); // we don't know the final name yet curr->finalize(); }
0
503,874
SCM_DEFINE (scm_readdir, "readdir", 1, 0, 0, (SCM port), "Return (as a string) the next directory entry from the directory stream\n" "@var{port}. If there is no remaining entry to be read then the\n" "end of file object is returned.") #define FUNC_NAME s_scm_readdir { struct dirent_or_dirent64 *rdent; SCM_VALIDATE_DIR (1, port); if (!SCM_DIR_OPEN_P (port)) SCM_MISC_ERROR ("Directory ~S is not open.", scm_list_1 (port)); #if HAVE_READDIR_R /* As noted in the glibc manual, on various systems (such as Solaris) the d_name[] field is only 1 char and you're expected to size the dirent buffer for readdir_r based on NAME_MAX. The SCM_MAX expressions below effectively give either sizeof(d_name) or NAME_MAX+1, whichever is bigger. On solaris 10 there's no NAME_MAX constant, it's necessary to use pathconf(). We prefer NAME_MAX though, since it should be a constant and will therefore save a system call. We also prefer it since dirfd() is not available everywhere. An alternative to dirfd() would be to open() the directory and then use fdopendir(), if the latter is available. That'd let us hold the fd somewhere in the smob, or just the dirent size calculated once. */ { struct dirent_or_dirent64 de; /* just for sizeof */ DIR *ds = (DIR *) SCM_SMOB_DATA_1 (port); #ifdef NAME_MAX char buf [SCM_MAX (sizeof (de), sizeof (de) - sizeof (de.d_name) + NAME_MAX + 1)]; #else char *buf; long name_max = fpathconf (dirfd (ds), _PC_NAME_MAX); if (name_max == -1) SCM_SYSERROR; buf = alloca (SCM_MAX (sizeof (de), sizeof (de) - sizeof (de.d_name) + name_max + 1)); #endif errno = 0; SCM_SYSCALL (readdir_r_or_readdir64_r (ds, (struct dirent_or_dirent64 *) buf, &rdent)); if (errno != 0) SCM_SYSERROR; if (! rdent) return SCM_EOF_VAL; return (rdent ? 
scm_from_locale_stringn (rdent->d_name, NAMLEN (rdent)) : SCM_EOF_VAL); } #else { SCM ret; scm_dynwind_begin (0); scm_i_dynwind_pthread_mutex_lock (&scm_i_misc_mutex); errno = 0; SCM_SYSCALL (rdent = readdir_or_readdir64 ((DIR *) SCM_SMOB_DATA_1 (port))); if (errno != 0) SCM_SYSERROR; ret = (rdent ? scm_from_locale_stringn (rdent->d_name, NAMLEN (rdent)) : SCM_EOF_VAL); scm_dynwind_end (); return ret; } #endif }
0
101,666
void WebProcessProxy::didClearPluginSiteData(uint64_t callbackID) { m_context->pluginSiteDataManager()->didClearSiteData(callbackID); }
0
432,300
static void flatviews_init(struct uc_struct *uc) { if (uc->flat_views) { return; } uc->flat_views = g_hash_table_new_full(NULL, NULL, NULL, (GDestroyNotify) flatview_unref); if (!uc->empty_view) { uc->empty_view = generate_memory_topology(uc, NULL); /* We keep it alive forever in the global variable. */ flatview_ref(uc->empty_view); g_hash_table_replace(uc->flat_views, NULL, uc->empty_view); } }
0
225,049
pqDropServerData(PGconn *conn) { PGnotify *notify; pgParameterStatus *pstatus; /* Forget pending notifies */ notify = conn->notifyHead; while (notify != NULL) { PGnotify *prev = notify; notify = notify->next; free(prev); } conn->notifyHead = conn->notifyTail = NULL; pqFreeCommandQueue(conn->cmd_queue_head); conn->cmd_queue_head = conn->cmd_queue_tail = NULL; pqFreeCommandQueue(conn->cmd_queue_recycle); conn->cmd_queue_recycle = NULL; /* Reset ParameterStatus data, as well as variables deduced from it */ pstatus = conn->pstatus; while (pstatus != NULL) { pgParameterStatus *prev = pstatus; pstatus = pstatus->next; free(prev); } conn->pstatus = NULL; conn->client_encoding = PG_SQL_ASCII; conn->std_strings = false; conn->default_transaction_read_only = PG_BOOL_UNKNOWN; conn->in_hot_standby = PG_BOOL_UNKNOWN; conn->sversion = 0; /* Drop large-object lookup data */ if (conn->lobjfuncs) free(conn->lobjfuncs); conn->lobjfuncs = NULL; /* Reset assorted other per-connection state */ conn->last_sqlstate[0] = '\0'; conn->auth_req_received = false; conn->password_needed = false; conn->write_failed = false; if (conn->write_err_msg) free(conn->write_err_msg); conn->write_err_msg = NULL; conn->be_pid = 0; conn->be_key = 0; }
0
256,953
string TraceString(const OpKernelContext& ctx, bool verbose) const override { string op = profiler::TraceMeOp(name_view(), type_string_view()); string equation = strings::StrCat("(", equation_, ")"); if (verbose) { string shape = ShapeTraceString(ctx); if (!shape.empty()) { return profiler::TraceMeEncode( std::move(op), {{"equation", equation}, {"shape", shape}}); } } return profiler::TraceMeEncode(std::move(op), {{"equation", equation}}); }
0
498,092
void cgit_print_error_page(int code, const char *msg, const char *fmt, ...) { va_list ap; ctx.page.expires = ctx.cfg.cache_dynamic_ttl; ctx.page.status = code; ctx.page.statusmsg = msg; cgit_print_http_headers(); cgit_print_docstart(); cgit_print_pageheader(); va_start(ap, fmt); cgit_vprint_error(fmt, ap); va_end(ap); cgit_print_docend(); }
0
326,110
use_multibytecode(int c) { return has_mbyte && (*mb_char2len)(c) > 1 && (re_multi_type(peekchr()) != NOT_MULTI || (enc_utf8 && utf_iscomposing(c))); }
0
225,390
static void vm_open(struct vm_area_struct *vma) { struct v4l2l_buffer *buf; MARK(); buf = vma->vm_private_data; buf->use_count++; }
0
328,955
R_API RBinJavaBootStrapMethod *r_bin_java_bootstrap_method_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaBootStrapArgument *bsm_arg = NULL; ut32 i = 0; ut64 offset = 0; RBinJavaBootStrapMethod *bsm = R_NEW0 (RBinJavaBootStrapMethod); if (!bsm) { // TODO eprintf failed to allocate bytes for bootstrap_method. return bsm; } memset (bsm, 0, sizeof (RBinJavaBootStrapMethod)); bsm->file_offset = buf_offset; bsm->bootstrap_method_ref = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; bsm->num_bootstrap_arguments = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; bsm->bootstrap_arguments = r_list_new (); for (i = 0; i < bsm->num_bootstrap_arguments; i++) { if (offset >= sz) { break; } // bsm_arg = r_bin_java_bootstrap_method_argument_new (bin, bin->b->cur); bsm_arg = r_bin_java_bootstrap_method_argument_new (buffer + offset, sz - offset, buf_offset + offset); if (bsm_arg) { offset += bsm_arg->size; r_list_append (bsm->bootstrap_arguments, (void *) bsm_arg); } else { // TODO eprintf Failed to read the %d boot strap method. } } bsm->size = offset; return bsm; }
0
513,216
sys_var *find_sys_var(THD *thd, const char *str, size_t length) { return find_sys_var_ex(thd, str, length, false, false); }
0
512,956
longlong val_int() { return to_datetime(current_thd).to_longlong(); }
0
224,465
static void ttxt_dom_progress(void *cbk, u64 cur_samp, u64 count) { GF_TXTIn *ctx = (GF_TXTIn *)cbk; ctx->end = count; }
0
513,096
void set_geometry_type(uint type) { DBUG_ASSERT(type <= m_geometry_type_unknown); m_geometry_type= type; }
0
282,857
static bool rsi_map_rates(u16 rate, int *offset) { int kk; for (kk = 0; kk < ARRAY_SIZE(rsi_mcsrates); kk++) { if (rate == mcs[kk]) { *offset = kk; return false; } } for (kk = 0; kk < ARRAY_SIZE(rsi_rates); kk++) { if (rate == rsi_rates[kk].bitrate / 5) { *offset = kk; break; } } return true; }
0
508,826
void st_select_lex_node::include_global(st_select_lex_node **plink) { if ((link_next= *plink)) link_next->link_prev= &link_next; link_prev= plink; *plink= this; }
0
424,945
static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) { u32 hpm, wprot; switch (trans->trans_cfg->device_family) { case IWL_DEVICE_FAMILY_9000: wprot = PREG_PRPH_WPROT_9000; break; case IWL_DEVICE_FAMILY_22000: wprot = PREG_PRPH_WPROT_22000; break; default: return 0; } hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot); if (wprot_val & PREG_WFPM_ACCESS) { IWL_ERR(trans, "Error, can not clear persistence bit\n"); return -EPERM; } iwl_write_umac_prph_no_grab(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT); } return 0; }
0
413,653
R_API void r_core_anal_coderefs(RCore *core, ut64 addr) { RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, -1); if (fcn) { const char *me = fcn->name; RListIter *iter; RAnalRef *ref; RList *refs = r_anal_function_get_refs (fcn); r_cons_printf ("agn %s\n", me); r_list_foreach (refs, iter, ref) { r_strf_buffer (32); RFlagItem *item = r_flag_get_i (core->flags, ref->addr); const char *dst = item? item->name: r_strf ("0x%08"PFMT64x, ref->addr); r_cons_printf ("agn %s\n", dst); r_cons_printf ("age %s %s\n", me, dst); } r_list_free (refs); } else { eprintf("Not in a function. Use 'df' to define it.\n"); } }
0
508,925
void st_select_lex::collect_grouping_fields(THD *thd) { grouping_tmp_fields.empty(); List_iterator<Item> li(join->fields_list); Item *item= li++; for (uint i= 0; i < master_unit()->derived->table->s->fields; i++, (item=li++)) { for (ORDER *ord= join->group_list; ord; ord= ord->next) { if ((*ord->item)->eq((Item*)item, 0)) { Grouping_tmp_field *grouping_tmp_field= new Grouping_tmp_field(master_unit()->derived->table->field[i], item); grouping_tmp_fields.push_back(grouping_tmp_field); } } } }
0
310,019
show_tty_change(TTY * old_settings, TTY * new_settings, const char *name, int which, unsigned def) { unsigned older, newer; char *p; newer = new_settings->c_cc[which]; older = old_settings->c_cc[which]; if (older == newer && older == def) return; (void) fprintf(stderr, "%s %s ", name, older == newer ? "is" : "set to"); if (DISABLED(newer)) { (void) fprintf(stderr, "undef.\n"); /* * Check 'delete' before 'backspace', since the key_backspace value * is ambiguous. */ } else if (newer == 0177) { (void) fprintf(stderr, "delete.\n"); } else if ((p = key_backspace) != 0 && newer == (unsigned char) p[0] && p[1] == '\0') { (void) fprintf(stderr, "backspace.\n"); } else if (newer < 040) { newer ^= 0100; (void) fprintf(stderr, "control-%c (^%c).\n", UChar(newer), UChar(newer)); } else (void) fprintf(stderr, "%c.\n", UChar(newer)); }
0
225,674
GF_Box *pasp_box_new() { ISOM_DECL_BOX_ALLOC(GF_PixelAspectRatioBox, GF_ISOM_BOX_TYPE_PASP); return (GF_Box *)tmp;
0
267,859
Status ModularFrameDecoder::DecodeQuantTable( size_t required_size_x, size_t required_size_y, BitReader* br, QuantEncoding* encoding, size_t idx, ModularFrameDecoder* modular_frame_decoder) { JXL_RETURN_IF_ERROR(F16Coder::Read(br, &encoding->qraw.qtable_den)); if (encoding->qraw.qtable_den < kAlmostZero) { // qtable[] values are already checked for <= 0 so the denominator may not // be negative. return JXL_FAILURE("Invalid qtable_den: value too small"); } Image image(required_size_x, required_size_y, 8, 3); ModularOptions options; if (modular_frame_decoder) { JXL_RETURN_IF_ERROR(ModularGenericDecompress( br, image, /*header=*/nullptr, ModularStreamId::QuantTable(idx).ID(modular_frame_decoder->frame_dim), &options, /*undo_transforms=*/-1, &modular_frame_decoder->tree, &modular_frame_decoder->code, &modular_frame_decoder->context_map)); } else { JXL_RETURN_IF_ERROR(ModularGenericDecompress(br, image, /*header=*/nullptr, 0, &options, /*undo_transforms=*/-1)); } if (!encoding->qraw.qtable) { encoding->qraw.qtable = new std::vector<int>(); } encoding->qraw.qtable->resize(required_size_x * required_size_y * 3); for (size_t c = 0; c < 3; c++) { for (size_t y = 0; y < required_size_y; y++) { int* JXL_RESTRICT row = image.channel[c].Row(y); for (size_t x = 0; x < required_size_x; x++) { (*encoding->qraw.qtable)[c * required_size_x * required_size_y + y * required_size_x + x] = row[x]; if (row[x] <= 0) { return JXL_FAILURE("Invalid raw quantization table"); } } } } return true; }
0
384,907
gen_expand_wildcards( int num_pat, // number of input patterns char_u **pat, // array of input patterns int *num_file, // resulting number of files char_u ***file, // array of resulting files int flags) // EW_* flags { int i; garray_T ga; char_u *p; static int recursive = FALSE; int add_pat; int retval = OK; #if defined(FEAT_SEARCHPATH) int did_expand_in_path = FALSE; #endif /* * expand_env() is called to expand things like "~user". If this fails, * it calls ExpandOne(), which brings us back here. In this case, always * call the machine specific expansion function, if possible. Otherwise, * return FAIL. */ if (recursive) #ifdef SPECIAL_WILDCHAR return mch_expand_wildcards(num_pat, pat, num_file, file, flags); #else return FAIL; #endif #ifdef SPECIAL_WILDCHAR /* * If there are any special wildcard characters which we cannot handle * here, call machine specific function for all the expansion. This * avoids starting the shell for each argument separately. * For `=expr` do use the internal function. */ for (i = 0; i < num_pat; i++) { if (has_special_wildchar(pat[i]) # ifdef VIM_BACKTICK && !(vim_backtick(pat[i]) && pat[i][1] == '=') # endif ) return mch_expand_wildcards(num_pat, pat, num_file, file, flags); } #endif recursive = TRUE; /* * The matching file names are stored in a growarray. Init it empty. */ ga_init2(&ga, sizeof(char_u *), 30); for (i = 0; i < num_pat; ++i) { add_pat = -1; p = pat[i]; #ifdef VIM_BACKTICK if (vim_backtick(p)) { add_pat = expand_backtick(&ga, p, flags); if (add_pat == -1) retval = FAIL; } else #endif { /* * First expand environment variables, "~/" and "~user/". */ if ((has_env_var(p) && !(flags & EW_NOTENV)) || *p == '~') { p = expand_env_save_opt(p, TRUE); if (p == NULL) p = pat[i]; #ifdef UNIX /* * On Unix, if expand_env() can't expand an environment * variable, use the shell to do that. Discard previously * found file names and start all over again. 
*/ else if (has_env_var(p) || *p == '~') { vim_free(p); ga_clear_strings(&ga); i = mch_expand_wildcards(num_pat, pat, num_file, file, flags|EW_KEEPDOLLAR); recursive = FALSE; return i; } #endif } /* * If there are wildcards: Expand file names and add each match to * the list. If there is no match, and EW_NOTFOUND is given, add * the pattern. * If there are no wildcards: Add the file name if it exists or * when EW_NOTFOUND is given. */ if (mch_has_exp_wildcard(p)) { #if defined(FEAT_SEARCHPATH) if ((flags & EW_PATH) && !mch_isFullName(p) && !(p[0] == '.' && (vim_ispathsep(p[1]) || (p[1] == '.' && vim_ispathsep(p[2])))) ) { // :find completion where 'path' is used. // Recursiveness is OK here. recursive = FALSE; add_pat = expand_in_path(&ga, p, flags); recursive = TRUE; did_expand_in_path = TRUE; } else #endif add_pat = mch_expandpath(&ga, p, flags); } } if (add_pat == -1 || (add_pat == 0 && (flags & EW_NOTFOUND))) { char_u *t = backslash_halve_save(p); // When EW_NOTFOUND is used, always add files and dirs. Makes // "vim c:/" work. if (flags & EW_NOTFOUND) addfile(&ga, t, flags | EW_DIR | EW_FILE); else addfile(&ga, t, flags); if (t != p) vim_free(t); } #if defined(FEAT_SEARCHPATH) if (did_expand_in_path && ga.ga_len > 0 && (flags & EW_PATH)) uniquefy_paths(&ga, p); #endif if (p != pat[i]) vim_free(p); } // When returning FAIL the array must be freed here. if (retval == FAIL) ga_clear(&ga); *num_file = ga.ga_len; *file = (ga.ga_data != NULL) ? (char_u **)ga.ga_data : (char_u **)_("no matches"); recursive = FALSE; return ((flags & EW_EMPTYOK) || ga.ga_data != NULL) ? retval : FAIL; }
0
474,063
onig_error_code_to_str(s, code, va_alist) UChar* s; int code; va_dcl #endif { UChar *p, *q; OnigErrorInfo* einfo; size_t len; int is_over; UChar parbuf[MAX_ERROR_PAR_LEN]; va_list vargs; va_init_list(vargs, code); switch (code) { case ONIGERR_UNDEFINED_NAME_REFERENCE: case ONIGERR_UNDEFINED_GROUP_REFERENCE: case ONIGERR_MULTIPLEX_DEFINED_NAME: case ONIGERR_MULTIPLEX_DEFINITION_NAME_CALL: case ONIGERR_INVALID_GROUP_NAME: case ONIGERR_INVALID_CHAR_IN_GROUP_NAME: case ONIGERR_INVALID_CHAR_PROPERTY_NAME: einfo = va_arg(vargs, OnigErrorInfo*); len = to_ascii(einfo->enc, einfo->par, einfo->par_end, parbuf, MAX_ERROR_PAR_LEN - 3, &is_over); q = onig_error_code_to_format(code); p = s; while (*q != '\0') { if (*q == '%') { q++; if (*q == 'n') { /* '%n': name */ xmemcpy(p, parbuf, len); p += len; if (is_over != 0) { xmemcpy(p, "...", 3); p += 3; } q++; } else goto normal_char; } else { normal_char: *p++ = *q++; } } *p = '\0'; len = p - s; break; default: q = onig_error_code_to_format(code); len = onigenc_str_bytelen_null(ONIG_ENCODING_ASCII, q); xmemcpy(s, q, len); s[len] = '\0'; break; } va_end(vargs); return (int)len; }
0
362,306
static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) { struct snd_usb_audio *chip = usb_get_intfdata(intf); struct snd_usb_stream *as; struct usb_mixer_interface *mixer; struct list_head *p; if (chip == (void *)-1L) return 0; chip->autosuspended = !!PMSG_IS_AUTO(message); if (!chip->autosuspended) snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); if (!chip->num_suspended_intf++) { list_for_each_entry(as, &chip->pcm_list, list) { snd_pcm_suspend_all(as->pcm); snd_usb_pcm_suspend(as); as->substream[0].need_setup_ep = as->substream[1].need_setup_ep = true; } list_for_each(p, &chip->midi_list) snd_usbmidi_suspend(p); list_for_each_entry(mixer, &chip->mixer_list, list) snd_usb_mixer_suspend(mixer); } return 0; }
0
481,795
int qh_register_handler(const char *name, const char *description, unsigned int options, qh_handler handler) { struct query_handler *qh = NULL; int result = 0; if (name == NULL) { logit(NSLOG_RUNTIME_ERROR, TRUE, "qh: Failed to register handler with no name\n"); return -1; } if (handler == NULL) { logit(NSLOG_RUNTIME_ERROR, TRUE, "qh: Failed to register handler '%s': No handler function specified\n", name); return -1; } if (strlen(name) > 128) { logit(NSLOG_RUNTIME_ERROR, TRUE, "qh: Failed to register handler '%s': Name too long\n", name); return -ENAMETOOLONG; } /* names must be unique */ if (qh_find_handler(name)) { logit(NSLOG_RUNTIME_WARNING, TRUE, "qh: Handler '%s' registered more than once\n", name); return -1; } qh = calloc(1, sizeof(*qh)); if (qh == NULL) { logit(NSLOG_RUNTIME_ERROR, TRUE, "qh: Failed to allocate memory for handler '%s'\n", name); return -errno; } qh->name = name; qh->description = description; qh->handler = handler; qh->options = options; qh->next_qh = qhandlers; if (qhandlers) { qhandlers->prev_qh = qh; } qhandlers = qh; result = dkhash_insert(qh_table, qh->name, NULL, qh); if (result < 0) { logit(NSLOG_RUNTIME_ERROR, TRUE, "qh: Failed to insert query handler '%s' (%p) into hash table %p (%d): %s\n", name, qh, qh_table, result, strerror(errno)); free(qh); return result; } return 0; }
0
275,986
uECC_VLI_API void uECC_vli_nativeToBytes(uint8_t *bytes, int num_bytes, const uECC_word_t *native) { wordcount_t i; for (i = 0; i < num_bytes; ++i) { unsigned b = num_bytes - 1 - i; bytes[i] = native[b / uECC_WORD_SIZE] >> (8 * (b % uECC_WORD_SIZE)); } }
0
438,666
static void rpmsg_upref_sleepers(struct virtproc_info *vrp) { /* support multiple concurrent senders */ mutex_lock(&vrp->tx_lock); /* are we the first sleeping context waiting for tx buffers ? */ if (atomic_inc_return(&vrp->sleepers) == 1) /* enable "tx-complete" interrupts before dozing off */ virtqueue_enable_cb(vrp->svq); mutex_unlock(&vrp->tx_lock); }
0
244,245
GF_Err proj_type_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ProjectionTypeBox *ptr = (GF_ProjectionTypeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->type==GF_ISOM_BOX_TYPE_CBMP) { gf_bs_write_u32(bs, ptr->layout); gf_bs_write_u32(bs, ptr->padding); } else if (ptr->type==GF_ISOM_BOX_TYPE_EQUI) { gf_bs_write_u32(bs, ptr->bounds_top); gf_bs_write_u32(bs, ptr->bounds_bottom); gf_bs_write_u32(bs, ptr->bounds_left); gf_bs_write_u32(bs, ptr->bounds_right); } else { gf_bs_write_u32(bs, ptr->crc); gf_bs_write_u32(bs, ptr->encoding_4cc); } return GF_OK; }
0
225,852
GF_Box *trgt_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackGroupTypeBox, GF_ISOM_BOX_TYPE_TRGT); return (GF_Box *)tmp;
0
310,065
usage(void) { static const char *msg[] = { "Usage: dots_termcap [options]" ,"" ,"Options:" ," -T TERM override $TERM" #if HAVE_USE_ENV ," -e allow environment $LINES / $COLUMNS" #endif ," -f use tigetnum rather than <term.h> mapping" ," -m SIZE set margin (default: 2)" ," -r SECS self-interrupt/exit after specified number of seconds" ," -s MSECS delay 1% of the time (default: 1 msecs)" }; size_t n; for (n = 0; n < SIZEOF(msg); n++) fprintf(stderr, "%s\n", msg[n]); ExitProgram(EXIT_FAILURE); }
0
294,724
f_kind_of_p(VALUE x, VALUE c) { return rb_obj_is_kind_of(x, c); }
0
229,312
cql_server::connection::process_on_shard(::shared_ptr<messages::result_message::bounce_to_shard> bounce_msg, uint16_t stream, fragmented_temporary_buffer::istream is, service::client_state& cs, service_permit permit, tracing::trace_state_ptr trace_state, Process process_fn) { return _server.container().invoke_on(*bounce_msg->move_to_shard(), _server._config.bounce_request_smp_service_group, [this, is = std::move(is), cs = cs.move_to_other_shard(), stream, permit = std::move(permit), process_fn, gt = tracing::global_trace_state_ptr(std::move(trace_state)), cached_vals = std::move(bounce_msg->take_cached_pk_function_calls())] (cql_server& server) { service::client_state client_state = cs.get(); return do_with(bytes_ostream(), std::move(client_state), std::move(cached_vals), [this, &server, is = std::move(is), stream, process_fn, trace_state = tracing::trace_state_ptr(gt)] (bytes_ostream& linearization_buffer, service::client_state& client_state, cql3::computed_function_values& cached_vals) mutable { request_reader in(is, linearization_buffer); return process_fn(client_state, server._query_processor, in, stream, _version, _cql_serialization_format, /* FIXME */empty_service_permit(), std::move(trace_state), false, std::move(cached_vals)).then([] (auto msg) { // result here has to be foreign ptr return std::get<cql_server::result_with_foreign_response_ptr>(std::move(msg)); }); }); }); }
0
274,692
callbacks_drawingarea_motion_notify_event (GtkWidget *widget, GdkEventMotion *event) { int x, y; GdkModifierType state; if (event->is_hint) gdk_window_get_pointer (event->window, &x, &y, &state); else { x = event->x; y = event->y; state = event->state; } switch (screen.state) { case IN_MOVE: { if (screen.last_x != 0 || screen.last_y != 0) { /* Move pixmap to get a snappier feel of movement */ screen.off_x += x - screen.last_x; screen.off_y += y - screen.last_y; } screenRenderInfo.lowerLeftX -= ((x - screen.last_x) / screenRenderInfo.scaleFactorX); screenRenderInfo.lowerLeftY += ((y - screen.last_y) / screenRenderInfo.scaleFactorY); callbacks_force_expose_event_for_screen (); callbacks_update_scrollbar_positions (); screen.last_x = x; screen.last_y = y; break; } case IN_ZOOM_OUTLINE: { if (screen.last_x || screen.last_y) render_draw_zoom_outline(screen.centered_outline_zoom); screen.last_x = x; screen.last_y = y; render_draw_zoom_outline(screen.centered_outline_zoom); break; } case IN_MEASURE: { /* clear the previous drawn line by drawing over it */ render_toggle_measure_line(); callbacks_screen2board(&(screen.measure_stop_x), &(screen.measure_stop_y), x, y); /* screen.last_[xy] are updated to move the ruler pointers */ screen.last_x = x; screen.last_y = y; /* draw the new line and write the new distance */ render_draw_measure_distance(); break; } case IN_SELECTION_DRAG: { if (screen.last_x || screen.last_y) render_draw_selection_box_outline(); screen.last_x = x; screen.last_y = y; render_draw_selection_box_outline(); break; } default: screen.last_x = x; screen.last_y = y; break; } callbacks_update_statusbar_coordinates (x, y); callbacks_update_ruler_pointers (); return TRUE; } /* motion_notify_event */
0
205,736
static inline void fuse_make_bad(struct inode *inode) { set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state); }
1
274,752
int ntfs_attr_map_whole_runlist(ntfs_attr *na) { VCN next_vcn, last_vcn, highest_vcn; ntfs_attr_search_ctx *ctx; ntfs_volume *vol = na->ni->vol; ATTR_RECORD *a; int ret = -1; int not_mapped; ntfs_log_enter("Entering for inode %llu, attr 0x%x.\n", (unsigned long long)na->ni->mft_no, le32_to_cpu(na->type)); /* avoid multiple full runlist mappings */ if (NAttrFullyMapped(na)) { ret = 0; goto out; } ctx = ntfs_attr_get_search_ctx(na->ni, NULL); if (!ctx) goto out; /* Map all attribute extents one by one. */ next_vcn = last_vcn = highest_vcn = 0; a = NULL; while (1) { runlist_element *rl; not_mapped = 0; if (ntfs_rl_vcn_to_lcn(na->rl, next_vcn) == LCN_RL_NOT_MAPPED) not_mapped = 1; if (ntfs_attr_lookup(na->type, na->name, na->name_len, CASE_SENSITIVE, next_vcn, NULL, 0, ctx)) break; a = ctx->attr; if (not_mapped) { /* Decode the runlist. */ rl = ntfs_mapping_pairs_decompress(na->ni->vol, a, na->rl); if (!rl) goto err_out; na->rl = rl; } /* Are we in the first extent? */ if (!next_vcn) { if (a->lowest_vcn) { errno = EIO; ntfs_log_perror("First extent of inode %llu " "attribute has non-zero lowest_vcn", (unsigned long long)na->ni->mft_no); goto err_out; } /* Get the last vcn in the attribute. */ last_vcn = sle64_to_cpu(a->allocated_size) >> vol->cluster_size_bits; } /* Get the lowest vcn for the next extent. */ highest_vcn = sle64_to_cpu(a->highest_vcn); next_vcn = highest_vcn + 1; /* Only one extent or error, which we catch below. */ if (next_vcn <= 0) { errno = ENOENT; break; } /* Avoid endless loops due to corruption. 
*/ if (next_vcn < sle64_to_cpu(a->lowest_vcn)) { errno = EIO; ntfs_log_perror("Inode %llu has corrupt attribute list", (unsigned long long)na->ni->mft_no); goto err_out; } } if (!a) { ntfs_log_perror("Couldn't find attribute for runlist mapping"); goto err_out; } /* * Cannot check highest_vcn when the last runlist has * been modified earlier, as runlists and sizes may be * updated without highest_vcn being in sync, when * HOLES_DELAY is used */ if (not_mapped && highest_vcn && highest_vcn != last_vcn - 1) { errno = EIO; ntfs_log_perror("Failed to load full runlist: inode: %llu " "highest_vcn: 0x%llx last_vcn: 0x%llx", (unsigned long long)na->ni->mft_no, (long long)highest_vcn, (long long)last_vcn); goto err_out; } if (errno == ENOENT) { NAttrSetFullyMapped(na); ret = 0; } err_out: ntfs_attr_put_search_ctx(ctx); out: ntfs_log_leave("\n"); return ret; }
0
195,340
void Compute(OpKernelContext *ctx) override { const Tensor *indices_t, *values_t, *shape_t, *dense_t; OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t)); OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t)); OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t)); OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t)); // Validations. OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()), errors::InvalidArgument( "Input sp_indices should be a matrix but received shape: ", indices_t->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values_t->shape()) && TensorShapeUtils::IsVector(shape_t->shape()), errors::InvalidArgument( "Inputs sp_values and sp_shape should be vectors " "but received shapes: ", values_t->shape().DebugString(), " and ", shape_t->shape().DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsVector(shape_t->shape()), errors::InvalidArgument("Input sp_shape must be a vector. Got: ", shape_t->shape().DebugString())); OP_REQUIRES( ctx, values_t->dim_size(0) == indices_t->dim_size(0), errors::InvalidArgument( "The first dimension of values and indices should match. (", values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")")); OP_REQUIRES( ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1), errors::InvalidArgument( "Number of dimensions must match second dimension of indices. ", "Got ", shape_t->shape().dim_size(0), " dimensions, indices shape: ", indices_t->shape().DebugString())); OP_REQUIRES(ctx, shape_t->NumElements() > 0, errors::InvalidArgument( "The shape argument requires at least one element.")); const auto indices_mat = indices_t->matrix<int64_t>(); const auto shape_vec = shape_t->vec<int64_t>(); const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec)); const auto rhs_dims = BCast::FromShape(dense_t->shape()); BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims. 
// True iff (size(lhs) >= size(rhs)) and all dims in lhs is greater or equal // to dims in rhs (from right to left). auto VecGreaterEq = [](ArraySlice<int64_t> lhs, ArraySlice<int64_t> rhs) { if (lhs.size() < rhs.size()) return false; for (size_t i = 0; i < rhs.size(); ++i) { if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false; } return true; }; OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(), errors::InvalidArgument( "SparseDenseBinaryOpShared broadcasts dense to sparse " "only; got incompatible shapes: [", absl::StrJoin(lhs_dims, ","), "] vs. [", absl::StrJoin(rhs_dims, ","), "]")); Tensor *output_values = nullptr; Tensor dense_gathered; const int64_t nnz = indices_t->dim_size(0); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({nnz}), &output_values)); OP_REQUIRES_OK( ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}), &dense_gathered)); bool op_is_div = false; if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) { op_is_div = true; } // Pulls relevant entries from the dense side, with reshape and broadcasting // *of the dense side* taken into account. Use a TensorRef to avoid blowing // up memory. // // We can directly use the sparse indices to look up dense side, because // "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims". 
auto dense_gathered_flat = dense_gathered.flat<T>(); const int ndims = lhs_dims.size(); switch (ndims) { #define CASE(NDIM) \ case NDIM: { \ TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \ dense_t->shaped<T, NDIM>(b.y_reshape()) \ .broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \ Eigen::array<Eigen::DenseIndex, NDIM> idx; \ bool indices_valid = true; \ for (int i = 0; i < nnz; ++i) { \ for (int d = 0; d < NDIM; ++d) { \ idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \ if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \ indices_valid = false; \ } \ } \ OP_REQUIRES( \ ctx, indices_valid, \ errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \ "dense side with broadcasted shape")); \ dense_gathered_flat(i) = rhs_ref.coeff(idx); \ if (op_is_div) { \ OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \ errors::InvalidArgument( \ "SparseDenseCwiseDiv cannot divide by zero," \ "but input dense tensor contains zero ")); \ } \ } \ break; \ } CASE(1); CASE(2); CASE(3); CASE(4); CASE(5); default: OP_REQUIRES( ctx, false, errors::InvalidArgument("Only tensors with ranks between 1 and 5 " "are currently supported. Tensor rank: ", ndims)); #undef CASE } output_values->flat<T>().device(ctx->eigen_device<Device>()) = values_t->flat<T>().binaryExpr(dense_gathered_flat, typename Functor::func()); }
1
513,080
bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); }
0
336,130
static void __net_exit ip6gre_exit_net(struct net *net) { LIST_HEAD(list); rtnl_lock(); ip6gre_destroy_tunnels(net, &list); unregister_netdevice_many(&list); rtnl_unlock(); }
0
275,510
njs_vm_memory_pool(njs_vm_t *vm) { return vm->mem_pool; }
0
195,091
llvm::Optional<Value> simplifyBroadcast(ShapeComponentAnalysis& analysis, ValueRange shapes, Location loc, OpBuilder* builder) { // First find the input shape with the largest rank. SmallVector<ArrayRef<ShapeComponentAnalysis::SymbolicExpr>> shapes_found; size_t maxRank = 0; for (const auto &shape : llvm::enumerate(shapes)) { auto found_shape = analysis.GetValueInfo(shape.value()); if (!found_shape) return {}; shapes_found.push_back(*found_shape); maxRank = std::max(maxRank, found_shape->size()); } SmallVector<const ShapeComponentAnalysis::SymbolicExpr*> joined_dimensions( maxRank); SmallVector<std::pair<Value, int64_t>> shape_and_rank_for_dim(maxRank); for (const auto &shape : llvm::enumerate(shapes_found)) { for (const auto &dim : llvm::enumerate(llvm::reverse(shape.value()))) { // 1 dimensions don't contribute to the final result. if (dim.value().isConstant(1)) continue; // If it's not a 1 dimension it will be present in the result. Remember // where it came from. auto index = maxRank - dim.index() - 1; if (!joined_dimensions[index]) { joined_dimensions[index] = &dim.value(); shape_and_rank_for_dim[index] = std::make_pair(shapes[shape.index()], shape.value().size()); continue; } // Bail if the dimensions are neither equal nor 1. if (*joined_dimensions[index] != dim.value()) return {}; } } // If the output is the same as one of the inputs just return that. if (llvm::is_splat(shape_and_rank_for_dim) && shape_and_rank_for_dim[0].first) { return shape_and_rank_for_dim[0].first; } // Otherwise rematerialize the shape from the pieces we have. SmallVector<Value> elements; for (int i = 0; i != maxRank; ++i) { // 1 dimensions are filtered above, recreate the constant. 
if (!shape_and_rank_for_dim[i].first) { auto one = builder->getIntegerAttr( shapes[0].getType().cast<RankedTensorType>().getElementType(), 1); elements.push_back(builder->create<ConstantOp>(loc, one)); continue; } // Extract from one of the shapes, accounting for the reverse indexing // performed by broadcast. Value index = builder->create<ConstantIndexOp>( loc, i - maxRank + shape_and_rank_for_dim[i].second); elements.push_back(builder->create<tensor::ExtractOp>( loc, shape_and_rank_for_dim[i].first, index)); } return Value(builder->create<tensor::FromElementsOp>(loc, elements)); }
1
364,756
tagstack_push_item( win_T *wp, char_u *tagname, int cur_fnum, int cur_match, pos_T mark, int fnum, char_u *user_data) { taggy_T *tagstack = wp->w_tagstack; int idx = wp->w_tagstacklen; // top of the stack // if the tagstack is full: remove the oldest entry if (idx >= TAGSTACKSIZE) { tagstack_shift(wp); idx = TAGSTACKSIZE - 1; } wp->w_tagstacklen++; tagstack[idx].tagname = tagname; tagstack[idx].cur_fnum = cur_fnum; tagstack[idx].cur_match = cur_match; if (tagstack[idx].cur_match < 0) tagstack[idx].cur_match = 0; tagstack[idx].fmark.mark = mark; tagstack[idx].fmark.fnum = fnum; tagstack[idx].user_data = user_data; }
0
506,440
static struct auth_request *mech_rpa_auth_new(void) { struct rpa_auth_request *request; pool_t pool; pool = pool_alloconly_create(MEMPOOL_GROWING"rpa_auth_request", 2048); request = p_new(pool, struct rpa_auth_request, 1); request->pool = pool; request->phase = 0; request->auth_request.pool = pool; return &request->auth_request; }
0
463,197
static int annotation_set_todb(annotate_state_t *state, struct annotate_entry_list *entry, int maywrite) { int r = 0; if (entry->have_shared) r = write_entry(state->mailbox, state->uid, entry->name, "", &entry->shared, 0, state->silent, NULL, maywrite); if (!r && entry->have_priv) r = write_entry(state->mailbox, state->uid, entry->name, state->userid, &entry->priv, 0, state->silent, NULL, maywrite); return r; }
0
377,482
void r_coresym_cache_element_free(RCoreSymCacheElement *element) { if (!element) { return; } size_t i; if (element->segments) { for (i = 0; i < element->hdr->n_segments; i++) { r_coresym_cache_element_segment_fini (&element->segments[i]); } } if (element->sections) { for (i = 0; i < element->hdr->n_sections; i++) { r_coresym_cache_element_section_fini (&element->sections[i]); } } if (element->symbols) { for (i = 0; i < element->hdr->n_symbols; i++) { r_coresym_cache_element_symbol_fini (&element->symbols[i]); } } if (element->lined_symbols) { for (i = 0; i < element->hdr->n_lined_symbols; i++) { r_coresym_cache_element_lined_symbol_fini (&element->lined_symbols[i]); } } if (element->line_info) { for (i = 0; i < element->hdr->n_line_info; i++) { r_coresym_cache_element_line_info_fini (&element->line_info[i]); } } free (element->segments); free (element->sections); free (element->symbols); free (element->lined_symbols); free (element->line_info); free (element->hdr); free (element->file_name); free (element->binary_version); free (element); }
0
484,729
void mobi_buffer_resize(MOBIBuffer *buf, const size_t newlen) { unsigned char *tmp = realloc(buf->data, newlen); if (tmp == NULL) { debug_print("%s", "Buffer allocation failed\n"); buf->error = MOBI_MALLOC_FAILED; return; } buf->data = tmp; buf->maxlen = newlen; if (buf->offset >= newlen) { buf->offset = newlen - 1; } debug_print("Buffer successfully resized to %zu\n", newlen); buf->error = MOBI_SUCCESS; }
0
385,925
static inline int check_sticky(struct inode *dir, struct inode *inode) { kuid_t fsuid = current_fsuid(); if (!(dir->i_mode & S_ISVTX)) return 0; if (uid_eq(inode->i_uid, fsuid)) return 0; if (uid_eq(dir->i_uid, fsuid)) return 0; return !inode_capable(inode, CAP_FOWNER); }
0
413,862
void LinkResolver::resolve_handle_call(CallInfo& result, const LinkInfo& link_info, TRAPS) { // JSR 292: this must be an implicitly generated method MethodHandle.invokeExact(*...) or similar Klass* resolved_klass = link_info.resolved_klass(); assert(resolved_klass == vmClasses::MethodHandle_klass() || resolved_klass == vmClasses::VarHandle_klass(), ""); assert(MethodHandles::is_signature_polymorphic_name(link_info.name()), ""); Handle resolved_appendix; Method* m = lookup_polymorphic_method(link_info, &resolved_appendix, CHECK); methodHandle resolved_method(THREAD, m); if (link_info.check_access()) { Symbol* name = link_info.name(); vmIntrinsics::ID iid = MethodHandles::signature_polymorphic_name_id(name); if (MethodHandles::is_signature_polymorphic_intrinsic(iid)) { // Check if method can be accessed by the referring class. // MH.linkTo* invocations are not rewritten to invokehandle. assert(iid == vmIntrinsicID::_invokeBasic, "%s", vmIntrinsics::name_at(iid)); Klass* current_klass = link_info.current_klass(); assert(current_klass != NULL , "current_klass should not be null"); check_method_accessability(current_klass, resolved_klass, resolved_method->method_holder(), resolved_method, CHECK); } else { // Java code is free to arbitrarily link signature-polymorphic invokers. assert(iid == vmIntrinsics::_invokeGeneric, "not an invoker: %s", vmIntrinsics::name_at(iid)); assert(MethodHandles::is_signature_polymorphic_public_name(resolved_klass, name), "not public"); } } result.set_handle(resolved_klass, resolved_method, resolved_appendix, CHECK); }
0
281,053
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst, const void *daddr) { const struct dst_entry *path = dst->path; for (; dst != path; dst = dst->child) { const struct xfrm_state *xfrm = dst->xfrm; if (xfrm->props.mode == XFRM_MODE_TRANSPORT) continue; if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) daddr = xfrm->coaddr; else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) daddr = &xfrm->id.daddr; } return daddr; }
0
293,525
PJ_DEF(void) pj_cis_add_num(pj_cis_t *cis) { pj_cis_add_range( cis, '0', '9'+1); }
0
225,118
uint64 RepeatedAttrDefHash( const protobuf::RepeatedPtrField<OpDef::AttrDef>& a) { // Insert AttrDefs into map to deterministically sort by name std::map<string, const OpDef::AttrDef*> a_set; for (const OpDef::AttrDef& def : a) { a_set[def.name()] = &def; } // Iterate and combines hashes of keys and values uint64 h = 0xDECAFCAFFE; for (const auto& pair : a_set) { h = Hash64(pair.first.data(), pair.first.size(), h); h = Hash64Combine(AttrDefHash(*pair.second), h); } return h; }
0
383,331
gdImageColorDeallocate (gdImagePtr im, int color) { if (im->trueColor) { return; } /* Mark it open. */ im->open[color] = 1; }
0
205,734
static pyc_object *get_complex_object(RzBinPycObj *pyc, RzBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; ut32 n1 = 0; ut32 n2 = 0; ret = RZ_NEW0(pyc_object); if (!ret) { return NULL; } if ((pyc->magic_int & 0xffff) <= 62061) { n1 = get_ut8(buffer, &error); } else { n1 = get_st32(buffer, &error); } if (error) { free(ret); return NULL; } ut8 *s1 = malloc(n1 + 1); if (!s1) { return NULL; } /* object contain string representation of the number */ size = rz_buf_read(buffer, s1, n1); if (size != n1) { RZ_FREE(s1); RZ_FREE(ret); return NULL; } s1[n1] = '\0'; if ((pyc->magic_int & 0xffff) <= 62061) { n2 = get_ut8(buffer, &error); } else n2 = get_st32(buffer, &error); if (error) { return NULL; } ut8 *s2 = malloc(n2 + 1); if (!s2) { return NULL; } /* object contain string representation of the number */ size = rz_buf_read(buffer, s2, n2); if (size != n2) { RZ_FREE(s1); RZ_FREE(s2); RZ_FREE(ret); return NULL; } s2[n2] = '\0'; ret->type = TYPE_COMPLEX; ret->data = rz_str_newf("%s+%sj", s1, s2); RZ_FREE(s1); RZ_FREE(s2); if (!ret->data) { RZ_FREE(ret); return NULL; } return ret; }
1
398,539
static inline RzBinDwarfLocRange *create_loc_range(ut64 start, ut64 end, RzBinDwarfBlock *block) { RzBinDwarfLocRange *range = RZ_NEW0(RzBinDwarfLocRange); if (range) { range->start = start; range->end = end; range->expression = block; } return range; }
0
223,424
static void compile_assert_backtrackingpath(compiler_common *common, struct backtrack_common *current) { DEFINE_COMPILER; PCRE2_SPTR cc = current->cc; PCRE2_UCHAR bra = OP_BRA; struct sljit_jump *brajump = NULL; SLJIT_ASSERT(*cc != OP_BRAMINZERO); if (*cc == OP_BRAZERO) { bra = *cc; cc++; } if (bra == OP_BRAZERO) { SLJIT_ASSERT(current->topbacktracks == NULL); OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); } if (CURRENT_AS(assert_backtrack)->framesize < 0) { set_jumps(current->topbacktracks, LABEL()); if (bra == OP_BRAZERO) { OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(assert_backtrack)->matchingpath); free_stack(common, 1); } return; } if (bra == OP_BRAZERO) { if (*cc == OP_ASSERT_NOT || *cc == OP_ASSERTBACK_NOT) { OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(assert_backtrack)->matchingpath); free_stack(common, 1); return; } free_stack(common, 1); brajump = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); } if (*cc == OP_ASSERT || *cc == OP_ASSERTBACK) { OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(assert_backtrack)->private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(-2)); OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (CURRENT_AS(assert_backtrack)->framesize - 1) * sizeof(sljit_sw)); OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(assert_backtrack)->private_data_ptr, TMP1, 0); set_jumps(current->topbacktracks, LABEL()); } else set_jumps(current->topbacktracks, LABEL()); if (bra == OP_BRAZERO) { /* We know there is enough place on the stack. */ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); JUMPTO(SLJIT_JUMP, CURRENT_AS(assert_backtrack)->matchingpath); JUMPHERE(brajump); } }
0
256,413
static void parse_rtcp_fb(pjmedia_rtcp_session *sess, const void *pkt, pj_size_t size) { unsigned cnt = 1; pjmedia_rtcp_fb_nack nack[1]; //pjmedia_rtcp_fb_sli sli[1]; //pjmedia_rtcp_fb_rpsi rpsi; pjmedia_event ev; pj_timestamp ts_now; pj_get_timestamp(&ts_now); if (pjmedia_rtcp_fb_parse_nack(pkt, size, &cnt, nack)==PJ_SUCCESS) { pjmedia_event_init(&ev, PJMEDIA_EVENT_RX_RTCP_FB, &ts_now, sess); ev.data.rx_rtcp_fb.cap.type = PJMEDIA_RTCP_FB_NACK; ev.data.rx_rtcp_fb.msg.nack = nack[0]; pjmedia_event_publish(NULL, sess, &ev, 0); } else if (pjmedia_rtcp_fb_parse_pli(pkt, size)==PJ_SUCCESS) { pjmedia_event_init(&ev, PJMEDIA_EVENT_RX_RTCP_FB, &ts_now, sess); ev.data.rx_rtcp_fb.cap.type = PJMEDIA_RTCP_FB_NACK; pj_strset2(&ev.data.rx_rtcp_fb.cap.param, (char*)"pli"); pjmedia_event_publish(NULL, sess, &ev, 0); /* For other FB type implementations later } else if (pjmedia_rtcp_fb_parse_sli(pkt, size, &cnt, sli)==PJ_SUCCESS) { } else if (pjmedia_rtcp_fb_parse_rpsi(pkt, size, &rpsi)==PJ_SUCCESS) { */ } else { /* Ignore unknown RTCP Feedback */ TRACE_((sess->name, "Received unknown RTCP feedback")); } }
0
259,252
static int mov_read_trak(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; int ret; st = avformat_new_stream(c->fc, NULL); if (!st) return AVERROR(ENOMEM); st->id = -1; sc = av_mallocz(sizeof(MOVStreamContext)); if (!sc) return AVERROR(ENOMEM); st->priv_data = sc; st->codecpar->codec_type = AVMEDIA_TYPE_DATA; sc->ffindex = st->index; c->trak_index = st->index; if ((ret = mov_read_default(c, pb, atom)) < 0) return ret; c->trak_index = -1; // Here stsc refers to a chunk not described in stco. This is technically invalid, // but we can overlook it (clearing stsc) whenever stts_count == 0 (indicating no samples). if (!sc->chunk_count && !sc->stts_count && sc->stsc_count) { sc->stsc_count = 0; av_freep(&sc->stsc_data); } /* sanity checks */ if ((sc->chunk_count && (!sc->stts_count || !sc->stsc_count || (!sc->sample_size && !sc->sample_count))) || (!sc->chunk_count && sc->sample_count)) { av_log(c->fc, AV_LOG_ERROR, "stream %d, missing mandatory atoms, broken header\n", st->index); return 0; } if (sc->stsc_count && sc->stsc_data[ sc->stsc_count - 1 ].first > sc->chunk_count) { av_log(c->fc, AV_LOG_ERROR, "stream %d, contradictionary STSC and STCO\n", st->index); return AVERROR_INVALIDDATA; } fix_timescale(c, sc); avpriv_set_pts_info(st, 64, 1, sc->time_scale); mov_build_index(c, st); if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) { MOVDref *dref = &sc->drefs[sc->dref_id - 1]; if (c->enable_drefs) { if (mov_open_dref(c, &sc->pb, c->fc->url, dref) < 0) av_log(c->fc, AV_LOG_ERROR, "stream %d, error opening alias: path='%s', dir='%s', " "filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\n", st->index, dref->path, dref->dir, dref->filename, dref->volume, dref->nlvl_from, dref->nlvl_to); } else { av_log(c->fc, AV_LOG_WARNING, "Skipped opening external track: " "stream %d, alias: path='%s', dir='%s', " "filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d." 
"Set enable_drefs to allow this.\n", st->index, dref->path, dref->dir, dref->filename, dref->volume, dref->nlvl_from, dref->nlvl_to); } } else { sc->pb = c->fc->pb; sc->pb_is_copied = 1; } if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { if (!st->sample_aspect_ratio.num && st->codecpar->width && st->codecpar->height && sc->height && sc->width && (st->codecpar->width != sc->width || st->codecpar->height != sc->height)) { st->sample_aspect_ratio = av_d2q(((double)st->codecpar->height * sc->width) / ((double)st->codecpar->width * sc->height), INT_MAX); } #if FF_API_R_FRAME_RATE if (sc->stts_count == 1 || (sc->stts_count == 2 && sc->stts_data[1].count == 1)) av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, sc->time_scale, sc->stts_data[0].duration, INT_MAX); #endif } // done for ai5q, ai52, ai55, ai1q, ai12 and ai15. if (!st->codecpar->extradata_size && st->codecpar->codec_id == AV_CODEC_ID_H264 && TAG_IS_AVCI(st->codecpar->codec_tag)) { ret = ff_generate_avci_extradata(st); if (ret < 0) return ret; } switch (st->codecpar->codec_id) { #if CONFIG_H261_DECODER case AV_CODEC_ID_H261: #endif #if CONFIG_H263_DECODER case AV_CODEC_ID_H263: #endif #if CONFIG_MPEG4_DECODER case AV_CODEC_ID_MPEG4: #endif st->codecpar->width = 0; /* let decoder init width/height */ st->codecpar->height= 0; break; } // If the duration of the mp3 packets is not constant, then they could need a parser if (st->codecpar->codec_id == AV_CODEC_ID_MP3 && sc->stts_count > 3 && sc->stts_count*10 > st->nb_frames && sc->time_scale == st->codecpar->sample_rate) { ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL; } /* Do not need those anymore. */ av_freep(&sc->chunk_offsets); av_freep(&sc->sample_sizes); av_freep(&sc->keyframes); av_freep(&sc->stts_data); av_freep(&sc->stps_data); av_freep(&sc->elst_data); av_freep(&sc->rap_group); av_freep(&sc->sync_group); av_freep(&sc->sgpd_sync); return 0; }
0
390,622
ProcXkbSetMap(ClientPtr client) { DeviceIntPtr dev; char * tmp; int rc; REQUEST(xkbSetMapReq); REQUEST_AT_LEAST_SIZE(xkbSetMapReq); if (!(client->xkbClientFlags&_XkbClientInitialized)) return BadAccess; CHK_KBD_DEVICE(dev, stuff->deviceSpec, client, DixManageAccess); CHK_MASK_LEGAL(0x01,stuff->present,XkbAllMapComponentsMask); tmp = (char *)&stuff[1]; /* Check if we can to the SetMap on the requested device. If this succeeds, do the same thing for all extension devices (if needed). If any of them fails, fail. */ rc = _XkbSetMapChecks(client, dev, stuff, tmp); if (rc != Success) return rc; if (stuff->deviceSpec == XkbUseCoreKbd) { DeviceIntPtr other; for (other = inputInfo.devices; other; other = other->next) { if ((other != dev) && other->key && !other->isMaster && (other->u.master == dev)) { rc = XaceHook(XACE_DEVICE_ACCESS, client, other, DixManageAccess); if (rc == Success) { rc = _XkbSetMapChecks(client, other, stuff, tmp); if (rc != Success) return rc; } } } } /* We know now that we will succed with the SetMap. In theory anyway. */ rc = _XkbSetMap(client, dev, stuff, tmp); if (rc != Success) return rc; if (stuff->deviceSpec == XkbUseCoreKbd) { DeviceIntPtr other; for (other = inputInfo.devices; other; other = other->next) { if ((other != dev) && other->key && !other->isMaster && (other->u.master == dev)) { rc = XaceHook(XACE_DEVICE_ACCESS, client, other, DixManageAccess); if (rc == Success) _XkbSetMap(client, other, stuff, tmp); /* ignore rc. if the SetMap failed although the check above reported true there isn't much we can do. we still need to set all other devices, hoping that at least they stay in sync. */ } } } return client->noClientException; }
0
294,369
d_lite_zero(VALUE x) { return INT2FIX(0); }
0
317,144
static void smack_task_getsecid_subj(struct task_struct *p, u32 *secid) { struct smack_known *skp = smk_of_task_struct_subj(p); *secid = skp->smk_secid; }
0
438,671
static int rpmsg_probe(struct virtio_device *vdev) { vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done }; static const char * const names[] = { "input", "output" }; struct virtqueue *vqs[2]; struct virtproc_info *vrp; struct virtio_rpmsg_channel *vch = NULL; struct rpmsg_device *rpdev_ns, *rpdev_ctrl; void *bufs_va; int err = 0, i; size_t total_buf_space; bool notify; vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); if (!vrp) return -ENOMEM; vrp->vdev = vdev; idr_init(&vrp->endpoints); mutex_init(&vrp->endpoints_lock); mutex_init(&vrp->tx_lock); init_waitqueue_head(&vrp->sendq); /* We expect two virtqueues, rx and tx (and in this order) */ err = virtio_find_vqs(vdev, 2, vqs, vq_cbs, names, NULL); if (err) goto free_vrp; vrp->rvq = vqs[0]; vrp->svq = vqs[1]; /* we expect symmetric tx/rx vrings */ WARN_ON(virtqueue_get_vring_size(vrp->rvq) != virtqueue_get_vring_size(vrp->svq)); /* we need less buffers if vrings are small */ if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2) vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2; else vrp->num_bufs = MAX_RPMSG_NUM_BUFS; vrp->buf_size = MAX_RPMSG_BUF_SIZE; total_buf_space = vrp->num_bufs * vrp->buf_size; /* allocate coherent memory for the buffers */ bufs_va = dma_alloc_coherent(vdev->dev.parent, total_buf_space, &vrp->bufs_dma, GFP_KERNEL); if (!bufs_va) { err = -ENOMEM; goto vqs_del; } dev_dbg(&vdev->dev, "buffers: va %pK, dma %pad\n", bufs_va, &vrp->bufs_dma); /* half of the buffers is dedicated for RX */ vrp->rbufs = bufs_va; /* and half is dedicated for TX */ vrp->sbufs = bufs_va + total_buf_space / 2; /* set up the receive buffers */ for (i = 0; i < vrp->num_bufs / 2; i++) { struct scatterlist sg; void *cpu_addr = vrp->rbufs + i * vrp->buf_size; rpmsg_sg_init(&sg, cpu_addr, vrp->buf_size); err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr, GFP_KERNEL); WARN_ON(err); /* sanity check; this can't really happen */ } /* suppress "tx-complete" interrupts */ virtqueue_disable_cb(vrp->svq); 
vdev->priv = vrp; rpdev_ctrl = rpmsg_virtio_add_ctrl_dev(vdev); if (IS_ERR(rpdev_ctrl)) { err = PTR_ERR(rpdev_ctrl); goto free_coherent; } /* if supported by the remote processor, enable the name service */ if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) { vch = kzalloc(sizeof(*vch), GFP_KERNEL); if (!vch) { err = -ENOMEM; goto free_ctrldev; } /* Link the channel to our vrp */ vch->vrp = vrp; /* Assign public information to the rpmsg_device */ rpdev_ns = &vch->rpdev; rpdev_ns->ops = &virtio_rpmsg_ops; rpdev_ns->little_endian = virtio_is_little_endian(vrp->vdev); rpdev_ns->dev.parent = &vrp->vdev->dev; rpdev_ns->dev.release = virtio_rpmsg_release_device; err = rpmsg_ns_register_device(rpdev_ns); if (err) /* vch will be free in virtio_rpmsg_release_device() */ goto free_ctrldev; } /* * Prepare to kick but don't notify yet - we can't do this before * device is ready. */ notify = virtqueue_kick_prepare(vrp->rvq); /* From this point on, we can notify and get callbacks. */ virtio_device_ready(vdev); /* tell the remote processor it can start sending messages */ /* * this might be concurrent with callbacks, but we are only * doing notify, not a full kick here, so that's ok. */ if (notify) virtqueue_notify(vrp->rvq); dev_info(&vdev->dev, "rpmsg host is online\n"); return 0; free_ctrldev: rpmsg_virtio_del_ctrl_dev(rpdev_ctrl); free_coherent: dma_free_coherent(vdev->dev.parent, total_buf_space, bufs_va, vrp->bufs_dma); vqs_del: vdev->config->del_vqs(vrp->vdev); free_vrp: kfree(vrp); return err; }
0
238,598
static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi) { env->scratched_stack_slots |= 1ULL << spi; }
0
474,458
IsObjectPresent( TPMI_DH_OBJECT handle // IN: handle to be checked ) { UINT32 slotIndex = handle - TRANSIENT_FIRST; // Since the handle is just an index into the array that is zero based, any // handle value outsize of the range of: // TRANSIENT_FIRST -- (TRANSIENT_FIRST + MAX_LOADED_OBJECT - 1) // will now be greater than or equal to MAX_LOADED_OBJECTS if(slotIndex >= MAX_LOADED_OBJECTS) return FALSE; // Indicate if the slot is occupied return (s_objects[slotIndex].attributes.occupied == TRUE); }
0
440,876
LogWrite(int verb, const char *f, ...) { va_list args; va_start(args, f); LogVWrite(verb, f, args); va_end(args); }
0
301,379
static int vfswrap_statvfs(struct vfs_handle_struct *handle, const char *path, vfs_statvfs_struct *statbuf) { return sys_statvfs(path, statbuf); }
0
225,631
void proj_type_box_del(GF_Box *s) { gf_free(s);
0
421,398
const char *jsP_aststring(enum js_AstType type) { if (type < nelem(astname)-1) return astname[type]; return "<unknown>"; }
0
259,184
static int mov_read_schm(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; if (sc->pseudo_stream_id != 0) { av_log(c->fc, AV_LOG_ERROR, "schm boxes are only supported in first sample descriptor\n"); return AVERROR_PATCHWELCOME; } if (atom.size < 8) return AVERROR_INVALIDDATA; avio_rb32(pb); /* version and flags */ if (!sc->cenc.default_encrypted_sample) { sc->cenc.default_encrypted_sample = av_encryption_info_alloc(0, 16, 16); if (!sc->cenc.default_encrypted_sample) { return AVERROR(ENOMEM); } } sc->cenc.default_encrypted_sample->scheme = avio_rb32(pb); return 0; }
0
211,461
parse_cmd_address(exarg_T *eap, char **errormsg, int silent) { int address_count = 1; linenr_T lnum; int need_check_cursor = FALSE; int ret = FAIL; // Repeat for all ',' or ';' separated addresses. for (;;) { eap->line1 = eap->line2; eap->line2 = default_address(eap); eap->cmd = skipwhite(eap->cmd); lnum = get_address(eap, &eap->cmd, eap->addr_type, eap->skip, silent, eap->addr_count == 0, address_count++); if (eap->cmd == NULL) // error detected goto theend; if (lnum == MAXLNUM) { if (*eap->cmd == '%') // '%' - all lines { ++eap->cmd; switch (eap->addr_type) { case ADDR_LINES: case ADDR_OTHER: eap->line1 = 1; eap->line2 = curbuf->b_ml.ml_line_count; break; case ADDR_LOADED_BUFFERS: { buf_T *buf = firstbuf; while (buf->b_next != NULL && buf->b_ml.ml_mfp == NULL) buf = buf->b_next; eap->line1 = buf->b_fnum; buf = lastbuf; while (buf->b_prev != NULL && buf->b_ml.ml_mfp == NULL) buf = buf->b_prev; eap->line2 = buf->b_fnum; break; } case ADDR_BUFFERS: eap->line1 = firstbuf->b_fnum; eap->line2 = lastbuf->b_fnum; break; case ADDR_WINDOWS: case ADDR_TABS: if (IS_USER_CMDIDX(eap->cmdidx)) { eap->line1 = 1; eap->line2 = eap->addr_type == ADDR_WINDOWS ? LAST_WIN_NR : LAST_TAB_NR; } else { // there is no Vim command which uses '%' and // ADDR_WINDOWS or ADDR_TABS *errormsg = _(e_invalid_range); goto theend; } break; case ADDR_TABS_RELATIVE: case ADDR_UNSIGNED: case ADDR_QUICKFIX: *errormsg = _(e_invalid_range); goto theend; case ADDR_ARGUMENTS: if (ARGCOUNT == 0) eap->line1 = eap->line2 = 0; else { eap->line1 = 1; eap->line2 = ARGCOUNT; } break; case ADDR_QUICKFIX_VALID: #ifdef FEAT_QUICKFIX eap->line1 = 1; eap->line2 = qf_get_valid_size(eap); if (eap->line2 == 0) eap->line2 = 1; #endif break; case ADDR_NONE: // Will give an error later if a range is found. 
break; } ++eap->addr_count; } else if (*eap->cmd == '*' && vim_strchr(p_cpo, CPO_STAR) == NULL) { pos_T *fp; // '*' - visual area if (eap->addr_type != ADDR_LINES) { *errormsg = _(e_invalid_range); goto theend; } ++eap->cmd; if (!eap->skip) { fp = getmark('<', FALSE); if (check_mark(fp) == FAIL) goto theend; eap->line1 = fp->lnum; fp = getmark('>', FALSE); if (check_mark(fp) == FAIL) goto theend; eap->line2 = fp->lnum; ++eap->addr_count; } } } else eap->line2 = lnum; eap->addr_count++; if (*eap->cmd == ';') { if (!eap->skip) { curwin->w_cursor.lnum = eap->line2; // Don't leave the cursor on an illegal line or column, but do // accept zero as address, so 0;/PATTERN/ works correctly. // Check the cursor position before returning. if (eap->line2 > 0) check_cursor(); need_check_cursor = TRUE; } } else if (*eap->cmd != ',') break; ++eap->cmd; } // One address given: set start and end lines. if (eap->addr_count == 1) { eap->line1 = eap->line2; // ... but only implicit: really no address given if (lnum == MAXLNUM) eap->addr_count = 0; } ret = OK; theend: if (need_check_cursor) check_cursor(); return ret; }
1
430,417
static int __parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[], u64 *attrsp, bool log, bool nz) { const struct nlattr *nla; u64 attrs; int rem; attrs = *attrsp; nla_for_each_nested(nla, attr, rem) { u16 type = nla_type(nla); int expected_len; if (type > OVS_KEY_ATTR_MAX) { OVS_NLERR(log, "Key type %d is out of range max %d", type, OVS_KEY_ATTR_MAX); return -EINVAL; } if (type == OVS_KEY_ATTR_PACKET_TYPE || type == OVS_KEY_ATTR_ND_EXTENSIONS || type == OVS_KEY_ATTR_TUNNEL_INFO) { OVS_NLERR(log, "Key type %d is not supported", type); return -EINVAL; } if (attrs & (1ULL << type)) { OVS_NLERR(log, "Duplicate key (type %d).", type); return -EINVAL; } expected_len = ovs_key_lens[type].len; if (!check_attr_len(nla_len(nla), expected_len)) { OVS_NLERR(log, "Key %d has unexpected len %d expected %d", type, nla_len(nla), expected_len); return -EINVAL; } if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { attrs |= 1ULL << type; a[type] = nla; } } if (rem) { OVS_NLERR(log, "Message has %d unknown bytes.", rem); return -EINVAL; } *attrsp = attrs; return 0; }
0
300,822
static int tipc_release(struct socket *sock) { struct sock *sk = sock->sk; struct tipc_sock *tsk; /* * Exit if socket isn't fully initialized (occurs when a failed accept() * releases a pre-allocated child socket that was never used) */ if (sk == NULL) return 0; tsk = tipc_sk(sk); lock_sock(sk); trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " "); __tipc_shutdown(sock, TIPC_ERR_NO_PORT); sk->sk_shutdown = SHUTDOWN_MASK; tipc_sk_leave(tsk); tipc_sk_withdraw(tsk, NULL); __skb_queue_purge(&tsk->mc_method.deferredq); sk_stop_timer(sk, &sk->sk_timer); tipc_sk_remove(tsk); sock_orphan(sk); /* Reject any messages that accumulated in backlog queue */ release_sock(sk); tipc_dest_list_purge(&tsk->cong_links); tsk->cong_link_cnt = 0; call_rcu(&tsk->rcu, tipc_sk_callback); sock->sk = NULL; return 0; }
0