idx: int64
func: string
target: int64
523,222
void LEX::fix_first_select_number() { SELECT_LEX *first= first_select_lex(); if (first && first->select_number != 1) { uint num= first->select_number; for (SELECT_LEX *sel= all_selects_list; sel; sel= sel->next_select_in_list()) { if (sel->select_number < num) sel->select_number++; } first->select_number= 1; } }
0
344,158
static inline void preempt_conditional_cli(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_disable(); dec_preempt_count(); }
1
208,238
virtual ~InlineLoginUIOAuth2Delegate() {}
0
457,805
static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx, struct task_struct *task, struct files_struct *files) { bool ret; ret = io_uring_cancel_files(ctx, files); if (!files) { enum io_wq_cancel cret; cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true); if (cret != IO_WQ_CANCEL_NOTFOUND) ret = true; /* SQPOLL thread does its own polling */ if (!(ctx->flags & IORING_SETUP_SQPOLL)) { while (!list_empty_careful(&ctx->iopoll_list)) { io_iopoll_try_reap_events(ctx); ret = true; } } ret |= io_poll_remove_all(ctx, task); ret |= io_kill_timeouts(ctx, task); } return ret; }
0
405,573
read_children(struct archive_read *a, struct file_info *parent) { struct iso9660 *iso9660; const unsigned char *b, *p; struct file_info *multi; size_t step, skip_size; iso9660 = (struct iso9660 *)(a->format->data); /* flush any remaining bytes from the last round to ensure * we're positioned */ if (iso9660->entry_bytes_unconsumed) { __archive_read_consume(a, iso9660->entry_bytes_unconsumed); iso9660->entry_bytes_unconsumed = 0; } if (iso9660->current_position > parent->offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Ignoring out-of-order directory (%s) %jd > %jd", parent->name.s, (intmax_t)iso9660->current_position, (intmax_t)parent->offset); return (ARCHIVE_WARN); } if (parent->offset + parent->size > iso9660->volume_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Directory is beyond end-of-media: %s", parent->name.s); return (ARCHIVE_WARN); } if (iso9660->current_position < parent->offset) { int64_t skipsize; skipsize = parent->offset - iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = parent->offset; } step = (size_t)(((parent->size + iso9660->logical_block_size -1) / iso9660->logical_block_size) * iso9660->logical_block_size); b = __archive_read_ahead(a, step, NULL); if (b == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->current_position += step; multi = NULL; skip_size = step; while (step) { p = b; b += iso9660->logical_block_size; step -= iso9660->logical_block_size; for (; *p != 0 && p < b && p + *p <= b; p += *p) { struct file_info *child; /* N.B.: these special directory identifiers * are 8 bit "values" even on a * Joliet CD with UCS-2 (16bit) encoding. */ /* Skip '.' entry. */ if (*(p + DR_name_len_offset) == 1 && *(p + DR_name_offset) == '\0') continue; /* Skip '..' entry. */ if (*(p + DR_name_len_offset) == 1 && *(p + DR_name_offset) == '\001') continue; child = parse_file_info(a, parent, p, b - p); if (child == NULL) { __archive_read_consume(a, skip_size); return (ARCHIVE_FATAL); } if (child->cl_offset == 0 && (child->multi_extent || multi != NULL)) { struct content *con; if (multi == NULL) { multi = child; multi->contents.first = NULL; multi->contents.last = &(multi->contents.first); } con = malloc(sizeof(struct content)); if (con == NULL) { archive_set_error( &a->archive, ENOMEM, "No memory for multi extent"); __archive_read_consume(a, skip_size); return (ARCHIVE_FATAL); } con->offset = child->offset; con->size = child->size; con->next = NULL; *multi->contents.last = con; multi->contents.last = &(con->next); if (multi == child) { if (add_entry(a, iso9660, child) != ARCHIVE_OK) return (ARCHIVE_FATAL); } else { multi->size += child->size; if (!child->multi_extent) multi = NULL; } } else if (add_entry(a, iso9660, child) != ARCHIVE_OK) return (ARCHIVE_FATAL); } } __archive_read_consume(a, skip_size); /* Read data which recorded by RRIP "CE" extension. */ if (read_CE(a, iso9660) != ARCHIVE_OK) return (ARCHIVE_FATAL); return (ARCHIVE_OK); }
0
294,315
static int disk_events_set_dfl_poll_msecs(const char *val, const struct kernel_param *kp) { struct disk_events *ev; int ret; ret = param_set_ulong(val, kp); if (ret < 0) return ret; mutex_lock(&disk_events_mutex); list_for_each_entry(ev, &disk_events, node) disk_flush_events(ev->disk, 0); mutex_unlock(&disk_events_mutex); return 0; }
0
140,543
Nick(User* source, const std::string& newnick) : ClientProtocol::Message("NICK", source) { PushParamRef(newnick); }
0
37,836
static int parser_errcb(struct libmnt_table *tb __attribute__ ((__unused__)), const char *filename, int line) { warnx(_("%s: parse error at line %d -- ignored"), filename, line); ++parse_nerrors; return 1; }
0
497,524
static void ok_inflater_make_huffman_tree_from_array(ok_inflater_huffman_tree *tree, const uint8_t *code_length, int length) { tree->bits = 1; // Count the number of codes for each code length. // Let code_length_count[n] be the number of codes of length n, n >= 1. unsigned int code_length_count[MAX_CODE_LENGTH]; int i; for (i = 0; i < MAX_CODE_LENGTH; i++) { code_length_count[i] = 0; } for (i = 0; i < length; i++) { code_length_count[code_length[i]]++; } // Find the numerical value of the smallest code for each code length: unsigned int next_code[MAX_CODE_LENGTH]; unsigned int code = 0; for (i = 1; i < MAX_CODE_LENGTH; i++) { code = (code + code_length_count[i - 1]) << 1; next_code[i] = code; if (code_length_count[i] != 0) { tree->bits = (unsigned int)i; } } // Init lookup table const unsigned int max = 1 << tree->bits; memset(tree->lookup_table, 0, sizeof(tree->lookup_table[0]) * max); // Assign numerical values to all codes, using consecutive values for all // codes of the same length with the base values determined at step 2. // Codes that are never used (which have a bit length of zero) must not be // assigned a value. for (i = 0; i < length; i++) { unsigned int len = code_length[i]; if (len != 0) { code = next_code[len]; next_code[len]++; unsigned int value = (unsigned int)i | (len << VALUE_BITS); tree->lookup_table[ok_inflater_reverse_bits(code, len)] = (uint16_t)value; } } // Fill in the missing parts of the lookup table int next_limit = 1; int num_bits = 0; int mask = 0; for (i = 1; i < (int)max; i++) { if (i == next_limit) { mask = (1 << num_bits) - 1; num_bits++; next_limit <<= 1; } if (tree->lookup_table[i] == 0) { tree->lookup_table[i] = tree->lookup_table[i & mask]; } } }
0
508,020
int SSL_use_RSAPrivateKey_file(SSL *ssl, const char *file, int type) { int j,ret=0; BIO *in; RSA *rsa=NULL; in=BIO_new(BIO_s_file_internal()); if (in == NULL) { SSLerr(SSL_F_SSL_USE_RSAPRIVATEKEY_FILE,ERR_R_BUF_LIB); goto end; } if (BIO_read_filename(in,file) <= 0) { SSLerr(SSL_F_SSL_USE_RSAPRIVATEKEY_FILE,ERR_R_SYS_LIB); goto end; } if (type == SSL_FILETYPE_ASN1) { j=ERR_R_ASN1_LIB; rsa=d2i_RSAPrivateKey_bio(in,NULL); } else if (type == SSL_FILETYPE_PEM) { j=ERR_R_PEM_LIB; rsa=PEM_read_bio_RSAPrivateKey(in,NULL, ssl->ctx->default_passwd_callback,ssl->ctx->default_passwd_callback_userdata); } else { SSLerr(SSL_F_SSL_USE_RSAPRIVATEKEY_FILE,SSL_R_BAD_SSL_FILETYPE); goto end; } if (rsa == NULL) { SSLerr(SSL_F_SSL_USE_RSAPRIVATEKEY_FILE,j); goto end; } ret=SSL_use_RSAPrivateKey(ssl,rsa); RSA_free(rsa); end: if (in != NULL) BIO_free(in); return(ret); }
0
117,272
QString attribute(NodePtr node, const QString &name) const override { QSvgNode *n = svgNode(node); if ((!n->nodeId().isEmpty() && (name == QLatin1String("id") || name == QLatin1String("xml:id")))) return n->nodeId(); if (!n->xmlClass().isEmpty() && name == QLatin1String("class")) return n->xmlClass(); return QString(); }
0
241,462
void CL_Clientinfo_f( void ) { Com_Printf( "--------- Client Information ---------\n" ); Com_Printf( "state: %i\n", clc.state ); Com_Printf( "Server: %s\n", clc.servername ); Com_Printf( "User info settings:\n" ); Info_Print( Cvar_InfoString( CVAR_USERINFO ) ); Com_Printf( "--------------------------------------\n" ); }
0
379,045
static void php_info_print_stream_hash(const char *name, HashTable *ht TSRMLS_DC) /* {{{ */ { char *key; uint len; if (ht) { if (zend_hash_num_elements(ht)) { HashPosition pos; if (!sapi_module.phpinfo_as_text) { php_info_printf("<tr><td class=\"e\">Registered %s</td><td class=\"v\">", name); } else { php_info_printf("\nRegistered %s => ", name); } zend_hash_internal_pointer_reset_ex(ht, &pos); while (zend_hash_get_current_key_ex(ht, &key, &len, NULL, 0, &pos) == HASH_KEY_IS_STRING) { if (!sapi_module.phpinfo_as_text) { php_info_print_html_esc(key, len-1); } else { php_info_print(key); } zend_hash_move_forward_ex(ht, &pos); if (zend_hash_get_current_key_ex(ht, &key, &len, NULL, 0, &pos) == HASH_KEY_IS_STRING) { php_info_print(", "); } else { break; } } if (!sapi_module.phpinfo_as_text) { php_info_print("</td></tr>\n"); } } else { char reg_name[128]; snprintf(reg_name, sizeof(reg_name), "Registered %s", name); php_info_print_table_row(2, reg_name, "none registered"); } } else { php_info_print_table_row(2, name, "disabled"); } }
0
338,193
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { const int mb_xy= mb_x + mb_y*h->mb_stride; const int mb_type = h->cur_pic.mb_type[mb_xy]; const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; int first_vertical_edge_done = 0; av_unused int dir; int chroma = !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY)); int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); int a = h->slice_alpha_c0_offset - qp_bd_offset; int b = h->slice_beta_offset - qp_bd_offset; if (FRAME_MBAFF(h) // and current and left pair do not have the same interlaced type && IS_INTERLACED(mb_type^h->left_type[LTOP]) // and left mb is in available to us && h->left_type[LTOP]) { /* First vertical edge is different in MBAFF frames * There are 8 different bS to compute and 2 different Qp */ DECLARE_ALIGNED(8, int16_t, bS)[8]; int qp[2]; int bqp[2]; int rqp[2]; int mb_qp, mbn0_qp, mbn1_qp; int i; first_vertical_edge_done = 1; if( IS_INTRA(mb_type) ) { AV_WN64A(&bS[0], 0x0004000400040004ULL); AV_WN64A(&bS[4], 0x0004000400040004ULL); } else { static const uint8_t offset[2][2][8]={ { {3+4*0, 3+4*0, 3+4*0, 3+4*0, 3+4*1, 3+4*1, 3+4*1, 3+4*1}, {3+4*2, 3+4*2, 3+4*2, 3+4*2, 3+4*3, 3+4*3, 3+4*3, 3+4*3}, },{ {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3}, {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3}, } }; const uint8_t *off= offset[MB_FIELD(h)][mb_y&1]; for( i = 0; i < 8; i++ ) { int j= MB_FIELD(h) ? i>>2 : i&1; int mbn_xy = h->left_mb_xy[LEFT(j)]; int mbn_type= h->left_type[LEFT(j)]; if( IS_INTRA( mbn_type ) ) bS[i] = 4; else{ bS[i] = 1 + !!(h->non_zero_count_cache[12+8*(i>>1)] | ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ? (h->cbp_table[mbn_xy] & (((MB_FIELD(h) ? (i&2) : (mb_y&1)) ? 
8 : 2) << 12)) : h->non_zero_count[mbn_xy][ off[i] ])); } } } mb_qp = h->cur_pic.qscale_table[mb_xy]; mbn0_qp = h->cur_pic.qscale_table[h->left_mb_xy[0]]; mbn1_qp = h->cur_pic.qscale_table[h->left_mb_xy[1]]; qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1; bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) + get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1; rqp[0] = ( get_chroma_qp( h, 1, mb_qp ) + get_chroma_qp( h, 1, mbn0_qp ) + 1 ) >> 1; qp[1] = ( mb_qp + mbn1_qp + 1 ) >> 1; bqp[1] = ( get_chroma_qp( h, 0, mb_qp ) + get_chroma_qp( h, 0, mbn1_qp ) + 1 ) >> 1; rqp[1] = ( get_chroma_qp( h, 1, mb_qp ) + get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1; /* Filter edge */ tprintf(h->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize); { int i; for (i = 0; i < 8; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); } if (MB_FIELD(h)) { filter_mb_mbaff_edgev ( h, img_y , linesize, bS , 1, qp [0], a, b, 1 ); filter_mb_mbaff_edgev ( h, img_y + 8* linesize, linesize, bS+4, 1, qp [1], a, b, 1 ); if (chroma){ if (CHROMA444(h)) { filter_mb_mbaff_edgev ( h, img_cb, uvlinesize, bS , 1, bqp[0], a, b, 1 ); filter_mb_mbaff_edgev ( h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 ); filter_mb_mbaff_edgev ( h, img_cr, uvlinesize, bS , 1, rqp[0], a, b, 1 ); filter_mb_mbaff_edgev ( h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 ); } else if (CHROMA422(h)) { filter_mb_mbaff_edgecv(h, img_cb, uvlinesize, bS , 1, bqp[0], a, b, 1); filter_mb_mbaff_edgecv(h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1); filter_mb_mbaff_edgecv(h, img_cr, uvlinesize, bS , 1, rqp[0], a, b, 1); filter_mb_mbaff_edgecv(h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1); }else{ filter_mb_mbaff_edgecv( h, img_cb, uvlinesize, bS , 1, bqp[0], a, b, 1 ); filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 ); filter_mb_mbaff_edgecv( h, img_cr, uvlinesize, bS , 1, rqp[0], a, b, 1 ); filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 ); } } }else{ filter_mb_mbaff_edgev ( h, img_y , 2* linesize, bS , 2, qp [0], a, b, 1 ); filter_mb_mbaff_edgev ( h, img_y + linesize, 2* linesize, bS+1, 2, qp [1], a, b, 1 ); if (chroma){ if (CHROMA444(h)) { filter_mb_mbaff_edgev ( h, img_cb, 2*uvlinesize, bS , 2, bqp[0], a, b, 1 ); filter_mb_mbaff_edgev ( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 ); filter_mb_mbaff_edgev ( h, img_cr, 2*uvlinesize, bS , 2, rqp[0], a, b, 1 ); filter_mb_mbaff_edgev ( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 ); }else{ filter_mb_mbaff_edgecv( h, img_cb, 2*uvlinesize, bS , 2, bqp[0], a, b, 1 ); filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 ); filter_mb_mbaff_edgecv( h, img_cr, 2*uvlinesize, bS , 2, rqp[0], a, b, 1 ); filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 ); } } } } #if CONFIG_SMALL for( dir = 0; dir < 2; dir++ ) filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, a, b, chroma, dir); #else filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, a, b, chroma, 0); filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, a, b, chroma, 1); #endif }
0
281,827
bool CanSaveAsComplete(const std::string& contents_mime_type) { return contents_mime_type == "text/html" || contents_mime_type == "application/xhtml+xml"; }
0
387,504
cooked( struct parse *pcmd, FILE *fp ) { rawmode = 0; (void) fprintf(fp, "Output set to cooked\n"); return; }
0
417,964
int RGWSetBucketWebsite::verify_permission() { return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite); }
0
431,137
Status processUsers(OperationContext* opCtx, AuthorizationManager* authzManager, StringData usersCollName, StringData db, bool drop) { // When the "drop" argument has been provided, we use this set to store the users // that are currently in the system, and remove from it as we encounter // same-named users in the collection we are restoring from. Once we've fully // moved over the temp users collection into its final location, we drop // any users that previously existed there but weren't in the temp collection. // This is so that we can completely replace the system.users // collection with the users from the temp collection, without removing all // users at the beginning and thus potentially locking ourselves out by having // no users in the whole system for a time. stdx::unordered_set<UserName> usersToDrop; if (drop) { // Create map of the users currently in the DB BSONObj query = db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db); BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1); Status status = queryAuthzDocument(opCtx, AuthorizationManager::usersCollectionNamespace, query, fields, [&](const BSONObj& userObj) { usersToDrop.insert(extractUserNameFromBSON(userObj)); }); if (!status.isOK()) { return status; } } Status status = queryAuthzDocument( opCtx, NamespaceString(usersCollName), db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db), BSONObj(), [&](const BSONObj& userObj) { return addUser(opCtx, authzManager, db, drop, &usersToDrop, userObj); }); if (!status.isOK()) { return status; } if (drop) { long long numRemoved; for (const UserName& userName : usersToDrop) { audit::logDropUser(Client::getCurrent(), userName); status = removePrivilegeDocuments(opCtx, BSON(AuthorizationManager::USER_NAME_FIELD_NAME << userName.getUser().toString() << AuthorizationManager::USER_DB_FIELD_NAME << userName.getDB().toString()), &numRemoved); if (!status.isOK()) { return status; } dassert(numRemoved == 1); } } return Status::OK(); }
0
485,760
static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int subdivide) { int err = -EINVAL, idx; for (idx = 1; idx >= 0; --idx) { struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; struct snd_pcm_runtime *runtime; if (substream == NULL) continue; runtime = substream->runtime; err = lock_params(runtime); if (err < 0) return err; err = snd_pcm_oss_set_subdivide1(substream, subdivide); unlock_params(runtime); if (err < 0) return err; } return err; }
0
254,241
base::FilePath WebRunnerBrowserContext::GetCachePath() const { NOTIMPLEMENTED(); return base::FilePath(); }
0
39,517
void QPaintEngineEx::drawLines(const QLineF *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); QVectorPath path((const qreal *) lines, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines += 16; } }
0
299,971
njs_vm_start(njs_vm_t *vm) { njs_int_t ret; ret = njs_module_load(vm); if (njs_slow_path(ret != NJS_OK)) { return ret; } ret = njs_vmcode_interpreter(vm, vm->start, NULL, NULL); return (ret == NJS_ERROR) ? NJS_ERROR : NJS_OK; }
0
519,857
void begin_dataset() {}
0
74,565
bgp_print(netdissect_options *ndo, const u_char *dat, int length) { const u_char *p; const u_char *ep; const u_char *start; const u_char marker[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; struct bgp bgp; uint16_t hlen; ep = dat + length; if (ndo->ndo_snapend < dat + length) ep = ndo->ndo_snapend; ND_PRINT((ndo, ": BGP")); if (ndo->ndo_vflag < 1) /* lets be less chatty */ return; p = dat; start = p; while (p < ep) { if (!ND_TTEST2(p[0], 1)) break; if (p[0] != 0xff) { p++; continue; } if (!ND_TTEST2(p[0], sizeof(marker))) break; if (memcmp(p, marker, sizeof(marker)) != 0) { p++; continue; } /* found BGP header */ ND_TCHECK2(p[0], BGP_SIZE); /*XXX*/ memcpy(&bgp, p, BGP_SIZE); if (start != p) ND_PRINT((ndo, " [|BGP]")); hlen = ntohs(bgp.bgp_len); if (hlen < BGP_SIZE) { ND_PRINT((ndo, "\n[|BGP Bogus header length %u < %u]", hlen, BGP_SIZE)); break; } if (ND_TTEST2(p[0], hlen)) { if (!bgp_header_print(ndo, p, hlen)) return; p += hlen; start = p; } else { ND_PRINT((ndo, "\n[|BGP %s]", tok2str(bgp_msg_values, "Unknown Message Type", bgp.bgp_type))); break; } } return; trunc: ND_PRINT((ndo, " [|BGP]")); }
0
22,512
void proto_reg_handoff_zbee_zcl_scenes(void) { dissector_handle_t scenes_handle; scenes_handle = find_dissector(ZBEE_PROTOABBREV_ZCL_SCENES); dissector_add_uint("zbee.zcl.cluster", ZBEE_ZCL_CID_SCENES, scenes_handle); zbee_zcl_init_cluster(proto_zbee_zcl_scenes, ett_zbee_zcl_scenes, ZBEE_ZCL_CID_SCENES, hf_zbee_zcl_scenes_attr_id, hf_zbee_zcl_scenes_srv_rx_cmd_id, hf_zbee_zcl_scenes_srv_tx_cmd_id, (zbee_zcl_fn_attr_data)dissect_zcl_scenes_attr_data); }
0
346,036
receive( struct recvbuf *rbufp ) { register struct peer *peer; /* peer structure pointer */ register struct pkt *pkt; /* receive packet pointer */ u_char hisversion; /* packet version */ u_char hisleap; /* packet leap indicator */ u_char hismode; /* packet mode */ u_char hisstratum; /* packet stratum */ u_short restrict_mask; /* restrict bits */ const char *hm_str; /* hismode string */ const char *am_str; /* association match string */ int kissCode = NOKISS; /* Kiss Code */ int has_mac; /* length of MAC field */ int authlen; /* offset of MAC field */ int is_authentic = 0; /* cryptosum ok */ int retcode = AM_NOMATCH; /* match code */ keyid_t skeyid = 0; /* key IDs */ u_int32 opcode = 0; /* extension field opcode */ sockaddr_u *dstadr_sin; /* active runway */ struct peer *peer2; /* aux peer structure pointer */ endpt *match_ep; /* newpeer() local address */ l_fp p_org; /* origin timestamp */ l_fp p_rec; /* receive timestamp */ l_fp p_xmt; /* transmit timestamp */ #ifdef AUTOKEY char hostname[NTP_MAXSTRLEN + 1]; char *groupname = NULL; struct autokey *ap; /* autokey structure pointer */ int rval; /* cookie snatcher */ keyid_t pkeyid = 0, tkeyid = 0; /* key IDs */ #endif /* AUTOKEY */ #ifdef HAVE_NTP_SIGND static unsigned char zero_key[16]; #endif /* HAVE_NTP_SIGND */ /* * Monitor the packet and get restrictions. Note that the packet * length for control and private mode packets must be checked * by the service routines. Some restrictions have to be handled * later in order to generate a kiss-o'-death packet. */ /* * Bogus port check is before anything, since it probably * reveals a clogging attack. */ sys_received++; if (0 == SRCPORT(&rbufp->recv_srcadr)) { sys_badlength++; return; /* bogus port */ } restrict_mask = restrictions(&rbufp->recv_srcadr); pkt = &rbufp->recv_pkt; DPRINTF(2, ("receive: at %ld %s<-%s flags %x restrict %03x org %#010x.%08x xmt %#010x.%08x\n", current_time, stoa(&rbufp->dstadr->sin), stoa(&rbufp->recv_srcadr), rbufp->dstadr->flags, restrict_mask, ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); hisversion = PKT_VERSION(pkt->li_vn_mode); hisleap = PKT_LEAP(pkt->li_vn_mode); hismode = (int)PKT_MODE(pkt->li_vn_mode); hisstratum = PKT_TO_STRATUM(pkt->stratum); if (restrict_mask & RES_IGNORE) { sys_restricted++; return; /* ignore everything */ } if (hismode == MODE_PRIVATE) { if (!ntp_mode7 || (restrict_mask & RES_NOQUERY)) { sys_restricted++; return; /* no query private */ } process_private(rbufp, ((restrict_mask & RES_NOMODIFY) == 0)); return; } if (hismode == MODE_CONTROL) { if (restrict_mask & RES_NOQUERY) { sys_restricted++; return; /* no query control */ } process_control(rbufp, restrict_mask); return; } if (restrict_mask & RES_DONTSERVE) { sys_restricted++; return; /* no time serve */ } /* * This is for testing. If restricted drop ten percent of * surviving packets. */ if (restrict_mask & RES_FLAKE) { if ((double)ntp_random() / 0x7fffffff < .1) { sys_restricted++; return; /* no flakeway */ } } /* * Version check must be after the query packets, since they * intentionally use an early version. */ if (hisversion == NTP_VERSION) { sys_newversion++; /* new version */ } else if ( !(restrict_mask & RES_VERSION) && hisversion >= NTP_OLDVERSION) { sys_oldversion++; /* previous version */ } else { sys_badlength++; return; /* old version */ } /* * Figure out his mode and validate the packet. This has some * legacy raunch that probably should be removed. 
In very early * NTP versions mode 0 was equivalent to what later versions * would interpret as client mode. */ if (hismode == MODE_UNSPEC) { if (hisversion == NTP_OLDVERSION) { hismode = MODE_CLIENT; } else { sys_badlength++; return; /* invalid mode */ } } /* * Parse the extension field if present. We figure out whether * an extension field is present by measuring the MAC size. If * the number of words following the packet header is 0, no MAC * is present and the packet is not authenticated. If 1, the * packet is a crypto-NAK; if 3, the packet is authenticated * with DES; if 5, the packet is authenticated with MD5; if 6, * the packet is authenticated with SHA. If 2 or * 4, the packet * is a runt and discarded forthwith. If greater than 6, an * extension field is present, so we subtract the length of the * field and go around again. */ authlen = LEN_PKT_NOMAC; has_mac = rbufp->recv_length - authlen; while (has_mac > 0) { u_int32 len; #ifdef AUTOKEY u_int32 hostlen; struct exten *ep; #endif /*AUTOKEY */ if (has_mac % 4 != 0 || has_mac < (int)MIN_MAC_LEN) { sys_badlength++; return; /* bad length */ } if (has_mac <= (int)MAX_MAC_LEN) { skeyid = ntohl(((u_int32 *)pkt)[authlen / 4]); break; } else { opcode = ntohl(((u_int32 *)pkt)[authlen / 4]); len = opcode & 0xffff; if ( len % 4 != 0 || len < 4 || (int)len + authlen > rbufp->recv_length) { sys_badlength++; return; /* bad length */ } #ifdef AUTOKEY /* * Extract calling group name for later. If * sys_groupname is non-NULL, there must be * a group name provided to elicit a response. */ if ( (opcode & 0x3fff0000) == CRYPTO_ASSOC && sys_groupname != NULL) { ep = (struct exten *)&((u_int32 *)pkt)[authlen / 4]; hostlen = ntohl(ep->vallen); if ( hostlen >= sizeof(hostname) || hostlen > len - offsetof(struct exten, pkt)) { sys_badlength++; return; /* bad length */ } memcpy(hostname, &ep->pkt, hostlen); hostname[hostlen] = '\0'; groupname = strchr(hostname, '@'); if (groupname == NULL) { sys_declined++; return; } groupname++; } #endif /* AUTOKEY */ authlen += len; has_mac -= len; } } /* * If has_mac is < 0 we had a malformed packet. */ if (has_mac < 0) { sys_badlength++; return; /* bad length */ } /* * If authentication required, a MAC must be present. */ if (restrict_mask & RES_DONTTRUST && has_mac == 0) { sys_restricted++; return; /* access denied */ } /* * Update the MRU list and finger the cloggers. It can be a * little expensive, so turn it off for production use. * RES_LIMITED and RES_KOD will be cleared in the returned * restrict_mask unless one or both actions are warranted. */ restrict_mask = ntp_monitor(rbufp, restrict_mask); if (restrict_mask & RES_LIMITED) { sys_limitrejected++; if ( !(restrict_mask & RES_KOD) || MODE_BROADCAST == hismode || MODE_SERVER == hismode) { if (MODE_SERVER == hismode) DPRINTF(1, ("Possibly self-induced rate limiting of MODE_SERVER from %s\n", stoa(&rbufp->recv_srcadr))); return; /* rate exceeded */ } if (hismode == MODE_CLIENT) fast_xmit(rbufp, MODE_SERVER, skeyid, restrict_mask); else fast_xmit(rbufp, MODE_ACTIVE, skeyid, restrict_mask); return; /* rate exceeded */ } restrict_mask &= ~RES_KOD; /* * We have tossed out as many buggy packets as possible early in * the game to reduce the exposure to a clogging attack. Now we * have to burn some cycles to find the association and * authenticate the packet if required. Note that we burn only * digest cycles, again to reduce exposure. There may be no * matching association and that's okay. * * More on the autokey mambo. 
Normally the local interface is * found when the association was mobilized with respect to a * designated remote address. We assume packets arriving from * the remote address arrive via this interface and the local * address used to construct the autokey is the unicast address * of the interface. However, if the sender is a broadcaster, * the interface broadcast address is used instead. * Notwithstanding this technobabble, if the sender is a * multicaster, the broadcast address is null, so we use the * unicast address anyway. Don't ask. */ peer = findpeer(rbufp, hismode, &retcode); dstadr_sin = &rbufp->dstadr->sin; NTOHL_FP(&pkt->org, &p_org); NTOHL_FP(&pkt->rec, &p_rec); NTOHL_FP(&pkt->xmt, &p_xmt); hm_str = modetoa(hismode); am_str = amtoa(retcode); /* * Authentication is conditioned by three switches: * * NOPEER (RES_NOPEER) do not mobilize an association unless * authenticated * NOTRUST (RES_DONTTRUST) do not allow access unless * authenticated (implies NOPEER) * enable (sys_authenticate) master NOPEER switch, by default * on * * The NOPEER and NOTRUST can be specified on a per-client basis * using the restrict command. The enable switch if on implies * NOPEER for all clients. There are four outcomes: * * NONE The packet has no MAC. * OK the packet has a MAC and authentication succeeds * ERROR the packet has a MAC and authentication fails * CRYPTO crypto-NAK. The MAC has four octets only. * * Note: The AUTH(x, y) macro is used to filter outcomes. If x * is zero, acceptable outcomes of y are NONE and OK. If x is * one, the only acceptable outcome of y is OK. */ if (has_mac == 0) { restrict_mask &= ~RES_MSSNTP; is_authentic = AUTH_NONE; /* not required */ DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s len %d org %#010x.%08x xmt %#010x.%08x NOMAC\n", current_time, stoa(dstadr_sin), stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str, authlen, ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); } else if (has_mac == 4) { restrict_mask &= ~RES_MSSNTP; is_authentic = AUTH_CRYPTO; /* crypto-NAK */ DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s keyid %08x len %d auth %d org %#010x.%08x xmt %#010x.%08x MAC4\n", current_time, stoa(dstadr_sin), stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str, skeyid, authlen + has_mac, is_authentic, ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); #ifdef HAVE_NTP_SIGND /* * If the signature is 20 bytes long, the last 16 of * which are zero, then this is a Microsoft client * wanting AD-style authentication of the server's * reply. * * This is described in Microsoft's WSPP docs, in MS-SNTP: * http://msdn.microsoft.com/en-us/library/cc212930.aspx */ } else if ( has_mac == MAX_MD5_LEN && (restrict_mask & RES_MSSNTP) && (retcode == AM_FXMIT || retcode == AM_NEWPASS) && (memcmp(zero_key, (char *)pkt + authlen + 4, MAX_MD5_LEN - 4) == 0)) { is_authentic = AUTH_NONE; #endif /* HAVE_NTP_SIGND */ } else { restrict_mask &= ~RES_MSSNTP; #ifdef AUTOKEY /* * For autokey modes, generate the session key * and install in the key cache. Use the socket * broadcast or unicast address as appropriate. */ if (crypto_flags && skeyid > NTP_MAXKEY) { /* * More on the autokey dance (AKD). A cookie is * constructed from public and private values. * For broadcast packets, the cookie is public * (zero). For packets that match no * association, the cookie is hashed from the * addresses and private value. For server * packets, the cookie was previously obtained * from the server. 
For symmetric modes, the * cookie was previously constructed using an * agreement protocol; however, should PKI be * unavailable, we construct a fake agreement as * the EXOR of the peer and host cookies. * * hismode ephemeral persistent * ======================================= * active 0 cookie# * passive 0% cookie# * client sys cookie 0% * server 0% sys cookie * broadcast 0 0 * * # if unsync, 0 * % can't happen */ if (has_mac < (int)MAX_MD5_LEN) { sys_badauth++; return; } if (hismode == MODE_BROADCAST) { /* * For broadcaster, use the interface * broadcast address when available; * otherwise, use the unicast address * found when the association was * mobilized. However, if this is from * the wildcard interface, game over. */ if ( crypto_flags && rbufp->dstadr == ANY_INTERFACE_CHOOSE(&rbufp->recv_srcadr)) { sys_restricted++; return; /* no wildcard */ } pkeyid = 0; if (!SOCK_UNSPEC(&rbufp->dstadr->bcast)) dstadr_sin = &rbufp->dstadr->bcast; } else if (peer == NULL) { pkeyid = session_key( &rbufp->recv_srcadr, dstadr_sin, 0, sys_private, 0); } else { pkeyid = peer->pcookie; } /* * The session key includes both the public * values and cookie. In case of an extension * field, the cookie used for authentication * purposes is zero. Note the hash is saved for * use later in the autokey mambo. */ if (authlen > (int)LEN_PKT_NOMAC && pkeyid != 0) { session_key(&rbufp->recv_srcadr, dstadr_sin, skeyid, 0, 2); tkeyid = session_key( &rbufp->recv_srcadr, dstadr_sin, skeyid, pkeyid, 0); } else { tkeyid = session_key( &rbufp->recv_srcadr, dstadr_sin, skeyid, pkeyid, 2); } } #endif /* AUTOKEY */ /* * Compute the cryptosum. Note a clogging attack may * succeed in bloating the key cache. If an autokey, * purge it immediately, since we won't be needing it * again. If the packet is authentic, it can mobilize an * association. Note that there is no key zero. */ if (!authdecrypt(skeyid, (u_int32 *)pkt, authlen, has_mac)) is_authentic = AUTH_ERROR; else is_authentic = AUTH_OK; #ifdef AUTOKEY if (crypto_flags && skeyid > NTP_MAXKEY) authtrust(skeyid, 0); #endif /* AUTOKEY */ DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s keyid %08x len %d auth %d org %#010x.%08x xmt %#010x.%08x\n", current_time, stoa(dstadr_sin), stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str, skeyid, authlen + has_mac, is_authentic, ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); } /* * The association matching rules are implemented by a set of * routines and an association table. A packet matching an * association is processed by the peer process for that * association. If there are no errors, an ephemeral association * is mobilized: a broadcast packet mobilizes a broadcast client * aassociation; a manycast server packet mobilizes a manycast * client association; a symmetric active packet mobilizes a * symmetric passive association. */ switch (retcode) { /* * This is a client mode packet not matching any association. If * an ordinary client, simply toss a server mode packet back * over the fence. If a manycast client, we have to work a * little harder. */ case AM_FXMIT: /* * If authentication OK, send a server reply; otherwise, * send a crypto-NAK. */ if (!(rbufp->dstadr->flags & INT_MCASTOPEN)) { if (AUTH(restrict_mask & RES_DONTTRUST, is_authentic)) { fast_xmit(rbufp, MODE_SERVER, skeyid, restrict_mask); } else if (is_authentic == AUTH_ERROR) { fast_xmit(rbufp, MODE_SERVER, 0, restrict_mask); sys_badauth++; } else { sys_restricted++; } return; /* hooray */ } /* * This must be manycast. 
Do not respond if not * configured as a manycast server. */ if (!sys_manycastserver) { sys_restricted++; return; /* not enabled */ } #ifdef AUTOKEY /* * Do not respond if not the same group. */ if (group_test(groupname, NULL)) { sys_declined++; return; } #endif /* AUTOKEY */ /* * Do not respond if we are not synchronized or our * stratum is greater than the manycaster or the * manycaster has already synchronized to us. */ if ( sys_leap == LEAP_NOTINSYNC || sys_stratum >= hisstratum || (!sys_cohort && sys_stratum == hisstratum + 1) || rbufp->dstadr->addr_refid == pkt->refid) { sys_declined++; return; /* no help */ } /* * Respond only if authentication succeeds. Don't do a * crypto-NAK, as that would not be useful. */ if (AUTH(restrict_mask & RES_DONTTRUST, is_authentic)) fast_xmit(rbufp, MODE_SERVER, skeyid, restrict_mask); return; /* hooray */ /* * This is a server mode packet returned in response to a client * mode packet sent to a multicast group address (for * manycastclient) or to a unicast address (for pool). The * origin timestamp is a good nonce to reliably associate the * reply with what was sent. If there is no match, that's * curious and could be an intruder attempting to clog, so we * just ignore it. * * If the packet is authentic and the manycastclient or pool * association is found, we mobilize a client association and * copy pertinent variables from the manycastclient or pool * association to the new client association. If not, just * ignore the packet. * * There is an implosion hazard at the manycast client, since * the manycast servers send the server packet immediately. If * the guy is already here, don't fire up a duplicate. */ case AM_MANYCAST: #ifdef AUTOKEY /* * Do not respond if not the same group. */ if (group_test(groupname, NULL)) { sys_declined++; return; } #endif /* AUTOKEY */ if ((peer2 = findmanycastpeer(rbufp)) == NULL) { sys_restricted++; return; /* not enabled */ } if (!AUTH( (!(peer2->cast_flags & MDF_POOL) && sys_authenticate) || (restrict_mask & (RES_NOPEER | RES_DONTTRUST)), is_authentic)) { sys_restricted++; return; /* access denied */ } /* * Do not respond if unsynchronized or stratum is below * the floor or at or above the ceiling. */ if ( hisleap == LEAP_NOTINSYNC || hisstratum < sys_floor || hisstratum >= sys_ceiling) { sys_declined++; return; /* no help */ } peer = newpeer(&rbufp->recv_srcadr, NULL, rbufp->dstadr, MODE_CLIENT, hisversion, peer2->minpoll, peer2->maxpoll, FLAG_PREEMPT | (FLAG_IBURST & peer2->flags), MDF_UCAST | MDF_UCLNT, 0, skeyid, sys_ident); if (NULL == peer) { sys_declined++; return; /* ignore duplicate */ } /* * After each ephemeral pool association is spun, * accelerate the next poll for the pool solicitor so * the pool will fill promptly. */ if (peer2->cast_flags & MDF_POOL) peer2->nextdate = current_time + 1; /* * Further processing of the solicitation response would * simply detect its origin timestamp as bogus for the * brand-new association (it matches the prototype * association) and tinker with peer->nextdate delaying * first sync. */ return; /* solicitation response handled */ /* * This is the first packet received from a broadcast server. If * the packet is authentic and we are enabled as broadcast * client, mobilize a broadcast client association. We don't * kiss any frogs here. */ case AM_NEWBCL: #ifdef AUTOKEY /* * Do not respond if not the same group. 
*/ if (group_test(groupname, sys_ident)) { sys_declined++; return; } #endif /* AUTOKEY */ if (sys_bclient == 0) { sys_restricted++; return; /* not enabled */ } if (!AUTH(sys_authenticate | (restrict_mask & (RES_NOPEER | RES_DONTTRUST)), is_authentic)) { sys_restricted++; return; /* access denied */ } /* * Do not respond if unsynchronized or stratum is below * the floor or at or above the ceiling. */ if ( hisleap == LEAP_NOTINSYNC || hisstratum < sys_floor || hisstratum >= sys_ceiling) { sys_declined++; return; /* no help */ } #ifdef AUTOKEY /* * Do not respond if Autokey and the opcode is not a * CRYPTO_ASSOC response with association ID. */ if ( crypto_flags && skeyid > NTP_MAXKEY && (opcode & 0xffff0000) != (CRYPTO_ASSOC | CRYPTO_RESP)) { sys_declined++; return; /* protocol error */ } #endif /* AUTOKEY */ /* * Broadcasts received via a multicast address may * arrive after a unicast volley has begun * with the same remote address. newpeer() will not * find duplicate associations on other local endpoints * if a non-NULL endpoint is supplied. multicastclient * ephemeral associations are unique across all local * endpoints. */ if (!(INT_MCASTOPEN & rbufp->dstadr->flags)) match_ep = rbufp->dstadr; else match_ep = NULL; /* * Determine whether to execute the initial volley. */ if (sys_bdelay != 0) { #ifdef AUTOKEY /* * If a two-way exchange is not possible, * neither is Autokey. */ if (crypto_flags && skeyid > NTP_MAXKEY) { sys_restricted++; return; /* no autokey */ } #endif /* AUTOKEY */ /* * Do not execute the volley. Start out in * broadcast client mode. */ peer = newpeer(&rbufp->recv_srcadr, NULL, match_ep, MODE_BCLIENT, hisversion, pkt->ppoll, pkt->ppoll, FLAG_PREEMPT, MDF_BCLNT, 0, skeyid, sys_ident); if (NULL == peer) { sys_restricted++; return; /* ignore duplicate */ } else { peer->delay = sys_bdelay; peer->bxmt = p_xmt; } break; } /* * Execute the initial volley in order to calibrate the * propagation delay and run the Autokey protocol. * * Note that the minpoll is taken from the broadcast * packet, normally 6 (64 s) and that the poll interval * is fixed at this value. */ peer = newpeer(&rbufp->recv_srcadr, NULL, match_ep, MODE_CLIENT, hisversion, pkt->ppoll, pkt->ppoll, FLAG_BC_VOL | FLAG_IBURST | FLAG_PREEMPT, MDF_BCLNT, 0, skeyid, sys_ident); if (NULL == peer) { sys_restricted++; return; /* ignore duplicate */ } peer->bxmt = p_xmt; #ifdef AUTOKEY if (skeyid > NTP_MAXKEY) crypto_recv(peer, rbufp); #endif /* AUTOKEY */ return; /* hooray */ /* * This is the first packet received from a symmetric active * peer. If the packet is authentic and the first he sent, * mobilize a passive association. If not, kiss the frog. */ case AM_NEWPASS: #ifdef AUTOKEY /* * Do not respond if not the same group. */ if (group_test(groupname, sys_ident)) { sys_declined++; return; } #endif /* AUTOKEY */ if (!AUTH(sys_authenticate | (restrict_mask & (RES_NOPEER | RES_DONTTRUST)), is_authentic)) { /* * If authenticated but cannot mobilize an * association, send a symmetric passive * response without mobilizing an association. * This is for drat broken Windows clients. See * Microsoft KB 875424 for preferred workaround. 
*/ if (AUTH(restrict_mask & RES_DONTTRUST, is_authentic)) { fast_xmit(rbufp, MODE_PASSIVE, skeyid, restrict_mask); return; /* hooray */ } if (is_authentic == AUTH_ERROR) { fast_xmit(rbufp, MODE_ACTIVE, 0, restrict_mask); sys_restricted++; return; } /* [Bug 2941] * If we got here, the packet isn't part of an * existing association, it isn't correctly * authenticated, and it didn't meet either of * the previous two special cases so we should * just drop it on the floor. For example, * crypto-NAKs (is_authentic == AUTH_CRYPTO) * will make it this far. This is just * debug-printed and not logged to avoid log * flooding. */ DPRINTF(2, ("receive: at %ld refusing to mobilize passive association" " with unknown peer %s mode %d/%s:%s keyid %08x len %d auth %d\n", current_time, stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str, skeyid, (authlen + has_mac), is_authentic)); sys_declined++; return; } /* * Do not respond if synchronized and if stratum is * below the floor or at or above the ceiling. Note, * this allows an unsynchronized peer to synchronize to * us. It would be very strange if he did and then was * nipped, but that could only happen if we were * operating at the top end of the range. It also means * we will spin an ephemeral association in response to * MODE_ACTIVE KoDs, which will time out eventually. */ if ( hisleap != LEAP_NOTINSYNC && (hisstratum < sys_floor || hisstratum >= sys_ceiling)) { sys_declined++; return; /* no help */ } /* * The message is correctly authenticated and allowed. * Mobilize a symmetric passive association. */ if ((peer = newpeer(&rbufp->recv_srcadr, NULL, rbufp->dstadr, MODE_PASSIVE, hisversion, pkt->ppoll, NTP_MAXDPOLL, 0, MDF_UCAST, 0, skeyid, sys_ident)) == NULL) { sys_declined++; return; /* ignore duplicate */ } break; /* * Process regular packet. Nothing special. */ case AM_PROCPKT: #ifdef AUTOKEY /* * Do not respond if not the same group. */ if (group_test(groupname, peer->ident)) { sys_declined++; return; } #endif /* AUTOKEY */ if (MODE_BROADCAST == hismode) { u_char poll; int bail = 0; DPRINTF(2, ("receive: PROCPKT/BROADCAST: prev pkt %ld seconds ago, ppoll: %d, %d secs\n", (current_time - peer->timelastrec), peer->ppoll, (1 << peer->ppoll) )); /* Things we can check: * * Did the poll interval change? * Is the poll interval in the packet in-range? * Did this packet arrive too soon? * Is the timestamp in this packet monotonic * with respect to the previous packet? 
*/ /* This is noteworthy, not error-worthy */ if (pkt->ppoll != peer->ppoll) { msyslog(LOG_INFO, "receive: broadcast poll from %s changed from %ud to %ud", stoa(&rbufp->recv_srcadr), peer->ppoll, pkt->ppoll); } poll = min(peer->maxpoll, max(peer->minpoll, pkt->ppoll)); /* This is error-worthy */ if (pkt->ppoll != poll) { msyslog(LOG_INFO, "receive: broadcast poll of %ud from %s is out-of-range (%d to %d)!", pkt->ppoll, stoa(&rbufp->recv_srcadr), peer->minpoll, peer->maxpoll); ++bail; } if ( (current_time - peer->timelastrec) < (1 << pkt->ppoll)) { msyslog(LOG_INFO, "receive: broadcast packet from %s arrived after %ld, not %d seconds!", stoa(&rbufp->recv_srcadr), (current_time - peer->timelastrec), (1 << pkt->ppoll) ); ++bail; } if (L_ISGT(&peer->bxmt, &p_xmt)) { msyslog(LOG_INFO, "receive: broadcast packet from %s contains non-monotonic timestamp: %#010x.%08x -> %#010x.%08x", stoa(&rbufp->recv_srcadr), peer->bxmt.l_ui, peer->bxmt.l_uf, p_xmt.l_ui, p_xmt.l_uf ); ++bail; } peer->bxmt = p_xmt; if (bail) { peer->timelastrec = current_time; sys_declined++; return; } } break; /* * A passive packet matches a passive association. This is * usually the result of reconfiguring a client on the fly. As * this association might be legitimate and this packet an * attempt to deny service, just ignore it. */ case AM_ERR: sys_declined++; return; /* * For everything else there is the bit bucket. */ default: sys_declined++; return; } #ifdef AUTOKEY /* * If the association is configured for Autokey, the packet must * have a public key ID; if not, the packet must have a * symmetric key ID. */ if ( is_authentic != AUTH_CRYPTO && ( ((peer->flags & FLAG_SKEY) && skeyid <= NTP_MAXKEY) || (!(peer->flags & FLAG_SKEY) && skeyid > NTP_MAXKEY))) { sys_badauth++; return; } #endif /* AUTOKEY */ peer->received++; peer->flash &= ~PKT_TEST_MASK; if (peer->flags & FLAG_XBOGUS) { peer->flags &= ~FLAG_XBOGUS; peer->flash |= TEST3; } /* * Next comes a rigorous schedule of timestamp checking. If the * transmit timestamp is zero, the server has not initialized in * interleaved modes or is horribly broken. */ if (L_ISZERO(&p_xmt)) { peer->flash |= TEST3; /* unsynch */ /* * If the transmit timestamp duplicates a previous one, the * packet is a replay. This prevents the bad guys from replaying * the most recent packet, authenticated or not. */ } else if (L_ISEQU(&peer->xmt, &p_xmt)) { peer->flash |= TEST1; /* duplicate */ peer->oldpkt++; return; /* * If this is a broadcast mode packet, skip further checking. If * an initial volley, bail out now and let the client do its * stuff. If the origin timestamp is nonzero, this is an * interleaved broadcast. so restart the protocol. */ } else if (hismode == MODE_BROADCAST) { if (!L_ISZERO(&p_org) && !(peer->flags & FLAG_XB)) { peer->flags |= FLAG_XB; peer->aorg = p_xmt; peer->borg = rbufp->recv_time; report_event(PEVNT_XLEAVE, peer, NULL); return; } /* * Basic mode checks: * * If there is no origin timestamp, it's an initial packet. * * Otherwise, check for bogus packet in basic mode. * If it is bogus, switch to interleaved mode and resynchronize, * but only after confirming the packet is not bogus in * symmetric interleaved mode. * * This could also mean somebody is forging packets claiming to * be from us, attempting to cause our server to KoD us. 
*/ } else if (peer->flip == 0) { if (0 < hisstratum && L_ISZERO(&p_org)) { L_CLR(&peer->aorg); } else if (!L_ISEQU(&p_org, &peer->aorg)) { peer->bogusorg++; peer->flash |= TEST2; /* bogus */ msyslog(LOG_INFO, "receive: Unexpected origin timestamp %#010x.%08x from %s xmt %#010x.%08x", ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), ntoa(&peer->srcadr), ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)); if ( !L_ISZERO(&peer->dst) && L_ISEQU(&p_org, &peer->dst)) { /* Might be the start of an interleave */ peer->flip = 1; report_event(PEVNT_XLEAVE, peer, NULL); } return; /* Bogus or possible interleave packet */ } else { L_CLR(&peer->aorg); } /* * Check for valid nonzero timestamp fields. */ } else if (L_ISZERO(&p_org) || L_ISZERO(&p_rec) || L_ISZERO(&peer->dst)) { peer->flash |= TEST3; /* unsynch */ /* * Check for bogus packet in interleaved symmetric mode. This * can happen if a packet is lost, duplicated or crossed. If * found, flip and resynchronize. */ } else if ( !L_ISZERO(&peer->dst) && !L_ISEQU(&p_org, &peer->dst)) { peer->bogusorg++; peer->flags |= FLAG_XBOGUS; peer->flash |= TEST2; /* bogus */ return; /* Bogus packet, we are done */ } /* * If this is a crypto_NAK, the server cannot authenticate a * client packet. The server might have just changed keys. Clear * the association and restart the protocol. */ if (is_authentic == AUTH_CRYPTO) { report_event(PEVNT_AUTH, peer, "crypto_NAK"); peer->flash |= TEST5; /* bad auth */ peer->badauth++; if (peer->flags & FLAG_PREEMPT) { unpeer(peer); return; } #ifdef AUTOKEY if (peer->crypto) peer_clear(peer, "AUTH"); #endif /* AUTOKEY */ return; /* * If the digest fails or it's missing for authenticated * associations, the client cannot authenticate a server * reply to a client packet previously sent. The loopback check * is designed to avoid a bait-and-switch attack, which was * possible in past versions. If symmetric modes, return a * crypto-NAK. The peer should restart the protocol. */ } else if (!AUTH(peer->keyid || has_mac || (restrict_mask & RES_DONTTRUST), is_authentic)) { report_event(PEVNT_AUTH, peer, "digest"); peer->flash |= TEST5; /* bad auth */ peer->badauth++; if ( has_mac && (hismode == MODE_ACTIVE || hismode == MODE_PASSIVE)) fast_xmit(rbufp, MODE_ACTIVE, 0, restrict_mask); if (peer->flags & FLAG_PREEMPT) { unpeer(peer); return; } #ifdef AUTOKEY if (peer->crypto) peer_clear(peer, "AUTH"); #endif /* AUTOKEY */ return; } /* * Update the state variables. */ if (peer->flip == 0) { if (hismode != MODE_BROADCAST) peer->rec = p_xmt; peer->dst = rbufp->recv_time; } peer->xmt = p_xmt; /* * Set the peer ppoll to the maximum of the packet ppoll and the * peer minpoll. If a kiss-o'-death, set the peer minpoll to * this maximum and advance the headway to give the sender some * headroom. Very intricate. */ /* * Check for any kiss codes. 
Note this is only used when a server * responds to a packet request */ kissCode = kiss_code_check(hisleap, hisstratum, hismode, pkt->refid); /* * Check to see if this is a RATE Kiss Code * Currently this kiss code will accept whatever poll * rate that the server sends */ peer->ppoll = max(peer->minpoll, pkt->ppoll); if (kissCode == RATEKISS) { peer->selbroken++; /* Increment the KoD count */ report_event(PEVNT_RATE, peer, NULL); if (pkt->ppoll > peer->minpoll) peer->minpoll = peer->ppoll; peer->burst = peer->retry = 0; peer->throttle = (NTP_SHIFT + 1) * (1 << peer->minpoll); poll_update(peer, pkt->ppoll); return; /* kiss-o'-death */ } if (kissCode != NOKISS) { peer->selbroken++; /* Increment the KoD count */ return; /* Drop any other kiss code packets */ } /* * That was hard and I am sweaty, but the packet is squeaky * clean. Get on with real work. */ peer->timereceived = current_time; peer->timelastrec = current_time; if (is_authentic == AUTH_OK) peer->flags |= FLAG_AUTHENTIC; else peer->flags &= ~FLAG_AUTHENTIC; #ifdef AUTOKEY /* * More autokey dance. The rules of the cha-cha are as follows: * * 1. If there is no key or the key is not auto, do nothing. * * 2. If this packet is in response to the one just previously * sent or from a broadcast server, do the extension fields. * Otherwise, assume bogosity and bail out. * * 3. If an extension field contains a verified signature, it is * self-authenticated and we sit the dance. * * 4. If this is a server reply, check only to see that the * transmitted key ID matches the received key ID. * * 5. Check to see that one or more hashes of the current key ID * matches the previous key ID or ultimate original key ID * obtained from the broadcaster or symmetric peer. If no * match, sit the dance and call for new autokey values. * * In case of crypto error, fire the orchestra, stop dancing and * restart the protocol. */ if (peer->flags & FLAG_SKEY) { /* * Decrement remaining autokey hashes. This isn't * perfect if a packet is lost, but results in no harm. */ ap = (struct autokey *)peer->recval.ptr; if (ap != NULL) { if (ap->seq > 0) ap->seq--; } peer->flash |= TEST8; rval = crypto_recv(peer, rbufp); if (rval == XEVNT_OK) { peer->unreach = 0; } else { if (rval == XEVNT_ERR) { report_event(PEVNT_RESTART, peer, "crypto error"); peer_clear(peer, "CRYP"); peer->flash |= TEST9; /* bad crypt */ if (peer->flags & FLAG_PREEMPT) unpeer(peer); } return; } /* * If server mode, verify the receive key ID matches * the transmit key ID. */ if (hismode == MODE_SERVER) { if (skeyid == peer->keyid) peer->flash &= ~TEST8; /* * If an extension field is present, verify only that it * has been correctly signed. We don't need a sequence * check here, but the sequence continues. */ } else if (!(peer->flash & TEST8)) { peer->pkeyid = skeyid; /* * Now the fun part. Here, skeyid is the current ID in * the packet, pkeyid is the ID in the last packet and * tkeyid is the hash of skeyid. If the autokey values * have not been received, this is an automatic error. * If so, check that the tkeyid matches pkeyid. If not, * hash tkeyid and try again. If the number of hashes * exceeds the number remaining in the sequence, declare * a successful failure and refresh the autokey values. 
*/ } else if (ap != NULL) { int i; for (i = 0; ; i++) { if ( tkeyid == peer->pkeyid || tkeyid == ap->key) { peer->flash &= ~TEST8; peer->pkeyid = skeyid; ap->seq -= i; break; } if (i > ap->seq) { peer->crypto &= ~CRYPTO_FLAG_AUTO; break; } tkeyid = session_key( &rbufp->recv_srcadr, dstadr_sin, tkeyid, pkeyid, 0); } if (peer->flash & TEST8) report_event(PEVNT_AUTH, peer, "keylist"); } if (!(peer->crypto & CRYPTO_FLAG_PROV)) /* test 9 */ peer->flash |= TEST8; /* bad autokey */ /* * The maximum lifetime of the protocol is about one * week before restarting the Autokey protocol to * refresh certificates and leapseconds values. */ if (current_time > peer->refresh) { report_event(PEVNT_RESTART, peer, "crypto refresh"); peer_clear(peer, "TIME"); return; } } #endif /* AUTOKEY */ /* * The dance is complete and the flash bits have been lit. Toss * the packet over the fence for processing, which may light up * more flashers. */ process_packet(peer, pkt, rbufp->recv_length); /* * In interleaved mode update the state variables. Also adjust the * transmit phase to avoid crossover. */ if (peer->flip != 0) { peer->rec = p_rec; peer->dst = rbufp->recv_time; if (peer->nextdate - current_time < (1U << min(peer->ppoll, peer->hpoll)) / 2) peer->nextdate++; else peer->nextdate--; } }
1
96,308
static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, struct sk_buff *skb) { if (!ieee80211_is_mgmt(fc)) return 0; if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP)) return 0; if (!ieee80211_is_robust_mgmt_frame(skb)) return 0; return 1; }
0
31,116
static void writer_print_data(WriterContext *wctx, const char *name, uint8_t *data, int size) { AVBPrint bp; int offset = 0, l, i; av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED); av_bprintf(&bp, "\n"); while (size) { av_bprintf(&bp, "%08x: ", offset); l = FFMIN(size, 16); for (i = 0; i < l; i++) { av_bprintf(&bp, "%02x", data[i]); if (i & 1) av_bprintf(&bp, " "); } av_bprint_chars(&bp, ' ', 41 - 2 * i - i / 2); for (i = 0; i < l; i++) av_bprint_chars(&bp, data[i] - 32U < 95 ? data[i] : '.', 1); av_bprintf(&bp, "\n"); offset += l; data += l; size -= l; } writer_print_string(wctx, name, bp.str, 0); av_bprint_finalize(&bp, NULL); }
0
77,547
int cil_resolve_blockinherit_copy(struct cil_tree_node *current, void *extra_args) { struct cil_block *block = current->data; struct cil_args_resolve *args = extra_args; struct cil_db *db = NULL; struct cil_list_item *item = NULL; int rc = SEPOL_ERR; // This block is not inherited if (block->bi_nodes == NULL) { rc = SEPOL_OK; goto exit; } db = args->db; // Make sure this is the original block and not a merged block from a blockinherit if (current != block->datum.nodes->head->data) { rc = SEPOL_OK; goto exit; } cil_list_for_each(item, block->bi_nodes) { rc = cil_check_recursive_blockinherit(item->data); if (rc != SEPOL_OK) { goto exit; } rc = cil_copy_ast(db, current, item->data); if (rc != SEPOL_OK) { cil_log(CIL_ERR, "Failed to copy block contents into blockinherit\n"); goto exit; } } return SEPOL_OK; exit: return rc; }
0
398,847
gst_date_time_to_iso8601_string (GstDateTime * datetime) { g_return_val_if_fail (datetime != NULL, NULL); if (datetime->fields == GST_DATE_TIME_FIELDS_INVALID) return NULL; return __gst_date_time_serialize (datetime, FALSE); }
0
20,138
static int dissect_h245_Ind_clockRecovery(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) { offset = dissect_per_choice(tvb, offset, actx, tree, hf_index, ett_h245_Ind_clockRecovery, Ind_clockRecovery_choice, NULL); return offset; }
0
341,357
char *g_strdup(const char *s) { char *dup; size_t i; if (!s) { return NULL; } __coverity_string_null_sink__(s); __coverity_string_size_sink__(s); dup = __coverity_alloc_nosize__(); __coverity_mark_as_afm_allocated__(dup, AFM_free); for (i = 0; (dup[i] = s[i]); i++) ; return dup; }
0
371,381
void ZrtpQueue::setSignSas(bool sasSignMode) { signSas = sasSignMode; }
0
148,780
void operator()(OpKernelContext* context, const Tensor& y_backprop, const Tensor& x, const Tensor& scale, const Tensor* offset, const Tensor& mean, const Tensor& inv_variance, const Tensor* y, U epsilon, FusedBatchNormActivationMode activation_mode, Tensor* x_backprop, Tensor* scale_backprop, Tensor* offset_backprop, Tensor* side_input_backprop, bool use_reserved_space, TensorFormat tensor_format) { auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available")); const int64_t batch_size = GetTensorDim(x, tensor_format, 'N'); const int64_t channels = GetTensorDim(x, tensor_format, 'C'); const int64_t height = GetTensorDim(x, tensor_format, 'H'); const int64_t width = GetTensorDim(x, tensor_format, 'W'); #if GOOGLE_CUDA // Check if cuDNN batch normalization has a fast NHWC implementation: // (1) Tensorflow enabled batchnorm spatial persistence, and // FusedBatchNormGradV3 passed non-null reserve space and allocator. const bool fast_nhwc_batch_norm = BatchnormSpatialPersistentEnabled() && DataTypeToEnum<T>::value == DT_HALF && use_reserved_space; #else // fast NHWC implementation is a CUDA only feature const bool fast_nhwc_batch_norm = false; #endif // If input tensor is in NHWC format, and we have a fast cuDNN // implementation, there is no need to do data format conversion. TensorFormat compute_format = fast_nhwc_batch_norm && tensor_format == FORMAT_NHWC ? FORMAT_NHWC : FORMAT_NCHW; VLOG(2) << "FusedBatchNormGrad:" << " batch_size: " << batch_size << " channels: " << channels << " height: " << height << " width: " << width << " y_backprop shape: " << y_backprop.shape().DebugString() << " x shape: " << x.shape().DebugString() << " scale shape: " << scale.shape().DebugString() << " activation mode: " << ToString(activation_mode) << " tensor format: " << ToString(tensor_format) << " compute format: " << ToString(compute_format); // Inputs Tensor y_backprop_maybe_transformed = y_backprop; Tensor x_maybe_transformed = x; Tensor y_backprop_transformed; Tensor x_transformed; // Outputs Tensor x_backprop_transformed; se::DeviceMemory<T> x_backprop_ptr; if (tensor_format == compute_format) { x_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<T>(*x_backprop); } else if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) { // Transform inputs from 'NHWC' to 'NCHW' OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(FORMAT_NCHW, batch_size, height, width, channels), &y_backprop_transformed)); functor::NHWCToNCHW<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(y_backprop_maybe_transformed) .tensor<T, 4>(), y_backprop_transformed.tensor<T, 4>()); y_backprop_maybe_transformed = y_backprop_transformed; OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(FORMAT_NCHW, batch_size, height, width, channels), &x_transformed)); functor::NHWCToNCHW<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(x_maybe_transformed).tensor<T, 4>(), x_transformed.tensor<T, 4>()); x_maybe_transformed = x_transformed; // Allocate memory for transformed outputs in 'NCHW' OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(FORMAT_NCHW, batch_size, height, width, channels), &x_backprop_transformed)); x_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_backprop_transformed); } else { context->SetStatus(errors::Internal( "Unsupported tensor format: ", ToString(tensor_format), 
" and compute format: ", ToString(compute_format))); return; } const se::dnn::DataLayout data_layout = compute_format == FORMAT_NHWC ? se::dnn::DataLayout::kBatchYXDepth : se::dnn::DataLayout::kBatchDepthYX; se::dnn::BatchDescriptor x_desc; x_desc.set_count(batch_size) .set_feature_map_count(channels) .set_height(height) .set_width(width) .set_layout(data_layout); se::dnn::BatchDescriptor scale_offset_desc; scale_offset_desc.set_count(1) .set_feature_map_count(channels) .set_height(1) .set_width(1) .set_layout(se::dnn::DataLayout::kBatchDepthYX); auto y_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<T>(y_backprop_maybe_transformed); auto x_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_maybe_transformed); auto scale_ptr = StreamExecutorUtil::AsDeviceMemory<U>(scale); auto offset_ptr = offset != nullptr ? StreamExecutorUtil::AsDeviceMemory<U>(*offset) : se::DeviceMemory<U>(); auto mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(mean); auto inv_variance_ptr = StreamExecutorUtil::AsDeviceMemory<U>(inv_variance); auto y_ptr = y != nullptr ? StreamExecutorUtil::AsDeviceMemory<T>(*y) : se::DeviceMemory<T>(); auto scale_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*scale_backprop); auto offset_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*offset_backprop); auto side_input_backprop_ptr = side_input_backprop != nullptr ? StreamExecutorUtil::AsDeviceMemory<T>(*side_input_backprop) : se::DeviceMemory<T>(); std::unique_ptr<functor::CudnnBatchNormAllocatorInTemp<uint8>> workspace_allocator; DeviceMemory<uint8>* reserve_space_data_ptr = nullptr; DeviceMemory<uint8> reserve_space_data; #if CUDNN_VERSION >= 7402 if (use_reserved_space) { const Tensor& reserve_space = context->input(5); workspace_allocator.reset( new functor::CudnnBatchNormAllocatorInTemp<uint8>(context)); // the cudnn kernel outputs inverse variance in forward and reuse it in // backward if (reserve_space.dims() != 0) { reserve_space_data = functor::CastDeviceMemory<uint8, U>( const_cast<Tensor*>(&reserve_space)); reserve_space_data_ptr = &reserve_space_data; } } #endif // CUDNN_VERSION >= 7402 bool cudnn_launch_status = stream ->ThenBatchNormalizationBackward( y_backprop_ptr, x_ptr, scale_ptr, offset_ptr, mean_ptr, inv_variance_ptr, y_ptr, x_desc, scale_offset_desc, static_cast<double>(epsilon), AsDnnActivationMode(activation_mode), &x_backprop_ptr, &scale_backprop_ptr, &offset_backprop_ptr, &side_input_backprop_ptr, reserve_space_data_ptr, workspace_allocator.get()) .ok(); if (!cudnn_launch_status) { context->SetStatus( errors::Internal("cuDNN launch failure : input shape (", x.shape().DebugString(), ")")); } if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) { functor::NCHWToNHWC<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(x_backprop_transformed).tensor<T, 4>(), x_backprop->tensor<T, 4>()); } }
0
76,366
int blosc_free_resources(void) { /* Return if Blosc is not initialized */ if (!g_initlib) return -1; return release_threadpool(g_global_context); }
0
259,157
static int ops_traces_mod(struct ftrace_ops *ops) { struct ftrace_hash *hash; hash = ops->filter_hash; return ftrace_hash_empty(hash); }
0
39,157
read_StreamsInfo(struct archive_read *a, struct _7z_stream_info *si) { struct _7zip *zip = (struct _7zip *)a->format->data; const unsigned char *p; unsigned i; memset(si, 0, sizeof(*si)); if ((p = header_bytes(a, 1)) == NULL) return (-1); if (*p == kPackInfo) { uint64_t packPos; if (read_PackInfo(a, &(si->pi)) < 0) return (-1); if (si->pi.positions == NULL || si->pi.sizes == NULL) return (-1); /* * Calculate packed stream positions. */ packPos = si->pi.pos; for (i = 0; i < si->pi.numPackStreams; i++) { si->pi.positions[i] = packPos; packPos += si->pi.sizes[i]; if (packPos > zip->header_offset) return (-1); } if ((p = header_bytes(a, 1)) == NULL) return (-1); } if (*p == kUnPackInfo) { uint32_t packIndex; struct _7z_folder *f; if (read_CodersInfo(a, &(si->ci)) < 0) return (-1); /* * Calculate packed stream indexes. */ packIndex = 0; f = si->ci.folders; for (i = 0; i < si->ci.numFolders; i++) { f[i].packIndex = packIndex; packIndex += (uint32_t)f[i].numPackedStreams; if (packIndex > si->pi.numPackStreams) return (-1); } if ((p = header_bytes(a, 1)) == NULL) return (-1); } if (*p == kSubStreamsInfo) { if (read_SubStreamsInfo(a, &(si->ss), si->ci.folders, (size_t)si->ci.numFolders) < 0) return (-1); if ((p = header_bytes(a, 1)) == NULL) return (-1); } /* * Must be kEnd. */ if (*p != kEnd) return (-1); return (0); }
0
295,080
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) { pv_ops.mmu.set_fixmap(idx, phys, flags); }
0
93,813
size_t HTTPSession::sendCertificateRequest( std::unique_ptr<folly::IOBuf> certificateRequestContext, std::vector<fizz::Extension> extensions) { // Check if both sending and receiving peer have advertised valid // SETTINGS_HTTP_CERT_AUTH setting. Otherwise, the frames for secondary // authentication should not be sent. auto ingressSettings = codec_->getIngressSettings(); auto egressSettings = codec_->getEgressSettings(); if (ingressSettings && egressSettings) { if (ingressSettings->getSetting(SettingsId::SETTINGS_HTTP_CERT_AUTH, 0) == 0 || egressSettings->getSetting(SettingsId::SETTINGS_HTTP_CERT_AUTH, 0) == 0) { VLOG(4) << "Secondary certificate authentication is not supported."; return 0; } } auto authRequest = secondAuthManager_->createAuthRequest( std::move(certificateRequestContext), std::move(extensions)); auto encodedSize = codec_->generateCertificateRequest( writeBuf_, authRequest.first, std::move(authRequest.second)); if (encodedSize > 0) { scheduleWrite(); } else { VLOG(4) << "Failed to generate CERTIFICATE_REQUEST frame."; } return encodedSize; }
0
268,962
static int read_fragment_table(long long *table_start) { /* * Note on overflow limits: * Size of SBlk.s.fragments is 2^32 (unsigned int) * Max size of bytes is 2^32*16 or 2^36 * Max indexes is (2^32*16)/8K or 2^23 * Max length is ((2^32*16)/8K)*8 or 2^26 or 64M */ int res; unsigned int i; long long bytes = SQUASHFS_FRAGMENT_BYTES((long long) sBlk.s.fragments); int indexes = SQUASHFS_FRAGMENT_INDEXES((long long) sBlk.s.fragments); int length = SQUASHFS_FRAGMENT_INDEX_BYTES((long long) sBlk.s.fragments); long long *fragment_table_index; /* * The size of the index table (length bytes) should match the * table start and end points */ if(length != (*table_start - sBlk.s.fragment_table_start)) { ERROR("read_fragment_table: Bad fragment count in super block\n"); return FALSE; } TRACE("read_fragment_table: %u fragments, reading %d fragment indexes " "from 0x%llx\n", sBlk.s.fragments, indexes, sBlk.s.fragment_table_start); fragment_table_index = alloc_index_table(indexes); fragment_table = malloc(bytes); if(fragment_table == NULL) MEM_ERROR(); res = read_fs_bytes(fd, sBlk.s.fragment_table_start, length, fragment_table_index); if(res == FALSE) { ERROR("read_fragment_table: failed to read fragment table " "index\n"); return FALSE; } SQUASHFS_INSWAP_FRAGMENT_INDEXES(fragment_table_index, indexes); for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); int length = read_block(fd, fragment_table_index[i], NULL, expected, ((char *) fragment_table) + (i * SQUASHFS_METADATA_SIZE)); TRACE("Read fragment table block %d, from 0x%llx, length %d\n", i, fragment_table_index[i], length); if(length == FALSE) { ERROR("read_fragment_table: failed to read fragment " "table index\n"); return FALSE; } } for(i = 0; i < sBlk.s.fragments; i++) SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]); *table_start = fragment_table_index[0]; return TRUE; }
0
178,650
WORD32 ih264d_video_decode(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op) { /* ! */ dec_struct_t * ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle); WORD32 i4_err_status = 0; UWORD8 *pu1_buf = NULL; WORD32 buflen; UWORD32 u4_max_ofst, u4_length_of_start_code = 0; UWORD32 bytes_consumed = 0; UWORD32 cur_slice_is_nonref = 0; UWORD32 u4_next_is_aud; UWORD32 u4_first_start_code_found = 0; WORD32 ret = 0,api_ret_value = IV_SUCCESS; WORD32 header_data_left = 0,frame_data_left = 0; UWORD8 *pu1_bitstrm_buf; ivd_video_decode_ip_t *ps_dec_ip; ivd_video_decode_op_t *ps_dec_op; ithread_set_name((void*)"Parse_thread"); ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip; ps_dec_op = (ivd_video_decode_op_t *)pv_api_op; { UWORD32 u4_size; u4_size = ps_dec_op->u4_size; memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t)); ps_dec_op->u4_size = u4_size; } ps_dec->pv_dec_out = ps_dec_op; if(ps_dec->init_done != 1) { return IV_FAIL; } /*Data memory barries instruction,so that bitstream write by the application is complete*/ DATA_SYNC(); if(0 == ps_dec->u1_flushfrm) { if(ps_dec_ip->pv_stream_buffer == NULL) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL; return IV_FAIL; } if(ps_dec_ip->u4_num_Bytes <= 0) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV; return IV_FAIL; } } ps_dec->u1_pic_decode_done = 0; ps_dec_op->u4_num_bytes_consumed = 0; ps_dec->ps_out_buffer = NULL; if(ps_dec_ip->u4_size >= offsetof(ivd_video_decode_ip_t, s_out_buffer)) ps_dec->ps_out_buffer = &ps_dec_ip->s_out_buffer; ps_dec->u4_fmt_conv_cur_row = 0; ps_dec->u4_output_present = 0; ps_dec->s_disp_op.u4_error_code = 1; ps_dec->u4_fmt_conv_num_rows = FMT_CONV_NUM_ROWS; if(0 == ps_dec->u4_share_disp_buf && ps_dec->i4_decode_header == 0) { UWORD32 i; if((ps_dec->ps_out_buffer->u4_num_bufs == 0) || (ps_dec->ps_out_buffer->u4_num_bufs > IVD_VIDDEC_MAX_IO_BUFFERS)) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS; return IV_FAIL; } for(i = 0; i < ps_dec->ps_out_buffer->u4_num_bufs; i++) { if(ps_dec->ps_out_buffer->pu1_bufs[i] == NULL) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL; return IV_FAIL; } if(ps_dec->ps_out_buffer->u4_min_out_buf_size[i] == 0) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUF_SIZE; return IV_FAIL; } } } if(ps_dec->u4_total_frames_decoded >= NUM_FRAMES_LIMIT) { ps_dec_op->u4_error_code = ERROR_FRAME_LIMIT_OVER; return IV_FAIL; } /* ! */ ps_dec->u4_ts = ps_dec_ip->u4_ts; ps_dec_op->u4_error_code = 0; ps_dec_op->e_pic_type = -1; ps_dec_op->u4_output_present = 0; ps_dec_op->u4_frame_decoded_flag = 0; ps_dec->i4_frametype = -1; ps_dec->i4_content_type = -1; ps_dec->u4_slice_start_code_found = 0; /* In case the deocder is not in flush mode(in shared mode), then decoder has to pick up a buffer to write current frame. 
Check if a frame is available in such cases */ if(ps_dec->u1_init_dec_flag == 1 && ps_dec->u4_share_disp_buf == 1 && ps_dec->u1_flushfrm == 0) { UWORD32 i; WORD32 disp_avail = 0, free_id; /* Check if at least one buffer is available with the codec */ /* If not then return to application with error */ for(i = 0; i < ps_dec->u1_pic_bufs; i++) { if(0 == ps_dec->u4_disp_buf_mapping[i] || 1 == ps_dec->u4_disp_buf_to_be_freed[i]) { disp_avail = 1; break; } } if(0 == disp_avail) { /* If something is queued for display wait for that buffer to be returned */ ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); return (IV_FAIL); } while(1) { pic_buffer_t *ps_pic_buf; ps_pic_buf = (pic_buffer_t *)ih264_buf_mgr_get_next_free( (buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &free_id); if(ps_pic_buf == NULL) { UWORD32 i, display_queued = 0; /* check if any buffer was given for display which is not returned yet */ for(i = 0; i < (MAX_DISP_BUFS_NEW); i++) { if(0 != ps_dec->u4_disp_buf_mapping[i]) { display_queued = 1; break; } } /* If some buffer is queued for display, then codec has to singal an error and wait for that buffer to be returned. If nothing is queued for display then codec has ownership of all display buffers and it can reuse any of the existing buffers and continue decoding */ if(1 == display_queued) { /* If something is queued for display wait for that buffer to be returned */ ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); return (IV_FAIL); } } else { /* If the buffer is with display, then mark it as in use and then look for a buffer again */ if(1 == ps_dec->u4_disp_buf_mapping[free_id]) { ih264_buf_mgr_set_status( (buf_mgr_t *)ps_dec->pv_pic_buf_mgr, free_id, BUF_MGR_IO); } else { /** * Found a free buffer for present call. Release it now. * Will be again obtained later. 
*/ ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, free_id, BUF_MGR_IO); break; } } } } if(ps_dec->u1_flushfrm) { if(ps_dec->u1_init_dec_flag == 0) { /*Come out of flush mode and return*/ ps_dec->u1_flushfrm = 0; return (IV_FAIL); } ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); if(0 == ps_dec->s_disp_op.u4_error_code) { /* check output buffer size given by the application */ if(check_app_out_buf_size(ps_dec) != IV_SUCCESS) { ps_dec_op->u4_error_code= IVD_DISP_FRM_ZERO_OP_BUF_SIZE; return (IV_FAIL); } ps_dec->u4_fmt_conv_cur_row = 0; ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht; ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op), ps_dec->u4_fmt_conv_cur_row, ps_dec->u4_fmt_conv_num_rows); ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows; ps_dec->u4_output_present = 1; } ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op)); ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width; ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height; ps_dec_op->u4_new_seq = 0; ps_dec_op->u4_output_present = ps_dec->u4_output_present; ps_dec_op->u4_progressive_frame_flag = ps_dec->s_disp_op.u4_progressive_frame_flag; ps_dec_op->e_output_format = ps_dec->s_disp_op.e_output_format; ps_dec_op->s_disp_frm_buf = ps_dec->s_disp_op.s_disp_frm_buf; ps_dec_op->e4_fld_type = ps_dec->s_disp_op.e4_fld_type; ps_dec_op->u4_ts = ps_dec->s_disp_op.u4_ts; ps_dec_op->u4_disp_buf_id = ps_dec->s_disp_op.u4_disp_buf_id; /*In the case of flush ,since no frame is decoded set pic type as invalid*/ ps_dec_op->u4_is_ref_flag = -1; ps_dec_op->e_pic_type = IV_NA_FRAME; ps_dec_op->u4_frame_decoded_flag = 0; if(0 == ps_dec->s_disp_op.u4_error_code) { return (IV_SUCCESS); } else return (IV_FAIL); } if(ps_dec->u1_res_changed == 1) { /*if resolution has changed and all buffers have been flushed, reset decoder*/ ih264d_init_decoder(ps_dec); } ps_dec->u4_prev_nal_skipped = 0; ps_dec->u2_cur_mb_addr = 0; ps_dec->u2_total_mbs_coded = 0; ps_dec->u2_cur_slice_num = 0; ps_dec->cur_dec_mb_num = 0; ps_dec->cur_recon_mb_num = 0; ps_dec->u4_first_slice_in_pic = 1; ps_dec->u1_slice_header_done = 0; ps_dec->u1_dangling_field = 0; ps_dec->u4_dec_thread_created = 0; ps_dec->u4_bs_deblk_thread_created = 0; ps_dec->u4_cur_bs_mb_num = 0; ps_dec->u4_start_recon_deblk = 0; ps_dec->u4_sps_cnt_in_process = 0; DEBUG_THREADS_PRINTF(" Starting process call\n"); ps_dec->u4_pic_buf_got = 0; do { WORD32 buf_size; pu1_buf = (UWORD8*)ps_dec_ip->pv_stream_buffer + ps_dec_op->u4_num_bytes_consumed; u4_max_ofst = ps_dec_ip->u4_num_Bytes - ps_dec_op->u4_num_bytes_consumed; /* If dynamic bitstream buffer is not allocated and * header decode is done, then allocate dynamic bitstream buffer */ if((NULL == ps_dec->pu1_bits_buf_dynamic) && (ps_dec->i4_header_decoded & 1)) { WORD32 size; void *pv_buf; void *pv_mem_ctxt = ps_dec->pv_mem_ctxt; size = MAX(256000, ps_dec->u2_pic_wd * ps_dec->u2_pic_ht * 3 / 2); pv_buf = ps_dec->pf_aligned_alloc(pv_mem_ctxt, 128, size + EXTRA_BS_OFFSET); RETURN_IF((NULL == pv_buf), IV_FAIL); ps_dec->pu1_bits_buf_dynamic = pv_buf; ps_dec->u4_dynamic_bits_buf_size = size; } if(ps_dec->pu1_bits_buf_dynamic) { pu1_bitstrm_buf = ps_dec->pu1_bits_buf_dynamic; buf_size = ps_dec->u4_dynamic_bits_buf_size; } else { pu1_bitstrm_buf = ps_dec->pu1_bits_buf_static; buf_size = ps_dec->u4_static_bits_buf_size; } u4_next_is_aud = 0; buflen = ih264d_find_start_code(pu1_buf, 0, u4_max_ofst, &u4_length_of_start_code, &u4_next_is_aud); if(buflen == -1) buflen = 0; /* Ignore bytes beyond 
the allocated size of intermediate buffer */ /* Since 8 bytes are read ahead, ensure 8 bytes are free at the end of the buffer, which will be memset to 0 after emulation prevention */ buflen = MIN(buflen, buf_size - 8); bytes_consumed = buflen + u4_length_of_start_code; ps_dec_op->u4_num_bytes_consumed += bytes_consumed; { UWORD8 u1_firstbyte, u1_nal_ref_idc; if(ps_dec->i4_app_skip_mode == IVD_SKIP_B) { u1_firstbyte = *(pu1_buf + u4_length_of_start_code); u1_nal_ref_idc = (UWORD8)(NAL_REF_IDC(u1_firstbyte)); if(u1_nal_ref_idc == 0) { /*skip non reference frames*/ cur_slice_is_nonref = 1; continue; } else { if(1 == cur_slice_is_nonref) { /*We have encountered a referenced frame,return to app*/ ps_dec_op->u4_num_bytes_consumed -= bytes_consumed; ps_dec_op->e_pic_type = IV_B_FRAME; ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); ps_dec_op->u4_frame_decoded_flag = 0; ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t); /*signal the decode thread*/ ih264d_signal_decode_thread(ps_dec); /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } return (IV_FAIL); } } } } if(buflen) { memcpy(pu1_bitstrm_buf, pu1_buf + u4_length_of_start_code, buflen); /* Decoder may read extra 8 bytes near end of the frame */ if((buflen + 8) < buf_size) { memset(pu1_bitstrm_buf + buflen, 0, 8); } u4_first_start_code_found = 1; } else { /*start code not found*/ if(u4_first_start_code_found == 0) { /*no start codes found in current process call*/ ps_dec->i4_error_code = ERROR_START_CODE_NOT_FOUND; ps_dec_op->u4_error_code |= 1 << IVD_INSUFFICIENTDATA; if(ps_dec->u4_pic_buf_got == 0) { ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op); ps_dec_op->u4_error_code = ps_dec->i4_error_code; ps_dec_op->u4_frame_decoded_flag = 0; return (IV_FAIL); } else { ps_dec->u1_pic_decode_done = 1; continue; } } else { /* a start code has already been found earlier in the same process call*/ frame_data_left = 0; header_data_left = 0; continue; } } ps_dec->u4_return_to_app = 0; ret = ih264d_parse_nal_unit(dec_hdl, ps_dec_op, pu1_bitstrm_buf, buflen); if(ret != OK) { UWORD32 error = ih264d_map_error(ret); ps_dec_op->u4_error_code = error | ret; api_ret_value = IV_FAIL; if((ret == IVD_RES_CHANGED) || (ret == IVD_MEM_ALLOC_FAILED) || (ret == ERROR_UNAVAIL_PICBUF_T) || (ret == ERROR_UNAVAIL_MVBUF_T) || (ret == ERROR_INV_SPS_PPS_T) || (ret == IVD_DISP_FRM_ZERO_OP_BUF_SIZE)) { ps_dec->u4_slice_start_code_found = 0; break; } if((ret == ERROR_INCOMPLETE_FRAME) || (ret == ERROR_DANGLING_FIELD_IN_PIC)) { ps_dec_op->u4_num_bytes_consumed -= bytes_consumed; api_ret_value = IV_FAIL; break; } if(ret == ERROR_IN_LAST_SLICE_OF_PIC) { api_ret_value = IV_FAIL; break; } } if(ps_dec->u4_return_to_app) { /*We have encountered a referenced frame,return to app*/ ps_dec_op->u4_num_bytes_consumed -= bytes_consumed; ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); ps_dec_op->u4_frame_decoded_flag = 0; ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t); /*signal the decode thread*/ ih264d_signal_decode_thread(ps_dec); /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } return (IV_FAIL); } header_data_left = ((ps_dec->i4_decode_header == 1) && (ps_dec->i4_header_decoded != 3) && (ps_dec_op->u4_num_bytes_consumed < ps_dec_ip->u4_num_Bytes)); frame_data_left = (((ps_dec->i4_decode_header == 0) && 
((ps_dec->u1_pic_decode_done == 0) || (u4_next_is_aud == 1))) && (ps_dec_op->u4_num_bytes_consumed < ps_dec_ip->u4_num_Bytes)); } while(( header_data_left == 1)||(frame_data_left == 1)); if((ps_dec->u4_pic_buf_got == 1) && (ret != IVD_MEM_ALLOC_FAILED) && ps_dec->u2_total_mbs_coded < ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { WORD32 num_mb_skipped; WORD32 prev_slice_err; pocstruct_t temp_poc; WORD32 ret1; WORD32 ht_in_mbs; ht_in_mbs = ps_dec->u2_pic_ht >> (4 + ps_dec->ps_cur_slice->u1_field_pic_flag); num_mb_skipped = (ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; if(ps_dec->u4_first_slice_in_pic && (ps_dec->u4_pic_buf_got == 0)) prev_slice_err = 1; else prev_slice_err = 2; if(ps_dec->u4_first_slice_in_pic && (ps_dec->u2_total_mbs_coded == 0)) prev_slice_err = 1; ret1 = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, ps_dec->u1_nal_unit_type == IDR_SLICE_NAL, ps_dec->ps_cur_slice->u2_frame_num, &temp_poc, prev_slice_err); if((ret1 == ERROR_UNAVAIL_PICBUF_T) || (ret1 == ERROR_UNAVAIL_MVBUF_T) || (ret1 == ERROR_INV_SPS_PPS_T)) { ret = ret1; } } if((ret == IVD_RES_CHANGED) || (ret == IVD_MEM_ALLOC_FAILED) || (ret == ERROR_UNAVAIL_PICBUF_T) || (ret == ERROR_UNAVAIL_MVBUF_T) || (ret == ERROR_INV_SPS_PPS_T)) { /* signal the decode thread */ ih264d_signal_decode_thread(ps_dec); /* close deblock thread if it is not closed yet */ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } /* dont consume bitstream for change in resolution case */ if(ret == IVD_RES_CHANGED) { ps_dec_op->u4_num_bytes_consumed -= bytes_consumed; } return IV_FAIL; } if(ps_dec->u1_separate_parse) { /* If Format conversion is not complete, complete it here */ if(ps_dec->u4_num_cores == 2) { /*do deblocking of all mbs*/ if((ps_dec->u4_nmb_deblk == 0) &&(ps_dec->u4_start_recon_deblk == 1) && (ps_dec->ps_cur_sps->u1_mb_aff_flag == 0)) { UWORD32 u4_num_mbs,u4_max_addr; tfr_ctxt_t s_tfr_ctxt; tfr_ctxt_t *ps_tfr_cxt = &s_tfr_ctxt; pad_mgr_t *ps_pad_mgr = &ps_dec->s_pad_mgr; /*BS is done for all mbs while parsing*/ u4_max_addr = (ps_dec->u2_frm_wd_in_mbs * ps_dec->u2_frm_ht_in_mbs) - 1; ps_dec->u4_cur_bs_mb_num = u4_max_addr + 1; ih264d_init_deblk_tfr_ctxt(ps_dec, ps_pad_mgr, ps_tfr_cxt, ps_dec->u2_frm_wd_in_mbs, 0); u4_num_mbs = u4_max_addr - ps_dec->u4_cur_deblk_mb_num + 1; DEBUG_PERF_PRINTF("mbs left for deblocking= %d \n",u4_num_mbs); if(u4_num_mbs != 0) ih264d_check_mb_map_deblk(ps_dec, u4_num_mbs, ps_tfr_cxt,1); ps_dec->u4_start_recon_deblk = 0; } } /*signal the decode thread*/ ih264d_signal_decode_thread(ps_dec); /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } } DATA_SYNC(); if((ps_dec_op->u4_error_code & 0xff) != ERROR_DYNAMIC_RESOLUTION_NOT_SUPPORTED) { ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width; ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height; } if(ps_dec->i4_header_decoded != 3) { ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA); } if(ps_dec->i4_decode_header == 1 && ps_dec->i4_header_decoded != 3) { ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA); } if(ps_dec->u4_prev_nal_skipped) { /*We have encountered a referenced frame,return to app*/ ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); ps_dec_op->u4_frame_decoded_flag = 0; ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t); /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } return 
(IV_FAIL); } if((ps_dec->u4_pic_buf_got == 1) && (ERROR_DANGLING_FIELD_IN_PIC != i4_err_status)) { /* * For field pictures, set the bottom and top picture decoded u4_flag correctly. */ if(ps_dec->ps_cur_slice->u1_field_pic_flag) { if(1 == ps_dec->ps_cur_slice->u1_bottom_field_flag) { ps_dec->u1_top_bottom_decoded |= BOT_FIELD_ONLY; } else { ps_dec->u1_top_bottom_decoded |= TOP_FIELD_ONLY; } } else { ps_dec->u1_top_bottom_decoded = TOP_FIELD_ONLY | BOT_FIELD_ONLY; } /* if new frame in not found (if we are still getting slices from previous frame) * ih264d_deblock_display is not called. Such frames will not be added to reference /display */ if ((ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) == 0) { /* Calling Function to deblock Picture and Display */ ret = ih264d_deblock_display(ps_dec); } /*set to complete ,as we dont support partial frame decode*/ if(ps_dec->i4_header_decoded == 3) { ps_dec->u2_total_mbs_coded = ps_dec->ps_cur_sps->u2_max_mb_addr + 1; } /*Update the i4_frametype at the end of picture*/ if(ps_dec->ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL) { ps_dec->i4_frametype = IV_IDR_FRAME; } else if(ps_dec->i4_pic_type == B_SLICE) { ps_dec->i4_frametype = IV_B_FRAME; } else if(ps_dec->i4_pic_type == P_SLICE) { ps_dec->i4_frametype = IV_P_FRAME; } else if(ps_dec->i4_pic_type == I_SLICE) { ps_dec->i4_frametype = IV_I_FRAME; } else { H264_DEC_DEBUG_PRINT("Shouldn't come here\n"); } ps_dec->i4_content_type = ps_dec->ps_cur_slice->u1_field_pic_flag; ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded + 2; ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded - ps_dec->ps_cur_slice->u1_field_pic_flag; } /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } { /* In case the decoder is configured to run in low delay mode, * then get display buffer and then format convert. * Note in this mode, format conversion does not run paralelly in a thread and adds to the codec cycles */ if((IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode) && ps_dec->u1_init_dec_flag) { ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); if(0 == ps_dec->s_disp_op.u4_error_code) { ps_dec->u4_fmt_conv_cur_row = 0; ps_dec->u4_output_present = 1; } } ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op); /* If Format conversion is not complete, complete it here */ if(ps_dec->u4_output_present && (ps_dec->u4_fmt_conv_cur_row < ps_dec->s_disp_frame_info.u4_y_ht)) { ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht - ps_dec->u4_fmt_conv_cur_row; ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op), ps_dec->u4_fmt_conv_cur_row, ps_dec->u4_fmt_conv_num_rows); ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows; } ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op)); } if(ps_dec->i4_decode_header == 1 && (ps_dec->i4_header_decoded & 1) == 1) { ps_dec_op->u4_progressive_frame_flag = 1; if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid))) { if((0 == ps_dec->ps_sps->u1_frame_mbs_only_flag) && (0 == ps_dec->ps_sps->u1_mb_aff_flag)) ps_dec_op->u4_progressive_frame_flag = 0; } } if((TOP_FIELD_ONLY | BOT_FIELD_ONLY) == ps_dec->u1_top_bottom_decoded) { ps_dec->u1_top_bottom_decoded = 0; } /*--------------------------------------------------------------------*/ /* Do End of Pic processing. 
*/ /* Should be called only if frame was decoded in previous process call*/ /*--------------------------------------------------------------------*/ if(ps_dec->u4_pic_buf_got == 1) { if(1 == ps_dec->u1_last_pic_not_decoded) { ret = ih264d_end_of_pic_dispbuf_mgr(ps_dec); if(ret != OK) return ret; ret = ih264d_end_of_pic(ps_dec); if(ret != OK) return ret; } else { ret = ih264d_end_of_pic(ps_dec); if(ret != OK) return ret; } } /*Data memory barrier instruction,so that yuv write by the library is complete*/ DATA_SYNC(); H264_DEC_DEBUG_PRINT("The num bytes consumed: %d\n", ps_dec_op->u4_num_bytes_consumed); return api_ret_value; }
0
232,731
RenderFrameHostManager::RenderFrameHostManager( FrameTreeNode* frame_tree_node, RenderFrameHostDelegate* render_frame_delegate, RenderWidgetHostDelegate* render_widget_delegate, Delegate* delegate) : frame_tree_node_(frame_tree_node), delegate_(delegate), render_frame_delegate_(render_frame_delegate), render_widget_delegate_(render_widget_delegate), weak_factory_(this) { DCHECK(frame_tree_node_); }
0
332,998
static int xwma_read_header(AVFormatContext *s, AVFormatParameters *ap) { int64_t size, av_uninit(data_size); uint32_t dpds_table_size = 0; uint32_t *dpds_table = 0; unsigned int tag; AVIOContext *pb = s->pb; AVStream *st; XWMAContext *xwma = s->priv_data; int i; /* The following code is mostly copied from wav.c, with some * minor alterations. */ /* check RIFF header */ tag = avio_rl32(pb); if (tag != MKTAG('R', 'I', 'F', 'F')) return -1; avio_rl32(pb); /* file size */ tag = avio_rl32(pb); if (tag != MKTAG('X', 'W', 'M', 'A')) return -1; /* parse fmt header */ tag = avio_rl32(pb); if (tag != MKTAG('f', 'm', 't', ' ')) return -1; size = avio_rl32(pb); st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); ff_get_wav_header(pb, st->codec, size); st->need_parsing = AVSTREAM_PARSE_NONE; /* All xWMA files I have seen contained WMAv2 data. If there are files * using WMA Pro or some other codec, then we need to figure out the right * extradata for that. Thus, ask the user for feedback, but try to go on * anyway. */ if (st->codec->codec_id != CODEC_ID_WMAV2) { av_log(s, AV_LOG_WARNING, "unexpected codec (tag 0x04%x; id %d)\n", st->codec->codec_tag, st->codec->codec_id); av_log_ask_for_sample(s, NULL); } else { /* In all xWMA files I have seen, there is no extradata. But the WMA * codecs require extradata, so we provide our own fake extradata. * * First, check that there really was no extradata in the header. If * there was, then try to use, after asking the the user to provide a * sample of this unusual file. */ if (st->codec->extradata_size != 0) { /* Surprise, surprise: We *did* get some extradata. No idea * if it will work, but just go on and try it, after asking * the user for a sample. */ av_log(s, AV_LOG_WARNING, "unexpected extradata (%d bytes)\n", st->codec->extradata_size); av_log_ask_for_sample(s, NULL); } else { st->codec->extradata_size = 6; st->codec->extradata = av_mallocz(6 + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); /* setup extradata with our experimentally obtained value */ st->codec->extradata[4] = 31; } } /* set the sample rate */ av_set_pts_info(st, 64, 1, st->codec->sample_rate); /* parse the remaining RIFF chunks */ for (;;) { if (pb->eof_reached) return -1; /* read next chunk tag */ tag = avio_rl32(pb); size = avio_rl32(pb); if (tag == MKTAG('d', 'a', 't', 'a')) { /* We assume that the data chunk comes last. */ break; } else if (tag == MKTAG('d','p','d','s')) { /* Quoting the MSDN xWMA docs on the dpds chunk: "Contains the * decoded packet cumulative data size array, each element is the * number of bytes accumulated after the corresponding xWMA packet * is decoded in order" * * Each packet has size equal to st->codec->block_align, which in * all cases I saw so far was always 2230. Thus, we can use the * dpds data to compute a seeking index. */ /* Error out if there is more than one dpds chunk. */ if (dpds_table) { av_log(s, AV_LOG_ERROR, "two dpds chunks present\n"); return -1; } /* Compute the number of entries in the dpds chunk. */ if (size & 3) { /* Size should be divisible by four */ av_log(s, AV_LOG_WARNING, "dpds chunk size "PRId64" not divisible by 4\n", size); } dpds_table_size = size / 4; if (dpds_table_size == 0 || dpds_table_size >= INT_MAX / 4) { av_log(s, AV_LOG_ERROR, "dpds chunk size "PRId64" invalid\n", size); return -1; } /* Allocate some temporary storage to keep the dpds data around. * for processing later on. 
*/ dpds_table = av_malloc(dpds_table_size * sizeof(uint32_t)); if (!dpds_table) { return AVERROR(ENOMEM); } for (i = 0; i < dpds_table_size; ++i) { dpds_table[i] = avio_rl32(pb); size -= 4; } } avio_skip(pb, size); } /* Determine overall data length */ if (size < 0) return -1; if (!size) { xwma->data_end = INT64_MAX; } else xwma->data_end = avio_tell(pb) + size; if (dpds_table && dpds_table_size) { int64_t cur_pos; const uint32_t bytes_per_sample = (st->codec->channels * st->codec->bits_per_coded_sample) >> 3; /* Estimate the duration from the total number of output bytes. */ const uint64_t total_decoded_bytes = dpds_table[dpds_table_size - 1]; st->duration = total_decoded_bytes / bytes_per_sample; /* Use the dpds data to build a seek table. We can only do this after * we know the offset to the data chunk, as we need that to determine * the actual offset to each input block. * Note: If we allowed ourselves to assume that the data chunk always * follows immediately after the dpds block, we could of course guess * the data block's start offset already while reading the dpds chunk. * I decided against that, just in case other chunks ever are * discovered. */ cur_pos = avio_tell(pb); for (i = 0; i < dpds_table_size; ++i) { /* From the number of output bytes that would accumulate in the * output buffer after decoding the first (i+1) packets, we compute * an offset / timestamp pair. */ av_add_index_entry(st, cur_pos + (i+1) * st->codec->block_align, /* pos */ dpds_table[i] / bytes_per_sample, /* timestamp */ st->codec->block_align, /* size */ 0, /* duration */ AVINDEX_KEYFRAME); } } else if (st->codec->bit_rate) { /* No dpds chunk was present (or only an empty one), so estimate * the total duration using the average bits per sample and the * total data length. */ st->duration = (size<<3) * st->codec->sample_rate / st->codec->bit_rate; } av_free(dpds_table); return 0; }
1
279,669
void ChromeContentBrowserClient::RequestFileSystemPermissionOnUIThread( int render_process_id, int render_frame_id, const GURL& url, bool allowed_by_default, const base::Callback<void(bool)>& callback) { DCHECK_CURRENTLY_ON(BrowserThread::UI); extensions::WebViewPermissionHelper* web_view_permission_helper = extensions::WebViewPermissionHelper::FromFrameID( render_process_id, render_frame_id); web_view_permission_helper->RequestFileSystemPermission(url, allowed_by_default, callback); }
0
50,560
static int ext4_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { handle_t *handle; struct inode *inode; int err, retries = 0; if (!new_valid_dev(rdev)) return -EINVAL; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &ext4_special_inode_operations; err = ext4_add_nondir(handle, dentry, inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; }
0
149,772
u64 dma_get_required_mask(struct device *dev) { u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); u64 mask; if (!high_totalram) { /* convert to mask just covering totalram */ low_totalram = (1 << (fls(low_totalram) - 1)); low_totalram += low_totalram - 1; mask = low_totalram; } else { high_totalram = (1 << (fls(high_totalram) - 1)); high_totalram += high_totalram - 1; mask = (((u64)high_totalram) << 32) + 0xffffffff; } return mask; }
0
373,266
struct socket_context *tls_init_client(struct socket_context *socket_ctx, struct tevent_fd *fde, const char *ca_path) { struct tls_context *tls; int ret = 0; const int cert_type_priority[] = { GNUTLS_CRT_X509, GNUTLS_CRT_OPENPGP, 0 }; struct socket_context *new_sock; NTSTATUS nt_status; nt_status = socket_create_with_ops(socket_ctx, &tls_socket_ops, &new_sock, SOCKET_TYPE_STREAM, socket_ctx->flags | SOCKET_FLAG_ENCRYPT); if (!NT_STATUS_IS_OK(nt_status)) { return NULL; } tls = talloc(new_sock, struct tls_context); if (tls == NULL) return NULL; tls->socket = socket_ctx; talloc_steal(tls, socket_ctx); tls->fde = fde; new_sock->private_data = tls; gnutls_global_init(); gnutls_certificate_allocate_credentials(&tls->xcred); gnutls_certificate_set_x509_trust_file(tls->xcred, ca_path, GNUTLS_X509_FMT_PEM); TLSCHECK(gnutls_init(&tls->session, GNUTLS_CLIENT)); TLSCHECK(gnutls_set_default_priority(tls->session)); gnutls_certificate_type_set_priority(tls->session, cert_type_priority); TLSCHECK(gnutls_credentials_set(tls->session, GNUTLS_CRD_CERTIFICATE, tls->xcred)); talloc_set_destructor(tls, tls_destructor); gnutls_transport_set_ptr(tls->session, (gnutls_transport_ptr)tls); gnutls_transport_set_pull_function(tls->session, (gnutls_pull_func)tls_pull); gnutls_transport_set_push_function(tls->session, (gnutls_push_func)tls_push); #if GNUTLS_VERSION_MAJOR < 3 gnutls_transport_set_lowat(tls->session, 0); #endif tls->tls_detect = false; tls->output_pending = false; tls->done_handshake = false; tls->have_first_byte = false; tls->tls_enabled = true; tls->interrupted = false; new_sock->state = SOCKET_STATE_CLIENT_CONNECTED; return new_sock; failed: DEBUG(0,("TLS init connection failed - %s\n", gnutls_strerror(ret))); tls->tls_enabled = false; return new_sock; }
0
212,575
PHP_METHOD(Phar, unlinkArchive) { char *fname, *error, *zname, *arch, *entry; size_t fname_len; int zname_len, arch_len, entry_len; phar_archive_data *phar; if (zend_parse_parameters(ZEND_NUM_ARGS(), "p", &fname, &fname_len) == FAILURE) { RETURN_FALSE; } if (!fname_len) { zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"\""); return; } if (FAILURE == phar_open_from_filename(fname, fname_len, NULL, 0, REPORT_ERRORS, &phar, &error)) { if (error) { zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"%s\": %s", fname, error); efree(error); } else { zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"%s\"", fname); } return; } zname = (char*)zend_get_executed_filename(); zname_len = strlen(zname); if (zname_len > 7 && !memcmp(zname, "phar://", 7) && SUCCESS == phar_split_fname(zname, zname_len, &arch, &arch_len, &entry, &entry_len, 2, 0)) { if (arch_len == fname_len && !memcmp(arch, fname, arch_len)) { zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" cannot be unlinked from within itself", fname); efree(arch); efree(entry); return; } efree(arch); efree(entry); } if (phar->is_persistent) { zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" is in phar.cache_list, cannot unlinkArchive()", fname); return; } if (phar->refcount) { zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" has open file handles or objects. fclose() all file handles, and unset() all objects prior to calling unlinkArchive()", fname); return; } fname = estrndup(phar->fname, phar->fname_len); /* invalidate phar cache */ PHAR_G(last_phar) = NULL; PHAR_G(last_phar_name) = PHAR_G(last_alias) = NULL; phar_archive_delref(phar); unlink(fname); efree(fname); RETURN_TRUE; }
0
192,367
handle_group_features_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofpbuf *msg; msg = ofputil_encode_group_features_reply(&p->ogf, request); if (msg) { ofconn_send_reply(ofconn, msg); } return 0; }
0
340,017
static int init_er(MpegEncContext *s) { ERContext *er = &s->er; int mb_array_size = s->mb_height * s->mb_stride; int i; er->avctx = s->avctx; er->mecc = &s->mecc; er->mb_index2xy = s->mb_index2xy; er->mb_num = s->mb_num; er->mb_width = s->mb_width; er->mb_height = s->mb_height; er->mb_stride = s->mb_stride; er->b8_stride = s->b8_stride; er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride); er->error_status_table = av_mallocz(mb_array_size); if (!er->er_temp_buffer || !er->error_status_table) goto fail; er->mbskip_table = s->mbskip_table; er->mbintra_table = s->mbintra_table; for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++) er->dc_val[i] = s->dc_val[i]; er->decode_mb = mpeg_er_decode_mb; er->opaque = s; return 0; fail: av_freep(&er->er_temp_buffer); av_freep(&er->error_status_table); return AVERROR(ENOMEM); }
0
70,945
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, gfp_t gfp_mask) { struct page_frag_cache *nc; unsigned long flags; struct sk_buff *skb; bool pfmemalloc; void *data; len += NET_SKB_PAD; if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); if (!skb) goto skb_fail; goto skb_success; } len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); len = SKB_DATA_ALIGN(len); if (sk_memalloc_socks()) gfp_mask |= __GFP_MEMALLOC; local_irq_save(flags); nc = this_cpu_ptr(&netdev_alloc_cache); data = page_frag_alloc(nc, len, gfp_mask); pfmemalloc = nc->pfmemalloc; local_irq_restore(flags); if (unlikely(!data)) return NULL; skb = __build_skb(data, len); if (unlikely(!skb)) { skb_free_frag(data); return NULL; } /* use OR instead of assignment to avoid clearing of bits in mask */ if (pfmemalloc) skb->pfmemalloc = 1; skb->head_frag = 1; skb_success: skb_reserve(skb, NET_SKB_PAD); skb->dev = dev; skb_fail: return skb; }
0
215,395
xmlSkipBlankChars(xmlParserCtxtPtr ctxt) { int res = 0; /* * It's Okay to use CUR/NEXT here since all the blanks are on * the ASCII range. */ if ((ctxt->inputNr == 1) && (ctxt->instate != XML_PARSER_DTD)) { const xmlChar *cur; /* * if we are in the document content, go really fast */ cur = ctxt->input->cur; while (IS_BLANK_CH(*cur)) { if (*cur == '\n') { ctxt->input->line++; ctxt->input->col = 1; } else { ctxt->input->col++; } cur++; res++; if (*cur == 0) { ctxt->input->cur = cur; xmlParserInputGrow(ctxt->input, INPUT_CHUNK); cur = ctxt->input->cur; } } ctxt->input->cur = cur; } else { int cur; do { cur = CUR; while ((IS_BLANK_CH(cur) && /* CHECKED tstblanks.xml */ (ctxt->instate != XML_PARSER_EOF))) { NEXT; cur = CUR; res++; } while ((cur == 0) && (ctxt->inputNr > 1) && (ctxt->instate != XML_PARSER_COMMENT)) { xmlPopInput(ctxt); cur = CUR; } /* * Need to handle support of entities branching here */ if (*ctxt->input->cur == '%') xmlParserHandlePEReference(ctxt); } while ((IS_BLANK(cur)) && /* CHECKED tstblanks.xml */ (ctxt->instate != XML_PARSER_EOF)); } return(res); }
0
119,057
nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, void *arg) { if (!cstate->save_fh.fh_dentry) return nfserr_restorefh; fh_dup2(&cstate->current_fh, &cstate->save_fh); if (HAS_STATE_ID(cstate, SAVED_STATE_ID_FLAG)) { memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t)); SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); } return nfs_ok; }
0
139,705
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_pathconf *pathconf) { struct nfs4_pathconf_arg args = { .fh = fhandle, .bitmask = server->attr_bitmask, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], .rpc_argp = &args, .rpc_resp = pathconf, }; /* None of the pathconf attributes are mandatory to implement */ if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { memset(pathconf, 0, sizeof(*pathconf)); return 0; } nfs_fattr_init(pathconf->fattr); return rpc_call_sync(server->client, &msg, 0); }
0
95,015
const Tensor* CreateQuantizedFlatbufferTensor(int size) { using flatbuffers::Offset; flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); const Offset<QuantizationParameters> quant_params = CreateQuantizationParameters( *builder, /*min=*/builder->CreateVector<float>({0.1f}), /*max=*/builder->CreateVector<float>({0.2f}), /*scale=*/builder->CreateVector<float>({0.3f}), /*zero_point=*/builder->CreateVector<int64_t>({100ll})); constexpr size_t tensor_shape_size = 1; const int32_t tensor_shape[tensor_shape_size] = {size}; const Offset<Tensor> tensor_offset = CreateTensor( *builder, builder->CreateVector(tensor_shape, tensor_shape_size), TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params, false); builder->Finish(tensor_offset); void* tensor_pointer = builder->GetBufferPointer(); const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer); return tensor; }
0
347,250
static SEND_DCC_REC *dcc_send_create(IRC_SERVER_REC *server, CHAT_DCC_REC *chat, const char *nick, const char *arg) { SEND_DCC_REC *dcc; dcc = g_new0(SEND_DCC_REC, 1); dcc->orig_type = module_get_uniq_id_str("DCC", "GET"); dcc->type = module_get_uniq_id_str("DCC", "SEND"); dcc->fhandle = -1; dcc->queue = -1; dcc_init_rec(DCC(dcc), server, chat, nick, arg); return dcc; }
1
401,033
inline word* SegmentBuilder::getPtrUnchecked(SegmentWordCount offset) { return const_cast<word*>(ptr.begin() + offset); }
0
473,124
date_s__httpdate(int argc, VALUE *argv, VALUE klass) { VALUE str, opt; rb_scan_args(argc, argv, "1:", &str, &opt); check_limit(str, opt); return date__httpdate(str); }
0
456,440
static void con_start(struct tty_struct *tty) { int console_num; if (!tty) return; console_num = tty->index; if (!vc_cons_allocated(console_num)) return; vt_kbd_con_start(console_num); }
0
119,619
void ctap_store_rk(int index,CTAP_residentKey * rk) { ctap_overwrite_rk(index, rk); }
0
333,992
static void decode_band_structure(GetBitContext *gbc, int blk, int eac3, int ecpl, int start_subband, int end_subband, const uint8_t *default_band_struct, int *num_bands, uint8_t *band_sizes) { int subbnd, bnd, n_subbands, n_bands=0; uint8_t bnd_sz[22]; uint8_t coded_band_struct[22]; const uint8_t *band_struct; n_subbands = end_subband - start_subband; /* decode band structure from bitstream or use default */ if (!eac3 || get_bits1(gbc)) { for (subbnd = 0; subbnd < n_subbands - 1; subbnd++) { coded_band_struct[subbnd] = get_bits1(gbc); } band_struct = coded_band_struct; } else if (!blk) { band_struct = &default_band_struct[start_subband+1]; } else { /* no change in band structure */ return; } /* calculate number of bands and band sizes based on band structure. note that the first 4 subbands in enhanced coupling span only 6 bins instead of 12. */ if (num_bands || band_sizes ) { n_bands = n_subbands; bnd_sz[0] = ecpl ? 6 : 12; for (bnd = 0, subbnd = 1; subbnd < n_subbands; subbnd++) { int subbnd_size = (ecpl && subbnd < 4) ? 6 : 12; if (band_struct[subbnd - 1]) { n_bands--; bnd_sz[bnd] += subbnd_size; } else { bnd_sz[++bnd] = subbnd_size; } } } /* set optional output params */ if (num_bands) *num_bands = n_bands; if (band_sizes) memcpy(band_sizes, bnd_sz, n_bands); }
1
121,114
int32 JavascriptArray::HeadSegmentIndexOfHelper(Var search, uint32 &fromIndex, uint32 toIndex, bool includesAlgorithm, ScriptContext * scriptContext) { Assert(Is(GetTypeId()) && !JavascriptNativeArray::Is(GetTypeId())); if (!HasNoMissingValues() || fromIndex >= GetHead()->length) { return -1; } bool isSearchTaggedInt = TaggedInt::Is(search); // We need to cast head segment to SparseArraySegment<Var> to have access to GetElement (onSparseArraySegment<T>). Because there are separate overloads of this // virtual method on JavascriptNativeIntArray and JavascriptNativeFloatArray, we know this version of this method will only be called for true JavascriptArray, and not for // either of the derived native arrays, so the elements of each segment used here must be Vars. Hence, the cast is safe. SparseArraySegment<Var>* head = static_cast<SparseArraySegment<Var>*>(GetHead()); uint32 toIndexTrimmed = toIndex <= head->length ? toIndex : head->length; for (uint32 i = fromIndex; i < toIndexTrimmed; i++) { Var element = head->GetElement(i); if (isSearchTaggedInt && TaggedInt::Is(element)) { if (search == element) { return i; } } else if (includesAlgorithm && JavascriptConversion::SameValueZero(element, search)) { //Array.prototype.includes return i; } else if (JavascriptOperators::StrictEqual(element, search, scriptContext)) { //Array.prototype.indexOf return i; } } // Element not found in the head segment. Keep looking only if the range of indices extends past // the head segment. fromIndex = toIndex > GetHead()->length ? GetHead()->length : -1; return -1; }
0
90,656
static Jsi_OpCodes *code_efinal(jsi_Pstate *p, jsi_Pline *line) { JSI_NEW_CODESLN(0,OP_EFINAL, 0); }
0
373,838
void markReset( const char * toMark = 0) { if( toMark == 0 ) toMark = mark; verify( toMark ); nextjsobj = toMark; }
0
52,751
void proto_unregister(struct proto *prot) { mutex_lock(&proto_list_mutex); release_proto_idx(prot); list_del(&prot->node); mutex_unlock(&proto_list_mutex); kmem_cache_destroy(prot->slab); prot->slab = NULL; req_prot_cleanup(prot->rsk_prot); if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { kmem_cache_destroy(prot->twsk_prot->twsk_slab); kfree(prot->twsk_prot->twsk_slab_name); prot->twsk_prot->twsk_slab = NULL; } }
0
471,789
unsigned long listTypeLength(const robj *subject) { if (subject->encoding == OBJ_ENCODING_QUICKLIST) { return quicklistCount(subject->ptr); } else { serverPanic("Unknown list encoding"); } }
0
304,724
void JavascriptNativeIntArray::ExtractSnapObjectDataInto(TTD::NSSnapObjects::SnapObject* objData, TTD::SlabAllocator& alloc) { TTD::NSSnapObjects::SnapArrayInfo<int32>* sai = TTD::NSSnapObjects::ExtractArrayValues<int32>(this, alloc); TTD::NSSnapObjects::StdExtractSetKindSpecificInfo<TTD::NSSnapObjects::SnapArrayInfo<int32>*, TTD::NSSnapObjects::SnapObjectType::SnapNativeIntArrayObject>(objData, sai); }
0
488,030
NTSTATUS refuse_symlink_fsp(const files_struct *fsp) { if (!VALID_STAT(fsp->fsp_name->st)) { return NT_STATUS_ACCESS_DENIED; } if (S_ISLNK(fsp->fsp_name->st.st_ex_mode)) { return NT_STATUS_ACCESS_DENIED; } if (fsp_get_pathref_fd(fsp) == -1) { return NT_STATUS_ACCESS_DENIED; } return NT_STATUS_OK; }
0
158,588
int DNS::GetIP6(const char *name) { DNSHeader h; int id; int length; if ((length = this->MakePayload(name, DNS_QUERY_AAAA, 1, (unsigned char*)&h.payload)) == -1) return -1; DNSRequest* req = this->AddQuery(&h, id, name); if ((!req) || (req->SendRequests(&h, length, DNS_QUERY_AAAA) == -1)) return -1; return id; }
0
338,112
static void gen_spr_book3s_pmu_sup(CPUPPCState *env) { spr_register(env, SPR_POWER_MMCR0, "MMCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_MMCR1, "MMCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_MMCRA, "MMCRA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC1, "PMC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC2, "PMC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC3, "PMC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC4, "PMC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_SIAR, "SIAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_SDAR, "SDAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); }
1
124,540
MakeValue(typename WCharHelper<wchar_t, Char>::Supported value) { int_value = value; }
0
194,990
void QuicClientPromisedInfo::OnPromiseHeaders(const SpdyHeaderBlock& headers) { SpdyHeaderBlock::const_iterator it = headers.find(kHttp2MethodHeader); if (it == headers.end()) { QUIC_DVLOG(1) << "Promise for stream " << id_ << " has no method"; Reset(QUIC_INVALID_PROMISE_METHOD); return; } if (!(it->second == "GET" || it->second == "HEAD")) { QUIC_DVLOG(1) << "Promise for stream " << id_ << " has invalid method " << it->second; Reset(QUIC_INVALID_PROMISE_METHOD); return; } if (!SpdyUtils::UrlIsValid(headers)) { QUIC_DVLOG(1) << "Promise for stream " << id_ << " has invalid URL " << url_; Reset(QUIC_INVALID_PROMISE_URL); return; } if (!session_->IsAuthorized(SpdyUtils::GetHostNameFromHeaderBlock(headers))) { Reset(QUIC_UNAUTHORIZED_PROMISE_URL); return; } request_headers_.reset(new SpdyHeaderBlock(headers.Clone())); }
0
190,193
GpuProcessHost::~GpuProcessHost() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (in_process_gpu_thread_) DCHECK(process_); SendOutstandingReplies(EstablishChannelStatus::GPU_HOST_INVALID); #if defined(OS_MACOSX) ca_transaction_gpu_coordinator_->HostWillBeDestroyed(); ca_transaction_gpu_coordinator_ = nullptr; #endif if (status_ == UNKNOWN) { RunRequestGPUInfoCallbacks(gpu::GPUInfo()); } else { DCHECK(request_gpu_info_callbacks_.empty()); } while (!queued_messages_.empty()) { delete queued_messages_.front(); queued_messages_.pop(); } if (g_gpu_process_hosts[kind_] == this) g_gpu_process_hosts[kind_] = nullptr; #if defined(OS_ANDROID) UMA_HISTOGRAM_COUNTS_100("GPU.AtExitSurfaceCount", gpu::GpuSurfaceTracker::Get()->GetSurfaceCount()); #endif std::string message; bool block_offscreen_contexts = true; if (!in_process_ && process_launched_) { ChildProcessTerminationInfo info = process_->GetTerminationInfo(false /* known_dead */); UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessTerminationStatus2", ConvertToGpuTerminationStatus(info.status), GpuTerminationStatus::MAX_ENUM); if (info.status == base::TERMINATION_STATUS_NORMAL_TERMINATION || info.status == base::TERMINATION_STATUS_ABNORMAL_TERMINATION || info.status == base::TERMINATION_STATUS_PROCESS_CRASHED) { base::UmaHistogramSparse("GPU.GPUProcessExitCode", std::max(0, std::min(100, info.exit_code))); } switch (info.status) { case base::TERMINATION_STATUS_NORMAL_TERMINATION: #if defined(OS_ANDROID) block_offscreen_contexts = false; #endif message = "The GPU process exited normally. Everything is okay."; break; case base::TERMINATION_STATUS_ABNORMAL_TERMINATION: message = base::StringPrintf("The GPU process exited with code %d.", info.exit_code); break; case base::TERMINATION_STATUS_PROCESS_WAS_KILLED: message = "You killed the GPU process! Why?"; break; #if defined(OS_CHROMEOS) case base::TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM: message = "The GUP process was killed due to out of memory."; break; #endif case base::TERMINATION_STATUS_PROCESS_CRASHED: message = "The GPU process crashed!"; break; case base::TERMINATION_STATUS_LAUNCH_FAILED: message = "The GPU process failed to start!"; break; default: break; } } if (block_offscreen_contexts) BlockLiveOffscreenContexts(); BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::BindOnce(&OnGpuProcessHostDestroyedOnUI, host_id_, message)); }
0
476,379
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info) { if (address->type() == T_LONG) { address = new LIR_Address(address->base(), address->index(), address->scale(), address->disp(), T_DOUBLE); // Transfer the value atomically by using FP moves. This means // the value has to be moved between CPU and FPU registers. In // SSE0 and SSE1 mode it has to be moved through spill slot but in // SSE2+ mode it can be moved directly. LIR_Opr temp_double = new_register(T_DOUBLE); __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info); __ volatile_move(temp_double, result, T_LONG); if (UseSSE < 2) { // no spill slot needed in SSE2 mode because xmm->cpu register move is possible set_vreg_flag(result, must_start_in_memory); } } else { __ load(address, result, info); } }
0
432,555
bgp_rx_packet(struct bgp_conn *conn, byte *pkt, unsigned len) { byte type = pkt[18]; DBG("BGP: Got packet %02x (%d bytes)\n", type, len); if (conn->bgp->p.mrtdump & MD_MESSAGES) bgp_dump_message(conn, pkt, len); switch (type) { case PKT_OPEN: return bgp_rx_open(conn, pkt, len); case PKT_UPDATE: return bgp_rx_update(conn, pkt, len); case PKT_NOTIFICATION: return bgp_rx_notification(conn, pkt, len); case PKT_KEEPALIVE: return bgp_rx_keepalive(conn); case PKT_ROUTE_REFRESH: return bgp_rx_route_refresh(conn, pkt, len); default: bgp_error(conn, 1, 3, pkt+18, 1); } }
0
384,472
_asn1_check_identifier (asn1_node node) { asn1_node p, p2; char name2[ASN1_MAX_NAME_SIZE * 2 + 2]; if (node == NULL) return ASN1_ELEMENT_NOT_FOUND; p = node; while (p) { if (p->value && type_field (p->type) == ASN1_ETYPE_IDENTIFIER) { _asn1_str_cpy (name2, sizeof (name2), node->name); _asn1_str_cat (name2, sizeof (name2), "."); _asn1_str_cat (name2, sizeof (name2), (char *) p->value); p2 = asn1_find_node (node, name2); if (p2 == NULL) { if (p->value) _asn1_strcpy (_asn1_identifierMissing, p->value); else _asn1_strcpy (_asn1_identifierMissing, "(null)"); return ASN1_IDENTIFIER_NOT_FOUND; } } else if ((type_field (p->type) == ASN1_ETYPE_OBJECT_ID) && (p->type & CONST_DEFAULT)) { p2 = p->down; if (p2 && (type_field (p2->type) == ASN1_ETYPE_DEFAULT)) { _asn1_str_cpy (name2, sizeof (name2), node->name); _asn1_str_cat (name2, sizeof (name2), "."); _asn1_str_cat (name2, sizeof (name2), (char *) p2->value); _asn1_strcpy (_asn1_identifierMissing, p2->value); p2 = asn1_find_node (node, name2); if (!p2 || (type_field (p2->type) != ASN1_ETYPE_OBJECT_ID) || !(p2->type & CONST_ASSIGN)) return ASN1_IDENTIFIER_NOT_FOUND; else _asn1_identifierMissing[0] = 0; } } else if ((type_field (p->type) == ASN1_ETYPE_OBJECT_ID) && (p->type & CONST_ASSIGN)) { p2 = p->down; if (p2 && (type_field (p2->type) == ASN1_ETYPE_CONSTANT)) { if (p2->value && !isdigit (p2->value[0])) { _asn1_str_cpy (name2, sizeof (name2), node->name); _asn1_str_cat (name2, sizeof (name2), "."); _asn1_str_cat (name2, sizeof (name2), (char *) p2->value); _asn1_strcpy (_asn1_identifierMissing, p2->value); p2 = asn1_find_node (node, name2); if (!p2 || (type_field (p2->type) != ASN1_ETYPE_OBJECT_ID) || !(p2->type & CONST_ASSIGN)) return ASN1_IDENTIFIER_NOT_FOUND; else _asn1_identifierMissing[0] = 0; } } } if (p->down) { p = p->down; } else if (p->right) p = p->right; else { while (1) { p = _asn1_get_up (p); if (p == node) { p = NULL; break; } if (p->right) { p = p->right; break; } } } } return ASN1_SUCCESS; }
0
171,487
const char* AutofillDialogViews::NotificationArea::GetClassName() const { return kNotificationAreaClassName; }
0
150,098
void testToStringCharsRequired() { TEST_ASSERT(testToStringCharsRequiredHelper(L"http://www.example.com/")); TEST_ASSERT(testToStringCharsRequiredHelper(L"http://www.example.com:80/")); TEST_ASSERT(testToStringCharsRequiredHelper(L"http://user:pass@www.example.com/")); TEST_ASSERT(testToStringCharsRequiredHelper(L"http://www.example.com/index.html")); TEST_ASSERT(testToStringCharsRequiredHelper(L"http://www.example.com/?abc")); TEST_ASSERT(testToStringCharsRequiredHelper(L"http://www.example.com/#def")); TEST_ASSERT(testToStringCharsRequiredHelper(L"http://www.example.com/?abc#def")); TEST_ASSERT(testToStringCharsRequiredHelper(L"/test")); TEST_ASSERT(testToStringCharsRequiredHelper(L"test")); }
0
354,519
static int dev_ifconf(unsigned int fd, unsigned int cmd, unsigned long arg) { struct ifconf32 ifc32; struct ifconf ifc; struct ifconf __user *uifc; struct ifreq32 __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, compat_ptr(arg), sizeof(struct ifconf32))) return -EFAULT; if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len =((ifc32.ifc_len / sizeof (struct ifreq32)) + 1) * sizeof (struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof (struct ifreq32)) { if (copy_in_user(ifr, ifr32, sizeof(struct ifreq32))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = sys_ioctl (fd, SIOCGIFCONF, (unsigned long)uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof (struct ifreq32) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof (struct ifreq32), j += sizeof (struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof (struct ifreq32))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. */ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct ifreq32)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(compat_ptr(arg), &ifc32, sizeof(struct ifconf32))) return -EFAULT; return 0; }
0
82,902
Variant HHVM_FUNCTION(mcrypt_enc_get_algorithms_name, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } char *name = mcrypt_enc_get_algorithms_name(pm->m_td); String ret(name, CopyString); mcrypt_free(name); return ret; }
0
141,867
static void row_dim_write(zval *object, zval *member, zval *value TSRMLS_DC) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "This PDORow is not from a writable result set");
0
200,140
void RenderThreadImpl::OnCreateNewView(const ViewMsg_New_Params& params) { EnsureWebKitInitialized(); RenderViewImpl::Create( params.parent_window, MSG_ROUTING_NONE, params.renderer_preferences, params.web_preferences, new SharedRenderViewCounter(0), params.view_id, params.surface_id, params.session_storage_namespace_id, params.frame_name, params.next_page_id, params.screen_info, params.guest); }
0
253,946
void FS_WriteFile( const char *qpath, const void *buffer, int size ) { fileHandle_t f; if ( !fs_searchpaths ) { Com_Error( ERR_FATAL, "Filesystem call made without initialization" ); } if ( !qpath || !buffer ) { Com_Error( ERR_FATAL, "FS_WriteFile: NULL parameter" ); } f = FS_FOpenFileWrite( qpath ); if ( !f ) { Com_Printf( "Failed to open %s\n", qpath ); return; } FS_Write( buffer, size, f ); FS_FCloseFile( f ); }
0
481,901
static void caps_reset_bit(int nr) { uint64_t mask = 1LLU << nr; filter &= ~mask; }
0
436,910
static void fts3UpdateDocTotals( int *pRC, /* The result code */ Fts3Table *p, /* Table being updated */ u32 *aSzIns, /* Size increases */ u32 *aSzDel, /* Size decreases */ int nChng /* Change in the number of documents */ ){ char *pBlob; /* Storage for BLOB written into %_stat */ int nBlob; /* Size of BLOB written into %_stat */ u32 *a; /* Array of integers that becomes the BLOB */ sqlite3_stmt *pStmt; /* Statement for reading and writing */ int i; /* Loop counter */ int rc; /* Result code from subfunctions */ const int nStat = p->nColumn+2; if( *pRC ) return; a = sqlite3_malloc64( (sizeof(u32)+10)*(sqlite3_int64)nStat ); if( a==0 ){ *pRC = SQLITE_NOMEM; return; } pBlob = (char*)&a[nStat]; rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0); if( rc ){ sqlite3_free(a); *pRC = rc; return; } sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); if( sqlite3_step(pStmt)==SQLITE_ROW ){ fts3DecodeIntArray(nStat, a, sqlite3_column_blob(pStmt, 0), sqlite3_column_bytes(pStmt, 0)); }else{ memset(a, 0, sizeof(u32)*(nStat) ); } rc = sqlite3_reset(pStmt); if( rc!=SQLITE_OK ){ sqlite3_free(a); *pRC = rc; return; } if( nChng<0 && a[0]<(u32)(-nChng) ){ a[0] = 0; }else{ a[0] += nChng; } for(i=0; i<p->nColumn+1; i++){ u32 x = a[i+1]; if( x+aSzIns[i] < aSzDel[i] ){ x = 0; }else{ x = x + aSzIns[i] - aSzDel[i]; } a[i+1] = x; } fts3EncodeIntArray(nStat, a, pBlob, &nBlob); rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0); if( rc ){ sqlite3_free(a); *pRC = rc; return; } sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, SQLITE_STATIC); sqlite3_step(pStmt); *pRC = sqlite3_reset(pStmt); sqlite3_bind_null(pStmt, 2); sqlite3_free(a); }
0
430,196
int htp_hdr_keycb(llhttp_t *htp, const char *data, size_t len) { auto upstream = static_cast<HttpsUpstream *>(htp->data); auto downstream = upstream->get_downstream(); auto &req = downstream->request(); auto &httpconf = get_config()->http; if (req.fs.buffer_size() + len > httpconf.request_header_field_buffer) { if (LOG_ENABLED(INFO)) { ULOG(INFO, upstream) << "Too large header block size=" << req.fs.buffer_size() + len; } if (downstream->get_request_state() == DownstreamState::INITIAL) { downstream->set_request_state( DownstreamState::HTTP1_REQUEST_HEADER_TOO_LARGE); } llhttp_set_error_reason(htp, "too large header"); return HPE_USER; } if (downstream->get_request_state() == DownstreamState::INITIAL) { if (req.fs.header_key_prev()) { req.fs.append_last_header_key(data, len); } else { if (req.fs.num_fields() >= httpconf.max_request_header_fields) { if (LOG_ENABLED(INFO)) { ULOG(INFO, upstream) << "Too many header field num=" << req.fs.num_fields() + 1; } downstream->set_request_state( DownstreamState::HTTP1_REQUEST_HEADER_TOO_LARGE); llhttp_set_error_reason(htp, "too many headers"); return HPE_USER; } req.fs.alloc_add_header_name(StringRef{data, len}); } } else { // trailer part if (req.fs.trailer_key_prev()) { req.fs.append_last_trailer_key(data, len); } else { if (req.fs.num_fields() >= httpconf.max_request_header_fields) { if (LOG_ENABLED(INFO)) { ULOG(INFO, upstream) << "Too many header field num=" << req.fs.num_fields() + 1; } llhttp_set_error_reason(htp, "too many headers"); return HPE_USER; } req.fs.alloc_add_trailer_name(StringRef{data, len}); } } return 0; }
0
466,652
Http::StripPortType stripPortType() const override { return strip_port_type_; }
0
58,316
Variant f_libxml_get_last_error() { xmlErrorPtr error = xmlGetLastError(); if (error) { return create_libxmlerror(*error); } return false; }
0
290,182
static PyObject * string_new ( PyTypeObject * type , PyObject * args , PyObject * kwds ) { PyObject * x = NULL ; static char * kwlist [ ] = { "object" , 0 } ; if ( type != & PyString_Type ) return str_subtype_new ( type , args , kwds ) ; if ( ! PyArg_ParseTupleAndKeywords ( args , kwds , "|O:str" , kwlist , & x ) ) return NULL ; if ( x == NULL ) return PyString_FromString ( "" ) ; return PyObject_Str ( x ) ; }
0
147,036
GF_Box *mdat_New() { ISOM_DECL_BOX_ALLOC(GF_MediaDataBox, GF_ISOM_BOX_TYPE_MDAT); return (GF_Box *)tmp; }
0
22,559
static void vmsvga_class_init ( ObjectClass * klass , void * data ) { DeviceClass * dc = DEVICE_CLASS ( klass ) ; PCIDeviceClass * k = PCI_DEVICE_CLASS ( klass ) ; k -> init = pci_vmsvga_initfn ; k -> romfile = "vgabios-vmware.bin" ; k -> vendor_id = PCI_VENDOR_ID_VMWARE ; k -> device_id = SVGA_PCI_DEVICE_ID ; k -> class_id = PCI_CLASS_DISPLAY_VGA ; k -> subsystem_vendor_id = PCI_VENDOR_ID_VMWARE ; k -> subsystem_id = SVGA_PCI_DEVICE_ID ; dc -> reset = vmsvga_reset ; dc -> vmsd = & vmstate_vmware_vga ; dc -> props = vga_vmware_properties ; dc -> hotpluggable = false ; set_bit ( DEVICE_CATEGORY_DISPLAY , dc -> categories ) ; }
0
412,757
static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx, struct proxy *defpx, const char *file, int line, char **err) { if (too_many_args(1, args, err, NULL)) return -1; h2_settings_header_table_size = atoi(args[1]); if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) { memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]); return -1; } return 0; }
0
257,860
static int vdpau_h264_end_frame ( AVCodecContext * avctx ) { AVVDPAUContext * hwctx = avctx -> hwaccel_context ; H264Context * h = avctx -> priv_data ; VdpVideoSurface surf = ff_vdpau_get_surface_id ( h -> cur_pic_ptr ) ; hwctx -> render ( hwctx -> decoder , surf , ( void * ) & hwctx -> info , hwctx -> bitstream_buffers_used , hwctx -> bitstream_buffers ) ; ff_h264_draw_horiz_band ( h , 0 , h -> avctx -> height ) ; hwctx -> bitstream_buffers_used = 0 ; return 0 ; }
0
96,274
static void kvm_cpu_vmxoff(void) { asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); intel_pt_handle_vmx(0); cr4_clear_bits(X86_CR4_VMXE); }
0
103,107
rsvg_filter_get_bg (RsvgFilterContext * ctx) { if (!ctx->bg_surface) ctx->bg_surface = rsvg_compile_bg (ctx->ctx); return ctx->bg_surface; }
0
160,369
static void fips_expand_key_bits(BYTE* in, BYTE* out) { BYTE buf[21], c; int i, b, p, r; /* reverse every byte in the key */ for (i = 0; i < 21; i++) buf[i] = fips_reverse_table[in[i]]; /* insert a zero-bit after every 7th bit */ for (i = 0, b = 0; i < 24; i++, b += 7) { p = b / 8; r = b % 8; if (r <= 1) { out[i] = (buf[p] << r) & 0xfe; } else { /* c is accumulator */ c = buf[p] << r; c |= buf[p + 1] >> (8 - r); out[i] = c & 0xfe; } } /* reverse every byte */ /* alter lsb so the byte has odd parity */ for (i = 0; i < 24; i++) out[i] = fips_oddparity_table[fips_reverse_table[out[i]]]; }
0
285,782
void CSSStyleSheetResource::SaveParsedStyleSheet(StyleSheetContents* sheet) { DCHECK(sheet); DCHECK(sheet->IsCacheableForResource()); if (!GetMemoryCache()->Contains(this)) { SetParsedStyleSheetCache(nullptr); return; } SetParsedStyleSheetCache(sheet); }
0
52,286
f_float2nr(typval_T *argvars, typval_T *rettv) { float_T f = 0.0; if (get_float_arg(argvars, &f) == OK) { if (f <= -VARNUM_MAX + DBL_EPSILON) rettv->vval.v_number = -VARNUM_MAX; else if (f >= VARNUM_MAX - DBL_EPSILON) rettv->vval.v_number = VARNUM_MAX; else rettv->vval.v_number = (varnumber_T)f; } }
0
315,119
RenderFrameImpl* RenderFrameImpl::FromRoutingID(int routing_id) { DCHECK(RenderThread::IsMainThread()); auto iter = g_routing_id_frame_map.Get().find(routing_id); if (iter != g_routing_id_frame_map.Get().end()) return iter->second; return nullptr; }
0
401,116
glue(glue(cirrus_bitblt_rop_bkwd_transp_, ROP_NAME),_16)(CirrusVGAState *s, uint32_t dstaddr, uint32_t srcaddr, int dstpitch, int srcpitch, int bltwidth, int bltheight) { int x,y; uint16_t transp = s->vga.gr[0x34] | (uint16_t)s->vga.gr[0x35] << 8; dstpitch += bltwidth; srcpitch += bltwidth; for (y = 0; y < bltheight; y++) { for (x = 0; x < bltwidth; x+=2) { ROP_OP_TR_16(s, dstaddr, cirrus_src16(s, srcaddr), transp); dstaddr -= 2; srcaddr -= 2; } dstaddr += dstpitch; srcaddr += srcpitch; } }
0