idx
int64
func
string
target
int64
245,159
/*
 * Write the xtrabackup_info file and, when --history is enabled, record the
 * backup in PERCONA_SCHEMA.xtrabackup_history via a prepared INSERT.
 * Always returns true; failure to write the info file just skips the history
 * update (falls through to cleanup).
 */
write_xtrabackup_info(MYSQL *connection)
{
	MYSQL_STMT *stmt;
	MYSQL_BIND bind[19];		/* one bind per history column, in INSERT order */
	const char *uuid = NULL;
	char *server_version = NULL;
	char* xtrabackup_info_data = NULL;
	int idx;
	my_bool null = TRUE;		/* shared is_null flag for optional columns */

	const char *ins_query = "insert into PERCONA_SCHEMA.xtrabackup_history("
		"uuid, name, tool_name, tool_command, tool_version, "
		"ibbackup_version, server_version, start_time, end_time, "
		"lock_time, binlog_pos, innodb_from_lsn, innodb_to_lsn, "
		"partial, incremental, format, compact, compressed, "
		"encrypted) "
		"values(?,?,?,?,?,?,?,from_unixtime(?),from_unixtime(?),"
		"?,?,?,?,?,?,?,?,?,?)";

	ut_ad((uint)xtrabackup_stream_fmt < array_elements(xb_stream_format_name));
	const char *stream_format_name =
		xb_stream_format_name[xtrabackup_stream_fmt];
	history_end_time = time(NULL);

	xtrabackup_info_data = get_xtrabackup_info(connection);
	if (!backup_file_printf(XTRABACKUP_INFO, "%s", xtrabackup_info_data)) {
		goto cleanup;
	}

	/* History recording is optional; without --history we are done. */
	if (!opt_history) {
		goto cleanup;
	}

	uuid = get_backup_uuid(connection);
	server_version = read_mysql_one_value(connection, "SELECT VERSION()");

	xb_mysql_query(connection,
		"CREATE DATABASE IF NOT EXISTS PERCONA_SCHEMA", false);
	xb_mysql_query(connection,
		"CREATE TABLE IF NOT EXISTS PERCONA_SCHEMA.xtrabackup_history("
		"uuid VARCHAR(40) NOT NULL PRIMARY KEY,"
		"name VARCHAR(255) DEFAULT NULL,"
		"tool_name VARCHAR(255) DEFAULT NULL,"
		"tool_command TEXT DEFAULT NULL,"
		"tool_version VARCHAR(255) DEFAULT NULL,"
		"ibbackup_version VARCHAR(255) DEFAULT NULL,"
		"server_version VARCHAR(255) DEFAULT NULL,"
		"start_time TIMESTAMP NULL DEFAULT NULL,"
		"end_time TIMESTAMP NULL DEFAULT NULL,"
		"lock_time BIGINT UNSIGNED DEFAULT NULL,"
		"binlog_pos TEXT DEFAULT NULL,"
		"innodb_from_lsn BIGINT UNSIGNED DEFAULT NULL,"
		"innodb_to_lsn BIGINT UNSIGNED DEFAULT NULL,"
		"partial ENUM('Y', 'N') DEFAULT NULL,"
		"incremental ENUM('Y', 'N') DEFAULT NULL,"
		"format ENUM('file', 'tar', 'xbstream') DEFAULT NULL,"
		"compact ENUM('Y', 'N') DEFAULT NULL,"
		"compressed ENUM('Y', 'N') DEFAULT NULL,"
		"encrypted ENUM('Y', 'N') DEFAULT NULL"
		") CHARACTER SET utf8 ENGINE=innodb", false);

	/* Upgrade from previous versions */
	xb_mysql_query(connection,
		"ALTER TABLE PERCONA_SCHEMA.xtrabackup_history MODIFY COLUMN "
		"binlog_pos TEXT DEFAULT NULL", false);

	/* NOTE(review): return values of mysql_stmt_init/prepare/execute are
	   not checked here — confirm failures are acceptable to ignore. */
	stmt = mysql_stmt_init(connection);

	mysql_stmt_prepare(stmt, ins_query, strlen(ins_query));

	memset(bind, 0, sizeof(bind));
	idx = 0;

	/* uuid */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)uuid;
	bind[idx].buffer_length = strlen(uuid);
	++idx;

	/* name — NULL when --history was given without a value */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(opt_history);
	bind[idx].buffer_length = strlen(opt_history);
	if (!(opt_history && *opt_history)) {
		bind[idx].is_null = &null;
	}
	++idx;

	/* tool_name */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = tool_name;
	bind[idx].buffer_length = strlen(tool_name);
	++idx;

	/* tool_command */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = tool_args;
	bind[idx].buffer_length = strlen(tool_args);
	++idx;

	/* tool_version */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(XTRABACKUP_VERSION);
	bind[idx].buffer_length = strlen(XTRABACKUP_VERSION);
	++idx;

	/* ibbackup_version */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(XTRABACKUP_VERSION);
	bind[idx].buffer_length = strlen(XTRABACKUP_VERSION);
	++idx;

	/* server_version */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = server_version;
	bind[idx].buffer_length = strlen(server_version);
	++idx;

	/* start_time — passed through from_unixtime() in the INSERT */
	bind[idx].buffer_type = MYSQL_TYPE_LONG;
	bind[idx].buffer = &history_start_time;
	++idx;

	/* end_time */
	bind[idx].buffer_type = MYSQL_TYPE_LONG;
	bind[idx].buffer = &history_end_time;
	++idx;

	/* lock_time */
	bind[idx].buffer_type = MYSQL_TYPE_LONG;
	bind[idx].buffer = &history_lock_time;
	++idx;

	/* binlog_pos — NULL when no binlog position was captured */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = mysql_binlog_position;
	if (mysql_binlog_position != NULL) {
		bind[idx].buffer_length = strlen(mysql_binlog_position);
	} else {
		bind[idx].is_null = &null;
	}
	++idx;

	/* innodb_from_lsn */
	bind[idx].buffer_type = MYSQL_TYPE_LONGLONG;
	bind[idx].buffer = (char*)(&incremental_lsn);
	++idx;

	/* innodb_to_lsn */
	bind[idx].buffer_type = MYSQL_TYPE_LONGLONG;
	bind[idx].buffer = (char*)(&metadata_to_lsn);
	++idx;

	/* partial (Y | N) — any table/database filter marks the backup partial */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)((xtrabackup_tables
				    || xtrabackup_tables_exclude
				    || xtrabackup_tables_file
				    || xtrabackup_databases
				    || xtrabackup_databases_exclude
				    || xtrabackup_databases_file) ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	/* incremental (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(
		(xtrabackup_incremental
		 || xtrabackup_incremental_basedir
		 || opt_incremental_history_name
		 || opt_incremental_history_uuid) ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	/* format (file | tar | xbstream) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(stream_format_name);
	bind[idx].buffer_length = strlen(stream_format_name);
	++idx;

	/* compact (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(xtrabackup_compact ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	/* compressed (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(xtrabackup_compress ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	/* encrypted (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(xtrabackup_encrypt ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	ut_ad(idx == 19);	/* must match the 19 placeholders in ins_query */

	mysql_stmt_bind_param(stmt, bind);

	mysql_stmt_execute(stmt);
	mysql_stmt_close(stmt);

cleanup:

	free(xtrabackup_info_data);
	free(server_version);

	return(true);
}
0
255,035
/*
 * Muxer write_packet callback for the ADTS muxer: optionally adopts new
 * extradata delivered as packet side data, prepends an ADTS frame header
 * (plus a pending PCE once) when enabled, then writes the raw payload.
 * Returns 0 on success or a negative AVERROR code.
 */
static int adts_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ADTSContext *adts = s->priv_data;
    AVCodecParameters *par = s->streams[0]->codecpar;
    AVIOContext *pb = s->pb;
    uint8_t buf[ADTS_HEADER_SIZE];

    /* empty packets carry nothing to mux */
    if (!pkt->size)
        return 0;
    if (!par->extradata_size) {
        uint8_t *side_data;
        size_t side_data_size;
        int ret;

        /* Extradata may arrive late as NEW_EXTRADATA side data; parse it and
           keep a copy in the codec parameters. */
        side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
                                            &side_data_size);
        if (side_data_size) {
            ret = adts_decode_extradata(s, adts, side_data, side_data_size);
            if (ret < 0)
                return ret;
            ret = ff_alloc_extradata(par, side_data_size);
            if (ret < 0)
                return ret;
            memcpy(par->extradata, side_data, side_data_size);
        }
    }
    if (adts->write_adts) {
        int err = adts_write_frame_header(adts, buf, pkt->size,
                                          adts->pce_size);
        if (err < 0)
            return err;
        avio_write(pb, buf, ADTS_HEADER_SIZE);
        if (adts->pce_size) {
            /* PCE is emitted once, right after the first header */
            avio_write(pb, adts->pce_data, adts->pce_size);
            adts->pce_size = 0;
        }
    }
    avio_write(pb, pkt->data, pkt->size);
    return 0;
}
0
409,513
/*
 * Handle the terminal's response to the version request (DA2 / CRV).
 * "first" is the leading character of the response ('>' for DA2),
 * "arg[0..2]" are the parsed numeric parameters (-1 when absent),
 * "argc" is the number of parameters and "tp" the raw response text.
 * Identifies the terminal from the parameters, fills in term_props[],
 * and may send follow-up requests (termcap codes, cursor style/blink).
 */
handle_version_response(int first, int *arg, int argc, char_u *tp)
{
    // The xterm version.  It is set to zero when it can't be an actual xterm
    // version.
    int version = arg[1];

    LOG_TR(("Received CRV response: %s", tp));
    crv_status.tr_progress = STATUS_GOT;
    did_cursorhold = TRUE;

    // Reset terminal properties that are set based on the termresponse.
    // Mainly useful for tests that send the termresponse multiple times.
    // For testing all props can be reset.
    init_term_props(
#ifdef FEAT_EVAL
	    reset_term_props_on_termresponse
#else
	    FALSE
#endif
	    );

    // If this code starts with CSI, you can bet that the
    // terminal uses 8-bit codes.
    if (tp[0] == CSI)
	switch_to_8bit();

    // Screen sends 40500.
    // rxvt sends its version number: "20703" is 2.7.3.
    // Ignore it for when the user has set 'term' to xterm,
    // even though it's an rxvt.
    if (version > 20000)
	version = 0;

    // Figure out more if the response is CSI > 99 ; 99 ; 99 c
    if (first == '>' && argc == 3)
    {
	int need_flush = FALSE;

	// mintty 2.9.5 sends 77;20905;0c.
	// (77 is ASCII 'M' for mintty.)
	if (arg[0] == 77)
	{
	    // mintty can do SGR mouse reporting
	    term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_SGR;
	}

	// If xterm version >= 141 try to get termcap codes.  For other
	// terminals the request should be ignored.
	if (version >= 141 && p_xtermcodes)
	{
	    LOG_TR(("Enable checking for XT codes"));
	    check_for_codes = TRUE;
	    need_gather = TRUE;
	    req_codes_from_term();
	}

	// libvterm sends 0;100;0
	if (version == 100 && arg[0] == 0 && arg[2] == 0)
	{
	    // If run from Vim $COLORS is set to the number of
	    // colors the terminal supports.  Otherwise assume
	    // 256, libvterm supports even more.
	    if (mch_getenv((char_u *)"COLORS") == NULL)
		may_adjust_color_count(256);
	    // Libvterm can handle SGR mouse reporting.
	    term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_SGR;
	}

	if (version == 95)
	{
	    // Mac Terminal.app sends 1;95;0
	    if (arg[0] == 1 && arg[2] == 0)
	    {
		term_props[TPR_UNDERLINE_RGB].tpr_status = TPR_YES;
		term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_SGR;
	    }
	    // iTerm2 sends 0;95;0
	    else if (arg[0] == 0 && arg[2] == 0)
	    {
		// iTerm2 can do SGR mouse reporting
		term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_SGR;
	    }
	    // old iTerm2 sends 0;95;
	    else if (arg[0] == 0 && arg[2] == -1)
		term_props[TPR_UNDERLINE_RGB].tpr_status = TPR_YES;
	}

	// screen sends 83;40500;0 83 is 'S' in ASCII.
	if (arg[0] == 83)
	{
	    // screen supports SGR mouse codes since 4.7.0
	    if (arg[1] >= 40700)
		term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_SGR;
	    else
		term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_XTERM;
	}

	// If no recognized terminal has set mouse behavior, assume xterm.
	if (term_props[TPR_MOUSE].tpr_status == TPR_UNKNOWN)
	{
	    // Xterm version 277 supports SGR.
	    // Xterm version >= 95 supports mouse dragging.
	    if (version >= 277)
		term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_SGR;
	    else if (version >= 95)
		term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_XTERM2;
	}

	// Detect terminals that set $TERM to something like
	// "xterm-256color" but are not fully xterm compatible.
	//
	// Gnome terminal sends 1;3801;0, 1;4402;0 or 1;2501;0.
	// Newer Gnome-terminal sends 65;6001;1.
	// xfce4-terminal sends 1;2802;0.
	// screen sends 83;40500;0
	// Assuming any version number over 2500 is not an
	// xterm (without the limit for rxvt and screen).
	if (arg[1] >= 2500)
	    term_props[TPR_UNDERLINE_RGB].tpr_status = TPR_YES;

	else if (version == 136 && arg[2] == 0)
	{
	    term_props[TPR_UNDERLINE_RGB].tpr_status = TPR_YES;

	    // PuTTY sends 0;136;0
	    if (arg[0] == 0)
	    {
		// supports sgr-like mouse reporting.
		term_props[TPR_MOUSE].tpr_status = TPR_MOUSE_SGR;
	    }
	    // vandyke SecureCRT sends 1;136;0
	}

	// Konsole sends 0;115;0 - but t_u8 does not actually work, therefore
	// commented out.
	// else if (version == 115 && arg[0] == 0 && arg[2] == 0)
	//     term_props[TPR_UNDERLINE_RGB].tpr_status = TPR_YES;

	// GNU screen sends 83;30600;0, 83;40500;0, etc.
	// 30600/40500 is a version number of GNU screen. DA2 support is added
	// on 3.6.  DCS string has a special meaning to GNU screen, but xterm
	// compatibility checking does not detect GNU screen.
	if (arg[0] == 83 && arg[1] >= 30600)
	{
	    term_props[TPR_CURSOR_STYLE].tpr_status = TPR_NO;
	    term_props[TPR_CURSOR_BLINK].tpr_status = TPR_NO;
	}

	// Xterm first responded to this request at patch level
	// 95, so assume anything below 95 is not xterm and hopefully supports
	// the underline RGB color sequence.
	if (version < 95)
	    term_props[TPR_UNDERLINE_RGB].tpr_status = TPR_YES;

	// Getting the cursor style is only supported properly by xterm since
	// version 279 (otherwise it returns 0x18).
	if (version < 279)
	    term_props[TPR_CURSOR_STYLE].tpr_status = TPR_NO;

	/*
	 * Take action on the detected properties.
	 */

	// Unless the underline RGB color is expected to work, disable "t_8u".
	// It does not work for the real Xterm, it resets the background color.
	// This may cause some flicker.  Alternative would be to set "t_8u"
	// here if the terminal is expected to support it, but that might
	// conflict with what was set in the .vimrc.
	if (term_props[TPR_UNDERLINE_RGB].tpr_status != TPR_YES
		&& *T_8U != NUL
		&& !option_was_set((char_u *)"t_8u"))
	{
	    set_string_option_direct((char_u *)"t_8u", -1, (char_u *)"",
								 OPT_FREE, 0);
	}
	if (*T_8U != NUL && write_t_8u_state == MAYBE)
	    // Did skip writing t_8u, a complete redraw is needed.
	    redraw_later_clear();
	write_t_8u_state = OK;	// can output t_8u now

	// Only set 'ttymouse' automatically if it was not set
	// by the user already.
	if (!option_was_set((char_u *)"ttym")
		&& (term_props[TPR_MOUSE].tpr_status == TPR_MOUSE_XTERM2
		    || term_props[TPR_MOUSE].tpr_status == TPR_MOUSE_SGR))
	{
	    set_option_value_give_err((char_u *)"ttym", 0L,
		    term_props[TPR_MOUSE].tpr_status == TPR_MOUSE_SGR
				    ? (char_u *)"sgr" : (char_u *)"xterm2", 0);
	}

	// Only request the cursor style if t_SH and t_RS are
	// set. Only supported properly by xterm since version
	// 279 (otherwise it returns 0x18).
	// Only when getting the cursor style was detected to work.
	// Not for Terminal.app, it can't handle t_RS, it
	// echoes the characters to the screen.
	if (rcs_status.tr_progress == STATUS_GET
		&& term_props[TPR_CURSOR_STYLE].tpr_status == TPR_YES
		&& *T_CSH != NUL
		&& *T_CRS != NUL)
	{
	    MAY_WANT_TO_LOG_THIS;
	    LOG_TR(("Sending cursor style request"));
	    out_str(T_CRS);
	    termrequest_sent(&rcs_status);
	    need_flush = TRUE;
	}

	// Only request the cursor blink mode if t_RC set. Not
	// for Gnome terminal, it can't handle t_RC, it
	// echoes the characters to the screen.
	// Only when getting the cursor style was detected to work.
	if (rbm_status.tr_progress == STATUS_GET
		&& term_props[TPR_CURSOR_BLINK].tpr_status == TPR_YES
		&& *T_CRC != NUL)
	{
	    MAY_WANT_TO_LOG_THIS;
	    LOG_TR(("Sending cursor blink mode request"));
	    out_str(T_CRC);
	    termrequest_sent(&rbm_status);
	    need_flush = TRUE;
	}

	if (need_flush)
	    out_flush();
    }
}
0
390,565
/*
 * Apply an XkbSetNames request to the device's keyboard description.
 * The atom values follow the fixed request header ("tmp" walks them in
 * the order defined by the bits set in stuff->which), updates the name
 * fields selected by the mask, and emits XkbNamesNotify (plus an
 * extension-device notify when indicator names changed).
 *
 * NOTE(review): stuff->firstType/nTypes, firstKTLevel/nKTLevels, firstKey/
 * nKeys etc. are used to index xkb arrays without visible bounds checks —
 * presumably validated by a caller before this is reached; confirm.
 */
_XkbSetNames(ClientPtr client, DeviceIntPtr dev, xkbSetNamesReq *stuff)
{
    XkbDescRec *xkb;
    XkbNamesRec *names;
    CARD32 *tmp;
    xkbNamesNotify nn;

    tmp = (CARD32 *)&stuff[1];
    xkb = dev->key->xkbInfo->desc;
    names = xkb->names;

    /* Make sure the names structures can hold what the request carries. */
    if (XkbAllocNames(xkb,stuff->which,stuff->nRadioGroups,
                                       stuff->nKeyAliases)!=Success) {
        return BadAlloc;
    }

    bzero(&nn,sizeof(xkbNamesNotify));
    nn.changed= stuff->which;
    tmp = (CARD32 *)&stuff[1];
    if (stuff->which&XkbKeycodesNameMask)
        names->keycodes= *tmp++;
    if (stuff->which&XkbGeometryNameMask)
        names->geometry= *tmp++;
    if (stuff->which&XkbSymbolsNameMask)
        names->symbols= *tmp++;
    if (stuff->which&XkbPhysSymbolsNameMask)
        names->phys_symbols= *tmp++;
    if (stuff->which&XkbTypesNameMask)
        names->types= *tmp++;
    if (stuff->which&XkbCompatNameMask)
        names->compat= *tmp++;
    if ((stuff->which&XkbKeyTypeNamesMask)&&(stuff->nTypes>0)) {
        register unsigned i;
        register XkbKeyTypePtr type;

        type= &xkb->map->types[stuff->firstType];
        for (i=0;i<stuff->nTypes;i++,type++) {
            type->name= *tmp++;
        }
        nn.firstType= stuff->firstType;
        nn.nTypes= stuff->nTypes;
    }
    if (stuff->which&XkbKTLevelNamesMask) {
        register XkbKeyTypePtr type;
        register unsigned i;
        CARD8 *width;

        /* A per-type width array precedes the level-name atoms, padded to
           a CARD32 boundary. */
        width = (CARD8 *)tmp;
        tmp= (CARD32 *)(((char *)tmp)+XkbPaddedSize(stuff->nKTLevels));
        type= &xkb->map->types[stuff->firstKTLevel];
        for (i=0;i<stuff->nKTLevels;i++,type++) {
            if (width[i]>0) {
                if (type->level_names) {
                    register unsigned n;
                    for (n=0;n<width[i];n++) {
                        type->level_names[n]= tmp[n];
                    }
                }
                tmp+= width[i];
            }
        }
        nn.firstLevelName= 0;
        /* NOTE(review): uses stuff->nTypes here rather than nKTLevels —
           looks suspicious; confirm against the protocol spec. */
        nn.nLevelNames= stuff->nTypes;
    }
    if (stuff->which&XkbIndicatorNamesMask) {
        tmp= _XkbCopyMaskedAtoms(tmp,names->indicators,XkbNumIndicators,
                                                        stuff->indicators);
        nn.changedIndicators= stuff->indicators;
    }
    if (stuff->which&XkbVirtualModNamesMask) {
        tmp= _XkbCopyMaskedAtoms(tmp,names->vmods,XkbNumVirtualMods,
                                                        stuff->virtualMods);
        nn.changedVirtualMods= stuff->virtualMods;
    }
    if (stuff->which&XkbGroupNamesMask) {
        tmp= _XkbCopyMaskedAtoms(tmp,names->groups,XkbNumKbdGroups,
                                                        stuff->groupNames);
        /* NOTE(review): assigns group-name bits into changedVirtualMods —
           possible copy/paste; verify the notify struct's intended field. */
        nn.changedVirtualMods= stuff->groupNames;
    }
    if (stuff->which&XkbKeyNamesMask) {
        memcpy((char*)&names->keys[stuff->firstKey],(char *)tmp,
                                        stuff->nKeys*XkbKeyNameLength);
        tmp+= stuff->nKeys;
        nn.firstKey= stuff->firstKey;
        nn.nKeys= stuff->nKeys;
    }
    if (stuff->which&XkbKeyAliasesMask) {
        if (stuff->nKeyAliases>0) {
            register int na= stuff->nKeyAliases;
            if (XkbAllocNames(xkb,XkbKeyAliasesMask,0,na)!=Success)
                return BadAlloc;
            memcpy((char *)names->key_aliases,(char *)tmp,
                                stuff->nKeyAliases*sizeof(XkbKeyAliasRec));
            /* each alias record is two CARD32s wide on the wire */
            tmp+= stuff->nKeyAliases*2;
        }
        else if (names->key_aliases!=NULL) {
            /* zero count means: drop any existing aliases */
            _XkbFree(names->key_aliases);
            names->key_aliases= NULL;
            names->num_key_aliases= 0;
        }
        nn.nAliases= names->num_key_aliases;
    }
    if (stuff->which&XkbRGNamesMask) {
        if (stuff->nRadioGroups>0) {
            register unsigned i,nrg;
            nrg= stuff->nRadioGroups;
            if (XkbAllocNames(xkb,XkbRGNamesMask,nrg,0)!=Success)
                return BadAlloc;
            for (i=0;i<stuff->nRadioGroups;i++) {
                names->radio_groups[i]= tmp[i];
            }
            tmp+= stuff->nRadioGroups;
        }
        else if (names->radio_groups) {
            /* zero count means: drop any existing radio-group names */
            _XkbFree(names->radio_groups);
            names->radio_groups= NULL;
            names->num_rg= 0;
        }
        nn.nRadioGroups= names->num_rg;
    }
    if (nn.changed) {
        Bool needExtEvent;

        needExtEvent= (nn.changed&XkbIndicatorNamesMask)!=0;
        XkbSendNamesNotify(dev,&nn);
        if (needExtEvent) {
            XkbSrvLedInfoPtr sli;
            xkbExtensionDeviceNotify edev;
            register int i;
            register unsigned bit;

            /* Recompute which indicators have names and tell XI clients. */
            sli= XkbFindSrvLedInfo(dev,XkbDfltXIClass,XkbDfltXIId,
                                                XkbXI_IndicatorsMask);
            sli->namesPresent= 0;
            for (i=0,bit=1;i<XkbNumIndicators;i++,bit<<=1) {
                if (names->indicators[i]!=None)
                    sli->namesPresent|= bit;
            }
            bzero(&edev,sizeof(xkbExtensionDeviceNotify));
            edev.reason= XkbXI_IndicatorNamesMask;
            edev.ledClass= KbdFeedbackClass;
            edev.ledID= dev->kbdfeed->ctrl.id;
            edev.ledsDefined= sli->namesPresent|sli->mapsPresent;
            edev.ledState= sli->effectiveState;
            edev.firstBtn= 0;
            edev.nBtns= 0;
            edev.supported= XkbXI_AllFeaturesMask;
            edev.unsupported= 0;
            XkbSendExtensionDeviceNotify(dev,client,&edev);
        }
    }
    return Success;
}
0
281,090
/*
 * Double the hash table size implied by the old mask and return the new
 * mask: for an old mask of 2^k - 1 this yields 2^(k+1) - 1.
 * (2*(m+1) - 1 == 2m + 1 == (m << 1) | 1 for any unsigned m.)
 */
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return (old_hmask << 1) | 1;
}
0
293,504
/*
 * Add a completion match from a typval: either a plain string or a Dict
 * with "word" plus optional "abbr"/"menu"/"kind"/"info"/"user_data" text
 * and "icase"/"dup"/"empty"/"equal" flags.
 * "dir" is the completion direction, "fast" adds CP_FAST to the flags.
 * Returns OK, or FAIL when there is no word (an empty word is only
 * accepted when "empty" is set).
 */
ins_compl_add_tv(typval_T *tv, int dir, int fast)
{
    char_u	*word;
    int		dup = FALSE;
    int		empty = FALSE;
    int		flags = fast ? CP_FAST : 0;
    char_u	*(cptext[CPT_COUNT]);
    typval_T	user_data;
    int		status;

    user_data.v_type = VAR_UNKNOWN;
    if (tv->v_type == VAR_DICT && tv->vval.v_dict != NULL)
    {
	word = dict_get_string(tv->vval.v_dict, (char_u *)"word", FALSE);
	cptext[CPT_ABBR] = dict_get_string(tv->vval.v_dict,
						     (char_u *)"abbr", FALSE);
	cptext[CPT_MENU] = dict_get_string(tv->vval.v_dict,
						     (char_u *)"menu", FALSE);
	cptext[CPT_KIND] = dict_get_string(tv->vval.v_dict,
						     (char_u *)"kind", FALSE);
	cptext[CPT_INFO] = dict_get_string(tv->vval.v_dict,
						     (char_u *)"info", FALSE);
	dict_get_tv(tv->vval.v_dict, (char_u *)"user_data", &user_data);

	// The presence check ("key exists") and the value check are
	// separate: a key set to zero is not the same as a missing key.
	if (dict_get_string(tv->vval.v_dict, (char_u *)"icase", FALSE) != NULL
			&& dict_get_number(tv->vval.v_dict, (char_u *)"icase"))
	    flags |= CP_ICASE;
	if (dict_get_string(tv->vval.v_dict, (char_u *)"dup", FALSE) != NULL)
	    dup = dict_get_number(tv->vval.v_dict, (char_u *)"dup");
	if (dict_get_string(tv->vval.v_dict, (char_u *)"empty", FALSE) != NULL)
	    empty = dict_get_number(tv->vval.v_dict, (char_u *)"empty");
	if (dict_get_string(tv->vval.v_dict, (char_u *)"equal", FALSE) != NULL
			&& dict_get_number(tv->vval.v_dict, (char_u *)"equal"))
	    flags |= CP_EQUAL;
    }
    else
    {
	word = tv_get_string_chk(tv);
	CLEAR_FIELD(cptext);
    }
    if (word == NULL || (!empty && *word == NUL))
    {
	clear_tv(&user_data);
	return FAIL;
    }
    status = ins_compl_add(word, -1, NULL, cptext, &user_data, dir, flags, dup);
    if (status != OK)
	// on failure the user_data was not consumed; release it here
	clear_tv(&user_data);
    return status;
}
0
442,567
/*
 * Clear (deactivate) a memory slot: after validating the group and slot
 * indices, reset the slot's virtual address range to the empty [0, 0).
 */
void memslot_info_del_slot(RedMemSlotInfo *info, uint32_t slot_group_id,
                           uint32_t slot_id)
{
    /* guard both indices before touching the 2-D slot table */
    spice_return_if_fail(slot_group_id < info->num_memslots_groups);
    spice_return_if_fail(slot_id < info->num_memslots);

    info->mem_slots[slot_group_id][slot_id].virt_end_addr = 0;
    info->mem_slots[slot_group_id][slot_id].virt_start_addr = 0;
}
0
345,222
/*
 * Try to share an identical Unicode page directory instead of keeping a
 * private copy: scan all allocated consoles for a pagedir "q" with the
 * same checksum as "p", deep-compare the 32x32 page tables, and on a full
 * match point "conp" at "q" (bumping its refcount) and free "p".
 * Returns 1 when "p" was replaced and freed, 0 otherwise.
 */
static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
{
	int i, j, k;
	struct uni_pagedir *q;

	for (i = 0; i < MAX_NR_CONSOLES; i++) {
		if (!vc_cons_allocated(i))
			continue;
		q = *vc_cons[i].d->vc_uni_pagedir_loc;
		/* sum mismatch is a cheap reject before the deep compare */
		if (!q || q == p || q->sum != p->sum)
			continue;
		for (j = 0; j < 32; j++) {
			u16 **p1, **q1;

			p1 = p->uni_pgdir[j];
			q1 = q->uni_pgdir[j];
			if (!p1 && !q1)
				continue;	/* both absent: still equal */
			if (!p1 || !q1)
				break;		/* only one present: differ */
			for (k = 0; k < 32; k++) {
				if (!p1[k] && !q1[k])
					continue;
				if (!p1[k] || !q1[k])
					break;
				if (memcmp(p1[k], q1[k], 64*sizeof(u16)))
					break;
			}
			if (k < 32)
				break;		/* inner loop found a difference */
		}
		if (j == 32) {
			/* full match: share q, release and free p */
			q->refcount++;
			*conp->vc_uni_pagedir_loc = q;
			con_release_unimap(p);
			kfree(p);
			return 1;
		}
	}
	return 0;
}
0
294,687
/*
 * Validate and normalize a Julian-calendar date.  Negative months count
 * back from December (-1 == 12) and negative days count back from the end
 * of the month (-1 == last day).  On success the canonical month/day are
 * stored in *rm/*rd and 1 is returned; otherwise 0.
 */
c_valid_julian_p(int y, int m, int d, int *rm, int *rd)
{
    int days_in_month;

    if (m < 0)
	m += 13;		/* map -1..-12 onto 12..1 */
    if (m < 1 || m > 12)
	return 0;

    days_in_month = c_julian_last_day_of_month(y, m);
    if (d < 0)
	d = days_in_month + d + 1;	/* count back from month end */
    if (d < 1 || d > days_in_month)
	return 0;

    *rm = m;
    *rd = d;
    return 1;
}
0
249,983
/*
 * Write the movie with interleaved media data: boxes before moov, then an
 * emulated interleave pass to size moov/meta, chunk-offset shifting (twice
 * if moving to 64-bit offsets changed the size), the real moov/meta and
 * mdat write, and finally the remaining top-level boxes.
 * "drift_inter" selects drift-controlled interleaving in DoInterleave.
 */
static GF_Err WriteInterleaved(MovieWriter *mw, GF_BitStream *bs, Bool drift_inter)
{
	GF_Err e;
	u32 i;
	s32 moov_meta_pos=-1;
	GF_Box *a, *cprt_box=NULL;
	u64 firstSize, finalSize, offset, finalOffset;
	GF_List *writers = gf_list_new();
	GF_ISOFile *movie = mw->movie;

	//first setup the writers
	e = SetupWriters(mw, writers, 1);
	if (e) goto exit;

	if (movie->is_jp2) {
		/* JPEG-2000 signature box must open the file */
		gf_bs_write_u32(bs, 12);
		gf_bs_write_u32(bs, GF_ISOM_BOX_TYPE_JP);
		gf_bs_write_u32(bs, 0x0D0A870A);
	}
	if (movie->brand) {
		e = gf_isom_box_size((GF_Box *)movie->brand);
		if (e) goto exit;
		e = gf_isom_box_write((GF_Box *)movie->brand, bs);
		if (e) goto exit;
	}
	if (movie->pdin) {
		e = gf_isom_box_size((GF_Box *)movie->pdin);
		if (e) goto exit;
		e = gf_isom_box_write((GF_Box *)movie->pdin, bs);
		if (e) goto exit;
	}

	//write all boxes before moov
	i=0;
	while ((a = (GF_Box*)gf_list_enum(movie->TopBoxes, &i))) {
		switch (a->type) {
		case GF_ISOM_BOX_TYPE_MOOV:
		case GF_ISOM_BOX_TYPE_META:
			/* remember where moov/meta sits; boxes after it are
			   written in the final pass below */
			moov_meta_pos = i-1;
			break;
		case GF_ISOM_BOX_TYPE_FTYP:
		case GF_ISOM_BOX_TYPE_PDIN:
		case GF_ISOM_BOX_TYPE_MDAT:
			break;
		case GF_ISOM_BOX_TYPE_FREE:
			//for backward compat with old arch, keep copyright before moov
			if (((GF_FreeSpaceBox*)a)->dataSize>4) {
				GF_FreeSpaceBox *fr = (GF_FreeSpaceBox*) a;
				/* NOTE(review): fr->data is dereferenced based on
				   dataSize alone — confirm data is always non-NULL
				   when dataSize > 4 */
				if ((fr->dataSize>20) && !strncmp(fr->data, "IsoMedia File", 13)) {
					cprt_box = a;
					break;
				}
			}
			/* falls through: a non-copyright free box is written now */
		default:
			if (moov_meta_pos<0) {
				e = gf_isom_box_size(a);
				if (e) goto exit;
				e = gf_isom_box_write(a, bs);
				if (e) goto exit;
			}
			break;
		}
	}

	/* emulation pass (flag 1): computes chunk layout without writing media */
	e = DoInterleave(mw, writers, bs, 1, gf_bs_get_position(bs), drift_inter);
	if (e) goto exit;

	firstSize = GetMoovAndMetaSize(movie, writers);
	offset = firstSize;
	/* mdat header is 8 bytes, +8 more for a 64-bit largesize */
	if (movie->mdat && movie->mdat->dataSize) offset += 8 + (movie->mdat->dataSize > 0xFFFFFFFF ? 8 : 0);
	e = ShiftOffset(movie, writers, offset);
	if (e) goto exit;
	//get the size and see if it has changed (eg, we moved to 64 bit offsets)
	finalSize = GetMoovAndMetaSize(movie, writers);
	if (firstSize != finalSize) {
		finalOffset = finalSize;
		if (movie->mdat && movie->mdat->dataSize) finalOffset += 8 + (movie->mdat->dataSize > 0xFFFFFFFF ? 8 : 0);
		//OK, now we're sure about the final size -> shift the offsets
		//we don't need to re-emulate, as the only thing that changed is the offset
		//so just shift the offset
		e = ShiftOffset(movie, writers, finalOffset - offset);
		if (e) goto exit;
		/*firstSize = */GetMoovAndMetaSize(movie, writers);
	}
	//get real sample offsets for meta items
	if (movie->meta) {
		store_meta_item_sample_ref_offsets(movie, writers, movie->meta);
	}
	//now write our stuff
	e = WriteMoovAndMeta(movie, writers, bs);
	if (e) goto exit;

	/*we have 8 extra bytes for large size (not computed in gf_isom_box_size) */
	if (movie->mdat && movie->mdat->dataSize) {
		if (movie->mdat->dataSize > 0xFFFFFFFF) movie->mdat->dataSize += 8;
		e = gf_isom_box_size((GF_Box *)movie->mdat);
		if (e) goto exit;
		e = gf_isom_box_write((GF_Box *)movie->mdat, bs);
		if (e) goto exit;
	}

	//we don't need the offset as we are writing...
	ResetWriters(writers);
	/* real pass (flag 0): writes the media data into mdat */
	e = DoInterleave(mw, writers, bs, 0, 0, drift_inter);
	if (e) goto exit;

	//then the rest
	i=0;
	while ((a = (GF_Box*)gf_list_enum(movie->TopBoxes, &i))) {
		/* skip boxes already written before moov, except the deferred
		   copyright free box */
		if ((i-1 < (u32) moov_meta_pos) && (a != cprt_box)) continue;
		switch (a->type) {
		case GF_ISOM_BOX_TYPE_MOOV:
		case GF_ISOM_BOX_TYPE_META:
		case GF_ISOM_BOX_TYPE_FTYP:
		case GF_ISOM_BOX_TYPE_PDIN:
		case GF_ISOM_BOX_TYPE_MDAT:
			break;
		default:
			e = gf_isom_box_size(a);
			if (e) goto exit;
			e = gf_isom_box_write(a, bs);
			if (e) goto exit;
		}
	}

exit:
	CleanWriters(writers);
	gf_list_del(writers);
	return e;
}
0
335,416
/*
 * Expand every "<sfile>" occurrence in "arg" (plus any ":" modifiers)
 * via eval_vars() and return the result in allocated memory.
 * Returns NULL on allocation failure or when eval_vars() reports an
 * error (the error is emitted here).
 */
expand_sfile(char_u *arg)
{
    char	*errormsg;
    int		len;
    char_u	*result;
    char_u	*newres;
    char_u	*repl;
    int		srclen;
    char_u	*p;

    result = vim_strsave(arg);
    if (result == NULL)
	return NULL;

    for (p = result; *p; )
    {
	if (STRNCMP(p, "<sfile>", 7) != 0)
	    ++p;
	else
	{
	    // replace "<sfile>" with the sourced file name, and do ":" stuff
	    repl = eval_vars(p, result, &srclen, NULL, &errormsg, NULL, TRUE);
	    if (errormsg != NULL)
	    {
		if (*errormsg)
		    emsg(errormsg);
		vim_free(result);
		return NULL;
	    }
	    if (repl == NULL)		// no match (cannot happen)
	    {
		p += srclen;
		continue;
	    }
	    // build the new string: prefix + replacement + remainder
	    len = (int)STRLEN(result) - srclen + (int)STRLEN(repl) + 1;
	    newres = alloc(len);
	    if (newres == NULL)
	    {
		vim_free(repl);
		vim_free(result);
		return NULL;
	    }
	    mch_memmove(newres, result, (size_t)(p - result));
	    STRCPY(newres + (p - result), repl);
	    len = (int)STRLEN(newres);
	    STRCAT(newres, p + srclen);
	    vim_free(repl);
	    vim_free(result);
	    result = newres;
	    p = newres + len;		// continue after the match
	}
    }
    return result;
}
0
326,590
/*
 * Install a uname->uid lookup callback on a write-to-disk archive handle.
 * Any previously registered lookup state is released through its cleanup
 * callback before the new triple is stored.  Always returns ARCHIVE_OK.
 */
archive_write_disk_set_user_lookup(struct archive *_a,
    void *private_data,
    int64_t (*lookup_uid)(void *private, const char *uname, int64_t uid),
    void (*cleanup_uid)(void *private))
{
	struct archive_write_disk *a = (struct archive_write_disk *)_a;

	archive_check_magic(&a->archive, ARCHIVE_WRITE_DISK_MAGIC,
	    ARCHIVE_STATE_ANY, "archive_write_disk_set_user_lookup");

	/* Let the old lookup tear down its private data first. */
	if (a->lookup_uid_data != NULL && a->cleanup_uid != NULL)
		(a->cleanup_uid)(a->lookup_uid_data);

	a->lookup_uid_data = private_data;
	a->cleanup_uid = cleanup_uid;
	a->lookup_uid = lookup_uid;
	return (ARCHIVE_OK);
}
0
236,134
/*
 * Serialize a Track Selection ('tsel') box: the full-box header, the
 * switch group id, then every attribute in list order.
 */
GF_Err tsel_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 pos;
	GF_TrackSelectionBox *ptr = (GF_TrackSelectionBox *) s;

	e = gf_isom_full_box_write(s, bs);
	if (e) return e;

	gf_bs_write_u32(bs, ptr->switchGroup);

	pos = 0;
	while (pos < ptr->attributeListCount) {
		gf_bs_write_u32(bs, ptr->attributeList[pos]);
		pos++;
	}
	return GF_OK;
}
0
458,919
/*
 * Search backwards for the start of the raw string the cursor is inside,
 * looking back at most "ind_maxcomment" lines.  A candidate found inside
 * an ordinary string is rejected and the search is retried with a tighter
 * limit.  Returns the position of the raw string start, or NULL.
 */
find_start_rawstring(int ind_maxcomment)	// XXX
{
    pos_T	*pos;
    char_u	*line;
    char_u	*p;
    int		cur_maxcomment = ind_maxcomment;

    for (;;)
    {
	pos = findmatchlimit(NULL, 'R', FM_BACKWARD, cur_maxcomment);
	if (pos == NULL)
	    break;

	// Check if the raw string start we found is inside a string.
	// If it is then restrict the search to below this line and try again.
	line = ml_get(pos->lnum);
	for (p = line; *p && (colnr_T)(p - line) < pos->col; ++p)
	    p = skip_string(p);
	if ((colnr_T)(p - line) <= pos->col)
	    break;			// candidate is not inside a string

	// retry with the limit reduced to just above the rejected line
	cur_maxcomment = curwin->w_cursor.lnum - pos->lnum - 1;
	if (cur_maxcomment <= 0)
	{
	    pos = NULL;
	    break;
	}
    }
    return pos;
}
0
413,593
/* Three-way comparator for numeric address strings, parsed with r_num_get.
 * Returns <0, 0 or >0 for a<b, a==b, a>b respectively. */
static int __addrs_cmp(void *_a, void *_b) {
	ut64 lhs = r_num_get (NULL, _a);
	ut64 rhs = r_num_get (NULL, _b);
	/* (x > y) - (x < y) yields exactly -1 / 0 / 1 */
	return (lhs > rhs) - (lhs < rhs);
}
0
359,394
/*
 * Print a community list to the vty: a one-time header line (styled
 * differently for numbered vs. named lists), then one line per entry
 * showing its direction and, unless it matches "any", its communities
 * (standard style) or raw config string (expanded style).
 */
community_list_show (struct vty *vty, struct community_list *list)
{
  struct community_entry *entry;

  for (entry = list->head; entry; entry = entry->next)
    {
      /* header is emitted only once, when visiting the first entry */
      if (entry == list->head)
	{
	  if (all_digit (list->name))
	    vty_out (vty, "Community %s list %s%s",
		     entry->style == COMMUNITY_LIST_STANDARD ?
		     "standard" : "(expanded) access",
		     list->name, VTY_NEWLINE);
	  else
	    vty_out (vty, "Named Community %s list %s%s",
		     entry->style == COMMUNITY_LIST_STANDARD ?
		     "standard" : "expanded",
		     list->name, VTY_NEWLINE);
	}
      if (entry->any)
	vty_out (vty, " %s%s",
		 community_direct_str (entry->direct), VTY_NEWLINE);
      else
	vty_out (vty, " %s %s%s",
		 community_direct_str (entry->direct),
		 entry->style == COMMUNITY_LIST_STANDARD ?
		 community_str (entry->u.com) : entry->config,
		 VTY_NEWLINE);
    }
}
0
413,594
/* Format an address as a heap-allocated "0x..." hex string (caller frees). */
static char *get_title(ut64 addr) {
	char *title = r_str_newf ("0x%"PFMT64x, addr);
	return title;
}
0
346,465
/*
 * Allocate a new script ID and grow script_items up to it, creating and
 * initializing a scriptitem_T (and its script-local variables) for every
 * new slot.  On allocation failure *error is set to FAIL; the already
 * reserved sid is returned either way.
 */
get_new_scriptitem(int *error)
{
    static scid_T   last_current_SID = 0;	// monotonically increasing SID
    int		    sid = ++last_current_SID;
    scriptitem_T    *si = NULL;

    if (ga_grow(&script_items, (int)(sid - script_items.ga_len)) == FAIL)
    {
	*error = FAIL;
	return sid;
    }
    while (script_items.ga_len < sid)
    {
	si = ALLOC_CLEAR_ONE(scriptitem_T);
	if (si == NULL)
	{
	    *error = FAIL;
	    return sid;
	}
	++script_items.ga_len;
	SCRIPT_ITEM(script_items.ga_len) = si;
	si->sn_name = NULL;
	si->sn_version = 1;

	// Allocate the local script variables to use for this script.
	new_script_vars(script_items.ga_len);
	ga_init2(&si->sn_var_vals, sizeof(svar_T), 10);
	hash_init(&si->sn_all_vars.dv_hashtab);
	ga_init2(&si->sn_imports, sizeof(imported_T), 10);
	ga_init2(&si->sn_type_list, sizeof(type_T), 10);
# ifdef FEAT_PROFILE
	si->sn_prof_on = FALSE;
# endif
    }

    // "si" can't be NULL, check only to avoid a compiler warning
    if (si != NULL)
	// Used to check script variable index is still valid.
	si->sn_script_seq = current_sctx.sc_seq;

    return sid;
}
0
264,236
/*
 * Push pending framebuffer updates to one VNC client.  Walks the client's
 * dirty bitmap, coalesces dirty spans into rectangles, queues them as a
 * job and returns the number of rectangles queued (0 when throttled or
 * nothing is dirty).  With "sync" set, waits for queued jobs to finish.
 * Also finishes disconnection when the client socket is already closed.
 */
static int vnc_update_client(VncState *vs, int has_dirty, bool sync)
{
    if (vs->need_update && vs->csock != -1) {
        VncDisplay *vd = vs->vd;
        VncJob *job;
        int y;
        int height, width;
        int n = 0;

        if (vs->output.offset && !vs->audio_cap && !vs->force_update)
            /* kernel send buffers are full -> drop frames to throttle */
            return 0;

        if (!has_dirty && !vs->audio_cap && !vs->force_update)
            return 0;

        /*
         * Send screen updates to the vnc client using the server
         * surface and server dirty map.  guest surface updates
         * happening in parallel don't disturb us, the next pass will
         * send them to the client.
         */
        job = vnc_job_new(vs);

        /* clamp to what the client actually displays */
        height = MIN(pixman_image_get_height(vd->server), vs->client_height);
        width = MIN(pixman_image_get_width(vd->server), vs->client_width);

        y = 0;
        for (;;) {
            int x, h;
            unsigned long x2;
            /* locate the next dirty bit anywhere in the 2-D bitmap */
            unsigned long offset = find_next_bit((unsigned long *) &vs->dirty,
                                                 height * VNC_DIRTY_BPL(vs),
                                                 y * VNC_DIRTY_BPL(vs));
            if (offset == height * VNC_DIRTY_BPL(vs)) {
                /* no more dirty bits */
                break;
            }

            /* convert the flat bit offset back to row/column */
            y = offset / VNC_DIRTY_BPL(vs);
            x = offset % VNC_DIRTY_BPL(vs);
            /* extend the run of dirty bits to the right, then downwards */
            x2 = find_next_zero_bit((unsigned long *) &vs->dirty[y],
                                    VNC_DIRTY_BPL(vs), x);
            bitmap_clear(vs->dirty[y], x, x2 - x);
            h = find_and_clear_dirty_height(vs, y, x, x2, height);
            x2 = MIN(x2, width / VNC_DIRTY_PIXELS_PER_BIT);
            if (x2 > x) {
                n += vnc_job_add_rect(job, x * VNC_DIRTY_PIXELS_PER_BIT, y,
                                      (x2 - x) * VNC_DIRTY_PIXELS_PER_BIT, h);
            }
        }

        vnc_job_push(job);
        if (sync) {
            vnc_jobs_join(vs);
        }
        vs->force_update = 0;
        return n;
    }

    if (vs->csock == -1) {
        vnc_disconnect_finish(vs);
    } else if (sync) {
        vnc_jobs_join(vs);
    }

    return 0;
}
0
368,800
/*
 * Install "dev" on the graphics state and, when the no-erase variant
 * signals that an erase is needed (returns 1), erase the page.
 * Returns the erase result in that case, otherwise the install result.
 */
gs_setdevice(gs_gstate * pgs, gx_device * dev)
{
    int code;

    code = gs_setdevice_no_erase(pgs, dev);
    if (code != 1)
        return code;
    return gs_erasepage(pgs);
}
0
313,853
/*
 * Feed the command count into v:count/v:count1, combining the operator
 * count with the motion count (their product, with absent counts treated
 * as 1).  v:prevcount is only updated on the first call: *set_prevcount
 * is consumed and reset to FALSE.
 */
set_vcount_ca(cmdarg_T *cap, int *set_prevcount)
{
    long	effective = cap->count0;

    // multiply with cap->opcount the same way as above
    if (cap->opcount != 0)
	effective = cap->opcount * (effective == 0 ? 1 : effective);
    set_vcount(effective, effective == 0 ? 1 : effective, *set_prevcount);
    *set_prevcount = FALSE;	// only set v:prevcount once
}
0
231,707
// Synchronously invokes the crypto-event-available callback.
void triggerCryptoEvent() {
  onCryptoEventAvailable();
}
0
224,573
// Shape inference for DepthwiseConv2dNative.
// Validates 4-D input/filter, the 4-element "strides"/"dilations" attrs and
// the padding configuration, canonicalizes NCHW input to NHWC, checks that
// the input depth matches filter dim 2, and sets the output shape
// (batch, out_rows, out_cols, in_depth * depth_multiplier) laid out per
// data_format.  "supports_explicit_padding" gates the "explicit_paddings"
// attribute.
Status DepthwiseConv2DNativeShapeImpl(shape_inference::InferenceContext* c,
                                      bool supports_explicit_padding) {
  ShapeHandle input_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
  ShapeHandle filter_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &filter_shape));

  std::vector<int32> strides;
  TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));

  if (strides.size() != 4) {
    return errors::InvalidArgument(
        "DepthwiseConv2D requires the stride attribute to contain 4 values, "
        "but got: ",
        strides.size());
  }

  std::vector<int32> dilations;
  // "dilations" is optional; default to no dilation.
  if (!c->GetAttr("dilations", &dilations).ok()) {
    dilations.resize(4, 1);
  }

  if (dilations.size() != 4) {
    return errors::InvalidArgument(
        "DepthwiseConv2D requires the dilations attribute to contain 4 values, "
        "but got: ",
        dilations.size());
  }

  string data_format_str;
  Status s = c->GetAttr("data_format", &data_format_str);
  TensorFormat data_format;
  // Missing or unparseable data_format falls back to NHWC.
  if (!s.ok() || !FormatFromString(data_format_str, &data_format)) {
    data_format = FORMAT_NHWC;
  }
  int32_t stride_rows;
  int32_t stride_cols;
  int32_t dilation_rows;
  int32_t dilation_cols;
  if (data_format == FORMAT_NCHW) {
    // Canonicalize input shape to NHWC so the shape inference code below can
    // process it.
    input_shape =
        c->MakeShape({{c->Dim(input_shape, 0), c->Dim(input_shape, 2),
                       c->Dim(input_shape, 3), c->Dim(input_shape, 1)}});
    stride_rows = strides[2];
    stride_cols = strides[3];
    dilation_rows = dilations[2];
    dilation_cols = dilations[3];
  } else {
    stride_rows = strides[1];
    stride_cols = strides[2];
    dilation_rows = dilations[1];
    dilation_cols = dilations[2];
  }

  DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
  DimensionHandle in_rows_dim = c->Dim(input_shape, 1);
  DimensionHandle in_cols_dim = c->Dim(input_shape, 2);

  // Filter layout: [rows, cols, in_depth, depth_multiplier].
  DimensionHandle filter_rows_dim = c->Dim(filter_shape, 0);
  DimensionHandle filter_cols_dim = c->Dim(filter_shape, 1);
  DimensionHandle input_depth = c->Dim(filter_shape, 2);
  DimensionHandle depth_multiplier = c->Dim(filter_shape, 3);

  // Check that the input depths are compatible.
  TF_RETURN_IF_ERROR(
      c->Merge(c->Dim(input_shape, 3), input_depth, &input_depth));

  DimensionHandle output_depth;
  TF_RETURN_IF_ERROR(c->Multiply(input_depth, depth_multiplier, &output_depth));

  Padding padding;
  TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));

  std::vector<int64_t> explicit_paddings;
  if (supports_explicit_padding) {
    Status status = c->GetAttr("explicit_paddings", &explicit_paddings);
    // Use the default value, which is an empty list, if the attribute is not
    // found. Otherwise return the error to the caller.
    if (!status.ok() && !errors::IsNotFound(status)) {
      return status;
    }
    TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
                                         /*num_dims=*/4, data_format));
  } else {
    DCHECK(padding != Padding::EXPLICIT);
  }

  // TODO(mrry,shlens): Raise an error if the stride would cause
  // information in the input to be ignored. This will require a change
  // in the kernel implementation.
  DimensionHandle output_rows, output_cols;
  // -1 means "padding not explicitly specified" for the helpers below.
  int64_t pad_rows_before = -1, pad_rows_after = -1;
  int64_t pad_cols_before = -1, pad_cols_after = -1;
  if (padding == Padding::EXPLICIT) {
    GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
                             &pad_rows_before, &pad_rows_after);
    GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
                             &pad_cols_before, &pad_cols_after);
  }
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_rows_dim, filter_rows_dim, dilation_rows, stride_rows, padding,
      pad_rows_before, pad_rows_after, &output_rows));
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_cols_dim, filter_cols_dim, dilation_cols, stride_cols, padding,
      pad_cols_before, pad_cols_after, &output_cols));

  ShapeHandle output_shape;
  if (data_format == FORMAT_NCHW) {
    output_shape =
        c->MakeShape({batch_size_dim, output_depth, output_rows, output_cols});
  } else {
    output_shape =
        c->MakeShape({batch_size_dim, output_rows, output_cols, output_depth});
  }
  c->set_output(0, output_shape);
  return Status::OK();
}
0
436,161
static int __io_sqe_files_update(struct io_ring_ctx *ctx, struct io_uring_rsrc_update2 *up, unsigned nr_args) { u64 __user *tags = u64_to_user_ptr(up->tags); __s32 __user *fds = u64_to_user_ptr(up->data); struct io_rsrc_data *data = ctx->file_data; struct io_fixed_file *file_slot; struct file *file; int fd, i, err = 0; unsigned int done; bool needs_switch = false; if (!ctx->file_data) return -ENXIO; if (up->offset + nr_args > ctx->nr_user_files) return -EINVAL; for (done = 0; done < nr_args; done++) { u64 tag = 0; if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) || copy_from_user(&fd, &fds[done], sizeof(fd))) { err = -EFAULT; break; } if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) { err = -EINVAL; break; } if (fd == IORING_REGISTER_FILES_SKIP) continue; i = array_index_nospec(up->offset + done, ctx->nr_user_files); file_slot = io_fixed_file_slot(&ctx->file_table, i); if (file_slot->file_ptr) { file = (struct file *)(file_slot->file_ptr & FFS_MASK); err = io_queue_rsrc_removal(data, up->offset + done, ctx->rsrc_node, file); if (err) break; file_slot->file_ptr = 0; needs_switch = true; } if (fd != -1) { file = fget(fd); if (!file) { err = -EBADF; break; } /* * Don't allow io_uring instances to be registered. If * UNIX isn't enabled, then this causes a reference * cycle and this instance can never get freed. If UNIX * is enabled we'll handle it just fine, but there's * still no point in allowing a ring fd as it doesn't * support regular read/write anyway. */ if (file->f_op == &io_uring_fops) { fput(file); err = -EBADF; break; } *io_get_tag_slot(data, up->offset + done) = tag; io_fixed_file_set(file_slot, file); err = io_sqe_file_register(ctx, file, i); if (err) { file_slot->file_ptr = 0; fput(file); break; } } } if (needs_switch) io_rsrc_node_switch(ctx, data); return done ? done : err;
0
291,815
static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path) { struct rtrs_clt_sess *clt = clt_path->clt; struct rtrs_clt_io_req *req; int i, err; if (!clt_path->reqs) return; for (i = 0; i < clt_path->queue_depth; ++i) { req = &clt_path->reqs[i]; if (!req->in_use) continue; /* * Safely (without notification) complete failed request. * After completion this request is still useble and can * be failovered to another path. */ complete_rdma_req(req, -ECONNABORTED, false, true); err = rtrs_clt_failover_req(clt, req); if (err) /* Failover failed, notify anyway */ req->conf(req->priv, err); } }
0
90,829
StorageType type() const { return type_; }
0
317,189
static int selinux_add_mnt_opt(const char *option, const char *val, int len, void **mnt_opts) { int token = Opt_error; int rc, i; for (i = 0; i < ARRAY_SIZE(tokens); i++) { if (strcmp(option, tokens[i].name) == 0) { token = tokens[i].opt; break; } } if (token == Opt_error) return -EINVAL; if (token != Opt_seclabel) { val = kmemdup_nul(val, len, GFP_KERNEL); if (!val) { rc = -ENOMEM; goto free_opt; } } rc = selinux_add_opt(token, val, mnt_opts); if (unlikely(rc)) { kfree(val); goto free_opt; } return rc; free_opt: if (*mnt_opts) { selinux_free_mnt_opts(*mnt_opts); *mnt_opts = NULL; } return rc; }
0
508,791
void lex_unlock_plugins(LEX *lex) { DBUG_ENTER("lex_unlock_plugins"); /* release used plugins */ if (lex->plugins.elements) /* No function call and no mutex if no plugins. */ { plugin_unlock_list(0, (plugin_ref*)lex->plugins.buffer, lex->plugins.elements); } reset_dynamic(&lex->plugins); DBUG_VOID_RETURN; }
0
221,639
explicit SymbolicShapeOptimizationPass(bool constraints_only) { this->optimize_only_constraints = constraints_only; }
0
238,460
static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; reg->s32_min_value = S32_MIN; reg->s32_max_value = S32_MAX; reg->u32_min_value = 0; reg->u32_max_value = U32_MAX; }
0
221,687
bool Socket::breadyForOutput(int timeout) { //if (!isssl) { return BaseSocket::breadyForOutput(timeout); //} //return true; }
0
401,551
int del_timer_sync(struct timer_list *timer) { int ret; #ifdef CONFIG_LOCKDEP unsigned long flags; /* * If lockdep gives a backtrace here, please reference * the synchronization rules above. */ local_irq_save(flags); lock_map_acquire(&timer->lockdep_map); lock_map_release(&timer->lockdep_map); local_irq_restore(flags); #endif /* * don't use it in hardirq context, because it * could lead to deadlock. */ WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); do { ret = try_to_del_timer_sync(timer); if (unlikely(ret < 0)) { del_timer_wait_running(timer); cpu_relax(); } } while (ret < 0); return ret; }
0
215,262
static void gem_transmit(CadenceGEMState *s) { uint32_t desc[DESC_MAX_NUM_WORDS]; hwaddr packet_desc_addr; uint8_t *p; unsigned total_bytes; int q = 0; /* Do nothing if transmit is not enabled. */ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { return; } DB_PRINT("\n"); /* The packet we will hand off to QEMU. * Packets scattered across multiple descriptors are gathered to this * one contiguous buffer first. */ p = s->tx_packet; total_bytes = 0; for (q = s->num_priority_queues - 1; q >= 0; q--) { /* read current descriptor */ packet_desc_addr = gem_get_tx_desc_addr(s, q); DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr); address_space_read(&s->dma_as, packet_desc_addr, MEMTXATTRS_UNSPECIFIED, desc, sizeof(uint32_t) * gem_get_desc_len(s, false)); /* Handle all descriptors owned by hardware */ while (tx_desc_get_used(desc) == 0) { /* Do nothing if transmit is not enabled. */ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { return; } print_gem_tx_desc(desc, q); /* The real hardware would eat this (and possibly crash). * For QEMU let's lend a helping hand. */ if ((tx_desc_get_buffer(s, desc) == 0) || (tx_desc_get_length(desc) == 0)) { DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n", packet_desc_addr); break; } if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) - (p - s->tx_packet)) { qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \ HWADDR_PRIx " too large: size 0x%x space 0x%zx\n", packet_desc_addr, tx_desc_get_length(desc), gem_get_max_buf_len(s, true) - (p - s->tx_packet)); gem_set_isr(s, q, GEM_INT_AMBA_ERR); break; } /* Gather this fragment of the packet from "dma memory" to our * contig buffer. 
*/ address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc), MEMTXATTRS_UNSPECIFIED, p, tx_desc_get_length(desc)); p += tx_desc_get_length(desc); total_bytes += tx_desc_get_length(desc); /* Last descriptor for this packet; hand the whole thing off */ if (tx_desc_get_last(desc)) { uint32_t desc_first[DESC_MAX_NUM_WORDS]; hwaddr desc_addr = gem_get_tx_desc_addr(s, q); /* Modify the 1st descriptor of this packet to be owned by * the processor. */ address_space_read(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED, desc_first, sizeof(desc_first)); tx_desc_set_used(desc_first); address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED, desc_first, sizeof(desc_first)); /* Advance the hardware current descriptor past this packet */ if (tx_desc_get_wrap(desc)) { s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q); } else { s->tx_desc_addr[q] = packet_desc_addr + 4 * gem_get_desc_len(s, false); } DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]); s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL; gem_set_isr(s, q, GEM_INT_TXCMPL); /* Handle interrupt consequences */ gem_update_int_status(s); /* Is checksum offload enabled? 
*/ if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) { net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL); } /* Update MAC statistics */ gem_transmit_updatestats(s, s->tx_packet, total_bytes); /* Send the packet somewhere */ if (s->phy_loop || (s->regs[GEM_NWCTRL] & GEM_NWCTRL_LOCALLOOP)) { gem_receive(qemu_get_queue(s->nic), s->tx_packet, total_bytes); } else { qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet, total_bytes); } /* Prepare for next packet */ p = s->tx_packet; total_bytes = 0; } /* read next descriptor */ if (tx_desc_get_wrap(desc)) { if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { packet_desc_addr = s->regs[GEM_TBQPH]; packet_desc_addr <<= 32; } else { packet_desc_addr = 0; } packet_desc_addr |= gem_get_tx_queue_base_addr(s, q); } else { packet_desc_addr += 4 * gem_get_desc_len(s, false); } DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr); address_space_read(&s->dma_as, packet_desc_addr, MEMTXATTRS_UNSPECIFIED, desc, sizeof(uint32_t) * gem_get_desc_len(s, false)); } if (tx_desc_get_used(desc)) { s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED; /* IRQ TXUSED is defined only for queue 0 */ if (q == 0) { gem_set_isr(s, 0, GEM_INT_TXUSED); } gem_update_int_status(s); } } }
1
364,733
parse_tag_line( char_u *lbuf, // line to be parsed #ifdef FEAT_EMACS_TAGS int is_etag, #endif tagptrs_T *tagp) { char_u *p; #ifdef FEAT_EMACS_TAGS if (is_etag) // emacs-style tag file return emacs_tags_parse_line(lbuf, tagp); #endif // Isolate the tagname, from lbuf up to the first white tagp->tagname = lbuf; p = vim_strchr(lbuf, TAB); if (p == NULL) return FAIL; tagp->tagname_end = p; // Isolate file name, from first to second white space if (*p != NUL) ++p; tagp->fname = p; p = vim_strchr(p, TAB); if (p == NULL) return FAIL; tagp->fname_end = p; // find start of search command, after second white space if (*p != NUL) ++p; if (*p == NUL) return FAIL; tagp->command = p; return OK; }
0
329,944
lerp8x4 (uint32_t src, uint8_t a, uint32_t dst) { return (add8x2_8x2 (mul8x2_8 (src, a), mul8x2_8 (dst, ~a)) | add8x2_8x2 (mul8x2_8 (src >> G_SHIFT, a), mul8x2_8 (dst >> G_SHIFT, ~a)) << G_SHIFT); }
0
273,917
static char *time_to_str(time_t mtime) { struct tm *t = localtime(&mtime); static char str[20]; setlocale(LC_TIME, "C"); strftime(str, sizeof(str), "%b %e %H:%M", t); return str; }
0
196,316
int digest_generic_verify(struct digest *d, const unsigned char *md) { int ret; int len = digest_length(d); unsigned char *tmp; tmp = xmalloc(len); ret = digest_final(d, tmp); if (ret) goto end; ret = memcmp(md, tmp, len); ret = ret ? -EINVAL : 0; end: free(tmp); return ret; }
1
365,761
static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct sixpack *sp = sp_get(tty); struct net_device *dev; unsigned int tmp, err; if (!sp) return -ENXIO; dev = sp->dev; switch(cmd) { case SIOCGIFNAME: err = copy_to_user((void __user *) arg, dev->name, strlen(dev->name) + 1) ? -EFAULT : 0; break; case SIOCGIFENCAP: err = put_user(0, (int __user *) arg); break; case SIOCSIFENCAP: if (get_user(tmp, (int __user *) arg)) { err = -EFAULT; break; } sp->mode = tmp; dev->addr_len = AX25_ADDR_LEN; dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3; dev->type = ARPHRD_AX25; err = 0; break; case SIOCSIFHWADDR: { char addr[AX25_ADDR_LEN]; if (copy_from_user(&addr, (void __user *)arg, AX25_ADDR_LEN)) { err = -EFAULT; break; } netif_tx_lock_bh(dev); __dev_addr_set(dev, &addr, AX25_ADDR_LEN); netif_tx_unlock_bh(dev); err = 0; break; } default: err = tty_mode_ioctl(tty, cmd, arg); } sp_put(sp); return err; }
0
498,099
void cgit_tree_link(const char *name, const char *title, const char *class, const char *head, const char *rev, const char *path) { reporevlink("tree", name, title, class, head, rev, path); }
0
326,616
archive_write_disk_vtable(void) { static struct archive_vtable av; static int inited = 0; if (!inited) { av.archive_close = _archive_write_disk_close; av.archive_filter_bytes = _archive_write_disk_filter_bytes; av.archive_free = _archive_write_disk_free; av.archive_write_header = _archive_write_disk_header; av.archive_write_finish_entry = _archive_write_disk_finish_entry; av.archive_write_data = _archive_write_disk_data; av.archive_write_data_block = _archive_write_disk_data_block; inited = 1; } return (&av); }
0
196,889
int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype, pgpDigParams * ret) { const uint8_t *p = pkts; const uint8_t *pend = pkts + pktlen; pgpDigParams digp = NULL; struct pgpPkt pkt; int rc = -1; /* assume failure */ while (p < pend) { if (decodePkt(p, (pend - p), &pkt)) break; if (digp == NULL) { if (pkttype && pkt.tag != pkttype) { break; } else { digp = pgpDigParamsNew(pkt.tag); } } if (pgpPrtPkt(&pkt, digp)) break; p += (pkt.body - pkt.head) + pkt.blen; if (pkttype == PGPTAG_SIGNATURE) break; } rc = (digp && (p == pend)) ? 0 : -1; if (ret && rc == 0) { *ret = digp; } else { pgpDigParamsFree(digp); } return rc; }
1
301,014
pcx256_print_page(gx_device_printer * pdev, gp_file * file) { pcx_header header; int code; header = pcx_header_prototype; header.version = version_3_0; header.bpp = 8; header.nplanes = 1; assign_ushort(header.palinfo, (pdev->color_info.num_components > 1 ? palinfo_color : palinfo_gray)); code = pcx_write_page(pdev, file, &header, false); if (code >= 0) { /* Write out the palette. */ gp_fputc(0x0c, file); code = pc_write_palette((gx_device *) pdev, 256, file); } return code; }
0
261,406
bool advanceCtbAddr(thread_context* tctx) { tctx->CtbAddrInTS++; return setCtbAddrFromTS(tctx); }
0
195,230
void pjmedia_rtcp_xr_rx_rtcp_xr( pjmedia_rtcp_xr_session *sess, const void *pkt, pj_size_t size) { const pjmedia_rtcp_xr_pkt *rtcp_xr = (pjmedia_rtcp_xr_pkt*) pkt; const pjmedia_rtcp_xr_rb_rr_time *rb_rr_time = NULL; const pjmedia_rtcp_xr_rb_dlrr *rb_dlrr = NULL; const pjmedia_rtcp_xr_rb_stats *rb_stats = NULL; const pjmedia_rtcp_xr_rb_voip_mtc *rb_voip_mtc = NULL; const pjmedia_rtcp_xr_rb_header *rb_hdr = (pjmedia_rtcp_xr_rb_header*) rtcp_xr->buf; unsigned pkt_len, rb_len; if (rtcp_xr->common.pt != RTCP_XR) return; pkt_len = pj_ntohs((pj_uint16_t)rtcp_xr->common.length); if ((pkt_len + 1) > (size / 4)) return; /* Parse report rpt_types */ while ((pj_int32_t*)rb_hdr < (pj_int32_t*)pkt + pkt_len) { rb_len = pj_ntohs((pj_uint16_t)rb_hdr->length); /* Just skip any block with length == 0 (no report content) */ if (rb_len) { switch (rb_hdr->bt) { case BT_RR_TIME: rb_rr_time = (pjmedia_rtcp_xr_rb_rr_time*) rb_hdr; break; case BT_DLRR: rb_dlrr = (pjmedia_rtcp_xr_rb_dlrr*) rb_hdr; break; case BT_STATS: rb_stats = (pjmedia_rtcp_xr_rb_stats*) rb_hdr; break; case BT_VOIP_METRICS: rb_voip_mtc = (pjmedia_rtcp_xr_rb_voip_mtc*) rb_hdr; break; default: break; } } rb_hdr = (pjmedia_rtcp_xr_rb_header*) ((pj_int32_t*)rb_hdr + rb_len + 1); } /* Receiving RR Time */ if (rb_rr_time) { /* Save LRR from NTP timestamp of the RR time block report */ sess->rx_lrr = ((pj_ntohl(rb_rr_time->ntp_sec) & 0x0000FFFF) << 16) | ((pj_ntohl(rb_rr_time->ntp_frac) >> 16) & 0xFFFF); /* Calculate RR arrival time for DLRR */ pj_get_timestamp(&sess->rx_lrr_time); TRACE_((sess->name, "Rx RTCP SR: ntp_ts=%p", sess->rx_lrr, (pj_uint32_t)(sess->rx_lrr_time.u64*65536/ sess->rtcp_session->ts_freq.u64))); } /* Receiving DLRR */ if (rb_dlrr) { pj_uint32_t lrr, now, dlrr; pj_uint64_t eedelay; pjmedia_rtcp_ntp_rec ntp; /* LRR is the middle 32bit of NTP. 
It has 1/65536 second * resolution */ lrr = pj_ntohl(rb_dlrr->item.lrr); /* DLRR is delay since LRR, also in 1/65536 resolution */ dlrr = pj_ntohl(rb_dlrr->item.dlrr); /* Get current time, and convert to 1/65536 resolution */ pjmedia_rtcp_get_ntp_time(sess->rtcp_session, &ntp); now = ((ntp.hi & 0xFFFF) << 16) + (ntp.lo >> 16); /* End-to-end delay is (now-lrr-dlrr) */ eedelay = now - lrr - dlrr; /* Convert end to end delay to usec (keeping the calculation in * 64bit space):: * sess->ee_delay = (eedelay * 1000) / 65536; */ if (eedelay < 4294) { eedelay = (eedelay * 1000000) >> 16; } else { eedelay = (eedelay * 1000) >> 16; eedelay *= 1000; } TRACE_((sess->name, "Rx RTCP XR DLRR: lrr=%p, dlrr=%p (%d:%03dms), " "now=%p, rtt=%p", lrr, dlrr, dlrr/65536, (dlrr%65536)*1000/65536, now, (pj_uint32_t)eedelay)); /* Only save calculation if "now" is greater than lrr, or * otherwise rtt will be invalid */ if (now-dlrr >= lrr) { unsigned rtt = (pj_uint32_t)eedelay; /* Check that eedelay value really makes sense. * We allow up to 30 seconds RTT! */ if (eedelay <= 30 * 1000 * 1000UL) { /* "Normalize" rtt value that is exceptionally high. * For such values, "normalize" the rtt to be three times * the average value. 
*/ if (rtt>((unsigned)sess->stat.rtt.mean*3) && sess->stat.rtt.n!=0) { unsigned orig_rtt = rtt; rtt = (unsigned)sess->stat.rtt.mean*3; PJ_LOG(5,(sess->name, "RTT value %d usec is normalized to %d usec", orig_rtt, rtt)); } TRACE_((sess->name, "RTCP RTT is set to %d usec", rtt)); pj_math_stat_update(&sess->stat.rtt, rtt); } } else { PJ_LOG(5, (sess->name, "Internal RTCP NTP clock skew detected: " "lrr=%p, now=%p, dlrr=%p (%d:%03dms), " "diff=%d", lrr, now, dlrr, dlrr/65536, (dlrr%65536)*1000/65536, dlrr-(now-lrr))); } } /* Receiving Statistics Summary */ if (rb_stats) { pj_uint8_t flags = rb_stats->header.specific; pj_bzero(&sess->stat.tx.stat_sum, sizeof(sess->stat.tx.stat_sum)); /* Range of packets sequence reported in this blocks */ sess->stat.tx.stat_sum.begin_seq = pj_ntohs(rb_stats->begin_seq); sess->stat.tx.stat_sum.end_seq = pj_ntohs(rb_stats->end_seq); /* Get flags of valid fields */ sess->stat.tx.stat_sum.l = (flags & (1 << 7)) != 0; sess->stat.tx.stat_sum.d = (flags & (1 << 6)) != 0; sess->stat.tx.stat_sum.j = (flags & (1 << 5)) != 0; sess->stat.tx.stat_sum.t = (flags & (3 << 3)) != 0; /* Fetch the reports info */ if (sess->stat.tx.stat_sum.l) { sess->stat.tx.stat_sum.lost = pj_ntohl(rb_stats->lost); } if (sess->stat.tx.stat_sum.d) { sess->stat.tx.stat_sum.dup = pj_ntohl(rb_stats->dup); } if (sess->stat.tx.stat_sum.j) { sess->stat.tx.stat_sum.jitter.min = pj_ntohl(rb_stats->jitter_min); sess->stat.tx.stat_sum.jitter.max = pj_ntohl(rb_stats->jitter_max); sess->stat.tx.stat_sum.jitter.mean= pj_ntohl(rb_stats->jitter_mean); pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.jitter, pj_ntohl(rb_stats->jitter_dev)); } if (sess->stat.tx.stat_sum.t) { sess->stat.tx.stat_sum.toh.min = rb_stats->toh_min; sess->stat.tx.stat_sum.toh.max = rb_stats->toh_max; sess->stat.tx.stat_sum.toh.mean= rb_stats->toh_mean; pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.toh, pj_ntohl(rb_stats->toh_dev)); } pj_gettimeofday(&sess->stat.tx.stat_sum.update); } /* Receiving VoIP Metrics 
*/ if (rb_voip_mtc) { sess->stat.tx.voip_mtc.loss_rate = rb_voip_mtc->loss_rate; sess->stat.tx.voip_mtc.discard_rate = rb_voip_mtc->discard_rate; sess->stat.tx.voip_mtc.burst_den = rb_voip_mtc->burst_den; sess->stat.tx.voip_mtc.gap_den = rb_voip_mtc->gap_den; sess->stat.tx.voip_mtc.burst_dur = pj_ntohs(rb_voip_mtc->burst_dur); sess->stat.tx.voip_mtc.gap_dur = pj_ntohs(rb_voip_mtc->gap_dur); sess->stat.tx.voip_mtc.rnd_trip_delay = pj_ntohs(rb_voip_mtc->rnd_trip_delay); sess->stat.tx.voip_mtc.end_sys_delay = pj_ntohs(rb_voip_mtc->end_sys_delay); /* signal & noise level encoded in two's complement form */ sess->stat.tx.voip_mtc.signal_lvl = (pj_int8_t) ((rb_voip_mtc->signal_lvl > 127)? ((int)rb_voip_mtc->signal_lvl - 256) : rb_voip_mtc->signal_lvl); sess->stat.tx.voip_mtc.noise_lvl = (pj_int8_t) ((rb_voip_mtc->noise_lvl > 127)? ((int)rb_voip_mtc->noise_lvl - 256) : rb_voip_mtc->noise_lvl); sess->stat.tx.voip_mtc.rerl = rb_voip_mtc->rerl; sess->stat.tx.voip_mtc.gmin = rb_voip_mtc->gmin; sess->stat.tx.voip_mtc.r_factor = rb_voip_mtc->r_factor; sess->stat.tx.voip_mtc.ext_r_factor = rb_voip_mtc->ext_r_factor; sess->stat.tx.voip_mtc.mos_lq = rb_voip_mtc->mos_lq; sess->stat.tx.voip_mtc.mos_cq = rb_voip_mtc->mos_cq; sess->stat.tx.voip_mtc.rx_config = rb_voip_mtc->rx_config; sess->stat.tx.voip_mtc.jb_nom = pj_ntohs(rb_voip_mtc->jb_nom); sess->stat.tx.voip_mtc.jb_max = pj_ntohs(rb_voip_mtc->jb_max); sess->stat.tx.voip_mtc.jb_abs_max = pj_ntohs(rb_voip_mtc->jb_abs_max); pj_gettimeofday(&sess->stat.tx.voip_mtc.update); } }
1
225,109
bool IsSubsetOf(const T& sub, const T& super) { for (const auto& o : sub) { bool found = false; for (const auto& n : super) { if (o == n) { found = true; break; } } if (!found) return false; } return true; }
0
427,797
void sev_es_create_vcpu(struct vcpu_svm *svm) { /* * Set the GHCB MSR value as per the GHCB specification when creating * a vCPU for an SEV-ES guest. */ set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, GHCB_VERSION_MIN, sev_enc_bit)); }
0
333,093
append(Ptrlist *l1, Ptrlist *l2) { Ptrlist *oldl1; oldl1 = l1; while (l1->next) l1 = l1->next; l1->next = l2; return oldl1; }
0
466,140
static int em_idiv_ex(struct x86_emulate_ctxt *ctxt) { u8 de = 0; emulate_1op_rax_rdx(ctxt, "idiv", de); if (de) return emulate_de(ctxt); return X86EMUL_CONTINUE; }
0
344,739
parse_absolute_time(const char *s, uint64_t *tp) { struct tm tm; time_t tt; char buf[32], *fmt; *tp = 0; /* * POSIX strptime says "The application shall ensure that there * is white-space or other non-alphanumeric characters between * any two conversion specifications" so arrange things this way. */ switch (strlen(s)) { case 8: /* YYYYMMDD */ fmt = "%Y-%m-%d"; snprintf(buf, sizeof(buf), "%.4s-%.2s-%.2s", s, s + 4, s + 6); break; case 12: /* YYYYMMDDHHMM */ fmt = "%Y-%m-%dT%H:%M"; snprintf(buf, sizeof(buf), "%.4s-%.2s-%.2sT%.2s:%.2s", s, s + 4, s + 6, s + 8, s + 10); break; case 14: /* YYYYMMDDHHMMSS */ fmt = "%Y-%m-%dT%H:%M:%S"; snprintf(buf, sizeof(buf), "%.4s-%.2s-%.2sT%.2s:%.2s:%.2s", s, s + 4, s + 6, s + 8, s + 10, s + 12); break; default: return SSH_ERR_INVALID_FORMAT; } memset(&tm, 0, sizeof(tm)); if (strptime(buf, fmt, &tm) == NULL) return SSH_ERR_INVALID_FORMAT; if ((tt = mktime(&tm)) < 0) return SSH_ERR_INVALID_FORMAT; /* success */ *tp = (uint64_t)tt; return 0; }
0
225,787
} static u32 ctrn_ctts_to_index(GF_TrackFragmentRunBox *ctrn, s32 ctts) { if (!(ctrn->flags & GF_ISOM_TRUN_CTS_OFFSET)) return 0; if (!ctts) return 0; if (ctrn->version) { if (ctrn->ctso_multiplier) return ctrn_s32_to_index(ctts / ctrn->ctso_multiplier); return ctrn_s32_to_index(ctts); } assert(ctts>0); if (ctrn->ctso_multiplier) return ctrn_u32_to_index((u32)ctts / ctrn->ctso_multiplier); return ctrn_s32_to_index((u32)ctts);
0
276,996
fiber_resume(mrb_state *mrb, mrb_value self) { const mrb_value *a; mrb_int len; mrb_bool vmexec = FALSE; mrb_get_args(mrb, "*!", &a, &len); if (mrb->c->ci->cci > 0) { vmexec = TRUE; } return fiber_switch(mrb, self, len, a, TRUE, vmexec); }
0
238,441
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx, bool pop_log) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (pop_log) bpf_vlog_reset(&env->log, head->log_pos); if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; }
0
252,394
static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; }
0
90,203
virtual bool cellular_connected() const { return cellular_ ? cellular_->connected() : false; }
0
432,206
void cpu_address_space_init(CPUState *cpu, int asidx, MemoryRegion *mr) { /* Target code should have set num_ases before calling us */ assert(asidx < cpu->num_ases); if (!cpu->cpu_ases) { cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); cpu->cpu_ases[0].cpu = cpu; cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory); cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit; memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as); } /* arm security memory */ if (asidx > 0) { cpu->cpu_ases[asidx].cpu = cpu; cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory); cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit; memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as); } }
0
273,886
static void do_PORT(ctrl_t *ctrl, int pending) { if (!ctrl->data_address[0]) { /* Check if previous command was PASV */ if (ctrl->data_sd == -1 && ctrl->data_listen_sd == -1) { if (pending == 1 && ctrl->d_num == -1) do_MLST(ctrl); return; } ctrl->pending = pending; return; } if (open_data_connection(ctrl)) { do_abort(ctrl); send_msg(ctrl->sd, "425 TCP connection cannot be established.\r\n"); return; } if (pending != 1 || ctrl->list_mode != 2) send_msg(ctrl->sd, "150 Data connection opened; transfer starting.\r\n"); switch (pending) { case 3: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_STOR, ctrl, ctrl->data_sd, UEV_READ); break; case 2: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_RETR, ctrl, ctrl->data_sd, UEV_WRITE); break; case 1: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_LIST, ctrl, ctrl->data_sd, UEV_WRITE); break; } ctrl->pending = 0; }
0
512,620
bool set_limit_clause_param(longlong nr) { value.set_handler(&type_handler_longlong); set_int(nr, MY_INT64_NUM_DECIMAL_DIGITS); return !unsigned_flag && value.integer < 0; }
0
195,218
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val) { int idx; int type = nint(tree->car); switch (type) { case NODE_GVAR: case NODE_ARG: case NODE_LVAR: case NODE_IVAR: case NODE_CVAR: case NODE_CONST: case NODE_NIL: case NODE_MASGN: if (rhs) { codegen(s, rhs, VAL); pop(); sp = cursp(); } break; case NODE_COLON2: case NODE_CALL: case NODE_SCALL: /* keep evaluation order */ break; case NODE_NVAR: codegen_error(s, "Can't assign to numbered parameter"); break; default: codegen_error(s, "unknown lhs"); break; } tree = tree->cdr; switch (type) { case NODE_GVAR: gen_setxv(s, OP_SETGV, sp, nsym(tree), val); break; case NODE_ARG: case NODE_LVAR: idx = lv_idx(s, nsym(tree)); if (idx > 0) { if (idx != sp) { gen_move(s, idx, sp, val); } break; } else { /* upvar */ gen_setupvar(s, sp, nsym(tree)); } break; case NODE_IVAR: gen_setxv(s, OP_SETIV, sp, nsym(tree), val); break; case NODE_CVAR: gen_setxv(s, OP_SETCV, sp, nsym(tree), val); break; case NODE_CONST: gen_setxv(s, OP_SETCONST, sp, nsym(tree), val); break; case NODE_COLON2: if (sp) { gen_move(s, cursp(), sp, 0); } sp = cursp(); push(); codegen(s, tree->car, VAL); if (rhs) { codegen(s, rhs, VAL); pop(); gen_move(s, sp, cursp(), 0); } pop_n(2); idx = new_sym(s, nsym(tree->cdr)); genop_2(s, OP_SETMCNST, sp, idx); break; case NODE_CALL: case NODE_SCALL: { int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0; mrb_sym mid = nsym(tree->cdr->car); top = cursp(); if (val || sp == cursp()) { push(); /* room for retval */ } call = cursp(); if (!tree->car) { noself = 1; push(); } else { codegen(s, tree->car, VAL); /* receiver */ } if (safe) { int recv = cursp()-1; gen_move(s, cursp(), recv, 1); skip = genjmp2_0(s, OP_JMPNIL, cursp(), val); } tree = tree->cdr->cdr->car; if (tree) { if (tree->car) { /* positional arguments */ n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14); if (n < 0) { /* variable length */ n = 15; push(); } } if (tree->cdr->car) { /* keyword arguments */ gen_hash(s, 
tree->cdr->car->cdr, VAL, 0); if (n < 14) { n++; push(); } else { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } } } if (rhs) { codegen(s, rhs, VAL); pop(); } else { gen_move(s, cursp(), sp, 0); } if (val) { gen_move(s, top, cursp(), 1); } if (n < 14) { n++; } else { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } s->sp = call; if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) { genop_1(s, OP_SETIDX, cursp()); } else { genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n); } if (safe) { dispatch(s, skip); } s->sp = top; } break; case NODE_MASGN: gen_vmassignment(s, tree->car, sp, val); break; /* splat without assignment */ case NODE_NIL: break; default: codegen_error(s, "unknown lhs"); break; } if (val) push(); }
1
261,950
njs_encode_base64_length(const njs_str_t *src, size_t *out_size) { size_t size; size = (src->length == 0) ? 0 : njs_base64_encoded_length(src->length); if (out_size != NULL) { *out_size = size; } return size; }
0
466,152
static int em_bt(struct x86_emulate_ctxt *ctxt) { /* Disable writeback. */ ctxt->dst.type = OP_NONE; /* only subword offset */ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; emulate_2op_SrcV_nobyte(ctxt, "bt"); return X86EMUL_CONTINUE; }
0
195,095
int Socket::startSslClient(const std::string &certificate_path, String hostname) { if (isssl) { stopSsl(); } ERR_clear_error(); #if OPENSSL_VERSION_NUMBER < 0x10100000L ctx = SSL_CTX_new(SSLv23_client_method()); #else ctx = SSL_CTX_new(TLS_client_method()); #endif if (ctx == NULL) { #ifdef NETDEBUG std::cout << thread_id << "Error ssl context is null (check that openssl has been inited)" << std::endl; #endif log_ssl_errors("Error ssl context is null for %s", hostname.c_str()); return -1; } //set the timeout for the ssl session if (SSL_CTX_set_timeout(ctx, 130l) < 1) { SSL_CTX_free(ctx); ctx = NULL; return -1; } //load certs ERR_clear_error(); if (certificate_path.length()) { if (!SSL_CTX_load_verify_locations(ctx, NULL, certificate_path.c_str())) { #ifdef NETDEBUG std::cout << thread_id << "couldnt load certificates" << std::endl; #endif log_ssl_errors("couldnt load certificates from %s", certificate_path.c_str()); //tidy up SSL_CTX_free(ctx); ctx = NULL; return -2; } } else if (!SSL_CTX_set_default_verify_paths(ctx)) //use default if no certPpath given { #ifdef NETDEBUG std::cout << thread_id << "couldnt load certificates" << std::endl; #endif log_ssl_errors("couldnt load default certificates for %s", hostname.c_str()); //tidy up SSL_CTX_free(ctx); ctx = NULL; return -2; } // add validation params ERR_clear_error(); X509_VERIFY_PARAM *x509_param = X509_VERIFY_PARAM_new(); if (!x509_param) { log_ssl_errors("couldnt add validation params for %s", hostname.c_str()); //X509_VERIFY_PARAM_free(x509_param); SSL_CTX_free(ctx); ctx = NULL; return -2; } ERR_clear_error(); if (!X509_VERIFY_PARAM_set_flags(x509_param, X509_V_FLAG_TRUSTED_FIRST)) { log_ssl_errors("couldnt add validation params for %s", hostname.c_str()); X509_VERIFY_PARAM_free(x509_param); SSL_CTX_free(ctx); ctx = NULL; return -2; } ERR_clear_error(); if (!SSL_CTX_set1_param(ctx, x509_param)) { log_ssl_errors("couldnt add validation params for %s", hostname.c_str()); X509_VERIFY_PARAM_free(x509_param); 
SSL_CTX_free(ctx); ctx = NULL; return -2; } X509_VERIFY_PARAM_free(x509_param); // try not freeing this as SSL_CTX_free seems to be ring to free it //hand socket over to ssl lib ERR_clear_error(); ssl = SSL_new(ctx); SSL_set_options(ssl, SSL_OP_ALL); SSL_set_mode(ssl, SSL_MODE_AUTO_RETRY); SSL_set_connect_state(ssl); //fcntl(this->getFD() ,F_SETFL, O_NONBLOCK); // blocking mode used currently SSL_set_fd(ssl, this->getFD()); SSL_set_tlsext_host_name(ssl, hostname.c_str()); //make io non blocking as select wont tell us if we can do a read without blocking //BIO_set_nbio(SSL_get_rbio(ssl),1l); // blocking mode used currently //BIO_set_nbio(SSL_get_wbio(ssl),1l); // blocking mode used currently ERR_clear_error(); int rc = SSL_connect(ssl); if (rc < 0) { log_ssl_errors("ssl_connect failed to %s", hostname.c_str()); #ifdef NETDEBUG std::cout << thread_id << "ssl_connect failed with error " << SSL_get_error(ssl, rc) << std::endl; #endif // tidy up SSL_free(ssl); ssl = NULL; SSL_CTX_free(ctx); ctx = NULL; return -3; } //should be safer to do this last as nothing will ever try to use a ssl socket that isnt fully setup isssl = true; issslserver = false; return 0; }
1
261,993
void Curl_conncache_remove_conn(struct Curl_easy *data, struct connectdata *conn, bool lock) { struct connectbundle *bundle = conn->bundle; struct conncache *connc = data->state.conn_cache; /* The bundle pointer can be NULL, since this function can be called due to a failed connection attempt, before being added to a bundle */ if(bundle) { if(lock) { CONNCACHE_LOCK(data); } bundle_remove_conn(bundle, conn); if(bundle->num_connections == 0) conncache_remove_bundle(connc, bundle); conn->bundle = NULL; /* removed from it */ if(connc) { connc->num_conn--; DEBUGF(infof(data, "The cache now contains %zu members", connc->num_conn)); } if(lock) { CONNCACHE_UNLOCK(data); } } }
0
463,221
static int annotation_set_mailboxopt(annotate_state_t *state, struct annotate_entry_list *entry, int maywrite) { struct mailbox *mailbox = state->mailbox; uint32_t flag = (unsigned long)entry->desc->rock; unsigned long newopts; assert(mailbox); newopts = mailbox->i.options; if (entry->shared.s && !strcmp(entry->shared.s, "true")) { newopts |= flag; } else { newopts &= ~flag; } /* only mark dirty if there's been a change */ if (mailbox->i.options != newopts) { if (!maywrite) return IMAP_PERMISSION_DENIED; mailbox_index_dirty(mailbox); mailbox_modseq_dirty(mailbox); mailbox->i.options = newopts; mboxlist_update_foldermodseq(mailbox->name, mailbox->i.highestmodseq); } return 0; }
0
446,101
static int atusb_get_and_clear_error(struct atusb *atusb) { int err = atusb->err; atusb->err = 0; return err; }
0
508,827
void st_select_lex_node::include_neighbour(st_select_lex_node *before) { if ((next= before->next)) next->prev= &next; prev= &before->next; before->next= this; master= before->master; slave= 0; }
0
196,698
void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { // Note that setting this empty lambda as the default parameter value directly // can cause strange compiler/linker errors, so we do it like this instead. if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); // TODO(ebrevdo): add shape checks between values, indices, // Also add check that dense rank > 0. OP_REQUIRES_ASYNC(context, dense_shape_t.NumElements() != 0, errors::InvalidArgument("Dense shape cannot be empty."), done); using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); }
1
369,293
static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) { struct kiocb *kiocb = &req->rw.kiocb; struct file *file = req->file; ssize_t ret = 0; loff_t *ppos; /* * Don't support polled IO through this interface, and we can't * support non-blocking either. For the latter, this just causes * the kiocb to be handled from an async context. */ if (kiocb->ki_flags & IOCB_HIPRI) return -EOPNOTSUPP; if ((kiocb->ki_flags & IOCB_NOWAIT) && !(kiocb->ki_filp->f_flags & O_NONBLOCK)) return -EAGAIN; ppos = io_kiocb_ppos(kiocb); while (iov_iter_count(iter)) { struct iovec iovec; ssize_t nr; if (!iov_iter_is_bvec(iter)) { iovec = iov_iter_iovec(iter); } else { iovec.iov_base = u64_to_user_ptr(req->rw.addr); iovec.iov_len = req->rw.len; } if (rw == READ) { nr = file->f_op->read(file, iovec.iov_base, iovec.iov_len, ppos); } else { nr = file->f_op->write(file, iovec.iov_base, iovec.iov_len, ppos); } if (nr < 0) { if (!ret) ret = nr; break; } ret += nr; if (!iov_iter_is_bvec(iter)) { iov_iter_advance(iter, nr); } else { req->rw.addr += nr; req->rw.len -= nr; if (!req->rw.len) break; } if (nr != iovec.iov_len) break; } return ret; }
0
253,600
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { struct TCP_Server_Info *server = tcon->ses->server; unsigned int wsize; /* start with specified wsize, or default */ wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE; wsize = min_t(unsigned int, wsize, server->max_write); if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); return wsize; }
0
517,431
static void printFavicon(HttpResponse res) { static size_t l; Socket_T S = res->S; static unsigned char *favicon = NULL; if (! favicon) { favicon = CALLOC(sizeof(unsigned char), strlen(FAVICON_ICO)); l = decode_base64(favicon, FAVICON_ICO); } if (l) { res->is_committed = true; Socket_print(S, "HTTP/1.0 200 OK\r\n"); Socket_print(S, "Content-length: %lu\r\n", (unsigned long)l); Socket_print(S, "Content-Type: image/x-icon\r\n"); Socket_print(S, "Connection: close\r\n\r\n"); if (Socket_write(S, favicon, l) < 0) { LogError("Error sending favicon data -- %s\n", STRERROR); } } }
0
226,332
} static u32 ctrn_read_flags(GF_BitStream *bs, u32 nbbits) { u32 val = gf_bs_read_int(bs, nbbits); if (nbbits==16) val <<= 16; else if (nbbits==8) val <<= 24; return val;
0
409,446
free_cur_term() { # ifdef HAVE_DEL_CURTERM if (cur_term) del_curterm(cur_term); # endif }
0
232,340
GF_Err GetNextMediaTime(GF_TrackBox *trak, u64 movieTime, u64 *OutMovieTime) { u32 i; u64 time; GF_EdtsEntry *ent; *OutMovieTime = 0; if (! trak->editBox || !trak->editBox->editList) return GF_BAD_PARAM; time = 0; ent = NULL; i=0; while ((ent = (GF_EdtsEntry *)gf_list_enum(trak->editBox->editList->entryList, &i))) { if (gf_timestamp_greater_or_equal(time, trak->moov->mvhd->timeScale, movieTime, trak->Media->mediaHeader->timeScale)) { /*skip empty edits*/ if (ent->mediaTime >= 0) { *OutMovieTime = time * trak->Media->mediaHeader->timeScale / trak->moov->mvhd->timeScale; if (*OutMovieTime>0) *OutMovieTime -= 1; return GF_OK; } } time += ent->segmentDuration; } //request for a bigger time that what we can give: return the last sample (undefined behavior...) *OutMovieTime = trak->moov->mvhd->duration; return GF_EOS; }
0
389,748
check_for_opt_chan_or_job_arg(typval_T *args, int idx) { return (args[idx].v_type == VAR_UNKNOWN || check_for_chan_or_job_arg(args, idx) != FAIL); }
0
512,473
bool Item_func_case_simple::fix_length_and_dec() { THD *thd= current_thd; return (aggregate_then_and_else_arguments(thd, when_count() + 1) || aggregate_switch_and_when_arguments(thd, false)); }
0
208,533
xmlStringLenDecodeEntities(xmlParserCtxtPtr ctxt, const xmlChar *str, int len, int what, xmlChar end, xmlChar end2, xmlChar end3) { xmlChar *buffer = NULL; size_t buffer_size = 0; size_t nbchars = 0; xmlChar *current = NULL; xmlChar *rep = NULL; const xmlChar *last; xmlEntityPtr ent; int c,l; if ((ctxt == NULL) || (str == NULL) || (len < 0)) return(NULL); last = str + len; if (((ctxt->depth > 40) && ((ctxt->options & XML_PARSE_HUGE) == 0)) || (ctxt->depth > 1024)) { xmlFatalErr(ctxt, XML_ERR_ENTITY_LOOP, NULL); return(NULL); } /* * allocate a translation buffer. */ buffer_size = XML_PARSER_BIG_BUFFER_SIZE; buffer = (xmlChar *) xmlMallocAtomic(buffer_size); if (buffer == NULL) goto mem_error; /* * OK loop until we reach one of the ending char or a size limit. * we are operating on already parsed values. */ if (str < last) c = CUR_SCHAR(str, l); else c = 0; while ((c != 0) && (c != end) && /* non input consuming loop */ (c != end2) && (c != end3)) { if (c == 0) break; if ((c == '&') && (str[1] == '#')) { int val = xmlParseStringCharRef(ctxt, &str); if (val != 0) { COPY_BUF(0,buffer,nbchars,val); } if (nbchars + XML_PARSER_BUFFER_SIZE > buffer_size) { growBuffer(buffer, XML_PARSER_BUFFER_SIZE); } } else if ((c == '&') && (what & XML_SUBSTITUTE_REF)) { if (xmlParserDebugEntities) xmlGenericError(xmlGenericErrorContext, "String decoding Entity Reference: %.30s\n", str); ent = xmlParseStringEntityRef(ctxt, &str); if ((ctxt->lastError.code == XML_ERR_ENTITY_LOOP) || (ctxt->lastError.code == XML_ERR_INTERNAL_ERROR)) goto int_error; xmlParserEntityCheck(ctxt, 0, ent, 0); if (ent != NULL) ctxt->nbentities += ent->checked / 2; if ((ent != NULL) && (ent->etype == XML_INTERNAL_PREDEFINED_ENTITY)) { if (ent->content != NULL) { COPY_BUF(0,buffer,nbchars,ent->content[0]); if (nbchars + XML_PARSER_BUFFER_SIZE > buffer_size) { growBuffer(buffer, XML_PARSER_BUFFER_SIZE); } } else { xmlFatalErrMsg(ctxt, XML_ERR_INTERNAL_ERROR, "predefined entity has no content\n"); } } else if ((ent 
!= NULL) && (ent->content != NULL)) { ctxt->depth++; rep = xmlStringDecodeEntities(ctxt, ent->content, what, 0, 0, 0); ctxt->depth--; if ((ctxt->lastError.code == XML_ERR_ENTITY_LOOP) || (ctxt->lastError.code == XML_ERR_INTERNAL_ERROR)) goto int_error; if (rep != NULL) { current = rep; while (*current != 0) { /* non input consuming loop */ buffer[nbchars++] = *current++; if (nbchars + XML_PARSER_BUFFER_SIZE > buffer_size) { if (xmlParserEntityCheck(ctxt, nbchars, ent, 0)) goto int_error; growBuffer(buffer, XML_PARSER_BUFFER_SIZE); } } xmlFree(rep); rep = NULL; } } else if (ent != NULL) { int i = xmlStrlen(ent->name); const xmlChar *cur = ent->name; buffer[nbchars++] = '&'; if (nbchars + i + XML_PARSER_BUFFER_SIZE > buffer_size) { growBuffer(buffer, i + XML_PARSER_BUFFER_SIZE); } for (;i > 0;i--) buffer[nbchars++] = *cur++; buffer[nbchars++] = ';'; } } else if (c == '%' && (what & XML_SUBSTITUTE_PEREF)) { if (xmlParserDebugEntities) xmlGenericError(xmlGenericErrorContext, "String decoding PE Reference: %.30s\n", str); ent = xmlParseStringPEReference(ctxt, &str); if (ctxt->lastError.code == XML_ERR_ENTITY_LOOP) goto int_error; xmlParserEntityCheck(ctxt, 0, ent, 0); if (ent != NULL) ctxt->nbentities += ent->checked / 2; if (ent != NULL) { if (ent->content == NULL) { xmlLoadEntityContent(ctxt, ent); } ctxt->depth++; rep = xmlStringDecodeEntities(ctxt, ent->content, what, 0, 0, 0); ctxt->depth--; if (rep != NULL) { current = rep; while (*current != 0) { /* non input consuming loop */ buffer[nbchars++] = *current++; if (nbchars + XML_PARSER_BUFFER_SIZE > buffer_size) { if (xmlParserEntityCheck(ctxt, nbchars, ent, 0)) goto int_error; growBuffer(buffer, XML_PARSER_BUFFER_SIZE); } } xmlFree(rep); rep = NULL; } } } else { COPY_BUF(l,buffer,nbchars,c); str += l; if (nbchars + XML_PARSER_BUFFER_SIZE > buffer_size) { growBuffer(buffer, XML_PARSER_BUFFER_SIZE); } } if (str < last) c = CUR_SCHAR(str, l); else c = 0; } buffer[nbchars] = 0; return(buffer); mem_error: 
xmlErrMemory(ctxt, NULL); int_error: if (rep != NULL) xmlFree(rep); if (buffer != NULL) xmlFree(buffer); return(NULL); }
1
206,044
void ZRLE_DECODE (const Rect& r, rdr::InStream* is, rdr::ZlibInStream* zis, const PixelFormat& pf, ModifiablePixelBuffer* pb) { int length = is->readU32(); zis->setUnderlying(is, length); Rect t; PIXEL_T buf[64 * 64]; for (t.tl.y = r.tl.y; t.tl.y < r.br.y; t.tl.y += 64) { t.br.y = __rfbmin(r.br.y, t.tl.y + 64); for (t.tl.x = r.tl.x; t.tl.x < r.br.x; t.tl.x += 64) { t.br.x = __rfbmin(r.br.x, t.tl.x + 64); int mode = zis->readU8(); bool rle = mode & 128; int palSize = mode & 127; PIXEL_T palette[128]; for (int i = 0; i < palSize; i++) { palette[i] = READ_PIXEL(zis); } if (palSize == 1) { PIXEL_T pix = palette[0]; pb->fillRect(pf, t, &pix); continue; } if (!rle) { if (palSize == 0) { // raw #ifdef CPIXEL for (PIXEL_T* ptr = buf; ptr < buf+t.area(); ptr++) { *ptr = READ_PIXEL(zis); } #else zis->readBytes(buf, t.area() * (BPP / 8)); #endif } else { // packed pixels int bppp = ((palSize > 16) ? 8 : ((palSize > 4) ? 4 : ((palSize > 2) ? 2 : 1))); PIXEL_T* ptr = buf; for (int i = 0; i < t.height(); i++) { PIXEL_T* eol = ptr + t.width(); rdr::U8 byte = 0; rdr::U8 nbits = 0; while (ptr < eol) { if (nbits == 0) { byte = zis->readU8(); nbits = 8; } nbits -= bppp; rdr::U8 index = (byte >> nbits) & ((1 << bppp) - 1) & 127; *ptr++ = palette[index]; } } } } else { if (palSize == 0) { // plain RLE PIXEL_T* ptr = buf; PIXEL_T* end = ptr + t.area(); while (ptr < end) { PIXEL_T pix = READ_PIXEL(zis); int len = 1; int b; do { b = zis->readU8(); len += b; } while (b == 255); if (end - ptr < len) { throw Exception ("ZRLE decode error"); } while (len-- > 0) *ptr++ = pix; } } else { // palette RLE PIXEL_T* ptr = buf; PIXEL_T* end = ptr + t.area(); while (ptr < end) { int index = zis->readU8(); int len = 1; if (index & 128) { int b; do { b = zis->readU8(); len += b; } while (b == 255); if (end - ptr < len) { throw Exception ("ZRLE decode error"); } } index &= 127; PIXEL_T pix = palette[index]; while (len-- > 0) *ptr++ = pix; } } } pb->imageRect(pf, t, buf); } } zis->removeUnderlying(); }
1
512,609
Item *get_copy(THD *thd) { return get_item_copy<Item_date_literal>(thd, this); }
0
265,062
set_colour_attribute(zattr atr, int fg_bg, int flags) { char *ptr; int do_free, is_prompt = (flags & TSC_PROMPT) ? 1 : 0; int colour, tc, def, use_termcap, use_truecolor; int is_default_zle_highlight = 1; if (fg_bg == COL_SEQ_FG) { colour = txtchangeget(atr, TXT_ATTR_FG_COL); tc = TCFGCOLOUR; def = txtchangeisset(atr, TXTNOFGCOLOUR); use_truecolor = txtchangeisset(atr, TXT_ATTR_FG_24BIT); use_termcap = txtchangeisset(atr, TXT_ATTR_FG_TERMCAP); } else { colour = txtchangeget(atr, TXT_ATTR_BG_COL); tc = TCBGCOLOUR; def = txtchangeisset(atr, TXTNOBGCOLOUR); use_truecolor = txtchangeisset(atr, TXT_ATTR_BG_24BIT); use_termcap = txtchangeisset(atr, TXT_ATTR_BG_TERMCAP); } /* Test if current zle_highlight settings are customized, or * the typical "standard" codes */ if (0 != strcmp(fg_bg_sequences[fg_bg].start, fg_bg == COL_SEQ_FG ? TC_COL_FG_START : TC_COL_BG_START) || /* the same in-fix for both FG and BG */ 0 != strcmp(fg_bg_sequences[fg_bg].def, TC_COL_FG_DEFAULT) || /* the same suffix for both FG and BG */ 0 != strcmp(fg_bg_sequences[fg_bg].end, TC_COL_FG_END)) { is_default_zle_highlight = 0; } /* * If we're not restoring the default, and either have a * colour value that is too large for ANSI, or have been told * to use the termcap sequence, try to use the termcap sequence. * True color is not covered by termcap. * * We have already sanitised the values we allow from the * highlighting variables, so much of this shouldn't be * necessary at this point, but we might as well be safe. */ if (!def && !use_truecolor && (is_default_zle_highlight && (colour > 7 || use_termcap))) { /* * We can if it's available, and either we couldn't get * the maximum number of colours, or the colour is in range. 
*/ if (tccan(tc) && (tccolours < 0 || colour < tccolours)) { if (is_prompt) { if (!bv->dontcount) { addbufspc(1); *bv->bp++ = Inpar; } tputs(tgoto(tcstr[tc], colour, colour), 1, putstr); if (!bv->dontcount) { addbufspc(1); *bv->bp++ = Outpar; } } else { tputs(tgoto(tcstr[tc], colour, colour), 1, putshout); } /* That worked. */ return; } /* * Nope, that didn't work. * If 0 to 7, assume standard ANSI works, if 8 to 255, assume * typical 256-color escapes works, otherwise it won't. */ if (colour > 255) return; } if ((do_free = (colseq_buf == NULL))) { /* This can happen when moving the cursor in trashzle() */ allocate_colour_buffer(); } /* Build the reset-code: .start + .def + . end * or the typical true-color code: .start + 8;2;%d;%d;%d + .end * or the typical 256-color code: .start + 8;5;%d + .end */ if (use_truecolor) strcpy(colseq_buf, fg_bg == COL_SEQ_FG ? TC_COL_FG_START : TC_COL_BG_START); else strcpy(colseq_buf, fg_bg_sequences[fg_bg].start); ptr = colseq_buf + strlen(colseq_buf); if (def) { if (use_truecolor) strcpy(ptr, fg_bg == COL_SEQ_FG ? TC_COL_FG_DEFAULT : TC_COL_BG_DEFAULT); else strcpy(ptr, fg_bg_sequences[fg_bg].def); while (*ptr) ptr++; } else if (use_truecolor) { ptr += sprintf(ptr, "8;2;%d;%d;%d", colour >> 16, (colour >> 8) & 0xff, colour & 0xff); } else if (colour > 7 && colour <= 255) { ptr += sprintf(ptr, "%d", colour); } else *ptr++ = colour + '0'; if (use_truecolor) strcpy(ptr, fg_bg == COL_SEQ_FG ? TC_COL_FG_END : TC_COL_BG_END); else strcpy(ptr, fg_bg_sequences[fg_bg].end); if (is_prompt) { if (!bv->dontcount) { addbufspc(1); *bv->bp++ = Inpar; } tputs(colseq_buf, 1, putstr); if (!bv->dontcount) { addbufspc(1); *bv->bp++ = Outpar; } } else tputs(colseq_buf, 1, putshout); if (do_free) free_colour_buffer(); }
0
462,225
PJ_DEF(pj_status_t) pj_stun_msg_create( pj_pool_t *pool, unsigned msg_type, pj_uint32_t magic, const pj_uint8_t tsx_id[12], pj_stun_msg **p_msg) { pj_stun_msg *msg; PJ_ASSERT_RETURN(pool && msg_type && p_msg, PJ_EINVAL); msg = PJ_POOL_ZALLOC_T(pool, pj_stun_msg); *p_msg = msg; return pj_stun_msg_init(msg, msg_type, magic, tsx_id); }
0
225,935
GF_Err tpay_box_read(GF_Box *s, GF_BitStream *bs) { GF_TPAYBox *ptr = (GF_TPAYBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nbBytes = gf_bs_read_u32(bs); return GF_OK; }
0
483,510
static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor, unsigned long name_size, void *data) { struct efivar_entry *entry; struct list_head *list = data; char utf8_name[EFIVAR_SSDT_NAME_MAX]; int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size); ucs2_as_utf8(utf8_name, name, limit - 1); if (strncmp(utf8_name, efivar_ssdt, limit) != 0) return 0; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return 0; memcpy(entry->var.VariableName, name, name_size); memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t)); efivar_entry_add(entry, list); return 0; }
0
229,324
bool IntArgsAndRetvalsOnDevice(EagerOperation* op) { // Most TF ops expect and generate int32 tensors on the host (or a TPU/XLA // device). This is not the case with IteratorGetNext since it is possible to // build int32 datasets that produce outputs on device when using // prefetch_to_device. // When running call ops, by default we assume that the int32 outputs are on a // host (except for the XLA/TPU case). So we need to special case // IteratorGetNext such that its eager behavior matches the wrapped one. // TODO(b/208435025): Remove this if we end up deciding that int32 outputs // from IteratorGetNext should indeed live on host. return op->Name() == "IteratorGetNext"; }
0
336,608
static void reds_handle_other_links(RedsState *reds, RedLinkInfo *link) { RedChannel *channel; RedClient *client = NULL; SpiceLinkMess *link_mess; RedsMigTargetClient *mig_client; link_mess = link->link_mess; if (reds->main_channel) { client = reds->main_channel->get_client_by_link_id(link_mess->connection_id); } // TODO: MC: broke migration (at least for the dont-drop-connection kind). // On migration we should get a connection_id to expect (must be a security measure) // where do we store it? on reds, but should be a list (MC). if (!client) { reds_send_link_result(link, SPICE_LINK_ERR_BAD_CONNECTION_ID); return; } // TODO: MC: be less lenient. Tally connections from same connection_id (by same client). if (!(channel = reds_find_channel(reds, link_mess->channel_type, link_mess->channel_id))) { reds_send_link_result(link, SPICE_LINK_ERR_CHANNEL_NOT_AVAILABLE); return; } reds_send_link_result(link, SPICE_LINK_ERR_OK); reds_info_new_channel(link, link_mess->connection_id); mig_client = reds_mig_target_client_find(reds, client); /* * In semi-seamless migration, we activate the channels only * after migration is completed. Since, the session starts almost from * scratch we don't mind if we skip some messages in between the src session end and * dst session start. * In seamless migration, in order to keep the continuousness of the session, and * in order not to lose any data, we activate the target channels before * migration completes, as soon as we receive SPICE_MSGC_MAIN_MIGRATE_DST_DO_SEAMLESS. * If a channel connects before receiving SPICE_MSGC_MAIN_MIGRATE_DST_DO_SEAMLESS, * reds_on_migrate_dst_set_seamless will take care of activating it */ if (client->during_migrate_at_target() && !reds->dst_do_seamless_migrate) { spice_assert(mig_client); reds_mig_target_client_add_pending_link(mig_client, link_mess, link->stream); link->link_mess = NULL; } else { spice_assert(!mig_client); reds_channel_do_link(channel, client, link_mess, link->stream); } link->stream = NULL; }
0
252,364
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; }
0
353,207
void SplashOutputDev::updateLineCap(GfxState *state) { splash->setLineCap(state->getLineCap()); }
0
473,953
utf16le_get_case_fold_codes_by_str(OnigCaseFoldType flag, const OnigUChar* p, const OnigUChar* end, OnigCaseFoldCodeItem items[], OnigEncoding enc) { return onigenc_unicode_get_case_fold_codes_by_str(enc, flag, p, end, items); }
0
359,609
peer_update_source_vty (struct vty *vty, const char *peer_str, const char *source_str) { struct peer *peer; union sockunion *su; peer = peer_and_group_lookup_vty (vty, peer_str); if (! peer) return CMD_WARNING; if (source_str) { su = sockunion_str2su (source_str); if (su) { peer_update_source_addr_set (peer, su); sockunion_free (su); } else peer_update_source_if_set (peer, source_str); } else peer_update_source_unset (peer); return CMD_SUCCESS; }
0
226,060
GF_Err txtc_box_size(GF_Box *s) { GF_TextConfigBox *ptr = (GF_TextConfigBox *)s; if (ptr->config) ptr->size += strlen(ptr->config); ptr->size++; return GF_OK;
0
308,196
static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx) { kref_get(&ctx->refcount); }
0
400,106
PortForwardSourceResponse PortForwardHandler::createSource( const PortForwardSourceRequest& pfsr, string* sourceName, uid_t userid, gid_t groupid) { try { if (pfsr.has_source() && sourceName) { throw runtime_error( "Do not set a source when forwarding named pipes with environment " "variables"); } SocketEndpoint source; if (pfsr.has_source()) { source = pfsr.source(); if (source.has_name()) { throw runtime_error( "Named socket tunneling is only allowed with temporary filenames."); } } else { // Make a random file to forward the pipe string sourcePattern = GetTempDirectory() + string("et_forward_sock_XXXXXX"); string sourceDirectory = string(mkdtemp(&sourcePattern[0])); FATAL_FAIL(::chmod(sourceDirectory.c_str(), S_IRUSR | S_IWUSR | S_IXUSR)); FATAL_FAIL(::chown(sourceDirectory.c_str(), userid, groupid)); string sourcePath = string(sourceDirectory) + "/sock"; source.set_name(sourcePath); if (sourceName == nullptr) { STFATAL << "Tried to create a pipe but without a place to put the name!"; } *sourceName = sourcePath; LOG(INFO) << "Creating pipe at " << sourcePath; } if (pfsr.source().has_port()) { if (sourceName != nullptr) { STFATAL << "Tried to create a port forward but with a place to put " "the name!"; } auto handler = shared_ptr<ForwardSourceHandler>(new ForwardSourceHandler( networkSocketHandler, source, pfsr.destination())); sourceHandlers.push_back(handler); return PortForwardSourceResponse(); } else { if (userid < 0 || groupid < 0) { STFATAL << "Tried to create a unix socket forward with no userid/groupid"; } auto handler = shared_ptr<ForwardSourceHandler>(new ForwardSourceHandler( pipeSocketHandler, source, pfsr.destination())); FATAL_FAIL(::chmod(source.name().c_str(), S_IRUSR | S_IWUSR | S_IXUSR)); FATAL_FAIL(::chown(source.name().c_str(), userid, groupid)); sourceHandlers.push_back(handler); return PortForwardSourceResponse(); } } catch (const std::runtime_error& ex) { PortForwardSourceResponse pfsr; pfsr.set_error(ex.what()); return pfsr; } }
0
512,460
virtual bool val_bool() { return type_handler()->Item_val_bool(this); }
0
224,564
Status MaxPoolShapeWithExplicitPadding(shape_inference::InferenceContext* c) { return MaxPoolShapeImpl(c, /*supports_explicit_padding=*/true); }
0
513,171
/*
  Initialize the server plugin subsystem.

  Flow (as implemented below):
    1. Set up memory roots, the bookmark hash, the dynamic arrays holding
       plugin-library and plugin descriptors, and one hash per plugin type;
       wire up the debug_sync and encryption services.
    2. Register (but do not yet initialize) every built-in plugin, first the
       mandatory list then the optional list, honoring
       override_plugin_load_policy and --ignore-builtin-innodb.
    3. Initialize MyISAM first and pin it as
       global_system_variables.table_plugin so the default storage engine is
       never NULL in any child thread.
    4. Unless PLUGIN_INIT_SKIP_DYNAMIC_LOADING is set, register plugins named
       on the command line; if the mysql.plugin table is itself MyISAM, load
       the plugins it lists right away (and set
       PLUGIN_INIT_SKIP_PLUGIN_TABLE so it is not read again in step 5).
    5. Initialize all remaining registered plugins, iterating plugin-type
       hashes in plugin_type_initialization_order; plugins whose
       initialization fails are marked PLUGIN_IS_DYING, pushed onto the
       NULL-terminated `reap` stack, and deinitialized/deleted afterwards
       (dropping LOCK_plugin around the deinit call).

  @param argc  in/out argument count, consumed by plugin option handling
  @param argv  argument vector
  @param flags PLUGIN_INIT_SKIP_* flags controlling which phases run

  @return 0 on success, 1 on failure (including when a mandatory/forced
          plugin had to be reaped).
*/
int plugin_init(int *argc, char **argv, int flags) { uint i; struct st_maria_plugin **builtins; struct st_maria_plugin *plugin; struct st_plugin_int tmp, *plugin_ptr, **reap; MEM_ROOT tmp_root; bool reaped_mandatory_plugin= false; bool mandatory= true; LEX_STRING MyISAM= { C_STRING_WITH_LEN("MyISAM") }; DBUG_ENTER("plugin_init"); if (initialized) DBUG_RETURN(0); dlopen_count =0; init_alloc_root(&plugin_mem_root, 4096, 4096, MYF(0)); init_alloc_root(&plugin_vars_mem_root, 4096, 4096, MYF(0)); init_alloc_root(&tmp_root, 4096, 4096, MYF(0)); if (my_hash_init(&bookmark_hash, &my_charset_bin, 32, 0, 0, get_bookmark_hash_key, NULL, HASH_UNIQUE)) goto err; /* The 80 is from 2016-04-27 when we had 71 default plugins Big enough to avoid many mallocs even in future */ if (my_init_dynamic_array(&plugin_dl_array, sizeof(struct st_plugin_dl *), 16, 16, MYF(0)) || my_init_dynamic_array(&plugin_array, sizeof(struct st_plugin_int *), 80, 32, MYF(0))) goto err; for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++) { if (my_hash_init(&plugin_hash[i], system_charset_info, 32, 0, 0, get_plugin_hash_key, NULL, HASH_UNIQUE)) goto err; } /* prepare debug_sync service */ DBUG_ASSERT(strcmp(list_of_services[1].name, "debug_sync_service") == 0); list_of_services[1].service= *(void**)&debug_sync_C_callback_ptr; /* prepare encryption_keys service */ finalize_encryption_plugin(0); mysql_mutex_lock(&LOCK_plugin); initialized= 1; /* First we register builtin plugins */ if (global_system_variables.log_warnings >= 9) sql_print_information("Initializing built-in plugins"); for (builtins= mysql_mandatory_plugins; *builtins || mandatory; builtins++) { if (!*builtins) { builtins= mysql_optional_plugins; mandatory= false; if (!*builtins) break; } for (plugin= *builtins; plugin->info; plugin++) { if (opt_ignore_builtin_innodb && !my_strnncoll(&my_charset_latin1, (const uchar*) plugin->name, 6, (const uchar*) "InnoDB", 6)) continue; bzero(&tmp, sizeof(tmp)); tmp.plugin= plugin; tmp.name.str= (char 
*)plugin->name; tmp.name.length= strlen(plugin->name); tmp.state= 0; tmp.load_option= mandatory ? PLUGIN_FORCE : PLUGIN_ON; for (i=0; i < array_elements(override_plugin_load_policy); i++) { if (!my_strcasecmp(&my_charset_latin1, plugin->name, override_plugin_load_policy[i].plugin_name)) { tmp.load_option= override_plugin_load_policy[i].override; break; } } free_root(&tmp_root, MYF(MY_MARK_BLOCKS_FREE)); tmp.state= PLUGIN_IS_UNINITIALIZED; if (register_builtin(plugin, &tmp, &plugin_ptr)) goto err_unlock; } } /* First, we initialize only MyISAM - that should almost always succeed (almost always, because plugins can be loaded outside of the server, too). */ plugin_ptr= plugin_find_internal(&MyISAM, MYSQL_STORAGE_ENGINE_PLUGIN); DBUG_ASSERT(plugin_ptr || !mysql_mandatory_plugins[0]); if (plugin_ptr) { DBUG_ASSERT(plugin_ptr->load_option == PLUGIN_FORCE); if (plugin_initialize(&tmp_root, plugin_ptr, argc, argv, false)) goto err_unlock; /* set the global default storage engine variable so that it will not be null in any child thread. */ global_system_variables.table_plugin = intern_plugin_lock(NULL, plugin_int_to_ref(plugin_ptr)); DBUG_ASSERT(plugin_ptr->ref_count == 1); } mysql_mutex_unlock(&LOCK_plugin); /* Register (not initialize!) 
all dynamic plugins */ if (!(flags & PLUGIN_INIT_SKIP_DYNAMIC_LOADING)) { I_List_iterator<i_string> iter(opt_plugin_load_list); i_string *item; if (global_system_variables.log_warnings >= 9) sql_print_information("Initializing plugins specified on the command line"); while (NULL != (item= iter++)) plugin_load_list(&tmp_root, item->ptr); if (!(flags & PLUGIN_INIT_SKIP_PLUGIN_TABLE)) { char path[FN_REFLEN + 1]; build_table_filename(path, sizeof(path) - 1, "mysql", "plugin", reg_ext, 0); char engine_name_buf[NAME_CHAR_LEN + 1]; LEX_STRING maybe_myisam= { engine_name_buf, 0 }; frm_type_enum frm_type= dd_frm_type(NULL, path, &maybe_myisam); /* if mysql.plugin table is MyISAM - load it right away */ if (frm_type == FRMTYPE_TABLE && !strcasecmp(maybe_myisam.str, "MyISAM")) { plugin_load(&tmp_root); flags|= PLUGIN_INIT_SKIP_PLUGIN_TABLE; } } } /* Now we initialize all remaining plugins */ mysql_mutex_lock(&LOCK_plugin); reap= (st_plugin_int **) my_alloca((plugin_array.elements+1) * sizeof(void*)); *(reap++)= NULL; for(;;) { for (i=0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++) { HASH *hash= plugin_hash + plugin_type_initialization_order[i]; for (uint idx= 0; idx < hash->records; idx++) { plugin_ptr= (struct st_plugin_int *) my_hash_element(hash, idx); if (plugin_ptr->state == PLUGIN_IS_UNINITIALIZED) { if (plugin_initialize(&tmp_root, plugin_ptr, argc, argv, (flags & PLUGIN_INIT_SKIP_INITIALIZATION))) { plugin_ptr->state= PLUGIN_IS_DYING; *(reap++)= plugin_ptr; } } } } /* load and init plugins from the plugin table (unless done already) */ if (flags & PLUGIN_INIT_SKIP_PLUGIN_TABLE) break; mysql_mutex_unlock(&LOCK_plugin); plugin_load(&tmp_root); flags|= PLUGIN_INIT_SKIP_PLUGIN_TABLE; mysql_mutex_lock(&LOCK_plugin); } /* Check if any plugins have to be reaped */ while ((plugin_ptr= *(--reap))) { mysql_mutex_unlock(&LOCK_plugin); if (plugin_is_forced(plugin_ptr)) reaped_mandatory_plugin= TRUE; plugin_deinitialize(plugin_ptr, true); mysql_mutex_lock(&LOCK_plugin); 
plugin_del(plugin_ptr); } mysql_mutex_unlock(&LOCK_plugin); my_afree(reap); if (reaped_mandatory_plugin) goto err; free_root(&tmp_root, MYF(0)); DBUG_RETURN(0); err_unlock: mysql_mutex_unlock(&LOCK_plugin); err: free_root(&tmp_root, MYF(0)); DBUG_RETURN(1); }
0
293,938
/*
 * Re-indent the current line after an edit: Lisp indenting when 'lisp'
 * and 'autoindent' are both set, otherwise C-expression indenting when
 * 'cindent' is active.  Does nothing while 'paste' is set.
 */
fix_indent(void)
{
    /* no automatic re-indenting while 'paste' is active */
    if (p_paste)
	return;
# ifdef FEAT_LISP
    if (curbuf->b_p_lisp && curbuf->b_p_ai)
	fixthisline(get_lisp_indent);
# endif
# if defined(FEAT_LISP) && defined(FEAT_CINDENT)
    /* with both features compiled in, Lisp indenting takes precedence */
    else
# endif
# ifdef FEAT_CINDENT
    if (cindent_on())
	do_c_expr_indent();
# endif
}
0
224,561
// Splits `shape` into its batch, spatial, and channel dimensions according
// to the layout described by `format`, writing the results into
// `batch_dim`, `spatial_dims`, and `filter_dim` respectively.  For
// NCHW_VECT_C the inner channel-vector dimension is folded into
// `filter_dim`.
Status DimensionsFromShape(ShapeHandle shape, TensorFormat format, DimensionHandle* batch_dim, gtl::MutableArraySlice<DimensionHandle> spatial_dims, DimensionHandle* filter_dim, InferenceContext* context) {
  const int num_spatial = spatial_dims.size();
  const int32_t tensor_rank = GetTensorDimsFromSpatialDims(num_spatial, format);

  // Batch dimension.
  *batch_dim = context->Dim(shape, GetTensorBatchDimIndex(tensor_rank, format));

  // Spatial dimensions.
  for (int i = 0; i < num_spatial; ++i) {
    spatial_dims[i] =
        context->Dim(shape, GetTensorSpatialDimIndex(tensor_rank, format, i));
  }

  // Channel (feature) dimension.
  *filter_dim =
      context->Dim(shape, GetTensorFeatureDimIndex(tensor_rank, format));
  if (format == FORMAT_NCHW_VECT_C) {
    // Multiply in the vectorized inner-channel dimension.
    const DimensionHandle inner =
        context->Dim(shape, GetTensorInnerFeatureDimIndex(tensor_rank, format));
    TF_RETURN_IF_ERROR(context->Multiply(*filter_dim, inner, filter_dim));
  }
  return Status::OK();
}
0
443,162
/*
 * Clean up after a failed/short write: if blocks were instantiated past
 * the current i_size, drop the stale page-cache pages beyond it and trim
 * the on-disk allocation back to match.
 */
static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t isize = inode->i_size;

	if (to <= isize)
		return;

	truncate_pagecache(inode, isize);
	jfs_truncate(inode);
}
0
254,712
/*
 * %TypedArray%.of(...items): constructs a typed array of length
 * nargs - 1 via "this" as the constructor, then stores the numeric
 * value of each remaining argument at the matching index.
 */
njs_typed_array_of(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
    njs_index_t unused)
{
    double             value;
    uint32_t           count, idx;
    njs_int_t          rc;
    njs_value_t        *ctor;
    njs_value_t        len_value;
    njs_typed_array_t  *ta;

    ctor = njs_argument(args, 0);

    if (njs_slow_path(!njs_is_constructor(ctor))) {
        njs_type_error(vm, "%s is not a constructor",
                       njs_type_string(ctor->type));
        return NJS_ERROR;
    }

    count = nargs - 1;

    njs_set_number(&len_value, count);

    rc = njs_typed_array_create(vm, ctor, &len_value, 1, &vm->retval);
    if (njs_slow_path(rc != NJS_OK)) {
        return NJS_ERROR;
    }

    ta = njs_typed_array(&vm->retval);

    for (idx = 0; idx < count; idx++) {
        /* ToNumber() may run user code; stop on the first failure. */
        rc = njs_value_to_number(vm, njs_argument(args, idx + 1), &value);
        if (njs_slow_path(rc != NJS_OK)) {
            return NJS_ERROR;
        }

        njs_typed_array_prop_set(vm, ta, idx, value);
    }

    njs_set_typed_array(&vm->retval, ta);

    return NJS_OK;
}
0
229,310
/*
 * Handles a CQL BATCH message (rejected outright on protocol v1).
 *
 * Parses the batch type byte and the N enclosed statements.  Each statement
 * is either kind 0 (an inline query string, parsed on the fly) or kind 1
 * (a prepared-statement id looked up in the prepared cache); prepared
 * statements not yet authorized for this user are authorized at most once
 * via pending_authorization_entries.  Only modification statements
 * (UPDATE/INSERT/DELETE) are permitted, and each statement's bound-value
 * count must match the number of '?' markers, otherwise an
 * invalid_request_exception is thrown.
 *
 * NOTE(review): per issue #563 referenced below, CQL protocol v2 encodes
 * batch query_options in v1 format, hence the `version < 3 ? 1 : version`
 * when decoding options.
 *
 * The assembled batch_statement is executed through the query processor;
 * the continuation converts the result into a bounce-to-shard, an error
 * message, or a serialized result frame for `stream`.
 */
process_batch_internal(service::client_state& client_state, distributed<cql3::query_processor>& qp, request_reader in, uint16_t stream, cql_protocol_version_type version, cql_serialization_format serialization_format, service_permit permit, tracing::trace_state_ptr trace_state, bool init_trace, cql3::computed_function_values cached_pk_fn_calls) { if (version == 1) { throw exceptions::protocol_exception("BATCH messages are not support in version 1 of the protocol"); } const auto type = in.read_byte(); const unsigned n = in.read_short(); std::vector<cql3::statements::batch_statement::single_statement> modifications; std::vector<std::vector<cql3::raw_value_view>> values; std::unordered_map<cql3::prepared_cache_key_type, cql3::authorized_prepared_statements_cache::value_type> pending_authorization_entries; modifications.reserve(n); values.reserve(n); if (init_trace) { tracing::begin(trace_state, "Execute batch of CQL3 queries", client_state.get_client_address()); } for ([[gnu::unused]] auto i : boost::irange(0u, n)) { const auto kind = in.read_byte(); std::unique_ptr<cql3::statements::prepared_statement> stmt_ptr; cql3::statements::prepared_statement::checked_weak_ptr ps; bool needs_authorization(kind == 0); switch (kind) { case 0: { auto query = in.read_long_string_view(); stmt_ptr = qp.local().get_statement(query, client_state); ps = stmt_ptr->checked_weak_from_this(); if (init_trace) { tracing::add_query(trace_state, query); } break; } case 1: { cql3::prepared_cache_key_type cache_key(in.read_short_bytes()); auto& id = cql3::prepared_cache_key_type::cql_id(cache_key); // First, try to lookup in the cache of already authorized statements. If the corresponding entry is not found there // look for the prepared statement and then authorize it. 
ps = qp.local().get_prepared(client_state.user(), cache_key); if (!ps) { ps = qp.local().get_prepared(cache_key); if (!ps) { throw exceptions::prepared_query_not_found_exception(id); } // authorize a particular prepared statement only once needs_authorization = pending_authorization_entries.emplace(std::move(cache_key), ps->checked_weak_from_this()).second; } if (init_trace) { tracing::add_query(trace_state, ps->statement->raw_cql_statement); } break; } default: throw exceptions::protocol_exception( "Invalid query kind in BATCH messages. Must be 0 or 1 but got " + std::to_string(int(kind))); } if (dynamic_cast<cql3::statements::modification_statement*>(ps->statement.get()) == nullptr) { throw exceptions::invalid_request_exception("Invalid statement in batch: only UPDATE, INSERT and DELETE statements are allowed."); } ::shared_ptr<cql3::statements::modification_statement> modif_statement_ptr = static_pointer_cast<cql3::statements::modification_statement>(ps->statement); if (init_trace) { tracing::add_table_name(trace_state, modif_statement_ptr->keyspace(), modif_statement_ptr->column_family()); tracing::add_prepared_statement(trace_state, ps); } modifications.emplace_back(std::move(modif_statement_ptr), needs_authorization); std::vector<cql3::raw_value_view> tmp; in.read_value_view_list(version, tmp); auto stmt = ps->statement; if (stmt->get_bound_terms() != tmp.size()) { throw exceptions::invalid_request_exception(format("There were {:d} markers(?) in CQL but {:d} bound variables", stmt->get_bound_terms(), tmp.size())); } values.emplace_back(std::move(tmp)); } auto q_state = std::make_unique<cql_query_state>(client_state, trace_state, std::move(permit)); auto& query_state = q_state->query_state; // #563. CQL v2 encodes query_options in v1 format for batch requests. q_state->options = std::make_unique<cql3::query_options>(cql3::query_options::make_batch_options(std::move(*in.read_options(version < 3 ? 
1 : version, serialization_format, qp.local().get_cql_config())), std::move(values))); auto& options = *q_state->options; if (!cached_pk_fn_calls.empty()) { options.set_cached_pk_function_calls(std::move(cached_pk_fn_calls)); } if (init_trace) { tracing::set_consistency_level(trace_state, options.get_consistency()); tracing::set_optional_serial_consistency_level(trace_state, options.get_serial_consistency()); tracing::add_prepared_query_options(trace_state, options); tracing::trace(trace_state, "Creating a batch statement"); } auto batch = ::make_shared<cql3::statements::batch_statement>(cql3::statements::batch_statement::type(type), std::move(modifications), cql3::attributes::none(), qp.local().get_cql_stats()); return qp.local().execute_batch_without_checking_exception_message(batch, query_state, options, std::move(pending_authorization_entries)) .then([stream, batch, q_state = std::move(q_state), trace_state = query_state.get_trace_state(), version] (auto msg) { if (msg->move_to_shard()) { return process_fn_return_type(dynamic_pointer_cast<messages::result_message::bounce_to_shard>(msg)); } else if (msg->is_exception()) { return process_fn_return_type(convert_error_message_to_coordinator_result(msg.get())); } else { tracing::trace(q_state->query_state.get_trace_state(), "Done processing - preparing a result"); return process_fn_return_type(make_foreign(make_result(stream, *msg, trace_state, version))); } }); }
0
226,228
/* Allocates and zero-initializes a new 'ssix' (Subsegment Index) box.
 * NOTE(review): the closing brace of this function is missing in this
 * chunk — presumably truncated; verify against the full source. */
GF_Box *ssix_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubsegmentIndexBox, GF_ISOM_BOX_TYPE_SSIX); return (GF_Box *)tmp;
0
488,424
/*
 * A mapping is copy-on-write when it is private but may become writable:
 * VM_MAYWRITE set with VM_SHARED clear.
 */
static inline int is_cow_mapping(unsigned int flags)
{
	const unsigned int mask = VM_SHARED | VM_MAYWRITE;

	return (flags & mask) == VM_MAYWRITE;
}
0