idx
int64 | func
string | target
int64 |
|---|---|---|
347,054
|
/*
 * _krb5_extract_ticket - decrypt a KDC-REP and populate `creds' from it.
 *
 * Decrypts rep->enc_part with `key' (key usage `key_usage'), optionally
 * verifies the enc-pa-rep checksum over `request', copies the session key,
 * validates client/server principals (honouring the referral-mismatch
 * flags), checks the returned realm, the nonce and all ticket times
 * (adjusting context->kdc_sec_offset when kdc_timesync is configured),
 * and finally re-encodes the ticket into creds->ticket.
 *
 * Returns 0 on success or a krb5 error code.  On every exit path the
 * decrypted session key held in `rep' is zeroed.
 */
_krb5_extract_ticket(krb5_context context,
		     krb5_kdc_rep *rep,
		     krb5_creds *creds,
		     krb5_keyblock *key,
		     krb5_const_pointer keyseed,
		     krb5_key_usage key_usage,
		     krb5_addresses *addrs,
		     unsigned nonce,
		     unsigned flags,
		     krb5_data *request,
		     krb5_decrypt_proc decrypt_proc,
		     krb5_const_pointer decryptarg)
{
    krb5_error_code ret;
    krb5_principal tmp_principal;
    size_t len = 0;
    time_t tmp_time;
    krb5_timestamp sec_now;

    /* decrypt */
    if (decrypt_proc == NULL)
	decrypt_proc = decrypt_tkt;
    ret = (*decrypt_proc)(context, key, key_usage, decryptarg, rep);
    if (ret)
	goto out;

    /*
     * If the KDC claimed it supports enc-pa-rep we require the checksum
     * over our original request to be present and valid (RFC 6806-style
     * request verification); a missing or bad checksum means the reply
     * was modified.
     */
    if (rep->enc_part.flags.enc_pa_rep && request) {
	krb5_crypto crypto = NULL;
	Checksum cksum;
	PA_DATA *pa = NULL;
	int idx = 0;

	_krb5_debug(context, 5, "processing enc-ap-rep");

	if (rep->enc_part.encrypted_pa_data == NULL ||
	    (pa = krb5_find_padata(rep->enc_part.encrypted_pa_data->val,
				   rep->enc_part.encrypted_pa_data->len,
				   KRB5_PADATA_REQ_ENC_PA_REP,
				   &idx)) == NULL)
	{
	    _krb5_debug(context, 5, "KRB5_PADATA_REQ_ENC_PA_REP missing");
	    ret = KRB5KRB_AP_ERR_MODIFIED;
	    goto out;
	}

	ret = krb5_crypto_init(context, key, 0, &crypto);
	if (ret)
	    goto out;

	ret = decode_Checksum(pa->padata_value.data,
			      pa->padata_value.length,
			      &cksum, NULL);
	if (ret) {
	    krb5_crypto_destroy(context, crypto);
	    goto out;
	}

	ret = krb5_verify_checksum(context, crypto,
				   KRB5_KU_AS_REQ,
				   request->data, request->length,
				   &cksum);
	krb5_crypto_destroy(context, crypto);
	free_Checksum(&cksum);
	_krb5_debug(context, 5, "enc-ap-rep: %svalid", (ret == 0) ? "" : "in");
	if (ret)
	    goto out;
    }

    /* save session key */
    creds->session.keyvalue.length = 0;
    creds->session.keyvalue.data = NULL;
    creds->session.keytype = rep->enc_part.key.keytype;
    ret = krb5_data_copy (&creds->session.keyvalue,
			  rep->enc_part.key.keyvalue.data,
			  rep->enc_part.key.keyvalue.length);
    if (ret) {
	krb5_clear_error_message(context);
	goto out;
    }

    /* compare client and save */
    ret = _krb5_principalname2krb5_principal(context,
					     &tmp_principal,
					     rep->kdc_rep.cname,
					     rep->kdc_rep.crealm);
    if (ret)
	goto out;

    /* check client referral and save principal */
    /* anonymous here ? */
    if((flags & EXTRACT_TICKET_ALLOW_CNAME_MISMATCH) == 0) {
	ret = check_client_referral(context, rep,
				    creds->client,
				    tmp_principal,
				    &creds->session);
	if (ret) {
	    krb5_free_principal (context, tmp_principal);
	    goto out;
	}
    }
    krb5_free_principal (context, creds->client);
    creds->client = tmp_principal;

    /* check server referral and save principal */
    ret = _krb5_principalname2krb5_principal (context,
					      &tmp_principal,
					      rep->kdc_rep.ticket.sname,
					      rep->kdc_rep.ticket.realm);
    if (ret)
	goto out;
    if((flags & EXTRACT_TICKET_ALLOW_SERVER_MISMATCH) == 0){
	ret = check_server_referral(context,
				    rep,
				    flags,
				    creds->server,
				    tmp_principal,
				    &creds->session);
	if (ret) {
	    krb5_free_principal (context, tmp_principal);
	    goto out;
	}
    }
    krb5_free_principal(context, creds->server);
    creds->server = tmp_principal;

    /*
     * verify names: when requested, both the client and server realms
     * must match the realm the enc-part claims.
     */
    if(flags & EXTRACT_TICKET_MATCH_REALM){
	const char *srealm = krb5_principal_get_realm(context, creds->server);
	const char *crealm = krb5_principal_get_realm(context, creds->client);

	if (strcmp(rep->enc_part.srealm, srealm) != 0 ||
	    strcmp(rep->enc_part.srealm, crealm) != 0)
	{
	    ret = KRB5KRB_AP_ERR_MODIFIED;
	    krb5_clear_error_message(context);
	    goto out;
	}
    }

    /* compare nonces */
    if (nonce != (unsigned)rep->enc_part.nonce) {
	ret = KRB5KRB_AP_ERR_MODIFIED;
	/*
	 * BUGFIX: the old message here was a copy-pasted
	 * "malloc: out of memory", which misdescribed the failure.
	 */
	krb5_set_error_message(context, ret,
			       N_("nonce mismatch in KDC reply", ""));
	goto out;
    }

    /* set kdc-offset */
    krb5_timeofday (context, &sec_now);
    if (rep->enc_part.flags.initial
	&& (flags & EXTRACT_TICKET_TIMESYNC)
	&& context->kdc_sec_offset == 0
	&& krb5_config_get_bool (context, NULL,
				 "libdefaults",
				 "kdc_timesync",
				 NULL)) {
	context->kdc_sec_offset = rep->enc_part.authtime - sec_now;
	krb5_timeofday (context, &sec_now);
    }

    /* check all times */
    if (rep->enc_part.starttime) {
	tmp_time = *rep->enc_part.starttime;
    } else
	tmp_time = rep->enc_part.authtime;

    /* fresh ticket: reject if our clocks differ by more than max_skew */
    if (creds->times.starttime == 0
	&& labs(tmp_time - sec_now) > context->max_skew) {
	ret = KRB5KRB_AP_ERR_SKEW;
	krb5_set_error_message (context, ret,
				N_("time skew (%ld) larger than max (%ld)", ""),
				labs(tmp_time - sec_now),
				(long)context->max_skew);
	goto out;
    }

    /* renewed/validated ticket: starttime must match what we asked for */
    if (creds->times.starttime != 0
	&& tmp_time != creds->times.starttime) {
	krb5_clear_error_message (context);
	ret = KRB5KRB_AP_ERR_MODIFIED;
	goto out;
    }
    creds->times.starttime = tmp_time;

    if (rep->enc_part.renew_till) {
	tmp_time = *rep->enc_part.renew_till;
    } else
	tmp_time = 0;

    /* renew_till may never exceed what was requested */
    if (creds->times.renew_till != 0
	&& tmp_time > creds->times.renew_till) {
	krb5_clear_error_message (context);
	ret = KRB5KRB_AP_ERR_MODIFIED;
	goto out;
    }
    creds->times.renew_till = tmp_time;

    creds->times.authtime = rep->enc_part.authtime;

    /* endtime may never exceed what was requested */
    if (creds->times.endtime != 0
	&& rep->enc_part.endtime > creds->times.endtime) {
	krb5_clear_error_message (context);
	ret = KRB5KRB_AP_ERR_MODIFIED;
	goto out;
    }
    creds->times.endtime = rep->enc_part.endtime;

    /* addresses: prefer the ones the KDC put in the ticket */
    if(rep->enc_part.caddr)
	krb5_copy_addresses (context, rep->enc_part.caddr, &creds->addresses);
    else if(addrs)
	krb5_copy_addresses (context, addrs, &creds->addresses);
    else {
	creds->addresses.len = 0;
	creds->addresses.val = NULL;
    }
    creds->flags.b = rep->enc_part.flags;

    creds->authdata.len = 0;
    creds->authdata.val = NULL;

    /* extract ticket */
    ASN1_MALLOC_ENCODE(Ticket, creds->ticket.data, creds->ticket.length,
		       &rep->kdc_rep.ticket, &len, ret);
    if(ret)
	goto out;
    if (creds->ticket.length != len)
	krb5_abortx(context, "internal error in ASN.1 encoder");
    creds->second_ticket.length = 0;
    creds->second_ticket.data = NULL;

out:
    /*
     * Scrub the decrypted session key.  NOTE(review): plain memset of
     * secrets can in principle be optimized away; consider
     * memset_s/explicit_bzero if available in this tree.
     */
    memset (rep->enc_part.key.keyvalue.data, 0,
	    rep->enc_part.key.keyvalue.length);
    return ret;
}
| 1
|
226,922
|
// Timer callback: when the layer-flush timer fires, run the scheduled
// layer flush.  The Timer* argument is unused.
void LayerTreeCoordinator::layerFlushTimerFired(Timer<LayerTreeCoordinator>*)
{
    performScheduledLayerFlush();
}
| 0
|
470,114
|
/*
 * Re-apply interception state for every direct-access MSR after the
 * userspace MSR filter changed, so the MSR permission bitmap and the
 * filter are back in sync.
 */
static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 slot;

	/*
	 * Walk the direct_access_msrs table; the shadow bitmaps hold the
	 * desired intercept state, and set_msr_interception_bitmap() routes
	 * each MSR back through the (new) filter.
	 */
	for (slot = 0; direct_access_msrs[slot].index != MSR_INVALID; slot++) {
		u32 allow_read = test_bit(slot, svm->shadow_msr_intercept.read);
		u32 allow_write = test_bit(slot, svm->shadow_msr_intercept.write);

		set_msr_interception_bitmap(vcpu, svm->msrpm,
					    direct_access_msrs[slot].index,
					    allow_read, allow_write);
	}
}
| 0
|
266,190
|
// Snapshot the connection's congestion controller together with its current
// RTT statistics, stamping the peer address and capture time.  Ownership of
// the congestion controller is transferred out of `conn`.
CongestionAndRttState moveCurrentCongestionAndRttState(
    QuicServerConnectionState& conn) {
  CongestionAndRttState snapshot;
  snapshot.recordTime = Clock::now();
  snapshot.peerAddress = conn.peerAddress;
  // The controller moves; conn.congestionController is left empty.
  snapshot.congestionController = std::move(conn.congestionController);
  auto& loss = conn.lossState;
  snapshot.srtt = loss.srtt;
  snapshot.lrtt = loss.lrtt;
  snapshot.rttvar = loss.rttvar;
  snapshot.mrtt = loss.mrtt;
  return snapshot;
}
| 0
|
140,090
|
/* Return a pointer to the chunk that follows `chunk`.  A PNG chunk occupies
   its data length plus 12 bytes of overhead (4 length + 4 type + 4 CRC).
   NOTE(review): there is no bounds or overflow check here -- the caller must
   guarantee the buffer really contains a following chunk. */
const unsigned char* lodepng_chunk_next_const(const unsigned char* chunk)
{
  unsigned overhead = 12; /* length + type + CRC fields */
  return chunk + lodepng_chunk_length(chunk) + overhead;
}
| 0
|
367,117
|
/* Default capability-LSM hook for unix_may_send: always permit (returns 0).
 * Real restrictions, if any, come from other security modules. */
static int cap_unix_may_send(struct socket *sock, struct socket *other)
{
return 0;
}
| 0
|
323,666
|
/*
 * Dispatch a SIGP order addressed to a single destination CPU.
 * Returns one of the SIGP condition codes; for orders that are executed
 * on the target CPU the code is produced through info.cc (and, for
 * invalid orders, via *status_reg).
 */
static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo info = {
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (!dst_cpu) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET && order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    /* Execute the order on the destination vcpu's own thread. */
    switch (order) {
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&info));
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset,
                   RUN_ON_CPU_HOST_PTR(&info));
        break;
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&info));
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&info));
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&info));
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status,
                   RUN_ON_CPU_HOST_PTR(&info));
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address,
                   RUN_ON_CPU_HOST_PTR(&info));
        break;
    case SIGP_STORE_ADTL_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status,
                   RUN_ON_CPU_HOST_PTR(&info));
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&info));
        break;
    default:
        /* Unknown order: report it through the status register. */
        set_sigp_status(&info, SIGP_STAT_INVALID_ORDER);
        break;
    }

    return info.cc;
}
| 0
|
521,294
|
/*
  Prepare this IN predicate for ROW-type comparison via a sorted in_row
  array (bisection search).  Allocates the array on thd->mem_root
  (arg_count-1 = number of list values, excluding the left operand),
  prepares per-column comparators, then fills and sorts the array.
  Returns true on error (OOM or comparator setup failure), false on success.
*/
bool Item_func_in::fix_for_row_comparison_using_bisection(THD *thd)
{
// in_row lives on the statement mem_root; freed with the statement, not here.
if (unlikely(!(array= new (thd->mem_root) in_row(thd, arg_count-1, 0))))
return true;
cmp_item_row *cmp= &((in_row*)array)->tmp;
// Set up one comparator per ROW column; func_name() only feeds error messages.
if (cmp->prepare_comparators(thd, func_name(), this, 0))
return true;
// Populate and sort the vector so bisection lookups work.
fix_in_vector();
return false;
}
| 0
|
143
|
/*
 * Dissect a GSM A-interface Mobile Identity IE (3GPP TS 24.008 10.5.1.4).
 * The low 3 bits of the first octet select the identity type:
 *   0 = no identity, 1 = IMSI, 2 = IMEI, 3 = IMEISV, 4 = TMSI/P-TMSI,
 *   5 = TMGI (MBMS).  Writes a summary into add_string (if non-NULL) and
 * returns the number of octets consumed.
 */
guint16 de_mid ( tvbuff_t * tvb , proto_tree * tree , packet_info * pinfo , guint32 offset , guint len , gchar * add_string , int string_len ) {
guint8 oct ;
guint32 curr_offset ;
guint32 value ;
gboolean odd ;
const gchar * digit_str ;
proto_item * ti ;
curr_offset = offset ;
oct = tvb_get_guint8 ( tvb , curr_offset ) ;
/* Identity type lives in the low 3 bits of the first octet. */
switch ( oct & 0x07 ) {
/* Type 0: "No Identity". */
case 0 : proto_tree_add_item ( tree , hf_gsm_a_unused , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_odd_even_ind , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_mobile_identity_type , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
if ( add_string ) g_snprintf ( add_string , string_len , " - No Identity Code" ) ;
curr_offset ++ ;
if ( len > 1 ) {
expert_add_info ( pinfo , tree , & ei_gsm_a_format_not_supported ) ;
}
/* NOTE(review): if len == 0 this underflows the unsigned `len - 1`
 * and wildly advances curr_offset -- confirm callers guarantee len >= 1. */
curr_offset += len - 1 ;
break ;
/* Types 1 (IMSI) and 3 (IMEISV): BCD digit strings. */
case 3 : case 1 : odd = oct & 0x08 ;
proto_tree_add_item ( tree , hf_gsm_a_id_dig_1 , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_odd_even_ind , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_mobile_identity_type , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
if ( ( oct & 0x07 ) == 3 ) {
/* IMEISV: decode remaining octets as BCD, skipping the type nibble. */
digit_str = tvb_bcd_dig_to_wmem_packet_str ( tvb , curr_offset , len - ( curr_offset - offset ) , NULL , TRUE ) ;
proto_tree_add_string_format ( tree , hf_gsm_a_imeisv , tvb , curr_offset , len - ( curr_offset - offset ) , digit_str , "BCD Digits: %s" , digit_str ) ;
}
else {
/* IMSI: hand off to the E.212 dissector. */
digit_str = dissect_e212_imsi ( tvb , pinfo , tree , curr_offset , len - ( curr_offset - offset ) , TRUE ) ;
}
/* Record the identity as the SCCP calling party, once per association. */
if ( sccp_assoc && ! sccp_assoc -> calling_party ) {
sccp_assoc -> calling_party = wmem_strdup_printf ( wmem_file_scope ( ) , ( ( oct & 0x07 ) == 3 ) ? "IMEISV: %s" : "IMSI: %s" , digit_str ) ;
}
if ( add_string ) g_snprintf ( add_string , string_len , " - %s (%s)" , ( ( oct & 0x07 ) == 3 ) ? "IMEISV" : "IMSI" , digit_str ) ;
curr_offset += len - ( curr_offset - offset ) ;
/* Even digit count: the last nibble is filler. */
if ( ! odd ) {
proto_tree_add_item ( tree , hf_gsm_a_filler , tvb , curr_offset - 1 , 1 , ENC_NA ) ;
}
break ;
/* Type 2: IMEI, first digit packed in the high nibble of octet 1. */
case 2 : proto_tree_add_uint_format_value ( tree , hf_gsm_a_identity_digit1 , tvb , curr_offset , 1 , oct , "%c" , Dgt1_9_bcd . out [ ( oct & 0xf0 ) >> 4 ] ) ;
proto_tree_add_item ( tree , hf_gsm_a_odd_even_ind , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_mobile_identity_type , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
curr_offset ++ ;
/* Guard: IE may legitimately end after the first octet. */
if ( curr_offset - offset >= len ) return ( curr_offset - offset ) ;
digit_str = tvb_bcd_dig_to_wmem_packet_str ( tvb , curr_offset , len - ( curr_offset - offset ) , NULL , FALSE ) ;
proto_tree_add_string_format ( tree , hf_gsm_a_imei , tvb , curr_offset , len - ( curr_offset - offset ) , digit_str , "BCD Digits: %s" , digit_str ) ;
if ( add_string ) g_snprintf ( add_string , string_len , " - IMEI (%s)" , digit_str ) ;
curr_offset += len - ( curr_offset - offset ) ;
break ;
/* Type 4: TMSI/P-TMSI, a fixed 4-octet value. */
case 4 : proto_tree_add_item ( tree , hf_gsm_a_unused , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_odd_even_ind , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_mobile_identity_type , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
curr_offset ++ ;
/* NOTE(review): reads 4 octets without checking len >= 5 -- relies on
 * tvb bounds checking to throw on short input; confirm that is intended. */
value = tvb_get_ntohl ( tvb , curr_offset ) ;
proto_tree_add_uint ( tree , hf_gsm_a_tmsi , tvb , curr_offset , 4 , value ) ;
if ( add_string ) g_snprintf ( add_string , string_len , " - TMSI/P-TMSI (0x%04x)" , value ) ;
curr_offset += 4 ;
break ;
/* Type 5: TMGI with optional MCC/MNC and MBMS session id (flag bits 0x10/0x20). */
case 5 : proto_tree_add_bits_item ( tree , hf_gsm_a_spare_bits , tvb , curr_offset << 3 , 2 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_mbs_ses_id_ind , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_tmgi_mcc_mnc_ind , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_odd_even_ind , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tree , hf_gsm_a_mobile_identity_type , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
curr_offset ++ ;
proto_tree_add_item ( tree , hf_gsm_a_mbs_service_id , tvb , curr_offset , 3 , ENC_BIG_ENDIAN ) ;
curr_offset += 3 ;
if ( ( oct & 0x10 ) == 0x10 ) {
curr_offset = dissect_e212_mcc_mnc ( tvb , pinfo , tree , curr_offset , E212_NONE , TRUE ) ;
}
if ( ( oct & 0x20 ) == 0x20 ) {
proto_tree_add_item ( tree , hf_gsm_a_mbs_session_id , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
curr_offset ++ ;
}
break ;
/* Unknown identity type: flag via expert info and consume the IE. */
default : proto_tree_add_item ( tree , hf_gsm_a_odd_even_ind , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
ti = proto_tree_add_item ( tree , hf_gsm_a_mobile_identity_type , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
expert_add_info_format ( pinfo , ti , & ei_gsm_a_mobile_identity_type , "Unknown format %u" , ( oct & 0x07 ) ) ;
if ( add_string ) g_snprintf ( add_string , string_len , " - Format Unknown" ) ;
curr_offset += len ;
break ;
}
EXTRANEOUS_DATA_CHECK ( len , curr_offset - offset , pinfo , & ei_gsm_a_extraneous_data ) ;
return ( curr_offset - offset ) ;
}
| 1
|
388,283
|
/*
 * Validate an extent-same (dedupe/clone) range: offset @off, working length
 * *@plen, original requested length @olen.  The range must lie within the
 * inode and not wrap; when it ends exactly at EOF, *@plen is rounded up to
 * the next block boundary.  Both ends must then be block aligned, as
 * btrfs_clone() requires.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
				     u64 olen)
{
	u64 blocksize = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
	u64 len = *plen;

	/* Reject ranges past EOF and ranges whose end wraps around. */
	if (off + olen > inode->i_size || off + olen < off)
		return -EINVAL;

	/* A range ending at EOF is extended to the block boundary. */
	if (off + len == inode->i_size) {
		len = ALIGN(inode->i_size, blocksize) - off;
		*plen = len;
	}

	/* Both range boundaries must sit on block boundaries. */
	if (!IS_ALIGNED(off, blocksize) || !IS_ALIGNED(off + len, blocksize))
		return -EINVAL;

	return 0;
}
| 0
|
271,766
|
/*
 * Parse the "tree <sha1>" header of a commit object into branch b's tree
 * version slot 1, then mirror it into slot 0.  Dies if the buffer is too
 * short to be a commit (46 = "tree " + 40 hex chars + newline) or does not
 * start with a valid tree line.
 */
static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
{
if (!buf || size < 46)
die("Not a valid commit: %s", sha1_to_hex(b->sha1));
/* First header line of a commit is always "tree <40-hex-sha1>". */
if (memcmp("tree ", buf, 5)
|| get_sha1_hex(buf + 5, b->branch_tree.versions[1].sha1))
die("The commit %s is corrupt", sha1_to_hex(b->sha1));
/* Base version (0) starts out identical to the parsed version (1). */
hashcpy(b->branch_tree.versions[0].sha1,
b->branch_tree.versions[1].sha1);
}
| 0
|
399,613
|
/*
 * Recompute the next expiring hrtimer event for @cpu_base and reprogram
 * the clock event device, unless high-res mode is off, the expiry is
 * unchanged (when @skip_equal), or a timer-interrupt hang was detected.
 */
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;
/* Nothing to program when high resolution mode is not active. */
if (!cpu_base->hres_active)
return;
expires_next = __hrtimer_get_next_event(cpu_base);
/* Optionally avoid a redundant reprogram for an unchanged expiry. */
if (skip_equal && expires_next == cpu_base->expires_next)
return;
cpu_base->expires_next = expires_next;
/*
 * If a hang was detected in the last timer interrupt then we
 * leave the hang delay active in the hardware. We want the
 * system to make progress. That also prevents the following
 * scenario:
 * T1 expires 50ms from now
 * T2 expires 5s from now
 *
 * T1 is removed, so this code is called and would reprogram
 * the hardware to 5s from now. Any hrtimer_start after that
 * will not reprogram the hardware due to hang_detected being
 * set. So we'd effectivly block all timers until the T2 event
 * fires.
 */
if (cpu_base->hang_detected)
return;
tick_program_event(cpu_base->expires_next, 1);
}
| 0
|
241,360
|
// Try to complete an in-progress flush.  Caller must hold lock_ and the
// renderer must be in the kFlushing state.  The flush finishes (and the
// flush callback runs) only once no paint or read is pending.
void VideoRendererBase::AttemptFlush_Locked() {
  lock_.AssertAcquired();
  DCHECK_EQ(kFlushing, state_);

  // Drop all frames queued for rendering.
  ready_frames_.clear();

  // Cannot finish the flush while a paint or read is still in flight;
  // this method will be re-attempted when they complete.
  if (pending_paint_ || pending_read_)
    return;

  state_ = kFlushed;
  current_frame_ = NULL;
  base::ResetAndReturn(&flush_cb_).Run();
}
| 0
|
85,404
|
/* Close the network transport for an association.  Currently always
 * delegates to the TCP implementation. */
closeTransport(PRIVATE_ASSOCIATIONKEY ** association)
{
closeTransportTCP(association);
}
| 0
|
446,964
|
/*
 * Return true when every attribute named in attrs_1 also appears in
 * attrs_2, or when both lists are absent.  Note the containment check is
 * one-directional: attrs_2 may hold extra attributes and still match.
 */
static bool paged_attrs_same(const char * const *attrs_1,
                             const char * const *attrs_2) {
    int idx;

    /* Two missing lists are identical; exactly one missing is a mismatch. */
    if (attrs_1 == NULL || attrs_2 == NULL) {
        return (attrs_1 == NULL) && (attrs_2 == NULL);
    }

    for (idx = 0; attrs_1[idx] != NULL; idx++) {
        if (!ldb_attr_in_list(attrs_2, attrs_1[idx])) {
            return false;
        }
    }

    return true;
}
| 0
|
319,448
|
void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
int64_t bps_wr,
int64_t iops,
int64_t iops_rd,
int64_t iops_wr,
bool has_bps_max,
int64_t bps_max,
bool has_bps_rd_max,
int64_t bps_rd_max,
bool has_bps_wr_max,
int64_t bps_wr_max,
bool has_iops_max,
int64_t iops_max,
bool has_iops_rd_max,
int64_t iops_rd_max,
bool has_iops_wr_max,
int64_t iops_wr_max,
bool has_iops_size,
int64_t iops_size, Error **errp)
{
ThrottleConfig cfg;
BlockDriverState *bs;
bs = bdrv_find(device);
if (!bs) {
error_set(errp, QERR_DEVICE_NOT_FOUND, device);
return;
}
memset(&cfg, 0, sizeof(cfg));
cfg.buckets[THROTTLE_BPS_TOTAL].avg = bps;
cfg.buckets[THROTTLE_BPS_READ].avg = bps_rd;
cfg.buckets[THROTTLE_BPS_WRITE].avg = bps_wr;
cfg.buckets[THROTTLE_OPS_TOTAL].avg = iops;
cfg.buckets[THROTTLE_OPS_READ].avg = iops_rd;
cfg.buckets[THROTTLE_OPS_WRITE].avg = iops_wr;
if (has_bps_max) {
cfg.buckets[THROTTLE_BPS_TOTAL].max = bps_max;
}
if (has_bps_rd_max) {
cfg.buckets[THROTTLE_BPS_READ].max = bps_rd_max;
}
if (has_bps_wr_max) {
cfg.buckets[THROTTLE_BPS_WRITE].max = bps_wr_max;
}
if (has_iops_max) {
cfg.buckets[THROTTLE_OPS_TOTAL].max = iops_max;
}
if (has_iops_rd_max) {
cfg.buckets[THROTTLE_OPS_READ].max = iops_rd_max;
}
if (has_iops_wr_max) {
cfg.buckets[THROTTLE_OPS_WRITE].max = iops_wr_max;
}
if (has_iops_size) {
cfg.op_size = iops_size;
}
if (!check_throttle_config(&cfg, errp)) {
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (!bs->io_limits_enabled && throttle_enabled(&cfg)) {
bdrv_io_limits_enable(bs);
} else if (bs->io_limits_enabled && !throttle_enabled(&cfg)) {
bdrv_io_limits_disable(bs);
}
if (bs->io_limits_enabled) {
bdrv_set_io_limits(bs, &cfg);
}
aio_context_release(aio_context);
}
| 1
|
447,668
|
/*
 * Parse the L2 and L3 headers of the raw packet fragments in @pkt into the
 * dedicated header iovec slots, set pkt->packet_type and pkt->l4proto, and
 * compute the total header length.  Returns false (with the affected
 * header iov_len zeroed) when the packet is truncated or malformed.
 */
static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
struct iovec *l2_hdr, *l3_hdr;
size_t bytes_read;
size_t full_ip6hdr_len;
uint16_t l3_proto;
assert(pkt);
l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];
/* Pull up to the maximum possible L2 header from the raw fragments. */
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
ETH_MAX_L2_HDR_LEN);
if (bytes_read < sizeof(struct eth_header)) {
l2_hdr->iov_len = 0;
return false;
}
/* Account for single or double VLAN tags in the L2 header length. */
l2_hdr->iov_len = sizeof(struct eth_header);
switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
case ETH_P_VLAN:
l2_hdr->iov_len += sizeof(struct vlan_header);
break;
case ETH_P_DVLAN:
l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
break;
}
if (bytes_read < l2_hdr->iov_len) {
/* Truncated inside the (tagged) L2 header. */
l2_hdr->iov_len = 0;
l3_hdr->iov_len = 0;
pkt->packet_type = ETH_PKT_UCAST;
return false;
} else {
l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN;
l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
}
l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);
switch (l3_proto) {
case ETH_P_IP:
/* Fixed IPv4 header first; options are copied separately below. */
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
l3_hdr->iov_base, sizeof(struct ip_header));
if (bytes_read < sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);
/* IHL smaller than the fixed header is malformed. */
if (l3_hdr->iov_len < sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
pkt->l4proto = IP_HDR_GET_P(l3_hdr->iov_base);
if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
/* copy optional IPv4 header data if any*/
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
l2_hdr->iov_len + sizeof(struct ip_header),
l3_hdr->iov_base + sizeof(struct ip_header),
l3_hdr->iov_len - sizeof(struct ip_header));
if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
}
break;
case ETH_P_IPV6:
{
eth_ip6_hdr_info hdrinfo;
/* Walk the IPv6 extension-header chain to find the full length. */
if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
&hdrinfo)) {
l3_hdr->iov_len = 0;
return false;
}
pkt->l4proto = hdrinfo.l4proto;
full_ip6hdr_len = hdrinfo.full_hdr_len;
/* Reject absurd header chains that would overflow the header buffer. */
if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
l3_hdr->iov_len = 0;
return false;
}
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
l3_hdr->iov_base, full_ip6hdr_len);
if (bytes_read < full_ip6hdr_len) {
l3_hdr->iov_len = 0;
return false;
} else {
l3_hdr->iov_len = full_ip6hdr_len;
}
break;
}
default:
/* Non-IP payloads have no L3 header to record; still a success. */
l3_hdr->iov_len = 0;
break;
}
net_tx_pkt_calculate_hdr_len(pkt);
return true;
}
| 0
|
447,985
|
/*
 * Predicate used while waiting for the reply to an XIM_SET_IC_VALUES
 * request: returns True when @data is the matching reply packet or a
 * matching XIM_ERROR packet for this im/ic pair, False for anything else.
 */
_XimSetICValuesCheck(
    Xim im,
    INT16 len,
    XPointer data,
    XPointer arg)
{
    Xic ic = (Xic)arg;
    CARD8 *hdr = (CARD8 *)data;
    CARD16 *payload = (CARD16 *)(hdr + XIM_HEADER_SIZE);
    CARD8 major_opcode = hdr[0];
    CARD8 minor_opcode = hdr[1];
    XIMID imid = payload[0];
    XICID icid = payload[1];
    Bool ids_match = (imid == im->private.proto.imid) &&
                     (icid == ic->private.proto.icid);

    /* The normal reply for this input context. */
    if (major_opcode == XIM_SET_IC_VALUES_REPLY && minor_opcode == 0 &&
        ids_match)
        return True;

    /* An error packet whose (valid) im/ic ids match ours. */
    if (major_opcode == XIM_ERROR && minor_opcode == 0 &&
        (payload[2] & XIM_IMID_VALID) && (payload[2] & XIM_ICID_VALID) &&
        ids_match)
        return True;

    return False;
}
| 0
|
262,898
|
/*
 * Bump allocator for the spell-file writer: return a pointer to `len'
 * bytes carved from the current sblock_T, allocating a fresh block when
 * the current one is absent or too full.  Blocks are only freed when the
 * whole spellinfo is torn down.  Returns NULL on out-of-memory (after
 * emitting E845 once) or when len >= SBLOCKSIZE.
 */
getroom(
spellinfo_T *spin,
size_t len, /* length needed */
int align) /* align for pointer */
{
char_u *p;
sblock_T *bl = spin->si_blocks;
if (align && bl != NULL)
/* Round size up for alignment. On some systems structures need to be
 * aligned to the size of a pointer (e.g., SPARC). */
bl->sb_used = (bl->sb_used + sizeof(char *) - 1)
& ~(sizeof(char *) - 1);
if (bl == NULL || bl->sb_used + len > SBLOCKSIZE)
{
/* Requests of a full block or more can never fit: fail them. */
if (len >= SBLOCKSIZE)
bl = NULL;
else
/* Allocate a block of memory. It is not freed until much later. */
bl = (sblock_T *)alloc_clear(
(unsigned)(sizeof(sblock_T) + SBLOCKSIZE));
if (bl == NULL)
{
/* Report out-of-memory only once to avoid flooding the user. */
if (!spin->si_did_emsg)
{
EMSG(_("E845: Insufficient memory, word list will be incomplete"));
spin->si_did_emsg = TRUE;
}
return NULL;
}
/* Push the new block on the front of the block list. */
bl->sb_next = spin->si_blocks;
spin->si_blocks = bl;
bl->sb_used = 0;
++spin->si_blocks_cnt;
}
p = bl->sb_data + bl->sb_used;
bl->sb_used += (int)len;
return p;
}
| 0
|
231,953
|
// Build the HTML for a navigation error page.  Chooses between the app
// error template (when the failed URL belongs to an installed app/extension),
// the form-repost template (cache miss on a POST), and the generic network
// error template, then expands the chosen template with localized strings.
std::string ChromeContentRendererClient::GetNavigationErrorHtml(
    const WebURLRequest& failed_request,
    const WebURLError& error) {
GURL failed_url = error.unreachableURL;
std::string html;
const Extension* extension = NULL;
int resource_id;
DictionaryValue error_strings;
// Look up an app for the URL, but never treat extension-scheme URLs as apps.
if (failed_url.is_valid() && !failed_url.SchemeIs(chrome::kExtensionScheme))
extension = extension_dispatcher_->extensions()->GetByURL(failed_url);
if (extension) {
LocalizedError::GetAppErrorStrings(error, failed_url, extension,
&error_strings);
resource_id = IDR_ERROR_APP_HTML;
} else {
// A cache miss on a POST means a form repost; show the dedicated page.
if (error.domain == WebString::fromUTF8(net::kErrorDomain) &&
error.reason == net::ERR_CACHE_MISS &&
EqualsASCII(failed_request.httpMethod(), "POST")) {
LocalizedError::GetFormRepostStrings(failed_url, &error_strings);
} else {
LocalizedError::GetStrings(error, &error_strings);
}
resource_id = IDR_NET_ERROR_HTML;
}
const base::StringPiece template_html(
ResourceBundle::GetSharedInstance().GetRawDataResource(resource_id));
if (template_html.empty()) {
// A missing template is a packaging bug; return empty HTML.
NOTREACHED() << "unable to load template. ID: " << resource_id;
} else {
html = jstemplate_builder::GetTemplatesHtml(
template_html, &error_strings, "t");
}
return html;
}
| 0
|
40,787
|
/*
 * Queue a binary-protocol success response (header plus optional payload)
 * for connection @c, unless the client requested quiet mode.  GET and GETK
 * always produce a reply even in quiet mode.
 */
static void write_bin_response(conn *c, void *d, int hlen, int keylen, int dlen) {
    int suppress_reply = c->noreply &&
                         c->cmd != PROTOCOL_BINARY_CMD_GET &&
                         c->cmd != PROTOCOL_BINARY_CMD_GETK;

    if (suppress_reply) {
        /* Quiet command: skip the reply, go read the next command. */
        conn_set_state(c, conn_new_cmd);
        return;
    }

    add_bin_header(c, 0, hlen, keylen, dlen);
    if (dlen > 0) {
        add_iov(c, d, dlen);
    }
    conn_set_state(c, conn_mwrite);
    c->write_and_go = conn_new_cmd;
}
| 0
|
108,762
|
/*
 * Sanity checks run on every schedule(): warn if we are scheduling from
 * an atomic context (except the do_exit() path) and collect scheduling
 * statistics, including BKL-held-while-scheduling counts when
 * CONFIG_SCHEDSTATS is enabled.
 */
static inline void schedule_debug(struct task_struct *prev)
{
/*
 * Test if we are atomic. Since do_exit() needs to call into
 * schedule() atomically, we ignore that path for now.
 * Otherwise, whine if we are scheduling when we should not be.
 */
if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
__schedule_bug(prev);
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
schedstat_inc(this_rq(), sched_count);
#ifdef CONFIG_SCHEDSTATS
/* lock_depth >= 0 means prev still holds the big kernel lock. */
if (unlikely(prev->lock_depth >= 0)) {
schedstat_inc(this_rq(), bkl_count);
schedstat_inc(prev, sched_info.bkl_count);
}
#endif
}
| 0
|
418,560
|
/*
 * Resolve a dotted OID string to the deepest matching node of the MIB
 * tree, starting from @root (or the global tree head when @root is NULL).
 * Returns the last node on the matched path -- which may be an interior
 * node when the OID is only a prefix match -- or NULL when the string
 * cannot be parsed as an OID.
 */
snmpLookupNodeStr(mib_tree_entry *root, const char *str)
{
oid *name;
int namelen;
mib_tree_entry *e;
if (root)
e = root;
else
e = mib_tree_head;
if (! snmpCreateOidFromStr(str, &name, &namelen))
return NULL;
/* I wish there were some kind of sensible existing tree traversal
 * routine to use. I'll worry about that later */
if (namelen <= 1) {
xfree(name);
return e; /* XXX it should only be this? */
}
/* Descend one OID component per iteration, starting at component 1. */
int i, r = 1;
while (r < namelen) {
/* Find the child node which matches this */
for (i = 0; i < e->children && e->leaves[i]->name[r] != name[r]; ++i) ; // seek-loop
/* Are we pointing to that node? */
if (i >= e->children)
break;
assert(e->leaves[i]->name[r] == name[r]);
/* Skip to that node! */
e = e->leaves[i];
++r;
}
xfree(name);
return e;
}
| 0
|
447,097
|
// Scan one GSUB lookup table (index listIndex in the lookup list) for a
// substitution of glyph orgGID.  Each of the lookup's subtables is tried in
// order; the first non-zero substituted glyph id is returned, or 0 when no
// substitution is found (or there is no lookup list at all).
Guint FoFiTrueType::scanLookupList(Guint listIndex, Guint orgGID)
{
  Guint substGID = 0;

  if (gsubLookupList == 0) {
    return 0; // no lookup list
  }

  // LookupList: 2-byte lookup count followed by 2-byte offsets.
  Guint pos = gsubLookupList + 2 + listIndex * 2;
  Guint lookupTable = getU16BE(pos, &parsedOk);

  // Lookup table header: type(2) + flag(2) precede subTableCount at +4.
  pos = gsubLookupList + lookupTable + 4;
  Guint subTableCount = getU16BE(pos, &parsedOk);
  pos += 2;

  for (Guint i = 0; i < subTableCount; ++i) {
    Guint subTable = getU16BE(pos, &parsedOk);
    pos += 2;
    substGID = scanLookupSubTable(gsubLookupList + lookupTable + subTable,
                                  orgGID);
    if (substGID != 0) {
      break;
    }
  }
  return substGID;
}
| 0
|
192,978
|
/*
 * Return a human-readable message for @error_number.  On Windows this
 * maps the Winsock WSA* error codes to fixed strings first and falls back
 * to strerror(); on WinCE it always returns "unknown".  The returned
 * string is static and must not be freed.
 */
_dbus_strerror (int error_number)
{
#ifdef DBUS_WINCE
return "unknown";
#else
const char *msg;
/* Winsock errors are not covered by strerror(); translate them here. */
switch (error_number)
{
case WSAEINTR:
return "Interrupted function call";
case WSAEACCES:
return "Permission denied";
case WSAEFAULT:
return "Bad address";
case WSAEINVAL:
return "Invalid argument";
case WSAEMFILE:
return "Too many open files";
case WSAEWOULDBLOCK:
return "Resource temporarily unavailable";
case WSAEINPROGRESS:
return "Operation now in progress";
case WSAEALREADY:
return "Operation already in progress";
case WSAENOTSOCK:
return "Socket operation on nonsocket";
case WSAEDESTADDRREQ:
return "Destination address required";
case WSAEMSGSIZE:
return "Message too long";
case WSAEPROTOTYPE:
return "Protocol wrong type for socket";
case WSAENOPROTOOPT:
return "Bad protocol option";
case WSAEPROTONOSUPPORT:
return "Protocol not supported";
case WSAESOCKTNOSUPPORT:
return "Socket type not supported";
case WSAEOPNOTSUPP:
return "Operation not supported";
case WSAEPFNOSUPPORT:
return "Protocol family not supported";
case WSAEAFNOSUPPORT:
return "Address family not supported by protocol family";
case WSAEADDRINUSE:
return "Address already in use";
case WSAEADDRNOTAVAIL:
return "Cannot assign requested address";
case WSAENETDOWN:
return "Network is down";
case WSAENETUNREACH:
return "Network is unreachable";
case WSAENETRESET:
return "Network dropped connection on reset";
case WSAECONNABORTED:
return "Software caused connection abort";
case WSAECONNRESET:
return "Connection reset by peer";
case WSAENOBUFS:
return "No buffer space available";
case WSAEISCONN:
return "Socket is already connected";
case WSAENOTCONN:
return "Socket is not connected";
case WSAESHUTDOWN:
return "Cannot send after socket shutdown";
case WSAETIMEDOUT:
return "Connection timed out";
case WSAECONNREFUSED:
return "Connection refused";
case WSAEHOSTDOWN:
return "Host is down";
case WSAEHOSTUNREACH:
return "No route to host";
case WSAEPROCLIM:
return "Too many processes";
case WSAEDISCON:
return "Graceful shutdown in progress";
case WSATYPE_NOT_FOUND:
return "Class type not found";
case WSAHOST_NOT_FOUND:
return "Host not found";
case WSATRY_AGAIN:
return "Nonauthoritative host not found";
case WSANO_RECOVERY:
return "This is a nonrecoverable error";
case WSANO_DATA:
return "Valid name, no data record of requested type";
case WSA_INVALID_HANDLE:
return "Specified event object handle is invalid";
case WSA_INVALID_PARAMETER:
return "One or more parameters are invalid";
case WSA_IO_INCOMPLETE:
return "Overlapped I/O event object not in signaled state";
case WSA_IO_PENDING:
return "Overlapped operations will complete later";
case WSA_NOT_ENOUGH_MEMORY:
return "Insufficient memory available";
case WSA_OPERATION_ABORTED:
return "Overlapped operation aborted";
#ifdef WSAINVALIDPROCTABLE
case WSAINVALIDPROCTABLE:
return "Invalid procedure table from service provider";
#endif
#ifdef WSAINVALIDPROVIDER
case WSAINVALIDPROVIDER:
return "Invalid service provider version number";
#endif
#ifdef WSAPROVIDERFAILEDINIT
case WSAPROVIDERFAILEDINIT:
return "Unable to initialize a service provider";
#endif
case WSASYSCALLFAILURE:
return "System call failure";
}
/* Not a Winsock code: fall back to the C runtime's message. */
msg = strerror (error_number);
if (msg == NULL)
msg = "unknown";
return msg;
#endif //DBUS_WINCE
}
| 0
|
836
|
/*
 * ICU LMBCS-to-Unicode conversion loop (with source-offset tracking).
 * Consumes bytes from args->source and emits UChars to args->target until
 * either buffer is exhausted or an error occurs.  Bytes left over from a
 * previous call (args->converter->toULength) are stitched together with
 * fresh input in a local buffer before decoding; a still-truncated
 * character is stashed back into toUBytes for the next call.
 */
static void _LMBCSToUnicodeWithOffsets ( UConverterToUnicodeArgs * args , UErrorCode * err ) {
char LMBCS [ ULMBCS_CHARSIZE_MAX ] ;
UChar uniChar ;
const char * saveSource ;
const char * pStartLMBCS = args -> source ;
const char * errSource = NULL ;
int8_t savebytes = 0 ;
while ( U_SUCCESS ( * err ) && args -> sourceLimit > args -> source && args -> targetLimit > args -> target ) {
saveSource = args -> source ;
if ( args -> converter -> toULength ) {
/* Leftover partial character from the previous call: splice the
 * stored bytes and the new input into the local LMBCS buffer. */
const char * saveSourceLimit ;
size_t size_old = args -> converter -> toULength ;
size_t size_new_maybe_1 = sizeof ( LMBCS ) - size_old ;
size_t size_new_maybe_2 = args -> sourceLimit - args -> source ;
size_t size_new = ( size_new_maybe_1 < size_new_maybe_2 ) ? size_new_maybe_1 : size_new_maybe_2 ;
uprv_memcpy ( LMBCS , args -> converter -> toUBytes , size_old ) ;
uprv_memcpy ( LMBCS + size_old , args -> source , size_new ) ;
saveSourceLimit = args -> sourceLimit ;
/* Temporarily redirect the decoder at the spliced buffer. */
args -> source = errSource = LMBCS ;
args -> sourceLimit = LMBCS + size_old + size_new ;
savebytes = ( int8_t ) ( size_old + size_new ) ;
uniChar = ( UChar ) _LMBCSGetNextUCharWorker ( args , err ) ;
/* Translate the buffer-relative position back into real input. */
args -> source = saveSource + ( ( args -> source - LMBCS ) - size_old ) ;
args -> sourceLimit = saveSourceLimit ;
if ( * err == U_TRUNCATED_CHAR_FOUND ) {
/* Still incomplete: re-stash the bytes and wait for more input. */
args -> converter -> toULength = savebytes ;
uprv_memcpy ( args -> converter -> toUBytes , LMBCS , savebytes ) ;
args -> source = args -> sourceLimit ;
* err = U_ZERO_ERROR ;
return ;
}
else {
args -> converter -> toULength = 0 ;
}
}
else {
/* Common case: decode directly from the caller's buffer. */
errSource = saveSource ;
uniChar = ( UChar ) _LMBCSGetNextUCharWorker ( args , err ) ;
savebytes = ( int8_t ) ( args -> source - saveSource ) ;
}
if ( U_SUCCESS ( * err ) ) {
if ( uniChar < 0xfffe ) {
* ( args -> target ) ++ = uniChar ;
if ( args -> offsets ) {
/* Offset of the first source byte of this character. */
* ( args -> offsets ) ++ = ( int32_t ) ( saveSource - pStartLMBCS ) ;
}
}
else if ( uniChar == 0xfffe ) {
* err = U_INVALID_CHAR_FOUND ;
}
else {
* err = U_ILLEGAL_CHAR_FOUND ;
}
}
}
/* Input remains but the target is full: report overflow. */
if ( U_SUCCESS ( * err ) && args -> sourceLimit > args -> source && args -> targetLimit <= args -> target ) {
* err = U_BUFFER_OVERFLOW_ERROR ;
}
else if ( U_FAILURE ( * err ) ) {
/* Preserve the offending bytes for error reporting / recovery. */
args -> converter -> toULength = savebytes ;
if ( savebytes > 0 ) {
uprv_memcpy ( args -> converter -> toUBytes , errSource , savebytes ) ;
}
if ( * err == U_TRUNCATED_CHAR_FOUND ) {
* err = U_ZERO_ERROR ;
}
}
}
| 1
|
473,878
|
/*
 * Install @file into slot @fd of the current task's file table.  Normally
 * runs locklessly under rcu_read_lock_sched(); when a table resize is in
 * progress it falls back to taking file_lock so the store cannot race
 * with the table being copied.  The fd slot must be reserved and empty.
 */
void fd_install(unsigned int fd, struct file *file)
{
struct files_struct *files = current->files;
struct fdtable *fdt;
rcu_read_lock_sched();
if (unlikely(files->resize_in_progress)) {
/* Resizer active: serialize against it with the spinlock instead. */
rcu_read_unlock_sched();
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
BUG_ON(fdt->fd[fd] != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
spin_unlock(&files->file_lock);
return;
}
/* coupled with smp_wmb() in expand_fdtable() */
smp_rmb();
fdt = rcu_dereference_sched(files->fdt);
BUG_ON(fdt->fd[fd] != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
rcu_read_unlock_sched();
}
| 0
|
251,516
|
// Return the document's live collection of <form> elements, creating and
// caching it on first use.
PassRefPtr<HTMLCollection> Document::forms()
{
return ensureCachedCollection(DocForms);
}
| 0
|
81,855
|
/*
 * __ip_append_data() - append user data to an IP socket's transmit queue,
 * building a chain of skbs each sized to become one IP fragment.
 *
 * @sk:          sending socket
 * @queue:       skb queue the fragments are appended to
 * @cork:        cork state (dst route, IP options, accumulated length, page)
 * @getfrag:     callback that copies user data into a fragment
 * @from:        opaque cookie passed through to @getfrag
 * @length:      number of bytes to append in this call
 * @transhdrlen: transport header length; non-zero only on the first call
 *               for a datagram (first fragment)
 * @flags:       MSG_* flags (MSG_MORE, MSG_DONTWAIT, ...)
 *
 * Returns 0 on success or a negative errno.  On error, the not-queued
 * byte count is subtracted from cork->length again.
 */
static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
struct inet_cork *cork,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct ip_options *opt = cork->opt;
int hh_len;
int exthdrlen;
int mtu;
int copy;
int err;
int offset = 0;
unsigned int maxfraglen, fragheaderlen;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
/* Extra (e.g. IPsec) header space is only accounted on the first call. */
exthdrlen = transhdrlen ? rt->dst.header_len : 0;
length += exthdrlen;
transhdrlen += exthdrlen;
mtu = cork->fragsize;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
/* Largest fragment payload boundary: 8-byte aligned per IP fragmentation. */
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
/* IP total length field is 16 bits; reject datagrams that would exceed it. */
if (cork->length + length > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
mtu-exthdrlen);
return -EMSGSIZE;
}
/*
 * transhdrlen > 0 means that this is the first fragment and we wish
 * it won't be fragmented in the future.
 */
if (transhdrlen &&
length + fragheaderlen <= mtu &&
rt->dst.dev->features & NETIF_F_V4_CSUM &&
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
skb = skb_peek_tail(queue);
cork->length += length;
/* Hardware UDP fragmentation offload path: hand everything to the device. */
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
mtu, flags);
if (err)
goto error;
return 0;
}
/* So, what's going on in the loop below?
 *
 * We use calculated fragment length to generate chained skb,
 * each of segments is IP fragment ready for sending to network after
 * adding appropriate IP header.
 */
if (!skb)
goto alloc_new_skb;
while (length > 0) {
/* Check if the remaining data fits into current packet. */
copy = mtu - skb->len;
if (copy < length)
copy = maxfraglen - skb->len;
if (copy <= 0) {
char *data;
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
struct sk_buff *skb_prev;
alloc_new_skb:
skb_prev = skb;
/* fraggap: bytes past the 8-byte boundary in the previous skb that
 * must be moved into the new fragment. */
if (skb_prev)
fraggap = skb_prev->len - maxfraglen;
else
fraggap = 0;
/*
 * If remaining data exceeds the mtu,
 * we know we need more fragment(s).
 */
datalen = length + fraggap;
if (datalen > mtu - fragheaderlen)
datalen = maxfraglen - fragheaderlen;
fraglen = datalen + fragheaderlen;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
alloclen = fraglen;
/* The last fragment gets additional space at tail.
 * Note, with MSG_MORE we overallocate on fragments,
 * because we have no idea what fragment will be
 * the last.
 */
if (datalen == length + fraggap) {
alloclen += rt->dst.trailer_len;
/* make sure mtu is not reached */
if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
datalen -= ALIGN(rt->dst.trailer_len, 8);
}
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len + 15,
(flags & MSG_DONTWAIT), &err);
} else {
/* Later fragments: allow up to 2x sndbuf of in-flight wmem. */
skb = NULL;
if (atomic_read(&sk->sk_wmem_alloc) <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
else
/* only the initial fragment is
time stamped */
cork->tx_flags = 0;
}
if (skb == NULL)
goto error;
/*
 * Fill in the control structures
 */
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
skb_shinfo(skb)->tx_flags = cork->tx_flags;
/*
 * Find where to start putting bytes.
 */
data = skb_put(skb, fraglen);
skb_set_network_header(skb, exthdrlen);
skb->transport_header = (skb->network_header +
fragheaderlen);
data += fragheaderlen;
/* Move the overhanging tail of the previous fragment into this one,
 * fixing up both checksums. */
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap;
if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
}
offset += copy;
length -= datalen - fraggap;
/* Only the first fragment carries the transport/ext headers. */
transhdrlen = 0;
exthdrlen = 0;
csummode = CHECKSUM_NONE;
/*
 * Put the packet on the pending queue.
 */
__skb_queue_tail(queue, skb);
continue;
}
if (copy > length)
copy = length;
if (!(rt->dst.dev->features&NETIF_F_SG)) {
/* No scatter-gather: copy into the skb's linear area. */
unsigned int off;
off = skb->len;
if (getfrag(from, skb_put(skb, copy),
offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
}
} else {
/* Scatter-gather: append into page fragments, reusing the cork page
 * while it has room. */
int i = skb_shinfo(skb)->nr_frags;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
struct page *page = cork->page;
int off = cork->off;
unsigned int left;
if (page && (left = PAGE_SIZE - off) > 0) {
if (copy >= left)
copy = left;
if (page != frag->page) {
if (i == MAX_SKB_FRAGS) {
err = -EMSGSIZE;
goto error;
}
get_page(page);
skb_fill_page_desc(skb, i, page, off, 0);
frag = &skb_shinfo(skb)->frags[i];
}
} else if (i < MAX_SKB_FRAGS) {
if (copy > PAGE_SIZE)
copy = PAGE_SIZE;
page = alloc_pages(sk->sk_allocation, 0);
if (page == NULL) {
err = -ENOMEM;
goto error;
}
cork->page = page;
cork->off = 0;
skb_fill_page_desc(skb, i, page, 0, 0);
frag = &skb_shinfo(skb)->frags[i];
} else {
err = -EMSGSIZE;
goto error;
}
if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
err = -EFAULT;
goto error;
}
cork->off += copy;
frag->size += copy;
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;
}
return 0;
error:
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
| 0
|
408,896
|
/*
 * page_objects_sort() - sort po->object[0..po->len) in ascending order,
 * in place, using heapsort: O(n log n), no extra memory, not stable.
 * ctx is unused here but kept for interface consistency with callers.
 */
page_objects_sort(fz_context *ctx, page_objects *po)
{
int i, j;
int n = po->len;
/* Step 1: Make a heap */
/* Invariant: Valid heap in [0..i), unsorted elements in [i..n) */
for (i = 1; i < n; i++)
{
/* Now bubble backwards to maintain heap invariant */
j = i;
while (j != 0)
{
int tmp;
/* Parent of j in the implicit binary heap. */
int k = (j-1)>>1;
if (po->object[k] >= po->object[j])
break;
tmp = po->object[k];
po->object[k] = po->object[j];
po->object[j] = tmp;
j = k;
}
}
/* Step 2: Heap sort */
/* Invariant: valid heap in [0..i), sorted list in [i..n) */
/* Initially: i = n */
for (i = n-1; i > 0; i--)
{
/* Swap the maximum (0th) element from the page_objects into its place
 * in the sorted list (position i). */
int tmp = po->object[0];
po->object[0] = po->object[i];
po->object[i] = tmp;
/* Now, the page_objects is invalid because the 0th element is out
 * of place. Bubble it until the page_objects is valid. */
j = 0;
while (1)
{
/* Children are k and k+1 */
int k = (j+1)*2-1;
/* If both children out of the page_objects, we're done */
if (k > i-1)
break;
/* If both are in the page_objects, pick the larger one */
if (k < i-1 && po->object[k] < po->object[k+1])
k++;
/* If j is bigger than k (i.e. both of its children),
 * we're done */
if (po->object[j] > po->object[k])
break;
tmp = po->object[k];
po->object[k] = po->object[j];
po->object[j] = tmp;
j = k;
}
}
}
| 0
|
190,932
|
// Notifies every SVG element that references |referencedElement| so it can
// rebuild its reference (fires svgAttributeChanged for xlink:href).
//
// The referencing set is first copied into a vector because the
// notifications can mutate m_elementDependencies; on each iteration the set
// is looked up again and membership re-checked, so elements whose dependency
// was removed by an earlier notification are skipped.
void SVGDocumentExtensions::rebuildAllElementReferencesForTarget(SVGElement* referencedElement)
{
ASSERT(referencedElement);
HashMap<SVGElement*, OwnPtr<HashSet<SVGElement*> > >::iterator it = m_elementDependencies.find(referencedElement);
if (it == m_elementDependencies.end())
return;
ASSERT(it->key == referencedElement);
Vector<SVGElement*> toBeNotified;
HashSet<SVGElement*>* referencingElements = it->value.get();
HashSet<SVGElement*>::iterator setEnd = referencingElements->end();
for (HashSet<SVGElement*>::iterator setIt = referencingElements->begin(); setIt != setEnd; ++setIt)
toBeNotified.append(*setIt);
Vector<SVGElement*>::iterator vectorEnd = toBeNotified.end();
for (Vector<SVGElement*>::iterator vectorIt = toBeNotified.begin(); vectorIt != vectorEnd; ++vectorIt) {
// Re-fetch the live set: a previous svgAttributeChanged may have
// removed entries (or the whole set).
if (HashSet<SVGElement*>* referencingElements = setOfElementsReferencingTarget(referencedElement)) {
if (referencingElements->contains(*vectorIt))
(*vectorIt)->svgAttributeChanged(XLinkNames::hrefAttr);
}
}
}
| 0
|
486,190
|
/*
 * dns_enable_merge() - turn on merging of DNS log entries and record @f as
 * the callback used to report merged entries.  No validation of @f is done
 * here.
 */
dns_enable_merge (void (*f)(const char *, const char *, const char *))
{
merge_enable = 1;
merge_logger = f;
}
| 0
|
168,474
|
/*
 * g_verify_token_header() - validate the RFC 2743 (GSS-API) initial token
 * framing: [APPLICATION 0] header byte, DER length, and mechanism OID.
 *
 * On success (return 0), *buf_in is advanced past the header/OID/token-type
 * bytes and *body_size is set to the remaining token length.  Returns
 * G_BAD_TOK_HEADER for any framing error, or G_WRONG_MECH if the framing is
 * valid but the OID does not match @mech (outputs untouched in both cases).
 */
g_verify_token_header(gss_OID_const mech,
unsigned int *body_size,
unsigned char **buf_in,
int tok_type,
unsigned int toksize)
{
unsigned char *buf = *buf_in;
int seqsize;
gss_OID_desc toid;
int ret = 0;
unsigned int bytes;
/* Every read is preceded by a remaining-size check on toksize. */
if (toksize-- < 1)
return (G_BAD_TOK_HEADER);
if (*buf++ != HEADER_ID)
return (G_BAD_TOK_HEADER);
if ((seqsize = gssint_get_der_length(&buf, toksize, &bytes)) < 0)
return (G_BAD_TOK_HEADER);
/* DER length must account for exactly the rest of the token. */
if ((seqsize + bytes) != toksize)
return (G_BAD_TOK_HEADER);
if (toksize-- < 1)
return (G_BAD_TOK_HEADER);
if (*buf++ != MECH_OID)
return (G_BAD_TOK_HEADER);
if (toksize-- < 1)
return (G_BAD_TOK_HEADER);
toid.length = *buf++;
if (toksize < toid.length)
return (G_BAD_TOK_HEADER);
else
toksize -= toid.length;
toid.elements = buf;
buf += toid.length;
if (!g_OID_equal(&toid, mech))
ret = G_WRONG_MECH;
/*
 * G_WRONG_MECH is not returned immediately because it's more important
 * to return G_BAD_TOK_HEADER if the token header is in fact bad
 */
/* Two bytes of token type must still be present. */
if (toksize < 2)
return (G_BAD_TOK_HEADER);
else
toksize -= 2;
if (!ret) {
*buf_in = buf;
*body_size = toksize;
}
return (ret);
}
| 0
|
234,835
|
// Options-based factory overload: unwraps the context and the media element
// from |options| and forwards to the reference-taking Create().
// NOTE(review): dereferences |context| and |options.mediaElement()| without
// null checks — presumably guaranteed non-null by the bindings layer;
// confirm at call sites.
MediaElementAudioSourceNode* MediaElementAudioSourceNode::Create(
    AudioContext* context,
    const MediaElementAudioSourceOptions& options,
    ExceptionState& exception_state) {
  return Create(*context, *options.mediaElement(), exception_state);
}
| 0
|
116,368
|
// Builds a minimal test Listener proto: the given name, a socket address
// bound to port 0 (ephemeral), and the caller-supplied filter_chains YAML
// fragment spliced in verbatim.  The fragment must be valid YAML at the
// indentation used in the template below.
ConfigHelper::buildBaseListener(const std::string& name, const std::string& address,
                                const std::string& filter_chains) {
  API_NO_BOOST(envoy::config::listener::v3::Listener) listener;
  TestUtility::loadFromYaml(fmt::format(
                                R"EOF(
      name: {}
      address:
        socket_address:
          address: {}
          port_value: 0
      filter_chains:
      {}
    )EOF",
                                name, address, filter_chains),
                            listener);
  return listener;
}
| 0
|
378,672
|
/*
 * vfswrap_strict_lock() - default VFS strict-lock hook.  Asserts the lock
 * request is a read or write lock (other types are a caller bug) and
 * delegates the actual check to strict_lock_default().  @handle is unused.
 */
static bool vfswrap_strict_lock(struct vfs_handle_struct *handle,
files_struct *fsp,
struct lock_struct *plock)
{
SMB_ASSERT(plock->lock_type == READ_LOCK ||
plock->lock_type == WRITE_LOCK);
return strict_lock_default(fsp, plock);
}
| 0
|
407,959
|
/*
 * setcharset() - parse a bracket expression ("[...]") from a glob pattern
 * and set charset[b] = 1 for every byte b the class accepts.  A leading
 * '^' or '!' sets the special charset[CURLFNM_NEGATE] slot instead of a
 * literal byte.
 *
 * *p points just past the opening '[' on entry; on SETCHARSET_OK it is left
 * pointing at the closing ']'.  Returns SETCHARSET_FAIL on malformed input,
 * including hitting the NUL terminator before ']' is found.
 *
 * States:
 *   DEFAULT       - expecting a class member (literal, [:keyword:], escape)
 *   MAYRANGE      - just saw a member; a following '-' may begin a range
 *   MAYRANGE2     - saw "x-"; expecting the end of the range
 *   RIGHTBR       - class began with ']' taken as a literal member
 *   RIGHTBRLEFTBR - that literal ']' was followed by '['
 */
static int setcharset(unsigned char **p, unsigned char *charset)
{
setcharset_state state = CURLFNM_SCHS_DEFAULT;
unsigned char rangestart = 0;
unsigned char lastchar = 0;
bool something_found = FALSE;
unsigned char c;
for(;;) {
c = **p;
if(!c)
return SETCHARSET_FAIL;
switch(state) {
case CURLFNM_SCHS_DEFAULT:
if(ISALNUM(c)) { /* ASCII value */
rangestart = c;
charset[c] = 1;
(*p)++;
state = CURLFNM_SCHS_MAYRANGE;
something_found = TRUE;
}
else if(c == ']') {
/* ']' closes the class only if it already has a member;
 * otherwise it is a literal first member. */
if(something_found)
return SETCHARSET_OK;
something_found = TRUE;
state = CURLFNM_SCHS_RIGHTBR;
charset[c] = 1;
(*p)++;
}
else if(c == '[') {
/* "[:" introduces a POSIX-style keyword class, e.g. [:alpha:] */
char c2 = *((*p) + 1);
if(c2 == ':') { /* there has to be a keyword */
(*p) += 2;
if(parsekeyword(p, charset)) {
state = CURLFNM_SCHS_DEFAULT;
}
else
return SETCHARSET_FAIL;
}
else {
charset[c] = 1;
(*p)++;
}
something_found = TRUE;
}
else if(c == '?' || c == '*') {
something_found = TRUE;
charset[c] = 1;
(*p)++;
}
else if(c == '^' || c == '!') {
/* Leading '^'/'!' negates the class; a second one (or one after a
 * member) is a literal. */
if(!something_found) {
if(charset[CURLFNM_NEGATE]) {
charset[c] = 1;
something_found = TRUE;
}
else
charset[CURLFNM_NEGATE] = 1; /* negate charset */
}
else
charset[c] = 1;
(*p)++;
}
else if(c == '\\') {
/* Backslash escapes the next (printable) byte. */
c = *(++(*p));
if(ISPRINT((c))) {
something_found = TRUE;
state = CURLFNM_SCHS_MAYRANGE;
charset[c] = 1;
rangestart = c;
(*p)++;
}
else
return SETCHARSET_FAIL;
}
else {
charset[c] = 1;
(*p)++;
something_found = TRUE;
}
break;
case CURLFNM_SCHS_MAYRANGE:
if(c == '-') {
charset[c] = 1;
(*p)++;
lastchar = '-';
state = CURLFNM_SCHS_MAYRANGE2;
}
else if(c == '[') {
state = CURLFNM_SCHS_DEFAULT;
}
else if(ISALNUM(c)) {
charset[c] = 1;
(*p)++;
}
else if(c == '\\') {
c = *(++(*p));
if(ISPRINT(c)) {
charset[c] = 1;
(*p)++;
}
else
return SETCHARSET_FAIL;
}
else if(c == ']') {
return SETCHARSET_OK;
}
else
return SETCHARSET_FAIL;
break;
case CURLFNM_SCHS_MAYRANGE2:
if(c == ']') {
return SETCHARSET_OK;
}
else if(c == '\\') {
c = *(++(*p));
if(ISPRINT(c)) {
charset[c] = 1;
state = CURLFNM_SCHS_DEFAULT;
(*p)++;
}
else
return SETCHARSET_FAIL;
}
else if(c >= rangestart) {
/* Complete a range like "a-z": both ends must be the same category
 * (both lower, both digits, or both upper).  The '-' recorded via
 * lastchar is un-set because it was only a range separator. */
if((ISLOWER(c) && ISLOWER(rangestart)) ||
(ISDIGIT(c) && ISDIGIT(rangestart)) ||
(ISUPPER(c) && ISUPPER(rangestart))) {
charset[lastchar] = 0;
rangestart++;
while(rangestart++ <= c)
charset[rangestart-1] = 1;
(*p)++;
state = CURLFNM_SCHS_DEFAULT;
}
else
return SETCHARSET_FAIL;
}
else
return SETCHARSET_FAIL;
break;
case CURLFNM_SCHS_RIGHTBR:
if(c == '[') {
state = CURLFNM_SCHS_RIGHTBRLEFTBR;
charset[c] = 1;
(*p)++;
}
else if(c == ']') {
return SETCHARSET_OK;
}
else if(ISPRINT(c)) {
charset[c] = 1;
(*p)++;
state = CURLFNM_SCHS_DEFAULT;
}
else
/* used 'goto fail' instead of 'return SETCHARSET_FAIL' to avoid a
 * nonsense warning 'statement not reached' at end of the fnc when
 * compiling on Solaris */
goto fail;
break;
case CURLFNM_SCHS_RIGHTBRLEFTBR:
if(c == ']') {
return SETCHARSET_OK;
}
else {
state = CURLFNM_SCHS_DEFAULT;
charset[c] = 1;
(*p)++;
}
break;
}
}
fail:
return SETCHARSET_FAIL;
}
| 0
|
62,469
|
/*
 * LoRaMacProcess() - main MAC processing entry point; to be called from the
 * application loop.  Drains pending IRQ events, runs class-B processing,
 * and, once the MAC state machine reports MacDone, handles pending
 * MLME/MCPS requests (unless a beacon acquisition suppressed TX), scheduled
 * uplinks, and finally indication events.  Request handling is disabled
 * around the request-dispatch section to avoid re-entrant requests.
 */
void LoRaMacProcess( void )
{
uint8_t noTx = false;
LoRaMacHandleIrqEvents( );
LoRaMacClassBProcess( );
// MAC proceeded a state and is ready to check
if( MacCtx.MacFlags.Bits.MacDone == 1 )
{
LoRaMacEnableRequests( LORAMAC_REQUEST_HANDLING_OFF );
LoRaMacCheckForRxAbort( );
// An error occurs during transmitting
if( IsRequestPending( ) > 0 )
{
noTx |= LoRaMacCheckForBeaconAcquisition( );
}
if( noTx == 0x00 )
{
LoRaMacHandleMlmeRequest( );
LoRaMacHandleMcpsRequest( );
}
LoRaMacHandleRequestEvents( );
LoRaMacHandleScheduleUplinkEvent( );
LoRaMacEnableRequests( LORAMAC_REQUEST_HANDLING_ON );
}
LoRaMacHandleIndicationEvents( );
// Class C devices keep a continuous RX window open.
if( MacCtx.RxSlot == RX_SLOT_WIN_CLASS_C )
{
OpenContinuousRxCWindow( );
}
}
| 0
|
216,848
|
/*
 * verify_vbr_checksum() - validate the exFAT Volume Boot Region checksum.
 *
 * Reads sectors 0..10 of the VBR, accumulating the rolling checksum
 * (sector 0 via the "start" variant which skips the mutable fields), then
 * reads sector 11 and requires every 32-bit little-endian word in it to
 * equal the computed checksum.  @sector is a caller-provided scratch buffer
 * of @sector_size bytes, reused for every read.  Returns false (with an
 * error logged) on any read failure or checksum mismatch.
 */
static bool verify_vbr_checksum(struct exfat_dev* dev, void* sector,
off_t sector_size)
{
uint32_t vbr_checksum;
int i;
if (exfat_pread(dev, sector, sector_size, 0) < 0)
{
exfat_error("failed to read boot sector");
return false;
}
vbr_checksum = exfat_vbr_start_checksum(sector, sector_size);
for (i = 1; i < 11; i++)
{
if (exfat_pread(dev, sector, sector_size, i * sector_size) < 0)
{
exfat_error("failed to read VBR sector");
return false;
}
vbr_checksum = exfat_vbr_add_checksum(sector, sector_size,
vbr_checksum);
}
/* i == 11 here: the checksum sector itself. */
if (exfat_pread(dev, sector, sector_size, i * sector_size) < 0)
{
exfat_error("failed to read VBR checksum sector");
return false;
}
for (i = 0; i < sector_size / sizeof(vbr_checksum); i++)
if (le32_to_cpu(((const le32_t*) sector)[i]) != vbr_checksum)
{
exfat_error("invalid VBR checksum 0x%x (expected 0x%x)",
le32_to_cpu(((const le32_t*) sector)[i]), vbr_checksum);
return false;
}
return true;
}
| 0
|
25,764
|
/*
 * std_conv_pixmap() - generic (slow-path) pixmap colorspace conversion from
 * src's colorspace to dst's, pixel by pixel through a fz_color_converter.
 *
 * Four strategies, chosen in order:
 *   1. Lab (or Lab ICC) 3-component source: convert directly, with the
 *      L/a/b byte decoding applied inline.
 *   2. Tiny images (w*h < 256 pixels): convert each pixel directly.
 *   3. Single-component source: precompute a 256-entry lookup table.
 *   4. Everything else: cache converted colours in a hash table keyed on
 *      the source components.
 * Alpha (if present in dst) is copied from src when src has alpha, else set
 * to 255.  copy_spots and prf/default_cs are currently unused in this body.
 */
static void std_conv_pixmap ( fz_context * ctx , fz_pixmap * dst , fz_pixmap * src , fz_colorspace * prf , const fz_default_colorspaces * default_cs , const fz_color_params * color_params , int copy_spots ) {
float srcv [ FZ_MAX_COLORS ] ;
float dstv [ FZ_MAX_COLORS ] ;
int srcn , dstn ;
int k , i ;
size_t w = src -> w ;
int h = src -> h ;
ptrdiff_t d_line_inc = dst -> stride - w * dst -> n ;
ptrdiff_t s_line_inc = src -> stride - w * src -> n ;
int da = dst -> alpha ;
int sa = src -> alpha ;
fz_colorspace * ss = src -> colorspace ;
fz_colorspace * ds = dst -> colorspace ;
unsigned char * s = src -> samples ;
unsigned char * d = dst -> samples ;
if ( ( int ) w < 0 || h < 0 ) return ;
if ( color_params == NULL ) color_params = fz_default_color_params ( ctx ) ;
srcn = ss -> n ;
dstn = ds -> n ;
assert ( src -> w == dst -> w && src -> h == dst -> h ) ;
assert ( src -> n == srcn + sa ) ;
assert ( dst -> n == dstn + da ) ;
/* If both pixmaps are packed with no row padding, treat the image as one
 * long row to skip per-row bookkeeping. */
if ( d_line_inc == 0 && s_line_inc == 0 ) {
w *= h ;
h = 1 ;
}
/* Strategy 1: Lab source — decode L in [0,100], a/b offset by 128. */
if ( ( fz_colorspace_is_lab ( ctx , ss ) || fz_colorspace_is_lab_icc ( ctx , ss ) ) && srcn == 3 ) {
fz_color_converter cc ;
fz_find_color_converter ( ctx , & cc , NULL , ds , ss , color_params ) ;
while ( h -- ) {
size_t ww = w ;
while ( ww -- ) {
srcv [ 0 ] = * s ++ / 255.0f * 100 ;
srcv [ 1 ] = * s ++ - 128 ;
srcv [ 2 ] = * s ++ - 128 ;
cc . convert ( ctx , & cc , dstv , srcv ) ;
for ( k = 0 ;
k < dstn ;
k ++ ) * d ++ = dstv [ k ] * 255 ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
d += d_line_inc ;
s += s_line_inc ;
}
fz_drop_color_converter ( ctx , & cc ) ;
}
/* Strategy 2: tiny image — direct per-pixel conversion is cheapest. */
else if ( w * h < 256 ) {
fz_color_converter cc ;
fz_find_color_converter ( ctx , & cc , NULL , ds , ss , color_params ) ;
while ( h -- ) {
size_t ww = w ;
while ( ww -- ) {
for ( k = 0 ;
k < srcn ;
k ++ ) srcv [ k ] = * s ++ / 255.0f ;
cc . convert ( ctx , & cc , dstv , srcv ) ;
for ( k = 0 ;
k < dstn ;
k ++ ) * d ++ = dstv [ k ] * 255 ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
d += d_line_inc ;
s += s_line_inc ;
}
fz_drop_color_converter ( ctx , & cc ) ;
}
/* Strategy 3: 1-component source — 256-entry lookup table covers all
 * possible inputs. */
else if ( srcn == 1 ) {
unsigned char lookup [ FZ_MAX_COLORS * 256 ] ;
fz_color_converter cc ;
fz_find_color_converter ( ctx , & cc , NULL , ds , ss , color_params ) ;
for ( i = 0 ;
i < 256 ;
i ++ ) {
srcv [ 0 ] = i / 255.0f ;
cc . convert ( ctx , & cc , dstv , srcv ) ;
for ( k = 0 ;
k < dstn ;
k ++ ) lookup [ i * dstn + k ] = dstv [ k ] * 255 ;
}
fz_drop_color_converter ( ctx , & cc ) ;
while ( h -- ) {
size_t ww = w ;
while ( ww -- ) {
i = * s ++ ;
for ( k = 0 ;
k < dstn ;
k ++ ) * d ++ = lookup [ i * dstn + k ] ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
d += d_line_inc ;
s += s_line_inc ;
}
}
/* Strategy 4: hash-cache converted colours; also fast-path a run of
 * identical consecutive source pixels (sold/dold remember the last one).
 * dummy guarantees the first pixel never matches sold. */
else {
fz_hash_table * lookup ;
unsigned char * color ;
unsigned char dummy = s [ 0 ] ^ 255 ;
unsigned char * sold = & dummy ;
unsigned char * dold ;
fz_color_converter cc ;
lookup = fz_new_hash_table ( ctx , 509 , srcn , - 1 , NULL ) ;
fz_find_color_converter ( ctx , & cc , NULL , ds , ss , color_params ) ;
fz_try ( ctx ) {
while ( h -- ) {
size_t ww = w ;
while ( ww -- ) {
if ( * s == * sold && memcmp ( sold , s , srcn ) == 0 ) {
sold = s ;
memcpy ( d , dold , dstn ) ;
d += dstn ;
s += srcn ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
else {
sold = s ;
dold = d ;
color = fz_hash_find ( ctx , lookup , s ) ;
if ( color ) {
memcpy ( d , color , dstn ) ;
s += srcn ;
d += dstn ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
else {
for ( k = 0 ;
k < srcn ;
k ++ ) srcv [ k ] = * s ++ / 255.0f ;
cc . convert ( ctx , & cc , dstv , srcv ) ;
for ( k = 0 ;
k < dstn ;
k ++ ) * d ++ = dstv [ k ] * 255 ;
fz_hash_insert ( ctx , lookup , s - srcn , d - dstn ) ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
}
}
d += d_line_inc ;
s += s_line_inc ;
}
}
fz_always ( ctx ) fz_drop_color_converter ( ctx , & cc ) ;
fz_catch ( ctx ) fz_rethrow ( ctx ) ;
fz_drop_hash_table ( ctx , lookup ) ;
}
}
| 0
|
156,991
|
// Navigates the web contents one entry forward in session history.  When
// custom site instances are in use, suppresses the renderer process restart
// for this one navigation first.
void WebContents::GoForward() {
  if (!ElectronBrowserClient::Get()->CanUseCustomSiteInstance()) {
    electron::ElectronBrowserClient::SuppressRendererProcessRestartForOnce();
  }
  web_contents()->GetController().GoForward();
}
| 0
|
131,915
|
// Enclave-side umask(): forwards the call to the untrusted host via the
// syscall dispatcher and returns the host's previous mask.
mode_t enc_untrusted_umask(mode_t mask) {
  return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_umask,
                                             mask);
}
| 0
|
86,366
|
/*
 * Node constructor for the feSpecularLighting filter primitive.  Allocates
 * the primitive zero-initialized, fills in the SVG-spec defaults
 * (surfaceScale/specularConstant/specularExponent = 1, lighting colour =
 * opaque white) and wraps it in a new rust cnode with the set_atts/draw/free
 * callbacks.  Ownership of the allocation passes to the returned node.
 */
rsvg_new_filter_primitive_specular_lighting (const char *element_name, RsvgNode *parent)
{
RsvgFilterPrimitiveSpecularLighting *filter;
filter = g_new0 (RsvgFilterPrimitiveSpecularLighting, 1);
filter->super.in = g_string_new ("none");
filter->super.result = g_string_new ("none");
filter->surfaceScale = 1;
filter->specularConstant = 1;
filter->specularExponent = 1;
filter->lightingcolor = 0xFFFFFFFF;
filter->super.render = rsvg_filter_primitive_specular_lighting_render;
return rsvg_rust_cnode_new (RSVG_NODE_TYPE_FILTER_PRIMITIVE_SPECULAR_LIGHTING,
parent,
rsvg_state_new (),
filter,
rsvg_filter_primitive_specular_lighting_set_atts,
rsvg_filter_draw,
rsvg_filter_primitive_free);
}
| 0
|
159,480
|
// Releases the underlying buffer only when this object owns it (do_free set
// by whoever handed us the allocation).
LibRaw_byte_buffer::~LibRaw_byte_buffer()
{
if(do_free) free(buf);
}
| 0
|
260,192
|
// Repopulates the dialog from device details |d|.  Fields are cleared and
// re-filled, the type combo is synced to d's type, and the change-tracking
// slots are (re)connected AFTER the initial population so that programmatic
// setText/setValue calls above do not mark the form as modified.
// |create| shows the info label; editing is disabled while a non-local
// device is connected.
void RemoteDevicePropertiesWidget::update(const RemoteFsDevice::Details &d, bool create, bool isConnected)
{
int t=d.isLocalFile() ? Type_File : Type_SshFs;
setEnabled(d.isLocalFile() || !isConnected);
infoLabel->setVisible(create);
orig=d;
name->setText(d.name);
sshPort->setValue(22);
connectionNote->setVisible(!d.isLocalFile() && isConnected);
sshFolder->setText(QString());
sshHost->setText(QString());
sshUser->setText(QString());
fileFolder->setText(QString());
switch (t) {
case Type_SshFs: {
sshFolder->setText(d.url.path());
// Port 0 means "unspecified" in the URL; keep the default of 22.
if (0!=d.url.port()) {
sshPort->setValue(d.url.port());
}
sshHost->setText(d.url.host());
sshUser->setText(d.url.userName());
sshExtra->setText(d.extraOptions);
break;
}
case Type_File:
fileFolder->setText(d.url.path());
break;
}
name->setEnabled(d.isLocalFile() || !isConnected);
connect(type, SIGNAL(currentIndexChanged(int)), this, SLOT(setType()));
// Select the combo entry whose payload matches the device type.
for (int i=1; i<type->count(); ++i) {
if (type->itemData(i).toInt()==t) {
type->setCurrentIndex(i);
stackedWidget->setCurrentIndex(i);
break;
}
}
connect(name, SIGNAL(textChanged(const QString &)), this, SLOT(checkSaveable()));
connect(sshHost, SIGNAL(textChanged(const QString &)), this, SLOT(checkSaveable()));
connect(sshUser, SIGNAL(textChanged(const QString &)), this, SLOT(checkSaveable()));
connect(sshFolder, SIGNAL(textChanged(const QString &)), this, SLOT(checkSaveable()));
connect(sshPort, SIGNAL(valueChanged(int)), this, SLOT(checkSaveable()));
connect(sshExtra, SIGNAL(textChanged(const QString &)), this, SLOT(checkSaveable()));
connect(fileFolder, SIGNAL(textChanged(const QString &)), this, SLOT(checkSaveable()));
modified=false;
setType();
checkSaveable();
}
| 0
|
201,836
|
// Drops in-flight navigation records that are superseded or stale: any
// record in the same tab as |navigation_id| (a new navigation replaced it)
// or older than the configured max navigation lifetime.  Also lets the
// stats collector discard its abandoned stats.
void LoadingDataCollector::CleanupAbandonedNavigations(
    const NavigationID& navigation_id) {
  if (stats_collector_)
    stats_collector_->CleanupAbandonedStats();
  static const base::TimeDelta max_navigation_age =
      base::TimeDelta::FromSeconds(config_.max_navigation_lifetime_seconds);
  base::TimeTicks time_now = base::TimeTicks::Now();
  for (auto it = inflight_navigations_.begin();
       it != inflight_navigations_.end();) {
    if ((it->first.tab_id == navigation_id.tab_id) ||
        (time_now - it->first.creation_time > max_navigation_age)) {
      // erase(it++): advance before erasing so the iterator stays valid.
      inflight_navigations_.erase(it++);
    } else {
      ++it;
    }
  }
}
| 0
|
52,082
|
/*
 * Parse the ClientHello SessionTicket extension (RFC 5077) on the server.
 *
 * Returns 0 in all handled situations: a missing/empty/invalid/expired
 * ticket simply means a full handshake (with a fresh ticket queued via
 * handshake->new_session_ticket).  Only a valid ticket flips
 * handshake->resume and replaces session_negotiate with the restored
 * session.  Tickets are rejected during renegotiation.
 */
static int ssl_parse_session_ticket_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t len )
{
int ret;
mbedtls_ssl_session session;
mbedtls_ssl_session_init( &session );
/* No ticket callbacks configured: tickets are disabled entirely. */
if( ssl->conf->f_ticket_parse == NULL ||
ssl->conf->f_ticket_write == NULL )
{
return( 0 );
}
/* Remember the client asked us to send a new ticket */
ssl->handshake->new_session_ticket = 1;
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket length: %d", len ) );
if( len == 0 )
return( 0 );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket rejected: renegotiating" ) );
return( 0 );
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
/*
 * Failures are ok: just ignore the ticket and proceed.
 */
if( ( ret = ssl->conf->f_ticket_parse( ssl->conf->p_ticket, &session,
buf, len ) ) != 0 )
{
mbedtls_ssl_session_free( &session );
if( ret == MBEDTLS_ERR_SSL_INVALID_MAC )
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket is not authentic" ) );
else if( ret == MBEDTLS_ERR_SSL_SESSION_TICKET_EXPIRED )
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket is expired" ) );
else
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_ticket_parse", ret );
return( 0 );
}
/*
 * Keep the session ID sent by the client, since we MUST send it back to
 * inform them we're accepting the ticket (RFC 5077 section 3.4)
 */
session.id_len = ssl->session_negotiate->id_len;
memcpy( &session.id, ssl->session_negotiate->id, session.id_len );
mbedtls_ssl_session_free( ssl->session_negotiate );
memcpy( ssl->session_negotiate, &session, sizeof( mbedtls_ssl_session ) );
/* Zeroize instead of free as we copied the content */
mbedtls_zeroize( &session, sizeof( mbedtls_ssl_session ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "session successfully restored from ticket" ) );
ssl->handshake->resume = 1;
/* Don't send a new ticket after all, this one is OK */
ssl->handshake->new_session_ticket = 0;
return( 0 );
}
| 0
|
64,474
|
/*
 * handle_store() - register a started managed thread in the global
 * tid->internal-thread hash table, under the threads lock.  Removes the
 * thread from the threads_starting_up set first.  Returns FALSE (without
 * storing) when the runtime is shutting down, TRUE otherwise.  The threads
 * table is lazily created and GC-registered on first use.
 */
static gboolean handle_store(MonoThread *thread)
{
mono_threads_lock ();
THREAD_DEBUG (g_message ("%s: thread %p ID %"G_GSIZE_FORMAT, __func__, thread, (gsize)thread->internal_thread->tid));
if (threads_starting_up)
mono_g_hash_table_remove (threads_starting_up, thread);
if (shutting_down) {
mono_threads_unlock ();
return FALSE;
}
if(threads==NULL) {
MONO_GC_REGISTER_ROOT_FIXED (threads);
threads=mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
}
/* We don't need to duplicate thread->handle, because it is
 * only closed when the thread object is finalized by the GC.
 */
g_assert (thread->internal_thread);
mono_g_hash_table_insert(threads, (gpointer)(gsize)(thread->internal_thread->tid),
thread->internal_thread);
mono_threads_unlock ();
return TRUE;
}
| 0
|
82,640
|
/*
 * and_code_range_buf() - intersect two code-point range buffers, writing the
 * result (a fresh BBuf, or NULL for the empty set) to *pbuf.
 *
 * Each buffer encodes: a count n followed by n [from,to] pairs.  notX flags
 * mean "complement of buffer X".  A NULL buffer stands for the empty set
 * (or, complemented, the full set).  If not1 is set the operands are
 * swapped (SWAP_BB_NOT) so the computation only needs the cases
 * "1 AND 2" and "1 AND (not 2)".
 * Returns 0 on success or a negative error from the range-add helpers.
 */
and_code_range_buf(BBuf* bbuf1, int not1, BBuf* bbuf2, int not2, BBuf** pbuf)
{
int r;
OnigCodePoint i, j, n1, n2, *data1, *data2;
OnigCodePoint from, to, from1, to1, from2, to2;
*pbuf = (BBuf* )NULL;
if (IS_NULL(bbuf1)) {
if (not1 != 0 && IS_NOT_NULL(bbuf2)) /* not1 != 0 -> not2 == 0 */
return bbuf_clone(pbuf, bbuf2);
return 0;
}
else if (IS_NULL(bbuf2)) {
if (not2 != 0)
return bbuf_clone(pbuf, bbuf1);
return 0;
}
if (not1 != 0)
SWAP_BB_NOT(bbuf1, not1, bbuf2, not2);
data1 = (OnigCodePoint* )(bbuf1->p);
data2 = (OnigCodePoint* )(bbuf2->p);
GET_CODE_POINT(n1, data1);
GET_CODE_POINT(n2, data2);
data1++;
data2++;
if (not2 == 0 && not1 == 0) { /* 1 AND 2 */
/* Pairwise overlap of the two sorted range lists. */
for (i = 0; i < n1; i++) {
from1 = data1[i*2];
to1 = data1[i*2+1];
for (j = 0; j < n2; j++) {
from2 = data2[j*2];
to2 = data2[j*2+1];
if (from2 > to1) break;
if (to2 < from1) continue;
from = MAX(from1, from2);
to = MIN(to1, to2);
r = add_code_range_to_buf(pbuf, from, to);
if (r != 0) return r;
}
}
}
else if (not1 == 0) { /* 1 AND (not 2) */
/* Subtract each range of buffer 2 from each range of buffer 1. */
for (i = 0; i < n1; i++) {
from1 = data1[i*2];
to1 = data1[i*2+1];
r = and_code_range1(pbuf, from1, to1, data2, n2);
if (r != 0) return r;
}
}
return 0;
}
| 0
|
330,671
|
/*
 * yuy2toyv12() - convert packed YUY2 (Y0 U Y1 V) to planar YV12.
 *
 * Rows are processed two at a time: the first row of each pair yields both
 * luma and (horizontally paired) chroma; the second row contributes luma
 * only, i.e. chroma is vertically subsampled by dropping it from odd rows.
 * chromWidth = width/2 is the number of YUYV pairs (and chroma samples) per
 * row.  The MMX path unpacks with mask/shift/pack and writes through
 * non-temporal stores (MOVNTQ), finishing with EMMS+SFENCE; the C fallback
 * does the same byte shuffling scalar-wise.
 */
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
long width, long height,
long lumStride, long chromStride, long srcStride)
{
long y;
const long chromWidth= width>>1;
for(y=0; y<height; y+=2)
{
#ifdef HAVE_MMX
/* Even row: split YUYV into Y plane plus interleaved UV, then split UV. */
asm volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
ASMALIGN16
"1: \n\t"
PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
"movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
"movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
"movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
"movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
"psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
"psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
"pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
"pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
"packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
"packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t"
"movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8)
"movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12)
"movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
"movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
"psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
"psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
"pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
"pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
"packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
"packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t"
"movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
"movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
"psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
"psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
"pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
"pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
"packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
"packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
"add $8, %%"REG_a" \n\t"
"cmp %4, %%"REG_a" \n\t"
" jb 1b \n\t"
::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
: "memory", "%"REG_a
);
ydst += lumStride;
src += srcStride;
/* Odd row: extract luma only; chroma of this row is discarded. */
asm volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
ASMALIGN16
"1: \n\t"
PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
"movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
"movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
"movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8)
"movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12)
"pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
"pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
"pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
"pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
"packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
"packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t"
MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t"
"add $8, %%"REG_a" \n\t"
"cmp %4, %%"REG_a" \n\t"
" jb 1b \n\t"
::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
: "memory", "%"REG_a
);
#else
long i;
for(i=0; i<chromWidth; i++)
{
ydst[2*i+0] = src[4*i+0];
udst[i] = src[4*i+1];
ydst[2*i+1] = src[4*i+2];
vdst[i] = src[4*i+3];
}
ydst += lumStride;
src += srcStride;
for(i=0; i<chromWidth; i++)
{
ydst[2*i+0] = src[4*i+0];
ydst[2*i+1] = src[4*i+2];
}
#endif
udst += chromStride;
vdst += chromStride;
ydst += lumStride;
src += srcStride;
}
#ifdef HAVE_MMX
/* Leave MMX state and flush the non-temporal write buffers. */
asm volatile( EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
#endif
}
| 0
|
69,264
|
/*
 * validate_cursor() - make sure the current window's cached cursor
 * screen position (w_wrow/w_wcol) is up to date, recomputing it with
 * curs_columns() if the cursor moved since it was last validated.
 */
validate_cursor(void)
{
check_cursor_moved(curwin);
if ((curwin->w_valid & (VALID_WCOL|VALID_WROW)) != (VALID_WCOL|VALID_WROW))
curs_columns(TRUE);
}
| 0
|
118,088
|
/*
 * 2x2 reference inverse DCT, "add" variant: transform the coefficient
 * block in place, then add the clamped result onto the destination pixels.
 */
static void ff_jref_idct2_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
ff_j_rev_dct2 (block);
add_pixels_clamped2_c(block, dest, line_size);
}
| 0
|
218,225
|
/*
 * Build (or size) the MPI "SAS IO Unit" configuration page 1.
 *
 * Always returns the page size.  When @data is non-NULL, the packed header
 * has been allocated by MPTSAS_CONFIG_PACK_EXT and the per-phy records are
 * filled in here: one 12-byte entry per port, advertising a 1.5-3.0 Gb/s
 * link and reporting an SSP end device when a SCSI device is attached to
 * that phy, MPI_SAS_DEVICE_INFO_NO_DEVICE otherwise.  @address is unused.
 */
size_t mptsas_config_sas_io_unit_1(MPTSASState *s, uint8_t **data, int address)
{
size_t size = MPTSAS_CONFIG_PACK_EXT(1, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x07,
"*w*w*w*wb*b*b*b"
repl(MPTSAS_NUM_PORTS, "*s12"),
MPTSAS_NUM_PORTS);
if (data) {
size_t ofs = size - MPTSAS_NUM_PORTS * MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE;
int i;
for (i = 0; i < MPTSAS_NUM_PORTS; i++) {
SCSIDevice *dev = mptsas_phy_get_device(s, i, NULL, NULL);
fill(*data + ofs, MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE,
"bbbblww", i, 0, 0,
(MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5,
(dev
? MPI_SAS_DEVICE_INFO_END_DEVICE | MPI_SAS_DEVICE_INFO_SSP_TARGET
: MPI_SAS_DEVICE_INFO_NO_DEVICE),
0, 0);
ofs += MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE;
}
/* All per-port records must exactly fill the rest of the page. */
assert(ofs == size);
}
return size;
}
| 0
|
231,021
|
// Defaulted-out-of-line destructor; no owned resources to release here.
AutofillDialogViews::NotificationArea::~NotificationArea() {}
| 0
|
444,600
|
// Verifies that receiving headers for a request whose body is still pending
// (end_stream == false) arms the per-request timeout and does NOT disarm it:
// enableTimer must fire exactly once and disableTimer never, and no
// downstream_rq_timeout stat is recorded since the timer hasn't expired.
TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteRequestWithHeader) {
  request_timeout_ = std::chrono::milliseconds(10);
  setup(false, "");
  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
    Event::MockTimer* request_timer = setUpTimer();
    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1);
    EXPECT_CALL(*request_timer, disableTimer()).Times(0);
    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
    RequestHeaderMapPtr headers{
        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
    // the second parameter 'false' leaves the stream open
    decoder->decodeHeaders(std::move(headers), false);
    return Http::okStatus();
  }));
  Buffer::OwnedImpl fake_input("1234");
  conn_manager_->onData(fake_input, false); // kick off request
  EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value());
}
| 0
|
335,801
|
/*
 * vhost_dev_sync_region() - scan the vhost dirty log for the intersection
 * of [mfirst,mlast] (memory range) and [rfirst,rlast] (requested range) and
 * mark the corresponding guest pages dirty.
 *
 * dev->log is an array of vhost_log_chunk_t bitmaps: each chunk covers
 * VHOST_LOG_CHUNK bytes and each set bit one VHOST_LOG_PAGE page.  Chunks
 * are fetched-and-cleared atomically because the vhost backend writes them
 * concurrently.
 *
 * Fix: the empty-intersection check (end < start) now happens BEFORE the
 * assertions and the log-pointer arithmetic.  Previously, disjoint ranges
 * could trip assert(start / VHOST_LOG_CHUNK < dev->log_size) — aborting the
 * process — and compute out-of-range pointers for a range that needs no
 * work at all (matches upstream QEMU ordering in hw/virtio/vhost.c).
 */
static void vhost_dev_sync_region(struct vhost_dev *dev,
uint64_t mfirst, uint64_t mlast,
uint64_t rfirst, uint64_t rlast)
{
uint64_t start = MAX(mfirst, rfirst);
uint64_t end = MIN(mlast, rlast);
vhost_log_chunk_t *from;
vhost_log_chunk_t *to;
uint64_t addr;
/* No overlap between the two ranges: nothing to sync. */
if (end < start) {
return;
}
assert(end / VHOST_LOG_CHUNK < dev->log_size);
assert(start / VHOST_LOG_CHUNK < dev->log_size);
from = dev->log + start / VHOST_LOG_CHUNK;
to = dev->log + end / VHOST_LOG_CHUNK + 1;
/* Align the page address down to the start of the first chunk. */
addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;
for (;from < to; ++from) {
vhost_log_chunk_t log;
int bit;
/* We first check with non-atomic: much cheaper,
 * and we expect non-dirty to be the common case. */
if (!*from) {
addr += VHOST_LOG_CHUNK;
continue;
}
/* Data must be read atomically. We don't really
 * need the barrier semantics of __sync
 * builtins, but it's easier to use them than
 * roll our own. */
log = __sync_fetch_and_and(from, 0);
while ((bit = sizeof(log) > sizeof(int) ?
ffsll(log) : ffs(log))) {
bit -= 1;
cpu_physical_memory_set_dirty(addr + bit * VHOST_LOG_PAGE);
log &= ~(0x1ull << bit);
}
addr += VHOST_LOG_CHUNK;
}
}
| 1
|
223,706
|
// Default constructor; no member initialization is required.
ReceiveFileWriterCallback()
{
}
| 0
|
240,278
|
void ChromotingInstance::PauseVideo(bool pause) {
if (!IsConnected()) {
return;
}
protocol::VideoControl video_control;
video_control.set_enable(!pause);
host_connection_->host_stub()->ControlVideo(video_control);
}
| 0
|
314,035
|
// Runs |callback| on every nearest-descendant local root of this frame.
// Breadth-first walk of the frame tree; descent stops at each local root,
// so only the *immediate* local roots below this node are visited.
void RenderFrameHostImpl::ForEachImmediateLocalRoot(
    const base::Callback<void(RenderFrameHostImpl*)>& callback) {
  if (frame_tree_node_->child_count() == 0)
    return;
  base::queue<FrameTreeNode*> pending;
  for (size_t i = 0; i < frame_tree_node_->child_count(); ++i)
    pending.push(frame_tree_node_->child_at(i));
  while (!pending.empty()) {
    FrameTreeNode* node = pending.front();
    pending.pop();
    RenderFrameHostImpl* host = node->current_frame_host();
    if (host->is_local_root()) {
      callback.Run(host);
    } else {
      // Not a local root: keep descending into its children.
      for (size_t i = 0; i < node->child_count(); ++i)
        pending.push(node->child_at(i));
    }
  }
}
| 0
|
466,323
|
// Integration test: a filter returning StopAllIterationAndWatermark from
// encodeHeaders must buffer the response without losing data, even when the
// upstream stream window is at its minimum (making watermarks easy to hit).
// Verifies the complete response body size accounts for filter-added data.
TEST_P(DownstreamProtocolIntegrationTest, TestEncodeHeadersReturnsStopAllWatermark) {
  // Metadata is not supported in QUICHE.
  EXCLUDE_DOWNSTREAM_HTTP3
  config_helper_.addFilter(R"EOF(
name: encode-headers-return-stop-all-filter
)EOF");
  config_helper_.addConfigModifier(
      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
              hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); });
  // Sets initial stream window to min value to make the upstream sensitive to a low watermark.
  config_helper_.addConfigModifier(
      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
              hcm) -> void {
        hcm.mutable_http2_protocol_options()->mutable_initial_stream_window_size()->set_value(
            ::Envoy::Http2::Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE);
      });
  initialize();
  codec_client_ = makeHttpConnection(lookupPort("http"));
  // Upstream responds with headers, data and trailers.
  auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10);
  waitForNextUpstreamRequest();
  changeHeadersForStopAllTests(default_response_headers_, true);
  upstream_request_->encodeHeaders(default_response_headers_, false);
  for (int i = 0; i < count_ - 1; i++) {
    upstream_request_->encodeData(size_, false);
  }
  // Gives buffer 1s to react to buffer limit.
  absl::SleepFor(absl::Seconds(1));
  upstream_request_->encodeData(size_, false);
  Http::TestResponseTrailerMapImpl response_trailers{{"response", "trailer"}};
  upstream_request_->encodeTrailers(response_trailers);
  ASSERT_TRUE(response->waitForEndStream());
  ASSERT_TRUE(response->complete());
  // Data is added in encodeData for all protocols, and encodeTrailers for HTTP/2 and above.
  int times_added = (upstreamProtocol() == FakeHttpConnection::Type::HTTP1 ||
                     downstreamProtocol() == Http::CodecClient::Type::HTTP1)
                        ? 1
                        : 2;
  if (downstreamProtocol() == Http::CodecClient::Type::HTTP1 &&
      upstreamProtocol() == FakeHttpConnection::Type::HTTP3) {
    // TODO(alyssawilk) Figure out why the bytes mismatch with the test expectation below.
    return;
  }
  EXPECT_EQ(count_ * size_ + added_decoded_data_size_ * times_added, response->body().size());
}
| 0
|
410,458
|
// Seed the process PRNG. Prefers POSIX random()'s seeding function when
// HAVE_RANDOM is defined, otherwise falls back to the C library's srand().
QUtil::srandom(unsigned int seed)
{
#ifdef HAVE_RANDOM
    ::srandom(seed);
#else
    srand(seed);
#endif
}
| 0
|
184,638
|
// Called when an XHR finishes loading: drop any pending replay data held
// for this loader client. The identifier/string parameters are unused here.
void InspectorResourceAgent::didFinishXHRLoading(ThreadableLoaderClient* client, unsigned long identifier, ScriptString sourceString, const String&, const String&, unsigned)
{
    m_pendingXHRReplayData.remove(client);
}
| 0
|
316,192
|
// Kick off the URL request for this fetch. A manifest fetch during a full
// update check bypasses the HTTP cache; otherwise, if we have a previously
// stored response, add conditional (If-Modified-Since/If-None-Match style)
// headers so the server can answer 304. NOTE(review): exact headers added
// are determined by AddConditionalHeaders — confirm there.
void AppCacheUpdateJob::URLFetcher::Start() {
  request_->set_first_party_for_cookies(job_->manifest_url_);
  if (fetch_type_ == MANIFEST_FETCH && job_->doing_full_update_check_)
    request_->SetLoadFlags(request_->load_flags() | net::LOAD_BYPASS_CACHE);
  else if (existing_response_headers_.get())
    AddConditionalHeaders(existing_response_headers_.get());
  request_->Start();
}
| 0
|
222,343
|
/*
 * Copy src into str, space-padded to exactly len bytes (no NUL
 * terminator is written). Adjacent byte pairs are swapped via the
 * str[i ^ 1] index, matching the 16-bit big-endian-in-words string
 * layout used by ATA IDENTIFY data.
 */
static void padstr(char *str, const char *src, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        /* Once src is exhausted, pad with spaces. */
        char c = *src ? *src++ : ' ';
        str[i ^ 1] = c;
    }
}
| 0
|
172,205
|
// Constraint-validation check: true when the element participates in
// validation and its current value exceeds the maximum length, considering
// only dirty (user-edited) values per CheckDirtyFlag.
bool HTMLInputElement::tooLong() const
{
    return willValidate() && tooLong(value(), CheckDirtyFlag);
}
| 0
|
299,664
|
/*
 * Commit all copy-on-write-only roots for this transaction.
 *
 * COWs the tree root's root node, runs the per-device stats/replace/qgroup
 * bookkeeping, then repeatedly updates every dirty cow-only root and
 * flushes the delayed refs and dirty block groups that updating them
 * generates, looping until no new dirty roots appear. Returns 0 or a
 * negative errno from the first failing step.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	/* COW the tree root's root node so it belongs to this transaction. */
	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);
	if (ret)
		return ret;
	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;
	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);
		/* The extent root is queued for switch_commits later, below. */
		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}
	/* Now flush any delayed refs generated by updating all of the roots */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;
	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;
		/*
		 * We're writing the dirty block groups, which could generate
		 * delayed refs, which could generate more dirty block groups,
		 * so we want to keep this flushing in this loop to make sure
		 * everything gets run.
		 */
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}
	/* Flushing above may have dirtied more cow-only roots; start over. */
	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;
	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);
	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;
	return 0;
}
| 0
|
94,801
|
/*
 * Record one more dirty SIT entry for the set covering @segno.
 *
 * If a set for the segment's group (START_SEGNO) already exists on @head,
 * bump its entry count and let adjust_sit_entry_set() re-sort the list;
 * otherwise allocate a new set and add it to the front of the list.
 */
static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}
	/* No existing set for this group: create one with a single entry. */
	ses = grab_sit_entry_set();
	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}
| 0
|
406,529
|
// Equal-field substitution hook: this base implementation performs no
// substitution and returns the item unchanged. NOTE(review): presumably
// overridden by field items that can be replaced — confirm in subclasses.
virtual Item *replace_equal_field(uchar * arg) { return this; }
| 0
|
387,640
|
/* Thin wrapper: report whether the xz stream @f is actually compressed,
 * delegating entirely to xz_compressed(). */
__libxml2_xzcompressed(xzFile f) {
    return xz_compressed(f);
}
| 0
|
136,573
|
/* {{{ Load a user-defined bitmap font from @file and return its index.
 *
 * Reads the architecture-dependent binary dump format documented below,
 * auto-detecting byte order by comparing the declared body size against
 * the actual file size.
 *
 * Fix: the header-read loop previously read into (char*)&font[b], which
 * advances by b * sizeof(gdFont) rather than b bytes, overflowing the
 * heap allocation on short reads. Read into (char *)font + b instead.
 */
PHP_FUNCTION(imageloadfont)
{
	char *file;
	int file_name, hdr_size = sizeof(gdFont) - sizeof(char *);
	int ind, body_size, n = 0, b, i, body_size_check;
	gdFontPtr font;
	php_stream *stream;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p", &file, &file_name) == FAILURE) {
		return;
	}

	stream = php_stream_open_wrapper(file, "rb", IGNORE_PATH | IGNORE_URL_WIN | REPORT_ERRORS, NULL);
	if (stream == NULL) {
		RETURN_FALSE;
	}

	/* Only supports a architecture-dependent binary dump format
	 * at the moment.
	 * The file format is like this on machines with 32-byte integers:
	 *
	 * byte 0-3:   (int) number of characters in the font
	 * byte 4-7:   (int) value of first character in the font (often 32, space)
	 * byte 8-11:  (int) pixel width of each character
	 * byte 12-15: (int) pixel height of each character
	 * bytes 16-:  (char) array with character data, one byte per pixel
	 *             in each character, for a total of
	 *             (nchars*width*height) bytes.
	 */
	font = (gdFontPtr) emalloc(sizeof(gdFont));
	b = 0;
	/* Read the fixed-size header; advance byte-wise into the struct. */
	while (b < hdr_size && (n = php_stream_read(stream, (char *)font + b, hdr_size - b))) {
		b += n;
	}
	if (!n) {
		efree(font);
		if (php_stream_eof(stream)) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "End of file while reading header");
		} else {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error while reading header");
		}
		php_stream_close(stream);
		RETURN_FALSE;
	}
	i = php_stream_tell(stream);
	php_stream_seek(stream, 0, SEEK_END);
	body_size_check = php_stream_tell(stream) - hdr_size;
	php_stream_seek(stream, i, SEEK_SET);
	body_size = font->w * font->h * font->nchars;
	if (body_size != body_size_check) {
		/* Mismatch: assume the file was written with the other byte
		 * order and retry with the fields byte-swapped. */
		font->w = FLIPWORD(font->w);
		font->h = FLIPWORD(font->h);
		font->nchars = FLIPWORD(font->nchars);
		body_size = font->w * font->h * font->nchars;
	}
	/* Reject headers whose dimensions would overflow nchars*h*w. */
	if (overflow2(font->nchars, font->h) || overflow2(font->nchars * font->h, font->w )) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error reading font, invalid font header");
		efree(font);
		php_stream_close(stream);
		RETURN_FALSE;
	}
	if (body_size != body_size_check) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error reading font");
		efree(font);
		php_stream_close(stream);
		RETURN_FALSE;
	}

	font->data = emalloc(body_size);
	b = 0;
	while (b < body_size && (n = php_stream_read(stream, &font->data[b], body_size - b))) {
		b += n;
	}
	if (!n) {
		efree(font->data);
		efree(font);
		if (php_stream_eof(stream)) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "End of file while reading body");
		} else {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error while reading body");
		}
		php_stream_close(stream);
		RETURN_FALSE;
	}
	php_stream_close(stream);

	/* Adding 5 to the font index so we will never have font indices
	 * that overlap with the old fonts (with indices 1-5). The first
	 * list index given out is always 1.
	 */
	ind = 5 + zend_list_insert(font, le_gd_font TSRMLS_CC);

	RETURN_LONG(ind);
}
| 0
|
237,702
|
// Issue a "CloseTab" automation command for the tab at |tab_index| in the
// browser window at |browser_index|. Returns false and fills |error_msg|
// on failure; the reply payload itself is discarded.
bool SendCloseTabJSONRequest(
    AutomationMessageSender* sender,
    int browser_index,
    int tab_index,
    std::string* error_msg) {
  DictionaryValue dict;
  dict.SetString("command", "CloseTab");
  dict.SetInteger("windex", browser_index);
  dict.SetInteger("tab_index", tab_index);
  DictionaryValue reply_dict;
  return SendAutomationJSONRequest(sender, dict, &reply_dict, error_msg);
}
| 0
|
35,156
|
/* Dump a human-readable summary of an AnnotationDefault attribute to
 * stderr, including its offset, name index and length, then recurse into
 * the default element value. Silently ignores NULL or wrongly-typed attrs. */
R_API void r_bin_java_print_annotation_default_attr_summary(RBinJavaAttrInfo *attr) {
	if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR) {
		eprintf ("Annotation Default Attribute Information:\n");
		eprintf ("   Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
		eprintf ("   Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
		eprintf ("   Attribute Length: %d\n", attr->length);
		r_bin_java_print_element_value_summary ((attr->info.annotation_default_attr.default_value));
	} else {
		// TODO: eprintf attr is invalid
	}
}
| 0
|
71,040
|
/*
 * mac80211 bss_info_changed callback: push BSS configuration changes down
 * to the rtl8xxxu hardware. Handles association (rate mask, beacon
 * handling, join sequence), ERP preamble/slot changes, BSSID updates and
 * basic-rate updates, each gated on its BSS_CHANGED_* bit.
 */
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_bss_conf *bss_conf, u32 changed)
{
	struct rtl8xxxu_priv *priv = hw->priv;
	struct device *dev = &priv->udev->dev;
	struct ieee80211_sta *sta;
	u32 val32;
	u8 val8;

	if (changed & BSS_CHANGED_ASSOC) {
		dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
		rtl8xxxu_set_linktype(priv, vif->type);
		if (bss_conf->assoc) {
			u32 ramask;
			int sgi = 0;
			/* The sta lookup must stay inside the RCU read section. */
			rcu_read_lock();
			sta = ieee80211_find_sta(vif, bss_conf->bssid);
			if (!sta) {
				dev_info(dev, "%s: ASSOC no sta found\n",
					 __func__);
				rcu_read_unlock();
				goto error;
			}
			if (sta->ht_cap.ht_supported)
				dev_info(dev, "%s: HT supported\n", __func__);
			if (sta->vht_cap.vht_supported)
				dev_info(dev, "%s: VHT supported\n", __func__);
			/* TODO: Set bits 28-31 for rate adaptive id */
			ramask = (sta->supp_rates[0] & 0xfff) |
				sta->ht_cap.mcs.rx_mask[0] << 12 |
				sta->ht_cap.mcs.rx_mask[1] << 20;
			if (sta->ht_cap.cap &
			    (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
				sgi = 1;
			rcu_read_unlock();
			priv->fops->update_rate_mask(priv, ramask, sgi);
			rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
			rtl8xxxu_stop_tx_beacon(priv);
			/* joinbss sequence */
			rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
					 0xc000 | bss_conf->aid);
			priv->fops->report_connect(priv, 0, true);
		} else {
			/* Disassociated: stop TSF updates and report down. */
			val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
			val8 |= BEACON_DISABLE_TSF_UPDATE;
			rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
			priv->fops->report_connect(priv, 0, false);
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		dev_dbg(dev, "Changed ERP_PREAMBLE: Use short preamble %i\n",
			bss_conf->use_short_preamble);
		val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
		if (bss_conf->use_short_preamble)
			val32 |= RSR_ACK_SHORT_PREAMBLE;
		else
			val32 &= ~RSR_ACK_SHORT_PREAMBLE;
		rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		dev_dbg(dev, "Changed ERP_SLOT: short_slot_time %i\n",
			bss_conf->use_short_slot);
		/* 9us short slot vs 20us long slot. */
		if (bss_conf->use_short_slot)
			val8 = 9;
		else
			val8 = 20;
		rtl8xxxu_write8(priv, REG_SLOT, val8);
	}

	if (changed & BSS_CHANGED_BSSID) {
		dev_dbg(dev, "Changed BSSID!\n");
		rtl8xxxu_set_bssid(priv, bss_conf->bssid);
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		dev_dbg(dev, "Changed BASIC_RATES!\n");
		rtl8xxxu_set_basic_rates(priv, bss_conf->basic_rates);
	}
error:
	return;
}
| 0
|
424,108
|
/*
 * Append one [Unit]-section property assignment "field=eq" to the D-Bus
 * message @m, translating the textual value into the right D-Bus type.
 * Returns 1 when the field was recognized and appended, 0 when the field
 * is not a unit property, and a negative errno on parse/marshal failure.
 */
static int bus_append_unit_property(sd_bus_message *m, const char *field, const char *eq) {
        ConditionType t = _CONDITION_TYPE_INVALID;
        bool is_condition = false;
        int r;

        if (STR_IN_SET(field,
                       "Description", "SourcePath", "OnFailureJobMode",
                       "JobTimeoutAction", "JobTimeoutRebootArgument",
                       "StartLimitAction", "FailureAction", "SuccessAction",
                       "RebootArgument", "CollectMode"))
                return bus_append_string(m, field, eq);

        if (STR_IN_SET(field,
                       "StopWhenUnneeded", "RefuseManualStart", "RefuseManualStop",
                       "AllowIsolate", "IgnoreOnIsolate", "DefaultDependencies"))
                return bus_append_parse_boolean(m, field, eq);

        if (STR_IN_SET(field, "JobTimeoutSec", "JobRunningTimeoutSec", "StartLimitIntervalSec"))
                return bus_append_parse_sec_rename(m, field, eq);

        if (streq(field, "StartLimitBurst"))
                return bus_append_safe_atou(m, field, eq);

        if (STR_IN_SET(field, "SuccessActionExitStatus", "FailureActionExitStatus")) {
                /* An empty value means "reset": encoded as -1. */
                if (isempty(eq))
                        r = sd_bus_message_append(m, "(sv)", field, "i", -1);
                else {
                        uint8_t u;
                        r = safe_atou8(eq, &u);
                        if (r < 0)
                                return log_error_errno(r, "Failed to parse %s=%s", field, eq);
                        r = sd_bus_message_append(m, "(sv)", field, "i", (int) u);
                }
                if (r < 0)
                        return bus_log_create_error(r);
                return 1;
        }

        if (unit_dependency_from_string(field) >= 0 ||
            STR_IN_SET(field, "Documentation", "RequiresMountsFor"))
                return bus_append_strv(m, field, eq, EXTRACT_QUOTES);

        /* Condition*= / Assert*= share the a(sbbs) encoding; the leading
         * '|' and '!' characters mark trigger and negate flags. */
        t = condition_type_from_string(field);
        if (t >= 0)
                is_condition = true;
        else
                t = assert_type_from_string(field);
        if (t >= 0) {
                if (isempty(eq))
                        r = sd_bus_message_append(m, "(sv)", is_condition ? "Conditions" : "Asserts", "a(sbbs)", 0);
                else {
                        const char *p = eq;
                        int trigger, negate;

                        trigger = *p == '|';
                        if (trigger)
                                p++;
                        negate = *p == '!';
                        if (negate)
                                p++;
                        r = sd_bus_message_append(m, "(sv)", is_condition ? "Conditions" : "Asserts", "a(sbbs)", 1,
                                                  field, trigger, negate, p);
                }
                if (r < 0)
                        return bus_log_create_error(r);
                return 1;
        }

        return 0;
}
| 0
|
182,410
|
// Re-associate this page with a (possibly relaunched) web process after the
// previous process went away, re-register the page ID, re-create the web
// page in the new process, and notify the client of the relaunch.
void WebPageProxy::reattachToWebProcess()
{
    m_isValid = true;

    context()->relaunchProcessIfNecessary();
    process()->addExistingWebPage(this, m_pageID);

    initializeWebPage();

    m_pageClient->didRelaunchProcess();
}
| 0
|
252,112
|
// Fills |j_guid_list_obj| with the GUID of every entry in the profile's
// SendTabToSelf model. No-op while the model has not finished loading.
static void JNI_SendTabToSelfAndroidBridge_GetAllGuids(
    JNIEnv* env,
    const JavaParamRef<jobject>& j_profile,
    const JavaParamRef<jobject>& j_guid_list_obj) {
  SendTabToSelfModel* model = GetModel(j_profile);
  if (!model->IsReady())
    return;
  for (const std::string& guid : model->GetAllGuids()) {
    ScopedJavaLocalRef<jstring> j_guid = ConvertUTF8ToJavaString(env, guid);
    Java_SendTabToSelfAndroidBridge_addToGuidList(env, j_guid_list_obj,
                                                  j_guid);
  }
}
| 0
|
332,764
|
/*
 * Resolve the numeric IP address string @addrstr and return the raw
 * sockaddr bytes in a freshly allocated buffer. On return *data is owned
 * by the caller (release with g_free) and *datalen holds its length.
 * Aborts the test on resolution failure.
 *
 * Fix: free the addrinfo list returned by getaddrinfo(); it was leaked.
 */
test_tls_get_ipaddr(const char *addrstr,
                    char **data,
                    int *datalen)
{
    struct addrinfo *res;
    struct addrinfo hints;

    memset(&hints, 0, sizeof(hints));
    hints.ai_flags = AI_NUMERICHOST;
    g_assert(getaddrinfo(addrstr, NULL, &hints, &res) == 0);

    *datalen = res->ai_addrlen;
    *data = g_new(char, *datalen);
    memcpy(*data, res->ai_addr, *datalen);
    freeaddrinfo(res);
}
| 1
|
441,242
|
// Remove both the enabled and disabled replication sync-policy groups from
// |policy|, effectively deleting this bucket's replication configuration.
void RGWDeleteBucketReplication_ObjStore_S3::update_sync_policy(rgw_sync_policy_info *policy)
{
  policy->groups.erase(enabled_group_id);
  policy->groups.erase(disabled_group_id);
}
| 0
|
184,262
|
// Flush both OMX ports: return every queued input buffer via
// EmptyBufferDone and every queued output buffer via FillBufferDone, drain
// the proxy-color-format (opaque) queues, then ask the driver to flush.
// All queue draining happens under m_lock; dev_flush runs after unlock.
// Returns false only when the driver flush fails.
bool omx_video::execute_flush_all(void)
{
    unsigned long p1 = 0; // Parameter - 1
    unsigned long p2 = 0; // Parameter - 2
    unsigned long ident = 0;
    bool bRet = true;

    DEBUG_PRINT_LOW("execute_flush_all");

    /*Generate EBD for all Buffers in the ETBq*/
    pthread_mutex_lock(&m_lock);
    while (m_etb_q.m_size) {
        m_etb_q.pop_entry(&p1,&p2,&ident);
        if (ident == OMX_COMPONENT_GENERATE_ETB) {
            // Buffer was queued but never handed to the encoder: account
            // for it as pending before completing it back to the client.
            pending_input_buffers++;
            empty_buffer_done(&m_cmp,(OMX_BUFFERHEADERTYPE *)p2);
        } else if (ident == OMX_COMPONENT_GENERATE_EBD) {
            empty_buffer_done(&m_cmp,(OMX_BUFFERHEADERTYPE *)p1);
        } else if(ident == OMX_COMPONENT_GENERATE_ETB_OPQ) {
            m_pCallbacks.EmptyBufferDone(&m_cmp,m_app_data,(OMX_BUFFERHEADERTYPE *)p2);
        }
    }

    if(mUseProxyColorFormat) {
        // Return the in-flight source frame and any queued meta buffers.
        if(psource_frame) {
            m_pCallbacks.EmptyBufferDone(&m_cmp,m_app_data,psource_frame);
            psource_frame = NULL;
        }
        while(m_opq_meta_q.m_size) {
            unsigned long p1,p2,id;
            m_opq_meta_q.pop_entry(&p1,&p2,&id);
            m_pCallbacks.EmptyBufferDone(&m_cmp,m_app_data,
                    (OMX_BUFFERHEADERTYPE *)p1);
        }
        if(pdest_frame){
            m_opq_pmem_q.insert_entry((unsigned long)pdest_frame,0,0);
            pdest_frame = NULL;
        }
    }

    /*Generate FBD for all Buffers in the FTBq*/
    DEBUG_PRINT_LOW("execute_output_flush");
    while (m_ftb_q.m_size) {
        m_ftb_q.pop_entry(&p1,&p2,&ident);
        if (ident == OMX_COMPONENT_GENERATE_FTB ) {
            pending_output_buffers++;
            fill_buffer_done(&m_cmp,(OMX_BUFFERHEADERTYPE *)p2);
        } else if (ident == OMX_COMPONENT_GENERATE_FBD) {
            fill_buffer_done(&m_cmp,(OMX_BUFFERHEADERTYPE *)p1);
        }
    }
    pthread_mutex_unlock(&m_lock);

    /*Check if there are buffers with the Driver*/
    if (dev_flush(PORT_INDEX_BOTH)) {
        DEBUG_PRINT_ERROR("ERROR: dev_flush() Failed");
        return false;
    }
    return bRet;
}
| 0
|
94,238
|
/*
 * Enable the hard-lockup (NMI) watchdog on @cpu using a hardware perf
 * counter. Idempotent: already-enabled CPUs return 0 immediately; an
 * existing-but-disabled event is simply re-enabled. On first setup,
 * creates the perf event and reports failure with a severity chosen by
 * the errno. Returns 0 on success or the PTR_ERR of the failed event.
 */
static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
	hw_nmi_watchdog_set_attr(wd_attr);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
	if (!IS_ERR(event)) {
		printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
		goto out_save;
	}

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
	else
		printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
| 0
|
43,712
|
/* Drop one reference on @node via its embedded configfs item. */
void o2nm_node_put(struct o2nm_node *node)
{
	config_item_put(&node->nd_item);
}
| 0
|
519,666
|
/* Hash-table key callback: return a field's name as the key and store its
 * length in *length. @buff points at a Field* slot in the table. */
static uchar *get_field_name(Field **buff, size_t *length,
                             my_bool not_used __attribute__((unused)))
{
  *length= (uint) strlen((*buff)->field_name);
  return (uchar*) (*buff)->field_name;
}
| 0
|
41,293
|
/*
 * Return the number of arguments actually passed to a named callout, as
 * recorded in its callout-list entry. Fails with
 * ONIGERR_INVALID_ARGUMENT when the entry is missing or the callout is
 * not a name-type callout.
 */
onig_get_passed_args_num_by_callout_args(OnigCalloutArgs* args)
{
  CalloutListEntry* entry = onig_reg_callout_list_at(args->regex, args->num);

  if (IS_NULL(entry) || entry->of != ONIG_CALLOUT_OF_NAME)
    return ONIGERR_INVALID_ARGUMENT;

  return entry->u.arg.passed_num;
}
| 0
|
252,439
|
// Notification handler: when the save-package finishes successfully, send
// the automation success reply (if the automation channel is still alive)
// and self-destruct. Any other notification type is a programming error.
// NOTE: |delete this| — the object must not be touched after Observe runs.
void SavePackageNotificationObserver::Observe(
    int type,
    const content::NotificationSource& source,
    const content::NotificationDetails& details) {
  if (type == content::NOTIFICATION_SAVE_PACKAGE_SUCCESSFULLY_FINISHED) {
    if (automation_) {
      AutomationJSONReply(automation_,
                          reply_message_.release()).SendSuccess(NULL);
    }
    delete this;
  } else {
    NOTREACHED();
  }
}
| 0
|
152,161
|
/*
 * Bid on whether the input is an ISO 9660 image. Skips the 32 KiB reserved
 * system area, then walks the volume descriptor table validating each
 * descriptor ("CD001" signature, known type). Returns 48 when a Primary
 * Volume Descriptor and a Set Terminator are both found, 0 when the data
 * is not ISO 9660, and -1 when outbid or on read failure.
 */
archive_read_format_iso9660_bid(struct archive_read *a, int best_bid)
{
	struct iso9660 *iso9660;
	ssize_t bytes_read;
	const unsigned char *p;
	int seenTerminator;

	/* If there's already a better bid than we can ever
	   make, don't bother testing. */
	if (best_bid > 48)
		return (-1);

	iso9660 = (struct iso9660 *)(a->format->data);

	/*
	 * Skip the first 32k (reserved area) and get the first
	 * 8 sectors of the volume descriptor table. Of course,
	 * if the I/O layer gives us more, we'll take it.
	 */
#define RESERVED_AREA	(SYSTEM_AREA_BLOCK * LOGICAL_BLOCK_SIZE)
	p = __archive_read_ahead(a,
	    RESERVED_AREA + 8 * LOGICAL_BLOCK_SIZE,
	    &bytes_read);
	if (p == NULL)
	    return (-1);

	/* Skip the reserved area. */
	bytes_read -= RESERVED_AREA;
	p += RESERVED_AREA;

	/* Check each volume descriptor. */
	seenTerminator = 0;
	for (; bytes_read > LOGICAL_BLOCK_SIZE;
	    bytes_read -= LOGICAL_BLOCK_SIZE, p += LOGICAL_BLOCK_SIZE) {
		/* Do not handle undefined Volume Descriptor Type. */
		if (p[0] >= 4 && p[0] <= 254)
			return (0);
		/* Standard Identifier must be "CD001" */
		if (memcmp(p + 1, "CD001", 5) != 0)
			return (0);
		if (isPVD(iso9660, p))
			continue;
		/* Prefer the first Joliet SVD found. */
		if (!iso9660->joliet.location) {
			if (isJolietSVD(iso9660, p))
				continue;
		}
		if (isBootRecord(iso9660, p))
			continue;
		if (isEVD(iso9660, p))
			continue;
		if (isSVD(iso9660, p))
			continue;
		if (isVolumePartition(iso9660, p))
			continue;
		if (isVDSetTerminator(iso9660, p)) {
			seenTerminator = 1;
			break;
		}
		return (0);
	}
	/*
	 * ISO 9660 format must have Primary Volume Descriptor and
	 * Volume Descriptor Set Terminator.
	 */
	if (seenTerminator && iso9660->primary.location > 16)
		return (48);

	/* We didn't find a valid PVD; return a bid of zero. */
	return (0);
}
| 0
|
246,113
|
// libvpx release-frame-buffer callback: hand the buffer back to the test
// fixture's free list. |user_priv| is the ExternalFrameBufferMD5Test set
// at registration time.
static int ReleaseVP9FrameBuffer(void *user_priv,
                                 vpx_codec_frame_buffer_t *fb) {
  ExternalFrameBufferMD5Test *const md5Test =
      reinterpret_cast<ExternalFrameBufferMD5Test*>(user_priv);
  return md5Test->fb_list_.ReturnFrameBuffer(fb);
}
| 0
|
435,771
|
/* Config-parse callback for .gitmodules entries: forward each key/value
 * to the submodule config machinery. @data is the owning repository. */
static int gitmodules_cb(const char *var, const char *value, void *data)
{
	struct repository *repo = data;
	return submodule_config_option(repo, var, value);
}
| 0
|
62,330
|
/* Stub: always allow the request. NOTE(review): likely the !CONFIG_PM
 * variant of this helper — confirm against the surrounding #ifdefs. */
static bool blk_pm_allow_request(struct request *rq)
{
	return true;
}
| 0
|
458,320
|
/* True when the batch already holds NETDEV_MAX_BURST packets and cannot
 * accept another. */
dp_packet_batch_is_full(const struct dp_packet_batch *batch)
{
    return dp_packet_batch_size(batch) == NETDEV_MAX_BURST;
}
| 0
|
369,774
|
/* Remove memory slot @slot_id from the SPICE server and mark the guest
 * slot inactive in the device state. */
static void qxl_del_memslot(PCIQXLDevice *d, uint32_t slot_id)
{
    dprint(d, 1, "%s: slot %d\n", __FUNCTION__, slot_id);
    qemu_spice_del_memslot(&d->ssd, MEMSLOT_GROUP_HOST, slot_id);
    d->guest_slots[slot_id].active = 0;
}
| 0
|
137,083
|
/* Free an evdns base, optionally failing outstanding requests first.
 * Takes the base lock; evdns_base_free_and_unlock is expected to release
 * it as part of teardown (hence no unlock here). */
evdns_base_free(struct evdns_base *base, int fail_requests)
{
	EVDNS_LOCK(base);
	evdns_base_free_and_unlock(base, fail_requests);
}
| 0
|
218,980
|
// Reports whether loading progressed since the last call. Both sources are
// queried unconditionally — do NOT fold this into a short-circuited `||`:
// each DidLoadingProgress() call resets that source's progress flag, so
// both must run every time.
bool WebMediaPlayerImpl::DidLoadingProgress() {
  DCHECK(main_task_runner_->BelongsToCurrentThread());
  const bool pipeline_progress = pipeline_controller_.DidLoadingProgress();
  const bool data_progress = buffered_data_source_host_.DidLoadingProgress();
  return pipeline_progress || data_progress;
}
| 0
|
232,655
|
// Out-of-line empty destructor; no explicit cleanup required.
AppListControllerDelegate::~AppListControllerDelegate() {}
| 0
|
59,035
|
/*
 * After a single instruction at @off is patched into @len instructions,
 * shift the start offset of every subprogram that begins after @off by
 * the growth (len - 1). The fake 'exit' subprog at index subprog_cnt is
 * adjusted as well, hence the inclusive loop bound.
 */
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
	int i;

	/* A 1-for-1 patch does not move anything. */
	if (len == 1)
		return;
	for (i = 0; i <= env->subprog_cnt; i++) {
		if (env->subprog_info[i].start > off)
			env->subprog_info[i].start += len - 1;
	}
}
| 0
|
61,496
|
/*
 * Initialize the synchronous block-cipher ops for @tfm: wire up the
 * setkey/encrypt/decrypt entry points from the algorithm, and place the
 * IV buffer in the tfm context area just past the (alignment-padded)
 * algorithm context.
 */
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	/* IV lives after the aligned algorithm context. */
	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}
| 0
|
38,953
|
/*
 * Prepare an RPC message for an NFSv4 RENAME: select the RENAME procedure,
 * attach the server's attribute bitmask to the arguments, record the
 * server in the results, and initialize the v4.1 sequence slots.
 */
static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_renameargs *arg = msg->rpc_argp;
	struct nfs_renameres *res = msg->rpc_resp;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
	arg->bitmask = server->attr_bitmask;
	res->server = server;
	nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
}
| 0
|
10,879
|
/* Set whether net-snmp prints enum values numerically. Note this toggles a
 * process-wide library setting (NETSNMP_DS_LIBRARY_ID), not a per-session
 * one. Returns TRUE on success, FALSE on bad arguments. */
PHP_FUNCTION(snmp_set_enum_print)
{
	long a1;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &a1) == FAILURE) {
		RETURN_FALSE;
	}
	netsnmp_ds_set_boolean(NETSNMP_DS_LIBRARY_ID, NETSNMP_DS_LIB_PRINT_NUMERIC_ENUM, (int) a1);
	RETURN_TRUE;
}
| 1
|
372,270
|
/*
 * Return TRUE if @hProfile can be modelled as a matrix-shaper transform:
 * gray profiles need only a gray TRC tag; RGB profiles need all three
 * colorant tags plus all three TRC tags. Any other color space is not a
 * matrix-shaper.
 */
cmsBool CMSEXPORT cmsIsMatrixShaper(cmsHPROFILE hProfile)
{
    cmsColorSpaceSignature space = cmsGetColorSpace(hProfile);

    if (space == cmsSigGrayData)
        return cmsIsTag(hProfile, cmsSigGrayTRCTag);

    if (space != cmsSigRgbData)
        return FALSE;

    return cmsIsTag(hProfile, cmsSigRedColorantTag) &&
           cmsIsTag(hProfile, cmsSigGreenColorantTag) &&
           cmsIsTag(hProfile, cmsSigBlueColorantTag) &&
           cmsIsTag(hProfile, cmsSigRedTRCTag) &&
           cmsIsTag(hProfile, cmsSigGreenTRCTag) &&
           cmsIsTag(hProfile, cmsSigBlueTRCTag);
}
| 0
|
185,248
|
// Rasterize glyph |c| at subpixel offset |xFrac|. The vertical subpixel
// offset |yFrac| is deliberately discarded (0 is passed through) —
// NOTE(review): presumably only horizontal hinting is supported; confirm.
GBool SplashFTFont::getGlyph(int c, int xFrac, int yFrac,
			     SplashGlyphBitmap *bitmap, int x0, int y0, SplashClip *clip, SplashClipResult *clipRes) {
  return SplashFont::getGlyph(c, xFrac, 0, bitmap, x0, y0, clip, clipRes);
}
| 0
|
212,532
|
// Returns the container policy declared on this document's owning frame
// element, or an empty policy when there is no frame or no owner.
const ParsedFeaturePolicy Document::GetOwnerContainerPolicy() const {
  return (frame_ && frame_->Owner()) ? frame_->Owner()->ContainerPolicy()
                                     : ParsedFeaturePolicy();
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.