idx
int64 | func
string | target
int64 |
|---|---|---|
487,662
|
void WebContents::IncrementCapturerCount(gin::Arguments* args) {
  // All three JS arguments are optional; defaults apply when absent.
  gfx::Size capture_size;
  bool want_stay_hidden = false;
  bool want_stay_awake = false;
  args->GetNext(&capture_size);   // optional size
  args->GetNext(&want_stay_hidden);  // optional stayHidden
  args->GetNext(&want_stay_awake);   // optional stayAwake
  // The scoped closure runner returned by content:: is deliberately dropped.
  std::ignore = web_contents()->IncrementCapturerCount(
      capture_size, want_stay_hidden, want_stay_awake);
}
| 0
|
425,152
|
/*
 * Plain accessor: returns the socket type this client was configured
 * to request when creating sockets. No validation or locking; reads
 * the private struct directly.
 */
g_socket_client_get_socket_type (GSocketClient *client)
{
return client->priv->type;
}
| 0
|
24,754
|
/*
 * Motion compensation for one 8x8 luma block (block index n, 0..3) of a
 * macroblock in VC-1 4MV mode, for reference direction dir (0 = forward,
 * 1 = backward). Selects the reference plane, clips the motion vector,
 * optionally runs edge emulation / range reduction / intensity
 * compensation on the source, then calls the (quarter-pel or half-pel)
 * put routine into s->dest[0] + off. Statement order is significant
 * throughout; do not reorder.
 */
static void vc1_mc_4mv_luma ( VC1Context * v , int n , int dir ) {
MpegEncContext * s = & v -> s ;
DSPContext * dsp = & v -> s . dsp ;
uint8_t * srcY ;
int dxy , mx , my , src_x , src_y ;
int off ;
/* fieldmv: per-block field motion flag, only meaningful for interlaced frames */
int fieldmv = ( v -> fcm == ILACE_FRAME ) ? v -> blk_mv_type [ s -> block_index [ n ] ] : 0 ;
int v_edge_pos = s -> v_edge_pos >> v -> field_mode ;
/* bail out if the needed reference picture is not available */
if ( ( ! v -> field_mode || ( v -> ref_field_type [ dir ] == 1 && v -> cur_field_type == 1 ) ) && ! v -> s . last_picture . f . data [ 0 ] ) return ;
mx = s -> mv [ dir ] [ n ] [ 0 ] ;
my = s -> mv [ dir ] [ n ] [ 1 ] ;
/* pick the reference luma plane based on direction and field parity */
if ( ! dir ) {
if ( v -> field_mode ) {
if ( ( v -> cur_field_type != v -> ref_field_type [ dir ] ) && v -> cur_field_type ) srcY = s -> current_picture . f . data [ 0 ] ;
else srcY = s -> last_picture . f . data [ 0 ] ;
}
else srcY = s -> last_picture . f . data [ 0 ] ;
}
else srcY = s -> next_picture . f . data [ 0 ] ;
if ( v -> field_mode ) {
/* adjust vertical MV when referencing the opposite field */
if ( v -> cur_field_type != v -> ref_field_type [ dir ] ) my = my - 2 + 4 * v -> cur_field_type ;
}
/* P-frame field mode, last block: derive a representative MV (median of
 * the dominant-field MVs) and store it for later B-frame prediction */
if ( s -> pict_type == AV_PICTURE_TYPE_P && n == 3 && v -> field_mode ) {
int same_count = 0 , opp_count = 0 , k ;
int chosen_mv [ 2 ] [ 4 ] [ 2 ] , f ;
int tx , ty ;
for ( k = 0 ;
k < 4 ;
k ++ ) {
f = v -> mv_f [ 0 ] [ s -> block_index [ k ] + v -> blocks_off ] ;
chosen_mv [ f ] [ f ? opp_count : same_count ] [ 0 ] = s -> mv [ 0 ] [ k ] [ 0 ] ;
chosen_mv [ f ] [ f ? opp_count : same_count ] [ 1 ] = s -> mv [ 0 ] [ k ] [ 1 ] ;
opp_count += f ;
same_count += 1 - f ;
}
f = opp_count > same_count ;
switch ( f ? opp_count : same_count ) {
case 4 : tx = median4 ( chosen_mv [ f ] [ 0 ] [ 0 ] , chosen_mv [ f ] [ 1 ] [ 0 ] , chosen_mv [ f ] [ 2 ] [ 0 ] , chosen_mv [ f ] [ 3 ] [ 0 ] ) ;
ty = median4 ( chosen_mv [ f ] [ 0 ] [ 1 ] , chosen_mv [ f ] [ 1 ] [ 1 ] , chosen_mv [ f ] [ 2 ] [ 1 ] , chosen_mv [ f ] [ 3 ] [ 1 ] ) ;
break ;
case 3 : tx = mid_pred ( chosen_mv [ f ] [ 0 ] [ 0 ] , chosen_mv [ f ] [ 1 ] [ 0 ] , chosen_mv [ f ] [ 2 ] [ 0 ] ) ;
ty = mid_pred ( chosen_mv [ f ] [ 0 ] [ 1 ] , chosen_mv [ f ] [ 1 ] [ 1 ] , chosen_mv [ f ] [ 2 ] [ 1 ] ) ;
break ;
case 2 : tx = ( chosen_mv [ f ] [ 0 ] [ 0 ] + chosen_mv [ f ] [ 1 ] [ 0 ] ) / 2 ;
ty = ( chosen_mv [ f ] [ 0 ] [ 1 ] + chosen_mv [ f ] [ 1 ] [ 1 ] ) / 2 ;
break ;
}
s -> current_picture . motion_val [ 1 ] [ s -> block_index [ 0 ] + v -> blocks_off ] [ 0 ] = tx ;
s -> current_picture . motion_val [ 1 ] [ s -> block_index [ 0 ] + v -> blocks_off ] [ 1 ] = ty ;
for ( k = 0 ;
k < 4 ;
k ++ ) v -> mv_f [ 1 ] [ s -> block_index [ k ] + v -> blocks_off ] = f ;
}
/* interlaced frame: pull the MV back inside the allowed pull-back range */
if ( v -> fcm == ILACE_FRAME ) {
int qx , qy ;
int width = s -> avctx -> coded_width ;
int height = s -> avctx -> coded_height >> 1 ;
qx = ( s -> mb_x * 16 ) + ( mx >> 2 ) ;
qy = ( s -> mb_y * 8 ) + ( my >> 3 ) ;
if ( qx < - 17 ) mx -= 4 * ( qx + 17 ) ;
else if ( qx > width ) mx -= 4 * ( qx - width ) ;
if ( qy < - 18 ) my -= 8 * ( qy + 18 ) ;
else if ( qy > height + 1 ) my -= 8 * ( qy - height - 1 ) ;
}
/* destination offset of this 8x8 block inside the macroblock */
if ( ( v -> fcm == ILACE_FRAME ) && fieldmv ) off = ( ( n > 1 ) ? s -> linesize : 0 ) + ( n & 1 ) * 8 ;
else off = s -> linesize * 4 * ( n & 2 ) + ( n & 1 ) * 8 ;
if ( v -> field_mode && v -> cur_field_type ) off += s -> current_picture_ptr -> f . linesize [ 0 ] ;
/* integer source position (MVs are in quarter-pel units) */
src_x = s -> mb_x * 16 + ( n & 1 ) * 8 + ( mx >> 2 ) ;
if ( ! fieldmv ) src_y = s -> mb_y * 16 + ( n & 2 ) * 4 + ( my >> 2 ) ;
else src_y = s -> mb_y * 16 + ( ( n > 1 ) ? 1 : 0 ) + ( my >> 2 ) ;
/* clip source coordinates; advanced profile uses coded dimensions */
if ( v -> profile != PROFILE_ADVANCED ) {
src_x = av_clip ( src_x , - 16 , s -> mb_width * 16 ) ;
src_y = av_clip ( src_y , - 16 , s -> mb_height * 16 ) ;
}
else {
src_x = av_clip ( src_x , - 17 , s -> avctx -> coded_width ) ;
if ( v -> fcm == ILACE_FRAME ) {
if ( src_y & 1 ) src_y = av_clip ( src_y , - 17 , s -> avctx -> coded_height + 1 ) ;
else src_y = av_clip ( src_y , - 18 , s -> avctx -> coded_height ) ;
}
else {
src_y = av_clip ( src_y , - 18 , s -> avctx -> coded_height + 1 ) ;
}
}
srcY += src_y * s -> linesize + src_x ;
if ( v -> field_mode && v -> ref_field_type [ dir ] ) srcY += s -> current_picture_ptr -> f . linesize [ 0 ] ;
if ( fieldmv && ! ( src_y & 1 ) ) v_edge_pos -- ;
if ( fieldmv && ( src_y & 1 ) && src_y < 4 ) src_y -- ;
/* take the slow path through the edge-emulation buffer when the source
 * region hangs over the picture edge, or when range reduction /
 * intensity compensation must rewrite the source pixels */
if ( v -> rangeredfrm || ( v -> mv_mode == MV_PMODE_INTENSITY_COMP ) || s -> h_edge_pos < 13 || v_edge_pos < 23 || ( unsigned ) ( src_x - s -> mspel ) > s -> h_edge_pos - ( mx & 3 ) - 8 - s -> mspel * 2 || ( unsigned ) ( src_y - ( s -> mspel << fieldmv ) ) > v_edge_pos - ( my & 3 ) - ( ( 8 + s -> mspel * 2 ) << fieldmv ) ) {
srcY -= s -> mspel * ( 1 + ( s -> linesize << fieldmv ) ) ;
s -> vdsp . emulated_edge_mc ( s -> edge_emu_buffer , srcY , s -> linesize , 9 + s -> mspel * 2 , ( 9 + s -> mspel * 2 ) << fieldmv , src_x - s -> mspel , src_y - ( s -> mspel << fieldmv ) , s -> h_edge_pos , v_edge_pos ) ;
srcY = s -> edge_emu_buffer ;
/* range reduction: halve pixel deviation around 128, in place */
if ( v -> rangeredfrm ) {
int i , j ;
uint8_t * src ;
src = srcY ;
for ( j = 0 ;
j < 9 + s -> mspel * 2 ;
j ++ ) {
for ( i = 0 ;
i < 9 + s -> mspel * 2 ;
i ++ ) src [ i ] = ( ( src [ i ] - 128 ) >> 1 ) + 128 ;
src += s -> linesize << fieldmv ;
}
}
/* intensity compensation: remap pixels through the luma LUT */
if ( v -> mv_mode == MV_PMODE_INTENSITY_COMP ) {
int i , j ;
uint8_t * src ;
src = srcY ;
for ( j = 0 ;
j < 9 + s -> mspel * 2 ;
j ++ ) {
for ( i = 0 ;
i < 9 + s -> mspel * 2 ;
i ++ ) src [ i ] = v -> luty [ src [ i ] ] ;
src += s -> linesize << fieldmv ;
}
}
srcY += s -> mspel * ( 1 + ( s -> linesize << fieldmv ) ) ;
}
/* finally, run the actual MC put: quarter-pel mspel path or half-pel path */
if ( s -> mspel ) {
dxy = ( ( my & 3 ) << 2 ) | ( mx & 3 ) ;
v -> vc1dsp . put_vc1_mspel_pixels_tab [ dxy ] ( s -> dest [ 0 ] + off , srcY , s -> linesize << fieldmv , v -> rnd ) ;
}
else {
dxy = ( my & 2 ) | ( ( mx & 2 ) >> 1 ) ;
if ( ! v -> rnd ) dsp -> put_pixels_tab [ 1 ] [ dxy ] ( s -> dest [ 0 ] + off , srcY , s -> linesize , 8 ) ;
else dsp -> put_no_rnd_pixels_tab [ 1 ] [ dxy ] ( s -> dest [ 0 ] + off , srcY , s -> linesize , 8 ) ;
}
}
| 0
|
394,018
|
/*
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: mask of task states that may be woken (task is woken only if
 *         p->state matches one of these bits)
 * @wake_flags: wake modifier flags (WF_*); WF_MIGRATED is OR-ed in below
 *              if the task is placed on a different CPU
 *
 * Returns 1 if the task's ->state was changed (a wakeup happened),
 * 0 otherwise. The memory-barrier pairing documented in the comments
 * below is load-bearing; do not reorder any of these statements.
 */
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
unsigned long flags;
int cpu, success = 0;
/*
* If we are going to wake up a thread waiting for CONDITION we
* need to ensure that CONDITION=1 done by the caller can not be
* reordered with p->state check below. This pairs with mb() in
* set_current_state() the waiting thread does.
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
if (!(p->state & state))
goto out;
trace_sched_waking(p);
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
/* fast path: task is still on a runqueue, just re-activate it there */
if (p->on_rq && ttwu_remote(p, wake_flags))
goto stat;
#ifdef CONFIG_SMP
/*
* Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
* possible to, falsely, observe p->on_cpu == 0.
*
* One must be running (->on_cpu == 1) in order to remove oneself
* from the runqueue.
*
* [S] ->on_cpu = 1; [L] ->on_rq
* UNLOCK rq->lock
* RMB
* LOCK rq->lock
* [S] ->on_rq = 0; [L] ->on_cpu
*
* Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
* from the consecutive calls to schedule(); the first switching to our
* task, the second putting it to sleep.
*/
smp_rmb();
/*
* If the owning (remote) cpu is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
*
* Pairs with the smp_store_release() in finish_lock_switch().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
*/
smp_cond_acquire(!p->on_cpu);
p->sched_contributes_to_load = !!task_contributes_to_load(p);
p->state = TASK_WAKING;
if (p->sched_class->task_waking)
p->sched_class->task_waking(p);
/* pick a runqueue for the wakeup, possibly migrating the task */
cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
if (task_cpu(p) != cpu) {
wake_flags |= WF_MIGRATED;
set_task_cpu(p, cpu);
}
#endif /* CONFIG_SMP */
ttwu_queue(p, cpu);
stat:
/* account the wakeup in schedstats if enabled */
if (schedstat_enabled())
ttwu_stat(p, cpu, wake_flags);
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
return success;
}
| 0
|
83,620
|
/* Print version, command-line usage and the active configuration file
 * location to stdout. Output is byte-identical to the single-printf
 * form; the text is merely emitted in smaller sequential writes. */
void usage() {
	printf("This is nbd-server version " VERSION "\n");
	printf("Usage: [ip:|ip6@]port file_to_export [size][kKmM] [-l authorize_file] [-r] [-m] [-c] [-C configuration file] [-p PID file name] [-o section name] [-M max connections]\n");
	printf("\t-r|--read-only\t\tread only\n");
	printf("\t-m|--multi-file\t\tmultiple file\n");
	printf("\t-c|--copy-on-write\tcopy on write\n");
	printf("\t-C|--config-file\tspecify an alternate configuration file\n");
	printf("\t-l|--authorize-file\tfile with list of hosts that are allowed to\n\t\t\t\tconnect.\n");
	printf("\t-p|--pid-file\t\tspecify a filename to write our PID to\n");
	printf("\t-o|--output-config\toutput a config file section for what you\n\t\t\t\tspecified on the command line, with the\n\t\t\t\tspecified section name\n");
	printf("\t-M|--max-connections\tspecify the maximum number of opened connections\n\n");
	printf("\tif port is set to 0, stdin is used (for running from inetd).\n");
	printf("\tif file_to_export contains '%%s', it is substituted with the IP\n");
	printf("\t\taddress of the machine trying to connect\n");
	printf("\tif ip is set, it contains the local IP address on which we're listening.\n\tif not, the server will listen on all local IP addresses\n");
	printf("Using configuration file %s\n", CFILE);
}
| 0
|
57,536
|
/*
 * msgctl_down - handle the msgctl() commands that take the rw_mutex
 * down for writing (IPC_RMID, IPC_SET).
 * @ns:      ipc namespace the queue lives in
 * @msqid:   message queue id
 * @cmd:     IPC_RMID or IPC_SET
 * @buf:     userspace msqid_ds (only read for IPC_SET)
 * @version: ipc ABI version, used when copying from userspace
 *
 * Returns 0 on success or a negative errno. Locking: ipcctl_pre_down()
 * returns with the queue locked and the ids rw_mutex held for write;
 * the out_unlock/out_up ladder below releases them in order.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
struct msqid_ds __user *buf, int version)
{
struct kern_ipc_perm *ipcp;
struct msqid64_ds uninitialized_var(msqid64);
struct msg_queue *msq;
int err;
/* only IPC_SET carries a payload from userspace */
if (cmd == IPC_SET) {
if (copy_msqid_from_user(&msqid64, buf, version))
return -EFAULT;
}
ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
&msqid64.msg_perm, msqid64.msg_qbytes);
if (IS_ERR(ipcp))
return PTR_ERR(ipcp);
msq = container_of(ipcp, struct msg_queue, q_perm);
err = security_msg_queue_msgctl(msq, cmd);
if (err)
goto out_unlock;
switch (cmd) {
case IPC_RMID:
/* freeque() unlocks the queue itself; only the rw_mutex remains */
freeque(ns, ipcp);
goto out_up;
case IPC_SET:
/* raising qbytes beyond the namespace limit needs CAP_SYS_RESOURCE */
if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
!capable(CAP_SYS_RESOURCE)) {
err = -EPERM;
goto out_unlock;
}
err = ipc_update_perm(&msqid64.msg_perm, ipcp);
if (err)
goto out_unlock;
msq->q_qbytes = msqid64.msg_qbytes;
msq->q_ctime = get_seconds();
/* sleeping receivers might be excluded by
* stricter permissions.
*/
expunge_all(msq, -EAGAIN);
/* sleeping senders might be able to send
* due to a larger queue size.
*/
ss_wakeup(&msq->q_senders, 0);
break;
default:
err = -EINVAL;
}
out_unlock:
msg_unlock(msq);
out_up:
up_write(&msg_ids(ns).rw_mutex);
return err;
}
| 0
|
167,215
|
void SyncBackendHost::HandleClearServerDataSucceededOnFrontendLoop() {
  // Forward the notification only while a frontend is still attached;
  // it may have detached before this task ran on the frontend loop.
  if (frontend_)
    frontend_->OnClearServerDataSucceeded();
}
| 0
|
427,545
|
/*
 * Mixed-mode thunk for the EFI GetTime() runtime service.
 * Converts the tm/tc pointers to 32-bit physical addresses (the thunked
 * firmware call is 32-bit, hence u32) and invokes the service while
 * holding both rtc_lock and efi_runtime_lock, in that order.
 * NOTE(review): virt_to_phys_or_null() can presumably yield 0/NULL,
 * which is passed straight to the firmware call — confirm the firmware
 * side tolerates that.
 */
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
u32 phys_tm, phys_tc;
unsigned long flags;
spin_lock(&rtc_lock);
spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
phys_tc = virt_to_phys_or_null(tc);
status = efi_thunk(get_time, phys_tm, phys_tc);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
}
| 0
|
495,814
|
/*
 * Parse a VVC (H.266) Sequence Parameter Set from the bitstream into
 * vvc->sps[sps_id], following the SPS RBSP syntax order. Many syntax
 * elements are read only to keep the bit position correct and are not
 * stored. If vui_flag_pos is non-NULL it receives the bit offset of the
 * sps_vui_parameters_present_flag. Returns the sps_id on success or a
 * negative value on parse error. The read order is bit-exact and must
 * not be changed.
 */
static s32 gf_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos)
{
	s32 vps_id, sps_id;
	u32 i, CtbSizeY;
	VVC_SPS *sps;
	u8 sps_ptl_dpb_hrd_params_present_flag;
	if (vui_flag_pos) *vui_flag_pos = 0;
	sps_id = gf_bs_read_int_log(bs, 4, "sps_id");
	if ((sps_id<0) || (sps_id >= 16)) {
		return -1;
	}
	vps_id = gf_bs_read_int_log(bs, 4, "vps_id");
	if ((vps_id<0) || (vps_id >= 16)) {
		return -1;
	}
	/* vps_id 0 means "no VPS": synthesize a minimal single-layer VPS state */
	if (!vps_id && !vvc->vps[0].state) {
		vvc->vps[0].state = 1;
		vvc->vps[0].num_ptl = 1;
		vvc->vps[0].max_layers = 1;
		vvc->vps[0].all_layers_independent = 1;
	}
	sps = &vvc->sps[sps_id];
	if (!sps->state) {
		sps->state = 1;
		sps->id = sps_id;
		sps->vps_id = vps_id;
	}
	sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1");
	sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc");
	sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5");
	CtbSizeY = 1<<sps->log2_ctu_size;
	sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag");
	if (sps_ptl_dpb_hrd_params_present_flag) {
		VVC_ProfileTierLevel ptl, *p_ptl;
		/* with a real VPS the PTL is parsed into a scratch struct;
		   without one it populates the synthesized vps[0] PTL */
		if (sps->vps_id) {
			p_ptl = &ptl;
		} else {
			p_ptl = &vvc->vps[0].ptl[0];
		}
		memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel));
		p_ptl->pt_present = 1;
		p_ptl->ptl_max_tid = sps->max_sublayers-1;
		vvc_profile_tier_level(bs, p_ptl, 0);
	}
	sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled");
	sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling");
	if (sps->ref_pic_resampling)
		sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs");
	sps->width = gf_bs_read_ue_log(bs, "width");
	sps->height = gf_bs_read_ue_log(bs, "height");
	sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag");
	if (sps->conf_window) {
		u32 SubWidthC, SubHeightC;
		sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left");
		sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right");
		sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top");
		sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom");
		/* chroma subsampling factors per chroma_format_idc (4:2:0 / 4:2:2 / other) */
		if (sps->chroma_format_idc == 1) {
			SubWidthC = SubHeightC = 2;
		} else if (sps->chroma_format_idc == 2) {
			SubWidthC = 2;
			SubHeightC = 1;
		} else {
			SubWidthC = SubHeightC = 1;
		}
		/* stored width/height are the cropped (display) dimensions */
		sps->width -= SubWidthC * (sps->cw_left + sps->cw_right);
		sps->height -= SubHeightC * (sps->cw_top + sps->cw_bottom);
	}
	sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present");
	if (sps->subpic_info_present) {
		sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1");
		if (sps->nb_subpics>1) {
			u32 tmpWidthVal, tmpHeightVal;
			sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags");
			sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size");
			/* bit lengths for subpicture position/size fields: ceil(log2(picture size in CTBs)) */
			tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY;
			tmpWidthVal = gf_get_bit_size(tmpWidthVal);
			tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY;
			tmpHeightVal = gf_get_bit_size(tmpHeightVal);
			for (i=0; i<sps->nb_subpics; i++) {
				if( !sps->subpic_same_size || !i) {
					if (i && (sps->width > CtbSizeY))
						gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x");
					if (i && (sps->height > CtbSizeY))
						gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y");
					if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY))
						gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1");
					if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY))
						gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1");
				}
				if (!sps->independent_subpic_flags) {
					gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag");
					gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag");
				}
			}
			sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1;
			sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag");
			if (sps->subpicid_mapping_explicit) {
				sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag");
				if (sps->subpicid_mapping_present) {
					for (i=0; i<sps->nb_subpics; i++) {
						gf_bs_read_ue_log(bs, "subpic_id");
					}
				}
			}
		}
	} else {
		sps->nb_subpics = 1;
	}
	sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8;
	sps->entropy_coding_sync_enabled_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag");
	sps->entry_point_offsets_present_flag = gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag");
	sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4");
	if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag")))
		sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1");
	u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes");
	/* NOTE(review): the PH loop below passes a constant idx 1 to
	   gf_bs_read_int_log_idx while the SH loop passes i — looks like it
	   should be i here too (log-label only, does not affect parsing). */
	for (i=0; i<sps_num_extra_ph_bits; i++) {
		if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1))
			sps->ph_num_extra_bits++;
	}
	u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes");
	for (i=0; i<sps_num_extra_sh_bits; i++) {
		if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i))
			sps->sh_num_extra_bits++;
	}
	if (sps_ptl_dpb_hrd_params_present_flag) {
		u8 sps_sublayer_dpb_params_flag = 0;
		if (sps->max_sublayers>1) {
			sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag");
		}
		/* DPB parameters: either all sublayers or only the highest one */
		for (i=(sps_sublayer_dpb_params_flag ? 0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) {
			gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i);
			gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i);
			gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i);
		}
	}
	gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2");
	sps->partition_constraints_override_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag");
	/* NOTE(review): this second read reuses the log label
	   "sps_log2_min_luma_coding_block_size_minus2"; per the SPS syntax
	   order it presumably corresponds to
	   sps_log2_diff_min_qt_min_cb_intra_slice_luma (label-only issue,
	   the bit is still consumed correctly) — confirm against the spec. */
	gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2");
	u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma");
	if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma");
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma");
	}
	u8 sps_qtbtt_dual_tree_intra_flag = 0;
	if (sps->chroma_format_idc) {
		sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag");
	}
	if (sps_qtbtt_dual_tree_intra_flag) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma");
		u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma");
		if( sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) {
			gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma");
			gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma");
		}
	}
	gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice");
	u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice");
	if (sps_max_mtt_hierarchy_depth_inter_slice != 0) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice");
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice");
	}
	u8 max_luma_transform_size_64_flag = 0;
	if (CtbSizeY > 32) {
		max_luma_transform_size_64_flag = gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag");
	}
	sps->transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag");
	if (sps->transform_skip_enabled_flag) {
		gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2");
		gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag");
	}
	if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) {
		gf_bs_read_int_log(bs, 1, "sps_explicit_mts_intra_enabled_flag");
		gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag");
	}
	Bool lfnst_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag");
	sps->joint_cbcr_enabled_flag = 0;
	if (sps->chroma_format_idc) {
		sps->joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag");
		u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag");
		/* 1 shared table, otherwise 2 (Cb/Cr) plus 1 more if joint CbCr is on */
		u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps->joint_cbcr_enabled_flag ? 3 : 2);
		for (i=0; i<numQpTables; i++) {
			gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i);
			u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i);
			for (j=0; j<sps_num_points_in_qp_table; j++) {
				gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j);
				gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j);
			}
		}
	}
	sps->sao_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag");
	sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag");
	if (sps->alf_enabled_flag && sps->chroma_format_idc) {
		sps->ccalf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag");
	}
	sps->lmcs_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_lmcs_enabled_flag");
	sps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "sps_weighted_pred_flag");
	sps->weighted_bipred_flag = gf_bs_read_int_log(bs, 1, "sps_weighted_bipred_flag");
	sps->long_term_ref_pics_flag = gf_bs_read_int_log(bs, 1, "sps_long_term_ref_pics_flag");
	if (sps->vps_id>0)
		sps->inter_layer_prediction_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_inter_layer_prediction_enabled_flag");
	sps->idr_rpl_present_flag = gf_bs_read_int_log(bs, 1, "sps_idr_rpl_present_flag");
	/* if rpl1 mirrors rpl0 only one list set is coded */
	u32 sps_rpl1_same_as_rpl0 = gf_bs_read_int_log(bs, 1, "sps_rpl1_same_as_rpl0_flag") ? 1: 2;
	for (i=0; i<sps_rpl1_same_as_rpl0; i++) {
		u32 j;
		sps->num_ref_pic_lists[i] = gf_bs_read_ue_log_idx(bs, "sps_num_ref_pic_lists", i);
		for (j=0; j<sps->num_ref_pic_lists[i]; j++) {
			s32 res = vvc_parse_ref_pic_list_struct(bs, sps, i, j, &sps->rps[i][j]);
			if (res<0) return res;
		}
	}
	gf_bs_read_int_log(bs, 1, "sps_ref_wraparound_enabled_flag");
	sps->temporal_mvp_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_temporal_mvp_enabled_flag");
	if (sps->temporal_mvp_enabled_flag) {
		gf_bs_read_int_log(bs, 1, "sps_sbtmvp_enabled_flag");
	}
	Bool amvr_enabled = gf_bs_read_int_log(bs, 1, "sps_amvr_enabled_flag");
	sps->bdof_control_present_in_ph_flag = 0;
	if (gf_bs_read_int_log(bs, 1, "sps_bdof_enabled_flag")) {
		sps->bdof_control_present_in_ph_flag = gf_bs_read_int_log(bs, 1, "sps_bdof_control_present_in_ph_flag");
	}
	gf_bs_read_int_log(bs, 1, "sps_smvd_enabled_flag");
	sps->dmvr_control_present_in_ph_flag = 0;
	if (gf_bs_read_int_log(bs, 1, "sps_dmvr_enabled_flag")) {
		sps->dmvr_control_present_in_ph_flag = gf_bs_read_int_log(bs, 1, "sps_dmvr_control_present_in_ph_flag");
	}
	sps->mmvd_fullpel_only_enabled_flag = 0;
	if (gf_bs_read_int_log(bs, 1, "sps_mmvd_enabled_flag")) {
		sps->mmvd_fullpel_only_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_mmvd_fullpel_only_enabled_flag");
	}
	u32 MaxNumMergeCand = 6 - gf_bs_read_ue_log(bs, "sps_six_minus_max_num_merge_cand");
	sps->prof_control_present_in_ph_flag = 0;
	gf_bs_read_int_log(bs, 1, "sps_sbt_enabled_flag");
	if (gf_bs_read_int_log(bs, 1, "sps_affine_enabled_flag")) {
		gf_bs_read_ue_log(bs, "sps_five_minus_max_num_subblock_merge_cand");
		gf_bs_read_int_log(bs, 1, "sps_6param_affine_enabled_flag");
		if (amvr_enabled) {
			gf_bs_read_int_log(bs, 1, "sps_affine_amvr_enabled_flag");
		}
		if (gf_bs_read_int_log(bs, 1, "sps_affine_prof_enabled_flag")) {
			sps->prof_control_present_in_ph_flag = gf_bs_read_int_log(bs, 1, "sps_prof_control_present_in_ph_flag");
		}
	}
	gf_bs_read_int_log(bs, 1, "sps_bcw_enabled_flag");
	gf_bs_read_int_log(bs, 1, "sps_ciip_enabled_flag");
	if (MaxNumMergeCand >= 2) {
		Bool gpm_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_gpm_enabled_flag");
		if (gpm_enabled_flag && (MaxNumMergeCand >= 3)) {
			gf_bs_read_ue_log(bs, "sps_max_num_merge_cand_minus_max_num_gpm_cand");
		}
	}
	gf_bs_read_ue_log(bs, "sps_log2_parallel_merge_level_minus2");
	gf_bs_read_int_log(bs, 1, "sps_isp_enabled_flag");
	gf_bs_read_int_log(bs, 1, "sps_mrl_enabled_flag");
	gf_bs_read_int_log(bs, 1, "sps_mip_enabled_flag");
	if (sps->chroma_format_idc != 0) {
		gf_bs_read_int_log(bs, 1, "sps_cclm_enabled_flag");
	}
	if (sps->chroma_format_idc == 1) {
		gf_bs_read_int_log(bs, 1, "sps_chroma_horizontal_collocated_flag");
		gf_bs_read_int_log(bs, 1, "sps_chroma_vertical_collocated_flag");
	}
	Bool act_enabled_flag = GF_FALSE;
	Bool palette_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_palette_enabled_flag");
	if ((sps->chroma_format_idc == 3) && !max_luma_transform_size_64_flag) {
		act_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_act_enabled_flag");
	}
	if (sps->transform_skip_enabled_flag || palette_enabled_flag) {
		gf_bs_read_ue_log(bs, "sps_min_qp_prime_ts");
	}
	if (gf_bs_read_int_log(bs, 1, "sps_ibc_enabled_flag")) {
		gf_bs_read_ue_log(bs, "sps_six_minus_max_num_ibc_merge_cand");
	}
	if (gf_bs_read_int_log(bs, 1, "sps_ladf_enabled_flag")) {
		u32 num_ladf_intervals_minus2 = gf_bs_read_int_log(bs, 2, "sps_num_ladf_intervals_minus2");
		gf_bs_read_se_log(bs, "sps_ladf_lowest_interval_qp_offset");
		for (i=0; i<num_ladf_intervals_minus2+1; i++) {
			gf_bs_read_se_log_idx(bs, "sps_ladf_qp_offset", i);
			gf_bs_read_ue_log_idx(bs, "sps_ladf_delta_threshold_minus1", i);
		}
	}
	sps->explicit_scaling_list_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_explicit_scaling_list_enabled_flag");
	if (lfnst_enabled_flag && sps->explicit_scaling_list_enabled_flag) {
		gf_bs_read_int_log(bs, 1, "sps_scaling_matrix_for_lfnst_disabled_flag");
	}
	Bool scaling_matrix_for_alternative_colour_space_disabled_flag = 0;
	if (act_enabled_flag && sps->explicit_scaling_list_enabled_flag) {
		scaling_matrix_for_alternative_colour_space_disabled_flag = gf_bs_read_int_log(bs, 1, "sps_scaling_matrix_for_alternative_colour_space_disabled_flag");
	}
	if (scaling_matrix_for_alternative_colour_space_disabled_flag) {
		gf_bs_read_int_log(bs, 1, "sps_scaling_matrix_designated_colour_space_flag");
	}
	sps->dep_quant_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_dep_quant_enabled_flag");
	sps->sign_data_hiding_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_sign_data_hiding_enabled_flag");
	sps->virtual_boundaries_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_virtual_boundaries_enabled_flag");
	if (sps->virtual_boundaries_enabled_flag) {
		sps->virtual_boundaries_present_flag = gf_bs_read_int_log(bs, 1, "sps_virtual_boundaries_present_flag");
		if (sps->virtual_boundaries_present_flag) {
			u32 num_virtual_boundaries = gf_bs_read_ue_log(bs, "sps_num_ver_virtual_boundaries");
			for (i=0; i<num_virtual_boundaries; i++) {
				gf_bs_read_ue_log_idx(bs, "sps_virtual_boundary_pos_x_minus1", i);
			}
			num_virtual_boundaries = gf_bs_read_ue_log(bs, "sps_num_hor_virtual_boundaries");
			for (i=0; i<num_virtual_boundaries; i++) {
				gf_bs_read_ue_log_idx(bs, "sps_virtual_boundary_pos_y_minus1", i);
			}
		}
	}
	if (sps_ptl_dpb_hrd_params_present_flag) {
		if (gf_bs_read_int_log(bs, 1, "sps_timing_hrd_params_present_flag")) {
			Bool general_nal_hrd_params_present_flag, general_vcl_hrd_params_present_flag, general_du_hrd_params_present_flag;
			u32 hrd_cpb_cnt_minus1=0;
			u32 sublayer_cpb_params_present_flag = 0;
			vvc_parse_general_timing_hrd_parameters(bs, sps, NULL, &general_nal_hrd_params_present_flag, &general_vcl_hrd_params_present_flag, &general_du_hrd_params_present_flag, &hrd_cpb_cnt_minus1);
			if (sps->max_sublayers > 1) {
				sublayer_cpb_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_cpb_params_present_flag");
			}
			u32 firstSubLayer = sublayer_cpb_params_present_flag ? 0 : sps->max_sublayers - 1;
			vvc_parse_ols_timing_hrd_parameters(bs, firstSubLayer, sps->max_sublayers-1, general_nal_hrd_params_present_flag, general_vcl_hrd_params_present_flag, general_du_hrd_params_present_flag, hrd_cpb_cnt_minus1);
		}
	}
	gf_bs_read_int_log(bs, 1, "sps_field_seq_flag");
	/* record the bit offset of the VUI presence flag for callers that
	   want to rewrite the VUI later */
	if (vui_flag_pos) {
		*vui_flag_pos = (u32)gf_bs_get_bit_offset(bs);
	}
	//all this to get to VUI !!!
	if (gf_bs_read_int_log(bs, 1, "sps_vui_parameters_present_flag")) {
		gf_bs_read_ue_log(bs, "sps_vui_payload_size_minus1");
		while (!gf_bs_is_align(bs)) {
			gf_bs_read_int_log(bs, 1, "sps_vui_alignment_zero_bit");
		}
		//vui parameters
		Bool vui_progressive_source_flag = gf_bs_read_int_log(bs, 1, "vui_progressive_source_flag");
		Bool vui_interlaced_source_flag = gf_bs_read_int_log(bs, 1, "vui_interlaced_source_flag");
		gf_bs_read_int_log(bs, 1, "vui_non_packed_constraint_flag");
		gf_bs_read_int_log(bs, 1, "vui_non_projected_constraint_flag");
		sps->aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_aspect_ratio_info_present_flag");
		if (sps->aspect_ratio_info_present_flag) {
			gf_bs_read_int_log(bs, 1, "vui_aspect_ratio_constant_flag");
			sps->sar_idc = gf_bs_read_int_log(bs, 8, "vui_aspect_ratio_idc");
			/* 0xFF = extended SAR, explicit width/height follow */
			if (sps->sar_idc== 0xFF) {
				sps->sar_width = gf_bs_read_int_log(bs, 16, "vui_sar_width");
				sps->sar_height = gf_bs_read_int_log(bs, 16, "vui_sar_height");
			}
		}
		sps->overscan_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_overscan_info_present_flag");
		if (sps->overscan_info_present_flag) {
			gf_bs_read_int_log(bs, 1, "vui_overscan_appropriate_flag");
		}
		sps->colour_description_present_flag = gf_bs_read_int_log(bs, 1, "vui_colour_description_present_flag");
		if (sps->colour_description_present_flag) {
			sps->colour_primaries = gf_bs_read_int_log(bs, 8, "vui_colour_primaries");
			sps->transfer_characteristics = gf_bs_read_int_log(bs, 8, "vui_transfer_characteristics");
			sps->matrix_coefficients = gf_bs_read_int_log(bs, 8, "vui_matrix_coeffs");
			sps->video_full_range_flag = gf_bs_read_int_log(bs, 1, "vui_full_range_flag");
		}
		if (gf_bs_read_int_log(bs, 1, " vui_chroma_loc_info_present_flag")) {
			if (vui_progressive_source_flag && !vui_interlaced_source_flag) {
				gf_bs_read_ue_log(bs, "vui_chroma_sample_loc_type_frame");
			} else {
				gf_bs_read_ue_log(bs, "vui_chroma_sample_loc_type_top_field");
				gf_bs_read_ue_log(bs, "vui_chroma_sample_loc_type_bottom_field");
			}
		}
		//WE DON'T PARSE vui_payload_bit_equal_to_one because we dont parse the rest (sps extensions)
		//if needed, see rewrite_vui code
	}
	return sps_id;
}
| 0
|
76,112
|
/*
 * Build a location list ("ltag") from tag search results.
 * @tag:         the tag name that was searched for (used in the list title)
 * @num_matches: number of entries in "matches"
 * @matches:     raw tag match lines, parsed with parse_match()
 *
 * For each match a dict with "text", "filename", "lnum" and (when the
 * tag is located by a search pattern rather than a line number)
 * "pattern" is appended to the list, which is then installed as the
 * current window's location list. Returns OK, or FAIL on allocation
 * failure.
 */
add_llist_tags(
char_u *tag,
int num_matches,
char_u **matches)
{
list_T *list;
char_u tag_name[128 + 1];
char_u *fname;
char_u *cmd;
int i;
char_u *p;
tagptrs_T tagp;
fname = alloc(MAXPATHL + 1);
cmd = alloc(CMDBUFFSIZE + 1);
list = list_alloc();
if (list == NULL || fname == NULL || cmd == NULL)
{
/* free whatever was allocated before bailing out */
vim_free(cmd);
vim_free(fname);
if (list != NULL)
list_free(list);
return FAIL;
}
for (i = 0; i < num_matches; ++i)
{
int len, cmd_len;
long lnum;
dict_T *dict;
parse_match(matches[i], &tagp);
// Save the tag name
len = (int)(tagp.tagname_end - tagp.tagname);
if (len > 128)
len = 128;
vim_strncpy(tag_name, tagp.tagname, len);
tag_name[len] = NUL;
// Save the tag file name
p = tag_full_fname(&tagp);
if (p == NULL)
continue;
vim_strncpy(fname, p, MAXPATHL);
vim_free(p);
// Get the line number or the search pattern used to locate
// the tag.
lnum = 0;
if (isdigit(*tagp.command))
// Line number is used to locate the tag
lnum = atol((char *)tagp.command);
else
{
char_u *cmd_start, *cmd_end;
// Search pattern is used to locate the tag
// Locate the end of the command
cmd_start = tagp.command;
cmd_end = tagp.command_end;
if (cmd_end == NULL)
{
for (p = tagp.command;
*p && *p != '\r' && *p != '\n'; ++p)
;
cmd_end = p;
}
// Now, cmd_end points to the character after the
// command. Adjust it to point to the last
// character of the command.
cmd_end--;
// Skip the '/' and '?' characters at the
// beginning and end of the search pattern.
if (*cmd_start == '/' || *cmd_start == '?')
cmd_start++;
if (*cmd_end == '/' || *cmd_end == '?')
cmd_end--;
len = 0;
cmd[0] = NUL;
// If "^" is present in the tag search pattern, then
// copy it first.
if (*cmd_start == '^')
{
STRCPY(cmd, "^");
cmd_start++;
len++;
}
// Precede the tag pattern with \V to make it very
// nomagic.
STRCAT(cmd, "\\V");
len += 2;
// clamp so the pattern plus "^", "\V", "\$" and NUL fits in cmd
cmd_len = (int)(cmd_end - cmd_start + 1);
if (cmd_len > (CMDBUFFSIZE - 5))
cmd_len = CMDBUFFSIZE - 5;
STRNCAT(cmd, cmd_start, cmd_len);
len += cmd_len;
if (cmd[len - 1] == '$')
{
// Replace '$' at the end of the search pattern
// with '\$'
cmd[len - 1] = '\\';
cmd[len] = '$';
len++;
}
cmd[len] = NUL;
}
if ((dict = dict_alloc()) == NULL)
continue;
if (list_append_dict(list, dict) == FAIL)
{
vim_free(dict);
continue;
}
dict_add_string(dict, "text", tag_name);
dict_add_string(dict, "filename", fname);
dict_add_number(dict, "lnum", lnum);
if (lnum == 0)
dict_add_string(dict, "pattern", cmd);
}
/* install as the location list; title shows the original ltag command */
vim_snprintf((char *)IObuff, IOSIZE, "ltag %s", tag);
set_errorlist(curwin, list, ' ', IObuff, NULL);
list_free(list);
vim_free(fname);
vim_free(cmd);
return OK;
}
| 0
|
103,478
|
/* Return the enter-event field list stored in the event's
 * syscall metadata (simple accessor, no locking). */
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *meta = call->data;

	return &meta->enter_fields;
}
| 0
|
247,175
|
/* Reference 8x8 2-D DCT: a 1-D DCT over every column, then over every
 * row of the intermediate result, then a final scale by 2 to match the
 * convention of the implementation under test. */
void reference_dct_2d(int16_t input[64], double output[64]) {
  // Pass 1: transform each column of the 8x8 input block.
  for (int col = 0; col < 8; ++col) {
    double in_vec[8], out_vec[8];
    for (int row = 0; row < 8; ++row)
      in_vec[row] = input[row * 8 + col];
    reference_dct_1d(in_vec, out_vec);
    for (int row = 0; row < 8; ++row)
      output[row * 8 + col] = out_vec[row];
  }
  // Pass 2: transform each row of the intermediate result in place.
  for (int row = 0; row < 8; ++row) {
    double in_vec[8], out_vec[8];
    for (int col = 0; col < 8; ++col)
      in_vec[col] = output[row * 8 + col];
    reference_dct_1d(in_vec, out_vec);
    for (int col = 0; col < 8; ++col)
      output[row * 8 + col] = out_vec[col];
  }
  // Apply the overall scale factor.
  for (int k = 0; k < 64; ++k)
    output[k] *= 2;
}
| 0
|
11,297
|
/*
 * Fingerprint the ClientHello extension block to detect Safari clients,
 * which advertise a fixed, known sequence of extensions.  On a match,
 * sets s->s3->is_probably_safari so a workaround can be applied later.
 * data points just past the session-id; limit is one past the end of
 * the hello.  Any malformed/short input simply returns without setting
 * the flag.
 */
static void ssl_check_for_safari(SSL *s, const unsigned char *data,
                                 const unsigned char *limit)
{
    unsigned short type, size;
    /* Byte-exact extension block Safari sends in all TLS versions. */
    static const unsigned char kSafariExtensionsBlock[] = {
        0x00, 0x0a, /* elliptic_curves extension */
        0x00, 0x08, /* 8 bytes */
        0x00, 0x06, /* 6 bytes of curve ids */
        0x00, 0x17, /* P-256 */
        0x00, 0x18, /* P-384 */
        0x00, 0x19, /* P-521 */
        0x00, 0x0b, /* ec_point_formats */
        0x00, 0x02, /* 2 bytes */
        0x01, /* 1 point format */
        0x00, /* uncompressed */
    };
    /* The following is only present in TLS 1.2 */
    static const unsigned char kSafariTLS12ExtensionsBlock[] = {
        0x00, 0x0d, /* signature_algorithms */
        0x00, 0x0c, /* 12 bytes */
        0x00, 0x0a, /* 10 bytes */
        0x05, 0x01, /* SHA-384/RSA */
        0x04, 0x01, /* SHA-256/RSA */
        0x02, 0x01, /* SHA-1/RSA */
        0x04, 0x03, /* SHA-256/ECDSA */
        0x02, 0x03, /* SHA-1/ECDSA */
    };
    /* Need at least the 2-byte extensions length... */
    if (data >= (limit - 2))
        return;
    data += 2;
    /* ...and a 4-byte extension header (type + size).
     * NOTE(review): the (limit - N) pointer arithmetic assumes limit is
     * at least N bytes into the buffer — confirm callers guarantee it. */
    if (data > (limit - 4))
        return;
    n2s(data, type);
    n2s(data, size);
    /* Safari sends server_name as the first extension. */
    if (type != TLSEXT_TYPE_server_name)
        return;
    if (data + size > limit)
        return;
    data += size;
    if (TLS1_get_client_version(s) >= TLS1_2_VERSION) {
        const size_t len1 = sizeof(kSafariExtensionsBlock);
        const size_t len2 = sizeof(kSafariTLS12ExtensionsBlock);
        /* The remaining extensions must match the fingerprint exactly. */
        if (data + len1 + len2 != limit)
            return;
        if (memcmp(data, kSafariExtensionsBlock, len1) != 0)
            return;
        if (memcmp(data + len1, kSafariTLS12ExtensionsBlock, len2) != 0)
            return;
    } else {
        const size_t len = sizeof(kSafariExtensionsBlock);
        if (data + len != limit)
            return;
        if (memcmp(data, kSafariExtensionsBlock, len) != 0)
            return;
    }
    s->s3->is_probably_safari = 1;
}
| 1
|
461,883
|
}
static int
iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
| 0
|
510,978
|
/* cgit page handler: emit the patch for the commit named by the
 * "sha1" query parameter. */
static void patch_fn(struct cgit_context *ctx)
{
	cgit_print_patch(ctx->qry.sha1);
}
| 0
|
163,305
|
/*
 * Handle a stroke "add connection" message: unmarshal the string fields
 * embedded in the message (pop_* converts offsets into pointers, so the
 * pop order must match the wire layout exactly — do not reorder), log
 * the parsed options, then register the connection with the config,
 * attribute and handler backends.
 */
static void stroke_add_conn(private_stroke_socket_t *this, stroke_msg_t *msg)
{
	/* Fixed unmarshal order — mirrors the stroke_msg_t layout. */
	pop_string(msg, &msg->add_conn.name);
	DBG1(DBG_CFG, "received stroke: add connection '%s'", msg->add_conn.name);
	DBG2(DBG_CFG, "conn %s", msg->add_conn.name);
	pop_end(msg, "left", &msg->add_conn.me);
	pop_end(msg, "right", &msg->add_conn.other);
	pop_string(msg, &msg->add_conn.eap_identity);
	pop_string(msg, &msg->add_conn.aaa_identity);
	pop_string(msg, &msg->add_conn.xauth_identity);
	pop_string(msg, &msg->add_conn.algorithms.ike);
	pop_string(msg, &msg->add_conn.algorithms.esp);
	pop_string(msg, &msg->add_conn.algorithms.ah);
	pop_string(msg, &msg->add_conn.ikeme.mediated_by);
	pop_string(msg, &msg->add_conn.ikeme.peerid);
	/* Debug dump of the options that affect connection setup. */
	DBG_OPT("  eap_identity=%s", msg->add_conn.eap_identity);
	DBG_OPT("  aaa_identity=%s", msg->add_conn.aaa_identity);
	DBG_OPT("  xauth_identity=%s", msg->add_conn.xauth_identity);
	DBG_OPT("  ike=%s", msg->add_conn.algorithms.ike);
	DBG_OPT("  esp=%s", msg->add_conn.algorithms.esp);
	DBG_OPT("  ah=%s", msg->add_conn.algorithms.ah);
	DBG_OPT("  dpddelay=%d", msg->add_conn.dpd.delay);
	DBG_OPT("  dpdtimeout=%d", msg->add_conn.dpd.timeout);
	DBG_OPT("  dpdaction=%d", msg->add_conn.dpd.action);
	DBG_OPT("  closeaction=%d", msg->add_conn.close_action);
	DBG_OPT("  sha256_96=%s", msg->add_conn.sha256_96 ? "yes" : "no");
	DBG_OPT("  mediation=%s", msg->add_conn.ikeme.mediation ? "yes" : "no");
	DBG_OPT("  mediated_by=%s", msg->add_conn.ikeme.mediated_by);
	DBG_OPT("  me_peerid=%s", msg->add_conn.ikeme.peerid);
	/* Hand the fully-parsed message to the backends. */
	this->config->add(this->config, msg);
	this->attribute->add_dns(this->attribute, msg);
	this->handler->add_attributes(this->handler, msg);
}
| 0
|
348,057
|
/*
 * Build the final argv for exec'ing a child process.  out->argv[0] is
 * always SHELL_PATH (a fallback interpreter if exec returns ENOEXEC),
 * so the real command starts at out->argv[1].  For git commands the
 * argv is prefixed with "git"; shell commands are wrapped via
 * prepare_shell_cmd; otherwise the argv is copied as-is.
 */
static void prepare_cmd(struct argv_array *out, const struct child_process *cmd)
{
	if (!cmd->argv[0])
		die("BUG: command is empty");

	/*
	 * Add SHELL_PATH so in the event exec fails with ENOEXEC we can
	 * attempt to interpret the command with 'sh'.
	 */
	argv_array_push(out, SHELL_PATH);

	if (cmd->git_cmd) {
		argv_array_push(out, "git");
		argv_array_pushv(out, cmd->argv);
	} else if (cmd->use_shell) {
		prepare_shell_cmd(out, cmd->argv);
	} else {
		argv_array_pushv(out, cmd->argv);
	}

	/*
	 * If there are no '/' characters in the command then perform a path
	 * lookup and use the resolved path as the command to exec.  If there
	 * are no '/' characters or if the command wasn't found in the path,
	 * have exec attempt to invoke the command directly.
	 */
	if (!strchr(out->argv[1], '/')) {
		/* Replace argv[1] in place with its resolved absolute path. */
		char *program = locate_in_PATH(out->argv[1]);
		if (program) {
			free((char *)out->argv[1]);
			out->argv[1] = program;
		}
	}
}
| 1
|
346,682
|
/*
 * Assemble an .ico/.cur file image from a GROUP_ICON/GROUP_CURSOR
 * resource plus its referenced icon/cursor resources.  On success
 * returns an xmalloc'd buffer (caller frees) and stores its size in
 * *ressize; returns NULL on error (a warning has been printed).
 *
 * Fix: the previously-leaked output buffer is now freed on the error
 * paths taken after allocation.
 */
extract_group_icon_cursor_resource(WinLibrary *fi, WinResource *wr, char *lang,
                                   int *ressize, bool is_icon)
{
	Win32CursorIconDir *icondir;
	Win32CursorIconFileDir *fileicondir;
	char *memory;
	int c, size, offset, skipped;

	/* get resource data and size */
	icondir = (Win32CursorIconDir *) get_resource_entry(fi, wr, &size);
	if (icondir == NULL) {
		/* get_resource_entry will print error */
		return NULL;
	}

	/* calculate total size of output file */
	RETURN_IF_BAD_POINTER(NULL, icondir->count);
	skipped = 0;
	for (c = 0 ; c < icondir->count ; c++) {
		int level;
		int iconsize;
		char name[14];
		WinResource *fwr;

		RETURN_IF_BAD_POINTER(NULL, icondir->entries[c]);
		/* find the corresponding icon resource */
		snprintf(name, sizeof(name)/sizeof(char), "-%d", icondir->entries[c].res_id);
		fwr = find_resource(fi, (is_icon ? "-3" : "-1"), name, lang, &level);
		if (fwr == NULL) {
			warn(_("%s: could not find `%s' in `%s' resource."),
				fi->name, &name[1], (is_icon ? "group_icon" : "group_cursor"));
			return NULL;
		}
		if (get_resource_entry(fi, fwr, &iconsize) != NULL) {
			if (iconsize == 0) {
				warn(_("%s: icon resource `%s' is empty, skipping"), fi->name, name);
				skipped++;
				continue;
			}
			if (iconsize != icondir->entries[c].bytes_in_res) {
				warn(_("%s: mismatch of size in icon resource `%s' and group (%d vs %d)"), fi->name, name, iconsize, icondir->entries[c].bytes_in_res);
			}
			/* use the larger of the two sizes so the copy below fits */
			size += iconsize < icondir->entries[c].bytes_in_res ? icondir->entries[c].bytes_in_res : iconsize;
			/* cursor resources have two additional WORDs that contain
			 * hotspot info */
			if (!is_icon)
				size -= sizeof(uint16_t)*2;
		}
	}
	offset = sizeof(Win32CursorIconFileDir) + (icondir->count-skipped) * sizeof(Win32CursorIconFileDirEntry);
	size += offset;
	*ressize = size;

	/* allocate that much memory */
	memory = xmalloc(size);
	fileicondir = (Win32CursorIconFileDir *) memory;

	/* transfer Win32CursorIconDir structure members */
	fileicondir->reserved = icondir->reserved;
	fileicondir->type = icondir->type;
	fileicondir->count = icondir->count - skipped;

	/* transfer each cursor/icon: Win32CursorIconDirEntry and data */
	skipped = 0;
	for (c = 0 ; c < icondir->count ; c++) {
		int level;
		char name[14];
		WinResource *fwr;
		char *data;

		/* find the corresponding icon resource */
		snprintf(name, sizeof(name)/sizeof(char), "-%d", icondir->entries[c].res_id);
		fwr = find_resource(fi, (is_icon ? "-3" : "-1"), name, lang, &level);
		if (fwr == NULL) {
			warn(_("%s: could not find `%s' in `%s' resource."),
				fi->name, &name[1], (is_icon ? "group_icon" : "group_cursor"));
			free(memory); /* fix: don't leak the output buffer */
			return NULL;
		}

		/* get data and size of that resource */
		data = get_resource_entry(fi, fwr, &size);
		if (data == NULL) {
			/* get_resource_entry has printed error */
			free(memory); /* fix: don't leak the output buffer */
			return NULL;
		}
		if (size == 0) {
			skipped++;
			continue;
		}

		/* copy ICONDIRENTRY (not including last dwImageOffset) */
		memcpy(&fileicondir->entries[c-skipped], &icondir->entries[c],
			   sizeof(Win32CursorIconFileDirEntry)-sizeof(uint32_t));

		/* special treatment for cursors */
		if (!is_icon) {
			fileicondir->entries[c-skipped].width = icondir->entries[c].res_info.cursor.width;
			fileicondir->entries[c-skipped].height = icondir->entries[c].res_info.cursor.height / 2;
			fileicondir->entries[c-skipped].color_count = 0;
			fileicondir->entries[c-skipped].reserved = 0;
		}

		/* set image offset and increase it */
		fileicondir->entries[c-skipped].dib_offset = offset;

		/* transfer resource into file memory */
		if (size > icondir->entries[c].bytes_in_res)
			size = icondir->entries[c].bytes_in_res;
		if (is_icon) {
			memcpy(&memory[offset], data, size);
		} else {
			/* first two WORDs of cursor data are the hotspot;
			 * NOTE(review): assumes size >= 4 here — confirm
			 * malformed inputs cannot reach this with size < 4. */
			fileicondir->entries[c-skipped].hotspot_x = ((uint16_t *) data)[0];
			fileicondir->entries[c-skipped].hotspot_y = ((uint16_t *) data)[1];
			memcpy(&memory[offset], data+sizeof(uint16_t)*2,
				   size-sizeof(uint16_t)*2);
			offset -= sizeof(uint16_t)*2;
		}

		/* increase the offset pointer */
		offset += icondir->entries[c].bytes_in_res;
	}

	return (void *) memory;
}
| 1
|
243,602
|
/* {{{ proto mixed ArrayObject::key()
 * PHP-level key() method: takes no arguments and returns the current
 * iterator key by delegating to spl_array_iterator_key(). */
SPL_METHOD(Array, key)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_array_iterator_key(getThis(), return_value TSRMLS_CC);
} /* }}} */
void spl_array_iterator_key(zval *object, zval *return_value TSRMLS_DC) /* {{{ */
| 0
|
310,205
|
/*
 * Paint a shading (sh operator).  Skips hidden content, computes the
 * shade's device-space bounding box, opens any pending transparency
 * group/softmask around the fill, then closes it again.
 */
pdf_show_shade(fz_context *ctx, pdf_run_processor *pr, fz_shade *shd)
{
	pdf_gstate *gstate = pr->gstate + pr->gtop;
	fz_rect bbox;
	softmask_save softmask = { NULL };

	if (pr->super.hidden)
		return;

	fz_bound_shade(ctx, shd, &gstate->ctm, &bbox);

	/* begin_group may push a new gstate; use the returned one below. */
	gstate = pdf_begin_group(ctx, pr, &bbox, &softmask);

	/* FIXME: The gstate->ctm in the next line may be wrong; maybe
	 * it should be the parent gstates ctm? */
	fz_fill_shade(ctx, pr->dev, shd, &gstate->ctm, gstate->fill.alpha, &gstate->fill.color_params);

	pdf_end_group(ctx, pr, &softmask);
}
| 0
|
280,973
|
// Low-level sibling-list splice: links newChild into this container
// immediately before nextChild.  Does NOT fire DOM mutation events and
// does not handle reparenting — callers must have detached newChild
// first (asserted below).  The pointer updates are order-sensitive:
// prev is captured before nextChild's previousSibling is overwritten.
void ContainerNode::insertBeforeCommon(Node* nextChild, Node* newChild)
{
    NoEventDispatchAssertion assertNoEventDispatch;

    ASSERT(newChild);
    ASSERT(!newChild->parentNode()); // Use insertBefore if you need to handle reparenting (and want DOM mutation events).
    ASSERT(!newChild->nextSibling());
    ASSERT(!newChild->previousSibling());
    ASSERT(!newChild->isShadowRoot());

    Node* prev = nextChild->previousSibling();
    ASSERT(m_lastChild != prev);
    nextChild->setPreviousSibling(newChild);
    if (prev) {
        ASSERT(m_firstChild != nextChild);
        ASSERT(prev->nextSibling() == nextChild);
        prev->setNextSibling(newChild);
    } else {
        // nextChild was the first child; newChild takes its place.
        ASSERT(m_firstChild == nextChild);
        m_firstChild = newChild;
    }
    newChild->setParentOrShadowHostNode(this);
    newChild->setPreviousSibling(prev);
    newChild->setNextSibling(nextChild);
}
| 0
|
384,447
|
/*
 * flush() handler for an evdev client: forward the flush to the
 * underlying input device unless the device has gone away or the
 * client's access was revoked.
 */
static int evdev_flush(struct file *file, fl_owner_t id)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	int err;

	/* Serialize against device removal/revocation; propagate -EINTR. */
	err = mutex_lock_interruptible(&evdev->mutex);
	if (err)
		return err;

	if (evdev->exist && !client->revoked)
		err = input_flush_device(&evdev->handle, file);
	else
		err = -ENODEV;

	mutex_unlock(&evdev->mutex);
	return err;
}
| 0
|
32,845
|
/*
 * Activate a new address space on the current CPU (s390): switch the
 * hardware context, record this CPU in the new mm's cpumask, and load
 * the user address-space-control element for the new mm.
 */
static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
	switch_mm(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	set_user_asce(next);
}
| 0
|
341,550
|
/*
 * Create and register the global VNC display for a QEMU DisplayState:
 * allocates the VncDisplay and its display-change listener, loads the
 * keyboard layout (default "en-us"), starts the VNC worker thread and
 * hooks the listener into the display core.  Exits the process if the
 * keymap cannot be loaded.  Note: stores the instance in the globals
 * `vnc_display` and `dcl` — only one VNC display is supported.
 */
void vnc_display_init(DisplayState *ds)
{
    VncDisplay *vs = g_malloc0(sizeof(*vs));

    dcl = g_malloc0(sizeof(DisplayChangeListener));

    ds->opaque = vs;
    dcl->idle = 1;
    vnc_display = vs;

    /* -1 marks the listening sockets as not yet open. */
    vs->lsock = -1;
#ifdef CONFIG_VNC_WS
    vs->lwebsock = -1;
#endif

    vs->ds = ds;
    QTAILQ_INIT(&vs->clients);
    vs->expires = TIME_MAX;

    if (keyboard_layout)
        vs->kbd_layout = init_keyboard_layout(name2keysym, keyboard_layout);
    else
        vs->kbd_layout = init_keyboard_layout(name2keysym, "en-us");

    if (!vs->kbd_layout)
        exit(1);

    qemu_mutex_init(&vs->mutex);
    vnc_start_worker_thread();

    dcl->ops = &dcl_ops;
    register_displaychangelistener(ds, dcl);
}
| 1
|
520,942
|
/*
  Item-tree walker callback: if this LIKE predicate is eligible for
  selectivity sampling and only references the table being analyzed,
  record it (with its field argument when the pattern is constant and
  the left side is a plain field) in the caller's statistics list.
  Returns TRUE only on out-of-memory, which aborts the walk.
*/
bool Item_func_like::find_selective_predicates_list_processor(void *arg)
{
  find_selective_predicates_list_processor_data *data=
    (find_selective_predicates_list_processor_data *) arg;
  if (use_sampling && used_tables() == data->table->map)
  {
    THD *thd= data->table->in_use;
    COND_STATISTIC *stat;
    Item *arg0;
    if (!(stat= (COND_STATISTIC *) thd->alloc(sizeof(COND_STATISTIC))))
      return TRUE;
    stat->cond= this;
    /* Strip wrappers (e.g. Item_ref) to see the underlying item. */
    arg0= args[0]->real_item();
    if (args[1]->const_item() && arg0->type() == FIELD_ITEM)
      stat->field_arg= ((Item_field *)arg0)->field;
    else
      stat->field_arg= NULL;
    data->list.push_back(stat, thd->mem_root);
  }
  return FALSE;
}
| 0
|
110,201
|
/*
 * Blit up to @num copies of the boot logo side by side, advancing in
 * the direction implied by @rotate and stopping when the next copy
 * would no longer fit on screen.  @image's dx/dy are advanced in place
 * (8-pixel gap between copies).
 */
static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
			    int rotate, unsigned int num)
{
	unsigned int n;

	switch (rotate) {
	case FB_ROTATE_UR:
		for (n = 0;
		     n < num && image->dx + image->width <= info->var.xres;
		     n++) {
			info->fbops->fb_imageblit(info, image);
			image->dx += image->width + 8;
		}
		break;
	case FB_ROTATE_UD:
		for (n = 0; n < num && image->dx >= 0; n++) {
			info->fbops->fb_imageblit(info, image);
			image->dx -= image->width + 8;
		}
		break;
	case FB_ROTATE_CW:
		for (n = 0;
		     n < num && image->dy + image->height <= info->var.yres;
		     n++) {
			info->fbops->fb_imageblit(info, image);
			image->dy += image->height + 8;
		}
		break;
	case FB_ROTATE_CCW:
		for (n = 0; n < num && image->dy >= 0; n++) {
			info->fbops->fb_imageblit(info, image);
			image->dy -= image->height + 8;
		}
		break;
	}
}
| 0
|
343,271
|
/*
 * C reference path for vertical scaling: apply the luma filter taps to
 * the luma source lines and the chroma filter taps to the chroma source
 * lines, rounding (1<<18 bias, >>19 shift for 0.19 fixed point) and
 * clipping each output sample to 8 bits.  Chroma is skipped when uDest
 * is NULL.
 */
static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                               int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                               uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
{
    //FIXME Optimize (just quickly writen not opti..)
    int i;
    for (i=0; i<dstW; i++)
    {
        int val=1<<18; // rounding bias for the >>19 below
        int j;
        for (j=0; j<lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];

        dest[i]= av_clip_uint8(val>>19);
    }

    if (uDest)
        for (i=0; i<chrDstW; i++)
        {
            int u=1<<18;
            int v=1<<18;
            int j;
            for (j=0; j<chrFilterSize; j++)
            {
                u += chrSrc[j][i] * chrFilter[j];
                /* V plane lives 2048 samples after U in each chroma
                 * line.  NOTE(review): 2048 is the hard-coded chroma
                 * buffer stride (VOF/2) — confirm it matches the
                 * allocator before reusing this path. */
                v += chrSrc[j][i + 2048] * chrFilter[j];
            }

            uDest[i]= av_clip_uint8(u>>19);
            vDest[i]= av_clip_uint8(v>>19);
        }
}
| 1
|
256,591
|
/*
 * Completion callback for the chunked-transfer test request.  Verifies
 * the response code, the presence of a Transfer-Encoding header, and
 * that the reassembled body is exactly the three chunks
 * "This is funny" (13) + "but not hilarious." (18) + "bwv 1052" (8).
 * Any mismatch prints FAILED and aborts the test binary.
 */
static void http_chunked_request_done(struct evhttp_request *req, void *arg)
{
	if (req->response_code != HTTP_OK) {
		fprintf(stderr, "FAILED\n");
		exit(1);
	}

	if (evhttp_find_header(req->input_headers, "Transfer-Encoding") == NULL) {
		fprintf(stderr, "FAILED\n");
		exit(1);
	}

	if (EVBUFFER_LENGTH(req->input_buffer) != 13 + 18 + 8) {
		fprintf(stderr, "FAILED\n");
		exit(1);
	}

	/* Fix: compare the full 39-byte body.  The length previously read
	 * "+ 18 + 8" (26 bytes) — the leading "13 +" had been dropped, so
	 * a corrupted tail went undetected. */
	if (strncmp((char *) EVBUFFER_DATA(req->input_buffer),
		"This is funnybut not hilarious.bwv 1052", 13 + 18 + 8)) {
		fprintf(stderr, "FAILED\n");
		exit(1);
	}

	test_ok = 1;
	event_loopexit(NULL);
}
| 0
|
500,486
|
// Write the PDF file header: the %PDF-<version> line followed by a
// binary-marker comment line (or the PCLm marker in PCLm mode) so that
// transfer tools treat the file as binary.  In QDF mode an extra
// %QDF-1.0 marker is appended.
QPDFWriter::writeHeader()
{
    writeString("%PDF-");
    writeString(this->m->final_pdf_version);
    if (this->m->pclm)
    {
        // PCLm version
        writeString("\n%PCLm 1.0\n");
    }
    else
    {
        // This string of binary characters would not be valid UTF-8, so
        // it really should be treated as binary.
        writeString("\n%\xbf\xf7\xa2\xfe\n");
    }
    writeStringQDF("%QDF-1.0\n\n");

    // Note: do not write extra header text here.  Linearized PDFs
    // must include the entire linearization parameter dictionary
    // within the first 1024 characters of the PDF file, so for
    // linearized files, we have to write extra header text after the
    // linearization parameter dictionary.
}
| 0
|
73,980
|
/*
 * Process the SACK blocks carried in an incoming ACK: validate each
 * block, sort them, then walk the retransmit queue tagging skbs as
 * SACKed while using tp->recv_sack_cache to skip ranges already
 * processed by earlier ACKs.  Updates reordering/fack state and
 * returns the accumulated FLAG_* bits in state->flag.
 */
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
			u32 prior_snd_una, struct tcp_sacktag_state *state)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const unsigned char *ptr = (skb_transport_header(ack_skb) +
				    TCP_SKB_CB(ack_skb)->sacked);
	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
	struct tcp_sack_block sp[TCP_NUM_SACKS];
	struct tcp_sack_block *cache;
	struct sk_buff *skb;
	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
	int used_sacks;
	bool found_dup_sack = false;
	int i, j;
	int first_sack_index;

	state->flag = 0;
	state->reord = tp->packets_out;

	if (!tp->sacked_out) {
		if (WARN_ON(tp->fackets_out))
			tp->fackets_out = 0;
		tcp_highest_sack_reset(sk);
	}

	/* D-SACK detection must look at the raw wire blocks. */
	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
					 num_sacks, prior_snd_una);
	if (found_dup_sack)
		state->flag |= FLAG_DSACKING_ACK;

	/* Eliminate too old ACKs, but take into
	 * account more or less fresh ones, they can
	 * contain valid SACK info.
	 */
	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
		return 0;

	if (!tp->packets_out)
		goto out;

	/* Copy the wire blocks into host order, dropping invalid ones. */
	used_sacks = 0;
	first_sack_index = 0;
	for (i = 0; i < num_sacks; i++) {
		bool dup_sack = !i && found_dup_sack;

		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);

		if (!tcp_is_sackblock_valid(tp, dup_sack,
					    sp[used_sacks].start_seq,
					    sp[used_sacks].end_seq)) {
			int mib_idx;

			if (dup_sack) {
				if (!tp->undo_marker)
					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
				else
					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
			} else {
				/* Don't count olds caused by ACK reordering */
				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
				    !after(sp[used_sacks].end_seq, tp->snd_una))
					continue;
				mib_idx = LINUX_MIB_TCPSACKDISCARD;
			}

			NET_INC_STATS(sock_net(sk), mib_idx);
			if (i == 0)
				first_sack_index = -1;
			continue;
		}

		/* Ignore very old stuff early */
		if (!after(sp[used_sacks].end_seq, prior_snd_una))
			continue;

		used_sacks++;
	}

	/* order SACK blocks to allow in order walk of the retrans queue */
	for (i = used_sacks - 1; i > 0; i--) {
		for (j = 0; j < i; j++) {
			if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
				swap(sp[j], sp[j + 1]);

				/* Track where the first SACK block goes to */
				if (j == first_sack_index)
					first_sack_index = j + 1;
			}
		}
	}

	skb = tcp_write_queue_head(sk);
	state->fack_count = 0;
	i = 0;

	if (!tp->sacked_out) {
		/* It's already past, so skip checking against it */
		cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
	} else {
		cache = tp->recv_sack_cache;
		/* Skip empty blocks in at head of the cache */
		while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
		       !cache->end_seq)
			cache++;
	}

	/* Main walk: tag skbs covered by each block, reusing the cache of
	 * previously-processed ranges to avoid rescanning them. */
	while (i < used_sacks) {
		u32 start_seq = sp[i].start_seq;
		u32 end_seq = sp[i].end_seq;
		bool dup_sack = (found_dup_sack && (i == first_sack_index));
		struct tcp_sack_block *next_dup = NULL;

		if (found_dup_sack && ((i + 1) == first_sack_index))
			next_dup = &sp[i + 1];

		/* Skip too early cached blocks */
		while (tcp_sack_cache_ok(tp, cache) &&
		       !before(start_seq, cache->end_seq))
			cache++;

		/* Can skip some work by looking recv_sack_cache? */
		if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
		    after(end_seq, cache->start_seq)) {

			/* Head todo? */
			if (before(start_seq, cache->start_seq)) {
				skb = tcp_sacktag_skip(skb, sk, state,
						       start_seq);
				skb = tcp_sacktag_walk(skb, sk, next_dup,
						       state,
						       start_seq,
						       cache->start_seq,
						       dup_sack);
			}

			/* Rest of the block already fully processed? */
			if (!after(end_seq, cache->end_seq))
				goto advance_sp;

			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
						       state,
						       cache->end_seq);

			/* ...tail remains todo... */
			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
				/* ...but better entrypoint exists! */
				skb = tcp_highest_sack(sk);
				if (!skb)
					break;
				state->fack_count = tp->fackets_out;
				cache++;
				goto walk;
			}

			skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
			/* Check overlap against next cached too (past this one already) */
			cache++;
			continue;
		}

		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
			skb = tcp_highest_sack(sk);
			if (!skb)
				break;
			state->fack_count = tp->fackets_out;
		}
		skb = tcp_sacktag_skip(skb, sk, state, start_seq);

walk:
		skb = tcp_sacktag_walk(skb, sk, next_dup, state,
				       start_seq, end_seq, dup_sack);

advance_sp:
		i++;
	}

	/* Clear the head of the cache sack blocks so we can skip it next time */
	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
		tp->recv_sack_cache[i].start_seq = 0;
		tp->recv_sack_cache[i].end_seq = 0;
	}
	for (j = 0; j < used_sacks; j++)
		tp->recv_sack_cache[i++] = sp[j];

	if ((state->reord < tp->fackets_out) &&
	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
		tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);

	tcp_verify_left_out(tp);
out:

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
	return state->flag;
}
| 0
|
354,093
|
/*
 * Expand the terminal's set_attributes (sgr) capability with all
 * parameters zero except the ninth, which is set to @flag, and return a
 * strdup'd copy of the result (caller frees), or 0 when the expansion
 * is absent/cancelled.
 * NOTE(review): @tp is unused here; set_attributes is the global sgr
 * string — assumes it is valid for tparm, confirm callers check it.
 */
set_attribute_9(TERMTYPE2 *tp, int flag)
{
    const char *value;
    char *result;

    value = tparm(set_attributes, 0, 0, 0, 0, 0, 0, 0, 0, flag);
    if (PRESENT(value))
	result = strdup(value);
    else
	result = 0;
    return result;
}
| 1
|
403,357
|
/* {{{ proto array openssl_get_cert_locations(void)
 * Return an associative array of OpenSSL's compiled-in default
 * certificate file/directory locations plus the values of the
 * openssl.cafile / openssl.capath INI settings. */
PHP_FUNCTION(openssl_get_cert_locations)
{
	array_init(return_value);

	/* Compiled-in defaults reported by the OpenSSL library itself. */
	add_assoc_string(return_value, "default_cert_file", (char *) X509_get_default_cert_file(), 1);
	add_assoc_string(return_value, "default_cert_file_env", (char *) X509_get_default_cert_file_env(), 1);
	add_assoc_string(return_value, "default_cert_dir", (char *) X509_get_default_cert_dir(), 1);
	add_assoc_string(return_value, "default_cert_dir_env", (char *) X509_get_default_cert_dir_env(), 1);
	add_assoc_string(return_value, "default_private_dir", (char *) X509_get_default_private_dir(), 1);
	add_assoc_string(return_value, "default_default_cert_area", (char *) X509_get_default_cert_area(), 1);
	/* PHP-level overrides configured via php.ini. */
	add_assoc_string(return_value, "ini_cafile",
		zend_ini_string("openssl.cafile", sizeof("openssl.cafile"), 0), 1);
	add_assoc_string(return_value, "ini_capath",
		zend_ini_string("openssl.capath", sizeof("openssl.capath"), 0), 1);
}
| 0
|
405,247
|
/*
 * Map a Ruby ASN.1 object's tag_class symbol (:UNIVERSAL, :APPLICATION,
 * :CONTEXT_SPECIFIC, :PRIVATE; nil defaults to universal) to the
 * corresponding OpenSSL V_ASN1_* constant.  Raises ASN1Error for any
 * other value.
 */
ossl_asn1_tag_class(VALUE obj)
{
    VALUE s;

    s = ossl_asn1_get_tag_class(obj);
    if (NIL_P(s) || s == sym_UNIVERSAL)
	return V_ASN1_UNIVERSAL;
    else if (s == sym_APPLICATION)
	return V_ASN1_APPLICATION;
    else if (s == sym_CONTEXT_SPECIFIC)
	return V_ASN1_CONTEXT_SPECIFIC;
    else if (s == sym_PRIVATE)
	return V_ASN1_PRIVATE;
    else
	ossl_raise(eASN1Error, "invalid tag class");
}
| 0
|
429,573
|
// Materialize stdin or a data: URI into a timestamp-named temporary
// file and return its path.  Throws Error on read/decode failure.
// Fix: the base64 decode buffer is now released on the error path too
// (it previously leaked because the throw skipped delete[]).
std::string XPathIo::writeDataToFile(const std::string& orgPath) {
    Protocol prot = fileProtocol(orgPath);

    // Generate the temp-file name from the current timestamp.
    std::time_t timestamp = std::time(NULL);
    std::stringstream ss;
    ss << timestamp << XPathIo::TEMP_FILE_EXT;
    std::string path = ss.str();

    if (prot == pStdin) {
        // Refuse to read from an interactive terminal.
        if (isatty(fileno(stdin)))
            throw Error(kerInputDataReadFailed);
#if defined(_MSC_VER) || defined(__MINGW__)
        // convert stdin to binary
        if (_setmode(_fileno(stdin), _O_BINARY) == -1)
            throw Error(kerInputDataReadFailed);
#endif
        std::ofstream fs(path.c_str(), std::ios::out | std::ios::binary | std::ios::trunc);
        // Copy stdin to the temp file in fixed-size chunks.
        char readBuf[100*1024];
        std::streamsize readBufSize = 0;
        do {
            std::cin.read(readBuf, sizeof(readBuf));
            readBufSize = std::cin.gcount();
            if (readBufSize > 0) {
                fs.write (readBuf, readBufSize);
            }
        } while(readBufSize);
        fs.close();
    } else if (prot == pDataUri) {
        std::ofstream fs(path.c_str(), std::ios::out | std::ios::binary | std::ios::trunc);
        // Locate the base64 payload inside the data URI.
        size_t base64Pos = orgPath.find("base64,");
        if (base64Pos == std::string::npos) {
            fs.close();
            throw Error(kerErrorMessage, "No base64 data");
        }
        std::string data = orgPath.substr(base64Pos+7);
        char* decodeData = new char[data.length()];
        long size = base64decode(data.c_str(), decodeData, data.length());
        if (size > 0) {
            fs.write(decodeData, size);
            delete[] decodeData;
            fs.close();
        } else {
            // Release the buffer before throwing (was leaked before).
            delete[] decodeData;
            fs.close();
            throw Error(kerErrorMessage, "Unable to decode base 64.");
        }
    }

    return path;
}
| 0
|
381,662
|
/*
 * Parse the "vdso=" kernel command-line option: any numeric value
 * enables/disables the 64-bit vDSO.  Always returns 0 (option consumed).
 */
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
| 0
|
168,029
|
/*
 * XPath substring-after(str, find): pushes the part of str that follows
 * the first occurrence of find, or the empty string when find does not
 * occur (or when the buffer cannot be allocated, nothing is pushed).
 * Both popped objects are released back to the context cache.
 */
xmlXPathSubstringAfterFunction(xmlXPathParserContextPtr ctxt, int nargs) {
  xmlXPathObjectPtr str;
  xmlXPathObjectPtr find;
  xmlBufferPtr target;
  const xmlChar *point;
  int offset;

  CHECK_ARITY(2);
  /* Arguments are popped in reverse: find first, then str. */
  CAST_TO_STRING;
  find = valuePop(ctxt);
  CAST_TO_STRING;
  str = valuePop(ctxt);

  target = xmlBufferCreate();
  if (target) {
    point = xmlStrstr(str->stringval, find->stringval);
    if (point) {
      /* Copy everything after the end of the first match. */
      offset = (int)(point - str->stringval) + xmlStrlen(find->stringval);
      xmlBufferAdd(target, &str->stringval[offset],
		   xmlStrlen(str->stringval) - offset);
    }
    valuePush(ctxt, xmlXPathCacheNewString(ctxt->context,
	xmlBufferContent(target)));
    xmlBufferFree(target);
  }
  xmlXPathReleaseObject(ctxt->context, str);
  xmlXPathReleaseObject(ctxt->context, find);
}
| 0
|
392,576
|
/*
 * Initialize the percpu allocator from the first-chunk layout described
 * by @ai, with the chunk's memory already mapped at @base_addr.  Builds
 * the unit/group mapping tables, sets the allocator's global
 * parameters, creates the static (and optionally dynamic) first
 * chunk(s) and links them into the slot lists.  Called once during
 * early boot; always returns 0 (invalid input BUGs).
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("failed to initialize, %s\n", #cond);		\
		pr_emerg("cpu_possible_mask=%*pb\n",			\
			 cpumask_pr_args(cpu_possible_mask));		\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = memblock_virt_alloc(ai->nr_groups *
					     sizeof(group_offsets[0]), 0);
	group_sizes = memblock_virt_alloc(ai->nr_groups *
					   sizeof(group_sizes[0]), 0);
	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	/* Fill unit_map/unit_off from the per-group cpu maps, tracking
	 * the CPUs with the lowest and highest unit addresses. */
	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = memblock_virt_alloc(
			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
	INIT_LIST_HEAD(&schunk->list);
	INIT_LIST_HEAD(&schunk->map_extend_list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);
	schunk->nr_populated = pcpu_unit_pages;

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	/* Seed the static chunk's allocation map: [0, static_size) used,
	 * remainder free; low bit marks in-use boundaries. */
	schunk->map[0] = 1;
	schunk->map[1] = ai->static_size;
	schunk->map_used = 1;
	if (schunk->free_size)
		schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
	schunk->map[schunk->map_used] |= 1;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
		INIT_LIST_HEAD(&dchunk->list);
		INIT_LIST_HEAD(&dchunk->map_extend_list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);
		dchunk->nr_populated = pcpu_unit_pages;

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[0] = 1;
		dchunk->map[1] = pcpu_reserved_chunk_limit;
		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
		dchunk->map_used = 2;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_nr_empty_pop_pages +=
		pcpu_count_occupied_pages(pcpu_first_chunk, 1);
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}
| 0
|
5,854
|
/*
 * Parse a LocalVariableTypeTable attribute from class-file bytes.
 * buffer/sz describe the attribute bytes (untrusted input); buf_offset
 * is the attribute's position in the file, used to record each entry's
 * file offset.  Returns a new RBinJavaAttrInfo or NULL on allocation
 * failure.
 *
 * Fix: the 2-byte table length and each 10-byte entry (five ushorts,
 * per JVMS 4.7.14) are now bounds-checked against sz before being read,
 * so a truncated/malicious attribute can no longer cause out-of-bounds
 * reads; parsing stops at the first truncated entry.
 */
R_API RBinJavaAttrInfo *r_bin_java_local_variable_type_table_attr_new(ut8 *buffer, ut64 sz, ut64 buf_offset) {
	RBinJavaLocalVariableTypeAttribute *lvattr;
	ut64 offset = 6;
	ut32 i = 0;
	RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (buffer, sz, 0);
	if (!attr) {
		return NULL;
	}
	attr->type = R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TYPE_TABLE_ATTR;
	attr->info.local_variable_type_table_attr.local_variable_table = r_list_newf (r_bin_java_local_variable_type_table_attr_entry_free);
	/* Bounds check before reading the 2-byte table length. */
	if (offset + 2 > sz) {
		attr->info.local_variable_type_table_attr.table_length = 0;
		attr->size = offset;
		return attr;
	}
	attr->info.local_variable_type_table_attr.table_length = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	for (i = 0; i < attr->info.local_variable_type_table_attr.table_length; i++) {
		ut64 curpos = buf_offset + offset;
		/* Each entry is five u2 fields (10 bytes); stop instead of
		 * reading past the end of a truncated table. */
		if (offset + 10 > sz) {
			break;
		}
		lvattr = R_NEW0 (RBinJavaLocalVariableTypeAttribute);
		if (!lvattr) {
			perror ("calloc");
			break;
		}
		lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->length = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->signature_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->index = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->file_offset = curpos;
		lvattr->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->name_idx);
		lvattr->size = 10;
		if (!lvattr->name) {
			lvattr->name = strdup ("NULL");
			eprintf ("r_bin_java_local_variable_type_table_attr_new: Unable to find the name for %d index.\n", lvattr->name_idx);
		}
		lvattr->signature = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->signature_idx);
		if (!lvattr->signature) {
			lvattr->signature = strdup ("NULL");
			eprintf ("r_bin_java_local_variable_type_table_attr_new: Unable to find the descriptor for %d index.\n", lvattr->signature_idx);
		}
		r_list_append (attr->info.local_variable_type_table_attr.local_variable_table, lvattr);
	}
	// IFDBG r_bin_java_print_local_variable_type_table_attr_summary(attr);
	attr->size = offset;
	return attr;
}
| 1
|
231,991
|
// Depth-first, pre-order search for the first scrollable layer in
// |layer|'s subtree.  Returns NULL for a null input or when no layer in
// the subtree is scrollable.
static Layer* FindFirstScrollableLayer(Layer* layer) {
  if (!layer)
    return NULL;

  if (layer->scrollable())
    return layer;

  for (const auto& child : layer->children()) {
    Layer* scrollable = FindFirstScrollableLayer(child.get());
    if (scrollable)
      return scrollable;
  }

  return NULL;
}
| 0
|
495,539
|
/*
 * Report whether the file's 'moov' box appears before the first 'mdat'
 * in the top-level box list (i.e. the file is "fast-start").  Returns
 * GF_FALSE when mdat comes first or when neither box is present.
 */
Bool gf_isom_moov_first(GF_ISOFile *movie)
{
	u32 idx, nb_boxes = gf_list_count(movie->TopBoxes);

	for (idx = 0; idx < nb_boxes; idx++) {
		GF_Box *box = (GF_Box *)gf_list_get(movie->TopBoxes, idx);
		if (box->type == GF_ISOM_BOX_TYPE_MDAT) return GF_FALSE;
		if (box->type == GF_ISOM_BOX_TYPE_MOOV) return GF_TRUE;
	}
	return GF_FALSE;
}
| 0
|
152,953
|
// Scope guard destructor: restore the IR generator's previously active
// function context (saved when this context was entered).
FunctionContext::~FunctionContext() {
  irGen_->functionContext_ = oldContext_;
}
| 0
|
464,234
|
/* Reallocate |ptr| to |size| bytes, keeping the allocator's accounting
 * statistics in sync. On return, *usable is set to the usable size of the
 * new allocation. Semantics mirror realloc(): size==0 frees and returns
 * NULL; ptr==NULL behaves like zmalloc_usable(). On OOM the registered
 * handler is invoked (which typically aborts). */
void *zrealloc_usable(void *ptr, size_t size, size_t *usable) {
#ifndef HAVE_MALLOC_SIZE
void *realptr;
#endif
size_t oldsize;
void *newptr;
if (size == 0 && ptr != NULL) {
zfree(ptr);
*usable = 0;
return NULL;
}
if (ptr == NULL) return zmalloc_usable(size, usable);
#ifdef HAVE_MALLOC_SIZE
/* Platform can report a block's usable size: ask the allocator directly. */
oldsize = zmalloc_size(ptr);
newptr = realloc(ptr,size);
if (!newptr) zmalloc_oom_handler(size);
/* Stats: retire the old size, record the new usable size. */
update_zmalloc_stat_free(oldsize);
update_zmalloc_stat_alloc(*usable = zmalloc_size(newptr));
return newptr;
#else
/* No malloc_size(): the requested size is stashed in a PREFIX_SIZE header
 * placed just before the pointer handed to the caller. */
realptr = (char*)ptr-PREFIX_SIZE;
oldsize = *((size_t*)realptr);
newptr = realloc(realptr,size+PREFIX_SIZE);
if (!newptr) zmalloc_oom_handler(size);
*((size_t*)newptr) = *usable = size;
update_zmalloc_stat_free(oldsize);
update_zmalloc_stat_alloc(size);
/* Skip past the header so the caller sees only the payload. */
return (char*)newptr+PREFIX_SIZE;
#endif
}
| 0
|
237,317
|
// Wraps |text| in an HTML table-header cell, producing "<th>" + text + "</th>".
static std::string WrapWithTH(std::string text) {
  std::string cell("<th>");
  cell += text;
  cell += "</th>";
  return cell;
}
| 0
|
497,492
|
/* Builds the USB control report that queries the scroll-wheel mode
 * (command class 0x02, command id 0x94, data size 0x02), reading from the
 * persistent variable store (VARSTORE). */
struct razer_report razer_chroma_misc_get_scroll_mode(void)
{
struct razer_report report = get_razer_report(0x02, 0x94, 0x02);
report.arguments[0] = VARSTORE;
return report;
}
| 0
|
263,861
|
/* Probe whether the first |length| bytes in |magick| look like an XWD
 * (X Window Dump) header. Requires at least 8 bytes; checks the zero bytes
 * at offsets 1-2 and then a version-7 marker in either byte order. */
static MagickBooleanType IsXWD(const unsigned char *magick,const size_t length)
{
  if (length < 8)
    return(MagickFalse);
  if (memcmp(magick+1,"\000\000",2) != 0)
    return(MagickFalse);
  /* Version marker, first byte order variant. */
  if (memcmp(magick+4,"\007\000\000",3) == 0)
    return(MagickTrue);
  /* Version marker, opposite byte order variant. */
  if (memcmp(magick+5,"\000\000\007",3) == 0)
    return(MagickTrue);
  return(MagickFalse);
}
| 0
|
26,870
|
/* Sets virLogDefaultOutput to a "prio:file:path" spec pointing at
 * |filename|. Privileged daemons log under LOCALSTATEDIR/log/libvirt;
 * unprivileged ones log under the user cache directory, which is created
 * with a restrictive 0700 umask. Returns 0 on success, -1 on error. */
static int virLogSetDefaultOutputToFile ( const char * filename , bool privileged ) {
int ret = - 1 ;
char * logdir = NULL ;
mode_t old_umask ;
if ( privileged ) {
if ( virAsprintf ( & virLogDefaultOutput , "%d:file:%s/log/libvirt/%s" , virLogDefaultPriority , LOCALSTATEDIR , filename ) < 0 ) goto cleanup ;
}
else {
if ( ! ( logdir = virGetUserCacheDirectory ( ) ) ) goto cleanup ;
/* Tighten umask only around directory creation, then restore it. */
old_umask = umask ( 077 ) ;
if ( virFileMakePath ( logdir ) < 0 ) {
umask ( old_umask ) ;
goto cleanup ;
}
umask ( old_umask ) ;
if ( virAsprintf ( & virLogDefaultOutput , "%d:file:%s/%s" , virLogDefaultPriority , logdir , filename ) < 0 ) goto cleanup ;
}
ret = 0 ;
cleanup : VIR_FREE ( logdir ) ;
return ret ;
}
| 0
|
213,417
|
/* Callback predicate: returns TRUE only for the image whose id was packed
 * into |data| with GINT_TO_POINTER by the caller. */
image_draw_decide_cb (int image_id, void *data)
{
return (image_id == GPOINTER_TO_INT (data));
}
| 0
|
498,404
|
// Verifies that an internal redirect to an https:// location succeeds when
// the downstream connection is TLS: the route cache is cleared, the stream
// is recreated, and the success counter is incremented.
TEST_F(RouterTest, HttpsInternalRedirectSucceeded) {
auto ssl_connection = std::make_shared<Ssl::MockConnectionInfo>();
enableRedirects(3);
setNumPreviousRedirect(1);
sendRequest();
redirect_headers_->setLocation("https://www.foo.com");
EXPECT_CALL(connection_, ssl()).WillOnce(Return(ssl_connection));
EXPECT_CALL(callbacks_, clearRouteCache());
EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
response_decoder_->decodeHeaders(std::move(redirect_headers_), false);
EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
.counter("upstream_internal_redirect_succeeded_total")
.value());
// In production, the HCM recreateStream would have called this.
router_.onDestroy();
}
| 0
|
495,477
|
/* Implements String.prototype.charCodeAt(index): stores the code unit at
 * |index| (or NaN when index is out of range) in vm->retval.
 * Handles both byte/ASCII strings (direct indexing) and UTF-8 strings
 * (decode after seeking to the character offset). */
njs_string_prototype_char_code_at(njs_vm_t *vm, njs_value_t *args,
njs_uint_t nargs, njs_index_t unused)
{
double num;
size_t length;
int64_t index;
uint32_t code;
njs_int_t ret;
const u_char *start, *end;
njs_string_prop_t string;
njs_unicode_decode_t ctx;
ret = njs_string_object_validate(vm, njs_argument(args, 0));
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
length = njs_string_prop(&string, njs_argument(args, 0));
/* Coerce the index argument to an integer (defaults to 0 when absent). */
ret = njs_value_to_integer(vm, njs_arg(args, nargs, 1), &index);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
if (njs_slow_path(index < 0 || index >= (int64_t) length)) {
num = NAN;
goto done;
}
if (length == string.size) {
/* Byte or ASCII string. */
code = string.start[index];
} else {
njs_utf8_decode_init(&ctx);
/* UTF-8 string. */
end = string.start + string.size;
/* Seek to the byte offset of the index-th character, then decode it. */
start = njs_string_offset(string.start, end, index);
code = njs_utf8_decode(&ctx, &start, end);
}
num = code;
done:
njs_set_number(&vm->retval, num);
return NJS_OK;
}
| 0
|
405,911
|
/* A protocol value is valid iff it has exactly one bit set and that bit
 * belongs to the set of known protocols (OFPUTIL_P_ANY). */
ofputil_protocol_is_valid(enum ofputil_protocol protocol)
{
    if (!(protocol & OFPUTIL_P_ANY)) {
        return false;
    }
    return is_pow2(protocol);
}
| 0
|
412,521
|
/* D-Bus handler for InstallSignature(sig_type, key_id, package_id).
 * Validates the arguments, caches them on the transaction, and requests
 * PolicyKit authorization. Every failure path sets the transaction state
 * to ERROR and replies to the caller via pk_transaction_dbus_return. */
pk_transaction_install_signature (PkTransaction *transaction,
GVariant *params,
GDBusMethodInvocation *context)
{
gboolean ret;
const gchar *key_id;
const gchar *package_id;
PkSigTypeEnum sig_type;
g_autoptr(GError) error = NULL;
g_return_if_fail (PK_IS_TRANSACTION (transaction));
g_return_if_fail (transaction->priv->tid != NULL);
g_variant_get (params, "(u&s&s)",
&sig_type,
&key_id,
&package_id);
g_debug ("InstallSignature method called: %s, %s, %s",
pk_sig_type_enum_to_string (sig_type),
key_id,
package_id);
/* not implemented yet */
if (!pk_backend_is_implemented (transaction->priv->backend,
PK_ROLE_ENUM_INSTALL_SIGNATURE)) {
g_set_error (&error,
PK_TRANSACTION_ERROR,
PK_TRANSACTION_ERROR_NOT_SUPPORTED,
"InstallSignature not supported by backend");
pk_transaction_set_state (transaction, PK_TRANSACTION_STATE_ERROR);
goto out;
}
/* check for sanity */
ret = pk_transaction_strvalidate (key_id, &error);
if (!ret) {
pk_transaction_set_state (transaction, PK_TRANSACTION_STATE_ERROR);
goto out;
}
/* check package_id (';;;repo-id' is used for the repo key) */
ret = pk_package_id_check (package_id);
if (!ret && !g_str_has_prefix (package_id, ";;;")) {
g_set_error (&error,
PK_TRANSACTION_ERROR,
PK_TRANSACTION_ERROR_PACKAGE_ID_INVALID,
"The package id '%s' is not valid", package_id);
pk_transaction_set_state (transaction, PK_TRANSACTION_STATE_ERROR);
goto out;
}
/* save so we can run later */
transaction->priv->cached_package_id = g_strdup (package_id);
transaction->priv->cached_key_id = g_strdup (key_id);
pk_transaction_set_role (transaction, PK_ROLE_ENUM_INSTALL_SIGNATURE);
/* try to get authorization */
ret = pk_transaction_obtain_authorization (transaction,
PK_ROLE_ENUM_INSTALL_SIGNATURE,
&error);
if (!ret) {
pk_transaction_set_state (transaction, PK_TRANSACTION_STATE_ERROR);
goto out;
}
out:
/* Reply to the D-Bus caller with success or the collected error. */
pk_transaction_dbus_return (context, error);
}
| 0
|
501,843
|
/* Frees every heap-allocated component string of a parsed URL handle.
 * The struct itself is not freed here; free(NULL) is safe for any field
 * that was never set. */
static void free_urlhandle(struct Curl_URL *u)
{
free(u->scheme);
free(u->user);
free(u->password);
free(u->options);
free(u->host);
free(u->zoneid);
free(u->port);
free(u->path);
free(u->query);
free(u->fragment);
free(u->scratch);
free(u->temppath);
}
| 0
|
449,417
|
/* Binds a TTM scatter/gather backend to a memory region: pins the host
 * pages, then maps them into the GPU's virtual memory. On map failure the
 * host memory is released again so no partial state is left behind.
 * Returns 0 on success or a negative errno. */
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
ret = nouveau_mem_host(reg, &nvbe->ttm);
if (ret)
return ret;
ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
if (ret) {
/* Undo the host pinning on map failure. */
nouveau_mem_fini(mem);
return ret;
}
nvbe->mem = mem;
return 0;
}
| 0
|
411,612
|
**/
CImg<T>& noise(const double sigma, const unsigned int noise_type=0) {
if (is_empty()) return *this;
const Tfloat vmin = (Tfloat)cimg::type<T>::min(), vmax = (Tfloat)cimg::type<T>::max();
Tfloat nsigma = (Tfloat)sigma, m = 0, M = 0;
if (nsigma==0 && noise_type!=3) return *this;
if (nsigma<0 || noise_type==2) m = (Tfloat)min_max(M);
if (nsigma<0) nsigma = (Tfloat)(-nsigma*(M-m)/100.0);
switch (noise_type) {
case 0 : { // Gaussian noise
cimg_rof(*this,ptrd,T) {
Tfloat val = (Tfloat)(*ptrd + nsigma*cimg::grand());
if (val>vmax) val = vmax;
if (val<vmin) val = vmin;
*ptrd = (T)val;
}
} break;
case 1 : { // Uniform noise
cimg_rof(*this,ptrd,T) {
Tfloat val = (Tfloat)(*ptrd + nsigma*cimg::rand(-1,1));
if (val>vmax) val = vmax;
if (val<vmin) val = vmin;
*ptrd = (T)val;
}
} break;
case 2 : { // Salt & Pepper noise
if (nsigma<0) nsigma = -nsigma;
if (M==m) { m = 0; M = cimg::type<T>::is_float()?(Tfloat)1:(Tfloat)cimg::type<T>::max(); }
cimg_rof(*this,ptrd,T) if (cimg::rand(100)<nsigma) *ptrd = (T)(cimg::rand()<0.5?M:m);
} break;
case 3 : { // Poisson Noise
cimg_rof(*this,ptrd,T) *ptrd = (T)cimg::prand(*ptrd);
} break;
case 4 : { // Rice noise
const Tfloat sqrt2 = (Tfloat)std::sqrt(2.0);
cimg_rof(*this,ptrd,T) {
const Tfloat
val0 = (Tfloat)*ptrd/sqrt2,
re = (Tfloat)(val0 + nsigma*cimg::grand()),
im = (Tfloat)(val0 + nsigma*cimg::grand());
Tfloat val = cimg::hypot(re,im);
if (val>vmax) val = vmax;
if (val<vmin) val = vmin;
*ptrd = (T)val;
}
} break;
default :
throw CImgArgumentException(_cimg_instance
"noise(): Invalid specified noise type %d "
"(should be { 0=gaussian | 1=uniform | 2=salt&Pepper | 3=poisson }).",
cimg_instance,
noise_type);
}
return *this;
| 0
|
213,094
|
/* Decodes a whole strip (|cc| bytes, a multiple of the scanline size) one
 * row at a time via the codec's row decoder. Returns 1 when every row was
 * consumed, 0 on a row-decode failure or a zero scanline size. */
LogLuvDecodeStrip(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s)
{
tmsize_t rowlen = TIFFScanlineSize(tif);
if (rowlen == 0)
return 0;
assert(cc%rowlen == 0);
while (cc && (*tif->tif_decoderow)(tif, bp, rowlen, s)) {
bp += rowlen;
cc -= rowlen;
}
return (cc == 0);
}
| 0
|
245,057
|
// Regression test: reparent the web view to an item that is not attached to
// any canvas, then move it back and re-show it. The qWait calls give the
// scene graph time to process each reparent; the test passes if nothing
// crashes or asserts.
void tst_QQuickWebView::removeFromCanvas()
{
showWebView();
QQuickItem* parent = webView()->parentItem();
QQuickItem noCanvasItem;
webView()->setParentItem(&noCanvasItem);
QTest::qWait(200);
webView()->setParentItem(parent);
webView()->setVisible(true);
QTest::qWait(200);
}
| 0
|
81,132
|
/* Sets the channel layout ('chnl' box) of an audio sample description.
 * Validates the movie is writable, the track and description index exist,
 * and the entry is an audio entry; creates the 'chnl' child box if needed
 * and copies |layout| into it. For QTFF-mode entries the sample entry is
 * first normalized to the MPEG v1 layout. Returns GF_OK or an error code. */
GF_Err gf_isom_set_audio_layout(GF_ISOFile *movie, u32 trackNumber, u32 sampleDescriptionIndex, GF_AudioChannelLayout *layout)
{
GF_Err e;
GF_TrackBox *trak;
GF_SampleEntryBox *entry;
GF_AudioSampleEntryBox*aud_entry;
GF_SampleDescriptionBox *stsd;
GF_ChannelLayoutBox *chnl;
e = CanAccessMovie(movie, GF_ISOM_OPEN_WRITE);
if (e) return e;
trak = gf_isom_get_track_from_file(movie, trackNumber);
if (!trak) return GF_BAD_PARAM;
stsd = trak->Media->information->sampleTable->SampleDescription;
if (!stsd) {
return movie->LastError = GF_ISOM_INVALID_FILE;
}
if (!sampleDescriptionIndex || sampleDescriptionIndex > gf_list_count(stsd->child_boxes)) {
return movie->LastError = GF_BAD_PARAM;
}
entry = (GF_SampleEntryBox *)gf_list_get(stsd->child_boxes, sampleDescriptionIndex - 1);
//no support for generic sample entries (eg, no MPEG4 descriptor)
if (entry == NULL) return GF_BAD_PARAM;
if (!movie->keep_utc)
trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time();
if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_BAD_PARAM;
aud_entry = (GF_AudioSampleEntryBox*) entry;
if (aud_entry->qtff_mode) {
u32 sr = aud_entry->samplerate_hi;
/* MLPA stores the full sample rate split across hi/lo 16-bit halves. */
if (aud_entry->type==GF_ISOM_BOX_TYPE_MLPA) {
sr <<= 16;
sr |= aud_entry->samplerate_lo;
}
e = gf_isom_set_audio_info(movie, trackNumber, sampleDescriptionIndex, sr, aud_entry->channel_count, (u8) aud_entry->bitspersample, GF_IMPORT_AUDIO_SAMPLE_ENTRY_v1_MPEG);
if (e) return e;
}
chnl = (GF_ChannelLayoutBox *) gf_isom_box_find_child(aud_entry->child_boxes, GF_ISOM_BOX_TYPE_CHNL);
if (!chnl) {
chnl = (GF_ChannelLayoutBox *)gf_isom_box_new_parent(&aud_entry->child_boxes, GF_ISOM_BOX_TYPE_CHNL);
if (!chnl) return GF_OUT_OF_MEM;
}
aud_entry->channel_count = layout->channels_count;
memcpy(&chnl->layout, layout, sizeof(GF_AudioChannelLayout));
return GF_OK;
}
| 0
|
146,792
|
/*
 * Wipe out the dummy buffer |buf| that was created for e.g. quickfix
 * processing, first closing any windows an autocommand may have opened on
 * it, and restoring the current directory afterwards.
 */
wipe_dummy_buffer(buf_T *buf, char_u *dirname_start)
{
// If any autocommand opened a window on the dummy buffer, close that
// window. If we can't close them all then give up.
while (buf->b_nwindows > 0)
{
int did_one = FALSE;
win_T *wp;
if (firstwin->w_next != NULL)
FOR_ALL_WINDOWS(wp)
if (wp->w_buffer == buf)
{
if (win_close(wp, FALSE) == OK)
did_one = TRUE;
break;
}
// No window could be closed this pass: bail out to avoid looping forever.
if (!did_one)
return;
}
if (curbuf != buf && buf->b_nwindows == 0) // safety check
{
#if defined(FEAT_EVAL)
cleanup_T cs;
// Reset the error/interrupt/exception state here so that aborting()
// returns FALSE when wiping out the buffer. Otherwise it doesn't
// work when got_int is set.
enter_cleanup(&cs);
#endif
wipe_buffer(buf, TRUE);
#if defined(FEAT_EVAL)
// Restore the error/interrupt/exception state if not discarded by a
// new aborting error, interrupt, or uncaught exception.
leave_cleanup(&cs);
#endif
// When autocommands/'autochdir' option changed directory: go back.
restore_start_dir(dirname_start);
}
}
| 0
|
405,545
|
/* Unit test: appending a DBPointer element must produce bytes identical to
 * the reference fixture test28.bson. */
test_bson_append_dbpointer (void)
{
bson_oid_t oid;
bson_t *b;
bson_t *b2;
b = bson_new ();
bson_oid_init_from_string (&oid, "0123abcd0123abcd0123abcd");
BSON_ASSERT (bson_append_dbpointer (b, "dbpointer", -1, "foo", &oid));
b2 = get_bson ("test28.bson");
BSON_ASSERT_BSON_EQUAL (b, b2);
bson_destroy (b);
bson_destroy (b2);
}
| 0
|
370,845
|
/* Creates a new string of the same class that shares |str|'s buffer
 * (copy-on-write via str_new3), propagating taint/trust flags. */
rb_str_new_shared(VALUE str)
{
VALUE str2 = str_new3(rb_obj_class(str), str);
OBJ_INFECT(str2, str);
return str2;
}
| 0
|
476,143
|
void InstanceKlass::ensure_space_for_methodids(int start_offset) {
int new_jmeths = 0;
int length = methods()->length();
for (int index = start_offset; index < length; index++) {
Method* m = methods()->at(index);
jmethodID id = m->find_jmethod_id_or_null();
if (id == NULL) {
new_jmeths++;
}
}
if (new_jmeths != 0) {
Method::ensure_jmethod_ids(class_loader_data(), new_jmeths);
}
}
| 0
|
225,209
|
// Returns the number of pages in the current print-preview job.
// Only valid while a preview is being rendered (checked by the DCHECK).
int PrintWebViewHelper::PrintPreviewContext::total_page_count() const {
DCHECK(IsRendering());
return total_page_count_;
}
| 0
|
420,727
|
/* Rejects an LE credit-based connection request on |chan|: picks the result
 * code (authorization pending vs. bad PSM), moves the channel to the
 * disconnecting state, and sends the LE connection response. */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
struct l2cap_le_conn_rsp rsp;
u16 result;
if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
result = L2CAP_CR_LE_AUTHORIZATION;
else
result = L2CAP_CR_LE_BAD_PSM;
l2cap_state_change(chan, BT_DISCONN);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.mtu = cpu_to_le16(chan->imtu);
rsp.mps = cpu_to_le16(chan->mps);
rsp.credits = cpu_to_le16(chan->rx_credits);
rsp.result = cpu_to_le16(result);
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
&rsp);
}
| 0
|
360,226
|
/* Marks the file's deep (recursive) counts as stale so the next query
 * recomputes them from scratch. */
invalidate_deep_counts (NautilusFile *file)
{
file->details->deep_counts_status = NAUTILUS_REQUEST_NOT_STARTED;
}
| 0
|
97,231
|
/* Detaches ptrace debugging state from |task| on s390: clears the PER
 * (program event recording) user settings and events, and drops the
 * single-step and per-trap flags so the task resumes normally. */
void ptrace_disable(struct task_struct *task)
{
memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
task->thread.per_flags = 0;
}
| 0
|
217,710
|
// Test-fixture constructor: zeroes the PSNR/frame accumulators, records the
// encoding mode from the test parameter, and resets per-run state.
ErrorResilienceTestLarge()
: EncoderTest(GET_PARAM(0)),
psnr_(0.0),
nframes_(0),
mismatch_psnr_(0.0),
mismatch_nframes_(0),
encoding_mode_(GET_PARAM(1)) {
Reset();
}
| 0
|
242,791
|
// Factory helper: default-constructs a concrete ExtensionFunction of type T.
// (T is supplied by a template parameter declared outside this view.)
ExtensionFunction* NewExtensionFunction() {
return new T();
}
| 0
|
166,733
|
/* Parses a run of decimal digits starting at |s| into *digit and returns a
 * pointer to the first non-digit character. *digit is 0 when |s| does not
 * start with a digit. (Note: no overflow guard for very long digit runs.) */
xps_parse_digits(char *s, int *digit)
{
    *digit = 0;
    for (; *s >= '0' && *s <= '9'; s++)
        *digit = *digit * 10 + (*s - '0');
    return s;
}
| 0
|
449,146
|
/* Configuration accessor: whether the real-time clock is kept in UTC. */
CNF_GetRtcOnUtc(void)
{
return rtc_on_utc;
}
| 0
|
18,311
|
// Verifies GAIA-name handling in the profile cache: the GAIA name starts
// empty, setting it makes it both the GAIA name and the displayed name, and
// a later custom name overrides the display while the GAIA name is kept.
TEST_F ( ProfileInfoCacheTest , GAIAName ) {
GetCache ( ) -> AddProfileToCache ( GetProfilePath ( "path_1" ) , ASCIIToUTF16 ( "Person 1" ) , base : : string16 ( ) , 0 , std : : string ( ) ) ;
base : : string16 profile_name ( ASCIIToUTF16 ( "Person 2" ) ) ;
GetCache ( ) -> AddProfileToCache ( GetProfilePath ( "path_2" ) , profile_name , base : : string16 ( ) , 0 , std : : string ( ) ) ;
int index1 = GetCache ( ) -> GetIndexOfProfileWithPath ( GetProfilePath ( "path_1" ) ) ;
int index2 = GetCache ( ) -> GetIndexOfProfileWithPath ( GetProfilePath ( "path_2" ) ) ;
// Initially neither profile has a GAIA name.
EXPECT_TRUE ( GetCache ( ) -> GetGAIANameOfProfileAtIndex ( index1 ) . empty ( ) ) ;
EXPECT_TRUE ( GetCache ( ) -> GetGAIANameOfProfileAtIndex ( index2 ) . empty ( ) ) ;
base : : string16 gaia_name ( ASCIIToUTF16 ( "Pat Smith" ) ) ;
GetCache ( ) -> SetGAIANameOfProfileAtIndex ( index2 , gaia_name ) ;
// Indices are re-fetched because cache mutations may reorder entries.
index1 = GetCache ( ) -> GetIndexOfProfileWithPath ( GetProfilePath ( "path_1" ) ) ;
index2 = GetCache ( ) -> GetIndexOfProfileWithPath ( GetProfilePath ( "path_2" ) ) ;
EXPECT_TRUE ( GetCache ( ) -> GetGAIANameOfProfileAtIndex ( index1 ) . empty ( ) ) ;
EXPECT_EQ ( gaia_name , GetCache ( ) -> GetGAIANameOfProfileAtIndex ( index2 ) ) ;
EXPECT_EQ ( gaia_name , GetCache ( ) -> GetNameOfProfileAtIndex ( index2 ) ) ;
base : : string16 custom_name ( ASCIIToUTF16 ( "Custom name" ) ) ;
GetCache ( ) -> SetNameOfProfileAtIndex ( index2 , custom_name ) ;
GetCache ( ) -> SetProfileIsUsingDefaultNameAtIndex ( index2 , false ) ;
index1 = GetCache ( ) -> GetIndexOfProfileWithPath ( GetProfilePath ( "path_1" ) ) ;
index2 = GetCache ( ) -> GetIndexOfProfileWithPath ( GetProfilePath ( "path_2" ) ) ;
EXPECT_EQ ( custom_name , GetCache ( ) -> GetNameOfProfileAtIndex ( index2 ) ) ;
EXPECT_EQ ( gaia_name , GetCache ( ) -> GetGAIANameOfProfileAtIndex ( index2 ) ) ;
}
| 0
|
9,060
|
/* A filename is accepted only when it is printable and contains neither a
 * path separator nor a dot (which also rules out "." and ".." traversal). */
static gboolean is_correct_filename(const char *value)
{
    if (!printable_str(value))
        return 0;
    if (strchr(value, '/') != NULL)
        return 0;
    return strchr(value, '.') == NULL;
}
| 1
|
156,324
|
/* Clears the parts of the context that are not permission grants —
 * currently just the environment-variable overrides. */
flatpak_context_reset_non_permissions (FlatpakContext *context)
{
g_hash_table_remove_all (context->env_vars);
}
| 0
|
509,713
|
/* Regression test: SHOW TABLE STATUS must not crash when a stray/corrupt
 * .frm file exists in the database directory. The test plants a junk .frm,
 * runs SHOW TABLE STATUS, and checks the comment column is populated. */
static void test_frm_bug()
{
MYSQL_STMT *stmt;
MYSQL_BIND my_bind[2];
MYSQL_RES *result;
MYSQL_ROW row;
FILE *test_file;
char data_dir[FN_REFLEN];
char test_frm[FN_REFLEN];
int rc;
myheader("test_frm_bug");
mysql_autocommit(mysql, TRUE);
rc= mysql_query(mysql, "drop table if exists test_frm_bug");
myquery(rc);
rc= mysql_query(mysql, "flush tables");
myquery(rc);
/* Discover the server's data directory to know where to plant the file. */
stmt= mysql_simple_prepare(mysql, "show variables like 'datadir'");
check_stmt(stmt);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
bzero((char*) my_bind, sizeof(my_bind));
my_bind[0].buffer_type= MYSQL_TYPE_STRING;
my_bind[0].buffer= data_dir;
my_bind[0].buffer_length= FN_REFLEN;
my_bind[1]= my_bind[0];
rc= mysql_stmt_bind_result(stmt, my_bind);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n data directory: %s", data_dir);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == MYSQL_NO_DATA);
strxmov(test_frm, data_dir, "/", current_db, "/", "test_frm_bug.frm", NullS);
if (!opt_silent)
fprintf(stdout, "\n test_frm: %s", test_frm);
if (!(test_file= my_fopen(test_frm, (int) (O_RDWR | O_CREAT), MYF(MY_WME))))
{
fprintf(stdout, "\n ERROR: my_fopen failed for '%s'", test_frm);
fprintf(stdout, "\n test cancelled");
exit(1);
}
if (!opt_silent)
fprintf(test_file, "this is a junk file for test");
rc= mysql_query(mysql, "SHOW TABLE STATUS like 'test_frm_bug'");
myquery(rc);
result= mysql_store_result(mysql);
mytest(result);/* It can't be NULL */
rc= my_process_result_set(result);
DIE_UNLESS(rc == 1);
mysql_data_seek(result, 0);
row= mysql_fetch_row(result);
mytest(row);
if (!opt_silent)
fprintf(stdout, "\n Comment: %s", row[17]);
DIE_UNLESS(row[17] != 0);
/* Clean up: result set, statement, junk file, and the table name. */
mysql_free_result(result);
mysql_stmt_close(stmt);
my_fclose(test_file, MYF(0));
mysql_query(mysql, "drop table if exists test_frm_bug");
}
| 0
|
186,079
|
// Dispatches an extension API call described by |params| that arrived from
// the renderer behind |render_view_host|: resolves the target extension,
// instantiates the matching ExtensionFunction, applies permission and quota
// checks, then runs the function (or records the failure).
void ExtensionFunctionDispatcher::Dispatch(
    const ExtensionHostMsg_Request_Params& params,
    RenderViewHost* render_view_host) {
  ExtensionService* service = profile()->GetExtensionService();
  ExtensionProcessManager* process_manager =
      extensions::ExtensionSystem::Get(profile())->process_manager();
  // BUG FIX: the original code called service->process_map() before the
  // "if (!service || ...)" check, dereferencing a possibly-null |service|
  // before it could be rejected. Check first, then dereference.
  if (!service)
    return;
  extensions::ProcessMap* process_map = service->process_map();
  if (!process_map)
    return;
  const Extension* extension = service->extensions()->GetByID(
      params.extension_id);
  if (!extension)
    extension = service->extensions()->GetHostedAppByURL(ExtensionURLInfo(
        WebSecurityOrigin::createFromString(params.source_origin),
        params.source_url));
  scoped_refptr<ExtensionFunction> function(
      CreateExtensionFunction(params, extension,
                              render_view_host->GetProcess()->GetID(),
                              *process_map,
                              extensions::ExtensionAPI::GetSharedInstance(),
                              profile(), render_view_host, render_view_host,
                              render_view_host->GetRoutingID()));
  scoped_ptr<ListValue> args(params.arguments.DeepCopy());
  if (!function) {
    // Unknown function name or caller not allowed to create it.
    LogFailure(extension,
               params.name,
               args.Pass(),
               kAccessDenied,
               profile());
    return;
  }
  UIThreadExtensionFunction* function_ui =
      function->AsUIThreadExtensionFunction();
  if (!function_ui) {
    NOTREACHED();
    return;
  }
  function_ui->set_dispatcher(AsWeakPtr());
  function_ui->set_profile(profile_);
  function->set_include_incognito(service->CanCrossIncognito(extension));
  if (!CheckPermissions(function, extension, params, render_view_host,
                        render_view_host->GetRoutingID())) {
    LogFailure(extension,
               params.name,
               args.Pass(),
               kAccessDenied,
               profile());
    return;
  }
  // Rate-limit the call against the extension's quota before running it.
  ExtensionsQuotaService* quota = service->quota_service();
  std::string violation_error = quota->Assess(extension->id(),
                                              function,
                                              &params.arguments,
                                              base::TimeTicks::Now());
  if (violation_error.empty()) {
    ExternalProtocolHandler::PermitLaunchUrl();
    LogSuccess(extension, params.name, args.Pass(), profile());
    function->Run();
  } else {
    LogFailure(extension,
               params.name,
               args.Pass(),
               kQuotaExceeded,
               profile());
    function->OnQuotaExceeded(violation_error);
  }
  // Running the function may have unloaded the extension; re-check before
  // touching its keepalive count.
  if (!service->extensions()->GetByID(params.extension_id))
    return;
  process_manager->IncrementLazyKeepaliveCount(extension);
}
| 0
|
459,379
|
/* Defaults callback for the "log_host" flag: tells the event logger to
 * omit the hostname when the flag is off. Always reports success. */
cb_log_host(const union sudo_defs_val *sd_un)
{
debug_decl(cb_syslog_maxlen, SUDOERS_DEBUG_PLUGIN);
eventlog_set_omit_hostname(!sd_un->flag);
debug_return_bool(true);
}
| 0
|
103,625
|
// Feeds the stream's request/response headers and trailers (any of which
// may be absent and passed as nullptr) plus the stream info to every
// registered access-log handler.
void log() {
RequestHeaderMap* request_headers = nullptr;
if (filter_manager_callbacks_.requestHeaders()) {
request_headers = filter_manager_callbacks_.requestHeaders().ptr();
}
ResponseHeaderMap* response_headers = nullptr;
if (filter_manager_callbacks_.responseHeaders()) {
response_headers = filter_manager_callbacks_.responseHeaders().ptr();
}
ResponseTrailerMap* response_trailers = nullptr;
if (filter_manager_callbacks_.responseTrailers()) {
response_trailers = filter_manager_callbacks_.responseTrailers().ptr();
}
for (const auto& log_handler : access_log_handlers_) {
log_handler->log(request_headers, response_headers, response_trailers, stream_info_);
}
}
| 0
|
129,694
|
/* Allocates and initializes a hugetlbfs inode of the given mode/device:
 * sets ownership from |dir|, wires up the address-space ops and timestamps,
 * initializes the shared NUMA policy, and installs the per-file-type
 * operations. Returns NULL when inode allocation fails. */
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct inode *dir,
umode_t mode, dev_t dev)
{
struct inode *inode;
inode = new_inode(sb);
if (inode) {
struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
INIT_LIST_HEAD(&inode->i_mapping->private_list);
info = HUGETLBFS_I(inode);
/*
 * The policy is initialized here even if we are creating a
 * private inode because initialization simply creates an
 * an empty rb tree and calls spin_lock_init(), later when we
 * call mpol_free_shared_policy() it will just return because
 * the rb tree will still be empty.
 */
mpol_shared_policy_init(&info->policy, NULL);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
inode->i_op = &hugetlbfs_inode_operations;
inode->i_fop = &hugetlbfs_file_operations;
break;
case S_IFDIR:
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
break;
case S_IFLNK:
inode->i_op = &page_symlink_inode_operations;
break;
}
lockdep_annotate_inode_mutex_key(inode);
}
return inode;
}
| 0
|
476,579
|
/* Sliding-window page-cache eviction for sequential writes: keeps a small
 * per-thread queue of recently written ranges, evicts the oldest from the
 * page cache, and kicks off asynchronous writeback for the newest range. */
evict_writes (struct rw_file *rwf, uint64_t offset, size_t len)
{
static __thread struct write_window window[NR_WINDOWS];
/* Evict the oldest window from the page cache. */
if (window[0].len > 0) {
sync_file_range (rwf->fd, window[0].offset, window[0].len,
SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|
SYNC_FILE_RANGE_WAIT_AFTER);
posix_fadvise (rwf->fd, window[0].offset, window[0].len,
POSIX_FADV_DONTNEED);
}
/* Move the Nth window to N-1. */
memmove (&window[0], &window[1], sizeof window[0] * (NR_WINDOWS-1));
/* Set up the current window and tell Linux to start writing it out
 * to disk (asynchronously).
 */
sync_file_range (rwf->fd, offset, len, SYNC_FILE_RANGE_WRITE);
window[NR_WINDOWS-1].offset = offset;
window[NR_WINDOWS-1].len = len;
}
| 0
|
204,867
|
// Thread-safe read of the early-exit flag (set from another thread to ask
// the sync cycle to stop).
bool Syncer::ExitRequested() {
base::AutoLock lock(early_exit_requested_lock_);
return early_exit_requested_;
}
| 0
|
2,939
|
/* NFSv3 SETACL procedure: verifies the file handle, then applies the
 * access and default POSIX ACLs from the request via the inode's set_acl
 * operation, wrapped in a want/drop write reference. The decoded ACLs are
 * always released before returning, even on error paths. */
static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
struct nfsd3_setaclargs *argp,
struct nfsd3_attrstat *resp)
{
struct inode *inode;
svc_fh *fh;
__be32 nfserr = 0;
int error;
fh = fh_copy(&resp->fh, &argp->fh);
nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_SATTR);
if (nfserr)
goto out;
inode = d_inode(fh->fh_dentry);
if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
error = -EOPNOTSUPP;
goto out_errno;
}
error = fh_want_write(fh);
if (error)
goto out_errno;
error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
if (error)
goto out_drop_write;
error = inode->i_op->set_acl(inode, argp->acl_default,
ACL_TYPE_DEFAULT);
out_drop_write:
fh_drop_write(fh);
out_errno:
nfserr = nfserrno(error);
out:
/* argp->acl_{access,default} may have been allocated in
nfs3svc_decode_setaclargs. */
posix_acl_release(argp->acl_access);
posix_acl_release(argp->acl_default);
RETURN_STATUS(nfserr);
}
| 1
|
166,906
|
// Loads a web archive into the frame: builds substitute data from the
// archive's main resource, creates a document loader seeded with all of the
// archive's subresources, and starts the load.
void FrameLoader::loadArchive(PassRefPtr<Archive> prpArchive)
{
RefPtr<Archive> archive = prpArchive;
ArchiveResource* mainResource = archive->mainResource();
ASSERT(mainResource);
if (!mainResource)
return;
SubstituteData substituteData(mainResource->data(), mainResource->mimeType(), mainResource->textEncoding(), KURL());
ResourceRequest request(mainResource->url());
#if PLATFORM(MAC)
request.applyWebArchiveHackForMail();
#endif
RefPtr<DocumentLoader> documentLoader = m_client->createDocumentLoader(request, substituteData);
documentLoader->addAllArchiveResources(archive.get());
load(documentLoader.get());
}
| 0
|
224,567
|
// Hands the pending policy-decision download (identified by
// m_policyDownloadID) to the download manager, then clears the id so a
// single policy decision cannot start two downloads.
void WebFrame::startDownload(const WebCore::ResourceRequest& request)
{
ASSERT(m_policyDownloadID);
DownloadManager::shared().startDownload(m_policyDownloadID, page(), request);
m_policyDownloadID = 0;
}
| 0
|
206,473
|
/* Picks the language for a shaping run: an explicitly configured shaper
 * language wins; otherwise the language implied by |script|, falling back
 * to the process default when the script implies none. */
hb_shaper_get_run_language(ASS_Shaper *shaper, hb_script_t script)
{
    hb_language_t language;
    if (shaper->language != HB_LANGUAGE_INVALID)
        return shaper->language;
    language = script_to_language(script);
    return (language == HB_LANGUAGE_INVALID) ? hb_language_get_default()
                                             : language;
}
| 0
|
222,906
|
/* Delivers an incoming packet |buf| of |size| bytes to the guest's receive
 * virtqueue. Pops as many descriptors as needed (mergeable-rx-buffer mode
 * may span several), prepends the virtio-net header, copies the payload,
 * patches num_buffers for mergeable mode, and notifies the guest.
 * Returns the consumed size, 0 to retry later (no buffers), or -1 to drop. */
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
VirtIODevice *vdev = VIRTIO_DEVICE(n);
struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
struct virtio_net_hdr_mrg_rxbuf mhdr;
unsigned mhdr_cnt = 0;
size_t offset, i, guest_offset;
if (!virtio_net_can_receive(nc)) {
return -1;
}
/* hdr_len refers to the header we supply to the guest */
if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
return 0;
}
/* Packets rejected by the RX filter are reported as consumed. */
if (!receive_filter(n, buf, size))
return size;
offset = i = 0;
while (offset < size) {
VirtQueueElement elem;
int len, total;
const struct iovec *sg = elem.in_sg;
total = 0;
if (virtqueue_pop(q->rx_vq, &elem) == 0) {
if (i == 0)
return -1;
error_report("virtio-net unexpected empty queue: "
"i %zd mergeable %d offset %zd, size %zd, "
"guest hdr len %zd, host hdr len %zd guest features 0x%x",
i, n->mergeable_rx_bufs, offset, size,
n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
exit(1);
}
if (elem.in_num < 1) {
error_report("virtio-net receive queue contains no in buffers");
exit(1);
}
if (i == 0) {
assert(offset == 0);
if (n->mergeable_rx_bufs) {
/* Remember where num_buffers lives so it can be patched at the end. */
mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
sg, elem.in_num,
offsetof(typeof(mhdr), num_buffers),
sizeof(mhdr.num_buffers));
}
receive_header(n, sg, elem.in_num, buf, size);
offset = n->host_hdr_len;
total += n->guest_hdr_len;
guest_offset = n->guest_hdr_len;
} else {
guest_offset = 0;
}
/* copy in packet. ugh */
len = iov_from_buf(sg, elem.in_num, guest_offset,
buf + offset, size - offset);
total += len;
offset += len;
/* If buffers can't be merged, at this point we
* must have consumed the complete packet.
* Otherwise, drop it. */
if (!n->mergeable_rx_bufs && offset < size) {
#if 0
error_report("virtio-net truncated non-mergeable packet: "
"i %zd mergeable %d offset %zd, size %zd, "
"guest hdr len %zd, host hdr len %zd",
i, n->mergeable_rx_bufs,
offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
return size;
}
/* signal other side */
virtqueue_fill(q->rx_vq, &elem, total, i++);
}
if (mhdr_cnt) {
/* Mergeable mode: write the final buffer count into the header. */
stw_p(&mhdr.num_buffers, i);
iov_from_buf(mhdr_sg, mhdr_cnt,
0,
&mhdr.num_buffers, sizeof mhdr.num_buffers);
}
virtqueue_flush(q->rx_vq, i);
virtio_notify(vdev, q->rx_vq);
return size;
}
| 0
|
455,788
|
// Ensures every server in the topology speaks a wire version new enough for
// maxStalenessSeconds-based selection; throws IncompatibleServerVersion
// (via uassert) on the first server that is too old.
// Note: maxStalenessSeconds itself is not consulted here — presumably the
// caller only invokes this when the option is set; confirm at call sites.
void SdamServerSelector::_verifyMaxstalenessWireVersions(TopologyDescriptionPtr topologyDescription,
Seconds maxStalenessSeconds) {
for (auto& server : topologyDescription->getServers()) {
uassert(ErrorCodes::IncompatibleServerVersion,
"Incompatible wire version",
server->getMaxWireVersion() >= WireVersion::COMMANDS_ACCEPT_WRITE_CONCERN);
}
}
| 0
|
223,340
|
// GPU-to-GPU texture upload for canvas/ImageBitmap sources. When a direct
// CopyTextureCHROMIUM into the destination is possible, copies straight in;
// otherwise stages the pixels in a temporary 2D texture and copies from a
// temporary framebuffer into the destination level/offset, cleaning up the
// temporary GL objects afterwards.
void WebGLRenderingContextBase::TexImageByGPU(
TexImageFunctionID function_id,
WebGLTexture* texture,
GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLint zoffset,
CanvasImageSource* image,
const IntRect& source_sub_rectangle) {
DCHECK(image->IsCanvasElement() || image->IsImageBitmap());
int width = source_sub_rectangle.Width();
int height = source_sub_rectangle.Height();
ScopedTexture2DRestorer restorer(this);
GLuint target_texture = texture->Object();
bool possible_direct_copy = false;
if (function_id == kTexImage2D || function_id == kTexSubImage2D) {
possible_direct_copy = Extensions3DUtil::CanUseCopyTextureCHROMIUM(target);
}
GLint copy_x_offset = xoffset;
GLint copy_y_offset = yoffset;
GLenum copy_target = target;
if (!possible_direct_copy) {
// Indirect path: stage into a freshly created RGBA 2D texture at (0,0).
ContextGL()->GenTextures(1, &target_texture);
ContextGL()->BindTexture(GL_TEXTURE_2D, target_texture);
ContextGL()->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
GL_NEAREST);
ContextGL()->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
GL_NEAREST);
ContextGL()->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
GL_CLAMP_TO_EDGE);
ContextGL()->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
GL_CLAMP_TO_EDGE);
ContextGL()->TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
copy_x_offset = 0;
copy_y_offset = 0;
copy_target = GL_TEXTURE_2D;
}
{
// Pixel-store parameters must not affect the GPU-side copy.
ScopedUnpackParametersResetRestore temporaryResetUnpack(this);
if (image->IsCanvasElement()) {
TexImageCanvasByGPU(function_id, static_cast<HTMLCanvasElement*>(image),
copy_target, target_texture, copy_x_offset,
copy_y_offset, source_sub_rectangle);
} else {
TexImageBitmapByGPU(static_cast<ImageBitmap*>(image), copy_target,
target_texture, !unpack_flip_y_, copy_x_offset,
copy_y_offset, source_sub_rectangle);
}
}
if (!possible_direct_copy) {
// Copy from the staging texture (via a temporary FBO) into the real
// destination texture, then destroy the temporaries.
GLuint tmp_fbo;
ContextGL()->GenFramebuffers(1, &tmp_fbo);
ContextGL()->BindFramebuffer(GL_FRAMEBUFFER, tmp_fbo);
ContextGL()->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, target_texture, 0);
ContextGL()->BindTexture(texture->GetTarget(), texture->Object());
if (function_id == kTexImage2D) {
ContextGL()->CopyTexSubImage2D(target, level, 0, 0, 0, 0, width, height);
} else if (function_id == kTexSubImage2D) {
ContextGL()->CopyTexSubImage2D(target, level, xoffset, yoffset, 0, 0,
width, height);
} else if (function_id == kTexSubImage3D) {
ContextGL()->CopyTexSubImage3D(target, level, xoffset, yoffset, zoffset,
0, 0, width, height);
}
ContextGL()->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, 0, 0);
RestoreCurrentFramebuffer();
ContextGL()->DeleteFramebuffers(1, &tmp_fbo);
ContextGL()->DeleteTextures(1, &target_texture);
}
}
| 0
|
141,464
|
GF_Err stri_box_read(GF_Box *s, GF_BitStream *bs)
{
size_t i;
GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s;
ISOM_DECREASE_SIZE(ptr, 8)
ptr->switch_group = gf_bs_read_u16(bs);
ptr->alternate_group = gf_bs_read_u16(bs);
ptr->sub_track_id = gf_bs_read_u32(bs);
ptr->attribute_count = ptr->size / 4;
GF_SAFE_ALLOC_N(ptr->attribute_list, (size_t)ptr->attribute_count, u32);
if (!ptr->attribute_list) return GF_OUT_OF_MEM;
for (i = 0; i < ptr->attribute_count; i++) {
ISOM_DECREASE_SIZE(ptr, 4)
ptr->attribute_list[i] = gf_bs_read_u32(bs);
}
return GF_OK;
| 0
|
7,409
|
private int
mcopy(struct magic_set *ms, union VALUETYPE *p, int type, int indir,
const unsigned char *s, uint32_t offset, size_t nbytes, size_t linecnt)
{
/*
* Note: FILE_SEARCH and FILE_REGEX do not actually copy
* anything, but setup pointers into the source
*/
if (indir == 0) {
switch (type) {
case FILE_SEARCH:
ms->search.s = RCAST(const char *, s) + offset;
ms->search.s_len = nbytes - offset;
ms->search.offset = offset;
return 0;
case FILE_REGEX: {
const char *b;
const char *c;
const char *last; /* end of search region */
const char *buf; /* start of search region */
const char *end;
size_t lines;
if (s == NULL) {
ms->search.s_len = 0;
ms->search.s = NULL;
return 0;
}
buf = RCAST(const char *, s) + offset;
end = last = RCAST(const char *, s) + nbytes;
/* mget() guarantees buf <= last */
for (lines = linecnt, b = buf; lines && b < end &&
((b = CAST(const char *,
memchr(c = b, '\n', CAST(size_t, (end - b)))))
|| (b = CAST(const char *,
memchr(c, '\r', CAST(size_t, (end - c))))));
lines--, b++) {
last = b;
if (b[0] == '\r' && b[1] == '\n')
b++;
}
if (lines)
last = RCAST(const char *, s) + nbytes;
ms->search.s = buf;
ms->search.s_len = last - buf;
ms->search.offset = offset;
ms->search.rm_len = 0;
return 0;
}
case FILE_BESTRING16:
case FILE_LESTRING16: {
const unsigned char *src = s + offset;
const unsigned char *esrc = s + nbytes;
char *dst = p->s;
char *edst = &p->s[sizeof(p->s) - 1];
if (type == FILE_BESTRING16)
src++;
/* check that offset is within range */
if (offset >= nbytes)
break;
for (/*EMPTY*/; src < esrc; src += 2, dst++) {
if (dst < edst)
*dst = *src;
else
break;
if (*dst == '\0') {
if (type == FILE_BESTRING16 ?
*(src - 1) != '\0' :
*(src + 1) != '\0')
*dst = ' ';
}
}
*edst = '\0';
return 0;
}
case FILE_STRING: /* XXX - these two should not need */
case FILE_PSTRING: /* to copy anything, but do anyway. */
default:
break;
}
}
if (offset >= nbytes) {
(void)memset(p, '\0', sizeof(*p));
return 0;
}
if (nbytes - offset < sizeof(*p))
nbytes = nbytes - offset;
else
nbytes = sizeof(*p);
(void)memcpy(p, s + offset, nbytes);
/*
* the usefulness of padding with zeroes eludes me, it
* might even cause problems
*/
if (nbytes < sizeof(*p))
(void)memset(((char *)(void *)p) + nbytes, '\0',
sizeof(*p) - nbytes);
| 1
|
70,374
|
/*
 * Start callback for the PAM user/password VNC security type: put the
 * client into the RFB authentication protocol state so the credential
 * exchange (driven by subsequent callbacks) can proceed.
 */
static void AuthPAMUserPwdStartFunc(rfbClientPtr cl)
{
    cl->state = RFB_AUTHENTICATION;
}
| 0
|
442,439
|
void checkValue (void* sampleRawData,
int sampleCount,
int channelType,
int dwx,
int dwy)
{
for (int l = 0; l < sampleCount; l++)
{
if (channelType == 0)
{
unsigned int* value = (unsigned int*)(sampleRawData);
if (value[l] != static_cast<unsigned int>((dwy * width + dwx) % 2049))
cout << dwx << ", " << dwy << " error, should be "
<< (dwy * width + dwx) % 2049 << ", is " << value[l]
<< endl << flush;
assert (value[l] == static_cast<unsigned int>((dwy * width + dwx) % 2049));
}
if (channelType == 1)
{
half* value = (half*)(sampleRawData);
if (value[l] != (dwy * width + dwx) % 2049)
cout << dwx << ", " << dwy << " error, should be "
<< (dwy * width + dwx) % 2049 << ", is " << value[l]
<< endl << flush;
assert (value[l] == (dwy * width + dwx) % 2049);
}
if (channelType == 2)
{
float* value = (float*)(sampleRawData);
if (value[l] != (dwy * width + dwx) % 2049)
cout << dwx << ", " << dwy << " error, should be "
<< (dwy * width + dwx) % 2049 << ", is " << value[l]
<< endl << flush;
assert (value[l] == (dwy * width + dwx) % 2049);
}
}
}
| 0
|
477,557
|
/* Notify one registered listener that the head of a filter chain has
 * changed.  @tp_head is the new first tcf_proto of the chain; the
 * listener's opaque chain_head_change_priv cookie is passed through
 * unchanged.  Listeners without a callback are silently skipped. */
static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (!item->chain_head_change)
		return;

	item->chain_head_change(tp_head, item->chain_head_change_priv);
}
| 0
|
348,865
|
/*
 * Allocate and discover a child expander attached behind @parent on phy
 * @phy_id.
 *
 * Rejects direct-routed expander-to-expander links (a SAS topology
 * violation), allocates the domain device plus its sas_port/rphy sysfs
 * objects, links the child into the parent port's device list, and then
 * recursively discovers the child expander's own phys.
 *
 * Returns the new child device, or NULL on rejection or failure.
 *
 * NOTE(review): sas_port_alloc() can fail; phy->port is passed to
 * sas_port_add() unchecked here -- confirm allocation failure cannot
 * occur in this context, or add a NULL check.
 * NOTE(review): on the sas_discover_expander() failure path phy->port
 * is not explicitly deleted -- verify sas_rphy_delete() tears the port
 * down, otherwise this leaks the port.
 */
static struct domain_device *sas_ex_discover_expander(
	struct domain_device *parent, int phy_id)
{
	struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy);
	struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
	struct domain_device *child = NULL;
	struct sas_rphy *rphy;
	struct sas_expander_device *edev;
	struct asd_sas_port *port;
	int res;

	/* Expander reached over a direct-routing phy: not allowed. */
	if (phy->routing_attr == DIRECT_ROUTING) {
		pr_warn("ex %016llx:%02d:D <--> ex %016llx:0x%x is not allowed\n",
			SAS_ADDR(parent->sas_addr), phy_id,
			SAS_ADDR(phy->attached_sas_addr),
			phy->attached_phy_id);
		return NULL;
	}
	child = sas_alloc_device();
	if (!child)
		return NULL;
	phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
	/* FIXME: better error handling */
	BUG_ON(sas_port_add(phy->port) != 0);
	switch (phy->attached_dev_type) {
	case SAS_EDGE_EXPANDER_DEVICE:
		rphy = sas_expander_alloc(phy->port,
					  SAS_EDGE_EXPANDER_DEVICE);
		break;
	case SAS_FANOUT_EXPANDER_DEVICE:
		rphy = sas_expander_alloc(phy->port,
					  SAS_FANOUT_EXPANDER_DEVICE);
		break;
	default:
		rphy = NULL; /* shut gcc up */
		BUG();
	}
	port = parent->port;
	child->rphy = rphy;
	/* Hold a reference on the rphy for the child's lifetime. */
	get_device(&rphy->dev);
	edev = rphy_to_expander_device(rphy);
	child->dev_type = phy->attached_dev_type;
	/* The child pins its parent until the child itself is released. */
	kref_get(&parent->kref);
	child->parent = parent;
	child->port = port;
	child->iproto = phy->attached_iproto;
	child->tproto = phy->attached_tproto;
	memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
	sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
	sas_ex_get_linkrate(parent, child, phy);
	edev->level = parent_ex->level + 1;
	parent->port->disc.max_level = max(parent->port->disc.max_level,
					   edev->level);
	sas_init_dev(child);
	sas_fill_in_rphy(child, rphy);
	sas_rphy_add(rphy);

	spin_lock_irq(&parent->port->dev_list_lock);
	list_add_tail(&child->dev_list_node, &parent->port->dev_list);
	spin_unlock_irq(&parent->port->dev_list_lock);

	res = sas_discover_expander(child);
	if (res) {
		/* Discovery failed: unregister the rphy, unlink the child
		 * from the port's device list, and drop our reference. */
		sas_rphy_delete(rphy);
		spin_lock_irq(&parent->port->dev_list_lock);
		list_del(&child->dev_list_node);
		spin_unlock_irq(&parent->port->dev_list_lock);
		sas_put_device(child);
		return NULL;
	}
	list_add_tail(&child->siblings, &parent->ex_dev.children);
	return child;
}
| 1
|
485,882
|
/*
 * Catch-all state function invoked when a received chunk violates the
 * SCTP state machine rules.  It performs no side effects itself; it
 * simply returns SCTP_DISPOSITION_VIOLATION so the caller can react.
 * All parameters are required by the common sctp state-function
 * signature and are intentionally unused here.
 */
sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
				     const struct sctp_association *asoc,
				     const sctp_subtype_t type,
				     void *arg,
				     sctp_cmd_seq_t *commands)
{
	return SCTP_DISPOSITION_VIOLATION;
}
| 0
|
416,848
|
/*
 * Demultiplex one rtnetlink request.
 *
 * Looks up the per-family handler table under RCU (falling back to
 * PF_UNSPEC when the family has no handlers registered) and dispatches
 * either to the dumpit callback (GET requests carrying NLM_F_DUMP) or
 * to the doit callback, taking rtnl_lock unless the handler was
 * registered with RTNL_FLAG_DOIT_UNLOCKED.
 *
 * Returns 0 or a negative errno: -EOPNOTSUPP when no handler exists,
 * -EPERM for unprivileged non-GET requests.
 */
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *handlers;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int kind;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	/* Low two bits of the type encode the operation; 2 == GET. */
	kind = type&3;

	/* Anything other than a GET requires CAP_NET_ADMIN. */
	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (family >= ARRAY_SIZE(rtnl_msg_handlers))
		family = PF_UNSPEC;

	rcu_read_lock();
	handlers = rcu_dereference(rtnl_msg_handlers[family]);
	if (!handlers) {
		family = PF_UNSPEC;
		handlers = rcu_dereference(rtnl_msg_handlers[family]);
	}

	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u16 min_dump_alloc = 0;

		dumpit = READ_ONCE(handlers[type].dumpit);
		if (!dumpit) {
			/* No family-specific dumper: retry PF_UNSPEC. */
			family = PF_UNSPEC;
			handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
			if (!handlers)
				goto err_unlock;

			dumpit = READ_ONCE(handlers[type].dumpit);
			if (!dumpit)
				goto err_unlock;
		}

		/* Pin the handler table across the rcu_read_unlock() below;
		 * the dump runs outside the RCU read section. */
		refcount_inc(&rtnl_msg_handlers_ref[family]);

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		rcu_read_unlock();

		rtnl = net->rtnl;
		{
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
		}
		refcount_dec(&rtnl_msg_handlers_ref[family]);
		return err;
	}

	doit = READ_ONCE(handlers[type].doit);
	if (!doit) {
		/* Fall back to the generic PF_UNSPEC handlers. */
		family = PF_UNSPEC;
		handlers = rcu_dereference(rtnl_msg_handlers[family]);
	}

	flags = READ_ONCE(handlers[type].flags);
	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		/* Handler runs without rtnl_lock; keep the table pinned
		 * while it executes outside the RCU read section. */
		refcount_inc(&rtnl_msg_handlers_ref[family]);
		doit = READ_ONCE(handlers[type].doit);
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		refcount_dec(&rtnl_msg_handlers_ref[family]);
		return err;
	}

	rcu_read_unlock();

	/* Locked path: re-fetch the handlers under rtnl_lock. */
	rtnl_lock();
	handlers = rtnl_dereference(rtnl_msg_handlers[family]);
	if (handlers) {
		doit = READ_ONCE(handlers[type].doit);
		if (doit)
			err = doit(skb, nlh, extack);
	}
	rtnl_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
| 0
|
411,433
|
}
static double nan() {
#ifdef NAN
return (double)NAN;
#else
const double val_nan = -std::sqrt(-1.0); return val_nan;
#endif
| 0
|
332,697
|
/*
 * Register one coprocessor register definition (possibly wildcarded)
 * in @cpu's cp_regs hash table.
 *
 * @cpu:    CPU to attach the register(s) to
 * @r:      register definition; crm/opc1/opc2 may be CP_ANY wildcards
 * @opaque: optional pointer stored in each expanded reginfo for use by
 *          the register's read/write callbacks
 *
 * Every wildcarded field is expanded over its full range and one hash
 * entry is inserted per (crm, opc1, opc2) combination, keyed by the
 * encoded cp register number.
 */
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     */
    int crm, opc1, opc2;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                /* key and r2 are freshly allocated per combination;
                 * ownership passes to the hash table on insert. */
                uint32_t *key = g_new(uint32_t, 1);
                ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
                int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
                *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
                if (opaque) {
                    r2->opaque = opaque;
                }
                /* Make sure reginfo passed to helpers for wildcarded regs
                 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
                 */
                r2->crm = crm;
                r2->opc1 = opc1;
                r2->opc2 = opc2;
                /* By convention, for wildcarded registers only the first
                 * entry is used for migration; the others are marked as
                 * NO_MIGRATE so we don't try to transfer the register
                 * multiple times. Special registers (ie NOP/WFI) are
                 * never migratable.
                 */
                if ((r->type & ARM_CP_SPECIAL) ||
                    ((r->crm == CP_ANY) && crm != 0) ||
                    ((r->opc1 == CP_ANY) && opc1 != 0) ||
                    ((r->opc2 == CP_ANY) && opc2 != 0)) {
                    r2->type |= ARM_CP_NO_MIGRATE;
                }
                /* Overriding of an existing definition must be explicitly
                 * requested.
                 */
                if (!(r->type & ARM_CP_OVERRIDE)) {
                    ARMCPRegInfo *oldreg;
                    oldreg = g_hash_table_lookup(cpu->cp_regs, key);
                    if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
                        fprintf(stderr, "Register redefined: cp=%d %d bit "
                                "crn=%d crm=%d opc1=%d opc2=%d, "
                                "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                                r2->crn, r2->crm, r2->opc1, r2->opc2,
                                oldreg->name, r2->name);
                        g_assert_not_reached();
                    }
                }
                g_hash_table_insert(cpu->cp_regs, key, r2);
            }
        }
    }
}
| 0
|
265,053
|
/** Send a RELAY_RESOLVE cell for <b>ap_conn</b>'s address down its
 * attached circuit, for either a forward lookup (SOCKS RESOLVE) or a
 * reverse lookup (SOCKS RESOLVE_PTR, with the address first rewritten
 * into .in-addr.arpa / .ip6.arpa form).
 *
 * On success the connection moves to AP_CONN_STATE_RESOLVE_WAIT and 0
 * is returned; on failure the connection is marked unattached (where
 * appropriate) and -1 is returned.
 */
connection_ap_handshake_send_resolve(entry_connection_t *ap_conn)
{
  int payload_len, command;
  const char *string_addr;
  char inaddr_buf[REVERSE_LOOKUP_NAME_BUF_LEN];
  origin_circuit_t *circ;
  edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn);
  connection_t *base_conn = TO_CONN(edge_conn);
  tor_assert(edge_conn->on_circuit);
  circ = TO_ORIGIN_CIRCUIT(edge_conn->on_circuit);
  tor_assert(base_conn->type == CONN_TYPE_AP);
  tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT);
  tor_assert(ap_conn->socks_request);
  tor_assert(circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL);
  command = ap_conn->socks_request->command;
  tor_assert(SOCKS_COMMAND_IS_RESOLVE(command));

  /* A stream id of 0 means the circuit has no usable ids left. */
  edge_conn->stream_id = get_unique_stream_id_by_circ(circ);
  if (edge_conn->stream_id==0) {
    /* XXXX+ Instead of closing this stream, we should make it get
     * retried on another circuit. */
    connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
    /* Mark this circuit "unusable for new streams". */
    mark_circuit_unusable_for_new_conns(circ);
    return -1;
  }

  if (command == SOCKS_COMMAND_RESOLVE) {
    /* Forward lookup: send the hostname verbatim, NUL included. */
    string_addr = ap_conn->socks_request->address;
    payload_len = (int)strlen(string_addr)+1;
  } else {
    /* command == SOCKS_COMMAND_RESOLVE_PTR */
    const char *a = ap_conn->socks_request->address;
    tor_addr_t addr;
    int r;
    /* We're doing a reverse lookup. The input could be an IP address, or
     * could be an .in-addr.arpa or .ip6.arpa address */
    r = tor_addr_parse_PTR_name(&addr, a, AF_UNSPEC, 1);
    if (r <= 0) {
      log_warn(LD_APP, "Rejecting ill-formed reverse lookup of %s",
               safe_str_client(a));
      connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
      return -1;
    }
    /* Canonicalize into the PTR hostname that gets sent on the wire. */
    r = tor_addr_to_PTR_name(inaddr_buf, sizeof(inaddr_buf), &addr);
    if (r < 0) {
      log_warn(LD_BUG, "Couldn't generate reverse lookup hostname of %s",
               safe_str_client(a));
      connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
      return -1;
    }
    string_addr = inaddr_buf;
    payload_len = (int)strlen(inaddr_buf)+1;
    tor_assert(payload_len <= (int)sizeof(inaddr_buf));
  }

  log_debug(LD_APP,
            "Sending relay cell to begin stream %d.", edge_conn->stream_id);
  if (connection_edge_send_command(edge_conn,
                                   RELAY_COMMAND_RESOLVE,
                                   string_addr, payload_len) < 0)
    return -1; /* circuit is closed, don't continue */

  if (!base_conn->address) {
    /* This might be unnecessary. XXXX */
    base_conn->address = tor_addr_to_str_dup(&base_conn->addr);
  }
  base_conn->state = AP_CONN_STATE_RESOLVE_WAIT;
  log_info(LD_APP,"Address sent for resolve, ap socket "TOR_SOCKET_T_FORMAT
           ", n_circ_id %u",
           base_conn->s, (unsigned)circ->base_.n_circ_id);
  control_event_stream_status(ap_conn, STREAM_EVENT_SENT_RESOLVE, 0);
  return 0;
}
| 0
|
111,301
|
/*
 * Try to pin a traced task in the __TASK_TRACED state so subsequent
 * ptrace operations can access it without it being woken underneath us.
 *
 * Returns true if the task was frozen; false if it is in
 * JOBCTL_LISTENING mode, is no longer traced, or has a fatal signal
 * pending (in which case it must be allowed to die).
 *
 * NOTE(review): setting the bare __TASK_TRACED state presumably strips
 * wake-kill behavior while frozen -- confirm against the matching
 * unfreeze path.
 */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	/* State check and transition must be atomic vs. signal delivery. */
	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
| 0
|
217,244
|
// Completes a modal JavaScript dialog (alert/confirm/prompt) shown on
// behalf of the renderer: re-enables input events, re-arms the hang
// monitor if we were waiting on an (before)unload ack, and sends the
// reply IPC carrying the user's answer back to the renderer.
void RenderViewHostImpl::JavaScriptDialogClosed(IPC::Message* reply_msg,
                                                bool success,
                                                const string16& user_input) {
  // Input was ignored while the dialog was modal; restore it.
  GetProcess()->SetIgnoreInputEvents(false);
  bool is_waiting =
      is_waiting_for_beforeunload_ack_ || is_waiting_for_unload_ack_;
  // If the dialog interrupted (before)unload handling, restart the hang
  // timeout: the unload timeout when the dialog succeeded, otherwise the
  // standard hung-renderer delay.
  if (is_waiting) {
    StartHangMonitorTimeout(TimeDelta::FromMilliseconds(
        success ? kUnloadTimeoutMS : hung_renderer_delay_ms_));
  }
  // reply_msg ownership passes to Send() after the reply params are
  // written into it.
  ViewHostMsg_RunJavaScriptMessage::WriteReplyParams(reply_msg,
                                                     success, user_input);
  Send(reply_msg);
  // If further dialogs are suppressed while we wait for the unload ack,
  // surface the renderer as unresponsive so the user can intervene.
  if (is_waiting && are_javascript_messages_suppressed_)
    delegate_->RendererUnresponsive(this, is_waiting);
}
| 0
|
380,121
|
/*
 * sqlite_fetch_column_types(string table, resource db [, int result_type])
 *
 * Returns an array of the declared column types of @table, keyed by
 * column name (PHPSQLITE_ASSOC, the default) or by column index
 * (PHPSQLITE_NUM).  Works by temporarily enabling the show_datatypes
 * pragma and compiling a one-row SELECT against the table, then reading
 * the type names sqlite appends after the column names.  Returns FALSE
 * if the statement cannot be compiled.
 */
PHP_FUNCTION(sqlite_fetch_column_types)
{
	zval *zdb;
	struct php_sqlite_db *db;
	char *tbl, *sql;
	int tbl_len;
	char *errtext = NULL;
	zval *object = getThis();
	struct php_sqlite_result res;
	const char **rowdata, **colnames, *tail;
	int i, ncols;
	long result_type = PHPSQLITE_ASSOC;

	/* Method call: db comes from $this.  Function call: db is a
	 * resource argument, accepted in either argument order. */
	if (object) {
		if (FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &tbl, &tbl_len, &result_type)) {
			return;
		}
		DB_FROM_OBJECT(db, object);
	} else {
		if (FAILURE == zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET,
				ZEND_NUM_ARGS() TSRMLS_CC, "sr|l", &tbl, &tbl_len, &zdb, &result_type) &&
			FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs|l", &zdb, &tbl, &tbl_len, &result_type)) {
			return;
		}
		DB_FROM_ZVAL(db, &zdb);
	}

	/* %q escapes the table name for use inside the quoted literal. */
	if (!(sql = sqlite_mprintf("SELECT * FROM '%q' LIMIT 1", tbl))) {
		RETURN_FALSE;
	}

	sqlite_exec(db->db, "PRAGMA show_datatypes = ON", NULL, NULL, NULL);

	db->last_err_code = sqlite_compile(db->db, sql, &tail, &res.vm, &errtext);
	sqlite_freemem(sql);

	if (db->last_err_code != SQLITE_OK) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", errtext);
		sqlite_freemem(errtext);
		RETVAL_FALSE;
		goto done;
	}

	/* With show_datatypes on, colnames[ncols + i] holds the declared
	 * type of column i (may be NULL for untyped columns). */
	sqlite_step(res.vm, &ncols, &rowdata, &colnames);

	array_init(return_value);

	for (i = 0; i < ncols; i++) {
		if (result_type == PHPSQLITE_ASSOC) {
			char *colname = estrdup((char *)colnames[i]);

			/* Honor the configured assoc_case: 1 = upper, 2 = lower. */
			if (SQLITE_G(assoc_case) == 1) {
				php_sqlite_strtoupper(colname);
			} else if (SQLITE_G(assoc_case) == 2) {
				php_sqlite_strtolower(colname);
			}

			add_assoc_string(return_value, colname, colnames[ncols + i] ? (char *)colnames[ncols + i] : "", 1);
			efree(colname);
		}
		if (result_type == PHPSQLITE_NUM) {
			add_index_string(return_value, i, colnames[ncols + i] ? (char *)colnames[ncols + i] : "", 1);
		}
	}

	if (res.vm) {
		sqlite_finalize(res.vm, NULL);
	}

done:
	/* Always restore the pragma, even on the error path. */
	sqlite_exec(db->db, "PRAGMA show_datatypes = OFF", NULL, NULL, NULL);
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.