| idx (int64) | func (string) | target (int64) |
|---|---|---|
231,882
|
// Forwards a protected-media-identifier (DRM/EME) permission request for
// |origin| to the permission request handler.  The request is wrapped in a
// SimplePermissionRequest delegate; |callback| is invoked with the user's
// grant/deny decision.
void AwContents::RequestProtectedMediaIdentifierPermission(
const GURL& origin,
const base::Callback<void(bool)>& callback) {
permission_request_handler_->SendRequest(
scoped_ptr<AwPermissionRequestDelegate>(new SimplePermissionRequest(
origin, AwPermissionRequest::ProtectedMediaId, callback)));
}
| 0
|
513,579
|
/*
 * Return a TLS group (curve) shared by client and server.
 *
 * nmatch selects behaviour:
 *   >= 0 : return the nmatch'th shared group (in preference order), 0 if
 *          out of range;
 *   -1   : return the NUMBER of shared groups (count is returned via the
 *          uint16_t return value);
 *   -2   : return the preferred shared group, with Suite B handling.
 * Only meaningful on the server side; returns 0 on the client.
 */
uint16_t tls1_shared_group(SSL *s, int nmatch)
{
const uint16_t *pref, *supp;
size_t num_pref, num_supp, i;
int k;
/* Can't do anything on client side */
if (s->server == 0)
return 0;
if (nmatch == -2) {
if (tls1_suiteb(s)) {
/*
 * For Suite B ciphersuite determines curve: we already know
 * these are acceptable due to previous checks.
 */
unsigned long cid = s->s3.tmp.new_cipher->id;
if (cid == TLS1_CK_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)
return TLSEXT_curve_P_256;
if (cid == TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384)
return TLSEXT_curve_P_384;
/* Should never happen */
return 0;
}
/* If not Suite B just return first preference shared curve */
nmatch = 0;
}
/*
 * If server preference set, our groups are the preference order
 * otherwise peer decides.
 */
if (s->options & SSL_OP_CIPHER_SERVER_PREFERENCE) {
tls1_get_supported_groups(s, &pref, &num_pref);
tls1_get_peer_groups(s, &supp, &num_supp);
} else {
tls1_get_peer_groups(s, &pref, &num_pref);
tls1_get_supported_groups(s, &supp, &num_supp);
}
/* Walk the preference list, counting groups present in both lists and
 * permitted by security policy; k counts shared groups seen so far. */
for (k = 0, i = 0; i < num_pref; i++) {
uint16_t id = pref[i];
if (!tls1_in_list(id, supp, num_supp)
|| !tls_group_allowed(s, id, SSL_SECOP_CURVE_SHARED))
continue;
if (nmatch == k)
return id;
k++;
}
if (nmatch == -1)
return k;
/* Out of range (nmatch > k). */
return 0;
}
| 0
|
138,150
|
/*
 * Build and send an ICMP TIMESTAMP REPLY for an incoming timestamp request.
 * The originate timestamp (times[0]) is copied from the request; receive and
 * transmit timestamps (times[1], times[2]) are set to the current time as
 * milliseconds since midnight UT.
 *
 * NOTE(review): icmp_param is a stack variable and only the fields used
 * below are initialized explicitly — presumably icmp_reply() touches no
 * others; verify against icmp_reply()'s contract.
 */
static void icmp_timestamp(struct sk_buff *skb)
{
struct timespec tv;
struct icmp_bxm icmp_param;
/*
 * Too short.
 */
if (skb->len < 4)
goto out_err;
/*
 * Fill in the current time as ms since midnight UT:
 */
getnstimeofday(&tv);
icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
tv.tv_nsec / NSEC_PER_MSEC);
icmp_param.data.times[2] = icmp_param.data.times[1];
/* Copy the originate timestamp from the request; length was checked above,
 * so a copy failure here is a programming error. */
if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
BUG();
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
icmp_param.data.icmph.code = 0;
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = 0;
icmp_param.head_len = sizeof(struct icmphdr) + 12;
icmp_reply(&icmp_param, skb);
out:
return;
out_err:
/* Malformed request: count it as an input error and drop. */
ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
goto out;
}
| 0
|
152,412
|
/*
 * Translate a script-local function name ("s:name" or "<SID>name") into its
 * internal "<SNR>nr_name" form for the current script context.  In a Vim9
 * script, a bare (non-"g:") name is also resolved if it maps to a
 * script-local function.  Returns an allocated string, or NULL if the name
 * needs no translation or on error (caller frees).
 */
get_scriptlocal_funcname(char_u *funcname)
{
char sid_buf[25];
int off;
char_u *newname;
char_u *p = funcname;
if (funcname == NULL)
return NULL;
if (STRNCMP(funcname, "s:", 2) != 0
&& STRNCMP(funcname, "<SID>", 5) != 0)
{
ufunc_T *ufunc;
// The function name does not have a script-local prefix. Try finding
// it when in a Vim9 script and there is no "g:" prefix.
if (!in_vim9script() || STRNCMP(funcname, "g:", 2) == 0)
return NULL;
ufunc = find_func(funcname, FALSE);
if (ufunc == NULL || func_is_global(ufunc)
|| (p = vim_strchr(ufunc->uf_name, '_')) == NULL)
return NULL;
// p now points past the "<SNR>nr" part of the resolved internal name.
++p;
off = 0;
}
else
// Skip the "s:" (2 chars) or "<SID>" (5 chars) prefix.
off = *funcname == 's' ? 2 : 5;
if (!SCRIPT_ID_VALID(current_sctx.sc_sid))
{
emsg(_(e_using_sid_not_in_script_context));
return NULL;
}
// Expand s: prefix into <SNR>nr_<name>
vim_snprintf(sid_buf, sizeof(sid_buf), "<SNR>%ld_",
(long)current_sctx.sc_sid);
newname = alloc(STRLEN(sid_buf) + STRLEN(p + off) + 1);
if (newname == NULL)
return NULL;
STRCPY(newname, sid_buf);
STRCAT(newname, p + off);
return newname;
}
| 0
|
84,374
|
/*
 * Deliver a tracepoint event sample to all matching perf events on the
 * per-tracepoint hlist, and — if a target task was specified — also to
 * matching tracepoint events attached to that task's software context.
 * Must be called inside the recursion context identified by rctx, which
 * is released before returning.
 */
void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx,
struct task_struct *task)
{
struct perf_sample_data data;
struct perf_event *event;
struct perf_raw_record raw = {
.frag = {
.size = entry_size,
.data = record,
},
};
perf_sample_data_init(&data, 0, 0);
data.raw = &raw;
/* Patch the record header for the consumer before delivery. */
perf_trace_buf_update(record, event_type);
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
/*
 * If we got specified a target task, also iterate its context and
 * deliver this event there too.
 */
if (task && task != current) {
struct perf_event_context *ctx;
struct trace_entry *entry = record;
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
if (!ctx)
goto unlock;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
/* Only tracepoint events whose config matches this trace type. */
if (event->attr.type != PERF_TYPE_TRACEPOINT)
continue;
if (event->attr.config != entry->type)
continue;
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
unlock:
rcu_read_unlock();
}
perf_swevent_put_recursion_context(rctx);
}
| 0
|
367,882
|
/*
 * Default capability-security hook for setrlimit: impose no additional
 * restriction beyond the core checks — always allow (return 0).
 */
static int cap_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
{
return 0;
}
| 0
|
273,280
|
/*
 * Atomically set bit 'nr' in a byte located at user-space address 'addr'.
 * Pins the backing page with get_user_pages_fast, sets the bit through a
 * temporary kernel mapping, marks the page dirty, and releases it.
 * Returns 0 on success or a negative errno from the page pin.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
unsigned long log = (unsigned long)addr;
struct page *page;
void *base;
/* Bit index within the page: byte offset of addr * 8, plus nr. */
int bit = nr + (log % PAGE_SIZE) * 8;
int r;
r = get_user_pages_fast(log, 1, 1, &page);
if (r < 0)
return r;
BUG_ON(r != 1);
base = kmap_atomic(page);
set_bit(bit, base);
kunmap_atomic(base);
/* Page content changed via kernel mapping: mark dirty for writeback. */
set_page_dirty_lock(page);
put_page(page);
return 0;
}
| 0
|
13,519
|
// Locate (or preload) the cluster referenced by a cue point's track
// position and return the matching block entry, or NULL on failure.
//
// Fix: the result of Cluster::Create() was previously checked only with
// assert(), which compiles away in release builds — an allocation/creation
// failure then led to a NULL-pointer dereference.  Check explicitly and
// fail gracefully instead.
const BlockEntry* Segment::GetBlock(const CuePoint& cp,
                                    const CuePoint::TrackPosition& tp) {
  // Binary search the position-sorted cluster array (loaded + preloaded)
  // for a cluster whose file position equals tp.m_pos.
  Cluster** const ii = m_clusters;
  Cluster** i = ii;
  const long count = m_clusterCount + m_clusterPreloadCount;
  Cluster** const jj = ii + count;
  Cluster** j = jj;
  while (i < j) {
    Cluster** const k = i + (j - i) / 2;
    assert(k < jj);
    Cluster* const pCluster = *k;
    assert(pCluster);
    const long long pos = pCluster->GetPosition();
    assert(pos >= 0);
    if (pos < tp.m_pos)
      i = k + 1;
    else if (pos > tp.m_pos)
      j = k;
    else
      return pCluster->GetEntry(cp, tp);
  }
  assert(i == j);
  // Cluster not in the array yet: create a preload placeholder at tp.m_pos
  // and insert it at the search position so the array stays sorted.
  Cluster* const pCluster = Cluster::Create(this, -1, tp.m_pos);  //, -1);
  if (pCluster == NULL)  // creation failed — do not dereference
    return NULL;
  const ptrdiff_t idx = i - m_clusters;
  PreloadCluster(pCluster, idx);
  assert(m_clusters);
  assert(m_clusterPreloadCount > 0);
  assert(m_clusters[idx] == pCluster);
  return pCluster->GetEntry(cp, tp);
}
| 1
|
440,071
|
/*
 * Draw a highlight line segment (highlight_info->x1,y1 .. x2,y2) on the
 * given window using the supplied annotation graphics context.
 * All arguments are required to be non-NULL (checked by assert).
 */
MagickExport void XHighlightLine(Display *display,Window window,
GC annotate_context,const XSegment *highlight_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(display != (Display *) NULL);
assert(window != (Window) NULL);
assert(annotate_context != (GC) NULL);
assert(highlight_info != (XSegment *) NULL);
(void) XDrawLine(display,window,annotate_context,highlight_info->x1,
highlight_info->y1,highlight_info->x2,highlight_info->y2);
}
| 0
|
93,544
|
/*
 * Message-interrupt handler for Areca HBA type A controllers: acknowledge
 * the outbound MESSAGE0 doorbell interrupt in the controller's MMIO status
 * register, then defer actual message processing to the driver workqueue.
 *
 * Fix: the writel() address operand had been corrupted by a character
 * encoding error ("&reg" collapsed into the (R) sign, which cannot
 * compile); restore the intended "&reg->outbound_intstatus".
 */
static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	/* clear interrupt and message state */
	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}
| 0
|
66,270
|
/*
 * Return nonzero when the server-reported file size in fattr is larger
 * than the locally cached inode size, i.e. the local i_size must grow.
 */
static int nfs_size_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
{
return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
}
| 0
|
233,647
|
// Initializes the GL manager with the platform's known GPU driver bug
// workarounds (taken from the global GPU feature info), then delegates to
// the shared initialization implementation.
void GLManager::Initialize(const GLManager::Options& options) {
GpuDriverBugWorkarounds platform_workarounds(
g_gpu_feature_info.enabled_gpu_driver_bug_workarounds);
InitializeWithWorkaroundsImpl(options, platform_workarounds);
}
| 0
|
24,106
|
static void set_pseudo_header_frame4 ( union wtap_pseudo_header * pseudo_header , struct frame4_rec * frame4 ) {
guint32 StatusWord ;
guint8 aal_type , hl_type ;
guint16 vpi , vci ;
pseudo_header -> atm . flags = 0 ;
StatusWord = pletoh32 ( & frame4 -> atm_info . StatusWord ) ;
if ( StatusWord & SW_RAW_CELL ) pseudo_header -> atm . flags |= ATM_RAW_CELL ;
aal_type = frame4 -> atm_info . AppTrafType & ATT_AALTYPE ;
hl_type = frame4 -> atm_info . AppTrafType & ATT_HLTYPE ;
vpi = pletoh16 ( & frame4 -> atm_info . Vpi ) ;
vci = pletoh16 ( & frame4 -> atm_info . Vci ) ;
switch ( aal_type ) {
case ATT_AAL_UNKNOWN : if ( vpi == 0 && vci == 5 ) pseudo_header -> atm . aal = AAL_SIGNALLING ;
else pseudo_header -> atm . aal = AAL_UNKNOWN ;
pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_AAL1 : pseudo_header -> atm . aal = AAL_1 ;
pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_AAL3_4 : pseudo_header -> atm . aal = AAL_3_4 ;
pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_AAL5 : pseudo_header -> atm . aal = AAL_5 ;
switch ( hl_type ) {
case ATT_HL_UNKNOWN : pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_HL_LLCMX : pseudo_header -> atm . type = TRAF_LLCMX ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_HL_VCMX : pseudo_header -> atm . type = TRAF_VCMX ;
switch ( frame4 -> atm_info . AppHLType ) {
case AHLT_UNKNOWN : pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case AHLT_VCMX_802_3_FCS : pseudo_header -> atm . subtype = TRAF_ST_VCMX_802_3_FCS ;
break ;
case AHLT_VCMX_802_4_FCS : pseudo_header -> atm . subtype = TRAF_ST_VCMX_802_4_FCS ;
break ;
case AHLT_VCMX_802_5_FCS : pseudo_header -> atm . subtype = TRAF_ST_VCMX_802_5_FCS ;
break ;
case AHLT_VCMX_FDDI_FCS : pseudo_header -> atm . subtype = TRAF_ST_VCMX_FDDI_FCS ;
break ;
case AHLT_VCMX_802_6_FCS : pseudo_header -> atm . subtype = TRAF_ST_VCMX_802_6_FCS ;
break ;
case AHLT_VCMX_802_3 : pseudo_header -> atm . subtype = TRAF_ST_VCMX_802_3 ;
break ;
case AHLT_VCMX_802_4 : pseudo_header -> atm . subtype = TRAF_ST_VCMX_802_4 ;
break ;
case AHLT_VCMX_802_5 : pseudo_header -> atm . subtype = TRAF_ST_VCMX_802_5 ;
break ;
case AHLT_VCMX_FDDI : pseudo_header -> atm . subtype = TRAF_ST_VCMX_FDDI ;
break ;
case AHLT_VCMX_802_6 : pseudo_header -> atm . subtype = TRAF_ST_VCMX_802_6 ;
break ;
case AHLT_VCMX_FRAGMENTS : pseudo_header -> atm . subtype = TRAF_ST_VCMX_FRAGMENTS ;
break ;
case AHLT_VCMX_BPDU : pseudo_header -> atm . subtype = TRAF_ST_VCMX_BPDU ;
break ;
default : pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
}
break ;
case ATT_HL_LANE : pseudo_header -> atm . type = TRAF_LANE ;
switch ( frame4 -> atm_info . AppHLType ) {
case AHLT_UNKNOWN : pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case AHLT_LANE_LE_CTRL : pseudo_header -> atm . subtype = TRAF_ST_LANE_LE_CTRL ;
break ;
case AHLT_LANE_802_3 : pseudo_header -> atm . subtype = TRAF_ST_LANE_802_3 ;
break ;
case AHLT_LANE_802_5 : pseudo_header -> atm . subtype = TRAF_ST_LANE_802_5 ;
break ;
case AHLT_LANE_802_3_MC : pseudo_header -> atm . subtype = TRAF_ST_LANE_802_3_MC ;
break ;
case AHLT_LANE_802_5_MC : pseudo_header -> atm . subtype = TRAF_ST_LANE_802_5_MC ;
break ;
default : pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
}
break ;
case ATT_HL_ILMI : pseudo_header -> atm . type = TRAF_ILMI ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_HL_FRMR : pseudo_header -> atm . type = TRAF_FR ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_HL_SPANS : pseudo_header -> atm . type = TRAF_SPANS ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_HL_IPSILON : pseudo_header -> atm . type = TRAF_IPSILON ;
switch ( frame4 -> atm_info . AppHLType ) {
case AHLT_UNKNOWN : pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case AHLT_IPSILON_FT0 : pseudo_header -> atm . subtype = TRAF_ST_IPSILON_FT0 ;
break ;
case AHLT_IPSILON_FT1 : pseudo_header -> atm . subtype = TRAF_ST_IPSILON_FT1 ;
break ;
case AHLT_IPSILON_FT2 : pseudo_header -> atm . subtype = TRAF_ST_IPSILON_FT2 ;
break ;
default : pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
}
break ;
default : pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
}
break ;
case ATT_AAL_USER : pseudo_header -> atm . aal = AAL_USER ;
pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_AAL_SIGNALLING : pseudo_header -> atm . aal = AAL_SIGNALLING ;
pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
case ATT_OAMCELL : pseudo_header -> atm . aal = AAL_OAMCELL ;
pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
default : pseudo_header -> atm . aal = AAL_UNKNOWN ;
pseudo_header -> atm . type = TRAF_UNKNOWN ;
pseudo_header -> atm . subtype = TRAF_ST_UNKNOWN ;
break ;
}
pseudo_header -> atm . vpi = vpi ;
pseudo_header -> atm . vci = vci ;
pseudo_header -> atm . channel = pletoh16 ( & frame4 -> atm_info . channel ) ;
pseudo_header -> atm . cells = pletoh16 ( & frame4 -> atm_info . cells ) ;
pseudo_header -> atm . aal5t_u2u = pletoh16 ( & frame4 -> atm_info . Trailer . aal5t_u2u ) ;
pseudo_header -> atm . aal5t_len = pletoh16 ( & frame4 -> atm_info . Trailer . aal5t_len ) ;
pseudo_header -> atm . aal5t_chksum = pntoh32 ( & frame4 -> atm_info . Trailer . aal5t_chksum ) ;
}
| 0
|
521,415
|
/*
  Execute this subselect via its current engine.  If the engine was swapped
  during execution (lazy optimization picked a better engine), re-execute
  with the new engine.  Returns true on error or when the query was killed.
*/
bool Item_subselect::exec()
{
subselect_engine *org_engine= engine;
DBUG_ENTER("Item_subselect::exec");
DBUG_ASSERT(fixed);
DBUG_ASSERT(!eliminated);
/*
Do not execute subselect in case of a fatal error
or if the query has been killed.
*/
if (thd->is_error() || thd->killed)
DBUG_RETURN(true);
DBUG_ASSERT(!thd->lex->context_analysis_only);
/*
Simulate a failure in sub-query execution. Used to test e.g.
out of memory or query being killed conditions.
*/
DBUG_EXECUTE_IF("subselect_exec_fail", DBUG_RETURN(true););
bool res= engine->exec();
#ifndef DBUG_OFF
++exec_counter;
#endif
if (engine != org_engine)
{
/*
If the subquery engine changed during execution due to lazy subquery
optimization, or because the original engine found a more efficient other
engine, re-execute the subquery with the new engine.
*/
DBUG_RETURN(exec());
}
DBUG_RETURN(res);
}
| 0
|
354,087
|
/*
 * Move the on-screen cursor from (yold, xold) to (ynew, xnew), choosing the
 * cheapest of several tactics: direct cursor addressing (cup), local
 * relative movement, carriage-return/home/home-down plus local movement, or
 * left-margin wraparound.  The chosen escape sequence is assembled into a
 * fixed OPT_SIZE buffer (bounds-checked via the string_desc helpers) and
 * emitted with tputs through myOutCh.  Returns OK if a move was emitted,
 * ERR if no affordable tactic was found.
 *
 * NOTE(review): correctness of the final tputs depends on 'buffer' having
 * been filled either by tactic #0's strcpy or by the tactic>0 regeneration
 * below — verify all paths that set 'usecost' also populate 'buffer'.
 */
onscreen_mvcur(NCURSES_SP_DCLx
int yold, int xold,
int ynew, int xnew, int ovw,
NCURSES_SP_OUTC myOutCh)
/* onscreen move from (yold, xold) to (ynew, xnew) */
{
string_desc result;
char buffer[OPT_SIZE];
int tactic = 0, newcost, usecost = INFINITY;
int t5_cr_cost;
#if defined(MAIN) || defined(NCURSES_TEST)
struct timeval before, after;
gettimeofday(&before, NULL);
#endif /* MAIN */
/* NullResult measures cost only (no output buffer); InitResult writes
 * into 'buffer' with bounds tracking. */
#define NullResult _nc_str_null(&result, sizeof(buffer))
#define InitResult _nc_str_init(&result, buffer, sizeof(buffer))
/* tactic #0: use direct cursor addressing */
if (_nc_safe_strcpy(InitResult, TPARM_2(SP_PARM->_address_cursor, ynew, xnew))) {
tactic = 0;
usecost = SP_PARM->_cup_cost;
#if defined(TRACE) || defined(NCURSES_TEST)
if (!(_nc_optimize_enable & OPTIMIZE_MVCUR))
goto nonlocal;
#endif /* TRACE */
/*
* We may be able to tell in advance that the full optimization
* will probably not be worth its overhead. Also, don't try to
* use local movement if the current attribute is anything but
* A_NORMAL...there are just too many ways this can screw up
* (like, say, local-movement \n getting mapped to some obscure
* character because A_ALTCHARSET is on).
*/
if (yold == -1 || xold == -1 || NOT_LOCAL(SP_PARM, yold, xold, ynew, xnew)) {
#if defined(MAIN) || defined(NCURSES_TEST)
if (!profiling) {
(void) fputs("nonlocal\n", stderr);
goto nonlocal; /* always run the optimizer if profiling */
}
#else
goto nonlocal;
#endif /* MAIN */
}
}
#ifndef NO_OPTIMIZE
/* tactic #1: use local movement */
if (yold != -1 && xold != -1
&& ((newcost = relative_move(NCURSES_SP_ARGx
NullResult,
yold, xold,
ynew, xnew, ovw)) != INFINITY)
&& newcost < usecost) {
tactic = 1;
usecost = newcost;
}
/* tactic #2: use carriage-return + local movement */
if (yold != -1 && carriage_return
&& ((newcost = relative_move(NCURSES_SP_ARGx
NullResult,
yold, 0,
ynew, xnew, ovw)) != INFINITY)
&& SP_PARM->_cr_cost + newcost < usecost) {
tactic = 2;
usecost = SP_PARM->_cr_cost + newcost;
}
/* tactic #3: use home-cursor + local movement */
if (cursor_home
&& ((newcost = relative_move(NCURSES_SP_ARGx
NullResult,
0, 0,
ynew, xnew, ovw)) != INFINITY)
&& SP_PARM->_home_cost + newcost < usecost) {
tactic = 3;
usecost = SP_PARM->_home_cost + newcost;
}
/* tactic #4: use home-down + local movement */
if (cursor_to_ll
&& ((newcost = relative_move(NCURSES_SP_ARGx
NullResult,
screen_lines(SP_PARM) - 1, 0,
ynew, xnew, ovw)) != INFINITY)
&& SP_PARM->_ll_cost + newcost < usecost) {
tactic = 4;
usecost = SP_PARM->_ll_cost + newcost;
}
/*
* tactic #5: use left margin for wrap to right-hand side,
* unless strange wrap behavior indicated by xenl might hose us.
*/
t5_cr_cost = (xold > 0 ? SP_PARM->_cr_cost : 0);
if (auto_left_margin && !eat_newline_glitch
&& yold > 0 && cursor_left
&& ((newcost = relative_move(NCURSES_SP_ARGx
NullResult,
yold - 1, screen_columns(SP_PARM) - 1,
ynew, xnew, ovw)) != INFINITY)
&& t5_cr_cost + SP_PARM->_cub1_cost + newcost < usecost) {
tactic = 5;
usecost = t5_cr_cost + SP_PARM->_cub1_cost + newcost;
}
/*
* These cases are ordered by estimated relative frequency.
*/
/* Regenerate the winning tactic's sequence into 'buffer' for real
 * (the cost passes above used NullResult and produced no output). */
if (tactic)
InitResult;
switch (tactic) {
case 1:
(void) relative_move(NCURSES_SP_ARGx
&result,
yold, xold,
ynew, xnew, ovw);
break;
case 2:
(void) _nc_safe_strcpy(&result, carriage_return);
(void) relative_move(NCURSES_SP_ARGx
&result,
yold, 0,
ynew, xnew, ovw);
break;
case 3:
(void) _nc_safe_strcpy(&result, cursor_home);
(void) relative_move(NCURSES_SP_ARGx
&result, 0, 0,
ynew, xnew, ovw);
break;
case 4:
(void) _nc_safe_strcpy(&result, cursor_to_ll);
(void) relative_move(NCURSES_SP_ARGx
&result,
screen_lines(SP_PARM) - 1, 0,
ynew, xnew, ovw);
break;
case 5:
if (xold > 0)
(void) _nc_safe_strcat(&result, carriage_return);
(void) _nc_safe_strcat(&result, cursor_left);
(void) relative_move(NCURSES_SP_ARGx
&result,
yold - 1, screen_columns(SP_PARM) - 1,
ynew, xnew, ovw);
break;
}
#endif /* !NO_OPTIMIZE */
nonlocal:
#if defined(MAIN) || defined(NCURSES_TEST)
gettimeofday(&after, NULL);
diff = after.tv_usec - before.tv_usec
+ (after.tv_sec - before.tv_sec) * 1000000;
if (!profiling)
(void) fprintf(stderr,
"onscreen: %d microsec, %f 28.8Kbps char-equivalents\n",
(int) diff, diff / 288);
#endif /* MAIN */
if (usecost != INFINITY) {
TR(TRACE_MOVE, ("mvcur tactic %d", tactic));
TPUTS_TRACE("mvcur");
NCURSES_SP_NAME(tputs) (NCURSES_SP_ARGx
buffer, 1, myOutCh);
SP_PARM->_cursrow = ynew;
SP_PARM->_curscol = xnew;
return (OK);
} else
return (ERR);
}
| 1
|
283,830
|
// Cancels (rather than resumes) all requests blocked on the given frame
// route; the 'true' argument selects cancellation in the shared helper.
void ResourceDispatcherHostImpl::CancelBlockedRequestsForRoute(
const GlobalFrameRoutingId& global_routing_id) {
ProcessBlockedRequestsForRoute(global_routing_id, true);
}
| 0
|
228,048
|
// Destructor: by this point all pending callbacks must have been run or
// dropped, and no weak pointers to this state may remain outstanding.
DownloadRequestLimiter::TabDownloadState::~TabDownloadState() {
DCHECK(callbacks_.empty());
DCHECK(!factory_.HasWeakPtrs());
}
| 0
|
188,419
|
// Broker-side handler for a sandboxed NtSetInformationFile call.  Only
// proceeds when policy evaluation said ASK_BROKER; otherwise (and on any
// failure to duplicate the client's handle) reports STATUS_ACCESS_DENIED.
// Always returns true, meaning the action was handled; the NT result is
// passed back through *nt_status.
bool FileSystemPolicy::SetInformationFileAction(
EvalResult eval_result, const ClientInfo& client_info,
HANDLE target_file_handle, void* file_info, uint32 length,
uint32 info_class, IO_STATUS_BLOCK* io_block,
NTSTATUS* nt_status) {
if (ASK_BROKER != eval_result) {
*nt_status = STATUS_ACCESS_DENIED;
return true;
}
NtSetInformationFileFunction NtSetInformationFile = NULL;
ResolveNTFunctionPtr("NtSetInformationFile", &NtSetInformationFile);
// Duplicate the target handle out of the client process so the call runs
// against the broker's own handle.
HANDLE local_handle = NULL;
if (!::DuplicateHandle(client_info.process, target_file_handle,
::GetCurrentProcess(), &local_handle, 0, FALSE,
DUPLICATE_SAME_ACCESS)) {
*nt_status = STATUS_ACCESS_DENIED;
return true;
}
// ScopedHandle closes local_handle when this function returns.
ScopedHandle handle(local_handle);
FILE_INFORMATION_CLASS file_info_class =
static_cast<FILE_INFORMATION_CLASS>(info_class);
*nt_status = NtSetInformationFile(local_handle, io_block, file_info, length,
file_info_class);
return true;
}
| 0
|
291,530
|
/*
 * Length of the source link-layer address option payload.  Always an
 * Ethernet MAC length (ETH_ALEN); msg and offset are unused here.
 */
size_t ndp_msg_opt_slladdr_len(struct ndp_msg *msg, int offset)
{
return ETH_ALEN;
}
| 0
|
203,806
|
// Sends a JSON automation "Reload" command for the tab at (browser_index,
// tab_index).  Returns true on success; on failure *error_msg describes
// the problem.  The reply dictionary is not inspected.
bool SendReloadJSONRequest(
AutomationMessageSender* sender,
int browser_index,
int tab_index,
std::string* error_msg) {
DictionaryValue dict;
dict.SetString("command", "Reload");
dict.SetInteger("windex", browser_index);
dict.SetInteger("tab_index", tab_index);
DictionaryValue reply_dict;
return SendAutomationJSONRequest(sender, dict, &reply_dict, error_msg);
}
| 0
|
387,943
|
/*
 * Undo the PNG "Paeth" row filter for pixels wider than one byte.
 * Each output byte is the filtered byte plus the Paeth predictor chosen
 * from: a = byte to the left, b = byte above, c = byte above-left.
 * The first pixel has no left neighbor, so it reduces to the "up" filter.
 * Operates in place on 'row'; 'prev_row' is the already-reconstructed
 * previous row.
 */
png_read_filter_row_paeth_multibyte_pixel(png_row_infop row_info, png_bytep row,
png_const_bytep prev_row)
{
int bpp = (row_info->pixel_depth + 7) >> 3;
png_bytep rp_end = row + bpp;
/* Process the first pixel in the row completely (this is the same as 'up'
* because there is only one candidate predictor for the first row).
*/
while (row < rp_end)
{
int a = *row + *prev_row++;
*row++ = (png_byte)a;
}
/* Remainder */
rp_end += row_info->rowbytes - bpp;
while (row < rp_end)
{
int a, b, c, pa, pb, pc, p;
c = *(prev_row - bpp);
a = *(row - bpp);
b = *prev_row++;
/* p/pa/pb/pc implement the Paeth distance computation without the
 * intermediate 'p = a + b - c' overflow risk: pa=|b-c|, pb=|a-c|,
 * pc=|a+b-2c|; the smallest distance selects the predictor. */
p = b - c;
pc = a - c;
#ifdef PNG_USE_ABS
pa = abs(p);
pb = abs(pc);
pc = abs(p + pc);
#else
pa = p < 0 ? -p : p;
pb = pc < 0 ? -pc : pc;
pc = (p + pc) < 0 ? -(p + pc) : p + pc;
#endif
/* Tie-breaking order is a, then b, then c (per the PNG spec). */
if (pb < pa) pa = pb, a = b;
if (pc < pa) a = c;
a += *row;
*row++ = (png_byte)a;
}
}
| 0
|
116,296
|
/*
 * Serialize a PHP object into 'buffer': write the (short-length-prefixed)
 * class name, then the property table.  If the class defines __sleep(), it
 * is invoked and only the properties it names are serialized, via a
 * temporary stack-allocated hash table (no heap allocation for the table
 * itself).  Throws a PHP exception on cyclic references or over-long class
 * names.
 */
static void swoole_serialize_object(seriaString *buffer, zval *obj, size_t start)
{
zend_string *name = Z_OBJCE_P(obj)->name;
/* Cycle detection: object property table already being serialized. */
if (GC_IS_RECURSIVE(Z_OBJPROP_P(obj)))
{
zend_throw_exception_ex(NULL, 0, "the object %s has cycle ref.", name->val);
return;
}
if (name->len > 0xffff)
{//so long?
zend_throw_exception_ex(NULL, 0, "the object name is too long.");
}
else
{
SERIA_SET_ENTRY_SHORT(buffer, name->len);
swoole_string_cpy(buffer, name->val, name->len);
}
zend_class_entry *ce = Z_OBJ_P(obj)->ce;
if (ce && zend_hash_exists(&ce->function_table, Z_STR(swSeriaG.sleep_fname)))
{
zval retval;
if (call_user_function_ex(NULL, obj, &swSeriaG.sleep_fname, &retval, 0, 0, 1, NULL) == SUCCESS)
{
if (EG(exception))
{
zval_dtor(&retval);
return;
}
if (Z_TYPE(retval) == IS_ARRAY)
{
zend_string *prop_key;
zval *prop_value, *sleep_value;
const char *prop_name, *class_name;
size_t prop_key_len;
int got_num = 0;
//for the zero malloc
zend_array tmp_arr;
zend_array *ht = (zend_array *) & tmp_arr;
#if PHP_VERSION_ID >= 70300
_zend_hash_init(ht, zend_hash_num_elements(Z_ARRVAL(retval)), ZVAL_PTR_DTOR, 0);
#else
_zend_hash_init(ht, zend_hash_num_elements(Z_ARRVAL(retval)), ZVAL_PTR_DTOR, 0 ZEND_FILE_LINE_CC);
#endif
ht->nTableMask = -(ht)->nTableSize;
/* Hash bucket storage lives on the stack (alloca) for speed. */
ALLOCA_FLAG(use_heap);
void *ht_addr = do_alloca(HT_SIZE(ht), use_heap);
HT_SET_DATA_ADDR(ht, ht_addr);
ht->u.flags |= HASH_FLAG_INITIALIZED;
HT_HASH_RESET(ht);
//just clean property do not add null when does not exist
//we double for each, cause we do not malloc and release it
ZEND_HASH_FOREACH_STR_KEY_VAL(Z_OBJPROP_P(obj), prop_key, prop_value)
{
//get origin property name
zend_unmangle_property_name_ex(prop_key, &class_name, &prop_name, &prop_key_len);
ZEND_HASH_FOREACH_VAL(Z_ARRVAL(retval), sleep_value)
{
if (Z_TYPE_P(sleep_value) == IS_STRING &&
Z_STRLEN_P(sleep_value) == prop_key_len &&
memcmp(Z_STRVAL_P(sleep_value), prop_name, prop_key_len) == 0)
{
got_num++;
//add mangle key,unmangle in unseria
_zend_hash_add_or_update(ht, prop_key, prop_value, HASH_UPDATE ZEND_FILE_LINE_CC);
break;
}
}
ZEND_HASH_FOREACH_END();
}
ZEND_HASH_FOREACH_END();
//there some member not in property
if (zend_hash_num_elements(Z_ARRVAL(retval)) > got_num)
{
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "__sleep() retrun a member but does not exist in property");
}
seria_array_type(ht, buffer, start, buffer->offset);
swoole_serialize_arr(buffer, ht);
ZSTR_ALLOCA_FREE(ht_addr, use_heap);
zval_dtor(&retval);
return;
}
else
{
php_error_docref(NULL TSRMLS_CC, E_NOTICE, " __sleep should return an array only containing the "
"names of instance-variables to serialize");
zval_dtor(&retval);
}
}
}
/* No usable __sleep(): serialize the full property table. */
seria_array_type(Z_OBJPROP_P(obj), buffer, start, buffer->offset);
swoole_serialize_arr(buffer, Z_OBJPROP_P(obj));
// printf("hash2 %u\n",ce->properties_info.arData[0].key->h);
}
| 0
|
48,358
|
/*
 * Build and multicast an NL80211_CMD_ROAM netlink notification for a roam
 * event on netdev.  Optional attributes (request/response IEs, FILS ERP
 * sequence number, KEK, PMK, PMKID) are included only when present in
 * 'info'.  The message is silently dropped on allocation or attribute-put
 * failure.
 */
void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
struct cfg80211_roam_info *info, gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
const u8 *bssid = info->bss ? info->bss->bssid : info->bssid;
/* Size estimate: fixed headroom plus the variable-length attributes. */
msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len +
info->fils.kek_len + info->fils.pmk_len +
(info->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
if (!msg)
return;
hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM);
if (!hdr) {
nlmsg_free(msg);
return;
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
(info->req_ie &&
nla_put(msg, NL80211_ATTR_REQ_IE, info->req_ie_len,
info->req_ie)) ||
(info->resp_ie &&
nla_put(msg, NL80211_ATTR_RESP_IE, info->resp_ie_len,
info->resp_ie)) ||
(info->fils.update_erp_next_seq_num &&
nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
info->fils.erp_next_seq_num)) ||
(info->fils.kek &&
nla_put(msg, NL80211_ATTR_FILS_KEK, info->fils.kek_len,
info->fils.kek)) ||
(info->fils.pmk &&
nla_put(msg, NL80211_ATTR_PMK, info->fils.pmk_len, info->fils.pmk)) ||
(info->fils.pmkid &&
nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, info->fils.pmkid)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_MLME, gfp);
return;
nla_put_failure:
nlmsg_free(msg);
}
| 0
|
471,280
|
/*
 * Report an error encountered while reading an RDB payload.  In normal
 * server mode: if an on-disk RDB file is being loaded (or the error is a
 * corruption), re-run the file through the rdb checker and terminate;
 * errors from a socket-sourced RDB are treated as connection failures and
 * the server keeps running.  In rdb-check mode the message is simply
 * forwarded to the checker's error reporter before exiting.
 */
void rdbReportError(int corruption_error, int linenum, char *reason, ...) {
va_list ap;
char msg[1024];
int len;
len = snprintf(msg,sizeof(msg),
"Internal error in RDB reading offset %llu, function at rdb.c:%d -> ",
(unsigned long long)server.loading_loaded_bytes, linenum);
va_start(ap,reason);
vsnprintf(msg+len,sizeof(msg)-len,reason,ap);
va_end(ap);
if (!rdbCheckMode) {
if (rdbFileBeingLoaded || corruption_error) {
serverLog(LL_WARNING, "%s", msg);
/* Hand the file to the standalone RDB checker for diagnosis. */
char *argv[2] = {"",rdbFileBeingLoaded};
redis_check_rdb_main(2,argv,NULL);
} else {
serverLog(LL_WARNING, "%s. Failure loading rdb format from socket, assuming connection error, resuming operation.", msg);
return;
}
} else {
rdbCheckError("%s",msg);
}
serverLog(LL_WARNING, "Terminating server after rdb file reading failure.");
exit(1);
}
| 0
|
362,109
|
/*
 * Print the ftdiff command-line usage text to stderr and exit with
 * status 1.  Never returns.
 */
usage( void )
{
fprintf( stderr,
"ftdiff: a simple program to proof several text hinting modes\n"
"-----------------------------------------------------------\n"
"\n"
"Usage: ftdiff [options] fontfile [fontfile2 ...]\n"
"\n"
" -r R use resolution R dpi (default: 72 dpi)\n"
" -s S set character size to S points (default: 16 pt)\n"
" -f TEXTFILE change displayed text, using text in TEXTFILE\n"
"\n" );
exit( 1 );
}
| 0
|
387,488
|
/*
 * ntpq "keytype" command handler.  With no argument, print the current
 * authentication digest type and its digest length to fp.  With an
 * argument, look up the named digest; if valid, make it the session's
 * authentication keytype, otherwise print an explanatory error.
 */
keytype(
struct parse *pcmd,
FILE *fp
)
{
const char * digest_name;
size_t digest_len;
int key_type;
if (!pcmd->nargs) {
fprintf(fp, "keytype is %s with %lu octet digests\n",
keytype_name(info_auth_keytype),
(u_long)info_auth_hashlen);
return;
}
digest_name = pcmd->argval[0].string;
digest_len = 0;
key_type = keytype_from_text(digest_name, &digest_len);
if (!key_type) {
/* Without OpenSSL only MD5 is compiled in. */
fprintf(fp, "keytype is not valid. "
#ifdef OPENSSL
"Type \"help keytype\" for the available digest types.\n");
#else
"Only \"md5\" is available.\n");
#endif
return;
}
info_auth_keytype = key_type;
info_auth_hashlen = digest_len;
}
| 0
|
496,456
|
transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
{
IndexStmt *index;
List *notnullcmds = NIL;
ListCell *lc;
index = makeNode(IndexStmt);
index->unique = (constraint->contype != CONSTR_EXCLUSION);
index->primary = (constraint->contype == CONSTR_PRIMARY);
if (index->primary)
{
if (cxt->pkey != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("multiple primary keys for table \"%s\" are not allowed",
cxt->relation->relname),
parser_errposition(cxt->pstate, constraint->location)));
cxt->pkey = index;
/*
* In ALTER TABLE case, a primary index might already exist, but
* DefineIndex will check for it.
*/
}
index->isconstraint = true;
index->deferrable = constraint->deferrable;
index->initdeferred = constraint->initdeferred;
if (constraint->conname != NULL)
index->idxname = pstrdup(constraint->conname);
else
index->idxname = NULL; /* DefineIndex will choose name */
index->relation = cxt->relation;
index->accessMethod = constraint->access_method ? constraint->access_method : DEFAULT_INDEX_TYPE;
index->options = constraint->options;
index->tableSpace = constraint->indexspace;
index->whereClause = constraint->where_clause;
index->indexParams = NIL;
index->indexIncludingParams = NIL;
index->excludeOpNames = NIL;
index->idxcomment = NULL;
index->indexOid = InvalidOid;
index->oldNode = InvalidOid;
index->transformed = false;
index->concurrent = false;
index->if_not_exists = false;
index->reset_default_tblspc = constraint->reset_default_tblspc;
/*
* If it's ALTER TABLE ADD CONSTRAINT USING INDEX, look up the index and
* verify it's usable, then extract the implied column name list. (We
* will not actually need the column name list at runtime, but we need it
* now to check for duplicate column entries below.)
*/
if (constraint->indexname != NULL)
{
char *index_name = constraint->indexname;
Relation heap_rel = cxt->rel;
Oid index_oid;
Relation index_rel;
Form_pg_index index_form;
oidvector *indclass;
Datum indclassDatum;
bool isnull;
int i;
/* Grammar should not allow this with explicit column list */
Assert(constraint->keys == NIL);
/* Grammar should only allow PRIMARY and UNIQUE constraints */
Assert(constraint->contype == CONSTR_PRIMARY ||
constraint->contype == CONSTR_UNIQUE);
/* Must be ALTER, not CREATE, but grammar doesn't enforce that */
if (!cxt->isalter)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use an existing index in CREATE TABLE"),
parser_errposition(cxt->pstate, constraint->location)));
/* Look for the index in the same schema as the table */
index_oid = get_relname_relid(index_name, RelationGetNamespace(heap_rel));
if (!OidIsValid(index_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("index \"%s\" does not exist", index_name),
parser_errposition(cxt->pstate, constraint->location)));
/* Open the index (this will throw an error if it is not an index) */
index_rel = index_open(index_oid, AccessShareLock);
index_form = index_rel->rd_index;
/* Check that it does not have an associated constraint already */
if (OidIsValid(get_index_constraint(index_oid)))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("index \"%s\" is already associated with a constraint",
index_name),
parser_errposition(cxt->pstate, constraint->location)));
/* Perform validity checks on the index */
if (index_form->indrelid != RelationGetRelid(heap_rel))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("index \"%s\" does not belong to table \"%s\"",
index_name, RelationGetRelationName(heap_rel)),
parser_errposition(cxt->pstate, constraint->location)));
if (!index_form->indisvalid)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("index \"%s\" is not valid", index_name),
parser_errposition(cxt->pstate, constraint->location)));
if (!index_form->indisunique)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a unique index", index_name),
errdetail("Cannot create a primary key or unique constraint using such an index."),
parser_errposition(cxt->pstate, constraint->location)));
if (RelationGetIndexExpressions(index_rel) != NIL)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("index \"%s\" contains expressions", index_name),
errdetail("Cannot create a primary key or unique constraint using such an index."),
parser_errposition(cxt->pstate, constraint->location)));
if (RelationGetIndexPredicate(index_rel) != NIL)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is a partial index", index_name),
errdetail("Cannot create a primary key or unique constraint using such an index."),
parser_errposition(cxt->pstate, constraint->location)));
/*
* It's probably unsafe to change a deferred index to non-deferred. (A
* non-constraint index couldn't be deferred anyway, so this case
* should never occur; no need to sweat, but let's check it.)
*/
if (!index_form->indimmediate && !constraint->deferrable)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is a deferrable index", index_name),
errdetail("Cannot create a non-deferrable constraint using a deferrable index."),
parser_errposition(cxt->pstate, constraint->location)));
/*
* Insist on it being a btree. That's the only kind that supports
* uniqueness at the moment anyway; but we must have an index that
* exactly matches what you'd get from plain ADD CONSTRAINT syntax,
* else dump and reload will produce a different index (breaking
* pg_upgrade in particular).
*/
if (index_rel->rd_rel->relam != get_index_am_oid(DEFAULT_INDEX_TYPE, false))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("index \"%s\" is not a btree", index_name),
parser_errposition(cxt->pstate, constraint->location)));
/* Must get indclass the hard way */
indclassDatum = SysCacheGetAttr(INDEXRELID, index_rel->rd_indextuple,
Anum_pg_index_indclass, &isnull);
Assert(!isnull);
indclass = (oidvector *) DatumGetPointer(indclassDatum);
for (i = 0; i < index_form->indnatts; i++)
{
int16 attnum = index_form->indkey.values[i];
const FormData_pg_attribute *attform;
char *attname;
Oid defopclass;
/*
* We shouldn't see attnum == 0 here, since we already rejected
* expression indexes. If we do, SystemAttributeDefinition will
* throw an error.
*/
if (attnum > 0)
{
Assert(attnum <= heap_rel->rd_att->natts);
attform = TupleDescAttr(heap_rel->rd_att, attnum - 1);
}
else
attform = SystemAttributeDefinition(attnum);
attname = pstrdup(NameStr(attform->attname));
if (i < index_form->indnkeyatts)
{
/*
* Insist on default opclass and sort options. While the
* index would still work as a constraint with non-default
* settings, it might not provide exactly the same uniqueness
* semantics as you'd get from a normally-created constraint;
* and there's also the dump/reload problem mentioned above.
*/
defopclass = GetDefaultOpClass(attform->atttypid,
index_rel->rd_rel->relam);
if (indclass->values[i] != defopclass ||
index_rel->rd_indoption[i] != 0)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("index \"%s\" column number %d does not have default sorting behavior", index_name, i + 1),
errdetail("Cannot create a primary key or unique constraint using such an index."),
parser_errposition(cxt->pstate, constraint->location)));
constraint->keys = lappend(constraint->keys, makeString(attname));
}
else
constraint->including = lappend(constraint->including, makeString(attname));
}
/* Close the index relation but keep the lock */
relation_close(index_rel, NoLock);
index->indexOid = index_oid;
}
/*
* If it's an EXCLUDE constraint, the grammar returns a list of pairs of
* IndexElems and operator names. We have to break that apart into
* separate lists.
*/
if (constraint->contype == CONSTR_EXCLUSION)
{
foreach(lc, constraint->exclusions)
{
List *pair = (List *) lfirst(lc);
IndexElem *elem;
List *opname;
Assert(list_length(pair) == 2);
elem = linitial_node(IndexElem, pair);
opname = lsecond_node(List, pair);
index->indexParams = lappend(index->indexParams, elem);
index->excludeOpNames = lappend(index->excludeOpNames, opname);
}
}
/*
* For UNIQUE and PRIMARY KEY, we just have a list of column names.
*
* Make sure referenced keys exist. If we are making a PRIMARY KEY index,
* also make sure they are NOT NULL.
*/
else
{
foreach(lc, constraint->keys)
{
char *key = strVal(lfirst(lc));
bool found = false;
bool forced_not_null = false;
ColumnDef *column = NULL;
ListCell *columns;
IndexElem *iparam;
/* Make sure referenced column exists. */
foreach(columns, cxt->columns)
{
column = castNode(ColumnDef, lfirst(columns));
if (strcmp(column->colname, key) == 0)
{
found = true;
break;
}
}
if (found)
{
/*
* column is defined in the new table. For PRIMARY KEY, we
* can apply the NOT NULL constraint cheaply here ... unless
* the column is marked is_from_type, in which case marking it
* here would be ineffective (see MergeAttributes).
*/
if (constraint->contype == CONSTR_PRIMARY &&
!column->is_from_type)
{
column->is_not_null = true;
forced_not_null = true;
}
}
else if (SystemAttributeByName(key) != NULL)
{
/*
* column will be a system column in the new table, so accept
* it. System columns can't ever be null, so no need to worry
* about PRIMARY/NOT NULL constraint.
*/
found = true;
}
else if (cxt->inhRelations)
{
/* try inherited tables */
ListCell *inher;
foreach(inher, cxt->inhRelations)
{
RangeVar *inh = castNode(RangeVar, lfirst(inher));
Relation rel;
int count;
rel = table_openrv(inh, AccessShareLock);
/* check user requested inheritance from valid relkind */
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("inherited relation \"%s\" is not a table or foreign table",
inh->relname)));
for (count = 0; count < rel->rd_att->natts; count++)
{
Form_pg_attribute inhattr = TupleDescAttr(rel->rd_att,
count);
char *inhname = NameStr(inhattr->attname);
if (inhattr->attisdropped)
continue;
if (strcmp(key, inhname) == 0)
{
found = true;
/*
* It's tempting to set forced_not_null if the
* parent column is already NOT NULL, but that
* seems unsafe because the column's NOT NULL
* marking might disappear between now and
* execution. Do the runtime check to be safe.
*/
break;
}
}
table_close(rel, NoLock);
if (found)
break;
}
}
/*
* In the ALTER TABLE case, don't complain about index keys not
* created in the command; they may well exist already.
* DefineIndex will complain about them if not.
*/
if (!found && !cxt->isalter)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" named in key does not exist", key),
parser_errposition(cxt->pstate, constraint->location)));
/* Check for PRIMARY KEY(foo, foo) */
foreach(columns, index->indexParams)
{
iparam = (IndexElem *) lfirst(columns);
if (iparam->name && strcmp(key, iparam->name) == 0)
{
if (index->primary)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
errmsg("column \"%s\" appears twice in primary key constraint",
key),
parser_errposition(cxt->pstate, constraint->location)));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
errmsg("column \"%s\" appears twice in unique constraint",
key),
parser_errposition(cxt->pstate, constraint->location)));
}
}
/* OK, add it to the index definition */
iparam = makeNode(IndexElem);
iparam->name = pstrdup(key);
iparam->expr = NULL;
iparam->indexcolname = NULL;
iparam->collation = NIL;
iparam->opclass = NIL;
iparam->ordering = SORTBY_DEFAULT;
iparam->nulls_ordering = SORTBY_NULLS_DEFAULT;
index->indexParams = lappend(index->indexParams, iparam);
/*
* For a primary-key column, also create an item for ALTER TABLE
* SET NOT NULL if we couldn't ensure it via is_not_null above.
*/
if (constraint->contype == CONSTR_PRIMARY && !forced_not_null)
{
AlterTableCmd *notnullcmd = makeNode(AlterTableCmd);
notnullcmd->subtype = AT_SetNotNull;
notnullcmd->name = pstrdup(key);
notnullcmds = lappend(notnullcmds, notnullcmd);
}
}
}
/*
* Add included columns to index definition. This is much like the
* simple-column-name-list code above, except that we don't worry about
* NOT NULL marking; included columns in a primary key should not be
* forced NOT NULL. We don't complain about duplicate columns, either,
* though maybe we should?
*/
foreach(lc, constraint->including)
{
char *key = strVal(lfirst(lc));
bool found = false;
ColumnDef *column = NULL;
ListCell *columns;
IndexElem *iparam;
foreach(columns, cxt->columns)
{
column = lfirst_node(ColumnDef, columns);
if (strcmp(column->colname, key) == 0)
{
found = true;
break;
}
}
if (!found)
{
if (SystemAttributeByName(key) != NULL)
{
/*
* column will be a system column in the new table, so accept
* it.
*/
found = true;
}
else if (cxt->inhRelations)
{
/* try inherited tables */
ListCell *inher;
foreach(inher, cxt->inhRelations)
{
RangeVar *inh = lfirst_node(RangeVar, inher);
Relation rel;
int count;
rel = table_openrv(inh, AccessShareLock);
/* check user requested inheritance from valid relkind */
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("inherited relation \"%s\" is not a table or foreign table",
inh->relname)));
for (count = 0; count < rel->rd_att->natts; count++)
{
Form_pg_attribute inhattr = TupleDescAttr(rel->rd_att,
count);
char *inhname = NameStr(inhattr->attname);
if (inhattr->attisdropped)
continue;
if (strcmp(key, inhname) == 0)
{
found = true;
break;
}
}
table_close(rel, NoLock);
if (found)
break;
}
}
}
/*
* In the ALTER TABLE case, don't complain about index keys not
* created in the command; they may well exist already. DefineIndex
* will complain about them if not.
*/
if (!found && !cxt->isalter)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" named in key does not exist", key),
parser_errposition(cxt->pstate, constraint->location)));
/* OK, add it to the index definition */
iparam = makeNode(IndexElem);
iparam->name = pstrdup(key);
iparam->expr = NULL;
iparam->indexcolname = NULL;
iparam->collation = NIL;
iparam->opclass = NIL;
index->indexIncludingParams = lappend(index->indexIncludingParams, iparam);
}
/*
* If we found anything that requires run-time SET NOT NULL, build a full
* ALTER TABLE command for that and add it to cxt->alist.
*/
if (notnullcmds)
{
AlterTableStmt *alterstmt = makeNode(AlterTableStmt);
alterstmt->relation = copyObject(cxt->relation);
alterstmt->cmds = notnullcmds;
alterstmt->relkind = OBJECT_TABLE;
alterstmt->missing_ok = false;
cxt->alist = lappend(cxt->alist, alterstmt);
}
return index;
}
| 0
|
60,318
|
// Advance the map iterator to its next element.
static void mapNext(ArrayIter& it) {
  ++it;
}
| 0
|
402,985
|
/**
 * Emit the C++ write() method for a Thrift "result" struct.  Unlike a
 * regular struct writer, result fields are mutually exclusive: exactly
 * one __isset flag is expected to be set, so the fields are emitted as
 * an if / else-if chain and only the chosen field is serialized.
 *
 * @param out      stream receiving the generated C++ source
 * @param tstruct  the result struct being generated
 * @param pointers when true, fields are stored as pointers and must be
 *                 dereferenced before serialization
 */
void t_cpp_generator::generate_struct_result_writer(ofstream& out,
t_struct* tstruct,
bool pointers) {
string name = tstruct->get_name();
const vector<t_field*>& fields = tstruct->get_sorted_members();
vector<t_field*>::const_iterator f_iter;
// Signature differs in template mode: write() is parameterized on the
// protocol type instead of taking the TProtocol base class.
if (gen_templates_) {
out << indent() << "template <class Protocol_>" << endl << indent() << "uint32_t "
<< tstruct->get_name() << "::write(Protocol_* oprot) const {" << endl;
} else {
indent(out) << "uint32_t " << tstruct->get_name()
<< "::write(::apache::thrift::protocol::TProtocol* oprot) const {" << endl;
}
indent_up();
out << endl << indent() << "uint32_t xfer = 0;" << endl << endl;
indent(out) << "xfer += oprot->writeStructBegin(\"" << name << "\");" << endl;
// Chain the per-field blocks with "if" for the first and " else if "
// for the rest, so at most one branch serializes its field.
bool first = true;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if (first) {
first = false;
out << endl << indent() << "if ";
} else {
out << " else if ";
}
out << "(this->__isset." << (*f_iter)->get_name() << ") {" << endl;
indent_up();
// Write field header
out << indent() << "xfer += oprot->writeFieldBegin("
<< "\"" << (*f_iter)->get_name() << "\", " << type_to_enum((*f_iter)->get_type()) << ", "
<< (*f_iter)->get_key() << ");" << endl;
// Write field contents
if (pointers) {
generate_serialize_field(out, *f_iter, "(*(this->", "))");
} else {
generate_serialize_field(out, *f_iter, "this->");
}
// Write field closer
indent(out) << "xfer += oprot->writeFieldEnd();" << endl;
indent_down();
indent(out) << "}";
}
// Write the struct map
out << endl << indent() << "xfer += oprot->writeFieldStop();" << endl << indent()
<< "xfer += oprot->writeStructEnd();" << endl << indent() << "return xfer;" << endl;
indent_down();
indent(out) << "}" << endl << endl;
}
| 0
|
211,180
|
// Composition text range queries are not implemented for the Aura view;
// always reports failure to the caller (range is left untouched).
bool RenderWidgetHostViewAura::GetCompositionTextRange(
gfx::Range* range) const {
NOTIMPLEMENTED();
return false;
}
| 0
|
69,587
|
// Compute the output shape of the forward pooling op.  Spatial pooling
// (depth_window == 1) pools over height/width and honors data_format;
// depthwise pooling keeps the spatial extent, reduces the depth
// dimension, and is emitted in NHWC order.
TensorShape PoolParameters::forward_output_shape() {
  if (depth_window != 1) {
    // Depthwise pooling.
    return TensorShape(
        {tensor_in_batch, tensor_in_rows, tensor_in_cols, out_depth});
  }
  // Spatial pooling.
  return ShapeFromFormat(data_format, tensor_in_batch, out_height, out_width,
                         depth);
}
| 0
|
338,253
|
/*
 * Create (or fail on a duplicate of) option group @id in @list and set
 * option @name to @value in it.
 *
 * Returns the result of qemu_opt_set(), or -1 when the group could not
 * be created.
 */
int qemu_opts_set(QemuOptsList *list, const char *id,
                  const char *name, const char *value)
{
    QemuOpts *opts = qemu_opts_create(list, id, 1);

    if (!opts) {
        return -1;
    }
    return qemu_opt_set(opts, name, value);
}
| 1
|
81,369
|
// Background-thread worker for Statement#run(): binds the baton's
// parameters, steps the statement once, and records either the error
// message or the resulting rowid/change count on success.
// STATEMENT_INIT/STATEMENT_MUTEX are project macros that recover the
// baton/statement from |data| and fetch the db mutex; the whole body
// runs under that sqlite3 mutex.
void Statement::Work_Run(napi_env e, void* data) {
STATEMENT_INIT(RunBaton);
STATEMENT_MUTEX(mtx);
sqlite3_mutex_enter(mtx);
// Make sure that we also reset when there are no parameters.
if (!baton->parameters.size()) {
sqlite3_reset(stmt->_handle);
}
if (stmt->Bind(baton->parameters)) {
stmt->status = sqlite3_step(stmt->_handle);
if (!(stmt->status == SQLITE_ROW || stmt->status == SQLITE_DONE)) {
// Step failed: capture the db error text for the JS callback.
stmt->message = std::string(sqlite3_errmsg(stmt->db->_handle));
}
else {
// Success: expose last insert rowid and affected-row count.
baton->inserted_id = sqlite3_last_insert_rowid(stmt->db->_handle);
baton->changes = sqlite3_changes(stmt->db->_handle);
}
}
sqlite3_mutex_leave(mtx);
}
| 0
|
274,343
|
/* Bottom-half handler: drain data queued on a virtio-serial port.
 * @opaque is the VirtIOSerialPort the BH was registered with. */
static void flush_queued_data_bh(void *opaque)
{
VirtIOSerialPort *port = opaque;
flush_queued_data(port);
}
| 0
|
454,834
|
/* Route early-debug (udbg) character output through the RTAS
 * display-status-with-delay call, e.g. to the operator panel. */
void __init udbg_init_rtas_panel(void)
{
udbg_putc = call_rtas_display_status_delay;
}
| 0
|
114,647
|
/*
 * Tear down a VM: unlink it from the global VM list, free its I/O
 * buses, MMU state and memslots, then drop the mm reference taken at
 * creation.  The teardown order matters: arch events are synced first,
 * and the mm is only dropped at the very end, after all per-VM state
 * that may reference it is gone.
 */
static void kvm_destroy_vm(struct kvm *kvm)
{
int i;
struct mm_struct *mm = kvm->mm;
kvm_arch_sync_events(kvm);
/* Remove from the global list so new references can't be taken. */
raw_spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
raw_spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++)
kvm_io_bus_destroy(kvm->buses[i]);
kvm_coalesced_mmio_free(kvm);
/* With MMU notifiers, unregistering also flushes shadow pages. */
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
kvm_arch_flush_shadow_all(kvm);
#endif
kvm_arch_destroy_vm(kvm);
kvm_free_physmem(kvm);
cleanup_srcu_struct(&kvm->srcu);
kvm_arch_free_vm(kvm);
hardware_disable_all();
/* Drop the mm reference last; kvm itself is already freed above. */
mmdrop(mm);
}
| 0
|
461,198
|
/*
 * Report whether @cid is served by any registered vsock transport:
 * the guest->host transport's local CID, the well-known host CID when
 * a host->guest transport exists, or the loopback CID when the local
 * transport is registered.
 */
bool vsock_find_cid(unsigned int cid)
{
	return (transport_g2h && cid == transport_g2h->get_local_cid()) ||
	       (transport_h2g && cid == VMADDR_CID_HOST) ||
	       (transport_local && cid == VMADDR_CID_LOCAL);
}
| 0
|
212,109
|
/* Stream-ops flush callback for pgsql large-object streams.
 * Flushing is not supported here, so it unconditionally reports
 * FAILURE. */
static int php_pgsql_fd_flush(php_stream *stream) /* {{{ */
{
return FAILURE;
}
/* }}} */
| 0
|
150,211
|
template<typename t>
CImg<t> _permute_axes(const char *const order, const t&) const {
if (is_empty() || !order) return CImg<t>(*this,false);
CImg<t> res;
const T* ptrs = _data;
unsigned char s_code[4] = { 0,1,2,3 }, n_code[4] = { 0 };
for (unsigned int l = 0; order[l]; ++l) {
int c = cimg::lowercase(order[l]);
if (c!='x' && c!='y' && c!='z' && c!='c') { *s_code = 4; break; }
else { ++n_code[c%=4]; s_code[l] = c; }
}
if (*order && *s_code<4 && *n_code<=1 && n_code[1]<=1 && n_code[2]<=1 && n_code[3]<=1) {
const unsigned int code = (s_code[0]<<12) | (s_code[1]<<8) | (s_code[2]<<4) | (s_code[3]);
ulongT wh, whd;
switch (code) {
case 0x0123 : // xyzc
return +*this;
case 0x0132 : // xycz
res.assign(_width,_height,_spectrum,_depth);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(x,y,c,z,wh,whd) = (t)*(ptrs++);
break;
case 0x0213 : // xzyc
res.assign(_width,_depth,_height,_spectrum);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(x,z,y,c,wh,whd) = (t)*(ptrs++);
break;
case 0x0231 : // xzcy
res.assign(_width,_depth,_spectrum,_height);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(x,z,c,y,wh,whd) = (t)*(ptrs++);
break;
case 0x0312 : // xcyz
res.assign(_width,_spectrum,_height,_depth);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(x,c,y,z,wh,whd) = (t)*(ptrs++);
break;
case 0x0321 : // xczy
res.assign(_width,_spectrum,_depth,_height);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(x,c,z,y,wh,whd) = (t)*(ptrs++);
break;
case 0x1023 : // yxzc
res.assign(_height,_width,_depth,_spectrum);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(y,x,z,c,wh,whd) = (t)*(ptrs++);
break;
case 0x1032 : // yxcz
res.assign(_height,_width,_spectrum,_depth);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(y,x,c,z,wh,whd) = (t)*(ptrs++);
break;
case 0x1203 : // yzxc
res.assign(_height,_depth,_width,_spectrum);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(y,z,x,c,wh,whd) = (t)*(ptrs++);
break;
case 0x1230 : // yzcx
res.assign(_height,_depth,_spectrum,_width);
switch (_width) {
case 1 : {
t *ptr_r = res.data(0,0,0,0);
for (unsigned int siz = _height*_depth*_spectrum; siz; --siz) {
*(ptr_r++) = (t)*(ptrs++);
}
} break;
case 2 : {
t *ptr_r = res.data(0,0,0,0), *ptr_g = res.data(0,0,0,1);
for (unsigned int siz = _height*_depth*_spectrum; siz; --siz) {
*(ptr_r++) = (t)ptrs[0];
*(ptr_g++) = (t)ptrs[1];
ptrs+=2;
}
} break;
case 3 : { // Optimization for the classical conversion from interleaved RGB to planar RGB
t *ptr_r = res.data(0,0,0,0), *ptr_g = res.data(0,0,0,1), *ptr_b = res.data(0,0,0,2);
for (unsigned int siz = _height*_depth*_spectrum; siz; --siz) {
*(ptr_r++) = (t)ptrs[0];
*(ptr_g++) = (t)ptrs[1];
*(ptr_b++) = (t)ptrs[2];
ptrs+=3;
}
} break;
case 4 : { // Optimization for the classical conversion from interleaved RGBA to planar RGBA
t
*ptr_r = res.data(0,0,0,0), *ptr_g = res.data(0,0,0,1),
*ptr_b = res.data(0,0,0,2), *ptr_a = res.data(0,0,0,3);
for (unsigned int siz = _height*_depth*_spectrum; siz; --siz) {
*(ptr_r++) = (t)ptrs[0];
*(ptr_g++) = (t)ptrs[1];
*(ptr_b++) = (t)ptrs[2];
*(ptr_a++) = (t)ptrs[3];
ptrs+=4;
}
} break;
default : {
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(y,z,c,x,wh,whd) = *(ptrs++);
return res;
}
}
break;
case 0x1302 : // ycxz
res.assign(_height,_spectrum,_width,_depth);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(y,c,x,z,wh,whd) = (t)*(ptrs++);
break;
case 0x1320 : // yczx
res.assign(_height,_spectrum,_depth,_width);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(y,c,z,x,wh,whd) = (t)*(ptrs++);
break;
case 0x2013 : // zxyc
res.assign(_depth,_width,_height,_spectrum);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(z,x,y,c,wh,whd) = (t)*(ptrs++);
break;
case 0x2031 : // zxcy
res.assign(_depth,_width,_spectrum,_height);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(z,x,c,y,wh,whd) = (t)*(ptrs++);
break;
case 0x2103 : // zyxc
res.assign(_depth,_height,_width,_spectrum);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(z,y,x,c,wh,whd) = (t)*(ptrs++);
break;
case 0x2130 : // zycx
res.assign(_depth,_height,_spectrum,_width);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(z,y,c,x,wh,whd) = (t)*(ptrs++);
break;
case 0x2301 : // zcxy
res.assign(_depth,_spectrum,_width,_height);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(z,c,x,y,wh,whd) = (t)*(ptrs++);
break;
case 0x2310 : // zcyx
res.assign(_depth,_spectrum,_height,_width);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(z,c,y,x,wh,whd) = (t)*(ptrs++);
break;
case 0x3012 : // cxyz
res.assign(_spectrum,_width,_height,_depth);
switch (_spectrum) {
case 1 : {
const T *ptr_r = data(0,0,0,0);
t *ptrd = res._data;
for (ulongT siz = (ulongT)_width*_height*_depth; siz; --siz) *(ptrd++) = (t)*(ptr_r++);
} break;
case 2 : {
const T *ptr_r = data(0,0,0,0), *ptr_g = data(0,0,0,1);
t *ptrd = res._data;
for (ulongT siz = (ulongT)_width*_height*_depth; siz; --siz) {
ptrd[0] = (t)*(ptr_r++);
ptrd[1] = (t)*(ptr_g++);
ptrd+=2;
}
} break;
case 3 : { // Optimization for the classical conversion from planar RGB to interleaved RGB
const T *ptr_r = data(0,0,0,0), *ptr_g = data(0,0,0,1), *ptr_b = data(0,0,0,2);
t *ptrd = res._data;
for (ulongT siz = (ulongT)_width*_height*_depth; siz; --siz) {
ptrd[0] = (t)*(ptr_r++);
ptrd[1] = (t)*(ptr_g++);
ptrd[2] = (t)*(ptr_b++);
ptrd+=3;
}
} break;
case 4 : { // Optimization for the classical conversion from planar RGBA to interleaved RGBA
const T *ptr_r = data(0,0,0,0), *ptr_g = data(0,0,0,1), *ptr_b = data(0,0,0,2), *ptr_a = data(0,0,0,3);
t *ptrd = res._data;
for (ulongT siz = (ulongT)_width*_height*_depth; siz; --siz) {
ptrd[0] = (t)*(ptr_r++);
ptrd[1] = (t)*(ptr_g++);
ptrd[2] = (t)*(ptr_b++);
ptrd[3] = (t)*(ptr_a++);
ptrd+=4;
}
} break;
default : {
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(c,x,y,z,wh,whd) = (t)*(ptrs++);
}
}
break;
case 0x3021 : // cxzy
res.assign(_spectrum,_width,_depth,_height);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(c,x,z,y,wh,whd) = (t)*(ptrs++);
break;
case 0x3102 : // cyxz
res.assign(_spectrum,_height,_width,_depth);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(c,y,x,z,wh,whd) = (t)*(ptrs++);
break;
case 0x3120 : // cyzx
res.assign(_spectrum,_height,_depth,_width);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(c,y,z,x,wh,whd) = (t)*(ptrs++);
break;
case 0x3201 : // czxy
res.assign(_spectrum,_depth,_width,_height);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(c,z,x,y,wh,whd) = (t)*(ptrs++);
break;
case 0x3210 : // czyx
res.assign(_spectrum,_depth,_height,_width);
wh = (ulongT)res._width*res._height; whd = wh*res._depth;
cimg_forXYZC(*this,x,y,z,c) res(c,z,y,x,wh,whd) = (t)*(ptrs++);
break;
}
}
if (!res)
throw CImgArgumentException(_cimg_instance
"permute_axes(): Invalid specified permutation '%s'.",
cimg_instance,
order);
return res;
| 0
|
146,651
|
/* Parse any JSON value into |item|, dispatching on the leading
 * character: literals (null/false/true), strings, numbers, arrays and
 * objects.  Returns a pointer just past the consumed text, or 0 on
 * failure with *ep pointing at the offending input. */
static const char *parse_value(cJSON *item,const char *value,const char **ep)
{
    if (!value)
        return 0; /* Fail on null input. */

    switch (*value)
    {
    case 'n':
        if (!strncmp(value,"null",4)) { item->type=cJSON_NULL; return value+4; }
        break;
    case 'f':
        if (!strncmp(value,"false",5)) { item->type=cJSON_False; return value+5; }
        break;
    case 't':
        if (!strncmp(value,"true",4)) { item->type=cJSON_True; item->valueint=1; return value+4; }
        break;
    case '\"':
        return parse_string(item,value,ep);
    case '[':
        return parse_array(item,value,ep);
    case '{':
        return parse_object(item,value,ep);
    default:
        if (*value=='-' || (*value>='0' && *value<='9'))
            return parse_number(item,value);
        break;
    }

    *ep=value;
    return 0; /* failure. */
}
| 0
|
481,837
|
/* Split a comma-separated string in place: terminate |str| at its
 * first comma and return a pointer to the remainder.  Returns NULL
 * when |str| is NULL/empty, contains no comma, or nothing follows the
 * comma. */
char *split_comma(char *str) {
	EUID_ASSERT();

	if (str == NULL || *str == '\0')
		return NULL;

	char *rest = strchr(str, ',');
	if (rest == NULL)
		return NULL;

	/* Cut the first token and step past the comma. */
	*rest++ = '\0';
	return (*rest == '\0') ? NULL : rest;
}
| 0
|
314,702
|
// Find, unlink and return the entry in |commands_pending_response|
// that is waiting for a response to |opcode|; returns NULL when no
// such command is pending.  The pending-response lock is taken and
// released internally; once removed, the caller owns the entry.
static waiting_command_t *get_waiting_command(command_opcode_t opcode) {
pthread_mutex_lock(&commands_pending_response_lock);
for (const list_node_t *node = list_begin(commands_pending_response);
node != list_end(commands_pending_response);
node = list_next(node)) {
waiting_command_t *wait_entry = list_node(node);
if (!wait_entry || wait_entry->opcode != opcode)
continue;
// Unlink before returning; this early-exit path must unlock too.
list_remove(commands_pending_response, wait_entry);
pthread_mutex_unlock(&commands_pending_response_lock);
return wait_entry;
}
pthread_mutex_unlock(&commands_pending_response_lock);
return NULL;
}
| 0
|
206,611
|
// Transfer ownership of the pending popup AwContents to the Java side:
// releases the native pointer (caller assumes ownership) and returns it
// as a jlong handle.  Must be called on the UI thread.
jlong AwContents::ReleasePopupAwContents(JNIEnv* env, jobject obj) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
return reinterpret_cast<intptr_t>(pending_contents_.release());
}
| 0
|
23,933
|
// Browser test: closing a window whose focused inner frame has a
// beforeunload handler shows the modal dialog; accepting it lets the
// window-closed notification fire.
IN_PROC_BROWSER_TEST_F(UnloadTest, BrowserCloseWithInnerFocusedFrame) {
  NavigateToDataURL(INNER_FRAME_WITH_FOCUS_HTML, "innerframewithfocus");
  content::WindowedNotificationObserver window_observer(
      chrome::NOTIFICATION_BROWSER_CLOSED,
      content::NotificationService::AllSources());
  chrome::CloseWindow(browser());
  ClickModalDialogButton(true);
  window_observer.Wait();
}
| 0
|
45,846
|
/* Codec close callback: free the studio-profile VLC tables and then
 * run the common H.263 teardown.  Frame-thread copies share the VLC
 * tables with the original context, so only the owner frees them. */
static av_cold int decode_end(AVCodecContext *avctx)
{
    Mpeg4DecContext *ctx = avctx->priv_data;

    if (!avctx->internal->is_copy) {
        int i;

        for (i = 0; i < 12; i++)
            ff_free_vlc(&ctx->studio_intra_tab[i]);

        ff_free_vlc(&ctx->studio_luma_dc);
        ff_free_vlc(&ctx->studio_chroma_dc);
    }

    return ff_h263_decode_end(avctx);
}
| 0
|
494,852
|
/*
 * Write a Makefile-style dependency rule for the assembled file: the
 * target followed by every source/include file in |list|, wrapped to
 * keep lines short.  Output goes to |depend_file| (or stdout when it
 * is unset or "-").  Quoting and line-continuation syntax differ for
 * Watcom make (wmake).  Consumes |list|, freeing each node.
 */
static void emit_dependencies(StrList *list)
{
FILE *deps;
int linepos, len;
StrList *l, *nl;
/* Watcom make uses "&" continuations and needs an explicit %null
 * command on phony targets. */
bool wmake = (quote_for_make == quote_for_wmake);
const char *wrapstr, *nulltarget;
wrapstr = wmake ? " &\n " : " \\\n ";
nulltarget = wmake ? "\t%null\n" : "";
if (depend_file && strcmp(depend_file, "-")) {
deps = nasm_open_write(depend_file, NF_TEXT);
if (!deps) {
nasm_error(ERR_NONFATAL|ERR_NOFILE|ERR_USAGE,
"unable to write dependency file `%s'", depend_file);
return;
}
} else {
deps = stdout;
}
linepos = fprintf(deps, "%s :", depend_target);
list_for_each(l, list) {
char *file = quote_for_make(l->str);
len = strlen(file);
/* Wrap before exceeding ~64 columns, but never on an empty line. */
if (linepos + len > 62 && linepos > 1) {
fputs(wrapstr, deps);
linepos = 1;
}
fprintf(deps, " %s", file);
linepos += len+1;
nasm_free(file);
}
fprintf(deps, "\n\n");
/* Optionally emit an empty (phony) rule per prerequisite so make
 * does not fail when a header is deleted; free the list as we go. */
list_for_each_safe(l, nl, list) {
if (depend_emit_phony) {
char *file = quote_for_make(l->str);
fprintf(deps, "%s :\n%s\n", file, nulltarget);
nasm_free(file);
}
nasm_free(l);
}
if (deps != stdout)
fclose(deps);
}
| 0
|
311,842
|
// Returns the cached opacity state of the media-controls panel.
bool MediaControlPanelElement::isOpaque() const {
return m_opaque;
}
| 0
|
324,506
|
/*
 * CPU init for the PowerPC 750CX: registers its SPRs, BATs, exception
 * vectors, cache geometry and the 6xx IRQ controller.
 */
static void init_proc_750cx (CPUPPCState *env)
{
    gen_spr_ne_601(env);
    gen_spr_7xx(env);
    /* XXX : not implemented */
    /*
     * Bug fix: the write callback used to be NULL, so a guest issuing
     * mtspr on L2CR dereferenced a NULL function pointer and crashed
     * QEMU.  Register a no-op write handler instead, matching the
     * other 750-family models.
     */
    spr_register(env, SPR_L2CR, "L2CR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, spr_access_nop,
                 0x00000000);
    /* Time base */
    gen_tbl(env);
    /* Thermal management */
    gen_spr_thrm(env);
    /* This register is not implemented but is present for compatibility */
    spr_register(env, SPR_SDA, "SDA",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* Hardware implementation registers */
    /* XXX : not implemented */
    spr_register(env, SPR_HID0, "HID0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_HID1, "HID1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* Memory management */
    gen_low_BATs(env);
    /* PowerPC 750cx has 8 DBATs and 8 IBATs */
    gen_high_BATs(env);
    init_excp_750cx(env);
    env->dcache_line_size = 32;
    env->icache_line_size = 32;
    /* Allocate hardware IRQ controller */
    ppc6xx_irq_init(env);
}
| 1
|
192,110
|
// Browser teardown: detach from sync, unregister from the global
// browser list, notify session/tab-restore services, and tear down the
// off-the-record profile when this was the last incognito window.
// Ordering matters: the browser must leave BrowserList before the
// "last browser for profile" checks below.
Browser::~Browser() {
VLOG_IF(1, g_log_bug53991) << "~Browser: " << profile_->IsOffTheRecord()
<< "; stillActive="
<< BrowserList::IsOffTheRecordSessionActive();
if (profile_->GetProfileSyncService())
profile_->GetProfileSyncService()->RemoveObserver(this);
BrowserList::RemoveBrowser(this);
#if defined(OS_WIN) || defined(OS_LINUX)
// Last window for this profile: drop its tab restore service.
if (!BrowserList::HasBrowserWithProfile(profile_)) {
profile_->ResetTabRestoreService();
}
#endif
SessionService* session_service = profile_->GetSessionService();
if (session_service)
session_service->WindowClosed(session_id_);
TabRestoreService* tab_restore_service = profile()->GetTabRestoreService();
if (tab_restore_service)
tab_restore_service->BrowserClosed(this);
// Destroy the incognito profile once no incognito session remains.
if (profile_->IsOffTheRecord() &&
!BrowserList::IsOffTheRecordSessionActive()) {
profile_->GetOriginalProfile()->DestroyOffTheRecordProfile();
}
if (select_file_dialog_.get())
select_file_dialog_->ListenerDestroyed();
TabRestoreServiceDestroyed(tab_restore_service_);
}
| 0
|
78,520
|
/* Thread-pool trampoline: decode one macroblock row without in-loop
 * filtering.  The trailing 0 selects the VP8 (not VP7) code path —
 * NOTE(review): confirm against decode_mb_row_no_filter's signature. */
static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
int jobnr, int threadnr)
{
return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
}
| 0
|
79,484
|
was_set_insecurely(char_u *opt, int opt_flags)
{
    int idx = findoption(opt);

    /* Unknown option name: report an internal error, signal with -1. */
    if (idx < 0)
    {
	internal_error("was_set_insecurely()");
	return -1;
    }
    /* Check the P_INSECURE flag for the scope chosen by opt_flags. */
    return (*insecure_flag(idx, opt_flags) & P_INSECURE) != 0;
}
| 0
|
136,472
|
/* Cython-generated CPython wrapper for
 * BufferedSocketReader.__setstate_cython__: sets up the refnanny
 * context and forwards to the implementation function.
 * (Auto-generated code — do not hand-edit.) */
static PyObject *__pyx_pw_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_17clickhouse_driver_14bufferedreader_20BufferedSocketReader_6__setstate_cython__(((struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
| 0
|
145,753
|
/*
 * Create a horizontally mirrored (flopped) copy of |image|.  Each row
 * is copied into the clone with its pixels written right-to-left; rows
 * are processed in parallel when OpenMP is available.  Returns the new
 * image, or NULL on failure (errors reported through |exception|).
 */
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
CacheView
*flop_view,
*image_view;
Image
*flop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (flop_image == (Image *) NULL)
return((Image *) NULL);
/*
Flop each row.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,flop_image,1,1)
#endif
for (y=0; y < (ssize_t) flop_image->rows; y++)
{
register const Quantum
*restrict p;
register ssize_t
x;
register Quantum
*restrict q;
/* A failure on any row aborts the remaining iterations cheaply. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Start q one past the row's end; it is decremented per pixel so the
   destination is written right-to-left while p advances. */
q+=GetPixelChannels(flop_image)*flop_image->columns;
for (x=0; x < (ssize_t) flop_image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(flop_image);
if (GetPixelReadMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
/* Copy every channel defined in both source and destination. */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(flop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(flop_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FlopImage)
#endif
proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flop_view=DestroyCacheView(flop_view);
image_view=DestroyCacheView(image_view);
flop_image->type=image->type;
/* Mirror the virtual-canvas x offset to match the flopped content. */
if (page.width != 0)
page.x=(ssize_t) (page.width-flop_image->columns-page.x);
flop_image->page=page;
if (status == MagickFalse)
flop_image=DestroyImage(flop_image);
return(flop_image);
}
| 0
|
463,614
|
static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
{
percpu_ref_exit(&data->refs);
kfree(data->table);
kfree(data);
| 0
|
414,064
|
/*
 * Send an ADDBA (block-ack session) response for the given peer
 * connection/TID over WMI and wait for the firmware's "response sent"
 * event.  Returns 0 on success or a negative errno; a non-zero status
 * in the firmware event is mapped to -EINVAL.
 * NOTE(review): |reply| is not zero-initialized here — wmi_call() is
 * expected to fill it on success; confirm for the error paths.
 */
int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
u16 status, bool amsdu, u16 agg_wsize, u16 timeout)
{
int rc;
struct wmi_rcp_addba_resp_cmd cmd = {
.cidxtid = mk_cidxtid(cid, tid),
.dialog_token = token,
.status_code = cpu_to_le16(status),
/* bit 0: A-MSDU supported
* bit 1: policy (should be 0 for us)
* bits 2..5: TID
* bits 6..15: buffer size
*/
.ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
(agg_wsize << 6)),
.ba_timeout = cpu_to_le16(timeout),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_rcp_addba_resp_sent_event evt;
} __packed reply;
wil_dbg_wmi(wil,
"ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
/* Synchronous WMI call with a 100 ms wait for the sent event. */
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
100);
if (rc)
return rc;
if (reply.evt.status) {
wil_err(wil, "ADDBA response failed with status %d\n",
le16_to_cpu(reply.evt.status));
rc = -EINVAL;
}
return rc;
}
| 0
|
362,793
|
/* Convert a socket-memory amount in bytes into a count of
 * SK_MEM_QUANTUM-sized units, rounding up. */
static inline int sk_mem_pages(int amt)
{
return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
| 0
|
281,962
|
/*
 * Open camera |cameraId| through the camera2 (device) API for the
 * calling package: validates the connection, rejects HAL1 devices,
 * creates and finishes a CameraDeviceClient, and returns it via
 * |device|.  Returns OK on success or an error status.
 * NOTE(review): the inner |sp<BasicClient> client| and the inner
 * |status_t status| intentionally shadow the outer declarations.
 */
status_t CameraService::connectDevice(
const sp<ICameraDeviceCallbacks>& cameraCb,
int cameraId,
const String16& clientPackageName,
int clientUid,
/*out*/
sp<ICameraDeviceUser>& device)
{
String8 clientName8(clientPackageName);
int callingPid = getCallingPid();
LOG1("CameraService::connectDevice E (pid %d \"%s\", id %d)", callingPid,
clientName8.string(), cameraId);
status_t status = validateConnect(cameraId, /*inout*/clientUid);
if (status != OK) {
return status;
}
sp<CameraDeviceClient> client;
{
Mutex::Autolock lock(mServiceLock);
{
// Shadowed temporary: only used to probe for a conflicting client.
sp<BasicClient> client;
if (!canConnectUnsafe(cameraId, clientPackageName,
cameraCb->asBinder(),
/*out*/client)) {
return -EBUSY;
}
}
int facing = -1;
int deviceVersion = getDeviceVersion(cameraId, &facing);
// Mark the camera busy before attempting to connect.
if (isValidCameraId(cameraId)) {
updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
cameraId);
}
switch(deviceVersion) {
case CAMERA_DEVICE_API_VERSION_1_0:
// HAL1 devices cannot back the camera2 device interface.
ALOGW("Camera using old HAL version: %d", deviceVersion);
return -EOPNOTSUPP;
case CAMERA_DEVICE_API_VERSION_2_0:
case CAMERA_DEVICE_API_VERSION_2_1:
case CAMERA_DEVICE_API_VERSION_3_0:
client = new CameraDeviceClient(this, cameraCb, clientPackageName,
cameraId, facing, callingPid, clientUid, getpid());
break;
case -1:
ALOGE("Invalid camera id %d", cameraId);
return BAD_VALUE;
default:
ALOGE("Unknown camera device HAL version: %d", deviceVersion);
return INVALID_OPERATION;
}
// Shadowed local: result of finishing the connection.
status_t status = connectFinishUnsafe(client, client->getRemote());
if (status != OK) {
// Roll back the busy status set above.
updateStatus(ICameraServiceListener::STATUS_PRESENT, cameraId);
return status;
}
LOG1("CameraService::connectDevice X (id %d, this pid is %d)", cameraId,
getpid());
mClient[cameraId] = client;
}
device = client;
return OK;
}
| 0
|
28,653
|
/**
 * qemuDomainGetSecretAESAlias:
 * @srcalias: alias of the source object the secret belongs to
 * @isLuks: true when the secret is used for LUKS decryption
 *
 * Build the object alias for an AES-encrypted secret derived from the
 * source alias.
 *
 * Returns a newly allocated alias string (caller frees), or NULL when
 * @srcalias is missing (an error is reported) or allocation fails.
 */
char *qemuDomainGetSecretAESAlias(const char *srcalias, bool isLuks)
{
    char *alias;
    const char *fmt = isLuks ? "%s-luks-secret0" : "%s-secret0";

    if (!srcalias) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("encrypted secret alias requires valid source alias"));
        return NULL;
    }

    ignore_value(virAsprintf(&alias, fmt, srcalias));
    return alias;
}
| 0
|
16,673
|
/*
 * Parse an MPI from BUFFER_ARG (BUFLEN bytes) according to FORMAT and,
 * when RET_MPI is non-NULL, return the normalized result in *RET_MPI
 * (caller owns it); with RET_MPI == NULL the input is only validated.
 * NSCANNED, when non-NULL, receives the number of bytes consumed (set
 * only by the PGP and SSH branches).  Secure-memory MPIs are allocated
 * when the input buffer itself lives in secure memory.  Returns 0 on
 * success or a GPG error code.
 */
gcry_error_t gcry_mpi_scan ( struct gcry_mpi * * ret_mpi , enum gcry_mpi_format format , const void * buffer_arg , size_t buflen , size_t * nscanned ) {
const unsigned char * buffer = ( const unsigned char * ) buffer_arg ;
struct gcry_mpi * a = NULL ;
unsigned int len ;
int secure = ( buffer && gcry_is_secure ( buffer ) ) ;
/* SSH carries its own 4-byte length prefix, so buflen is not used as
 * the payload length there (len stays 0 unless the caller gave one). */
if ( format == GCRYMPI_FMT_SSH ) len = 0 ;
else len = buflen ;
if ( format == GCRYMPI_FMT_STD ) {
/* Big-endian with a sign bit in the top bit; negatives rejected. */
const unsigned char * s = buffer ;
a = secure ? mpi_alloc_secure ( ( len + BYTES_PER_MPI_LIMB - 1 ) / BYTES_PER_MPI_LIMB ) : mpi_alloc ( ( len + BYTES_PER_MPI_LIMB - 1 ) / BYTES_PER_MPI_LIMB ) ;
if ( len ) {
a -> sign = ! ! ( * s & 0x80 ) ;
if ( a -> sign ) {
mpi_free ( a ) ;
return gcry_error ( GPG_ERR_INTERNAL ) ;
}
else _gcry_mpi_set_buffer ( a , s , len , 0 ) ;
}
if ( ret_mpi ) {
mpi_normalize ( a ) ;
* ret_mpi = a ;
}
else mpi_free ( a ) ;
return 0 ;
}
else if ( format == GCRYMPI_FMT_USG ) {
/* Unsigned big-endian octet string. */
a = secure ? mpi_alloc_secure ( ( len + BYTES_PER_MPI_LIMB - 1 ) / BYTES_PER_MPI_LIMB ) : mpi_alloc ( ( len + BYTES_PER_MPI_LIMB - 1 ) / BYTES_PER_MPI_LIMB ) ;
if ( len ) _gcry_mpi_set_buffer ( a , buffer , len , 0 ) ;
if ( ret_mpi ) {
mpi_normalize ( a ) ;
* ret_mpi = a ;
}
else mpi_free ( a ) ;
return 0 ;
}
else if ( format == GCRYMPI_FMT_PGP ) {
/* OpenPGP MPI; the helper parses the embedded bit-length header and
 * reports how many bytes it consumed via len. */
a = mpi_read_from_buffer ( buffer , & len , secure ) ;
if ( nscanned ) * nscanned = len ;
if ( ret_mpi && a ) {
mpi_normalize ( a ) ;
* ret_mpi = a ;
}
else if ( a ) {
mpi_free ( a ) ;
a = NULL ;
}
return a ? 0 : gcry_error ( GPG_ERR_INV_OBJ ) ;
}
else if ( format == GCRYMPI_FMT_SSH ) {
/* RFC 4251 mpint: 4-byte big-endian length, then the value. */
const unsigned char * s = buffer ;
size_t n ;
/* NOTE(review): all of these bound checks are skipped when len == 0
 * (the default for SSH above), i.e. the length prefix and payload
 * reads are then unbounded and the caller must guarantee the buffer
 * is large enough -- confirm intended behaviour. */
if ( len && len < 4 ) return gcry_error ( GPG_ERR_TOO_SHORT ) ;
n = ( s [ 0 ] << 24 | s [ 1 ] << 16 | s [ 2 ] << 8 | s [ 3 ] ) ;
s += 4 ;
if ( len ) len -= 4 ;
if ( len && n > len ) return gcry_error ( GPG_ERR_TOO_LARGE ) ;
a = secure ? mpi_alloc_secure ( ( n + BYTES_PER_MPI_LIMB - 1 ) / BYTES_PER_MPI_LIMB ) : mpi_alloc ( ( n + BYTES_PER_MPI_LIMB - 1 ) / BYTES_PER_MPI_LIMB ) ;
if ( n ) {
/* Sign bit set means a negative mpint, which is not supported. */
a -> sign = ! ! ( * s & 0x80 ) ;
if ( a -> sign ) {
mpi_free ( a ) ;
return gcry_error ( GPG_ERR_INTERNAL ) ;
}
else _gcry_mpi_set_buffer ( a , s , n , 0 ) ;
}
if ( nscanned ) * nscanned = n + 4 ;
if ( ret_mpi ) {
mpi_normalize ( a ) ;
* ret_mpi = a ;
}
else mpi_free ( a ) ;
return 0 ;
}
else if ( format == GCRYMPI_FMT_HEX ) {
/* Hex expects a nul-terminated string, so an explicit buflen is an
 * argument error. */
if ( buflen ) return gcry_error ( GPG_ERR_INV_ARG ) ;
a = secure ? mpi_alloc_secure ( 0 ) : mpi_alloc ( 0 ) ;
if ( mpi_fromstr ( a , ( const char * ) buffer ) ) {
mpi_free ( a ) ;
return gcry_error ( GPG_ERR_INV_OBJ ) ;
}
if ( ret_mpi ) {
mpi_normalize ( a ) ;
* ret_mpi = a ;
}
else mpi_free ( a ) ;
return 0 ;
}
else return gcry_error ( GPG_ERR_INV_ARG ) ;
}
| 0
|
224,735
|
// Forward the "local storage enabled" preference to the wrapped
// WebCore settings object.
void WebSettingsImpl::setLocalStorageEnabled(bool enabled)
{
m_settings->setLocalStorageEnabled(enabled);
}
| 0
|
431,533
|
// Implements the dropRole command: removes the role from every user
// document and every other role, then deletes the role document itself.
// Runs under the authz data mutex; the user cache is invalidated after
// each write even on failure, since the write may have succeeded despite
// the reported error.
bool run(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
RoleName roleName;
Status status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Built-in roles are immutable and cannot be dropped.
if (RoleGraph::isBuiltinRole(roleName)) {
return appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
<< " is a built-in role and cannot be modified."));
}
// Confirm the role exists before touching any documents.
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Remove this role from all users
long long nMatched;
status = updateAuthzDocuments(
opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
<< roleName.getDB()))),
BSON("$pull" << BSON("roles" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
<< roleName.getDB()))),
false,
true,
&nMatched);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::UserModificationFailed
: status.code();
return appendCommandStatus(result,
Status(code,
str::stream() << "Failed to remove role "
<< roleName.getFullName()
<< " from all users: "
<< status.reason()));
}
// Remove this role from all other roles
status = updateAuthzDocuments(
opCtx,
AuthorizationManager::rolesCollectionNamespace,
BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
<< roleName.getDB()))),
BSON("$pull" << BSON("roles" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
<< roleName.getDB()))),
false,
true,
&nMatched);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::RoleModificationFailed
: status.code();
return appendCommandStatus(
result,
Status(code,
str::stream() << "Removed role " << roleName.getFullName()
<< " from all users but failed to remove from all roles: "
<< status.reason()));
}
audit::logDropRole(Client::getCurrent(), roleName);
// Finally, remove the actual role document
status = removeRoleDocuments(opCtx,
BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
<< roleName.getDB()),
&nMatched);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
return appendCommandStatus(
result,
Status(status.code(),
str::stream() << "Removed role " << roleName.getFullName()
<< " from all users and roles but failed to actually delete"
" the role itself: "
<< status.reason()));
}
// A racing drop may have removed the document first; report not-found.
dassert(nMatched == 0 || nMatched == 1);
if (nMatched == 0) {
return appendCommandStatus(
result,
Status(ErrorCodes::RoleNotFound,
str::stream() << "Role '" << roleName.getFullName() << "' not found"));
}
return true;
}
| 0
|
137,129
|
/*
 * Register (release == 0) or tear down (release != 0) the MySql
 * extension for 'interp'.  A global init refcount (mydbObjCmd.init)
 * tracks how many interpreters use libmysql; the library is shut down
 * only when that count returns to zero.  Returns JSI_OK on success or
 * JSI_ERROR (with a logged message) on failure.
 */
Jsi_RC Jsi_InitMySql(Jsi_Interp *interp, int release)
{
if (release) {
/* Last user gone: shut the client library down before the per-
 * interpreter cleanup. */
if (!--mydbObjCmd.init)
mysql_library_end();
return Jsi_DoneMySql(interp);
}
Jsi_Hash* dbSys;
#if JSI_USE_STUBS
if (Jsi_StubsInit(interp, 0) != JSI_OK)
return JSI_ERROR;
#endif
#ifndef JSI_OMIT_THREADS
/* Initialize libmysql exactly once, on the first interpreter. */
if (mydbObjCmd.init == 0 && mysql_library_init(0, NULL, NULL))
return Jsi_LogError("failed to initialize MySQL library\n");
#else
/* Without thread support this extension cannot function at all. */
return Jsi_LogError("Threads required for mysql");
#endif
Jsi_Value *info = Jsi_ValueNew1(interp);
Jsi_JSONParseFmt(interp, &info, "{pkgVer:%d}", MYSQL_VERSION_ID);
Jsi_PkgOpts dbPkgOpts = { mydb_ObjCmd_Specs, &mydbObjCmd, mysqlCmds, info};
Jsi_RC rc = Jsi_PkgProvideEx(interp, "MySql", 1.1, Jsi_InitMySql, &dbPkgOpts);
Jsi_DecrRefCount(interp, info);
if (rc != JSI_OK)
rc = JSI_ERROR;
else if (!(dbSys = Jsi_UserObjRegister(interp, &mysqlobject)))
rc = Jsi_LogError("Failed to init mysql extension");
else if (!Jsi_CommandCreateSpecs(interp, mysqlobject.name, mysqlCmds, dbSys, JSI_CMDSPEC_ISOBJ))
rc = JSI_ERROR;
/* Only count the interpreter once everything above succeeded; on any
 * failure undo the library init done for it. */
if (rc == JSI_OK)
mydbObjCmd.init++;
else
mysql_library_end();
return rc;
}
| 0
|
328,112
|
/*
 * Pop the next captured packet off the context's packet list.
 *
 * Loops until a packet is available or EOF is flagged.  List access is
 * serialized with ctx->mutex; the capture callback thread is the
 * producer.  Returns the packet size on success, AVERROR(EAGAIN) in
 * non-blocking mode when the list is empty, or AVERROR(EIO) on EOF.
 */
static int dshow_read_packet(AVFormatContext *s, AVPacket *pkt)
{
struct dshow_ctx *ctx = s->priv_data;
AVPacketList *pktl = NULL;
while (!ctx->eof && !pktl) {
WaitForSingleObject(ctx->mutex, INFINITE);
pktl = ctx->pktl;
if (pktl) {
/* Detach the head node, hand its packet to the caller, and free
 * the list node (packet data ownership transfers with *pkt). */
*pkt = pktl->pkt;
ctx->pktl = ctx->pktl->next;
av_free(pktl);
/* NOTE(review): curbufsize is also updated from the capture
 * callback thread; presumably that path holds ctx->mutex as
 * well -- confirm, otherwise this read-modify-write races. */
ctx->curbufsize -= pkt->size;
}
ResetEvent(ctx->event[1]);
ReleaseMutex(ctx->mutex);
if (!pktl) {
/* No packet: check the filter graph's event queue for
 * completion/errors, else wait or bail out (non-blocking). */
if (dshow_check_event_queue(ctx->media_event) < 0) {
ctx->eof = 1;
} else if (s->flags & AVFMT_FLAG_NONBLOCK) {
return AVERROR(EAGAIN);
} else {
/* Wait for either "packet available" or "graph event". */
WaitForMultipleObjects(2, ctx->event, 0, INFINITE);
}
}
}
return ctx->eof ? AVERROR(EIO) : pkt->size;
}
| 1
|
483,269
|
/*
 * Fork-time cgroup setup: start the child pointing at the root
 * init_css_set and with its css_set list link unlinked.
 */
void cgroup_fork(struct task_struct *child)
{
RCU_INIT_POINTER(child->cgroups, &init_css_set);
INIT_LIST_HEAD(&child->cg_list);
}
| 0
|
146,045
|
/* Allocate the two temporary EC points (mG, mQ) on the key's heap;
 * record a memory error if either allocation fails. */
if (err == MP_OKAY) {
mG = wc_ecc_new_point_h(key->heap);
mQ = wc_ecc_new_point_h(key->heap);
if (mQ == NULL || mG == NULL)
err = MEMORY_E;
}
|
308,820
|
// Records to a client-id-suffixed UMA boolean histogram whether previews
// were enabled for this offlining attempt.
void RecordOffliningPreviewsUMA(const ClientId& client_id,
                                content::PreviewsState previews_state) {
  // Previews count as disabled only for the two explicit "off" states;
  // any other state means some preview treatment is enabled.
  const bool previews_disabled =
      previews_state == content::PreviewsTypes::PREVIEWS_OFF ||
      previews_state == content::PreviewsTypes::PREVIEWS_NO_TRANSFORM;
  base::UmaHistogramBoolean(
      AddHistogramSuffix(client_id,
                         "OfflinePages.Background.OffliningPreviewStatus"),
      !previews_disabled);
}
| 0
|
261,537
|
/*
 * Release the reference a jump/goto verdict holds on its destination
 * chain; other verdicts reference no chain and need no cleanup.
 */
static void nft_verdict_uninit(const struct nft_data *data)
{
	if (data->verdict == NFT_JUMP || data->verdict == NFT_GOTO)
		data->chain->use--;
}
| 0
|
311,694
|
/*
 * Lookup in the /proc root: try a PID directory first; a zero result
 * from proc_pid_lookup() means it handled (instantiated) the dentry.
 * Otherwise fall through to the regular /proc tree lookup.
 */
static struct dentry *proc_root_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	if (proc_pid_lookup(dir, dentry, flags) == NULL)
		return NULL;

	return proc_lookup(dir, dentry, flags);
}
| 0
|
488,313
|
/*
 * Return the requested FILE_ATTR_FLAGS bit(s) from the attribute's inode,
 * but only for the unnamed $DATA attribute; every other attribute
 * reports 0.
 *
 * NOTE(review): the name test is a pointer comparison against the
 * AT_UNNAMED sentinel, not a string comparison -- presumably all unnamed
 * attributes carry exactly that pointer; confirm against the attr setup.
 */
static int NAttrFlag(ntfs_attr *na, FILE_ATTR_FLAGS flag)
{
if (na->type == AT_DATA && na->name == AT_UNNAMED)
return (na->ni->flags & flag);
return 0;
}
| 0
|
462,784
|
// An $or of two $ne predicates on the same field matches every document,
// so the indexed plan's bounds must collapse to the full
// [MinKey, MaxKey] interval (plus the usual collection scan alternative).
TEST_F(QueryPlannerTest, IndexBoundsOrOfNegations) {
addIndex(BSON("a" << 1));
runQuery(fromjson("{$or: [{a: {$ne: 3}}, {a: {$ne: 4}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
"{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
"bounds: {a: [['MinKey','MaxKey',true,true]]}}}}}");
}
| 0
|
513,040
|
// Hook invoked at the end of a page; the default implementation is a no-op.
virtual void endPage() {}
| 0
|
478,391
|
// Return the stored human-readable error message; guaranteed not to throw.
const char *what() const throw() { return _message; }
| 0
|
59,308
|
/*
 * Stub used when AppleDouble handling is unsupported on this platform:
 * both arguments are ignored and the fixup always reports success.
 */
fixup_appledouble(struct archive_write_disk *a, const char *pathname)
{
(void)a; /* UNUSED */
(void)pathname; /* UNUSED */
return (ARCHIVE_OK);
}
| 0
|
438,616
|
/*
 * Install the render-engine (RCS) flush and fini-breadcrumb emitters
 * matching the device generation; anything below gen11 uses the gen8
 * variants.
 */
static void rcs_submission_override(struct intel_engine_cs *engine)
{
	const int gen = INTEL_GEN(engine->i915);

	if (gen == 12) {
		engine->emit_flush = gen12_emit_flush_render;
		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
	} else if (gen == 11) {
		engine->emit_flush = gen11_emit_flush_render;
		engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
	} else {
		engine->emit_flush = gen8_emit_flush_render;
		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
	}
}
| 0
|
388,676
|
/*
 * VFS dispatcher for get_compression: VFS_FIND (macro defined elsewhere)
 * walks the module chain to the first handle providing the operation,
 * then the call is forwarded to that module's implementation.
 */
NTSTATUS smb_vfs_call_get_compression(vfs_handle_struct *handle,
TALLOC_CTX *mem_ctx,
struct files_struct *fsp,
struct smb_filename *smb_fname,
uint16_t *_compression_fmt)
{
VFS_FIND(get_compression);
return handle->fns->get_compression_fn(handle, mem_ctx, fsp, smb_fname,
_compression_fmt);
}
| 0
|
272,217
|
// Position the archive file at the start of the next block
// (NextBlockPos, relative to the beginning of the file).
void Archive::SeekToNext()
{
Seek(NextBlockPos,SEEK_SET);
}
| 0
|
38,850
|
/*
 * Format the /proc/<pid>/syscall line for 'task' into 'buffer':
 *  - "running" when task_current_syscall() returns nonzero (no syscall
 *    state could be captured),
 *  - "<nr> <sp> <pc>" for a negative syscall number,
 *  - otherwise the syscall number, its six arguments, SP and PC.
 * Takes and drops the trace lock; returns the byte count written or a
 * negative errno from lock_trace().
 */
static int proc_pid_syscall(struct task_struct *task, char *buffer)
{
long nr;
unsigned long args[6], sp, pc;
int res = lock_trace(task);
if (res)
return res;
if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
res = sprintf(buffer, "running\n");
else if (nr < 0)
res = sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
else
res = sprintf(buffer,
"%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
nr,
args[0], args[1], args[2], args[3], args[4], args[5],
sp, pc);
unlock_trace(task);
return res;
}
| 0
|
479,798
|
//! In-place conversion of pixel values from the HSL to the RGB color space.
/**
   Requires a 3-channel instance (H in degrees, S and L presumably in
   [0,1] -- standard HSL convention, confirm against callers); throws
   CImgInstanceException otherwise.  Output channels are scaled to [0,255].
**/
CImg<T>& HSLtoRGB() {
if (_spectrum!=3)
throw CImgInstanceException(_cimg_instance
"HSLtoRGB(): Instance is not a HSL image.",
cimg_instance);
T *p1 = data(0,0,0,0), *p2 = data(0,0,0,1), *p3 = data(0,0,0,2);
const longT whd = (longT)width()*height()*depth();
cimg_pragma_openmp(parallel for cimg_openmp_if_size(whd,256))
for (longT N = 0; N<whd; ++N) {
// C = chroma, X = second-largest component, m = per-channel offset.
const Tfloat
H = cimg::mod((Tfloat)p1[N]/60,(Tfloat)6),
S = (Tfloat)p2[N],
L = (Tfloat)p3[N],
C = (1 - cimg::abs(2*L - 1))*S,
X = C*(1 - cimg::abs(cimg::mod(H,(Tfloat)2) - 1)),
m = L - C/2;
Tfloat R, G, B;
// Pick the (R,G,B) ordering for the hue sextant.
switch ((int)H) {
case 0 : R = C; G = X; B = 0; break;
case 1 : R = X; G = C; B = 0; break;
case 2 : R = 0; G = C; B = X; break;
case 3 : R = 0; G = X; B = C; break;
case 4 : R = X; G = 0; B = C; break;
default : R = C; G = 0; B = X;
}
p1[N] = (T)((R + m)*255);
p2[N] = (T)((G + m)*255);
p3[N] = (T)((B + m)*255);
}
return *this;
}
| 0
|
59,940
|
/*
 * configfs attribute show: print the port's configured srp_max_rsp_size
 * value into 'page' and return the byte count written.
 */
static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
}
| 0
|
112,040
|
/*
 * Map an entry count to a table index: one slot per eight entries,
 * rounding up, clamped to the last available tree-table slot.
 */
static unsigned int hc_entries(unsigned int cnt)
{
	unsigned int slots = cnt / 8;

	if (cnt % 8)
		slots++;
	return slots < avail_tree_table_sz ? slots : avail_tree_table_sz - 1;
}
| 0
|
77,921
|
/*
 * Block in TASK_UNINTERRUPTIBLE state, with no timeout, until the
 * completion 'x' is signalled.
 */
void __sched wait_for_completion(struct completion *x)
{
wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
| 0
|
213,495
|
// Default constructor; no member initialization required.
OMXCodecObserver() {
}
| 0
|
34,728
|
/*
 * Grow or shrink hstate h's persistent huge page pool toward 'count',
 * allocating and freeing only on nodes in 'nodes_allowed'.  Returns the
 * resulting persistent pool size.  Gigantic hstates (order >= MAX_ORDER)
 * cannot be resized here and are returned unchanged.
 */
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
if (h->order >= MAX_ORDER)
return h->max_huge_pages;
/*
 * Increase the pool size
 * First take pages out of surplus state. Then make up the
 * remaining difference by allocating fresh huge pages.
 *
 * We might race with alloc_buddy_huge_page() here and be unable
 * to convert a surplus huge page to a normal huge page. That is
 * not critical, though, it just means the overall size of the
 * pool might be one hugepage larger than it needs to be, but
 * within all the constraints specified by the sysctls.
 */
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
while (count > persistent_huge_pages(h)) {
/*
 * If this allocation races such that we no longer need the
 * page, free_huge_page will handle it by freeing the page
 * and reducing the surplus.
 */
spin_unlock(&hugetlb_lock);
ret = alloc_fresh_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
/* Bail for signals. Probably ctrl-c from user */
if (signal_pending(current))
goto out;
}
/*
 * Decrease the pool size
 * First return free pages to the buddy allocator (being careful
 * to keep enough around to satisfy reservations). Then place
 * pages into surplus state as needed so the pool will shrink
 * to the desired size as pages become free.
 *
 * By placing pages into the surplus state independent of the
 * overcommit value, we are allowing the surplus pool size to
 * exceed overcommit. There are few sane options here. Since
 * alloc_buddy_huge_page() is checking the global counter,
 * though, we'll note that we're not allowed to exceed surplus
 * and won't grow the pool anywhere else. Not until one of the
 * sysctls are changed, or the surplus pages go out of use.
 */
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
}
while (count < persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
/* Report the pool size actually reached, under the lock. */
ret = persistent_huge_pages(h);
spin_unlock(&hugetlb_lock);
return ret;
}
| 0
|
92,736
|
/*
 * React to a detected filesystem error according to the errors= mount
 * policy: record the error state in memory and in the superblock, then
 * for read-write mounts abort the journal (unless errors=continue),
 * remount read-only for errors=remount-ro, commit the superblock, and
 * panic for errors=panic.  Read-only mounts only record the state.
 */
static void ext4_handle_error(struct super_block *sb)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
if (sb->s_flags & MS_RDONLY)
return;
if (!test_opt(sb, ERRORS_CONT)) {
journal_t *journal = EXT4_SB(sb)->s_journal;
EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
if (journal)
jbd2_journal_abort(journal, -EIO);
}
if (test_opt(sb, ERRORS_RO)) {
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
sb->s_flags |= MS_RDONLY;
}
/* Persist the error flag before any panic/remount takes effect. */
ext4_commit_super(sb, 1);
if (test_opt(sb, ERRORS_PANIC))
panic("EXT4-fs (device %s): panic forced after error\n",
sb->s_id);
}
| 0
|
228,043
|
// Not implemented: inspector-driven history-entry navigation is a no-op.
void InspectorPageAgent::navigateToHistoryEntry(ErrorString*, int)
{ }
| 0
|
455,223
|
// {a:1} AND'd with an $or over other values of 'a' is unsatisfiable for a
// non-multikey index; the test only pins the current (collection-scan)
// plan shapes rather than an ideal empty-result plan.
TEST_F(QueryPlannerTest, IndexBoundsAndWithNestedOr) {
addIndex(BSON("a" << 1));
runQuery(fromjson("{$and: [{a: 1, $or: [{a: 2}, {a: 3}]}]}"));
// Given that the index over 'a' isn't multikey, we ideally won't generate any solutions
// since we know the query describes an empty set if 'a' isn't multikey. Any solutions
// below are "this is how it currently works" instead of "this is how it should work."
// It's kind of iffy to look for indexed solutions so we don't...
size_t matches = 0;
matches += numSolutionMatches(
"{cscan: {dir: 1, filter: "
"{$or: [{a: 2, a:1}, {a: 3, a:1}]}}}");
matches += numSolutionMatches(
"{cscan: {dir: 1, filter: "
"{$and: [{$or: [{a: 2}, {a: 3}]}, {a: 1}]}}}");
ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
}
| 0
|
449,027
|
/* Convert a timespec to a count of seconds expressed as a double
 * (return type is declared on the preceding line). */
UTI_TimespecToDouble(struct timespec *ts)
{
return ts->tv_sec + 1.0e-9 * ts->tv_nsec;
}
| 0
|
127,932
|
/*
 * Sdb foreach callback: for every comment metadata key ("meta.C.<addr>"),
 * seek the core to that address and run the user-supplied command.
 * Always returns 1 so iteration continues over all keys.
 */
static int foreach_comment(void *user, const char *k, const char *v) {
RAnalMetaUserItem *ui = user;
RCore *core = ui->anal->user;
const char *cmd = ui->user;
if (!strncmp (k, "meta.C.", 7)) {
/* NOTE(review): the decoded comment text is never used before being
 * freed -- presumably only needed by the commented-out debug print. */
char *cmt = (char *)sdb_decode (v, 0);
if (!cmt) cmt = strdup ("");
//eprintf ("--> %s = %s\n", k + 7, cmt);
r_core_cmdf (core, "s %s", k + 7);
r_core_cmd0 (core, cmd);
free (cmt);
}
return 1;
}
| 0
|
462,058
|
void t2p_read_tiff_data(T2P* t2p, TIFF* input){
int i=0;
uint16* r = NULL;
uint16* g = NULL;
uint16* b = NULL;
uint16* a = NULL;
uint16 xuint16;
uint16* xuint16p;
float* xfloatp;
t2p->pdf_transcode = T2P_TRANSCODE_ENCODE;
t2p->pdf_sample = T2P_SAMPLE_NOTHING;
t2p->pdf_switchdecode = t2p->pdf_colorspace_invert;
TIFFSetDirectory(input, t2p->tiff_pages[t2p->pdf_page].page_directory);
TIFFGetField(input, TIFFTAG_IMAGEWIDTH, &(t2p->tiff_width));
if(t2p->tiff_width == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with zero width",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetField(input, TIFFTAG_IMAGELENGTH, &(t2p->tiff_length));
if(t2p->tiff_length == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with zero length",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_COMPRESSION, &(t2p->tiff_compression)) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with no compression tag",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( TIFFIsCODECConfigured(t2p->tiff_compression) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with compression type %u: not configured",
TIFFFileName(input),
t2p->tiff_compression
);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetFieldDefaulted(input, TIFFTAG_BITSPERSAMPLE, &(t2p->tiff_bitspersample));
switch(t2p->tiff_bitspersample){
case 1:
case 2:
case 4:
case 8:
break;
case 0:
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has 0 bits per sample, assuming 1",
TIFFFileName(input));
t2p->tiff_bitspersample=1;
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with %u bits per sample",
TIFFFileName(input),
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetFieldDefaulted(input, TIFFTAG_SAMPLESPERPIXEL, &(t2p->tiff_samplesperpixel));
if(t2p->tiff_samplesperpixel>4){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->tiff_samplesperpixel==0){
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has 0 samples per pixel, assuming 1",
TIFFFileName(input));
t2p->tiff_samplesperpixel=1;
}
if(TIFFGetField(input, TIFFTAG_SAMPLEFORMAT, &xuint16) != 0 ){
switch(xuint16){
case 0:
case 1:
case 4:
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with sample format %u",
TIFFFileName(input),
xuint16);
t2p->t2p_error = T2P_ERR_ERROR;
return;
break;
}
}
TIFFGetFieldDefaulted(input, TIFFTAG_FILLORDER, &(t2p->tiff_fillorder));
if(TIFFGetField(input, TIFFTAG_PHOTOMETRIC, &(t2p->tiff_photometric)) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with no photometric interpretation tag",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
switch(t2p->tiff_photometric){
case PHOTOMETRIC_MINISWHITE:
case PHOTOMETRIC_MINISBLACK:
if (t2p->tiff_bitspersample==1){
t2p->pdf_colorspace=T2P_CS_BILEVEL;
if(t2p->tiff_photometric==PHOTOMETRIC_MINISWHITE){
t2p->pdf_switchdecode ^= 1;
}
} else {
t2p->pdf_colorspace=T2P_CS_GRAY;
if(t2p->tiff_photometric==PHOTOMETRIC_MINISWHITE){
t2p->pdf_switchdecode ^= 1;
}
}
break;
case PHOTOMETRIC_RGB:
t2p->pdf_colorspace=T2P_CS_RGB;
if(t2p->tiff_samplesperpixel == 3){
break;
}
if(TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)){
if(xuint16==1)
goto photometric_palette;
}
if(t2p->tiff_samplesperpixel > 3) {
if(t2p->tiff_samplesperpixel == 4) {
t2p->pdf_colorspace = T2P_CS_RGB;
if(TIFFGetField(input,
TIFFTAG_EXTRASAMPLES,
&xuint16, &xuint16p)
&& xuint16 == 1) {
if(xuint16p[0] == EXTRASAMPLE_ASSOCALPHA){
if( t2p->tiff_bitspersample != 8 )
{
TIFFError(
TIFF2PDF_MODULE,
"No support for BitsPerSample=%d for RGBA",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_sample=T2P_SAMPLE_RGBAA_TO_RGB;
break;
}
if(xuint16p[0] == EXTRASAMPLE_UNASSALPHA){
if( t2p->tiff_bitspersample != 8 )
{
TIFFError(
TIFF2PDF_MODULE,
"No support for BitsPerSample=%d for RGBA",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_sample=T2P_SAMPLE_RGBA_TO_RGB;
break;
}
TIFFWarning(
TIFF2PDF_MODULE,
"RGB image %s has 4 samples per pixel, assuming RGBA",
TIFFFileName(input));
break;
}
t2p->pdf_colorspace=T2P_CS_CMYK;
t2p->pdf_switchdecode ^= 1;
TIFFWarning(
TIFF2PDF_MODULE,
"RGB image %s has 4 samples per pixel, assuming inverse CMYK",
TIFFFileName(input));
break;
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for RGB image %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
break;
}
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for RGB image %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
break;
}
case PHOTOMETRIC_PALETTE:
photometric_palette:
if(t2p->tiff_samplesperpixel!=1){
TIFFError(
TIFF2PDF_MODULE,
"No support for palettized image %s with not one sample per pixel",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_colorspace=T2P_CS_RGB | T2P_CS_PALETTE;
t2p->pdf_palettesize=0x0001<<t2p->tiff_bitspersample;
if(!TIFFGetField(input, TIFFTAG_COLORMAP, &r, &g, &b)){
TIFFError(
TIFF2PDF_MODULE,
"Palettized image %s has no color map",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(r == NULL || g == NULL || b == NULL){
TIFFError(
TIFF2PDF_MODULE,
"Error getting 3 components from color map");
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->pdf_palette != NULL){
_TIFFfree(t2p->pdf_palette);
t2p->pdf_palette=NULL;
}
t2p->pdf_palette = (unsigned char*)
_TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->pdf_palettesize,3));
if(t2p->pdf_palette==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_read_tiff_image, %s",
t2p->pdf_palettesize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
for(i=0;i<t2p->pdf_palettesize;i++){
t2p->pdf_palette[(i*3)] = (unsigned char) (r[i]>>8);
t2p->pdf_palette[(i*3)+1]= (unsigned char) (g[i]>>8);
t2p->pdf_palette[(i*3)+2]= (unsigned char) (b[i]>>8);
}
t2p->pdf_palettesize *= 3;
break;
case PHOTOMETRIC_SEPARATED:
if(TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)){
if(xuint16==1){
goto photometric_palette_cmyk;
}
}
if( TIFFGetField(input, TIFFTAG_INKSET, &xuint16) ){
if(xuint16 != INKSET_CMYK){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s because its inkset is not CMYK",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
if(t2p->tiff_samplesperpixel==4){
t2p->pdf_colorspace=T2P_CS_CMYK;
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for %s because it has %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
break;
photometric_palette_cmyk:
if(t2p->tiff_samplesperpixel!=1){
TIFFError(
TIFF2PDF_MODULE,
"No support for palettized CMYK image %s with not one sample per pixel",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_colorspace=T2P_CS_CMYK | T2P_CS_PALETTE;
t2p->pdf_palettesize=0x0001<<t2p->tiff_bitspersample;
if(!TIFFGetField(input, TIFFTAG_COLORMAP, &r, &g, &b, &a)){
TIFFError(
TIFF2PDF_MODULE,
"Palettized image %s has no color map",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(r == NULL || g == NULL || b == NULL || a == NULL){
TIFFError(
TIFF2PDF_MODULE,
"Error getting 4 components from color map");
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->pdf_palette != NULL){
_TIFFfree(t2p->pdf_palette);
t2p->pdf_palette=NULL;
}
t2p->pdf_palette = (unsigned char*)
_TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->pdf_palettesize,4));
if(t2p->pdf_palette==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_read_tiff_image, %s",
t2p->pdf_palettesize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
for(i=0;i<t2p->pdf_palettesize;i++){
t2p->pdf_palette[(i*4)] = (unsigned char) (r[i]>>8);
t2p->pdf_palette[(i*4)+1]= (unsigned char) (g[i]>>8);
t2p->pdf_palette[(i*4)+2]= (unsigned char) (b[i]>>8);
t2p->pdf_palette[(i*4)+3]= (unsigned char) (a[i]>>8);
}
t2p->pdf_palettesize *= 4;
break;
case PHOTOMETRIC_YCBCR:
t2p->pdf_colorspace=T2P_CS_RGB;
if(t2p->tiff_samplesperpixel==1){
t2p->pdf_colorspace=T2P_CS_GRAY;
t2p->tiff_photometric=PHOTOMETRIC_MINISBLACK;
break;
}
t2p->pdf_sample=T2P_SAMPLE_YCBCR_TO_RGB;
#ifdef JPEG_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_JPEG){
t2p->pdf_sample=T2P_SAMPLE_NOTHING;
}
#endif
break;
case PHOTOMETRIC_CIELAB:
if( t2p->tiff_samplesperpixel != 3){
TIFFError(
TIFF2PDF_MODULE,
"Unsupported samplesperpixel = %d for CIELAB",
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( t2p->tiff_bitspersample != 8){
TIFFError(
TIFF2PDF_MODULE,
"Invalid bitspersample = %d for CIELAB",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_labrange[0]= -127;
t2p->pdf_labrange[1]= 127;
t2p->pdf_labrange[2]= -127;
t2p->pdf_labrange[3]= 127;
t2p->pdf_sample=T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_ICCLAB:
t2p->pdf_labrange[0]= 0;
t2p->pdf_labrange[1]= 255;
t2p->pdf_labrange[2]= 0;
t2p->pdf_labrange[3]= 255;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_ITULAB:
if( t2p->tiff_samplesperpixel != 3){
TIFFError(
TIFF2PDF_MODULE,
"Unsupported samplesperpixel = %d for ITULAB",
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( t2p->tiff_bitspersample != 8){
TIFFError(
TIFF2PDF_MODULE,
"Invalid bitspersample = %d for ITULAB",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_labrange[0]=-85;
t2p->pdf_labrange[1]=85;
t2p->pdf_labrange[2]=-75;
t2p->pdf_labrange[3]=124;
t2p->pdf_sample=T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_LOGL:
case PHOTOMETRIC_LOGLUV:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with photometric interpretation LogL/LogLuv",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with photometric interpretation %u",
TIFFFileName(input),
t2p->tiff_photometric);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_PLANARCONFIG, &(t2p->tiff_planar))){
switch(t2p->tiff_planar){
case 0:
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has planar configuration 0, assuming 1",
TIFFFileName(input));
t2p->tiff_planar=PLANARCONFIG_CONTIG;
case PLANARCONFIG_CONTIG:
break;
case PLANARCONFIG_SEPARATE:
t2p->pdf_sample=T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG;
if(t2p->tiff_bitspersample!=8){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with separated planar configuration and %u bits per sample",
TIFFFileName(input),
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with planar configuration %u",
TIFFFileName(input),
t2p->tiff_planar);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
TIFFGetFieldDefaulted(input, TIFFTAG_ORIENTATION,
&(t2p->tiff_orientation));
if(t2p->tiff_orientation>8){
TIFFWarning(TIFF2PDF_MODULE,
"Image %s has orientation %u, assuming 0",
TIFFFileName(input), t2p->tiff_orientation);
t2p->tiff_orientation=0;
}
if(TIFFGetField(input, TIFFTAG_XRESOLUTION, &(t2p->tiff_xres) ) == 0){
t2p->tiff_xres=0.0;
}
if(TIFFGetField(input, TIFFTAG_YRESOLUTION, &(t2p->tiff_yres) ) == 0){
t2p->tiff_yres=0.0;
}
TIFFGetFieldDefaulted(input, TIFFTAG_RESOLUTIONUNIT,
&(t2p->tiff_resunit));
if(t2p->tiff_resunit == RESUNIT_CENTIMETER) {
t2p->tiff_xres *= 2.54F;
t2p->tiff_yres *= 2.54F;
} else if (t2p->tiff_resunit != RESUNIT_INCH
&& t2p->pdf_centimeters != 0) {
t2p->tiff_xres *= 2.54F;
t2p->tiff_yres *= 2.54F;
}
t2p_compose_pdf_page(t2p);
if( t2p->t2p_error == T2P_ERR_ERROR )
return;
t2p->pdf_transcode = T2P_TRANSCODE_ENCODE;
/* It seems that T2P_TRANSCODE_RAW mode doesn't support separate->contig */
/* conversion. At least t2p_read_tiff_size and t2p_read_tiff_size_tile */
/* do not take into account the number of samples, and thus */
/* that can cause heap buffer overflows such as in */
/* http://bugzilla.maptools.org/show_bug.cgi?id=2715 */
if(t2p->pdf_nopassthrough==0 && t2p->tiff_planar!=PLANARCONFIG_SEPARATE){
#ifdef CCITT_SUPPORT
if(t2p->tiff_compression==COMPRESSION_CCITTFAX4
){
if(TIFFIsTiled(input) || (TIFFNumberOfStrips(input)==1) ){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_G4;
}
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->tiff_compression== COMPRESSION_ADOBE_DEFLATE
|| t2p->tiff_compression==COMPRESSION_DEFLATE){
if(TIFFIsTiled(input) || (TIFFNumberOfStrips(input)==1) ){
uint16 predictor;
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_ZIP;
TIFFGetField(input, TIFFTAG_PREDICTOR, &predictor);
t2p->pdf_compressionquality = predictor;
/* TIFFTAG_ZIPQUALITY is always Z_DEFAULT_COMPRESSION on reading */
}
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_OJPEG){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_JPEG;
t2p_process_ojpeg_tables(t2p, input);
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_JPEG){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_JPEG;
}
#endif
(void)0;
}
if(t2p->pdf_transcode!=T2P_TRANSCODE_RAW){
t2p->pdf_compression = t2p->pdf_defaultcompression;
}
#ifdef JPEG_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_JPEG){
if(t2p->pdf_colorspace & T2P_CS_PALETTE){
t2p->pdf_sample|=T2P_SAMPLE_REALIZE_PALETTE;
t2p->pdf_colorspace ^= T2P_CS_PALETTE;
t2p->tiff_pages[t2p->pdf_page].page_extra--;
}
}
if(t2p->tiff_compression==COMPRESSION_JPEG){
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with JPEG compression and separated planar configuration",
TIFFFileName(input));
t2p->t2p_error=T2P_ERR_ERROR;
return;
}
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_OJPEG){
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with OJPEG compression and separated planar configuration",
TIFFFileName(input));
t2p->t2p_error=T2P_ERR_ERROR;
return;
}
}
#endif
if(t2p->pdf_sample & T2P_SAMPLE_REALIZE_PALETTE){
if(t2p->pdf_colorspace & T2P_CS_CMYK){
t2p->tiff_samplesperpixel=4;
t2p->tiff_photometric=PHOTOMETRIC_SEPARATED;
} else {
t2p->tiff_samplesperpixel=3;
t2p->tiff_photometric=PHOTOMETRIC_RGB;
}
}
if (TIFFGetField(input, TIFFTAG_TRANSFERFUNCTION,
&(t2p->tiff_transferfunction[0]),
&(t2p->tiff_transferfunction[1]),
&(t2p->tiff_transferfunction[2]))) {
if((t2p->tiff_transferfunction[1] != (uint16*) NULL) &&
(t2p->tiff_transferfunction[2] != (uint16*) NULL)
) {
t2p->tiff_transferfunctioncount=3;
} else {
t2p->tiff_transferfunctioncount=1;
}
} else {
t2p->tiff_transferfunctioncount=0;
}
if(TIFFGetField(input, TIFFTAG_WHITEPOINT, &xfloatp)!=0){
t2p->tiff_whitechromaticities[0]=xfloatp[0];
t2p->tiff_whitechromaticities[1]=xfloatp[1];
if(t2p->pdf_colorspace & T2P_CS_GRAY){
t2p->pdf_colorspace |= T2P_CS_CALGRAY;
}
if(t2p->pdf_colorspace & T2P_CS_RGB){
t2p->pdf_colorspace |= T2P_CS_CALRGB;
}
}
if(TIFFGetField(input, TIFFTAG_PRIMARYCHROMATICITIES, &xfloatp)!=0){
t2p->tiff_primarychromaticities[0]=xfloatp[0];
t2p->tiff_primarychromaticities[1]=xfloatp[1];
t2p->tiff_primarychromaticities[2]=xfloatp[2];
t2p->tiff_primarychromaticities[3]=xfloatp[3];
t2p->tiff_primarychromaticities[4]=xfloatp[4];
t2p->tiff_primarychromaticities[5]=xfloatp[5];
if(t2p->pdf_colorspace & T2P_CS_RGB){
t2p->pdf_colorspace |= T2P_CS_CALRGB;
}
}
if(t2p->pdf_colorspace & T2P_CS_LAB){
if(TIFFGetField(input, TIFFTAG_WHITEPOINT, &xfloatp) != 0){
t2p->tiff_whitechromaticities[0]=xfloatp[0];
t2p->tiff_whitechromaticities[1]=xfloatp[1];
} else {
t2p->tiff_whitechromaticities[0]=0.3457F; /* 0.3127F; */
t2p->tiff_whitechromaticities[1]=0.3585F; /* 0.3290F; */
}
}
if(TIFFGetField(input,
TIFFTAG_ICCPROFILE,
&(t2p->tiff_iccprofilelength),
&(t2p->tiff_iccprofile))!=0){
t2p->pdf_colorspace |= T2P_CS_ICCBASED;
} else {
t2p->tiff_iccprofilelength=0;
t2p->tiff_iccprofile=NULL;
}
#ifdef CCITT_SUPPORT
if( t2p->tiff_bitspersample==1 &&
t2p->tiff_samplesperpixel==1){
t2p->pdf_compression = T2P_COMPRESS_G4;
}
#endif
return;
}
| 0
|
149,577
|
// Element-wise NOT_EQUAL on two string tensors of shape {1,1,1,1,4}:
// output is true exactly where the corresponding strings differ.
TEST(ComparisonsTest, NotEqualString) {
  // String comparison is not supported by the NNAPI delegate; skip
  // the test when the harness forces NNAPI execution.
  if (SingleOpModel::GetForceUseNnapi()) {
    return;
  }
  ComparisonOpModel model({1, 1, 1, 1, 4}, {1, 1, 1, 1, 4}, TensorType_STRING,
                          BuiltinOperator_NOT_EQUAL);
  model.PopulateTensor<std::string>(model.input1(), {"A", "B", "C", "D"});
  model.PopulateTensor<std::string>(model.input2(), {"A", "C", "B", "D"});
  model.Invoke();
  // Positions 1 and 2 differ ("B"/"C" and "C"/"B"); 0 and 3 match.
  EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
  // Output shape mirrors the (identical) input shapes.
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 1, 4));
}
| 0
|
362,723
|
*/
xmlNodePtr
xmlXPathNextFollowing(xmlXPathParserContextPtr ctxt, xmlNodePtr cur) {
if ((ctxt == NULL) || (ctxt->context == NULL)) return(NULL);
if ((cur != NULL) && (cur->type != XML_ATTRIBUTE_NODE) &&
(cur->type != XML_NAMESPACE_DECL) && (cur->children != NULL))
return(cur->children);
if (cur == NULL) {
cur = ctxt->context->node;
if (cur->type == XML_NAMESPACE_DECL)
return(NULL);
if (cur->type == XML_ATTRIBUTE_NODE)
cur = cur->parent;
}
if (cur == NULL) return(NULL) ; /* ERROR */
if (cur->next != NULL) return(cur->next) ;
do {
cur = cur->parent;
if (cur == NULL) break;
if (cur == (xmlNodePtr) ctxt->context->doc) return(NULL);
if (cur->next != NULL) return(cur->next);
} while (cur != NULL);
| 0
|
69,568
|
/*
 * Copy the address information from p_src into p_dst.
 *
 * Scalar fields (flags, family, socktype, protocol, addrlen) are copied
 * directly.  The socket address pointed to by ai_addr is copied by value
 * into the buffer p_dst->ai_addr already points at; the caller must have
 * provided a destination buffer large enough for the source family
 * (sizeof(struct sockaddr_in6) for AF_INET6, sizeof(struct sockaddr)
 * otherwise -- sockaddr_in6 is larger than the generic sockaddr, hence
 * the family check).
 *
 * ai_canonname and ai_next are deliberately NOT copied: the destination
 * gets NULL for both (we do not care about the canonical name, and the
 * copy is a single unchained entry).
 *
 * If either pointer is NULL the function is a no-op.
 */
void tac_copy_addr_info(struct addrinfo *p_dst, const struct addrinfo *p_src) {
    if (p_dst == NULL || p_src == NULL)
        return;

    p_dst->ai_flags = p_src->ai_flags;
    p_dst->ai_family = p_src->ai_family;
    p_dst->ai_socktype = p_src->ai_socktype;
    p_dst->ai_protocol = p_src->ai_protocol;
    p_dst->ai_addrlen = p_src->ai_addrlen;

    /*
     * Copy the full socket address in one operation.  (The previous
     * code did memcpy, then memset-to-zero, then memcpy again over the
     * same region in the IPv6 branch; the first two operations were
     * redundant -- the final memcpy fully overwrites the buffer.)
     */
    if (p_dst->ai_family == AF_INET6) {
        memcpy(p_dst->ai_addr, p_src->ai_addr, sizeof(struct sockaddr_in6));
    } else {
        memcpy(p_dst->ai_addr, p_src->ai_addr, sizeof(struct sockaddr));
    }

    p_dst->ai_canonname = NULL; /* we do not care about it */
    p_dst->ai_next = NULL;      /* no more chain */
}
| 0
|
438,680
|
/*
 * Decide whether the virtual engine's request may be submitted to the
 * given physical engine right now.
 */
static bool virtual_matches(const struct virtual_engine *ve,
			    const struct i915_request *rq,
			    const struct intel_engine_cs *engine)
{
	const struct intel_engine_cs *inflight;

	/* The request is not allowed to run on this engine at all. */
	if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
		return false;

	/*
	 * The HW is tracked until it has completed saving the context
	 * image (i.e. until the final CS event switching out of the
	 * context is seen), and the image must not be overwritten
	 * before then.  While the previous virtualized request is
	 * inflight we are therefore restricted to the active engine
	 * (so the register offsets are reused).  This is a very small
	 * hysteresis on the greedy selection algorithm.
	 */
	inflight = intel_context_inflight(&ve->context);
	return !(inflight && inflight != engine);
}
| 0
|
369,954
|
/*
 * Final teardown of an inet6_dev: sanity-check that it is empty and
 * marked dead, drop the reference on the underlying net_device, and
 * free the structure after an RCU grace period.
 */
void in6_dev_finish_destroy(struct inet6_dev *idev)
{
struct net_device *dev = idev->dev;

/* By now all addresses, multicast state and the RS timer must be gone. */
WARN_ON(!list_empty(&idev->addr_list));
WARN_ON(idev->mc_list != NULL);
WARN_ON(timer_pending(&idev->rs_timer));

#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
#endif
/* Release the device reference taken when idev was attached. */
dev_put(dev);
if (!idev->dead) {
/* Should never happen: bail out rather than free a live idev. */
pr_warn("Freeing alive inet6 device %p\n", idev);
return;
}
snmp6_free_dev(idev);
/* Defer the actual free until after an RCU grace period. */
kfree_rcu(idev, rcu);
}
| 0
|
481,923
|
/*
 * Compile-time layout checks: struct pt_regs must be an ABI-compatible
 * superset of the uapi struct user_pt_regs, and the PT_* ptrace offset
 * #defines must match user_pt_regs.  Everything here is BUILD_BUG_ON,
 * so the function generates no runtime code.
 */
void __init pt_regs_check(void)
{
BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
offsetof(struct user_pt_regs, gpr));
BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
offsetof(struct user_pt_regs, nip));
BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
offsetof(struct user_pt_regs, msr));
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct user_pt_regs, orig_gpr3));
BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
offsetof(struct user_pt_regs, ctr));
BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
offsetof(struct user_pt_regs, link));
BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
offsetof(struct user_pt_regs, xer));
BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
offsetof(struct user_pt_regs, ccr));
#ifdef __powerpc64__
BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
offsetof(struct user_pt_regs, softe));
#else
BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
offsetof(struct user_pt_regs, mq));
#endif
BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
offsetof(struct user_pt_regs, trap));
/* dar/dear and dsisr/esr are unioned names for the same uapi slot. */
BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
offsetof(struct user_pt_regs, dar));
BUILD_BUG_ON(offsetof(struct pt_regs, dear) !=
offsetof(struct user_pt_regs, dar));
BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
offsetof(struct user_pt_regs, dsisr));
BUILD_BUG_ON(offsetof(struct pt_regs, esr) !=
offsetof(struct user_pt_regs, dsisr));
BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
offsetof(struct user_pt_regs, result));
/* pt_regs may be larger (kernel-only tail), never smaller. */
BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
// Now check that the pt_regs offsets match the uapi #defines
#define CHECK_REG(_pt, _reg) \
BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
sizeof(unsigned long)));
CHECK_REG(PT_R0, gpr[0]);
CHECK_REG(PT_R1, gpr[1]);
CHECK_REG(PT_R2, gpr[2]);
CHECK_REG(PT_R3, gpr[3]);
CHECK_REG(PT_R4, gpr[4]);
CHECK_REG(PT_R5, gpr[5]);
CHECK_REG(PT_R6, gpr[6]);
CHECK_REG(PT_R7, gpr[7]);
CHECK_REG(PT_R8, gpr[8]);
CHECK_REG(PT_R9, gpr[9]);
CHECK_REG(PT_R10, gpr[10]);
CHECK_REG(PT_R11, gpr[11]);
CHECK_REG(PT_R12, gpr[12]);
CHECK_REG(PT_R13, gpr[13]);
CHECK_REG(PT_R14, gpr[14]);
CHECK_REG(PT_R15, gpr[15]);
CHECK_REG(PT_R16, gpr[16]);
CHECK_REG(PT_R17, gpr[17]);
CHECK_REG(PT_R18, gpr[18]);
CHECK_REG(PT_R19, gpr[19]);
CHECK_REG(PT_R20, gpr[20]);
CHECK_REG(PT_R21, gpr[21]);
CHECK_REG(PT_R22, gpr[22]);
CHECK_REG(PT_R23, gpr[23]);
CHECK_REG(PT_R24, gpr[24]);
CHECK_REG(PT_R25, gpr[25]);
CHECK_REG(PT_R26, gpr[26]);
CHECK_REG(PT_R27, gpr[27]);
CHECK_REG(PT_R28, gpr[28]);
CHECK_REG(PT_R29, gpr[29]);
CHECK_REG(PT_R30, gpr[30]);
CHECK_REG(PT_R31, gpr[31]);
CHECK_REG(PT_NIP, nip);
CHECK_REG(PT_MSR, msr);
CHECK_REG(PT_ORIG_R3, orig_gpr3);
CHECK_REG(PT_CTR, ctr);
CHECK_REG(PT_LNK, link);
CHECK_REG(PT_XER, xer);
CHECK_REG(PT_CCR, ccr);
#ifdef CONFIG_PPC64
CHECK_REG(PT_SOFTE, softe);
#else
CHECK_REG(PT_MQ, mq);
#endif
CHECK_REG(PT_TRAP, trap);
CHECK_REG(PT_DAR, dar);
CHECK_REG(PT_DSISR, dsisr);
CHECK_REG(PT_RESULT, result);
#undef CHECK_REG
BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
/*
 * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
 * real registers.
 */
BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
// ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
}
| 0
|
465,756
|
/*
 * Register a file on its inode's list of writable files, so writeback
 * can find it later.  Idempotent: a file already on the list is left
 * where it is.
 */
static void fuse_link_write_file(struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_inode *fi = get_fuse_inode(file_inode(file));

	/*
	 * The file may be written through mmap, so chain it onto the
	 * inode's write_file list (under the inode lock, once only).
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}
| 0
|
231,348
|
// Asks the automation provider for the text of the currently showing
// app-modal dialog.  On success returns true and stores the text in
// |message|; on failure returns false and stores a description in
// |error_msg|.
bool SendGetAppModalDialogMessageJSONRequest(
    AutomationMessageSender* sender,
    std::string* message,
    std::string* error_msg) {
  DictionaryValue request;
  request.SetString("command", "GetAppModalDialogMessage");

  DictionaryValue response;
  if (!SendAutomationJSONRequest(sender, request, &response, error_msg))
    return false;

  // The reply must carry a "message" string for the call to succeed.
  return response.GetString("message", message);
}
| 0
|
437,228
|
/*
 * Demux callback: stop a running feed, dispatching on its type.
 *
 * Returns the result of the type-specific stop helper for TS and
 * section feeds, or 0 for any other feed type.
 *
 * (The original switch had a `break` after each `return`; those were
 * unreachable dead statements and have been removed.)
 */
static int ttusb_dec_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	dprintk("%s\n", __func__);

	switch (dvbdmxfeed->type) {
	case DMX_TYPE_TS:
		return ttusb_dec_stop_ts_feed(dvbdmxfeed);
	case DMX_TYPE_SEC:
		return ttusb_dec_stop_sec_feed(dvbdmxfeed);
	}

	return 0;
}
| 0
|
178,011
|
// Notifies the session service that the test tab was closed, then
// reloads the session database and reads the saved windows into
// |windows_|.  (Removed the unused local `Time close_time;` from the
// original.)
void GetLastSession() {
  profile()->GetSessionService()->TabClosed(controller().window_id(),
                                            controller().session_id(),
                                            false);
  ReopenDatabase();
  session_helper_.ReadWindows(&windows_);
}
| 0
|
388,831
|
/*
 * Clear the SMB2 replay flag on the session, ending replay mode
 * started by the corresponding start-replay call.
 */
void smb2cli_session_stop_replay(struct smbXcli_session *session)
{
session->smb2->replay_active = false;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.