idx
int64 | func
string | target
int64 |
|---|---|---|
299,904
|
auths_init(void)
{
auth_instance *ap, *bp;

/* Read the configuration and build the chain of authenticator instances,
anchored at "auths". */
readconf_driver_init(US"authenticator",
  (driver_instance **)(&auths),    /* chain anchor */
  (driver_info *)auths_available,  /* available drivers */
  sizeof(auth_info),               /* size of info block */
  &auth_defaults,                  /* default values for generic options */
  sizeof(auth_instance),           /* size of instance block */
  optionlist_auths,                /* generic options */
  optionlist_auths_size);

/* Validate the chain: every authenticator must carry a public name, and a
public name may not be shared by two client authenticators or by two server
authenticators (case-insensitive comparison). Violations are fatal. */
for (ap = auths; ap; ap = ap->next)
  {
  if (!ap->public_name)
    log_write(0, LOG_PANIC_DIE|LOG_CONFIG, "no public name specified for "
      "the %s authenticator", ap->name);

  for (bp = ap->next; bp; bp = bp->next)
    if (strcmpic(ap->public_name, bp->public_name) == 0)
      if ((ap->client && bp->client) || (ap->server && bp->server))
        log_write(0, LOG_PANIC_DIE|LOG_CONFIG, "two %s authenticators "
          "(%s and %s) have the same public name (%s)",
          (ap->client)? US"client" : US"server", ap->name, bp->name,
          ap->public_name);
  }
}
| 0
|
216,903
|
/**
  Optimize all subqueries of this SELECT that were not flattened into
  semi-joins.

  Walks every inner SELECT_LEX_UNIT of this select; for each unit that still
  has a subquery predicate, optimizes each member SELECT (all members of a
  UNION, or the single select otherwise), and propagates correlation and
  emptiness information back to the predicate.

  @param const_only  if true, only constant subqueries are optimized and
                     non-constant ones are skipped (used for derived tables
                     with constant conditions).

  @return FALSE on success, TRUE on fatal error.
*/
bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
{
  SELECT_LEX_UNIT *next_unit= NULL;
  for (SELECT_LEX_UNIT *un= first_inner_unit();
       un;
       un= next_unit ? next_unit : un->next_unit())
  {
    Item_subselect *subquery_predicate= un->item;
    next_unit= NULL;

    if (subquery_predicate)
    {
      if (!subquery_predicate->fixed)
      {
        /*
          This subquery was excluded as part of some expression so it is
          invisible from all prepared expression.  Remember the successor
          before exclude_level() detaches the unit from the chain.
        */
        next_unit= un->next_unit();
        un->exclude_level();
        if (next_unit)
          continue;
        break;
      }
      if (subquery_predicate->substype() == Item_subselect::IN_SUBS)
      {
        Item_in_subselect *in_subs= (Item_in_subselect*) subquery_predicate;
        /* Already converted to a joined table (JTBM) - nothing to do here. */
        if (in_subs->is_jtbm_merged)
          continue;
      }
      if (const_only && !subquery_predicate->const_item())
      {
        /* Skip non-constant subqueries if the caller asked so. */
        continue;
      }
      bool empty_union_result= true;
      bool is_correlated_unit= false;
      bool first= true;
      bool union_plan_saved= false;
      /*
        If the subquery is a UNION, optimize all the subqueries in the UNION. If
        there is no UNION, then the loop will execute once for the subquery.
      */
      for (SELECT_LEX *sl= un->first_select(); sl; sl= sl->next_select())
      {
        JOIN *inner_join= sl->join;
        if (first)
          first= false;
        else
        {
          /* Save the UNION's EXPLAIN structure once, before the 2nd select. */
          if (!union_plan_saved)
          {
            union_plan_saved= true;
            if (un->save_union_explain(un->thd->lex->explain))
              return true; /* Failure */
          }
        }
        if (!inner_join)
          continue;
        SELECT_LEX *save_select= un->thd->lex->current_select;
        ulonglong save_options;
        int res;
        /* We need only 1 row to determine existence */
        un->set_limit(un->global_parameters());
        un->thd->lex->current_select= sl;
        save_options= inner_join->select_options;
        if (options & SELECT_DESCRIBE)
        {
          /* Optimize the subquery in the context of EXPLAIN. */
          sl->set_explain_type(FALSE);
          sl->options|= SELECT_DESCRIBE;
          inner_join->select_options|= SELECT_DESCRIBE;
        }
        if ((res= inner_join->optimize()))
        {
          /*
            Fix: restore the state that was modified above before bailing
            out.  The previous code returned with lex->current_select still
            pointing at the failed subquery's select and with
            SELECT_DESCRIBE possibly left set in select_options.
          */
          inner_join->select_options= save_options;
          un->thd->lex->current_select= save_select;
          return TRUE;
        }
        if (!inner_join->cleaned)
          sl->update_used_tables();
        sl->update_correlated_cache();
        is_correlated_unit|= sl->is_correlated;
        inner_join->select_options= save_options;
        un->thd->lex->current_select= save_select;

        /* Keep the EXPLAIN select type in sync after optimization. */
        Explain_query *eq;
        if ((eq= inner_join->thd->lex->explain))
        {
          Explain_select *expl_sel;
          if ((expl_sel= eq->get_select(inner_join->select_lex->select_number)))
          {
            sl->set_explain_type(TRUE);
            expl_sel->select_type= sl->type;
          }
        }
        if (empty_union_result)
        {
          /*
            If at least one subquery in a union is non-empty, the UNION result
            is non-empty. If there is no UNION, the only subquery is non-empy.
          */
          empty_union_result= inner_join->empty_result();
        }
        /*
          Note: the old trailing "if (res) return TRUE;" was unreachable with
          res != 0 because the optimize() failure is handled above.
        */
      }
      if (empty_union_result)
        subquery_predicate->no_rows_in_result();
      if (!is_correlated_unit)
        un->uncacheable&= ~UNCACHEABLE_DEPENDENT;
      subquery_predicate->is_correlated= is_correlated_unit;
    }
  }
  return FALSE;
}
| 1
|
225,627
|
GF_Err mfro_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s;
	GF_Err e;

	/* Serialize the FullBox header (size/type/version/flags) first. */
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	/* The 'mfro' payload is a single 32-bit field: the enclosing
	   'mfra' box size, used to locate it from the end of the file. */
	gf_bs_write_u32(bs, ptr->container_size);
	return GF_OK;
}
| 0
|
512,914
|
/* Create a shallow copy of this item for the given THD via the generic
   get_item_copy helper, instantiated for Item_copy_string.
   NOTE(review): presumably allocated on thd's mem_root - confirm against
   get_item_copy's definition. */
Item *get_copy(THD *thd)
{ return get_item_copy<Item_copy_string>(thd, this); }
| 0
|
275,983
|
/* Check whether a raw public key encodes a valid point on the given curve.
   public_key holds the X then Y coordinate, each curve->num_bytes long.
   Returns the result of uECC_valid_point() (nonzero when the point is valid). */
int uECC_valid_public_key(const uint8_t *public_key, uECC_Curve curve) {
#if uECC_VLI_NATIVE_LITTLE_ENDIAN
    /* Native little-endian: reinterpret the key bytes in place.
       NOTE(review): this cast assumes public_key is suitably aligned for
       uECC_word_t - confirm at call sites. */
    uECC_word_t *_public = (uECC_word_t *)public_key;
#else
    /* Otherwise unpack both coordinates into a native-word buffer. */
    uECC_word_t _public[uECC_MAX_WORDS * 2];
#endif
#if uECC_VLI_NATIVE_LITTLE_ENDIAN == 0
    uECC_vli_bytesToNative(_public, public_key, curve->num_bytes);
    uECC_vli_bytesToNative(
        _public + curve->num_words, public_key + curve->num_bytes, curve->num_bytes);
#endif
    return uECC_valid_point(_public, curve);
}
| 0
|
436,133
|
/*
 * Flush the batched completion state: post a CQE for every request queued
 * in ctx->submit_state.comp, then drop the two references (submission and
 * completion) each request still holds, recycling freed requests through a
 * req_batch.  Takes ctx->completion_lock with IRQs disabled for the CQE
 * fill phase only; reference dropping happens outside the lock.
 */
static void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	struct io_comp_state *cs = &ctx->submit_state.comp;
	int i, nr = cs->nr;
	struct req_batch rb;

	/* Phase 1: fill a CQE per request from its saved result/flags. */
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < nr; i++) {
		struct io_kiocb *req = cs->reqs[i];
		__io_cqring_fill_event(ctx, req->user_data, req->result,
					req->compl.cflags);
	}
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	/* Wake anyone waiting on the CQ ring now that entries are visible. */
	io_cqring_ev_posted(ctx);

	/* Phase 2: drop refs outside the lock, batching the frees. */
	io_init_req_batch(&rb);
	for (i = 0; i < nr; i++) {
		struct io_kiocb *req = cs->reqs[i];
		/* submission and completion refs */
		if (req_ref_sub_and_test(req, 2))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}
	io_req_free_batch_finish(ctx, &rb);
	cs->nr = 0;
}
| 0
|
509,536
|
/*
 * ha_maria constructor: bind the handler to its handlerton and table share,
 * and advertise the storage engine's capability bits to the server.
 * file (the underlying Maria table handle) starts out NULL; it is set when
 * the table is actually opened.  Index enabling and bulk-insert undo state
 * start in their inactive defaults.
 */
ha_maria::ha_maria(handlerton *hton, TABLE_SHARE *table_arg):
handler(hton, table_arg), file(0),
/* Capability mask: key/NULL handling, fulltext, row- and statement-based
   binlogging, blob indexing, geometry/rtree, repair/export support, exact
   record counts, and tables that work without rollback. */
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
                HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
                HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
                HA_FILE_BASED | HA_CAN_GEOMETRY | TRANSACTION_STATE |
                HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS | HA_CAN_REPAIR |
                HA_CAN_VIRTUAL_COLUMNS | HA_CAN_EXPORT |
                HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT |
                HA_CAN_TABLES_WITHOUT_ROLLBACK),
can_enable_indexes(0), bulk_insert_single_undo(BULK_INSERT_NONE)
{}
| 0
|
508,842
|
/*
  Append an item to this select's item (projection) list, allocating list
  nodes on the THD's mem_root.  Returns true on out-of-memory.
*/
bool st_select_lex::add_item_to_list(THD *thd, Item *item)
{
  DBUG_ENTER("st_select_lex::add_item_to_list");
  DBUG_PRINT("info", ("Item: %p", item));
  const bool failed= item_list.push_back(item, thd->mem_root);
  DBUG_RETURN(failed);
}
| 0
|
509,522
|
/*
  Start an extended full-text search on index inx for the given key string.
  Thin wrapper around the Maria FT engine; returns its FT_INFO handle.
*/
FT_INFO *ha_maria::ft_init_ext(uint flags, uint inx, String * key)
{
  const uchar *search_str= (uchar *) key->ptr();
  return maria_ft_init_search(flags, file, inx,
                              (uchar *) search_str, key->length(),
                              key->charset(), table->record[0]);
}
| 0
|
488,360
|
/*
 * Copy one PGD entry's worth of PUD-level page tables from src_mm to
 * dst_mm for the range [addr, end) of vma (fork path).  Allocates the
 * destination PUD table, then walks the source PUDs, recursing into
 * copy_pmd_range() for each populated entry.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		/* Skip (and clear if corrupt) empty source entries. */
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}
| 0
|
405,717
|
/*
 * Program a new MAC address into the EmacLite core.
 * The address is staged through the next-to-use Tx buffer: write the six
 * address bytes, set the length register, then set the PROG_MAC_ADDR bit
 * in the Tx status register and busy-wait until the hardware clears it.
 * NOTE(review): the wait loop has no timeout; it relies on the hardware
 * always completing the update.
 */
static void xemaclite_update_address(struct net_local *drvdata,
				     u8 *address_ptr)
{
	void __iomem *addr;
	u32 reg_data;

	/* Determine the expected Tx buffer address */
	addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;

	/* Stage the MAC bytes in the Tx buffer and set the transfer length. */
	xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN);
	xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);

	/* Update the MAC address in the EmacLite */
	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
	xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);

	/* Wait for EmacLite to finish with the MAC address update */
	while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
		XEL_TSR_PROG_MAC_ADDR) != 0)
		;
}
| 0
|
294,412
|
/*
 * Date.xmlschema(str = '-4712-01-01', start = DEFAULT_SG, **opts)
 * Parse an XML Schema (ISO 8601 subset) date string into a Date.
 * Accepts up to two positional arguments plus a keyword-options hash.
 */
date_s_xmlschema(int argc, VALUE *argv, VALUE klass)
{
    VALUE str, sg, opt;

    rb_scan_args(argc, argv, "02:", &str, &sg, &opt);
    /* The options hash does not count as a positional argument. */
    if (!NIL_P(opt)) argc--;

    /* Fill in defaults for omitted positional arguments.  Falling
       through from case 0 to case 1 is intentional: with no arguments,
       both the string and the calendar-reform day get defaults. */
    switch (argc) {
      case 0:
	str = rb_str_new2("-4712-01-01");
	/* fall through */
      case 1:
	sg = INT2FIX(DEFAULT_SG);
    }

    {
	int argc2 = 1;
	VALUE argv2[2];
	argv2[0] = str;
	if (!NIL_P(opt)) argv2[argc2++] = opt;
	/* Parse into a fragments hash, then build the Date object from it. */
	VALUE hash = date_s__xmlschema(argc2, argv2, klass);
	return d_new_by_frags(klass, hash, sg);
    }
}
| 0
|
508,920
|
/*
  Recompute used-table and null-ability information for this SELECT after
  permanent transformations (e.g. view/derived-table merging).
  Walks leaf tables, ON expressions, JTBM subselect left expressions, the
  WHERE/HAVING conditions, the select list, outer references, and
  GROUP BY / ORDER BY items, calling update_used_tables() on each.
*/
void SELECT_LEX::update_used_tables()
{
  TABLE_LIST *tl;
  List_iterator<TABLE_LIST> ti(leaf_tables);

  /*
    Pass 1: for base tables that ended up inside a merged view/derived
    table, reset covering-key info and clear the read/vcol bitmaps so they
    can be recalculated for the merged query.
  */
  while ((tl= ti++))
  {
    if (tl->table && !tl->is_view_or_derived())
    {
      TABLE_LIST *embedding= tl->embedding;
      for (embedding= tl->embedding; embedding; embedding=embedding->embedding)
      {
        if (embedding->is_view_or_derived())
        {
          DBUG_ASSERT(embedding->is_merged_derived());
          TABLE *tab= tl->table;
          tab->covering_keys= tab->s->keys_for_keyread;
          tab->covering_keys.intersect(tab->keys_in_use_for_query);
          /*
            View/derived was merged. Need to recalculate read_set/vcol_set
            bitmaps here. For example:
              CREATE VIEW v1 AS SELECT f1,f2,f3 FROM t1;
              SELECT f1 FROM v1;
            Initially, the view definition will put all f1,f2,f3 in the
            read_set for t1. But after the view is merged, only f1 should
            be in the read_set.
          */
          bitmap_clear_all(tab->read_set);
          if (tab->vcol_set)
            bitmap_clear_all(tab->vcol_set);
          break;
        }
      }
    }
  }

  /*
    Pass 2: propagate maybe_null from enclosing outer joins, and refresh
    used-tables info of ON expressions and JTBM left expressions.
  */
  ti.rewind();
  while ((tl= ti++))
  {
    TABLE_LIST *embedding= tl;
    do
    {
      bool maybe_null;
      /* Any enclosing outer join makes this table's rows nullable. */
      if ((maybe_null= MY_TEST(embedding->outer_join)))
      {
        tl->table->maybe_null= maybe_null;
        break;
      }
    }
    while ((embedding= embedding->embedding));
    if (tl->on_expr)
    {
      tl->on_expr->update_used_tables();
      tl->on_expr->walk(&Item::eval_not_null_tables, 0, NULL);
    }
    /*
      - There is no need to check sj_on_expr, because merged semi-joins inject
      sj_on_expr into the parent's WHERE clase.
      - For non-merged semi-joins (aka JTBMs), we need to check their
      left_expr. There is no need to check the rest of the subselect, we know
      it is uncorrelated and so cannot refer to any tables in this select.
    */
    if (tl->jtbm_subselect)
    {
      Item *left_expr= tl->jtbm_subselect->left_expr;
      left_expr->walk(&Item::update_table_bitmaps_processor, FALSE, NULL);
    }
    /* Walk up nested joins: refresh ON expressions attached to enclosing
       join nests whose first member is the current element. */
    embedding= tl->embedding;
    while (embedding)
    {
      if (embedding->on_expr &&
          embedding->nested_join->join_list.head() == tl)
      {
        embedding->on_expr->update_used_tables();
        embedding->on_expr->walk(&Item::eval_not_null_tables, 0, NULL);
      }
      tl= embedding;
      embedding= tl->embedding;
    }
  }

  /* WHERE condition. */
  if (join->conds)
  {
    join->conds->update_used_tables();
    join->conds->walk(&Item::eval_not_null_tables, 0, NULL);
  }
  /* HAVING condition. */
  if (join->having)
  {
    join->having->update_used_tables();
  }

  /* Select list: also accumulate the map of tables it references. */
  Item *item;
  List_iterator_fast<Item> it(join->all_fields);
  select_list_tables= 0;
  while ((item= it++))
  {
    item->update_used_tables();
    select_list_tables|= item->used_tables();
  }

  /* Outer references resolved in this select. */
  Item_outer_ref *ref;
  List_iterator_fast<Item_outer_ref> ref_it(inner_refs_list);
  while ((ref= ref_it++))
  {
    item= ref->outer_ref;
    item->update_used_tables();
  }

  /* GROUP BY items. */
  for (ORDER *order= group_list.first; order; order= order->next)
    (*order->item)->update_used_tables();

  /* ORDER BY items - unless this select's ORDER BY belongs to the
     enclosing UNIT's global parameters. */
  if (!master_unit()->is_union() || master_unit()->global_parameters() != this)
  {
    for (ORDER *order= order_list.first; order; order= order->next)
      (*order->item)->update_used_tables();
  }
  join->result->update_used_tables();
}
| 0
|
214,272
|
/*
 * Find the next occurrence of "quotechar" in "line", starting at "col".
 * Characters listed in "escape" (may be NULL) cause the following
 * character to be skipped rather than matched.
 * Returns the column of the quote character, or -1 when the end of the
 * line is reached first.
 */
find_next_quote(
    char_u	*line,
    int		col,
    int		quotechar,
    char_u	*escape)	// escape characters, can be NULL
{
    int	c;

    for (;;)
    {
	c = line[col];
	if (c == NUL)
	    return -1;
	else if (escape != NULL && vim_strchr(escape, c))
	{
	    ++col;
	    // Fix: a trailing escape character must not make us step past
	    // the NUL terminator - the advance below would then read
	    // beyond the end of the line (vim patch 8.2.4925).
	    if (line[col] == NUL)
		return -1;
	}
	else if (c == quotechar)
	    break;
	if (has_mbyte)
	    col += (*mb_ptr2len)(line + col);
	else
	    ++col;
    }
    return col;
}
| 1
|
336,624
|
static void reds_send_link_result(RedLinkInfo *link, uint32_t error)
{
error = GUINT32_TO_LE(error);
red_stream_write_all(link->stream, &error, sizeof(error));
}
| 0
|
281,069
|
/*
 * Check that an xfrm dst bundle is still usable.
 * Walks the bundle from "first" down the ->child chain, validating the
 * route cookie, xfrm state, and generation counters of every segment,
 * while noting the topmost segment whose cached child/route MTU changed.
 * If any MTU changed, a second pass walks back up from that segment,
 * re-propagating the (state-clamped) MTU into each dst's RTAX_MTU metric.
 * Returns 1 when the bundle is OK, 0 when it must be discarded.
 */
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	/* The underlying path route must still be valid and its device up. */
	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	/* Queueing bundles are always considered usable. */
	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	/* Pass 1: validate every segment; remember the topmost segment with
	   a stale cached MTU in "last". */
	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	/* No MTU changes anywhere: nothing to propagate. */
	if (likely(!last))
		return 1;

	/* Pass 2: walk back up from "last" to "first", clamping the MTU by
	   each xfrm state and the cached route MTU, and updating metrics. */
	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
| 0
|
439,081
|
/*
 * Register the DPX coder with ImageMagick: attach its decoder, encoder,
 * and format-detection handlers, mark it as requiring a seekable stream,
 * and disable multi-image adjoin.  Returns the coder signature expected
 * by the module loader.
 */
ModuleExport size_t RegisterDPXImage(void)
{
  MagickInfo
    *entry;

  static const char
    *DPXNote =
    {
      "Digital Moving Picture Exchange Bitmap, Version 2.0.\n"
      "See SMPTE 268M-2003 specification at http://www.smtpe.org\n"
    };

  entry=SetMagickInfo("DPX");
  entry->decoder=(DecodeImageHandler *) ReadDPXImage;
  entry->encoder=(EncodeImageHandler *) WriteDPXImage;
  entry->magick=(IsImageFormatHandler *) IsDPX;
  /* DPX decoding needs random access to the input stream. */
  entry->seekable_stream=MagickTrue;
  entry->adjoin=MagickFalse;
  entry->description=ConstantString("SMPTE 268M-2003 (DPX 2.0)");
  entry->note=ConstantString(DPXNote);
  entry->module=ConstantString("DPX");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
| 0
|
247,702
|
// Accessor for the expected peer certificate value (read-only reference).
const std::string& expectedPeerCert() const { return expected_peer_cert_; }
| 0
|
246,653
|
/*
 * Close the current access unit: finalize its flags now (the actual
 * flush/send of the packet happens later), then reset the per-AU parsing
 * state so the next access unit starts clean.
 */
static void naludmx_end_access_unit(GF_NALUDmxCtx *ctx)
{
	/* finalize current frame flags - we will flush (send) later on */
	naludmx_finalize_au_flags(ctx);

	/* Reset per-access-unit state. */
	ctx->has_islice = GF_FALSE;
	ctx->first_slice_in_au = GF_TRUE;
	ctx->sei_recovery_frame_count = -1;
	ctx->au_sap = GF_FILTER_SAP_NONE;
	ctx->au_sap2_poc_reset = GF_FALSE;
	ctx->bottom_field_flag = GF_FALSE;
}
| 0
|
273,069
|
/*
 * Initialize a mutex with error-checking semantics, so that misuse
 * (relocking by the owner, unlocking by a non-owner) is reported as an
 * error instead of deadlocking silently.
 * Returns the pthread_mutex_init() result.
 */
mutex_init(pthread_mutex_t *mutex)
{
	pthread_mutexattr_t attr;
	int rc;

	CHECK_ERR(L_MISC, pthread_mutexattr_init(&attr));
	CHECK_ERR(L_MISC, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
	rc = pthread_mutex_init(mutex, &attr);
	CHECK_ERR(L_MISC, pthread_mutexattr_destroy(&attr));
	return rc;
}
| 0
|
234,203
|
display_debug_frames (struct dwarf_section *section,
void *file ATTRIBUTE_UNUSED)
{
unsigned char *start = section->start;
unsigned char *end = start + section->size;
unsigned char *section_start = start;
Frame_Chunk *chunks = NULL, *forward_refs = NULL;
Frame_Chunk *remembered_state = NULL;
Frame_Chunk *rs;
bool is_eh = strcmp (section->name, ".eh_frame") == 0;
unsigned int max_regs = 0;
const char *bad_reg = _("bad register: ");
unsigned int saved_eh_addr_size = eh_addr_size;
introduce (section, false);
while (start < end)
{
unsigned char *saved_start;
unsigned char *block_end;
dwarf_vma length;
dwarf_vma cie_id;
Frame_Chunk *fc;
Frame_Chunk *cie;
int need_col_headers = 1;
unsigned char *augmentation_data = NULL;
bfd_size_type augmentation_data_len = 0;
unsigned int encoded_ptr_size = saved_eh_addr_size;
unsigned int offset_size;
bool all_nops;
static Frame_Chunk fde_fc;
saved_start = start;
SAFE_BYTE_GET_AND_INC (length, start, 4, end);
if (length == 0)
{
printf ("\n%08lx ZERO terminator\n\n",
(unsigned long)(saved_start - section_start));
/* Skip any zero terminators that directly follow.
A corrupt section size could have loaded a whole
slew of zero filled memory bytes. eg
PR 17512: file: 070-19381-0.004. */
while (start < end && * start == 0)
++ start;
continue;
}
if (length == 0xffffffff)
{
SAFE_BYTE_GET_AND_INC (length, start, 8, end);
offset_size = 8;
}
else
offset_size = 4;
if (length > (size_t) (end - start))
{
warn ("Invalid length 0x%s in FDE at %#08lx\n",
dwarf_vmatoa_1 (NULL, length, offset_size),
(unsigned long) (saved_start - section_start));
block_end = end;
}
else
block_end = start + length;
SAFE_BYTE_GET_AND_INC (cie_id, start, offset_size, block_end);
if (is_eh ? (cie_id == 0) : ((offset_size == 4 && cie_id == DW_CIE_ID)
|| (offset_size == 8 && cie_id == DW64_CIE_ID)))
{
int version;
unsigned int mreg;
start = read_cie (start, block_end, &cie, &version,
&augmentation_data_len, &augmentation_data);
/* PR 17512: file: 027-135133-0.005. */
if (cie == NULL)
break;
fc = cie;
fc->next = chunks;
chunks = fc;
fc->chunk_start = saved_start;
mreg = max_regs > 0 ? max_regs - 1 : 0;
if (mreg < fc->ra)
mreg = fc->ra;
if (frame_need_space (fc, mreg) < 0)
break;
if (fc->fde_encoding)
encoded_ptr_size = size_of_encoded_value (fc->fde_encoding);
printf ("\n%08lx ", (unsigned long) (saved_start - section_start));
print_dwarf_vma (length, fc->ptr_size);
print_dwarf_vma (cie_id, offset_size);
if (do_debug_frames_interp)
{
printf ("CIE \"%s\" cf=%d df=%d ra=%d\n", fc->augmentation,
fc->code_factor, fc->data_factor, fc->ra);
}
else
{
printf ("CIE\n");
printf (" Version: %d\n", version);
printf (" Augmentation: \"%s\"\n", fc->augmentation);
if (version >= 4)
{
printf (" Pointer Size: %u\n", fc->ptr_size);
printf (" Segment Size: %u\n", fc->segment_size);
}
printf (" Code alignment factor: %u\n", fc->code_factor);
printf (" Data alignment factor: %d\n", fc->data_factor);
printf (" Return address column: %d\n", fc->ra);
if (augmentation_data_len)
display_augmentation_data (augmentation_data, augmentation_data_len);
putchar ('\n');
}
}
else
{
unsigned char *look_for;
unsigned long segment_selector;
dwarf_vma cie_off;
cie_off = cie_id;
if (is_eh)
{
dwarf_vma sign = (dwarf_vma) 1 << (offset_size * 8 - 1);
cie_off = (cie_off ^ sign) - sign;
cie_off = start - 4 - section_start - cie_off;
}
look_for = section_start + cie_off;
if (cie_off <= (dwarf_vma) (saved_start - section_start))
{
for (cie = chunks; cie ; cie = cie->next)
if (cie->chunk_start == look_for)
break;
}
else if (cie_off >= section->size)
cie = NULL;
else
{
for (cie = forward_refs; cie ; cie = cie->next)
if (cie->chunk_start == look_for)
break;
if (!cie)
{
unsigned int off_size;
unsigned char *cie_scan;
cie_scan = look_for;
off_size = 4;
SAFE_BYTE_GET_AND_INC (length, cie_scan, 4, end);
if (length == 0xffffffff)
{
SAFE_BYTE_GET_AND_INC (length, cie_scan, 8, end);
off_size = 8;
}
if (length != 0 && length <= (size_t) (end - cie_scan))
{
dwarf_vma c_id;
unsigned char *cie_end = cie_scan + length;
SAFE_BYTE_GET_AND_INC (c_id, cie_scan, off_size,
cie_end);
if (is_eh
? c_id == 0
: ((off_size == 4 && c_id == DW_CIE_ID)
|| (off_size == 8 && c_id == DW64_CIE_ID)))
{
int version;
unsigned int mreg;
read_cie (cie_scan, cie_end, &cie, &version,
&augmentation_data_len, &augmentation_data);
/* PR 17512: file: 3450-2098-0.004. */
if (cie == NULL)
{
warn (_("Failed to read CIE information\n"));
break;
}
cie->next = forward_refs;
forward_refs = cie;
cie->chunk_start = look_for;
mreg = max_regs > 0 ? max_regs - 1 : 0;
if (mreg < cie->ra)
mreg = cie->ra;
if (frame_need_space (cie, mreg) < 0)
{
warn (_("Invalid max register\n"));
break;
}
if (cie->fde_encoding)
encoded_ptr_size
= size_of_encoded_value (cie->fde_encoding);
}
}
}
}
fc = &fde_fc;
memset (fc, 0, sizeof (Frame_Chunk));
if (!cie)
{
fc->ncols = 0;
fc->col_type = (short int *) xmalloc (sizeof (short int));
fc->col_offset = (int *) xmalloc (sizeof (int));
if (frame_need_space (fc, max_regs > 0 ? max_regs - 1 : 0) < 0)
{
warn (_("Invalid max register\n"));
break;
}
cie = fc;
fc->augmentation = "";
fc->fde_encoding = 0;
fc->ptr_size = eh_addr_size;
fc->segment_size = 0;
}
else
{
fc->ncols = cie->ncols;
fc->col_type = (short int *) xcmalloc (fc->ncols, sizeof (short int));
fc->col_offset = (int *) xcmalloc (fc->ncols, sizeof (int));
memcpy (fc->col_type, cie->col_type, fc->ncols * sizeof (short int));
memcpy (fc->col_offset, cie->col_offset, fc->ncols * sizeof (int));
fc->augmentation = cie->augmentation;
fc->ptr_size = cie->ptr_size;
eh_addr_size = cie->ptr_size;
fc->segment_size = cie->segment_size;
fc->code_factor = cie->code_factor;
fc->data_factor = cie->data_factor;
fc->cfa_reg = cie->cfa_reg;
fc->cfa_offset = cie->cfa_offset;
fc->ra = cie->ra;
if (frame_need_space (fc, max_regs > 0 ? max_regs - 1: 0) < 0)
{
warn (_("Invalid max register\n"));
break;
}
fc->fde_encoding = cie->fde_encoding;
}
if (fc->fde_encoding)
encoded_ptr_size = size_of_encoded_value (fc->fde_encoding);
segment_selector = 0;
if (fc->segment_size)
{
if (fc->segment_size > sizeof (segment_selector))
{
/* PR 17512: file: 9e196b3e. */
warn (_("Probably corrupt segment size: %d - using 4 instead\n"), fc->segment_size);
fc->segment_size = 4;
}
SAFE_BYTE_GET_AND_INC (segment_selector, start,
fc->segment_size, block_end);
}
fc->pc_begin = get_encoded_value (&start, fc->fde_encoding, section,
block_end);
/* FIXME: It appears that sometimes the final pc_range value is
encoded in less than encoded_ptr_size bytes. See the x86_64
run of the "objcopy on compressed debug sections" test for an
example of this. */
SAFE_BYTE_GET_AND_INC (fc->pc_range, start, encoded_ptr_size,
block_end);
if (cie->augmentation[0] == 'z')
{
READ_ULEB (augmentation_data_len, start, block_end);
augmentation_data = start;
/* PR 17512 file: 722-8446-0.004 and PR 22386. */
if (augmentation_data_len > (bfd_size_type) (block_end - start))
{
warn (_("Augmentation data too long: 0x%s, "
"expected at most %#lx\n"),
dwarf_vmatoa ("x", augmentation_data_len),
(unsigned long) (block_end - start));
start = block_end;
augmentation_data = NULL;
augmentation_data_len = 0;
}
start += augmentation_data_len;
}
printf ("\n%08lx %s %s FDE ",
(unsigned long)(saved_start - section_start),
dwarf_vmatoa_1 (NULL, length, fc->ptr_size),
dwarf_vmatoa_1 (NULL, cie_id, offset_size));
if (cie->chunk_start)
printf ("cie=%08lx",
(unsigned long) (cie->chunk_start - section_start));
else
/* Ideally translate "invalid " to 8 chars, trailing space
is optional. */
printf (_("cie=invalid "));
printf (" pc=");
if (fc->segment_size)
printf ("%04lx:", segment_selector);
printf ("%s..%s\n",
dwarf_vmatoa_1 (NULL, fc->pc_begin, fc->ptr_size),
dwarf_vmatoa_1 (NULL, fc->pc_begin + fc->pc_range, fc->ptr_size));
if (! do_debug_frames_interp && augmentation_data_len)
{
display_augmentation_data (augmentation_data, augmentation_data_len);
putchar ('\n');
}
}
/* At this point, fc is the current chunk, cie (if any) is set, and
we're about to interpret instructions for the chunk. */
/* ??? At present we need to do this always, since this sizes the
fc->col_type and fc->col_offset arrays, which we write into always.
We should probably split the interpreted and non-interpreted bits
into two different routines, since there's so much that doesn't
really overlap between them. */
if (1 || do_debug_frames_interp)
{
/* Start by making a pass over the chunk, allocating storage
and taking note of what registers are used. */
unsigned char *tmp = start;
while (start < block_end)
{
unsigned int reg, op, opa;
unsigned long temp;
op = *start++;
opa = op & 0x3f;
if (op & 0xc0)
op &= 0xc0;
/* Warning: if you add any more cases to this switch, be
sure to add them to the corresponding switch below. */
reg = -1u;
switch (op)
{
case DW_CFA_advance_loc:
break;
case DW_CFA_offset:
SKIP_ULEB (start, block_end);
reg = opa;
break;
case DW_CFA_restore:
reg = opa;
break;
case DW_CFA_set_loc:
if ((size_t) (block_end - start) < encoded_ptr_size)
start = block_end;
else
start += encoded_ptr_size;
break;
case DW_CFA_advance_loc1:
if ((size_t) (block_end - start) < 1)
start = block_end;
else
start += 1;
break;
case DW_CFA_advance_loc2:
if ((size_t) (block_end - start) < 2)
start = block_end;
else
start += 2;
break;
case DW_CFA_advance_loc4:
if ((size_t) (block_end - start) < 4)
start = block_end;
else
start += 4;
break;
case DW_CFA_offset_extended:
case DW_CFA_val_offset:
READ_ULEB (reg, start, block_end);
SKIP_ULEB (start, block_end);
break;
case DW_CFA_restore_extended:
READ_ULEB (reg, start, block_end);
break;
case DW_CFA_undefined:
READ_ULEB (reg, start, block_end);
break;
case DW_CFA_same_value:
READ_ULEB (reg, start, block_end);
break;
case DW_CFA_register:
READ_ULEB (reg, start, block_end);
SKIP_ULEB (start, block_end);
break;
case DW_CFA_def_cfa:
SKIP_ULEB (start, block_end);
SKIP_ULEB (start, block_end);
break;
case DW_CFA_def_cfa_register:
SKIP_ULEB (start, block_end);
break;
case DW_CFA_def_cfa_offset:
SKIP_ULEB (start, block_end);
break;
case DW_CFA_def_cfa_expression:
READ_ULEB (temp, start, block_end);
if ((size_t) (block_end - start) < temp)
start = block_end;
else
start += temp;
break;
case DW_CFA_expression:
case DW_CFA_val_expression:
READ_ULEB (reg, start, block_end);
READ_ULEB (temp, start, block_end);
if ((size_t) (block_end - start) < temp)
start = block_end;
else
start += temp;
break;
case DW_CFA_offset_extended_sf:
case DW_CFA_val_offset_sf:
READ_ULEB (reg, start, block_end);
SKIP_SLEB (start, block_end);
break;
case DW_CFA_def_cfa_sf:
SKIP_ULEB (start, block_end);
SKIP_SLEB (start, block_end);
break;
case DW_CFA_def_cfa_offset_sf:
SKIP_SLEB (start, block_end);
break;
case DW_CFA_MIPS_advance_loc8:
if ((size_t) (block_end - start) < 8)
start = block_end;
else
start += 8;
break;
case DW_CFA_GNU_args_size:
SKIP_ULEB (start, block_end);
break;
case DW_CFA_GNU_negative_offset_extended:
READ_ULEB (reg, start, block_end);
SKIP_ULEB (start, block_end);
break;
default:
break;
}
if (reg != -1u && frame_need_space (fc, reg) >= 0)
{
/* Don't leave any reg as DW_CFA_unreferenced so
that frame_display_row prints name of regs in
header, and all referenced regs in each line. */
if (reg >= cie->ncols
|| cie->col_type[reg] == DW_CFA_unreferenced)
fc->col_type[reg] = DW_CFA_undefined;
else
fc->col_type[reg] = cie->col_type[reg];
}
}
start = tmp;
}
all_nops = true;
/* Now we know what registers are used, make a second pass over
the chunk, this time actually printing out the info. */
while (start < block_end)
{
unsigned op, opa;
unsigned long ul, roffs;
/* Note: It is tempting to use an unsigned long for 'reg' but there
are various functions, notably frame_space_needed() that assume that
reg is an unsigned int. */
unsigned int reg;
dwarf_signed_vma l;
dwarf_vma ofs;
dwarf_vma vma;
const char *reg_prefix = "";
op = *start++;
opa = op & 0x3f;
if (op & 0xc0)
op &= 0xc0;
/* Make a note if something other than DW_CFA_nop happens. */
if (op != DW_CFA_nop)
all_nops = false;
/* Warning: if you add any more cases to this switch, be
sure to add them to the corresponding switch above. */
switch (op)
{
case DW_CFA_advance_loc:
if (do_debug_frames_interp)
frame_display_row (fc, &need_col_headers, &max_regs);
else
printf (" DW_CFA_advance_loc: %d to %s\n",
opa * fc->code_factor,
dwarf_vmatoa_1 (NULL,
fc->pc_begin + opa * fc->code_factor,
fc->ptr_size));
fc->pc_begin += opa * fc->code_factor;
break;
case DW_CFA_offset:
READ_ULEB (roffs, start, block_end);
if (opa >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_offset: %s%s at cfa%+ld\n",
reg_prefix, regname (opa, 0),
roffs * fc->data_factor);
if (*reg_prefix == '\0')
{
fc->col_type[opa] = DW_CFA_offset;
fc->col_offset[opa] = roffs * fc->data_factor;
}
break;
case DW_CFA_restore:
if (opa >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_restore: %s%s\n",
reg_prefix, regname (opa, 0));
if (*reg_prefix != '\0')
break;
if (opa >= cie->ncols
|| cie->col_type[opa] == DW_CFA_unreferenced)
{
fc->col_type[opa] = DW_CFA_undefined;
fc->col_offset[opa] = 0;
}
else
{
fc->col_type[opa] = cie->col_type[opa];
fc->col_offset[opa] = cie->col_offset[opa];
}
break;
case DW_CFA_set_loc:
vma = get_encoded_value (&start, fc->fde_encoding, section,
block_end);
if (do_debug_frames_interp)
frame_display_row (fc, &need_col_headers, &max_regs);
else
printf (" DW_CFA_set_loc: %s\n",
dwarf_vmatoa_1 (NULL, vma, fc->ptr_size));
fc->pc_begin = vma;
break;
case DW_CFA_advance_loc1:
SAFE_BYTE_GET_AND_INC (ofs, start, 1, block_end);
if (do_debug_frames_interp)
frame_display_row (fc, &need_col_headers, &max_regs);
else
printf (" DW_CFA_advance_loc1: %ld to %s\n",
(unsigned long) (ofs * fc->code_factor),
dwarf_vmatoa_1 (NULL,
fc->pc_begin + ofs * fc->code_factor,
fc->ptr_size));
fc->pc_begin += ofs * fc->code_factor;
break;
case DW_CFA_advance_loc2:
SAFE_BYTE_GET_AND_INC (ofs, start, 2, block_end);
if (do_debug_frames_interp)
frame_display_row (fc, &need_col_headers, &max_regs);
else
printf (" DW_CFA_advance_loc2: %ld to %s\n",
(unsigned long) (ofs * fc->code_factor),
dwarf_vmatoa_1 (NULL,
fc->pc_begin + ofs * fc->code_factor,
fc->ptr_size));
fc->pc_begin += ofs * fc->code_factor;
break;
case DW_CFA_advance_loc4:
SAFE_BYTE_GET_AND_INC (ofs, start, 4, block_end);
if (do_debug_frames_interp)
frame_display_row (fc, &need_col_headers, &max_regs);
else
printf (" DW_CFA_advance_loc4: %ld to %s\n",
(unsigned long) (ofs * fc->code_factor),
dwarf_vmatoa_1 (NULL,
fc->pc_begin + ofs * fc->code_factor,
fc->ptr_size));
fc->pc_begin += ofs * fc->code_factor;
break;
case DW_CFA_offset_extended:
READ_ULEB (reg, start, block_end);
READ_ULEB (roffs, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_offset_extended: %s%s at cfa%+ld\n",
reg_prefix, regname (reg, 0),
roffs * fc->data_factor);
if (*reg_prefix == '\0')
{
fc->col_type[reg] = DW_CFA_offset;
fc->col_offset[reg] = roffs * fc->data_factor;
}
break;
case DW_CFA_val_offset:
READ_ULEB (reg, start, block_end);
READ_ULEB (roffs, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_val_offset: %s%s is cfa%+ld\n",
reg_prefix, regname (reg, 0),
roffs * fc->data_factor);
if (*reg_prefix == '\0')
{
fc->col_type[reg] = DW_CFA_val_offset;
fc->col_offset[reg] = roffs * fc->data_factor;
}
break;
case DW_CFA_restore_extended:
READ_ULEB (reg, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_restore_extended: %s%s\n",
reg_prefix, regname (reg, 0));
if (*reg_prefix != '\0')
break;
if (reg >= cie->ncols
|| cie->col_type[reg] == DW_CFA_unreferenced)
{
fc->col_type[reg] = DW_CFA_undefined;
fc->col_offset[reg] = 0;
}
else
{
fc->col_type[reg] = cie->col_type[reg];
fc->col_offset[reg] = cie->col_offset[reg];
}
break;
case DW_CFA_undefined:
READ_ULEB (reg, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_undefined: %s%s\n",
reg_prefix, regname (reg, 0));
if (*reg_prefix == '\0')
{
fc->col_type[reg] = DW_CFA_undefined;
fc->col_offset[reg] = 0;
}
break;
case DW_CFA_same_value:
READ_ULEB (reg, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_same_value: %s%s\n",
reg_prefix, regname (reg, 0));
if (*reg_prefix == '\0')
{
fc->col_type[reg] = DW_CFA_same_value;
fc->col_offset[reg] = 0;
}
break;
case DW_CFA_register:
READ_ULEB (reg, start, block_end);
READ_ULEB (roffs, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
{
printf (" DW_CFA_register: %s%s in ",
reg_prefix, regname (reg, 0));
puts (regname (roffs, 0));
}
if (*reg_prefix == '\0')
{
fc->col_type[reg] = DW_CFA_register;
fc->col_offset[reg] = roffs;
}
break;
case DW_CFA_remember_state:
if (! do_debug_frames_interp)
printf (" DW_CFA_remember_state\n");
rs = (Frame_Chunk *) xmalloc (sizeof (Frame_Chunk));
rs->cfa_offset = fc->cfa_offset;
rs->cfa_reg = fc->cfa_reg;
rs->ra = fc->ra;
rs->cfa_exp = fc->cfa_exp;
rs->ncols = fc->ncols;
rs->col_type = (short int *) xcmalloc (rs->ncols,
sizeof (* rs->col_type));
rs->col_offset = (int *) xcmalloc (rs->ncols, sizeof (* rs->col_offset));
memcpy (rs->col_type, fc->col_type, rs->ncols * sizeof (* fc->col_type));
memcpy (rs->col_offset, fc->col_offset, rs->ncols * sizeof (* fc->col_offset));
rs->next = remembered_state;
remembered_state = rs;
break;
case DW_CFA_restore_state:
if (! do_debug_frames_interp)
printf (" DW_CFA_restore_state\n");
rs = remembered_state;
if (rs)
{
remembered_state = rs->next;
fc->cfa_offset = rs->cfa_offset;
fc->cfa_reg = rs->cfa_reg;
fc->ra = rs->ra;
fc->cfa_exp = rs->cfa_exp;
if (frame_need_space (fc, rs->ncols - 1) < 0)
{
warn (_("Invalid column number in saved frame state\n"));
fc->ncols = 0;
break;
}
memcpy (fc->col_type, rs->col_type, rs->ncols * sizeof (* rs->col_type));
memcpy (fc->col_offset, rs->col_offset,
rs->ncols * sizeof (* rs->col_offset));
free (rs->col_type);
free (rs->col_offset);
free (rs);
}
else if (do_debug_frames_interp)
printf ("Mismatched DW_CFA_restore_state\n");
break;
case DW_CFA_def_cfa:
READ_ULEB (fc->cfa_reg, start, block_end);
READ_ULEB (fc->cfa_offset, start, block_end);
fc->cfa_exp = 0;
if (! do_debug_frames_interp)
printf (" DW_CFA_def_cfa: %s ofs %d\n",
regname (fc->cfa_reg, 0), (int) fc->cfa_offset);
break;
case DW_CFA_def_cfa_register:
READ_ULEB (fc->cfa_reg, start, block_end);
fc->cfa_exp = 0;
if (! do_debug_frames_interp)
printf (" DW_CFA_def_cfa_register: %s\n",
regname (fc->cfa_reg, 0));
break;
case DW_CFA_def_cfa_offset:
READ_ULEB (fc->cfa_offset, start, block_end);
if (! do_debug_frames_interp)
printf (" DW_CFA_def_cfa_offset: %d\n", (int) fc->cfa_offset);
break;
case DW_CFA_nop:
if (! do_debug_frames_interp)
printf (" DW_CFA_nop\n");
break;
case DW_CFA_def_cfa_expression:
READ_ULEB (ul, start, block_end);
if (ul > (size_t) (block_end - start))
{
printf (_(" DW_CFA_def_cfa_expression: <corrupt len %lu>\n"), ul);
break;
}
if (! do_debug_frames_interp)
{
printf (" DW_CFA_def_cfa_expression (");
decode_location_expression (start, eh_addr_size, 0, -1,
ul, 0, section);
printf (")\n");
}
fc->cfa_exp = 1;
start += ul;
break;
case DW_CFA_expression:
READ_ULEB (reg, start, block_end);
READ_ULEB (ul, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
/* PR 17512: file: 069-133014-0.006. */
/* PR 17512: file: 98c02eb4. */
if (ul > (size_t) (block_end - start))
{
printf (_(" DW_CFA_expression: <corrupt len %lu>\n"), ul);
break;
}
if (! do_debug_frames_interp || *reg_prefix != '\0')
{
printf (" DW_CFA_expression: %s%s (",
reg_prefix, regname (reg, 0));
decode_location_expression (start, eh_addr_size, 0, -1,
ul, 0, section);
printf (")\n");
}
if (*reg_prefix == '\0')
fc->col_type[reg] = DW_CFA_expression;
start += ul;
break;
case DW_CFA_val_expression:
READ_ULEB (reg, start, block_end);
READ_ULEB (ul, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (ul > (size_t) (block_end - start))
{
printf (" DW_CFA_val_expression: <corrupt len %lu>\n", ul);
break;
}
if (! do_debug_frames_interp || *reg_prefix != '\0')
{
printf (" DW_CFA_val_expression: %s%s (",
reg_prefix, regname (reg, 0));
decode_location_expression (start, eh_addr_size, 0, -1,
ul, 0, section);
printf (")\n");
}
if (*reg_prefix == '\0')
fc->col_type[reg] = DW_CFA_val_expression;
start += ul;
break;
case DW_CFA_offset_extended_sf:
READ_ULEB (reg, start, block_end);
READ_SLEB (l, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_offset_extended_sf: %s%s at cfa%+ld\n",
reg_prefix, regname (reg, 0),
(long)(l * fc->data_factor));
if (*reg_prefix == '\0')
{
fc->col_type[reg] = DW_CFA_offset;
fc->col_offset[reg] = l * fc->data_factor;
}
break;
case DW_CFA_val_offset_sf:
READ_ULEB (reg, start, block_end);
READ_SLEB (l, start, block_end);
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_val_offset_sf: %s%s is cfa%+ld\n",
reg_prefix, regname (reg, 0),
(long)(l * fc->data_factor));
if (*reg_prefix == '\0')
{
fc->col_type[reg] = DW_CFA_val_offset;
fc->col_offset[reg] = l * fc->data_factor;
}
break;
case DW_CFA_def_cfa_sf:
READ_ULEB (fc->cfa_reg, start, block_end);
READ_SLEB (l, start, block_end);
l *= fc->data_factor;
fc->cfa_offset = l;
fc->cfa_exp = 0;
if (! do_debug_frames_interp)
printf (" DW_CFA_def_cfa_sf: %s ofs %ld\n",
regname (fc->cfa_reg, 0), (long) l);
break;
case DW_CFA_def_cfa_offset_sf:
READ_SLEB (l, start, block_end);
l *= fc->data_factor;
fc->cfa_offset = l;
if (! do_debug_frames_interp)
printf (" DW_CFA_def_cfa_offset_sf: %ld\n", (long) l);
break;
case DW_CFA_MIPS_advance_loc8:
SAFE_BYTE_GET_AND_INC (ofs, start, 8, block_end);
if (do_debug_frames_interp)
frame_display_row (fc, &need_col_headers, &max_regs);
else
printf (" DW_CFA_MIPS_advance_loc8: %ld to %s\n",
(unsigned long) (ofs * fc->code_factor),
dwarf_vmatoa_1 (NULL,
fc->pc_begin + ofs * fc->code_factor,
fc->ptr_size));
fc->pc_begin += ofs * fc->code_factor;
break;
case DW_CFA_GNU_window_save:
if (! do_debug_frames_interp)
printf (" DW_CFA_GNU_window_save\n");
break;
case DW_CFA_GNU_args_size:
READ_ULEB (ul, start, block_end);
if (! do_debug_frames_interp)
printf (" DW_CFA_GNU_args_size: %ld\n", ul);
break;
case DW_CFA_GNU_negative_offset_extended:
READ_ULEB (reg, start, block_end);
READ_SLEB (l, start, block_end);
l = - l;
if (reg >= fc->ncols)
reg_prefix = bad_reg;
if (! do_debug_frames_interp || *reg_prefix != '\0')
printf (" DW_CFA_GNU_negative_offset_extended: %s%s at cfa%+ld\n",
reg_prefix, regname (reg, 0),
(long)(l * fc->data_factor));
if (*reg_prefix == '\0')
{
fc->col_type[reg] = DW_CFA_offset;
fc->col_offset[reg] = l * fc->data_factor;
}
break;
default:
if (op >= DW_CFA_lo_user && op <= DW_CFA_hi_user)
printf (_(" DW_CFA_??? (User defined call frame op: %#x)\n"), op);
else
warn (_("Unsupported or unknown Dwarf Call Frame Instruction number: %#x\n"), op);
start = block_end;
}
}
/* Interpret the CFA - as long as it is not completely full of NOPs. */
if (do_debug_frames_interp && ! all_nops)
frame_display_row (fc, &need_col_headers, &max_regs);
if (fde_fc.col_type != NULL)
{
free (fde_fc.col_type);
fde_fc.col_type = NULL;
}
if (fde_fc.col_offset != NULL)
{
free (fde_fc.col_offset);
fde_fc.col_offset = NULL;
}
start = block_end;
eh_addr_size = saved_eh_addr_size;
}
printf ("\n");
while (remembered_state != NULL)
{
rs = remembered_state;
remembered_state = rs->next;
free (rs->col_type);
free (rs->col_offset);
rs->next = NULL; /* Paranoia. */
free (rs);
}
while (chunks != NULL)
{
rs = chunks;
chunks = rs->next;
free (rs->col_type);
free (rs->col_offset);
rs->next = NULL; /* Paranoia. */
free (rs);
}
while (forward_refs != NULL)
{
rs = forward_refs;
forward_refs = rs->next;
free (rs->col_type);
free (rs->col_offset);
rs->next = NULL; /* Paranoia. */
free (rs);
}
return 1;
}
| 0
|
453,003
|
/* Offload an immediate expression: verdicts are delegated to the verdict
 * helper, plain data is copied into the destination offload register. */
static int nft_immediate_offload(struct nft_offload_ctx *ctx,
				 struct nft_flow_rule *flow,
				 const struct nft_expr *expr)
{
	const struct nft_immediate_expr *imm = nft_expr_priv(expr);

	if (imm->dreg != NFT_REG_VERDICT) {
		memcpy(&ctx->regs[imm->dreg].data, &imm->data,
		       sizeof(imm->data));
		return 0;
	}

	return nft_immediate_offload_verdict(ctx, flow, imm);
}
| 0
|
256,401
|
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, iter_all) {
ssize_t ret;
ret = copy_page_from_iter(bvec->bv_page,
bvec->bv_offset,
bvec->bv_len,
iter);
if (!iov_iter_count(iter))
break;
if (ret < bvec->bv_len)
return -EFAULT;
}
return 0;
}
| 0
|
459,188
|
/* Drop one reference on @block and, when it was the last one, tear the
 * block down: unshare it, unbind any offloads for @q and either destroy
 * the block directly (no chains left) or flush all chains so the last
 * chain release frees it.  @rtnl_held tells the flush path whether the
 * caller already owns RTNL. */
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	/* Takes block->lock only when the refcount actually hit zero. */
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);
		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);
		if (q)
			tcf_block_offload_unbind(block, q, ei);
		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		/* Not the last reference: only drop this qdisc's offload binding. */
		tcf_block_offload_unbind(block, q, ei);
	}
}
| 0
|
513,158
|
/** Pointer to this system variable's GLOBAL-scope value
    (delegates to do_value_ptr() with OPT_GLOBAL). */
uchar* global_value_ptr(THD *thd, const LEX_STRING *base)
{ return do_value_ptr(thd, OPT_GLOBAL, base); }
| 0
|
230,271
|
njs_array_destroy(njs_vm_t *vm, njs_array_t *array)
{
    /*
     * Element storage is optional: release it first (when present),
     * then return the array header itself to the VM memory pool.
     */
    if (array->data != NULL) {
        njs_mp_free(vm->mem_pool, array->data);
    }

    /* TODO: destroy keys. */

    njs_mp_free(vm->mem_pool, array);
}
| 0
|
252,325
|
// Computes the per-channel byte offsets within one interleaved pixel.
// Fills `channel_offset_list` with each channel's offset, and outputs the
// total bytes per pixel (`pixel_data_size`) and the running offset
// (`channel_offset`, equal to the total on success).  Returns false when a
// channel has an unrecognized pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  const size_t n = static_cast<size_t>(num_channels);

  channel_offset_list->resize(n);
  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < n; c++) {
    (*channel_offset_list)[c] = (*channel_offset);

    size_t elem_size;
    switch (channels[c].pixel_type) {
      case TINYEXR_PIXELTYPE_HALF:
        elem_size = sizeof(unsigned short);
        break;
      case TINYEXR_PIXELTYPE_FLOAT:
        elem_size = sizeof(float);
        break;
      case TINYEXR_PIXELTYPE_UINT:
        elem_size = sizeof(unsigned int);
        break;
      default:
        // Unknown pixel type.
        return false;
    }

    (*pixel_data_size) += static_cast<int>(elem_size);
    (*channel_offset) += elem_size;
  }
  return true;
}
| 0
|
508,811
|
/* True when name[0..len) is a reserved keyword, i.e. it resolves to an
   entry in the symbol hash.  len must be non-zero. */
bool is_keyword(const char *name, uint len)
{
  DBUG_ASSERT(len != 0);
  if (get_hash_symbol(name, len, 0))
    return true;
  return false;
}
| 0
|
225,775
|
GF_Err moof_box_size(GF_Box *s)
{
	u32 position = 0;
	GF_MovieFragmentBox *moof = (GF_MovieFragmentBox *) s;

	if (!s) return GF_BAD_PARAM;

	/* Enforce the child ordering of a moof box:
	   mfhd header first, then PSSH boxes, then the traf list. */
	gf_isom_check_position(s, (GF_Box *)moof->mfhd, &position);
	gf_isom_check_position_list(s, moof->PSSHs, &position);
	gf_isom_check_position_list(s, moof->TrackList, &position);

	return GF_OK;
}
| 0
|
248,759
|
/* Replace the heap string at *str with a copy of newstr.
 *
 * The previous value is freed first.  A NULL newstr now clears *str to
 * NULL instead of passing NULL to strdup() (which is undefined
 * behavior).  strdup() itself (POSIX, not ISO C) is replaced with
 * malloc+memcpy.  On allocation failure *str is left NULL, matching the
 * old strdup()-failure outcome.  The caller owns (and must free) *str. */
static void strstore(char **str, const char *newstr)
{
	size_t len;

	free(*str);
	*str = NULL;

	if (newstr == NULL)
		return;

	len = strlen(newstr) + 1;
	*str = malloc(len);
	if (*str != NULL)
		memcpy(*str, newstr, len);
}
| 0
|
247,668
|
// Accessor: the server-certificate digest this configuration expects.
const std::string& expectedServerCertDigest() const { return expected_server_cert_digest_; }
| 0
|
317,014
|
/* Non-zero when peer labeling is active: either the always-network
 * policy capability is set, or one of the NetLabel / XFRM providers
 * is enabled. */
static int selinux_peerlbl_enabled(void)
{
	if (selinux_policycap_alwaysnetwork())
		return 1;
	return netlbl_enabled() || selinux_xfrm_enabled();
}
| 0
|
234,755
|
/*
 * Scan @device's dev-extent items for a hole of at least @num_bytes,
 * searching forward from @search_start (clamped by
 * dev_extent_search_start()) up to the device's total size.
 *
 * On success (return 0) *start is the start of a suitable hole; if @len
 * is non-NULL it receives the hole size.  Returns -ENOSPC when no hole
 * is large enough (then *start/*len describe the largest hole found),
 * -ENOMEM on path allocation failure, or a negative errno from the tree
 * search.  The search runs on the commit root without locking.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	search_start = dev_extent_search_start(device, search_start);
	/* Zoned devices require zone-aligned allocation sizes. */
	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	max_hole_start = search_start;
	max_hole_size = 0;
again:
	/* Retried after dev_extent_hole_check() adjusted search_start. */
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}
	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;
	/* Walk forward over this device's extents, tracking gaps. */
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);
		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			break;
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;
		if (key.offset > search_start) {
			/* Gap between the previous extent end and this one. */
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);
			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}
			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}
	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			/* Hole was adjusted forward: restart the scan. */
			btrfs_release_path(path);
			goto again;
		}
		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}
	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
| 0
|
427,179
|
/*
** Check whether, in a multiple assignment, the upvalue/local variable
** being assigned now ('v') is also used as the table or index of a
** previous pending assignment in the list 'lh'. If so, redirect those
** uses to a safe copy of the original value, saved in register 'extra'.
*/
static void check_conflict (LexState *ls, struct LHS_assign *lh, expdesc *v) {
  FuncState *fs = ls->fs;
  int extra = fs->freereg;  /* eventual position to save local variable */
  int conflict = 0;
  for (; lh; lh = lh->prev) {  /* check all previous assignments */
    if (vkisindexed(lh->v.k)) {  /* assignment to table field? */
      if (lh->v.k == VINDEXUP) {  /* is table an upvalue? */
        if (v->k == VUPVAL && lh->v.u.ind.t == v->u.info) {
          conflict = 1;  /* table is the upvalue being assigned now */
          lh->v.k = VINDEXSTR;
          lh->v.u.ind.t = extra;  /* assignment will use safe copy */
        }
      }
      else {  /* table is a register */
        if (v->k == VLOCAL && lh->v.u.ind.t == v->u.var.ridx) {
          conflict = 1;  /* table is the local being assigned now */
          lh->v.u.ind.t = extra;  /* assignment will use safe copy */
        }
        /* is index the local being assigned? */
        if (lh->v.k == VINDEXED && v->k == VLOCAL &&
            lh->v.u.ind.idx == v->u.var.ridx) {
          conflict = 1;
          lh->v.u.ind.idx = extra;  /* previous assignment will use safe copy */
        }
      }
    }
  }
  if (conflict) {
    /* copy upvalue/local value to a temporary (in position 'extra') */
    if (v->k == VLOCAL)
      luaK_codeABC(fs, OP_MOVE, extra, v->u.var.ridx, 0);
    else
      luaK_codeABC(fs, OP_GETUPVAL, extra, v->u.info, 0);
    luaK_reserveregs(fs, 1);
  }
}
| 0
|
436,059
|
/* Decide whether @head (and its linked requests) match the cancellation
 * criteria: optionally owned by @task, and — unless @cancel_all — with at
 * least one in-flight request somewhere in the link chain. */
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
{
	struct io_kiocb *cur;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(cur, head) {
		if (cur->flags & REQ_F_INFLIGHT)
			return true;
	}

	return false;
}
| 0
|
275,992
|
/* Produce the compressed form of a public key: one prefix byte (2 or 3,
 * chosen by the low bit of the final y byte) followed by the x
 * coordinate. */
void uECC_compress(const uint8_t *public_key, uint8_t *compressed, uECC_Curve curve) {
    wordcount_t i;

    /* Copy the x coordinate into bytes 1..num_bytes. */
    for (i = curve->num_bytes; i > 0; --i) {
        compressed[i] = public_key[i - 1];
    }
#if uECC_VLI_NATIVE_LITTLE_ENDIAN
    compressed[0] = 2 + (public_key[curve->num_bytes] & 0x01);
#else
    compressed[0] = 2 + (public_key[curve->num_bytes * 2 - 1] & 0x01);
#endif
}
| 0
|
210,453
|
/*
 * Print one page on the HP JetP3852.  Each rendered scan line is padded
 * to an 8-byte boundary, transposed into three colour planes, bit
 * complemented, and streamed to the printer; runs of blank lines become
 * "dot skip" escape sequences.  Returns 0 on success or a rangecheck
 * error when the rendered line size exceeds the working buffer.
 */
jetp3852_print_page(gx_device_printer *pdev, gp_file *prn_stream)
{
#define DATA_SIZE (LINE_SIZE * 8)

    unsigned int cnt_2prn;
    unsigned int count, tempcnt;
    unsigned char vtp, cntc1, cntc2;
    int line_size_color_plane;

    byte data[DATA_SIZE];
    byte plane_data[LINE_SIZE * 3];

    /* Set initial condition for printer */
    gp_fputs("\033@", prn_stream);

    /* Send each scan line in turn */
    {
        int lnum;
        int line_size = gdev_mem_bytes_per_scan_line((gx_device *)pdev);
        int num_blank_lines = 0;

        if (line_size > DATA_SIZE) {
            emprintf2(pdev->memory, "invalid resolution and/or width gives line_size = %d, max. is %d\n",
                      line_size, DATA_SIZE);
            return_error(gs_error_rangecheck);
        }
        for (lnum = 0; lnum < pdev->height; lnum++) {
            byte *end_data = data + line_size;

            gdev_prn_copy_scan_lines(pdev, lnum, (byte *)data, line_size);

            /* Remove trailing 0s. */
            while (end_data > data && end_data[-1] == 0)
                end_data--;
            if (end_data == data) {
                /* Blank line */
                num_blank_lines++;
            } else {
                int i;
                byte *odp;
                byte *row;

                /* Pad with 0s up to the next 8-byte boundary.  The old
                 * code wrote a fixed 7 bytes here, which overruns data[]
                 * whenever the used length is within 7 bytes of
                 * DATA_SIZE (buffer overflow on attacker-chosen page
                 * sizes).  DATA_SIZE is a multiple of 8, so rounding up
                 * can never leave the buffer. */
                {
                    size_t used = (size_t)(end_data - data);
                    size_t pad = (8 - (used & 7)) & 7;

                    memset(end_data, 0, pad);
                }

                /* Transpose the data to get pixel planes. */
                for (i = 0, odp = plane_data; i < DATA_SIZE;
                     i += 8, odp++
                    ) { /* The following is for 16-bit machines */
#define spread3(c)\
    { 0, c, c*0x100, c*0x101, c*0x10000L, c*0x10001L, c*0x10100L, c*0x10101L }
                    static ulong spr40[8] = spread3(0x40);
                    static ulong spr8[8] = spread3(8);
                    static ulong spr2[8] = spread3(2);
                    register byte *dp = data + i;
                    register ulong pword =
                        (spr40[dp[0]] << 1) +
                        (spr40[dp[1]]) +
                        (spr40[dp[2]] >> 1) +
                        (spr8[dp[3]] << 1) +
                        (spr8[dp[4]]) +
                        (spr8[dp[5]] >> 1) +
                        (spr2[dp[6]]) +
                        (spr2[dp[7]] >> 1);

                    odp[0] = (byte)(pword >> 16);
                    odp[LINE_SIZE] = (byte)(pword >> 8);
                    odp[LINE_SIZE*2] = (byte)(pword);
                }

                /* Skip blank lines if any */
                if (num_blank_lines > 0) {
                    /* Do "dot skips" */
                    while (num_blank_lines > 255) {
                        gp_fputs("\033e\377", prn_stream);
                        num_blank_lines -= 255;
                    }
                    vtp = num_blank_lines;
                    gp_fprintf(prn_stream, "\033e%c", vtp);
                    num_blank_lines = 0;
                }

                /* Transfer raster graphics in the order R, G, B. */
                /* Apparently it is stored in B, G, R */
                /* Calculate the amount of data to send by what */
                /* Ghostscript tells us the scan line_size in (bytes) */
                count = line_size / 3;
                line_size_color_plane = count / 3;
                cnt_2prn = line_size_color_plane * 3 + 5;
                tempcnt = cnt_2prn;
                cntc1 = (tempcnt & 0xFF00) >> 8;
                cntc2 = (tempcnt & 0x00FF);
                gp_fprintf(prn_stream, "\033[O%c%c\200\037", cntc2, cntc1);
                gp_fputc('\000', prn_stream);
                gp_fputs("\124\124", prn_stream);

                for (row = plane_data + LINE_SIZE * 2, i = 0;
                     i < 3; row -= LINE_SIZE, i++) {
                    int jj;
                    byte ctemp;

                    odp = row;
                    /* Complement bytes */
                    for (jj = 0; jj < line_size_color_plane; jj++) {
                        ctemp = *odp;
                        *odp++ = ~ctemp;
                    }
                    gp_fwrite(row, sizeof(byte),
                              line_size_color_plane, prn_stream);
                }
            }
        }
    }

    /* eject page */
    gp_fputs("\014", prn_stream);
    return 0;
}
| 1
|
294,640
|
mk_ary_of_str(long len, const char *a[])
{
    /* Build a frozen Ruby array of frozen US-ASCII strings; NULL entries
     * in the C array become nil elements. */
    VALUE ary = rb_ary_new2(len);
    long i;

    for (i = 0; i < len; i++) {
        VALUE elem = Qnil;

        if (a[i]) {
            elem = rb_usascii_str_new2(a[i]);
            rb_obj_freeze(elem);
        }
        rb_ary_push(ary, elem);
    }
    rb_obj_freeze(ary);
    return ary;
}
| 0
|
336,610
|
/* Whether audio-playback compression is enabled in the server config. */
bool reds_config_get_playback_compression(RedsState *reds)
{
    return reds->config->playback_compression;
}
| 0
|
196,894
|
/* Rabin-Williams private-key operation: a (tweaked) modular square root
 * of x mod n, computed via CRT over p and q.  The input is blinded with
 * r^2 (r random, invertible mod n) before the root and unblinded with
 * r^-1 afterwards; the final ApplyFunction(y) != x check rejects
 * computational faults.
 *
 * NOTE(review): this matches the pre-5.6.3 Crypto++ implementation.  The
 * blinding used here was reported as insufficient against timing attacks
 * (CVE-2015-2141; upstream reworked this routine in 5.6.3) — confirm
 * against current upstream rw.cpp before relying on this code. */
Integer InvertibleRWFunction::CalculateInverse(RandomNumberGenerator &rng, const Integer &x) const
{
	DoQuickSanityCheck();
	ModularArithmetic modn(m_n);
	Integer r, rInv;
	do { // do this in a loop for people using small numbers for testing
		r.Randomize(rng, Integer::One(), m_n - Integer::One());
		rInv = modn.MultiplicativeInverse(r);
	} while (rInv.IsZero());
	Integer re = modn.Square(r);
	re = modn.Multiply(re, x);			// blind
	Integer cp=re%m_p, cq=re%m_q;
	// Adjust the residues so both become quadratic residues (RW tweak).
	if (Jacobi(cp, m_p) * Jacobi(cq, m_q) != 1)
	{
		cp = cp.IsOdd() ? (cp+m_p) >> 1 : cp >> 1;
		cq = cq.IsOdd() ? (cq+m_q) >> 1 : cq >> 1;
	}
	// The two square roots are independent; compute them in parallel.
	#pragma omp parallel
	#pragma omp sections
	{
		#pragma omp section
		cp = ModularSquareRoot(cp, m_p);
		#pragma omp section
		cq = ModularSquareRoot(cq, m_q);
	}
	Integer y = CRT(cq, m_q, cp, m_p, m_u);
	y = modn.Multiply(y, rInv);				// unblind
	y = STDMIN(y, m_n-y);
	if (ApplyFunction(y) != x)				// check
		throw Exception(Exception::OTHER_ERROR, "InvertibleRWFunction: computational error during private key operation");
	return y;
}
| 1
|
384,806
|
mkdir_recurse(char_u *dir, int prot)
{
    char_u	*tail;
    char_u	*parent;
    int		result = FAIL;

    // Nothing left to create once we reach "/" or "c:/".
    tail = gettail_sep(dir);
    if (tail <= get_past_head(dir))
	return OK;

    // Work on the parent directory; create it first if needed.
    parent = vim_strnsave(dir, tail - dir);
    if (parent == NULL)
	return FAIL;

    if (mch_isdir(parent))
	result = OK;
    else if (mkdir_recurse(parent, prot) == OK)
	result = vim_mkdir_emsg(parent, prot);

    vim_free(parent);
    return result;
}
| 0
|
405,377
|
/* Build a "dummy" bundle for a policy whose states are not resolved yet.
 * The returned xfrm_dst discards input and routes output through
 * xdst_queue_output with DST_XFRM_QUEUE set, so packets are held until
 * resolution completes.  When queueing does not apply (no
 * XFRM_LOOKUP_QUEUE flag, sysctl_larval_drop set, or num_xfrms <= 0)
 * the freshly allocated xdst is returned untouched.  Returns an
 * ERR_PTR on allocation or fill failure. */
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;
	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;
	/* No queueing requested/allowed: hand back the bare xdst. */
	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;
	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	/* First hold: xdst->route keeps a reference on the original dst. */
	dst_hold(dst);
	xdst->route = dst;
	dst_copy_metrics(dst1, dst);
	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;
	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;
	/* Second hold: the child/path link takes its own reference. */
	dst_hold(dst);
	xfrm_dst_set_child(xdst, dst);
	xdst->path = dst;
	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;
	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;
out:
	return xdst;
free_dst:
	/* Releasing dst1 tears down the xdst (and its held references). */
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}
| 0
|
443,301
|
ex_find(exarg_T *eap)
{
    char_u	*found;
    int		n;

    // Locate the first match for the argument along 'path'.
    found = find_file_in_path(eap->arg, (int)STRLEN(eap->arg), FNAME_MESS,
						      TRUE, curbuf->b_ffname);

    // With a count, keep searching: the file may occur several times in
    // 'path' and the count selects which occurrence to edit.
    if (eap->addr_count > 0)
    {
	for (n = eap->line2; found != NULL && --n > 0; )
	{
	    vim_free(found);
	    found = find_file_in_path(NULL, 0, FNAME_MESS,
						      FALSE, curbuf->b_ffname);
	}
    }

    if (found == NULL)
	return;
    eap->arg = found;
    do_exedit(eap, NULL);
    vim_free(found);
}
| 0
|
224,482
|
/* Parses a TTML time expression in @value into milliseconds.
 *
 * Supported forms (TTML offset-time and clock-time):
 *   "<n>t"                      ticks (divided by ctx->tick_rate when set)
 *   "<x>h" / "<x>m" / "<x>s" / "<x>ms"   fractional metric offsets
 *   "<f>f" or "<f>.<sf>f"       frames (plus optional subframes)
 *   "h:m:s", "h:m:s.ms", "h:m:s:f", "h:m:s:f.sf"
 * Missing frame/subframe rates fall back to 25 fps / 1 subframe, with a
 * warning.  Returns -1 when none of the forms match.  @value is
 * temporarily patched (metric suffix removed) but restored on return.
 *
 * Fixes vs the previous version: scaling is now done in 64 bits — the
 * old "(s64)(atoi(value) * 1000)" overflowed in int for tick counts
 * above ~2.1M, and "(h*3600 + m*60 + s)*1000" overflowed in u32 for
 * very large hour values. */
static s64 ttml_get_timestamp(GF_TXTIn *ctx, char *value)
{
	u32 h, m, s, ms, f, sf;
	s64 ts = -1;
	u32 len = (u32) strlen(value);
	//tick metric - cannot be fractional
	if (len && (value[len-1]=='t')) {
		value[len-1] = 0;
		/* widen BEFORE multiplying to avoid int overflow */
		ts = (s64) atoi(value) * 1000;
		value[len-1] = 't';
		if (ctx->tick_rate)
			ts /= ctx->tick_rate;
	}
	//hours metric, can be fractional
	else if (len && (value[len-1]=='h')) {
		value[len-1] = 0;
		ts = (s64) (atof(value) * 1000 * 3600);
		value[len-1] = 'h';
	}
	//minutes metric, can be fractional
	else if (len && (value[len-1]=='m')) {
		value[len-1] = 0;
		ts = (s64) (atof(value) * 1000 * 60);
		value[len-1] = 'm';
	}
	else if (len && (value[len-1]=='s')) {
		//milliseconds metric, can be fractional but we work at 1ms clock resolution anyway
		if ((len > 1) && (value[len-2]=='m')) {
			value[len-2] = 0;
			ts = (s64) (atof(value));
			value[len-2] = 'm';
		}
		//seconds metric, can be fractional
		else {
			value[len-1] = 0;
			ts = (s64) (atof(value) * 1000);
			value[len-1] = 's';
		}
	}
	//frames metric, can be fractional
	else if (len && (value[len-1]=='f')) {
		f = sf = 0;
		value[len-1] = 0;
		if (sscanf(value, "%u.%u", &f, &sf) != 2) {
			sscanf(value, "%u", &f);
			sf = 0;
		}
		value[len-1] = 'f';
		if (!ctx->ttml_fps_num) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[TTML EBU-TTD] time indicates frames but no frame rate set, assuming 25 FPS\n"));
			ctx->ttml_fps_num = 25;
			ctx->ttml_fps_den = 1;
		}
		if (sf && !ctx->ttml_sfps) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[TTML EBU-TTD] time indicates subframes but no subFrameRate set, assuming 1\n"));
			ctx->ttml_sfps = 1;
		}
		ts = ((s64) 1000 * f * ctx->ttml_fps_den) / ctx->ttml_fps_num;
		if (sf)
			ts += ((s64) 1000 * sf * ctx->ttml_fps_den / ctx->ttml_sfps) / ctx->ttml_fps_num;
	}
	/* NOTE(review): the fractional part below is taken as raw
	 * milliseconds regardless of digit count ("1:2:3.5" -> 5 ms, not
	 * 500 ms) — confirm against the intended TTML fraction handling. */
	else if (sscanf(value, "%u:%u:%u.%u", &h, &m, &s, &ms) == 4) {
		ts = ((s64) h*3600 + m*60 + s)*1000 + ms;
	}
	else if (sscanf(value, "%u:%u:%u:%u.%u", &h, &m, &s, &f, &sf) == 5) {
		ts = ((s64) h*3600 + m*60 + s)*1000;
		if (!ctx->ttml_fps_num) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[TTML EBU-TTD] time indicates frames but no frame rate set, assuming 25 FPS\n"));
			ctx->ttml_fps_num = 25;
			ctx->ttml_fps_den = 1;
		}
		if (!ctx->ttml_sfps) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[TTML EBU-TTD] time indicates subframes but no subFrameRate set, assuming 1\n"));
			ctx->ttml_sfps = 1;
		}
		ts += ((s64) 1000 * f * ctx->ttml_fps_den) / ctx->ttml_fps_num;
		ts += ((s64) 1000 * sf * ctx->ttml_fps_den / ctx->ttml_sfps) / ctx->ttml_fps_num;
	}
	else if (sscanf(value, "%u:%u:%u:%u", &h, &m, &s, &f) == 4) {
		ts = ((s64) h*3600 + m*60 + s)*1000;
		if (!ctx->ttml_fps_num) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[TTML EBU-TTD] time indicates frames but no frame rate set, assuming 25 FPS\n"));
			ctx->ttml_fps_num = 25;
			ctx->ttml_fps_den = 1;
		}
		ts += ((s64) 1000 * f * ctx->ttml_fps_den) / ctx->ttml_fps_num;
	}
	else if (sscanf(value, "%u:%u:%u", &h, &m, &s) == 3) {
		ts = ((s64) h*3600 + m*60 + s)*1000;
	}
	return ts;
}
| 0
|
264,373
|
// Eigen::half overload: half-precision tensor contents are stored in the
// proto's repeated int32 `half_val` field, so expose that field for
// mutation.  (NOTE(review): a `template <>` specialization header is
// expected immediately above this definition — outside this view.)
inline protobuf::RepeatedField<int32>* MutableTensorProtoData<Eigen::half>(
TensorProto* t) {
return t->mutable_half_val();
}
| 0
|
413,695
|
/* Name of the register involved in a variable access: the destination
 * register for writes, the first source register otherwise.  NULL when
 * the operand or its register is absent. */
static const char *reg_name_for_access(RAnalOp* op, RAnalVarAccessType type) {
	if (type == R_ANAL_VAR_ACCESS_TYPE_WRITE) {
		return (op->dst && op->dst->reg) ? op->dst->reg->name : NULL;
	}
	return (op->src[0] && op->src[0]->reg) ? op->src[0]->reg->name : NULL;
}
| 0
|
430,435
|
/*
 * Parse an IP tunnel netlink attribute block into @match (key or mask,
 * per @is_mask).
 *
 * Every nested attribute is range- and length-checked, then copied into
 * the tunnel key via SW_FLOW_KEY_PUT while flag bits accumulate in
 * tun_flags.  At most one metadata-options block (Geneve, VXLAN or
 * ERSPAN) is accepted; its attribute type is remembered in opts_type.
 * After the walk, cross-attribute consistency is enforced: no trailing
 * bytes, no IPv4/IPv6 mix, and (for keys only) a dst address and TTL are
 * required, with a restricted field set in IPv4 bridge mode.
 *
 * Returns the options attribute type parsed (0 if none) on success, or a
 * negative errno with the problem logged when @log is set.
 */
static int ip_tun_from_nlattr(const struct nlattr *attr,
			      struct sw_flow_match *match, bool is_mask,
			      bool log)
{
	bool ttl = false, ipv4 = false, ipv6 = false;
	bool info_bridge_mode = false;
	__be16 tun_flags = 0;
	int opts_type = 0;	/* which (single) options block was seen */
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int err;

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Tunnel attr %d out of range max %d",
				  type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}
		/* Per-type expected-length table check before any access. */
		if (!check_attr_len(nla_len(a),
				    ovs_tunnel_key_lens[type].len)) {
			OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
				  type, nla_len(a), ovs_tunnel_key_lens[type].len);
			return -EINVAL;
		}
		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
					nla_get_in_addr(a), is_mask);
			ipv4 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
					nla_get_in_addr(a), is_mask);
			ipv4 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
					nla_get_in6_addr(a), is_mask);
			ipv6 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
					nla_get_in6_addr(a), is_mask);
			ipv6 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ttl,
					nla_get_u8(a), is_mask);
			ttl = true;	/* required later for non-mask keys */
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.tp_src,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_DST:
			SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_OAM:
			tun_flags |= TUNNEL_OAM;
			break;
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			/* Only one options block allowed per tunnel key. */
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}
			err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;
			tun_flags |= TUNNEL_GENEVE_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}
			err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;
			tun_flags |= TUNNEL_VXLAN_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_PAD:
			break;
		case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}
			err = erspan_tun_opt_from_nlattr(a, match, is_mask,
							 log);
			if (err)
				return err;
			tun_flags |= TUNNEL_ERSPAN_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE:
			info_bridge_mode = true;
			ipv4 = true;
			break;
		default:
			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
				  type);
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
	if (is_mask)
		SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
	else
		SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
				false);

	/* Leftover bytes mean a malformed nested attribute stream. */
	if (rem > 0) {
		OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (ipv4 && ipv6) {
		OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
		return -EINVAL;
	}

	/* Keys (not masks) must be complete and self-consistent. */
	if (!is_mask) {
		if (!ipv4 && !ipv6) {
			OVS_NLERR(log, "IP tunnel dst address not specified");
			return -EINVAL;
		}
		if (ipv4) {
			if (info_bridge_mode) {
				/* Bridge mode permits only the tunnel ID. */
				if (match->key->tun_key.u.ipv4.src ||
				    match->key->tun_key.u.ipv4.dst ||
				    match->key->tun_key.tp_src ||
				    match->key->tun_key.tp_dst ||
				    match->key->tun_key.ttl ||
				    match->key->tun_key.tos ||
				    tun_flags & ~TUNNEL_KEY) {
					OVS_NLERR(log, "IPv4 tun info is not correct");
					return -EINVAL;
				}
			} else if (!match->key->tun_key.u.ipv4.dst) {
				OVS_NLERR(log, "IPv4 tunnel dst address is zero");
				return -EINVAL;
			}
		}
		if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
			OVS_NLERR(log, "IPv6 tunnel dst address is zero");
			return -EINVAL;
		}

		if (!ttl && !info_bridge_mode) {
			OVS_NLERR(log, "IP tunnel TTL not specified.");
			return -EINVAL;
		}
	}

	return opts_type;
}
| 0
|
424,934
|
/* Select the APMG power source: VAUX when requested and the PCI function
 * can generate PME from D3cold, otherwise VMAIN.  A no-op on hardware
 * without APMG support. */
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	bool use_vaux;

	if (trans->cfg->apmg_not_supported)
		return;

	use_vaux = vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold);
	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       use_vaux ? APMG_PS_CTRL_VAL_PWR_SRC_VAUX :
					  APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
| 0
|
401,487
|
/*
 * Pull up to @nbytes of entropy from pool @r into @buf, honoring the
 * @min/@reserved accounting constraints.
 *
 * In FIPS mode the very first extraction primes r->last_data before any
 * bytes reach the caller; the pool lock is dropped around extract_buf()
 * (NOTE(review): presumably because extract_buf() takes r->lock itself --
 * confirm).  account() then debits the pool and may reduce @nbytes.
 *
 * Returns the number of bytes actually extracted.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			/* Mark primed before unlocking so concurrent callers
			 * do not race to prime it twice. */
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}
| 0
|
318,967
|
/*
 * "test_option_not_set({name})" function: reset the was-set flag of an
 * option.  Errors out on a non-string argument or an unknown option name.
 */
f_test_option_not_set(typval_T *argvars, typval_T *rettv UNUSED)
{
    char_u	*name;

    if (in_vim9script() && check_for_string_arg(argvars, 0) == FAIL)
	return;

    if (argvars[0].v_type != VAR_STRING)
    {
	emsg(_(e_invalid_argument));
	return;
    }

    name = tv_get_string(&argvars[0]);
    if (reset_option_was_set(name) == FAIL)
	semsg(_(e_invalid_argument_str), name);
}
| 0
|
256,156
|
// Load two bfloat16 scalars from *data into broadcast packets *l1 and *l2.
// When the packet type holds at least two operands, a single
// ConvertTwoBfloat16ToFloat call covers both values and the cursor is
// advanced by 2 here; otherwise two single-scalar loads are issued
// (NOTE(review): presumably LoadSingleScalar advances the cursor itself --
// confirm).
ALWAYS_INLINE void LoadTwoScalars(const bfloat16** data, Packet* l1,
                                  Packet* l2) {
  if (kNumOperands >= 2) {
    auto tmp = ConvertTwoBfloat16ToFloat(*data);
    *l1 = Eigen::internal::pbroadcast_first<Packet>(tmp);
    *l2 = Eigen::internal::pbroadcast_second<Packet>(tmp);
    *data += 2;
  } else {
    LoadSingleScalar(data, l1);
    LoadSingleScalar(data, l2);
  }
}
| 0
|
247,602
|
// Read-only accessor for expected_requested_server_name_ (returned by
// const reference; valid only while this object lives).
const std::string& expectedRequestedServerName() const { return expected_requested_server_name_; }
| 0
|
294,522
|
/*
 * Core of Date#new_start([start]): duplicate the receiver with a new
 * calendar-reform start.  With no argument DEFAULT_SG is used; otherwise
 * val2sg() converts argv[0] into sg (NOTE(review): val2sg is presumably a
 * macro that assigns into sg, since sg is passed by value -- confirm).
 */
d_lite_new_start(int argc, VALUE *argv, VALUE self)
{
    VALUE vsg;
    double sg;

    /* Accept zero or one optional argument. */
    rb_scan_args(argc, argv, "01", &vsg);

    sg = DEFAULT_SG;
    if (argc >= 1)
	val2sg(vsg, sg);

    return dup_obj_with_new_start(self, sg);
}
| 0
|
477,304
|
/* Pack the three key-slot indices into c->key.keys, each masked to
 * KEY_MASK: passive in the top KEY_BITS field, active in the middle,
 * pending in the low bits.  The old/new state transition is dumped via
 * pr_debug.
 *
 * NOTE(review): no locking is visible here -- presumably the caller
 * serializes updates to c->key; confirm.
 */
static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending)
{
	struct tipc_key old = c->key;	/* snapshot for the debug dump */
	char buf[32];

	c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
		      ((new_active & KEY_MASK) << (KEY_BITS)) |
		      ((new_pending & KEY_MASK));

	pr_debug("%s: key changing %s ::%pS\n", c->name,
		 tipc_key_change_dump(old, c->key, buf),
		 __builtin_return_address(0));
}
| 0
|
489,169
|
/* Build a HEARTBEAT chunk for @asoc carrying @paylen bytes of @payload
 * (the heartbeat information parameter) and bind it to @transport.
 * Returns the new chunk, or NULL when allocation fails.
 */
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
				       const struct sctp_transport *transport,
				       const void *payload, const size_t paylen)
{
	struct sctp_chunk *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT,
						    0, paylen);

	if (!retval)
		goto nodata;

	/* Cast away the 'const', as this is just telling the chunk
	 * what transport it belongs to.
	 */
	retval->transport = (struct sctp_transport *) transport;
	retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload);

nodata:
	return retval;
}
| 0
|
274,654
|
/* Return the 0-based position of @col within its owning GtkTreeView, or -1
 * when @col is NULL or not attached to a tree view.  The column list from
 * gtk_tree_view_get_columns() is freed before returning (the columns
 * themselves are not owned by the list).
 */
callbacks_get_col_num_from_tree_view_col (GtkTreeViewColumn *col)
{
	GList *cols;
	gint   num;

	g_return_val_if_fail ( col != NULL, -1 );
	g_return_val_if_fail ( col->tree_view != NULL, -1 );

	cols = gtk_tree_view_get_columns(GTK_TREE_VIEW(col->tree_view));
	num  = g_list_index(cols, (gpointer) col);
	g_list_free(cols);

	return num;
}
| 0
|
338,062
|
// Emit the binary encoding of |type| to the output stream |o|.
//
// Encoding order matters: compound reference types first (a nullability
// prefix byte followed by the heap type), then RTTs (with or without a
// depth immediate), and finally a single-byte code for each basic type.
void WasmBinaryWriter::writeType(Type type) {
  if (type.isRef() && !type.isBasic()) {
    // Non-basic reference: nullability prefix + heap type.
    if (type.isNullable()) {
      o << S32LEB(BinaryConsts::EncodedType::nullable);
    } else {
      o << S32LEB(BinaryConsts::EncodedType::nonnullable);
    }
    writeHeapType(type.getHeapType());
    return;
  }
  if (type.isRtt()) {
    auto rtt = type.getRtt();
    if (rtt.hasDepth()) {
      // rtt_n carries an explicit depth immediate.
      o << S32LEB(BinaryConsts::EncodedType::rtt_n);
      o << U32LEB(rtt.depth);
    } else {
      o << S32LEB(BinaryConsts::EncodedType::rtt);
    }
    writeIndexedHeapType(rtt.heapType);
    return;
  }
  int ret = 0;
  TODO_SINGLE_COMPOUND(type);
  switch (type.getBasic()) {
    // None only used for block signatures. TODO: Separate out?
    case Type::none:
      ret = BinaryConsts::EncodedType::Empty;
      break;
    case Type::i32:
      ret = BinaryConsts::EncodedType::i32;
      break;
    case Type::i64:
      ret = BinaryConsts::EncodedType::i64;
      break;
    case Type::f32:
      ret = BinaryConsts::EncodedType::f32;
      break;
    case Type::f64:
      ret = BinaryConsts::EncodedType::f64;
      break;
    case Type::v128:
      ret = BinaryConsts::EncodedType::v128;
      break;
    case Type::funcref:
      ret = BinaryConsts::EncodedType::funcref;
      break;
    case Type::externref:
      ret = BinaryConsts::EncodedType::externref;
      break;
    case Type::anyref:
      ret = BinaryConsts::EncodedType::anyref;
      break;
    case Type::eqref:
      ret = BinaryConsts::EncodedType::eqref;
      break;
    case Type::i31ref:
      ret = BinaryConsts::EncodedType::i31ref;
      break;
    case Type::dataref:
      ret = BinaryConsts::EncodedType::dataref;
      break;
    default:
      WASM_UNREACHABLE("unexpected type");
  }
  o << S32LEB(ret);
}
| 0
|
359,595
|
/* "show bgp memory": dump allocation counts and approximate memory usage
 * for the main BGP object types.  Counts come from the per-memory-type
 * allocator statistics (mtype_stats_alloc) or dedicated counters; sizes
 * are count * sizeof(struct), so they exclude allocator overhead and any
 * variable-length payload.  Zero-count lines are suppressed for optional
 * types; core types (RIB nodes, routes, attributes, AS paths, peers) are
 * always printed.
 */
DEFUN (show_bgp_memory,
       show_bgp_memory_cmd,
       "show bgp memory",
       SHOW_STR
       BGP_STR
       "Global BGP memory statistics\n")
{
  char memstrbuf[MTYPE_MEMSTR_LEN];
  unsigned long count;

  /* RIB related usage stats */
  count = mtype_stats_alloc (MTYPE_BGP_NODE);
  vty_out (vty, "%ld RIB nodes, using %s of memory%s", count,
           mtype_memstr (memstrbuf, sizeof (memstrbuf),
                         count * sizeof (struct bgp_node)),
           VTY_NEWLINE);

  count = mtype_stats_alloc (MTYPE_BGP_ROUTE);
  vty_out (vty, "%ld BGP routes, using %s of memory%s", count,
           mtype_memstr (memstrbuf, sizeof (memstrbuf),
                         count * sizeof (struct bgp_info)),
           VTY_NEWLINE);

  if ((count = mtype_stats_alloc (MTYPE_BGP_ROUTE_EXTRA)))
    vty_out (vty, "%ld BGP route ancillaries, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct bgp_info_extra)),
             VTY_NEWLINE);

  if ((count = mtype_stats_alloc (MTYPE_BGP_STATIC)))
    vty_out (vty, "%ld Static routes, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct bgp_static)),
             VTY_NEWLINE);

  /* Adj-In/Out */
  if ((count = mtype_stats_alloc (MTYPE_BGP_ADJ_IN)))
    vty_out (vty, "%ld Adj-In entries, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct bgp_adj_in)),
             VTY_NEWLINE);
  if ((count = mtype_stats_alloc (MTYPE_BGP_ADJ_OUT)))
    vty_out (vty, "%ld Adj-Out entries, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct bgp_adj_out)),
             VTY_NEWLINE);

  if ((count = mtype_stats_alloc (MTYPE_BGP_NEXTHOP_CACHE)))
    vty_out (vty, "%ld Nexthop cache entries, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct bgp_nexthop_cache)),
             VTY_NEWLINE);

  if ((count = mtype_stats_alloc (MTYPE_BGP_DAMP_INFO)))
    vty_out (vty, "%ld Dampening entries, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct bgp_damp_info)),
             VTY_NEWLINE);

  /* Attributes */
  count = attr_count();
  vty_out (vty, "%ld BGP attributes, using %s of memory%s", count,
           mtype_memstr (memstrbuf, sizeof (memstrbuf),
                         count * sizeof(struct attr)),
           VTY_NEWLINE);
  if ((count = mtype_stats_alloc (MTYPE_ATTR_EXTRA)))
    vty_out (vty, "%ld BGP extra attributes, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof(struct attr_extra)),
             VTY_NEWLINE);

  if ((count = attr_unknown_count()))
    vty_out (vty, "%ld unknown attributes%s", count, VTY_NEWLINE);

  /* AS_PATH attributes */
  count = aspath_count ();
  vty_out (vty, "%ld BGP AS-PATH entries, using %s of memory%s", count,
           mtype_memstr (memstrbuf, sizeof (memstrbuf),
                         count * sizeof (struct aspath)),
           VTY_NEWLINE);
  count = mtype_stats_alloc (MTYPE_AS_SEG);
  vty_out (vty, "%ld BGP AS-PATH segments, using %s of memory%s", count,
           mtype_memstr (memstrbuf, sizeof (memstrbuf),
                         count * sizeof (struct assegment)),
           VTY_NEWLINE);

  /* Other attributes */
  if ((count = community_count ()))
    vty_out (vty, "%ld BGP community entries, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct community)),
             VTY_NEWLINE);
  if ((count = mtype_stats_alloc (MTYPE_ECOMMUNITY)))
    vty_out (vty, "%ld BGP community entries, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct ecommunity)),
             VTY_NEWLINE);

  if ((count = mtype_stats_alloc (MTYPE_CLUSTER)))
    vty_out (vty, "%ld Cluster lists, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct cluster_list)),
             VTY_NEWLINE);

  /* Peer related usage */
  count = mtype_stats_alloc (MTYPE_BGP_PEER);
  vty_out (vty, "%ld peers, using %s of memory%s", count,
           mtype_memstr (memstrbuf, sizeof (memstrbuf),
                         count * sizeof (struct peer)),
           VTY_NEWLINE);

  if ((count = mtype_stats_alloc (MTYPE_PEER_GROUP)))
    vty_out (vty, "%ld peer groups, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct peer_group)),
             VTY_NEWLINE);

  /* Other */
  if ((count = mtype_stats_alloc (MTYPE_HASH)))
    vty_out (vty, "%ld hash tables, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct hash)),
             VTY_NEWLINE);
  if ((count = mtype_stats_alloc (MTYPE_HASH_BACKET)))
    vty_out (vty, "%ld hash buckets, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (struct hash_backet)),
             VTY_NEWLINE);
  if ((count = mtype_stats_alloc (MTYPE_BGP_REGEXP)))
    vty_out (vty, "%ld compiled regexes, using %s of memory%s", count,
             mtype_memstr (memstrbuf, sizeof (memstrbuf),
                           count * sizeof (regex_t)),
             VTY_NEWLINE);
  return CMD_SUCCESS;
}
| 0
|
263,393
|
// Read the "axis" attribute, validate it against [-rank, rank), and map a
// negative axis to its non-negative equivalent.
Status GetAxisForPackAndUnpack(InferenceContext* c, int32_t rank_after_pack,
                               int32* axis) {
  TF_RETURN_IF_ERROR(c->GetAttr("axis", axis));
  const int32_t rank = rank_after_pack;
  const bool in_range = (*axis >= -rank) && (*axis < rank);
  if (!in_range) {
    return errors::InvalidArgument("Invalid axis: ", *axis, "; must be in [",
                                   -rank, ",", rank, ")");
  }
  if (*axis < 0) *axis += rank;
  return Status::OK();
}
| 0
|
248,256
|
/* Number of options held by @cfg; 0 for a NULL handle. */
DLLIMPORT unsigned int cfg_num(cfg_t *cfg)
{
	return cfg ? (unsigned int)cfg_numopts(cfg->opts) : 0;
}
| 0
|
216,905
|
/*
  Allocate (or reuse) this SELECT's ref_pointer_array.

  The element count sums the various resolver item categories and is
  multiplied by 5 (NOTE(review): presumably one slice per resolution
  phase -- confirm against the users of ref_pointer_array).  The memory
  comes from thd->stmt_arena so that for prepared statements the array
  survives across executions.

  Returns false on success, true on out-of-memory.
*/
bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
{
  /* Hidden bit fields are only needed for DISTINCT without GROUP BY. */
  if (!((options & SELECT_DISTINCT) && !group_list.elements))
    hidden_bit_fields= 0;

  // find_order_in_list() may need some extra space, so multiply by two.
  order_group_num*= 2;

  /*
    We have to create array in prepared statement memory if it is a
    prepared statement
  */
  Query_arena *arena= thd->stmt_arena;
  const uint n_elems= (n_sum_items +
                       n_child_sum_items +
                       item_list.elements +
                       select_n_reserved +
                       select_n_having_items +
                       select_n_where_fields +
                       order_group_num +
                       hidden_bit_fields +
                       fields_in_window_functions) * 5;
  if (!ref_pointer_array.is_null())
  {
    /*
      We need to take 'n_sum_items' into account when allocating the array,
      and this may actually increase during the optimization phase due to
      MIN/MAX rewrite in Item_in_subselect::single_value_transformer.
      In the usual case we can reuse the array from the prepare phase.
      If we need a bigger array, we must allocate a new one.
     */
    if (ref_pointer_array.size() >= n_elems)
      return false;
  }
  /* A bigger array is needed (or none existed): allocate from the arena.
     The old, smaller array (if any) stays on the arena until it is freed
     as a whole. */
  Item **array= static_cast<Item**>(arena->alloc(sizeof(Item*) * n_elems));
  if (array != NULL)
    ref_pointer_array= Ref_ptr_array(array, n_elems);

  return array == NULL;
}
| 1
|
222,892
|
// Collapse every dimension size below -1 to the canonical "unknown" value
// -1, leaving all other dimensions untouched.
static void NormalizeShapeForOutput(TensorShapeProto* shape) {
  const int num_dims = shape->dim_size();
  for (int i = 0; i < num_dims; ++i) {
    const auto size = shape->dim(i).size();
    if (size >= -1) continue;
    VLOG(2) << "Normalizing dimension: " << i << " from " << size << " to -1";
    shape->mutable_dim(i)->set_size(-1);
  }
}
| 0
|
317,140
|
/**
 * smack_file_send_sigiotask - Smack check for SIGIO-style delivery
 * @tsk: task to receive the signal
 * @fown: file-owner structure (always embedded in a struct file)
 * @signum: signal number (not used in the access decision)
 *
 * The access checked is MAY_DELIVER from the file's label to the
 * receiving task's label.  A denial is overridden when the receiving
 * task's credentials carry CAP_MAC_OVERRIDE (read under RCU).  The final
 * decision, including any override, is audit-logged.
 *
 * Returns 0 when delivery is permitted, an error code otherwise.
 */
static int smack_file_send_sigiotask(struct task_struct *tsk,
				     struct fown_struct *fown, int signum)
{
	struct smack_known **blob;
	struct smack_known *skp;
	struct smack_known *tkp = smk_of_task(smack_cred(tsk->cred));
	const struct cred *tcred;
	struct file *file;
	int rc;
	struct smk_audit_info ad;

	/*
	 * struct fown_struct is never outside the context of a struct file
	 */
	file = container_of(fown, struct file, f_owner);

	/* we don't log here as rc can be overriden */
	blob = smack_file(file);
	skp = *blob;
	rc = smk_access(skp, tkp, MAY_DELIVER, NULL);
	rc = smk_bu_note("sigiotask", skp, tkp, MAY_DELIVER, rc);

	/* Privilege check on the target task's creds, under RCU. */
	rcu_read_lock();
	tcred = __task_cred(tsk);
	if (rc != 0 && smack_privileged_cred(CAP_MAC_OVERRIDE, tcred))
		rc = 0;
	rcu_read_unlock();

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, tsk);
	smack_log(skp->smk_known, tkp->smk_known, MAY_DELIVER, rc, &ad);
	return rc;
}
| 0
|
226,227
|
/* Child-box dispatcher for a visual sample entry: routes each recognized
 * child box type to its dedicated field via BOX_FIELD_ASSIGN (which also
 * handles the removal case, per @is_rem).  Purely informational children
 * (pasp/clap/colr/...) are only checked for uniqueness; anything else is
 * silently accepted into the generic child list.
 */
GF_Err video_sample_entry_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
	GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s;
	switch (a->type) {
	case GF_ISOM_BOX_TYPE_ESDS:
		BOX_FIELD_ASSIGN(esd, GF_ESDBox)
		break;
	case GF_ISOM_BOX_TYPE_RINF:
		BOX_FIELD_ASSIGN(rinf, GF_RestrictedSchemeInfoBox)
		break;
	case GF_ISOM_BOX_TYPE_AVCC:
		BOX_FIELD_ASSIGN(avc_config, GF_AVCConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_HVCC:
		BOX_FIELD_ASSIGN(hevc_config, GF_HEVCConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_VVCC:
		BOX_FIELD_ASSIGN(vvc_config, GF_VVCConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_SVCC:
		BOX_FIELD_ASSIGN(svc_config, GF_AVCConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_MVCC:
		BOX_FIELD_ASSIGN(mvc_config, GF_AVCConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_LHVC:
		BOX_FIELD_ASSIGN(lhvc_config, GF_HEVCConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_AV1C:
		BOX_FIELD_ASSIGN(av1_config, GF_AV1ConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_VPCC:
		BOX_FIELD_ASSIGN(vp_config, GF_VPConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_DVCC:
	case GF_ISOM_BOX_TYPE_DVVC:
		BOX_FIELD_ASSIGN(dovi_config, GF_DOVIConfigurationBox)
		break;
	case GF_ISOM_BOX_TYPE_UUID:
		/* Only the iPod extension UUID is captured as a field. */
		if (! memcmp(((GF_UnknownUUIDBox*)a)->uuid, GF_ISOM_IPOD_EXT, 16)) {
			BOX_FIELD_ASSIGN(ipod_ext, GF_UnknownUUIDBox)
		} else {
			return GF_OK;
		}
		break;
	case GF_ISOM_BOX_TYPE_D263:
		BOX_FIELD_ASSIGN(cfg_3gpp, GF_3GPPConfigBox)
		/*for 3GP config, remember sample entry type in config*/
		if (ptr->cfg_3gpp)
			ptr->cfg_3gpp->cfg.type = ptr->type;
		break;

	case GF_ISOM_BOX_TYPE_JP2H:
		BOX_FIELD_ASSIGN(jp2h, GF_J2KHeaderBox)
		return GF_OK;

	case GF_ISOM_BOX_TYPE_PASP:
	case GF_ISOM_BOX_TYPE_CLAP:
	case GF_ISOM_BOX_TYPE_COLR:
	case GF_ISOM_BOX_TYPE_MDCV:
	case GF_ISOM_BOX_TYPE_CLLI:
	case GF_ISOM_BOX_TYPE_CCST:
	case GF_ISOM_BOX_TYPE_AUXI:
	case GF_ISOM_BOX_TYPE_RVCC:
	case GF_ISOM_BOX_TYPE_M4DS:
		/* These must appear at most once; duplicates are an error. */
		if (!is_rem && !gf_isom_box_check_unique(s->child_boxes, a)) {
			ERROR_ON_DUPLICATED_BOX(a, ptr)
		}
		return GF_OK;
	}
	return GF_OK;
}
| 0
|
246,478
|
/* Parse the "local names" subsection of a wasm custom name section from
 * buffer @b, never reading past @bound.  Layout: a LEB128 u32 count
 * followed by that many local-name entries.  On any parse or allocation
 * failure everything built so far is released and NULL is returned.
 */
static inline RBinWasmCustomNameLocalNames *parse_custom_names_local(RBuffer *b, ut64 bound) {
	RBinWasmCustomNameLocalNames *local = R_NEW0 (RBinWasmCustomNameLocalNames);
	if (!local) {
		return NULL;
	}
	if (!consume_u32_r (b, bound, &local->count)) {
		goto beach;
	}

	local->locals = r_list_newf ((RListFree)wasm_custom_name_local_free);
	if (local->locals) {
		size_t i;
		for (i = 0; i < local->count; i++) {
			RBinWasmCustomNameLocalName *local_name = parse_local_name (b, bound);
			/* Free the entry ourselves if the list did not take
			 * ownership (append failure). */
			if (!local_name || !r_list_append (local->locals, local_name)) {
				wasm_custom_name_local_free (local_name);
				goto beach;
			}
		}
		return local;
	}

beach:
	/* Releases local->locals (and its entries) as well as local itself. */
	wasm_custom_local_names_free (local);
	return NULL;
}
| 0
|
508,801
|
/*
  Extract from @cond the subformula built only of items previously marked
  with FULL_EXTRACTION_FL.

  Fully extractable items are cloned (unless @no_top_clones allows
  returning the top item as-is).  For AND/OR conditions a new condition is
  rebuilt recursively from the extractable arguments; extraction flags are
  cleared as items are consumed.  For OR, a single non-extractable branch
  invalidates the whole disjunction.

  Returns the extracted condition, or NULL when nothing can be extracted
  (or on allocation failure).
*/
Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond,
                                                    bool no_top_clones)
{
  if (cond->get_extraction_flag() == FULL_EXTRACTION_FL)
  {
    if (no_top_clones)
      return cond;
    cond->clear_extraction_flag();
    return cond->build_clone(thd, thd->mem_root);
  }
  if (cond->type() == Item::COND_ITEM)
  {
    bool cond_and= false;
    Item_cond *new_cond;
    if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
    {
      cond_and= true;
      new_cond= new (thd->mem_root) Item_cond_and(thd);
    }
    else
      new_cond= new (thd->mem_root) Item_cond_or(thd);
    if (!new_cond)
      return 0;
    List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
    Item *item;
    while ((item=li++))
    {
      /* A non-extractable conjunct is simply dropped; for OR this case
         cannot occur (asserted). */
      if (item->get_extraction_flag() == NO_EXTRACTION_FL)
      {
        DBUG_ASSERT(cond_and);
        item->clear_extraction_flag();
        continue;
      }
      /* Only an AND at every level above permits reusing items unclone. */
      Item *fix= build_cond_for_grouping_fields(thd, item,
                                                no_top_clones & cond_and);
      if (!fix)
      {
        if (cond_and)
          continue;
        /* One unextractable OR branch kills the whole disjunction. */
        break;
      }
      new_cond->argument_list()->push_back(fix, thd->mem_root);
    }

    if (!cond_and && item)
    {
      /* Abandoned OR: clear flags on the remaining branches. */
      while((item= li++))
        item->clear_extraction_flag();
      return 0;
    }
    switch (new_cond->argument_list()->elements)
    {
    case 0:
      return 0;
    case 1:
      return new_cond->argument_list()->head();
    default:
      return new_cond;
    }
  }
  return 0;
}
| 0
|
314,748
|
/* Read @len bytes at offset @off from the CDF backing store: served from
 * the in-memory buffer when it covers the whole range, otherwise via
 * seek+read on the file descriptor.  The leading check rejects off+len
 * combinations that do not round-trip through size_t (overflow/
 * truncation guard).  Returns the byte count on success, -1 on failure
 * (errno is set to EINVAL for the overflow case).
 */
cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len)
{
	size_t siz = (size_t)off + len;

	if ((off_t)(off + len) != (off_t)siz) {
		errno = EINVAL;
		return -1;
	}

	if (info->i_buf != NULL && info->i_len >= siz) {
		(void)memcpy(buf, &info->i_buf[off], len);
		return (ssize_t)len;
	}

	if (info->i_fd == -1)
		return -1;

	if (FINFO_LSEEK_FUNC(info->i_fd, off, SEEK_SET) == (off_t)-1)
		return -1;

	if (FINFO_READ_FUNC(info->i_fd, buf, len) != (ssize_t)len)
		return -1;

	return (ssize_t)len;
}
| 0
|
439,057
|
ModuleExport size_t RegisterRAWImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("R");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw red samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("C");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw cyan samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("G");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw green samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("M");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw magenta samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("B");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw blue samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("Y");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw yellow samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("A");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw alpha samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("O");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw opacity samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("K");
entry->decoder=(DecodeImageHandler *) ReadRAWImage;
entry->encoder=(EncodeImageHandler *) WriteRAWImage;
entry->raw=MagickTrue;
entry->endian_support=MagickTrue;
entry->format_type=ImplicitFormatType;
entry->description=ConstantString("Raw black samples");
entry->module=ConstantString("RAW");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
| 0
|
242,970
|
/*
 * Validate an incoming handshake record and set ssl->in_hslen from its
 * header.
 *
 * For DTLS this additionally enforces the handshake message sequence:
 * future messages are deferred for buffering (MBEDTLS_ERR_SSL_EARLY_MESSAGE),
 * messages from the tail of the previous flight trigger a retransmission,
 * other out-of-sequence messages are silently dropped
 * (MBEDTLS_ERR_SSL_CONTINUE_PROCESSING), and proper fragments are deferred
 * to the reassembly/buffering layer.  For stream TLS, handshake
 * fragmentation is not supported and is rejected.
 *
 * Returns 0 on success or a negative MBEDTLS_ERR_SSL_* code.
 */
int mbedtls_ssl_prepare_handshake_record( mbedtls_ssl_context *ssl )
{
    if( ssl->in_msglen < mbedtls_ssl_hs_hdr_len( ssl ) )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "handshake message too short: %" MBEDTLS_PRINTF_SIZET,
                                    ssl->in_msglen ) );
        return( MBEDTLS_ERR_SSL_INVALID_RECORD );
    }

    ssl->in_hslen = mbedtls_ssl_hs_hdr_len( ssl ) + ssl_get_hs_total_len( ssl );

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "handshake message: msglen ="
                                " %" MBEDTLS_PRINTF_SIZET ", type = %u, hslen = %" MBEDTLS_PRINTF_SIZET,
                                ssl->in_msglen, ssl->in_msg[0], ssl->in_hslen ) );

#if defined(MBEDTLS_SSL_PROTO_DTLS)
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
    {
        int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
        /* DTLS handshake header carries the message sequence at bytes 4-5. */
        unsigned int recv_msg_seq = ( ssl->in_msg[4] << 8 ) | ssl->in_msg[5];

        if( ssl_check_hs_header( ssl ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "invalid handshake header" ) );
            return( MBEDTLS_ERR_SSL_INVALID_RECORD );
        }

        /* Out-of-sequence, or renegotiation attempt after the handshake. */
        if( ssl->handshake != NULL &&
            ( ( ssl->state != MBEDTLS_SSL_HANDSHAKE_OVER &&
                recv_msg_seq != ssl->handshake->in_msg_seq ) ||
              ( ssl->state == MBEDTLS_SSL_HANDSHAKE_OVER &&
                ssl->in_msg[0] != MBEDTLS_SSL_HS_CLIENT_HELLO ) ) )
        {
            if( recv_msg_seq > ssl->handshake->in_msg_seq )
            {
                MBEDTLS_SSL_DEBUG_MSG( 2, ( "received future handshake message of sequence number %u (next %u)",
                                            recv_msg_seq,
                                            ssl->handshake->in_msg_seq ) );
                return( MBEDTLS_ERR_SSL_EARLY_MESSAGE );
            }

            /* Retransmit only on last message from previous flight, to avoid
             * too many retransmissions.
             * Besides, No sane server ever retransmits HelloVerifyRequest */
            if( recv_msg_seq == ssl->handshake->in_flight_start_seq - 1 &&
                ssl->in_msg[0] != MBEDTLS_SSL_HS_HELLO_VERIFY_REQUEST )
            {
                MBEDTLS_SSL_DEBUG_MSG( 2, ( "received message from last flight, "
                                            "message_seq = %u, start_of_flight = %u",
                                            recv_msg_seq,
                                            ssl->handshake->in_flight_start_seq ) );

                if( ( ret = mbedtls_ssl_resend( ssl ) ) != 0 )
                {
                    MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_resend", ret );
                    return( ret );
                }
            }
            else
            {
                MBEDTLS_SSL_DEBUG_MSG( 2, ( "dropping out-of-sequence message: "
                                            "message_seq = %u, expected = %u",
                                            recv_msg_seq,
                                            ssl->handshake->in_msg_seq ) );
            }

            return( MBEDTLS_ERR_SSL_CONTINUE_PROCESSING );
        }
        /* Wait until message completion to increment in_msg_seq */

        /* Message reassembly is handled alongside buffering of future
         * messages; the commonality is that both handshake fragments and
         * future messages cannot be forwarded immediately to the
         * handshake logic layer. */
        if( ssl_hs_is_proper_fragment( ssl ) == 1 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 2, ( "found fragmented DTLS handshake message" ) );
            return( MBEDTLS_ERR_SSL_EARLY_MESSAGE );
        }
    }
    else
#endif /* MBEDTLS_SSL_PROTO_DTLS */
    /* With TLS we don't handle fragmentation (for now) */
    if( ssl->in_msglen < ssl->in_hslen )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "TLS handshake fragmentation not supported" ) );
        return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
    }

    return( 0 );
}
| 0
|
221,523
|
/*
 * Collect the dconf state relevant to @app_id into three serialized
 * blobs: defaults (GKeyFile data), user values (GKeyFile data) and locks
 * (newline-separated list).
 *
 * @migrate_path contributes user values only when it is similar enough to
 * the app's own dconf prefix; the extra @paths contribute defaults and
 * locks for directories, and both defaults and values for individual
 * keys.  When dconf support is not compiled in, the three outputs are
 * produced from empty containers.
 */
get_dconf_data (const char *app_id,
                const char **paths,
                const char *migrate_path,
                char **defaults,
                gsize *defaults_size,
                char **values,
                gsize *values_size,
                char **locks,
                gsize *locks_size)
{
#ifdef HAVE_DCONF
  DConfClient *client = NULL;
  g_autofree char *prefix = NULL;
#endif
  g_autoptr(GKeyFile) defaults_data = NULL;
  g_autoptr(GKeyFile) values_data = NULL;
  g_autoptr(GString) locks_data = NULL;

  defaults_data = g_key_file_new ();
  values_data = g_key_file_new ();
  locks_data = g_string_new ("");

#ifdef HAVE_DCONF
  client = dconf_client_new ();

  prefix = flatpak_dconf_path_for_app_id (app_id);

  if (migrate_path)
    {
      g_debug ("Add values in dir '%s', prefix is '%s'", migrate_path, prefix);
      /* Refuse migration sources unrelated to this app's own prefix. */
      if (flatpak_dconf_path_is_similar (migrate_path, prefix))
        add_dconf_dir_to_keyfile (values_data, client, migrate_path, DCONF_READ_USER_VALUE);
      else
        g_warning ("Ignoring D-Conf migrate-path setting %s", migrate_path);
    }

  g_debug ("Add defaults in dir %s", prefix);
  add_dconf_dir_to_keyfile (defaults_data, client, prefix, DCONF_READ_DEFAULT_VALUE);

  g_debug ("Add locks in dir %s", prefix);
  add_dconf_locks_to_list (locks_data, client, prefix);

  /* We allow extra paths for defaults and locks, but not for user values */
  if (paths)
    {
      int i;
      for (i = 0; paths[i]; i++)
        {
          if (dconf_is_dir (paths[i], NULL))
            {
              g_debug ("Add defaults in dir %s", paths[i]);
              add_dconf_dir_to_keyfile (defaults_data, client, paths[i], DCONF_READ_DEFAULT_VALUE);

              g_debug ("Add locks in dir %s", paths[i]);
              add_dconf_locks_to_list (locks_data, client, paths[i]);
            }
          else if (dconf_is_key (paths[i], NULL))
            {
              g_debug ("Add individual key %s", paths[i]);
              add_dconf_key_to_keyfile (defaults_data, client, paths[i], DCONF_READ_DEFAULT_VALUE);
              add_dconf_key_to_keyfile (values_data, client, paths[i], DCONF_READ_USER_VALUE);
            }
          else
            {
              g_warning ("Ignoring settings path '%s': neither dir nor key", paths[i]);
            }
        }
    }
#endif

  *defaults = g_key_file_to_data (defaults_data, defaults_size, NULL);
  *values = g_key_file_to_data (values_data, values_size, NULL);
  *locks_size = locks_data->len;
  /* Hand the GString buffer to the caller; g_string_free(FALSE) keeps it. */
  *locks = g_string_free (g_steal_pointer (&locks_data), FALSE);

#ifdef HAVE_DCONF
  g_object_unref (client);
#endif
}
| 0
|
219,010
|
Status ConstantFolding::MaterializeShapes(const GraphProperties& properties) {
// We may add some nodes to the graph to encode control dependencies and hold
// the materialized shapes: there is no need to process these added nodes, so
// only iterate over the nodes of the input graph.
const int node_count = graph_->node_size();
for (int node_idx = 0; node_idx < node_count; ++node_idx) {
NodeDef* node = graph_->mutable_node(node_idx);
const string op = node->op();
if (op != "Shape" && op != "Size" && op != "Rank" && op != "ShapeN" &&
op != "TensorArraySizeV3") {
continue;
}
const std::vector<OpInfo::TensorProperties>& output =
properties.GetOutputProperties(node->name());
const std::vector<OpInfo::TensorProperties>& input =
properties.GetInputProperties(node->name());
if (input.empty() || output.empty()) {
continue;
}
if (op == "Shape" || op == "Size" || op == "Rank") {
CHECK_EQ(1, output.size());
CHECK_EQ(1, input.size());
const DataType type = output[0].dtype();
CHECK(type == DT_INT32 || type == DT_INT64);
const PartialTensorShape shape(input[0].shape());
if ((op != "Rank" && !shape.IsFullyDefined()) ||
(op == "Rank" && shape.unknown_rank())) {
continue;
}
Tensor constant_value(type);
if (!ConvertShapeToConstant(op, type, shape, &constant_value).ok()) {
continue;
}
// TODO(rmlarsen): Remove this workaround for b/150861569
// The bug involves an expression of the form Shape(ExpandDims(x)
// with an incorrectly inferred zero-size first dimension.
if (op == "Shape") {
if (shape.dims() > 0 && shape.dim_size(0) == 0) continue;
}
// Repurpose the existing node to be the constant.
// Device placement is preserved.
graph_modified_ = true;
node->set_op("Const");
EraseRegularNodeAttributes(node);
(*node->mutable_attr())["dtype"].set_type(type);
constant_value.AsProtoTensorContent(
(*node->mutable_attr())["value"].mutable_tensor());
// Turn the data input into a control dependency: this is needed to
// ensure that the constant value will only be run in the
// cases where the shape/rank/size would have been run in
// the original graph.
string ctrl_dep =
AddControlDependency(node->input(0), graph_, node_map_.get());
node_map_->UpdateInput(node->name(), node->input(0), ctrl_dep);
node->set_input(0, ctrl_dep);
// Done with the Shape/Size/Rank node, move to the next node.
continue;
}
if (op == "TensorArraySizeV3") {
const NodeDef* array = CHECK_NOTNULL(node_map_->GetNode(node->input(0)));
if (array->input_size() == 0 ||
(array->attr().count("dynamic_size") != 0 &&
array->attr().at("dynamic_size").b())) {
continue;
}
const NodeDef* array_size =
CHECK_NOTNULL(node_map_->GetNode(array->input(0)));
if (IsReallyConstant(*array_size)) {
// Don't materialize 0 sizes to avoid triggering incorrect static
// checks. A 0 sized array that can't grow isn't useful anyway.
if (array_size->attr().count("value") == 0) {
continue;
}
const TensorProto& raw_val = array_size->attr().at("value").tensor();
if (raw_val.dtype() != DT_INT32) {
continue;
}
Tensor value(raw_val.dtype(), raw_val.tensor_shape());
if (!value.FromProto(raw_val)) {
continue;
}
if (value.flat<int32>()(0) == 0) {
continue;
}
graph_modified_ = true;
node->set_op("Const");
*node->mutable_attr() = array_size->attr();
node->set_input(0, AsControlDependency(NodeName(node->input(0))));
node->set_input(1, AddControlDependency(NodeName(node->input(1)),
graph_, node_map_.get()));
}
continue;
}
// Handle ShapeN materialization case.
// It's possible that not all input tensors have known shapes.
CHECK_EQ(op, "ShapeN");
CHECK_EQ(input.size(), output.size());
const NodeDef* const shape_n_node = node;
for (int port_idx = 0, idx_limit = output.size(); port_idx < idx_limit;
++port_idx) {
const DataType type = output[port_idx].dtype();
CHECK(type == DT_INT32 || type == DT_INT64);
const PartialTensorShape shape(input[port_idx].shape());
if (!shape.IsFullyDefined()) {
continue;
}
Tensor constant_value(type);
auto status = ConvertShapeToConstant(op, type, shape, &constant_value);
if (!status.ok()) {
continue;
}
// We make a copy because we mutate the nodes.
auto fanouts = node_map_->GetOutputs(shape_n_node->name());
// Find all nodes consuming this shape and connect them through the new
// constant node instead.
for (NodeDef* output : fanouts) {
// Track whether there are any direct edges left between shape_n_node
// and this output node after the transformation.
bool direct_edges_exist = false;
for (int k = 0; k < output->input_size(); ++k) {
int port;
const string node_name = ParseNodeName(output->input(k), &port);
if (node_name == shape_n_node->name() && port == port_idx) {
// Create a const node as ShapeN's output if not already.
const string const_name = OptimizedNodeName(
*shape_n_node, strings::StrCat("-matshapes-", port_idx));
if (node_map_->GetNode(const_name) == nullptr) {
NodeDef* added_node = graph_->add_node();
added_node->set_name(const_name);
added_node->set_op("Const");
added_node->set_device(shape_n_node->device());
node_map_->AddNode(added_node->name(), added_node);
(*added_node->mutable_attr())["dtype"].set_type(type);
constant_value.AsProtoTensorContent(
(*added_node->mutable_attr())["value"].mutable_tensor());
// We add a control dependency to the original ShapeN node,
// so that the node will only be run if all inputs of the
// original ShapeN node are run.
string ctrl_dep = AddControlDependency(shape_n_node->name(),
graph_, node_map_.get());
*added_node->add_input() = ctrl_dep;
node_map_->AddOutput(NodeName(ctrl_dep), added_node->name());
}
*output->mutable_input(k) = const_name;
node_map_->AddOutput(const_name, output->name());
graph_modified_ = true;
}
if (node_name == shape_n_node->name() && port != port_idx) {
direct_edges_exist = true;
}
}
if (!direct_edges_exist) {
node_map_->RemoveOutput(node->name(), output->name());
}
}
}
}
return Status::OK();
}
| 0
|
336,005
|
static void sr9700_set_multicast(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
/* We use the 20 byte dev->data for our 8 byte filter buffer
* to avoid allocating memory that is tricky to free later
*/
u8 *hashes = (u8 *)&dev->data;
/* rx_ctl setting : enable, disable_long, disable_crc */
u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG;
memset(hashes, 0x00, SR_MCAST_SIZE);
/* broadcast address */
hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG;
if (netdev->flags & IFF_PROMISC) {
rx_ctl |= RCR_PRMSC;
} else if (netdev->flags & IFF_ALLMULTI ||
netdev_mc_count(netdev) > SR_MCAST_MAX) {
rx_ctl |= RCR_RUNT;
} else if (!netdev_mc_empty(netdev)) {
struct netdev_hw_addr *ha;
netdev_for_each_mc_addr(ha, netdev) {
u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
}
sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
sr_write_reg_async(dev, SR_RCR, rx_ctl);
}
| 0
|
279,922
|
/*
 * Rename the current buffer to "new_fname".
 * Fires BufFilePre/BufFilePost autocommands around the change and, when the
 * buffer previously had a name, creates an unlisted buffer holding the old
 * name so it can serve as the alternate file.
 * Returns OK on success, FAIL when autocommands switch buffers, abort, or
 * setfname() fails.
 */
rename_buffer(char_u *new_fname)
{
char_u *fname, *sfname, *xfname;
buf_T *buf;
buf = curbuf;
apply_autocmds(EVENT_BUFFILEPRE, NULL, NULL, FALSE, curbuf);
// buffer changed, don't change name now
if (buf != curbuf)
return FAIL;
#ifdef FEAT_EVAL
if (aborting()) // autocmds may abort script processing
return FAIL;
#endif
/*
* The name of the current buffer will be changed.
* A new (unlisted) buffer entry needs to be made to hold the old file
* name, which will become the alternate file name.
* But don't set the alternate file name if the buffer didn't have a
* name.
*/
// Save the old names; ownership of fname/sfname moves to this function,
// which must either restore them (failure) or free them (success).
fname = curbuf->b_ffname;
sfname = curbuf->b_sfname;
xfname = curbuf->b_fname;
curbuf->b_ffname = NULL;
curbuf->b_sfname = NULL;
if (setfname(curbuf, new_fname, NULL, TRUE) == FAIL)
{
// Restore the original names so the buffer is unchanged on failure.
curbuf->b_ffname = fname;
curbuf->b_sfname = sfname;
return FAIL;
}
curbuf->b_flags |= BF_NOTEDITED;
// Only record an alternate file when the buffer previously had a name.
if (xfname != NULL && *xfname != NUL)
{
buf = buflist_new(fname, xfname, curwin->w_cursor.lnum, 0);
if (buf != NULL && (cmdmod.cmod_flags & CMOD_KEEPALT) == 0)
curwin->w_alt_fnum = buf->b_fnum;
}
// buflist_new() copied the names; the saved originals can go now.
vim_free(fname);
vim_free(sfname);
apply_autocmds(EVENT_BUFFILEPOST, NULL, NULL, FALSE, curbuf);
// Change directories when the 'acd' option is set.
DO_AUTOCHDIR;
return OK;
}
| 0
|
294,501
|
/*
 * Date.gregorian_leap?(year) -- Ruby class method.
 * Validates that "y" is numeric, decodes it into an int year, and returns
 * a Ruby boolean for whether that year is a Gregorian leap year.
 * "klass" is the receiver and is unused; "nth" receives the century-count
 * part of the decoded year but is not needed here.
 */
date_s_gregorian_leap_p(VALUE klass, VALUE y)
{
VALUE nth;
int ry;
check_numeric(y, "year");
decode_year(y, -1, &nth, &ry);
return f_boolcast(c_gregorian_leap_p(ry));
}
| 0
|
253,639
|
/*
 * Thin wrapper: close an open SMB2 file handle identified by the
 * persistent/volatile fid pair. The SMB2_close() return value is
 * intentionally ignored here (close is best-effort at this layer).
 */
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid)
{
SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
| 0
|
417,099
|
// Report the current panning separation: delegate to the active player
// when one exists, otherwise fall back to the locally cached value.
mp_sint32 PlayerGeneric::getPanningSeparation() const
{
	return player ? player->getPanningSeparation() : panningSeparation;
}
| 0
|
481,268
|
/*
 * Post a receive buffer on the FPGA connection's RQ.
 * Maps the buffer for DMA, checks for ring fullness, fills the next
 * work-queue data segment, and rings the doorbell.
 * Returns 0 on success, -EBUSY when the RQ is full (buffer unmapped
 * before returning), or the DMA-mapping error.
 */
static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_dma_buf *buf)
{
struct mlx5_wqe_data_seg *data;
unsigned int ix;
int err = 0;
err = mlx5_fpga_conn_map_buf(conn, buf);
if (unlikely(err))
goto out;
/* Producer/consumer counters wrap; their difference is the fill level. */
if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
mlx5_fpga_conn_unmap_buf(conn, buf);
return -EBUSY;
}
/* rq.size is a power of two, so masking yields the ring slot index. */
ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
data->byte_count = cpu_to_be32(buf->sg[0].size);
data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
data->addr = cpu_to_be64(buf->sg[0].dma_addr);
conn->qp.rq.pc++;
conn->qp.rq.bufs[ix] = buf;
/* Make sure that descriptors are written before doorbell record. */
dma_wmb();
*conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
out:
return err;
}
| 0
|
218,854
|
// Look up (or lazily create) the FrameInfo for frame name `fname`.
// The map key is a string_view aliasing the FrameInfo's own stored name,
// so the key remains valid for as long as the entry it refers to.
ImmutableExecutorState::FrameInfo* ImmutableExecutorState::EnsureFrameInfo(
    const string& fname) {
  auto iter = frame_info_.find(fname);
  if (iter != frame_info_.end()) {
    return iter->second.get();
  }
  auto info = absl::make_unique<FrameInfo>(fname);
  absl::string_view key = info->name;
  return frame_info_.emplace(key, std::move(info)).first->second.get();
}
| 0
|
267,862
|
// Decode one modular group (DC or AC) from the bitstream into either the
// full modular image or, when use_full_image is false, directly into the
// output rect after undoing group-level transforms.
// `rect` is in full-image coordinates; channels are clipped by their own
// h/v shifts. `zerofill` skips entropy decoding and writes zeros instead.
// NOTE: returns `true` via jxl::Status's bool conversion.
Status ModularFrameDecoder::DecodeGroup(const Rect& rect, BitReader* reader,
int minShift, int maxShift,
const ModularStreamId& stream,
bool zerofill,
PassesDecoderState* dec_state,
ImageBundle* output) {
JXL_DASSERT(stream.kind == ModularStreamId::kModularDC ||
stream.kind == ModularStreamId::kModularAC);
const size_t xsize = rect.xsize();
const size_t ysize = rect.ysize();
Image gi(xsize, ysize, full_image.bitdepth, 0);
// start at the first bigger-than-groupsize non-metachannel
size_t c = full_image.nb_meta_channels;
for (; c < full_image.channel.size(); c++) {
Channel& fc = full_image.channel[c];
if (fc.w > frame_dim.group_dim || fc.h > frame_dim.group_dim) break;
}
size_t beginc = c;
// First pass: build the per-group image `gi` with one channel per
// in-range (shift within [minShift, maxShift]) non-empty channel slice.
for (; c < full_image.channel.size(); c++) {
Channel& fc = full_image.channel[c];
int shift = std::min(fc.hshift, fc.vshift);
if (shift > maxShift) continue;
if (shift < minShift) continue;
Rect r(rect.x0() >> fc.hshift, rect.y0() >> fc.vshift,
rect.xsize() >> fc.hshift, rect.ysize() >> fc.vshift, fc.w, fc.h);
if (r.xsize() == 0 || r.ysize() == 0) continue;
if (zerofill && use_full_image) {
// Zero directly into the full image; no group channel needed.
for (size_t y = 0; y < r.ysize(); ++y) {
pixel_type* const JXL_RESTRICT row_out = r.Row(&fc.plane, y);
memset(row_out, 0, r.xsize() * sizeof(*row_out));
}
} else {
Channel gc(r.xsize(), r.ysize());
if (zerofill) ZeroFillImage(&gc.plane);
gc.hshift = fc.hshift;
gc.vshift = fc.vshift;
gi.channel.emplace_back(std::move(gc));
}
}
if (zerofill && use_full_image) return true;
ModularOptions options;
if (!zerofill) {
if (!ModularGenericDecompress(
reader, gi, /*header=*/nullptr, stream.ID(frame_dim), &options,
/*undo_transforms=*/-1, &tree, &code, &context_map)) {
return JXL_FAILURE("Failed to decode modular group");
}
}
// Undo global transforms that have been pushed to the group level
if (!use_full_image) {
for (auto t : global_transform) {
JXL_RETURN_IF_ERROR(t.Inverse(gi, global_header.wp_header));
}
JXL_RETURN_IF_ERROR(ModularImageToDecodedRect(
gi, dec_state, nullptr, output, rect.Crop(dec_state->decoded)));
return true;
}
// Second pass: copy decoded group channels back into the full image.
// The channel filter here must match the first pass exactly so that
// `gic` stays in sync with the channels appended to `gi` above.
int gic = 0;
for (c = beginc; c < full_image.channel.size(); c++) {
Channel& fc = full_image.channel[c];
int shift = std::min(fc.hshift, fc.vshift);
if (shift > maxShift) continue;
if (shift < minShift) continue;
Rect r(rect.x0() >> fc.hshift, rect.y0() >> fc.vshift,
rect.xsize() >> fc.hshift, rect.ysize() >> fc.vshift, fc.w, fc.h);
if (r.xsize() == 0 || r.ysize() == 0) continue;
JXL_ASSERT(use_full_image);
CopyImageTo(/*rect_from=*/Rect(0, 0, r.xsize(), r.ysize()),
/*from=*/gi.channel[gic].plane,
/*rect_to=*/r, /*to=*/&fc.plane);
gic++;
}
return true;
}
| 0
|
369,878
|
/*
 * Decide whether the current task may see `task`'s /proc entries given the
 * namespace's hide_pid setting. Checks are ordered cheapest-first and keep
 * the original short-circuit semantics: visible when hide_pid is below the
 * threshold, or the caller is in the privileged group, or ptrace-read
 * access is permitted.
 */
static bool has_pid_permissions(struct pid_namespace *pid,
				struct task_struct *task,
				int hide_pid_min)
{
	return pid->hide_pid < hide_pid_min ||
	       in_group_p(pid->pid_gid) ||
	       ptrace_may_access(task, PTRACE_MODE_READ);
}
| 0
|
409,401
|
/*
 * Translate a 7-bit two-byte escape introducer at "p" into its single
 * 8-bit control equivalent (ESC [ -> CSI, ESC ] -> OSC, ESC O -> SS3).
 * Returns 0 when "p" does not start such a sequence.
 */
term_7to8bit(char_u *p)
{
    if (*p != ESC)
	return 0;
    switch (p[1])
    {
	case '[': return CSI;
	case ']': return OSC;
	case 'O': return 0x8f;
    }
    return 0;
}
| 0
|
463,211
|
static int split_attribs(const char *data, int datalen,
struct buf *value, struct annotate_metadata *mdata)
{
unsigned long tmp; /* for alignment */
const char *tmps;
const char *end = data + datalen;
/* initialize metadata */
memset(mdata, 0, sizeof(struct annotate_metadata));
/* xxx sanity check the data? */
if (datalen <= 0)
return 1;
/*
* Sigh...this is dumb. We take care to be machine independent by
* storing the length in network byte order...but the size of the
* length field depends on whether we're running on a 32b or 64b
* platform.
*/
memcpy(&tmp, data, sizeof(unsigned long));
data += sizeof(unsigned long); /* skip to value */
buf_init_ro(value, data, ntohl(tmp));
/*
* In records written by older versions of Cyrus, there will be
* binary encoded content-type and modifiedsince values after the
* data. We don't care about those anymore, so we just ignore them
* and skip to the entry's metadata.
*/
tmps = data + ntohl(tmp) + 1; /* Skip zero-terminated value */
if (tmps < end) {
tmps += strlen(tmps) + 1; /* Skip zero-terminated content-type */
tmps += sizeof(unsigned long); /* Skip modifiedsince value */
}
if (tmps < end) {
/* make sure ntohll's input is correctly aligned */
modseq_t modseq;
memcpy(&modseq, tmps, sizeof(modseq));
mdata->modseq = ntohll(modseq);
tmps += sizeof(modseq_t);
}
if (tmps < end) {
mdata->flags = *tmps;
tmps++;
}
/* normalise deleted entries */
if (mdata->flags & ANNOTATE_FLAG_DELETED) {
buf_reset(value);
}
return 0;
}
| 0
|
195,308
|
/*
 * Build the seccomp BPF filter for a sandboxed app and hand it to bwrap
 * via an fd argument ("--seccomp <fd>").
 * The policy is default-allow with an explicit syscall blocklist, plus a
 * socket-family allowlist enforced by blocking every family in between
 * the allowed ones.
 * Returns TRUE on success; on failure sets `error` and returns FALSE.
 */
setup_seccomp (FlatpakBwrap *bwrap,
const char *arch,
gulong allowed_personality,
FlatpakRunFlags run_flags,
GError **error)
{
gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0;
gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0;
__attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL;
/**** BEGIN NOTE ON CODE SHARING
*
* There are today a number of different Linux container
* implementations. That will likely continue for long into the
* future. But we can still try to share code, and it's important
* to do so because it affects what library and application writers
* can do, and we should support code portability between different
* container tools.
*
* This syscall blocklist is copied from linux-user-chroot, which was in turn
* clearly influenced by the Sandstorm.io blocklist.
*
* If you make any changes here, I suggest sending the changes along
* to other sandbox maintainers. Using the libseccomp list is also
* an appropriate venue:
* https://groups.google.com/forum/#!forum/libseccomp
*
* A non-exhaustive list of links to container tooling that might
* want to share this blocklist:
*
* https://github.com/sandstorm-io/sandstorm
* in src/sandstorm/supervisor.c++
* https://github.com/flatpak/flatpak.git
* in common/flatpak-run.c
* https://git.gnome.org/browse/linux-user-chroot
* in src/setup-seccomp.c
*
* Other useful resources:
* https://github.com/systemd/systemd/blob/HEAD/src/shared/seccomp-util.c
* https://github.com/moby/moby/blob/HEAD/profiles/seccomp/default.json
*
**** END NOTE ON CODE SHARING
*/
/* Syscalls blocked for every sandboxed app; each entry is a syscall
* number, the errno returned to the caller, and an optional argument
* comparison restricting the rule. */
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_blocklist[] = {
/* Block dmesg */
{SCMP_SYS (syslog), EPERM},
/* Useless old syscall */
{SCMP_SYS (uselib), EPERM},
/* Don't allow disabling accounting */
{SCMP_SYS (acct), EPERM},
/* 16-bit code is unnecessary in the sandbox, and modify_ldt is a
historic source of interesting information leaks. */
{SCMP_SYS (modify_ldt), EPERM},
/* Don't allow reading current quota use */
{SCMP_SYS (quotactl), EPERM},
/* Don't allow access to the kernel keyring */
{SCMP_SYS (add_key), EPERM},
{SCMP_SYS (keyctl), EPERM},
{SCMP_SYS (request_key), EPERM},
/* Scary VM/NUMA ops */
{SCMP_SYS (move_pages), EPERM},
{SCMP_SYS (mbind), EPERM},
{SCMP_SYS (get_mempolicy), EPERM},
{SCMP_SYS (set_mempolicy), EPERM},
{SCMP_SYS (migrate_pages), EPERM},
/* Don't allow subnamespace setups: */
{SCMP_SYS (unshare), EPERM},
{SCMP_SYS (setns), EPERM},
{SCMP_SYS (mount), EPERM},
{SCMP_SYS (umount), EPERM},
{SCMP_SYS (umount2), EPERM},
{SCMP_SYS (pivot_root), EPERM},
#if defined(__s390__) || defined(__s390x__) || defined(__CRIS__)
/* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack
* and flags arguments are reversed so the flags come second */
{SCMP_SYS (clone), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#else
/* Normally the flags come first */
{SCMP_SYS (clone), EPERM, &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#endif
/* Don't allow faking input to the controlling tty (CVE-2017-5226) */
{SCMP_SYS (ioctl), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)},
/* seccomp can't look into clone3()'s struct clone_args to check whether
* the flags are OK, so we have no choice but to block clone3().
* Return ENOSYS so user-space will fall back to clone().
* (GHSA-67h7-w3jq-vh4q; see also https://github.com/moby/moby/commit/9f6b562d) */
{SCMP_SYS (clone3), ENOSYS},
/* New mount manipulation APIs can also change our VFS. There's no
* legitimate reason to do these in the sandbox, so block all of them
* rather than thinking about which ones might be dangerous.
* (GHSA-67h7-w3jq-vh4q) */
{SCMP_SYS (open_tree), ENOSYS},
{SCMP_SYS (move_mount), ENOSYS},
{SCMP_SYS (fsopen), ENOSYS},
{SCMP_SYS (fsconfig), ENOSYS},
{SCMP_SYS (fsmount), ENOSYS},
{SCMP_SYS (fspick), ENOSYS},
{SCMP_SYS (mount_setattr), ENOSYS},
};
/* Additional syscalls blocked unless the app runs in --devel mode. */
struct
{
int scall;
int errnum;
struct scmp_arg_cmp *arg;
} syscall_nondevel_blocklist[] = {
/* Profiling operations; we expect these to be done by tools from outside
* the sandbox. In particular perf has been the source of many CVEs.
*/
{SCMP_SYS (perf_event_open), EPERM},
/* Don't allow you to switch to bsd emulation or whatnot */
{SCMP_SYS (personality), EPERM, &SCMP_A0 (SCMP_CMP_NE, allowed_personality)},
{SCMP_SYS (ptrace), EPERM}
};
/* Blocklist all but unix, inet, inet6 and netlink */
struct
{
int family;
FlatpakRunFlags flags_mask;
} socket_family_allowlist[] = {
/* NOTE: Keep in numerical order */
{ AF_UNSPEC, 0 },
{ AF_LOCAL, 0 },
{ AF_INET, 0 },
{ AF_INET6, 0 },
{ AF_NETLINK, 0 },
{ AF_CAN, FLATPAK_RUN_FLAG_CANBUS },
{ AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH },
};
int last_allowed_family;
int i, r;
g_auto(GLnxTmpfile) seccomp_tmpf = { 0, };
seccomp = seccomp_init (SCMP_ACT_ALLOW);
if (!seccomp)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed"));
if (arch != NULL)
{
uint32_t arch_id = 0;
const uint32_t *extra_arches = NULL;
if (strcmp (arch, "i386") == 0)
{
arch_id = SCMP_ARCH_X86;
}
else if (strcmp (arch, "x86_64") == 0)
{
arch_id = SCMP_ARCH_X86_64;
extra_arches = seccomp_x86_64_extra_arches;
}
else if (strcmp (arch, "arm") == 0)
{
arch_id = SCMP_ARCH_ARM;
}
#ifdef SCMP_ARCH_AARCH64
else if (strcmp (arch, "aarch64") == 0)
{
arch_id = SCMP_ARCH_AARCH64;
extra_arches = seccomp_aarch64_extra_arches;
}
#endif
/* We only really need to handle arches on multiarch systems.
* If only one arch is supported the default is fine */
if (arch_id != 0)
{
/* This *adds* the target arch, instead of replacing the
native one. This is not ideal, because we'd like to only
allow the target arch, but we can't really disallow the
native arch at this point, because then bubblewrap
couldn't continue running. */
r = seccomp_arch_add (seccomp, arch_id);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter"));
if (multiarch && extra_arches != NULL)
{
for (i = 0; extra_arches[i] != 0; i++)
{
r = seccomp_arch_add (seccomp, extra_arches[i]);
if (r < 0 && r != -EEXIST)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter"));
}
}
}
}
/* TODO: Should we filter the kernel keyring syscalls in some way?
* We do want them to be used by desktop apps, but they could also perhaps
* leak system stuff or secrets from other apps.
*/
for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++)
{
int scall = syscall_blocklist[i].scall;
int errnum = syscall_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
/* EFAULT means "unknown syscall on this kernel/arch"; other errors
* (e.g. the rule already exists) are tolerated. */
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
if (!devel)
{
for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++)
{
int scall = syscall_nondevel_blocklist[i].scall;
int errnum = syscall_nondevel_blocklist[i].errnum;
g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);
if (syscall_nondevel_blocklist[i].arg)
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_nondevel_blocklist[i].arg);
else
r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
if (r < 0 && r == -EFAULT /* unknown syscall */)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
}
}
/* Socket filtering doesn't work on e.g. i386, so ignore failures here
* However, we need to user seccomp_rule_add_exact to avoid libseccomp doing
* something else: https://github.com/seccomp/libseccomp/issues/8 */
last_allowed_family = -1;
for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++)
{
int family = socket_family_allowlist[i].family;
int disallowed;
if (socket_family_allowlist[i].flags_mask != 0 &&
(socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask)
continue;
for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++)
{
/* Blocklist the in-between valid families */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed));
}
last_allowed_family = family;
}
/* Blocklist the rest */
seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1));
if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error))
return FALSE;
if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf"));
/* Rewind so bwrap reads the filter from the start of the tmpfile. */
lseek (seccomp_tmpf.fd, 0, SEEK_SET);
flatpak_bwrap_add_args_data_fd (bwrap,
"--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL);
return TRUE;
}
| 1
|
357,663
|
// Break reference cycles before destruction: null out attributes, default
// values, methods and metamethods, then drop the references held on the
// member table and (if any) the base class.
void SQClass::Finalize() {
_attributes.Null();
_NULL_SQOBJECT_VECTOR(_defaultvalues,_defaultvalues.size());
_methods.resize(0);
_NULL_SQOBJECT_VECTOR(_metamethods,MT_LAST);
__ObjRelease(_members);
if(_base) {
__ObjRelease(_base);
}
}
| 0
|
455,422
|
/*
 * Thin wrapper: free the inode's copy-on-write preallocations via the
 * generic quota-eofblocks helper, passing the CoW-specific scan callback.
 */
xfs_inode_free_quota_cowblocks(
struct xfs_inode *ip)
{
return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}
| 0
|
247,685
|
// Read-only accessor for the expected TLS peer subject configured on this object.
const std::string& expectedPeerSubject() const { return expected_peer_subject_; }
| 0
|
212,818
|
/*
 * Parse a STUN ERROR-CODE attribute (RFC 5389 section 15.6) from `buf`
 * into a pool-allocated pj_stun_errcode_attr.
 * Layout after the 4-byte attribute header: 2 reserved bytes, the error
 * class, the error number, then an optional UTF-8 reason phrase.
 * On success stores the attribute in *p_attr and returns PJ_SUCCESS.
 */
static pj_status_t decode_errcode_attr(pj_pool_t *pool,
				       const pj_uint8_t *buf,
				       const pj_stun_msg_hdr *msghdr,
				       void **p_attr)
{
    pj_stun_errcode_attr *attr;
    pj_str_t value;

    PJ_UNUSED_ARG(msghdr);

    /* Create the attribute */
    attr = PJ_POOL_ZALLOC_T(pool, pj_stun_errcode_attr);
    GETATTRHDR(buf, &attr->hdr);

    /* The attribute must be long enough for the 4 fixed bytes.  Without
     * this check a malformed length makes value.slen negative below,
     * causing an out-of-bounds read in pj_strdup().
     */
    if (attr->hdr.length < 4)
	return PJNATH_EINSTUNMSGLEN;

    attr->err_code = buf[6] * 100 + buf[7];

    /* Get pointer to the string in the message */
    value.ptr = ((char*)buf + ATTR_HDR_LEN + 4);
    value.slen = attr->hdr.length - 4;

    /* Copy the string to the attribute */
    pj_strdup(pool, &attr->reason, &value);

    /* Done */
    *p_attr = attr;

    return PJ_SUCCESS;
}
| 1
|
413,622
|
// Auto-generate a name for the function containing `addr`.
// Returns a heap string from anal_fcn_autoname(), or NULL when no
// function exists at that address.
R_API char *r_core_anal_fcn_autoname(RCore *core, ut64 addr, int dump, int mode) {
	RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, 0);
	if (!fcn) {
		return NULL;
	}
	return anal_fcn_autoname (core, fcn, dump, mode);
}
| 0
|
229,295
|
// Serialize a socket address as [1-byte length][raw address bytes][int port],
// appending to the response body.
void cql_server::response::write_inet(socket_address inet)
{
    const auto addr = inet.addr();
    write_byte(uint8_t(addr.size()));
    const int8_t* raw = static_cast<const int8_t*>(addr.data());
    _body.write(bytes_view(raw, addr.size()));
    write_int(inet.port());
}
| 0
|
229,337
|
// Determine which device an op input tensor should be considered to live on
// for placement purposes, writing the answer to *result.
// Rules, in order: non-local (remote/packed) handles use their recorded
// device (CPU fallback); DT_RESOURCE handles use the device embedded in the
// resource handle itself; otherwise host-memory dtypes map to the CPU and
// everything else keeps the tensor's device.
Status GetDeviceForInput(const EagerOperation& op, const EagerContext& ctx,
TensorHandle* tensor_handle, Device** result) {
Device* cpu_device = ctx.HostCPU();
string device_name;
if (tensor_handle->Type() != TensorHandle::LOCAL) {
Device* device = tensor_handle->device();
device_name = device != nullptr ? device->name() : cpu_device->name();
*result = (device == nullptr ? cpu_device : device);
} else if (tensor_handle->dtype == DT_RESOURCE) {
// Use the resource's actual device because it is the device that will
// influence partitioning the multi-device function.
const Tensor* tensor;
// TODO(fishx): Avoid blocking here.
TF_RETURN_IF_ERROR(tensor_handle->Tensor(&tensor));
if (tensor->NumElements() == 0) {
return errors::InvalidArgument("Empty resource handle");
}
const ResourceHandle& handle = tensor->flat<ResourceHandle>()(0);
device_name = handle.device();
Device* input_device;
TF_RETURN_IF_ERROR(
ctx.FindDeviceFromName(device_name.c_str(), &input_device));
*result = input_device;
} else {
Device* device = tensor_handle->device();
const bool is_tpu = device != nullptr && device->device_type() == "TPU";
// int32 return values can be placed on TPUs.
// NOTE(review): for TPU tensors only ints-on-device dtypes stay in
// device memory; all other devices use the plain dtype->memory mapping.
const bool use_host_memory =
is_tpu ? MTypeFromDTypeIntsOnDevice(tensor_handle->dtype)
: MTypeFromDType(tensor_handle->dtype);
if (use_host_memory) {
*result = cpu_device;
} else {
// Eager ops executing as functions should have their preferred inputs set
// to the op's device. This allows us to avoid expensive D2H copies if a
// mirror of the tensor already exists on the op's device.
if (!op.is_function() && device != nullptr && device != cpu_device) {
device = absl::get<Device*>(op.Device());
}
*result = (device == nullptr ? cpu_device : device);
}
}
return Status::OK();
}
| 0
|
222,562
|
// Register `func` as the gradient creator for op `op` in the global
// factory map. Registering the same op twice is a fatal error.
// Always returns true (so it can be used in static initializers).
bool RegisterOp(const string& op, Creator func) {
  const bool inserted = GetOpGradFactory()->insert({op, func}).second;
  CHECK(inserted) << "Duplicated gradient for " << op;
  return true;
}
| 0
|
244,316
|
/*
 * Map a composition-time offset to the compact-trun field-size index.
 * Version 1 allows signed offsets (s32 index); version 0 requires a
 * positive offset and uses the unsigned index, optionally dividing by
 * the ctso multiplier first.
 */
static u32 ctrn_ctts_to_index(GF_TrackFragmentRunBox *ctrn, s32 ctts)
{
if (!(ctrn->flags & GF_ISOM_TRUN_CTS_OFFSET))
return 0;
if (!ctts) return 0;
if (ctrn->version) {
if (ctrn->ctso_multiplier) return ctrn_s32_to_index(ctts / ctrn->ctso_multiplier);
return ctrn_s32_to_index(ctts);
}
assert(ctts>0);
if (ctrn->ctso_multiplier) return ctrn_u32_to_index((u32)ctts / ctrn->ctso_multiplier);
/* NOTE(review): this branch calls ctrn_s32_to_index() with a u32 cast
 * while the multiplier branch above uses ctrn_u32_to_index(); looks
 * inconsistent -- confirm against upstream whether u32 was intended. */
return ctrn_s32_to_index((u32)ctts);
}
| 0
|
225,876
|
/*
 * Compute the serialized size of a DataEntryURN box.
 * When flag bit 1 (self-contained) is set the box carries no strings;
 * otherwise each present string adds its length plus a NUL terminator.
 */
GF_Err urn_box_size(GF_Box *s)
{
GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
if ( !(ptr->flags & 1)) {
if (ptr->nameURN) ptr->size += 1 + strlen(ptr->nameURN);
if (ptr->location) ptr->size += 1 + strlen(ptr->location);
}
return GF_OK;
}
| 0
|
344,750
|
/*
 * Split "host:port"/"host/path"-style strings in place.
 * Returns the current field (NUL-terminating it) and advances *cp past
 * the ':' or '/' delimiter, storing the delimiter in *delim when non-NULL.
 * A "[...]"-bracketed host is treated as one field. On the last field *cp
 * is set to NULL; returns NULL on malformed input.
 */
char *
hpdelim2(char **cp, char *delim)
{
	char *start, *end;

	if (cp == NULL || *cp == NULL)
		return NULL;
	start = *cp;
	if (*start == '[') {
		/* Bracketed address: the field runs to just past ']'. */
		end = strchr(start, ']');
		if (end == NULL)
			return NULL;
		end++;
	} else {
		end = strpbrk(start, ":/");
		if (end == NULL)
			end = start + strlen(start);	/* last field */
	}
	if (*end == '\0') {
		*cp = NULL;	/* no more fields */
		return start;
	}
	if (*end != ':' && *end != '/')
		return NULL;	/* garbage after a bracketed host */
	if (delim != NULL)
		*delim = *end;
	*end = '\0';		/* terminate this field */
	*cp = end + 1;
	return start;
}
| 0
|
384,292
|
/*
 * Tear down the top-level malloc-based allocator for a Ghostscript
 * context: optionally dump heap statistics, finalize the library
 * context, then release the allocator itself.
 */
gs_malloc_release(gs_memory_t *mem)
{
gs_malloc_memory_t * malloc_memory_default;
if (mem == NULL)
return;
/* Use gs_debug['a'] if gs_debug[':'] is set to dump the heap stats */
if (gs_debug[':']) {
void *temp;
char save_debug_a = gs_debug['a'];
/* Temporarily force 'a' debugging so the allocation below prints stats. */
gs_debug['a'] = 1;
temp = (char *)gs_alloc_bytes_immovable(mem, 8, "gs_malloc_release");
gs_debug['a'] = save_debug_a;
gs_free_object(mem, temp, "gs_malloc_release");
}
#ifdef USE_RETRY_MEMORY_WRAPPER
/* Unwrap the retry wrapper to reach the underlying malloc allocator. */
malloc_memory_default = gs_malloc_unwrap(mem);
#else
malloc_memory_default = (gs_malloc_memory_t *)mem;
#endif
gs_lib_ctx_fin((gs_memory_t *)malloc_memory_default);
gs_malloc_memory_release(malloc_memory_default);
}
| 0
|
90,156
|
// One-time setup: fetch the initial CrOS network state snapshot.
void Init() {
VLOG(1) << "Getting initial CrOS network info.";
UpdateSystemInfo();
}
| 0
|
482,682
|
/*
 * Decode an FLI/FLC BRUN (byte-run RLE) compressed frame from `data`
 * into `dest`. Each row consists of packets: a count byte > 0x7f means a
 * literal run of (0x100 - count) bytes; otherwise a replicate run of
 * `count` copies of the next byte. Returns FALSE when a run would
 * overflow the row width.
 * NOTE(review): a replicate packet with count == 0 consumes input but
 * leaves `row` unchanged -- presumably invalid in well-formed streams;
 * confirm whether such packets can stall progress on crafted input.
 */
flx_decode_brun (GstFlxDec * flxdec, guchar * data, guchar * dest)
{
gulong count, lines, row;
guchar x;
g_return_val_if_fail (flxdec != NULL, FALSE);
lines = flxdec->hdr.height;
while (lines--) {
/* packet count.
* should not be used anymore, since the flc format can
* contain more then 255 RLE packets. we use the frame
* width instead.
*/
data++;
row = flxdec->hdr.width;
while (row) {
count = *data++;
if (count > 0x7f) {
/* literal run */
count = 0x100 - count;
/* bounds check: the run must fit in the remaining row */
if ((glong) row - (glong) count < 0) {
GST_ERROR_OBJECT (flxdec, "Invalid BRUN packet detected.");
return FALSE;
}
row -= count;
while (count--)
*dest++ = *data++;
} else {
if ((glong) row - (glong) count < 0) {
GST_ERROR_OBJECT (flxdec, "Invalid BRUN packet detected.");
return FALSE;
}
/* replicate run */
row -= count;
x = *data++;
while (count--)
*dest++ = x;
}
}
}
return TRUE;
}
| 0
|
338,237
|
// Try to decode `code` as a SIMD extract-lane instruction.
// On a match, builds the SIMDExtract expression (reading the lane index
// from the stream and popping the vector operand) and returns true;
// otherwise returns false without consuming anything.
bool WasmBinaryBuilder::maybeVisitSIMDExtract(Expression*& out, uint32_t code) {
  SIMDExtractOp op;
  size_t lanes;
  switch (code) {
    case BinaryConsts::I8x16ExtractLaneS:
      op = ExtractLaneSVecI8x16;
      lanes = 16;
      break;
    case BinaryConsts::I8x16ExtractLaneU:
      op = ExtractLaneUVecI8x16;
      lanes = 16;
      break;
    case BinaryConsts::I16x8ExtractLaneS:
      op = ExtractLaneSVecI16x8;
      lanes = 8;
      break;
    case BinaryConsts::I16x8ExtractLaneU:
      op = ExtractLaneUVecI16x8;
      lanes = 8;
      break;
    case BinaryConsts::I32x4ExtractLane:
      op = ExtractLaneVecI32x4;
      lanes = 4;
      break;
    case BinaryConsts::I64x2ExtractLane:
      op = ExtractLaneVecI64x2;
      lanes = 2;
      break;
    case BinaryConsts::F32x4ExtractLane:
      op = ExtractLaneVecF32x4;
      lanes = 4;
      break;
    case BinaryConsts::F64x2ExtractLane:
      op = ExtractLaneVecF64x2;
      lanes = 2;
      break;
    default:
      return false;
  }
  auto* curr = allocator.alloc<SIMDExtract>();
  curr->op = op;
  curr->index = getLaneIndex(lanes);
  curr->vec = popNonVoidExpression();
  curr->finalize();
  out = curr;
  return true;
}
| 0
|
265,457
|
/*
 * Split path `str` into `count` '/'-separated components, storing a
 * freshly strdup()ed copy of each in tokens[]. The root path "/" yields
 * the single token "/".
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL when
 * `str` has fewer than `count` components (previously this case passed
 * NULL to strdup(), which is undefined behavior).
 * On any error all partially-allocated tokens are freed; on success the
 * caller owns and must free every tokens[i].
 */
static int sqfs_tokenize(char **tokens, int count, const char *str)
{
	int i, j, ret = 0;
	char *aux, *strc;

	strc = strdup(str);
	if (!strc)
		return -ENOMEM;

	if (!strcmp(strc, "/")) {
		tokens[0] = strdup(strc);
		if (!tokens[0]) {
			ret = -ENOMEM;
			goto free_strc;
		}
	} else {
		for (j = 0; j < count; j++) {
			/* strtok mutates strc; NULL means we ran out of
			 * components before filling `count` slots. */
			aux = strtok(!j ? strc : NULL, "/");
			if (!aux) {
				for (i = 0; i < j; i++)
					free(tokens[i]);
				ret = -EINVAL;
				goto free_strc;
			}
			tokens[j] = strdup(aux);
			if (!tokens[j]) {
				for (i = 0; i < j; i++)
					free(tokens[i]);
				ret = -ENOMEM;
				goto free_strc;
			}
		}
	}

free_strc:
	free(strc);

	return ret;
}
| 0
|
421,391
|
void jsC_dumpfunction(js_State *J, js_Function *F)
{
js_Instruction *p = F->code;
js_Instruction *end = F->code + F->codelen;
char *s;
double n;
int i;
minify = 0;
printf("%s(%d)\n", F->name, F->numparams);
if (F->strict) printf("\tstrict\n");
if (F->lightweight) printf("\tlightweight\n");
if (F->arguments) printf("\targuments\n");
printf("\tsource %s:%d\n", F->filename, F->line);
for (i = 0; i < F->funlen; ++i)
printf("\tfunction %d %s\n", i, F->funtab[i]->name);
for (i = 0; i < F->varlen; ++i)
printf("\tlocal %d %s\n", i + 1, F->vartab[i]);
printf("{\n");
while (p < end) {
int ln = *p++;
int c = *p++;
printf("%5d(%3d): ", (int)(p - F->code) - 2, ln);
ps(opname[c]);
switch (c) {
case OP_INTEGER:
printf(" %ld", (long)((*p++) - 32768));
break;
case OP_NUMBER:
memcpy(&n, p, sizeof(n));
p += sizeof(n) / sizeof(*p);
printf(" %.9g", n);
break;
case OP_STRING:
memcpy(&s, p, sizeof(s));
p += sizeof(s) / sizeof(*p);
pc(' ');
pstr(s);
break;
case OP_NEWREGEXP:
pc(' ');
memcpy(&s, p, sizeof(s));
p += sizeof(s) / sizeof(*p);
pregexp(s, *p++);
break;
case OP_GETVAR:
case OP_HASVAR:
case OP_SETVAR:
case OP_DELVAR:
case OP_GETPROP_S:
case OP_SETPROP_S:
case OP_DELPROP_S:
case OP_CATCH:
memcpy(&s, p, sizeof(s));
p += sizeof(s) / sizeof(*p);
pc(' ');
ps(s);
break;
case OP_GETLOCAL:
case OP_SETLOCAL:
case OP_DELLOCAL:
printf(" %s", F->vartab[*p++ - 1]);
break;
case OP_CLOSURE:
case OP_CALL:
case OP_NEW:
case OP_JUMP:
case OP_JTRUE:
case OP_JFALSE:
case OP_JCASE:
case OP_TRY:
printf(" %ld", (long)*p++);
break;
}
nl();
}
printf("}\n");
for (i = 0; i < F->funlen; ++i) {
if (F->funtab[i] != F) {
printf("function %d ", i);
jsC_dumpfunction(J, F->funtab[i]);
}
}
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.