idx
int64 | func
string | target
int64 |
|---|---|---|
336,531
|
/* Public entry point: initialize a SPICE server instance on the given
 * event-loop core interface.  When the configuration supplies no
 * renderers or video codecs, the built-in defaults are installed so the
 * server is always usable.  Returns the result of do_spice_init(). */
SPICE_GNUC_VISIBLE int spice_server_init(SpiceServer *reds, SpiceCoreInterface *core)
{
    int ret = do_spice_init(reds, core);

    /* Fall back to the default renderer if none was configured. */
    if (reds->config->renderers->len == 0)
        reds_add_renderer(reds, default_renderer);

    /* Likewise for the video codec preference list. */
    if (reds->config->video_codecs->len == 0)
        reds_set_video_codecs_from_string(reds, default_video_codecs, NULL);

    return ret;
}
| 0
|
395,092
|
/*
 * Mark every window that displays buffer "buf" to be redrawn later with
 * redraw type "type", and flag those windows' status lines for update.
 * Does nothing while the 'wildmenu' completion is showing, because a
 * redraw would make the completion display disappear.
 */
redraw_buf_and_status_later(buf_T *buf, int type)
{
win_T *wp;
#ifdef FEAT_WILDMENU
if (wild_menu_showing != 0)
// Don't redraw while the command line completion is displayed, it
// would disappear.
return;
#endif
// Walk all windows and schedule a redraw for the ones showing "buf".
FOR_ALL_WINDOWS(wp)
{
if (wp->w_buffer == buf)
{
redraw_win_later(wp, type);
wp->w_redr_status = TRUE;	// status line must be refreshed too
}
}
}
| 0
|
202,888
|
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
u8 *tail;
int nfrags;
int esph_offset;
struct page *page;
struct sk_buff *trailer;
int tailen = esp->tailen;
/* this is non-NULL only with TCP/UDP Encapsulation */
if (x->encap) {
int err = esp_output_encap(x, skb, esp);
if (err < 0)
return err;
}
if (!skb_cloned(skb)) {
if (tailen <= skb_tailroom(skb)) {
nfrags = 1;
trailer = skb;
tail = skb_tail_pointer(trailer);
goto skip_cow;
} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
&& !skb_has_frag_list(skb)) {
int allocsize;
struct sock *sk = skb->sk;
struct page_frag *pfrag = &x->xfrag;
esp->inplace = false;
allocsize = ALIGN(tailen, L1_CACHE_BYTES);
spin_lock_bh(&x->lock);
if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
spin_unlock_bh(&x->lock);
goto cow;
}
page = pfrag->page;
get_page(page);
tail = page_address(page) + pfrag->offset;
esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
tailen);
skb_shinfo(skb)->nr_frags = ++nfrags;
pfrag->offset = pfrag->offset + allocsize;
spin_unlock_bh(&x->lock);
nfrags++;
skb->len += tailen;
skb->data_len += tailen;
skb->truesize += tailen;
if (sk && sk_fullsock(sk))
refcount_add(tailen, &sk->sk_wmem_alloc);
goto out;
}
}
cow:
esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
nfrags = skb_cow_data(skb, tailen, &trailer);
if (nfrags < 0)
goto out;
tail = skb_tail_pointer(trailer);
esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
skip_cow:
esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
pskb_put(skb, trailer, tailen);
out:
return nfrags;
}
| 1
|
253,727
|
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	/* The op has been consumed; clear its init flag for the next use. */
	op->init = 0;

	if (!dst)
		return;

	/* If the op wrote into the destination workarea DMA buffer, drain
	 * that buffer; otherwise account the written bytes directly against
	 * the destination scatter-gather workarea. */
	if (op->dst.u.dma.address == dst->dm_wa.dma.address)
		ccp_empty_queue_buf(dst);
	else
		ccp_update_sg_workarea(&dst->sg_wa, op->dst.u.dma.length);
}
| 0
|
484,054
|
/* Test-fixture teardown: close the secure channel under test, then release
 * the dummy security policy and the stub connection created during setup.
 * The order mirrors reverse setup order. */
teardown_secureChannel(void) {
UA_SecureChannel_close(&testChannel);
dummyPolicy.clear(&dummyPolicy);
testingConnection.close(&testingConnection);
}
| 0
|
213,998
|
FindEmptyObjectSlot(
    TPMI_DH_OBJECT      *handle        // OUT: (optional) handle of the slot found
    )
{
    UINT32           i;
    OBJECT          *object;

    // Linear scan for the first unoccupied transient object slot.
    for(i = 0; i < MAX_LOADED_OBJECTS; i++)
    {
        object = &s_objects[i];
        if(object->attributes.occupied == CLEAR)
        {
            if(handle)
                *handle = i + TRANSIENT_FIRST;
            // Bug fix: initialize the ENTIRE object, not just its
            // attributes.  Clearing only OBJECT_ATTRIBUTES leaves stale key
            // material and state from a previously evicted object in the
            // recycled slot (uninitialized-data class of bug fixed upstream
            // in the TPM reference code / libtpms, cf. CVE-2021-3505).
            MemorySet(object, 0, sizeof(*object));
            return object;
        }
    }
    // No free slot available.
    return NULL;
}
| 1
|
380,954
|
/*
 * Insert-mode handling of CTRL-T (add one shiftwidth of indent) and
 * CTRL-D (remove one shiftwidth).  "c" is the typed key (Ctrl_T/Ctrl_D),
 * "lastc" the character typed just before it: "0 CTRL-D" and "^ CTRL-D"
 * remove all indent, and '^' additionally remembers the current indent
 * in old_indent so it can be restored later.
 * Also records the key for redo and resets the auto/smart-indent flags.
 */
ins_shift(int c, int lastc)
{
if (stop_arrow() == FAIL)
return;
AppendCharToRedobuff(c);	// make the change repeatable with "."
/*
 * 0^D and ^^D: remove all indent.
 */
if (c == Ctrl_D && (lastc == '0' || lastc == '^')
&& curwin->w_cursor.col > 0)
{
--curwin->w_cursor.col;
(void)del_char(FALSE); // delete the '^' or '0'
// In Replace mode, restore the characters that '^' or '0' replaced.
if (State & REPLACE_FLAG)
replace_pop_ins();
if (lastc == '^')
old_indent = get_indent(); // remember curr. indent
change_indent(INDENT_SET, 0, TRUE, 0, TRUE);
}
else
change_indent(c == Ctrl_D ? INDENT_DEC : INDENT_INC, 0, TRUE, 0, TRUE);
// Typing ^D/^T on a line with non-blank text cancels pending auto-indent.
if (did_ai && *skipwhite(ml_get_curline()) != NUL)
did_ai = FALSE;
did_si = FALSE;
can_si = FALSE;
can_si_back = FALSE;
can_cindent = FALSE; // no cindenting after ^D or ^T
}
| 0
|
215,921
|
/*
 * Boyer-Moore search for the kwset's single pattern in text[0..size),
 * optionally through the character translation table "trans".
 * Uses the delta1 table (d1) for fast skips, bm_delta2_search() for the
 * delta2/mismatch step, and a memchr-based fallback when delta1 skips
 * stop advancing enough.  Returns the offset of the first match, or -1
 * (as the return type's "not found" value) when there is none.
 *
 * NOTE(review): this is the grep/gnulib kwset Boyer-Moore inner loop.
 * The unrolled d1 skips read tp[-1] before bounds are re-checked — the
 * code pattern that harbored a heap read overrun in GNU grep -F
 * (CVE-2015-1345).  Confirm this copy corresponds to the fixed upstream
 * revision before reuse.
 */
bmexec_trans (kwset_t kwset, char const *text, size_t size)
{
unsigned char const *d1;
char const *ep, *sp, *tp;
int d;
int len = kwset->mind;
char const *trans = kwset->trans;
if (len == 0)
return 0;
if (len > size)
return -1;
if (len == 1)
{
// Single-byte pattern: plain (translated) memchr is fastest.
tp = memchr_kwset (text, size, kwset);
return tp ? tp - text : -1;
}
d1 = kwset->delta;
sp = kwset->target + len;
tp = text + len;
char gc1 = kwset->gc1;
char gc2 = kwset->gc2;
/* Significance of 12: 1 (initial offset) + 10 (skip loop) + 1 (md2). */
if (size > 12 * len)
/* 11 is not a bug, the initial offset happens only once. */
for (ep = text + size - 11 * len; tp <= ep; )
{
char const *tp0 = tp;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
/* As a heuristic, prefer memchr to seeking by
delta1 when the latter doesn't advance much. */
int advance_heuristic = 16 * sizeof (long);
if (advance_heuristic <= tp - tp0)
goto big_advance;
tp--;
tp = memchr_kwset (tp, text + size - tp, kwset);
if (! tp)
return -1;
tp++;
}
}
}
// d1 reached 0: possible match end here; verify with the delta2 step.
if (bm_delta2_search (&tp, ep, sp, len, trans, gc1, gc2, d1, kwset))
return tp - text;
big_advance:;
}
/* Now we have only a few characters left to search. We
carefully avoid ever producing an out-of-bounds pointer. */
ep = text + size;
d = d1[U(tp[-1])];
while (d <= ep - tp)
{
d = d1[U((tp += d)[-1])];
if (d != 0)
continue;
if (bm_delta2_search (&tp, ep, sp, len, trans, gc1, gc2, NULL, kwset))
return tp - text;
}
return -1;
}
| 1
|
409,484
|
/*
 * Set the terminal foreground color to the 24-bit value "rgb" by emitting
 * the T_8F (set foreground RGB) terminal escape sequence.
 */
term_fg_rgb_color(guicolor_T rgb)
{
term_rgb_color(T_8F, rgb);
}
| 0
|
206,555
|
/* Read up to 'len' bytes from the in-memory gd I/O context into 'buf'.
 * Returns the number of bytes copied, or EOF when the read position is at
 * (or past) the end of the stored data. */
static int dynamicGetbuf (gdIOCtxPtr ctx, void *buf, int len)
{
	int rlen, remain;
	dpIOCtxPtr dctx;
	dynamicPtr *dp;

	dctx = (dpIOCtxPtr) ctx;
	dp = dctx->dp;

	remain = dp->logicalSize - dp->pos;
	if (remain >= len) {
		rlen = len;
	} else {
		/* Bug fix: 'remain' can be NEGATIVE when dp->pos has been
		 * seeked past logicalSize.  The old 'remain == 0' test let a
		 * negative value through, so memcpy() below received a
		 * negative count converted to a huge size_t — an
		 * out-of-bounds read (fixed upstream in libgd).  Treat any
		 * non-positive remainder as end-of-data. */
		if (remain <= 0) {
			return EOF;
		}
		rlen = remain;
	}
	memcpy(buf, (void *) ((char *) dp->data + dp->pos), rlen);
	dp->pos += rlen;
	return rlen;
}
| 1
|
336,685
|
/* Async-read completion handler for the SpiceLinkHeader.
 * Converts the header fields from little-endian, rejects clients with an
 * unsupported protocol major version, validates the advertised message
 * size, then starts the async read of the link message body.
 * On any validation failure the link is torn down. */
static void reds_handle_read_header_done(void *opaque)
{
RedLinkInfo *link = (RedLinkInfo *)opaque;
SpiceLinkHeader *header = &link->link_header;
header->major_version = GUINT32_FROM_LE(header->major_version);
header->minor_version = GUINT32_FROM_LE(header->minor_version);
header->size = GUINT32_FROM_LE(header->size);
if (header->major_version != SPICE_VERSION_MAJOR) {
// Only notify the client when it sent a plausible version at all.
if (header->major_version > 0) {
reds_send_link_error(link, SPICE_LINK_ERR_VERSION_MISMATCH);
}
spice_warning("version mismatch");
reds_link_free(link);
return;
}
/* the check for 4096 is to avoid clients to cause arbitrary big memory allocations */
if (header->size < sizeof(SpiceLinkMess) || header->size > 4096) {
reds_send_link_error(link, SPICE_LINK_ERR_INVALID_DATA);
spice_warning("bad size %u", header->size);
reds_link_free(link);
return;
}
// Size validated above, so this allocation is bounded.
link->link_mess = (SpiceLinkMess*) g_malloc(header->size);
red_stream_async_read(link->stream,
(uint8_t *)link->link_mess,
header->size,
reds_handle_read_link_done,
link);
}
| 0
|
459,111
|
/* Resolve the class a filter is attached to: fills *cl with the class
 * handle found for 'parent' within qdisc 'q'.  Returns 0 on success (or
 * when no class lookup is needed), -ENOENT when the class is missing. */
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;

	/* Shared-block filters are not attached to any qdisc/class. */
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* A zero minor in 'parent' means the filter hangs off the qdisc
	 * itself, not a class — nothing to look up. */
	if (!TC_H_MIN(parent))
		return 0;

	cops = q->ops->cl_ops;
	*cl = cops->find(q, parent);
	if (*cl == 0) {
		NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
		return -ENOENT;
	}
	return 0;
}
| 0
|
300,782
|
/* Convert a message length in bytes to the number of flow-control blocks
 * it is accounted as (integer division plus one, so even an exact
 * multiple costs an extra block — mirrors the original formula). */
static u16 tsk_blocks(int len)
{
	int whole_blocks = len / FLOWCTL_BLK_SZ;

	return whole_blocks + 1;
}
| 0
|
253,594
|
/* Drop the lease reference held by @cfid, if it has one.  Idempotent:
 * the has_lease flag guards against a double put.  The "_locked" suffix
 * indicates the caller presumably holds the protecting lock — confirm
 * against callers. */
void close_cached_dir_lease_locked(struct cached_fid *cfid)
{
	if (!cfid->has_lease)
		return;

	cfid->has_lease = false;
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}
| 0
|
505,651
|
/*
 * Drain and discard the auxiliary data stream attached to the parser
 * (the payload carried by some SMTP commands).
 * Returns 1 when the data stream is fully consumed or absent,
 * 0 when more input is required, and -1 on error with the parser's
 * error_code/error fields set accordingly.
 */
smtp_command_parse_finish_data(struct smtp_command_parser *parser)
{
const unsigned char *data;
size_t size;
int ret;
parser->error_code = SMTP_COMMAND_PARSE_ERROR_NONE;
parser->error = NULL;
if (parser->data == NULL)
return 1;
if (parser->data->eof) {
i_stream_unref(&parser->data);
return 1;
}
// Skip whatever payload is currently buffered.
while ((ret = i_stream_read_data(parser->data, &data, &size, 0)) > 0)
i_stream_skip(parser->data, size);
if (ret == 0 || parser->data->stream_errno != 0) {
switch (parser->data->stream_errno) {
case 0:
// No stream error: simply need more input.
return 0;
case EIO:
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_BROKEN_COMMAND,
"Invalid command data");
break;
case EMSGSIZE:
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_DATA_TOO_LARGE,
"Command data too large");
break;
default:
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_BROKEN_STREAM,
"Stream error while skipping command data: "
"%s", i_stream_get_error(parser->data));
}
return -1;
}
// Reached EOF cleanly: release the stream.
i_stream_unref(&parser->data);
return 1;
}
| 0
|
274,652
|
/* Put a bold "no layers loaded" warning into the status bar's distance
 * field and refresh the status bar. */
static void show_no_layers_warning (void) {
	gchar *msg = g_new(gchar, MAX_DISTLEN);

	/* Reserve 7 bytes of the target buffer for the surrounding
	 * "<b>...</b>" markup added below. */
	utf8_strncpy(msg, _("No layers are currently loaded. A layer must be loaded first."), MAX_DISTLEN - 7);
	utf8_snprintf(screen.statusbar.diststr, MAX_DISTLEN, "<b>%s</b>", msg);
	g_free(msg);

	callbacks_update_statusbar();
}
| 0
|
386,484
|
/**
 * Dispatches an extended-data (XDATA) group code / value pair to the
 * matching creation-interface callback.
 * @return true when the current group code was an XDATA code and was
 *         handled, false otherwise.
 */
bool DL_Dxf::handleXData(DL_CreationInterface* creationInterface) {
    // 1001 introduces the application name owning the following XDATA,
    // so it must be tested before the generic 1000-1009 string range.
    if (groupCode==1001) {
        creationInterface->addXDataApp(groupValue);
        return true;
    }

    // 1000-1009 (1001 excluded above): string values.
    if (groupCode>=1000 && groupCode<=1009) {
        creationInterface->addXDataString(groupCode, groupValue);
        return true;
    }

    // 1010-1059: real (floating point) values.
    if (groupCode>=1010 && groupCode<=1059) {
        creationInterface->addXDataReal(groupCode, toReal(groupValue));
        return true;
    }

    // 1060-1071: integer values (1071 was a separate branch originally,
    // but it invokes the exact same callback).
    if (groupCode>=1060 && groupCode<=1071) {
        creationInterface->addXDataInt(groupCode, toInt(groupValue));
        return true;
    }

    // Not an XDATA group code.
    return false;
}
| 0
|
225,467
|
// Name-based overload: resolves `control_node_name` to its NodeDef and
// delegates to the NodeDef-based CanDedupControlWithRegularInput.
// A name that does not resolve to a node is never deduplicable.
bool CanDedupControlWithRegularInput(const MutableGraphView& graph,
                                     absl::string_view control_node_name) {
  NodeDef* control_node = graph.GetNode(control_node_name);
  return control_node != nullptr &&
         CanDedupControlWithRegularInput(graph, *control_node);
}
| 0
|
235,654
|
/*
 * Duplicate the parsed DNS packet *p into memory allocated from 'pool',
 * storing the copy in *p_dst.  Bits in 'options' (PJ_DNS_NO_QD/ANS/NS/AR)
 * select sections to omit from the copy.  The copy's section counts are
 * incremented only as each entry is successfully duplicated, so the
 * header stays consistent even if copying stops early.
 */
PJ_DEF(void) pj_dns_packet_dup(pj_pool_t *pool,
const pj_dns_parsed_packet*p,
unsigned options,
pj_dns_parsed_packet **p_dst)
{
pj_dns_parsed_packet *dst;
unsigned nametable_count = 0;
#if PJ_DNS_MAX_NAMES_IN_NAMETABLE
pj_str_t nametable[PJ_DNS_MAX_NAMES_IN_NAMETABLE];
#else
// Without a nametable, copy_query/copy_rr duplicate names individually.
pj_str_t *nametable = NULL;
#endif
unsigned i;
PJ_ASSERT_ON_FAIL(pool && p && p_dst, return);
/* Create packet and copy header */
*p_dst = dst = PJ_POOL_ZALLOC_T(pool, pj_dns_parsed_packet);
pj_memcpy(&dst->hdr, &p->hdr, sizeof(p->hdr));
/* Initialize section counts in the target packet to zero.
* If memory allocation fails during copying process, the target packet
* should have a correct section counts.
*/
dst->hdr.qdcount = 0;
dst->hdr.anscount = 0;
dst->hdr.nscount = 0;
dst->hdr.arcount = 0;
/* Copy query section */
if (p->hdr.qdcount && (options & PJ_DNS_NO_QD)==0) {
dst->q = (pj_dns_parsed_query*)
pj_pool_alloc(pool, p->hdr.qdcount *
sizeof(pj_dns_parsed_query));
for (i=0; i<p->hdr.qdcount; ++i) {
copy_query(pool, &dst->q[i], &p->q[i],
&nametable_count, nametable);
++dst->hdr.qdcount;
}
}
/* Copy answer section */
if (p->hdr.anscount && (options & PJ_DNS_NO_ANS)==0) {
dst->ans = (pj_dns_parsed_rr*)
pj_pool_alloc(pool, p->hdr.anscount *
sizeof(pj_dns_parsed_rr));
for (i=0; i<p->hdr.anscount; ++i) {
copy_rr(pool, &dst->ans[i], &p->ans[i],
&nametable_count, nametable);
++dst->hdr.anscount;
}
}
/* Copy NS section */
if (p->hdr.nscount && (options & PJ_DNS_NO_NS)==0) {
dst->ns = (pj_dns_parsed_rr*)
pj_pool_alloc(pool, p->hdr.nscount *
sizeof(pj_dns_parsed_rr));
for (i=0; i<p->hdr.nscount; ++i) {
copy_rr(pool, &dst->ns[i], &p->ns[i],
&nametable_count, nametable);
++dst->hdr.nscount;
}
}
/* Copy additional info section */
if (p->hdr.arcount && (options & PJ_DNS_NO_AR)==0) {
dst->arr = (pj_dns_parsed_rr*)
pj_pool_alloc(pool, p->hdr.arcount *
sizeof(pj_dns_parsed_rr));
for (i=0; i<p->hdr.arcount; ++i) {
copy_rr(pool, &dst->arr[i], &p->arr[i],
&nametable_count, nametable);
++dst->hdr.arcount;
}
}
}
| 0
|
476,095
|
/* Gadget-driver unbind callback: delegates to __composite_unbind.
 * NOTE(review): the boolean argument presumably selects a full unbind
 * (including the composite device itself) — confirm against
 * __composite_unbind's parameter. */
static void composite_unbind(struct usb_gadget *gadget)
{
__composite_unbind(gadget, true);
}
| 0
|
261,769
|
/*
 * Snapshot the VM's active frame into 'frame' so it can outlive the
 * native stack (e.g. when a function frame escapes).  All frame values
 * are copied into storage trailing the saved frame, and the
 * arguments/local/temp pointer tables are rebuilt to point into that
 * copy.  'pc' is recorded as the instruction to resume at.
 * Always returns NJS_OK.
 */
njs_function_frame_save(njs_vm_t *vm, njs_frame_t *frame, u_char *pc)
{
size_t value_count, n;
njs_value_t *start, *end, *p, **new, *value, **local;
njs_function_t *function;
njs_native_frame_t *active, *native;
// Shallow-copy the whole frame struct, then detach it from the chain.
*frame = *vm->active_frame;
frame->previous_active_frame = NULL;
native = &frame->native;
native->size = 0;
native->free = NULL;
native->free_size = 0;
active = &vm->active_frame->native;
value_count = njs_function_frame_value_count(active);
function = active->function;
// Layout after the frame header: pointer table, then the value storage.
new = (njs_value_t **) ((u_char *) native + NJS_FRAME_SIZE);
value = (njs_value_t *) (new + value_count
+ function->u.lambda->temp);
native->arguments = value;
native->arguments_offset = value + (function->args_offset - 1);
native->local = new + njs_function_frame_args_count(active);
native->temp = new + value_count;
native->pc = pc;
// Copy every value from the active frame and point the table at copies.
start = njs_function_frame_values(active, &end);
p = native->arguments;
while (start < end) {
*p = *start++;
*new++ = p++;
}
/* Move all arguments. */
p = native->arguments;
local = native->local + function->args_offset;
for (n = 0; n < function->args_count; n++) {
if (!njs_is_valid(p)) {
// Missing argument slots become 'undefined'.
njs_set_undefined(p);
}
*local++ = p++;
}
return NJS_OK;
}
| 0
|
446,117
|
/* Read one ATUSB register over the control endpoint.
 * Returns the register value (0..255) on success or a negative errno.
 * A heap bounce buffer is used because USB control transfers must not
 * target stack memory. */
static int atusb_read_reg(struct atusb *atusb, u8 reg)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	u8 *buffer;
	int ret;

	buffer = kmalloc(1, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	dev_dbg(&usb_dev->dev, "%s: reg = 0x%x\n", __func__, reg);
	ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
				ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
				0, reg, buffer, 1, 1000);

	/* On success, return the byte read; otherwise propagate the error.
	 * Single exit so the buffer is always freed. */
	if (ret >= 0)
		ret = buffer[0];

	kfree(buffer);
	return ret;
}
| 0
|
512,341
|
// Clone this Item_field_row in the memory arena of the given THD
// (standard Item copy hook used by the optimizer).
Item *get_copy(THD *thd)
{ return get_item_copy<Item_field_row>(thd, this); }
| 0
|
508,329
|
/*
  Fill the fields listed in 'ptr' with the corresponding values from
  'values' (used for INSERT/UPDATE style assignments).

  @param thd            thread handler
  @param table          table all fields belong to
  @param ptr            NULL-terminated array of fields to fill
  @param values         list of source Items, parallel to 'ptr'
  @param ignore_errors  note: not referenced in this body — errors are
                        surfaced via thd->is_error()/save_in_field below
  @param use_value      TRUE to copy the Item's value directly
                        (save_val) instead of converting via
                        save_in_field

  @return FALSE on success, TRUE on failure (also restores
          abort_on_warning and the auto_increment flag on failure)
*/
fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
bool ignore_errors, bool use_value)
{
List_iterator_fast<Item> v(values);
List<TABLE> tbl_list;
Item *value;
Field *field;
bool abort_on_warning_saved= thd->abort_on_warning;
uint autoinc_index= table->next_number_field
? table->next_number_field->field_index
: ~0U;
DBUG_ENTER("fill_record");
if (!*ptr)
{
/* No fields to update, quite strange!*/
DBUG_RETURN(0);
}
/*
On INSERT or UPDATE fields are checked to be from the same table,
thus we safely can take table from the first field.
*/
DBUG_ASSERT((*ptr)->table == table);
/*
Reset the table->auto_increment_field_not_null as it is valid for
only one row.
*/
table->auto_increment_field_not_null= FALSE;
while ((field = *ptr++) && ! thd->is_error())
{
/* Ensure that all fields are from the same table */
DBUG_ASSERT(field->table == table);
value=v++;
if (field->field_index == autoinc_index)
table->auto_increment_field_not_null= TRUE;
// Warn when a non-default value is assigned to a virtual column.
if (field->vcol_info &&
!value->vcol_assignment_allowed_value() &&
table->s->table_category != TABLE_CATEGORY_TEMPORARY)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN,
ER_THD(thd, ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN),
field->field_name, table->s->table_name.str);
}
if (use_value)
value->save_val(field);
else
if (value->save_in_field(field, 0) < 0)
goto err;
field->set_has_explicit_value();
}
/* Update virtual fields */
thd->abort_on_warning= FALSE;
if (table->vfield &&
table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE))
goto err;
thd->abort_on_warning= abort_on_warning_saved;
DBUG_RETURN(thd->is_error());
err:
// Error path: restore saved state before reporting failure.
thd->abort_on_warning= abort_on_warning_saved;
table->auto_increment_field_not_null= FALSE;
DBUG_RETURN(TRUE);
}
| 0
|
293,768
|
/* Allocate and populate a 64-bit RBinAddr entry-point descriptor.
 * Returns NULL when allocation fails. */
static RBinAddr *newEntry(ut64 haddr, ut64 vaddr, int type) {
	RBinAddr *entry = R_NEW0 (RBinAddr);
	if (entry) {
		entry->paddr = haddr;
		entry->vaddr = vaddr;
		entry->hpaddr = haddr; /* header offset mirrors the physical address */
		entry->bits = 64;
		entry->type = type;
	}
	return entry;
}
| 0
|
432,303
|
/* Release a mapping obtained from address_space_map().
 * For direct RAM mappings, mark the accessed range dirty if it was
 * written.  For the single shared bounce buffer, write the data back to
 * guest memory (when it was a write mapping) and free the buffer. */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
bool is_write, hwaddr access_len)
{
if (buffer != as->uc->bounce.buffer) {
// Direct-mapped host memory: only dirty tracking is needed.
MemoryRegion *mr;
ram_addr_t addr1;
mr = memory_region_from_host(as->uc, buffer, &addr1);
assert(mr != NULL);
if (is_write) {
invalidate_and_set_dirty(mr, addr1, access_len);
}
return;
}
// Bounce buffer: flush written bytes back, then release it.
if (is_write) {
address_space_write(as, as->uc->bounce.addr, MEMTXATTRS_UNSPECIFIED,
as->uc->bounce.buffer, access_len);
}
qemu_vfree(as->uc->bounce.buffer);
as->uc->bounce.buffer = NULL;
}
| 0
|
90,757
|
// Callback for the global temporary-storage quota lookup performed
// during eviction.  On failure, reports the error through the pending
// usage-and-quota callback and aborts the eviction flow; on success,
// records the quota and continues by querying available disk space.
void QuotaManager::DidGetGlobalQuotaForEviction(
QuotaStatusCode status,
StorageType type,
int64 quota) {
DCHECK_EQ(type, kStorageTypeTemporary);
if (status != kQuotaStatusOk) {
// Propagate the failure (zeros for all usage/quota figures).
eviction_context_.get_usage_and_quota_callback->Run(
status, 0, 0, 0, 0);
eviction_context_.get_usage_and_quota_callback.reset();
return;
}
eviction_context_.quota = quota;
GetAvailableSpace(callback_factory_.
NewCallback(&QuotaManager::DidGetAvailableSpaceForEviction));
}
| 0
|
482,488
|
/* Insert 'rule' into the back-translation rule chain selected by the
 * hash of its dot pattern (direction = 1, dotslen > 1).  Chains are kept
 * ordered by decreasing total length (dots + chars); among rules of
 * equal length, a non-CTO_Always rule is inserted ahead of CTO_Always
 * rules so the more specific opcode matches first.  Swap-case rules
 * (CTO_SwapCc) are never entered into the chain. */
addBackwardRuleWithMultipleCells(widechar *cells, int dotslen,
TranslationTableOffset ruleOffset, TranslationTableRule *rule,
TranslationTableHeader *table) {
/* direction = 1, dotslen > 1 */
TranslationTableOffset *backRule = &table->backRules[_lou_stringHash(cells, 0, NULL)];
if (rule->opcode == CTO_SwapCc) return;
int ruleLength = dotslen + rule->charslen;
// Find the insertion point that preserves the ordering described above.
while (*backRule) {
TranslationTableRule *r = (TranslationTableRule *)&table->ruleArea[*backRule];
int rLength = r->dotslen + r->charslen;
if (ruleLength > rLength) break;
if (rLength == ruleLength)
if ((r->opcode == CTO_Always) && (rule->opcode != CTO_Always)) break;
backRule = &r->dotsnext;
}
// Splice the new rule into the singly linked chain.
rule->dotsnext = *backRule;
*backRule = ruleOffset;
}
| 0
|
229,344
|
// Runs a prepared kernel (KernelAndDevice) on the given eager op inputs
// and fills `retvals` with the produced tensor handles.
// `cancellation_manager` lets the caller abort the running kernel;
// `graph_collector`, when non-null, triggers collection of graphs after
// the run.  Returns an Internal error if the kernel produced a different
// number of outputs than `retvals` expects.
Status EagerKernelExecute(
EagerContext* ctx, const absl::InlinedVector<TensorHandle*, 4>& op_inputs,
const absl::optional<EagerFunctionParams>& eager_func_params,
const core::RefCountPtr<KernelAndDevice>& kernel,
GraphCollector* graph_collector, CancellationManager* cancellation_manager,
absl::Span<TensorHandle*> retvals,
const absl::optional<ManagedStackTrace>& stack_trace) {
profiler::TraceMe activity("EagerKernelExecute",
profiler::TraceMeLevel::kInfo);
std::vector<EagerKernelRet> outputs(1);
ExecuteNodeArgs inputs(op_inputs.size());
TF_RETURN_IF_ERROR(inputs.Init(ctx, op_inputs, kernel));
// TODO(apassos) figure out how to record stats for ops which are a part of
// functions.
// TODO(b/111859745): When we support recovering from kernel/device errors, we
// would need to call XlaDevice::EnsureDeviceContextOk() before using an XLA
// device. We don't call it now because it is an unneeded overhead (it
// acquires a lock) and we can't recover from errors anyway.
ScopedStepContainer* container = ctx->StepContainer();
// Coordination service is only available on non-mobile distributed builds.
CoordinationServiceAgent* coord_agent = nullptr;
#if !defined(IS_MOBILE_PLATFORM)
if (ctx->GetDistributedManager() != nullptr)
coord_agent = ctx->GetDistributedManager()->GetCoordinationServiceAgent();
#endif // !IS_MOBILE_PLATFORM
TF_RETURN_IF_ERROR(kernel->Run(container, inputs, &outputs,
cancellation_manager, eager_func_params,
stack_trace, coord_agent));
if (graph_collector != nullptr) {
CollectGraphs(ctx);
}
// Defensive check: the kernel must produce exactly as many outputs as
// the caller allocated retval slots for.
if (TF_PREDICT_FALSE(retvals.size() != outputs.size())) {
return errors::Internal(
"EagerKernelExecute returns a list of ", outputs.size(),
" tensors but ", retvals.size(),
" is expected. This should never "
"happen. Please file a bug with the TensorFlow team.");
}
return GetKernelOutputs(&outputs, retvals.size(), retvals.data(), ctx,
kernel.get(), eager_func_params);
}
| 0
|
294,618
|
/* Validate an "nth k-day of month" date specification (e.g. 3rd Tuesday
 * of 2001-05) against calendar reform date 'sg'.  A negative k
 * (day-of-week) is normalized by +7; a negative n (counting back from
 * the end of the month) is converted to its positive equivalent via a
 * Julian-day round trip.  On success returns 1 and fills rm/rn/rk with
 * the normalized month/nth/k and rjd/ns with the Julian day number and
 * calendar flag; returns 0 when the spec does not round-trip to the
 * same year/month/n/k. */
c_valid_nth_kday_p(int y, int m, int n, int k, double sg,
int *rm, int *rn, int *rk, int *rjd, int *ns)
{
int ns2, ry2, rm2, rn2, rk2;
if (k < 0)
k += 7;
if (n < 0) {
// Negative n: compute from the 1st k-day of the *next* month and
// step back n weeks, then verify we stayed in the requested month.
int t, ny, nm, rjd2;
t = y * 12 + m;
ny = DIV(t, 12);
nm = MOD(t, 12) + 1;
c_nth_kday_to_jd(ny, nm, 1, k, sg, &rjd2, &ns2);
c_jd_to_nth_kday(rjd2 + n * 7, sg, &ry2, &rm2, &rn2, &rk2);
if (ry2 != y || rm2 != m)
return 0;
n = rn2;
}
c_nth_kday_to_jd(y, m, n, k, sg, rjd, ns);
c_jd_to_nth_kday(*rjd, sg, &ry2, rm, rn, rk);
// Round-trip check: the converted JD must map back to the same spec.
if (y != ry2 || m != *rm || n != *rn || k != *rk)
return 0;
return 1;
}
| 0
|
338,225
|
// Parses the import section of a wasm binary: for each import, reads the
// module/base names and external kind, creates the corresponding IR
// object (function, table, memory, global or tag) under a unique
// kind-specific internal name ("fimport$N", "timport$N", ...), and
// registers it with the module.  Throws (via throwError) on malformed
// or unsupported imports.
void WasmBinaryBuilder::readImports() {
BYN_TRACE("== readImports\n");
size_t num = getU32LEB();
BYN_TRACE("num: " << num << std::endl);
Builder builder(wasm);
// Per-kind counters used to generate the unique internal names.
size_t tableCounter = 0;
size_t memoryCounter = 0;
size_t functionCounter = 0;
size_t globalCounter = 0;
size_t tagCounter = 0;
for (size_t i = 0; i < num; i++) {
BYN_TRACE("read one\n");
auto module = getInlineString();
auto base = getInlineString();
auto kind = (ExternalKind)getU32LEB();
// We set a unique prefix for the name based on the kind. This ensures no
// collisions between them, which can't occur here (due to the index i) but
// could occur later due to the names section.
switch (kind) {
case ExternalKind::Function: {
Name name(std::string("fimport$") + std::to_string(functionCounter++));
auto index = getU32LEB();
functionTypes.push_back(getTypeByIndex(index));
auto type = getTypeByIndex(index);
// Imported functions must have signature types.
if (!type.isSignature()) {
throwError(std::string("Imported function ") + module.str + '.' +
base.str +
"'s type must be a signature. Given: " + type.toString());
}
auto curr = builder.makeFunction(name, type, {});
curr->module = module;
curr->base = base;
functionImports.push_back(curr.get());
wasm.addFunction(std::move(curr));
break;
}
case ExternalKind::Table: {
Name name(std::string("timport$") + std::to_string(tableCounter++));
auto table = builder.makeTable(name);
table->module = module;
table->base = base;
table->type = getType();
bool is_shared;
Type indexType;
getResizableLimits(table->initial,
table->max,
is_shared,
indexType,
Table::kUnlimitedSize);
// Shared and 64-bit tables are not supported.
if (is_shared) {
throwError("Tables may not be shared");
}
if (indexType == Type::i64) {
throwError("Tables may not be 64-bit");
}
tableImports.push_back(table.get());
wasm.addTable(std::move(table));
break;
}
case ExternalKind::Memory: {
// Single-memory model: the import configures wasm.memory directly.
Name name(std::string("mimport$") + std::to_string(memoryCounter++));
wasm.memory.module = module;
wasm.memory.base = base;
wasm.memory.name = name;
wasm.memory.exists = true;
getResizableLimits(wasm.memory.initial,
wasm.memory.max,
wasm.memory.shared,
wasm.memory.indexType,
Memory::kUnlimitedSize);
break;
}
case ExternalKind::Global: {
Name name(std::string("gimport$") + std::to_string(globalCounter++));
auto type = getConcreteType();
auto mutable_ = getU32LEB();
auto curr =
builder.makeGlobal(name,
type,
nullptr,
mutable_ ? Builder::Mutable : Builder::Immutable);
curr->module = module;
curr->base = base;
globalImports.push_back(curr.get());
wasm.addGlobal(std::move(curr));
break;
}
case ExternalKind::Tag: {
Name name(std::string("eimport$") + std::to_string(tagCounter++));
getInt8(); // Reserved 'attribute' field
auto index = getU32LEB();
auto curr = builder.makeTag(name, getSignatureByTypeIndex(index));
curr->module = module;
curr->base = base;
wasm.addTag(std::move(curr));
break;
}
default: {
throwError("bad import kind");
}
}
}
}
| 0
|
210,866
|
/* Swapped-client entry point for XkbSelectEvents: byte-swaps the fixed
 * request fields, then walks the variable-length event-details list and
 * swaps each 2- or 4-byte (affect, details) pair in place before handing
 * the request to ProcXkbSelectEvents.  Entries selected via selectAll or
 * clear carry no detail data and are skipped.
 *
 * NOTE(review): for the size==1 cases the length check covers size*2 = 2
 * bytes but 'size' is then bumped to 2, so 4 bytes are consumed from
 * dataLeft — verify against current xorg xkb.c that this check/consume
 * mismatch cannot underflow dataLeft on a truncated request. */
SProcXkbSelectEvents(ClientPtr client)
{
REQUEST(xkbSelectEventsReq);
swaps(&stuff->length);
REQUEST_AT_LEAST_SIZE(xkbSelectEventsReq);
swaps(&stuff->deviceSpec);
swaps(&stuff->affectWhich);
swaps(&stuff->clear);
swaps(&stuff->selectAll);
swaps(&stuff->affectMap);
swaps(&stuff->map);
if ((stuff->affectWhich & (~XkbMapNotifyMask)) != 0) {
union {
BOOL *b;
CARD8 *c8;
CARD16 *c16;
CARD32 *c32;
} from;
register unsigned bit, ndx, maskLeft, dataLeft, size;
// Variable-length detail data follows the fixed request.
from.c8 = (CARD8 *) &stuff[1];
dataLeft = (stuff->length * 4) - SIZEOF(xkbSelectEventsReq);
maskLeft = (stuff->affectWhich & (~XkbMapNotifyMask));
for (ndx = 0, bit = 1; (maskLeft != 0); ndx++, bit <<= 1) {
if (((bit & maskLeft) == 0) || (ndx == XkbMapNotify))
continue;
maskLeft &= ~bit;
// selectAll/clear entries have no per-event detail payload.
if ((stuff->selectAll & bit) || (stuff->clear & bit))
continue;
// Each event type carries a detail pair of a known element size.
switch (ndx) {
case XkbNewKeyboardNotify:
case XkbStateNotify:
case XkbNamesNotify:
case XkbAccessXNotify:
case XkbExtensionDeviceNotify:
size = 2;
break;
case XkbControlsNotify:
case XkbIndicatorStateNotify:
case XkbIndicatorMapNotify:
size = 4;
break;
case XkbBellNotify:
case XkbActionMessage:
case XkbCompatMapNotify:
size = 1;
break;
default:
client->errorValue = _XkbErrCode2(0x1, bit);
return BadValue;
}
// Length validation before swapping client-supplied data.
if (dataLeft < (size * 2))
return BadLength;
if (size == 2) {
swaps(&from.c16[0]);
swaps(&from.c16[1]);
}
else if (size == 4) {
swapl(&from.c32[0]);
swapl(&from.c32[1]);
}
else {
// Single bytes need no swapping, but the pair still occupies
// two CARD16-sized slots in the wire format.
size = 2;
}
from.c8 += (size * 2);
dataLeft -= (size * 2);
}
if (dataLeft > 2) {
ErrorF("[xkb] Extra data (%d bytes) after SelectEvents\n",
dataLeft);
return BadLength;
}
}
return ProcXkbSelectEvents(client);
}
| 1
|
249,953
|
/* Existence probe: return nonzero if FILE appears to exist.
 * Prefers faccessat(F_OK) with effective IDs; the stat() fallback also
 * treats EOVERFLOW as "exists" (the file is there, merely too large for
 * the stat buffer). */
file_accessible (char const *file)
{
# if defined _LIBC || HAVE_FACCESSAT
return __faccessat (AT_FDCWD, file, F_OK, AT_EACCESS) == 0;
# else
struct stat st;
return __stat (file, &st) == 0 || errno == EOVERFLOW;
# endif
}
| 0
|
247,728
|
// Fluent setter: records the expected server-side stats string via the
// base class and returns *this so expectations can be chained.
TestUtilOptionsV2& setExpectedServerStats(const std::string& expected_server_stats) {
TestUtilOptionsBase::setExpectedServerStats(expected_server_stats);
return *this;
}
| 0
|
310,335
|
/* Called when some bytes have been flushed from a directory-server
 * connection's outbuf.  Once the buffered amount drops below
 * DIRSERV_BUFFER_MIN, refill the outbuf from whatever spool source this
 * connection is serving.  Returns 0, or the refill helper's result. */
connection_dirserv_flushed_some(dir_connection_t *conn)
{
  int src;

  tor_assert(conn->_base.state == DIR_CONN_STATE_SERVER_WRITING);

  /* Still plenty buffered; wait for more flushing. */
  if (buf_datalen(conn->_base.outbuf) >= DIRSERV_BUFFER_MIN)
    return 0;

  src = conn->dir_spool_src;
  if (src == DIR_SPOOL_EXTRA_BY_DIGEST || src == DIR_SPOOL_EXTRA_BY_FP ||
      src == DIR_SPOOL_SERVER_BY_DIGEST || src == DIR_SPOOL_SERVER_BY_FP)
    return connection_dirserv_add_servers_to_outbuf(conn);
  if (src == DIR_SPOOL_MICRODESC)
    return connection_dirserv_add_microdescs_to_outbuf(conn);
  if (src == DIR_SPOOL_CACHED_DIR)
    return connection_dirserv_add_dir_bytes_to_outbuf(conn);
  if (src == DIR_SPOOL_NETWORKSTATUS)
    return connection_dirserv_add_networkstatus_bytes_to_outbuf(conn);

  /* DIR_SPOOL_NONE or anything unexpected: nothing left to spool. */
  return 0;
}
| 0
|
448,926
|
/* Register 'head' to receive gzip header fields as inflate() decodes
   them.  Fails with Z_STREAM_ERROR when the stream state is invalid or
   the stream was not initialized for gzip decoding (wrap bit 2 unset).
   head->done is reset to 0 here; inflate() updates it as the header is
   processed. */
int ZEXPORT inflateGetHeader(strm, head)
z_streamp strm;
gz_headerp head;
{
struct inflate_state FAR *state;
/* check state */
if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if ((state->wrap & 2) == 0) return Z_STREAM_ERROR;
/* save header structure */
state->head = head;
head->done = 0;
return Z_OK;
}
| 0
|
231,047
|
/* Return pdTRUE when the queue currently holds no messages, pdFALSE
 * otherwise.  The message count is sampled inside a critical section so
 * the snapshot is consistent even if an ISR posts concurrently. */
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{
    BaseType_t xReturn = pdFALSE;

    taskENTER_CRITICAL();
    {
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
        {
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
| 0
|
291,788
|
/* RDMA CM event handler for client connections.  Drives connection
 * establishment (address/route resolved, established) and maps
 * error/teardown events onto cm_err codes; any establishment failure
 * flags the connection and kicks error recovery.  Always returns 0 —
 * teardown is handled internally rather than by the CM core. */
static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
struct rtrs_clt_con *con = cm_id->context;
struct rtrs_path *s = con->c.path;
struct rtrs_clt_path *clt_path = to_clt_path(s);
int cm_err = 0;
switch (ev->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
cm_err = rtrs_rdma_addr_resolved(con);
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
cm_err = rtrs_rdma_route_resolved(con);
break;
case RDMA_CM_EVENT_ESTABLISHED:
cm_err = rtrs_rdma_conn_established(con, ev);
if (!cm_err) {
/*
 * Report success and wake up. Here we abuse state_wq,
 * i.e. wake up without state change, but we set cm_err.
 */
flag_success_on_conn(con);
wake_up(&clt_path->state_wq);
return 0;
}
break;
case RDMA_CM_EVENT_REJECTED:
cm_err = rtrs_rdma_conn_rejected(con, ev);
break;
case RDMA_CM_EVENT_DISCONNECTED:
/* No message for disconnecting */
cm_err = -ECONNRESET;
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
cm_err = -ECONNRESET;
break;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
cm_err = -EHOSTUNREACH;
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
/*
 * Device removal is a special case. Queue close and return 0.
 */
rtrs_clt_close_conns(clt_path, false);
return 0;
default:
rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
cm_err = -ECONNRESET;
break;
}
if (cm_err) {
/*
 * cm error makes sense only on connection establishing,
 * in other cases we rely on normal procedure of reconnecting.
 */
flag_error_on_conn(con, cm_err);
rtrs_rdma_error_recovery(con);
}
return 0;
}
| 0
|
450,830
|
/*
 * Handle an event received on the ST21NFCA connectivity gate.
 * EVT_CONNECTIVITY is forwarded to the NFC core; EVT_TRANSACTION is
 * parsed (AID + PARAMETERS TLVs, ETSI TS 102 622 11.2.2.4) into a
 * struct nfc_evt_transaction and delivered via nfc_se_transaction().
 * Returns 0 or a negative errno; returns 1 for unexpected events.
 */
int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
				u8 event, struct sk_buff *skb)
{
	int r = 0;
	struct device *dev = &hdev->ndev->dev;
	struct nfc_evt_transaction *transaction;

	pr_debug("connectivity gate event: %x\n", event);

	switch (event) {
	case ST21NFCA_EVT_CONNECTIVITY:
		r = nfc_se_connectivity(hdev->ndev, host);
		break;
	case ST21NFCA_EVT_TRANSACTION:
		/*
		 * According to specification etsi 102 622
		 * 11.2.2.4 EVT_TRANSACTION Table 52
		 * Description Tag Length
		 * AID 81 5 to 16
		 * PARAMETERS 82 0 to 255
		 */
		/* Bug fix (CVE-2022-26490 hardening, upstream "nfc: st21nfca:
		 * Fix potential buffer overflows in EVT_TRANSACTION"): these
		 * conditions must be OR'ed.  With '&&', a too-short skb that
		 * happened to carry the right tag slipped through and the
		 * parsing below read past the end of the buffer. */
		if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
		    skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
			return -EPROTO;

		/* NOTE(review): the early error returns below leave 'skb'
		 * unfreed while the success path frees it; confirm skb
		 * ownership against the HCI core before changing (upstream
		 * later addressed this leak in a separate patch). */
		transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
		if (!transaction)
			return -ENOMEM;

		transaction->aid_len = skb->data[1];

		/* Checking if the length of the AID is valid */
		if (transaction->aid_len > sizeof(transaction->aid))
			return -EINVAL;

		memcpy(transaction->aid, &skb->data[2],
		       transaction->aid_len);

		/* Check next byte is PARAMETERS tag (82) */
		if (skb->data[transaction->aid_len + 2] !=
		    NFC_EVT_TRANSACTION_PARAMS_TAG)
			return -EPROTO;

		transaction->params_len = skb->data[transaction->aid_len + 3];

		/* Total size is allocated (skb->len - 2) minus fixed array members */
		if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
			return -EINVAL;

		memcpy(transaction->params, skb->data +
		       transaction->aid_len + 4, transaction->params_len);

		r = nfc_se_transaction(hdev->ndev, host, transaction);
		break;
	default:
		nfc_err(&hdev->ndev->dev, "Unexpected event on connectivity gate\n");
		return 1;
	}
	kfree_skb(skb);
	return r;
}
| 0
|
240,592
|
// Trivial constructor: no attributes to read at construction time; the
// op is fully driven by its inputs at compute time.
explicit ResourceGatherNdOp(OpKernelConstruction* c) : OpKernel(c) {}
| 0
|
474,447
|
// Return the objectAttributes from the public area of the object
// referenced by 'handle'.
// NOTE(review): HandleToObject() is dereferenced unchecked — callers are
// presumably required to pass a valid loaded-object handle; confirm.
ObjectGetPublicAttributes(
TPM_HANDLE handle
)
{
return HandleToObject(handle)->publicArea.objectAttributes;
}
| 0
|
450,422
|
/* Queue 'len' bytes of protocol data for transmission to the VNC client:
 * grows the output buffer, (re)arms the I/O watch when the buffer was
 * previously empty, and force-disconnects clients whose output backlog
 * exceeds the throttle limit.  No-op on an already-disconnecting client. */
void vnc_write(VncState *vs, const void *data, size_t len)
{
assert(vs->magic == VNC_MAGIC);
if (vs->disconnecting) {
return;
}
/* Protection against malicious client/guest to prevent our output
* buffer growing without bound if client stops reading data. This
* should rarely trigger, because we have earlier throttling code
* which stops issuing framebuffer updates and drops audio data
* if the throttle_output_offset value is exceeded. So we only reach
* this higher level if a huge number of pseudo-encodings get
* triggered while data can't be sent on the socket.
*
* NB throttle_output_offset can be zero during early protocol
* handshake, or from the job thread's VncState clone
*/
if (vs->throttle_output_offset != 0 &&
(vs->output.offset / VNC_THROTTLE_OUTPUT_LIMIT_SCALE) >
vs->throttle_output_offset) {
trace_vnc_client_output_limit(vs, vs->ioc, vs->output.offset,
vs->throttle_output_offset);
vnc_disconnect_start(vs);
return;
}
buffer_reserve(&vs->output, len);
// Buffer was empty: start watching for writability (G_IO_OUT) again.
if (vs->ioc != NULL && buffer_empty(&vs->output)) {
if (vs->ioc_tag) {
g_source_remove(vs->ioc_tag);
}
vs->ioc_tag = qio_channel_add_watch(
vs->ioc, G_IO_IN | G_IO_OUT, vnc_client_io, vs, NULL);
}
buffer_append(&vs->output, data, len);
}
| 0
|
195,691
|
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
{
/* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
const mrb_irep *irep = proc->body.irep;
const mrb_pool_value *pool = irep->pool;
const mrb_sym *syms = irep->syms;
mrb_code insn;
int ai = mrb_gc_arena_save(mrb);
struct mrb_jmpbuf *prev_jmp = mrb->jmp;
struct mrb_jmpbuf c_jmp;
uint32_t a;
uint16_t b;
uint16_t c;
mrb_sym mid;
const struct mrb_irep_catch_handler *ch;
#ifdef DIRECT_THREADED
static const void * const optable[] = {
#define OPCODE(x,_) &&L_OP_ ## x,
#include "mruby/ops.h"
#undef OPCODE
};
#endif
mrb_bool exc_catched = FALSE;
RETRY_TRY_BLOCK:
MRB_TRY(&c_jmp) {
if (exc_catched) {
exc_catched = FALSE;
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
goto L_BREAK;
goto L_RAISE;
}
mrb->jmp = &c_jmp;
mrb_vm_ci_proc_set(mrb->c->ci, proc);
#define regs (mrb->c->ci->stack)
INIT_DISPATCH {
CASE(OP_NOP, Z) {
/* do nothing */
NEXT;
}
CASE(OP_MOVE, BB) {
regs[a] = regs[b];
NEXT;
}
CASE(OP_LOADL, BB) {
switch (pool[b].tt) { /* number */
case IREP_TT_INT32:
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
break;
case IREP_TT_INT64:
#if defined(MRB_INT64)
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
#else
#if defined(MRB_64BIT)
if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
}
#endif
goto L_INT_OVERFLOW;
#endif
case IREP_TT_BIGINT:
#ifdef MRB_USE_BIGINT
{
const char *s = pool[b].u.str;
regs[a] = mrb_bint_new_str(mrb, s+2, (mrb_int)s[0], (mrb_int)s[1]);
}
break;
#else
goto L_INT_OVERFLOW;
#endif
#ifndef MRB_NO_FLOAT
case IREP_TT_FLOAT:
regs[a] = mrb_float_value(mrb, pool[b].u.f);
break;
#endif
default:
/* should not happen (tt:string) */
regs[a] = mrb_nil_value();
break;
}
NEXT;
}
CASE(OP_LOADI, BB) {
SET_FIXNUM_VALUE(regs[a], b);
NEXT;
}
CASE(OP_LOADINEG, BB) {
SET_FIXNUM_VALUE(regs[a], -b);
NEXT;
}
CASE(OP_LOADI__1,B) goto L_LOADI;
CASE(OP_LOADI_0,B) goto L_LOADI;
CASE(OP_LOADI_1,B) goto L_LOADI;
CASE(OP_LOADI_2,B) goto L_LOADI;
CASE(OP_LOADI_3,B) goto L_LOADI;
CASE(OP_LOADI_4,B) goto L_LOADI;
CASE(OP_LOADI_5,B) goto L_LOADI;
CASE(OP_LOADI_6,B) goto L_LOADI;
CASE(OP_LOADI_7, B) {
L_LOADI:
SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
NEXT;
}
CASE(OP_LOADI16, BS) {
SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
NEXT;
}
CASE(OP_LOADI32, BSS) {
SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
NEXT;
}
CASE(OP_LOADSYM, BB) {
SET_SYM_VALUE(regs[a], syms[b]);
NEXT;
}
CASE(OP_LOADNIL, B) {
SET_NIL_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADSELF, B) {
regs[a] = regs[0];
NEXT;
}
CASE(OP_LOADT, B) {
SET_TRUE_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADF, B) {
SET_FALSE_VALUE(regs[a]);
NEXT;
}
CASE(OP_GETGV, BB) {
mrb_value val = mrb_gv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETGV, BB) {
mrb_gv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETSV, BB) {
mrb_value val = mrb_vm_special_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETSV, BB) {
mrb_vm_special_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIV, BB) {
regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
NEXT;
}
CASE(OP_SETIV, BB) {
mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETCV, BB) {
mrb_value val;
val = mrb_vm_cv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETCV, BB) {
mrb_vm_cv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIDX, B) {
mrb_value va = regs[a], vb = regs[a+1];
switch (mrb_type(va)) {
case MRB_TT_ARRAY:
if (!mrb_integer_p(vb)) goto getidx_fallback;
regs[a] = mrb_ary_entry(va, mrb_integer(vb));
break;
case MRB_TT_HASH:
va = mrb_hash_get(mrb, va, vb);
regs[a] = va;
break;
case MRB_TT_STRING:
switch (mrb_type(vb)) {
case MRB_TT_INTEGER:
case MRB_TT_STRING:
case MRB_TT_RANGE:
va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
regs[a] = va;
break;
default:
goto getidx_fallback;
}
break;
default:
getidx_fallback:
mid = MRB_OPSYM(aref);
goto L_SEND_SYM;
}
NEXT;
}
CASE(OP_SETIDX, B) {
c = 2;
mid = MRB_OPSYM(aset);
SET_NIL_VALUE(regs[a+3]);
goto L_SENDB_SYM;
}
CASE(OP_GETCONST, BB) {
mrb_value v = mrb_vm_const_get(mrb, syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETCONST, BB) {
mrb_vm_const_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETMCNST, BB) {
mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETMCNST, BB) {
mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETUPVAR, BBB) {
mrb_value *regs_a = regs + a;
struct REnv *e = uvenv(mrb, c);
if (e && b < MRB_ENV_LEN(e)) {
*regs_a = e->stack[b];
}
else {
*regs_a = mrb_nil_value();
}
NEXT;
}
CASE(OP_SETUPVAR, BBB) {
struct REnv *e = uvenv(mrb, c);
if (e) {
mrb_value *regs_a = regs + a;
if (b < MRB_ENV_LEN(e)) {
e->stack[b] = *regs_a;
mrb_write_barrier(mrb, (struct RBasic*)e);
}
}
NEXT;
}
CASE(OP_JMP, S) {
pc += (int16_t)a;
JUMP;
}
CASE(OP_JMPIF, BS) {
if (mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNOT, BS) {
if (!mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNIL, BS) {
if (mrb_nil_p(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPUW, S) {
a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
struct RBreak *brk = (struct RBreak*)mrb->exc;
mrb_value target = mrb_break_value_get(brk);
mrb_assert(mrb_integer_p(target));
a = (uint32_t)mrb_integer(target);
mrb_assert(a >= 0 && a < irep->ilen);
}
CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE);
if (ch) {
/* avoiding a jump from a catch handler into the same handler */
if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a));
}
}
}
CHECKPOINT_END(RBREAK_TAG_JUMP);
mrb->exc = NULL; /* clear break object */
pc = irep->iseq + a;
JUMP;
}
CASE(OP_EXCEPT, B) {
mrb_value exc;
if (mrb->exc == NULL) {
exc = mrb_nil_value();
}
else {
switch (mrb->exc->tt) {
case MRB_TT_BREAK:
case MRB_TT_EXCEPTION:
exc = mrb_obj_value(mrb->exc);
break;
default:
mrb_assert(!"bad mrb_type");
exc = mrb_nil_value();
break;
}
mrb->exc = NULL;
}
regs[a] = exc;
NEXT;
}
CASE(OP_RESCUE, BB) {
mrb_value exc = regs[a]; /* exc on stack */
mrb_value e = regs[b];
struct RClass *ec;
switch (mrb_type(e)) {
case MRB_TT_CLASS:
case MRB_TT_MODULE:
break;
default:
{
mrb_value exc;
exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"class or module required for rescue clause");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
}
ec = mrb_class_ptr(e);
regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
NEXT;
}
CASE(OP_RAISEIF, B) {
mrb_value exc = regs[a];
if (mrb_break_p(exc)) {
mrb->exc = mrb_obj_ptr(exc);
goto L_BREAK;
}
mrb_exc_set(mrb, exc);
if (mrb->exc) {
goto L_RAISE;
}
NEXT;
}
CASE(OP_SSEND, BBB) {
regs[a] = regs[0];
insn = OP_SEND;
}
goto L_SENDB;
CASE(OP_SSENDB, BBB) {
regs[a] = regs[0];
}
goto L_SENDB;
CASE(OP_SEND, BBB)
goto L_SENDB;
L_SEND_SYM:
c = 1;
/* push nil after arguments */
SET_NIL_VALUE(regs[a+2]);
goto L_SENDB_SYM;
CASE(OP_SENDB, BBB)
L_SENDB:
mid = syms[b];
L_SENDB_SYM:
{
mrb_callinfo *ci = mrb->c->ci;
mrb_method_t m;
struct RClass *cls;
mrb_value recv, blk;
ARGUMENT_NORMALIZE(a, &c, insn);
recv = regs[a];
cls = mrb_class(mrb, recv);
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, c);
if (MRB_METHOD_CFUNC_P(m)) {
if (MRB_METHOD_PROC_P(m)) {
struct RProc *p = MRB_METHOD_PROC(m);
mrb_vm_ci_proc_set(ci, p);
recv = p->body.func(mrb, recv);
}
else {
if (MRB_METHOD_NOARG_P(m)) {
check_method_noarg(mrb, ci);
}
recv = MRB_METHOD_FUNC(m)(mrb, recv);
}
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return recv;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
ci->stack[0] = recv;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
}
JUMP;
CASE(OP_CALL, Z) {
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv = ci->stack[0];
struct RProc *m = mrb_proc_ptr(recv);
/* replace callinfo */
ci->u.target_class = MRB_PROC_TARGET_CLASS(m);
mrb_vm_ci_proc_set(ci, m);
if (MRB_PROC_ENV_P(m)) {
ci->mid = MRB_PROC_ENV(m)->mid;
}
/* prepare stack */
if (MRB_PROC_CFUNC_P(m)) {
recv = MRB_PROC_CFUNC(m)(mrb, recv);
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
ci[1].stack[0] = recv;
irep = mrb->c->ci->proc->body.irep;
}
else {
/* setup environment for calling method */
proc = m;
irep = m->body.irep;
if (!irep) {
mrb->c->ci->stack[0] = mrb_nil_value();
a = 0;
c = OP_R_NORMAL;
goto L_OP_RETURN_BODY;
}
mrb_int nargs = mrb_ci_bidx(ci)+1;
if (nargs < irep->nregs) {
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+nargs, irep->nregs-nargs);
}
if (MRB_PROC_ENV_P(m)) {
regs[0] = MRB_PROC_ENV(m)->stack[0];
}
pc = irep->iseq;
}
pool = irep->pool;
syms = irep->syms;
JUMP;
}
CASE(OP_SUPER, BB) {
mrb_method_t m;
struct RClass *cls;
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv, blk;
const struct RProc *p = ci->proc;
mrb_sym mid = ci->mid;
struct RClass* target_class = MRB_PROC_TARGET_CLASS(p);
if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */
mid = p->e.env->mid; /* restore old mid */
}
if (mid == 0 || !target_class) {
mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) {
target_class = mrb_vm_ci_target_class(ci);
}
else if (target_class->tt == MRB_TT_MODULE) {
target_class = mrb_vm_ci_target_class(ci);
if (!target_class || target_class->tt != MRB_TT_ICLASS) {
goto super_typeerror;
}
}
recv = regs[0];
if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
super_typeerror: ;
mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"self has wrong type to call super in this context");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
ARGUMENT_NORMALIZE(a, &b, OP_SUPER);
cls = target_class->super;
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, b);
/* prepare stack */
ci->stack[0] = recv;
if (MRB_METHOD_CFUNC_P(m)) {
mrb_value v;
if (MRB_METHOD_PROC_P(m)) {
mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m));
}
v = MRB_METHOD_CFUNC(m)(mrb, recv);
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
mrb_assert(!mrb_break_p(v));
if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return v;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
mrb->c->ci->stack[0] = v;
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
JUMP;
}
CASE(OP_ARGARY, BS) {
mrb_int m1 = (b>>11)&0x3f;
mrb_int r = (b>>10)&0x1;
mrb_int m2 = (b>>5)&0x1f;
mrb_int kd = (b>>4)&0x1;
mrb_int lv = (b>>0)&0xf;
mrb_value *stack;
if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) {
mrb_value exc;
L_NOSUPER:
exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e) goto L_NOSUPER;
if (MRB_ENV_LEN(e) <= m1+r+m2+1)
goto L_NOSUPER;
stack = e->stack + 1;
}
if (r == 0) {
regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
}
else {
mrb_value *pp = NULL;
struct RArray *rest;
mrb_int len = 0;
if (mrb_array_p(stack[m1])) {
struct RArray *ary = mrb_ary_ptr(stack[m1]);
pp = ARY_PTR(ary);
len = ARY_LEN(ary);
}
regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
rest = mrb_ary_ptr(regs[a]);
if (m1 > 0) {
stack_copy(ARY_PTR(rest), stack, m1);
}
if (len > 0) {
stack_copy(ARY_PTR(rest)+m1, pp, len);
}
if (m2 > 0) {
stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
}
ARY_SET_LEN(rest, m1+len+m2);
}
if (kd) {
regs[a+1] = stack[m1+r+m2];
regs[a+2] = stack[m1+r+m2+1];
}
else {
regs[a+1] = stack[m1+r+m2];
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ENTER, W) {
mrb_int m1 = MRB_ASPEC_REQ(a);
mrb_int o = MRB_ASPEC_OPT(a);
mrb_int r = MRB_ASPEC_REST(a);
mrb_int m2 = MRB_ASPEC_POST(a);
mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
/* unused
int b = MRB_ASPEC_BLOCK(a);
*/
mrb_int const len = m1 + o + r + m2;
mrb_callinfo *ci = mrb->c->ci;
mrb_int argc = ci->n;
mrb_value *argv = regs+1;
mrb_value * const argv0 = argv;
mrb_int const kw_pos = len + kd; /* where kwhash should be */
mrb_int const blk_pos = kw_pos + 1; /* where block should be */
mrb_value blk = regs[mrb_ci_bidx(ci)];
mrb_value kdict = mrb_nil_value();
/* keyword arguments */
if (ci->nk > 0) {
mrb_int kidx = mrb_ci_kidx(ci);
kdict = regs[kidx];
if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) {
kdict = mrb_nil_value();
ci->nk = 0;
}
}
if (!kd && !mrb_nil_p(kdict)) {
if (argc < 14) {
ci->n++;
argc++; /* include kdict in normal arguments */
}
else if (argc == 14) {
/* pack arguments and kdict */
regs[1] = mrb_ary_new_from_values(mrb, argc+1, ®s[1]);
argc = ci->n = 15;
}
else {/* argc == 15 */
/* push kdict to packed arguments */
mrb_ary_push(mrb, regs[1], regs[2]);
}
ci->nk = 0;
}
if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) {
kdict = mrb_hash_dup(mrb, kdict);
}
/* arguments is passed with Array */
if (argc == 15) {
struct RArray *ary = mrb_ary_ptr(regs[1]);
argv = ARY_PTR(ary);
argc = (int)ARY_LEN(ary);
mrb_gc_protect(mrb, regs[1]);
}
/* strict argument check */
if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
if (argc < m1 + m2 || (r == 0 && argc > len)) {
argnum_error(mrb, m1+m2);
goto L_RAISE;
}
}
/* extract first argument array to arguments */
else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
mrb_gc_protect(mrb, argv[0]);
argc = (int)RARRAY_LEN(argv[0]);
argv = RARRAY_PTR(argv[0]);
}
/* rest arguments */
mrb_value rest = mrb_nil_value();
if (argc < len) {
mrb_int mlen = m2;
if (argc < m1+m2) {
mlen = m1 < argc ? argc - m1 : 0;
}
/* copy mandatory and optional arguments */
if (argv0 != argv && argv) {
value_move(®s[1], argv, argc-mlen); /* m1 + o */
}
if (argc < m1) {
stack_clear(®s[argc+1], m1-argc);
}
/* copy post mandatory arguments */
if (mlen) {
value_move(®s[len-m2+1], &argv[argc-mlen], mlen);
}
if (mlen < m2) {
stack_clear(®s[len-m2+mlen+1], m2-mlen);
}
/* initialize rest arguments with empty Array */
if (r) {
rest = mrb_ary_new_capa(mrb, 0);
regs[m1+o+1] = rest;
}
/* skip initializer of passed arguments */
if (o > 0 && argc > m1+m2)
pc += (argc - m1 - m2)*3;
}
else {
mrb_int rnum = 0;
if (argv0 != argv) {
value_move(®s[1], argv, m1+o);
}
if (r) {
rnum = argc-m1-o-m2;
rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
regs[m1+o+1] = rest;
}
if (m2 > 0 && argc-m2 > m1) {
value_move(®s[m1+o+r+1], &argv[m1+o+rnum], m2);
}
pc += o*3;
}
/* need to be update blk first to protect blk from GC */
regs[blk_pos] = blk; /* move block */
if (kd) {
if (mrb_nil_p(kdict))
kdict = mrb_hash_new_capa(mrb, 0);
regs[kw_pos] = kdict; /* set kwhash */
}
/* format arguments for generated code */
mrb->c->ci->n = (uint8_t)len;
/* clear local (but non-argument) variables */
if (irep->nlocals-blk_pos-1 > 0) {
stack_clear(®s[blk_pos+1], irep->nlocals-blk_pos-1);
}
JUMP;
}
CASE(OP_KARG, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict, v;
if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
mrb_value str = mrb_format(mrb, "missing keyword: %v", k);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
v = mrb_hash_get(mrb, kdict, k);
regs[a] = v;
mrb_hash_delete_key(mrb, kdict, k);
NEXT;
}
CASE(OP_KEY_P, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
mrb_bool key_p = FALSE;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
key_p = mrb_hash_key_p(mrb, kdict, k);
}
regs[a] = mrb_bool_value(key_p);
NEXT;
}
CASE(OP_KEYEND, Z) {
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
mrb_value keys = mrb_hash_keys(mrb, kdict);
mrb_value key1 = RARRAY_PTR(keys)[0];
mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
NEXT;
}
CASE(OP_BREAK, B) {
c = OP_R_BREAK;
goto L_RETURN;
}
CASE(OP_RETURN_BLK, B) {
c = OP_R_RETURN;
goto L_RETURN;
}
CASE(OP_RETURN, B)
c = OP_R_NORMAL;
L_RETURN:
{
mrb_callinfo *ci;
ci = mrb->c->ci;
if (ci->mid) {
mrb_value blk = regs[mrb_ci_bidx(ci)];
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (!MRB_PROC_STRICT_P(p) &&
ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
}
if (mrb->exc) {
L_RAISE:
ci = mrb->c->ci;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) goto L_FTOP;
goto L_CATCH;
}
while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) {
ci = cipop(mrb);
if (ci[1].cci == CINFO_SKIP && prev_jmp) {
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
pc = ci[0].pc;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) {
L_FTOP: /* fiber top */
if (mrb->c == mrb->root_c) {
mrb->c->ci->stack = mrb->c->stbase;
goto L_STOP;
}
else {
struct mrb_context *c = mrb->c;
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
c->prev = NULL;
goto L_RAISE;
}
}
break;
}
}
L_CATCH:
if (ch == NULL) goto L_STOP;
if (FALSE) {
L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
ci = mrb->c->ci;
}
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
}
else {
mrb_int acc;
mrb_value v;
ci = mrb->c->ci;
v = regs[a];
mrb_gc_protect(mrb, v);
switch (c) {
case OP_R_RETURN:
/* Fall through to OP_R_NORMAL otherwise */
if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) {
const struct RProc *dst;
mrb_callinfo *cibase;
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
if (MRB_PROC_ENV_P(dst)) {
struct REnv *e = MRB_PROC_ENV(dst);
if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) {
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
}
/* check jump destination */
while (cibase <= ci && ci->proc != dst) {
if (ci->cci > CINFO_NONE) { /* jump cross C boundary */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci--;
}
if (ci <= cibase) { /* no jump destination */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci = mrb->c->ci;
while (cibase <= ci && ci->proc != dst) {
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) {
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK);
ci = cipop(mrb);
pc = ci->pc;
}
proc = ci->proc;
mrb->exc = NULL; /* clear break object */
break;
}
/* fallthrough */
case OP_R_NORMAL:
NORMAL_RETURN:
if (ci == mrb->c->cibase) {
struct mrb_context *c;
c = mrb->c;
if (!c->prev) { /* toplevel return */
regs[irep->nlocals] = v;
goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP);
}
if (!c->vmexec && c->prev->ci == c->prev->cibase) {
mrb_value exc = mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) {
c = mrb->c;
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL);
/* automatic yield at the end */
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
mrb->c->status = MRB_FIBER_RUNNING;
c->prev = NULL;
if (c->vmexec) {
mrb_gc_arena_restore(mrb, ai);
c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
ci = mrb->c->ci;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN);
mrb->exc = NULL; /* clear break object */
break;
case OP_R_BREAK:
if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
if (MRB_PROC_ORPHAN_P(proc)) {
mrb_value exc;
L_BREAK_ERROR:
exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR,
"break from proc-closure");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
goto L_BREAK_ERROR;
}
else {
struct REnv *e = MRB_PROC_ENV(proc);
if (e->cxt != mrb->c) {
goto L_BREAK_ERROR;
}
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK);
/* break from fiber block */
if (ci == mrb->c->cibase && ci->pc) {
struct mrb_context *c = mrb->c;
mrb->c = c->prev;
c->prev = NULL;
ci = mrb->c->ci;
}
if (ci->cci > CINFO_NONE) {
ci = cipop(mrb);
mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v);
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
if (FALSE) {
struct RBreak *brk;
L_BREAK:
brk = (struct RBreak*)mrb->exc;
proc = mrb_break_proc_get(brk);
v = mrb_break_value_get(brk);
ci = mrb->c->ci;
switch (mrb_break_tag_get(brk)) {
#define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
#undef DISPATCH_CHECKPOINTS
default:
mrb_assert(!"wrong break tag");
}
}
while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) {
if (ci[-1].cci == CINFO_SKIP) {
goto L_BREAK_ERROR;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER);
ci = cipop(mrb);
pc = ci->pc;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET);
if (ci == mrb->c->cibase) {
goto L_BREAK_ERROR;
}
mrb->exc = NULL; /* clear break object */
break;
default:
/* cannot happen */
break;
}
mrb_assert(ci == mrb->c->ci);
mrb_assert(mrb->exc == NULL);
if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) {
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
acc = ci->cci;
ci = cipop(mrb);
if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
mrb_gc_arena_restore(mrb, ai);
mrb->jmp = prev_jmp;
return v;
}
pc = ci->pc;
DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
ci[1].stack[0] = v;
mrb_gc_arena_restore(mrb, ai);
}
JUMP;
}
CASE(OP_BLKPUSH, BS) {
int m1 = (b>>11)&0x3f;
int r = (b>>10)&0x1;
int m2 = (b>>5)&0x1f;
int kd = (b>>4)&0x1;
int lv = (b>>0)&0xf;
mrb_value *stack;
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
MRB_ENV_LEN(e) <= m1+r+m2+1) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
stack = e->stack + 1;
}
if (mrb_nil_p(stack[m1+r+m2+kd])) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
regs[a] = stack[m1+r+m2+kd];
NEXT;
}
#if !defined(MRB_USE_BIGINT) || defined(MRB_INT32)
L_INT_OVERFLOW:
{
mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow");
mrb_exc_set(mrb, exc);
}
goto L_RAISE;
#endif
#define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
#define OP_MATH(op_name) \
/* need to check if op is overridden */ \
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
OP_MATH_CASE_INTEGER(op_name); \
OP_MATH_CASE_FLOAT(op_name, integer, float); \
OP_MATH_CASE_FLOAT(op_name, float, integer); \
OP_MATH_CASE_FLOAT(op_name, float, float); \
OP_MATH_CASE_STRING_##op_name(); \
default: \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATH_CASE_INTEGER(op_name) \
case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
{ \
mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) { \
OP_MATH_OVERFLOW_INT(op_name,x,y); \
} \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
#else
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
{ \
mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
#ifdef MRB_USE_BIGINT
#define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y)
#else
#define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW
#endif
#define OP_MATH_CASE_STRING_add() \
case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
mrb_gc_arena_restore(mrb, ai); \
break
#define OP_MATH_CASE_STRING_sub() (void)0
#define OP_MATH_CASE_STRING_mul() (void)0
#define OP_MATH_OP_add +
#define OP_MATH_OP_sub -
#define OP_MATH_OP_mul *
#define OP_MATH_TT_integer MRB_TT_INTEGER
#define OP_MATH_TT_float MRB_TT_FLOAT
CASE(OP_ADD, B) {
OP_MATH(add);
}
CASE(OP_SUB, B) {
OP_MATH(sub);
}
CASE(OP_MUL, B) {
OP_MATH(mul);
}
CASE(OP_DIV, B) {
#ifndef MRB_NO_FLOAT
mrb_float x, y, f;
#endif
/* need to check if op is overridden */
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
{
mrb_int x = mrb_integer(regs[a]);
mrb_int y = mrb_integer(regs[a+1]);
mrb_int div = mrb_div_int(mrb, x, y);
SET_INT_VALUE(mrb, regs[a], div);
}
NEXT;
#ifndef MRB_NO_FLOAT
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
x = (mrb_float)mrb_integer(regs[a]);
y = mrb_float(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
x = mrb_float(regs[a]);
y = (mrb_float)mrb_integer(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
x = mrb_float(regs[a]);
y = mrb_float(regs[a+1]);
break;
#endif
default:
mid = MRB_OPSYM(div);
goto L_SEND_SYM;
}
#ifndef MRB_NO_FLOAT
f = mrb_div_float(x, y);
SET_FLOAT_VALUE(mrb, regs[a], f);
#endif
NEXT;
}
#define OP_MATHI(op_name) \
/* need to check if op is overridden */ \
switch (mrb_type(regs[a])) { \
OP_MATHI_CASE_INTEGER(op_name); \
OP_MATHI_CASE_FLOAT(op_name); \
default: \
SET_INT_VALUE(mrb,regs[a+1], b); \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATHI_CASE_INTEGER(op_name) \
case MRB_TT_INTEGER: \
{ \
mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) { \
OP_MATH_OVERFLOW_INT(op_name,x,y); \
} \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATHI_CASE_FLOAT(op_name) (void)0
#else
#define OP_MATHI_CASE_FLOAT(op_name) \
case MRB_TT_FLOAT: \
{ \
mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
CASE(OP_ADDI, BB) {
OP_MATHI(add);
}
CASE(OP_SUBI, BB) {
OP_MATHI(sub);
}
#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
#ifdef MRB_NO_FLOAT
#define OP_CMP(op,sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#else
#define OP_CMP(op, sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_float,mrb_float);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#endif
CASE(OP_EQ, B) {
if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
SET_TRUE_VALUE(regs[a]);
}
else {
OP_CMP(==,eq);
}
NEXT;
}
CASE(OP_LT, B) {
OP_CMP(<,lt);
NEXT;
}
CASE(OP_LE, B) {
OP_CMP(<=,le);
NEXT;
}
CASE(OP_GT, B) {
OP_CMP(>,gt);
NEXT;
}
CASE(OP_GE, B) {
OP_CMP(>=,ge);
NEXT;
}
CASE(OP_ARRAY, BB) {
regs[a] = mrb_ary_new_from_values(mrb, b, ®s[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARRAY2, BBB) {
regs[a] = mrb_ary_new_from_values(mrb, c, ®s[b]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYCAT, B) {
mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
if (mrb_nil_p(regs[a])) {
regs[a] = splat;
}
else {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_concat(mrb, regs[a], splat);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYPUSH, BB) {
mrb_assert(mrb_array_p(regs[a]));
for (mrb_int i=0; i<b; i++) {
mrb_ary_push(mrb, regs[a], regs[a+i+1]);
}
NEXT;
}
CASE(OP_ARYDUP, B) {
mrb_value ary = regs[a];
if (mrb_array_p(ary)) {
ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary));
}
else {
ary = mrb_ary_new_from_values(mrb, 1, &ary);
}
regs[a] = ary;
NEXT;
}
CASE(OP_AREF, BBB) {
mrb_value v = regs[b];
if (!mrb_array_p(v)) {
if (c == 0) {
regs[a] = v;
}
else {
SET_NIL_VALUE(regs[a]);
}
}
else {
v = mrb_ary_ref(mrb, v, c);
regs[a] = v;
}
NEXT;
}
CASE(OP_ASET, BBB) {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_set(mrb, regs[b], c, regs[a]);
NEXT;
}
CASE(OP_APOST, BBB) {
mrb_value v = regs[a];
int pre = b;
int post = c;
struct RArray *ary;
int len, idx;
if (!mrb_array_p(v)) {
v = mrb_ary_new_from_values(mrb, 1, ®s[a]);
}
ary = mrb_ary_ptr(v);
len = (int)ARY_LEN(ary);
if (len > pre + post) {
v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
regs[a++] = v;
while (post--) {
regs[a++] = ARY_PTR(ary)[len-post-1];
}
}
else {
v = mrb_ary_new_capa(mrb, 0);
regs[a++] = v;
for (idx=0; idx+pre<len; idx++) {
regs[a+idx] = ARY_PTR(ary)[pre+idx];
}
while (idx < post) {
SET_NIL_VALUE(regs[a+idx]);
idx++;
}
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_INTERN, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_sym sym = mrb_intern_str(mrb, regs[a]);
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_SYMBOL, BB) {
size_t len;
mrb_sym sym;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
sym = mrb_intern_static(mrb, pool[b].u.str, len);
}
else {
sym = mrb_intern(mrb, pool[b].u.str, len);
}
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_STRING, BB) {
mrb_int len;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
}
else {
regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_STRCAT, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_str_concat(mrb, regs[a], regs[a+1]);
NEXT;
}
CASE(OP_HASH, BB) {
mrb_value hash = mrb_hash_new_capa(mrb, b);
int i;
int lim = a+b*2;
for (i=a; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
regs[a] = hash;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHADD, BB) {
mrb_value hash;
int i;
int lim = a+b*2+1;
hash = regs[a];
mrb_ensure_hash_type(mrb, hash);
for (i=a+1; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHCAT, B) {
mrb_value hash = regs[a];
mrb_assert(mrb_hash_p(hash));
mrb_hash_merge(mrb, hash, regs[a+1]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_LAMBDA, BB)
c = OP_L_LAMBDA;
L_MAKE_LAMBDA:
{
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
if (c & OP_L_CAPTURE) {
p = mrb_closure_new(mrb, nirep);
}
else {
p = mrb_proc_new(mrb, nirep);
p->flags |= MRB_PROC_SCOPE;
}
if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
regs[a] = mrb_obj_value(p);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_BLOCK, BB) {
c = OP_L_BLOCK;
goto L_MAKE_LAMBDA;
}
CASE(OP_METHOD, BB) {
c = OP_L_METHOD;
goto L_MAKE_LAMBDA;
}
CASE(OP_RANGE_INC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_RANGE_EXC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_OCLASS, B) {
regs[a] = mrb_obj_value(mrb->object_class);
NEXT;
}
CASE(OP_CLASS, BB) {
struct RClass *c = 0, *baseclass;
mrb_value base, super;
mrb_sym id = syms[b];
base = regs[a];
super = regs[a+1];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
c = mrb_vm_define_class(mrb, base, super, id);
regs[a] = mrb_obj_value(c);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_MODULE, BB) {
struct RClass *cls = 0, *baseclass;
mrb_value base;
mrb_sym id = syms[b];
base = regs[a];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
cls = mrb_vm_define_module(mrb, base, id);
regs[a] = mrb_obj_value(cls);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_EXEC, BB)
{
mrb_value recv = regs[a];
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
/* prepare closure */
p = mrb_proc_new(mrb, nirep);
p->c = NULL;
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
p->flags |= MRB_PROC_SCOPE;
/* prepare call stack */
cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0);
irep = p->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+1, irep->nregs-1);
pc = irep->iseq;
JUMP;
}
CASE(OP_DEF, BB) {
struct RClass *target = mrb_class_ptr(regs[a]);
struct RProc *p = mrb_proc_ptr(regs[a+1]);
mrb_method_t m;
mrb_sym mid = syms[b];
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, target, mid, m);
mrb_method_added(mrb, target, mid);
mrb_gc_arena_restore(mrb, ai);
regs[a] = mrb_symbol_value(mid);
NEXT;
}
CASE(OP_SCLASS, B) {
regs[a] = mrb_singleton_class(mrb, regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_TCLASS, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
regs[a] = mrb_obj_value(target);
NEXT;
}
CASE(OP_ALIAS, BB) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_alias_method(mrb, target, syms[a], syms[b]);
mrb_method_added(mrb, target, syms[a]);
NEXT;
}
CASE(OP_UNDEF, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_undef_method_id(mrb, target, syms[a]);
NEXT;
}
CASE(OP_DEBUG, Z) {
FETCH_BBB();
#ifdef MRB_USE_DEBUG_HOOK
mrb->debug_op_hook(mrb, irep, pc, regs);
#else
#ifndef MRB_NO_STDIO
printf("OP_DEBUG %d %d %d\n", a, b, c);
#else
abort();
#endif
#endif
NEXT;
}
CASE(OP_ERR, B) {
size_t len = pool[a].tt >> 2;
mrb_value exc;
mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CASE(OP_EXT1, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT2, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT3, Z) {
uint8_t insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_STOP, Z) {
/* stop VM */
CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value());
}
CHECKPOINT_END(RBREAK_TAG_STOP);
L_STOP:
mrb->jmp = prev_jmp;
if (mrb->exc) {
mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
return mrb_obj_value(mrb->exc);
}
return regs[irep->nlocals];
}
}
END_DISPATCH;
#undef regs
}
MRB_CATCH(&c_jmp) {
mrb_callinfo *ci = mrb->c->ci;
while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
ci = cipop(mrb);
}
exc_catched = TRUE;
pc = ci->pc;
goto RETRY_TRY_BLOCK;
}
MRB_END_EXC(&c_jmp);
}
| 1
|
293,532
|
/*
 * Peek @len characters ahead without consuming input.  On success the
 * peeked region is stored in *out and the character just past it is
 * returned; on overrun a syntax error is flagged and -1 is returned.
 */
PJ_DEF(int) pj_scan_peek_n( pj_scanner *scanner,
                            pj_size_t len, pj_str_t *out)
{
    /* First position past the region we want to peek. */
    char *stop = scanner->curptr + len;

    /* Refuse to look beyond the end of the input buffer. */
    if (stop > scanner->end) {
        pj_scan_syntax_err(scanner);
        return -1;
    }

    pj_strset(out, scanner->curptr, len);
    return *stop;
}
| 0
|
247,739
|
// Records the SHA-1 digest the test expects to observe.  Returns *this so
// option setters can be chained fluently.
TestUtilOptions& setExpectedSha1Digest(const std::string& expected_sha1_digest) {
  this->expected_sha1_digest_ = expected_sha1_digest;
  return *this;
}
| 0
|
361,763
|
/*
 * Inspect one endpoint descriptor of @intf's altsetting @alt and, when it
 * matches a known em28xx endpoint address, record its transfer parameters
 * in @dev and flag the corresponding function via *has_vendor_audio,
 * *has_video and *has_dvb.  Only IN endpoints are considered.
 */
static void em28xx_check_usb_descriptor(struct em28xx *dev,
					struct usb_device *udev,
					struct usb_interface *intf,
					int alt, int ep,
					bool *has_vendor_audio,
					bool *has_video,
					bool *has_dvb)
{
	const struct usb_endpoint_descriptor *e;
	int sizedescr, size;

	/*
	 * NOTE:
	 *
	 * Old logic with support for isoc transfers only was:
	 *  0x82 isoc => analog
	 *  0x83 isoc => audio
	 *  0x84 isoc => digital
	 *
	 * New logic with support for bulk transfers
	 *  0x82 isoc  => analog
	 *  0x82 bulk  => analog
	 *  0x83 isoc* => audio
	 *  0x84 isoc  => digital
	 *  0x84 bulk  => analog or digital**
	 *  0x85 isoc  => digital TS2
	 *  0x85 bulk  => digital TS2
	 * (*: audio should always be isoc)
	 * (**: analog, if ep 0x82 is isoc, otherwise digital)
	 *
	 * The new logic preserves backwards compatibility and
	 * reflects the endpoint configurations we have seen
	 * so far. But there might be devices for which this
	 * logic is not sufficient...
	 */
	e = &intf->altsetting[alt].endpoint[ep].desc;

	if (!usb_endpoint_dir_in(e))
		return;

	sizedescr = le16_to_cpu(e->wMaxPacketSize);
	/* low 11 bits carry the base packet size */
	size = sizedescr & 0x7ff;

	/* high-speed high-bandwidth endpoints multiply the base size */
	if (udev->speed == USB_SPEED_HIGH)
		size = size * hb_mult(sizedescr);

	/* Only inspect input endpoints */
	switch (e->bEndpointAddress) {
	case 0x82:
		*has_video = true;
		if (usb_endpoint_xfer_isoc(e)) {
			dev->analog_ep_isoc = e->bEndpointAddress;
			dev->alt_max_pkt_size_isoc[alt] = size;
		} else if (usb_endpoint_xfer_bulk(e)) {
			dev->analog_ep_bulk = e->bEndpointAddress;
		}
		return;
	case 0x83:
		if (usb_endpoint_xfer_isoc(e))
			*has_vendor_audio = true;
		else
			dev_err(&intf->dev,
				"error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
		return;
	case 0x84:
		/* bulk 0x84 is analog only when an isoc analog ep exists */
		if (*has_video && (usb_endpoint_xfer_bulk(e))) {
			dev->analog_ep_bulk = e->bEndpointAddress;
		} else {
			if (usb_endpoint_xfer_isoc(e)) {
				if (size > dev->dvb_max_pkt_size_isoc) {
					/*
					 * 2) some manufacturers (e.g. Terratec)
					 * disable endpoints by setting
					 * wMaxPacketSize to 0 bytes for all
					 * alt settings. So far, we've seen
					 * this for DVB isoc endpoints only.
					 */
					*has_dvb = true;
					dev->dvb_ep_isoc = e->bEndpointAddress;
					dev->dvb_max_pkt_size_isoc = size;
					dev->dvb_alt_isoc = alt;
				}
			} else {
				*has_dvb = true;
				dev->dvb_ep_bulk = e->bEndpointAddress;
			}
		}
		return;
	case 0x85:
		/* second transport stream; keep the largest isoc alt seen */
		if (usb_endpoint_xfer_isoc(e)) {
			if (size > dev->dvb_max_pkt_size_isoc_ts2) {
				dev->dvb_ep_isoc_ts2 = e->bEndpointAddress;
				dev->dvb_max_pkt_size_isoc_ts2 = size;
				dev->dvb_alt_isoc = alt;
			}
		} else {
			dev->dvb_ep_bulk_ts2 = e->bEndpointAddress;
		}
		return;
	}
}
| 0
|
384,804
|
/*
 * Format character "c" into "buf" as "<xx>" or "<xxxx>" (hex), ending the
 * result with a NUL.  Characters above 0xff get four hex digits.
 */
transchar_hex(char_u *buf, int c)
{
    int		len = 0;

    buf[len++] = '<';
    if (c > 255)
    {
	buf[len++] = nr2hex((unsigned)c >> 12);
	buf[len++] = nr2hex((unsigned)c >> 8);
    }
    buf[len++] = nr2hex((unsigned)c >> 4);
    buf[len++] = nr2hex((unsigned)c);
    buf[len++] = '>';
    buf[len] = NUL;
}
| 0
|
300,807
|
/*
 * Fill one netlink socket-diag entry for a TIPC socket.  Sockets whose
 * state is not selected by @sk_filter_state (a bitmask keyed by
 * 1 << sk->sk_state) are skipped silently.  Returns 0 on success or when
 * filtered out, -EMSGSIZE when the skb ran out of space (any partially
 * built nests are cancelled before returning).
 */
int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tipc_sock *tsk, u32 sk_filter_state,
			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
{
	struct sock *sk = &tsk->sk;
	struct nlattr *attrs;
	struct nlattr *stat;

	/*filter response w.r.t sk_state*/
	if (!(sk_filter_state & (1 << sk->sk_state)))
		return 0;

	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto msg_cancel;

	if (__tipc_nl_add_sk_info(skb, tsk))
		goto attr_msg_cancel;

	/* basic socket identity: type, state, inode, owner uid, cookie */
	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
					 sock_i_uid(sk))) ||
	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
			      tipc_diag_gen_cookie(sk),
			      TIPC_NLA_SOCK_PAD))
		goto attr_msg_cancel;

	/* nested statistics: queue lengths, drops, congestion flags */
	stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
	if (!stat)
		goto attr_msg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
			skb_queue_len(&sk->sk_receive_queue)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
			skb_queue_len(&sk->sk_write_queue)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
			atomic_read(&sk->sk_drops)))
		goto stat_msg_cancel;

	if (tsk->cong_link_cnt &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
		goto stat_msg_cancel;

	if (tsk_conn_cong(tsk) &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
		goto stat_msg_cancel;

	nla_nest_end(skb, stat);

	if (tsk->group)
		if (tipc_group_fill_sock_diag(tsk->group, skb))
			goto stat_msg_cancel;

	nla_nest_end(skb, attrs);

	return 0;

stat_msg_cancel:
	nla_nest_cancel(skb, stat);
attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
msg_cancel:
	return -EMSGSIZE;
}
| 0
|
445,898
|
/* Completion callback for the extraction step of re-encrypting an archive:
 * once the original archive has been extracted to a temporary directory,
 * start adding that directory to the new archive with the new password /
 * header-encryption settings.  On failure the error is reported and the
 * operation stops here. */
archive_extraction_ready_for_encryption_cb (GObject *source_object,
					    GAsyncResult *result,
					    gpointer user_data)
{
	EncryptData *edata = user_data;
	FrWindow *window = edata->window;
	GList *list;
	GError *error = NULL;

	if (! fr_archive_operation_finish (FR_ARCHIVE (source_object), result, &error)) {
		_encrypt_operation_completed_with_error (window, FR_ACTION_ENCRYPTING_ARCHIVE, error);
		return;
	}

	fr_archive_action_started (window->archive, FR_ACTION_ENCRYPTING_ARCHIVE);

	/* single-element list: the whole temporary extraction tree */
	list = g_list_prepend (NULL, edata->temp_extraction_dir);
	fr_archive_add_files (edata->new_archive,
			      list,
			      edata->temp_extraction_dir,
			      NULL,
			      FALSE,
			      FALSE,
			      edata->password,
			      edata->encrypt_header,
			      window->priv->compression,
			      0,
			      window->priv->cancellable,
			      archive_add_ready_for_encryption_cb,
			      edata);

	/* frees only the list cells, not temp_extraction_dir itself */
	g_list_free (list);
}
| 0
|
222,512
|
// Stores `val` as return value `index` of this call frame.
// Fails with InvalidArgument on an out-of-range index or a dtype mismatch
// against the declared return type, and with Internal if the slot was
// already set; each slot may be written exactly once.
Status FunctionCallFrame::SetRetval(int index, const Tensor& val) {
  // Reject out-of-range indices up front.
  if (index < 0 || static_cast<size_t>(index) >= rets_.size()) {
    return errors::InvalidArgument("SetRetval ", index, " is not within [0, ",
                                   rets_.size(), ")");
  }
  // The provided tensor must carry the declared return dtype.
  if (val.dtype() != ret_types_[index]) {
    return errors::InvalidArgument(
        "Expects ret[", index, "] to be ", DataTypeString(ret_types_[index]),
        ", but ", DataTypeString(val.dtype()), " is provided.");
  }
  Retval& slot = rets_[index];
  if (slot.has_val) {
    return errors::Internal("Retval[", index, "] has already been set.");
  }
  slot.has_val = true;
  slot.val = val;
  return Status::OK();
}
| 0
|
218,856
|
// Annotates every node in `g` with the control-flow frame it belongs to,
// via a BFS starting from the root (in-degree 0) nodes.  An Enter node
// opens a child frame named by its "frame_name" attribute; an Exit node
// returns to the parent frame.  Results (per-node frame name, set of
// unique frame names) are written into `cf_info`.  Fails if an Exit node
// has no corresponding Enter.
Status ImmutableExecutorState::BuildControlFlowInfo(const Graph* g,
                                                    ControlFlowInfo* cf_info) {
  const int num_nodes = g->num_node_ids();
  cf_info->frame_names.resize(num_nodes);
  // parent_nodes[id] is the innermost enclosing Enter node (frame parent).
  std::vector<Node*> parent_nodes;
  parent_nodes.resize(num_nodes);
  std::vector<bool> visited;
  visited.resize(num_nodes);

  // Empty string denotes the root frame.
  string frame_name;
  std::deque<Node*> ready;

  // Initialize with the root nodes.
  for (Node* n : g->nodes()) {
    if (n->in_edges().empty()) {
      visited[n->id()] = true;
      cf_info->unique_frame_names.insert(frame_name);
      ready.push_back(n);
    }
  }

  while (!ready.empty()) {
    Node* curr_node = ready.front();
    int curr_id = curr_node->id();
    ready.pop_front();

    Node* parent = nullptr;
    if (IsEnter(curr_node)) {
      // Enter a child frame.
      TF_RETURN_IF_ERROR(
          GetNodeAttr(curr_node->attrs(), "frame_name", &frame_name));
      parent = curr_node;
    } else if (IsExit(curr_node)) {
      // Exit to the parent frame.
      parent = parent_nodes[curr_id];
      if (!parent) {
        return errors::InvalidArgument(
            "Invalid Exit op: Cannot find a corresponding Enter op.");
      }
      frame_name = cf_info->frame_names[parent->id()];
      parent = parent_nodes[parent->id()];
    } else {
      // Ordinary node: stay in the frame recorded when it was first seen.
      parent = parent_nodes[curr_id];
      frame_name = cf_info->frame_names[curr_id];
    }

    for (const Edge* out_edge : curr_node->out_edges()) {
      Node* out = out_edge->dst();
      if (IsSink(out)) continue;
      const int out_id = out->id();

      // Add to ready queue if not visited.
      bool is_visited = visited[out_id];
      if (!is_visited) {
        ready.push_back(out);
        visited[out_id] = true;

        // Process the node 'out'.
        cf_info->frame_names[out_id] = frame_name;
        parent_nodes[out_id] = parent;
        cf_info->unique_frame_names.insert(frame_name);
      }
    }
  }

  return Status::OK();
}
| 0
|
195,965
|
// Computes the Levenshtein (edit) distance between corresponding
// variable-length sequences of two SparseTensors, "hypothesis" and
// "truth".  Sequences are grouped by all dimensions except the last; the
// output tensor holds one distance per group, at the group's coordinates.
// A group present on only one side is scored against an empty sequence.
// When normalize_ is set, distances are divided by the truth length
// (infinity for a non-empty hypothesis vs. an empty truth).
void Compute(OpKernelContext* ctx) override {
  const Tensor* hypothesis_indices;
  const Tensor* hypothesis_values;
  const Tensor* hypothesis_shape;
  const Tensor* truth_indices;
  const Tensor* truth_values;
  const Tensor* truth_shape;
  OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices));
  OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values));
  OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape));
  OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices));
  OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values));
  OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape));
  OP_REQUIRES_OK(
      ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values,
                          *hypothesis_shape, *truth_indices, *truth_values,
                          *truth_shape));
  TensorShape hypothesis_st_shape;
  OP_REQUIRES_OK(ctx,
                 TensorShapeUtils::MakeShape(
                     hypothesis_shape->vec<int64_t>().data(),
                     hypothesis_shape->NumElements(), &hypothesis_st_shape));
  TensorShape truth_st_shape;
  OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(
                          truth_shape->vec<int64_t>().data(),
                          truth_shape->NumElements(), &truth_st_shape));
  // Assume indices are sorted in row-major order.
  std::vector<int64_t> sorted_order(truth_st_shape.dims());
  std::iota(sorted_order.begin(), sorted_order.end(), 0);
  sparse::SparseTensor hypothesis;
  OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
                          *hypothesis_indices, *hypothesis_values,
                          hypothesis_st_shape, sorted_order, &hypothesis));
  sparse::SparseTensor truth;
  OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
                          *truth_indices, *truth_values, truth_st_shape,
                          sorted_order, &truth));
  // Group dims 0, 1, ..., RANK - 1. The very last dim is assumed
  // to store the variable length sequences.
  std::vector<int64_t> group_dims(truth_st_shape.dims() - 1);
  std::iota(group_dims.begin(), group_dims.end(), 0);
  // Output shape is the element-wise max of both group shapes.
  TensorShape output_shape;
  for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) {
    output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d),
                                 truth_st_shape.dim_size(d)));
  }
  const auto output_elements = output_shape.num_elements();
  OP_REQUIRES(
      ctx, output_elements > 0,
      errors::InvalidArgument("Got output shape ", output_shape.DebugString(),
                              " which has 0 elements"));
  Tensor* output = nullptr;
  OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output));
  auto output_t = output->flat<float>();
  output_t.setZero();
  // Row-major strides, used to flatten group coordinates into `loc`.
  std::vector<int64_t> output_strides(output_shape.dims());
  output_strides[output_shape.dims() - 1] = 1;
  for (int d = output_shape.dims() - 2; d >= 0; --d) {
    output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1);
  }
  // Merge-walk both group iterators; group keys are compared
  // lexicographically so matching groups align.
  auto hypothesis_grouper = hypothesis.group(group_dims);
  auto truth_grouper = truth.group(group_dims);
  auto hypothesis_iter = hypothesis_grouper.begin();
  auto truth_iter = truth_grouper.begin();
  auto cmp = std::equal_to<T>();
  while (hypothesis_iter != hypothesis_grouper.end() &&
         truth_iter != truth_grouper.end()) {
    sparse::Group truth_i = *truth_iter;
    sparse::Group hypothesis_j = *hypothesis_iter;
    std::vector<int64_t> g_truth = truth_i.group();
    std::vector<int64_t> g_hypothesis = hypothesis_j.group();
    auto truth_seq = truth_i.values<T>();
    auto hypothesis_seq = hypothesis_j.values<T>();
    if (g_truth == g_hypothesis) {
      // Both sides have this group: true edit distance.
      auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
                                    output_strides.begin(), int64_t{0});
      OP_REQUIRES(
          ctx, loc < output_elements,
          errors::Internal("Got an inner product ", loc,
                           " which would require in writing to outside of "
                           "the buffer for the output tensor (max elements ",
                           output_elements, ")"));
      output_t(loc) =
          gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp);
      if (normalize_) output_t(loc) /= truth_seq.size();
      ++hypothesis_iter;
      ++truth_iter;
    } else if (g_truth > g_hypothesis) {  // zero-length truth
      auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(),
                                    output_strides.begin(), int64_t{0});
      OP_REQUIRES(
          ctx, loc < output_elements,
          errors::Internal("Got an inner product ", loc,
                           " which would require in writing to outside of "
                           "the buffer for the output tensor (max elements ",
                           output_elements, ")"));
      output_t(loc) = hypothesis_seq.size();
      if (normalize_ && output_t(loc) != 0.0f) {
        output_t(loc) = std::numeric_limits<float>::infinity();
      }
      ++hypothesis_iter;
    } else {  // zero-length hypothesis
      auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
                                    output_strides.begin(), int64_t{0});
      OP_REQUIRES(
          ctx, loc < output_elements,
          errors::Internal("Got an inner product ", loc,
                           " which would require in writing to outside of "
                           "the buffer for the output tensor (max elements ",
                           output_elements, ")"));
      output_t(loc) = (normalize_) ? 1.0 : truth_seq.size();
      ++truth_iter;
    }
  }
  // Remaining hypothesis groups have no matching truth (empty truth).
  while (hypothesis_iter != hypothesis_grouper.end()) {  // zero-length truths
    sparse::Group hypothesis_j = *hypothesis_iter;
    std::vector<int64_t> g_hypothesis = hypothesis_j.group();
    auto hypothesis_seq = hypothesis_j.values<T>();
    auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(),
                                  output_strides.begin(), int64_t{0});
    OP_REQUIRES(
        ctx, loc < output_elements,
        errors::Internal("Got an inner product ", loc,
                         " which would require in writing to outside of the "
                         "buffer for the output tensor (max elements ",
                         output_elements, ")"));
    output_t(loc) = hypothesis_seq.size();
    if (normalize_ && output_t(loc) != 0.0f) {
      output_t(loc) = std::numeric_limits<float>::infinity();
    }
    ++hypothesis_iter;
  }
  // Remaining truth groups have no matching hypothesis (empty hypothesis).
  while (truth_iter != truth_grouper.end()) {  // missing hypotheses
    sparse::Group truth_i = *truth_iter;
    std::vector<int64_t> g_truth = truth_i.group();
    auto truth_seq = truth_i.values<T>();
    auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
                                  output_strides.begin(), int64_t{0});
    OP_REQUIRES(
        ctx, loc < output_elements,
        errors::Internal("Got an inner product ", loc,
                         " which would require in writing to outside of the "
                         "buffer for the output tensor (max elements ",
                         output_elements, ")"));
    output_t(loc) = (normalize_) ? 1.0 : truth_seq.size();
    ++truth_iter;
  }
}
| 1
|
405,372
|
/*
 * Look up the xfrm policy of @type matching flow @fl for the given
 * @family, direction @dir and interface id @if_id.  The exact (hashed)
 * table is searched first, then the inexact candidate lists, all under
 * RCU.  The seqcount guards against concurrent hash-table resizes: the
 * whole lookup is retried if the generation changed, or if the reference
 * on the found policy could not be taken.  Returns NULL if no policy
 * matches, a held policy on success, or an ERR_PTR() on error.
 */
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir,
						     u32 if_id)
{
	struct xfrm_pol_inexact_candidates cand;
	const xfrm_address_t *daddr, *saddr;
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	unsigned int sequence;
	int err;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	rcu_read_lock();
 retry:
	/* Sample the hash generation and pick the chain consistently. */
	do {
		sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
		chain = policy_hash_direct(net, daddr, saddr, family, dir);
	} while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));

	ret = NULL;
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err == -ESRCH)
				continue;	/* no match, keep scanning */
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}
	/* Inexact policies may still take precedence over the exact hit. */
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
							 daddr))
		goto skip_inexact;

	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
					  family, dir, if_id);
	if (pol) {
		ret = pol;
		if (IS_ERR(pol))
			goto fail;
	}

skip_inexact:
	/* Hash tables were resized while we looked: start over. */
	if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
		goto retry;

	if (ret && !xfrm_pol_hold_rcu(ret))
		goto retry;
fail:
	rcu_read_unlock();

	return ret;
}
| 0
|
275,948
|
/*
 * Computes result = product (mod mod), where product is 2 * num_words
 * words long, by classic shift-and-subtract long division.  The loop
 * body runs identically whether or not the subtraction succeeds (the
 * difference is written to the *other* slot of v[] and the index swap is
 * branch-free), which keeps the control flow independent of the data —
 * NOTE(review): presumably for constant-time behavior; confirm against
 * the library's threat model.
 */
uECC_VLI_API void uECC_vli_mmod(uECC_word_t *result,
                                uECC_word_t *product,
                                const uECC_word_t *mod,
                                wordcount_t num_words) {
    uECC_word_t mod_multiple[2 * uECC_MAX_WORDS];
    uECC_word_t tmp[2 * uECC_MAX_WORDS];
    uECC_word_t *v[2] = {tmp, product};
    uECC_word_t index;

    /* Shift mod so its highest set bit is at the maximum position. */
    bitcount_t shift = (num_words * 2 * uECC_WORD_BITS) - uECC_vli_numBits(mod, num_words);
    wordcount_t word_shift = shift / uECC_WORD_BITS;
    wordcount_t bit_shift = shift % uECC_WORD_BITS;
    uECC_word_t carry = 0;
    uECC_vli_clear(mod_multiple, word_shift);
    if (bit_shift > 0) {
        /* Shift mod left by bit_shift bits, word by word, carrying over. */
        for(index = 0; index < (uECC_word_t)num_words; ++index) {
            mod_multiple[word_shift + index] = (mod[index] << bit_shift) | carry;
            carry = mod[index] >> (uECC_WORD_BITS - bit_shift);
        }
    } else {
        uECC_vli_set(mod_multiple + word_shift, mod, num_words);
    }

    /* index selects which of v[] currently holds the running remainder. */
    for (index = 1; shift >= 0; --shift) {
        uECC_word_t borrow = 0;
        wordcount_t i;
        /* Trial subtraction of the shifted modulus, into the other slot. */
        for (i = 0; i < num_words * 2; ++i) {
            uECC_word_t diff = v[index][i] - mod_multiple[i] - borrow;
            if (diff != v[index][i]) {
                borrow = (diff > v[index][i]);
            }
            v[1 - index][i] = diff;
        }
        index = !(index ^ borrow); /* Swap the index if there was no borrow */
        /* Shift the modulus multiple right by one bit (across both halves). */
        uECC_vli_rshift1(mod_multiple, num_words);
        mod_multiple[num_words - 1] |= mod_multiple[num_words] << (uECC_WORD_BITS - 1);
        uECC_vli_rshift1(mod_multiple + num_words, num_words);
    }
    uECC_vli_set(result, v[index], num_words);
}
| 0
|
313,135
|
/* Initialize the global backingchain[] test fixture: link the entries into
 * a singly linked chain (each element's backingStore points at the next,
 * the last at NULL), then assign path/relPath pairs for each scenario.
 * Entries with relPath == NULL are the chain heads used as base images. */
testPathRelativePrepare(void)
{
    size_t i;

    for (i = 0; i < G_N_ELEMENTS(backingchain); i++) {
        backingchain[i].type = VIR_STORAGE_TYPE_FILE;
        if (i < G_N_ELEMENTS(backingchain) - 1)
            backingchain[i].backingStore = &backingchain[i + 1];
        else
            backingchain[i].backingStore = NULL;

        backingchain[i].relPath = NULL;
    }

    /* normal relative backing chain */
    backingchain[0].path = (char *) "/path/to/some/img";

    backingchain[1].path = (char *) "/path/to/some/asdf";
    backingchain[1].relPath = (char *) "asdf";

    backingchain[2].path = (char *) "/path/to/some/test";
    backingchain[2].relPath = (char *) "test";

    backingchain[3].path = (char *) "/path/to/some/blah";
    backingchain[3].relPath = (char *) "blah";

    /* ovirt's backing chain */
    backingchain[4].path = (char *) "/path/to/volume/image1";

    backingchain[5].path = (char *) "/path/to/volume/image2";
    backingchain[5].relPath = (char *) "../volume/image2";

    backingchain[6].path = (char *) "/path/to/volume/image3";
    backingchain[6].relPath = (char *) "../volume/image3";

    backingchain[7].path = (char *) "/path/to/volume/image4";
    backingchain[7].relPath = (char *) "../volume/image4";

    /* some arbitrarily crazy backing chains */
    backingchain[8].path = (char *) "/crazy/base/image";

    backingchain[9].path = (char *) "/crazy/base/directory/stuff/volumes/garbage/image2";
    backingchain[9].relPath = (char *) "directory/stuff/volumes/garbage/image2";

    backingchain[10].path = (char *) "/crazy/base/directory/image3";
    backingchain[10].relPath = (char *) "../../../image3";

    backingchain[11].path = (char *) "/crazy/base/blah/image4";
    backingchain[11].relPath = (char *) "../blah/image4";
}
| 0
|
310,160
|
/* Terminal-driver release hook: this driver holds no per-terminal
 * resources, so there is nothing to free. */
drv_release(TERMINAL_CONTROL_BLOCK * TCB GCC_UNUSED)
{
}
| 0
|
328,926
|
/*
 * Collect the names of classes referenced from the constant pool, i.e.
 * the libraries/classes this class depends on.  The class itself is
 * excluded unless it is not an interface.  Returns a new RList of
 * heap-allocated strings (freed with the list); empty list when bin is
 * NULL.
 */
R_API RList *r_bin_java_get_lib_names(RBinJavaObj *bin) {
	RList *lib_names = r_list_newf (free);
	RListIter *iter;
	RBinJavaCPTypeObj *cp_obj = NULL;
	if (!bin) {
		return lib_names;
	}
	r_list_foreach (bin->cp_list, iter, cp_obj) {
		/* only CP class entries are of interest */
		if (!cp_obj || cp_obj->tag != R_BIN_JAVA_CP_CLASS) {
			continue;
		}
		/* skip the entry naming this class itself when it is an interface */
		if (bin->cf2.this_class == cp_obj->info.cp_class.name_idx &&
			is_class_interface (bin, cp_obj)) {
			continue;
		}
		char *name = r_bin_java_get_item_name_from_bin_cp_list (bin, cp_obj);
		if (name) {
			r_list_append (lib_names, name);
		}
	}
	return lib_names;
}
| 0
|
263,492
|
/*
 * Allocate and initialise a new SCO socket.  Defaults: BT_OPEN state,
 * CVSD 16-bit voice setting, SCO connection timeout for sends.  The new
 * sock is linked into sco_sk_list before returning; NULL on allocation
 * failure.
 */
static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
				   int proto, gfp_t prio, int kern)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = sco_sock_destruct;
	sk->sk_sndtimeo = SCO_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;

	bt_sock_link(&sco_sk_list, sk);
	return sk;
}
| 0
|
301,439
|
/*
 * Report the filesystem capabilities of the share's connect path and the
 * best timestamp resolution we can set on it.  Capabilities come from
 * statvfs when available (falling back to case-sensitive/preserving
 * defaults); the timestamp resolution is probed by stat'ing the connect
 * path and checking for non-zero nanosecond fields.
 */
static uint32_t vfswrap_fs_capabilities(struct vfs_handle_struct *handle,
					enum timestamp_set_resolution *p_ts_res)
{
	connection_struct *conn = handle->conn;
	uint32_t caps = FILE_CASE_SENSITIVE_SEARCH | FILE_CASE_PRESERVED_NAMES;
	struct smb_filename *smb_fname_cpath = NULL;
	struct vfs_statvfs_struct statbuf;
	int ret;

	ZERO_STRUCT(statbuf);
	ret = sys_statvfs(conn->connectpath, &statbuf);
	if (ret == 0) {
		caps = statbuf.FsCapabilities;
	}

	/* Conservative default until proven otherwise below. */
	*p_ts_res = TIMESTAMP_SET_SECONDS;

	/* Work out what timestamp resolution we can
	 * use when setting a timestamp. */

	smb_fname_cpath = synthetic_smb_fname(talloc_tos(), conn->connectpath,
					      NULL, NULL);
	if (smb_fname_cpath == NULL) {
		return caps;
	}

	ret = SMB_VFS_STAT(conn, smb_fname_cpath);
	if (ret == -1) {
		TALLOC_FREE(smb_fname_cpath);
		return caps;
	}

	if (smb_fname_cpath->st.st_ex_mtime.tv_nsec ||
			smb_fname_cpath->st.st_ex_atime.tv_nsec ||
			smb_fname_cpath->st.st_ex_ctime.tv_nsec) {
		/* If any of the normal UNIX directory timestamps
		 * have a non-zero tv_nsec component assume
		 * we might be able to set sub-second timestamps.
		 * See what filetime set primitives we have.
		 */
#if defined(HAVE_UTIMENSAT)
		*p_ts_res = TIMESTAMP_SET_NT_OR_BETTER;
#elif defined(HAVE_UTIMES)
		/* utimes allows msec timestamps to be set. */
		*p_ts_res = TIMESTAMP_SET_MSEC;
#elif defined(HAVE_UTIME)
		/* utime only allows sec timestamps to be set. */
		*p_ts_res = TIMESTAMP_SET_SECONDS;
#endif

		DEBUG(10,("vfswrap_fs_capabilities: timestamp "
			"resolution of %s "
			"available on share %s, directory %s\n",
			*p_ts_res == TIMESTAMP_SET_MSEC ? "msec" : "sec",
			lp_servicename(talloc_tos(), conn->params->service),
			conn->connectpath ));
	}
	TALLOC_FREE(smb_fname_cpath);
	return caps;
}
| 0
|
430,405
|
/*
 * Extract the unique flow identifier (UFID) from netlink attribute @attr
 * into @sfid.  Returns true when a non-empty UFID was copied, false when
 * the attribute is absent/empty (sfid->ufid_len is left at 0).
 */
bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
		      bool log)
{
	sfid->ufid_len = get_ufid_len(attr, log);
	if (sfid->ufid_len != 0)
		memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);

	return sfid->ufid_len != 0;
}
| 0
|
261,938
|
/*
 * Build a byte string in vm->retval from an array-like value:
 *   - an njs array: each element is converted to uint32 and truncated
 *     to one octet;
 *   - a typed array or array buffer: the raw buffer bytes are copied;
 *   - any other object: its "length" property gives the size and its
 *     indexed properties are read and truncated to octets.
 * Returns NJS_OK, or NJS_ERROR on conversion/allocation failure.
 */
njs_string_bytes_from_array_like(njs_vm_t *vm, njs_value_t *value)
{
    u_char              *p;
    int64_t             length;
    uint32_t            u32;
    njs_int_t           ret;
    njs_array_t         *array;
    njs_value_t         *octet, index, prop;
    njs_array_buffer_t  *buffer;

    array = NULL;
    buffer = NULL;

    /* Determine the byte length according to the value's kind. */
    switch (value->type) {
    case NJS_ARRAY:
        array = njs_array(value);
        length = array->length;
        break;

    case NJS_ARRAY_BUFFER:
    case NJS_TYPED_ARRAY:

        if (njs_is_typed_array(value)) {
            buffer = njs_typed_array(value)->buffer;
        } else {
            buffer = njs_array_buffer(value);
        }

        length = buffer->size;
        break;

    default:
        ret = njs_object_length(vm, value, &length);
        if (njs_slow_path(ret == NJS_ERROR)) {
            return ret;
        }
    }

    p = njs_string_alloc(vm, &vm->retval, length, 0);
    if (njs_slow_path(p == NULL)) {
        return NJS_ERROR;
    }

    if (array != NULL) {
        /* Plain array: convert each element to an octet in order. */
        octet = array->start;

        while (length != 0) {
            ret = njs_value_to_uint32(vm, octet, &u32);
            if (njs_slow_path(ret != NJS_OK)) {
                return ret;
            }

            *p++ = (u_char) u32;
            octet++;
            length--;
        }

    } else if (buffer != NULL) {
        /* Typed array / buffer: raw byte copy. */
        memcpy(p, buffer->u.u8, length);

    } else {
        /* Generic object: read indexed properties, filling from the
         * highest index down. */
        p += length - 1;

        while (length != 0) {
            njs_set_number(&index, length - 1);

            ret = njs_value_property(vm, value, &index, &prop);
            if (njs_slow_path(ret == NJS_ERROR)) {
                return ret;
            }

            ret = njs_value_to_uint32(vm, &prop, &u32);
            if (njs_slow_path(ret != NJS_OK)) {
                return ret;
            }

            *p-- = (u_char) u32;
            length--;
        }
    }

    return NJS_OK;
}
| 0
|
466,178
|
/* Emulate the ADC (add with carry) instruction via the generic
 * two-operand helper macro. */
static int em_adc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "adc");
	return X86EMUL_CONTINUE;
}
| 0
|
359,528
|
/* CLI handler for "no neighbor <peer> send-community": clears the
 * send-community flag for the peer in the AFI/SAFI of the current
 * configuration node. */
DEFUN (no_neighbor_send_community,
       no_neighbor_send_community_cmd,
       NO_NEIGHBOR_CMD2 "send-community",
       NO_STR
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR2
       "Send Community attribute to this neighbor\n")
{
  return peer_af_flag_unset_vty (vty, argv[0], bgp_node_afi (vty),
				 bgp_node_safi (vty),
				 PEER_FLAG_SEND_COMMUNITY);
}
| 0
|
219,036
|
// Splits a commutative aggregate node (AddN/AccumulateNV2) whose inputs are
// partly constant into an AddN over only the constant inputs plus the
// original node over the remaining inputs, so the constant part can be
// folded.  Returns true when the graph was modified.
bool ConstantFolding::PartialAssocOpConstFolding(GraphDef* optimized_graph,
                                                 GraphProperties* properties,
                                                 NodeDef* node) {
  // Partial constant folding for associative operators:
  // Split AddN/AccumulateNV2 to enable partial
  // folding of ops when more than one but not all inputs are constant.
  // For AddN and AccumulateNV2, we may furthermore reorder inputs, since
  // addition is commutative.
  if (!IsAggregate(*node) || !IsCommutative(*node)) return false;

  const int num_non_control_inputs = NumNonControlInputs(*node);
  // Nothing to gain with fewer than 3 data inputs.
  if (num_non_control_inputs <= 2) return false;
  const int num_control_inputs = node->input_size() - num_non_control_inputs;
  // Partition input slots into constant and non-constant/control.
  std::vector<int> const_inputs;
  std::vector<int> nonconst_inputs;
  for (int i = 0; i < node->input_size(); ++i) {
    const string& input = node->input(i);
    const NodeDef* input_node = node_map_->GetNode(NodeName(input));
    if (input_node == nullptr) return false;
    if (!IsControlInput(input) && IsReallyConstant(*input_node)) {
      const_inputs.push_back(i);
    } else {
      // Non-const and control inputs.
      nonconst_inputs.push_back(i);
    }
  }

  // Promote AccumulateNV2 with all constant inputs to AddN, since it is
  // a fake node that cannot be constant folded by itself.
  int const_inputs_size = const_inputs.size();
  if (const_inputs_size == num_non_control_inputs &&
      node->op() == "AccumulateNV2") {
    node->set_op("AddN");
    node->mutable_attr()->erase("shape");
    return true;
  }

  const string new_node_name = OptimizedNodeName(
      *node, strings::StrCat("_partial_split_", const_inputs_size));
  if (const_inputs_size > 1 && const_inputs_size < num_non_control_inputs &&
      !node_map_->NodeExists(new_node_name)) {
    NodeDef* added_node = optimized_graph->add_node();
    *added_node = *node;
    // Always use AddN for the constant node, since AccumulateNV2 is a fake
    // node that cannot be constant folded, since it does not have a kernel.
    added_node->set_op("AddN");
    added_node->mutable_attr()->erase("shape");
    added_node->set_name(new_node_name);
    node_map_->AddNode(added_node->name(), added_node);
    // Move all constant inputs onto the new AddN node.
    added_node->clear_input();
    for (int i : const_inputs) {
      added_node->add_input(node->input(i));
      node_map_->UpdateOutput(NodeName(node->input(i)), node->name(),
                              added_node->name());
    }

    // Overwrite the first const input with the added node.
    node->set_input(const_inputs[0], added_node->name());
    node_map_->AddOutput(added_node->name(), node->name());
    nonconst_inputs.push_back(const_inputs[0]);
    // Compact the remaining inputs to the original node.
    std::sort(nonconst_inputs.begin(), nonconst_inputs.end());
    int idx = 0;
    for (int i : nonconst_inputs) {
      if (idx != i) {
        node->set_input(idx, node->input(i));
      }
      ++idx;
    }
    // Drop the now-unused trailing input slots and fix up the N attrs.
    node->mutable_input()->DeleteSubrange(nonconst_inputs.size(),
                                          const_inputs.size() - 1);
    (*node->mutable_attr())["N"].set_i(node->input_size() - num_control_inputs);
    properties->ClearInputProperties(node->name());
    (*added_node->mutable_attr())["N"].set_i(const_inputs.size());
    return true;
  }
  return false;
}
| 0
|
455,175
|
/*
 * Open the file at @path and parse it into @m.  Returns
 * MOBI_FILE_NOT_FOUND when the file cannot be opened, otherwise the
 * status of mobi_load_file().  The file handle is always closed before
 * returning.
 */
MOBI_RET mobi_load_filename(MOBIData *m, const char *path) {
    FILE *stream = fopen(path, "rb");
    if (stream == NULL) {
        debug_print("%s", "File not found\n");
        return MOBI_FILE_NOT_FOUND;
    }
    MOBI_RET status = mobi_load_file(m, stream);
    fclose(stream);
    return status;
}
| 0
|
289,289
|
/*
 * Reset both (playback and capture) substreams of an OSS file: drop any
 * running stream and, under the params lock, clear the OSS emulation
 * bookkeeping so the substream is re-prepared on next use.  Always
 * returns 0.
 */
static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	int i;

	for (i = 0; i < 2; i++) {
		substream = pcm_oss_file->streams[i];
		if (!substream)
			continue;
		runtime = substream->runtime;
		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
		mutex_lock(&runtime->oss.params_lock);
		runtime->oss.prepare = 1;
		runtime->oss.buffer_used = 0;
		runtime->oss.prev_hw_ptr_period = 0;
		runtime->oss.period_ptr = 0;
		mutex_unlock(&runtime->oss.params_lock);
	}
	return 0;
}
| 0
|
234,727
|
/* Touch the timestamps of the file at @path_name (best effort: failures
 * to open the path are silently ignored). */
static void update_dev_time(const char *path_name)
{
	struct file *filp = filp_open(path_name, O_RDWR, 0);

	if (IS_ERR(filp))
		return;

	file_update_time(filp);
	filp_close(filp, NULL);
}
| 0
|
459,525
|
/* Direct element updates are not supported for stack maps; always
 * rejects with -EINVAL. */
static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}
| 0
|
244,007
|
/*
 * Serialize a FEC reservoir box ('fecr') to the bitstream.
 * Version 0 uses 16-bit entry-count/item-id fields, version 1+ uses
 * 32-bit fields; symbol_count is always 32 bits.
 */
GF_Err fecr_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err err;
	u32 idx, field_bits;
	FECReservoirBox *fecr = (FECReservoirBox *) s;

	if (!s) return GF_BAD_PARAM;
	err = gf_isom_full_box_write(s, bs);
	if (err) return err;

	field_bits = fecr->version ? 32 : 16;
	gf_bs_write_int(bs, fecr->nb_entries, field_bits);
	for (idx = 0; idx < fecr->nb_entries; idx++) {
		gf_bs_write_int(bs, fecr->entries[idx].item_id, field_bits);
		gf_bs_write_u32(bs, fecr->entries[idx].symbol_count);
	}
	return GF_OK;
}
| 0
|
427,731
|
/*
 * Read the Sector Allocation Table (SAT) of a compound document file.
 * The first SAT sector ids come from the header's in-place master SAT
 * (h->h_master_sat); additional ids are chained through extra master-SAT
 * sectors starting at h->h_secid_first_sector_in_master_sat.
 * On success fills sat->sat_tab / sat->sat_len and returns 0; on failure
 * returns -1 (errno set to EFTYPE for structurally invalid files).
 */
cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat)
{
	size_t i, j, k;
	size_t ss = CDF_SEC_SIZE(h);
	cdf_secid_t *msa, mid, sec;
	/* Each extra master-SAT sector holds (ss/sizeof(id) - 1) SAT ids;
	 * its last slot links to the next master-SAT sector. */
	size_t nsatpersec = (ss / sizeof(mid)) - 1;
	/* Count used entries in the in-header master SAT. */
	for (i = 0; i < __arraycount(h->h_master_sat); i++)
		if (h->h_master_sat[i] == CDF_SECID_FREE)
			break;
#define CDF_SEC_LIMIT (UINT32_MAX / (64 * ss))
	/* Bound the computed table size so the multiplication below and
	 * the calloc cannot overflow on hostile headers. */
	if ((nsatpersec > 0 &&
	    h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) ||
	    i > CDF_SEC_LIMIT) {
		DPRINTF(("Number of sectors in master SAT too big %u %"
		    SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i));
		errno = EFTYPE;
		return -1;
	}
	sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i;
	DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n",
	    sat->sat_len, ss));
	if ((sat->sat_tab = CAST(cdf_secid_t *, CDF_CALLOC(sat->sat_len, ss)))
	    == NULL)
		return -1;
	/* Load the SAT sectors named directly in the header. */
	for (i = 0; i < __arraycount(h->h_master_sat); i++) {
		if (h->h_master_sat[i] < 0)
			break;
		if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
		    h->h_master_sat[i]) != CAST(ssize_t, ss)) {
			DPRINTF(("Reading sector %d", h->h_master_sat[i]));
			goto out1;
		}
	}
	/* Scratch buffer for one master-SAT sector. */
	if ((msa = CAST(cdf_secid_t *, CDF_CALLOC(1, ss))) == NULL)
		goto out1;
	mid = h->h_secid_first_sector_in_master_sat;
	/* Walk the chain of additional master-SAT sectors, bounded by
	 * CDF_LOOP_LIMIT to defeat cyclic chains. */
	for (j = 0; j < h->h_num_sectors_in_master_sat; j++) {
		if (mid < 0)
			goto out;
		if (j >= CDF_LOOP_LIMIT) {
			DPRINTF(("Reading master sector loop limit"));
			goto out3;
		}
		if (cdf_read_sector(info, msa, 0, ss, h, mid) !=
		    CAST(ssize_t, ss)) {
			DPRINTF(("Reading master sector %d", mid));
			goto out2;
		}
		for (k = 0; k < nsatpersec; k++, i++) {
			sec = CDF_TOLE4(CAST(uint32_t, msa[k]));
			if (sec < 0)
				goto out;
			/* Never write past the allocated table. */
			if (i >= sat->sat_len) {
				DPRINTF(("Out of bounds reading MSA %"
				    SIZE_T_FORMAT "u >= %" SIZE_T_FORMAT "u",
				    i, sat->sat_len));
				goto out3;
			}
			if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
			    sec) != CAST(ssize_t, ss)) {
				DPRINTF(("Reading sector %d",
				    CDF_TOLE4(msa[k])));
				goto out2;
			}
		}
		/* Last slot of the sector is the id of the next one. */
		mid = CDF_TOLE4(CAST(uint32_t, msa[nsatpersec]));
	}
out:
	/* Record how many SAT sectors were actually loaded. */
	sat->sat_len = i;
	free(msa);
	return 0;
out3:
	errno = EFTYPE;
out2:
	free(msa);
out1:
	free(sat->sat_tab);
	return -1;
}
| 0
|
314,498
|
/*
 * Parse the SDP "o=" (origin) line into ses->origin.
 * Expected form: o=<user> <id> <version> <net-type> <addr-type> <addr>.
 * Field order matters: each pj_scan_get_until_* consumes up to the next
 * separator and pj_scan_get_char() skips it.  On malformed input the
 * scanner's error callback is triggered and ctx->last_error reports
 * PJMEDIA_SDP_EINORIGIN.
 */
static void parse_origin(pj_scanner *scanner, pjmedia_sdp_session *ses,
			 volatile parse_context *ctx)
{
    pj_str_t str;
    ctx->last_error = PJMEDIA_SDP_EINORIGIN;
    /* check equal sign */
    if (*(scanner->curptr+1) != '=') {
	on_scanner_error(scanner);
	return;
    }
    /* o= */
    pj_scan_advance_n(scanner, 2, SKIP_WS);
    /* username. */
    pj_scan_get_until_ch(scanner, ' ', &ses->origin.user);
    pj_scan_get_char(scanner);
    /* id */
    pj_scan_get_until_ch(scanner, ' ', &str);
    ses->origin.id = pj_strtoul(&str);
    pj_scan_get_char(scanner);
    /* version */
    pj_scan_get_until_ch(scanner, ' ', &str);
    ses->origin.version = pj_strtoul(&str);
    pj_scan_get_char(scanner);
    /* network-type */
    pj_scan_get_until_ch(scanner, ' ', &ses->origin.net_type);
    pj_scan_get_char(scanner);
    /* addr-type */
    pj_scan_get_until_ch(scanner, ' ', &ses->origin.addr_type);
    pj_scan_get_char(scanner);
    /* address */
    pj_scan_get_until_chr(scanner, " \t\r\n", &ses->origin.addr);
    /* We've got what we're looking for, skip anything until newline */
    pj_scan_skip_line(scanner);
}
| 0
|
463,176
|
/*
 * Total byte size of all attribute values across every entry of `l`:
 * the sum of value.len over each entry's attvalues list.
 */
EXPORTED size_t sizeentryatts(const struct entryattlist *l)
{
    size_t total = 0;
    const struct entryattlist *entry;

    for (entry = l; entry != NULL; entry = entry->next) {
        struct attvaluelist *att;
        for (att = entry->attvalues; att != NULL; att = att->next) {
            total += att->value.len;
        }
    }
    return total;
}
| 0
|
473,954
|
/* EUC-TW case folding: delegates entirely to the generic multibyte
 * helper onigenc_mbn_mbc_case_fold(); the folded bytes are written to
 * `lower` and the return value is that helper's result. */
euctw_mbc_case_fold(OnigCaseFoldType flag, const UChar** pp, const UChar* end,
                    UChar* lower, OnigEncoding enc)
{
  return onigenc_mbn_mbc_case_fold(enc, flag,
                                   pp, end, lower);
}
| 0
|
379,658
|
/* Number of entries in fcn->vars; returns 0 when fcn is NULL
 * (guarded by r_return_val_if_fail). */
R_API int r_anal_var_count_all(RAnalFunction *fcn) {
	r_return_val_if_fail (fcn, 0);
	return r_pvector_len (&fcn->vars);
}
| 0
|
377,483
|
/* Release the resources held by a lined symbol: first the embedded
 * symbol, then its file/line/column record.  NULL-safe. */
static void r_coresym_cache_element_lined_symbol_fini(RCoreSymCacheElementLinedSymbol *sym) {
	if (!sym) {
		return;
	}
	r_coresym_cache_element_symbol_fini (&sym->sym);
	r_coresym_cache_element_flc_fini (&sym->flc);
}
| 0
|
389,758
|
/* Convert `inimage` to the sRGB color space via a jas_cm color transform.
 *
 * NOTE(review): this body references `outprof`, `xform`, `i` and
 * `oldimage`, none of which are declared locally — presumably they are
 * file-scope globals in the original translation unit; verify against
 * the full file.  Also note the declared return type is jas_image_t*
 * but the function returns 0 (NULL) and never uses `outimage` — looks
 * unfinished or truncated; confirm against upstream before relying on
 * the return value. */
jas_image_t *converttosrgb(jas_image_t *inimage)
{
	jas_image_t *outimage;
	jas_cmpixmap_t inpixmap;
	jas_cmpixmap_t outpixmap;
	jas_cmcmptfmt_t incmptfmts[16];
	jas_cmcmptfmt_t outcmptfmts[16];
	outprof = jas_cmprof_createfromclrspc(JAS_CLRSPC_SRGB);
	assert(outprof);
	xform = jas_cmxform_create(jas_image_cmprof(inimage), outprof, 0, JAS_CMXFORM_FWD, JAS_CMXFORM_INTENT_PER, JAS_CMXFORM_OPTM_SPEED);
	assert(xform);
	/* NOTE(review): uses `oldimage`, not the `inimage` parameter —
	 * possibly a latent bug; confirm which image is intended. */
	inpixmap.numcmpts = jas_image_numcmpts(oldimage);
	outpixmap.numcmpts = 3;
	for (i = 0; i < inpixmap.numcmpts; ++i) {
		inpixmap.cmptfmts[i] = &incmptfmts[i];
	}
	for (i = 0; i < outpixmap.numcmpts; ++i)
		outpixmap.cmptfmts[i] = &outcmptfmts[i];
	if (jas_cmxform_apply(xform, &inpixmap, &outpixmap))
		abort();
	jas_xform_destroy(xform);
	jas_cmprof_destroy(outprof);
	return 0;
}
| 0
|
224,292
|
/*
 * Build and install the HTTP reply headers for a gopher response,
 * mapping the gopher item type (gopherState->type_id) to a MIME
 * content type (and, for binary/file items, a content encoding looked
 * up from the request).  The reply is attached to the store entry and
 * remembered in gopherState->reply_.
 */
gopherMimeCreate(GopherStateData * gopherState)
{
    StoreEntry *entry = gopherState->entry;
    const char *mime_type = NULL;
    const char *mime_enc = NULL;
    switch (gopherState->type_id) {
    case GOPHER_DIRECTORY:
    case GOPHER_INDEX:
    case GOPHER_HTML:
    case GOPHER_WWW:
    case GOPHER_CSO:
        mime_type = "text/html";
        break;
    case GOPHER_GIF:
    case GOPHER_IMAGE:
    case GOPHER_PLUS_IMAGE:
        mime_type = "image/gif";
        break;
    case GOPHER_SOUND:
    case GOPHER_PLUS_SOUND:
        mime_type = "audio/basic";
        break;
    case GOPHER_PLUS_MOVIE:
        mime_type = "video/mpeg";
        break;
    case GOPHER_MACBINHEX:
    case GOPHER_DOSBIN:
    case GOPHER_UUENCODED:
    case GOPHER_BIN:
        /* Rightnow We have no idea what it is. */
        mime_enc = mimeGetContentEncoding(gopherState->request);
        mime_type = mimeGetContentType(gopherState->request);
        if (!mime_type)
            mime_type = def_gopher_bin;
        break;
    case GOPHER_FILE:
    default:
        // Unknown types fall back to the plain-text default.
        mime_enc = mimeGetContentEncoding(gopherState->request);
        mime_type = mimeGetContentType(gopherState->request);
        if (!mime_type)
            mime_type = def_gopher_text;
        break;
    }
    // The reply must be created before any body bytes are appended.
    assert(entry->isEmpty());
    HttpReply *reply = new HttpReply;
    entry->buffer();
    reply->setHeaders(Http::scOkay, "Gatewaying", mime_type, -1, -1, -2);
    if (mime_enc)
        reply->header.putStr(Http::HdrType::CONTENT_ENCODING, mime_enc);
    entry->replaceHttpReply(reply);
    gopherState->reply_ = reply;
}
| 0
|
222,845
|
// Merges two dimension handles into a single refined dimension value:
// a known (>= 0) dimension wins over an unknown one; matching symbolic
// unknowns (< -1) propagate; two plain unknowns (-1) stay unknown.
// The merged value is folded into *result via RefineDim().
//
// Fix: the trailing `return Status::OK();` after the exhaustive
// if/else chain was unreachable dead code and has been removed; the
// final branch now falls through to the both-unknown case.
//
// NOTE(review): CHECK_EQ aborts the whole process when two *known*
// dimensions disagree; consider returning an InvalidArgument status
// instead of crashing on adversarial graphs.
Status Merge(DimensionHandle d1, DimensionHandle d2, int64_t* result) {
  const int64_t dim1 = InferenceContext::Value(d1);
  const int64_t dim2 = InferenceContext::Value(d2);
  if (dim1 >= 0 && dim2 >= 0) {
    CHECK_EQ(dim1, dim2);
    return RefineDim(dim1, result);
  } else if (dim1 >= 0 && dim2 < 0) {
    return RefineDim(dim1, result);
  } else if (dim1 < 0 && dim2 >= 0) {
    return RefineDim(dim2, result);
  } else if (dim1 < -1) {
    return RefineDim(dim1, result);
  } else if (dim2 < -1) {
    return RefineDim(dim2, result);
  }
  // Both dimensions are exactly -1 (fully unknown).
  CHECK_EQ(dim1, dim2);
  CHECK_EQ(-1, dim1);
  return RefineDim(-1, result);
}
| 0
|
500,067
|
/*
 * Query the clock-skew tolerance from the default Kerberos replay
 * cache.  Falls back to KSSL_CLOCKSKEW whenever the cache cannot be
 * opened, initialized, or queried.
 */
static krb5_deltat get_rc_clockskew(krb5_context context)
{
	krb5_rcache rcache;
	krb5_deltat skew;

	if (krb5_rc_default(context, &rcache))
		return KSSL_CLOCKSKEW;
	if (krb5_rc_initialize(context, rcache, 0))
		return KSSL_CLOCKSKEW;
	if (krb5_rc_get_lifespan(context, rcache, &skew) != 0)
		skew = KSSL_CLOCKSKEW;
	(void) krb5_rc_destroy(context, rcache);
	return skew;
}
| 0
|
338,200
|
// Decode an atomic.notify instruction if `code` matches; otherwise
// report false so other visitors can try.  Operands are popped in
// reverse order (count first, then pointer), and the memory access
// alignment must equal the i32 operand size.
bool WasmBinaryBuilder::maybeVisitAtomicNotify(Expression*& out, uint8_t code) {
  if (code != BinaryConsts::AtomicNotify) {
    return false;
  }
  auto* notify = allocator.alloc<AtomicNotify>();
  BYN_TRACE("zz node: AtomicNotify\n");
  notify->type = Type::i32;
  notify->notifyCount = popNonVoidExpression();
  notify->ptr = popNonVoidExpression();
  Address align;
  readMemoryAccess(align, notify->offset);
  if (align != notify->type.getByteSize()) {
    throwError("Align of AtomicNotify must match size");
  }
  notify->finalize();
  out = notify;
  return true;
}
| 0
|
450,404
|
/* Exit-notifier callback: free the global tight-encoding palette
 * buffer and clear the pointer so a later call is a no-op. */
static void vnc_tight_cleanup(Notifier *n, void *value)
{
    g_free(color_count_palette);
    color_count_palette = NULL;
}
| 0
|
195,800
|
// Read the PixMap-only header fields (the part of a QuickDraw PixMap
// beyond the plain BitMap fields) starting at `pos`, populating `bi`
// and emitting debug output.
//
// Fix: bi->pixelsize comes straight from the file, so a crafted file
// with pixelsize==0 caused a division by zero when deriving pdwidth.
// The division is now guarded; pdwidth still gets clamped up to
// npwidth afterwards, so a zero pixelsize leaves pdwidth = npwidth.
void fmtutil_macbitmap_read_pixmap_only_fields(deark *c, dbuf *f, struct fmtutil_macbitmap_info *bi,
	i64 pos)
{
	i64 pixmap_version;
	i64 pack_size;
	i64 plane_bytes;
	i64 n;

	de_dbg(c, "additional PixMap header fields, at %d", (int)pos);
	de_dbg_indent(c, 1);
	pixmap_version = dbuf_getu16be(f, pos+0);
	de_dbg(c, "pixmap version: %d", (int)pixmap_version);
	bi->packing_type = dbuf_getu16be(f, pos+2);
	de_dbg(c, "packing type: %d", (int)bi->packing_type);
	pack_size = dbuf_getu32be(f, pos+4);
	de_dbg(c, "pixel data length: %d", (int)pack_size);
	bi->hdpi = pict_read_fixed(f, pos+8);
	bi->vdpi = pict_read_fixed(f, pos+12);
	de_dbg(c, "dpi: %.2f"DE_CHAR_TIMES"%.2f", bi->hdpi, bi->vdpi);
	bi->pixeltype = dbuf_getu16be(f, pos+16);
	bi->pixelsize = dbuf_getu16be(f, pos+18);
	bi->cmpcount = dbuf_getu16be(f, pos+20);
	bi->cmpsize = dbuf_getu16be(f, pos+22);
	de_dbg(c, "pixel type=%d, bits/pixel=%d, components/pixel=%d, bits/comp=%d",
		(int)bi->pixeltype, (int)bi->pixelsize, (int)bi->cmpcount, (int)bi->cmpsize);
	// Padded width in pixels, derived from the row stride. Guard the
	// division: pixelsize is untrusted and may be 0.
	if(bi->pixelsize>0) {
		bi->pdwidth = (bi->rowbytes*8)/bi->pixelsize;
	}
	if(bi->pdwidth < bi->npwidth) {
		bi->pdwidth = bi->npwidth;
	}
	plane_bytes = dbuf_getu32be(f, pos+24);
	de_dbg(c, "plane bytes: %d", (int)plane_bytes);
	bi->pmTable = (u32)dbuf_getu32be(f, pos+28);
	de_dbg(c, "pmTable: 0x%08x", (unsigned int)bi->pmTable);
	n = dbuf_getu32be(f, pos+32);
	de_dbg(c, "pmReserved: 0x%08x", (unsigned int)n);
	de_dbg_indent(c, -1);
}
| 1
|
473,987
|
/* Encode a Big5 code point into its multibyte form in `buf` by
 * delegating to the generic 2-byte helper; returns that helper's
 * result. */
big5_code_to_mbc(OnigCodePoint code, UChar *buf, OnigEncoding enc)
{
  return onigenc_mb2_code_to_mbc(enc, code, buf);
}
| 0
|
430,375
|
/*
 * Copy the NUL-terminated string at @p into @s (both typically point
 * into the same buffer, with @s before @p), octal-escaping every
 * character found in @esc as "\ooo".  Returns the new end of the
 * written region on success, or NULL when the writer would overtake
 * the unread source.
 */
char *mangle_path(char *s, const char *p, const char *esc)
{
	for (;;) {
		char ch;

		if (s > p)
			return NULL;
		ch = *p++;
		if (ch == '\0')
			return s;
		if (strchr(esc, ch) == NULL) {
			*s++ = ch;
			continue;
		}
		/* An escape needs 4 bytes; refuse if that would clobber
		 * source bytes not yet read. */
		if (s + 4 > p)
			return NULL;
		*s++ = '\\';
		*s++ = '0' + ((ch & 0300) >> 6);
		*s++ = '0' + ((ch & 070) >> 3);
		*s++ = '0' + (ch & 07);
	}
}
| 0
|
455,330
|
main (argc, argv, env)
int argc;
char **argv, **env;
#endif /* !NO_MAIN_ENV_ARG */
{
register int i;
int code, old_errexit_flag;
#if defined (RESTRICTED_SHELL)
int saverst;
#endif
volatile int locally_skip_execution;
volatile int arg_index, top_level_arg_index;
#ifdef __OPENNT
char **env;
env = environ;
#endif /* __OPENNT */
USE_VAR(argc);
USE_VAR(argv);
USE_VAR(env);
USE_VAR(code);
USE_VAR(old_errexit_flag);
#if defined (RESTRICTED_SHELL)
USE_VAR(saverst);
#endif
/* Catch early SIGINTs. */
code = setjmp_nosigs (top_level);
if (code)
exit (2);
xtrace_init ();
#if defined (USING_BASH_MALLOC) && defined (DEBUG) && !defined (DISABLE_MALLOC_WRAPPERS)
malloc_set_register (1); /* XXX - change to 1 for malloc debugging */
#endif
check_dev_tty ();
#ifdef __CYGWIN__
_cygwin32_check_tmp ();
#endif /* __CYGWIN__ */
/* Wait forever if we are debugging a login shell. */
while (debugging_login_shell) sleep (3);
set_default_locale ();
running_setuid = uidget ();
if (getenv ("POSIXLY_CORRECT") || getenv ("POSIX_PEDANTIC"))
posixly_correct = 1;
#if defined (USE_GNU_MALLOC_LIBRARY)
mcheck (programming_error, (void (*) ())0);
#endif /* USE_GNU_MALLOC_LIBRARY */
if (setjmp_sigs (subshell_top_level))
{
argc = subshell_argc;
argv = subshell_argv;
env = subshell_envp;
sourced_env = 0;
}
shell_reinitialized = 0;
/* Initialize `local' variables for all `invocations' of main (). */
arg_index = 1;
if (arg_index > argc)
arg_index = argc;
command_execution_string = shell_script_filename = (char *)NULL;
want_pending_command = locally_skip_execution = read_from_stdin = 0;
default_input = stdin;
#if defined (BUFFERED_INPUT)
default_buffered_input = -1;
#endif
/* Fix for the `infinite process creation' bug when running shell scripts
from startup files on System V. */
login_shell = make_login_shell = 0;
/* If this shell has already been run, then reinitialize it to a
vanilla state. */
if (shell_initialized || shell_name)
{
/* Make sure that we do not infinitely recurse as a login shell. */
if (*shell_name == '-')
shell_name++;
shell_reinitialize ();
if (setjmp_nosigs (top_level))
exit (2);
}
shell_environment = env;
set_shell_name (argv[0]);
shell_start_time = NOW; /* NOW now defined in general.h */
/* Parse argument flags from the input line. */
/* Find full word arguments first. */
arg_index = parse_long_options (argv, arg_index, argc);
if (want_initial_help)
{
show_shell_usage (stdout, 1);
exit (EXECUTION_SUCCESS);
}
if (do_version)
{
show_shell_version (1);
exit (EXECUTION_SUCCESS);
}
echo_input_at_read = verbose_flag; /* --verbose given */
/* All done with full word options; do standard shell option parsing.*/
this_command_name = shell_name; /* for error reporting */
arg_index = parse_shell_options (argv, arg_index, argc);
/* If user supplied the "--login" (or -l) flag, then set and invert
LOGIN_SHELL. */
if (make_login_shell)
{
login_shell++;
login_shell = -login_shell;
}
set_login_shell ("login_shell", login_shell != 0);
if (dump_po_strings)
dump_translatable_strings = 1;
if (dump_translatable_strings)
read_but_dont_execute = 1;
if (running_setuid && privileged_mode == 0)
disable_priv_mode ();
/* Need to get the argument to a -c option processed in the
above loop. The next arg is a command to execute, and the
following args are $0...$n respectively. */
if (want_pending_command)
{
command_execution_string = argv[arg_index];
if (command_execution_string == 0)
{
report_error (_("%s: option requires an argument"), "-c");
exit (EX_BADUSAGE);
}
arg_index++;
}
this_command_name = (char *)NULL;
/* First, let the outside world know about our interactive status.
A shell is interactive if the `-i' flag was given, or if all of
the following conditions are met:
no -c command
no arguments remaining or the -s flag given
standard input is a terminal
standard error is a terminal
Refer to Posix.2, the description of the `sh' utility. */
if (forced_interactive || /* -i flag */
(!command_execution_string && /* No -c command and ... */
wordexp_only == 0 && /* No --wordexp and ... */
((arg_index == argc) || /* no remaining args or... */
read_from_stdin) && /* -s flag with args, and */
isatty (fileno (stdin)) && /* Input is a terminal and */
isatty (fileno (stderr)))) /* error output is a terminal. */
init_interactive ();
else
init_noninteractive ();
/*
* Some systems have the bad habit of starting login shells with lots of open
* file descriptors. For instance, most systems that have picked up the
* pre-4.0 Sun YP code leave a file descriptor open each time you call one
* of the getpw* functions, and it's set to be open across execs. That
* means one for login, one for xterm, one for shelltool, etc. There are
* also systems that open persistent FDs to other agents or files as part
* of process startup; these need to be set to be close-on-exec.
*/
if (login_shell && interactive_shell)
{
for (i = 3; i < 20; i++)
SET_CLOSE_ON_EXEC (i);
}
/* If we're in a strict Posix.2 mode, turn on interactive comments,
alias expansion in non-interactive shells, and other Posix.2 things. */
if (posixly_correct)
{
bind_variable ("POSIXLY_CORRECT", "y", 0);
sv_strict_posix ("POSIXLY_CORRECT");
}
/* Now we run the shopt_alist and process the options. */
if (shopt_alist)
run_shopt_alist ();
/* From here on in, the shell must be a normal functioning shell.
Variables from the environment are expected to be set, etc. */
shell_initialize ();
set_default_lang ();
set_default_locale_vars ();
/*
* M-x term -> TERM=eterm-color INSIDE_EMACS='251,term:0.96' (eterm)
* M-x shell -> TERM='dumb' INSIDE_EMACS='25.1,comint' (no line editing)
*
* Older versions of Emacs may set EMACS to 't' or to something like
* '22.1 (term:0.96)' instead of (or in addition to) setting INSIDE_EMACS.
* They may set TERM to 'eterm' instead of 'eterm-color'. They may have
* a now-obsolete command that sets neither EMACS nor INSIDE_EMACS:
* M-x terminal -> TERM='emacs-em7955' (line editing)
*/
if (interactive_shell)
{
char *term, *emacs, *inside_emacs;
int emacs_term, in_emacs;
term = get_string_value ("TERM");
emacs = get_string_value ("EMACS");
inside_emacs = get_string_value ("INSIDE_EMACS");
if (inside_emacs)
{
emacs_term = strstr (inside_emacs, ",term:") != 0;
in_emacs = 1;
}
else if (emacs)
{
/* Infer whether we are in an older Emacs. */
emacs_term = strstr (emacs, " (term:") != 0;
in_emacs = emacs_term || STREQ (emacs, "t");
}
else
in_emacs = emacs_term = 0;
/* Not sure any emacs terminal emulator sets TERM=emacs any more */
no_line_editing |= STREQ (term, "emacs");
no_line_editing |= in_emacs && STREQ (term, "dumb");
/* running_under_emacs == 2 for `eterm' */
running_under_emacs = in_emacs || STREQN (term, "emacs", 5);
running_under_emacs += emacs_term && STREQN (term, "eterm", 5);
if (running_under_emacs)
gnu_error_format = 1;
}
top_level_arg_index = arg_index;
old_errexit_flag = exit_immediately_on_error;
/* Give this shell a place to longjmp to before executing the
startup files. This allows users to press C-c to abort the
lengthy startup. */
code = setjmp_sigs (top_level);
if (code)
{
if (code == EXITPROG || code == ERREXIT)
exit_shell (last_command_exit_value);
else
{
#if defined (JOB_CONTROL)
/* Reset job control, since run_startup_files turned it off. */
set_job_control (interactive_shell);
#endif
/* Reset value of `set -e', since it's turned off before running
the startup files. */
exit_immediately_on_error += old_errexit_flag;
locally_skip_execution++;
}
}
arg_index = top_level_arg_index;
/* Execute the start-up scripts. */
if (interactive_shell == 0)
{
unbind_variable ("PS1");
unbind_variable ("PS2");
interactive = 0;
#if 0
/* This has already been done by init_noninteractive */
expand_aliases = posixly_correct;
#endif
}
else
{
change_flag ('i', FLAG_ON);
interactive = 1;
}
#if defined (RESTRICTED_SHELL)
/* Set restricted_shell based on whether the basename of $0 indicates that
the shell should be restricted or if the `-r' option was supplied at
startup. */
restricted_shell = shell_is_restricted (shell_name);
/* If the `-r' option is supplied at invocation, make sure that the shell
is not in restricted mode when running the startup files. */
saverst = restricted;
restricted = 0;
#endif
/* Set positional parameters before running startup files. top_level_arg_index
holds the index of the current argument before setting the positional
parameters, so any changes performed in the startup files won't affect
later option processing. */
if (wordexp_only)
; /* nothing yet */
else if (command_execution_string)
arg_index = bind_args (argv, arg_index, argc, 0); /* $0 ... $n */
else if (arg_index != argc && read_from_stdin == 0)
{
shell_script_filename = argv[arg_index++];
arg_index = bind_args (argv, arg_index, argc, 1); /* $1 ... $n */
}
else
arg_index = bind_args (argv, arg_index, argc, 1); /* $1 ... $n */
/* The startup files are run with `set -e' temporarily disabled. */
if (locally_skip_execution == 0 && running_setuid == 0)
{
old_errexit_flag = exit_immediately_on_error;
exit_immediately_on_error = 0;
run_startup_files ();
exit_immediately_on_error += old_errexit_flag;
}
/* If we are invoked as `sh', turn on Posix mode. */
if (act_like_sh)
{
bind_variable ("POSIXLY_CORRECT", "y", 0);
sv_strict_posix ("POSIXLY_CORRECT");
}
#if defined (RESTRICTED_SHELL)
/* Turn on the restrictions after executing the startup files. This
means that `bash -r' or `set -r' invoked from a startup file will
turn on the restrictions after the startup files are executed. */
restricted = saverst || restricted;
if (shell_reinitialized == 0)
maybe_make_restricted (shell_name);
#endif /* RESTRICTED_SHELL */
#if defined (WORDEXP_OPTION)
if (wordexp_only)
{
startup_state = 3;
last_command_exit_value = run_wordexp (argv[top_level_arg_index]);
exit_shell (last_command_exit_value);
}
#endif
cmd_init (); /* initialize the command object caches */
uwp_init ();
if (command_execution_string)
{
startup_state = 2;
if (debugging_mode)
start_debugger ();
#if defined (ONESHOT)
executing = 1;
run_one_command (command_execution_string);
exit_shell (last_command_exit_value);
#else /* ONESHOT */
with_input_from_string (command_execution_string, "-c");
goto read_and_execute;
#endif /* !ONESHOT */
}
/* Get possible input filename and set up default_buffered_input or
default_input as appropriate. */
if (shell_script_filename)
open_shell_script (shell_script_filename);
else if (interactive == 0)
{
/* In this mode, bash is reading a script from stdin, which is a
pipe or redirected file. */
#if defined (BUFFERED_INPUT)
default_buffered_input = fileno (stdin); /* == 0 */
#else
setbuf (default_input, (char *)NULL);
#endif /* !BUFFERED_INPUT */
read_from_stdin = 1;
}
else if (top_level_arg_index == argc) /* arg index before startup files */
/* "If there are no operands and the -c option is not specified, the -s
option shall be assumed." */
read_from_stdin = 1;
set_bash_input ();
if (debugging_mode && locally_skip_execution == 0 && running_setuid == 0 && (reading_shell_script || interactive_shell == 0))
start_debugger ();
/* Do the things that should be done only for interactive shells. */
if (interactive_shell)
{
/* Set up for checking for presence of mail. */
reset_mail_timer ();
init_mail_dates ();
#if defined (HISTORY)
/* Initialize the interactive history stuff. */
bash_initialize_history ();
/* Don't load the history from the history file if we've already
saved some lines in this session (e.g., by putting `history -s xx'
into one of the startup files). */
if (shell_initialized == 0 && history_lines_this_session == 0)
load_history ();
#endif /* HISTORY */
/* Initialize terminal state for interactive shells after the
.bash_profile and .bashrc are interpreted. */
get_tty_state ();
}
#if !defined (ONESHOT)
read_and_execute:
#endif /* !ONESHOT */
shell_initialized = 1;
if (pretty_print_mode && interactive_shell)
{
internal_warning (_("pretty-printing mode ignored in interactive shells"));
pretty_print_mode = 0;
}
if (pretty_print_mode)
exit_shell (pretty_print_loop ());
/* Read commands until exit condition. */
reader_loop ();
exit_shell (last_command_exit_value);
}
| 0
|
359,646
|
/*
 * Clear BGP sessions according to `sort`: all peers, one peer by
 * address (`arg`), a peer-group by name, all external (eBGP) peers, or
 * all peers of a given AS number.  `stype` selects hard clear
 * (BGP_CLEAR_SOFT_NONE -> peer_clear) versus a soft clear variant
 * (peer_clear_soft).  Per-peer failures are reported to the vty but do
 * not stop the iteration.  Returns 0 on completion, -1 on bad input.
 */
bgp_clear (struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
           enum clear_sort sort,enum bgp_clear_type stype, const char *arg)
{
  int ret;
  struct peer *peer;
  struct listnode *node, *nnode;
  /* Clear all neighbors. */
  if (sort == clear_all)
    {
      for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
	{
	  if (stype == BGP_CLEAR_SOFT_NONE)
	    ret = peer_clear (peer);
	  else
	    ret = peer_clear_soft (peer, afi, safi, stype);
	  if (ret < 0)
	    bgp_clear_vty_error (vty, peer, afi, safi, ret);
	}
      return 0;
    }
  /* Clear specified neighbors. */
  if (sort == clear_peer)
    {
      union sockunion su;
      int ret;
      /* Make sockunion for lookup. */
      ret = str2sockunion (arg, &su);
      if (ret < 0)
	{
	  vty_out (vty, "Malformed address: %s%s", arg, VTY_NEWLINE);
	  return -1;
	}
      peer = peer_lookup (bgp, &su);
      if (! peer)
	{
	  vty_out (vty, "%%BGP: Unknown neighbor - \"%s\"%s", arg, VTY_NEWLINE);
	  return -1;
	}
      if (stype == BGP_CLEAR_SOFT_NONE)
	ret = peer_clear (peer);
      else
	ret = peer_clear_soft (peer, afi, safi, stype);
      if (ret < 0)
	bgp_clear_vty_error (vty, peer, afi, safi, ret);
      return 0;
    }
  /* Clear all peer-group members. */
  if (sort == clear_group)
    {
      struct peer_group *group;
      group = peer_group_lookup (bgp, arg);
      if (! group)
	{
	  vty_out (vty, "%%BGP: No such peer-group %s%s", arg, VTY_NEWLINE);
	  return -1;
	}
      for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
	{
	  if (stype == BGP_CLEAR_SOFT_NONE)
	    {
	      ret = peer_clear (peer);
	      continue;
	    }
	  /* Soft clears only apply to members activated in this AFI/SAFI. */
	  if (! peer->af_group[afi][safi])
	    continue;
	  ret = peer_clear_soft (peer, afi, safi, stype);
	  if (ret < 0)
	    bgp_clear_vty_error (vty, peer, afi, safi, ret);
	}
      return 0;
    }
  /* Clear only eBGP neighbors (skip iBGP). */
  if (sort == clear_external)
    {
      for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
	{
	  if (peer_sort (peer) == BGP_PEER_IBGP)
	    continue;
	  if (stype == BGP_CLEAR_SOFT_NONE)
	    ret = peer_clear (peer);
	  else
	    ret = peer_clear_soft (peer, afi, safi, stype);
	  if (ret < 0)
	    bgp_clear_vty_error (vty, peer, afi, safi, ret);
	}
      return 0;
    }
  /* Clear all neighbors belonging to a given AS number. */
  if (sort == clear_as)
    {
      as_t as;
      unsigned long as_ul;
      char *endptr = NULL;
      int find = 0;
      as_ul = strtoul(arg, &endptr, 10);
      /* Validate: full-string numeric parse within 16-bit AS range. */
      if ((as_ul == ULONG_MAX) || (*endptr != '\0') || (as_ul > USHRT_MAX))
	{
	  vty_out (vty, "Invalid AS number%s", VTY_NEWLINE);
	  return -1;
	}
      as = (as_t) as_ul;
      for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
	{
	  if (peer->as != as)
	    continue;
	  find = 1;
	  if (stype == BGP_CLEAR_SOFT_NONE)
	    ret = peer_clear (peer);
	  else
	    ret = peer_clear_soft (peer, afi, safi, stype);
	  if (ret < 0)
	    bgp_clear_vty_error (vty, peer, afi, safi, ret);
	}
      if (! find)
	vty_out (vty, "%%BGP: No peer is configured with AS %s%s", arg,
		 VTY_NEWLINE);
      return 0;
    }
  return 0;
}
| 0
|
225,545
|
// Frees everything a tensor owns — its data buffer, dims arrays,
// quantization and sparsity metadata — and NULLs the freed pointers so
// a repeated call is safe.  Does NOT free the TfLiteTensor struct
// itself.
//
// Fix: guard against a NULL tensor instead of dereferencing it
// (t->dims etc. were read unconditionally).
void TfLiteTensorFree(TfLiteTensor* t) {
  if (t == NULL) return;
  TfLiteTensorDataFree(t);
  if (t->dims) TfLiteIntArrayFree(t->dims);
  t->dims = NULL;
  if (t->dims_signature) {
    TfLiteIntArrayFree((TfLiteIntArray *) t->dims_signature);
  }
  t->dims_signature = NULL;
  TfLiteQuantizationFree(&t->quantization);
  TfLiteSparsityFree(t->sparsity);
  t->sparsity = NULL;
}
| 0
|
387,806
|
// Free the interface arrays built during class file parsing, but only
// those this klass actually owns: the transitive list may be the shared
// empty array, alias the local list, or be inherited from the super
// class, and the local list may be the shared empty array — none of
// those may be freed here.
void InstanceKlass::deallocate_interfaces(ClassLoaderData* loader_data,
                                          const Klass* super_klass,
                                          Array<Klass*>* local_interfaces,
                                          Array<Klass*>* transitive_interfaces) {
  // Only deallocate transitive interfaces if not empty, same as super class
  // or same as local interfaces.  See code in parseClassFile.
  Array<Klass*>* ti = transitive_interfaces;
  if (ti != Universe::the_empty_klass_array() && ti != local_interfaces) {
    // check that the interfaces don't come from super class
    Array<Klass*>* sti = (super_klass == NULL) ? NULL :
                         InstanceKlass::cast(super_klass)->transitive_interfaces();
    // Shared (CDS) arrays are never freed.
    if (ti != sti && ti != NULL && !ti->is_shared()) {
      MetadataFactory::free_array<Klass*>(loader_data, ti);
    }
  }
  // local interfaces can be empty
  if (local_interfaces != Universe::the_empty_klass_array() &&
      local_interfaces != NULL && !local_interfaces->is_shared()) {
    MetadataFactory::free_array<Klass*>(loader_data, local_interfaces);
  }
}
| 0
|
462,283
|
/*
 * Map a STUN message type to its human-readable method name.
 * Returns "???" for methods outside the known-name table.
 */
PJ_DEF(const char*) pj_stun_get_method_name(unsigned msg_type)
{
    const unsigned method = PJ_STUN_GET_METHOD(msg_type);

    return (method < PJ_ARRAY_SIZE(stun_method_names))
	       ? stun_method_names[method] : "???";
}
| 0
|
503,877
|
/* Guile primitive `opendir`: open a directory stream and wrap it in a
 * directory smob tagged as open.  STRING_SYSCALL presumably converts
 * the Scheme string and performs the call with errno handling — see
 * the macro definition; on failure SCM_SYSERROR raises a system-error. */
SCM_DEFINE (scm_opendir, "opendir", 1, 0, 0,
            (SCM dirname),
	    "Open the directory specified by @var{dirname} and return a directory\n"
	    "stream.")
#define FUNC_NAME s_scm_opendir
{
  DIR *ds;
  STRING_SYSCALL (dirname, c_dirname, ds = opendir (c_dirname));
  if (ds == NULL)
    SCM_SYSERROR;
  SCM_RETURN_NEWSMOB (scm_tc16_dir | (SCM_DIR_FLAG_OPEN<<16), ds);
}
| 0
|
246,489
|
/*
 * Parse one entry of a WASM "type" section at the buffer's current
 * position (bounded by `bound`).  Only the `func` form (0x60) is
 * accepted; parsing stops at anything else.  Returns a freshly
 * allocated entry, or NULL on allocation or parse failure (partial
 * state is released via free_type_entry).
 */
static RBinWasmTypeEntry *parse_type_entry(RBinWasmObj *bin, ut64 bound, ut32 index) {
	RBuffer *b = bin->buf;
	RBinWasmTypeEntry *type = R_NEW0 (RBinWasmTypeEntry);
	if (!type) {
		return NULL;
	}
	type->sec_i = index;
	type->file_offset = r_buf_tell (b);
	/* form byte: must be the function-type marker. */
	if (!consume_u7_r (b, bound, &type->form)) {
		goto beach;
	}
	if (type->form != R_BIN_WASM_VALUETYPE_FUNC) {
		R_LOG_WARN ("Halting types section parsing at invalid type 0x%02x at offset: 0x%" PFMTSZx "\n", type->form, type->file_offset);
		goto beach;
	}
	/* parameter types, then result types — order is fixed by the
	 * WASM binary format. */
	type->args = parse_type_vector (b, bound);
	if (!type->args) {
		goto beach;
	}
	type->rets = parse_type_vector (b, bound);
	if (!type->rets) {
		goto beach;
	}
	r_bin_wasm_type_entry_to_string (type);
	return type;
beach:
	free_type_entry (type);
	return NULL;
}
| 0
|
466,122
|
/*
 * Decode the ModR/M byte (and SIB/displacement, if present) of the
 * current instruction into `op`: either a register/XMM operand (mod==3)
 * or a memory operand whose effective address is computed per the
 * 16-bit or 32/64-bit addressing rules.  insn_fetch() presumably jumps
 * to the `done` label on fetch failure, setting rc — see the macro.
 */
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;
	/* REX prefix widens the reg/index/base fields by one bit each. */
	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
	}
	ctxt->modrm = insn_fetch(u8, ctxt);
	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;
	/* mod == 3: the r/m field names a register, not memory. */
	if (ctxt->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt->modrm_rm,
					       ctxt->regs, ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}
	op->type = OP_MEM;
	if (ctxt->ad_bytes == 2) {
		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
		unsigned si = ctxt->regs[VCPU_REGS_RSI];
		unsigned di = ctxt->regs[VCPU_REGS_RDI];
		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			/* rm == 6 with mod 0 means direct 16-bit address. */
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		/* BP-relative forms default to the stack segment. */
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			/* SIB byte: scale*index + base. */
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;
			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else
				modrm_ea += ctxt->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += ctxt->regs[index_reg] << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			/* disp32, RIP-relative in 64-bit mode. */
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else
			modrm_ea += ctxt->regs[ctxt->modrm_rm];
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
| 0
|
261,960
|
/*
 * Decode hexadecimal text from `src` into raw bytes at dst->start.
 * Decoding stops at the first non-hex character; a dangling single
 * digit is discarded.  dst->length is shrunk to the number of bytes
 * actually produced.
 */
njs_decode_hex(njs_str_t *dst, const njs_str_t *src)
{
    u_char        *out;
    njs_int_t     digit;
    njs_uint_t    idx, byte;
    const u_char  *in;

    out = dst->start;
    in = src->start;
    byte = 0;

    for (idx = 0; idx < src->length; idx++) {
        digit = njs_char_to_hex(in[idx]);
        if (njs_slow_path(digit < 0)) {
            break;
        }

        byte = byte * 16 + digit;

        if (idx & 1) {
            /* Second nibble of the pair: emit the byte. */
            *out++ = (u_char) byte;
            byte = 0;
        }
    }

    dst->length -= (dst->start + dst->length) - out;
}
| 0
|
221,162
|
/*
 * Serialize an AVC decoder configuration record to the bitstream.
 * Two layouts are supported: the ISO/IEC 14496-15 AVCDecoderConfigurationRecord
 * (default), or Annex-B style where each parameter set is prefixed with
 * a 0x00000001 start code and all record headers/counters are omitted
 * (cfg->write_annex_b).  High/range profiles additionally carry
 * chroma/bit-depth fields and SPS extensions.
 */
GF_Err gf_odf_avc_cfg_write_bs(GF_AVCConfig *cfg, GF_BitStream *bs)
{
	u32 i, count;
	if (!cfg) return GF_BAD_PARAM;
	count = gf_list_count(cfg->sequenceParameterSets);
	if (!cfg->write_annex_b) {
		gf_bs_write_int(bs, cfg->configurationVersion, 8);
		gf_bs_write_int(bs, cfg->AVCProfileIndication , 8);
		gf_bs_write_int(bs, cfg->profile_compatibility, 8);
		gf_bs_write_int(bs, cfg->AVCLevelIndication, 8);
		/* reserved '111111' + 2-bit lengthSizeMinusOne */
		gf_bs_write_int(bs, 0x3F, 6);
		gf_bs_write_int(bs, cfg->nal_unit_size - 1, 2);
		/* reserved '111' + 5-bit numOfSequenceParameterSets */
		gf_bs_write_int(bs, 0x7, 3);
		gf_bs_write_int(bs, count, 5);
	}
	for (i=0; i<count; i++) {
		GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_list_get(cfg->sequenceParameterSets, i);
		if (!cfg->write_annex_b) {
			gf_bs_write_u16(bs, sl->size);
		} else {
			/* Annex-B start code instead of a length prefix. */
			gf_bs_write_u32(bs, 1);
		}
		gf_bs_write_data(bs, sl->data, sl->size);
	}
	count = gf_list_count(cfg->pictureParameterSets);
	if (!cfg->write_annex_b) {
		gf_bs_write_int(bs, count, 8);
	}
	for (i=0; i<count; i++) {
		GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_list_get(cfg->pictureParameterSets, i);
		if (!cfg->write_annex_b) {
			gf_bs_write_u16(bs, sl->size);
		} else {
			gf_bs_write_u32(bs, 1);
		}
		gf_bs_write_data(bs, sl->data, sl->size);
	}
	/* Extended fields exist only for range-extension profiles. */
	if (gf_avc_is_rext_profile(cfg->AVCProfileIndication)) {
		if (!cfg->write_annex_b) {
			gf_bs_write_int(bs, 0xFF, 6);
			gf_bs_write_int(bs, cfg->chroma_format, 2);
			gf_bs_write_int(bs, 0xFF, 5);
			gf_bs_write_int(bs, cfg->luma_bit_depth - 8, 3);
			gf_bs_write_int(bs, 0xFF, 5);
			gf_bs_write_int(bs, cfg->chroma_bit_depth - 8, 3);
		}
		count = cfg->sequenceParameterSetExtensions ? gf_list_count(cfg->sequenceParameterSetExtensions) : 0;
		if (!cfg->write_annex_b) {
			gf_bs_write_u8(bs, count);
		}
		for (i=0; i<count; i++) {
			GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_list_get(cfg->sequenceParameterSetExtensions, i);
			if (!cfg->write_annex_b) {
				gf_bs_write_u16(bs, sl->size);
			} else {
				gf_bs_write_u32(bs, 1);
			}
			gf_bs_write_data(bs, sl->data, sl->size);
		}
	}
	return GF_OK;
}
| 0
|
317,198
|
/*
 * Netfilter NF_INET_FORWARD hook for IPv4: delegates to the
 * family-generic forwarding check with the inbound device from the hook
 * state. @priv is unused here.
 */
static unsigned int selinux_ipv4_forward(void *priv,
					 struct sk_buff *skb,
					 const struct nf_hook_state *state)
{
	return selinux_ip_forward(skb, state->in, PF_INET);
}
| 0
|
317,128
|
/*
 * Parse @ctx into a SID and compare it against the SID the superblock was
 * originally mounted with. Returns the parse_sid() error when the context
 * string cannot be parsed; otherwise returns 0 and stores the
 * bad_option() verdict (non-zero on mismatch) in *mismatch.
 */
static int selinux_remount_check_opt(struct super_block *sb, char *ctx,
				     int flag, u32 old_sid, int *mismatch)
{
	u32 new_sid;
	int rc = parse_sid(sb, ctx, &new_sid);

	if (rc)
		return rc;
	*mismatch = bad_option(selinux_superblock(sb), flag, old_sid, new_sid);
	return 0;
}

/*
 * Reject a remount that would change any of the SELinux mount-time
 * security options (fscontext/context/rootcontext/defcontext).
 */
static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
{
	struct selinux_mnt_opts *opts = mnt_opts;
	struct superblock_security_struct *sbsec = selinux_superblock(sb);
	int mismatch = 0;
	int rc;

	/* nothing to validate until the superblock has been labeled */
	if (!(sbsec->flags & SE_SBINITIALIZED))
		return 0;
	/* no security options supplied on this remount */
	if (!opts)
		return 0;

	if (opts->fscontext) {
		rc = selinux_remount_check_opt(sb, opts->fscontext,
					       FSCONTEXT_MNT, sbsec->sid,
					       &mismatch);
		if (rc)
			return rc;
		if (mismatch)
			goto out_bad_option;
	}
	if (opts->context) {
		rc = selinux_remount_check_opt(sb, opts->context,
					       CONTEXT_MNT,
					       sbsec->mntpoint_sid, &mismatch);
		if (rc)
			return rc;
		if (mismatch)
			goto out_bad_option;
	}
	if (opts->rootcontext) {
		struct inode_security_struct *root_isec =
			backing_inode_security(sb->s_root);

		rc = selinux_remount_check_opt(sb, opts->rootcontext,
					       ROOTCONTEXT_MNT, root_isec->sid,
					       &mismatch);
		if (rc)
			return rc;
		if (mismatch)
			goto out_bad_option;
	}
	if (opts->defcontext) {
		rc = selinux_remount_check_opt(sb, opts->defcontext,
					       DEFCONTEXT_MNT, sbsec->def_sid,
					       &mismatch);
		if (rc)
			return rc;
		if (mismatch)
			goto out_bad_option;
	}
	return 0;

out_bad_option:
	pr_warn("SELinux: unable to change security options "
	        "during remount (dev %s, type=%s)\n", sb->s_id,
	        sb->s_type->name);
	return -EINVAL;
}
| 0
|
509,576
|
/**
  Per-statement setup for a table already locked by external_lock().

  For transactional Aria tables this refreshes the per-statement
  transaction state; non-transactional tables need nothing. Always
  returns 0.
*/
int ha_maria::start_stmt(THD *thd, thr_lock_type lock_type)
{
  if (!file->s->base.born_transactional)
    return 0;

  TRN *trn= THD_TRN;
  DBUG_ASSERT(trn); // this may be called only after external_lock()
  DBUG_ASSERT(trnman_has_locked_tables(trn));
  DBUG_ASSERT(lock_type != TL_UNLOCK);
  DBUG_ASSERT(file->trn == trn);
  /*
    external_lock() already ran, so don't increment locked_tables here.
    This may execute once per table when the statement starts, which is
    fine as long as trnman_new_statement() stays cheap; otherwise it
    would need to run only on the first call.
  */
  trnman_new_statement(trn);
#ifdef EXTRA_DEBUG
  if (!(trnman_get_flags(trn) & TRN_STATE_INFO_LOGGED) &&
      trnman_get_flags(trn) & TRN_STATE_TABLES_CAN_CHANGE)
  {
    trnman_set_flags(trn, trnman_get_flags(trn) | TRN_STATE_INFO_LOGGED);
    (void) translog_log_debug_info(trn, LOGREC_DEBUG_INFO_QUERY,
                                   (uchar*) thd->query(),
                                   thd->query_length());
  }
#endif
  return 0;
}
| 0
|
369,145
|
/*
 * IORING_OP_PROVIDE_BUFFERS: register the batch of user buffers described
 * by req->pbuf under its buffer-group id, creating the group's list head
 * on first use. The ring submit lock is held for the whole operation and
 * the request is completed before it is dropped.
 */
static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_provide_buf *pbuf = &req->pbuf;
	struct io_buffer_list *list;
	int ret = 0;

	io_ring_submit_lock(ctx, needs_lock);
	lockdep_assert_held(&ctx->uring_lock);

	list = io_buffer_get_list(ctx, pbuf->bgid);
	if (!list) {
		/* first buffers for this group id: allocate its list head */
		list = kmalloc(sizeof(*list), GFP_KERNEL);
		if (!list) {
			ret = -ENOMEM;
			goto out;
		}
		io_buffer_add_list(ctx, list, pbuf->bgid);
	}
	ret = io_add_buffers(ctx, pbuf, list);
out:
	if (ret < 0)
		req_set_fail(req);
	/* complete before unlock, IOPOLL may need the lock */
	__io_req_complete(req, issue_flags, ret, 0);
	io_ring_submit_unlock(ctx, needs_lock);
	return 0;
}
| 0
|
238,503
|
/*
 * Seed the 64-bit bounds of @reg from its 32-bit subregister bounds.
 * Unsigned bounds transfer directly. The signed 32-bit bounds are only
 * usable when both fit a non-negative s64 range; otherwise fall back to
 * the loosest [0, U32_MAX] window and let the tnum refine it later.
 */
static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	int s32_usable = __reg32_bound_s64(reg->s32_min_value) &&
			 __reg32_bound_s64(reg->s32_max_value);

	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;

	if (!s32_usable) {
		/* worst-case signed bounds, refined later from the tnum */
		reg->smin_value = 0;
		reg->smax_value = U32_MAX;
	} else {
		reg->smin_value = reg->s32_min_value;
		reg->smax_value = reg->s32_max_value;
	}
}
| 0
|
509,549
|
int ha_maria::end_bulk_insert()
{
int first_error, error;
my_bool abort= file->s->deleting;
DBUG_ENTER("ha_maria::end_bulk_insert");
if ((first_error= maria_end_bulk_insert(file, abort)))
abort= 1;
if ((error= maria_extra(file, HA_EXTRA_NO_CACHE, 0)))
{
first_error= first_error ? first_error : error;
abort= 1;
}
if (!abort && can_enable_indexes)
if ((error= enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE)))
first_error= first_error ? first_error : error;
if (bulk_insert_single_undo != BULK_INSERT_NONE)
{
/*
Table was transactional just before start_bulk_insert().
No need to flush pages if we did a repair (which already flushed).
*/
if ((error= _ma_reenable_logging_for_table(file,
bulk_insert_single_undo ==
BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)))
first_error= first_error ? first_error : error;
bulk_insert_single_undo= BULK_INSERT_NONE; // Safety
}
can_enable_indexes= 0;
DBUG_RETURN(first_error);
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.