idx
int64 | func
string | target
int64 |
|---|---|---|
224,283
|
/// Construct per-transaction gopher state tied to the forwarding context.
/// Starts in NORMAL conversion mode with a plain-file ('0') gopher type;
/// the real type is presumably set later when the URL is parsed — confirm
/// against the caller.
GopherStateData(FwdState *aFwd) :
entry(aFwd->entry),
conversion(NORMAL),
HTML_header_added(0),
HTML_pre(0),
type_id(GOPHER_FILE /* '0' */),
overflowed(false),
cso_recno(0),
len(0),
buf(NULL),
fwd(aFwd)
{
// request/replybuf look like fixed-size char arrays; start them empty.
// NOTE(review): confirm their declarations in the class body.
*request = 0;
buf = (char *)memAllocate(MEM_4K_BUF);
// Keep the store entry alive for the lifetime of this gopher state;
// the matching unlock happens elsewhere (destructor or cleanup path).
entry->lock("gopherState");
*replybuf = 0;
}
| 0
|
244,040
|
/*
 * Allocate a new TrueHD configuration ('dmlp') box.
 * ISOM_DECL_BOX_ALLOC declares and initializes the local `tmp` with the
 * given concrete type and fourcc; NOTE(review): its failure behavior
 * (NULL return) is assumed from the macro — confirm in box_code_base.
 */
GF_Box *dmlp_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_TrueHDConfigBox, GF_ISOM_BOX_TYPE_DMLP);
return (GF_Box *)tmp;
}
| 0
|
498,624
|
/*
 * Reverse one scanline of pixels in place (mirror left/right).
 * Pixel k is swapped with pixel width-1-k, one byte at a time;
 * info->bytes is the per-pixel byte count, info->width the pixel count.
 * Odd widths leave the middle pixel untouched.
 */
flip_line (guchar *buf,
tga_info *info)
{
guchar *left = buf;
guchar *right = buf + (info->bytes * (info->width - 1));
gint pair, byte;
for (pair = 0; pair * 2 < info->width; pair++)
{
/* Swap the two mirrored pixels byte by byte. */
for (byte = 0; byte < info->bytes; ++byte)
{
guchar swap = left[byte];
left[byte] = right[byte];
right[byte] = swap;
}
left += info->bytes;
right -= info->bytes;
}
}
| 0
|
500,080
|
/*
 * Lazy-binding wrapper around krb5_kt_default(): loads the Kerberos DLL
 * on first use, then forwards to the resolved symbol.  Returns
 * KRB5KRB_ERR_GENERIC when the function could not be resolved.
 */
kssl_krb5_kt_default(krb5_context con,
krb5_keytab * kt)
{
if (!krb5_loaded)
load_krb5_dll();
if ( p_krb5_kt_default )
return(p_krb5_kt_default(con,kt));
else
return KRB5KRB_ERR_GENERIC;
}
| 0
|
512,801
|
// Integer representation of the cached temporal value, delegated to
// cached_time.to_longlong().
longlong val_int() { return cached_time.to_longlong(); }
| 0
|
436,130
|
/*
 * Reap completions from ctx->iopoll_list.  Already-completed requests
 * are moved to a local `done` list and flushed via io_iopoll_complete()
 * (which updates *nr_events); other requests are polled through their
 * file's ->iopoll().  Returns 0 or the first negative ->iopoll() error.
 */
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
long min)
{
struct io_kiocb *req, *tmp;
LIST_HEAD(done);
bool spin;
int ret;
/*
 * Only spin for completions if we don't have multiple devices hanging
 * off our complete list, and we're under the requested amount.
 */
spin = !ctx->poll_multi_queue && *nr_events < min;
ret = 0;
list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
struct kiocb *kiocb = &req->rw.kiocb;
/*
 * Move completed and retryable entries to our local lists.
 * If we find a request that requires polling, break out
 * and complete those lists first, if we have entries there.
 */
if (READ_ONCE(req->iopoll_completed)) {
list_move_tail(&req->inflight_entry, &done);
continue;
}
if (!list_empty(&done))
break;
ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
if (ret < 0)
break;
/* iopoll may have completed current req */
if (READ_ONCE(req->iopoll_completed))
list_move_tail(&req->inflight_entry, &done);
/* Stop spinning once any poll reported progress. */
if (ret && spin)
spin = false;
ret = 0;
}
if (!list_empty(&done))
io_iopoll_complete(ctx, nr_events, &done);
return ret;
}
| 0
|
409,437
|
/*
 * Wake the cursor: clear the asleep flag and turn the cursor back on.
 */
cursor_unsleep(void)
{
cursor_is_asleep = FALSE;
cursor_on();
}
| 0
|
463,130
|
/*
 * Per-server callback (presumably for a hash/iterator walk — confirm at
 * call site): forwards an annotation store to one backend server using
 * the mailbox pattern and entry/attribute list packed in the rock.
 * The `data` argument is unused.
 */
static void store_proxy(const char *server, void *data __attribute__((unused)),
void *rock)
{
struct proxy_rock *prock = (struct proxy_rock *) rock;
proxy_store_func(server, prock->mbox_pat, prock->entryatts);
}
| 0
|
442,959
|
/*
 * Redraw the status line of window "wp".  Handles four cases: no status
 * line (mark the command line for redraw), redraw deferred (not
 * redrawing now or popup menu visible), a custom 'statusline' option,
 * or the default status line built from the buffer name plus
 * [Help]/[Preview]/[+]/[RO] flags, truncated to leave room for the
 * ruler column.  Re-entrancy via ":redrawstatus" is suppressed with a
 * static busy flag.
 */
win_redr_status(win_T *wp, int ignore_pum UNUSED)
{
int row;
char_u *p;
int len;
int fillchar;
int attr;
int this_ru_col;
static int busy = FALSE;
// It's possible to get here recursively when 'statusline' (indirectly)
// invokes ":redrawstatus". Simply ignore the call then.
if (busy)
return;
busy = TRUE;
row = statusline_row(wp);
wp->w_redr_status = FALSE;
if (wp->w_status_height == 0)
{
// no status line, can only be last window
redraw_cmdline = TRUE;
}
else if (!redrawing()
// don't update status line when popup menu is visible and may be
// drawn over it, unless it will be redrawn later
|| (!ignore_pum && pum_visible()))
{
// Don't redraw right now, do it later.
wp->w_redr_status = TRUE;
}
#ifdef FEAT_STL_OPT
else if (*p_stl != NUL || *wp->w_p_stl != NUL)
{
// redraw custom status line
redraw_custom_statusline(wp);
}
#endif
else
{
fillchar = fillchar_status(&attr, wp);
get_trans_bufname(wp->w_buffer);
p = NameBuff;
len = (int)STRLEN(p);
// Append a space before any of the flags below, if one will be added.
if ((bt_help(wp->w_buffer)
#ifdef FEAT_QUICKFIX
|| wp->w_p_pvw
#endif
|| bufIsChanged(wp->w_buffer)
|| wp->w_buffer->b_p_ro)
&& len < MAXPATHL - 1)
*(p + len++) = ' ';
if (bt_help(wp->w_buffer))
{
vim_snprintf((char *)p + len, MAXPATHL - len, "%s", _("[Help]"));
len += (int)STRLEN(p + len);
}
#ifdef FEAT_QUICKFIX
if (wp->w_p_pvw)
{
vim_snprintf((char *)p + len, MAXPATHL - len, "%s", _("[Preview]"));
len += (int)STRLEN(p + len);
}
#endif
if (bufIsChanged(wp->w_buffer)
#ifdef FEAT_TERMINAL
&& !bt_terminal(wp->w_buffer)
#endif
)
{
vim_snprintf((char *)p + len, MAXPATHL - len, "%s", "[+]");
len += (int)STRLEN(p + len);
}
if (wp->w_buffer->b_p_ro)
{
vim_snprintf((char *)p + len, MAXPATHL - len, "%s", _("[RO]"));
len += (int)STRLEN(p + len);
}
// Compute the column where the ruler starts within this window.
this_ru_col = ru_col - (Columns - wp->w_width);
if (this_ru_col < (wp->w_width + 1) / 2)
this_ru_col = (wp->w_width + 1) / 2;
if (this_ru_col <= 1)
{
p = (char_u *)"<"; // No room for file name!
len = 1;
}
else if (has_mbyte)
{
int clen = 0, i;
// Count total number of display cells.
clen = mb_string2cells(p, -1);
// Find first character that will fit.
// Going from start to end is much faster for DBCS.
for (i = 0; p[i] != NUL && clen >= this_ru_col - 1;
i += (*mb_ptr2len)(p + i))
clen -= (*mb_ptr2cells)(p + i);
len = clen;
if (i > 0)
{
// Mark the truncation point with '<'.
p = p + i - 1;
*p = '<';
++len;
}
}
else if (len > this_ru_col - 1)
{
p += len - (this_ru_col - 1);
*p = '<';
len = this_ru_col - 1;
}
screen_puts(p, row, wp->w_wincol, attr);
screen_fill(row, row + 1, len + wp->w_wincol,
this_ru_col + wp->w_wincol, fillchar, fillchar, attr);
// Show the keymap name right-aligned before the ruler, if it fits.
if (get_keymap_str(wp, (char_u *)"<%s>", NameBuff, MAXPATHL)
&& (int)(this_ru_col - len) > (int)(STRLEN(NameBuff) + 1))
screen_puts(NameBuff, row, (int)(this_ru_col - STRLEN(NameBuff)
- 1 + wp->w_wincol), attr);
#ifdef FEAT_CMDL_INFO
win_redr_ruler(wp, TRUE, ignore_pum);
#endif
}
/*
* May need to draw the character below the vertical separator.
*/
if (wp->w_vsep_width != 0 && wp->w_status_height != 0 && redrawing())
{
if (stl_connected(wp))
fillchar = fillchar_status(&attr, wp);
else
fillchar = fillchar_vsep(&attr);
screen_putchar(fillchar, row, W_ENDCOL(wp), attr);
}
busy = FALSE;
}
| 0
|
294,633
|
/*
 * Return a copy of the receiver with its day-of-calendar-reform set to
 * the ENGLAND constant (delegates to dup_obj_with_new_start).
 */
d_lite_england(VALUE self)
{
return dup_obj_with_new_start(self, ENGLAND);
}
| 0
|
329,921
|
/*
 * Return the lazily-initialized image traps compositor singleton.
 * The first caller through the atomic once-gate fills in the vtable
 * on top of __cairo_no_compositor; all callers get &compositor.base.
 * The commented-out hooks intentionally keep the no-compositor default.
 */
_cairo_image_traps_compositor_get (void)
{
static cairo_atomic_once_t once = CAIRO_ATOMIC_ONCE_INIT;
static cairo_traps_compositor_t compositor;
if (_cairo_atomic_init_once_enter(&once)) {
_cairo_traps_compositor_init(&compositor,
&__cairo_no_compositor);
compositor.acquire = acquire;
compositor.release = release;
compositor.set_clip_region = set_clip_region;
compositor.pattern_to_surface = _cairo_image_source_create_for_pattern;
compositor.draw_image_boxes = draw_image_boxes;
//compositor.copy_boxes = copy_boxes;
compositor.fill_boxes = fill_boxes;
compositor.check_composite = check_composite;
compositor.composite = composite;
compositor.lerp = lerp;
//compositor.check_composite_boxes = check_composite_boxes;
compositor.composite_boxes = composite_boxes;
//compositor.check_composite_traps = check_composite_traps;
compositor.composite_traps = composite_traps;
//compositor.check_composite_tristrip = check_composite_traps;
#if PIXMAN_VERSION >= PIXMAN_VERSION_ENCODE(0,22,0)
compositor.composite_tristrip = composite_tristrip;
#endif
compositor.check_composite_glyphs = check_composite_glyphs;
compositor.composite_glyphs = composite_glyphs;
_cairo_atomic_init_once_leave(&once);
}
return &compositor.base;
}
| 0
|
226,262
|
/*
 * Allocate a new data reference ('dref') box.  ISOM_DECL_BOX_ALLOC
 * declares the local `tmp` of the concrete type with the given fourcc.
 */
GF_Box *dref_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_DataReferenceBox, GF_ISOM_BOX_TYPE_DREF);
return (GF_Box *)tmp;
}
| 0
|
437,318
|
/*
 * Recursively emit bytecode for the parse-tree rooted at "node" into
 * reg's program.  Returns 0 on success or a negative ONIGERR_* code.
 * Alternations are compiled as PUSH/JUMP chains whose jump targets are
 * computed from compile_length_tree() ahead of emission.
 */
compile_tree(Node* node, regex_t* reg, ScanEnv* env)
{
int n, len, pos, r = 0;
switch (NODE_TYPE(node)) {
case NODE_LIST:
do {
r = compile_tree(NODE_CAR(node), reg, env);
} while (r == 0 && IS_NOT_NULL(node = NODE_CDR(node)));
break;
case NODE_ALT:
{
Node* x = node;
len = 0;
/* First pass: total length of all branches plus PUSH/JUMP glue. */
do {
len += compile_length_tree(NODE_CAR(x), reg);
if (IS_NOT_NULL(NODE_CDR(x))) {
len += SIZE_OP_PUSH + SIZE_OP_JUMP;
}
} while (IS_NOT_NULL(x = NODE_CDR(x)));
pos = reg->used + len; /* goal position */
/* Second pass: emit each branch; non-last branches get a PUSH of
 * the next alternative and a JUMP to the common goal position. */
do {
len = compile_length_tree(NODE_CAR(node), reg);
if (IS_NOT_NULL(NODE_CDR(node))) {
enum OpCode push = NODE_IS_SUPER(node) ? OP_PUSH_SUPER : OP_PUSH;
r = add_opcode_rel_addr(reg, push, len + SIZE_OP_JUMP);
if (r != 0) break;
}
r = compile_tree(NODE_CAR(node), reg, env);
if (r != 0) break;
if (IS_NOT_NULL(NODE_CDR(node))) {
len = pos - (reg->used + SIZE_OP_JUMP);
r = add_opcode_rel_addr(reg, OP_JUMP, len);
if (r != 0) break;
}
} while (IS_NOT_NULL(node = NODE_CDR(node)));
}
break;
case NODE_STRING:
if (NODE_STRING_IS_RAW(node))
r = compile_string_raw_node(STR_(node), reg);
else
r = compile_string_node(node, reg);
break;
case NODE_CCLASS:
r = compile_cclass_node(CCLASS_(node), reg);
break;
case NODE_CTYPE:
{
int op;
switch (CTYPE_(node)->ctype) {
case CTYPE_ANYCHAR:
if (IS_MULTILINE(CTYPE_OPTION(node, reg)))
r = add_opcode(reg, OP_ANYCHAR_ML);
else
r = add_opcode(reg, OP_ANYCHAR);
break;
case ONIGENC_CTYPE_WORD:
if (CTYPE_(node)->ascii_mode == 0) {
op = CTYPE_(node)->not != 0 ? OP_NO_WORD : OP_WORD;
}
else {
op = CTYPE_(node)->not != 0 ? OP_NO_WORD_ASCII : OP_WORD_ASCII;
}
r = add_opcode(reg, op);
break;
default:
return ONIGERR_TYPE_BUG;
break;
}
}
break;
case NODE_BACKREF:
{
BackRefNode* br = BACKREF_(node);
if (NODE_IS_CHECKER(node)) {
#ifdef USE_BACKREF_WITH_LEVEL
if (NODE_IS_NEST_LEVEL(node)) {
r = add_opcode(reg, OP_BACKREF_CHECK_WITH_LEVEL);
if (r != 0) return r;
r = add_length(reg, br->nest_level);
if (r != 0) return r;
}
else
#endif
{
r = add_opcode(reg, OP_BACKREF_CHECK);
if (r != 0) return r;
}
goto add_bacref_mems;
}
else {
#ifdef USE_BACKREF_WITH_LEVEL
if (NODE_IS_NEST_LEVEL(node)) {
r = add_opcode(reg, OP_BACKREF_WITH_LEVEL);
if (r != 0) return r;
r = add_option(reg, (reg->options & ONIG_OPTION_IGNORECASE));
if (r != 0) return r;
r = add_length(reg, br->nest_level);
if (r != 0) return r;
goto add_bacref_mems;
}
else
#endif
if (br->back_num == 1) {
n = br->back_static[0];
if (IS_IGNORECASE(reg->options)) {
r = add_opcode(reg, OP_BACKREF_N_IC);
if (r != 0) return r;
r = add_mem_num(reg, n);
}
else {
switch (n) {
case 1: r = add_opcode(reg, OP_BACKREF1); break;
case 2: r = add_opcode(reg, OP_BACKREF2); break;
default:
r = add_opcode(reg, OP_BACKREF_N);
if (r != 0) return r;
r = add_mem_num(reg, n);
break;
}
}
}
else {
int i;
int* p;
if (IS_IGNORECASE(reg->options)) {
r = add_opcode(reg, OP_BACKREF_MULTI_IC);
}
else {
r = add_opcode(reg, OP_BACKREF_MULTI);
}
if (r != 0) return r;
/* Shared tail: emit the referenced memory numbers, highest first. */
add_bacref_mems:
r = add_length(reg, br->back_num);
if (r != 0) return r;
p = BACKREFS_P(br);
for (i = br->back_num - 1; i >= 0; i--) {
r = add_mem_num(reg, p[i]);
if (r != 0) return r;
}
}
}
}
break;
#ifdef USE_CALL
case NODE_CALL:
r = compile_call(CALL_(node), reg, env);
break;
#endif
case NODE_QUANT:
r = compile_quantifier_node(QUANT_(node), reg, env);
break;
case NODE_ENCLOSURE:
r = compile_enclosure_node(ENCLOSURE_(node), reg, env);
break;
case NODE_ANCHOR:
r = compile_anchor_node(ANCHOR_(node), reg, env);
break;
case NODE_GIMMICK:
r = compile_gimmick_node(GIMMICK_(node), reg);
break;
default:
#ifdef ONIG_DEBUG
fprintf(stderr, "compile_tree: undefined node type %d\n", NODE_TYPE(node));
#endif
break;
}
return r;
}
| 0
|
218,970
|
// Constant-folding pass: replace a Shuffle/Transpose node with Identity
// when its constant permutation provably leaves the (known) input shape
// unchanged — i.e. every dimension is either size 1 or mapped to itself.
// No-op (OK status) when shape info is unavailable or the node is of a
// different type.
Status ConstantFolding::RemoveShuffleOrTranspose(
const GraphProperties& properties, bool use_shape_info,
GraphDef* optimized_graph, NodeDef* node) {
if (!use_shape_info || !(IsShuffle(*node) || IsTranspose(*node)))
return Status::OK();
Tensor permutation_tensor;
if (GetTensorFromConstNode(node->input(1), &permutation_tensor) &&
properties.HasInputProperties(node->name())) {
const auto& shape = properties.GetInputProperties(node->name())[0].shape();
// Widen the permutation values to int regardless of stored dtype.
std::vector<int> permutation;
for (int j = 0; j < permutation_tensor.NumElements(); ++j) {
if (permutation_tensor.dtype() == DT_INT64) {
permutation.push_back(permutation_tensor.vec<int64_t>()(j));
} else {
permutation.push_back(permutation_tensor.vec<int>()(j));
}
}
int permutation_size = permutation.size();
if (permutation_size != shape.dim_size()) {
// Number of elements in perm should be same as dim_size. Skip if not.
return Status::OK();
}
// The node is replaceable iff
// dim_size == 0 || all dims have size 1 ||
// all dims with > 1 size are not permuted.
bool replaceable = true;
for (int j = 0; replaceable && j < shape.dim_size(); ++j) {
replaceable &= shape.dim(j).size() == 1 || j == permutation[j];
}
if (replaceable) {
ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
}
}
return Status::OK();
}
| 0
|
484,768
|
/*
 * Garbage-collect completed entries from the TX ring: validate each
 * response from the backend, end and release its grant reference, free
 * the associated skb, and recycle the slot id.  Any inconsistent
 * response (bad id, inactive request, grant still in use, producer
 * overflow) marks the whole device broken.  Returns true when any
 * response was processed.
 */
static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
RING_IDX cons, prod;
unsigned short id;
struct sk_buff *skb;
bool more_to_do;
bool work_done = false;
const struct device *dev = &queue->info->netdev->dev;
BUG_ON(!netif_carrier_ok(queue->info->netdev));
do {
prod = queue->tx.sring->rsp_prod;
if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
dev_alert(dev, "Illegal number of responses %u\n",
prod - queue->tx.rsp_cons);
goto err;
}
rmb(); /* Ensure we see responses up to 'rp'. */
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
struct xen_netif_tx_response txrsp;
work_done = true;
RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
if (txrsp.status == XEN_NETIF_RSP_NULL)
continue;
id = txrsp.id;
/* The id comes from the (untrusted) backend: bounds-check it
 * and make sure it refers to a request we actually posted. */
if (id >= RING_SIZE(&queue->tx)) {
dev_alert(dev,
"Response has incorrect id (%u)\n",
id);
goto err;
}
if (queue->tx_link[id] != TX_PENDING) {
dev_alert(dev,
"Response for inactive request\n");
goto err;
}
queue->tx_link[id] = TX_LINK_NONE;
skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
if (unlikely(!gnttab_end_foreign_access_ref(
queue->grant_tx_ref[id]))) {
dev_alert(dev,
"Grant still in use by backend domain\n");
goto err;
}
gnttab_release_grant_reference(
&queue->gref_tx_head, queue->grant_tx_ref[id]);
queue->grant_tx_ref[id] = INVALID_GRANT_REF;
queue->grant_tx_page[id] = NULL;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
dev_kfree_skb_irq(skb);
}
queue->tx.rsp_cons = prod;
RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
} while (more_to_do);
xennet_maybe_wake_tx(queue);
return work_done;
err:
queue->info->broken = true;
dev_alert(dev, "Disabled for further use\n");
return work_done;
}
| 0
|
224,891
|
// Gradient kernel for FractionalAvgPool: each out_backprop element is
// evenly distributed over the input cells of its pooling region,
// accumulated in double precision, then cast back to T.  Inputs are
// validated (shape ranks/sizes, non-zero dims, non-negative pooling
// sequences) before any indexing.
void Compute(OpKernelContext* context) override {
// Here's the basic idea:
// Batch and depth dimension are independent from row and col dimension. And
// because FractionalAvgPool currently only support pooling along row and
// col, we can basically think of this 4D tensor backpropagation as
// operation of a series of 2D planes.
//
// For each element of a 'slice' (2D plane) of output_backprop, we need to
// figure out its contributors when doing FractionalAvgPool operation. This
// can be done based on row_pooling_sequence, col_pooling_seq and
// overlapping.
// Once we figure out the original contributors, we just need to evenly
// divide the value of this element among these contributors.
//
// Internally, we divide the out_backprop tensor and store it in a temporary
// tensor of double type. And cast it to the corresponding type.
typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
ConstEigenMatrixMap;
typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>>
EigenDoubleMatrixMap;
// Grab the inputs.
const Tensor& orig_input_tensor_shape = context->input(0);
OP_REQUIRES(context,
orig_input_tensor_shape.dims() == 1 &&
orig_input_tensor_shape.NumElements() == 4,
errors::InvalidArgument("original input tensor shape must be"
"1-dimensional and 4 elements"));
const Tensor& out_backprop = context->input(1);
const Tensor& row_seq_tensor = context->input(2);
const Tensor& col_seq_tensor = context->input(3);
const int64_t out_batch = out_backprop.dim_size(0);
const int64_t out_rows = out_backprop.dim_size(1);
const int64_t out_cols = out_backprop.dim_size(2);
const int64_t out_depth = out_backprop.dim_size(3);
// The sequences need one more element than output rows/cols because
// entry r+1 / c+1 is read below to find each region's end.
OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", row_seq_tensor must have at least ",
out_rows + 1, " elements, but got ",
row_seq_tensor.NumElements()));
OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols,
errors::InvalidArgument("Given out_backprop shape ",
out_backprop.shape().DebugString(),
", col_seq_tensor must have at least ",
out_cols + 1, " elements, but got ",
col_seq_tensor.NumElements()));
auto row_seq_tensor_flat = row_seq_tensor.flat<int64_t>();
auto col_seq_tensor_flat = col_seq_tensor.flat<int64_t>();
auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64_t>();
const int64_t in_batch = orig_input_tensor_shape_flat(0);
const int64_t in_rows = orig_input_tensor_shape_flat(1);
const int64_t in_cols = orig_input_tensor_shape_flat(2);
const int64_t in_depth = orig_input_tensor_shape_flat(3);
OP_REQUIRES(
context, in_batch != 0,
errors::InvalidArgument("Batch dimension of input must not be 0"));
OP_REQUIRES(
context, in_rows != 0,
errors::InvalidArgument("Rows dimension of input must not be 0"));
OP_REQUIRES(
context, in_cols != 0,
errors::InvalidArgument("Columns dimension of input must not be 0"));
OP_REQUIRES(
context, in_depth != 0,
errors::InvalidArgument("Depth dimension of input must not be 0"));
constexpr int tensor_in_and_out_dims = 4;
// Transform orig_input_tensor_shape into TensorShape
TensorShape in_shape;
for (auto i = 0; i < tensor_in_and_out_dims; ++i) {
in_shape.AddDim(orig_input_tensor_shape_flat(i));
}
// Create intermediate in_backprop.
Tensor in_backprop_tensor_temp;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp(
{0}, DataTypeToEnum<double>::v(), in_shape,
&in_backprop_tensor_temp));
in_backprop_tensor_temp.flat<double>().setZero();
// Transform 4D tensor to 2D matrix.
EigenDoubleMatrixMap in_backprop_tensor_temp_mat(
in_backprop_tensor_temp.flat<double>().data(), in_depth,
in_cols * in_rows * in_batch);
ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(),
out_depth,
out_cols * out_rows * out_batch);
// Loop through each element of out_backprop and evenly distribute the
// element to the corresponding pooling cell.
const int64_t in_max_row_index = in_rows - 1;
const int64_t in_max_col_index = in_cols - 1;
for (int64_t b = 0; b < out_batch; ++b) {
for (int64_t r = 0; r < out_rows; ++r) {
const int64_t in_row_start = row_seq_tensor_flat(r);
// Overlapping pooling shares the boundary row/col with the next
// region; non-overlapping stops one before it.
int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1)
: row_seq_tensor_flat(r + 1) - 1;
in_row_end = std::min(in_row_end, in_max_row_index);
OP_REQUIRES(context, in_row_start >= 0 && in_row_end >= 0,
errors::InvalidArgument(
"Row sequence tensor values must not be negative, got ",
row_seq_tensor_flat));
for (int64_t c = 0; c < out_cols; ++c) {
const int64_t in_col_start = col_seq_tensor_flat(c);
int64_t in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1)
: col_seq_tensor_flat(c + 1) - 1;
in_col_end = std::min(in_col_end, in_max_col_index);
OP_REQUIRES(
context, in_col_start >= 0 && in_col_end >= 0,
errors::InvalidArgument(
"Column sequence tensor values must not be negative, got ",
col_seq_tensor_flat));
const int64_t num_elements_in_pooling_cell =
(in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1);
const int64_t out_index = (b * out_rows + r) * out_cols + c;
// Now we can evenly distribute out_backprop(b, h, w, *) to
// in_backprop(b, hs:he, ws:we, *).
for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) {
for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) {
const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c;
// Walk through each channel (depth).
for (int64_t d = 0; d < out_depth; ++d) {
const double out_backprop_element = static_cast<double>(
out_backprop_mat.coeffRef(d, out_index));
double& in_backprop_ref =
in_backprop_tensor_temp_mat.coeffRef(d, in_index);
in_backprop_ref +=
out_backprop_element / num_elements_in_pooling_cell;
}
}
}
}
}
}
// Depending on the type, cast double to type T.
Tensor* in_backprop_tensor = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 0, in_shape, &in_backprop_tensor));
auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>();
auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>();
for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) {
in_backprop_tensor_flat(i) =
static_cast<T>(in_backprop_tensor_temp_flat(i));
}
}
| 0
|
247,342
|
/*
 * Compare two OpenPGP digest-parameter sets on the fields that matter
 * for our identity checks.  Returns 0 when they match for our purposes,
 * 1 otherwise (including when either argument is NULL).
 */
int pgpDigParamsCmp(pgpDigParams p1, pgpDigParams p2)
{
/* A missing side never matches anything. */
if (p1 == NULL || p2 == NULL)
return 1;
/* XXX Should we compare something else too? */
if (p1->tag != p2->tag
|| p1->hash_algo != p2->hash_algo
|| p1->pubkey_algo != p2->pubkey_algo
|| p1->version != p2->version
|| p1->sigtype != p2->sigtype)
return 1;
/* The signing key id must be byte-identical. */
if (memcmp(p1->signid, p2->signid, sizeof(p1->signid)) != 0)
return 1;
/* User ids only differentiate when both are present. */
if (p1->userid && p2->userid && strcmp(p1->userid, p2->userid) != 0)
return 1;
/* Parameters match ... at least for our purposes */
return 0;
}
| 0
|
336,594
|
/*
 * Marshal the current graphics device display info and queue it to the
 * attached agent.  Bails out silently when no agent is attached or it
 * does not support the message.  If no write buffer is available the
 * message is flagged pending so it can be resent later.
 */
void reds_send_device_display_info(RedsState *reds)
{
if (!reds->agent_dev->priv->agent_attached) {
return;
}
if (!reds->agent_dev->priv->agent_supports_graphics_device_info) {
return;
}
g_debug("Sending device display info to the agent:");
SpiceMarshaller *m = spice_marshaller_new();
reds_marshall_device_display_info(reds, m);
RedCharDeviceWriteBuffer *char_dev_buf = vdagent_new_write_buffer(reds->agent_dev.get(),
VD_AGENT_GRAPHICS_DEVICE_INFO,
spice_marshaller_get_total_size(m),
true);
if (!char_dev_buf) {
spice_marshaller_destroy(m);
reds->pending_device_display_info_message = true;
return;
}
VDInternalBuf *internal_buf = (VDInternalBuf *)char_dev_buf->buf;
int free_info;
size_t len_info;
// Linearize the marshalled data and copy it into the write buffer;
// free the linearized copy only if the marshaller allocated one.
uint8_t *info = spice_marshaller_linearize(m, 0, &len_info, &free_info);
memcpy(&internal_buf->u.graphics_device_info, info, len_info);
if (free_info) {
free(info);
}
spice_marshaller_destroy(m);
reds->pending_device_display_info_message = false;
reds->agent_dev->write_buffer_add(char_dev_buf);
}
| 0
|
369,158
|
static int io_register_personality(struct io_ring_ctx *ctx)
{
const struct cred *creds;
u32 id;
int ret;
creds = get_current_cred();
ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
if (ret < 0) {
put_cred(creds);
return ret;
}
return id;
| 0
|
355,652
|
/*
 * Handle the "&&" level of expression evaluation (logical AND).
 * "arg" is advanced past the parsed expression and the result is left
 * in "rettv".  Short-circuits: once the accumulated result is FALSE,
 * remaining operands are still parsed but with EVAL_EVALUATE cleared.
 * Returns OK or FAIL.
 */
eval3(char_u **arg, typval_T *rettv, evalarg_T *evalarg)
{
char_u *p;
int getnext;
/*
 * Get the first variable.
 */
if (eval4(arg, rettv, evalarg) == FAIL)
return FAIL;
/*
 * Handle the "&&" operator.
 */
p = eval_next_non_blank(*arg, evalarg, &getnext);
if (p[0] == '&' && p[1] == '&')
{
evalarg_T *evalarg_used = evalarg;
evalarg_T local_evalarg;
int orig_flags;
int evaluate;
long result = TRUE;
typval_T var2;
int error = FALSE;
int vim9script = in_vim9script();
// Without a caller-supplied evalarg use a local one.
if (evalarg == NULL)
{
init_evalarg(&local_evalarg);
evalarg_used = &local_evalarg;
}
orig_flags = evalarg_used->eval_flags;
evaluate = orig_flags & EVAL_EVALUATE;
if (evaluate)
{
// In Vim9 script the operand must be a boolean; legacy script
// accepts any number, zero meaning FALSE.
if (vim9script)
result = tv_get_bool_chk(rettv, &error);
else if (tv_get_number_chk(rettv, &error) == 0)
result = FALSE;
clear_tv(rettv);
if (error)
return FAIL;
}
/*
 * Repeat until there is no following "&&".
 */
while (p[0] == '&' && p[1] == '&')
{
if (getnext)
*arg = eval_next_line(evalarg_used);
else
{
// Vim9 script requires white space around "&&".
if (evaluate && vim9script && !VIM_ISWHITE(p[-1]))
{
error_white_both(p, 2);
clear_tv(rettv);
return FAIL;
}
*arg = p;
}
/*
 * Get the second variable.
 */
if (evaluate && in_vim9script() && !IS_WHITE_OR_NUL((*arg)[2]))
{
error_white_both(*arg, 2);
clear_tv(rettv);
return FAIL;
}
*arg = skipwhite_and_linebreak(*arg + 2, evalarg_used);
// Once the result is FALSE, parse but don't evaluate the rest.
evalarg_used->eval_flags = result ? orig_flags
: orig_flags & ~EVAL_EVALUATE;
CLEAR_FIELD(var2);
if (eval4(arg, &var2, evalarg_used) == FAIL)
return FAIL;
/*
 * Compute the result.
 */
if (evaluate && result)
{
if (vim9script)
result = tv_get_bool_chk(&var2, &error);
else if (tv_get_number_chk(&var2, &error) == 0)
result = FALSE;
clear_tv(&var2);
if (error)
return FAIL;
}
if (evaluate)
{
if (vim9script)
{
rettv->v_type = VAR_BOOL;
rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE;
}
else
{
rettv->v_type = VAR_NUMBER;
rettv->vval.v_number = result;
}
}
p = eval_next_non_blank(*arg, evalarg_used, &getnext);
}
if (evalarg == NULL)
clear_evalarg(&local_evalarg, NULL);
else
evalarg->eval_flags = orig_flags;
}
return OK;
}
| 0
|
452,996
|
/*
 * Destructor for an immediate expression.  When the expression holds a
 * jump/goto verdict to a chain that is bound to this rule, release all
 * of that chain's rules and destroy the chain itself; other verdicts
 * and non-verdict registers need no cleanup here.
 */
static void nft_immediate_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
const struct nft_data *data = &priv->data;
struct nft_rule *rule, *n;
struct nft_ctx chain_ctx;
struct nft_chain *chain;
if (priv->dreg != NFT_REG_VERDICT)
return;
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
chain = data->verdict.chain;
/* Only bound chains are owned by this rule. */
if (!nft_chain_is_bound(chain))
break;
chain_ctx = *ctx;
chain_ctx.chain = chain;
list_for_each_entry_safe(rule, n, &chain->rules, list)
nf_tables_rule_release(&chain_ctx, rule);
nf_tables_chain_destroy(&chain_ctx);
break;
default:
break;
}
}
| 0
|
222,530
|
// Render a full debug dump of a GraphDef: every function in its library
// first, then a blank separator line, then every node (one per line).
string DebugStringWhole(const GraphDef& gdef) {
string out;
for (const auto& func : gdef.library().function()) {
strings::StrAppend(&out, Print(func));
}
strings::StrAppend(&out, "\n");
for (const auto& node : gdef.node()) {
strings::StrAppend(&out, Print(node), "\n");
}
return out;
}
| 0
|
393,474
|
// Squirrel native: bind an environment to the closure at stack slot 1
// via sq_bindenv().  Returns 1 (presumably sq_bindenv pushes the bound
// closure — confirm against the Squirrel API) or SQ_ERROR on failure.
static SQInteger closure_bindenv(HSQUIRRELVM v)
{
if(SQ_FAILED(sq_bindenv(v,1)))
return SQ_ERROR;
return 1;
}
| 0
|
437,388
|
/*
 * Rewrite a back-reference node's group numbers through the renumber
 * map, compacting the list in place and dropping references whose new
 * number is not positive.  Only named back-references are allowed here;
 * numbered ones yield ONIGERR_NUMBERED_BACKREF_OR_CALL_NOT_ALLOWED.
 */
renumber_node_backref(Node* node, GroupNumRemap* map)
{
int i, pos, n, old_num;
int *backs;
BackRefNode* bn = BACKREF_(node);
if (! NODE_IS_BY_NAME(node))
return ONIGERR_NUMBERED_BACKREF_OR_CALL_NOT_ALLOWED;
old_num = bn->back_num;
/* The backref list is stored inline when small, heap-allocated when
 * it outgrew the static array. */
if (IS_NULL(bn->back_dynamic))
backs = bn->back_static;
else
backs = bn->back_dynamic;
for (i = 0, pos = 0; i < old_num; i++) {
n = map[backs[i]].new_val;
if (n > 0) {
backs[pos] = n;
pos++;
}
}
bn->back_num = pos;
return 0;
}
| 0
|
90,192
|
// Refresh the network manager status, but only when the CrOS library
// is loaded (or can be loaded on demand).
void UpdateSystemInfo() {
if (EnsureCrosLoaded()) {
UpdateNetworkManagerStatus();
}
}
| 0
|
279,903
|
/*
 * ":g/pat/cmd" and ":v/pat/cmd" (also ":global!"): execute "cmd" on
 * every line that does (type 'g') or does not (type 'v') match "pat".
 * Works in two passes: mark the (non-)matching lines, then run the
 * command on each marked line.  When nested inside another :global it
 * operates on the current line only.
 */
ex_global(exarg_T *eap)
{
linenr_T lnum; // line number according to old situation
int ndone = 0;
int type; // first char of cmd: 'v' or 'g'
char_u *cmd; // command argument
char_u delim; // delimiter, normally '/'
char_u *pat;
regmmatch_T regmatch;
int match;
int which_pat;
// When nesting the command works on one line. This allows for
// ":g/found/v/notfound/command".
if (global_busy && (eap->line1 != 1
|| eap->line2 != curbuf->b_ml.ml_line_count))
{
// will increment global_busy to break out of the loop
emsg(_(e_cannot_do_global_recursive_with_range));
return;
}
if (eap->forceit) // ":global!" is like ":vglobal"
type = 'v';
else
type = *eap->cmd;
cmd = eap->arg;
which_pat = RE_LAST; // default: use last used regexp
#ifdef FEAT_EVAL
if (in_vim9script() && check_global_and_subst(eap->cmd, eap->arg) == FAIL)
return;
#endif
/*
* undocumented vi feature:
* "\/" and "\?": use previous search pattern.
* "\&": use previous substitute pattern.
*/
if (*cmd == '\\')
{
++cmd;
if (vim_strchr((char_u *)"/?&", *cmd) == NULL)
{
emsg(_(e_backslash_should_be_followed_by));
return;
}
if (*cmd == '&')
which_pat = RE_SUBST; // use previous substitute pattern
else
which_pat = RE_SEARCH; // use previous search pattern
++cmd;
pat = (char_u *)"";
}
else if (*cmd == NUL)
{
emsg(_(e_regular_expression_missing_from_global));
return;
}
else if (check_regexp_delim(*cmd) == FAIL)
{
return;
}
else
{
delim = *cmd; // get the delimiter
if (delim)
++cmd; // skip delimiter if there is one
pat = cmd; // remember start of pattern
cmd = skip_regexp_ex(cmd, delim, magic_isset(), &eap->arg, NULL, NULL);
if (cmd[0] == delim) // end delimiter found
*cmd++ = NUL; // replace it with a NUL
}
if (search_regcomp(pat, RE_BOTH, which_pat, SEARCH_HIS, &regmatch) == FAIL)
{
emsg(_(e_invalid_command));
return;
}
if (global_busy)
{
// Nested :global: apply to the current line only.
lnum = curwin->w_cursor.lnum;
match = vim_regexec_multi(&regmatch, curwin, curbuf, lnum,
(colnr_T)0, NULL, NULL);
if ((type == 'g' && match) || (type == 'v' && !match))
global_exe_one(cmd, lnum);
}
else
{
/*
* pass 1: set marks for each (not) matching line
*/
for (lnum = eap->line1; lnum <= eap->line2 && !got_int; ++lnum)
{
// a match on this line?
match = vim_regexec_multi(&regmatch, curwin, curbuf, lnum,
(colnr_T)0, NULL, NULL);
if (regmatch.regprog == NULL)
break; // re-compiling regprog failed
if ((type == 'g' && match) || (type == 'v' && !match))
{
ml_setmarked(lnum);
ndone++;
}
line_breakcheck();
}
/*
* pass 2: execute the command for each line that has been marked
*/
if (got_int)
msg(_(e_interrupted));
else if (ndone == 0)
{
if (type == 'v')
smsg(_("Pattern found in every line: %s"), pat);
else
smsg(_("Pattern not found: %s"), pat);
}
else
{
#ifdef FEAT_CLIPBOARD
start_global_changes();
#endif
global_exe(cmd);
#ifdef FEAT_CLIPBOARD
end_global_changes();
#endif
}
ml_clearmarked(); // clear rest of the marks
}
vim_regfree(regmatch.regprog);
}
| 0
|
282,984
|
/*
 * Walk the Lua frame chain and the parallel C frame chain to locate the
 * error function protecting the current error.  Returns a stack index
 * of the handler (e.g. xpcall's errorfunc), a cframe errfunc value, or
 * 0 when the error is unprotected / a yield point is reached.
 */
static ptrdiff_t finderrfunc(lua_State *L)
{
cTValue *frame = L->base-1, *bot = tvref(L->stack);
void *cf = L->cframe;
while (frame > bot && cf) {
while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */
if (frame >= restorestack(L, -cframe_nres(cf)))
break;
if (cframe_errfunc(cf) >= 0) /* Error handler not inherited (-1)? */
return cframe_errfunc(cf);
cf = cframe_prev(cf); /* Else unwind cframe and continue searching. */
if (cf == NULL)
return 0;
}
switch (frame_typep(frame)) {
case FRAME_LUA:
case FRAME_LUAP:
frame = frame_prevl(frame);
break;
case FRAME_C:
cf = cframe_prev(cf);
/* fallthrough */
case FRAME_VARG:
frame = frame_prevd(frame);
break;
case FRAME_CONT:
#if LJ_HASFFI
if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK)
cf = cframe_prev(cf);
#endif
frame = frame_prevd(frame);
break;
case FRAME_CP:
/* A protected frame that can yield never runs an error handler. */
if (cframe_canyield(cf)) return 0;
if (cframe_errfunc(cf) >= 0)
return cframe_errfunc(cf);
cf = cframe_prev(cf);
frame = frame_prevd(frame);
break;
case FRAME_PCALL:
case FRAME_PCALLH:
if (frame_ftsz(frame) >= (ptrdiff_t)(2*sizeof(TValue))) /* xpcall? */
return savestack(L, frame-1); /* Point to xpcall's errorfunc. */
return 0;
default:
lua_assert(0);
return 0;
}
}
return 0;
}
| 0
|
445,987
|
/*
 * Show a desktop notification that the archive was created, adding an
 * "Open" action when the notification server advertises the "actions"
 * capability.  notify_data is released from the notification's "closed"
 * handler (notification_closed_cb).
 */
_fr_window_notify_creation_complete (FrWindow *window)
{
char *basename;
char *message;
NotifyNotification *notification;
gboolean notification_supports_actions;
GList *caps;
NotifyData *notify_data;
basename = _g_file_get_display_basename (window->priv->saving_file);
/* Translators: %s is a filename */
message = g_strdup_printf (_("\"%s\" created successfully"), basename);
notification = notify_notification_new (window->priv->batch_title, message, "file-roller");
notify_notification_set_hint_string (notification, "desktop-entry", "file-roller");
notify_data = g_new0 (NotifyData, 1);
notify_data->window = window;
notify_data->window_closed = FALSE;
g_signal_connect (notification,
"closed",
G_CALLBACK (notification_closed_cb),
notify_data);
notification_supports_actions = FALSE;
caps = notify_get_server_caps ();
if (caps != NULL) {
notification_supports_actions = g_list_find_custom (caps, "actions", (GCompareFunc) strcmp) != NULL;
_g_string_list_free (caps);
}
if (notification_supports_actions) {
notify_notification_add_action (notification,
"document-open-symbolic",
_("Open"),
notify_action_open_archive_cb,
notify_data,
NULL);
/*notify_notification_set_hint (notification,
"action-icons",
g_variant_new_boolean (TRUE));*/
}
notify_notification_show (notification, NULL);
g_free (message);
g_free (basename);
}
| 0
|
500,068
|
/*
 * Lazy-binding wrapper around krb5_get_credentials(): loads the
 * Kerberos DLL on first use and forwards to the resolved symbol, or
 * returns KRB5KRB_ERR_GENERIC when the function is unavailable.
 */
kssl_krb5_get_credentials(krb5_context CO,
krb5_const krb5_flags F,
krb5_ccache CC,
krb5_creds * pCR,
krb5_creds ** ppCR)
{
if (!krb5_loaded)
load_krb5_dll();
if ( p_krb5_get_credentials )
return(p_krb5_get_credentials(CO,F,CC,pCR,ppCR));
else
return KRB5KRB_ERR_GENERIC;
}
| 0
|
226,239
|
/*
 * Assign serialization order to the children of a track fragment
 * ('traf') box.  The header children (tfhd, subsamples, tfdt) always
 * come first; the remaining order differs between CMAF-like layouts
 * (track runs first) and the default layout (sample groups and
 * auxiliary info before the runs).  Always returns GF_OK.
 */
GF_Err traf_box_size(GF_Box *s)
{
u32 pos=0;
GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *) s;
//Header first
gf_isom_check_position(s, (GF_Box *)ptr->tfhd, &pos);
gf_isom_check_position_list(s, ptr->sub_samples, &pos);
gf_isom_check_position(s, (GF_Box *)ptr->tfdt, &pos);
//cmaf-like
if (ptr->truns_first) {
gf_isom_check_position_list(s, ptr->TrackRuns, &pos);
gf_isom_check_position_list(s, ptr->sai_sizes, &pos);
gf_isom_check_position_list(s, ptr->sai_offsets, &pos);
//senc MUST be after saio in GPAC, as senc writing uses info from saio writing
gf_isom_check_position(s, (GF_Box *)ptr->sample_encryption, &pos);
gf_isom_check_position_list(s, ptr->sampleGroupsDescription, &pos);
gf_isom_check_position_list(s, ptr->sampleGroups, &pos);
//subsamples will be last
} else {
gf_isom_check_position_list(s, ptr->sampleGroupsDescription, &pos);
gf_isom_check_position_list(s, ptr->sampleGroups, &pos);
gf_isom_check_position_list(s, ptr->sai_sizes, &pos);
gf_isom_check_position_list(s, ptr->sai_offsets, &pos);
gf_isom_check_position(s, (GF_Box *)ptr->sample_encryption, &pos);
gf_isom_check_position_list(s, ptr->TrackRuns, &pos);
}
//when sdtp is present (smooth-like) write it after the trun box
gf_isom_check_position(s, (GF_Box *)ptr->sdtp, &pos);
//tfxd should be last ...
if (ptr->tfxd)
gf_isom_check_position(s, (GF_Box *)ptr->tfxd, &pos);
return GF_OK;
}
| 0
|
355,630
|
partial_unref(partial_T *pt)
{
    // Drop one reference from the partial; NULL is tolerated.
    if (pt == NULL)
	return;

    if (--pt->pt_refcount <= 0)
    {
	partial_free(pt);
	return;
    }

    // With exactly one reference left, the funcstack may be referenced only
    // through this partial; let it check whether it can be freed.
    if (pt->pt_refcount == 1 && pt->pt_funcstack != NULL)
	funcstack_check_refcount(pt->pt_funcstack);
}
| 0
|
437,688
|
/*
 * Read-modify-write helper for a 32-bit CX23888 IR register: keep only the
 * bits selected by @and_mask, then OR in @or_value.
 * Always returns 0 so callers can treat it like the other accessors.
 * NOTE(review): exact keep/clear semantics depend on the cx_andor() macro
 * definition (not visible here) — confirm against cx23885.h.
 */
static inline int cx23888_ir_and_or4(struct cx23885_dev *dev, u32 addr,
				     u32 and_mask, u32 or_value)
{
	cx_andor(addr, ~and_mask, or_value);
	return 0;
}
| 0
|
462,436
|
processWorkItem(epolld_t *epd)
{
	/* Dispatch one epoll readiness notification to the listener or session
	 * handler it belongs to, then re-arm the descriptor unless the session
	 * handler asked to stop polling it.
	 */
	int keep_polling = 1;

	if (epd->typ == epolld_lstn) {
		/* listener never stops polling (except server shutdown) */
		lstnActivity((ptcplstn_t *) epd->ptr);
	} else if (epd->typ == epolld_sess) {
		sessActivity((ptcpsess_t *) epd->ptr, &keep_polling);
	} else {
		errmsg.LogError(0, RS_RET_INTERNAL_ERROR,
			"error: invalid epolld_type_t %d after epoll", epd->typ);
	}

	if (keep_polling == 1) {
		epoll_ctl(epollfd, EPOLL_CTL_MOD, epd->sock, &(epd->ev));
	}
}
| 0
|
248,259
|
DLLIMPORT void *cfg_getnptr(cfg_t *cfg, const char *name, unsigned int index)
{
	/* Resolve the option by name, then fetch its index'th pointer value. */
	cfg_opt_t *opt = cfg_getopt(cfg, name);

	return cfg_opt_getnptr(opt, index);
}
| 0
|
437,716
|
static inline u16 count_to_clock_divider(unsigned int d)
{
if (d > RXCLK_RCD + 1)
d = RXCLK_RCD;
else if (d < 2)
d = 1;
else
d--;
return (u16) d;
}
| 0
|
233,897
|
*/
/* Free all entries of a WDDX deserialization stack: release each entry's
 * zval, its optional variable-name string, and the entry itself, then the
 * element array.  Always returns SUCCESS. */
static int wddx_stack_destroy(wddx_stack *stack)
{
	register int i;
	if (stack->elements) {
		for (i = 0; i < stack->top; i++) {
			zval_ptr_dtor(&((st_entry *)stack->elements[i])->data);
			if (((st_entry *)stack->elements[i])->varname) {
				efree(((st_entry *)stack->elements[i])->varname);
			}
			efree(stack->elements[i]);
		}
		efree(stack->elements);
	}
	return SUCCESS;
| 0
|
402,662
|
/*
 * Populate *sip with a PKCS#7 SignerInfo for an Authenticode signature:
 * CMSVersion 1, issuer+serial from cms->cert, digest/signature algorithm
 * identifiers, the signed attributes and the signature itself.
 * When cms->raw_signature is set, the pre-computed signed attributes and
 * signature are copied instead of being generated and signed here.
 * Returns 0 on success, -1 on any failure (errors logged via cms->log).
 */
generate_spc_signer_info(cms_context *cms, SpcSignerInfo *sip)
{
	if (!sip)
		return -1;
	SpcSignerInfo si;
	memset(&si, '\0', sizeof (si));
	if (SEC_ASN1EncodeInteger(cms->arena, &si.CMSVersion, 1) == NULL) {
		cms->log(cms, LOG_ERR, "could not encode CMSVersion: %s",
			PORT_ErrorToString(PORT_GetError()));
		goto err;
	}
	si.sid.signerType = signerTypeIssuerAndSerialNumber;
	si.sid.signerValue.iasn.issuer = cms->cert->derIssuer;
	si.sid.signerValue.iasn.serial = cms->cert->serialNumber;
	if (generate_algorithm_id(cms, &si.digestAlgorithm,
			digest_get_digest_oid(cms)) < 0)
		goto err;
	if (cms->raw_signature) {
		memcpy(&si.signedAttrs, cms->raw_signed_attrs,
			sizeof (si.signedAttrs));
		memcpy(&si.signature, cms->raw_signature, sizeof(si.signature));
	} else {
		if (generate_signed_attributes(cms, &si.signedAttrs) < 0)
			goto err;
		if (sign_blob(cms, &si.signature, &si.signedAttrs) < 0)
			goto err;
	}
	/* Rewrite the leading tag of the encoded signed attributes to
	 * context-specific [0] constructed, as the SignerInfo syntax requires
	 * (the encoder emitted a plain SET). */
	si.signedAttrs.data[0] = SEC_ASN1_CONTEXT_SPECIFIC | 0 |
				SEC_ASN1_CONSTRUCTED;
	if (generate_algorithm_id(cms, &si.signatureAlgorithm,
			digest_get_encryption_oid(cms)) < 0)
		goto err;
	if (generate_unsigned_attributes(cms, &si.unsignedAttrs) < 0)
		goto err;
	memcpy(sip, &si, sizeof(si));
	return 0;
err:
	return -1;
}
| 0
|
231,756
|
// Build the QUIC client transport parameters advertised by this test
// handshake: per-stream and connection flow-control windows, bidi/uni
// stream limits, idle timeout, max packet size, the optional
// active_connection_id_limit (only when configured), and the initial
// source connection id.
folly::Optional<ClientTransportParameters> getClientTransportParams()
override {
  std::vector<TransportParameter> transportParams;
  transportParams.push_back(encodeIntegerParameter(
      TransportParameterId::initial_max_stream_data_bidi_local,
      kDefaultStreamWindowSize));
  transportParams.push_back(encodeIntegerParameter(
      TransportParameterId::initial_max_stream_data_bidi_remote,
      kDefaultStreamWindowSize));
  transportParams.push_back(encodeIntegerParameter(
      TransportParameterId::initial_max_stream_data_uni,
      kDefaultStreamWindowSize));
  transportParams.push_back(encodeIntegerParameter(
      TransportParameterId::initial_max_streams_bidi,
      kDefaultMaxStreamsBidirectional));
  transportParams.push_back(encodeIntegerParameter(
      TransportParameterId::initial_max_streams_uni,
      kDefaultMaxStreamsUnidirectional));
  transportParams.push_back(encodeIntegerParameter(
      TransportParameterId::initial_max_data, kDefaultConnectionWindowSize));
  transportParams.push_back(encodeIntegerParameter(
      TransportParameterId::idle_timeout, kDefaultIdleTimeout.count()));
  transportParams.push_back(encodeIntegerParameter(
      TransportParameterId::max_packet_size, maxRecvPacketSize));
  if (clientActiveConnectionIdLimit_) {
    transportParams.push_back(encodeIntegerParameter(
        TransportParameterId::active_connection_id_limit,
        *clientActiveConnectionIdLimit_));
  }
  transportParams.push_back(encodeConnIdParameter(
      TransportParameterId::initial_source_connection_id,
      getTestConnectionId()));
  return ClientTransportParameters{std::move(transportParams)};
}
| 0
|
256,457
|
/* Register the array/* core C functions into the environment "env". */
void janet_lib_array(JanetTable *env) {
    JanetRegExt array_cfuns[] = {
        JANET_CORE_REG("array/new", cfun_array_new),
        JANET_CORE_REG("array/new-filled", cfun_array_new_filled),
        JANET_CORE_REG("array/fill", cfun_array_fill),
        JANET_CORE_REG("array/pop", cfun_array_pop),
        JANET_CORE_REG("array/peek", cfun_array_peek),
        JANET_CORE_REG("array/push", cfun_array_push),
        JANET_CORE_REG("array/ensure", cfun_array_ensure),
        JANET_CORE_REG("array/slice", cfun_array_slice),
        JANET_CORE_REG("array/concat", cfun_array_concat),
        JANET_CORE_REG("array/insert", cfun_array_insert),
        JANET_CORE_REG("array/remove", cfun_array_remove),
        JANET_CORE_REG("array/trim", cfun_array_trim),
        JANET_CORE_REG("array/clear", cfun_array_clear),
        JANET_REG_END
    };
    janet_core_cfuns_ext(env, NULL, array_cfuns);
}
| 0
|
384,785
|
/*
 * Implementation of readfile()/readblob(): read the file named by
 * argvars[0] into "rettv" as a List of lines, or as a Blob when
 * "always_blob" is set or flag "B" is given.
 * argvars[1] (optional): "b" = binary mode, "B" = return a Blob.
 * argvars[2] (optional): maximum line count; a negative value means keep
 * only the last -maxline lines.
 * On failure an empty List/Blob is returned and an error message is given.
 */
    read_file_or_blob(typval_T *argvars, typval_T *rettv, int always_blob)
{
    int		binary = FALSE;
    int		blob = always_blob;
    int		failed = FALSE;
    char_u	*fname;
    FILE	*fd;
    char_u	buf[(IOSIZE/256)*256];	// rounded to avoid odd + 1
    int		io_size = sizeof(buf);
    int		readlen;		// size of last fread()
    char_u	*prev	 = NULL;	// previously read bytes, if any
    long	prevlen  = 0;		// length of data in prev
    long	prevsize = 0;		// size of prev buffer
    long	maxline  = MAXLNUM;
    long	cnt	 = 0;
    char_u	*p;			// position in buf
    char_u	*start;			// start of current line

    if (argvars[1].v_type != VAR_UNKNOWN)
    {
	if (STRCMP(tv_get_string(&argvars[1]), "b") == 0)
	    binary = TRUE;
	if (STRCMP(tv_get_string(&argvars[1]), "B") == 0)
	    blob = TRUE;
	if (argvars[2].v_type != VAR_UNKNOWN)
	    maxline = (long)tv_get_number(&argvars[2]);
    }

    if ((blob ? rettv_blob_alloc(rettv) : rettv_list_alloc(rettv)) == FAIL)
	return;

    // Always open the file in binary mode, library functions have a mind of
    // their own about CR-LF conversion.
    fname = tv_get_string(&argvars[0]);

    if (mch_isdir(fname))
    {
	semsg(_(e_src_is_directory), fname);
	return;
    }
    if (*fname == NUL || (fd = mch_fopen((char *)fname, READBIN)) == NULL)
    {
	semsg(_(e_cant_open_file_str), *fname == NUL ? (char_u *)_("<empty>") : fname);
	return;
    }

    // Blob mode: slurp the whole file in one call and return early.
    if (blob)
    {
	if (read_blob(fd, rettv->vval.v_blob) == FAIL)
	{
	    semsg(_(e_cant_read_file_str), fname);
	    // An empty blob is returned on error.
	    blob_free(rettv->vval.v_blob);
	    rettv->vval.v_blob = NULL;
	}
	fclose(fd);
	return;
    }

    // List mode: read in io_size chunks, splitting on '\n'.  A line that
    // straddles chunks is accumulated in "prev" until its newline arrives.
    while (cnt < maxline || maxline < 0)
    {
	readlen = (int)fread(buf, 1, io_size, fd);

	// This for loop processes what was read, but is also entered at end
	// of file so that either:
	// - an incomplete line gets written
	// - a "binary" file gets an empty line at the end if it ends in a
	//   newline.
	for (p = buf, start = buf;
		p < buf + readlen || (readlen <= 0 && (prevlen > 0 || binary));
		++p)
	{
	    if (readlen <= 0 || *p == '\n')
	    {
		listitem_T  *li;
		char_u	    *s	= NULL;
		long_u	    len = p - start;

		// Finished a line.  Remove CRs before NL.
		if (readlen > 0 && !binary)
		{
		    while (len > 0 && start[len - 1] == '\r')
			--len;
		    // removal may cross back to the "prev" string
		    if (len == 0)
			while (prevlen > 0 && prev[prevlen - 1] == '\r')
			    --prevlen;
		}
		if (prevlen == 0)
		    s = vim_strnsave(start, len);
		else
		{
		    // Change "prev" buffer to be the right size.  This way
		    // the bytes are only copied once, and very long lines are
		    // allocated only once.
		    if ((s = vim_realloc(prev, prevlen + len + 1)) != NULL)
		    {
			mch_memmove(s + prevlen, start, len);
			s[prevlen + len] = NUL;
			prev = NULL; // the list will own the string
			prevlen = prevsize = 0;
		    }
		}
		if (s == NULL)
		{
		    do_outofmem_msg((long_u) prevlen + len + 1);
		    failed = TRUE;
		    break;
		}

		if ((li = listitem_alloc()) == NULL)
		{
		    vim_free(s);
		    failed = TRUE;
		    break;
		}
		li->li_tv.v_type = VAR_STRING;
		li->li_tv.v_lock = 0;
		li->li_tv.vval.v_string = s;
		list_append(rettv->vval.v_list, li);

		start = p + 1; // step over newline
		if ((++cnt >= maxline && maxline >= 0) || readlen <= 0)
		    break;
	    }
	    else if (*p == NUL)
		*p = '\n';	// NUL bytes are stored as NL in Vim strings
	    // Check for utf8 "bom"; U+FEFF is encoded as EF BB BF.  Do this
	    // when finding the BF and check the previous two bytes.
	    else if (*p == 0xbf && enc_utf8 && !binary)
	    {
		// Find the two bytes before the 0xbf.	If p is at buf, or buf
		// + 1, these may be in the "prev" string.
		char_u back1 = p >= buf + 1 ? p[-1]
				     : prevlen >= 1 ? prev[prevlen - 1] : NUL;
		char_u back2 = p >= buf + 2 ? p[-2]
			  : p == buf + 1 && prevlen >= 1 ? prev[prevlen - 1]
			  : prevlen >= 2 ? prev[prevlen - 2] : NUL;

		if (back2 == 0xef && back1 == 0xbb)
		{
		    char_u *dest = p - 2;

		    // Usually a BOM is at the beginning of a file, and so at
		    // the beginning of a line; then we can just step over it.
		    if (start == dest)
			start = p + 1;
		    else
		    {
			// have to shuffle buf to close gap
			int adjust_prevlen = 0;

			if (dest < buf)
			{
			    // must be 1 or 2
			    adjust_prevlen = (int)(buf - dest);
			    dest = buf;
			}
			if (readlen > p - buf + 1)
			    mch_memmove(dest, p + 1, readlen - (p - buf) - 1);
			readlen -= 3 - adjust_prevlen;
			prevlen -= adjust_prevlen;
			p = dest - 1;
		    }
		}
	    }
	} // for

	if (failed || (cnt >= maxline && maxline >= 0) || readlen <= 0)
	    break;
	if (start < p)
	{
	    // There's part of a line in buf, store it in "prev".
	    if (p - start + prevlen >= prevsize)
	    {
		// need bigger "prev" buffer
		char_u	*newprev;

		// A common use case is ordinary text files and "prev" gets a
		// fragment of a line, so the first allocation is made
		// small, to avoid repeatedly 'allocing' large and
		// 'reallocing' small.
		if (prevsize == 0)
		    prevsize = (long)(p - start);
		else
		{
		    long grow50pc = (prevsize * 3) / 2;
		    long growmin  = (long)((p - start) * 2 + prevlen);
		    prevsize = grow50pc > growmin ? grow50pc : growmin;
		}
		newprev = vim_realloc(prev, prevsize);
		if (newprev == NULL)
		{
		    do_outofmem_msg((long_u)prevsize);
		    failed = TRUE;
		    break;
		}
		prev = newprev;
	    }
	    // Add the line part to end of "prev".
	    mch_memmove(prev + prevlen, start, p - start);
	    prevlen += (long)(p - start);
	}
    } // while

    // For a negative line count use only the lines at the end of the file,
    // free the rest.
    if (!failed && maxline < 0)
	while (cnt > -maxline)
	{
	    listitem_remove(rettv->vval.v_list, rettv->vval.v_list->lv_first);
	    --cnt;
	}

    if (failed)
    {
	// an empty list is returned on error
	list_free(rettv->vval.v_list);
	rettv_list_alloc(rettv);
    }

    vim_free(prev);
    fclose(fd);
}
| 0
|
335,434
|
/*
 * Recognize the short Ex commands ":k" (set mark) and the two-letter
 * abbreviations of ":substitute" (":sc", ":sg", ":si", ":sI", ":sr" and
 * their variants).  These legacy forms are not allowed in Vim9 script.
 * When recognized, set *idx to the command and return TRUE.
 */
one_letter_cmd(char_u *p, cmdidx_T *idx)
{
    if (in_vim9script())
	return FALSE;
    if (*p == 'k')
    {
	*idx = CMD_k;
	return TRUE;
    }
    // "sc" counts as :substitute only when it cannot be the start of another
    // command such as "scs"/"scr..." (e.g. :scscope, :script); likewise "si"
    // must not start "sim"/"sil"/"sig" and "sr" must not start "sre...".
    // NOTE(review): the p[3]/p[4] probes mirror the upstream disambiguation
    // table — confirm against ex_cmds.h before changing.
    if (p[0] == 's'
	    && ((p[1] == 'c' && (p[2] == NUL || (p[2] != 's' && p[2] != 'r'
			&& (p[3] == NUL || (p[3] != 'i' && p[4] != 'p')))))
		|| p[1] == 'g'
		|| (p[1] == 'i' && p[2] != 'm' && p[2] != 'l' && p[2] != 'g')
		|| p[1] == 'I'
		|| (p[1] == 'r' && p[2] != 'e')))
    {
	*idx = CMD_substitute;
	return TRUE;
    }
    return FALSE;
}
| 0
|
247,720
|
// Verifies that when the server presents a certificate chain containing an
// intermediate CA, the digests (SHA-256/SHA-1) and serial number reported
// for the *client* certificate still match the expected leaf values.
TEST_P(SslSocketTest, GetCertDigestServerCertWithIntermediateCA) {
  const std::string client_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem"
)EOF";

  const std::string server_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem"
  validation_context:
    trusted_ca:
      filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
)EOF";

  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());
  testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH)
               .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH)
               .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL));
}
| 0
|
294,460
|
/* Change the UTC offset of a complex Date object in place.  The Julian day
 * number and day fraction are materialized first (presumably so they are
 * cached before the offset changes — confirm against callers), the cached
 * civil representation is invalidated, then the new offset is stored. */
set_of(union DateData *x, int of)
{
    assert(complex_dat_p(x));
    get_c_jd(x);
    get_c_df(x);
    clear_civil(x);
    x->c.of = of;
}
| 0
|
359,355
|
/*
 * "router bgp <1-65535>" — enter BGP configuration mode, creating the BGP
 * instance for the given (2-byte) AS number and optional view name if it
 * does not exist yet.  Reports configuration conflicts (multiple-instance
 * not enabled, AS mismatch, view/AS mismatch) as CLI warnings.
 */
DEFUN (router_bgp,
       router_bgp_cmd,
       "router bgp <1-65535>",
       ROUTER_STR
       BGP_STR
       AS_STR)
{
  int ret;
  as_t as;
  struct bgp *bgp;
  const char *name = NULL;

  /* Parse and range-check the AS argument. */
  VTY_GET_INTEGER_RANGE ("AS", as, argv[0], 1, 65535);

  if (argc == 2)
    name = argv[1];

  /* Look up or create the instance; non-zero ret means a conflict. */
  ret = bgp_get (&bgp, &as, name);
  switch (ret)
    {
    case BGP_ERR_MULTIPLE_INSTANCE_NOT_SET:
      vty_out (vty, "Please specify 'bgp multiple-instance' first%s",
	       VTY_NEWLINE);
      return CMD_WARNING;
    case BGP_ERR_AS_MISMATCH:
      vty_out (vty, "BGP is already running; AS is %d%s", as, VTY_NEWLINE);
      return CMD_WARNING;
    case BGP_ERR_INSTANCE_MISMATCH:
      vty_out (vty, "BGP view name and AS number mismatch%s", VTY_NEWLINE);
      vty_out (vty, "BGP instance is already running; AS is %d%s",
	       as, VTY_NEWLINE);
      return CMD_WARNING;
    }

  /* Success: switch the CLI into BGP node with this instance as context. */
  vty->node = BGP_NODE;
  vty->index = bgp;

  return CMD_SUCCESS;
}
| 0
|
225,103
|
// Checks that `dt` is one of the types permitted by the attr's
// allowed_values list.  Returns OK on a match; otherwise returns an
// InvalidArgument error naming the attr and listing the allowed types.
Status AllowedTypeValue(DataType dt, const OpDef::AttrDef& attr) {
  const AttrValue& allowed_values(attr.allowed_values());
  const auto& type_list = allowed_values.list();
  const int num_allowed = type_list.type_size();
  for (int i = 0; i < num_allowed; ++i) {
    if (type_list.type(i) == dt) {
      return Status::OK();
    }
  }
  // No match: build a comma-separated list of allowed type names for the
  // error message.
  string allowed_str;
  for (int i = 0; i < num_allowed; ++i) {
    if (!allowed_str.empty()) {
      strings::StrAppend(&allowed_str, ", ");
    }
    strings::StrAppend(&allowed_str, DataTypeString(type_list.type(i)));
  }
  return errors::InvalidArgument(
      "Value for attr '", attr.name(), "' of ", DataTypeString(dt),
      " is not in the list of allowed values: ", allowed_str);
}
| 0
|
512,640
|
// Build the logical negation of this "!=" item: NOT (a != b) is (a = b).
// Used by the optimizer when pushing NOT down into comparisons.
Item *Item_func_ne::negated_item(THD *thd) /* a != b -> a = b */
{
  return new (thd->mem_root) Item_func_eq(thd, args[0], args[1]);
}
| 0
|
491,950
|
/*
 * Common fsync implementation for FUSE files and directories.
 * Flushes dirty pages and outstanding writes, then sends FUSE_FSYNC (or
 * FUSE_FSYNCDIR when "isdir") to the userspace filesystem.  An -ENOSYS
 * reply marks the operation as unsupported on this connection so future
 * calls short-circuit to success.
 * Returns 0 on success or a negative errno.
 */
int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	/* Skip entirely if the server already told us it lacks support. */
	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		/* Remember the lack of support; treat this call as success. */
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}
| 0
|
344,267
|
/*
 * Build extra variable information for an error message about value 'o'
 * (e.g. "local 'x'", "upvalue 'y'").  Only Lua frames carry the debug info
 * needed: first 'o' is checked as an upvalue, then as a stack register.
 * For C frames, or when nothing matches, kind/name stay NULL and
 * formatvarinfo() is left to produce its no-info form.
 */
static const char *varinfo (lua_State *L, const TValue *o) {
  CallInfo *ci = L->ci;
  const char *name = NULL;  /* to avoid warnings */
  const char *kind = NULL;
  if (isLua(ci)) {
    kind = getupvalname(ci, o, &name);  /* check whether 'o' is an upvalue */
    if (!kind && isinstack(ci, o))  /* no? try a register */
      kind = getobjname(ci_func(ci)->p, currentpc(ci),
                        cast_int(cast(StkId, o) - (ci->func + 1)), &name);
  }
  return formatvarinfo(L, kind, name);
}
| 0
|
462,559
|
// Flag (b == true) or unflag the item identified by `guid` as deleted in
// the persistent RSS cache.
void controller::mark_deleted(const std::string& guid, bool b) {
	rsscache->mark_item_deleted(guid, b);
}
| 0
|
221,694
|
Socket *Socket::accept() {
	// Accept the next pending connection on the listening socket and wrap
	// it in a freshly allocated Socket carrying the local listening port.
	// Returns NULL on failure, with the failing errno saved in s_errno.
	peer_adr_length = sizeof(struct sockaddr_in);
	s_errno = 0;
	errno = 0;

	int newfd = ::accept(sck, (struct sockaddr *) &peer_adr, &peer_adr_length);
	if (newfd <= 0) {
		s_errno = errno;
		return NULL;
	}

	Socket *s = new Socket(newfd, my_adr, peer_adr);
	s->setPort(my_port);
	return s;
}
| 0
|
386,550
|
// Hand a parsed LEADER (dimension arrow) entity to the creation interface:
// first the leader attributes collected from the group codes, then every
// buffered vertex (maxLeaderVertices of them — NOTE(review): this uses the
// buffer capacity counter, not group code 76; confirm they always agree),
// and finally the end-of-entity notification.
void DL_Dxf::addLeader(DL_CreationInterface* creationInterface) {
    // leader (arrow)
    DL_LeaderData le(
        // arrow head flag
        getIntValue(71, 1),
        // leader path type
        getIntValue(72, 0),
        // Leader creation flag
        getIntValue(73, 3),
        // Hookline direction flag
        getIntValue(74, 1),
        // Hookline flag
        getIntValue(75, 0),
        // Text annotation height
        getRealValue(40, 1.0),
        // Text annotation width
        getRealValue(41, 1.0),
        // Number of vertices in leader
        getIntValue(76, 0)
    );
    creationInterface->addLeader(le);

    for (int i=0; i<maxLeaderVertices; i++) {
        DL_LeaderVertexData d(leaderVertices[i*3],
                              leaderVertices[i*3+1],
                              leaderVertices[i*3+2]);
        creationInterface->addLeaderVertex(d);
    }
    creationInterface->endEntity();
}
| 0
|
292,605
|
/* Returns 1 when the Ragel HTTP parser state machine has entered its
 * error state, 0 otherwise. */
int puma_parser_has_error(puma_parser *parser) {
  if (parser->cs == puma_parser_error) {
    return 1;
  }
  return 0;
}
| 0
|
484,754
|
/*
 * netpoll hook: simulate an interrupt on every TX queue so netpoll can make
 * progress without real interrupts.  A backend marked broken is skipped.
 */
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;

	if (info->broken)
		return;

	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
| 0
|
246,207
|
// Shape function for SparseCountSparseOutput: input 0 must be a rank-2
// indices matrix; the op emits a sparse result whose number of values is
// unknown at graph time: indices [?, rank], values [?], dense_shape [rank].
Status SparseCountSparseOutputShapeFn(InferenceContext *c) {
  ShapeHandle unused;
  // Validate that the indices input is a matrix.
  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
  auto rank = c->Dim(c->input(0), 1);
  auto nvals = c->UnknownDim();
  c->set_output(0, c->Matrix(nvals, rank));  // out.indices
  c->set_output(1, c->Vector(nvals));        // out.values
  c->set_output(2, c->Vector(rank));         // out.dense_shape
  return Status::OK();
}
| 0
|
349,253
|
/*
 * Read "blocks" 32-bit block-size entries of a file inode's block list from
 * the inode table at (start, offset) into block_list.  When the filesystem
 * byte order differs from the host ("swap"), the raw words are read into a
 * temporary buffer and byte-swapped.  Fatal errors abort via EXIT_UNSQUASH.
 */
static void read_block_list(unsigned int *block_list, long long start,
	unsigned int offset, int blocks)
{
	int res;

	TRACE("read_block_list: blocks %d\n", blocks);

	if(swap) {
		char *block_ptr = malloc(blocks * sizeof(unsigned int));

		if(block_ptr == NULL)
			MEM_ERROR();

		res = read_inode_data(block_ptr, &start, &offset, blocks * sizeof(unsigned int));
		if(res == FALSE)
			EXIT_UNSQUASH("read_block_list: failed to read "
				"inode index %lld:%d\n", start, offset);

		SQUASHFS_SWAP_INTS_3(block_list, block_ptr, blocks);
		free(block_ptr);
	} else {
		res = read_inode_data(block_list, &start, &offset, blocks * sizeof(unsigned int));
		if(res == FALSE)
			EXIT_UNSQUASH("read_block_list: failed to read "
				"inode index %lld:%d\n", start, offset);
	}
}
| 0
|
242,637
|
/*
 * Main process callback of the ISOBMFF demuxer filter.
 * One invocation: (1) drains input packets (feeding the in-memory movie in
 * mem_load_mode, or retrying a failed init-segment load), (2) refreshes the
 * fragmented-movie state when new data arrived, (3) for every playing
 * channel pulls up to 50 samples, converts each to an output packet with
 * timing/SAP/dependency/crypto/fragment properties, and (4) propagates EOS
 * or forced-end conditions.
 * Returns GF_OK, GF_EOS when nothing is active, or a stored error.
 */
static GF_Err isoffin_process(GF_Filter *filter)
{
	ISOMReader *read = gf_filter_get_udta(filter);
	u32 i, count = gf_list_count(read->channels);
	Bool is_active = GF_FALSE;
	Bool in_is_eos = GF_FALSE;
	Bool check_forced_end = GF_FALSE;
	Bool has_new_data = GF_FALSE;
	u64 min_offset_plus_one = 0;
	u32 nb_forced_end=0;
	if (read->in_error)
		return read->in_error;

	/* --- Phase 1: drain the input PID (if any) --- */
	if (read->pid) {
		Bool fetch_input = GF_TRUE;

		//we failed at loading the init segment during a dash switch, retry
		if (!read->is_partial_download && !read->mem_load_mode && (read->moov_not_loaded==2) ) {
			isoffin_configure_pid(filter, read->pid, GF_FALSE);
			if (read->moov_not_loaded) return GF_OK;
		}
		//memory store full: stop fetching until samples are consumed
		if (read->mem_load_mode==2) {
			if (!read->force_fetch && read->mem_blob.size > read->mstore_size) {
				fetch_input = GF_FALSE;
			}
			read->force_fetch = GF_FALSE;
		}
		while (fetch_input) {
			GF_FilterPacket *pck = gf_filter_pid_get_packet(read->pid);
			if (!pck) {
				//we issued a seek, wait for the first packet to be received before fetching channels
				//otherwise we could end up reading from the wrong cache
				if (read->wait_for_source) {
					//something went wrong during the seek request
					if (gf_filter_pid_is_eos(read->pid))
						return GF_EOS;
					return GF_OK;
				}
				break;
			}
			read->wait_for_source = GF_FALSE;

			if (read->mem_load_mode) {
				u32 data_size;
				const u8 *pck_data = gf_filter_pck_get_data(pck, &data_size);
				isoffin_push_buffer(filter, read, pck_data, data_size);
			}
			//we just had a switch but init seg is not completely done: input packet is only a part of the init, drop it
			else if (read->moov_not_loaded==2) {
				gf_filter_pid_drop_packet(read->pid);
				return GF_OK;
			}
			gf_filter_pid_drop_packet(read->pid);
			has_new_data = GF_TRUE;
			if (read->in_error)
				return read->in_error;
		}
		if (gf_filter_pid_is_eos(read->pid)) {
			read->input_loaded = GF_TRUE;
			in_is_eos = GF_TRUE;
		}
		if (read->input_is_stop) {
			read->input_loaded = GF_TRUE;
			in_is_eos = GF_TRUE;
			read->input_is_stop = GF_FALSE;
		}
		if (!read->frag_type && read->input_loaded) {
			in_is_eos = GF_TRUE;
		}
		//segment is invalid, wait for eos on input an send eos on all channels
		if (read->invalid_segment) {
			if (!in_is_eos) return GF_OK;
			read->invalid_segment = GF_FALSE;

			for (i=0; i<count; i++) {
				ISOMChannel *ch = gf_list_get(read->channels, i);
				if (!ch->playing) {
					continue;
				}
				if (!ch->eos_sent) {
					ch->eos_sent = GF_TRUE;
					gf_filter_pid_set_eos(ch->pid);
				}
			}
			read->eos_signaled = GF_TRUE;
			return GF_EOS;
		}
	} else if (read->extern_mov) {
		in_is_eos = GF_TRUE;
		read->input_loaded = GF_TRUE;
	}
	//deferred setup: the moov box was not available at configure time
	if (read->moov_not_loaded==1) {
		if (read->mem_load_mode)
			return GF_OK;
		read->moov_not_loaded = GF_FALSE;
		return isoffin_setup(filter, read);
	}

	/* --- Phase 2: refresh fragmented-movie parsing state on new data --- */
	if (read->refresh_fragmented) {
		const GF_PropertyValue *prop;

		if (in_is_eos) {
			read->refresh_fragmented = GF_FALSE;
		} else {
			prop = gf_filter_pid_get_property(read->pid, GF_PROP_PID_FILE_CACHED);
			if (prop && prop->value.boolean)
				read->refresh_fragmented = GF_FALSE;
		}

		if (has_new_data) {
			u64 bytesMissing=0;
			GF_Err e;
			const char *new_url = NULL;
			prop = gf_filter_pid_get_property(read->pid, GF_PROP_PID_FILEPATH);
			if (prop) new_url = prop->value.string;

			e = gf_isom_refresh_fragmented(read->mov, &bytesMissing, new_url);

			if (e && (e!= GF_ISOM_INCOMPLETE_FILE)) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[IsoMedia] Failed to refresh current segment: %s\n", gf_error_to_string(e) ));
				read->refresh_fragmented = GF_FALSE;
			} else {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[IsoMedia] Refreshing current segment at UTC "LLU" - "LLU" bytes still missing - input is EOS %d\n", gf_net_get_utc(), bytesMissing, in_is_eos));
			}

			if (!read->refresh_fragmented && (e==GF_ISOM_INCOMPLETE_FILE)) {
				GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[IsoMedia] Incomplete Segment received - "LLU" bytes missing but EOF found\n", bytesMissing ));
			}

#ifndef GPAC_DISABLE_LOG
			if (gf_log_tool_level_on(GF_LOG_DASH, GF_LOG_DEBUG)) {
				for (i=0; i<count; i++) {
					ISOMChannel *ch = gf_list_get(read->channels, i);
					GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[IsoMedia] refresh track %d fragment - cur sample %d - new sample count %d\n", ch->track, ch->sample_num, gf_isom_get_sample_count(ch->owner->mov, ch->track) ));
				}
			}
#endif
			isor_check_producer_ref_time(read);
			if (!read->frag_type)
				read->refresh_fragmented = GF_FALSE;
		}
	}

	/* --- Phase 3: pull samples from each playing channel --- */
	for (i=0; i<count; i++) {
		u8 *data;
		u32 nb_pck=50;
		ISOMChannel *ch;
		ch = gf_list_get(read->channels, i);
		if (!ch->playing) {
			nb_forced_end++;
			continue;
		}
		//eos not sent on this channel, we are active
		if (!ch->eos_sent)
			is_active = GF_TRUE;

		while (nb_pck) {
			ch->sample_data_offset = 0;
			if (!read->full_segment_flush && gf_filter_pid_would_block(ch->pid) )
				break;

			if (ch->item_id) {
				isor_reader_get_sample_from_item(ch);
			} else {
				isor_reader_get_sample(ch);
			}

			//sample-description filtering requested: skip non-matching samples
			if (read->stsd && (ch->last_sample_desc_index != read->stsd) && ch->sample) {
				isor_reader_release_sample(ch);
				continue;
			}
			if (ch->sample) {
				u32 sample_dur;
				u8 dep_flags;
				u8 *subs_buf;
				u32 subs_buf_size;
				GF_FilterPacket *pck;
				if (ch->needs_pid_reconfig) {
					isor_update_channel_config(ch);
					ch->needs_pid_reconfig = GF_FALSE;
				}

				//we have at least two samples, update GF_PROP_PID_HAS_SYNC if needed
				if (ch->check_has_rap && (gf_isom_get_sample_count(ch->owner->mov, ch->track)>1) && (gf_isom_has_sync_points(ch->owner->mov, ch->track)==1)) {
					ch->check_has_rap = GF_FALSE;
					ch->has_rap = GF_TRUE;
					gf_filter_pid_set_property(ch->pid, GF_PROP_PID_HAS_SYNC, &PROP_BOOL(ch->has_rap) );
				}

				//strip param sets from payload, trigger reconfig if needed
				isor_reader_check_config(ch);

				if (read->nodata) {
					pck = gf_filter_pck_new_shared(ch->pid, NULL, ch->sample->dataLength, NULL);
					if (!pck) return GF_OUT_OF_MEM;
				} else {
					pck = gf_filter_pck_new_alloc(ch->pid, ch->sample->dataLength, &data);
					if (!pck) return GF_OUT_OF_MEM;

					memcpy(data, ch->sample->data, ch->sample->dataLength);
				}
				gf_filter_pck_set_dts(pck, ch->dts);
				gf_filter_pck_set_cts(pck, ch->cts);
				if (ch->sample->IsRAP==-1) {
					gf_filter_pck_set_sap(pck, GF_FILTER_SAP_1);
					ch->redundant = 1;
				} else {
					gf_filter_pck_set_sap(pck, (GF_FilterSAPType) ch->sample->IsRAP);
				}

				if (ch->sap_3)
					gf_filter_pck_set_sap(pck, GF_FILTER_SAP_3);
				else if (ch->sap_4_type) {
					gf_filter_pck_set_sap(pck, (ch->sap_4_type==GF_ISOM_SAMPLE_PREROLL) ? GF_FILTER_SAP_4_PROL : GF_FILTER_SAP_4);
					gf_filter_pck_set_roll_info(pck, ch->roll);
				}
				sample_dur = ch->au_duration;
				if (ch->sample->nb_pack)
					sample_dur *= ch->sample->nb_pack;
				gf_filter_pck_set_duration(pck, sample_dur);
				gf_filter_pck_set_seek_flag(pck, ch->seek_flag);

				//for now we only signal xPS mask for non-sap
				if (ch->xps_mask && !gf_filter_pck_get_sap(pck) ) {
					gf_filter_pck_set_property(pck, GF_PROP_PCK_XPS_MASK, &PROP_UINT(ch->xps_mask) );
				}

				//pack ISOBMFF leading/depends/depended/redundant into 2-bit fields
				dep_flags = ch->isLeading;
				dep_flags <<= 2;
				dep_flags |= ch->dependsOn;
				dep_flags <<= 2;
				dep_flags |= ch->dependedOn;
				dep_flags <<= 2;
				dep_flags |= ch->redundant;
				if (dep_flags)
					gf_filter_pck_set_dependency_flags(pck, dep_flags);

				gf_filter_pck_set_crypt_flags(pck, ch->pck_encrypted ? GF_FILTER_PCK_CRYPT : 0);
				gf_filter_pck_set_seq_num(pck, ch->sample_num);

				subs_buf = gf_isom_sample_get_subsamples_buffer(read->mov, ch->track, ch->sample_num, &subs_buf_size);
				if (subs_buf) {
					gf_filter_pck_set_property(pck, GF_PROP_PCK_SUBS, &PROP_DATA_NO_COPY(subs_buf, subs_buf_size) );
				}

				if (ch->sai_buffer && ch->pck_encrypted) {
					assert(ch->sai_buffer_size);
					gf_filter_pck_set_property(pck, GF_PROP_PCK_CENC_SAI, &PROP_DATA(ch->sai_buffer, ch->sai_buffer_size) );
				}

				//fragment-boundary signaling for dashers downstream
				if (read->sigfrag) {
					GF_ISOFragmentBoundaryInfo finfo;
					if (gf_isom_sample_is_fragment_start(read->mov, ch->track, ch->sample_num, &finfo) ) {
						u64 start=0;
						u32 traf_start = finfo.seg_start_plus_one ? 2 : 1;

						if (finfo.seg_start_plus_one)
							gf_filter_pck_set_property(pck, GF_PROP_PCK_CUE_START, &PROP_BOOL(GF_TRUE));

						gf_filter_pck_set_property(pck, GF_PROP_PCK_FRAG_START, &PROP_UINT(traf_start));

						start = finfo.frag_start;
						if (finfo.seg_start_plus_one) start = finfo.seg_start_plus_one-1;
						gf_filter_pck_set_property(pck, GF_PROP_PCK_FRAG_RANGE, &PROP_FRAC64_INT(start, finfo.mdat_end));
						if (finfo.moof_template) {
							gf_filter_pck_set_property(pck, GF_PROP_PCK_MOOF_TEMPLATE, &PROP_DATA((u8 *)finfo.moof_template, finfo.moof_template_size));
						}
						if (finfo.sidx_end) {
							gf_filter_pck_set_property(pck, GF_PROP_PCK_SIDX_RANGE, &PROP_FRAC64_INT(finfo.sidx_start , finfo.sidx_end));
						}

						if (read->seg_name_changed) {
							const GF_PropertyValue *p = gf_filter_pid_get_property(read->pid, GF_PROP_PID_URL);
							read->seg_name_changed = GF_FALSE;
							if (p && p->value.string) {
								gf_filter_pck_set_property(pck, GF_PROP_PID_URL, &PROP_STRING(p->value.string));
							}
						}
					}
				}
				if (ch->sender_ntp) {
					gf_filter_pck_set_property(pck, GF_PROP_PCK_SENDER_NTP, &PROP_LONGUINT(ch->sender_ntp));
					if (ch->ntp_at_server_ntp) {
						gf_filter_pck_set_property(pck, GF_PROP_PCK_RECEIVER_NTP, &PROP_LONGUINT(ch->ntp_at_server_ntp));
					}
				}
				ch->eos_sent = GF_FALSE;

				//this might not be the true end of stream
				if ((ch->streamType==GF_STREAM_AUDIO) && (ch->sample_num == gf_isom_get_sample_count(read->mov, ch->track))) {
					gf_filter_pck_set_property(pck, GF_PROP_PCK_END_RANGE, &PROP_BOOL(GF_TRUE));
				}

				gf_filter_pck_send(pck);
				isor_reader_release_sample(ch);

				ch->last_valid_sample_data_offset = ch->sample_data_offset;
				nb_pck--;
			} else if (ch->last_state==GF_EOS) {
				if (ch->playing == 2) {
					if (in_is_eos) {
						ch->playing = GF_FALSE;
					} else {
						nb_forced_end++;
						check_forced_end = GF_TRUE;
					}
				}
				if (in_is_eos && !ch->eos_sent) {
					void *tfrf;
					const void *gf_isom_get_tfrf(GF_ISOFile *movie, u32 trackNumber);

					ch->eos_sent = GF_TRUE;
					read->eos_signaled = GF_TRUE;

					//smooth-streaming tfrf box forwarded as PID info when present
					tfrf = (void *) gf_isom_get_tfrf(read->mov, ch->track);
					if (tfrf) {
						gf_filter_pid_set_info_str(ch->pid, "smooth_tfrf", &PROP_POINTER(tfrf) );
						ch->last_has_tfrf = GF_TRUE;
					} else if (ch->last_has_tfrf) {
						gf_filter_pid_set_info_str(ch->pid, "smooth_tfrf", NULL);
						ch->last_has_tfrf = GF_FALSE;
					}

					gf_filter_pid_set_eos(ch->pid);
				}
				break;
			} else if (ch->last_state==GF_ISOM_INVALID_FILE) {
				if (!ch->eos_sent) {
					ch->eos_sent = GF_TRUE;
					read->eos_signaled = GF_TRUE;
					gf_filter_pid_set_eos(ch->pid);
				}
				return ch->last_state;
			} else {
				read->force_fetch = GF_TRUE;
				break;
			}
		}
		//track the smallest consumed file offset across channels for mem purge
		if (!min_offset_plus_one || (min_offset_plus_one - 1 > ch->last_valid_sample_data_offset))
			min_offset_plus_one = 1 + ch->last_valid_sample_data_offset;
	}
	if (read->mem_load_mode && min_offset_plus_one) {
		isoffin_purge_mem(read, min_offset_plus_one-1);
	}

	/* --- Phase 4: end-of-playback handling --- */
	//we reached end of playback due to play range request, we must send eos - however for safety reason with DASH, we first need to cancel the input
	if (read->pid && check_forced_end && (nb_forced_end==count)) {
		//abort input
		GF_FilterEvent evt;
		GF_FEVT_INIT(evt, GF_FEVT_STOP, read->pid);
		gf_filter_pid_send_event(read->pid, &evt);
	}

	if (!is_active) {
		return GF_EOS;
	}
	//if (in_is_eos)
	//	gf_filter_ask_rt_reschedule(filter, 1);
	return GF_OK;
}
| 0
|
316,992
|
/*
 * LSM hook: vet every netlink message in an outgoing skb on socket "sk".
 * Each nlmsg_type is mapped (per socket class) to a SELinux permission and
 * checked against the sender.  Messages with junk length fields are skipped
 * (mirroring netlink_rcv_skb()); unknown message types are permitted unless
 * the policy is enforcing and denies unknown classes/permissions.
 * Returns 0 to allow the send, or a negative errno to reject it.
 */
static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;
	unsigned int msg_len;
	unsigned int data_len = skb->len;
	unsigned char *data = skb->data;
	struct nlmsghdr *nlh;
	struct sk_security_struct *sksec = sk->sk_security;
	u16 sclass = sksec->sclass;
	u32 perm;

	while (data_len >= nlmsg_total_size(0)) {
		nlh = (struct nlmsghdr *)data;

		/* NOTE: the nlmsg_len field isn't reliably set by some netlink
		 *       users which means we can't reject skb's with bogus
		 *       length fields; our solution is to follow what
		 *       netlink_rcv_skb() does and simply skip processing at
		 *       messages with length fields that are clearly junk
		 */
		if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
			return 0;

		rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm);
		if (rc == 0) {
			rc = sock_has_perm(sk, perm);
			if (rc)
				return rc;
		} else if (rc == -EINVAL) {
			/* -EINVAL is a missing msg/perm mapping */
			pr_warn_ratelimited("SELinux: unrecognized netlink"
				" message: protocol=%hu nlmsg_type=%hu sclass=%s"
				" pid=%d comm=%s\n",
				sk->sk_protocol, nlh->nlmsg_type,
				secclass_map[sclass - 1].name,
				task_pid_nr(current), current->comm);
			if (enforcing_enabled(&selinux_state) &&
			    !security_get_allow_unknown(&selinux_state))
				return rc;
			rc = 0;
		} else if (rc == -ENOENT) {
			/* -ENOENT is a missing socket/class mapping, ignore */
			rc = 0;
		} else {
			return rc;
		}

		/* move to the next message after applying netlink padding */
		msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msg_len >= data_len)
			return 0;

		data_len -= msg_len;
		data += msg_len;
	}

	return rc;
}
|
254,906
|
// Optimize the $group stage in place: fold each _id expression and each
// accumulator's initializer/argument expressions, then return this stage
// unchanged (no stage elimination is attempted here).
intrusive_ptr<DocumentSource> DocumentSourceGroup::optimize() {
    // TODO: If all _idExpressions are ExpressionConstants after optimization, then we know there
    // will be only one group. We should take advantage of that to avoid going through the hash
    // table.
    for (size_t i = 0; i < _idExpressions.size(); i++) {
        _idExpressions[i] = _idExpressions[i]->optimize();
    }

    for (auto&& accumulatedField : _accumulatedFields) {
        accumulatedField.expr.initializer = accumulatedField.expr.initializer->optimize();
        accumulatedField.expr.argument = accumulatedField.expr.argument->optimize();
    }

    return this;
}
| 0
|
414,928
|
/*
 * xmlXPathFormatNumber:
 * Format @number into @buffer (at most @buffersize bytes, including the
 * terminating NUL) following XPath number-to-string rules:
 *   - +/- infinity  -> "Infinity" / "-Infinity"
 *   - NaN           -> "NaN"
 *   - negative zero -> "0"
 *   - integral      -> plain decimal, no fractional part
 *   - otherwise     -> regular or scientific notation with trailing
 *                      fractional zeroes stripped
 * Output is truncated (still NUL-terminated) if it does not fit.
 */
xmlXPathFormatNumber(double number, char buffer[], int buffersize)
{
    switch (xmlXPathIsInf(number)) {
    case 1:
	if (buffersize > (int)sizeof("Infinity"))
	    snprintf(buffer, buffersize, "Infinity");
	break;
    case -1:
	if (buffersize > (int)sizeof("-Infinity"))
	    snprintf(buffer, buffersize, "-Infinity");
	break;
    default:
	if (xmlXPathIsNaN(number)) {
	    if (buffersize > (int)sizeof("NaN"))
		snprintf(buffer, buffersize, "NaN");
	} else if (number == 0 && xmlXPathGetSign(number) != 0) {
	    /* Negative zero compares equal to 0 but has the sign bit set;
	     * XPath prints it as plain "0". */
	    snprintf(buffer, buffersize, "0");
	} else if (number == ((int) number)) {
	    /* Integral value: emit digits only, no decimal point.
	     * NOTE(review): the (int) cast is undefined for values outside
	     * int range — confirm callers never pass huge integral doubles
	     * down this branch. */
	    char work[30];
	    char *ptr, *cur;
	    int value = (int) number;

	    ptr = &buffer[0];
	    if (value == 0) {
		*ptr++ = '0';
	    } else {
		snprintf(work, 29, "%d", value);
		cur = &work[0];
		/* Copy digits, never writing past buffersize. */
		while ((*cur) && (ptr - buffer < buffersize)) {
		    *ptr++ = *cur++;
		}
	    }
	    /* Terminate; if the buffer was exactly filled, overwrite the
	     * last digit with the NUL so the result is always a string. */
	    if (ptr - buffer < buffersize) {
		*ptr = 0;
	    } else if (buffersize > 0) {
		ptr--;
		*ptr = 0;
	    }
	} else {
	    /*
	      For the dimension of work,
	      DBL_DIG is number of significant digits
	      EXPONENT is only needed for "scientific notation"
	      3 is sign, decimal point, and terminating zero
	      LOWER_DOUBLE_EXP is max number of leading zeroes in fraction
	      Note that this dimension is slightly (a few characters)
	      larger than actually necessary.
	    */
	    char work[DBL_DIG + EXPONENT_DIGITS + 3 + LOWER_DOUBLE_EXP];
	    int integer_place, fraction_place;
	    char *ptr;
	    char *after_fraction;
	    double absolute_value;
	    int size;

	    absolute_value = fabs(number);

	    /*
	     * First choose format - scientific or regular floating point.
	     * In either case, result is in work, and after_fraction points
	     * just past the fractional part.
	     */
	    if ( ((absolute_value > UPPER_DOUBLE) ||
		  (absolute_value < LOWER_DOUBLE)) &&
		 (absolute_value != 0.0) ) {
		/* Use scientific notation */
		integer_place = DBL_DIG + EXPONENT_DIGITS + 1;
		fraction_place = DBL_DIG - 1;
		size = snprintf(work, sizeof(work),"%*.*e",
				integer_place, fraction_place, number);
		/* Rewind size to the 'e' so trailing-zero stripping below
		 * operates on the mantissa only. */
		while ((size > 0) && (work[size] != 'e')) size--;
	    }
	    else {
		/* Use regular notation */
		if (absolute_value > 0.0) {
		    integer_place = (int)log10(absolute_value);
		    if (integer_place > 0)
			fraction_place = DBL_DIG - integer_place - 1;
		    else
			fraction_place = DBL_DIG - integer_place;
		} else {
		    fraction_place = 1;
		}
		size = snprintf(work, sizeof(work), "%0.*f",
				fraction_place, number);
	    }

	    /* Remove leading spaces sometimes inserted by snprintf */
	    while (work[0] == ' ') {
		for (ptr = &work[0];(ptr[0] = ptr[1]);ptr++);
		size--;
	    }

	    /* Remove fractional trailing zeroes */
	    after_fraction = work + size;
	    ptr = after_fraction;
	    while (*(--ptr) == '0')
		;
	    if (*ptr != '.')
		ptr++;
	    /* Splice the tail (exponent, if any) over the stripped zeroes. */
	    while ((*ptr++ = *after_fraction++) != 0);

	    /* Finally copy result back to caller */
	    size = strlen(work) + 1;
	    if (size > buffersize) {
		work[buffersize - 1] = 0;
		size = buffersize;
	    }
	    memmove(buffer, work, size);
	}
	break;
    }
}
| 0
|
195,085
|
setup_seccomp (FlatpakBwrap *bwrap,
               const char   *arch,
               gulong        allowed_personality,
               FlatpakRunFlags run_flags,
               GError      **error)
{
  gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0;
  gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0;

  __attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL;

  /**** BEGIN NOTE ON CODE SHARING
   *
   * There are today a number of different Linux container
   * implementations.  That will likely continue for long into the
   * future.  But we can still try to share code, and it's important
   * to do so because it affects what library and application writers
   * can do, and we should support code portability between different
   * container tools.
   *
   * This syscall blocklist is copied from linux-user-chroot, which was in turn
   * clearly influenced by the Sandstorm.io blocklist.
   *
   * If you make any changes here, I suggest sending the changes along
   * to other sandbox maintainers.  Using the libseccomp list is also
   * an appropriate venue:
   * https://groups.google.com/forum/#!forum/libseccomp
   *
   * A non-exhaustive list of links to container tooling that might
   * want to share this blocklist:
   *
   *  https://github.com/sandstorm-io/sandstorm
   *    in src/sandstorm/supervisor.c++
   *  https://github.com/flatpak/flatpak.git
   *    in common/flatpak-run.c
   *  https://git.gnome.org/browse/linux-user-chroot
   *    in src/setup-seccomp.c
   *
   **** END NOTE ON CODE SHARING
   */
  struct
  {
    int                  scall;
    int                  errnum;
    struct scmp_arg_cmp *arg;
  } syscall_blocklist[] = {
    /* Block dmesg */
    {SCMP_SYS (syslog), EPERM},
    /* Useless old syscall */
    {SCMP_SYS (uselib), EPERM},
    /* Don't allow disabling accounting */
    {SCMP_SYS (acct), EPERM},
    /* 16-bit code is unnecessary in the sandbox, and modify_ldt is a
       historic source of interesting information leaks. */
    {SCMP_SYS (modify_ldt), EPERM},
    /* Don't allow reading current quota use */
    {SCMP_SYS (quotactl), EPERM},

    /* Don't allow access to the kernel keyring */
    {SCMP_SYS (add_key), EPERM},
    {SCMP_SYS (keyctl), EPERM},
    {SCMP_SYS (request_key), EPERM},

    /* Scary VM/NUMA ops */
    {SCMP_SYS (move_pages), EPERM},
    {SCMP_SYS (mbind), EPERM},
    {SCMP_SYS (get_mempolicy), EPERM},
    {SCMP_SYS (set_mempolicy), EPERM},
    {SCMP_SYS (migrate_pages), EPERM},

    /* Don't allow subnamespace setups: */
    {SCMP_SYS (unshare), EPERM},
    {SCMP_SYS (mount), EPERM},
    {SCMP_SYS (pivot_root), EPERM},
#if defined(__s390__) || defined(__s390x__) || defined(__CRIS__)
    /* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack
     * and flags arguments are reversed so the flags come second */
    {SCMP_SYS (clone), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#else
    /* Normally the flags come first */
    {SCMP_SYS (clone), EPERM, &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#endif

    /* seccomp can't look into clone3()'s struct clone_args to check
     * whether the flags are OK, so we have no choice but to block
     * clone3() entirely.  Return ENOSYS so user-space falls back to
     * the filtered clone().  Without this, clone3(CLONE_NEWUSER)
     * bypasses the clone() rule above (CVE-2021-41133).
     * Requires a libseccomp new enough to know these syscall numbers. */
    {SCMP_SYS (clone3), ENOSYS},

    /* The new mount-manipulation APIs can also change our VFS.  There
     * is no legitimate reason to call them in the sandbox, so block
     * them all (returning ENOSYS, like an older kernel would) rather
     * than reasoning about which ones might be dangerous.
     * (CVE-2021-41133) */
    {SCMP_SYS (open_tree), ENOSYS},
    {SCMP_SYS (move_mount), ENOSYS},
    {SCMP_SYS (fsopen), ENOSYS},
    {SCMP_SYS (fsconfig), ENOSYS},
    {SCMP_SYS (fsmount), ENOSYS},
    {SCMP_SYS (fspick), ENOSYS},
    {SCMP_SYS (mount_setattr), ENOSYS},

    /* Don't allow faking input to the controlling tty (CVE-2017-5226) */
    {SCMP_SYS (ioctl), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)},
  };

  struct
  {
    int                  scall;
    int                  errnum;
    struct scmp_arg_cmp *arg;
  } syscall_nondevel_blocklist[] = {
    /* Profiling operations; we expect these to be done by tools from outside
     * the sandbox.  In particular perf has been the source of many CVEs.
     */
    {SCMP_SYS (perf_event_open), EPERM},
    /* Don't allow you to switch to bsd emulation or whatnot */
    {SCMP_SYS (personality), EPERM, &SCMP_A0 (SCMP_CMP_NE, allowed_personality)},
    {SCMP_SYS (ptrace), EPERM}
  };
  /* Blocklist all but unix, inet, inet6 and netlink */
  struct
  {
    int             family;
    FlatpakRunFlags flags_mask;
  } socket_family_allowlist[] = {
    /* NOTE: Keep in numerical order */
    { AF_UNSPEC, 0 },
    { AF_LOCAL, 0 },
    { AF_INET, 0 },
    { AF_INET6, 0 },
    { AF_NETLINK, 0 },
    { AF_CAN, FLATPAK_RUN_FLAG_CANBUS },
    { AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH },
  };
  int last_allowed_family;
  int i, r;
  g_auto(GLnxTmpfile) seccomp_tmpf = { 0, };

  /* Default-allow filter: everything not explicitly listed passes. */
  seccomp = seccomp_init (SCMP_ACT_ALLOW);
  if (!seccomp)
    return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed"));

  if (arch != NULL)
    {
      uint32_t arch_id = 0;
      const uint32_t *extra_arches = NULL;

      if (strcmp (arch, "i386") == 0)
        {
          arch_id = SCMP_ARCH_X86;
        }
      else if (strcmp (arch, "x86_64") == 0)
        {
          arch_id = SCMP_ARCH_X86_64;
          extra_arches = seccomp_x86_64_extra_arches;
        }
      else if (strcmp (arch, "arm") == 0)
        {
          arch_id = SCMP_ARCH_ARM;
        }
#ifdef SCMP_ARCH_AARCH64
      else if (strcmp (arch, "aarch64") == 0)
        {
          arch_id = SCMP_ARCH_AARCH64;
          extra_arches = seccomp_aarch64_extra_arches;
        }
#endif

      /* We only really need to handle arches on multiarch systems.
       * If only one arch is supported the default is fine */
      if (arch_id != 0)
        {
          /* This *adds* the target arch, instead of replacing the
             native one. This is not ideal, because we'd like to only
             allow the target arch, but we can't really disallow the
             native arch at this point, because then bubblewrap
             couldn't continue running. */
          r = seccomp_arch_add (seccomp, arch_id);
          if (r < 0 && r != -EEXIST)
            return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter"));

          if (multiarch && extra_arches != NULL)
            {
              for (i = 0; extra_arches[i] != 0; i++)
                {
                  r = seccomp_arch_add (seccomp, extra_arches[i]);
                  if (r < 0 && r != -EEXIST)
                    return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter"));
                }
            }
        }
    }

  /* TODO: Should we filter the kernel keyring syscalls in some way?
   * We do want them to be used by desktop apps, but they could also perhaps
   * leak system stuff or secrets from other apps.
   */

  for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++)
    {
      int scall = syscall_blocklist[i].scall;
      int errnum = syscall_blocklist[i].errnum;

      /* Only these two errnos are meaningful for blocked syscalls:
       * EPERM = "exists but forbidden", ENOSYS = "pretend the kernel
       * is too old to have it" (lets user-space fall back cleanly). */
      g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);

      if (syscall_blocklist[i].arg)
        r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_blocklist[i].arg);
      else
        r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
      if (r < 0 && r == -EFAULT /* unknown syscall */)
        return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
    }

  if (!devel)
    {
      for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++)
        {
          int scall = syscall_nondevel_blocklist[i].scall;
          int errnum = syscall_nondevel_blocklist[i].errnum;

          g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE);

          if (syscall_nondevel_blocklist[i].arg)
            r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_nondevel_blocklist[i].arg);
          else
            r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0);
          if (r < 0 && r == -EFAULT /* unknown syscall */)
            return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
        }
    }

  /* Socket filtering doesn't work on e.g. i386, so ignore failures here
   * However, we need to user seccomp_rule_add_exact to avoid libseccomp doing
   * something else: https://github.com/seccomp/libseccomp/issues/8 */
  last_allowed_family = -1;
  for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++)
    {
      int family = socket_family_allowlist[i].family;
      int disallowed;

      /* Skip families that are only allowed when their run-flag is set. */
      if (socket_family_allowlist[i].flags_mask != 0 &&
          (socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask)
        continue;

      for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++)
        {
          /* Blocklist the in-between valid families */
          seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed));
        }
      last_allowed_family = family;
    }
  /* Blocklist the rest */
  seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1));

  /* Compile the filter to BPF in an anonymous tmpfile and pass it to
   * bubblewrap by fd; bwrap loads it with --seccomp. */
  if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error))
    return FALSE;

  if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0)
    return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf"));

  lseek (seccomp_tmpf.fd, 0, SEEK_SET);

  flatpak_bwrap_add_args_data_fd (bwrap,
                                  "--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL);

  return TRUE;
}
| 1
|
312,410
|
make_get_auname(cmdidx_T cmdidx)
{
    /* Table mapping quickfix-generating ex commands to the name used
     * for their QuickFixCmdPre/Post autocommands. */
    static const struct
    {
	cmdidx_T	idx;
	char		*name;
    } auname_map[] = {
	{CMD_make,	"make"},
	{CMD_lmake,	"lmake"},
	{CMD_grep,	"grep"},
	{CMD_lgrep,	"lgrep"},
	{CMD_grepadd,	"grepadd"},
	{CMD_lgrepadd,	"lgrepadd"},
    };
    size_t i;

    for (i = 0; i < sizeof(auname_map) / sizeof(auname_map[0]); ++i)
	if (auname_map[i].idx == cmdidx)
	    return (char_u *)auname_map[i].name;

    /* Not a quickfix-producing command: no autocommand name. */
    return NULL;
}
| 0
|
252,295
|
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  (void)pStream; /* the bound does not depend on the stream state */
  // This is really over conservative. (And lame, but it's actually pretty
  // tricky to compute a true upper bound given the way tdefl's blocking works.)
  //
  // Take the larger of a ~110% expansion estimate and the stored-block
  // worst case (5 bytes of framing per 31 KiB block), each padded by 128.
  mz_ulong expansion_bound = 128 + (source_len * 110) / 100;
  mz_ulong stored_bound =
      128 + source_len + ((source_len / (31 * 1024)) + 1) * 5;
  return MZ_MAX(expansion_bound, stored_bound);
}
| 0
|
482,531
|
/* Allocate a Macro owning deep copies of @definition (length
 * @definition_length) and the @substitution_count (start, length) pairs in
 * @substitutions.  Previously every allocation (malloc x3, strdup) was
 * unchecked, so an out-of-memory condition crashed in memcpy/strdup; now
 * all allocations are verified and NULL is returned on failure, with any
 * partial allocations released.  The caller owns the returned Macro. */
create_macro(const char *name, const widechar *definition, int definition_length,
		const int *substitutions, int substitution_count, int argument_count) {
	Macro *m = malloc(sizeof(Macro));
	if (!m) return NULL;
	m->name = strdup(name);
	widechar *definition_copy = malloc(definition_length * sizeof(widechar));
	int *substitutions_copy = malloc(2 * substitution_count * sizeof(int));
	/* malloc(0) may legitimately return NULL, so only treat a NULL copy
	 * as failure when a non-empty copy was actually requested. */
	if (!m->name || (definition_length > 0 && !definition_copy) ||
			(substitution_count > 0 && !substitutions_copy)) {
		free(m->name);
		free(definition_copy);
		free(substitutions_copy);
		free(m);
		return NULL;
	}
	memcpy(definition_copy, definition, definition_length * sizeof(widechar));
	m->definition = definition_copy;
	m->definition_length = definition_length;
	memcpy(substitutions_copy, substitutions, 2 * substitution_count * sizeof(int));
	m->substitutions = substitutions_copy;
	m->substitution_count = substitution_count;
	m->argument_count = argument_count;
	return m;
}
| 0
|
484,743
|
/* Consume the chain of extra-info slots that follows a response on the
 * rx ring, up to (but not including) producer index @rp.  Each recognised
 * extra is stored into @extras at index (type - 1); the skb and grant ref
 * backing every consumed slot are recycled via xennet_move_rx_slot().
 *
 * Returns 0 on success, -EBADR if the ring runs out before the chain
 * ends, or -EINVAL if an extra with an unknown type was seen (the chain
 * is still walked to completion in that case). */
static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		/* The backend promised more extras than it produced. */
		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		/* Copy (not map) the response so the backend cannot change
		 * it under us after validation. */
		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra.type);
			err = -EINVAL;
		} else {
			extras[extra.type - 1] = extra;
		}

		/* Return the slot's skb/grant to the free pool regardless
		 * of whether the extra itself was valid. */
		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	xennet_set_rx_rsp_cons(queue, cons);
	return err;
}
| 0
|
487,626
|
/**
 * srcu_notifier_chain_unregister - remove @n from an SRCU notifier chain.
 * @nh: the chain to remove from
 * @n:  the notifier block to remove
 *
 * Returns the result of notifier_chain_unregister().  After the removal,
 * synchronize_srcu() waits for in-flight readers so the caller may free
 * @n safely once this returns.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);

	/* Grace period: ensure no srcu reader still sees @n. */
	synchronize_srcu(&nh->srcu);
	return ret;
}
| 0
|
265,044
|
free_colour_buffer(void)
{
    /* Reference-counted release: earlier callers just drop their count;
     * only the final caller actually frees the buffer. */
    if (--colseq_buf_allocs != 0)
	return;

    DPUTS(!colseq_buf, "Freeing colour sequence buffer without alloc");

    /* Free buffer for colour code composition */
    free(colseq_buf);
    colseq_buf = NULL;
}
| 0
|
336,643
|
static void reds_channel_init_auth_caps(RedLinkInfo *link, RedChannel *channel)
{
    RedsState *reds = link->reds;

    // Advertise exactly one auth mechanism: SASL when enabled for this
    // server and the link does not skip authentication, plain Spice
    // ticket auth otherwise.
    const bool use_sasl = reds->config->sasl_enabled && !link->skip_auth;
    channel->set_common_cap(use_sasl ? SPICE_COMMON_CAP_AUTH_SASL
                                     : SPICE_COMMON_CAP_AUTH_SPICE);
}
| 0
|
369,193
|
/* Drain up to @nr SQEs from the submission ring and submit them.
 * Returns the number of SQEs counted as submitted, or -EAGAIN if
 * request allocation failed before anything was submitted.
 * Caller must hold ctx->uring_lock. */
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
	__must_hold(&ctx->uring_lock)
{
	unsigned int entries = io_sqring_entries(ctx);
	int submitted = 0;

	if (unlikely(!entries))
		return 0;
	/* make sure SQ entry isn't read before tail */
	nr = min3(nr, ctx->sq_entries, entries);
	/* Take task refs up front for the whole batch; any unused refs
	 * are returned below if we stop early. */
	io_get_task_refs(nr);

	io_submit_state_start(&ctx->submit_state, nr);
	do {
		const struct io_uring_sqe *sqe;
		struct io_kiocb *req;

		if (unlikely(!io_alloc_req_refill(ctx))) {
			if (!submitted)
				submitted = -EAGAIN;
			break;
		}
		req = io_alloc_req(ctx);
		sqe = io_get_sqe(ctx);
		if (unlikely(!sqe)) {
			/* Ring emptied under us: recycle the request. */
			wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
			break;
		}
		/* will complete beyond this point, count as submitted */
		submitted++;
		if (io_submit_sqe(ctx, req, sqe)) {
			/*
			 * Continue submitting even for sqe failure if the
			 * ring was setup with IORING_SETUP_SUBMIT_ALL
			 */
			if (!(ctx->flags & IORING_SETUP_SUBMIT_ALL))
				break;
		}
	} while (submitted < nr);

	if (unlikely(submitted != nr)) {
		/* Return the task refs we reserved but did not consume. */
		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
		int unused = nr - ref_used;

		current->io_uring->cached_refs += unused;
	}

	io_submit_state_end(ctx);
	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
	io_commit_sqring(ctx);
	return submitted;
| 0
|
512,745
|
void Item_is_not_null_test::update_used_tables()
{
  // A NOT NULL argument makes this test constant true, so it depends on
  // no tables at all; otherwise delegate to the argument to recompute
  // its table dependencies.
  if (args[0]->maybe_null)
    args[0]->update_used_tables();
  else
    used_tables_cache= 0; /* is always true */
}
| 0
|
413,842
|
// Resolve an invokedynamic call site identified by @indy_index in @pool.
// On success, @result holds the (provisional) linkage information; if the
// call site was already bound (or already failed), the cached outcome is
// returned without re-running the bootstrap method.
void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHandle& pool, int indy_index, TRAPS) {
  ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(indy_index);
  int pool_index = cpce->constant_pool_index();

  // Resolve the bootstrap specifier (BSM + optional arguments).
  BootstrapInfo bootstrap_specifier(pool, pool_index, indy_index);

  // Check if CallSite has been bound already or failed already, and short circuit:
  {
    bool is_done = bootstrap_specifier.resolve_previously_linked_invokedynamic(result, CHECK);
    if (is_done) return;
  }

  // The initial step in Call Site Specifier Resolution is to resolve the symbolic
  // reference to a method handle which will be the bootstrap method for a dynamic
  // call site.  If resolution for the java.lang.invoke.MethodHandle for the bootstrap
  // method fails, then a MethodHandleInError is stored at the corresponding bootstrap
  // method's CP index for the CONSTANT_MethodHandle_info.  So, there is no need to
  // set the indy_rf flag since any subsequent invokedynamic instruction which shares
  // this bootstrap method will encounter the resolution of MethodHandleInError.
  resolve_dynamic_call(result, bootstrap_specifier, CHECK);

  // Trace the outcome when indy logging is enabled.
  LogTarget(Debug, methodhandles, indy) lt_indy;
  if (lt_indy.is_enabled()) {
    LogStream ls(lt_indy);
    bootstrap_specifier.print_msg_on(&ls, "resolve_invokedynamic");
  }

  // The returned linkage result is provisional up to the moment
  // the interpreter or runtime performs a serialized check of
  // the relevant CPCE::f1 field.  This is done by the caller
  // of this method, via CPCE::set_dynamic_call, which uses
  // an ObjectLocker to do the final serialization of updates
  // to CPCE state, including f1.

  // Log dynamic info to CDS classlist.
  ArchiveUtils::log_to_classlist(&bootstrap_specifier, CHECK);
}
| 0
|
411,913
|
/* Parse a detached-signatures document lying between <s> and <eos> (or up
 * to the NUL terminator when <eos> is NULL).  On success return a newly
 * allocated ns_detached_signatures_t holding the per-flavor digests and
 * signatures; on parse failure return NULL.
 *
 * Fix: the "Unrecognized algorithm name" warning for additional-digest
 * lines previously printed args[0] (the flavor) instead of args[1], the
 * algorithm name that actually failed to parse. */
networkstatus_parse_detached_signatures(const char *s, const char *eos)
{
  /* XXXX there is too much duplicate shared between this function and
   * networkstatus_parse_vote_from_string(). */
  directory_token_t *tok;
  memarea_t *area = NULL;
  digests_t *digests;

  smartlist_t *tokens = smartlist_create();
  ns_detached_signatures_t *sigs =
    tor_malloc_zero(sizeof(ns_detached_signatures_t));
  sigs->digests = strmap_new();
  sigs->signatures = strmap_new();

  if (!eos)
    eos = s + strlen(s);

  area = memarea_new();
  if (tokenize_string(area,s, eos, tokens,
                      networkstatus_detached_signature_token_table, 0)) {
    log_warn(LD_DIR, "Error tokenizing detached networkstatus signatures");
    goto err;
  }

  /* Grab all the digest-like tokens. */
  SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) {
    const char *algname;
    digest_algorithm_t alg;
    const char *flavor;
    const char *hexdigest;
    size_t expected_length;

    tok = _tok;

    if (tok->tp == K_CONSENSUS_DIGEST) {
      /* Legacy unflavored ("ns") consensus digest; always sha1. */
      algname = "sha1";
      alg = DIGEST_SHA1;
      flavor = "ns";
      hexdigest = tok->args[0];
    } else if (tok->tp == K_ADDITIONAL_DIGEST) {
      int a = crypto_digest_algorithm_parse_name(tok->args[1]);
      if (a<0) {
        /* args[1] is the algorithm name that failed to parse; the old
         * code mistakenly logged args[0] (the flavor) here. */
        log_warn(LD_DIR, "Unrecognized algorithm name %s", tok->args[1]);
        continue;
      }
      alg = (digest_algorithm_t) a;
      flavor = tok->args[0];
      algname = tok->args[1];
      hexdigest = tok->args[2];
    } else {
      continue;
    }

    expected_length =
      (alg == DIGEST_SHA1) ? HEX_DIGEST_LEN : HEX_DIGEST256_LEN;

    if (strlen(hexdigest) != expected_length) {
      log_warn(LD_DIR, "Wrong length on consensus-digest in detached "
               "networkstatus signatures");
      goto err;
    }
    digests = detached_get_digests(sigs, flavor);
    tor_assert(digests);
    if (!tor_mem_is_zero(digests->d[alg], DIGEST256_LEN)) {
      /* Same (flavor, algorithm) listed twice: keep the first value. */
      log_warn(LD_DIR, "Multiple digests for %s with %s on detached "
               "signatures document", flavor, algname);
      continue;
    }
    if (base16_decode(digests->d[alg], DIGEST256_LEN,
                      hexdigest, strlen(hexdigest)) < 0) {
      log_warn(LD_DIR, "Bad encoding on consensus-digest in detached "
               "networkstatus signatures");
      goto err;
    }
  } SMARTLIST_FOREACH_END(_tok);

  /* The three timestamps are mandatory tokens. */
  tok = find_by_keyword(tokens, K_VALID_AFTER);
  if (parse_iso_time(tok->args[0], &sigs->valid_after)) {
    log_warn(LD_DIR, "Bad valid-after in detached networkstatus signatures");
    goto err;
  }

  tok = find_by_keyword(tokens, K_FRESH_UNTIL);
  if (parse_iso_time(tok->args[0], &sigs->fresh_until)) {
    log_warn(LD_DIR, "Bad fresh-until in detached networkstatus signatures");
    goto err;
  }

  tok = find_by_keyword(tokens, K_VALID_UNTIL);
  if (parse_iso_time(tok->args[0], &sigs->valid_until)) {
    log_warn(LD_DIR, "Bad valid-until in detached networkstatus signatures");
    goto err;
  }

  /* Now collect the signature tokens. */
  SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) {
    const char *id_hexdigest;
    const char *sk_hexdigest;
    const char *algname;
    const char *flavor;
    digest_algorithm_t alg;

    char id_digest[DIGEST_LEN];
    char sk_digest[DIGEST_LEN];
    smartlist_t *siglist;
    document_signature_t *sig;
    int is_duplicate;

    tok = _tok;
    if (tok->tp == K_DIRECTORY_SIGNATURE) {
      tor_assert(tok->n_args >= 2);
      flavor = "ns";
      algname = "sha1";
      id_hexdigest = tok->args[0];
      sk_hexdigest = tok->args[1];
    } else if (tok->tp == K_ADDITIONAL_SIGNATURE) {
      tor_assert(tok->n_args >= 4);
      flavor = tok->args[0];
      algname = tok->args[1];
      id_hexdigest = tok->args[2];
      sk_hexdigest = tok->args[3];
    } else {
      continue;
    }

    {
      int a = crypto_digest_algorithm_parse_name(algname);
      if (a<0) {
        log_warn(LD_DIR, "Unrecognized algorithm name %s", algname);
        continue;
      }
      alg = (digest_algorithm_t) a;
    }

    if (!tok->object_type ||
        strcmp(tok->object_type, "SIGNATURE") ||
        tok->object_size < 128 || tok->object_size > 512) {
      log_warn(LD_DIR, "Bad object type or length on directory-signature");
      goto err;
    }

    if (strlen(id_hexdigest) != HEX_DIGEST_LEN ||
        base16_decode(id_digest, sizeof(id_digest),
                      id_hexdigest, HEX_DIGEST_LEN) < 0) {
      log_warn(LD_DIR, "Error decoding declared identity %s in "
               "network-status vote.", escaped(id_hexdigest));
      goto err;
    }
    if (strlen(sk_hexdigest) != HEX_DIGEST_LEN ||
        base16_decode(sk_digest, sizeof(sk_digest),
                      sk_hexdigest, HEX_DIGEST_LEN) < 0) {
      log_warn(LD_DIR, "Error decoding declared signing key digest %s in "
               "network-status vote.", escaped(sk_hexdigest));
      goto err;
    }

    siglist = detached_get_signatures(sigs, flavor);
    is_duplicate = 0;
    /* Reject an exact repeat of (algorithm, identity, signing key). */
    SMARTLIST_FOREACH(siglist, document_signature_t *, s, {
      if (s->alg == alg &&
          tor_memeq(id_digest, s->identity_digest, DIGEST_LEN) &&
          tor_memeq(sk_digest, s->signing_key_digest, DIGEST_LEN)) {
        is_duplicate = 1;
      }
    });
    if (is_duplicate) {
      log_warn(LD_DIR, "Two signatures with identical keys and algorithm "
               "found.");
      continue;
    }

    sig = tor_malloc_zero(sizeof(document_signature_t));
    sig->alg = alg;
    memcpy(sig->identity_digest, id_digest, DIGEST_LEN);
    memcpy(sig->signing_key_digest, sk_digest, DIGEST_LEN);
    if (tok->object_size >= INT_MAX || tok->object_size >= SIZE_T_CEILING) {
      tor_free(sig);
      goto err;
    }
    sig->signature = tor_memdup(tok->object_body, tok->object_size);
    sig->signature_len = (int) tok->object_size;

    smartlist_add(siglist, sig);
  } SMARTLIST_FOREACH_END(_tok);

  goto done;
 err:
  ns_detached_signatures_free(sigs);
  sigs = NULL;
 done:
  SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
  smartlist_free(tokens);
  if (area) {
    DUMP_AREA(area, "detached signatures");
    memarea_drop_all(area);
  }
  return sigs;
}
| 0
|
225,430
|
/* Release one opener of the loopback device.  The last closer stops the
 * sustain/timeout timers and the buffer pool is given a chance to be
 * freed; when the departing opener was the writer, the device is marked
 * ready for a new output opener.  Always returns 0. */
static int v4l2_loopback_close(struct file *file)
{
	struct v4l2_loopback_opener *opener;
	struct v4l2_loopback_device *dev;
	int iswriter = 0;

	MARK();
	opener = fh_to_opener(file->private_data);
	dev = v4l2loopback_getdevice(file);

	if (WRITER == opener->type)
		iswriter = 1;

	atomic_dec(&dev->open_count);
	/* NOTE(review): reading open_count.counter non-atomically right
	 * after the decrement looks racy against a concurrent open —
	 * confirm that open/close are serialized by the caller. */
	if (dev->open_count.counter == 0) {
		del_timer_sync(&dev->sustain_timer);
		del_timer_sync(&dev->timeout_timer);
	}
	try_free_buffers(dev);

	v4l2_fh_del(&opener->fh);
	v4l2_fh_exit(&opener->fh);

	kfree(opener);
	if (iswriter) {
		dev->ready_for_output = 1;
	}
	MARK();
	return 0;
}
| 0
|
455,302
|
/* Build the keymaps used to record Unix command bindings: an emacs map
 * with C-x and ESC as prefix keys (each pointing at its own fresh empty
 * keymap), plus separate vi insert/movement maps when vi mode is
 * compiled in. */
init_unix_command_map ()
{
  emacs_std_cmd_xmap = rl_make_bare_keymap ();

  /* C-x and ESC become prefixes into their own sub-keymaps. */
  emacs_std_cmd_xmap[CTRL('X')].type = ISKMAP;
  emacs_std_cmd_xmap[CTRL('X')].function = KEYMAP_TO_FUNCTION (rl_make_bare_keymap ());
  emacs_std_cmd_xmap[ESC].type = ISKMAP;
  emacs_std_cmd_xmap[ESC].function = KEYMAP_TO_FUNCTION (rl_make_bare_keymap ());

#if defined (VI_MODE)
  vi_insert_cmd_xmap = rl_make_bare_keymap ();
  vi_movement_cmd_xmap = rl_make_bare_keymap ();
#endif
}
| 0
|
222,881
|
GraphProperties::GetOutputProperties(const string& node_name) const {
  // Look up the node's cached output properties; unknown nodes get the
  // shared "missing" sentinel.
  const auto it = output_properties_.find(node_name);
  return it == output_properties_.end() ? missing_properties_ : it->second;
}
| 0
|
211,506
|
int ZEXPORT inflate(strm, flush)
z_streamp strm;
int flush;
{
struct inflate_state FAR *state;
z_const unsigned char FAR *next; /* next input */
unsigned char FAR *put; /* next output */
unsigned have, left; /* available input and output */
unsigned long hold; /* bit buffer */
unsigned bits; /* bits in bit buffer */
unsigned in, out; /* save starting available input and output */
unsigned copy; /* number of stored or match bytes to copy */
unsigned char FAR *from; /* where to copy match bytes from */
code here; /* current decoding table entry */
code last; /* parent table entry */
unsigned len; /* length to copy for repeats, bits to drop */
int ret; /* return code */
#ifdef GUNZIP
unsigned char hbuf[4]; /* buffer for gzip header crc calculation */
#endif
static const unsigned short order[19] = /* permutation of code lengths */
{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
if (inflateStateCheck(strm) || strm->next_out == Z_NULL ||
(strm->next_in == Z_NULL && strm->avail_in != 0))
return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
LOAD();
in = have;
out = left;
ret = Z_OK;
for (;;)
switch (state->mode) {
case HEAD:
if (state->wrap == 0) {
state->mode = TYPEDO;
break;
}
NEEDBITS(16);
#ifdef GUNZIP
if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */
if (state->wbits == 0)
state->wbits = 15;
state->check = crc32(0L, Z_NULL, 0);
CRC2(state->check, hold);
INITBITS();
state->mode = FLAGS;
break;
}
if (state->head != Z_NULL)
state->head->done = -1;
if (!(state->wrap & 1) || /* check if zlib header allowed */
#else
if (
#endif
((BITS(8) << 8) + (hold >> 8)) % 31) {
strm->msg = (char *)"incorrect header check";
state->mode = BAD;
break;
}
if (BITS(4) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
DROPBITS(4);
len = BITS(4) + 8;
if (state->wbits == 0)
state->wbits = len;
if (len > 15 || len > state->wbits) {
strm->msg = (char *)"invalid window size";
state->mode = BAD;
break;
}
state->dmax = 1U << len;
state->flags = 0; /* indicate zlib header */
Tracev((stderr, "inflate: zlib header ok\n"));
strm->adler = state->check = adler32(0L, Z_NULL, 0);
state->mode = hold & 0x200 ? DICTID : TYPE;
INITBITS();
break;
#ifdef GUNZIP
case FLAGS:
NEEDBITS(16);
state->flags = (int)(hold);
if ((state->flags & 0xff) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
if (state->flags & 0xe000) {
strm->msg = (char *)"unknown header flags set";
state->mode = BAD;
break;
}
if (state->head != Z_NULL)
state->head->text = (int)((hold >> 8) & 1);
if ((state->flags & 0x0200) && (state->wrap & 4))
CRC2(state->check, hold);
INITBITS();
state->mode = TIME;
/* fallthrough */
case TIME:
NEEDBITS(32);
if (state->head != Z_NULL)
state->head->time = hold;
if ((state->flags & 0x0200) && (state->wrap & 4))
CRC4(state->check, hold);
INITBITS();
state->mode = OS;
/* fallthrough */
case OS:
NEEDBITS(16);
if (state->head != Z_NULL) {
state->head->xflags = (int)(hold & 0xff);
state->head->os = (int)(hold >> 8);
}
if ((state->flags & 0x0200) && (state->wrap & 4))
CRC2(state->check, hold);
INITBITS();
state->mode = EXLEN;
/* fallthrough */
case EXLEN:
if (state->flags & 0x0400) {
NEEDBITS(16);
state->length = (unsigned)(hold);
if (state->head != Z_NULL)
state->head->extra_len = (unsigned)hold;
if ((state->flags & 0x0200) && (state->wrap & 4))
CRC2(state->check, hold);
INITBITS();
}
else if (state->head != Z_NULL)
state->head->extra = Z_NULL;
state->mode = EXTRA;
/* fallthrough */
case EXTRA:
if (state->flags & 0x0400) {
copy = state->length;
if (copy > have) copy = have;
if (copy) {
if (state->head != Z_NULL &&
state->head->extra != Z_NULL) {
len = state->head->extra_len - state->length;
zmemcpy(state->head->extra + len, next,
len + copy > state->head->extra_max ?
state->head->extra_max - len : copy);
}
if ((state->flags & 0x0200) && (state->wrap & 4))
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
state->length -= copy;
}
if (state->length) goto inf_leave;
}
state->length = 0;
state->mode = NAME;
/* fallthrough */
case NAME:
if (state->flags & 0x0800) {
if (have == 0) goto inf_leave;
copy = 0;
do {
len = (unsigned)(next[copy++]);
if (state->head != Z_NULL &&
state->head->name != Z_NULL &&
state->length < state->head->name_max)
state->head->name[state->length++] = (Bytef)len;
} while (len && copy < have);
if ((state->flags & 0x0200) && (state->wrap & 4))
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
if (len) goto inf_leave;
}
else if (state->head != Z_NULL)
state->head->name = Z_NULL;
state->length = 0;
state->mode = COMMENT;
/* fallthrough */
case COMMENT:
if (state->flags & 0x1000) {
if (have == 0) goto inf_leave;
copy = 0;
do {
len = (unsigned)(next[copy++]);
if (state->head != Z_NULL &&
state->head->comment != Z_NULL &&
state->length < state->head->comm_max)
state->head->comment[state->length++] = (Bytef)len;
} while (len && copy < have);
if ((state->flags & 0x0200) && (state->wrap & 4))
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
if (len) goto inf_leave;
}
else if (state->head != Z_NULL)
state->head->comment = Z_NULL;
state->mode = HCRC;
/* fallthrough */
case HCRC:
if (state->flags & 0x0200) {
NEEDBITS(16);
if ((state->wrap & 4) && hold != (state->check & 0xffff)) {
strm->msg = (char *)"header crc mismatch";
state->mode = BAD;
break;
}
INITBITS();
}
if (state->head != Z_NULL) {
state->head->hcrc = (int)((state->flags >> 9) & 1);
state->head->done = 1;
}
strm->adler = state->check = crc32(0L, Z_NULL, 0);
state->mode = TYPE;
break;
#endif
case DICTID:
NEEDBITS(32);
strm->adler = state->check = ZSWAP32(hold);
INITBITS();
state->mode = DICT;
/* fallthrough */
case DICT:
if (state->havedict == 0) {
RESTORE();
return Z_NEED_DICT;
}
strm->adler = state->check = adler32(0L, Z_NULL, 0);
state->mode = TYPE;
/* fallthrough */
case TYPE:
if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave;
/* fallthrough */
case TYPEDO:
if (state->last) {
BYTEBITS();
state->mode = CHECK;
break;
}
NEEDBITS(3);
state->last = BITS(1);
DROPBITS(1);
switch (BITS(2)) {
case 0: /* stored block */
Tracev((stderr, "inflate: stored block%s\n",
state->last ? " (last)" : ""));
state->mode = STORED;
break;
case 1: /* fixed block */
fixedtables(state);
Tracev((stderr, "inflate: fixed codes block%s\n",
state->last ? " (last)" : ""));
state->mode = LEN_; /* decode codes */
if (flush == Z_TREES) {
DROPBITS(2);
goto inf_leave;
}
break;
case 2: /* dynamic block */
Tracev((stderr, "inflate: dynamic codes block%s\n",
state->last ? " (last)" : ""));
state->mode = TABLE;
break;
case 3:
strm->msg = (char *)"invalid block type";
state->mode = BAD;
}
DROPBITS(2);
break;
case STORED:
BYTEBITS(); /* go to byte boundary */
NEEDBITS(32);
if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
strm->msg = (char *)"invalid stored block lengths";
state->mode = BAD;
break;
}
state->length = (unsigned)hold & 0xffff;
Tracev((stderr, "inflate: stored length %u\n",
state->length));
INITBITS();
state->mode = COPY_;
if (flush == Z_TREES) goto inf_leave;
/* fallthrough */
case COPY_:
state->mode = COPY;
/* fallthrough */
case COPY:
copy = state->length;
if (copy) {
if (copy > have) copy = have;
if (copy > left) copy = left;
if (copy == 0) goto inf_leave;
zmemcpy(put, next, copy);
have -= copy;
next += copy;
left -= copy;
put += copy;
state->length -= copy;
break;
}
Tracev((stderr, "inflate: stored end\n"));
state->mode = TYPE;
break;
case TABLE:
NEEDBITS(14);
state->nlen = BITS(5) + 257;
DROPBITS(5);
state->ndist = BITS(5) + 1;
DROPBITS(5);
state->ncode = BITS(4) + 4;
DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
if (state->nlen > 286 || state->ndist > 30) {
strm->msg = (char *)"too many length or distance symbols";
state->mode = BAD;
break;
}
#endif
Tracev((stderr, "inflate: table sizes ok\n"));
state->have = 0;
state->mode = LENLENS;
/* fallthrough */
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
state->lens[order[state->have++]] = (unsigned short)BITS(3);
DROPBITS(3);
}
while (state->have < 19)
state->lens[order[state->have++]] = 0;
state->next = state->codes;
state->lencode = (const code FAR *)(state->next);
state->lenbits = 7;
ret = inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid code lengths set";
state->mode = BAD;
break;
}
Tracev((stderr, "inflate: code lengths ok\n"));
state->have = 0;
state->mode = CODELENS;
/* fallthrough */
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
here = state->lencode[BITS(state->lenbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if (here.val < 16) {
DROPBITS(here.bits);
state->lens[state->have++] = here.val;
}
else {
if (here.val == 16) {
NEEDBITS(here.bits + 2);
DROPBITS(here.bits);
if (state->have == 0) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
len = state->lens[state->have - 1];
copy = 3 + BITS(2);
DROPBITS(2);
}
else if (here.val == 17) {
NEEDBITS(here.bits + 3);
DROPBITS(here.bits);
len = 0;
copy = 3 + BITS(3);
DROPBITS(3);
}
else {
NEEDBITS(here.bits + 7);
DROPBITS(here.bits);
len = 0;
copy = 11 + BITS(7);
DROPBITS(7);
}
if (state->have + copy > state->nlen + state->ndist) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
while (copy--)
state->lens[state->have++] = (unsigned short)len;
}
}
/* handle error breaks in while */
if (state->mode == BAD) break;
/* check for end-of-block code (better have one) */
if (state->lens[256] == 0) {
strm->msg = (char *)"invalid code -- missing end-of-block";
state->mode = BAD;
break;
}
/* build code tables -- note: do not change the lenbits or distbits
values here (9 and 6) without reading the comments in inftrees.h
concerning the ENOUGH constants, which depend on those values */
state->next = state->codes;
state->lencode = (const code FAR *)(state->next);
state->lenbits = 9;
ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid literal/lengths set";
state->mode = BAD;
break;
}
state->distcode = (const code FAR *)(state->next);
state->distbits = 6;
ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
if (ret) {
strm->msg = (char *)"invalid distances set";
state->mode = BAD;
break;
}
Tracev((stderr, "inflate: codes ok\n"));
state->mode = LEN_;
if (flush == Z_TREES) goto inf_leave;
/* fallthrough */
case LEN_:
state->mode = LEN;
/* fallthrough */
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
inflate_fast(strm, out);
LOAD();
if (state->mode == TYPE)
state->back = -1;
break;
}
state->back = 0;
for (;;) {
here = state->lencode[BITS(state->lenbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if (here.op && (here.op & 0xf0) == 0) {
last = here;
for (;;) {
here = state->lencode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + here.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
state->back += last.bits;
}
DROPBITS(here.bits);
state->back += here.bits;
state->length = (unsigned)here.val;
if ((int)(here.op) == 0) {
Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
"inflate: literal '%c'\n" :
"inflate: literal 0x%02x\n", here.val));
state->mode = LIT;
break;
}
if (here.op & 32) {
Tracevv((stderr, "inflate: end of block\n"));
state->back = -1;
state->mode = TYPE;
break;
}
if (here.op & 64) {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
state->extra = (unsigned)(here.op) & 15;
state->mode = LENEXT;
/* fallthrough */
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->length += BITS(state->extra);
DROPBITS(state->extra);
state->back += state->extra;
}
Tracevv((stderr, "inflate: length %u\n", state->length));
state->was = state->length;
state->mode = DIST;
/* fallthrough */
case DIST:
for (;;) {
here = state->distcode[BITS(state->distbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if ((here.op & 0xf0) == 0) {
last = here;
for (;;) {
here = state->distcode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + here.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
state->back += last.bits;
}
DROPBITS(here.bits);
state->back += here.bits;
if (here.op & 64) {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
state->offset = (unsigned)here.val;
state->extra = (unsigned)(here.op) & 15;
state->mode = DISTEXT;
/* fallthrough */
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->offset += BITS(state->extra);
DROPBITS(state->extra);
state->back += state->extra;
}
#ifdef INFLATE_STRICT
if (state->offset > state->dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
Tracevv((stderr, "inflate: distance %u\n", state->offset));
state->mode = MATCH;
/* fallthrough */
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
if (state->offset > copy) { /* copy from window */
copy = state->offset - copy;
if (copy > state->whave) {
if (state->sane) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
Trace((stderr, "inflate.c too far\n"));
copy -= state->whave;
if (copy > state->length) copy = state->length;
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = 0;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
#endif
}
if (copy > state->wnext) {
copy -= state->wnext;
from = state->window + (state->wsize - copy);
}
else
from = state->window + (state->wnext - copy);
if (copy > state->length) copy = state->length;
}
else { /* copy from output */
from = put - state->offset;
copy = state->length;
}
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = *from++;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
case LIT:
if (left == 0) goto inf_leave;
*put++ = (unsigned char)(state->length);
left--;
state->mode = LEN;
break;
case CHECK:
if (state->wrap) {
NEEDBITS(32);
out -= left;
strm->total_out += out;
state->total += out;
if ((state->wrap & 4) && out)
strm->adler = state->check =
UPDATE_CHECK(state->check, put - out, out);
out = left;
if ((state->wrap & 4) && (
#ifdef GUNZIP
state->flags ? hold :
#endif
ZSWAP32(hold)) != state->check) {
strm->msg = (char *)"incorrect data check";
state->mode = BAD;
break;
}
INITBITS();
Tracev((stderr, "inflate: check matches trailer\n"));
}
#ifdef GUNZIP
state->mode = LENGTH;
/* fallthrough */
case LENGTH:
if (state->wrap && state->flags) {
NEEDBITS(32);
if ((state->wrap & 4) && hold != (state->total & 0xffffffff)) {
strm->msg = (char *)"incorrect length check";
state->mode = BAD;
break;
}
INITBITS();
Tracev((stderr, "inflate: length matches trailer\n"));
}
#endif
state->mode = DONE;
/* fallthrough */
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
case BAD:
ret = Z_DATA_ERROR;
goto inf_leave;
case MEM:
return Z_MEM_ERROR;
case SYNC:
/* fallthrough */
default:
return Z_STREAM_ERROR;
}
/*
Return from inflate(), updating the total counts and the check value.
If there was no progress during the inflate() call, return a buffer
error. Call updatewindow() to create and/or update the window state.
Note: a memory error from inflate() is non-recoverable.
*/
inf_leave:
RESTORE();
if (state->wsize || (out != strm->avail_out && state->mode < BAD &&
(state->mode < CHECK || flush != Z_FINISH)))
if (updatewindow(strm, strm->next_out, out - strm->avail_out)) {
state->mode = MEM;
return Z_MEM_ERROR;
}
in -= strm->avail_in;
out -= strm->avail_out;
strm->total_in += in;
strm->total_out += out;
state->total += out;
if ((state->wrap & 4) && out)
strm->adler = state->check =
UPDATE_CHECK(state->check, strm->next_out - out, out);
strm->data_type = (int)state->bits + (state->last ? 64 : 0) +
(state->mode == TYPE ? 128 : 0) +
(state->mode == LEN_ || state->mode == COPY_ ? 256 : 0);
if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
ret = Z_BUF_ERROR;
return ret;
}
| 1
|
401,530
|
/*
 * timer_reduce - modify a timer's expiry, forwarding to __mod_timer()
 * @timer:   the timer to be modified
 * @expires: new expiry time, in jiffies
 *
 * Thin wrapper that calls __mod_timer() with the MOD_TIMER_REDUCE flag.
 * Per the flag name this should only ever move a pending expiry earlier,
 * never later — confirm against the __mod_timer() implementation.
 *
 * Returns whatever __mod_timer() returns.
 */
int timer_reduce(struct timer_list *timer, unsigned long expires)
{
return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
}
| 0
|
424,921
|
/*
 * Load the firmware sections belonging to one CPU on 8000-family devices.
 *
 * @trans:               transport handle
 * @image:               firmware image holding the sections
 * @cpu:                 1 or 2; selects which half of the load-status
 *                       register is updated
 * @first_ucode_section: in/out index of the first section to load; on
 *                       return it holds the last section index examined
 *
 * Returns 0 on success or the error from iwl_pcie_load_section().
 */
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift;
	int idx, err;
	int sec_flag = 0x1;
	u32 status;
	u32 last_idx = 0;
	u32 done_mark;

	if (cpu == 1) {
		shift = 0;
		*first_ucode_section = 0;
	} else {
		shift = 16;
		(*first_ucode_section)++;
	}

	for (idx = *first_ucode_section; idx < image->num_sec; idx++) {
		last_idx = idx;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[idx].data ||
		    image->sec[idx].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[idx].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     idx);
			break;
		}

		err = iwl_pcie_load_section(trans, idx, &image->sec[idx]);
		if (err)
			return err;

		/* Notify ucode of loaded section number and status */
		status = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
				   status | (sec_flag << shift));
		sec_flag = (sec_flag << 1) | 0x1;
	}

	*first_ucode_section = last_idx;

	iwl_enable_interrupts(trans);

	/* Tell the ucode this CPU's sections are fully loaded. */
	done_mark = (cpu == 1) ? 0xFFFF : 0xFFFFFFFF;
	if (trans->trans_cfg->use_tfh)
		iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, done_mark);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, done_mark);

	return 0;
}
| 0
|
432,197
|
/*
 * Register @listener for memory events on address space @as.
 *
 * The listener is bound to @as, appended to both the global listener
 * list (on as->uc) and the per-address-space list, and then handed the
 * address space via listener_add_address_space() — presumably so it is
 * notified of already-existing regions (confirm against that helper).
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
listener->address_space = as;
QTAILQ_INSERT_TAIL(&as->uc->memory_listeners, listener, link);
QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
listener_add_address_space(listener, as);
}
| 0
|
474,040
|
/*
 * Adjust a pointer @s, which may point into the middle of a GBK
 * multi-byte character, back to a character head within [start, end).
 * Returns a pointer to the head of the character containing @s.
 */
gbk_left_adjust_char_head(const UChar* start, const UChar* s, const UChar* end, OnigEncoding enc)
{
  const UChar *head;
  int clen;

  /* Nothing to adjust at or before the start of the buffer. */
  if (s <= start) return (UChar* )s;

  head = s;
  /* If we landed on a possible trail byte, scan backwards until a byte
     that cannot begin a multi-byte sequence, then step past it. */
  if (GBK_ISMB_TRAIL(*head)) {
    while (head > start) {
      if (! GBK_ISMB_FIRST(*--head)) {
        head++;
        break;
      }
    }
  }

  clen = enclen(enc, head, end);
  if (head + clen > s) return (UChar* )head;

  /* Step forward whole characters, keeping two-byte alignment. */
  head += clen;
  return (UChar* )(head + ((s - head) & ~1));
}
| 0
|
338,224
|
// Try to decode opcode `code` as a SIMD load instruction.
// Returns true (with `out` set) when the opcode was recognized,
// false otherwise. A plain v128 load becomes an ordinary Load node;
// every other SIMD load opcode becomes a SIMDLoad node.
bool WasmBinaryBuilder::maybeVisitSIMDLoad(Expression*& out, uint32_t code) {
  if (code == BinaryConsts::V128Load) {
    auto* load = allocator.alloc<Load>();
    load->type = Type::v128;
    load->bytes = 16;
    readMemoryAccess(load->align, load->offset);
    load->isAtomic = false;
    load->ptr = popNonVoidExpression();
    load->finalize();
    out = load;
    return true;
  }
  // Map the opcode onto a SIMDLoadOp, rejecting anything unknown before
  // allocating the node.
  SIMDLoadOp op;
  switch (code) {
    case BinaryConsts::V128Load8Splat:
      op = Load8SplatVec128;
      break;
    case BinaryConsts::V128Load16Splat:
      op = Load16SplatVec128;
      break;
    case BinaryConsts::V128Load32Splat:
      op = Load32SplatVec128;
      break;
    case BinaryConsts::V128Load64Splat:
      op = Load64SplatVec128;
      break;
    case BinaryConsts::V128Load8x8S:
      op = Load8x8SVec128;
      break;
    case BinaryConsts::V128Load8x8U:
      op = Load8x8UVec128;
      break;
    case BinaryConsts::V128Load16x4S:
      op = Load16x4SVec128;
      break;
    case BinaryConsts::V128Load16x4U:
      op = Load16x4UVec128;
      break;
    case BinaryConsts::V128Load32x2S:
      op = Load32x2SVec128;
      break;
    case BinaryConsts::V128Load32x2U:
      op = Load32x2UVec128;
      break;
    case BinaryConsts::V128Load32Zero:
      op = Load32ZeroVec128;
      break;
    case BinaryConsts::V128Load64Zero:
      op = Load64ZeroVec128;
      break;
    default:
      return false;
  }
  auto* node = allocator.alloc<SIMDLoad>();
  node->op = op;
  readMemoryAccess(node->align, node->offset);
  node->ptr = popNonVoidExpression();
  node->finalize();
  out = node;
  return true;
}
| 0
|
308,158
|
/*
 * Return a kernel-virtual address for page @pgnum of the buffer backing
 * @dmabuf, or NULL when the buffer has no kernel mapping (buf->virt unset).
 */
static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	if (!buf->virt)
		return NULL;

	return buf->virt + pgnum * PAGE_SIZE;
}
| 0
|
221,645
|
/// Determine the numeric ordering of two literals.
/// \return None if either side is not a LiteralNumber; otherwise the
/// ordering of the two double values, with Unordered for NaN operands.
llvh::Optional<NumericOrder> getNumericOrder(Literal *LHS, Literal *RHS) {
  auto *lhsNum = llvh::dyn_cast<LiteralNumber>(LHS);
  auto *rhsNum = llvh::dyn_cast<LiteralNumber>(RHS);
  if (!lhsNum || !rhsNum)
    return llvh::None;

  double a = lhsNum->getValue();
  double b = rhsNum->getValue();

  // NaN compares unordered with everything, including itself. Checking it
  // first is equivalent to the usual <-then-> chain, since NaN fails both.
  if (std::isnan(a) || std::isnan(b))
    return NumericOrder::Unordered;
  if (a < b)
    return NumericOrder::LessThan;
  if (b < a)
    return NumericOrder::GreaterThan;
  return NumericOrder::Equal;
}
| 0
|
273,905
|
/*
 * Classify the user recorded in @ctrl.
 *
 * Returns -1 when no user name has been given yet, 1 for the literal
 * "anonymous" user, and 0 for any other (named) user.
 */
static int check_user_pass(ctrl_t *ctrl)
{
	/* No user name supplied yet. */
	if (ctrl->name[0] == '\0')
		return -1;

	return strcmp(ctrl->name, "anonymous") == 0 ? 1 : 0;
}
| 0
|
385,883
|
/*
 * faccessat() - check permissions of @filename (relative to @dfd) against
 * the caller's *real* uid/gid rather than the effective ones, per the
 * access(2) contract. Temporary override credentials are installed with
 * fsuid/fsgid set to the real ids, the permission check is performed, and
 * the original credentials are restored before returning.
 */
SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
{
const struct cred *old_cred;
struct cred *override_cred;
struct path path;
struct inode *inode;
int res;
unsigned int lookup_flags = LOOKUP_FOLLOW;
if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */
return -EINVAL;
/* Build credentials whose fsuid/fsgid are the caller's real ids. */
override_cred = prepare_creds();
if (!override_cred)
return -ENOMEM;
override_cred->fsuid = override_cred->uid;
override_cred->fsgid = override_cred->gid;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
/* Clear the capabilities if we switch to a non-root user */
kuid_t root_uid = make_kuid(override_cred->user_ns, 0);
if (!uid_eq(override_cred->uid, root_uid))
cap_clear(override_cred->cap_effective);
else
override_cred->cap_effective =
override_cred->cap_permitted;
}
old_cred = override_creds(override_cred);
retry:
res = user_path_at(dfd, filename, lookup_flags, &path);
if (res)
goto out;
inode = path.dentry->d_inode;
if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
/*
 * MAY_EXEC on regular files is denied if the fs is mounted
 * with the "noexec" flag.
 */
res = -EACCES;
if (path.mnt->mnt_flags & MNT_NOEXEC)
goto out_path_release;
}
res = inode_permission(inode, mode | MAY_ACCESS);
/* SuS v2 requires we report a read only fs too */
if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
goto out_path_release;
/*
 * This is a rare case where using __mnt_is_readonly()
 * is OK without a mnt_want/drop_write() pair. Since
 * no actual write to the fs is performed here, we do
 * not need to telegraph to that to anyone.
 *
 * By doing this, we accept that this access is
 * inherently racy and know that the fs may change
 * state before we even see this result.
 */
if (__mnt_is_readonly(path.mnt))
res = -EROFS;
out_path_release:
path_put(&path);
/* On ESTALE, retry the lookup once with revalidation forced. */
if (retry_estale(res, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
/* Always restore the caller's original credentials. */
revert_creds(old_cred);
put_cred(override_cred);
return res;
}
| 0
|
508,823
|
/*
  Unlink the first table from the LEX's global table list, and from the
  local (SELECT-level) list when that list is non-empty.

  @param[out] link_to_local  set to true when the table was also removed
                             from the local list (so relink must restore
                             both lists later)

  @return the unlinked table, or NULL when the global list is empty.
          The returned node has its next_global (and, when applicable,
          next_local) pointers cleared.
*/
TABLE_LIST *LEX::unlink_first_table(bool *link_to_local)
{
TABLE_LIST *first;
if ((first= query_tables))
{
/*
Exclude from global table list
*/
if ((query_tables= query_tables->next_global))
query_tables->prev_global= &query_tables;
else
query_tables_last= &query_tables;
first->next_global= 0;
/*
and from local list if it is not empty
*/
if ((*link_to_local= MY_TEST(select_lex.table_list.first)))
{
select_lex.context.table_list=
select_lex.context.first_name_resolution_table= first->next_local;
select_lex.table_list.first= first->next_local;
select_lex.table_list.elements--; //safety
first->next_local= 0;
/*
Ensure that the global list has the same first table as the local
list.
*/
first_lists_tables_same();
}
}
return first;
}
| 0
|
226,126
|
/*
 * reftype_box_size() - compute the serialized payload size of a
 * TrackReferenceTypeBox ('tref' child box).
 *
 * The payload is one 32-bit track ID per referenced track, so
 * trackIDCount * sizeof(u32) bytes are added to the running box size.
 *
 * Fix: the original snippet was truncated and missing its closing brace.
 *
 * Always returns GF_OK.
 */
GF_Err reftype_box_size(GF_Box *s)
{
	GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s;
	if (ptr->trackIDCount)
		ptr->size += (ptr->trackIDCount * sizeof(u32));
	return GF_OK;
}
| 0
|
225,640
|
/*
 * rssr_box_del() - destructor for an 'rssr' box. The box owns no
 * additional resources, so freeing the structure itself is sufficient.
 */
void rssr_box_del(GF_Box *s)
{
gf_free(s);
}
| 0
|
443,694
|
/*
 * Validate that the byte range [s, end) forms a well-formed UTF-16BE
 * string by delegating to the generic length-based checker.
 */
is_valid_mbc_string(const UChar* s, const UChar* end)
{
return onigenc_length_check_is_valid_mbc_string(ONIG_ENCODING_UTF16_BE, s, end);
}
| 0
|
241,042
|
/*
 * Initialize the booth transports: first the configured cluster
 * transport (with message_recv as its handler), then the TCP transport.
 *
 * Returns 0 on success or the negative error from the failing init.
 */
static int setup_transport(void)
{
	int ret;

	ret = transport()->init(message_recv);
	if (ret < 0) {
		log_error("failed to init booth_transport %s", transport()->name);
		goto out;
	}

	ret = booth_transport[TCP].init(NULL);
	if (ret < 0)
		log_error("failed to init booth_transport[TCP]");

out:
	return ret;
}
| 0
|
206,946
|
/*
 * Parse a non-negative decimal integer option argument.
 * Returns 0 on success (storing the value in *out), -1 on malformed input
 * (empty string, trailing garbage, negative value, or out of int range).
 */
static int parse_int_arg(const char *s, int *out)
{
	char *end;
	long v;

	if (!s || *s == '\0')
		return -1;
	v = strtol(s, &end, 10);
	if (*end != '\0' || v < 0 || v > 2147483647L)
		return -1;
	*out = (int)v;
	return 0;
}

/*
 * Parse an unsigned decimal option argument (e.g. a byte count).
 * Returns 0 on success (storing the value in *out), -1 on malformed input.
 */
static int parse_ull_arg(const char *s, unsigned long long *out)
{
	char *end;

	if (!s || *s == '\0')
		return -1;
	*out = strtoull(s, &end, 10);
	if (*end != '\0')
		return -1;
	return 0;
}

/*
 * cmdopts_parse() - parse the jasper command line into a cmdopts_t.
 *
 * Returns a heap-allocated cmdopts_t describing the requested operation;
 * the caller owns (and must release) it. On out-of-memory the process
 * exits; on malformed usage badusage() is invoked.
 *
 * Fix: numeric option arguments (--debug-level, --cmptno,
 * --memory-limit) were previously parsed with atoi()/strtoull() with no
 * error detection, so malformed input silently became 0 — in particular
 * a garbled --memory-limit silently disabled the memory safeguard.
 * Malformed numeric arguments now produce an error via badusage().
 */
cmdopts_t *cmdopts_parse(int argc, char **argv)
{
	enum {
		CMDOPT_HELP = 0,
		CMDOPT_VERBOSE,
		CMDOPT_QUIET,
		CMDOPT_INFILE,
		CMDOPT_INFMT,
		CMDOPT_INOPT,
		CMDOPT_OUTFILE,
		CMDOPT_OUTFMT,
		CMDOPT_OUTOPT,
		CMDOPT_VERSION,
		CMDOPT_DEBUG,
		CMDOPT_CMPTNO,
		CMDOPT_SRGB,
		CMDOPT_MAXMEM,
		CMDOPT_LIST_ENABLED_CODECS,
		CMDOPT_LIST_ALL_CODECS,
		CMDOPT_ENABLE_FORMAT,
		CMDOPT_ENABLE_ALL_FORMATS,
	};
	static const jas_opt_t cmdoptions[] = {
		{CMDOPT_HELP, "help", 0},
		{CMDOPT_VERBOSE, "verbose", 0},
		{CMDOPT_QUIET, "quiet", 0},
		{CMDOPT_QUIET, "q", 0},
		{CMDOPT_INFILE, "input", JAS_OPT_HASARG},
		{CMDOPT_INFILE, "f", JAS_OPT_HASARG},
		{CMDOPT_INFMT, "input-format", JAS_OPT_HASARG},
		{CMDOPT_INFMT, "t", JAS_OPT_HASARG},
		{CMDOPT_INOPT, "input-option", JAS_OPT_HASARG},
		{CMDOPT_INOPT, "o", JAS_OPT_HASARG},
		{CMDOPT_OUTFILE, "output", JAS_OPT_HASARG},
		{CMDOPT_OUTFILE, "F", JAS_OPT_HASARG},
		{CMDOPT_OUTFMT, "output-format", JAS_OPT_HASARG},
		{CMDOPT_OUTFMT, "T", JAS_OPT_HASARG},
		{CMDOPT_OUTOPT, "output-option", JAS_OPT_HASARG},
		{CMDOPT_OUTOPT, "O", JAS_OPT_HASARG},
		{CMDOPT_VERSION, "version", 0},
		{CMDOPT_DEBUG, "debug-level", JAS_OPT_HASARG},
		{CMDOPT_CMPTNO, "cmptno", JAS_OPT_HASARG},
		{CMDOPT_SRGB, "force-srgb", 0},
		{CMDOPT_SRGB, "S", 0},
		{CMDOPT_MAXMEM, "memory-limit", JAS_OPT_HASARG},
		{CMDOPT_LIST_ENABLED_CODECS, "list-enabled-formats", 0},
		{CMDOPT_LIST_ALL_CODECS, "list-all-formats", 0},
		{CMDOPT_ENABLE_FORMAT, "enable-format", JAS_OPT_HASARG},
		{CMDOPT_ENABLE_ALL_FORMATS, "enable-all-formats", 0},
		{-1, 0, 0}
	};
	cmdopts_t *cmdopts;
	int c;

	if (!(cmdopts = malloc(sizeof(cmdopts_t)))) {
		fprintf(stderr, "error: insufficient memory\n");
		exit(EXIT_FAILURE);
	}

	/* Establish defaults before scanning the argument list. */
	cmdopts->infile = 0;
	cmdopts->infmt = -1;
	cmdopts->infmt_str = 0;
	cmdopts->inopts = 0;
	cmdopts->inoptsbuf[0] = '\0';
	cmdopts->outfile = 0;
	cmdopts->outfmt = -1;
	cmdopts->outfmt_str = 0;
	cmdopts->outopts = 0;
	cmdopts->outoptsbuf[0] = '\0';
	cmdopts->verbose = 0;
	cmdopts->version = 0;
	cmdopts->cmptno = -1;
	cmdopts->debug = 0;
	cmdopts->srgb = 0;
	cmdopts->list_codecs = 0;
	cmdopts->list_codecs_all = 0;
	cmdopts->help = 0;
	cmdopts->max_mem = get_default_max_mem_usage();
	cmdopts->enable_format = 0;
	cmdopts->enable_all_formats = 0;

	while ((c = jas_getopt(argc, argv, cmdoptions)) != EOF) {
		switch (c) {
		case CMDOPT_HELP:
			cmdopts->help = 1;
			break;
		case CMDOPT_VERBOSE:
			cmdopts->verbose = 1;
			break;
		case CMDOPT_QUIET:
			cmdopts->verbose = -1;
			break;
		case CMDOPT_VERSION:
			cmdopts->version = 1;
			break;
		case CMDOPT_LIST_ENABLED_CODECS:
			cmdopts->list_codecs = 1;
			cmdopts->list_codecs_all = 0;
			break;
		case CMDOPT_LIST_ALL_CODECS:
			cmdopts->list_codecs = 1;
			cmdopts->list_codecs_all = 1;
			break;
		case CMDOPT_DEBUG:
			/* Reject malformed levels instead of silently using 0. */
			if (parse_int_arg(jas_optarg, &cmdopts->debug)) {
				fprintf(stderr, "error: invalid debug level %s\n",
				  jas_optarg);
				badusage();
			}
			break;
		case CMDOPT_INFILE:
			cmdopts->infile = jas_optarg;
			break;
		case CMDOPT_INFMT:
			cmdopts->infmt_str = jas_optarg;
			break;
		case CMDOPT_INOPT:
			addopt(cmdopts->inoptsbuf, OPTSMAX, jas_optarg);
			cmdopts->inopts = cmdopts->inoptsbuf;
			break;
		case CMDOPT_OUTFILE:
			cmdopts->outfile = jas_optarg;
			break;
		case CMDOPT_OUTFMT:
			cmdopts->outfmt_str = jas_optarg;
			break;
		case CMDOPT_OUTOPT:
			addopt(cmdopts->outoptsbuf, OPTSMAX, jas_optarg);
			cmdopts->outopts = cmdopts->outoptsbuf;
			break;
		case CMDOPT_CMPTNO:
			/* Reject malformed component numbers. */
			if (parse_int_arg(jas_optarg, &cmdopts->cmptno)) {
				fprintf(stderr, "error: invalid component number %s\n",
				  jas_optarg);
				badusage();
			}
			break;
		case CMDOPT_SRGB:
			cmdopts->srgb = 1;
			break;
		case CMDOPT_MAXMEM:
			{
				/* A garbled limit must not silently become 0
				   (which would disable the safeguard). */
				unsigned long long max_mem;
				if (parse_ull_arg(jas_optarg, &max_mem)) {
					fprintf(stderr, "error: invalid memory limit %s\n",
					  jas_optarg);
					badusage();
				}
				cmdopts->max_mem = max_mem;
			}
			break;
		case CMDOPT_ENABLE_FORMAT:
			cmdopts->enable_format = jas_optarg;
			break;
		case CMDOPT_ENABLE_ALL_FORMATS:
			cmdopts->enable_all_formats = 1;
			break;
		default:
			badusage();
			break;
		}
	}

	/* Any remaining positional arguments are not understood. */
	while (jas_optind < argc) {
		fprintf(stderr,
		  "warning: ignoring bogus command line argument %s\n",
		  argv[jas_optind]);
		++jas_optind;
	}

	/* Informational modes need no further validation. */
	if (cmdopts->version || cmdopts->list_codecs || cmdopts->help) {
		goto done;
	}

	if (!cmdopts->outfmt_str && !cmdopts->outfile) {
		fprintf(stderr, "error: cannot determine output format\n");
		badusage();
	}

done:
	return cmdopts;
}
| 1
|
313,800
|
/*
 * Handle the backward-word Normal-mode motion: move back cap->count1
 * words via bck_word(). cap->arg selects the word variant (presumably
 * nonzero for the WORD form, i.e. "B" vs "b" — confirm against the
 * caller that sets it). The motion is character-wise and exclusive.
 * Beeps and clears any pending operator when the move fails; otherwise
 * opens folds under the cursor when 'foldopen' contains "hor".
 */
nv_bck_word(cmdarg_T *cap)
{
cap->oap->motion_type = MCHAR;
cap->oap->inclusive = FALSE;
curwin->w_set_curswant = TRUE;
if (bck_word(cap->count1, cap->arg, FALSE) == FAIL)
clearopbeep(cap->oap);
#ifdef FEAT_FOLDING
else if ((fdo_flags & FDO_HOR) && KeyTyped && cap->oap->op_type == OP_NOP)
foldOpenCursor();
#endif
}
| 0
|
333,060
|
nfa_emit_equi_class(int c)
{
#define EMIT2(c) EMIT(c); EMIT(NFA_CONCAT);
if (enc_utf8 || STRCMP(p_enc, "latin1") == 0
|| STRCMP(p_enc, "iso-8859-15") == 0)
{
#ifdef EBCDIC
# define A_circumflex 0x62
# define A_diaeresis 0x63
# define A_grave 0x64
# define A_acute 0x65
# define A_virguilla 0x66
# define A_ring 0x67
# define C_cedilla 0x68
# define E_acute 0x71
# define E_circumflex 0x72
# define E_diaeresis 0x73
# define E_grave 0x74
# define I_acute 0x75
# define I_circumflex 0x76
# define I_diaeresis 0x77
# define I_grave 0x78
# define N_virguilla 0x69
# define O_circumflex 0xeb
# define O_diaeresis 0xec
# define O_grave 0xed
# define O_acute 0xee
# define O_virguilla 0xef
# define O_slash 0x80
# define U_circumflex 0xfb
# define U_diaeresis 0xfc
# define U_grave 0xfd
# define U_acute 0xfe
# define Y_acute 0xba
# define a_grave 0x42
# define a_acute 0x43
# define a_circumflex 0x44
# define a_virguilla 0x45
# define a_diaeresis 0x46
# define a_ring 0x47
# define c_cedilla 0x48
# define e_grave 0x51
# define e_acute 0x52
# define e_circumflex 0x53
# define e_diaeresis 0x54
# define i_grave 0x55
# define i_acute 0x56
# define i_circumflex 0x57
# define i_diaeresis 0x58
# define n_virguilla 0x49
# define o_grave 0xcb
# define o_acute 0xcc
# define o_circumflex 0xcd
# define o_virguilla 0xce
# define o_diaeresis 0xcf
# define o_slash 0x70
# define u_grave 0xdb
# define u_acute 0xdc
# define u_circumflex 0xdd
# define u_diaeresis 0xde
# define y_acute 0x8d
# define y_diaeresis 0xdf
#else
# define A_grave 0xc0
# define A_acute 0xc1
# define A_circumflex 0xc2
# define A_virguilla 0xc3
# define A_diaeresis 0xc4
# define A_ring 0xc5
# define C_cedilla 0xc7
# define E_grave 0xc8
# define E_acute 0xc9
# define E_circumflex 0xca
# define E_diaeresis 0xcb
# define I_grave 0xcc
# define I_acute 0xcd
# define I_circumflex 0xce
# define I_diaeresis 0xcf
# define N_virguilla 0xd1
# define O_grave 0xd2
# define O_acute 0xd3
# define O_circumflex 0xd4
# define O_virguilla 0xd5
# define O_diaeresis 0xd6
# define O_slash 0xd8
# define U_grave 0xd9
# define U_acute 0xda
# define U_circumflex 0xdb
# define U_diaeresis 0xdc
# define Y_acute 0xdd
# define a_grave 0xe0
# define a_acute 0xe1
# define a_circumflex 0xe2
# define a_virguilla 0xe3
# define a_diaeresis 0xe4
# define a_ring 0xe5
# define c_cedilla 0xe7
# define e_grave 0xe8
# define e_acute 0xe9
# define e_circumflex 0xea
# define e_diaeresis 0xeb
# define i_grave 0xec
# define i_acute 0xed
# define i_circumflex 0xee
# define i_diaeresis 0xef
# define n_virguilla 0xf1
# define o_grave 0xf2
# define o_acute 0xf3
# define o_circumflex 0xf4
# define o_virguilla 0xf5
# define o_diaeresis 0xf6
# define o_slash 0xf8
# define u_grave 0xf9
# define u_acute 0xfa
# define u_circumflex 0xfb
# define u_diaeresis 0xfc
# define y_acute 0xfd
# define y_diaeresis 0xff
#endif
switch (c)
{
case 'A': case A_grave: case A_acute: case A_circumflex:
case A_virguilla: case A_diaeresis: case A_ring:
case 0x100: case 0x102: case 0x104: case 0x1cd:
case 0x1de: case 0x1e0: case 0x1fa: case 0x200:
case 0x202: case 0x226: case 0x23a: case 0x1e00:
case 0x1ea0: case 0x1ea2: case 0x1ea4: case 0x1ea6:
case 0x1ea8: case 0x1eaa: case 0x1eac: case 0x1eae:
case 0x1eb0: case 0x1eb2: case 0x1eb4: case 0x1eb6:
EMIT2('A') EMIT2(A_grave) EMIT2(A_acute)
EMIT2(A_circumflex) EMIT2(A_virguilla)
EMIT2(A_diaeresis) EMIT2(A_ring)
EMIT2(0x100) EMIT2(0x102) EMIT2(0x104)
EMIT2(0x1cd) EMIT2(0x1de) EMIT2(0x1e0)
EMIT2(0x1fa) EMIT2(0x200) EMIT2(0x202)
EMIT2(0x226) EMIT2(0x23a) EMIT2(0x1e00)
EMIT2(0x1ea0) EMIT2(0x1ea2) EMIT2(0x1ea4)
EMIT2(0x1ea6) EMIT2(0x1ea8) EMIT2(0x1eaa)
EMIT2(0x1eac) EMIT2(0x1eae) EMIT2(0x1eb0)
EMIT2(0x1eb2) EMIT2(0x1eb6) EMIT2(0x1eb4)
return OK;
case 'B': case 0x181: case 0x243: case 0x1e02:
case 0x1e04: case 0x1e06:
EMIT2('B')
EMIT2(0x181) EMIT2(0x243) EMIT2(0x1e02)
EMIT2(0x1e04) EMIT2(0x1e06)
return OK;
case 'C': case C_cedilla: case 0x106: case 0x108:
case 0x10a: case 0x10c: case 0x187: case 0x23b:
case 0x1e08: case 0xa792:
EMIT2('C') EMIT2(C_cedilla)
EMIT2(0x106) EMIT2(0x108) EMIT2(0x10a)
EMIT2(0x10c) EMIT2(0x187) EMIT2(0x23b)
EMIT2(0x1e08) EMIT2(0xa792)
return OK;
case 'D': case 0x10e: case 0x110: case 0x18a:
case 0x1e0a: case 0x1e0c: case 0x1e0e: case 0x1e10:
case 0x1e12:
EMIT2('D') EMIT2(0x10e) EMIT2(0x110) EMIT2(0x18a)
EMIT2(0x1e0a) EMIT2(0x1e0c) EMIT2(0x1e0e)
EMIT2(0x1e10) EMIT2(0x1e12)
return OK;
case 'E': case E_grave: case E_acute: case E_circumflex:
case E_diaeresis: case 0x112: case 0x114: case 0x116:
case 0x118: case 0x11a: case 0x204: case 0x206:
case 0x228: case 0x246: case 0x1e14: case 0x1e16:
case 0x1e18: case 0x1e1a: case 0x1e1c: case 0x1eb8:
case 0x1eba: case 0x1ebc: case 0x1ebe: case 0x1ec0:
case 0x1ec2: case 0x1ec4: case 0x1ec6:
EMIT2('E') EMIT2(E_grave) EMIT2(E_acute)
EMIT2(E_circumflex) EMIT2(E_diaeresis)
EMIT2(0x112) EMIT2(0x114) EMIT2(0x116)
EMIT2(0x118) EMIT2(0x11a) EMIT2(0x204)
EMIT2(0x206) EMIT2(0x228) EMIT2(0x246)
EMIT2(0x1e14) EMIT2(0x1e16) EMIT2(0x1e18)
EMIT2(0x1e1a) EMIT2(0x1e1c) EMIT2(0x1eb8)
EMIT2(0x1eba) EMIT2(0x1ebc) EMIT2(0x1ebe)
EMIT2(0x1ec0) EMIT2(0x1ec2) EMIT2(0x1ec4)
EMIT2(0x1ec6)
return OK;
case 'F': case 0x191: case 0x1e1e: case 0xa798:
EMIT2('F') EMIT2(0x191) EMIT2(0x1e1e) EMIT2(0xa798)
return OK;
case 'G': case 0x11c: case 0x11e: case 0x120:
case 0x122: case 0x193: case 0x1e4: case 0x1e6:
case 0x1f4: case 0x1e20: case 0xa7a0:
EMIT2('G') EMIT2(0x11c) EMIT2(0x11e) EMIT2(0x120)
EMIT2(0x122) EMIT2(0x193) EMIT2(0x1e4)
EMIT2(0x1e6) EMIT2(0x1f4) EMIT2(0x1e20)
EMIT2(0xa7a0)
return OK;
case 'H': case 0x124: case 0x126: case 0x21e:
case 0x1e22: case 0x1e24: case 0x1e26: case 0x1e28:
case 0x1e2a: case 0x2c67:
EMIT2('H') EMIT2(0x124) EMIT2(0x126) EMIT2(0x21e)
EMIT2(0x1e22) EMIT2(0x1e24) EMIT2(0x1e26)
EMIT2(0x1e28) EMIT2(0x1e2a) EMIT2(0x2c67)
return OK;
case 'I': case I_grave: case I_acute: case I_circumflex:
case I_diaeresis: case 0x128: case 0x12a: case 0x12c:
case 0x12e: case 0x130: case 0x197: case 0x1cf:
case 0x208: case 0x20a: case 0x1e2c: case 0x1e2e:
case 0x1ec8: case 0x1eca:
EMIT2('I') EMIT2(I_grave) EMIT2(I_acute)
EMIT2(I_circumflex) EMIT2(I_diaeresis)
EMIT2(0x128) EMIT2(0x12a) EMIT2(0x12c)
EMIT2(0x12e) EMIT2(0x130) EMIT2(0x197)
EMIT2(0x1cf) EMIT2(0x208) EMIT2(0x20a)
EMIT2(0x1e2c) EMIT2(0x1e2e) EMIT2(0x1ec8)
EMIT2(0x1eca)
return OK;
case 'J': case 0x134: case 0x248:
EMIT2('J') EMIT2(0x134) EMIT2(0x248)
return OK;
case 'K': case 0x136: case 0x198: case 0x1e8: case 0x1e30:
case 0x1e32: case 0x1e34: case 0x2c69: case 0xa740:
EMIT2('K') EMIT2(0x136) EMIT2(0x198) EMIT2(0x1e8)
EMIT2(0x1e30) EMIT2(0x1e32) EMIT2(0x1e34)
EMIT2(0x2c69) EMIT2(0xa740)
return OK;
case 'L': case 0x139: case 0x13b: case 0x13d:
case 0x13f: case 0x141: case 0x23d: case 0x1e36:
case 0x1e38: case 0x1e3a: case 0x1e3c: case 0x2c60:
EMIT2('L') EMIT2(0x139) EMIT2(0x13b)
EMIT2(0x13d) EMIT2(0x13f) EMIT2(0x141)
EMIT2(0x23d) EMIT2(0x1e36) EMIT2(0x1e38)
EMIT2(0x1e3a) EMIT2(0x1e3c) EMIT2(0x2c60)
return OK;
case 'M': case 0x1e3e: case 0x1e40: case 0x1e42:
EMIT2('M') EMIT2(0x1e3e) EMIT2(0x1e40)
EMIT2(0x1e42)
return OK;
case 'N': case N_virguilla:
case 0x143: case 0x145: case 0x147: case 0x1f8:
case 0x1e44: case 0x1e46: case 0x1e48: case 0x1e4a:
case 0xa7a4:
EMIT2('N') EMIT2(N_virguilla)
EMIT2(0x143) EMIT2(0x145) EMIT2(0x147)
EMIT2(0x1f8) EMIT2(0x1e44) EMIT2(0x1e46)
EMIT2(0x1e48) EMIT2(0x1e4a) EMIT2(0xa7a4)
return OK;
case 'O': case O_grave: case O_acute: case O_circumflex:
case O_virguilla: case O_diaeresis: case O_slash:
case 0x14c: case 0x14e: case 0x150: case 0x19f:
case 0x1a0: case 0x1d1: case 0x1ea: case 0x1ec:
case 0x1fe: case 0x20c: case 0x20e: case 0x22a:
case 0x22c: case 0x22e: case 0x230: case 0x1e4c:
case 0x1e4e: case 0x1e50: case 0x1e52: case 0x1ecc:
case 0x1ece: case 0x1ed0: case 0x1ed2: case 0x1ed4:
case 0x1ed6: case 0x1ed8: case 0x1eda: case 0x1edc:
case 0x1ede: case 0x1ee0: case 0x1ee2:
EMIT2('O') EMIT2(O_grave) EMIT2(O_acute)
EMIT2(O_circumflex) EMIT2(O_virguilla)
EMIT2(O_diaeresis) EMIT2(O_slash)
EMIT2(0x14c) EMIT2(0x14e) EMIT2(0x150)
EMIT2(0x19f) EMIT2(0x1a0) EMIT2(0x1d1)
EMIT2(0x1ea) EMIT2(0x1ec) EMIT2(0x1fe)
EMIT2(0x20c) EMIT2(0x20e) EMIT2(0x22a)
EMIT2(0x22c) EMIT2(0x22e) EMIT2(0x230)
EMIT2(0x1e4c) EMIT2(0x1e4e) EMIT2(0x1e50)
EMIT2(0x1e52) EMIT2(0x1ecc) EMIT2(0x1ece)
EMIT2(0x1ed0) EMIT2(0x1ed2) EMIT2(0x1ed4)
EMIT2(0x1ed6) EMIT2(0x1ed8) EMIT2(0x1eda)
EMIT2(0x1edc) EMIT2(0x1ede) EMIT2(0x1ee0)
EMIT2(0x1ee2)
return OK;
case 'P': case 0x1a4: case 0x1e54: case 0x1e56: case 0x2c63:
EMIT2('P') EMIT2(0x1a4) EMIT2(0x1e54) EMIT2(0x1e56)
EMIT2(0x2c63)
return OK;
case 'Q': case 0x24a:
EMIT2('Q') EMIT2(0x24a)
return OK;
case 'R': case 0x154: case 0x156: case 0x158: case 0x210:
case 0x212: case 0x24c: case 0x1e58: case 0x1e5a:
case 0x1e5c: case 0x1e5e: case 0x2c64: case 0xa7a6:
EMIT2('R') EMIT2(0x154) EMIT2(0x156) EMIT2(0x158)
EMIT2(0x210) EMIT2(0x212) EMIT2(0x24c) EMIT2(0x1e58)
EMIT2(0x1e5a) EMIT2(0x1e5c) EMIT2(0x1e5e) EMIT2(0x2c64)
EMIT2(0xa7a6)
return OK;
case 'S': case 0x15a: case 0x15c: case 0x15e: case 0x160:
case 0x218: case 0x1e60: case 0x1e62: case 0x1e64:
case 0x1e66: case 0x1e68: case 0x2c7e: case 0xa7a8:
EMIT2('S') EMIT2(0x15a) EMIT2(0x15c) EMIT2(0x15e)
EMIT2(0x160) EMIT2(0x218) EMIT2(0x1e60) EMIT2(0x1e62)
EMIT2(0x1e64) EMIT2(0x1e66) EMIT2(0x1e68) EMIT2(0x2c7e)
EMIT2(0xa7a8)
return OK;
case 'T': case 0x162: case 0x164: case 0x166: case 0x1ac:
case 0x1ae: case 0x21a: case 0x23e: case 0x1e6a: case 0x1e6c:
case 0x1e6e: case 0x1e70:
EMIT2('T') EMIT2(0x162) EMIT2(0x164) EMIT2(0x166)
EMIT2(0x1ac) EMIT2(0x1ae) EMIT2(0x23e) EMIT2(0x21a)
EMIT2(0x1e6a) EMIT2(0x1e6c) EMIT2(0x1e6e) EMIT2(0x1e70)
return OK;
case 'U': case U_grave: case U_acute: case U_diaeresis:
case U_circumflex: case 0x168: case 0x16a: case 0x16c:
case 0x16e: case 0x170: case 0x172: case 0x1af:
case 0x1d3: case 0x1d5: case 0x1d7: case 0x1d9:
case 0x1db: case 0x214: case 0x216: case 0x244:
case 0x1e72: case 0x1e74: case 0x1e76: case 0x1e78:
case 0x1e7a: case 0x1ee4: case 0x1ee6: case 0x1ee8:
case 0x1eea: case 0x1eec: case 0x1eee: case 0x1ef0:
EMIT2('U') EMIT2(U_grave) EMIT2(U_acute)
EMIT2(U_diaeresis) EMIT2(U_circumflex)
EMIT2(0x168) EMIT2(0x16a)
EMIT2(0x16c) EMIT2(0x16e) EMIT2(0x170)
EMIT2(0x172) EMIT2(0x1af) EMIT2(0x1d3)
EMIT2(0x1d5) EMIT2(0x1d7) EMIT2(0x1d9)
EMIT2(0x1db) EMIT2(0x214) EMIT2(0x216)
EMIT2(0x244) EMIT2(0x1e72) EMIT2(0x1e74)
EMIT2(0x1e76) EMIT2(0x1e78) EMIT2(0x1e7a)
EMIT2(0x1ee4) EMIT2(0x1ee6) EMIT2(0x1ee8)
EMIT2(0x1eea) EMIT2(0x1eec) EMIT2(0x1eee)
EMIT2(0x1ef0)
return OK;
case 'V': case 0x1b2: case 0x1e7c: case 0x1e7e:
EMIT2('V') EMIT2(0x1b2) EMIT2(0x1e7c) EMIT2(0x1e7e)
return OK;
case 'W': case 0x174: case 0x1e80: case 0x1e82: case 0x1e84:
case 0x1e86: case 0x1e88:
EMIT2('W') EMIT2(0x174) EMIT2(0x1e80) EMIT2(0x1e82)
EMIT2(0x1e84) EMIT2(0x1e86) EMIT2(0x1e88)
return OK;
case 'X': case 0x1e8a: case 0x1e8c:
EMIT2('X') EMIT2(0x1e8a) EMIT2(0x1e8c)
return OK;
case 'Y': case Y_acute: case 0x176: case 0x178:
case 0x1b3: case 0x232: case 0x24e: case 0x1e8e:
case 0x1ef2: case 0x1ef4: case 0x1ef6: case 0x1ef8:
EMIT2('Y') EMIT2(Y_acute)
EMIT2(0x176) EMIT2(0x178) EMIT2(0x1b3)
EMIT2(0x232) EMIT2(0x24e) EMIT2(0x1e8e)
EMIT2(0x1ef2) EMIT2(0x1ef4) EMIT2(0x1ef6)
EMIT2(0x1ef8)
return OK;
case 'Z': case 0x179: case 0x17b: case 0x17d:
case 0x1b5: case 0x1e90: case 0x1e92: case 0x1e94:
case 0x2c6b:
EMIT2('Z') EMIT2(0x179) EMIT2(0x17b) EMIT2(0x17d)
EMIT2(0x1b5) EMIT2(0x1e90) EMIT2(0x1e92)
EMIT2(0x1e94) EMIT2(0x2c6b)
return OK;
case 'a': case a_grave: case a_acute: case a_circumflex:
case a_virguilla: case a_diaeresis: case a_ring:
case 0x101: case 0x103: case 0x105: case 0x1ce:
case 0x1df: case 0x1e1: case 0x1fb: case 0x201:
case 0x203: case 0x227: case 0x1d8f: case 0x1e01:
case 0x1e9a: case 0x1ea1: case 0x1ea3: case 0x1ea5:
case 0x1ea7: case 0x1ea9: case 0x1eab: case 0x1ead:
case 0x1eaf: case 0x1eb1: case 0x1eb3: case 0x1eb5:
case 0x1eb7: case 0x2c65:
EMIT2('a') EMIT2(a_grave) EMIT2(a_acute)
EMIT2(a_circumflex) EMIT2(a_virguilla)
EMIT2(a_diaeresis) EMIT2(a_ring)
EMIT2(0x101) EMIT2(0x103) EMIT2(0x105)
EMIT2(0x1ce) EMIT2(0x1df) EMIT2(0x1e1)
EMIT2(0x1fb) EMIT2(0x201) EMIT2(0x203)
EMIT2(0x227) EMIT2(0x1d8f) EMIT2(0x1e01)
EMIT2(0x1e9a) EMIT2(0x1ea1) EMIT2(0x1ea3)
EMIT2(0x1ea5) EMIT2(0x1ea7) EMIT2(0x1ea9)
EMIT2(0x1eab) EMIT2(0x1ead) EMIT2(0x1eaf)
EMIT2(0x1eb1) EMIT2(0x1eb3) EMIT2(0x1eb5)
EMIT2(0x1eb7) EMIT2(0x2c65)
return OK;
case 'b': case 0x180: case 0x253: case 0x1d6c: case 0x1d80:
case 0x1e03: case 0x1e05: case 0x1e07:
EMIT2('b') EMIT2(0x180) EMIT2(0x253) EMIT2(0x1d6c)
EMIT2(0x1d80) EMIT2(0x1e03) EMIT2(0x1e05) EMIT2(0x1e07)
return OK;
case 'c': case c_cedilla: case 0x107: case 0x109: case 0x10b:
case 0x10d: case 0x188: case 0x23c: case 0x1e09: case 0xa793:
case 0xa794:
EMIT2('c') EMIT2(c_cedilla)
EMIT2(0x107) EMIT2(0x109) EMIT2(0x10b)
EMIT2(0x10d) EMIT2(0x188) EMIT2(0x23c)
EMIT2(0x1e09) EMIT2(0xa793) EMIT2(0xa794)
return OK;
case 'd': case 0x10f: case 0x111: case 0x257: case 0x1d6d:
case 0x1d81: case 0x1d91: case 0x1e0b: case 0x1e0d: case 0x1e0f:
case 0x1e11: case 0x1e13:
EMIT2('d') EMIT2(0x10f) EMIT2(0x111)
EMIT2(0x257) EMIT2(0x1d6d) EMIT2(0x1d81)
EMIT2(0x1d91) EMIT2(0x1e0b) EMIT2(0x1e0d)
EMIT2(0x1e0f) EMIT2(0x1e11) EMIT2(0x1e13)
return OK;
case 'e': case e_grave: case e_acute: case e_circumflex:
case e_diaeresis: case 0x113: case 0x115: case 0x117:
case 0x119: case 0x11b: case 0x205: case 0x207:
case 0x229: case 0x247: case 0x1d92: case 0x1e15:
case 0x1e17: case 0x1e19: case 0x1e1b: case 0x1e1d:
case 0x1eb9: case 0x1ebb: case 0x1ebd: case 0x1ebf:
case 0x1ec1: case 0x1ec3: case 0x1ec5: case 0x1ec7:
EMIT2('e') EMIT2(e_grave) EMIT2(e_acute)
EMIT2(e_circumflex) EMIT2(e_diaeresis)
EMIT2(0x113) EMIT2(0x115)
EMIT2(0x117) EMIT2(0x119) EMIT2(0x11b)
EMIT2(0x205) EMIT2(0x207) EMIT2(0x229)
EMIT2(0x247) EMIT2(0x1d92) EMIT2(0x1e15)
EMIT2(0x1e17) EMIT2(0x1e19) EMIT2(0x1e1b)
EMIT2(0x1e1d) EMIT2(0x1eb9) EMIT2(0x1ebb)
EMIT2(0x1ebd) EMIT2(0x1ebf) EMIT2(0x1ec1)
EMIT2(0x1ec3) EMIT2(0x1ec5) EMIT2(0x1ec7)
return OK;
case 'f': case 0x192: case 0x1d6e: case 0x1d82:
case 0x1e1f: case 0xa799:
EMIT2('f') EMIT2(0x192) EMIT2(0x1d6e) EMIT2(0x1d82)
EMIT2(0x1e1f) EMIT2(0xa799)
return OK;
case 'g': case 0x11d: case 0x11f: case 0x121: case 0x123:
case 0x1e5: case 0x1e7: case 0x1f5: case 0x260: case 0x1d83:
case 0x1e21: case 0xa7a1:
EMIT2('g') EMIT2(0x11d) EMIT2(0x11f) EMIT2(0x121)
EMIT2(0x123) EMIT2(0x1e5) EMIT2(0x1e7)
EMIT2(0x1f5) EMIT2(0x260) EMIT2(0x1d83)
EMIT2(0x1e21) EMIT2(0xa7a1)
return OK;
case 'h': case 0x125: case 0x127: case 0x21f: case 0x1e23:
case 0x1e25: case 0x1e27: case 0x1e29: case 0x1e2b:
case 0x1e96: case 0x2c68: case 0xa795:
EMIT2('h') EMIT2(0x125) EMIT2(0x127) EMIT2(0x21f)
EMIT2(0x1e23) EMIT2(0x1e25) EMIT2(0x1e27)
EMIT2(0x1e29) EMIT2(0x1e2b) EMIT2(0x1e96)
EMIT2(0x2c68) EMIT2(0xa795)
return OK;
case 'i': case i_grave: case i_acute: case i_circumflex:
case i_diaeresis: case 0x129: case 0x12b: case 0x12d:
case 0x12f: case 0x1d0: case 0x209: case 0x20b:
case 0x268: case 0x1d96: case 0x1e2d: case 0x1e2f:
case 0x1ec9: case 0x1ecb:
EMIT2('i') EMIT2(i_grave) EMIT2(i_acute)
EMIT2(i_circumflex) EMIT2(i_diaeresis)
EMIT2(0x129) EMIT2(0x12b) EMIT2(0x12d)
EMIT2(0x12f) EMIT2(0x1d0) EMIT2(0x209)
EMIT2(0x20b) EMIT2(0x268) EMIT2(0x1d96)
EMIT2(0x1e2d) EMIT2(0x1e2f) EMIT2(0x1ec9)
EMIT2(0x1ecb) EMIT2(0x1ecb)
return OK;
case 'j': case 0x135: case 0x1f0: case 0x249:
EMIT2('j') EMIT2(0x135) EMIT2(0x1f0) EMIT2(0x249)
return OK;
case 'k': case 0x137: case 0x199: case 0x1e9: case 0x1d84:
case 0x1e31: case 0x1e33: case 0x1e35: case 0x2c6a: case 0xa741:
EMIT2('k') EMIT2(0x137) EMIT2(0x199) EMIT2(0x1e9)
EMIT2(0x1d84) EMIT2(0x1e31) EMIT2(0x1e33)
EMIT2(0x1e35) EMIT2(0x2c6a) EMIT2(0xa741)
return OK;
case 'l': case 0x13a: case 0x13c: case 0x13e: case 0x140:
case 0x142: case 0x19a: case 0x1e37: case 0x1e39: case 0x1e3b:
case 0x1e3d: case 0x2c61:
EMIT2('l') EMIT2(0x13a) EMIT2(0x13c)
EMIT2(0x13e) EMIT2(0x140) EMIT2(0x142)
EMIT2(0x19a) EMIT2(0x1e37) EMIT2(0x1e39)
EMIT2(0x1e3b) EMIT2(0x1e3d) EMIT2(0x2c61)
return OK;
case 'm': case 0x1d6f: case 0x1e3f: case 0x1e41: case 0x1e43:
EMIT2('m') EMIT2(0x1d6f) EMIT2(0x1e3f)
EMIT2(0x1e41) EMIT2(0x1e43)
return OK;
case 'n': case n_virguilla: case 0x144: case 0x146: case 0x148:
case 0x149: case 0x1f9: case 0x1d70: case 0x1d87: case 0x1e45:
case 0x1e47: case 0x1e49: case 0x1e4b: case 0xa7a5:
EMIT2('n') EMIT2(n_virguilla)
EMIT2(0x144) EMIT2(0x146) EMIT2(0x148)
EMIT2(0x149) EMIT2(0x1f9) EMIT2(0x1d70)
EMIT2(0x1d87) EMIT2(0x1e45) EMIT2(0x1e47)
EMIT2(0x1e49) EMIT2(0x1e4b) EMIT2(0xa7a5)
return OK;
case 'o': case o_grave: case o_acute: case o_circumflex:
case o_virguilla: case o_diaeresis: case o_slash:
case 0x14d: case 0x14f: case 0x151: case 0x1a1:
case 0x1d2: case 0x1eb: case 0x1ed: case 0x1ff:
case 0x20d: case 0x20f: case 0x22b: case 0x22d:
case 0x22f: case 0x231: case 0x275: case 0x1e4d:
case 0x1e4f: case 0x1e51: case 0x1e53: case 0x1ecd:
case 0x1ecf: case 0x1ed1: case 0x1ed3: case 0x1ed5:
case 0x1ed7: case 0x1ed9: case 0x1edb: case 0x1edd:
case 0x1edf: case 0x1ee1: case 0x1ee3:
EMIT2('o') EMIT2(o_grave) EMIT2(o_acute)
EMIT2(o_circumflex) EMIT2(o_virguilla)
EMIT2(o_diaeresis) EMIT2(o_slash)
EMIT2(0x14d) EMIT2(0x14f) EMIT2(0x151)
EMIT2(0x1a1) EMIT2(0x1d2) EMIT2(0x1eb)
EMIT2(0x1ed) EMIT2(0x1ff) EMIT2(0x20d)
EMIT2(0x20f) EMIT2(0x22b) EMIT2(0x22d)
EMIT2(0x22f) EMIT2(0x231) EMIT2(0x275)
EMIT2(0x1e4d) EMIT2(0x1e4f) EMIT2(0x1e51)
EMIT2(0x1e53) EMIT2(0x1ecd) EMIT2(0x1ecf)
EMIT2(0x1ed1) EMIT2(0x1ed3) EMIT2(0x1ed5)
EMIT2(0x1ed7) EMIT2(0x1ed9) EMIT2(0x1edb)
EMIT2(0x1edd) EMIT2(0x1edf) EMIT2(0x1ee1)
EMIT2(0x1ee3)
return OK;
case 'p': case 0x1a5: case 0x1d71: case 0x1d7d: case 0x1d88:
case 0x1e55: case 0x1e57:
EMIT2('p') EMIT2(0x1a5) EMIT2(0x1d71) EMIT2(0x1d7d)
EMIT2(0x1d88) EMIT2(0x1e55) EMIT2(0x1e57)
return OK;
case 'q': case 0x24b: case 0x2a0:
EMIT2('q') EMIT2(0x24b) EMIT2(0x2a0)
return OK;
case 'r': case 0x155: case 0x157: case 0x159: case 0x211:
case 0x213: case 0x24d: case 0x27d: case 0x1d72: case 0x1d73:
case 0x1d89: case 0x1e59: case 0x1e5b: case 0x1e5d: case 0x1e5f:
case 0xa7a7:
EMIT2('r') EMIT2(0x155) EMIT2(0x157) EMIT2(0x159)
EMIT2(0x211) EMIT2(0x213) EMIT2(0x24d) EMIT2(0x27d)
EMIT2(0x1d72) EMIT2(0x1d73) EMIT2(0x1d89) EMIT2(0x1e59)
EMIT2(0x1e5b) EMIT2(0x1e5d) EMIT2(0x1e5f) EMIT2(0xa7a7)
return OK;
case 's': case 0x15b: case 0x15d: case 0x15f: case 0x161:
case 0x219: case 0x23f: case 0x1d74: case 0x1d8a: case 0x1e61:
case 0x1e63: case 0x1e65: case 0x1e67: case 0x1e69: case 0xa7a9:
EMIT2('s') EMIT2(0x15b) EMIT2(0x15d) EMIT2(0x15f)
EMIT2(0x161) EMIT2(0x219) EMIT2(0x23f) EMIT2(0x1d74)
EMIT2(0x1d8a) EMIT2(0x1e61) EMIT2(0x1e63) EMIT2(0x1e65)
EMIT2(0x1e67) EMIT2(0x1e69) EMIT2(0xa7a9)
return OK;
case 't': case 0x163: case 0x165: case 0x167: case 0x1ab:
case 0x1ad: case 0x21b: case 0x288: case 0x1d75: case 0x1e6b:
case 0x1e6d: case 0x1e6f: case 0x1e71: case 0x1e97: case 0x2c66:
EMIT2('t') EMIT2(0x163) EMIT2(0x165) EMIT2(0x167)
EMIT2(0x1ab) EMIT2(0x1ad) EMIT2(0x21b) EMIT2(0x288)
EMIT2(0x1d75) EMIT2(0x1e6b) EMIT2(0x1e6d) EMIT2(0x1e6f)
EMIT2(0x1e71) EMIT2(0x1e97) EMIT2(0x2c66)
return OK;
case 'u': case u_grave: case u_acute: case u_circumflex:
case u_diaeresis: case 0x169: case 0x16b: case 0x16d:
case 0x16f: case 0x171: case 0x173: case 0x1b0: case 0x1d4:
case 0x1d6: case 0x1d8: case 0x1da: case 0x1dc: case 0x215:
case 0x217: case 0x289: case 0x1d7e: case 0x1d99: case 0x1e73:
case 0x1e75: case 0x1e77: case 0x1e79: case 0x1e7b:
case 0x1ee5: case 0x1ee7: case 0x1ee9: case 0x1eeb:
case 0x1eed: case 0x1eef: case 0x1ef1:
EMIT2('u') EMIT2(u_grave) EMIT2(u_acute)
EMIT2(u_circumflex) EMIT2(u_diaeresis)
EMIT2(0x169) EMIT2(0x16b)
EMIT2(0x16d) EMIT2(0x16f) EMIT2(0x171)
EMIT2(0x173) EMIT2(0x1d6) EMIT2(0x1d8)
EMIT2(0x215) EMIT2(0x217) EMIT2(0x1b0)
EMIT2(0x1d4) EMIT2(0x1da) EMIT2(0x1dc)
EMIT2(0x289) EMIT2(0x1e73) EMIT2(0x1d7e)
EMIT2(0x1d99) EMIT2(0x1e75) EMIT2(0x1e77)
EMIT2(0x1e79) EMIT2(0x1e7b) EMIT2(0x1ee5)
EMIT2(0x1ee7) EMIT2(0x1ee9) EMIT2(0x1eeb)
EMIT2(0x1eed) EMIT2(0x1eef) EMIT2(0x1ef1)
return OK;
case 'v': case 0x28b: case 0x1d8c: case 0x1e7d: case 0x1e7f:
EMIT2('v') EMIT2(0x28b) EMIT2(0x1d8c) EMIT2(0x1e7d)
EMIT2(0x1e7f)
return OK;
case 'w': case 0x175: case 0x1e81: case 0x1e83: case 0x1e85:
case 0x1e87: case 0x1e89: case 0x1e98:
EMIT2('w') EMIT2(0x175) EMIT2(0x1e81) EMIT2(0x1e83)
EMIT2(0x1e85) EMIT2(0x1e87) EMIT2(0x1e89) EMIT2(0x1e98)
return OK;
case 'x': case 0x1e8b: case 0x1e8d:
EMIT2('x') EMIT2(0x1e8b) EMIT2(0x1e8d)
return OK;
case 'y': case y_acute: case y_diaeresis: case 0x177:
case 0x1b4: case 0x233: case 0x24f: case 0x1e8f:
case 0x1e99: case 0x1ef3: case 0x1ef5: case 0x1ef7:
case 0x1ef9:
EMIT2('y') EMIT2(y_acute) EMIT2(y_diaeresis)
EMIT2(0x177) EMIT2(0x1b4) EMIT2(0x233) EMIT2(0x24f)
EMIT2(0x1e8f) EMIT2(0x1e99) EMIT2(0x1ef3)
EMIT2(0x1ef5) EMIT2(0x1ef7) EMIT2(0x1ef9)
return OK;
case 'z': case 0x17a: case 0x17c: case 0x17e: case 0x1b6:
case 0x1d76: case 0x1d8e: case 0x1e91: case 0x1e93:
case 0x1e95: case 0x2c6c:
EMIT2('z') EMIT2(0x17a) EMIT2(0x17c) EMIT2(0x17e)
EMIT2(0x1b6) EMIT2(0x1d76) EMIT2(0x1d8e) EMIT2(0x1e91)
EMIT2(0x1e93) EMIT2(0x1e95) EMIT2(0x2c6c)
return OK;
// default: character itself
}
}
EMIT2(c);
return OK;
#undef EMIT2
#undef EMIT2
}
| 0
|
462,435
|
DataRcvd(ptcpsess_t *pThis, char *pData, size_t iLen)
{
	struct syslogTime stTime;
	DEFiRet;

	/* Account the received bytes against the listener's statistics
	 * counter before dispatching the payload. */
	pThis->pLstn->rcvdBytes += iLen;

	/* Route the buffer to the plain or the decompressing receive path,
	 * depending on the session's configured compression mode. */
	if(pThis->compressionMode < COMPRESS_STREAM_ALWAYS) {
		iRet = DataRcvdUncompressed(pThis, pData, iLen, &stTime, 0);
	} else {
		iRet = DataRcvdCompressed(pThis, pData, iLen);
	}
	RETiRet;
}
| 0
|
299,981
|
static void __exit elo_driver_exit(void)
{
	/*
	 * Module unload path: drop the HID driver registration first, then
	 * tear down the private workqueue.  The ordering matters --
	 * unregistering first ensures the driver can no longer queue new
	 * work onto wq before it is destroyed.
	 * NOTE(review): assumes wq was created during module init and that
	 * destroy_workqueue() drains any still-pending work -- confirm
	 * against the kernel workqueue documentation for this kernel rev.
	 */
	hid_unregister_driver(&elo_driver);
	destroy_workqueue(wq);
}
| 0
|
344,816
|
strdelim_internal(char **s, int split_equals)
{
char *old;
int wspace = 0;
/*
 * Tokenizer: returns the next token from *s (mutating the buffer in
 * place by writing NUL terminators) and advances *s past it.  Tokens
 * are separated by WHITESPACE/QUOTE characters; when split_equals is
 * non-zero, a single '=' may also act as (or accompany) a separator.
 * Double-quoted tokens are returned with the quotes stripped.
 * Returns NULL at end of input or on an unterminated quote.
 */
if (*s == NULL)
return NULL;
old = *s;
/* Find the first delimiter; '=' only counts when split_equals is set. */
*s = strpbrk(*s,
    split_equals ? WHITESPACE QUOTE "=" : WHITESPACE QUOTE);
if (*s == NULL)
return (old);		/* last token: no delimiter after it */
if (*s[0] == '\"') {
/* Quoted token: shift the string left over the opening quote
 * (moving the trailing NUL too), then NUL out the closing quote. */
memmove(*s, *s + 1, strlen(*s)); /* move nul too */
/* Find matching quote */
if ((*s = strpbrk(*s, QUOTE)) == NULL) {
return (NULL); /* no matching quote */
} else {
*s[0] = '\0';
*s += strspn(*s + 1, WHITESPACE) + 1;
return (old);
}
}
/* Allow only one '=' to be skipped */
if (split_equals && *s[0] == '=')
wspace = 1;
*s[0] = '\0';		/* terminate the token at the delimiter */
/* Skip any extra whitespace after first token */
*s += strspn(*s + 1, WHITESPACE) + 1;
/* "key = value": step over one '=' unless the delimiter itself was '='. */
if (split_equals && *s[0] == '=' && !wspace)
*s += strspn(*s + 1, WHITESPACE) + 1;
return (old);
}
| 0
|
273,404
|
void Compute(OpKernelContext* ctx) override {
// Backward pass for the fused block LSTM: consumes the forward pass's
// saved per-timestep activations (i, cs, f, o, ci, co, h) plus the
// incoming cs/h gradients, and produces gradients for x, the initial
// cs/h, the combined weight matrix w, the peephole vectors and the bias.
// Time steps are replayed in reverse order.
const Tensor* seq_len_max_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("seq_len_max", &seq_len_max_tensor));
const Tensor* x;
OP_REQUIRES_OK(ctx, ctx->input("x", &x));
OP_REQUIRES(ctx, x->dims() == 3, errors::InvalidArgument("x must be 3D"));
// x layout is [time, batch, input] per the dim reads below.
const int64_t timelen = x->dim_size(0);
const int64_t batch_size = x->dim_size(1);
const int64_t input_size = x->dim_size(2);
const Tensor* cs_prev_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("cs_prev", &cs_prev_tensor));
const Tensor* h_prev_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("h_prev", &h_prev_tensor));
const Tensor* w_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("w", &w_tensor));
// w packs the four gate weight blocks side by side, hence the /4.
const int64_t cell_size = w_tensor->dim_size(1) / 4;
OP_REQUIRES(ctx, input_size + cell_size == w_tensor->dim_size(0),
            errors::InvalidArgument(
                "w matrix rows don't match: ", input_size + cell_size,
                " vs. ", w_tensor->dim_size(0)));
const Tensor* wci_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wci", &wci_tensor));
const Tensor* wcf_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wcf", &wcf_tensor));
const Tensor* wco_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wco", &wco_tensor));
const Tensor* b_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("b", &b_tensor));
OP_REQUIRES(
    ctx, cell_size == b_tensor->dim_size(0) / 4,
    errors::InvalidArgument("w and b cell_size don't match: ", cell_size,
                            " vs. ", b_tensor->dim_size(0)));
// Forward-pass activations saved for reuse in backprop.
const Tensor* i_out = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("i", &i_out));
const Tensor* cs_out = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("cs", &cs_out));
const Tensor* f_out = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("f", &f_out));
const Tensor* o_out = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("o", &o_out));
const Tensor* ci_out = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("ci", &ci_out));
const Tensor* co_out = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("co", &co_out));
const Tensor* h_out = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("h", &h_out));
// Incoming gradients from downstream ops, one slice per time step.
const Tensor* cs_grad = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("cs_grad", &cs_grad));
const Tensor* h_grad = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("h_grad", &h_grad));
// Allocate all gradient outputs, shaped like their forward counterparts.
TensorShape batch_input_shape({timelen, batch_size, input_size});
Tensor* x_grad;
OP_REQUIRES_OK(ctx,
               ctx->allocate_output("x_grad", batch_input_shape, &x_grad));
Tensor* cs_prev_grad_tensor = nullptr;
OP_REQUIRES_OK(ctx,
               ctx->allocate_output("cs_prev_grad", cs_prev_tensor->shape(),
                                    &cs_prev_grad_tensor));
Tensor* h_prev_grad_tensor = nullptr;
OP_REQUIRES_OK(ctx,
               ctx->allocate_output("h_prev_grad", h_prev_tensor->shape(),
                                    &h_prev_grad_tensor));
Tensor* w_grad_tensor = nullptr;
OP_REQUIRES_OK(
    ctx, ctx->allocate_output("w_grad", w_tensor->shape(), &w_grad_tensor));
Tensor* wci_grad_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output("wci_grad", wci_tensor->shape(),
                                         &wci_grad_tensor));
Tensor* wcf_grad_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output("wcf_grad", wcf_tensor->shape(),
                                         &wcf_grad_tensor));
Tensor* wco_grad_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output("wco_grad", wco_tensor->shape(),
                                         &wco_grad_tensor));
Tensor* b_grad_tensor = nullptr;
OP_REQUIRES_OK(
    ctx, ctx->allocate_output("b_grad", b_tensor->shape(), &b_grad_tensor));
// Scratch tensors reused across every time step of the backward loop.
TensorShape batch_cell_shape({batch_size, cell_size});
Tensor xh_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(
                        DataTypeToEnum<T>::v(),
                        TensorShape({batch_size, input_size + cell_size}),
                        &xh_tensor));
Tensor xh_grad_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                       xh_tensor.shape(), &xh_grad_tensor));
Tensor do_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                       batch_cell_shape, &do_tensor));
Tensor dcs_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                       batch_cell_shape, &dcs_tensor));
Tensor dci_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                       batch_cell_shape, &dci_tensor));
Tensor df_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                       batch_cell_shape, &df_tensor));
Tensor di_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                       batch_cell_shape, &di_tensor));
Tensor dgates_tensor;
OP_REQUIRES_OK(ctx,
               ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                  TensorShape({batch_size, cell_size * 4}),
                                  &dgates_tensor));
Tensor cs_grad_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                       batch_cell_shape, &cs_grad_tensor));
Tensor h_grad_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
                                       batch_cell_shape, &h_grad_tensor));
// Zero the accumulators: weight/bias gradients are summed over time,
// and cs_prev_grad/h_prev_grad carry state backwards between steps.
const Device& device = ctx->eigen_device<Device>();
functor::TensorZero<Device, T>()(device, cs_grad_tensor.flat<T>());
functor::TensorZero<Device, T>()(device, cs_prev_grad_tensor->flat<T>());
functor::TensorZero<Device, T>()(device, h_grad_tensor.flat<T>());
functor::TensorZero<Device, T>()(device, h_prev_grad_tensor->flat<T>());
functor::TensorZero<Device, T>()(device, w_grad_tensor->flat<T>());
functor::TensorZero<Device, T>()(device, wci_grad_tensor->flat<T>());
functor::TensorZero<Device, T>()(device, wcf_grad_tensor->flat<T>());
functor::TensorZero<Device, T>()(device, wco_grad_tensor->flat<T>());
functor::TensorZero<Device, T>()(device, b_grad_tensor->flat<T>());
// NOTE(review): assumes 0 <= seq_len_max <= timelen; no explicit range
// check is visible here, so an out-of-range scalar would drive the slice
// helper out of bounds -- confirm callers (or a later patch) validate it.
const int64_t seq_len_max = seq_len_max_tensor->scalar<int64_t>()();
SliceHelper<Device, T> slicer(ctx);
for (int64_t t = seq_len_max - 1; t >= 0; --t) {
  const Tensor& x_tensor = slicer.InputSlice(*x, t, "x");
  // At t == 0 the "previous" state is the op's cs_prev/h_prev inputs;
  // otherwise it is the forward output of step t-1.
  const Tensor& cs_prev_tensor2 =
      t == 0 ? *cs_prev_tensor
             : slicer.InputSlice(*cs_out, t - 1, "cs_prev");
  const Tensor& h_prev_tensor2 =
      t == 0 ? *h_prev_tensor : slicer.InputSlice(*h_out, t - 1, "h_prev");
  const Tensor& i_tensor = slicer.InputSlice(*i_out, t, "i_out");
  const Tensor& cs_tensor = slicer.InputSlice(*cs_out, t, "cs_out");
  const Tensor& f_tensor = slicer.InputSlice(*f_out, t, "f_out");
  const Tensor& o_tensor = slicer.InputSlice(*o_out, t, "o_out");
  const Tensor& ci_tensor = slicer.InputSlice(*ci_out, t, "ci_out");
  const Tensor& co_tensor = slicer.InputSlice(*co_out, t, "co_out");
  // Grab previous CS grad.
  const Tensor& const_cs_prev_grad_tensor = *cs_prev_grad_tensor;
  const Tensor const_cs_grad_slice =
      slicer.InputSlice(*cs_grad, t, "cs_grad");
  functor::TensorAdd<Device, T>()(
      device, const_cs_prev_grad_tensor.flat<T>(),
      const_cs_grad_slice.flat<T>(), cs_grad_tensor.flat<T>());
  // Combine previous h grad and h grad coming on top.
  const Tensor& const_h_prev_grad_tensor = *h_prev_grad_tensor;
  const Tensor const_h_grad_slice = slicer.InputSlice(*h_grad, t, "h_grad");
  functor::TensorAdd<Device, T>()(
      device, const_h_prev_grad_tensor.flat<T>(),
      const_h_grad_slice.flat<T>(), h_grad_tensor.flat<T>());
  const Tensor& const_cs_grad_tensor = cs_grad_tensor;
  const Tensor& const_h_grad_tensor = h_grad_tensor;
  // One fused backprop step: writes x_grad for this step, updates the
  // backward-flowing cs_prev_grad/h_prev_grad, and accumulates into the
  // weight/peephole/bias gradient outputs.
  Tensor x_grad_tensor = slicer.OutputSlice(x_grad, t, "x_grad");
  functor::BlockLSTMBprop<Device, T, USE_CUBLAS, gate_layout>(
      batch_size, input_size, cell_size)(
      ctx, device, use_peephole_, x_tensor.matrix<T>(),
      cs_prev_tensor2.matrix<T>(), h_prev_tensor2.matrix<T>(),
      w_tensor->matrix<T>(), wci_tensor->vec<T>(), wcf_tensor->vec<T>(),
      wco_tensor->vec<T>(), b_tensor->vec<T>(), xh_tensor.matrix<T>(),
      i_tensor.matrix<T>(), cs_tensor.matrix<T>(), f_tensor.matrix<T>(),
      o_tensor.matrix<T>(), ci_tensor.matrix<T>(), co_tensor.matrix<T>(),
      const_cs_grad_tensor.matrix<T>(), const_h_grad_tensor.matrix<T>(),
      do_tensor.matrix<T>(), dcs_tensor.matrix<T>(), dci_tensor.matrix<T>(),
      df_tensor.matrix<T>(), di_tensor.matrix<T>(),
      dgates_tensor.matrix<T>(), cs_prev_grad_tensor->matrix<T>(),
      h_prev_grad_tensor->matrix<T>(), xh_grad_tensor.matrix<T>(),
      x_grad_tensor.matrix<T>(), w_grad_tensor->matrix<T>(),
      wci_grad_tensor->vec<T>(), wcf_grad_tensor->vec<T>(),
      wco_grad_tensor->vec<T>(), b_grad_tensor->vec<T>());
  slicer.FinishTimeStep();
}
// Steps beyond seq_len_max received no backprop; zero their x_grad.
if (seq_len_max < timelen) {
  Tensor x_grad_tensor = x_grad->Slice(seq_len_max, timelen);
  functor::TensorUnalignedZero<Device, T>()(
      device, x_grad_tensor.unaligned_flat<T>());
}
}
| 0
|
453,042
|
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
struct nftables_pernet *nft_net = nft_pernet(net);
int err = 0;
/*
 * Abort path for hardware-offload transactions: walk the commit list
 * backwards from the failing entry and apply the inverse operation of
 * each already-offloaded transaction (new chain -> unbind, deleted
 * chain -> rebind, new rule -> destroy, deleted rule -> re-add).
 * Only NFPROTO_NETDEV entries on HW_OFFLOAD chains are considered.
 */
list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
if (trans->ctx.family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
/* Chain updates never (re)bound a block, so skip them. */
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
    nft_trans_chain_update(trans))
continue;
err = nft_flow_offload_chain(trans->ctx.chain, NULL,
			     FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_DELCHAIN:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_chain(trans->ctx.chain, NULL,
			     FLOW_BLOCK_BIND);
break;
case NFT_MSG_NEWRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_rule(trans->ctx.chain,
			    nft_trans_rule(trans),
			    NULL, FLOW_CLS_DESTROY);
break;
case NFT_MSG_DELRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_rule(trans->ctx.chain,
			    nft_trans_rule(trans),
			    nft_trans_flow_rule(trans),
			    FLOW_CLS_REPLACE);
break;
}
/* An undo operation failing leaves hardware state inconsistent;
 * nothing more can be rolled back, so warn once and stop. */
if (WARN_ON_ONCE(err))
break;
}
}
| 0
|
509,509
|
void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_maria::start_bulk_insert");
THD *thd= table->in_use;
MARIA_SHARE *share= file->s;
bool index_disabled= 0;
DBUG_PRINT("info", ("start_bulk_insert: rows %lu", (ulong) rows));
/* don't enable row cache if too few rows */
if ((!rows || rows > MARIA_MIN_ROWS_TO_USE_WRITE_CACHE) && !has_long_unique())
{
ulonglong size= thd->variables.read_buff_size, tmp;
if (rows)
{
if (file->state->records)
{
MARIA_INFO maria_info;
maria_status(file, &maria_info, HA_STATUS_NO_LOCK |HA_STATUS_VARIABLE);
set_if_smaller(size, maria_info.mean_reclength * rows);
}
else if (table->s->avg_row_length)
set_if_smaller(size, (size_t) (table->s->avg_row_length * rows));
}
tmp= (ulong) size; // Safe becasue of limits
maria_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &tmp);
}
can_enable_indexes= (maria_is_all_keys_active(share->state.key_map,
share->base.keys));
bulk_insert_single_undo= BULK_INSERT_NONE;
if (!(specialflag & SPECIAL_SAFE_MODE))
{
/*
Only disable old index if the table was empty and we are inserting
a lot of rows.
We should not do this for only a few rows as this is slower and
we don't want to update the key statistics based of only a few rows.
Index file rebuild requires an exclusive lock, so if versioning is on
don't do it (see how ha_maria::store_lock() tries to predict repair).
We can repair index only if we have an exclusive (TL_WRITE) lock or
if this is inside an ALTER TABLE, in which case lock_type == TL_UNLOCK.
To see if table is empty, we shouldn't rely on the old record
count from our transaction's start (if that old count is 0 but
now there are records in the table, we would wrongly destroy
them). So we need to look at share->state.state.records. As a
safety net for now, we don't remove the test of
file->state->records, because there is uncertainty on what will
happen during repair if the two states disagree.
We also have to check in case of transactional tables that the
user has not used LOCK TABLE on the table twice.
*/
if ((file->state->records == 0) &&
(share->state.state.records == 0) && can_enable_indexes &&
(!rows || rows >= MARIA_MIN_ROWS_TO_DISABLE_INDEXES) &&
(file->lock.type == TL_WRITE || file->lock.type == TL_UNLOCK) &&
(!share->have_versioning || !share->now_transactional ||
file->used_tables->use_count == 1))
{
/**
@todo for a single-row INSERT SELECT, we will go into repair, which
is more costly (flushes, syncs) than a row write.
*/
if (file->open_flags & HA_OPEN_INTERNAL_TABLE)
{
/* Internal table; If we get a duplicate something is very wrong */
file->update|= HA_STATE_CHANGED;
index_disabled= share->base.keys > 0;
maria_clear_all_keys_active(file->s->state.key_map);
}
else
{
my_bool all_keys= MY_TEST(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
/*
Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
Note that 'rows' may be zero for the case when we don't know how many
rows we will put into the file.
*/
MARIA_SHARE *share= file->s;
MARIA_KEYDEF *key=share->keyinfo;
uint i;
DBUG_ASSERT(share->state.state.records == 0 &&
(!rows || rows >= MARIA_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) &&
! maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1 &&
(all_keys || !(key->flag & HA_NOSAME)) &&
table->key_info[i].algorithm != HA_KEY_ALG_LONG_HASH)
{
maria_clear_key_active(share->state.key_map, i);
index_disabled= 1;
file->update|= HA_STATE_CHANGED;
file->create_unique_index_by_sort= all_keys;
}
}
}
if (share->now_transactional)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR;
write_log_record_for_bulk_insert(file);
_ma_tmp_disable_logging_for_table(file, TRUE);
/*
Pages currently in the page cache have type PAGECACHE_LSN_PAGE, we
are not allowed to overwrite them with PAGECACHE_PLAIN_PAGE, so
throw them away. It is not losing data, because we just wrote and
forced an UNDO which will for sure empty the table if we crash. The
upcoming unique-key insertions however need a proper index, so we
cannot leave the corrupted on-disk index file, thus we truncate it.
*/
maria_delete_all_rows(file);
}
}
else if (!file->bulk_insert &&
(!rows || rows >= MARIA_MIN_ROWS_TO_USE_BULK_INSERT))
{
maria_init_bulk_insert(file,
(size_t) thd->variables.bulk_insert_buff_size,
rows);
}
}
can_enable_indexes= index_disabled;
DBUG_VOID_RETURN;
}
| 0
|
379,659
|
/**
 * Extract register-based arguments for a function from a single opcode.
 *
 * Walks the argument registers of fcn's calling convention (up to
 * r_anal_cc_max_arg of them) and records, as register variables on fcn,
 * every CC argument register that `op` uses like an argument. For call
 * instructions, the callee's prototype (guessed from sdb types, or from
 * the callee's own register-arg variables) supplies argument names and
 * types instead. Also handles the CC's dedicated "self" and "error"
 * registers, if any.
 *
 * @param anal    analysis context (non-NULL)
 * @param op      opcode being inspected (non-NULL)
 * @param fcn     function the arguments belong to (non-NULL)
 * @param reg_set per-argument-register state: 0 = unseen, 1 = seen used
 *                as an argument, 2 = blocked (register was written before
 *                being read as an argument). NOTE(review): indexed up to
 *                max_count plus the self/error slots — caller must size
 *                the array accordingly; confirm against call sites.
 * @param count   running number of detected arguments (in/out)
 */
R_API void r_anal_extract_rarg(RAnal *anal, RAnalOp *op, RAnalFunction *fcn, int *reg_set, int *count) {
	int i, argc = 0;
	r_return_if_fail (anal && op && fcn);
	const char *opsreg = op->src[0] ? get_regname (anal, op->src[0]) : NULL;
	const char *opdreg = op->dst ? get_regname (anal, op->dst) : NULL;
	/* pointer size in bytes; fall back to the global analysis bitness */
	const int size = (fcn->bits ? fcn->bits : anal->bits) / 8;
	if (!fcn->cc) {
		R_LOG_DEBUG ("No calling convention for function '%s' to extract register arguments\n", fcn->name);
		return;
	}
	char *fname = r_type_func_guess (anal->sdb_types, fcn->name);
	Sdb *TDB = anal->sdb_types;
	int max_count = r_anal_cc_max_arg (anal, fcn->cc);
	if (!max_count || (*count >= max_count)) {
		free (fname);
		return;
	}
	if (fname) {
		argc = r_type_func_args_count (TDB, fname);
	}
	/* Call instructions: derive argument names/types from the callee. */
	bool is_call = (op->type & 0xf) == R_ANAL_OP_TYPE_CALL || (op->type & 0xf) == R_ANAL_OP_TYPE_UCALL;
	if (is_call && *count < max_count) {
		RList *callee_rargs_l = NULL;
		int callee_rargs = 0;
		char *callee = NULL;
		ut64 offset = op->jump == UT64_MAX ? op->ptr : op->jump;
		RAnalFunction *f = r_anal_get_function_at (anal, offset);
		if (!f) {
			/* Not an analyzed function; try an import flag at the target. */
			RCore *core = (RCore *)anal->coreb.core;
			RFlagItem *flag = r_flag_get_by_spaces (core->flags, offset, R_FLAGS_FS_IMPORTS, NULL);
			if (flag) {
				callee = r_type_func_guess (TDB, flag->name);
				if (callee) {
					const char *cc = r_anal_cc_func (anal, callee);
					/* Only trust the prototype if the CCs match. */
					if (cc && !strcmp (fcn->cc, cc)) {
						callee_rargs = R_MIN (max_count, r_type_func_args_count (TDB, callee));
					}
				}
			}
		} else if (!f->is_variadic && !strcmp (fcn->cc, f->cc)) {
			callee = r_type_func_guess (TDB, f->name);
			if (callee) {
				callee_rargs = R_MIN (max_count, r_type_func_args_count (TDB, callee));
			}
			/* Fall back to the callee's own register-arg variable count. */
			callee_rargs = callee_rargs
				? callee_rargs
				: r_anal_var_count (anal, f, R_ANAL_VAR_KIND_REG, 1);
			callee_rargs_l = r_anal_var_list (anal, f, R_ANAL_VAR_KIND_REG);
		}
		for (i = 0; i < callee_rargs; i++) {
			if (reg_set[i]) {
				continue;
			}
			const char *vname = NULL;
			char *type = NULL;
			char *name = NULL;
			int delta = 0;
			const char *regname = r_anal_cc_arg (anal, fcn->cc, i);
			RRegItem *ri = r_reg_get (anal->reg, regname, -1);
			if (ri) {
				delta = ri->index;
			}
			if (fname) {
				type = r_type_func_args_type (TDB, fname, i);
				vname = r_type_func_args_name (TDB, fname, i);
			}
			if (!vname && callee) {
				/* fname may have yielded a type without a name; avoid leaking it */
				free (type);
				type = r_type_func_args_type (TDB, callee, i);
				vname = r_type_func_args_name (TDB, callee, i);
			}
			if (vname) {
				reg_set[i] = 1;
			} else {
				/* No prototype name; reuse the callee's own arg variable. */
				RListIter *it;
				RAnalVar *arg, *found_arg = NULL;
				r_list_foreach (callee_rargs_l, it, arg) {
					if (r_anal_var_get_argnum (arg) == i) {
						found_arg = arg;
						break;
					}
				}
				if (found_arg) {
					free (type);
					type = strdup (found_arg->type);
					vname = name = strdup (found_arg->name);
				}
			}
			if (!vname) {
				/* Synthesize a 1-based placeholder name. */
				name = r_str_newf ("arg%d", i + 1);
				vname = name;
			}
			r_anal_function_set_var (fcn, delta, R_ANAL_VAR_KIND_REG, type, size, true, vname);
			(*count)++;
			free (name);
			free (type);
		}
		free (callee);
		r_list_free (callee_rargs_l);
		free (fname);
		return;
	}
	/* Non-call: scan every CC argument register for argument-like usage. */
	for (i = 0; i < max_count; i++) {
		const char *regname = r_anal_cc_arg (anal, fcn->cc, i);
		if (regname) {
			int delta = 0;
			RRegItem *ri = NULL;
			RAnalVar *var = NULL;
			bool is_used_like_an_arg = is_used_like_arg (regname, opsreg, opdreg, op, anal);
			if (reg_set[i] != 2 && is_used_like_an_arg) {
				ri = r_reg_get (anal->reg, regname, -1);
				if (ri) {
					delta = ri->index;
				}
			}
			if (reg_set[i] == 1 && is_used_like_an_arg) {
				/* Already recorded; just attach an access below. */
				var = r_anal_function_get_var (fcn, R_ANAL_VAR_KIND_REG, delta);
			} else if (reg_set[i] != 2 && is_used_like_an_arg) {
				const char *vname = NULL;
				char *type = NULL;
				char *name = NULL;
				if ((i < argc) && fname) {
					type = r_type_func_args_type (TDB, fname, i);
					vname = r_type_func_args_name (TDB, fname, i);
				}
				if (!vname) {
					name = r_str_newf ("arg%d", i + 1);
					vname = name;
				}
				var = r_anal_function_set_var (fcn, delta, R_ANAL_VAR_KIND_REG, type, size, true, vname);
				free (name);
				free (type);
				(*count)++;
			} else {
				/* Register written before read as arg: block this slot. */
				if (is_reg_in_src (regname, anal, op) || STR_EQUAL (opdreg, regname)) {
					reg_set[i] = 2;
				}
				continue;
			}
			if (is_reg_in_src (regname, anal, op) || STR_EQUAL (regname, opdreg)) {
				reg_set[i] = 1;
			}
			if (var) {
				r_anal_var_set_access (var, var->regname, op->addr, R_ANAL_VAR_ACCESS_TYPE_READ, 0);
				r_meta_set_string (anal, R_META_TYPE_VARTYPE, op->addr, var->name);
			}
		}
	}
	/* "self" register (e.g. objc/thiscall receiver), tracked past max_count. */
	const char *selfreg = r_anal_cc_self (anal, fcn->cc);
	if (selfreg) {
		bool is_used_like_an_arg = is_used_like_arg (selfreg, opsreg, opdreg, op, anal);
		if (reg_set[i] != 2 && is_used_like_an_arg) {
			int delta = 0;
			char *vname = strdup ("self");
			RRegItem *ri = r_reg_get (anal->reg, selfreg, -1);
			if (ri) {
				delta = ri->index;
			}
			RAnalVar *newvar = r_anal_function_set_var (fcn, delta, R_ANAL_VAR_KIND_REG, 0, size, true, vname);
			if (newvar) {
				r_anal_var_set_access (newvar, newvar->regname, op->addr, R_ANAL_VAR_ACCESS_TYPE_READ, 0);
			}
			r_meta_set_string (anal, R_META_TYPE_VARTYPE, op->addr, vname);
			free (vname);
			(*count)++;
		} else {
			if (is_reg_in_src (selfreg, anal, op) || STR_EQUAL (opdreg, selfreg)) {
				reg_set[i] = 2;
			}
		}
		i++;
	}
	/* "error" register: only when written to, recorded once. */
	const char *errorreg = r_anal_cc_error (anal, fcn->cc);
	if (errorreg) {
		if (reg_set[i] == 0 && STR_EQUAL (opdreg, errorreg)) {
			int delta = 0;
			char *vname = strdup ("error");
			RRegItem *ri = r_reg_get (anal->reg, errorreg, -1);
			if (ri) {
				delta = ri->index;
			}
			RAnalVar *newvar = r_anal_function_set_var (fcn, delta, R_ANAL_VAR_KIND_REG, 0, size, true, vname);
			if (newvar) {
				r_anal_var_set_access (newvar, newvar->regname, op->addr, R_ANAL_VAR_ACCESS_TYPE_READ, 0);
			}
			r_meta_set_string (anal, R_META_TYPE_VARTYPE, op->addr, vname);
			free (vname);
			(*count)++;
			reg_set[i] = 2;
		}
	}
	free (fname);
}
| 0
|
512,520
|
  /* No TYPELIB (enum/set value list) is associated with this type. */
  TYPELIB *get_typelib() const { return NULL; }
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.