idx
int64 | func
string | target
int64 |
|---|---|---|
488,392
|
/*
 * Apply feature fixups to the in-kernel vDSO images before they are
 * mapped into user space.
 *
 * Looks up the "__ftr_fixup" (CPU features) and, under CONFIG_PPC64, the
 * "__fw_ftr_fixup" (firmware features) sections in the 64-bit and 32-bit
 * vDSO ELF images and patches the alternative code sequences in place via
 * do_feature_fixups().  A missing section is simply skipped, so this
 * always returns 0.
 *
 * v32/v64: parsed ELF info for the 32-bit / 64-bit vDSO images.
 */
static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
void *start32;
unsigned long size32;
#ifdef CONFIG_PPC64
void *start64;
unsigned long size64;
/* 64-bit image: CPU feature fixups. */
start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
if (start64)
do_feature_fixups(cur_cpu_spec->cpu_features,
start64, start64 + size64);
/* 64-bit image: firmware feature fixups. */
start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
if (start64)
do_feature_fixups(powerpc_firmware_features,
start64, start64 + size64);
#endif /* CONFIG_PPC64 */
/* 32-bit image: CPU feature fixups. */
start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
if (start32)
do_feature_fixups(cur_cpu_spec->cpu_features,
start32, start32 + size32);
#ifdef CONFIG_PPC64
/* 32-bit image: firmware feature fixups (PPC64 builds only). */
start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
if (start32)
do_feature_fixups(powerpc_firmware_features,
start32, start32 + size32);
#endif /* CONFIG_PPC64 */
return 0;
}
| 0
|
224,862
|
// Applies an element-wise binary functor between the nonzero values of a
// SparseTensor (inputs sp_indices / sp_values / sp_shape) and a dense
// tensor, broadcasting the *dense* operand onto the sparse side only.
// Output 0 holds one result per nonzero (shape {nnz}).  Tensors with rank
// outside 1..5 are rejected.
void Compute(OpKernelContext *ctx) override {
const Tensor *indices_t, *values_t, *shape_t, *dense_t;
OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t));
OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t));
// Validations.
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()),
errors::InvalidArgument(
"Input sp_indices should be a matrix but received shape: ",
indices_t->shape().DebugString()));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(values_t->shape()) &&
TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument(
"Inputs sp_values and sp_shape should be vectors "
"but received shapes: ",
values_t->shape().DebugString(), " and ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument("Input sp_shape must be a vector. Got: ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, values_t->dim_size(0) == indices_t->dim_size(0),
errors::InvalidArgument(
"The first dimension of values and indices should match. (",
values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")"));
OP_REQUIRES(
ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", shape_t->shape().dim_size(0),
" dimensions, indices shape: ", indices_t->shape().DebugString()));
OP_REQUIRES(ctx, shape_t->NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
// Dense shape of the sparse operand, reconstructed from sp_shape.
const auto indices_mat = indices_t->matrix<int64_t>();
const auto shape_vec = shape_t->vec<int64_t>();
TensorShape lhs_shape;
OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_vec, &lhs_shape));
const auto lhs_dims = BCast::FromShape(lhs_shape);
const auto rhs_dims = BCast::FromShape(dense_t->shape());
BCast b(lhs_dims, rhs_dims, false);  // false for keeping the same num dims.
// True iff (size(lhs) >= size(rhs)) and all dims in lhs is greater or equal
// to dims in rhs (from right to left).
auto VecGreaterEq = [](ArraySlice<int64_t> lhs, ArraySlice<int64_t> rhs) {
if (lhs.size() < rhs.size()) return false;
for (size_t i = 0; i < rhs.size(); ++i) {
if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false;
}
return true;
};
// Dense may only broadcast *up to* the sparse shape, never enlarge it.
OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(),
errors::InvalidArgument(
"SparseDenseBinaryOpShared broadcasts dense to sparse "
"only; got incompatible shapes: [",
absl::StrJoin(lhs_dims, ","), "] vs. [",
absl::StrJoin(rhs_dims, ","), "]"));
// Allocate the output (one value per nonzero) and a scratch buffer that
// holds the dense operand gathered at each nonzero position.
Tensor *output_values = nullptr;
Tensor dense_gathered;
const int64_t nnz = indices_t->dim_size(0);
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({nnz}), &output_values));
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),
&dense_gathered));
// Division ops need a divide-by-zero check on the gathered dense values.
bool op_is_div = false;
if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) {
op_is_div = true;
}
// Pulls relevant entries from the dense side, with reshape and broadcasting
// *of the dense side* taken into account. Use a TensorRef to avoid blowing
// up memory.
//
// We can directly use the sparse indices to look up dense side, because
// "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims".
auto dense_gathered_flat = dense_gathered.flat<T>();
const int ndims = lhs_dims.size();
// Rank dispatch: each CASE instantiates a broadcasted TensorRef of that
// static rank and gathers/validates every sparse index against it.
switch (ndims) {
#define CASE(NDIM) \
case NDIM: { \
TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \
dense_t->shaped<T, NDIM>(b.y_reshape()) \
.broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \
Eigen::array<Eigen::DenseIndex, NDIM> idx; \
bool indices_valid = true; \
for (int i = 0; i < nnz; ++i) { \
for (int d = 0; d < NDIM; ++d) { \
idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \
if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \
indices_valid = false; \
} \
} \
OP_REQUIRES( \
ctx, indices_valid, \
errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \
"dense side with broadcasted shape")); \
dense_gathered_flat(i) = rhs_ref.coeff(idx); \
if (op_is_div) { \
OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \
errors::InvalidArgument( \
"SparseDenseCwiseDiv cannot divide by zero," \
"but input dense tensor contains zero ")); \
} \
} \
break; \
}
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(5);
default:
OP_REQUIRES(
ctx, false,
errors::InvalidArgument("Only tensors with ranks between 1 and 5 "
"are currently supported. Tensor rank: ",
ndims));
#undef CASE
}
// Finally apply the functor pairwise: sparse value (op) gathered dense.
output_values->flat<T>().device(ctx->eigen_device<Device>()) =
values_t->flat<T>().binaryExpr(dense_gathered_flat,
typename Functor::func());
}
| 0
|
484,797
|
/*
 * ndo_start_xmit handler for xen-netfront.
 *
 * Builds grant-table tx requests on the queue selected by the skb's
 * queue mapping: one "first" request for the start of the linear area
 * (optionally followed by a GSO extra-info slot), further requests for
 * the rest of the linear area and for each frag, then notifies the
 * backend if required and updates tx stats.
 *
 * Always returns NETDEV_TX_OK; packets that cannot be sent are dropped
 * and counted in dev->stats.tx_dropped.
 */
static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
struct xen_netif_tx_request *first_tx;
unsigned int i;
int notify;
int slots;
struct page *page;
unsigned int offset;
unsigned int len;
unsigned long flags;
struct netfront_queue *queue = NULL;
struct xennet_gnttab_make_txreq info = { };
unsigned int num_queues = dev->real_num_tx_queues;
u16 queue_index;
struct sk_buff *nskb;
/* Drop the packet if no queues are set up */
if (num_queues < 1)
goto drop;
if (unlikely(np->broken))
goto drop;
/* Determine which queue to transmit this SKB on */
queue_index = skb_get_queue_mapping(skb);
queue = &np->queues[queue_index];
/* If skb->len is too big for wire format, drop skb and alert
 * user about misconfiguration.
 */
if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
net_alert_ratelimited(
"xennet: skb->len = %u, too big for wire format\n",
skb->len);
goto drop;
}
/* Too many ring slots needed: linearizing the skb reduces the count. */
slots = xennet_count_skb_slots(skb);
if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
slots, skb->len);
if (skb_linearize(skb))
goto drop;
}
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
/* The first req should be at least ETH_HLEN size or the packet will be
 * dropped by netback.
 *
 * If the backend is not trusted bounce all data to zeroed pages to
 * avoid exposing contiguous data on the granted page not belonging to
 * the skb.
 */
if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
nskb = bounce_skb(skb);
if (!nskb)
goto drop;
dev_consume_skb_any(skb);
skb = nskb;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
}
len = skb_headlen(skb);
/* From here on the tx ring is manipulated under tx_lock. */
spin_lock_irqsave(&queue->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
(slots > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irqrestore(&queue->tx_lock, flags);
goto drop;
}
/* First request for the linear area. */
info.queue = queue;
info.skb = skb;
info.page = page;
first_tx = xennet_make_first_txreq(&info, offset, len);
offset += info.tx_local.size;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
len -= info.tx_local.size;
if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */
first_tx->flags |= XEN_NETTXF_csum_blank |
XEN_NETTXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
/* remote but checksummed. */
first_tx->flags |= XEN_NETTXF_data_validated;
/* Optional extra info after the first request. */
if (skb_shinfo(skb)->gso_size) {
struct xen_netif_extra_info *gso;
gso = (struct xen_netif_extra_info *)
RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
first_tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size;
gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
XEN_NETIF_GSO_TYPE_TCPV6 :
XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
gso->flags = 0;
}
/* Requests for the rest of the linear area. */
xennet_make_txreqs(&info, page, offset, len);
/* Requests for all the frags. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
xennet_make_txreqs(&info, skb_frag_page(frag),
skb_frag_off(frag),
skb_frag_size(frag));
}
/* First request has the packet length. */
first_tx->size = skb->len;
/* timestamp packet in software */
skb_tx_timestamp(skb);
xennet_mark_tx_pending(queue);
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->bytes += skb->len;
tx_stats->packets++;
u64_stats_update_end(&tx_stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(queue);
if (!netfront_tx_slot_available(queue))
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
spin_unlock_irqrestore(&queue->tx_lock, flags);
return NETDEV_TX_OK;
drop:
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
| 0
|
434,102
|
/*
 * Fill "gap" with pointers to the arguments found in "str".
 * The string is modified in place: unless "escaped" is FALSE a NUL is
 * written after each isolated argument.  When "escaped" is FALSE the
 * remainder of the string is stored as one single entry, untouched.
 * Returns OK on success, FAIL (with "gap" cleared) when growing the
 * array fails.
 */
get_arglist(garray_T *gap, char_u *str, int escaped)
{
    ga_init2(gap, (int)sizeof(char_u *), 20);
    for (;;)
    {
	if (*str == NUL)
	    break;
	if (ga_grow(gap, 1) == FAIL)
	{
	    ga_clear(gap);
	    return FAIL;
	}
	((char_u **)gap->ga_data)[gap->ga_len++] = str;

	// Without escaping, store the rest as a single argument.
	if (!escaped)
	    return OK;

	// Cut out one argument in-place and advance past it.
	str = do_one_arg(str);
    }
    return OK;
}
| 0
|
309,951
|
/*
 * Map a (foreground, background) combination to a color-pair number.
 * With alloc_pair() support and the -x option, let the library manage
 * pair allocation; otherwise derive a fixed slot from fg/bg and return
 * -1 when the slot exceeds COLOR_PAIRS.
 */
mypair(int fg, int bg)
{
    int slot;

#if HAVE_ALLOC_PAIR
    if (x_option)
	return alloc_pair(fg, bg);
#endif
    slot = (fg * COLORS) + bg;
    if (slot >= COLOR_PAIRS)
	return -1;
    return slot;
}
| 0
|
344,252
|
/*
 * Prepare a numeric 'for' loop (slots ra..ra+3 hold: initial value,
 * limit, step, control variable).  For integer loops the total iteration
 * count is precomputed (without overflow, via unsigned arithmetic) and
 * stored in the limit slot, which is no longer needed; for float loops
 * all three values are coerced to floats.
 * Returns 1 when the loop body must be skipped entirely, 0 otherwise.
 * Raises an error for a zero step or non-numeric operands.
 */
static int forprep (lua_State *L, StkId ra) {
TValue *pinit = s2v(ra);
TValue *plimit = s2v(ra + 1);
TValue *pstep = s2v(ra + 2);
if (ttisinteger(pinit) && ttisinteger(pstep)) { /* integer loop? */
lua_Integer init = ivalue(pinit);
lua_Integer step = ivalue(pstep);
lua_Integer limit;
if (step == 0)
luaG_runerror(L, "'for' step is zero");
setivalue(s2v(ra + 3), init); /* control variable */
if (forlimit(L, init, plimit, &limit, step))
return 1; /* skip the loop */
else { /* prepare loop counter */
lua_Unsigned count;
if (step > 0) { /* ascending loop? */
count = l_castS2U(limit) - l_castS2U(init);
if (step != 1) /* avoid division in the too common case */
count /= l_castS2U(step);
}
else { /* step < 0; descending loop */
count = l_castS2U(init) - l_castS2U(limit);
/* 'step+1' avoids negating 'mininteger' */
count /= l_castS2U(-(step + 1)) + 1u;
}
/* store the counter in place of the limit (which won't be
needed anymore) */
setivalue(plimit, l_castU2S(count));
}
}
else { /* try making all values floats */
lua_Number init; lua_Number limit; lua_Number step;
if (l_unlikely(!tonumber(plimit, &limit)))
luaG_forerror(L, plimit, "limit");
if (l_unlikely(!tonumber(pstep, &step)))
luaG_forerror(L, pstep, "step");
if (l_unlikely(!tonumber(pinit, &init)))
luaG_forerror(L, pinit, "initial value");
if (step == 0)
luaG_runerror(L, "'for' step is zero");
if (luai_numlt(0, step) ? luai_numlt(limit, init)
: luai_numlt(init, limit))
return 1; /* skip the loop */
else {
/* make sure internal values are all floats */
setfltvalue(plimit, limit);
setfltvalue(pstep, step);
setfltvalue(s2v(ra), init); /* internal index */
setfltvalue(s2v(ra + 3), init); /* control variable */
}
}
return 0;
}
| 0
|
359,571
|
/*
 * "no redistribute <proto> metric <0-4294967295>": clear the metric set
 * for routes redistributed into BGP from the given IPv6 protocol.
 * Redistribution itself stays enabled; only the metric is unset.
 */
DEFUN (no_bgp_redistribute_ipv6_metric,
       no_bgp_redistribute_ipv6_metric_cmd,
       "no redistribute (connected|kernel|ospf6|ripng|static) metric <0-4294967295>",
       NO_STR
       "Redistribute information from another routing protocol\n"
       "Connected\n"
       "Kernel routes\n"
       "Open Shortest Path First (OSPFv3)\n" /* fixed typo: "Shurtest" */
       "Routing Information Protocol (RIPng)\n"
       "Static routes\n"
       "Metric for redistributed routes\n"
       "Default metric\n")
{
  int type;

  /* Map the protocol keyword to an internal route type code. */
  type = bgp_str2route_type (AFI_IP6, argv[0]);
  if (! type)
    {
      vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE);
      return CMD_WARNING;
    }

  bgp_redistribute_metric_unset (vty->index, AFI_IP6, type);
  return CMD_SUCCESS;
}
| 0
|
387,607
|
/*
 * Ensure the control layer named "module_name" is available, requesting
 * the module only when no layer with that name has registered yet.
 * Returns 0 when the layer is already registered (or no name was given),
 * otherwise the result of request_module().
 */
int snd_ctl_request_layer(const char *module_name)
{
	struct snd_ctl_layer_ops *lops;
	int found = 0;

	if (module_name == NULL)
		return 0;

	/* Scan the registered layers under the read lock. */
	down_read(&snd_ctl_layer_rwsem);
	for (lops = snd_ctl_layer; lops != NULL && !found; lops = lops->next)
		found = (strcmp(lops->module_name, module_name) == 0);
	up_read(&snd_ctl_layer_rwsem);

	return found ? 0 : request_module(module_name);
}
| 0
|
254,746
|
/*
 * qsort-style comparator for uint16_t elements: returns the signed
 * difference *a - *b (negative / zero / positive ordering).  The extra
 * context argument "c" is unused.
 */
njs_typed_array_compare_u16(const void *a, const void *b, void *c)
{
    uint16_t first = *(const uint16_t *) a;
    uint16_t second = *(const uint16_t *) b;

    return first - second;
}
| 0
|
207,461
|
/*
 * Read a BMP file into an at_bitmap.
 *
 * Handles the OS/2 1.x, Windows 3.x, bitfield-extended (biSize 56..64)
 * and V4/V5 (biSize 108/124) header variants, validates the geometry
 * against integer-overflow attacks, reads the palette for <= 8 bpp
 * images and decodes the pixel data through ReadImage().
 *
 * Errors are reported via the at_exception mechanism; on failure the
 * initial empty bitmap is returned.
 *
 * Fix: the cleanup path previously called fclose(fd) even when fopen()
 * had failed (fd == NULL), which is undefined behavior; the close is now
 * guarded.
 */
at_bitmap input_bmp_reader(gchar * filename, at_input_opts_type * opts, at_msg_func msg_func, gpointer msg_data, gpointer user_data)
{
  FILE *fd;
  unsigned char buffer[64];
  int ColormapSize, rowbytes, Maps;
  gboolean Grey = FALSE;
  unsigned char ColorMap[256][3];
  at_bitmap image = at_bitmap_init(0, 0, 0, 1);
  unsigned char *image_storage;
  at_exception_type exp = at_exception_new(msg_func, msg_data);
  char magick[2];
  Bitmap_Channel masks[4];

  fd = fopen(filename, "rb");
  if (!fd) {
    LOG("Can't open \"%s\"\n", filename);
    at_exception_fatal(&exp, "bmp: cannot open input file");
    goto cleanup;
  }

  /* It is a File. Now is it a Bitmap? Read the shortest possible header. */
  if (!ReadOK(fd, magick, 2) ||
      !(!strncmp(magick, "BA", 2) ||
        !strncmp(magick, "BM", 2) ||
        !strncmp(magick, "IC", 2) ||
        !strncmp(magick, "PT", 2) ||
        !strncmp(magick, "CI", 2) ||
        !strncmp(magick, "CP", 2))) {
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }

  /* Skip "BA" (bitmap-array) wrappers until a real image magic appears. */
  while (!strncmp(magick, "BA", 2)) {
    if (!ReadOK(fd, buffer, 12)) {
      LOG("%s is not a valid BMP file", filename);
      at_exception_fatal(&exp, "bmp: invalid input file");
      goto cleanup;
    }
    if (!ReadOK(fd, magick, 2)) {
      LOG("%s is not a valid BMP file", filename);
      at_exception_fatal(&exp, "bmp: invalid input file");
      goto cleanup;
    }
  }

  if (!ReadOK(fd, buffer, 12)) {
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }

  /* bring them to the right byteorder. Not too nice, but it should work */
  Bitmap_File_Head.bfSize = ToL(&buffer[0x00]);
  Bitmap_File_Head.zzHotX = ToS(&buffer[0x04]);
  Bitmap_File_Head.zzHotY = ToS(&buffer[0x06]);
  Bitmap_File_Head.bfOffs = ToL(&buffer[0x08]);

  if (!ReadOK(fd, buffer, 4)) {
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }
  Bitmap_File_Head.biSize = ToL(&buffer[0x00]);

  /* What kind of bitmap is it? */
  if (Bitmap_File_Head.biSize == 12) { /* OS/2 1.x ? */
    if (!ReadOK(fd, buffer, 8)) {
      LOG("Error reading BMP file header\n");
      at_exception_fatal(&exp, "Error reading BMP file header");
      goto cleanup;
    }
    Bitmap_Head.biWidth = ToS(&buffer[0x00]); /* 12 */
    Bitmap_Head.biHeight = ToS(&buffer[0x02]); /* 14 */
    Bitmap_Head.biPlanes = ToS(&buffer[0x04]); /* 16 */
    Bitmap_Head.biBitCnt = ToS(&buffer[0x06]); /* 18 */
    Bitmap_Head.biCompr = 0;
    Bitmap_Head.biSizeIm = 0;
    Bitmap_Head.biXPels = Bitmap_Head.biYPels = 0;
    Bitmap_Head.biClrUsed = 0;
    Bitmap_Head.biClrImp = 0;
    Bitmap_Head.masks[0] = 0;
    Bitmap_Head.masks[1] = 0;
    Bitmap_Head.masks[2] = 0;
    Bitmap_Head.masks[3] = 0;
    memset(masks, 0, sizeof(masks));
    Maps = 3;
  } else if (Bitmap_File_Head.biSize == 40) { /* Windows 3.x */
    if (!ReadOK(fd, buffer, 36)) {
      LOG("Error reading BMP file header\n");
      at_exception_fatal(&exp, "Error reading BMP file header");
      goto cleanup;
    }
    Bitmap_Head.biWidth = ToL(&buffer[0x00]); /* 12 */
    Bitmap_Head.biHeight = ToL(&buffer[0x04]); /* 16 */
    Bitmap_Head.biPlanes = ToS(&buffer[0x08]); /* 1A */
    Bitmap_Head.biBitCnt = ToS(&buffer[0x0A]); /* 1C */
    Bitmap_Head.biCompr = ToL(&buffer[0x0C]); /* 1E */
    Bitmap_Head.biSizeIm = ToL(&buffer[0x10]); /* 22 */
    Bitmap_Head.biXPels = ToL(&buffer[0x14]); /* 26 */
    Bitmap_Head.biYPels = ToL(&buffer[0x18]); /* 2A */
    Bitmap_Head.biClrUsed = ToL(&buffer[0x1C]); /* 2E */
    Bitmap_Head.biClrImp = ToL(&buffer[0x20]); /* 32 */
    Bitmap_Head.masks[0] = 0;
    Bitmap_Head.masks[1] = 0;
    Bitmap_Head.masks[2] = 0;
    Bitmap_Head.masks[3] = 0;
    Maps = 4;
    memset(masks, 0, sizeof(masks));
    if (Bitmap_Head.biCompr == BI_BITFIELDS) {
      /* Explicit RGB channel masks follow the header. */
      if (!ReadOK(fd, buffer, 3 * sizeof(unsigned long))) {
        LOG("Error reading BMP file header\n");
        at_exception_fatal(&exp, "Error reading BMP file header");
        goto cleanup;
      }
      Bitmap_Head.masks[0] = ToL(&buffer[0x00]);
      Bitmap_Head.masks[1] = ToL(&buffer[0x04]);
      Bitmap_Head.masks[2] = ToL(&buffer[0x08]);
      ReadChannelMasks(&Bitmap_Head.masks[0], masks, 3);
    } else if (Bitmap_Head.biCompr == BI_RGB) {
      setMasksDefault(Bitmap_Head.biBitCnt, masks);
    } else if ((Bitmap_Head.biCompr != BI_RLE4) &&
               (Bitmap_Head.biCompr != BI_RLE8)) {
      /* BI_ALPHABITFIELDS, etc. */
      LOG("Unsupported compression in BMP file\n");
      at_exception_fatal(&exp, "Unsupported compression in BMP file");
      goto cleanup;
    }
  } else if (Bitmap_File_Head.biSize >= 56 && Bitmap_File_Head.biSize <= 64) {
    /* enhanced Windows format with bit masks */
    if (!ReadOK(fd, buffer, Bitmap_File_Head.biSize - 4)) {
      LOG("Error reading BMP file header\n");
      at_exception_fatal(&exp, "Error reading BMP file header");
      goto cleanup;
    }
    Bitmap_Head.biWidth = ToL(&buffer[0x00]); /* 12 */
    Bitmap_Head.biHeight = ToL(&buffer[0x04]); /* 16 */
    Bitmap_Head.biPlanes = ToS(&buffer[0x08]); /* 1A */
    Bitmap_Head.biBitCnt = ToS(&buffer[0x0A]); /* 1C */
    Bitmap_Head.biCompr = ToL(&buffer[0x0C]); /* 1E */
    Bitmap_Head.biSizeIm = ToL(&buffer[0x10]); /* 22 */
    Bitmap_Head.biXPels = ToL(&buffer[0x14]); /* 26 */
    Bitmap_Head.biYPels = ToL(&buffer[0x18]); /* 2A */
    Bitmap_Head.biClrUsed = ToL(&buffer[0x1C]); /* 2E */
    Bitmap_Head.biClrImp = ToL(&buffer[0x20]); /* 32 */
    Bitmap_Head.masks[0] = ToL(&buffer[0x24]); /* 36 */
    Bitmap_Head.masks[1] = ToL(&buffer[0x28]); /* 3A */
    Bitmap_Head.masks[2] = ToL(&buffer[0x2C]); /* 3E */
    Bitmap_Head.masks[3] = ToL(&buffer[0x30]); /* 42 */
    Maps = 4;
    ReadChannelMasks(&Bitmap_Head.masks[0], masks, 4);
  } else if (Bitmap_File_Head.biSize == 108 || Bitmap_File_Head.biSize == 124) {
    /* BMP Version 4 or 5 */
    if (!ReadOK(fd, buffer, Bitmap_File_Head.biSize - 4)) {
      LOG("Error reading BMP file header\n");
      at_exception_fatal(&exp, "Error reading BMP file header");
      goto cleanup;
    }
    Bitmap_Head.biWidth = ToL(&buffer[0x00]);
    Bitmap_Head.biHeight = ToL(&buffer[0x04]);
    Bitmap_Head.biPlanes = ToS(&buffer[0x08]);
    Bitmap_Head.biBitCnt = ToS(&buffer[0x0A]);
    Bitmap_Head.biCompr = ToL(&buffer[0x0C]);
    Bitmap_Head.biSizeIm = ToL(&buffer[0x10]);
    Bitmap_Head.biXPels = ToL(&buffer[0x14]);
    Bitmap_Head.biYPels = ToL(&buffer[0x18]);
    Bitmap_Head.biClrUsed = ToL(&buffer[0x1C]);
    Bitmap_Head.biClrImp = ToL(&buffer[0x20]);
    Bitmap_Head.masks[0] = ToL(&buffer[0x24]);
    Bitmap_Head.masks[1] = ToL(&buffer[0x28]);
    Bitmap_Head.masks[2] = ToL(&buffer[0x2C]);
    Bitmap_Head.masks[3] = ToL(&buffer[0x30]);
    Maps = 4;
    if (Bitmap_Head.biCompr == BI_BITFIELDS) {
      ReadChannelMasks(&Bitmap_Head.masks[0], masks, 4);
    } else if (Bitmap_Head.biCompr == BI_RGB) {
      setMasksDefault(Bitmap_Head.biBitCnt, masks);
    }
  } else {
    LOG("Error reading BMP file header\n");
    at_exception_fatal(&exp, "Error reading BMP file header");
    goto cleanup;
  }

  /* Valid options 1, 4, 8, 16, 24, 32 */
  /* 16 is awful, we should probably shoot whoever invented it */
  switch (Bitmap_Head.biBitCnt) {
  case 1:
  case 2:
  case 4:
  case 8:
  case 16:
  case 24:
  case 32:
    break;
  default:
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }

  /* There should be some colors used! */
  ColormapSize = (Bitmap_File_Head.bfOffs - Bitmap_File_Head.biSize - 14) / Maps;
  if ((Bitmap_Head.biClrUsed == 0) && (Bitmap_Head.biBitCnt <= 8)) {
    ColormapSize = Bitmap_Head.biClrUsed = 1 << Bitmap_Head.biBitCnt;
  }
  if (ColormapSize > 256)
    ColormapSize = 256;

  /* Sanity checks */
  if (Bitmap_Head.biHeight == 0 || Bitmap_Head.biWidth == 0) {
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }
  /* biHeight may be negative, but -2147483648 is dangerous because:
     -2147483648 == -(-2147483648) */
  if (Bitmap_Head.biWidth < 0 || Bitmap_Head.biHeight == -2147483648) {
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }
  if (Bitmap_Head.biPlanes != 1) {
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }
  if (Bitmap_Head.biClrUsed > 256 && Bitmap_Head.biBitCnt <= 8) {
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }

  /* protect against integer overflows caused by malicious BMPs */
  /* use divisions in comparisons to avoid type overflows */
  if (((unsigned long)Bitmap_Head.biWidth) > (unsigned int)0x7fffffff / Bitmap_Head.biBitCnt ||
      ((unsigned long)Bitmap_Head.biWidth) > ((unsigned int)0x7fffffff / abs(Bitmap_Head.biHeight)) / 4) {
    LOG("%s is not a valid BMP file", filename);
    at_exception_fatal(&exp, "bmp: invalid input file");
    goto cleanup;
  }

  /* Windows and OS/2 declare filler so that rows are a multiple of
   * word length (32 bits == 4 bytes)
   */
  unsigned long overflowTest = Bitmap_Head.biWidth * Bitmap_Head.biBitCnt;
  if (overflowTest / Bitmap_Head.biWidth != Bitmap_Head.biBitCnt) {
    LOG("Error reading BMP file header. Width is too large\n");
    at_exception_fatal(&exp, "Error reading BMP file header. Width is too large");
    goto cleanup;
  }
  rowbytes = ((Bitmap_Head.biWidth * Bitmap_Head.biBitCnt - 1) / 32) * 4 + 4;

#ifdef DEBUG
  printf("\nSize: %u, Colors: %u, Bits: %u, Width: %u, Height: %u, Comp: %u, Zeile: %u\n", Bitmap_File_Head.bfSize, Bitmap_Head.biClrUsed, Bitmap_Head.biBitCnt, Bitmap_Head.biWidth, Bitmap_Head.biHeight, Bitmap_Head.biCompr, rowbytes);
#endif

  if (Bitmap_Head.biBitCnt <= 8) {
#ifdef DEBUG
    printf("Colormap read\n");
#endif
    /* Get the Colormap */
    if (!ReadColorMap(fd, ColorMap, ColormapSize, Maps, &Grey, &exp))
      goto cleanup;
  }

  fseek(fd, Bitmap_File_Head.bfOffs, SEEK_SET);

  /* Get the Image and return the ID or -1 on error */
  image_storage = ReadImage(fd,
                            Bitmap_Head.biWidth, Bitmap_Head.biHeight,
                            ColorMap,
                            Bitmap_Head.biClrUsed,
                            Bitmap_Head.biBitCnt, Bitmap_Head.biCompr, rowbytes,
                            Grey,
                            masks,
                            &exp);
  image = at_bitmap_init(image_storage, (unsigned short)Bitmap_Head.biWidth, (unsigned short)Bitmap_Head.biHeight, Grey ? 1 : 3);

cleanup:
  /* fd is NULL when fopen() failed above; fclose(NULL) is undefined
   * behavior, so guard the close. */
  if (fd)
    fclose(fd);
  return (image);
}
| 1
|
348,443
|
/*
 * ndo_set_mac_address handler: copy the AX.25 callsign from the given
 * sockaddr into the device's hardware address.  Both the transmit lock
 * and the address lock are held across the update so a concurrent
 * transmit never observes a half-written address.
 * Always returns 0.
 */
static int ax_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr_ax25 *sa = addr;
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
__dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return 0;
}
| 0
|
466,101
|
/*
 * Return the segment-override value recorded in the emulation context,
 * or 0 when the current instruction carries no override prefix.
 */
static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->has_seg_override ? ctxt->seg_override : 0;
}
| 0
|
353,155
|
// Propagate a CTM change from the PDF graphics state to the Splash
// rasterizer.  The six concatenated-matrix arguments are unused: the
// fully updated CTM is read back from GfxState and copied element-wise
// into Splash's coordinate type.
void SplashOutputDev::updateCTM(GfxState *state, double m11, double m12,
                                double m21, double m22,
                                double m31, double m32) {
  const double *ctm = state->getCTM();
  SplashCoord mat[6];
  for (int i = 0; i < 6; ++i) {
    mat[i] = (SplashCoord)ctm[i];
  }
  splash->setMatrix(mat);
}
| 0
|
384,817
|
/*
 * "tempname()" function: return a temporary file name as a string.
 * A static character cycles through A-Z and 0-9 across calls (34 values)
 * so consecutive calls yield different names; 'I' and 'O' are skipped
 * because they are used for shell redirection.
 */
f_tempname(typval_T *argvars UNUSED, typval_T *rettv)
{
    static int x = 'A';

    rettv->v_type = VAR_STRING;
    rettv->vval.v_string = vim_tempname(x, FALSE);

    // Choose the character for the next call: wrap Z -> 0 and 9 -> A,
    // stepping over 'I' and 'O'.
    for (;;)
    {
	if (x == 'Z')
	    x = '0';
	else if (x == '9')
	    x = 'A';
	else
	    ++x;
	if (x != 'I' && x != 'O')
	    break;
    }
}
| 0
|
293,530
|
/*
 * Advance the scanner, collecting characters into *out until
 * "until_char" (or the end of input) is reached.  curptr is left on the
 * terminating character, which is NOT consumed.  Raises a scanner syntax
 * error when already at end of input.  When skip_ws is enabled and the
 * stop character looks like whitespace, trailing whitespace is skipped.
 *
 * NOTE(review): the "*s" peek after the loop assumes the buffer is
 * safely readable at scanner->end (e.g. NUL-terminated) -- confirm
 * against the scanner's initialization contract.
 */
PJ_DEF(void) pj_scan_get_until_ch( pj_scanner *scanner,
int until_char, pj_str_t *out)
{
register char *s = scanner->curptr;
if (s >= scanner->end) {
pj_scan_syntax_err(scanner);
return;
}
while (PJ_SCAN_CHECK_EOF(s) && *s != until_char) {
++s;
}
pj_strset3(out, scanner->curptr, s);
scanner->curptr = s;
if (PJ_SCAN_IS_PROBABLY_SPACE(*s) && scanner->skip_ws) {
pj_scan_skip_whitespace(scanner);
}
}
| 0
|
261,398
|
// Initialize the CABAC context models of this thread context for the
// current slice, using the slice QP and cabac_init type from the slice
// header, and reset the four StatCoeff counters.
void initialize_CABAC_models(thread_context* tctx)
{
  const int qp = tctx->shdr->SliceQPY;
  const int init_type = tctx->shdr->initType;

  assert(init_type >= 0 && init_type <= 2);  // only types 0..2 are defined

  tctx->ctx_model.init(init_type, qp);

  for (int idx = 0; idx < 4; idx++)
    tctx->StatCoeff[idx] = 0;
}
| 0
|
203,614
|
/*
 * write() handler for the PXA3xx graphics controller: queue one batch of
 * command words copied from user space.
 *
 * Returns the number of bytes consumed, -E2BIG when the batch does not
 * fit in a buffer, -EFAULT on a bad user pointer, or a negative error
 * from waiting for a free buffer.
 */
pxa3xx_gcu_write(struct file *file, const char *buff,
		 size_t count, loff_t *offp)
{
	int ret;
	unsigned long flags;
	struct pxa3xx_gcu_batch *buffer;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	/*
	 * Fix: use size_t, not int.  With a 64-bit "count", truncating
	 * count / 4 into an int could produce a small or negative value
	 * that bypasses the PXA3XX_GCU_BATCH_WORDS bound check below and
	 * overflows the batch buffer in copy_from_user()
	 * (CVE-2022-39842).
	 */
	size_t words = count / 4;

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_writes++;
	priv->shared->num_words += words;

	/* Last word reserved for batch buffer end command */
	if (words >= PXA3XX_GCU_BATCH_WORDS)
		return -E2BIG;

	/* Wait for a free buffer */
	if (!priv->free) {
		ret = pxa3xx_gcu_wait_free(priv);
		if (ret < 0)
			return ret;
	}

	/*
	 * Get buffer from free list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);
	buffer = priv->free;
	priv->free = buffer->next;
	spin_unlock_irqrestore(&priv->spinlock, flags);

	/* Copy data from user into buffer; return it to the free list
	 * if the copy faults. */
	ret = copy_from_user(buffer->ptr, buff, words * 4);
	if (ret) {
		spin_lock_irqsave(&priv->spinlock, flags);
		buffer->next = priv->free;
		priv->free = buffer;
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return -EFAULT;
	}

	buffer->length = words;

	/* Append batch buffer end command */
	buffer->ptr[words] = 0x01000000;

	/*
	 * Add buffer to ready list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);
	buffer->next = NULL;
	if (priv->ready) {
		BUG_ON(priv->ready_last == NULL);
		priv->ready_last->next = buffer;
	} else
		priv->ready = buffer;
	priv->ready_last = buffer;
	if (!priv->shared->hw_running)
		run_ready(priv);
	spin_unlock_irqrestore(&priv->spinlock, flags);

	return words * 4;
}
| 1
|
337,795
|
/*
 * Report a missing mandatory parameter found while processing an
 * INIT/INIT-ACK chunk.  Allocates an ERROR chunk (with room for multiple
 * unknown parameters) if the caller has not already created one, then
 * appends a "Missing Mandatory Parameter" cause naming the single absent
 * parameter type.
 * Returns 0 to tell the caller to stop processing the chunk.
 *
 * NOTE(review): "report" is filled field-by-field; if struct
 * __sctp_missing has trailing padding, those stack bytes would be copied
 * into the chunk uninitialized -- confirm the struct is packed or
 * memset() it first.
 */
static int sctp_process_missing_param(const struct sctp_association *asoc,
enum sctp_param paramtype,
struct sctp_chunk *chunk,
struct sctp_chunk **errp)
{
struct __sctp_missing report;
__u16 len;
len = SCTP_PAD4(sizeof(report));
/* Make an ERROR chunk, preparing enough room for
 * returning multiple unknown parameters.
 */
if (!*errp)
*errp = sctp_make_op_error_space(asoc, chunk, len);
if (*errp) {
report.num_missing = htonl(1);
report.type = paramtype;
sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
sizeof(report));
sctp_addto_chunk(*errp, sizeof(report), &report);
}
/* Stop processing this chunk. */
return 0;
}
| 0
|
90,793
|
// Forward an "origin in use" notification to the QuotaManager.
// If called on any thread other than the IO thread, re-posts this same
// call onto the IO thread and returns immediately; on the IO thread it
// notifies the manager directly, skipping the call if the manager has
// already been destroyed.
void QuotaManagerProxy::NotifyOriginInUse(
const GURL& origin) {
if (!io_thread_->BelongsToCurrentThread()) {
io_thread_->PostTask(FROM_HERE, NewRunnableMethod(
this, &QuotaManagerProxy::NotifyOriginInUse, origin));
return;
}
if (manager_)
manager_->NotifyOriginInUse(origin);
}
| 0
|
373,532
|
/*
 * Append "ipf_list" to the expiry list and stamp its expiration time
 * 15 seconds (15000 ms) after "now".
 */
ipf_expiry_list_add(struct ovs_list *frag_exp_list, struct ipf_list *ipf_list,
                    long long now)
/* OVS_REQUIRES(ipf->ipf_lock) */
{
    enum { FRAG_LIST_TIMEOUT_MS = 15000 };

    ipf_list->expiration = now + FRAG_LIST_TIMEOUT_MS;
    ovs_list_push_back(frag_exp_list, &ipf_list->list_node);
}
| 0
|
276,923
|
/*
 * "i2c bus" command handler.
 *
 * With no argument, lists every known I2C bus; with a bus number, shows
 * just that bus.  The DM_I2C build iterates the I2C uclass; the legacy
 * build walks the static adapter table and, unless
 * CONFIG_SYS_I2C_DIRECT_BUS is set, also prints each configured mux hop
 * (stopping at the first hop with chip == 0).
 *
 * Returns 0 on success, CMD_RET_FAILURE / -1 for an invalid bus.
 */
static int do_i2c_show_bus(struct cmd_tbl *cmdtp, int flag, int argc,
char *const argv[])
{
if (argc == 1) {
/* show all busses */
#if CONFIG_IS_ENABLED(DM_I2C)
struct udevice *bus;
struct uclass *uc;
int ret;
ret = uclass_get(UCLASS_I2C, &uc);
if (ret)
return CMD_RET_FAILURE;
uclass_foreach_dev(bus, uc)
show_bus(bus);
#else
int i;
for (i = 0; i < CONFIG_SYS_NUM_I2C_BUSES; i++) {
printf("Bus %d:\t%s", i, I2C_ADAP_NR(i)->name);
#ifndef CONFIG_SYS_I2C_DIRECT_BUS
int j;
for (j = 0; j < CONFIG_SYS_I2C_MAX_HOPS; j++) {
if (i2c_bus[i].next_hop[j].chip == 0)
break;
printf("->%s@0x%2x:%d",
i2c_bus[i].next_hop[j].mux.name,
i2c_bus[i].next_hop[j].chip,
i2c_bus[i].next_hop[j].channel);
}
#endif
printf("\n");
}
#endif
} else {
int i;
/* show specific bus */
i = dectoul(argv[1], NULL);
#if CONFIG_IS_ENABLED(DM_I2C)
struct udevice *bus;
int ret;
ret = uclass_get_device_by_seq(UCLASS_I2C, i, &bus);
if (ret) {
printf("Invalid bus %d: err=%d\n", i, ret);
return CMD_RET_FAILURE;
}
show_bus(bus);
#else
if (i >= CONFIG_SYS_NUM_I2C_BUSES) {
printf("Invalid bus %d\n", i);
return -1;
}
printf("Bus %d:\t%s", i, I2C_ADAP_NR(i)->name);
#ifndef CONFIG_SYS_I2C_DIRECT_BUS
int j;
for (j = 0; j < CONFIG_SYS_I2C_MAX_HOPS; j++) {
if (i2c_bus[i].next_hop[j].chip == 0)
break;
printf("->%s@0x%2x:%d",
i2c_bus[i].next_hop[j].mux.name,
i2c_bus[i].next_hop[j].chip,
i2c_bus[i].next_hop[j].channel);
}
#endif
printf("\n");
#endif
}
return 0;
}
| 0
|
224,996
|
/*
 * PQreset: close the current connection to the backend and establish a
 * new one using the previously supplied connection parameters.
 *
 * On a successful reconnect, each registered event proc receives a
 * PGEVT_CONNRESET notification; if one fails, the connection is marked
 * CONNECTION_BAD, an error message is recorded, and the remaining procs
 * are not notified.  A NULL conn is silently ignored.
 */
PQreset(PGconn *conn)
{
if (conn)
{
closePGconn(conn);
if (connectDBStart(conn) && connectDBComplete(conn))
{
/*
 * Notify event procs of successful reset. We treat an event proc
 * failure as disabling the connection ... good idea?
 */
int i;
for (i = 0; i < conn->nEvents; i++)
{
PGEventConnReset evt;
evt.conn = conn;
if (!conn->events[i].proc(PGEVT_CONNRESET, &evt,
conn->events[i].passThrough))
{
conn->status = CONNECTION_BAD;
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n"),
conn->events[i].name);
break;
}
}
}
}
}
| 0
|
512,475
|
// Constructor: identical to Item_datetime_literal except that the item
// is marked as never NULL (maybe_null cleared after base construction).
Item_datetime_literal_for_invalid_dates(THD *thd,
const Datetime *ltime, uint dec_arg)
:Item_datetime_literal(thd, ltime, dec_arg)
{
maybe_null= false;
}
| 0
|
224,992
|
/*
 * PQparameterStatus: look up the current value of a server parameter in
 * the connection's cached ParameterStatus list.
 * Returns the value string, or NULL when conn/paramName is NULL or the
 * parameter is not known.
 */
PQparameterStatus(const PGconn *conn, const char *paramName)
{
	const pgParameterStatus *entry;

	if (!conn || !paramName)
		return NULL;

	entry = conn->pstatus;
	while (entry != NULL)
	{
		if (strcmp(entry->name, paramName) == 0)
			return entry->value;
		entry = entry->next;
	}
	return NULL;
}
| 0
|
234,189
|
/* Parse a .gnu_debuglink section: a NUL-terminated filename, padding up
   to a 4-byte boundary, then a 4-byte CRC32 of the debug file.
   Stores the CRC through DATA (an unsigned long *) and returns the
   filename, or NULL when the section starts with a NUL or is too small
   to contain the CRC after alignment.  */
parse_gnu_debuglink (struct dwarf_section * section, void * data)
{
const char * name;
unsigned int crc_offset;
unsigned long * crc32 = (unsigned long *) data;
/* The name is first.
The CRC value is stored after the filename, aligned up to 4 bytes. */
name = (const char *) section->start;
crc_offset = strnlen (name, section->size) + 1;
if (crc_offset == 1)
return NULL;
crc_offset = (crc_offset + 3) & ~3;
if (crc_offset + 4 > section->size)
return NULL;
* crc32 = byte_get (section->start + crc_offset, 4);
return name;
}
| 0
|
226,958
|
/*
 * Callback for IRC numeric 327 (whois reply; presumably RPL_WHOISHOST --
 * confirm against the server's numerics).  Requires at least 6 args:
 * argv[3] is the nick, argv[4] and argv[5] two host/address fields, and
 * argv_eol[6] (optional, leading ':' stripped) a realname that is shown
 * in parentheses when non-empty.  Output goes to the whois message
 * buffer chosen for this server/nick.
 */
IRC_PROTOCOL_CALLBACK(327)
{
char *pos_realname;
struct t_gui_buffer *ptr_buffer;
IRC_PROTOCOL_MIN_ARGS(6);
pos_realname = (argc > 6) ?
((argv_eol[6][0] == ':') ? argv_eol[6] + 1 : argv_eol[6]) : NULL;
ptr_buffer = irc_msgbuffer_get_target_buffer (server, argv[3],
command, "whois", NULL);
if (pos_realname && pos_realname[0])
{
weechat_printf_date_tags (
ptr_buffer,
date,
irc_protocol_tags (command, "irc_numeric", NULL, NULL),
"%s%s[%s%s%s] %s%s %s %s(%s%s%s)",
weechat_prefix ("network"),
IRC_COLOR_CHAT_DELIMITERS,
irc_nick_color_for_msg (server, 1, NULL, argv[3]),
argv[3],
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_CHAT_HOST,
argv[4],
argv[5],
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_RESET,
pos_realname,
IRC_COLOR_CHAT_DELIMITERS);
}
else
{
weechat_printf_date_tags (
ptr_buffer,
date,
irc_protocol_tags (command, "irc_numeric", NULL, NULL),
"%s%s[%s%s%s] %s%s %s",
weechat_prefix ("network"),
IRC_COLOR_CHAT_DELIMITERS,
irc_nick_color_for_msg (server, 1, NULL, argv[3]),
argv[3],
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_CHAT_HOST,
argv[4],
argv[5]);
}
return WEECHAT_RC_OK;
}
| 0
|
294,630
|
/* Date#next / Date#succ: return the date one day after self.
 * Delegates to d_lite_next_day with no arguments, which applies its
 * default step. */
d_lite_next(VALUE self)
{
return d_lite_next_day(0, (VALUE *)NULL, self);
}
| 0
|
214,948
|
/*
 * Built-in query handler implementing the "help" command.
 * - empty input or "help": print usage for the help handler itself;
 * - "list": enumerate all registered handlers with their descriptions;
 * - anything else: treat buf as a handler name and forward a "help"
 *   query to that handler.  A handler return value > 200 is treated as
 *   "no help available" (NOTE(review): presumably an error-code
 *   convention of the qh protocol -- confirm).
 * Always returns 0.
 */
static int qh_help(int sd, char *buf, unsigned int len)
{
struct query_handler *qh = NULL;
if (!*buf || !strcmp(buf, "help")) {
nsock_printf_nul(sd,
" help <name> show help for handler <name>\n"
" help list list registered handlers\n");
return 0;
}
if (!strcmp(buf, "list")) {
for (qh = qhandlers; qh != NULL; qh = qh->next_qh) {
nsock_printf(sd, "%-10s %s\n", qh->name, qh->description ? qh->description : "(No description available)");
}
nsock_printf(sd, "%c", 0);
return 0;
}
qh = qh_find_handler(buf);
if (qh == NULL) {
nsock_printf_nul(sd, "No handler named '%s' is registered\n", buf);
} else if (qh->handler(sd, "help", 4) > 200) {
nsock_printf_nul(sd, "The handler %s doesn't have any help yet.", buf);
}
return 0;
}
| 1
|
404,746
|
/*
 * Mark every open fd in [fd, max_fd] close-on-exec.  The upper bound is
 * clamped to the table's actual last fd under file_lock so a concurrent
 * table resize cannot make us touch bits past the bitmap.
 */
static inline void __range_cloexec(struct files_struct *cur_fds,
unsigned int fd, unsigned int max_fd)
{
struct fdtable *fdt;
/* make sure we're using the correct maximum value */
spin_lock(&cur_fds->file_lock);
fdt = files_fdtable(cur_fds);
max_fd = min(last_fd(fdt), max_fd);
if (fd <= max_fd)
bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
spin_unlock(&cur_fds->file_lock);
}
| 0
|
267,860
|
// Computes row_out[x] = (row_in[x] + row_in_Y[x]) * factor for a full row,
// vectorized with Highway.  Integer inputs are summed first, then converted
// to float and scaled.  Assumes the rows are padded to a whole number of
// vector lanes (x advances by Lanes(di)) — TODO confirm caller guarantees.
void MultiplySum(const size_t xsize,
const pixel_type* const JXL_RESTRICT row_in,
const pixel_type* const JXL_RESTRICT row_in_Y,
const float factor, float* const JXL_RESTRICT row_out) {
const HWY_FULL(float) df;
const Rebind<pixel_type, HWY_FULL(float)> di; // assumes pixel_type <= float
const auto factor_v = Set(df, factor);
for (size_t x = 0; x < xsize; x += Lanes(di)) {
// Sum in the integer domain, then convert and scale in float.
const auto in = Load(di, row_in + x) + Load(di, row_in_Y + x);
const auto out = ConvertTo(df, in) * factor_v;
Store(out, df, row_out + x);
}
}
| 0
|
355,628
|
/*
 * Free a partial (bound function reference): its bound arguments, dict,
 * function reference, outer-scope partial reference and function stack
 * reference, then the partial itself.
 */
partial_free(partial_T *pt)
{
int i;
for (i = 0; i < pt->pt_argc; ++i)
clear_tv(&pt->pt_argv[i]);
vim_free(pt->pt_argv);
dict_unref(pt->pt_dict);
// A partial holds either a function name or a direct function pointer;
// drop whichever reference is present.
if (pt->pt_name != NULL)
{
func_unref(pt->pt_name);
vim_free(pt->pt_name);
}
else
func_ptr_unref(pt->pt_func);
// "out_up" is no longer used, decrement refcount on partial that owns it.
partial_unref(pt->pt_outer.out_up_partial);
// Decrease the reference count for the context of a closure. If down
// to the minimum it may be time to free it.
if (pt->pt_funcstack != NULL)
{
--pt->pt_funcstack->fs_refcount;
funcstack_check_refcount(pt->pt_funcstack);
}
vim_free(pt);
}
| 0
|
328,835
|
/* Print a human-readable summary of a CONSTANT_MethodHandle constant-pool
 * entry to stderr.
 * NOTE(review): ref_kind indexes R_BIN_JAVA_REF_METAS without a bounds
 * check; the JVM spec restricts reference_kind to 1..9, but a malformed
 * class file could carry any byte — verify the table covers all 256
 * values or add a range check.  */
R_API void r_bin_java_print_methodhandle_cp_summary(RBinJavaCPTypeObj *obj) {
ut8 ref_kind;
if (!obj) {
eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* RBinJavaCPTypeMethodHandle.\n");
return;
}
ref_kind = obj->info.cp_method_handle.reference_kind;
eprintf ("MethodHandle ConstantPool Type (%d) ", obj->metas->ord);
eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset);
eprintf (" Reference Kind = (0x%02x) %s\n", ref_kind, R_BIN_JAVA_REF_METAS[ref_kind].name);
eprintf (" Reference Index = %d\n", obj->info.cp_method_handle.reference_index);
}
| 0
|
208,987
|
PlayerGeneric::~PlayerGeneric()
{
	// Detach and destroy the player *before* the mixer.  The previous code
	// deleted the mixer first and then dereferenced it through
	// mixer->isActive()/isDeviceRemoved() — a use-after-free.
	if (player)
	{
		if (mixer && mixer->isActive() && !mixer->isDeviceRemoved(player))
			mixer->removeDevice(player);
		delete player;
	}
	delete mixer;             // delete on NULL is a no-op; no guard needed
	delete[] audioDriverName;
	delete listener;
}
| 1
|
508,865
|
/*
  Evaluate the LIMIT clause of this SELECT.  Returns HA_POS_ERROR (i.e.
  "no limit") when no limit item is present or fix_fields() fails.
*/
ha_rows st_select_lex::get_limit()
{
ulonglong val= HA_POS_ERROR;
if (select_limit)
{
/*
fix_fields() has not been called for select_limit. That's due to the
historical reasons -- this item could be only of type Item_int, and
Item_int does not require fix_fields(). Thus, fix_fields() was never
called for select_limit.
Some time ago, Item_splocal was also allowed for LIMIT / OFFSET clauses.
However, the fix_fields() behavior was not updated, which led to a crash
in some cases.
There is no single place where to call fix_fields() for LIMIT / OFFSET
items during the fix-fields-phase. Thus, for the sake of readability,
it was decided to do it here, on the evaluation phase (which is a
violation of design, but we chose the lesser of two evils).
We can call fix_fields() here, because select_limit can be of two
types only: Item_int and Item_splocal. Item_int::fix_fields() is trivial,
and Item_splocal::fix_fields() (or rather Item_sp_variable::fix_fields())
has the following properties:
1) it does not affect other items;
2) it does not fail.
Nevertheless DBUG_ASSERT was added to catch future changes in
fix_fields() implementation. Also added runtime check against a result
of fix_fields() in order to handle error condition in non-debug build.
*/
bool fix_fields_successful= true;
if (!select_limit->fixed)
{
fix_fields_successful= !select_limit->fix_fields(master_unit()->thd,
NULL);
DBUG_ASSERT(fix_fields_successful);
}
// On failure fall back to "no limit" rather than reading an unfixed item.
val= fix_fields_successful ? select_limit->val_uint() : HA_POS_ERROR;
}
return (ha_rows)val;
}
| 0
|
261,764
|
void RtmpProtocol::sendRtmp(uint8_t type, uint32_t stream_index, const std::string &buffer, uint32_t stamp, int chunk_id) {
    // Convenience overload: wrap the plain string in a BufferString and
    // forward to the Buffer-pointer overload.  Behavior is unchanged.
    auto wrapped = std::make_shared<BufferString>(buffer);
    sendRtmp(type, stream_index, wrapped, stamp, chunk_id);
}
| 0
|
226,260
|
/* Parse a CodingConstraints ('ccst') box from the bitstream: three flag/
 * count fields packed into 32 bits (1+1+4+26).  Read order must match the
 * spec exactly, so the gf_bs_read_int calls may not be reordered. */
GF_Err ccst_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s;
/* Fail with GF_ISOM_INVALID_FILE if fewer than 4 bytes remain. */
ISOM_DECREASE_SIZE(ptr, 4);
ptr->all_ref_pics_intra = gf_bs_read_int(bs, 1);
ptr->intra_pred_used = gf_bs_read_int(bs, 1);
ptr->max_ref_per_pic = gf_bs_read_int(bs, 4);
ptr->reserved = gf_bs_read_int(bs, 26);
return GF_OK;
}
| 0
|
328,987
|
/* Print a human-readable summary of a CONSTANT_MethodType constant-pool
 * entry.  Uses eprintf (stderr) for consistency with the sibling
 * *_cp_summary printers (e.g. the MethodHandle one), which the original
 * code inconsistently mixed with stdout printf. */
R_API void r_bin_java_print_methodtype_cp_summary(RBinJavaCPTypeObj *obj) {
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* RBinJavaCPTypeMethodType.\n");
		return;
	}
	eprintf ("MethodType ConstantPool Type (%d) ", obj->metas->ord);
	eprintf (" Offset: 0x%08"PFMT64x "", obj->file_offset);
	eprintf (" Descriptor Index = 0x%02x\n", obj->info.cp_method_type.descriptor_index);
}
| 0
|
349,527
|
static void virtbt_tx_done(struct virtqueue *vq)
{
	unsigned int len;
	struct sk_buff *skb;

	/* Drain every completed TX buffer from the virtqueue and release
	 * its skb; the returned length is not needed. */
	for (;;) {
		skb = virtqueue_get_buf(vq, &len);
		if (!skb)
			break;
		kfree_skb(skb);
	}
}
| 0
|
401,533
|
/*
 * Return the ktime (ns) of the next pending timer on this CPU's standard
 * timer base, given the current jiffies (basej) and ktime (basem).
 * Returns KTIME_MAX when no timer is pending (e.g. CPU offline).  Also
 * updates the base's idle bookkeeping as a side effect.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
u64 expires = KTIME_MAX;
unsigned long nextevt;
bool is_max_delta;
/*
* Pretend that there is no timer pending if the cpu is offline.
* Possible pending timers will be migrated later to an active cpu.
*/
if (cpu_is_offline(smp_processor_id()))
return expires;
raw_spin_lock(&base->lock);
nextevt = __next_timer_interrupt(base);
is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
/*
* We have a fresh next event. Check whether we can forward the
* base. We can only do that when @basej is past base->clk
* otherwise we might rewind base->clk.
*/
if (time_after(basej, base->clk)) {
if (time_after(nextevt, basej))
base->clk = basej;
else if (time_after(nextevt, base->clk))
base->clk = nextevt;
}
if (time_before_eq(nextevt, basej)) {
/* Event already due (or in the past): fire now, base stays busy. */
expires = basem;
base->is_idle = false;
} else {
if (!is_max_delta)
expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
/*
* If we expect to sleep more than a tick, mark the base idle.
* Also the tick is stopped so any added timer must forward
* the base clk itself to keep granularity small. This idle
* logic is only maintained for the BASE_STD base, deferrable
* timers may still see large granularity skew (by design).
*/
if ((expires - basem) > TICK_NSEC) {
base->must_forward_clk = true;
base->is_idle = true;
}
}
raw_spin_unlock(&base->lock);
return cmp_next_hrtimer_event(basem, expires);
}
| 0
|
244,093
|
/* Allocate a zeroed 'tsro' (time stamp receive offset hint) box.  The
 * ISOM_DECL_BOX_ALLOC macro declares and initializes a local 'tmp'. */
GF_Box *tsro_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_TimeOffHintEntryBox, GF_ISOM_BOX_TYPE_TSRO);
return (GF_Box *)tmp;
}
| 0
|
366,306
|
/*
 * fsmount(2): take a configured filesystem context (from fsopen/fsconfig,
 * identified by fs_fd), create a mount for it in a detached namespace, and
 * return an O_PATH fd referring to the new mount.  flags may contain only
 * FSMOUNT_CLOEXEC; attr_flags carry MOUNT_ATTR_* for the new mount.
 */
SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
unsigned int, attr_flags)
{
struct mnt_namespace *ns;
struct fs_context *fc;
struct file *file;
struct path newmount;
struct mount *mnt;
struct fd f;
unsigned int mnt_flags = 0;
long ret;
if (!may_mount())
return -EPERM;
if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
return -EINVAL;
if (attr_flags & ~FSMOUNT_VALID_FLAGS)
return -EINVAL;
mnt_flags = attr_flags_to_mnt_flags(attr_flags);
/* Exactly one atime mode may be requested. */
switch (attr_flags & MOUNT_ATTR__ATIME) {
case MOUNT_ATTR_STRICTATIME:
break;
case MOUNT_ATTR_NOATIME:
mnt_flags |= MNT_NOATIME;
break;
case MOUNT_ATTR_RELATIME:
mnt_flags |= MNT_RELATIME;
break;
default:
return -EINVAL;
}
f = fdget(fs_fd);
if (!f.file)
return -EBADF;
ret = -EINVAL;
/* fs_fd must really be an fs_context fd from fsopen(). */
if (f.file->f_op != &fscontext_fops)
goto err_fsfd;
fc = f.file->private_data;
ret = mutex_lock_interruptible(&fc->uapi_mutex);
if (ret < 0)
goto err_fsfd;
/* There must be a valid superblock or we can't mount it */
ret = -EINVAL;
if (!fc->root)
goto err_unlock;
ret = -EPERM;
if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
pr_warn("VFS: Mount too revealing\n");
goto err_unlock;
}
ret = -EBUSY;
if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
goto err_unlock;
ret = -EPERM;
if ((fc->sb_flags & SB_MANDLOCK) && !may_mandlock())
goto err_unlock;
newmount.mnt = vfs_create_mount(fc);
if (IS_ERR(newmount.mnt)) {
ret = PTR_ERR(newmount.mnt);
goto err_unlock;
}
newmount.dentry = dget(fc->root);
newmount.mnt->mnt_flags = mnt_flags;
/* We've done the mount bit - now move the file context into more or
* less the same state as if we'd done an fspick(). We don't want to
* do any memory allocation or anything like that at this point as we
* don't want to have to handle any errors incurred.
*/
vfs_clean_context(fc);
/* Put the mount in its own (anonymous) mount namespace. */
ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
if (IS_ERR(ns)) {
ret = PTR_ERR(ns);
goto err_path;
}
mnt = real_mount(newmount.mnt);
mnt->mnt_ns = ns;
ns->root = mnt;
ns->mounts = 1;
list_add(&mnt->mnt_list, &ns->list);
mntget(newmount.mnt);
/* Attach to an apparent O_PATH fd with a note that we need to unmount
* it, not just simply put it.
*/
file = dentry_open(&newmount, O_PATH, fc->cred);
if (IS_ERR(file)) {
dissolve_on_fput(newmount.mnt);
ret = PTR_ERR(file);
goto err_path;
}
file->f_mode |= FMODE_NEED_UNMOUNT;
ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
if (ret >= 0)
fd_install(ret, file);
else
fput(file);
err_path:
path_put(&newmount);
err_unlock:
mutex_unlock(&fc->uapi_mutex);
err_fsfd:
fdput(f);
return ret;
}
| 0
|
336,513
|
SPICE_GNUC_VISIBLE int spice_server_add_ssl_client(SpiceServer *reds, int socket, int skip_auth)
{
    /* Start an SSL handshake on the given socket; on success record
     * whether authentication should be skipped for this client. */
    RedLinkInfo *link = reds_init_client_ssl_connection(reds, socket);

    if (link == NULL) {
        return -1;
    }
    link->skip_auth = skip_auth;
    return 0;
}
| 0
|
244,060
|
/* Parse a 'paen' (partition entry) container box: it holds only child
 * boxes, so delegate entirely to the generic child-array reader. */
GF_Err paen_box_read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs);
}
| 0
|
508,909
|
static LEX_STRING get_quoted_token(Lex_input_stream *lip,
uint skip,
uint length, char quote)
{
LEX_STRING tmp;
const char *from, *end;
char *to;
lip->yyUnget(); // ptr points now after last token char
tmp.length= length;
tmp.str=(char*) lip->m_thd->alloc(tmp.length+1);
if (tmp.str == NULL)
{
/*
Allocation failed: return an empty token instead of dereferencing a
NULL buffer below. The THD has already recorded the OOM condition.
*/
tmp.length= 0;
return tmp;
}
from= lip->get_tok_start() + skip;
to= tmp.str;
end= to+length;
lip->m_cpp_text_start= lip->get_cpp_tok_start() + skip;
lip->m_cpp_text_end= lip->m_cpp_text_start + length;
for ( ; to != end; )
{
if ((*to++= *from++) == quote)
{
from++; // Skip double quotes
lip->m_cpp_text_start++;
}
}
*to= 0; // End null for safety
return tmp;
}
| 0
|
369,418
|
*/
/*
 * Block until at least min_events completions are present in the CQ ring,
 * honoring an optional signal mask and timeout.  Returns 0 when satisfied,
 * -EBUSY if CQ overflow can no longer be flushed, or a signal/timeout error.
 * NOTE(review): the closing brace of this function lies beyond this chunk.
 */
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
const sigset_t __user *sig, size_t sigsz,
struct __kernel_timespec __user *uts)
{
struct io_wait_queue iowq;
struct io_rings *rings = ctx->rings;
ktime_t timeout = KTIME_MAX;
int ret;
/* Fast path: drain overflow / run task work until enough CQEs exist. */
do {
io_cqring_overflow_flush(ctx);
if (io_cqring_events(ctx) >= min_events)
return 0;
if (!io_run_task_work())
break;
} while (1);
if (sig) {
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
sigsz);
else
#endif
ret = set_user_sigmask(sig, sigsz);
if (ret)
return ret;
}
if (uts) {
struct timespec64 ts;
if (get_timespec64(&ts, uts))
return -EFAULT;
/* Relative timeout: convert to an absolute ktime deadline. */
timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
}
init_waitqueue_func_entry(&iowq.wq, io_wake_function);
iowq.wq.private = current;
INIT_LIST_HEAD(&iowq.wq.entry);
iowq.ctx = ctx;
iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
trace_io_uring_cqring_wait(ctx, min_events);
do {
/* if we can't even flush overflow, don't wait for more */
if (!io_cqring_overflow_flush(ctx)) {
ret = -EBUSY;
break;
}
prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
TASK_INTERRUPTIBLE);
ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
finish_wait(&ctx->cq_wait, &iowq.wq);
cond_resched();
} while (ret > 0);
restore_saved_sigmask_unless(ret == -EINTR);
return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
| 0
|
358,129
|
/* Unit test: sonmp_send() must emit exactly two SONMP hello frames
 * (segment hello 0x01a2 and flatnet hello 0x01a1) matching the reference
 * byte sequences below, given a chassis with one IPv4 management address. */
START_TEST (test_send_sonmp)
{
int n;
/* Packet we should build:
IEEE 802.3 Ethernet
Destination: Bay-Networks-(Synoptics)-autodiscovery (01:00:81:00:01:00)
Source: 5e:10:8e:e7:84:ad (5e:10:8e:e7:84:ad)
Length: 19
Logical-Link Control
DSAP: SNAP (0xaa)
IG Bit: Individual
SSAP: SNAP (0xaa)
CR Bit: Command
Control field: U, func=UI (0x03)
000. 00.. = Command: Unnumbered Information (0x00)
.... ..11 = Frame type: Unnumbered frame (0x03)
Organization Code: Nortel Networks SONMP (0x000081)
PID: SONMP segment hello (0x01a2)
Nortel Networks / SynOptics Network Management Protocol
NMM IP address: 172.17.142.37 (172.17.142.37)
Segment Identifier: 0x000004
Chassis type: Unknown (1)
Backplane type: ethernet, fast ethernet and gigabit ethernet (12)
NMM state: New (3)
Number of links: 1
IEEE 802.3 Ethernet
Destination: Bay-Networks-(Synoptics)-autodiscovery (01:00:81:00:01:01)
Source: 5e:10:8e:e7:84:ad (5e:10:8e:e7:84:ad)
Length: 19
Logical-Link Control
DSAP: SNAP (0xaa)
IG Bit: Individual
SSAP: SNAP (0xaa)
CR Bit: Command
Control field: U, func=UI (0x03)
000. 00.. = Command: Unnumbered Information (0x00)
.... ..11 = Frame type: Unnumbered frame (0x03)
Organization Code: Nortel Networks SONMP (0x000081)
PID: SONMP flatnet hello (0x01a1)
Nortel Networks / SynOptics Network Management Protocol
NMM IP address: 172.17.142.37 (172.17.142.37)
Segment Identifier: 0x000004
Chassis type: Unknown (1)
Backplane type: ethernet, fast ethernet and gigabit ethernet (12)
NMM state: New (3)
Number of links: 1
*/
char pkt1[] = {
0x01, 0x00, 0x81, 0x00, 0x01, 0x00, 0x5e, 0x10,
0x8e, 0xe7, 0x84, 0xad, 0x00, 0x13, 0xaa, 0xaa,
0x03, 0x00, 0x00, 0x81, 0x01, 0xa2, 0xac, 0x11,
0x8e, 0x25, 0x00, 0x00, 0x04, 0x01, 0x0c, 0x03,
0x01 };
char pkt2[] = {
0x01, 0x00, 0x81, 0x00, 0x01, 0x01, 0x5e, 0x10,
0x8e, 0xe7, 0x84, 0xad, 0x00, 0x13, 0xaa, 0xaa,
0x03, 0x00, 0x00, 0x81, 0x01, 0xa1, 0xac, 0x11,
0x8e, 0x25, 0x00, 0x00, 0x04, 0x01, 0x0c, 0x03,
0x01 };
struct packet *pkt;
in_addr_t addr;
struct lldpd_mgmt *mgmt;
/* Populate port and chassis */
hardware.h_lport.p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME;
hardware.h_lport.p_id = "Not used";
hardware.h_lport.p_id_len = strlen(hardware.h_lport.p_id);
chassis.c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR;
chassis.c_id = macaddress;
chassis.c_id_len = ETHER_ADDR_LEN;
TAILQ_INIT(&chassis.c_mgmt);
addr = inet_addr("172.17.142.37");
mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4,
&addr, sizeof(in_addr_t), 0);
if (mgmt == NULL)
ck_abort();
TAILQ_INSERT_TAIL(&chassis.c_mgmt, mgmt, m_entries);
/* Build packet */
n = sonmp_send(NULL, &hardware);
if (n != 0) {
fail("unable to build packet");
return;
}
if (TAILQ_EMPTY(&pkts)) {
fail("no packets sent");
return;
}
/* First frame: segment hello, byte-for-byte. */
pkt = TAILQ_FIRST(&pkts);
ck_assert_int_eq(pkt->size, sizeof(pkt1));
fail_unless(memcmp(pkt->data, pkt1, sizeof(pkt1)) == 0);
/* Second frame: flatnet hello; exactly two frames total. */
pkt = TAILQ_NEXT(pkt, next);
if (!pkt) {
fail("need one more packet");
return;
}
ck_assert_int_eq(pkt->size, sizeof(pkt2));
fail_unless(memcmp(pkt->data, pkt2, sizeof(pkt2)) == 0);
fail_unless(TAILQ_NEXT(pkt, next) == NULL, "more than two packets sent");
}
| 0
|
270,357
|
/**
 * Parse an ES module `export` statement (export default …, export * …,
 * export var/let/const/class/function, export { … } [from …]).
 * Registers the exported names on the current module context.
 *
 * @return true when the caller must consume the statement terminator of
 *         the embedded declaration, false otherwise.
 */
parser_parse_export_statement (parser_context_t *context_p) /**< context */
{
JERRY_ASSERT (context_p->token.type == LEXER_KEYW_EXPORT);
JERRY_ASSERT (context_p->module_names_p == NULL);
parser_module_check_request_place (context_p);
bool consume_last_statement = false;
lexer_next_token (context_p);
switch (context_p->token.type)
{
case LEXER_KEYW_DEFAULT:
{
scanner_location_t location;
scanner_get_location (&location, context_p);
context_p->status_flags |= PARSER_MODULE_STORE_IDENT;
lexer_next_token (context_p);
/* Skip a leading 'async' that belongs to an async function declaration. */
if (context_p->token.type == LEXER_LITERAL
&& lexer_token_is_async (context_p)
&& context_p->next_scanner_info_p->source_p == context_p->source_p
&& context_p->next_scanner_info_p->type == SCANNER_TYPE_FUNCTION)
{
#if JERRY_FUNCTION_TO_STRING
context_p->function_start_p = context_p->token.lit_location.char_p;
#endif /* JERRY_FUNCTION_TO_STRING */
lexer_next_token (context_p);
}
if (context_p->token.type == LEXER_KEYW_CLASS)
{
context_p->status_flags |= PARSER_MODULE_DEFAULT_CLASS_OR_FUNC;
parser_parse_class (context_p, true);
consume_last_statement = true;
}
else if (context_p->token.type == LEXER_KEYW_FUNCTION)
{
context_p->status_flags |= PARSER_MODULE_DEFAULT_CLASS_OR_FUNC;
parser_parse_function_statement (context_p);
consume_last_statement = true;
}
else
{
/* Assignment expression */
scanner_set_location (context_p, &location);
/* 15.2.3.5 Use the synthetic name '*default*' as the identifier. */
lexer_construct_literal_object (context_p, &lexer_default_literal, lexer_default_literal.type);
context_p->token.lit_location.type = LEXER_IDENT_LITERAL;
parser_emit_cbc_literal_from_token (context_p, CBC_PUSH_LITERAL);
/* Do not overwrite this identifier. */
context_p->status_flags &= (uint32_t) ~PARSER_MODULE_STORE_IDENT;
context_p->module_identifier_lit_p = context_p->lit_object.literal_p;
/* Fake an assignment to the default identifier */
context_p->token.type = LEXER_ASSIGN;
parser_parse_expression_statement (context_p, PARSE_EXPR_NO_COMMA | PARSE_EXPR_HAS_LITERAL);
}
/* Register the value under the exported name "default". */
ecma_string_t *name_p = parser_new_ecma_string_from_literal (context_p->module_identifier_lit_p);
ecma_string_t *export_name_p = ecma_get_magic_string (LIT_MAGIC_STRING_DEFAULT);
if (parser_module_check_duplicate_export (context_p, export_name_p))
{
ecma_deref_ecma_string (name_p);
ecma_deref_ecma_string (export_name_p);
parser_raise_error (context_p, PARSER_ERR_DUPLICATED_EXPORT_IDENTIFIER);
}
parser_module_add_names_to_node (context_p,
export_name_p,
name_p);
ecma_deref_ecma_string (name_p);
ecma_deref_ecma_string (export_name_p);
break;
}
case LEXER_MULTIPLY:
{
/* export * [as name] from "specifier" */
lexer_next_token (context_p);
ecma_module_node_t **target_node_list_p = &(JERRY_CONTEXT (module_current_p)->star_exports_p);
if (lexer_token_is_identifier (context_p, "as", 2))
{
target_node_list_p = &(JERRY_CONTEXT (module_current_p)->indirect_exports_p);
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
parser_raise_error (context_p, PARSER_ERR_IDENTIFIER_EXPECTED);
}
lexer_construct_literal_object (context_p, &context_p->token.lit_location, LEXER_NEW_IDENT_LITERAL);
lexer_literal_t *literal_p = PARSER_GET_LITERAL (context_p->lit_object.index);
ecma_string_t *export_name_p = parser_new_ecma_string_from_literal (literal_p);
if (parser_module_check_duplicate_export (context_p, export_name_p))
{
ecma_deref_ecma_string (export_name_p);
parser_raise_error (context_p, PARSER_ERR_DUPLICATED_EXPORT_IDENTIFIER);
}
ecma_string_t *local_name_p = ecma_get_magic_string (LIT_MAGIC_STRING_ASTERIX_CHAR);
parser_module_add_names_to_node (context_p, export_name_p, local_name_p);
ecma_deref_ecma_string (export_name_p);
lexer_next_token (context_p);
}
if (!lexer_token_is_identifier (context_p, "from", 4))
{
parser_raise_error (context_p, PARSER_ERR_FROM_EXPECTED);
}
lexer_next_token (context_p);
parser_module_handle_module_specifier (context_p, target_node_list_p);
return false;
}
case LEXER_KEYW_VAR:
case LEXER_KEYW_LET:
case LEXER_KEYW_CONST:
{
context_p->status_flags |= PARSER_MODULE_STORE_IDENT;
parser_parse_var_statement (context_p);
break;
}
case LEXER_KEYW_CLASS:
{
context_p->status_flags |= PARSER_MODULE_STORE_IDENT;
parser_parse_class (context_p, true);
consume_last_statement = true;
break;
}
case LEXER_KEYW_FUNCTION:
{
context_p->status_flags |= PARSER_MODULE_STORE_IDENT;
parser_parse_function_statement (context_p);
consume_last_statement = true;
break;
}
case LEXER_LEFT_BRACE:
{
/* export { name [as alias], ... } [from "specifier"] */
parser_module_parse_export_clause (context_p);
if (lexer_token_is_identifier (context_p, "from", 4))
{
lexer_next_token (context_p);
parser_module_handle_module_specifier (context_p, &(JERRY_CONTEXT (module_current_p)->indirect_exports_p));
return false;
}
break;
}
default:
{
parser_raise_error (context_p, PARSER_ERR_LEFT_BRACE_MULTIPLY_LITERAL_EXPECTED);
break;
}
}
context_p->status_flags &= (uint32_t) ~(PARSER_MODULE_DEFAULT_CLASS_OR_FUNC | PARSER_MODULE_STORE_IDENT);
parser_module_append_names (context_p, &(JERRY_CONTEXT (module_current_p)->local_exports_p));
return consume_last_statement;
} /* parser_parse_export_statement */
| 0
|
226,318
|
/* Allocate a zeroed 'fecr' (FEC reservoir) box; ISOM_DECL_BOX_ALLOC
 * declares the local 'tmp'.  NOTE(review): the closing brace of this
 * function lies beyond this chunk. */
GF_Box *fecr_box_new()
{
ISOM_DECL_BOX_ALLOC(FECReservoirBox, GF_ISOM_BOX_TYPE_FECR);
return (GF_Box *)tmp;
| 0
|
357,668
|
// Create or update a class slot.  Closures and explicitly-static slots go
// into the static (method/metamethod) table; everything else becomes an
// instance field default.  Returns false when the class is locked (already
// instantiated) for a non-static slot, or the member table is full.
bool SQClass::NewSlot(SQSharedState *ss,const SQObjectPtr &key,const SQObjectPtr &val,bool bstatic)
{
SQObjectPtr temp;
bool belongs_to_static_table = sq_type(val) == OT_CLOSURE || sq_type(val) == OT_NATIVECLOSURE || bstatic;
if(_locked && !belongs_to_static_table)
return false; //the class already has an instance so cannot be modified
if(_members->Get(key,temp) && _isfield(temp)) //overrides the default value
{
_defaultvalues[_member_idx(temp)].val = val;
return true;
}
if (_members->CountUsed() >= MEMBER_MAX_COUNT) {
return false;
}
if(belongs_to_static_table) {
SQInteger mmidx;
// A closure named after a metamethod goes into the metamethod table.
if((sq_type(val) == OT_CLOSURE || sq_type(val) == OT_NATIVECLOSURE) &&
(mmidx = ss->GetMetaMethodIdxByName(key)) != -1) {
_metamethods[mmidx] = val;
}
else {
SQObjectPtr theval = val;
// Derived classes get a clone bound to the base for super calls.
if(_base && sq_type(val) == OT_CLOSURE) {
theval = _closure(val)->Clone();
_closure(theval)->_base = _base;
__ObjAddRef(_base); //ref for the closure
}
if(sq_type(temp) == OT_NULL) {
bool isconstructor;
SQVM::IsEqual(ss->_constructoridx, key, isconstructor);
if(isconstructor) {
_constructoridx = (SQInteger)_methods.size();
}
SQClassMember m;
m.val = theval;
_members->NewSlot(key,SQObjectPtr(_make_method_idx(_methods.size())));
_methods.push_back(m);
}
else {
_methods[_member_idx(temp)].val = theval;
}
}
return true;
}
// Non-static slot: record a new field with its default value.
SQClassMember m;
m.val = val;
_members->NewSlot(key,SQObjectPtr(_make_field_idx(_defaultvalues.size())));
_defaultvalues.push_back(m);
return true;
}
| 0
|
224,481
|
/* Filter init: pre-create the shared write bitstream used by the text
 * importers.  NOTE(review): gf_bs_new is handed a 1-byte stack buffer that
 * goes out of scope here; presumably the bitstream is always re-targeted
 * (gf_bs_reassign_buffer) before any write — confirm against the users of
 * ctx->bs_w. */
GF_Err txtin_initialize(GF_Filter *filter)
{
char data[1];
GF_TXTIn *ctx = gf_filter_get_udta(filter);
ctx->bs_w = gf_bs_new(data, 1, GF_BITSTREAM_WRITE);
return GF_OK;
}
| 0
|
409,439
|
cursor_off(void)
{
    // Only act when running full-screen with a currently visible cursor.
    if (!full_screen || cursor_is_off)
	return;
    out_str(T_VI);		// send termcap "cursor invisible"
    cursor_is_off = TRUE;	// remember so we don't emit it twice
}
| 0
|
364,786
|
/*
 * Return the total length of a tags-file match line "lbuf", which consists
 * of several NUL-separated fields (mirrors the walk done by parse_match()).
 */
matching_line_len(char_u *lbuf)
{
char_u *p = lbuf + 1;
// does the same thing as parse_match()
p += STRLEN(p) + 1;
#ifdef FEAT_EMACS_TAGS
// Emacs-style tags carry one extra NUL-separated field.
p += STRLEN(p) + 1;
#endif
return (p - lbuf) + STRLEN(p);
}
| 0
|
247,335
|
/*
 * Extract the 8-byte key ID (the low 64 bits of the key fingerprint) from
 * a pubkey packet.
 *
 * @param h	raw packet data
 * @param hlen	length of the packet data
 * @param keyid	receives the last 8 fingerprint bytes when available
 * @return	the pgpPubkeyFingerprint() return code
 */
static int getKeyID(const uint8_t *h, size_t hlen, pgpKeyID_t keyid)
{
    uint8_t *fp = NULL;
    size_t fplen = 0;
    int rc = pgpPubkeyFingerprint(h, hlen, &fp, &fplen);
    if (fp && fplen > 8)
	memcpy(keyid, (fp + (fplen-8)), 8);
    /* Always release the fingerprint buffer: the previous code freed it
     * only on the fplen > 8 path, leaking it otherwise.  free(NULL) is a
     * no-op, so no guard is needed. */
    free(fp);
    return rc;
}
| 0
|
380,957
|
/*
 * Handle a newline typed in Insert mode: check abbreviations, break undo,
 * then open a new line below the cursor.  Returns OK/FAIL.
 */
ins_eol(int c)
{
int i;
// An abbreviation may expand instead of inserting the line break.
if (echeck_abbr(c + ABBR_OFF))
return OK;
if (stop_arrow() == FAIL)
return FAIL;
undisplay_dollar();
/*
* Strange Vi behaviour: In Replace mode, typing a NL will not delete the
* character under the cursor. Only push a NUL on the replace stack,
* nothing to put back when the NL is deleted.
*/
if ((State & REPLACE_FLAG) && !(State & VREPLACE_FLAG))
replace_push(NUL);
/*
* In MODE_VREPLACE state, a NL replaces the rest of the line, and starts
* replacing the next line, so we push all of the characters left on the
* line onto the replace stack. This is not done here though, it is done
* in open_line().
*/
// Put cursor on NUL if on the last char and coladd is 1 (happens after
// CTRL-O).
if (virtual_active() && curwin->w_cursor.coladd > 0)
coladvance(getviscol());
#ifdef FEAT_RIGHTLEFT
// NL in reverse insert will always start in the end of
// current line.
if (revins_on)
curwin->w_cursor.col += (colnr_T)STRLEN(ml_get_cursor());
#endif
AppendToRedobuff(NL_STR);
i = open_line(FORWARD,
has_format_option(FO_RET_COMS) ? OPENLINE_DO_COM : 0, old_indent,
NULL);
old_indent = 0;
can_cindent = TRUE;
#ifdef FEAT_FOLDING
// When inserting a line the cursor line must never be in a closed fold.
foldOpenCursor();
#endif
return i;
}
| 0
|
404,191
|
/*
 * Test-and-set the recursion bitset bit for a word-aligned offset.
 * Returns FALSE if the bit was already set (recursion already visited),
 * TRUE after setting it.
 */
static BOOL recurse_check_bit(compiler_common *common, sljit_sw bit_index)
{
uint8_t *byte;
uint8_t mask;
/* The offset must be word aligned so it maps to a unique bit. */
SLJIT_ASSERT((bit_index & (sizeof(sljit_sw) - 1)) == 0);
/* Convert the byte offset into a word index, then locate bit and byte. */
bit_index >>= SLJIT_WORD_SHIFT;
mask = 1 << (bit_index & 0x7);
byte = common->recurse_bitset + (bit_index >> 3);
if (*byte & mask)
return FALSE;
*byte |= mask;
return TRUE;
}
| 0
|
196,620
|
// Unsorted segment join: concatenates the strings of `input` whose leading
// coordinates share a segment id, separated by `separator_`.  Validates
// ranks, per-axis dimension agreement, and segment-id bounds before
// writing the output.
void Compute(OpKernelContext* context) override {
  const Tensor& input = context->input(0);
  const TensorShape& input_shape = input.shape();
  const int32_t input_dims = input_shape.dims();
  const Tensor& segment_id = context->input(1);
  const TensorShape& segment_id_shape = segment_id.shape();
  const int32_t segment_dims = segment_id_shape.dims();
  const Tensor& num_segments_tensor = context->input(2);
  OP_REQUIRES(context, num_segments_tensor.NumElements() != 0,
              errors::InvalidArgument("Number of segments cannot be empty."));
  auto num_segments = num_segments_tensor.scalar<NUM_SEGMENTS_TYPE>()();
  OP_REQUIRES(context, num_segments > 0,
              errors::InvalidArgument("Number of segments must be positive"));
  OP_REQUIRES(context, segment_dims != 0,
              errors::InvalidArgument("Segment_id cannot have rank 0"));
  OP_REQUIRES(
      context, segment_dims <= input_dims,
      errors::OutOfRange("Invalid segment_id rank ", segment_dims,
                         " for input with ", input_dims, " dimension(s)"));
  // segment_id's shape must be a prefix of input's shape.
  for (auto i = 0; i < segment_dims; i++) {
    // Fixed error message: report the input's dimension size at axis i,
    // not its rank (input_dims) as the original code did.
    OP_REQUIRES(
        context, segment_id_shape.dim_size(i) == input_shape.dim_size(i),
        errors::InvalidArgument(
            "Segment dimension is ", segment_id_shape.dim_size(i),
            " while input dimension is ", input_shape.dim_size(i),
            " in rank ", i));
  }
  // Making output tensor.
  Tensor* output_tensor = nullptr;
  TensorShape output_shape =
      GetOutputShape(input_shape, segment_id_shape, num_segments);
  OP_REQUIRES_OK(context, context->allocate_output("output", output_shape,
                                                   &output_tensor));
  // Preparating flat tensors.
  auto output_flat = output_tensor->flat<tstring>();
  auto flat_segment_id = segment_id.flat<INDICES_TYPE>();
  auto flat_input = input.flat<tstring>();
  // Reject out-of-range segment ids before any output is written.
  for (int64_t i = 0; i < flat_segment_id.size(); i++) {
    OP_REQUIRES(
        context,
        ((flat_segment_id(i) < num_segments) && (flat_segment_id(i) >= 0)),
        errors::InvalidArgument(
            "segment_ids are not allowed to exceed num_segments or"
            " to have negative values."));
  }
  int64_t big_stride;
  int64_t small_stride;
  std::tie(big_stride, small_stride) =
      GetStrides<INDICES_TYPE>(input_shape, segment_id_shape);
  auto relative_offset_set =
      GetFlattenedRelativeOffsets<INDICES_TYPE>(small_stride, big_stride);
  // Append each input element to its segment's output slot, inserting the
  // separator between consecutive non-empty appends.
  for (int64_t start_offset = 0; start_offset < big_stride; start_offset++) {
    for (size_t i = 0; i < relative_offset_set.size(); i++) {
      auto output_index = start_offset + flat_segment_id(i) * big_stride;
      auto offset = start_offset + relative_offset_set[i];
      if (output_flat(output_index).length() != 0)
        output_flat(output_index).append(separator_.c_str());
      output_flat(output_index).append(flat_input(offset));
    }
  }
}
| 1
|
242,928
|
// Scatter-add each update value into `out` at the coordinates given by the
// corresponding row of `indices`.  Returns the first dimension whose index
// fails the bounds check (nonnegative), or -1 on success.
Index operator()(const CPUDevice &d,
                 typename TTypes<Index>::ConstMatrix indices,
                 typename TTypes<T>::ConstFlat updates,
                 typename TTypes<T, NDIMS>::Tensor out) {
  Eigen::array<Eigen::DenseIndex, NDIMS> idx;
  const int num_nnz = static_cast<int>(indices.dimension(0));
  for (int i = 0; i < num_nnz; ++i) {
    // Renamed the loop variable from `d` to `dim`: it shadowed the
    // CPUDevice parameter `d`, which was confusing and warning-prone.
    for (int dim = 0; dim < NDIMS; ++dim) {
      idx[dim] = internal::SubtleMustCopy(indices(i, dim));
      if (!FastBoundsCheck(idx[dim], out.dimension(dim))) {
        return dim;  // on failure: dim nonnegative
      }
    }
    out(idx) += updates(i);
  }
  return -1;  // on success
}
| 0
|
292,136
|
// Build and throw a java.lang.AbstractMethodError describing the resolved
// (and, when different, the selected) method, the class it was resolved
// against, and optionally the receiver class lacking an implementation.
void LinkResolver::throw_abstract_method_error(const methodHandle& resolved_method,
const methodHandle& selected_method,
Klass *recv_klass, TRAPS) {
Klass *resolved_klass = resolved_method->method_holder();
ResourceMark rm(THREAD);
stringStream ss;
if (recv_klass != NULL) {
ss.print("Receiver class %s does not define or inherit an "
"implementation of the",
recv_klass->external_name());
} else {
ss.print("Missing implementation of");
}
assert(resolved_method.not_null(), "Sanity");
// "resolved method 'abstract private <ret> name(args)' of <kind> <class>."
ss.print(" resolved method '%s%s",
resolved_method->is_abstract() ? "abstract " : "",
resolved_method->is_private() ? "private " : "");
resolved_method->signature()->print_as_signature_external_return_type(&ss);
ss.print(" %s(", resolved_method->name()->as_C_string());
resolved_method->signature()->print_as_signature_external_parameters(&ss);
ss.print(")' of %s %s.",
resolved_klass->external_kind(),
resolved_klass->external_name());
// Mention the selected method only when it differs from the resolved one.
if (selected_method.not_null() && !(resolved_method == selected_method)) {
ss.print(" Selected method is '%s%s",
selected_method->is_abstract() ? "abstract " : "",
selected_method->is_private() ? "private " : "");
selected_method->print_external_name(&ss);
ss.print("'.");
}
THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
}
| 0
|
206,043
|
/*
 * Load a GIMP brush (.gbr / .gpb / CinePaint .gbr v3) file as a new image.
 * Returns the new image ID, or -1 with *error set (or a g_message shown)
 * on failure.
 *
 * Fixes over the previous version: the header-sanitize failure path and
 * the unsupported-depth path both leaked the open file descriptor, and
 * the latter also leaked brush_buf.
 */
load_image (const gchar *filename,
            GError     **error)
{
  gchar             *name;
  gint               fd;
  BrushHeader        bh;
  guchar            *brush_buf = NULL;
  gint32             image_ID;
  gint32             layer_ID;
  GimpParasite      *parasite;
  GimpDrawable      *drawable;
  GimpPixelRgn       pixel_rgn;
  gint               bn_size;
  GimpImageBaseType  base_type;
  GimpImageType      image_type;
  gsize              size;

  fd = g_open (filename, O_RDONLY | _O_BINARY, 0);
  if (fd == -1)
    {
      g_set_error (error, G_FILE_ERROR, g_file_error_from_errno (errno),
                   _("Could not open '%s' for reading: %s"),
                   gimp_filename_to_utf8 (filename), g_strerror (errno));
      return -1;
    }
  gimp_progress_init_printf (_("Opening '%s'"),
                             gimp_filename_to_utf8 (filename));
  if (read (fd, &bh, sizeof (BrushHeader)) != sizeof (BrushHeader))
    {
      close (fd);
      return -1;
    }
  /* rearrange the bytes in each unsigned int */
  bh.header_size  = g_ntohl (bh.header_size);
  bh.version      = g_ntohl (bh.version);
  bh.width        = g_ntohl (bh.width);
  bh.height       = g_ntohl (bh.height);
  bh.bytes        = g_ntohl (bh.bytes);
  bh.magic_number = g_ntohl (bh.magic_number);
  bh.spacing      = g_ntohl (bh.spacing);
  /* Sanitize values */
  if ((bh.width == 0) || (bh.width > GIMP_MAX_IMAGE_SIZE) ||
      (bh.height == 0) || (bh.height > GIMP_MAX_IMAGE_SIZE) ||
      ((bh.bytes != 1) && (bh.bytes != 2) && (bh.bytes != 4) &&
       (bh.bytes != 18)) ||
      (G_MAXSIZE / bh.width / bh.height / bh.bytes < 1))
    {
      g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED,
                   _("Invalid header data in '%s': width=%lu, height=%lu, "
                     "bytes=%lu"), gimp_filename_to_utf8 (filename),
                   (unsigned long int)bh.width, (unsigned long int)bh.height,
                   (unsigned long int)bh.bytes);
      close (fd);  /* was leaked here previously */
      return -1;
    }
  switch (bh.version)
    {
    case 1:
      /* Version 1 didn't have a magic number and had no spacing */
      bh.spacing = 25;
      /* And we need to rewind the handle, 4 due spacing and 4 due magic */
      lseek (fd, -8, SEEK_CUR);
      bh.header_size += 8;
      break;
    case 3: /* cinepaint brush */
      if (bh.bytes == 18 /* FLOAT16_GRAY_GIMAGE */)
        {
          bh.bytes = 2;
        }
      else
        {
          g_message (_("Unsupported brush format"));
          close (fd);
          return -1;
        }
      /* fallthrough */
    case 2:
      if (bh.magic_number == GBRUSH_MAGIC &&
          bh.header_size > sizeof (BrushHeader))
        break;
    default:
      g_message (_("Unsupported brush format"));
      close (fd);
      return -1;
    }
  /* The brush name fills the rest of the header, if any. */
  if ((bn_size = (bh.header_size - sizeof (BrushHeader))) > 0)
    {
      gchar *temp = g_new (gchar, bn_size);
      if ((read (fd, temp, bn_size)) < bn_size)
        {
          g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED,
                       _("Error in GIMP brush file '%s'"),
                       gimp_filename_to_utf8 (filename));
          close (fd);
          g_free (temp);
          return -1;
        }
      name = gimp_any_to_utf8 (temp, -1,
                               _("Invalid UTF-8 string in brush file '%s'."),
                               gimp_filename_to_utf8 (filename));
      g_free (temp);
    }
  else
    {
      name = g_strdup (_("Unnamed"));
    }
  /* Now there's just raw data left. */
  size = bh.width * bh.height * bh.bytes;
  brush_buf = g_malloc (size);
  if (read (fd, brush_buf, size) != size)
    {
      close (fd);
      g_free (brush_buf);
      g_free (name);
      return -1;
    }
  switch (bh.bytes)
    {
    case 1:
      {
        PatternHeader ph;
        /* For backwards-compatibility, check if a pattern follows.
           The obsolete .gpb format did it this way. */
        if (read (fd, &ph, sizeof (PatternHeader)) == sizeof(PatternHeader))
          {
            /* rearrange the bytes in each unsigned int */
            ph.header_size  = g_ntohl (ph.header_size);
            ph.version      = g_ntohl (ph.version);
            ph.width        = g_ntohl (ph.width);
            ph.height       = g_ntohl (ph.height);
            ph.bytes        = g_ntohl (ph.bytes);
            ph.magic_number = g_ntohl (ph.magic_number);
            if (ph.magic_number == GPATTERN_MAGIC &&
                ph.version == 1 &&
                ph.header_size > sizeof (PatternHeader) &&
                ph.bytes == 3 &&
                ph.width == bh.width &&
                ph.height == bh.height &&
                lseek (fd, ph.header_size - sizeof (PatternHeader),
                       SEEK_CUR) > 0)
              {
                /* Merge the RGB pattern with the gray brush as alpha. */
                guchar *plain_brush = brush_buf;
                gint    i;
                bh.bytes = 4;
                brush_buf = g_malloc (4 * bh.width * bh.height);
                for (i = 0; i < ph.width * ph.height; i++)
                  {
                    if (read (fd, brush_buf + i * 4, 3) != 3)
                      {
                        close (fd);
                        g_free (name);
                        g_free (plain_brush);
                        g_free (brush_buf);
                        return -1;
                      }
                    brush_buf[i * 4 + 3] = plain_brush[i];
                  }
                g_free (plain_brush);
              }
          }
      }
      break;
    case 2:
      {
        /* CinePaint 16-bit half floats: convert in place to 8-bit gray. */
        guint16 *buf = (guint16 *) brush_buf;
        gint     i;
        for (i = 0; i < bh.width * bh.height; i++, buf++)
          {
            union
            {
              guint16 u[2];
              gfloat  f;
            } short_float;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
            short_float.u[0] = 0;
            short_float.u[1] = GUINT16_FROM_BE (*buf);
#else
            short_float.u[0] = GUINT16_FROM_BE (*buf);
            short_float.u[1] = 0;
#endif
            brush_buf[i] = (guchar) (short_float.f * 255.0 + 0.5);
          }
        bh.bytes = 1;
      }
      break;
    default:
      break;
    }
  /*
   * Create a new image of the proper size and
   * associate the filename with it.
   */
  switch (bh.bytes)
    {
    case 1:
      base_type = GIMP_GRAY;
      image_type = GIMP_GRAY_IMAGE;
      break;
    case 4:
      base_type = GIMP_RGB;
      image_type = GIMP_RGBA_IMAGE;
      break;
    default:
      g_message ("Unsupported brush depth: %d\n"
                 "GIMP Brushes must be GRAY or RGBA\n",
                 bh.bytes);
      close (fd);          /* was leaked here previously */
      g_free (brush_buf);  /* was leaked here previously */
      g_free (name);
      return -1;
    }
  image_ID = gimp_image_new (bh.width, bh.height, base_type);
  gimp_image_set_filename (image_ID, filename);
  parasite = gimp_parasite_new ("gimp-brush-name",
                                GIMP_PARASITE_PERSISTENT,
                                strlen (name) + 1, name);
  gimp_image_attach_parasite (image_ID, parasite);
  gimp_parasite_free (parasite);
  layer_ID = gimp_layer_new (image_ID, name, bh.width, bh.height,
                             image_type, 100, GIMP_NORMAL_MODE);
  gimp_image_insert_layer (image_ID, layer_ID, -1, 0);
  g_free (name);
  drawable = gimp_drawable_get (layer_ID);
  gimp_pixel_rgn_init (&pixel_rgn, drawable,
                       0, 0, drawable->width, drawable->height,
                       TRUE, FALSE);
  gimp_pixel_rgn_set_rect (&pixel_rgn, brush_buf,
                           0, 0, bh.width, bh.height);
  g_free (brush_buf);
  /* Brushes are stored "white on black"; invert gray brushes for display. */
  if (image_type == GIMP_GRAY_IMAGE)
    gimp_invert (layer_ID);
  close (fd);
  gimp_drawable_flush (drawable);
  gimp_progress_update (1.0);
  return image_ID;
}
| 1
|
195,629
|
// Chooses the device on which `tensor_handle` should be placed when used as
// an input to `op`, writing the choice to *result. Returns OK on success or
// an error from resolving the handle's tensor / device name.
Status GetDeviceForInput(const EagerOperation& op, const EagerContext& ctx,
                         TensorHandle* tensor_handle, Device** result) {
  Device* cpu_device = ctx.HostCPU();
  string device_name;
  if (tensor_handle->Type() != TensorHandle::LOCAL) {
    // Non-local (e.g. remote) handles: keep whatever device the handle
    // already reports, falling back to the host CPU when none is set.
    Device* device = tensor_handle->device();
    device_name = device != nullptr ? device->name() : cpu_device->name();
    *result = (device == nullptr ? cpu_device : device);
  } else if (tensor_handle->dtype == DT_RESOURCE) {
    // Use the resource's actual device because it is the device that will
    // influence partitioning the multi-device function.
    const Tensor* tensor;
    // TODO(fishx): Avoid blocking here.
    TF_RETURN_IF_ERROR(tensor_handle->Tensor(&tensor));
    const ResourceHandle& handle = tensor->flat<ResourceHandle>()(0);
    device_name = handle.device();
    Device* input_device;
    TF_RETURN_IF_ERROR(
        ctx.FindDeviceFromName(device_name.c_str(), &input_device));
    *result = input_device;
  } else {
    Device* device = tensor_handle->device();
    const bool is_tpu = device != nullptr && device->device_type() == "TPU";
    // int32 return values can be placed on TPUs.
    const bool use_host_memory =
        is_tpu ? MTypeFromDTypeIntsOnDevice(tensor_handle->dtype)
               : MTypeFromDType(tensor_handle->dtype);
    if (use_host_memory) {
      *result = cpu_device;
    } else {
      // Eager ops executing as functions should have their preferred inputs set
      // to the op's device. This allows us to avoid expensive D2H copies if a
      // mirror of the tensor already exists on the op's device.
      // NOTE(review): the guard reads `!op.is_function()`, which looks
      // inverted relative to the comment above — confirm against upstream.
      if (!op.is_function() && device != nullptr && device != cpu_device) {
        device = absl::get<Device*>(op.Device());
      }
      *result = (device == nullptr ? cpu_device : device);
    }
  }
  return Status::OK();
}
| 1
|
326,088
|
/*
 * Set the next-pointer at the operand of a node, but only for nodes that
 * actually have an operand tail: BRANCH and BRACE_COMPLEX0-9.  All other
 * opcodes are "operandless" and are left alone.
 */
regoptail(char_u *p, char_u *val)
{
    int op;

    // Nothing to do for a missing node or while only computing sizes.
    if (p == NULL || p == JUST_CALC_SIZE)
	return;

    op = OP(p);
    if (op != BRANCH && (op < BRACE_COMPLEX || op > BRACE_COMPLEX + 9))
	return;

    regtail(OPERAND(p), val);
}
| 0
|
359,245
|
/*
 * vty command "clear ip bgp * vpnv4 unicast soft out": triggers a soft
 * outbound reconfiguration (re-advertisement) toward all peers for the
 * IPv4 MPLS-VPN address family.  The string literals below form the CLI
 * help text, one line per token.
 */
DEFUN (clear_ip_bgp_all_vpnv4_soft_out,
       clear_ip_bgp_all_vpnv4_soft_out_cmd,
       "clear ip bgp * vpnv4 unicast soft out",
       CLEAR_STR
       IP_STR
       BGP_STR
       "Clear all peers\n"
       "Address family\n"
       "Address Family Modifier\n"
       "Soft reconfig\n"
       "Soft reconfig outbound update\n")
{
  /* Delegate to the common clear handler; clear_all selects every peer. */
  return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_MPLS_VPN, clear_all,
                        BGP_CLEAR_SOFT_OUT, NULL);
}
| 0
|
413,353
|
/* {{{ proto bool snmp_set_valueretrieval(int method)
   Select how SNMP values are returned (library / plain / object flags). */
PHP_FUNCTION(snmp_set_valueretrieval)
{
	long method;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &method) == FAILURE) {
		RETURN_FALSE;
	}

	/* Reject anything outside the valid flag-mask range. */
	if (method < 0 || method > (SNMP_VALUE_LIBRARY|SNMP_VALUE_PLAIN|SNMP_VALUE_OBJECT)) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown SNMP value retrieval method '%ld'", method);
		RETURN_FALSE;
	}

	SNMP_G(valueretrieval) = method;
	RETURN_TRUE;
}
| 0
|
139,239
|
// Constructs the frame view for the given widget; only stores the pointer
// (the widget is owned elsewhere).
explicit OverlayWindowFrameView(views::Widget* widget) : widget_(widget) {}
| 0
|
446,098
|
/*
 * Delayed-work handler that drains the idle-URB anchor and resubmits each
 * URB for RX.  On the first submit failure the URB is returned to the idle
 * anchor and the work is rescheduled to retry later.
 */
static void atusb_work_urbs(struct work_struct *work)
{
	struct atusb *atusb =
	    container_of(to_delayed_work(work), struct atusb, work);
	struct usb_device *usb_dev = atusb->usb_dev;
	struct urb *urb;
	int ret;

	/* Device is going away: don't touch the URB pools any more. */
	if (atusb->shutdown)
		return;

	/* Submit idle URBs until the anchor is empty or a submit fails. */
	do {
		urb = usb_get_from_anchor(&atusb->idle_urbs);
		if (!urb)
			return;
		ret = atusb_submit_rx_urb(atusb, urb);
	} while (!ret);

	/* Submission failed: park the URB again and retry after a delay. */
	usb_anchor_urb(urb, &atusb->idle_urbs);
	dev_warn_ratelimited(&usb_dev->dev,
			     "atusb_in: can't allocate/submit URB (%d)\n", ret);
	schedule_delayed_work(&atusb->work,
			      msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1);
}
| 0
|
436,081
|
/*
 * io_uring handler for IORING_OP_RECVMSG.  Resolves the socket, builds or
 * reuses the async msghdr state, optionally selects a provided buffer, then
 * performs the recvmsg.  Returns 0 with the completion posted, or -EAGAIN
 * handling deferred via async setup in the non-blocking case.
 */
static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	struct io_buffer *kbuf;
	unsigned flags;
	int min_ret = 0;
	int ret, cflags = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	/* Use previously prepared async state if present, else parse now. */
	kmsg = req->async_data;
	if (!kmsg) {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	/* Provided-buffer mode: pick a kernel buffer and point the iter at it. */
	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		kmsg->fast_iov[0].iov_len = req->sr_msg.len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
				1, req->sr_msg.len);
	}

	flags = req->sr_msg.msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	/* MSG_WAITALL: anything short of the full iter counts as failure. */
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
				 kmsg->uaddr, flags);
	/* Would block: keep the parsed msghdr around and retry async. */
	if (force_nonblock && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, cflags);
	return 0;
}
| 0
|
387,565
|
/*
 * Create the control-device component for a sound card: initializes the
 * embedded ctl device, names it "controlC<card>", and registers it with the
 * card's device list.  Returns 0 on success or a negative errno; on failure
 * the device reference taken by snd_device_initialize() is dropped.
 */
int snd_ctl_create(struct snd_card *card)
{
	static const struct snd_device_ops ops = {
		.dev_free = snd_ctl_dev_free,
		.dev_register = snd_ctl_dev_register,
		.dev_disconnect = snd_ctl_dev_disconnect,
	};
	int err;

	/* Defensive sanity checks on the card and its index. */
	if (snd_BUG_ON(!card))
		return -ENXIO;
	if (snd_BUG_ON(card->number < 0 || card->number >= SNDRV_CARDS))
		return -ENXIO;

	snd_device_initialize(&card->ctl_dev, card);
	dev_set_name(&card->ctl_dev, "controlC%d", card->number);
	err = snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops);
	if (err < 0)
		put_device(&card->ctl_dev);
	return err;
}
| 0
|
270,116
|
// Derives the fixed-point multipliers/shifts (per-channel and, for legacy
// uint8, per-tensor) plus the activation clamp range for a quantized
// convolution.  Validates that input/filter use affine quantization and that
// per-channel scales match the filter's quantized dimension.
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift,
    int num_channels) {
  TF_LITE_ENSURE_EQ(context, input->quantization.type,
                    kTfLiteAffineQuantization);
  TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                    kTfLiteAffineQuantization);
  // TODO(jianlijianli): Enable bias type check and bias scale == input scale
  // * filter scale for each channel in affine quantization once bias
  // quantization is properly populated.
  // TF_LITE_ENSURE_EQ(context, bias->quantization.type,
  // kTfLiteAffineQuantization);
  // Check data type.
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  TF_LITE_ENSURE(context, affine_quantization);
  TF_LITE_ENSURE(context, affine_quantization->scale);
  // More than one filter scale means per-channel quantization.
  const bool is_per_channel = affine_quantization->scale->size > 1;
  if (is_per_channel) {
    // Currently only Int8/Int16 is supported for per channel quantization.
    TF_LITE_ENSURE(context,
                   input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
    TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8);
    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels);
    TF_LITE_ENSURE_EQ(
        context, num_channels,
        filter->dims->data[affine_quantization->quantized_dimension]);
  }
  // Populate multiplier and shift using affine quantization.
  const float input_scale = input->params.scale;
  const float output_scale = output->params.scale;
  const float* filter_scales = affine_quantization->scale->data;
  for (int i = 0; i < num_channels; ++i) {
    // If per-tensor quantization parameter is specified, broadcast it along the
    // quantization dimension (channels_out).
    const float scale = is_per_channel ? filter_scales[i] : filter_scales[0];
    const double filter_scale = static_cast<double>(scale);
    // effective scale = input_scale * filter_scale / output_scale, decomposed
    // below into a fixed-point significand and power-of-two shift.
    const double effective_output_scale = static_cast<double>(input_scale) *
                                          filter_scale /
                                          static_cast<double>(output_scale);
    int32_t significand;
    int channel_shift;
    QuantizeMultiplier(effective_output_scale, &significand, &channel_shift);
    per_channel_multiplier[i] = significand;
    per_channel_shift[i] = channel_shift;
  }
  // Populate scalar quantization parameters.
  // This check on legacy quantization parameters is kept only for backward
  // compatibility.
  if (input->type == kTfLiteUInt8) {
    // Check bias scale == input scale * filter scale.
    double real_multiplier = 0.0;
    TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
        context, input, filter, bias, output, &real_multiplier));
    int exponent;
    // Populate quantization parameters with multiplier and shift.
    QuantizeMultiplier(real_multiplier, multiplier, &exponent);
    // Legacy convention stores the shift negated.
    *shift = -exponent;
  }
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
      input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, activation, output, output_activation_min,
        output_activation_max));
  }
  return kTfLiteOk;
}
| 0
|
336,637
|
/* Return the single connected client, or NULL when none is connected.
 * The server supports at most one client at a time. */
static RedClient *reds_get_client(RedsState *reds)
{
    spice_assert(reds->clients.size() <= 1);

    return reds->clients.empty() ? NULL : *reds->clients.begin();
}
| 0
|
213,513
|
/*
 * Convert a CIE L*u*v* color (D65 white point) to CIE XYZ tristimulus
 * values, written through *X, *Y, *Z.
 *
 * NOTE(review): the u and v denominators (u + 13*L*..., v + 13*L*...) can
 * reach zero for adversarial inputs, yielding inf/NaN results — confirm
 * whether callers sanitize L/u/v or whether a guarded reciprocal is needed.
 */
static inline void ConvertLuvToXYZ(const double L,const double u,const double v,
  double *X,double *Y,double *Z)
{
  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  /* Invert the CIE L* lightness function: cubic branch above the
     CIEK*CIEEpsilon threshold, linear branch below it. */
  if (L > (CIEK*CIEEpsilon))
    *Y=(double) pow((L+16.0)/116.0,3.0);
  else
    *Y=L/CIEK;
  /* Recover X and Z from u', v' chromaticities relative to the D65 white. */
  *X=((*Y*((39.0*L/(v+13.0*L*(9.0*D65Y/(D65X+15.0*D65Y+3.0*D65Z))))-5.0))+
    5.0*(*Y))/((((52.0*L/(u+13.0*L*(4.0*D65X/(D65X+15.0*D65Y+3.0*D65Z))))-1.0)/
    3.0)-(-1.0/3.0));
  *Z=(*X*(((52.0*L/(u+13.0*L*(4.0*D65X/(D65X+15.0*D65Y+3.0*D65Z))))-1.0)/3.0))-
    5.0*(*Y);
}
| 1
|
508,875
|
/*
  Append a comma-separated ORDER BY / GROUP BY item list to `str`,
  re-printing each element according to `query_type`.  Positional
  references are emitted as their ordinal number; integer constants are
  replaced by '' so they are not re-parsed as positions.
*/
void st_select_lex::print_order(String *str,
                                ORDER *order,
                                enum_query_type query_type)
{
  for (; order; order= order->next)
  {
    if (order->counter_used)
    {
      /* Positional reference (e.g. ORDER BY 2): print the ordinal. */
      char buffer[20];
      size_t length= my_snprintf(buffer, 20, "%d", order->counter);
      str->append(buffer, (uint) length);
    }
    else
    {
      /* replace numeric reference with equivalent for ORDER constant */
      if (order->item[0]->type() == Item::INT_ITEM &&
          order->item[0]->basic_const_item())
      {
        /* make it expression instead of integer constant */
        str->append(STRING_WITH_LEN("''"));
      }
      else
        (*order->item)->print(str, query_type);
    }
    if (order->direction == ORDER::ORDER_DESC)
      str->append(STRING_WITH_LEN(" desc"));
    if (order->next)
      str->append(',');
  }
}
| 0
|
207,990
|
/*
 * Walk the compiled pattern between cc and ccend and compute how many
 * private machine words a recursion frame needs for this subpattern.
 * Also reports, through the out-parameters, whether the subpattern needs
 * the control head pointer and whether it can quit (PRUNE/SKIP/COMMIT/
 * THEN) or accept.  The returned length counts one word per saved value
 * (see the flag bookkeeping at the end).
 */
static int get_recurse_data_length(compiler_common *common, PCRE2_SPTR cc, PCRE2_SPTR ccend,
  BOOL *needs_control_head, BOOL *has_quit, BOOL *has_accept)
{
int length = 1;
int size;
PCRE2_SPTR alternative;
BOOL quit_found = FALSE;
BOOL accept_found = FALSE;
BOOL setsom_found = FALSE;
BOOL setmark_found = FALSE;
BOOL capture_last_found = FALSE;
BOOL control_head_found = FALSE;

#if defined DEBUG_FORCE_CONTROL_HEAD && DEBUG_FORCE_CONTROL_HEAD
SLJIT_ASSERT(common->control_head_ptr != 0);
control_head_found = TRUE;
#endif

/* Calculate the sum of the private machine words. */
while (cc < ccend)
  {
  size = 0;
  switch(*cc)
    {
    case OP_SET_SOM:
    SLJIT_ASSERT(common->has_set_som);
    setsom_found = TRUE;
    cc += 1;
    break;

    case OP_RECURSE:
    /* A nested recursion may clobber any of these saved values. */
    if (common->has_set_som)
      setsom_found = TRUE;
    if (common->mark_ptr != 0)
      setmark_found = TRUE;
    if (common->capture_last_ptr != 0)
      capture_last_found = TRUE;
    cc += 1 + LINK_SIZE;
    break;

    case OP_KET:
    if (PRIVATE_DATA(cc) != 0)
      {
      length++;
      SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0);
      cc += PRIVATE_DATA(cc + 1);
      }
    cc += 1 + LINK_SIZE;
    break;

    case OP_ASSERT:
    case OP_ASSERT_NOT:
    case OP_ASSERTBACK:
    case OP_ASSERTBACK_NOT:
    case OP_ASSERT_NA:
    case OP_ASSERTBACK_NA:
    case OP_ONCE:
    case OP_SCRIPT_RUN:
    case OP_BRAPOS:
    case OP_SBRA:
    case OP_SBRAPOS:
    case OP_SCOND:
    /* One private word per bracket-like construct. */
    length++;
    SLJIT_ASSERT(PRIVATE_DATA(cc) != 0);
    cc += 1 + LINK_SIZE;
    break;

    case OP_CBRA:
    case OP_SCBRA:
    /* Capturing bracket: start/end offsets, plus one word when the
       bracket is not optimized away. */
    length += 2;
    if (common->capture_last_ptr != 0)
      capture_last_found = TRUE;
    if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0)
      length++;
    cc += 1 + LINK_SIZE + IMM2_SIZE;
    break;

    case OP_CBRAPOS:
    case OP_SCBRAPOS:
    length += 2 + 2;
    if (common->capture_last_ptr != 0)
      capture_last_found = TRUE;
    cc += 1 + LINK_SIZE + IMM2_SIZE;
    break;

    case OP_COND:
    /* Might be a hidden SCOND. */
    alternative = cc + GET(cc, 1);
    if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN)
      length++;
    cc += 1 + LINK_SIZE;
    break;

    CASE_ITERATOR_PRIVATE_DATA_1
    if (PRIVATE_DATA(cc) != 0)
      length++;
    cc += 2;
#ifdef SUPPORT_UNICODE
    if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
    break;

    CASE_ITERATOR_PRIVATE_DATA_2A
    if (PRIVATE_DATA(cc) != 0)
      length += 2;
    cc += 2;
#ifdef SUPPORT_UNICODE
    if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
    break;

    CASE_ITERATOR_PRIVATE_DATA_2B
    if (PRIVATE_DATA(cc) != 0)
      length += 2;
    cc += 2 + IMM2_SIZE;
#ifdef SUPPORT_UNICODE
    if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
    break;

    CASE_ITERATOR_TYPE_PRIVATE_DATA_1
    if (PRIVATE_DATA(cc) != 0)
      length++;
    cc += 1;
    break;

    CASE_ITERATOR_TYPE_PRIVATE_DATA_2A
    if (PRIVATE_DATA(cc) != 0)
      length += 2;
    cc += 1;
    break;

    CASE_ITERATOR_TYPE_PRIVATE_DATA_2B
    if (PRIVATE_DATA(cc) != 0)
      length += 2;
    cc += 1 + IMM2_SIZE;
    break;

    case OP_CLASS:
    case OP_NCLASS:
#if defined SUPPORT_UNICODE || PCRE2_CODE_UNIT_WIDTH != 8
    case OP_XCLASS:
    /* XCLASS carries its own length; plain classes are a fixed bitmap. */
    size = (*cc == OP_XCLASS) ? GET(cc, 1) : 1 + 32 / (int)sizeof(PCRE2_UCHAR);
#else
    size = 1 + 32 / (int)sizeof(PCRE2_UCHAR);
#endif
    if (PRIVATE_DATA(cc) != 0)
      length += get_class_iterator_size(cc + size);
    cc += size;
    break;

    case OP_MARK:
    case OP_COMMIT_ARG:
    case OP_PRUNE_ARG:
    case OP_THEN_ARG:
    SLJIT_ASSERT(common->mark_ptr != 0);
    if (!setmark_found)
      setmark_found = TRUE;
    if (common->control_head_ptr != 0)
      control_head_found = TRUE;
    if (*cc != OP_MARK)
      quit_found = TRUE;
    cc += 1 + 2 + cc[1];
    break;

    case OP_PRUNE:
    case OP_SKIP:
    case OP_COMMIT:
    quit_found = TRUE;
    cc++;
    break;

    case OP_SKIP_ARG:
    quit_found = TRUE;
    cc += 1 + 2 + cc[1];
    break;

    case OP_THEN:
    SLJIT_ASSERT(common->control_head_ptr != 0);
    quit_found = TRUE;
    if (!control_head_found)
      control_head_found = TRUE;
    cc++;
    break;

    case OP_ACCEPT:
    case OP_ASSERT_ACCEPT:
    accept_found = TRUE;
    cc++;
    break;

    default:
    cc = next_opcode(common, cc);
    SLJIT_ASSERT(cc != NULL);
    break;
    }
  }
SLJIT_ASSERT(cc == ccend);

/* Account for one extra saved word per flag that applies. */
if (control_head_found)
  length++;
if (capture_last_found)
  length++;
if (quit_found)
  {
  if (setsom_found)
    length++;
  if (setmark_found)
    length++;
  }

*needs_control_head = control_head_found;
*has_quit = quit_found;
*has_accept = accept_found;
return length;
}
| 1
|
226,276
|
/*
 * Serialize a PAYT (payload type) box: 32-bit payload code followed by a
 * one-byte length-prefixed payload string.
 * Returns GF_OK on success, GF_BAD_PARAM on a NULL box, or the error from
 * writing the box header.
 */
GF_Err payt_box_write(GF_Box *s, GF_BitStream *bs)
{
	u32 len;
	GF_Err e;
	GF_PAYTBox *ptr = (GF_PAYTBox *)s;
	if (ptr == NULL) return GF_BAD_PARAM;

	e = gf_isom_box_write_header(s, bs);
	if (e) return e;

	gf_bs_write_u32(bs, ptr->payloadCode);
	len = ptr->payloadString ? (u32) strlen(ptr->payloadString) : 0;
	/* Bug fix: the length is serialized as a u8, so strings longer than
	 * 255 bytes had their length byte silently truncated while the full
	 * string bytes were still written, corrupting the box.  Clamp the
	 * string to what the one-byte length field can describe. */
	if (len > 255) len = 255;
	gf_bs_write_u8(bs, len);
	if (len) gf_bs_write_data(bs, ptr->payloadString, len);
	return GF_OK;
}
| 0
|
481,265
|
/*
 * Transition the FPGA connection QP from RTR (ready-to-receive) to RTS
 * (ready-to-send), programming ack/retry timing parameters.
 * Returns 0 on success or a negative errno.
 */
static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	struct mlx5_core_dev *mdev = fdev->mdev;
	u32 *qpc = NULL;
	u32 opt_mask;
	int err;

	mlx5_fpga_dbg(conn->fdev, "QP RTS\n");

	qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
	if (!qpc) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
	MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
	MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
	MLX5_SET(qpc, qpc, next_send_psn,
		 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
	MLX5_SET(qpc, qpc, retry_count, 7);
	MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */

	opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
				  &conn->qp.mqp);
	if (err) {
		/* Bug fix: the message previously said "RST2INIT" although this
		 * is the RTR2RTS transition, which made failures misleading to
		 * debug. */
		mlx5_fpga_warn(fdev, "qp_modify RTR2RTS failed: %d\n", err);
		goto out;
	}

out:
	kfree(qpc);
	return err;
}
| 0
|
364,771
|
/*
 * Add the tag match described by "tagpp" to the appropriate match table
 * and growarray in "st".  The match is formatted into an allocated string
 * whose layout depends on the search mode (help tags, names only, or full
 * tag lines); identical matches are deduplicated via a hash table.
 * Returns OK, or FAIL on out-of-memory (which also sets stop_searching).
 */
findtags_add_match(
    findtags_state_T	*st,
    tagptrs_T		*tagpp,
    findtags_match_args_T *margs,
    char_u		*buf_ffname,
    hash_T		*hash)
{
#ifdef FEAT_CSCOPE
    int		use_cscope = (st->flags & TAG_CSCOPE);
#endif
    int		name_only = (st->flags & TAG_NAMES);
    int		mtt;
    int		len = 0;
    int		is_current;		// file name matches
    int		is_static;		// current tag line is static
    char_u	*mfp;
    char_u	*p;
    char_u	*s;

#ifdef FEAT_CSCOPE
    if (use_cscope)
    {
	// Don't change the ordering, always use the same table.
	mtt = MT_GL_OTH;
    }
    else
#endif
    {
	// Decide in which array to store this match.
	is_current = test_for_current(
#ifdef FEAT_EMACS_TAGS
		st->is_etag,
#endif
		tagpp->fname, tagpp->fname_end, st->tag_fname, buf_ffname);
#ifdef FEAT_EMACS_TAGS
	is_static = FALSE;
	if (!st->is_etag)	// emacs tags are never static
#endif
	    is_static = test_for_static(tagpp);

	// decide in which of the sixteen tables to store this
	// match
	if (is_static)
	{
	    if (is_current)
		mtt = MT_ST_CUR;
	    else
		mtt = MT_ST_OTH;
	}
	else
	{
	    if (is_current)
		mtt = MT_GL_CUR;
	    else
		mtt = MT_GL_OTH;
	}
	// Lower-priority buckets for ignore-case and regexp matches.
	if (st->orgpat->regmatch.rm_ic && !margs->match_no_ic)
	    mtt += MT_IC_OFF;
	if (margs->match_re)
	    mtt += MT_RE_OFF;
    }

    // Add the found match in ht_match[mtt] and ga_match[mtt].
    // Store the info we need later, which depends on the kind of
    // tags we are dealing with.
    if (st->help_only)
    {
#ifdef FEAT_MULTI_LANG
# define ML_EXTRA 3
#else
# define ML_EXTRA 0
#endif
	// Append the help-heuristic number after the tagname, for
	// sorting it later.  The heuristic is ignored for
	// detecting duplicates.
	// The format is {tagname}@{lang}NUL{heuristic}NUL
	*tagpp->tagname_end = NUL;
	len = (int)(tagpp->tagname_end - tagpp->tagname);
	mfp = alloc(sizeof(char_u) + len + 10 + ML_EXTRA + 1);
	if (mfp != NULL)
	{
	    int heuristic;

	    p = mfp;
	    STRCPY(p, tagpp->tagname);
#ifdef FEAT_MULTI_LANG
	    p[len] = '@';
	    STRCPY(p + len + 1, st->help_lang);
#endif
	    heuristic = help_heuristic(tagpp->tagname,
			    margs->match_re ? margs->matchoff : 0,
			    !margs->match_no_ic);
#ifdef FEAT_MULTI_LANG
	    heuristic += st->help_pri;
#endif
	    sprintf((char *)p + len + 1 + ML_EXTRA, "%06d",
		    heuristic);
	}
	// Restore the TAB that was overwritten with NUL above.
	*tagpp->tagname_end = TAB;
    }
    else if (name_only)
    {
	if (st->get_searchpat)
	{
	    // Re-read pass: extract the search pattern after the tag name.
	    char_u *temp_end = tagpp->command;

	    if (*temp_end == '/')
		while (*temp_end && *temp_end != '\r'
			&& *temp_end != '\n'
			&& *temp_end != '$')
		    temp_end++;

	    if (tagpp->command + 2 < temp_end)
	    {
		len = (int)(temp_end - tagpp->command - 2);
		mfp = alloc(len + 2);
		if (mfp != NULL)
		    vim_strncpy(mfp, tagpp->command + 2, len);
	    }
	    else
		mfp = NULL;

	    st->get_searchpat = FALSE;
	}
	else
	{
	    len = (int)(tagpp->tagname_end - tagpp->tagname);
	    mfp = alloc(sizeof(char_u) + len + 1);
	    if (mfp != NULL)
		vim_strncpy(mfp, tagpp->tagname, len);

	    // if wanted, re-read line to get long form too
	    if (State & MODE_INSERT)
		st->get_searchpat = p_sft;
	}
    }
    else
    {
	size_t tag_fname_len = STRLEN(st->tag_fname);
#ifdef FEAT_EMACS_TAGS
	size_t ebuf_len = 0;
#endif

	// Save the tag in a buffer.
	// Use 0x02 to separate fields (Can't use NUL because the
	// hash key is terminated by NUL, or Ctrl_A because that is
	// part of some Emacs tag files -- see parse_tag_line).
	// Emacs tag: <mtt><tag_fname><0x02><ebuf><0x02><lbuf><NUL>
	// other tag: <mtt><tag_fname><0x02><0x02><lbuf><NUL>
	// without Emacs tags: <mtt><tag_fname><0x02><lbuf><NUL>
	// Here <mtt> is the "mtt" value plus 1 to avoid NUL.
	len = (int)tag_fname_len + (int)STRLEN(st->lbuf) + 3;
#ifdef FEAT_EMACS_TAGS
	if (st->is_etag)
	{
	    ebuf_len = STRLEN(st->ebuf);
	    len += (int)ebuf_len + 1;
	}
	else
	    ++len;
#endif
	mfp = alloc(sizeof(char_u) + len + 1);
	if (mfp != NULL)
	{
	    p = mfp;
	    p[0] = mtt + 1;
	    STRCPY(p + 1, st->tag_fname);
#ifdef BACKSLASH_IN_FILENAME
	    // Ignore differences in slashes, avoid adding
	    // both path/file and path\file.
	    slash_adjust(p + 1);
#endif
	    p[tag_fname_len + 1] = TAG_SEP;
	    s = p + 1 + tag_fname_len + 1;
#ifdef FEAT_EMACS_TAGS
	    if (st->is_etag)
	    {
		STRCPY(s, st->ebuf);
		s[ebuf_len] = TAG_SEP;
		s += ebuf_len + 1;
	    }
	    else
		*s++ = TAG_SEP;
#endif
	    STRCPY(s, st->lbuf);
	}
    }

    if (mfp != NULL)
    {
	hashitem_T	*hi;

	// Don't add identical matches.
	// Add all cscope tags, because they are all listed.
	// "mfp" is used as a hash key, there is a NUL byte to end
	// the part that matters for comparing, more bytes may
	// follow after it.  E.g. help tags store the priority
	// after the NUL.
#ifdef FEAT_CSCOPE
	if (use_cscope)
	    ++*hash;
	else
#endif
	    *hash = hash_hash(mfp);
	hi = hash_lookup(&st->ht_match[mtt], mfp, *hash);
	if (HASHITEM_EMPTY(hi))
	{
	    if (hash_add_item(&st->ht_match[mtt], hi, mfp, *hash) == FAIL
		    || ga_grow(&st->ga_match[mtt], 1) != OK)
	    {
		// Out of memory! Just forget about the rest.
		st->stop_searching = TRUE;
		return FAIL;
	    }
	    ((char_u **)(st->ga_match[mtt].ga_data))
		[st->ga_match[mtt].ga_len++] = mfp;
	    st->match_count++;
	}
	else
	    // duplicate tag, drop it
	    vim_free(mfp);
    }
    return OK;
}
| 0
|
241,312
|
/* Allocate a new instance of class c and run its initialize method with
 * the given arguments.  Returns the initialized object. */
mrb_obj_new(mrb_state *mrb, struct RClass *c, mrb_int argc, const mrb_value *argv)
{
  mrb_value instance = mrb_instance_alloc(mrb, mrb_obj_value(c));
  mrb_sym init_sym = MRB_SYM(initialize);

  /* Skip the call entirely when initialize is still the default no-op. */
  if (!mrb_func_basic_p(mrb, instance, init_sym, mrb_do_nothing)) {
    mrb_funcall_argv(mrb, instance, init_sym, argc, argv);
  }
  return instance;
}
| 0
|
313,824
|
/*
 * Normal-mode handler for the "/" and "?" search commands.  Reads the
 * search pattern from the command line and performs the search; clears the
 * pending operator when the pattern prompt is aborted.
 */
nv_search(cmdarg_T *cap)
{
    oparg_T	*oap = cap->oap;
    pos_T	save_cursor = curwin->w_cursor;

    if (cap->cmdchar == '?' && cap->oap->op_type == OP_ROT13)
    {
	// Translate "g??" to "g?g?"
	cap->cmdchar = 'g';
	cap->nchar = '?';
	nv_operator(cap);
	return;
    }

    // When using 'incsearch' the cursor may be moved to set a different search
    // start position.
    cap->searchbuf = getcmdline(cap->cmdchar, cap->count1, 0, 0);

    // NULL means the prompt was aborted: cancel the pending operator.
    if (cap->searchbuf == NULL)
    {
	clearop(oap);
	return;
    }

    // Only set the '' mark when the cursor did not move during 'incsearch'.
    (void)normal_search(cap, cap->cmdchar, cap->searchbuf,
			(cap->arg || !EQUAL_POS(save_cursor, curwin->w_cursor))
			? 0 : SEARCH_MARK, NULL);
}
| 0
|
337,812
|
int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
size_t paylen)
{
struct sctp_errhdr err;
__u16 len;
/* Cause code constants are now defined in network order. */
err.cause = cause_code;
len = sizeof(err) + paylen;
err.length = htons(len);
if (skb_tailroom(chunk->skb) < len)
return -ENOSPC;
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(err), &err);
return 0;
}
| 0
|
345,216
|
/* Record translation map "m" as the one in use for console "vc" and hand
 * back the corresponding translation table. */
unsigned short *set_translate(int m, struct vc_data *vc)
{
	unsigned short *table = translations[m];

	inv_translate[vc->vc_num] = m;
	return table;
}
| 0
|
238,396
|
/*
 * Build the array backing a function's rest parameter (...args) from the
 * surplus call arguments in "frame" and store it in the last local slot.
 * Returns NJS_OK, or NJS_ERROR on allocation failure.
 */
njs_function_rest_parameters_init(njs_vm_t *vm, njs_native_frame_t *frame)
{
    uint32_t     length;
    njs_uint_t   nargs, n, i;
    njs_array_t  *array;
    njs_value_t  *rest_arguments;

    nargs = frame->nargs;
    n = frame->function->u.lambda->nargs;
    /* Number of arguments beyond the declared ones; the rest parameter
       itself counts as one of the declared nargs, hence the +1. */
    length = (nargs >= n) ? (nargs - n + 1) : 0;

    array = njs_array_alloc(vm, 1, length, 0);
    if (njs_slow_path(array == NULL)) {
        return NJS_ERROR;
    }

    /* Copy the surplus arguments into the array.
       NOTE(review): the source index starts at n - 1 (0-based slot of the
       rest parameter) — confirm against the frame's argument layout. */
    for (i = 0; i < length; i++) {
        array->start[i] = frame->arguments[i + n - 1];
    }

    rest_arguments = njs_mp_alloc(vm->mem_pool, sizeof(njs_value_t));
    if (njs_slow_path(rest_arguments == NULL)) {
        return NJS_ERROR;
    }

    /* GC: retain. */
    njs_set_array(rest_arguments, array);

    vm->top_frame->local[n] = rest_arguments;

    return NJS_OK;
}
| 0
|
329,902
|
/*
 * Return the singleton image-backend mask compositor.  The vtable is
 * populated exactly once, guarded by an atomic once-init, so concurrent
 * callers always see a fully initialized compositor.
 */
_cairo_image_mask_compositor_get (void)
{
    static cairo_atomic_once_t once = CAIRO_ATOMIC_ONCE_INIT;
    static cairo_mask_compositor_t compositor;

    if (_cairo_atomic_init_once_enter(&once)) {
	/* Base the mask compositor on the traps compositor, then override
	 * the image-specific hooks. */
	_cairo_mask_compositor_init (&compositor,
				     _cairo_image_traps_compositor_get ());
	compositor.acquire = acquire;
	compositor.release = release;
	compositor.set_clip_region = set_clip_region;
	compositor.pattern_to_surface = _cairo_image_source_create_for_pattern;
	compositor.draw_image_boxes = draw_image_boxes;
	compositor.fill_rectangles = fill_rectangles;
	compositor.fill_boxes = fill_boxes;
	compositor.check_composite = check_composite;
	compositor.composite = composite;
	//compositor.lerp = lerp;
	//compositor.check_composite_boxes = check_composite_boxes;
	compositor.composite_boxes = composite_boxes;
	compositor.check_composite_glyphs = check_composite_glyphs;
	compositor.composite_glyphs = composite_glyphs;
	_cairo_atomic_init_once_leave(&once);
    }

    return &compositor.base;
}
| 0
|
194,996
|
// Looks up the initialization op for a SavedModel: first via the dedicated
// init-op SignatureDef, then via the legacy main-op / legacy-init-op
// collections.  Writes the op name to *init_op_name (left untouched if no
// init op exists).  Returns an error for malformed metadata.
Status GetInitOp(const string& export_dir, const MetaGraphDef& meta_graph_def,
                 string* init_op_name) {
  const auto& sig_def_map = meta_graph_def.signature_def();
  const auto& init_op_sig_it =
      meta_graph_def.signature_def().find(kSavedModelInitOpSignatureKey);
  if (init_op_sig_it != sig_def_map.end()) {
    // Bug fix: the outputs map may not contain the init-op key in a
    // malformed SavedModel; dereferencing the end() iterator was undefined
    // behavior.  Validate the lookup before using it.
    const auto& sig_def_outputs = init_op_sig_it->second.outputs();
    const auto& sig_def_outputs_it =
        sig_def_outputs.find(kSavedModelInitOpSignatureKey);
    if (sig_def_outputs_it == sig_def_outputs.end()) {
      return errors::FailedPrecondition(
          strings::StrCat("Could not find output ",
                          kSavedModelInitOpSignatureKey, " in SavedModel: ",
                          export_dir));
    }
    *init_op_name = sig_def_outputs_it->second.name();
    return Status::OK();
  }

  // Fall back to the legacy collection-based init ops.
  const auto& collection_def_map = meta_graph_def.collection_def();
  string init_op_collection_key;
  if (collection_def_map.find(kSavedModelMainOpKey) !=
      collection_def_map.end()) {
    init_op_collection_key = kSavedModelMainOpKey;
  } else {
    init_op_collection_key = kSavedModelLegacyInitOpKey;
  }
  const auto init_op_it = collection_def_map.find(init_op_collection_key);
  if (init_op_it != collection_def_map.end()) {
    if (init_op_it->second.node_list().value_size() != 1) {
      return errors::FailedPrecondition(
          strings::StrCat("Expected exactly one main op in : ", export_dir));
    }
    *init_op_name = init_op_it->second.node_list().value(0);
  }
  return Status::OK();
}
| 1
|
455,308
|
/* Replace the current readline line buffer with NEW_LINE, but only when it
   actually differs; the whole replacement is recorded as a single undo
   group so one undo restores the previous line. */
maybe_make_readline_line (new_line)
     char *new_line;
{
  if (new_line && strcmp (new_line, rl_line_buffer) != 0)
    {
      rl_point = rl_end;

      rl_add_undo (UNDO_BEGIN, 0, 0, 0);
      rl_delete_text (0, rl_point);
      rl_point = rl_end = rl_mark = 0;
      rl_insert_text (new_line);
      rl_add_undo (UNDO_END, 0, 0, 0);
    }
}
| 0
|
312,537
|
/*
 * Free the dynamically allocated strings inside a qffields_T.  The struct
 * itself is owned by the caller (vim_free() accepts NULL members).
 */
qf_free_fields(qffields_T *pfields)
{
    vim_free(pfields->namebuf);
    vim_free(pfields->module);
    vim_free(pfields->errmsg);
    vim_free(pfields->pattern);
}
| 0
|
90,861
|
// Shared test body: verifies that per-origin and global usage tracking for
// `type` stays consistent as a mock client modifies existing origins and
// adds new ones, with notifications pumped through the message loop.
void QuotaManagerTest::GetUsage_WithModifyTestBody(const StorageType type) {
  const MockOriginData data[] = {
    { "http://foo.com/",   type,  10 },
    { "http://foo.com:1/", type,  20 },
  };
  MockStorageClient* client = CreateClient(data, ARRAYSIZE_UNSAFE(data));
  RegisterClient(client);

  // Baseline: both foo.com origins (same host) are summed.
  GetUsageAndQuota(GURL("http://foo.com/"), type);
  MessageLoop::current()->RunAllPending();
  EXPECT_EQ(kQuotaStatusOk, status());
  EXPECT_EQ(10 + 20, usage());

  // Deltas (+30, -5) and a new https origin on the same host must be
  // reflected in the host's usage.
  client->ModifyOriginAndNotify(GURL("http://foo.com/"), type, 30);
  client->ModifyOriginAndNotify(GURL("http://foo.com:1/"), type, -5);
  client->AddOriginAndNotify(GURL("https://foo.com/"), type, 1);

  GetUsageAndQuota(GURL("http://foo.com/"), type);
  MessageLoop::current()->RunAllPending();
  EXPECT_EQ(kQuotaStatusOk, status());
  EXPECT_EQ(10 + 20 + 30 - 5 + 1, usage());
  int foo_usage = usage();

  // A different host is tracked independently of foo.com.
  client->AddOriginAndNotify(GURL("http://bar.com/"), type, 40);
  GetUsageAndQuota(GURL("http://bar.com/"), type);
  MessageLoop::current()->RunAllPending();
  EXPECT_EQ(kQuotaStatusOk, status());
  EXPECT_EQ(40, usage());

  // Global usage is the sum over all hosts; nothing is unlimited here.
  GetGlobalUsage(type);
  MessageLoop::current()->RunAllPending();
  EXPECT_EQ(foo_usage + 40, usage());
  EXPECT_EQ(0, unlimited_usage());
}
| 0
|
244,200
|
/*
 * Parse an ISOBMFF ChannelLayout ('chnl') box: reads the stream structure
 * flags, then either an explicit per-channel layout list or an omitted-
 * channels bitmap, and finally the object count when objects are signaled.
 * Returns GF_OK, or an error on truncated/invalid data.
 */
GF_Err chnl_box_read(GF_Box *s,GF_BitStream *bs)
{
	GF_ChannelLayoutBox *ptr = (GF_ChannelLayoutBox *) s;

	ISOM_DECREASE_SIZE(s, 1)
	ptr->layout.stream_structure = gf_bs_read_u8(bs);
	if (ptr->layout.stream_structure & 1) {
		ISOM_DECREASE_SIZE(s, 1)
		ptr->layout.definedLayout = gf_bs_read_u8(bs);
		if (ptr->layout.definedLayout) {
			u32 remain = (u32) ptr->size;
			if (ptr->layout.stream_structure & 2) remain--;
			ptr->layout.channels_count = 0;
			while (remain) {
				/* Bug fix: channels_count was never incremented, so every
				 * parsed entry overwrote layouts[0] and the final count was
				 * always 0.  Also bound the index so a hostile file cannot
				 * write past the fixed-size layouts[] array (64 entries —
				 * verify against GF_AudioChannelLayout). */
				if (ptr->layout.channels_count >= 64)
					return GF_ISOM_INVALID_FILE;
				ISOM_DECREASE_SIZE(s, 1)
				ptr->layout.layouts[ptr->layout.channels_count].position = gf_bs_read_u8(bs);
				remain--;
				if (ptr->layout.layouts[ptr->layout.channels_count].position == 126) {
					/* Explicit direction: 16-bit azimuth + 8-bit elevation. */
					ISOM_DECREASE_SIZE(s, 3)
					ptr->layout.layouts[ptr->layout.channels_count].azimuth = gf_bs_read_int(bs, 16);
					ptr->layout.layouts[ptr->layout.channels_count].elevation = gf_bs_read_int(bs, 8);
					remain-=3;
				}
				ptr->layout.channels_count++;
			}
		} else {
			ISOM_DECREASE_SIZE(s, 8)
			ptr->layout.omittedChannelsMap = gf_bs_read_u64(bs);
		}
	}
	if (ptr->layout.stream_structure & 2) {
		ISOM_DECREASE_SIZE(s, 1)
		ptr->layout.object_count = gf_bs_read_u8(bs);
	}
	return GF_OK;
}
| 0
|
343,121
|
/*
 * Queue an ESP-in-TCP skb on the state's TCP socket.  If the socket is
 * currently owned by user context the skb is queued for later, otherwise
 * it is pushed immediately.  Returns 0 or a negative errno (including
 * failure to find the socket).
 */
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		/* Socket busy in process context: defer via the espintcp queue. */
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}
| 0
|
243,987
|
/* Allocate a zero-initialized SampleAuxiliaryInfoOffset ('saio') box.
 * ISOM_DECL_BOX_ALLOC declares and allocates "tmp" with the given type. */
GF_Box *saio_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoOffsetBox, GF_ISOM_BOX_TYPE_SAIO);
	return (GF_Box *)tmp;
}
| 0
|
204,751
|
do_tag(
char_u *tag, // tag (pattern) to jump to
int type,
int count,
int forceit, // :ta with !
int verbose) // print "tag not found" message
{
taggy_T *tagstack = curwin->w_tagstack;
int tagstackidx = curwin->w_tagstackidx;
int tagstacklen = curwin->w_tagstacklen;
int cur_match = 0;
int cur_fnum = curbuf->b_fnum;
int oldtagstackidx = tagstackidx;
int prevtagstackidx = tagstackidx;
int prev_num_matches;
int new_tag = FALSE;
int i;
int ic;
int no_regexp = FALSE;
int error_cur_match = 0;
int save_pos = FALSE;
fmark_T saved_fmark;
#ifdef FEAT_CSCOPE
int jumped_to_tag = FALSE;
#endif
int new_num_matches;
char_u **new_matches;
int use_tagstack;
int skip_msg = FALSE;
char_u *buf_ffname = curbuf->b_ffname; // name to use for
// priority computation
int use_tfu = 1;
// remember the matches for the last used tag
static int num_matches = 0;
static int max_num_matches = 0; // limit used for match search
static char_u **matches = NULL;
static int flags;
#ifdef FEAT_EVAL
if (tfu_in_use)
{
emsg(_(e_cannot_modify_tag_stack_within_tagfunc));
return FALSE;
}
#endif
#ifdef EXITFREE
if (type == DT_FREE)
{
// remove the list of matches
FreeWild(num_matches, matches);
# ifdef FEAT_CSCOPE
cs_free_tags();
# endif
num_matches = 0;
return FALSE;
}
#endif
if (type == DT_HELP)
{
type = DT_TAG;
no_regexp = TRUE;
use_tfu = 0;
}
prev_num_matches = num_matches;
free_string_option(nofile_fname);
nofile_fname = NULL;
CLEAR_POS(&saved_fmark.mark); // shutup gcc 4.0
saved_fmark.fnum = 0;
/*
* Don't add a tag to the tagstack if 'tagstack' has been reset.
*/
if ((!p_tgst && *tag != NUL))
{
use_tagstack = FALSE;
new_tag = TRUE;
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
tagstack_clear_entry(&ptag_entry);
if ((ptag_entry.tagname = vim_strsave(tag)) == NULL)
goto end_do_tag;
}
#endif
}
else
{
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
use_tagstack = FALSE;
else
#endif
use_tagstack = TRUE;
// new pattern, add to the tag stack
if (*tag != NUL
&& (type == DT_TAG || type == DT_SELECT || type == DT_JUMP
#ifdef FEAT_QUICKFIX
|| type == DT_LTAG
#endif
#ifdef FEAT_CSCOPE
|| type == DT_CSCOPE
#endif
))
{
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
if (ptag_entry.tagname != NULL
&& STRCMP(ptag_entry.tagname, tag) == 0)
{
// Jumping to same tag: keep the current match, so that
// the CursorHold autocommand example works.
cur_match = ptag_entry.cur_match;
cur_fnum = ptag_entry.cur_fnum;
}
else
{
tagstack_clear_entry(&ptag_entry);
if ((ptag_entry.tagname = vim_strsave(tag)) == NULL)
goto end_do_tag;
}
}
else
#endif
{
/*
* If the last used entry is not at the top, delete all tag
* stack entries above it.
*/
while (tagstackidx < tagstacklen)
tagstack_clear_entry(&tagstack[--tagstacklen]);
// if the tagstack is full: remove oldest entry
if (++tagstacklen > TAGSTACKSIZE)
{
tagstacklen = TAGSTACKSIZE;
tagstack_clear_entry(&tagstack[0]);
for (i = 1; i < tagstacklen; ++i)
tagstack[i - 1] = tagstack[i];
--tagstackidx;
}
/*
* put the tag name in the tag stack
*/
if ((tagstack[tagstackidx].tagname = vim_strsave(tag)) == NULL)
{
curwin->w_tagstacklen = tagstacklen - 1;
goto end_do_tag;
}
curwin->w_tagstacklen = tagstacklen;
save_pos = TRUE; // save the cursor position below
}
new_tag = TRUE;
}
else
{
if (
#if defined(FEAT_QUICKFIX)
g_do_tagpreview != 0 ? ptag_entry.tagname == NULL :
#endif
tagstacklen == 0)
{
// empty stack
emsg(_(e_tag_stack_empty));
goto end_do_tag;
}
if (type == DT_POP) // go to older position
{
#ifdef FEAT_FOLDING
int old_KeyTyped = KeyTyped;
#endif
if ((tagstackidx -= count) < 0)
{
emsg(_(e_at_bottom_of_tag_stack));
if (tagstackidx + count == 0)
{
// We did [num]^T from the bottom of the stack
tagstackidx = 0;
goto end_do_tag;
}
// We weren't at the bottom of the stack, so jump all the
// way to the bottom now.
tagstackidx = 0;
}
else if (tagstackidx >= tagstacklen) // count == 0?
{
emsg(_(e_at_top_of_tag_stack));
goto end_do_tag;
}
// Make a copy of the fmark, autocommands may invalidate the
// tagstack before it's used.
saved_fmark = tagstack[tagstackidx].fmark;
if (saved_fmark.fnum != curbuf->b_fnum)
{
/*
* Jump to other file. If this fails (e.g. because the
* file was changed) keep original position in tag stack.
*/
if (buflist_getfile(saved_fmark.fnum, saved_fmark.mark.lnum,
GETF_SETMARK, forceit) == FAIL)
{
tagstackidx = oldtagstackidx; // back to old posn
goto end_do_tag;
}
// A BufReadPost autocommand may jump to the '" mark, but
// we don't want that here.
curwin->w_cursor.lnum = saved_fmark.mark.lnum;
}
else
{
setpcmark();
curwin->w_cursor.lnum = saved_fmark.mark.lnum;
}
curwin->w_cursor.col = saved_fmark.mark.col;
curwin->w_set_curswant = TRUE;
check_cursor();
#ifdef FEAT_FOLDING
if ((fdo_flags & FDO_TAG) && old_KeyTyped)
foldOpenCursor();
#endif
// remove the old list of matches
FreeWild(num_matches, matches);
#ifdef FEAT_CSCOPE
cs_free_tags();
#endif
num_matches = 0;
tag_freematch();
goto end_do_tag;
}
if (type == DT_TAG
#if defined(FEAT_QUICKFIX)
|| type == DT_LTAG
#endif
)
{
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
cur_match = ptag_entry.cur_match;
cur_fnum = ptag_entry.cur_fnum;
}
else
#endif
{
// ":tag" (no argument): go to newer pattern
save_pos = TRUE; // save the cursor position below
if ((tagstackidx += count - 1) >= tagstacklen)
{
/*
* Beyond the last one, just give an error message and
* go to the last one. Don't store the cursor
* position.
*/
tagstackidx = tagstacklen - 1;
emsg(_(e_at_top_of_tag_stack));
save_pos = FALSE;
}
else if (tagstackidx < 0) // must have been count == 0
{
emsg(_(e_at_bottom_of_tag_stack));
tagstackidx = 0;
goto end_do_tag;
}
cur_match = tagstack[tagstackidx].cur_match;
cur_fnum = tagstack[tagstackidx].cur_fnum;
}
new_tag = TRUE;
}
else // go to other matching tag
{
// Save index for when selection is cancelled.
prevtagstackidx = tagstackidx;
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
cur_match = ptag_entry.cur_match;
cur_fnum = ptag_entry.cur_fnum;
}
else
#endif
{
if (--tagstackidx < 0)
tagstackidx = 0;
cur_match = tagstack[tagstackidx].cur_match;
cur_fnum = tagstack[tagstackidx].cur_fnum;
}
switch (type)
{
case DT_FIRST: cur_match = count - 1; break;
case DT_SELECT:
case DT_JUMP:
#ifdef FEAT_CSCOPE
case DT_CSCOPE:
#endif
case DT_LAST: cur_match = MAXCOL - 1; break;
case DT_NEXT: cur_match += count; break;
case DT_PREV: cur_match -= count; break;
}
if (cur_match >= MAXCOL)
cur_match = MAXCOL - 1;
else if (cur_match < 0)
{
emsg(_(e_cannot_go_before_first_matching_tag));
skip_msg = TRUE;
cur_match = 0;
cur_fnum = curbuf->b_fnum;
}
}
}
#if defined(FEAT_QUICKFIX)
if (g_do_tagpreview != 0)
{
if (type != DT_SELECT && type != DT_JUMP)
{
ptag_entry.cur_match = cur_match;
ptag_entry.cur_fnum = cur_fnum;
}
}
else
#endif
{
/*
* For ":tag [arg]" or ":tselect" remember position before the jump.
*/
saved_fmark = tagstack[tagstackidx].fmark;
if (save_pos)
{
tagstack[tagstackidx].fmark.mark = curwin->w_cursor;
tagstack[tagstackidx].fmark.fnum = curbuf->b_fnum;
}
// Curwin will change in the call to jumpto_tag() if ":stag" was
// used or an autocommand jumps to another window; store value of
// tagstackidx now.
curwin->w_tagstackidx = tagstackidx;
if (type != DT_SELECT && type != DT_JUMP)
{
curwin->w_tagstack[tagstackidx].cur_match = cur_match;
curwin->w_tagstack[tagstackidx].cur_fnum = cur_fnum;
}
}
}
// When not using the current buffer get the name of buffer "cur_fnum".
// Makes sure that the tag order doesn't change when using a remembered
// position for "cur_match".
if (cur_fnum != curbuf->b_fnum)
{
buf_T *buf = buflist_findnr(cur_fnum);
if (buf != NULL)
buf_ffname = buf->b_ffname;
}
/*
* Repeat searching for tags, when a file has not been found.
*/
for (;;)
{
int other_name;
char_u *name;
/*
* When desired match not found yet, try to find it (and others).
*/
if (use_tagstack)
name = tagstack[tagstackidx].tagname;
#if defined(FEAT_QUICKFIX)
else if (g_do_tagpreview != 0)
name = ptag_entry.tagname;
#endif
else
name = tag;
other_name = (tagmatchname == NULL || STRCMP(tagmatchname, name) != 0);
if (new_tag
|| (cur_match >= num_matches && max_num_matches != MAXCOL)
|| other_name)
{
if (other_name)
{
vim_free(tagmatchname);
tagmatchname = vim_strsave(name);
}
if (type == DT_SELECT || type == DT_JUMP
#if defined(FEAT_QUICKFIX)
|| type == DT_LTAG
#endif
)
cur_match = MAXCOL - 1;
if (type == DT_TAG)
max_num_matches = MAXCOL;
else
max_num_matches = cur_match + 1;
// when the argument starts with '/', use it as a regexp
if (!no_regexp && *name == '/')
{
flags = TAG_REGEXP;
++name;
}
else
flags = TAG_NOIC;
#ifdef FEAT_CSCOPE
if (type == DT_CSCOPE)
flags = TAG_CSCOPE;
#endif
if (verbose)
flags |= TAG_VERBOSE;
if (!use_tfu)
flags |= TAG_NO_TAGFUNC;
if (find_tags(name, &new_num_matches, &new_matches, flags,
max_num_matches, buf_ffname) == OK
&& new_num_matches < max_num_matches)
max_num_matches = MAXCOL; // If less than max_num_matches
// found: all matches found.
// If there already were some matches for the same name, move them
// to the start. Avoids that the order changes when using
// ":tnext" and jumping to another file.
if (!new_tag && !other_name)
{
int j, k;
int idx = 0;
tagptrs_T tagp, tagp2;
// Find the position of each old match in the new list. Need
// to use parse_match() to find the tag line.
for (j = 0; j < num_matches; ++j)
{
parse_match(matches[j], &tagp);
for (i = idx; i < new_num_matches; ++i)
{
parse_match(new_matches[i], &tagp2);
if (STRCMP(tagp.tagname, tagp2.tagname) == 0)
{
char_u *p = new_matches[i];
for (k = i; k > idx; --k)
new_matches[k] = new_matches[k - 1];
new_matches[idx++] = p;
break;
}
}
}
}
FreeWild(num_matches, matches);
num_matches = new_num_matches;
matches = new_matches;
}
if (num_matches <= 0)
{
if (verbose)
semsg(_(e_tag_not_found_str), name);
#if defined(FEAT_QUICKFIX)
g_do_tagpreview = 0;
#endif
}
else
{
int ask_for_selection = FALSE;
#ifdef FEAT_CSCOPE
if (type == DT_CSCOPE && num_matches > 1)
{
cs_print_tags();
ask_for_selection = TRUE;
}
else
#endif
if (type == DT_TAG && *tag != NUL)
// If a count is supplied to the ":tag <name>" command, then
// jump to count'th matching tag.
cur_match = count > 0 ? count - 1 : 0;
else if (type == DT_SELECT || (type == DT_JUMP && num_matches > 1))
{
print_tag_list(new_tag, use_tagstack, num_matches, matches);
ask_for_selection = TRUE;
}
#if defined(FEAT_QUICKFIX) && defined(FEAT_EVAL)
else if (type == DT_LTAG)
{
if (add_llist_tags(tag, num_matches, matches) == FAIL)
goto end_do_tag;
cur_match = 0; // Jump to the first tag
}
#endif
if (ask_for_selection == TRUE)
{
/*
* Ask to select a tag from the list.
*/
i = prompt_for_number(NULL);
if (i <= 0 || i > num_matches || got_int)
{
// no valid choice: don't change anything
if (use_tagstack)
{
tagstack[tagstackidx].fmark = saved_fmark;
tagstackidx = prevtagstackidx;
}
#ifdef FEAT_CSCOPE
cs_free_tags();
jumped_to_tag = TRUE;
#endif
break;
}
cur_match = i - 1;
}
if (cur_match >= num_matches)
{
// Avoid giving this error when a file wasn't found and we're
// looking for a match in another file, which wasn't found.
// There will be an emsg("file doesn't exist") below then.
if ((type == DT_NEXT || type == DT_FIRST)
&& nofile_fname == NULL)
{
if (num_matches == 1)
emsg(_(e_there_is_only_one_matching_tag));
else
emsg(_(e_cannot_go_beyond_last_matching_tag));
skip_msg = TRUE;
}
cur_match = num_matches - 1;
}
if (use_tagstack)
{
tagptrs_T tagp;
tagstack[tagstackidx].cur_match = cur_match;
tagstack[tagstackidx].cur_fnum = cur_fnum;
// store user-provided data originating from tagfunc
if (use_tfu && parse_match(matches[cur_match], &tagp) == OK
&& tagp.user_data)
{
VIM_CLEAR(tagstack[tagstackidx].user_data);
tagstack[tagstackidx].user_data = vim_strnsave(
tagp.user_data, tagp.user_data_end - tagp.user_data);
}
++tagstackidx;
}
#if defined(FEAT_QUICKFIX)
else if (g_do_tagpreview != 0)
{
ptag_entry.cur_match = cur_match;
ptag_entry.cur_fnum = cur_fnum;
}
#endif
/*
* Only when going to try the next match, report that the previous
* file didn't exist. Otherwise an emsg() is given below.
*/
if (nofile_fname != NULL && error_cur_match != cur_match)
smsg(_("File \"%s\" does not exist"), nofile_fname);
ic = (matches[cur_match][0] & MT_IC_OFF);
if (type != DT_TAG && type != DT_SELECT && type != DT_JUMP
#ifdef FEAT_CSCOPE
&& type != DT_CSCOPE
#endif
&& (num_matches > 1 || ic)
&& !skip_msg)
{
// Give an indication of the number of matching tags
sprintf((char *)IObuff, _("tag %d of %d%s"),
cur_match + 1,
num_matches,
max_num_matches != MAXCOL ? _(" or more") : "");
if (ic)
STRCAT(IObuff, _(" Using tag with different case!"));
if ((num_matches > prev_num_matches || new_tag)
&& num_matches > 1)
{
if (ic)
msg_attr((char *)IObuff, HL_ATTR(HLF_W));
else
msg((char *)IObuff);
msg_scroll = TRUE; // don't overwrite this message
}
else
give_warning(IObuff, ic);
if (ic && !msg_scrolled && msg_silent == 0)
{
out_flush();
ui_delay(1007L, TRUE);
}
}
#if defined(FEAT_EVAL)
// Let the SwapExists event know what tag we are jumping to.
vim_snprintf((char *)IObuff, IOSIZE, ":ta %s\r", name);
set_vim_var_string(VV_SWAPCOMMAND, IObuff, -1);
#endif
/*
* Jump to the desired match.
*/
i = jumpto_tag(matches[cur_match], forceit, type != DT_CSCOPE);
#if defined(FEAT_EVAL)
set_vim_var_string(VV_SWAPCOMMAND, NULL, -1);
#endif
if (i == NOTAGFILE)
{
// File not found: try again with another matching tag
if ((type == DT_PREV && cur_match > 0)
|| ((type == DT_TAG || type == DT_NEXT
|| type == DT_FIRST)
&& (max_num_matches != MAXCOL
|| cur_match < num_matches - 1)))
{
error_cur_match = cur_match;
if (use_tagstack)
--tagstackidx;
if (type == DT_PREV)
--cur_match;
else
{
type = DT_NEXT;
++cur_match;
}
continue;
}
semsg(_(e_file_str_does_not_exist), nofile_fname);
}
else
{
// We may have jumped to another window, check that
// tagstackidx is still valid.
if (use_tagstack && tagstackidx > curwin->w_tagstacklen)
tagstackidx = curwin->w_tagstackidx;
#ifdef FEAT_CSCOPE
jumped_to_tag = TRUE;
#endif
}
}
break;
}
end_do_tag:
// Only store the new index when using the tagstack and it's valid.
if (use_tagstack && tagstackidx <= curwin->w_tagstacklen)
curwin->w_tagstackidx = tagstackidx;
postponed_split = 0; // don't split next time
# ifdef FEAT_QUICKFIX
g_do_tagpreview = 0; // don't do tag preview next time
# endif
#ifdef FEAT_CSCOPE
return jumped_to_tag;
#else
return FALSE;
#endif
}
| 1
|
225,835
|
void mdhd_box_del(GF_Box *s)
{
	/* Destructor for a MediaHeaderBox: no owned sub-objects, so a
	 * single free suffices. Tolerates a NULL box pointer. */
	GF_MediaHeaderBox *mdhd = (GF_MediaHeaderBox *)s;
	if (mdhd != NULL)
		gf_free(mdhd);
}
| 0
|
513,160
|
/* Resolve the address of a double-valued system variable for this
 * session (THD); delegates to the generic per-offset lookup. */
static double *mysql_sys_var_double(THD* thd, int offset)
{
	void *var_ptr = intern_sys_var_ptr(thd, offset, true);
	return (double *) var_ptr;
}
| 0
|
384,190
|
/*
 * Validate the presence/absence of the element end key (KEY_END) against
 * the set's type. Returns true when the netlink attributes are coherent.
 */
static bool nft_setelem_valid_key_end(const struct nft_set *set,
				      struct nlattr **nla, u32 flags)
{
	const u32 concat_interval = NFT_SET_CONCAT | NFT_SET_INTERVAL;

	if ((set->flags & concat_interval) != concat_interval) {
		/* Plain sets must not carry an end key at all. */
		return nla[NFTA_SET_ELEM_KEY_END] == NULL;
	}

	/* Concatenated interval sets encode ranges via KEY_END, so the
	 * legacy INTERVAL_END element flag is not allowed here... */
	if (flags & NFT_SET_ELEM_INTERVAL_END)
		return false;

	/* ...and an end key is mandatory unless this is a catch-all element. */
	return nla[NFTA_SET_ELEM_KEY_END] != NULL ||
	       (flags & NFT_SET_ELEM_CATCHALL) != 0;
}
| 0
|
432,242
|
/*
 * Locate the MemoryRegionSection covering [addr, addr+size) within 'mr'.
 * Thin public wrapper: the actual lookup is done by the RCU-protected
 * variant; the section is returned by value.
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    return memory_region_find_rcu(mr, addr, size);
}
| 0
|
458,297
|
/*
 * Pick the effectively higher of two I/O priorities. Invalid inputs are
 * normalized to the best-effort default first. Within the same class the
 * numerically smaller value wins; across classes the lower class number
 * (higher priority) wins.
 */
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short a = ioprio_valid(aprio)
		? aprio : IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	unsigned short b = ioprio_valid(bprio)
		? bprio : IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	unsigned short aclass = IOPRIO_PRIO_CLASS(a);
	unsigned short bclass = IOPRIO_PRIO_CLASS(b);

	if (aclass == bclass)
		return min(a, b);
	return (aclass > bclass) ? b : a;
}
| 0
|
369,222
|
/*
 * Issue an accept(2) on behalf of an io_uring request.
 *
 * Two fd-publication modes: a normal fd (reserved up front, installed on
 * success) or a fixed file-table slot (accept->file_slot non-zero).
 * Returns 0 in all cases except -EAGAIN retry / early fd-reservation
 * failure; the accept result itself is posted via __io_req_complete().
 */
static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = &req->accept;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	/* Non-zero slot means "install into the fixed file table". */
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		/* Reserve the fd number before accepting, so failure to get
		 * an fd doesn't leave an accepted connection stranded. */
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		/* Accept failed: release the reserved fd, if any. */
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		/* In nonblocking issue mode, -EAGAIN means "retry later"
		 * without completing the request. */
		if (ret == -EAGAIN && force_nonblock)
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		/* Publish the new file under the reserved fd number. */
		fd_install(fd, file);
		ret = fd;
	} else {
		/* file_slot is 1-based on the wire; table index is 0-based. */
		ret = io_install_fixed_file(req, file, issue_flags,
					    accept->file_slot - 1);
	}
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
| 0
|
366,177
|
/*
 * Report whether path1 lies underneath path2 in the mount/dentry tree.
 * The reachability walk is performed while excluding concurrent mount
 * topology changes via mount_lock.
 */
bool path_is_under(const struct path *path1, const struct path *path2)
{
	bool reachable;

	read_seqlock_excl(&mount_lock);
	reachable = is_path_reachable(real_mount(path1->mnt), path1->dentry,
				      path2);
	read_sequnlock_excl(&mount_lock);

	return reachable;
}
| 0
|
390,534
|
/*
 * Handle the XkbSelectEvents request: update which XKB event types this
 * client wants to receive for a device. The request carries a fixed
 * header followed by a variable-length list of (affect, values) mask
 * pairs, one pair per bit set in affectWhich (map-notify excluded),
 * each pair sized 1/2/4 bytes according to the event type.
 */
ProcXkbSelectEvents(ClientPtr client)
{
    unsigned legal;
    DeviceIntPtr dev;
    XkbInterestPtr masks;
    REQUEST(xkbSelectEventsReq);

    REQUEST_AT_LEAST_SIZE(xkbSelectEventsReq);

    /* Client must have negotiated the XKB extension first. */
    if (!(client->xkbClientFlags&_XkbClientInitialized))
        return BadAccess;
    CHK_ANY_DEVICE(dev, stuff->deviceSpec, client, DixUseAccess);

    /* Map-notify selection lives directly on the client record, not in
     * the per-device interest list handled below. */
    if (((stuff->affectWhich&XkbMapNotifyMask)!=0)&&(stuff->affectMap)) {
        client->mapNotifyMask&= ~stuff->affectMap;
        client->mapNotifyMask|= (stuff->affectMap&stuff->map);
    }
    /* Only map-notify was requested: nothing more to parse. */
    if ((stuff->affectWhich&(~XkbMapNotifyMask))==0)
        return client->noClientException;

    /* Find (or lazily create) this client's event-mask record for the
     * device; the resource ties its lifetime to the client. */
    masks = XkbFindClientResource((DevicePtr)dev,client);
    if (!masks){
        XID id = FakeClientID(client->index);
        AddResource(id,RT_XKBCLIENT,dev);
        masks= XkbAddClientResource((DevicePtr)dev,client,id);
    }
    if (masks) {
        union {
            CARD8 *c8;
            CARD16 *c16;
            CARD32 *c32;
        } from,to;
        register unsigned bit,ndx,maskLeft,dataLeft,size;

        /* Cursor over the variable-length payload after the header. */
        from.c8= (CARD8 *)&stuff[1];
        dataLeft= (stuff->length*4)-SIZEOF(xkbSelectEventsReq);
        maskLeft= (stuff->affectWhich&(~XkbMapNotifyMask));
        /* Visit each selected event type in ascending bit order. */
        for (ndx=0,bit=1; (maskLeft!=0); ndx++, bit<<=1) {
            if ((bit&maskLeft)==0)
                continue;
            maskLeft&= ~bit;
            /* Per event type: destination mask field, set of legal bits,
             * and on-the-wire element size in bytes. */
            switch (ndx) {
            case XkbNewKeyboardNotify:
                to.c16= &client->newKeyboardNotifyMask;
                legal= XkbAllNewKeyboardEventsMask;
                size= 2;
                break;
            case XkbStateNotify:
                to.c16= &masks->stateNotifyMask;
                legal= XkbAllStateEventsMask;
                size= 2;
                break;
            case XkbControlsNotify:
                to.c32= &masks->ctrlsNotifyMask;
                legal= XkbAllControlEventsMask;
                size= 4;
                break;
            case XkbIndicatorStateNotify:
                to.c32= &masks->iStateNotifyMask;
                legal= XkbAllIndicatorEventsMask;
                size= 4;
                break;
            case XkbIndicatorMapNotify:
                to.c32= &masks->iMapNotifyMask;
                legal= XkbAllIndicatorEventsMask;
                size= 4;
                break;
            case XkbNamesNotify:
                to.c16= &masks->namesNotifyMask;
                legal= XkbAllNameEventsMask;
                size= 2;
                break;
            case XkbCompatMapNotify:
                to.c8= &masks->compatNotifyMask;
                legal= XkbAllCompatMapEventsMask;
                size= 1;
                break;
            case XkbBellNotify:
                to.c8= &masks->bellNotifyMask;
                legal= XkbAllBellEventsMask;
                size= 1;
                break;
            case XkbActionMessage:
                to.c8= &masks->actionMessageMask;
                legal= XkbAllActionMessagesMask;
                size= 1;
                break;
            case XkbAccessXNotify:
                to.c16= &masks->accessXNotifyMask;
                legal= XkbAllAccessXEventsMask;
                size= 2;
                break;
            case XkbExtensionDeviceNotify:
                to.c16= &masks->extDevNotifyMask;
                legal= XkbAllExtensionDeviceEventsMask;
                size= 2;
                break;
            default:
                client->errorValue = _XkbErrCode2(33,bit);
                return BadValue;
            }
            if (stuff->clear&bit) {
                /* clear: drop all interest in this event type. */
                if (size==2) to.c16[0]= 0;
                else if (size==4) to.c32[0]= 0;
                else to.c8[0]= 0;
            }
            else if (stuff->selectAll&bit) {
                /* selectAll: request every event of this type. */
                if (size==2) to.c16[0]= ~0;
                else if (size==4) to.c32[0]= ~0;
                else to.c8[0]= ~0;
            }
            else {
                /* Explicit (affect, values) pair follows in the payload;
                 * reject requests shorter than the pair they promise. */
                if (dataLeft<(size*2))
                    return BadLength;
                if (size==2) {
                    CHK_MASK_MATCH(ndx,from.c16[0],from.c16[1]);
                    CHK_MASK_LEGAL(ndx,from.c16[0],legal);
                    to.c16[0]&= ~from.c16[0];
                    to.c16[0]|= (from.c16[0]&from.c16[1]);
                }
                else if (size==4) {
                    CHK_MASK_MATCH(ndx,from.c32[0],from.c32[1]);
                    CHK_MASK_LEGAL(ndx,from.c32[0],legal);
                    to.c32[0]&= ~from.c32[0];
                    to.c32[0]|= (from.c32[0]&from.c32[1]);
                }
                else {
                    CHK_MASK_MATCH(ndx,from.c8[0],from.c8[1]);
                    CHK_MASK_LEGAL(ndx,from.c8[0],legal);
                    to.c8[0]&= ~from.c8[0];
                    to.c8[0]|= (from.c8[0]&from.c8[1]);
                    /* 8-bit pairs appear to occupy 4 bytes on the wire
                     * (2 data + 2 pad), hence the bumped size — NOTE(review):
                     * confirm against the XKB wire encoding. */
                    size= 2;
                }
                from.c8+= (size*2);
                dataLeft-= (size*2);
            }
        }
        /* Up to 2 bytes may remain as padding; more is a malformed request. */
        if (dataLeft>2) {
            ErrorF("[xkb] Extra data (%d bytes) after SelectEvents\n",dataLeft);
            return BadLength;
        }
        return client->noClientException;
    }
    return BadAlloc;
}
| 0
|
225,734
|
/* Destructor for the SRTP process box: no owned sub-objects, so
 * releasing the box structure itself is sufficient. */
void srpp_box_del(GF_Box *s)
{
	gf_free(s);
}
| 0
|
309,973
|
/*
 * Cross-check an individual attribute capability ('cap', reported as
 * 'name') against what sgr produces with only attribute bit 'num' set.
 * Warns on mismatches and on tparm stack errors; returns the string
 * that sgr produced (or 0).
 */
check_sgr(TERMTYPE2 *tp, char *zero, int num, char *cap, const char *name)
{
    char *test;

    _nc_tparm_err = 0;
    test = TIPARM_9(set_attributes,
		    num == 1,
		    num == 2,
		    num == 3,
		    num == 4,
		    num == 5,
		    num == 6,
		    num == 7,
		    num == 8,
		    num == 9);
    if (test == 0) {
	/* sgr yielded nothing for this bit: the standalone capability
	 * should be absent too. */
	if (PRESENT(cap))
	    _nc_warning("sgr(%d) missing, but %s present", num, name);
    } else if (PRESENT(cap)) {
	/* Both exist: they should describe the same attribute change. */
	if (!similar_sgr(num, test, cap)) {
	    _nc_warning("%s differs from sgr(%d)\n\t%s=%s\n\tsgr(%d)=%s",
			name, num,
			name, _nc_visbuf2(1, cap),
			num, _nc_visbuf2(2, test));
	}
    } else if (_nc_capcmp(test, zero)) {
	/* sgr does something beyond the all-off baseline, yet the
	 * matching capability is missing. */
	_nc_warning("sgr(%d) present, but not %s", num, name);
    }
    if (_nc_tparm_err)
	_nc_warning("stack error in sgr(%d) string", num);
    return test;
}
| 0
|
385,855
|
/*
 * Finalize a pathname walk: leave RCU-walk mode (taking real references
 * on the dentry/mount) and, if the walk "jumped" (e.g. via a symlink to
 * another tree), give the filesystem a chance to re-validate the result.
 * Returns 0 on success, -ECHILD to retry in ref-walk mode, or the
 * revalidation error (path reference dropped in that case).
 */
static int complete_walk(struct nameidata *nd)
{
	struct dentry *dentry = nd->path.dentry;
	int status;

	if (nd->flags & LOOKUP_RCU) {
		/* Transition from RCU-walk to ref-walk. */
		nd->flags &= ~LOOKUP_RCU;
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		spin_lock(&dentry->d_lock);
		/* Convert the sequence-validated dentry into a real
		 * reference; failure means the dentry changed under us
		 * and the whole walk must be retried. */
		if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
			spin_unlock(&dentry->d_lock);
			unlock_rcu_walk();
			return -ECHILD;
		}
		BUG_ON(nd->inode != dentry->d_inode);
		spin_unlock(&dentry->d_lock);
		mntget(nd->path.mnt);
		unlock_rcu_walk();
	}

	/* No jump happened: the normal revalidation during the walk was
	 * sufficient. */
	if (likely(!(nd->flags & LOOKUP_JUMPED)))
		return 0;

	if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
		return 0;

	status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
	if (status > 0)
		return 0;

	/* 0 from d_weak_revalidate means "stale". */
	if (!status)
		status = -ESTALE;

	path_put(&nd->path);
	return status;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.