idx
int64 | func
string | target
int64 |
|---|---|---|
338,087
|
// Emit the Memory section of the binary: exactly one defined memory with
// its resizable limits. Nothing is written when the module has no memory
// or when the memory is imported (imported memories are declared in the
// Import section instead).
void WasmBinaryWriter::writeMemory() {
  auto& memory = wasm->memory;
  if (!memory.exists || memory.imported()) {
    return;
  }
  BYN_TRACE("== writeMemory\n");
  auto sectionStart = startSection(BinaryConsts::Section::Memory);
  o << U32LEB(1); // exactly one memory is defined
  writeResizableLimits(
    memory.initial, memory.max, memory.hasMax(), memory.shared, memory.is64());
  finishSection(sectionStart);
}
| 0
|
405,337
|
/*
 * Insert @policy into the SPD for direction @dir.
 *
 * With @excl set, insertion fails when a matching policy already exists
 * (the error comes back from the insert helper as an ERR_PTR).  A policy
 * that gets replaced (@delpol) is requeued onto the new one, unlinked and
 * finally killed; the new policy inherits its index.  The whole update
 * runs under xfrm_policy_lock.
 *
 * Returns 0 on success or a negative errno.
 */
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
struct net *net = xp_net(policy);
struct xfrm_policy *delpol;
struct hlist_head *chain;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
/* Hashed-by-selector chain when available, otherwise the inexact list. */
if (chain)
delpol = xfrm_policy_insert_list(chain, policy, excl);
else
delpol = xfrm_policy_inexact_insert(policy, dir, excl);
if (IS_ERR(delpol)) {
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return PTR_ERR(delpol);
}
__xfrm_policy_link(policy, dir);
/* After previous checking, family can either be AF_INET or AF_INET6 */
if (policy->family == AF_INET)
rt_genid_bump_ipv4(net);
else
rt_genid_bump_ipv6(net);
if (delpol) {
xfrm_policy_requeue(delpol, policy);
__xfrm_policy_unlink(delpol, dir);
}
/* Reuse the replaced policy's index, otherwise allocate a fresh one. */
policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
policy->curlft.add_time = ktime_get_real_seconds();
policy->curlft.use_time = 0;
/* Arm the lifetime timer; take a reference only when the timer was not
 * already pending (mod_timer() returns 0 for an inactive timer). */
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
/* Kill the replaced policy outside the lock; otherwise consider
 * resizing the bydst hash now that a policy was added. */
if (delpol)
xfrm_policy_kill(delpol);
else if (xfrm_bydst_should_resize(net, dir, NULL))
schedule_work(&net->xfrm.policy_hash_work);
return 0;
}
| 0
|
413,652
|
/* Print every analysis hint stored in "tree", grouped by address.
 * mode 'j' emits a JSON array via PJ, mode '*' suppresses the per-address
 * prefix/suffix, any other mode prints human-readable " 0x... =>" lines.
 * Nodes sharing the same addr are emitted inside one group (the tree is
 * presumably iterated in address order — hints with equal addr adjacent). */
static void print_hint_tree(RBTree tree, int mode) {
/* Close the currently open per-address group: end the JSON object, or
 * print a newline in plain mode; nothing in '*' mode. */
#define END_ADDR if (mode == 'j') { pj_end (pj); } else if (mode != '*') { r_cons_newline (); }
PJ *pj = NULL;
if (mode == 'j') {
pj = pj_new ();
pj_a (pj);
}
RBIter it;
HintNode *node;
ut64 last_addr = 0;
bool in_addr = false;
r_rbtree_foreach (tree, it, node, HintNode, rb) {
/* Open a new group whenever the address changes, closing the previous
 * group first. */
if (!in_addr || last_addr != node->addr) {
if (in_addr) {
END_ADDR
}
in_addr = true;
last_addr = node->addr;
if (pj) {
pj_o (pj);
pj_kn (pj, "addr", node->addr);
} else if (mode != '*') {
r_cons_printf (" 0x%08"PFMT64x" =>", node->addr);
}
}
hint_node_print (node, mode, pj);
}
/* Close the last open group, if any node was printed at all. */
if (in_addr) {
END_ADDR
}
if (pj) {
pj_end (pj);
r_cons_printf ("%s\n", pj_string (pj));
pj_free (pj);
}
#undef END_ADDR
}
| 0
|
516,255
|
/*
 * Look up the failover primary device for this virtio-net instance.
 *
 * Returns the device found in the qdev tree, or NULL when no primary
 * device id exists.
 *
 * Fix: the id string returned by failover_find_primary_device_id() was
 * leaked on the success path; free it once the lookup is done
 * (qdev_find_recursive() does not take ownership of the string).
 */
static DeviceState *failover_find_primary_device(VirtIONet *n)
{
    char *id = failover_find_primary_device_id(n);
    DeviceState *dev;

    if (!id) {
        return NULL;
    }
    dev = qdev_find_recursive(sysbus_get_default(), id);
    g_free(id);
    return dev;
}
| 0
|
317,093
|
/*
 * Check permission to load a kernel module.
 *
 * file == NULL corresponds to init_module(2); a non-NULL file corresponds
 * to finit_module(2), where use of the file descriptor is additionally
 * validated when it was opened under a different SID than the caller.
 *
 * Returns 0 when permission is granted, a negative value otherwise
 * (propagated from avc_has_perm()).
 */
static int selinux_kernel_module_from_file(struct file *file)
{
struct common_audit_data ad;
struct inode_security_struct *isec;
struct file_security_struct *fsec;
u32 sid = current_sid();
int rc;
/* init_module */
if (file == NULL)
return avc_has_perm(&selinux_state,
sid, sid, SECCLASS_SYSTEM,
SYSTEM__MODULE_LOAD, NULL);
/* finit_module */
ad.type = LSM_AUDIT_DATA_FILE;
ad.u.file = file;
fsec = selinux_file(file);
/* fd:use check is only needed when the descriptor carries a SID
 * different from the caller's. */
if (sid != fsec->sid) {
rc = avc_has_perm(&selinux_state,
sid, fsec->sid, SECCLASS_FD, FD__USE, &ad);
if (rc)
return rc;
}
isec = inode_security(file_inode(file));
return avc_has_perm(&selinux_state,
sid, isec->sid, SECCLASS_SYSTEM,
SYSTEM__MODULE_LOAD, &ad);
}
| 0
|
383,330
|
/*
 * Copy the (srcX, srcY, w, h) rectangle of src onto dst at (dstX, dstY),
 * blending pct percent of each source pixel with (100 - pct) percent of a
 * grayscale (luminance) version of the destination pixel.
 *
 * Transparent source pixels are skipped; when src == dst the pixel is
 * copied unchanged.  New colors are exact-matched, then allocated, then
 * closest-matched in the destination palette.
 */
gdImageCopyMergeGray (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int w, int h, int pct)
{
	int c, dc;
	int x, y;
	int tox, toy;
	int ncR, ncG, ncB;
	float g;
	toy = dstY;
	for (y = srcY; (y < (srcY + h)); y++) {
		tox = dstX;
		for (x = srcX; (x < (srcX + w)); x++) {
			int nc;
			c = gdImageGetPixel (src, x, y);
			/* Added 7/24/95: support transparent copies */
			if (gdImageGetTransparent(src) == c) {
				tox++;
				continue;
			}
			/* If it's the same image, mapping is trivial */
			if (dst == src) {
				nc = c;
			} else {
				dc = gdImageGetPixel(dst, tox, toy);
				/* Luminance of the destination pixel, already in the
				 * 0..255 channel range. */
				g = (0.29900f * gdImageRed(dst, dc)) + (0.58700f * gdImageGreen(dst, dc)) + (0.11400f * gdImageBlue(dst, dc));
				/* Fix: blend each source channel with the gray value g
				 * itself.  The previous code multiplied the destination
				 * channel by g as well, which overflows the 0..255
				 * channel range (g is not normalized) and produces
				 * wrong colors. */
				ncR = (int)(gdImageRed (src, c) * (pct / 100.0f) + g * ((100 - pct) / 100.0f));
				ncG = (int)(gdImageGreen (src, c) * (pct / 100.0f) + g * ((100 - pct) / 100.0f));
				ncB = (int)(gdImageBlue (src, c) * (pct / 100.0f) + g * ((100 - pct) / 100.0f));
				/* First look for an exact match */
				nc = gdImageColorExact(dst, ncR, ncG, ncB);
				if (nc == (-1)) {
					/* No, so try to allocate it */
					nc = gdImageColorAllocate(dst, ncR, ncG, ncB);
					/* If we're out of colors, go for the closest color */
					if (nc == (-1)) {
						nc = gdImageColorClosest(dst, ncR, ncG, ncB);
					}
				}
			}
			gdImageSetPixel(dst, tox, toy, nc);
			tox++;
		}
		toy++;
	}
}
| 0
|
313,841
|
/*
 * CTRL-W: handle window commands.
 */
nv_window(cmdarg_T *cap)
{
    // "CTRL-W :" is the same as typing ":"; useful in a terminal window
    if (cap->nchar == ':')
    {
	cap->cmdchar = ':';
	cap->nchar = NUL;
	nv_colon(cap);
	return;
    }
    // With an operator pending, checkclearop() clears it and we do nothing.
    if (checkclearop(cap->oap))
	return;
    do_window(cap->nchar, cap->count0, NUL);	// everything is in window.c
}
| 0
|
216,906
|
/**
  Drop the files of an internal temporary table.

  @param name  path (without extension) of the table files to remove

  Fix: the handler's underlying file may never have been opened
  (file == NULL, e.g. when creating the temporary table failed), so the
  assertion must not dereference it unconditionally.
*/
void ha_maria::drop_table(const char *name)
{
  DBUG_ASSERT(!file || file->s->temporary);
  (void) ha_close();
  (void) maria_delete_table_files(name, 1, MY_WME);
}
| 1
|
379,654
|
// Look up the variable of the given kind at the given delta inside fcn.
// Returns the variable, or NULL when no such variable exists.
R_API RAnalVar *r_anal_function_get_var(RAnalFunction *fcn, char kind, int delta) {
	r_return_val_if_fail (fcn, NULL);
	void **slot;
	r_pvector_foreach (&fcn->vars, slot) {
		RAnalVar *candidate = *slot;
		if (candidate->kind != kind || candidate->delta != delta) {
			continue;
		}
		return candidate;
	}
	return NULL;
}
| 0
|
389,708
|
/*
 * Check that argument "idx" of "args" is a number.
 * Returns OK when it is; gives an error message and returns FAIL otherwise.
 */
check_for_number_arg(typval_T *args, int idx)
{
    if (args[idx].v_type == VAR_NUMBER)
	return OK;
    semsg(_(e_number_required_for_argument_nr), idx + 1);
    return FAIL;
}
| 0
|
521,471
|
/** True when no more data can be read: the entry header was invalid
    (headerSize <= 0) or the whole compressed payload has been consumed. */
bool isExhausted() override
{
    if (headerSize <= 0)
        return true;
    return pos >= zipEntryHolder.compressedSize;
}
| 0
|
427,192
|
/*
** Emit an OP_CLOSURE for the function just compiled — the last prototype
** registered in the *enclosing* function — and fix the resulting value
** into the next available register.
*/
static void codeclosure (LexState *ls, expdesc *v) {
  FuncState *enclosing = ls->fs->prev;
  int pc = luaK_codeABx(enclosing, OP_CLOSURE, 0, enclosing->np - 1);
  init_exp(v, VRELOC, pc);
  luaK_exp2nextreg(enclosing, v);  /* fix it at the last register */
}
| 0
|
222,836
|
// Map a dimension handle to an int64 value: a known, non-negative dimension
// yields its value; an unknown dimension — or an invalid (negative) one
// produced by a shape inference function — is encoded as a fresh negative
// symbolic id (-counter), with counter advanced for the next symbol.
void ExtractValue(DimensionHandle d, int64_t* result) {
  if (InferenceContext::ValueKnown(d)) {
    const int64_t val = InferenceContext::Value(d);
    if (val >= 0) {
      *result = val;
      return;
    }
    // Invalid dimension handle from shape inference: fall through and
    // encode it symbolically.
  }
  *result = -counter;
  ++counter;
}
| 0
|
265,536
|
/* Create a memory pool whose space usage is tracked through caller-supplied
 * shared-memory accounting callbacks (free-space query, mark-used,
 * mark-free).  All three callbacks and the out-handle are mandatory.
 * Returns 0 on success, S3_MEMPOOL_INVALID_ARG on bad arguments,
 * S3_MEMPOOL_THRESHOLD_EXCEEDED when the initial size does not fit in the
 * currently available space, or the error from mempool_create(). */
int mempool_create_with_shared_mem(
    size_t pool_item_size, size_t pool_initial_size, size_t pool_expansion_size,
    func_mem_available_callback_type mem_get_free_space_func,
    func_mark_mem_used_callback_type mem_mark_used_space_func,
    func_mark_mem_free_callback_type mem_mark_free_space_func,
    func_log_callback_type log_callback_func, int flags,
    MemoryPoolHandle *p_handle) {
  struct mempool *pool;
  int rc;

  if (!mem_get_free_space_func || !mem_mark_used_space_func ||
      !mem_mark_free_space_func || !p_handle) {
    return S3_MEMPOOL_INVALID_ARG;
  }

  /* Refuse to pre-allocate more than is currently available. */
  if (pool_initial_size > mem_get_free_space_func()) {
    return S3_MEMPOOL_THRESHOLD_EXCEEDED;
  }

  rc = mempool_create(pool_item_size, pool_initial_size, pool_expansion_size, 0,
                      log_callback_func, flags, p_handle);
  if (rc != 0) {
    return rc;
  }

  pool = (struct mempool *)*p_handle;
  pool->mem_get_free_space_func = mem_get_free_space_func;
  pool->mem_mark_used_space_func = mem_mark_used_space_func;
  pool->mem_mark_free_space_func = mem_mark_free_space_func;
  /* mempool_create -> freelist_allocate ran before the callbacks were
   * installed, so account now for the buffers it already allocated. */
  pool->mem_mark_used_space_func(pool->total_bufs_allocated_by_pool *
                                 pool->mempool_item_size);
  return 0;
}
| 0
|
387,729
|
// Find, by original idnum, a method in the requested version of this klass.
// Returns NULL when that klass version has been purged.
Method* InstanceKlass::method_with_orig_idnum(int idnum, int version) {
  InstanceKlass* holder = get_klass_version(version);
  return (holder == NULL) ? NULL : holder->method_with_orig_idnum(idnum);
}
| 0
|
406,215
|
/*
 * Print mount(8) usage text to "out" and exit with MOUNT_EX_USAGE when
 * writing to stderr (error path) or MOUNT_EX_SUCCESS otherwise (--help).
 * The text is emitted as several fprintf() calls, each wrapped in _() for
 * translation.
 */
static void __attribute__((__noreturn__)) usage(FILE *out)
{
fputs(USAGE_HEADER, out);
fprintf(out, _(
" %1$s [-lhV]\n"
" %1$s -a [options]\n"
" %1$s [options] [--source] <source> | [--target] <directory>\n"
" %1$s [options] <source> <directory>\n"
" %1$s <operation> <mountpoint> [<target>]\n"),
program_invocation_short_name);
fputs(USAGE_OPTIONS, out);
fprintf(out, _(
" -a, --all mount all filesystems mentioned in fstab\n"
" -c, --no-canonicalize don't canonicalize paths\n"
" -f, --fake dry run; skip the mount(2) syscall\n"
" -F, --fork fork off for each device (use with -a)\n"
" -T, --fstab <path> alternative file to /etc/fstab\n"));
fprintf(out, _(
" -h, --help display this help text and exit\n"
" -i, --internal-only don't call the mount.<type> helpers\n"
" -l, --show-labels lists all mounts with LABELs\n"
" -n, --no-mtab don't write to /etc/mtab\n"));
fprintf(out, _(
" -o, --options <list> comma-separated list of mount options\n"
" -O, --test-opts <list> limit the set of filesystems (use with -a)\n"
" -r, --read-only mount the filesystem read-only (same as -o ro)\n"
" -t, --types <list> limit the set of filesystem types\n"));
fprintf(out, _(
" --source <src> explicitly specifies source (path, label, uuid)\n"
" --target <target> explicitly specifies mountpoint\n"));
fprintf(out, _(
" -v, --verbose say what is being done\n"
" -V, --version display version information and exit\n"
" -w, --read-write mount the filesystem read-write (default)\n"));
fputs(USAGE_SEPARATOR, out);
fputs(USAGE_HELP, out);
fputs(USAGE_VERSION, out);
/* Ways to specify the mount source. */
fprintf(out, _(
"\nSource:\n"
" -L, --label <label> synonym for LABEL=<label>\n"
" -U, --uuid <uuid> synonym for UUID=<uuid>\n"
" LABEL=<label> specifies device by filesystem label\n"
" UUID=<uuid> specifies device by filesystem UUID\n"
" PARTLABEL=<label> specifies device by partition label\n"
" PARTUUID=<uuid> specifies device by partition UUID\n"));
fprintf(out, _(
" <device> specifies device by path\n"
" <directory> mountpoint for bind mounts (see --bind/rbind)\n"
" <file> regular file for loopdev setup\n"));
/* Mount operations (bind/move and propagation flags). */
fprintf(out, _(
"\nOperations:\n"
" -B, --bind mount a subtree somewhere else (same as -o bind)\n"
" -M, --move move a subtree to some other place\n"
" -R, --rbind mount a subtree and all submounts somewhere else\n"));
fprintf(out, _(
" --make-shared mark a subtree as shared\n"
" --make-slave mark a subtree as slave\n"
" --make-private mark a subtree as private\n"
" --make-unbindable mark a subtree as unbindable\n"));
fprintf(out, _(
" --make-rshared recursively mark a whole subtree as shared\n"
" --make-rslave recursively mark a whole subtree as slave\n"
" --make-rprivate recursively mark a whole subtree as private\n"
" --make-runbindable recursively mark a whole subtree as unbindable\n"));
fprintf(out, USAGE_MAN_TAIL("mount(8)"));
exit(out == stderr ? MOUNT_EX_USAGE : MOUNT_EX_SUCCESS);
}
| 0
|
410,712
|
/*
 * Release an AF_PACKET socket: unlink it from the per-net socket list,
 * tear down the protocol hook, free the rx/tx rings, detach from any
 * fanout group, purge the receive queue and drop the final reference.
 * Always returns 0.
 */
static int packet_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct packet_sock *po;
struct packet_fanout *f;
struct net *net;
union tpacket_req_u req_u;
if (!sk)
return 0;
net = sock_net(sk);
po = pkt_sk(sk);
mutex_lock(&net->packet.sklist_lock);
sk_del_node_init_rcu(sk);
mutex_unlock(&net->packet.sklist_lock);
preempt_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
preempt_enable();
/* Stop packet delivery before tearing down the rest. */
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, false);
packet_cached_dev_reset(po);
if (po->prot_hook.dev) {
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
packet_flush_mclist(sk);
lock_sock(sk);
/* A zeroed request tells packet_set_ring() to free the ring. */
if (po->rx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
}
if (po->tx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
}
release_sock(sk);
/* Detach from the fanout group; free its data after an RCU grace
 * period has passed (synchronize_net()). */
f = fanout_release(sk);
synchronize_net();
kfree(po->rollover);
if (f) {
fanout_release_data(f);
kvfree(f);
}
/*
 * Now the socket is dead. No more input will appear.
 */
sock_orphan(sk);
sock->sk = NULL;
/* Purge queues */
skb_queue_purge(&sk->sk_receive_queue);
packet_free_pending(po);
sk_refcnt_debug_release(sk);
sock_put(sk);
return 0;
}
| 0
|
247,123
|
/* Recursively log filter "f" and its output PIDs as an indented tree.
 *
 * indent: current depth (one '-' printed per level).
 * pid: the PID that led to this filter, printed as a "(PID name)" prefix.
 * alias_for: when f is an alias, the filter it stands for (printed inline).
 * src_num_tiled_pids: number of HEVC tile PIDs at the source; when > 1 the
 *   tile PIDs are collapsed into a single "(tilePID[n])" entry and
 *   skip_print suppresses the duplicates.
 * filters_done: visited list guarding against revisiting a filter (and
 *   thus against infinite recursion on cyclic graphs).
 */
static void gf_fs_print_filter_outputs(GF_Filter *f, GF_List *filters_done, u32 indent, GF_FilterPid *pid, GF_Filter *alias_for, u32 src_num_tiled_pids, Bool skip_print)
{
u32 i=0;
u32 num_tile_pids = 0;
if (!skip_print) {
while (i<indent) {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("-"));
i++;
}
if (src_num_tiled_pids>1) {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("(tilePID[%d]) ", src_num_tiled_pids));
}
else if (pid) {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("(PID %s) ", pid->name));
}
print_filter_name(f, GF_TRUE, GF_FALSE);
if (f->id) {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" (ID=%s)\n", f->id));
} else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" (ptr=%p)\n", f));
}
}
/* Stop if this filter was already printed, then mark it visited. */
if (filters_done && (gf_list_find(filters_done, f)>=0))
return;
if (filters_done)
gf_list_add(filters_done, f);
if (alias_for && !skip_print) {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" (<=> "));
print_filter_name(alias_for, GF_TRUE, GF_TRUE);
if (alias_for->id) {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" ID=%s", alias_for->id));
} else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" ptr=%p", alias_for));
}
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (")\n"));
}
/* Recurse into every destination of every output PID. */
for (i=0; i<f->num_output_pids; i++) {
u32 j, k;
Bool is_tiled = GF_FALSE;
Bool skip_tiled = skip_print;
GF_FilterPid *pidout = gf_list_get(f->output_pids, i);
const GF_PropertyValue *p = gf_filter_pid_get_property(pidout, GF_PROP_PID_CODECID);
if (p && (p->value.uint==GF_CODECID_HEVC_TILES)) {
is_tiled = GF_TRUE;
//only print the first tile pid
if (num_tile_pids) {
skip_tiled = GF_TRUE;
} else {
/* First tile PID seen: count all tile PIDs of this filter so the
 * collapsed entry can show "(tilePID[n])". */
for (j=i; j<f->num_output_pids; j++) {
GF_FilterPid *apid = gf_list_get(f->output_pids, j);
const GF_PropertyValue *p = gf_filter_pid_get_property(apid, GF_PROP_PID_CODECID);
if (p && (p->value.uint==GF_CODECID_HEVC_TILES)) {
num_tile_pids++;
}
}
}
}
for (j=0; j<pidout->num_destinations; j++) {
GF_FilterPidInst *pidi = gf_list_get(pidout->destinations, j);
GF_Filter *alias = NULL;
/* Check whether the destination is reached through an alias filter. */
for (k=0; k<gf_list_count(f->destination_filters); k++) {
alias = gf_list_get(f->destination_filters, k);
if (alias->multi_sink_target == pidi->filter)
break;
alias = NULL;
}
if (alias) {
gf_fs_print_filter_outputs(alias, filters_done, indent+1, pidout, pidi->filter, is_tiled ? num_tile_pids : src_num_tiled_pids, skip_tiled);
} else {
gf_fs_print_filter_outputs(pidi->filter, filters_done, indent+1, pidout, NULL, is_tiled ? num_tile_pids : src_num_tiled_pids, skip_tiled);
}
}
}
}
| 0
|
259,542
|
/*
 * Return TRUE when "url" starts with a URL scheme per RFC 3986 3.1
 * (scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ), scanned for at
 * most MAX_SCHEME_LEN characters) followed by ":/".
 *
 * When "buf" is non-NULL the scheme is copied into it, lowercased and
 * NUL-terminated; buflen must then exceed MAX_SCHEME_LEN (checked by
 * DEBUGASSERT in debug builds only).  buf[0] is always set, even on
 * FALSE returns.
 *
 * On Windows, a drive-letter prefix ("C:\...") is never treated as a
 * scheme.
 */
bool Curl_is_absolute_url(const char *url, char *buf, size_t buflen)
{
int i;
DEBUGASSERT(!buf || (buflen > MAX_SCHEME_LEN));
(void)buflen; /* only used in debug-builds */
if(buf)
buf[0] = 0; /* always leave a defined value in buf */
#ifdef WIN32
if(STARTS_WITH_DRIVE_PREFIX(url))
return FALSE;
#endif
for(i = 0; i < MAX_SCHEME_LEN; ++i) {
char s = url[i];
if(s && (ISALNUM(s) || (s == '+') || (s == '-') || (s == '.') )) {
/* RFC 3986 3.1 explains:
scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
*/
}
else {
break;
}
}
/* A scheme must be non-empty and be immediately followed by ":/". */
if(i && (url[i] == ':') && (url[i + 1] == '/')) {
if(buf) {
buf[i] = 0;
while(i--) {
buf[i] = (char)TOLOWER(url[i]);
}
}
return TRUE;
}
return FALSE;
}
| 0
|
270,120
|
// Determine the representable [qmin, qmax] range of the output tensor's
// quantized type (uint8/int8/int16) and delegate to the impl, which narrows
// that range according to the fused activation.  Fails (via TF_LITE_ENSURE)
// for any other output type.
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max) {
  int32_t qmin = 0;
  int32_t qmax = 0;
  switch (output->type) {
    case kTfLiteUInt8:
      qmin = std::numeric_limits<uint8_t>::min();
      qmax = std::numeric_limits<uint8_t>::max();
      break;
    case kTfLiteInt8:
      qmin = std::numeric_limits<int8_t>::min();
      qmax = std::numeric_limits<int8_t>::max();
      break;
    case kTfLiteInt16:
      qmin = std::numeric_limits<int16_t>::min();
      qmax = std::numeric_limits<int16_t>::max();
      break;
    default:
      // Unsupported quantized type.
      TF_LITE_ENSURE(context, false);
  }
  return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax,
                                               output, act_min, act_max);
}
| 0
|
398,487
|
// Translate a DWARF language code to its display name.
// Returns NULL when the code is outside the known table.
RZ_API const char *rz_bin_dwarf_get_lang_name(ut64 lang) {
	return (lang < RZ_ARRAY_SIZE(dwarf_langs)) ? dwarf_langs[lang] : NULL;
}
| 0
|
309,822
|
/*
 * Terminal-driver hook: (re)define color "color" with the given r/g/b
 * components via the terminfo initialize_color capability.  A no-op when
 * the terminal does not provide that capability.
 */
drv_initcolor(TERMINAL_CONTROL_BLOCK * TCB,
int color, int r, int g, int b)
{
SCREEN *sp = TCB->csp;
AssertTCB();
if (initialize_color != NULL) {
NCURSES_PUTP2("initialize_color",
TIPARM_4(initialize_color, color, r, g, b));
}
}
| 0
|
232,291
|
/// Encode one "MCU" — up to eight lines — of a sample-interleaved JPEG-LS
/// scan.  Components are interleaved per sample: run mode is entered only
/// when the run condition holds for *all* components, and both run
/// interruption and regular coding then process each component in turn.
/// Always returns false; the body is compiled out unless ACCUSOFT_CODE is
/// defined.
bool SampleInterleavedLSScan::WriteMCU(void)
{
#if ACCUSOFT_CODE
int lines = m_ulRemaining[0]; // total number of MCU lines processed.
UBYTE preshift = m_ucLowBit + FractionalColorBitsOf();
struct Line *line[4];
UBYTE cx;
//
// A "MCU" in respect to the code organization is eight lines.
if (lines > 8) {
lines = 8;
}
m_ulRemaining[0] -= lines;
assert(lines > 0);
assert(m_ucCount < 4);
//
// Fill the line pointers.
for(cx = 0;cx < m_ucCount;cx++) {
line[cx] = CurrentLine(cx);
}
// Loop over lines and columns
do {
LONG length = m_ulWidth[0];
LONG *lp[4];
// Get the line pointers and initialize the internal backup lines.
for(cx = 0;cx < m_ucCount;cx++) {
lp[cx] = line[cx]->m_pData;
StartLine(cx);
}
//
BeginWriteMCU(m_Stream.ByteStreamOf());
do {
LONG a[4],b[4],c[4],d[4]; // neighbouring values.
LONG d1[4],d2[4],d3[4]; // local gradients.
bool isrun = true;
for(cx = 0;cx < m_ucCount;cx++) {
GetContext(cx,a[cx],b[cx],c[cx],d[cx]);
d1[cx] = d[cx] - b[cx]; // compute local gradients
d2[cx] = b[cx] - c[cx];
d3[cx] = c[cx] - a[cx];
//
// Run mode only if the run condition is met for all components
if (isrun && !isRunMode(d1[cx],d2[cx],d3[cx]))
isrun = false;
}
if (isrun) {
LONG runcnt = 0;
do {
//
// Check whether the pixel is close enough to continue the run.
for(cx = 0;cx < m_ucCount;cx++) {
LONG x = *lp[cx] >> preshift;
if (x - a[cx] < -m_lNear || x - a[cx] > m_lNear)
break;
}
if (cx < m_ucCount)
break; // run ends.
//
// Update so that the next process gets the correct value.
// Also updates the line pointers.
for(cx = 0;cx < m_ucCount;cx++) {
UpdateContext(cx,a[cx]);
lp[cx]++;
}
} while(runcnt++,--length);
//
// Encode the run. Note that only a single run index is used here.
EncodeRun(runcnt,length == 0,m_lRunIndex[0]);
// Continue the encoding of the end of the run if there are more
// samples to encode.
if (length) {
bool negative; // the sign variable
LONG errval; // the prediction error
LONG merr; // the mapped error (symbol)
LONG rx; // the reconstructed value
UBYTE k; // golomb parameter
//
// The complete pixel in all components is now to be encoded.
for(cx = 0;cx < m_ucCount;cx++) {
// Get the neighbourhood.
GetContext(cx,a[cx],b[cx],c[cx],d[cx]);
// The prediction mode is always fixed, but the sign
// has to be found.
negative = a[cx] > b[cx];
// Compute the error value.
errval = (*lp[cx]++ >> preshift) - b[cx];
if (negative)
errval = -errval;
// Quantize the error.
errval = QuantizePredictionError(errval);
// Compute the reconstructed value.
rx = Reconstruct(negative,b[cx],errval);
// Update so that the next process gets the correct value.
UpdateContext(cx,rx);
// Get the golomb parameter for run interruption coding.
k = GolombParameter(false);
// Map the error into a symbol.
merr = ErrorMapping(errval,ErrorMappingOffset(false,errval != 0,k));
// Golomb-coding of the error.
GolombCode(k,merr,m_lLimit - m_lJ[m_lRunIndex[0]] - 1);
// Update the variables of the run mode.
UpdateState(false,errval);
}
// Update the run index now. This is not part of
// EncodeRun because the non-reduced run-index is
// required for the golomb coder length limit.
if (m_lRunIndex[0] > 0)
m_lRunIndex[0]--;
} else break; // Line ended, abort the loop over the line.
} else {
UWORD ctxt;
bool negative; // the sign variable.
LONG px; // the predicted variable.
LONG rx; // the reconstructed value.
LONG errval; // the error value.
LONG merr; // the mapped error value.
UBYTE k; // the Golomb parameter.
//
// Regular (non-run) coding: context modeling per component.
for(cx = 0;cx < m_ucCount;cx++) {
// Quantize the gradients.
d1[cx] = QuantizedGradient(d1[cx]);
d2[cx] = QuantizedGradient(d2[cx]);
d3[cx] = QuantizedGradient(d3[cx]);
// Compute the context.
ctxt = Context(negative,d1[cx],d2[cx],d3[cx]);
// Compute the predicted value.
px = Predict(a[cx],b[cx],c[cx]);
// Correct the prediction.
px = CorrectPrediction(ctxt,negative,px);
// Compute the error value.
errval = (*lp[cx]++ >> preshift) - px;
if (negative)
errval = -errval;
// Quantize the prediction error if NEAR > 0
errval = QuantizePredictionError(errval);
// Compute the reconstructed value.
rx = Reconstruct(negative,px,errval);
// Update so that the next process gets the correct value.
UpdateContext(cx,rx);
// Compute the golomb parameter k from the context.
k = GolombParameter(ctxt);
// Map the error into a symbol
merr = ErrorMapping(errval,ErrorMappingOffset(ctxt,k));
// Golomb-coding of the error.
GolombCode(k,merr,m_lLimit);
// Update the variables.
UpdateState(ctxt,errval);
}
}
} while(--length);
//
// Advance the line pointers.
for(cx = 0;cx < m_ucCount;cx++) {
EndLine(cx);
line[cx] = line[cx]->m_pNext;
}
//
} while(--lines);
#endif
return false;
}
| 0
|
336,136
|
/*
 * ioctl handler for ip6gre tunnel devices (SIOCGETTUNNEL, SIOCADDTUNNEL,
 * SIOCCHGTUNNEL, SIOCDELTUNNEL).
 *
 * Parameters are exchanged with userspace as struct ip6_tnl_parm2 and
 * converted to/from the internal struct __ip6_tnl_parm.  Add, change and
 * delete require CAP_NET_ADMIN in the tunnel net's user namespace.
 * Returns 0 on success or a negative errno.
 */
static int ip6gre_tunnel_ioctl(struct net_device *dev,
struct ifreq *ifr, int cmd)
{
int err = 0;
struct ip6_tnl_parm2 p;
struct __ip6_tnl_parm p1;
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = t->net;
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
memset(&p1, 0, sizeof(p1));
switch (cmd) {
case SIOCGETTUNNEL:
/* On the fallback device, look up the tunnel the user asked
 * about; fall back to this device's own parameters. */
if (dev == ign->fb_tunnel_dev) {
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
err = -EFAULT;
break;
}
ip6gre_tnl_parm_from_user(&p1, &p);
t = ip6gre_tunnel_locate(net, &p1, 0);
if (!t)
t = netdev_priv(dev);
}
memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
break;
case SIOCADDTUNNEL:
case SIOCCHGTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
err = -EFAULT;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
goto done;
err = -EINVAL;
/* Reject unsupported GRE version/routing flags. */
if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
goto done;
if (!(p.i_flags&GRE_KEY))
p.i_key = 0;
if (!(p.o_flags&GRE_KEY))
p.o_key = 0;
ip6gre_tnl_parm_from_user(&p1, &p);
/* Only SIOCADDTUNNEL may create a new tunnel. */
t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
if (t) {
/* Parameters match a different existing tunnel. */
if (t->dev != dev) {
err = -EEXIST;
break;
}
} else {
/* Re-key this device: unlink, update, relink. */
t = netdev_priv(dev);
ip6gre_tunnel_unlink(ign, t);
synchronize_net();
ip6gre_tnl_change(t, &p1, 1);
ip6gre_tunnel_link(ign, t);
netdev_state_change(dev);
}
}
if (t) {
err = 0;
memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
} else
err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
break;
case SIOCDELTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
if (dev == ign->fb_tunnel_dev) {
err = -EFAULT;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
goto done;
err = -ENOENT;
ip6gre_tnl_parm_from_user(&p1, &p);
t = ip6gre_tunnel_locate(net, &p1, 0);
if (!t)
goto done;
err = -EPERM;
/* The fallback tunnel itself cannot be deleted. */
if (t == netdev_priv(ign->fb_tunnel_dev))
goto done;
dev = t->dev;
}
unregister_netdevice(dev);
err = 0;
break;
default:
err = -EINVAL;
}
done:
return err;
}
| 0
|
316,957
|
/*
 * Check whether the current task may attach a queue to the TUN device
 * labeled by @security (tun_socket:attach_queue permission).
 * Returns 0 when permitted, a negative value otherwise.
 */
static int selinux_tun_dev_attach_queue(void *security)
{
struct tun_security_struct *tunsec = security;
return avc_has_perm(&selinux_state,
current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
TUN_SOCKET__ATTACH_QUEUE, NULL);
}
| 0
|
233,952
|
// Destructor: when the stage still owns a sub-pipeline created under
// explain (getContext()->explain set), dispose of it explicitly so its
// resources are released before destruction.  NOTE(review): presumably the
// non-explain path disposes the pipeline elsewhere — confirm against
// doDispose().
DocumentSourceUnionWith::~DocumentSourceUnionWith() {
if (_pipeline && _pipeline->getContext()->explain) {
_pipeline->dispose(pExpCtx->opCtx);
_pipeline.reset();
}
}
| 0
|
264,255
|
/*
 * Mark the guest-surface rectangle (x, y, w, h) as dirty so it gets resent
 * to VNC clients.  The rectangle is widened to VNC_DIRTY_PIXELS_PER_BIT
 * alignment and clamped to the surface size before setting bits in the
 * per-line dirty bitmaps.  Note: after clamping, "h" holds the exclusive
 * bottom coordinate (MIN(y + h, height)), not a height.
 */
static void vnc_dpy_update(DisplayChangeListener *dcl,
int x, int y, int w, int h)
{
VncDisplay *vd = container_of(dcl, VncDisplay, dcl);
struct VncSurface *s = &vd->guest;
int width = surface_width(vd->ds);
int height = surface_height(vd->ds);
/* this is needed this to ensure we updated all affected
 * blocks if x % VNC_DIRTY_PIXELS_PER_BIT != 0 */
w += (x % VNC_DIRTY_PIXELS_PER_BIT);
x -= (x % VNC_DIRTY_PIXELS_PER_BIT);
/* Clamp everything to the surface bounds. */
x = MIN(x, width);
y = MIN(y, height);
w = MIN(x + w, width) - x;
h = MIN(y + h, height);
for (; y < h; y++) {
bitmap_set(s->dirty[y], x / VNC_DIRTY_PIXELS_PER_BIT,
DIV_ROUND_UP(w, VNC_DIRTY_PIXELS_PER_BIT));
}
}
| 0
|
317,082
|
/*
 * Hook for move_mount(2): moving a mount requires mounton permission on
 * the destination path; the source path is not checked here.
 */
static int selinux_move_mount(const struct path *from_path,
const struct path *to_path)
{
const struct cred *cred = current_cred();
return path_has_perm(cred, to_path, FILE__MOUNTON);
}
| 0
|
344,265
|
/*
** Build the " (kind 'name')" suffix used in messages about a variable.
** Returns the empty string when no kind information is available.
*/
static const char *formatvarinfo (lua_State *L, const char *kind,
                                  const char *name) {
  return (kind == NULL)
       ? ""  /* no information */
       : luaO_pushfstring(L, " (%s '%s')", kind, name);
}
| 0
|
506,433
|
/*
 * RPA mechanism: verify the looked-up MD5 password credentials against the
 * client's response.  The credentials must be exactly the size of the
 * stored MD5 digest; the computed response is compared with a timing-safe
 * memory compare so the number of matching bytes is not leaked.
 * Returns TRUE on a match, FALSE otherwise.
 */
static bool verify_credentials(struct rpa_auth_request *request,
const unsigned char *credentials, size_t size)
{
unsigned char response[MD5_RESULTLEN];
if (size != sizeof(request->pwd_md5)) {
e_error(request->auth_request.mech_event,
"invalid credentials length");
return FALSE;
}
/* Store the password digest, then recompute the expected response. */
memcpy(request->pwd_md5, credentials, sizeof(request->pwd_md5));
rpa_user_response(request, response);
return mem_equals_timing_safe(response, request->user_response, sizeof(response));
}
| 0
|
482,546
|
/* Look up the translation table for "tableList" (via getTable) and
 * finalize it.  Returns the finalized table, or NULL when the table could
 * not be obtained or finalization failed. */
_lou_getTranslationTable(const char *tableList) {
TranslationTableHeader *table;
getTable(tableList, NULL, &table, NULL);
if (table)
if (!finalizeTable(table)) table = NULL;
return table;
}
| 0
|
455,396
|
/*
 * Re-initialize a VFS inode being recycled from the XFS inode cache.
 * inode_init_always() resets the generic VFS fields, so the fields that
 * must survive the recycle — link count, generation, i_version, mode and
 * rdev — are saved beforehand and restored afterwards (even when
 * inode_init_always() fails; its error is returned).
 */
xfs_reinit_inode(
struct xfs_mount *mp,
struct inode *inode)
{
int error;
uint32_t nlink = inode->i_nlink;
uint32_t generation = inode->i_generation;
uint64_t version = inode_peek_iversion(inode);
umode_t mode = inode->i_mode;
dev_t dev = inode->i_rdev;
error = inode_init_always(mp->m_super, inode);
set_nlink(inode, nlink);
inode->i_generation = generation;
inode_set_iversion_queried(inode, version);
inode->i_mode = mode;
inode->i_rdev = dev;
return error;
}
| 0
|
275,959
|
/* ECDH: compute the shared point private_key * public_key and store the
 * resulting x-coordinate (curve->num_bytes bytes) in "secret".
 * Returns 1 on success; 0 when the RNG failed to produce a random initial
 * Z value or the resulting point is the point at infinity. */
int uECC_shared_secret(const uint8_t *public_key,
const uint8_t *private_key,
uint8_t *secret,
uECC_Curve curve) {
uECC_word_t _public[uECC_MAX_WORDS * 2];
uECC_word_t _private[uECC_MAX_WORDS];
uECC_word_t tmp[uECC_MAX_WORDS];
uECC_word_t *p2[2] = {_private, tmp};
uECC_word_t *initial_Z = 0;
uECC_word_t carry;
wordcount_t num_words = curve->num_words;
wordcount_t num_bytes = curve->num_bytes;
#if uECC_VLI_NATIVE_LITTLE_ENDIAN
bcopy((uint8_t *) _private, private_key, num_bytes);
bcopy((uint8_t *) _public, public_key, num_bytes*2);
#else
uECC_vli_bytesToNative(_private, private_key, BITS_TO_BYTES(curve->num_n_bits));
uECC_vli_bytesToNative(_public, public_key, num_bytes);
uECC_vli_bytesToNative(_public + num_words, public_key + num_bytes, num_bytes);
#endif
/* Regularize the bitcount for the private key so that attackers cannot use a side channel
attack to learn the number of leading zeros. */
carry = regularize_k(_private, _private, tmp, curve);
/* If an RNG function was specified, try to get a random initial Z value to improve
protection against side-channel attacks. */
if (g_rng_function) {
if (!uECC_generate_random_int(p2[carry], curve->p, num_words)) {
return 0;
}
initial_Z = p2[carry];
}
/* Scalar multiplication; p2[!carry] selects the regularized scalar. */
EccPoint_mult(_public, _public, p2[!carry], initial_Z, curve->num_n_bits + 1, curve);
#if uECC_VLI_NATIVE_LITTLE_ENDIAN
bcopy((uint8_t *) secret, (uint8_t *) _public, num_bytes);
#else
uECC_vli_nativeToBytes(secret, num_bytes, _public);
#endif
return !EccPoint_isZero(_public, curve);
}
| 0
|
256,939
|
static void InsertBroadcastLabels(int num_bcast_dims, int num_named_labels,
int ellipsis_axis, Labels* labels,
LabelCounts* label_counts) {
labels->erase(labels->begin() + ellipsis_axis);
labels->insert(labels->begin() + ellipsis_axis, num_bcast_dims, 0);
std::iota(labels->begin() + ellipsis_axis,
labels->begin() + ellipsis_axis + num_bcast_dims,
num_named_labels);
// Increment label counts. Since these are new labels, the count is set
// to 1.
label_counts->resize(num_named_labels + num_bcast_dims, 1);
}
| 0
|
226,058
|
/* Dispatch a parsed child box of a MovieFragmentBox ('moof') to its field:
 * mfhd -> fragment header, traf -> TrackList, pssh -> PSSHs list.
 * Unknown child types are silently accepted (GF_OK).  The is_rem flag is
 * consumed inside the BOX_FIELD_* assignment macros. */
GF_Err moof_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_MFHD:
BOX_FIELD_ASSIGN(mfhd, GF_MovieFragmentHeaderBox)
return GF_OK;
case GF_ISOM_BOX_TYPE_TRAF:
BOX_FIELD_LIST_ASSIGN(TrackList)
return GF_OK;
case GF_ISOM_BOX_TYPE_PSSH:
BOX_FIELD_LIST_ASSIGN(PSSHs)
return GF_OK;
}
return GF_OK;
}
| 0
|
221,472
|
/*
 * Configure access to the D-Bus system bus for a sandboxed app.
 *
 * With unrestricted access (system-bus socket granted in the context) the
 * real socket is bind-mounted read-only into the sandbox.  Otherwise, when
 * the context needs a filtered system bus and proxying is not disabled, a
 * proxy socket is created and the proxy's address/filter/log arguments are
 * appended to proxy_arg_bwrap.  In both cases DBUS_SYSTEM_BUS_ADDRESS
 * inside the sandbox points at /run/dbus/system_bus_socket.
 *
 * Returns TRUE when bus access was set up, FALSE otherwise (no socket
 * found, proxy socket creation failed, or no access needed).
 */
flatpak_run_add_system_dbus_args (FlatpakBwrap *app_bwrap,
FlatpakBwrap *proxy_arg_bwrap,
FlatpakContext *context,
FlatpakRunFlags flags)
{
gboolean unrestricted, no_proxy;
const char *dbus_address = g_getenv ("DBUS_SYSTEM_BUS_ADDRESS");
g_autofree char *real_dbus_address = NULL;
g_autofree char *dbus_system_socket = NULL;
unrestricted = (context->sockets & FLATPAK_CONTEXT_SOCKET_SYSTEM_BUS) != 0;
if (unrestricted)
g_debug ("Allowing system-dbus access");
no_proxy = (flags & FLATPAK_RUN_FLAG_NO_SYSTEM_BUS_PROXY) != 0;
/* Locate the host's system bus socket: prefer the address from the
 * environment, fall back to the well-known path. */
if (dbus_address != NULL)
dbus_system_socket = extract_unix_path_from_dbus_address (dbus_address);
else if (g_file_test ("/var/run/dbus/system_bus_socket", G_FILE_TEST_EXISTS))
dbus_system_socket = g_strdup ("/var/run/dbus/system_bus_socket");
if (dbus_system_socket != NULL && unrestricted)
{
flatpak_bwrap_add_args (app_bwrap,
"--ro-bind", dbus_system_socket, "/run/dbus/system_bus_socket",
NULL);
flatpak_bwrap_set_env (app_bwrap, "DBUS_SYSTEM_BUS_ADDRESS", "unix:path=/run/dbus/system_bus_socket", TRUE);
return TRUE;
}
else if (!no_proxy && flatpak_context_get_needs_system_bus_proxy (context))
{
g_autofree char *proxy_socket = create_proxy_socket ("system-bus-proxy-XXXXXX");
if (proxy_socket == NULL)
return FALSE;
if (dbus_address)
real_dbus_address = g_strdup (dbus_address);
else
real_dbus_address = g_strdup_printf ("unix:path=%s", dbus_system_socket);
flatpak_bwrap_add_args (proxy_arg_bwrap, real_dbus_address, proxy_socket, NULL);
/* Restricted access: install the context's bus filters on the proxy. */
if (!unrestricted)
flatpak_context_add_bus_filters (context, NULL, FALSE, flags & FLATPAK_RUN_FLAG_SANDBOX, proxy_arg_bwrap);
if ((flags & FLATPAK_RUN_FLAG_LOG_SYSTEM_BUS) != 0)
flatpak_bwrap_add_args (proxy_arg_bwrap, "--log", NULL);
flatpak_bwrap_add_args (app_bwrap,
"--ro-bind", proxy_socket, "/run/dbus/system_bus_socket",
NULL);
flatpak_bwrap_set_env (app_bwrap, "DBUS_SYSTEM_BUS_ADDRESS", "unix:path=/run/dbus/system_bus_socket", TRUE);
return TRUE;
}
return FALSE;
}
| 0
|
312,586
|
/*
 * Parse a %D or %X errorformat prefix: 'D' pushes the directory from the
 * parsed name onto the quickfix directory stack, 'X' pops it.
 * Returns QF_OK on success, QF_FAIL when the directory name is missing or
 * cannot be pushed.
 */
qf_parse_dir_pfx(int idx, qffields_T *fields, qf_list_T *qfl)
{
    if (idx == 'X')	// leave directory
    {
	qfl->qf_directory = qf_pop_dir(&qfl->qf_dir_stack);
	return QF_OK;
    }
    if (idx != 'D')	// neither enter nor leave: nothing to do
	return QF_OK;
    // enter directory
    if (*fields->namebuf == NUL)
    {
	emsg(_(e_missing_or_empty_directory_name));
	return QF_FAIL;
    }
    qfl->qf_directory =
	qf_push_dir(fields->namebuf, &qfl->qf_dir_stack, FALSE);
    return qfl->qf_directory == NULL ? QF_FAIL : QF_OK;
}
| 0
|
197,095
|
// Applies a binary elementwise operation to two inputs whose broadcast
// relationship has been collapsed into the "fivefold" pattern y0..y4.
// elementwise_f processes contiguous runs of y4 elements; scalar_broadcast_f
// handles the degenerate y4 == 1 case where one operand is a scalar per run.
// NOTE(review): broadcast_shape[0..4] are assumed consistent with both input
// buffer sizes -- no bounds checks are performed here; confirm the caller's
// shape preparation guarantees input1 holds y0*y1*y2*y4 and input2 holds
// y0*y2*y3*y4 elements.
inline void BinaryBroadcastFiveFold(const ArithmeticParams& unswitched_params,
const RuntimeShape& unswitched_input1_shape,
const T* unswitched_input1_data,
const RuntimeShape& unswitched_input2_shape,
const T* unswitched_input2_data,
const RuntimeShape& output_shape,
T* output_data, ElementwiseF elementwise_f,
ScalarBroadcastF scalar_broadcast_f) {
// Build a parameter block with the two inputs' roles swapped, so that the
// code below can always treat input1 as the "fast broadcast" side.
ArithmeticParams switched_params = unswitched_params;
switched_params.input1_offset = unswitched_params.input2_offset;
switched_params.input1_multiplier = unswitched_params.input2_multiplier;
switched_params.input1_shift = unswitched_params.input2_shift;
switched_params.input2_offset = unswitched_params.input1_offset;
switched_params.input2_multiplier = unswitched_params.input1_multiplier;
switched_params.input2_shift = unswitched_params.input1_shift;
const bool use_unswitched =
unswitched_params.broadcast_category ==
tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
const ArithmeticParams& params =
use_unswitched ? unswitched_params : switched_params;
const T* input1_data =
use_unswitched ? unswitched_input1_data : unswitched_input2_data;
const T* input2_data =
use_unswitched ? unswitched_input2_data : unswitched_input1_data;
// Fivefold nested loops. The second input resets its position for each
// iteration of the second loop. The first input resets its position at the
// beginning of the fourth loop. The innermost loop is an elementwise add of
// sections of the arrays.
T* output_data_ptr = output_data;
const T* input1_data_ptr = input1_data;
const T* input2_data_reset = input2_data;
// In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared
// between input shapes. y3 for input 1 is always broadcast, and so the
// dimension there is 1, whereas optionally y1 might be broadcast for
// input 2. Put another way, input1.shape.FlatSize = y0 * y1 * y2 * y4,
// input2.shape.FlatSize = y0 * y2 * y3 * y4.
int y0 = params.broadcast_shape[0];
int y1 = params.broadcast_shape[1];
int y2 = params.broadcast_shape[2];
int y3 = params.broadcast_shape[3];
int y4 = params.broadcast_shape[4];
if (y4 > 1) {
// General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
// dimension.
for (int i0 = 0; i0 < y0; ++i0) {
const T* input2_data_ptr = nullptr;
for (int i1 = 0; i1 < y1; ++i1) {
// input2 restarts from the same base for every i1 (y1 broadcasts it).
input2_data_ptr = input2_data_reset;
for (int i2 = 0; i2 < y2; ++i2) {
for (int i3 = 0; i3 < y3; ++i3) {
elementwise_f(y4, params, input1_data_ptr, input2_data_ptr,
output_data_ptr);
input2_data_ptr += y4;
output_data_ptr += y4;
}
// We have broadcast y4 of input1 data y3 times, and now move on.
input1_data_ptr += y4;
}
}
// We have broadcast y2*y3*y4 of input2 data y1 times, and now move on.
input2_data_reset = input2_data_ptr;
}
} else {
// Special case of y4 == 1, in which the innermost loop is a single
// element and can be combined with the next (y3) as an inner broadcast.
//
// Note that this handles the case of pure scalar broadcast when
// y0 == y1 == y2 == 1. With low overhead it handles cases such as scalar
// broadcast with batch (as y2 > 1).
//
// NOTE The process is the same as the above general case except
// simplified for y4 == 1 and the loop over y3 is contained within the
// AddScalarBroadcast function.
for (int i0 = 0; i0 < y0; ++i0) {
const T* input2_data_ptr = nullptr;
for (int i1 = 0; i1 < y1; ++i1) {
input2_data_ptr = input2_data_reset;
for (int i2 = 0; i2 < y2; ++i2) {
// *input1_data_ptr is the per-run scalar; input2 supplies y3 values.
scalar_broadcast_f(y3, params, *input1_data_ptr, input2_data_ptr,
output_data_ptr);
input2_data_ptr += y3;
output_data_ptr += y3;
input1_data_ptr += 1;
}
}
input2_data_reset = input2_data_ptr;
}
}
}
| 1
|
498,122
|
/* Emit the HTML document preamble (doctype, <head> with title, stylesheet,
 * favicon, Atom feed link, clone URLs) and open <body>.  In embedded mode
 * only the configured header fragment is emitted.  Output ordering is the
 * contract here -- do not reorder the html() calls. */
void cgit_print_docstart(void)
{
if (ctx.cfg.embedded) {
if (ctx.cfg.header)
html_include(ctx.cfg.header);
return;
}
/* host is heap-allocated by cgit_hosturl() and freed at the end. */
char *host = cgit_hosturl();
html(cgit_doctype);
html("<html xmlns='http://www.w3.org/1999/xhtml' xml:lang='en' lang='en'>\n");
html("<head>\n");
html("<title>");
html_txt(ctx.page.title);
html("</title>\n");
htmlf("<meta name='generator' content='cgit %s'/>\n", cgit_version);
if (ctx.cfg.robots && *ctx.cfg.robots)
htmlf("<meta name='robots' content='%s'/>\n", ctx.cfg.robots);
html("<link rel='stylesheet' type='text/css' href='");
html_attr(ctx.cfg.css);
html("'/>\n");
if (ctx.cfg.favicon) {
html("<link rel='shortcut icon' href='");
html_attr(ctx.cfg.favicon);
html("'/>\n");
}
/* Atom feed link only makes sense when we know the host, a repo and a
 * branch to build the feed URL from. */
if (host && ctx.repo && ctx.qry.head) {
char *fileurl;
struct strbuf sb = STRBUF_INIT;
strbuf_addf(&sb, "h=%s", ctx.qry.head);
html("<link rel='alternate' title='Atom feed' href='");
html(cgit_httpscheme());
html_attr(host);
fileurl = cgit_fileurl(ctx.repo->url, "atom", ctx.qry.vpath,
sb.buf);
html_attr(fileurl);
html("' type='application/atom+xml'/>\n");
strbuf_release(&sb);
free(fileurl);
}
if (ctx.repo)
cgit_add_clone_urls(print_rel_vcs_link);
if (ctx.cfg.head_include)
html_include(ctx.cfg.head_include);
html("</head>\n");
html("<body>\n");
if (ctx.cfg.header)
html_include(ctx.cfg.header);
free(host);
}
| 0
|
234,816
|
/* Persist the in-memory error counters of @device into its dev_stats item
 * in the dev tree, creating or re-creating the item as needed.
 * Returns 0/positive on success paths, negative errno on failure. */
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *dev_root = fs_info->dev_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *eb;
struct btrfs_dev_stats_item *ptr;
int ret;
int i;
key.objectid = BTRFS_DEV_STATS_OBJECTID;
key.type = BTRFS_PERSISTENT_ITEM_KEY;
key.offset = device->devid;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
/* ins_len -1: search in "delete" mode; ret == 1 means not found. */
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
btrfs_warn_in_rcu(fs_info,
"error %d while searching for dev_stats item for device %s",
ret, rcu_str_deref(device->name));
goto out;
}
/* Item exists but is smaller than the current layout (older format):
 * delete it and fall through to re-insertion (ret forced to 1). */
if (ret == 0 &&
btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
/* need to delete old one and insert a new one */
ret = btrfs_del_item(trans, dev_root, path);
if (ret != 0) {
btrfs_warn_in_rcu(fs_info,
"delete too small dev_stats item for device %s failed %d",
rcu_str_deref(device->name), ret);
goto out;
}
ret = 1;
}
if (ret == 1) {
/* need to insert a new item */
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, dev_root, path,
&key, sizeof(*ptr));
if (ret < 0) {
btrfs_warn_in_rcu(fs_info,
"insert dev_stats item for device %s failed %d",
rcu_str_deref(device->name), ret);
goto out;
}
}
/* Copy every counter value into the (found or freshly inserted) item. */
eb = path->nodes[0];
ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
btrfs_set_dev_stats_value(eb, ptr, i,
btrfs_dev_stat_read(device, i));
btrfs_mark_buffer_dirty(eb);
out:
btrfs_free_path(path);
return ret;
}
| 0
|
369,169
|
static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
void __user *arg, unsigned len)
{
struct io_uring_task *tctx = current->io_uring;
cpumask_var_t new_mask;
int ret;
if (!tctx || !tctx->io_wq)
return -EINVAL;
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_clear(new_mask);
if (len > cpumask_size())
len = cpumask_size();
if (in_compat_syscall()) {
ret = compat_get_bitmap(cpumask_bits(new_mask),
(const compat_ulong_t __user *)arg,
len * 8 /* CHAR_BIT */);
} else {
ret = copy_from_user(new_mask, arg, len);
}
if (ret) {
free_cpumask_var(new_mask);
return -EFAULT;
}
ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
free_cpumask_var(new_mask);
return ret;
| 0
|
465,853
|
/* Fill @pdata from the device-tree node: optional reset GPIO and the
 * boolean "hci-muxed" property.  Returns 0 on success. */
int nfcmrvl_parse_dt(struct device_node *node,
		     struct nfcmrvl_platform_data *pdata)
{
	int reset_n_io = of_get_named_gpio(node, "reset-n-io", 0);

	if (reset_n_io < 0) {
		/* Property absent: not fatal, run without a reset line. */
		pr_info("no reset-n-io config\n");
	} else if (!gpio_is_valid(reset_n_io)) {
		pr_err("invalid reset-n-io GPIO\n");
		/* NOTE(review): reset_n_io is >= 0 here, so this error path
		 * returns a non-negative value -- confirm callers treat any
		 * non-zero return (not just negatives) as failure. */
		return reset_n_io;
	}
	pdata->reset_n_io = reset_n_io;

	/* "hci-muxed" is a boolean DT property: presence means enabled. */
	pdata->hci_muxed = of_find_property(node, "hci-muxed", NULL) ? 1 : 0;

	return 0;
}
| 0
|
446,419
|
/* Build a hashtable mapping each cache image's path string to its index in
 * the image array.  Unreadable paths are skipped.  Returns NULL on
 * allocation failure; caller owns the returned table. */
static HtPU *create_path_to_index(RzBuffer *cache_buf, cache_img_t *img, cache_hdr_t *hdr) {
	HtPU *path_to_idx = ht_pu_new0();
	if (!path_to_idx) {
		return NULL;
	}
	for (size_t idx = 0; idx < hdr->imagesCount; idx++) {
		char path[256];
		if (rz_buf_read_at(cache_buf, img[idx].pathFileOffset, (ut8 *)&path, sizeof(path)) != sizeof(path)) {
			/* short read: path not fully available, skip image */
			continue;
		}
		path[sizeof(path) - 1] = 0; /* force NUL termination */
		ht_pu_insert(path_to_idx, path, (ut64)idx);
	}
	return path_to_idx;
}
| 0
|
261,378
|
// Allocate and fill the ctxIdxLookup[...] tables used to pick the CABAC
// context index for significant_coeff_flag, for every combination of
// transform size (4x4..32x32), chroma/luma (cIdx), scan order and
// neighbouring coded-sub-block pattern (prevCsbf).  Returns false on OOM.
// NOTE(review): the single malloc'd region 'p' is parcelled out across all
// table slots and is presumably freed elsewhere via ctxIdxLookup[0][0][0][0];
// confirm the matching free.
bool alloc_and_init_significant_coeff_ctxIdx_lookupTable_OLD()
{
// One byte per coefficient position for each (cIdx,scanIdx,prevCsbf) combo.
int tableSize = 2*2*4*(4*4 + 8*8 + 16*16 + 32*32);
uint8_t* p = (uint8_t*)malloc(tableSize);
if (p==NULL) {
return false;
}
for (int log2w=2; log2w<=5 ; log2w++)
for (int cIdx=0;cIdx<2;cIdx++)
for (int scanIdx=0;scanIdx<2;scanIdx++)
for (int prevCsbf=0;prevCsbf<4;prevCsbf++)
{
// assign pointer into reserved memory area
ctxIdxLookup[log2w-2][cIdx][scanIdx][prevCsbf] = p;
p += (1<<log2w)*(1<<log2w);
const position* ScanOrderSub = get_scan_order(log2w-2, scanIdx);
const position* ScanOrderPos = get_scan_order(2, scanIdx);
//for (int yC=0;yC<(1<<log2w);yC++)
//  for (int xC=0;xC<(1<<log2w);xC++)
for (int s=0;s<(1<<log2w)*(1<<log2w);s++)
{
// Decompose scan position s into 4x4 sub-block (s>>4) plus
// position within the sub-block (s & 0xF).
position S = ScanOrderSub[s>>4];
int x0 = S.x<<2;
int y0 = S.y<<2;
int subX = ScanOrderPos[s & 0xF].x;
int subY = ScanOrderPos[s & 0xF].y;
int xC = x0 + subX;
int yC = y0 + subY;
int w = 1<<log2w;
int sbWidth = w>>2;
int sigCtx;
// if log2TrafoSize==2
if (sbWidth==1) {
sigCtx = ctxIdxMap[(yC<<2) + xC];
}
else if (xC+yC==0) {
sigCtx = 0;
}
else {
int xS = xC>>2;
int yS = yC>>2;
/*
int prevCsbf = 0;
if (xS < sbWidth-1) { prevCsbf += coded_sub_block_flag[xS+1 +yS*sbWidth];    }
if (yS < sbWidth-1) { prevCsbf += coded_sub_block_flag[xS+(1+yS)*sbWidth]<<1; }
*/
int xP = xC & 3;
int yP = yC & 3;
logtrace(LogSlice,"posInSubset: %d,%d\n",xP,yP);
logtrace(LogSlice,"prevCsbf: %d\n",prevCsbf);
//printf("%d | %d %d\n",prevCsbf,xP,yP);
// prevCsbf encodes which neighbouring sub-blocks are coded
// (bit 0: right, bit 1: below -- see commented code above).
switch (prevCsbf) {
case 0:
//sigCtx = (xP+yP==0) ? 2 : (xP+yP<3) ? 1 : 0;
sigCtx = (xP+yP>=3) ? 0 : (xP+yP>0) ? 1 : 2;
break;
case 1:
sigCtx = (yP==0) ? 2 : (yP==1) ? 1 : 0;
break;
case 2:
sigCtx = (xP==0) ? 2 : (xP==1) ? 1 : 0;
break;
default:
sigCtx = 2;
break;
}
logtrace(LogSlice,"a) sigCtx=%d\n",sigCtx);
// Luma gets extra offsets depending on sub-block position,
// block size and scan direction.
if (cIdx==0) {
if (xS+yS > 0) sigCtx+=3;
logtrace(LogSlice,"b) sigCtx=%d\n",sigCtx);
// if log2TrafoSize==3
if (sbWidth==2) { // 8x8 block
sigCtx += (scanIdx==0) ? 9 : 15;
} else {
sigCtx += 21;
}
logtrace(LogSlice,"c) sigCtx=%d\n",sigCtx);
}
else {
// if log2TrafoSize==3
if (sbWidth==2) { // 8x8 block
sigCtx+=9;
}
else {
sigCtx+=12;
}
}
}
int ctxIdxInc;
// Chroma contexts live in a separate bank starting at 27.
if (cIdx==0) { ctxIdxInc=sigCtx; }
else         { ctxIdxInc=27+sigCtx; }
ctxIdxLookup[log2w-2][cIdx][scanIdx][prevCsbf][xC+(yC<<log2w)] = ctxIdxInc;
//NOTE: when using this option, we have to include all three scanIdx in the table
//ctxIdxLookup[log2w-2][cIdx][scanIdx][prevCsbf][s] = ctxIdxInc;
}
}
return true;
}
| 0
|
468,368
|
/* Read-only accessor for the TLS validation flags stored on the client's
 * private struct (set elsewhere via the matching setter). */
g_socket_client_get_tls_validation_flags (GSocketClient *client)
{
return client->priv->tls_validation_flags;
}
| 0
|
384,826
|
/* qsort()-style comparator over an array of char* path strings; delegates
 * to pathcmp() (the -1 third argument presumably means "no length limit" --
 * confirm against pathcmp's contract). */
pstrcmp(const void *a, const void *b)
{
return (pathcmp(*(char **)a, *(char **)b, -1));
}
| 0
|
497,806
|
/* Search TEXT (SIZE bytes) for any keyword in KWSET.  The single-keyword
 * case is dispatched to the Boyer-Moore matcher (bmexec); multiple keywords
 * go through cwexec.  On a match, fills *KWSMATCH and returns the offset;
 * returns (size_t) -1 when nothing matches. */
kwsexec (kwset_t kwset, char const *text, size_t size,
struct kwsmatch *kwsmatch)
{
if (kwset->words == 1)
{
size_t ret = bmexec (kwset, text, size);
if (ret != (size_t) -1)
{
/* Only one keyword, so index is always 0 and the match length
is the keyword's (minimum) length. */
kwsmatch->index = 0;
kwsmatch->offset[0] = ret;
kwsmatch->size[0] = kwset->mind;
}
return ret;
}
else
return cwexec (kwset, text, size, kwsmatch);
}
| 0
|
349,899
|
/* Read the raw MPI state register (HW_ATL_MPI_STATE_ADR) from the NIC. */
static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self)
{
return aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
}
| 0
|
513,105
|
/* Construct geometry attributes from a type handler plus generic attributes;
 * starts from "unknown" and lets copy() refine m_geometry_type. */
Type_geometry_attributes(const Type_handler *handler,
const Type_all_attributes *gattr)
:m_geometry_type(m_geometry_type_unknown)
{
copy(handler, gattr);
}
| 0
|
207,826
|
/* nci_request - run an NCI control request, serialized per device.
 *
 * @ndev:    NCI device the request targets
 * @req:     callback that actually emits the request
 * @opt:     opaque argument forwarded to @req
 * @timeout: completion timeout handed to __nci_request()
 *
 * Returns -ENETDOWN if the device is not up, otherwise the result of
 * __nci_request().
 *
 * The NCI_UP test must happen under req_lock: testing it before taking the
 * lock leaves a window in which the device can be brought down (the close
 * path presumably serializes on the same lock -- this removes the TOCTOU
 * race of checking the flag unlocked and then issuing the request).
 */
inline int nci_request(struct nci_dev *ndev,
void (*req)(struct nci_dev *ndev,
const void *opt),
const void *opt, __u32 timeout)
{
int rc;
/* Serialize all requests */
mutex_lock(&ndev->req_lock);
if (!test_bit(NCI_UP, &ndev->flags))
rc = -ENETDOWN;
else
rc = __nci_request(ndev, req, opt, timeout);
mutex_unlock(&ndev->req_lock);
return rc;
}
| 1
|
264,252
|
/* Translate a VNC PointerEvent (button mask + coordinates) into QEMU input
 * events.  Coordinates are absolute, relative-to-0x7FFF, or delta-from-last,
 * depending on what the client negotiated.  Event ordering (buttons, then
 * axes, then sync) is the protocol here -- do not reorder. */
static void pointer_event(VncState *vs, int button_mask, int x, int y)
{
/* VNC button bit layout for the first five buttons/wheel directions. */
static uint32_t bmap[INPUT_BUTTON_MAX] = {
[INPUT_BUTTON_LEFT]       = 0x01,
[INPUT_BUTTON_MIDDLE]     = 0x02,
[INPUT_BUTTON_RIGHT]      = 0x04,
[INPUT_BUTTON_WHEEL_UP]   = 0x08,
[INPUT_BUTTON_WHEEL_DOWN] = 0x10,
};
QemuConsole *con = vs->vd->dcl.con;
int width = surface_width(vs->vd->ds);
int height = surface_height(vs->vd->ds);
/* Only emit button events for bits that actually changed. */
if (vs->last_bmask != button_mask) {
qemu_input_update_buttons(con, bmap, vs->last_bmask, button_mask);
vs->last_bmask = button_mask;
}
if (vs->absolute) {
qemu_input_queue_abs(con, INPUT_AXIS_X, x, width);
qemu_input_queue_abs(con, INPUT_AXIS_Y, y, height);
} else if (vnc_has_feature(vs, VNC_FEATURE_POINTER_TYPE_CHANGE)) {
/* Client sends deltas biased around 0x7FFF. */
qemu_input_queue_rel(con, INPUT_AXIS_X, x - 0x7FFF);
qemu_input_queue_rel(con, INPUT_AXIS_Y, y - 0x7FFF);
} else {
/* Plain relative mode: diff against the previous position;
 * last_x == -1 marks "no previous position yet". */
if (vs->last_x != -1) {
qemu_input_queue_rel(con, INPUT_AXIS_X, x - vs->last_x);
qemu_input_queue_rel(con, INPUT_AXIS_Y, y - vs->last_y);
}
vs->last_x = x;
vs->last_y = y;
}
qemu_input_event_sync();
}
| 0
|
254,708
|
static int
njs_typed_array_compare(double a, double b)
{
    /*
     * Sort comparator for typed-array elements: NaNs order after every
     * other value (two NaNs compare equal), then ordinary numeric order,
     * with -0 placed before +0 via the signbit difference.
     */
    if (isnan(a)) {
        return isnan(b) ? 0 : 1;
    }

    if (isnan(b)) {
        return -1;
    }

    if (a != b) {
        return (a < b) ? -1 : 1;
    }

    /* a == b: distinguish -0 from +0 so that -0 sorts first. */
    return signbit(b) - signbit(a);
}
| 0
|
463,121
|
/* Close one annotations db: unlink it from the global open-db list, close
 * the underlying cyrusdb handle, and free the structure. */
static void annotate_closedb(annotate_db_t *d)
{
annotate_db_t *dx, *prev = NULL;
int r;
/* detach from the global list */
for (dx = all_dbs_head ; dx && dx != d ; prev = dx, dx = dx->next)
;
/* d must be on the list -- closing an untracked db is a bug. */
assert(dx);
assert(d == dx);
detach_db(prev, d);
#if DEBUG
syslog(LOG_ERR, "Closing annotations db %s\n", d->filename);
#endif
r = cyrusdb_close(d->db);
if (r)
syslog(LOG_ERR, "DBERROR: error closing annotations %s: %s",
d->filename, cyrusdb_strerror(r));
free(d->filename);
free(d->mboxname);
memset(d, 0, sizeof(*d));   /* JIC */
free(d);
}
| 0
|
227,035
|
/* Callback for WHOIS numeric replies carrying "nick: message" content;
 * prints "[nick] message" into the whois target buffer.  The function
 * signature (server, date, argv, argv_eol, ...) comes from the
 * IRC_PROTOCOL_CALLBACK macro. */
IRC_PROTOCOL_CALLBACK(whois_nick_msg)
{
/* Numeric replies of this shape carry at least 5 arguments. */
IRC_PROTOCOL_MIN_ARGS(5);
weechat_printf_date_tags (
irc_msgbuffer_get_target_buffer (
server, argv[3], command, "whois", NULL),
date,
irc_protocol_tags (command, "irc_numeric", NULL, NULL),
"%s%s[%s%s%s] %s%s",
weechat_prefix ("network"),
IRC_COLOR_CHAT_DELIMITERS,
irc_nick_color_for_msg (server, 1, NULL, argv[3]),
argv[3],
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_RESET,
/* Strip the leading ':' from the trailing parameter if present. */
(argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]);
return WEECHAT_RC_OK;
}
| 0
|
300,729
|
/* sk->sk_write_space callback: wake writers waiting for socket buffer
 * space.  The wait queue is accessed under RCU because the socket's wq can
 * be released concurrently. */
static void tipc_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
/* Only wake if someone is actually sleeping on the queue. */
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
EPOLLWRNORM | EPOLLWRBAND);
rcu_read_unlock();
}
| 0
|
274,680
|
/* Return the index of the currently selected row in the layer tree view,
 * or -1 when nothing is selected. */
callbacks_get_selected_row_index  (void)
{
GtkTreeSelection *selection;
GtkTreeIter       iter;
GtkListStore *list_store = (GtkListStore *) gtk_tree_view_get_model
((GtkTreeView *) screen.win.layerTree);
gint index=-1,i=0;
/* This will only work in single or browse selection mode! */
selection = gtk_tree_view_get_selection((GtkTreeView *) screen.win.layerTree);
if (gtk_tree_selection_get_selected(selection, NULL, &iter)) {
/* Walk the rows until we find the one that is selected. */
while (gtk_tree_model_iter_nth_child ((GtkTreeModel *)list_store,
&iter, NULL, i)){
if (gtk_tree_selection_iter_is_selected (selection, &iter)) {
return i;
}
i++;
}
}
return index;
}
| 0
|
247,531
|
// Read-only accessor for the stored expected OCSP response bytes.
const std::string& expectedOcspResponse() const { return expected_ocsp_response_; }
| 0
|
196,231
|
// Lazily open shard `shard`'s table file and register the tensor-slice
// metadata it declares.  Errors are recorded in status_ rather than thrown;
// the method is const, so sss_/status_/tensors_ are presumably mutable
// members -- confirm in the class declaration.
// NOTE(review): `shard` is a signed int compared against sss_.size();
// confirm callers never pass a negative shard (signed/unsigned comparison).
void TensorSliceReader::LoadShard(int shard) const {
CHECK_LT(shard, sss_.size());
if (sss_[shard] || !status_.ok()) {
return;  // Already loaded, or invalid.
}
string value;
SavedTensorSlices sts;
const string fname = fnames_[shard];
VLOG(1) << "Reading meta data from file " << fname << "...";
Table* table;
Status s = open_function_(fname, &table);
if (!s.ok()) {
status_ = errors::DataLoss("Unable to open table file ", fname, ": ",
s.ToString());
return;
}
// Take ownership of the opened table; marks this shard as loaded.
sss_[shard].reset(table);
if (!(table->Get(kSavedTensorSlicesKey, &value) &&
ParseProtoUnlimited(&sts, value))) {
status_ = errors::Internal(
"Failed to find the saved tensor slices at the beginning of the "
"checkpoint file: ",
fname);
return;
}
// Reject checkpoints written by incompatible producer versions.
status_ = CheckVersions(sts.meta().versions(), TF_CHECKPOINT_VERSION,
TF_CHECKPOINT_VERSION_MIN_PRODUCER, "Checkpoint",
"checkpoint");
if (!status_.ok()) return;
// Register every (tensor, slice) pair declared by this shard's metadata.
for (const SavedSliceMeta& ssm : sts.meta().tensor()) {
TensorShape ssm_shape(ssm.shape());
for (const TensorSliceProto& tsp : ssm.slice()) {
TensorSlice ss_slice(tsp);
status_ = RegisterTensorSlice(ssm.name(), ssm_shape, ssm.type(), fname,
ss_slice, &tensors_);
if (!status_.ok()) return;
}
}
}
| 1
|
424,512
|
/* Handle a PDU arriving on the video control channel: validate the
 * [cbSize][packetType] header, then dispatch by packet type.  Returns a
 * CHANNEL_RC_* / ERROR_* code. */
static UINT video_control_on_data_received(IWTSVirtualChannelCallback* pChannelCallback, wStream* s)
{
VIDEO_CHANNEL_CALLBACK* callback = (VIDEO_CHANNEL_CALLBACK*)pChannelCallback;
VIDEO_PLUGIN* video;
VideoClientContext* context;
UINT ret = CHANNEL_RC_OK;
UINT32 cbSize, packetType;
video = (VIDEO_PLUGIN*)callback->plugin;
context = (VideoClientContext*)video->wtsPlugin.pInterface;
/* Need at least the 4-byte cbSize field. */
if (Stream_GetRemainingLength(s) < 4)
return ERROR_INVALID_DATA;
Stream_Read_UINT32(s, cbSize);
/* cbSize covers the whole PDU (>= 8 bytes of header); the remaining
 * stream must contain the rest of it. */
if (cbSize < 8 || Stream_GetRemainingLength(s) < (cbSize - 4))
{
WLog_ERR(TAG, "invalid cbSize");
return ERROR_INVALID_DATA;
}
Stream_Read_UINT32(s, packetType);
switch (packetType)
{
case TSMM_PACKET_TYPE_PRESENTATION_REQUEST:
ret = video_read_tsmm_presentation_req(context, s);
break;
default:
WLog_ERR(TAG, "not expecting packet type %" PRIu32 "", packetType);
ret = ERROR_UNSUPPORTED_TYPE;
break;
}
return ret;
}
| 0
|
430,407
|
/* Parse netlink flow key (and optional mask) attributes into @match.
 * When a mask is expected (match->mask set) but none was supplied, an
 * exact-match mask is synthesized from the key attributes.  Returns 0 or a
 * negative errno. */
int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
const struct nlattr *nla_key,
const struct nlattr *nla_mask,
bool log)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
struct nlattr *newmask = NULL;
u64 key_attrs = 0;
u64 mask_attrs = 0;
int err;
err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
if (err)
return err;
err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
if (err)
return err;
err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
if (err)
return err;
if (match->mask) {
if (!nla_mask) {
/* Create an exact match mask. We need to set to 0xff
 * all the 'match->mask' fields that have been touched
 * in 'match->key'. We cannot simply memset
 * 'match->mask', because padding bytes and fields not
 * specified in 'match->key' should be left to 0.
 * Instead, we use a stream of netlink attributes,
 * copied from 'key' and set to 0xff.
 * ovs_key_from_nlattrs() will take care of filling
 * 'match->mask' appropriately.
 */
newmask = kmemdup(nla_key,
nla_total_size(nla_len(nla_key)),
GFP_KERNEL);
if (!newmask)
return -ENOMEM;
mask_set_nlattr(newmask, 0xff);
/* The userspace does not send tunnel attributes that
 * are 0, but we should not wildcard them nonetheless.
 */
if (match->key->tun_proto)
SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
0xff, true);
nla_mask = newmask;
}
err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
if (err)
goto free_newmask;
/* Always match on tci. */
SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);
err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
if (err)
goto free_newmask;
err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
log);
if (err)
goto free_newmask;
}
/* Cross-check that the supplied key/mask attribute sets are coherent. */
if (!match_validate(match, key_attrs, mask_attrs, log))
err = -EINVAL;
free_newmask:
/* kfree(NULL) is a no-op, so this is safe on all paths. */
kfree(newmask);
return err;
}
| 0
|
376,333
|
/* Encrypt @ipart with gpg for @recipients (optionally signed as @userid)
 * and build an RFC 3156 multipart/encrypted result in @opart:
 *   part 1: "Version: 1" control part with the protocol content type,
 *   part 2: application/octet-stream holding the armored ciphertext.
 * Returns TRUE on success; on failure sets @error and returns FALSE.
 * Cleanup is via the fail/fail1 labels: fail1 releases only the streams,
 * fail additionally frees the gpg context. */
gpg_encrypt_sync (CamelCipherContext *context,
const gchar *userid,
GPtrArray *recipients,
CamelMimePart *ipart,
CamelMimePart *opart,
GCancellable *cancellable,
GError **error)
{
CamelCipherContextClass *class;
CamelGpgContext *ctx = (CamelGpgContext *) context;
struct _GpgCtx *gpg;
CamelStream *istream, *ostream, *vstream;
CamelMimePart *encpart, *verpart;
CamelDataWrapper *dw;
CamelContentType *ct;
CamelMultipartEncrypted *mpe;
gboolean success = FALSE;
gint i;
class = CAMEL_CIPHER_CONTEXT_GET_CLASS (context);
ostream = camel_stream_mem_new ();
istream = camel_stream_mem_new ();
/* Canonicalize the input (CRLF line endings) before encrypting. */
if (camel_cipher_canonical_to_stream (
ipart, CAMEL_MIME_FILTER_CANON_CRLF, istream, NULL, error) == -1) {
g_prefix_error (
error, _("Could not generate encrypting data: "));
goto fail1;
}
gpg = gpg_ctx_new (context);
gpg_ctx_set_mode (gpg, GPG_CTX_MODE_ENCRYPT);
gpg_ctx_set_armor (gpg, TRUE);
gpg_ctx_set_userid (gpg, userid);
gpg_ctx_set_istream (gpg, istream);
gpg_ctx_set_ostream (gpg, ostream);
gpg_ctx_set_always_trust (gpg, ctx->priv->always_trust);
for (i = 0; i < recipients->len; i++) {
gpg_ctx_add_recipient (gpg, recipients->pdata[i]);
}
if (!gpg_ctx_op_start (gpg, error))
goto fail;
/* FIXME: move this to a common routine */
while (!gpg_ctx_op_complete (gpg)) {
if (gpg_ctx_op_step (gpg, cancellable, error) == -1) {
gpg_ctx_op_cancel (gpg);
goto fail;
}
}
/* Non-zero exit from gpg: surface its diagnostics as the error. */
if (gpg_ctx_op_wait (gpg) != 0) {
const gchar *diagnostics;
diagnostics = gpg_ctx_get_diagnostics (gpg);
g_set_error (
error, CAMEL_ERROR, CAMEL_ERROR_GENERIC, "%s",
(diagnostics != NULL && *diagnostics != '\0') ?
diagnostics : _("Failed to execute gpg."));
goto fail;
}
success = TRUE;
/* Wrap the armored ciphertext as application/octet-stream. */
dw = camel_data_wrapper_new ();
camel_data_wrapper_construct_from_stream_sync (
dw, ostream, NULL, NULL);
encpart = camel_mime_part_new ();
ct = camel_content_type_new ("application", "octet-stream");
camel_content_type_set_param (ct, "name", "encrypted.asc");
camel_data_wrapper_set_mime_type_field (dw, ct);
camel_content_type_unref (ct);
camel_medium_set_content ((CamelMedium *) encpart, dw);
g_object_unref (dw);
camel_mime_part_set_description (encpart, _("This is a digitally encrypted message part"));
/* Build the RFC 3156 "Version: 1" control part. */
vstream = camel_stream_mem_new ();
camel_stream_write_string (vstream, "Version: 1\n", NULL, NULL);
g_seekable_seek (G_SEEKABLE (vstream), 0, G_SEEK_SET, NULL, NULL);
verpart = camel_mime_part_new ();
dw = camel_data_wrapper_new ();
camel_data_wrapper_set_mime_type (dw, class->encrypt_protocol);
camel_data_wrapper_construct_from_stream_sync (
dw, vstream, NULL, NULL);
g_object_unref (vstream);
camel_medium_set_content ((CamelMedium *) verpart, dw);
g_object_unref (dw);
/* Assemble the multipart/encrypted container: version part first. */
mpe = camel_multipart_encrypted_new ();
ct = camel_content_type_new ("multipart", "encrypted");
camel_content_type_set_param (ct, "protocol", class->encrypt_protocol);
camel_data_wrapper_set_mime_type_field ((CamelDataWrapper *) mpe, ct);
camel_content_type_unref (ct);
camel_multipart_set_boundary ((CamelMultipart *) mpe, NULL);
mpe->decrypted = g_object_ref (ipart);
camel_multipart_add_part ((CamelMultipart *) mpe, verpart);
g_object_unref (verpart);
camel_multipart_add_part ((CamelMultipart *) mpe, encpart);
g_object_unref (encpart);
camel_medium_set_content ((CamelMedium *) opart, (CamelDataWrapper *) mpe);
fail:
gpg_ctx_free (gpg);
fail1:
g_object_unref (istream);
g_object_unref (ostream);
return success;
}
| 0
|
384,839
|
/* "getcwd([winnr [, tabnr]])" function: return the effective working
 * directory -- window-local, tab-local, or global, depending on the
 * arguments.  getcwd(-1) asks for the global directory explicitly. */
f_getcwd(typval_T *argvars, typval_T *rettv)
{
win_T *wp = NULL;
tabpage_T *tp = NULL;
char_u *cwd;
int global = FALSE;
rettv->v_type = VAR_STRING;
rettv->vval.v_string = NULL;
/* In Vim9 script, both optional arguments must be numbers. */
if (in_vim9script()
&& (check_for_opt_number_arg(argvars, 0) == FAIL
|| (argvars[0].v_type != VAR_UNKNOWN
&& check_for_opt_number_arg(argvars, 1) == FAIL)))
return;
if (argvars[0].v_type == VAR_NUMBER
&& argvars[0].vval.v_number == -1
&& argvars[1].v_type == VAR_UNKNOWN)
global = TRUE;
else
wp = find_tabwin(&argvars[0], &argvars[1], &tp);
/* Preference order: window-local dir, tab-local dir, then global. */
if (wp != NULL && wp->w_localdir != NULL
&& argvars[0].v_type != VAR_UNKNOWN)
rettv->vval.v_string = vim_strsave(wp->w_localdir);
else if (tp != NULL && tp->tp_localdir != NULL
&& argvars[0].v_type != VAR_UNKNOWN)
rettv->vval.v_string = vim_strsave(tp->tp_localdir);
else if (wp != NULL || tp != NULL || global)
{
if (globaldir != NULL && argvars[0].v_type != VAR_UNKNOWN)
rettv->vval.v_string = vim_strsave(globaldir);
else
{
/* Fall back to asking the OS for the current directory. */
cwd = alloc(MAXPATHL);
if (cwd != NULL)
{
if (mch_dirname(cwd, MAXPATHL) != FAIL)
rettv->vval.v_string = vim_strsave(cwd);
vim_free(cwd);
}
}
}
#ifdef BACKSLASH_IN_FILENAME
if (rettv->vval.v_string != NULL)
slash_adjust(rettv->vval.v_string);
#endif
}
| 0
|
223,452
|
/* Emit JIT code that reads the character immediately before STR_PTR without
 * moving STR_PTR.  In UTF modes, multi-unit sequences are decoded backwards
 * via the utfpeakcharback helpers; invalid-UTF handling optionally jumps to
 * *backtracks.  `max` lets small character classes skip the multi-unit path
 * entirely. */
static void peek_char_back(compiler_common *common, sljit_u32 max, jump_list **backtracks)
{
/* Reads one character back without moving STR_PTR. TMP2 must
contain the start of the subject buffer. Affects TMP1, TMP2, and RETURN_ADDR. */
DEFINE_COMPILER;
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH != 32
struct sljit_jump *jump;
#endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH != 32 */
SLJIT_UNUSED_ARG(max);
SLJIT_UNUSED_ARG(backtracks);
/* Load the last code unit; may be only the tail of a sequence in UTF. */
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-1));
#ifdef SUPPORT_UNICODE
#if PCRE2_CODE_UNIT_WIDTH == 8
if (common->utf)
{
/* All characters below 128 are single-byte: the helper call is only
needed when max can reach multi-byte characters. */
if (max < 128) return;
jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0x80);
if (common->invalid_utf)
{
add_jump(compiler, &common->utfpeakcharback_invalid, JUMP(SLJIT_FAST_CALL));
if (backtracks != NULL)
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR));
}
else
add_jump(compiler, &common->utfpeakcharback, JUMP(SLJIT_FAST_CALL));
JUMPHERE(jump);
}
#elif PCRE2_CODE_UNIT_WIDTH == 16
if (common->utf)
{
/* Surrogates start at 0xd800; below that every unit is one character. */
if (max < 0xd800) return;
if (common->invalid_utf)
{
jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xd800);
add_jump(compiler, &common->utfpeakcharback_invalid, JUMP(SLJIT_FAST_CALL));
if (backtracks != NULL)
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR));
}
else
{
/* Inline decode of a surrogate pair (input assumed valid UTF-16). */
OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, 0xdc00);
jump = CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0xe000 - 0xdc00);
/* TMP2 contains the low surrogate. */
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-2));
OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x10000);
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xd800);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 10);
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0);
}
JUMPHERE(jump);
}
#elif PCRE2_CODE_UNIT_WIDTH == 32
if (common->invalid_utf)
{
/* 32-bit units are whole characters; just range-check validity. */
OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, 0xd800);
add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0x110000));
add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP2, 0, SLJIT_IMM, 0xe000 - 0xd800));
}
#endif /* PCRE2_CODE_UNIT_WIDTH == [8|16|32] */
#endif /* SUPPORT_UNICODE */
}
| 0
|
223,392
|
static PCRE2_SPTR compile_char1_matchingpath(compiler_common *common, PCRE2_UCHAR type, PCRE2_SPTR cc, jump_list **backtracks, BOOL check_str_ptr)
{
DEFINE_COMPILER;
int length;
unsigned int c, oc, bit;
compare_context context;
struct sljit_jump *jump[3];
jump_list *end_list;
#ifdef SUPPORT_UNICODE
PCRE2_UCHAR propdata[5];
#endif /* SUPPORT_UNICODE */
switch(type)
{
case OP_NOT_DIGIT:
case OP_DIGIT:
/* Digits are usually 0-9, so it is worth to optimize them. */
if (check_str_ptr)
detect_partial_match(common, backtracks);
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8
if (common->utf && is_char7_bitset((const sljit_u8*)common->ctypes - cbit_length + cbit_digit, FALSE))
read_char7_type(common, backtracks, type == OP_NOT_DIGIT);
else
#endif
read_char8_type(common, backtracks, type == OP_NOT_DIGIT);
/* Flip the starting bit in the negative case. */
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, ctype_digit);
add_jump(compiler, backtracks, JUMP(type == OP_DIGIT ? SLJIT_ZERO : SLJIT_NOT_ZERO));
return cc;
case OP_NOT_WHITESPACE:
case OP_WHITESPACE:
if (check_str_ptr)
detect_partial_match(common, backtracks);
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8
if (common->utf && is_char7_bitset((const sljit_u8*)common->ctypes - cbit_length + cbit_space, FALSE))
read_char7_type(common, backtracks, type == OP_NOT_WHITESPACE);
else
#endif
read_char8_type(common, backtracks, type == OP_NOT_WHITESPACE);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, ctype_space);
add_jump(compiler, backtracks, JUMP(type == OP_WHITESPACE ? SLJIT_ZERO : SLJIT_NOT_ZERO));
return cc;
case OP_NOT_WORDCHAR:
case OP_WORDCHAR:
if (check_str_ptr)
detect_partial_match(common, backtracks);
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8
if (common->utf && is_char7_bitset((const sljit_u8*)common->ctypes - cbit_length + cbit_word, FALSE))
read_char7_type(common, backtracks, type == OP_NOT_WORDCHAR);
else
#endif
read_char8_type(common, backtracks, type == OP_NOT_WORDCHAR);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, ctype_word);
add_jump(compiler, backtracks, JUMP(type == OP_WORDCHAR ? SLJIT_ZERO : SLJIT_NOT_ZERO));
return cc;
case OP_ANY:
if (check_str_ptr)
detect_partial_match(common, backtracks);
read_char(common, common->nlmin, common->nlmax, backtracks, READ_CHAR_UPDATE_STR_PTR);
if (common->nltype == NLTYPE_FIXED && common->newline > 255)
{
jump[0] = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff);
end_list = NULL;
if (common->mode != PCRE2_JIT_PARTIAL_HARD)
add_jump(compiler, &end_list, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0));
else
check_str_end(common, &end_list);
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0);
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, common->newline & 0xff));
set_jumps(end_list, LABEL());
JUMPHERE(jump[0]);
}
else
check_newlinechar(common, common->nltype, backtracks, TRUE);
return cc;
case OP_ALLANY:
if (check_str_ptr)
detect_partial_match(common, backtracks);
#ifdef SUPPORT_UNICODE
if (common->utf)
{
if (common->invalid_utf)
{
read_char(common, 0, READ_CHAR_MAX, backtracks, READ_CHAR_UPDATE_STR_PTR);
return cc;
}
#if PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
#if PCRE2_CODE_UNIT_WIDTH == 8
jump[0] = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xc0);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
#elif PCRE2_CODE_UNIT_WIDTH == 16
jump[0] = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xd800);
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0xd800);
OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
JUMPHERE(jump[0]);
return cc;
#endif /* PCRE2_CODE_UNIT_WIDTH == [8|16] */
}
#endif /* SUPPORT_UNICODE */
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
return cc;
case OP_ANYBYTE:
if (check_str_ptr)
detect_partial_match(common, backtracks);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
return cc;
#ifdef SUPPORT_UNICODE
case OP_NOTPROP:
case OP_PROP:
propdata[0] = XCL_HASPROP;
propdata[1] = type == OP_NOTPROP ? XCL_NOTPROP : XCL_PROP;
propdata[2] = cc[0];
propdata[3] = cc[1];
propdata[4] = XCL_END;
if (check_str_ptr)
detect_partial_match(common, backtracks);
compile_xclass_matchingpath(common, propdata, backtracks);
return cc + 2;
#endif
case OP_ANYNL:
if (check_str_ptr)
detect_partial_match(common, backtracks);
read_char(common, common->bsr_nlmin, common->bsr_nlmax, NULL, 0);
jump[0] = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR);
/* We don't need to handle soft partial matching case. */
end_list = NULL;
if (common->mode != PCRE2_JIT_PARTIAL_HARD)
add_jump(compiler, &end_list, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0));
else
check_str_end(common, &end_list);
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0);
jump[1] = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
jump[2] = JUMP(SLJIT_JUMP);
JUMPHERE(jump[0]);
check_newlinechar(common, common->bsr_nltype, backtracks, FALSE);
set_jumps(end_list, LABEL());
JUMPHERE(jump[1]);
JUMPHERE(jump[2]);
return cc;
case OP_NOT_HSPACE:
case OP_HSPACE:
if (check_str_ptr)
detect_partial_match(common, backtracks);
if (type == OP_NOT_HSPACE)
read_char(common, 0x9, 0x3000, backtracks, READ_CHAR_UPDATE_STR_PTR);
else
read_char(common, 0x9, 0x3000, NULL, 0);
add_jump(compiler, &common->hspace, JUMP(SLJIT_FAST_CALL));
sljit_set_current_flags(compiler, SLJIT_SET_Z);
add_jump(compiler, backtracks, JUMP(type == OP_NOT_HSPACE ? SLJIT_NOT_ZERO : SLJIT_ZERO));
return cc;
case OP_NOT_VSPACE:
case OP_VSPACE:
if (check_str_ptr)
detect_partial_match(common, backtracks);
if (type == OP_NOT_VSPACE)
read_char(common, 0xa, 0x2029, backtracks, READ_CHAR_UPDATE_STR_PTR);
else
read_char(common, 0xa, 0x2029, NULL, 0);
add_jump(compiler, &common->vspace, JUMP(SLJIT_FAST_CALL));
sljit_set_current_flags(compiler, SLJIT_SET_Z);
add_jump(compiler, backtracks, JUMP(type == OP_NOT_VSPACE ? SLJIT_NOT_ZERO : SLJIT_ZERO));
return cc;
#ifdef SUPPORT_UNICODE
case OP_EXTUNI:
if (check_str_ptr)
detect_partial_match(common, backtracks);
SLJIT_ASSERT(TMP1 == SLJIT_R0 && STR_PTR == SLJIT_R1);
OP1(SLJIT_MOV, SLJIT_R0, 0, ARGUMENTS, 0);
#if PCRE2_CODE_UNIT_WIDTH != 32
sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS2(W, W, W), SLJIT_IMM,
common->utf ? (common->invalid_utf ? SLJIT_FUNC_ADDR(do_extuni_utf_invalid) : SLJIT_FUNC_ADDR(do_extuni_utf)) : SLJIT_FUNC_ADDR(do_extuni_no_utf));
if (common->invalid_utf)
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0));
#else
sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS2(W, W, W), SLJIT_IMM,
common->invalid_utf ? SLJIT_FUNC_ADDR(do_extuni_utf_invalid) : SLJIT_FUNC_ADDR(do_extuni_no_utf));
if (!common->utf || common->invalid_utf)
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0));
#endif
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0);
if (common->mode == PCRE2_JIT_PARTIAL_HARD)
{
jump[0] = CMP(SLJIT_LESS, SLJIT_RETURN_REG, 0, STR_END, 0);
/* Since we successfully read a char above, partial matching must occure. */
check_partial(common, TRUE);
JUMPHERE(jump[0]);
}
return cc;
#endif
case OP_CHAR:
case OP_CHARI:
length = 1;
#ifdef SUPPORT_UNICODE
if (common->utf && HAS_EXTRALEN(*cc)) length += GET_EXTRALEN(*cc);
#endif
if (check_str_ptr && common->mode != PCRE2_JIT_COMPLETE)
detect_partial_match(common, backtracks);
if (type == OP_CHAR || !char_has_othercase(common, cc) || char_get_othercase_bit(common, cc) != 0)
{
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(length));
if (length > 1 || (check_str_ptr && common->mode == PCRE2_JIT_COMPLETE))
add_jump(compiler, backtracks, CMP(SLJIT_GREATER, STR_PTR, 0, STR_END, 0));
context.length = IN_UCHARS(length);
context.sourcereg = -1;
#if defined SLJIT_UNALIGNED && SLJIT_UNALIGNED
context.ucharptr = 0;
#endif
return byte_sequence_compare(common, type == OP_CHARI, cc, &context, backtracks);
}
#ifdef SUPPORT_UNICODE
if (common->utf)
{
GETCHAR(c, cc);
}
else
#endif
c = *cc;
SLJIT_ASSERT(type == OP_CHARI && char_has_othercase(common, cc));
if (check_str_ptr && common->mode == PCRE2_JIT_COMPLETE)
add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0));
oc = char_othercase(common, c);
read_char(common, c < oc ? c : oc, c > oc ? c : oc, NULL, 0);
SLJIT_ASSERT(!is_powerof2(c ^ oc));
if (sljit_has_cpu_feature(SLJIT_HAS_CMOV))
{
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, oc);
CMOV(SLJIT_EQUAL, TMP1, SLJIT_IMM, c);
add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, c));
}
else
{
jump[0] = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c);
add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, oc));
JUMPHERE(jump[0]);
}
return cc + length;
case OP_NOT:
case OP_NOTI:
if (check_str_ptr)
detect_partial_match(common, backtracks);
length = 1;
#ifdef SUPPORT_UNICODE
if (common->utf)
{
#if PCRE2_CODE_UNIT_WIDTH == 8
c = *cc;
if (c < 128 && !common->invalid_utf)
{
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(STR_PTR), 0);
if (type == OP_NOT || !char_has_othercase(common, cc))
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c));
else
{
/* Since UTF8 code page is fixed, we know that c is in [a-z] or [A-Z] range. */
OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x20);
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, c | 0x20));
}
/* Skip the variable-length character. */
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
jump[0] = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xc0);
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
JUMPHERE(jump[0]);
return cc + 1;
}
else
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
{
GETCHARLEN(c, cc, length);
}
}
else
#endif /* SUPPORT_UNICODE */
c = *cc;
if (type == OP_NOT || !char_has_othercase(common, cc))
{
read_char(common, c, c, backtracks, READ_CHAR_UPDATE_STR_PTR);
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c));
}
else
{
oc = char_othercase(common, c);
read_char(common, c < oc ? c : oc, c > oc ? c : oc, backtracks, READ_CHAR_UPDATE_STR_PTR);
bit = c ^ oc;
if (is_powerof2(bit))
{
OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, bit);
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c | bit));
}
else
{
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c));
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, oc));
}
}
return cc + length;
case OP_CLASS:
case OP_NCLASS:
if (check_str_ptr)
detect_partial_match(common, backtracks);
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8
bit = (common->utf && is_char7_bitset((const sljit_u8 *)cc, type == OP_NCLASS)) ? 127 : 255;
if (type == OP_NCLASS)
read_char(common, 0, bit, backtracks, READ_CHAR_UPDATE_STR_PTR);
else
read_char(common, 0, bit, NULL, 0);
#else
if (type == OP_NCLASS)
read_char(common, 0, 255, backtracks, READ_CHAR_UPDATE_STR_PTR);
else
read_char(common, 0, 255, NULL, 0);
#endif
if (optimize_class(common, (const sljit_u8 *)cc, type == OP_NCLASS, FALSE, backtracks))
return cc + 32 / sizeof(PCRE2_UCHAR);
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8
jump[0] = NULL;
if (common->utf)
{
jump[0] = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, bit);
if (type == OP_CLASS)
{
add_jump(compiler, backtracks, jump[0]);
jump[0] = NULL;
}
}
#elif PCRE2_CODE_UNIT_WIDTH != 8
jump[0] = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255);
if (type == OP_CLASS)
{
add_jump(compiler, backtracks, jump[0]);
jump[0] = NULL;
}
#endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8 */
OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7);
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0);
add_jump(compiler, backtracks, JUMP(SLJIT_ZERO));
#if defined SUPPORT_UNICODE || PCRE2_CODE_UNIT_WIDTH != 8
if (jump[0] != NULL)
JUMPHERE(jump[0]);
#endif
return cc + 32 / sizeof(PCRE2_UCHAR);
#if defined SUPPORT_UNICODE || PCRE2_CODE_UNIT_WIDTH == 16 || PCRE2_CODE_UNIT_WIDTH == 32
case OP_XCLASS:
if (check_str_ptr)
detect_partial_match(common, backtracks);
compile_xclass_matchingpath(common, cc + LINK_SIZE, backtracks);
return cc + GET(cc, 0) - 1;
#endif
}
SLJIT_UNREACHABLE();
return cc;
}
| 0
|
436,157
|
/*
 * io_register_rsrc() - register a set of files or buffers with an io_uring
 * context from a userspace io_uring_rsrc_register descriptor.
 *
 * @ctx:  ring context the resources are attached to
 * @arg:  userspace pointer to a struct io_uring_rsrc_register
 * @size: size of the userspace structure; must match exactly
 * @type: IORING_RSRC_FILE or IORING_RSRC_BUFFER
 *
 * Returns 0 on success, -EINVAL on malformed input or unknown type,
 * -EFAULT if the descriptor cannot be copied from userspace.
 */
static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
unsigned int size, unsigned int type)
{
struct io_uring_rsrc_register rr;
/* keep it extendible */
if (size != sizeof(rr))
return -EINVAL;
memset(&rr, 0, sizeof(rr));
if (copy_from_user(&rr, arg, size))
return -EFAULT;
/* reserved fields must be zero so they can gain meaning later */
if (!rr.nr || rr.resv || rr.resv2)
return -EINVAL;
switch (type) {
case IORING_RSRC_FILE:
return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
rr.nr, u64_to_user_ptr(rr.tags));
case IORING_RSRC_BUFFER:
return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
rr.nr, u64_to_user_ptr(rr.tags));
}
return -EINVAL;
/* NOTE(review): the closing brace of this function appears truncated
 * in this chunk of the file. */
| 0
|
349,267
|
/*
 * check_name() - validate a single path component.
 *
 * Rejects "." and ".." components, rejects any embedded '/', and
 * requires the component's length to equal the recorded size.
 * Returns TRUE when the name is acceptable, FALSE otherwise.
 */
int check_name(char *name, int size)
{
	char *p = name;

	/* Reject "." and ".." (a leading ".." is first reduced to "."). */
	if (p[0] == '.') {
		if (p[1] == '.')
			p++;
		if (p[1] == '/' || p[1] == '\0')
			return FALSE;
	}

	/* Advance to the first '/' or the terminating NUL. */
	while (*p != '/' && *p != '\0')
		p++;

	/* A path separator inside a component is not allowed. */
	if (*p == '/')
		return FALSE;

	/* The scanned length must match the size recorded on disk. */
	return (p - name) == size ? TRUE : FALSE;
}
| 0
|
473,895
|
select_str_opcode(int mb_len, OnigDistance str_len, int ignore_case)
{
  /* Map an exact-string match onto the most specific opcode available
     for its character width (mb_len) and length (str_len). */
  if (ignore_case)
    return (str_len == 1) ? OP_EXACT1_IC : OP_EXACTN_IC;

  if (mb_len == 1) {
    /* Single-byte encodings have dedicated opcodes up to length 5. */
    switch (str_len) {
    case 1:  return OP_EXACT1;
    case 2:  return OP_EXACT2;
    case 3:  return OP_EXACT3;
    case 4:  return OP_EXACT4;
    case 5:  return OP_EXACT5;
    default: return OP_EXACTN;
    }
  }

  if (mb_len == 2) {
    /* Two-byte encodings have dedicated opcodes up to length 3. */
    switch (str_len) {
    case 1:  return OP_EXACTMB2N1;
    case 2:  return OP_EXACTMB2N2;
    case 3:  return OP_EXACTMB2N3;
    default: return OP_EXACTMB2N;
    }
  }

  if (mb_len == 3)
    return OP_EXACTMB3N;

  /* Arbitrary-width fallback. */
  return OP_EXACTMBN;
}
| 0
|
343,215
|
void dosize(const char *name)
{
struct stat st;
if (!name || !*name) {
addreply_noformat(501, MSG_MISSING_ARG);
} else if (stat(name, &st)) {
#ifdef DEBUG
if (debug != 0) {
addreply(0, "arg: %s, wd: %s", name, wd);
}
#endif
addreply_noformat(550, MSG_STAT_FAILURE2);
} else if (!S_ISREG(st.st_mode)) {
addreply_noformat(550, MSG_NOT_REGULAR_FILE);
} else {
addreply(213, "%llu", (unsigned long long) st.st_size);
}
}
| 0
|
513,206
|
/*
 * item_is_unsigned() - plugin system-variable check helper.
 * Reports whether the Item wrapped by a st_mysql_value has its
 * unsigned_flag set (i.e. the supplied value is an unsigned number).
 */
static int item_is_unsigned(struct st_mysql_value *value)
{
Item *item= ((st_item_value_holder*)value)->item;
return item->unsigned_flag;
}
| 0
|
253,547
|
/*
 * smb2_find_mid() - locate the pending mid for a received SMB2 buffer.
 * Thin wrapper around __smb2_find_mid() with the third argument
 * (presumably a "decrypted/compounded" flag — confirm at the callee)
 * fixed to false.
 */
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
return __smb2_find_mid(server, buf, false);
}
| 0
|
308,176
|
/*
 * fastrpc_init_create_process() - create a user process domain on the
 * remote DSP for this fastrpc client.
 *
 * Copies a fastrpc_init_create descriptor from userspace, optionally maps
 * the supplied executable file (filefd/filelen), allocates the remote
 * init memory, and issues the INIT_CREATE (or INIT_CREATE_ATTR) remote
 * invocation with the marshalled argument list.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here (args, map, imem) are released via the goto chain.
 */
static int fastrpc_init_create_process(struct fastrpc_user *fl,
char __user *argp)
{
struct fastrpc_init_create init;
struct fastrpc_invoke_args *args;
struct fastrpc_phy_page pages[1];
struct fastrpc_map *map = NULL;
struct fastrpc_buf *imem = NULL;
int memlen;
int err;
/* Wire format of argument 0, consumed by the remote side. */
struct {
int pgid;
u32 namelen;
u32 filelen;
u32 pageslen;
u32 attrs;
u32 siglen;
} inbuf;
u32 sc;
args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
if (!args)
return -ENOMEM;
if (copy_from_user(&init, argp, sizeof(init))) {
err = -EFAULT;
goto err;
}
/* Bound the user-supplied file length before any allocation sizing. */
if (init.filelen > INIT_FILELEN_MAX) {
err = -EINVAL;
goto err;
}
inbuf.pgid = fl->tgid;
inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init.filelen;
inbuf.pageslen = 1;
inbuf.attrs = init.attrs;
inbuf.siglen = init.siglen;
fl->pd = 1;
/* Map the executable image only when the caller actually supplied one. */
if (init.filelen && init.filefd) {
err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
if (err)
goto err;
}
/* Remote init memory: at least INIT_FILELEN_MAX, rounded up to 1 MiB. */
memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
1024 * 1024);
err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
&imem);
if (err)
goto err_alloc;
fl->init_mem = imem;
/* args[0]: the inbuf header above. */
args[0].ptr = (u64)(uintptr_t)&inbuf;
args[0].length = sizeof(inbuf);
args[0].fd = -1;
/* args[1]: current task name (process name on the DSP). */
args[1].ptr = (u64)(uintptr_t)current->comm;
args[1].length = inbuf.namelen;
args[1].fd = -1;
/* args[2]: the executable file, passed by fd + user pointer. */
args[2].ptr = (u64) init.file;
args[2].length = inbuf.filelen;
args[2].fd = init.filefd;
/* args[3]: physical page describing the init memory buffer. */
pages[0].addr = imem->phys;
pages[0].size = imem->size;
args[3].ptr = (u64)(uintptr_t) pages;
args[3].length = 1 * sizeof(*pages);
args[3].fd = -1;
args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
args[4].length = sizeof(inbuf.attrs);
args[4].fd = -1;
args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
args[5].length = sizeof(inbuf.siglen);
args[5].fd = -1;
/* Plain create uses 4 input args; the ATTR variant sends all 6. */
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
if (init.attrs)
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, args);
if (err)
goto err_invoke;
kfree(args);
return 0;
err_invoke:
fl->init_mem = NULL;
fastrpc_buf_free(imem);
err_alloc:
/* Undo the file mapping; it was linked under fl->lock at creation. */
if (map) {
spin_lock(&fl->lock);
list_del(&map->node);
spin_unlock(&fl->lock);
fastrpc_map_put(map);
}
err:
kfree(args);
return err;
}
| 0
|
225,071
|
PQconnectionNeedsPassword(const PGconn *conn)
{
	char	   *password;

	if (conn == NULL)
		return false;

	password = PQpass(conn);
	/* "Needs a password" means the server demanded one and the caller
	 * supplied none (NULL or empty). */
	return conn->password_needed &&
		(password == NULL || password[0] == '\0');
}
| 0
|
282,865
|
/*
 * rsi_send_block_unblock_frame() - ask the firmware to block or unblock
 * the data queues.
 *
 * @common:      driver private state
 * @block_event: true to block the data queues, false to unblock them
 *
 * Builds a single-descriptor BLOCK_HW_QUEUE management frame and hands
 * it to the internal management TX path.  Returns 0 on success or
 * -ENOMEM when the skb cannot be allocated.
 */
int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event)
{
struct rsi_block_unblock_data *mgmt_frame;
struct sk_buff *skb;
rsi_dbg(MGMT_TX_ZONE, "%s: Sending block/unblock frame\n", __func__);
skb = dev_alloc_skb(FRAME_DESC_SZ);
if (!skb) {
rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
__func__);
return -ENOMEM;
}
memset(skb->data, 0, FRAME_DESC_SZ);
mgmt_frame = (struct rsi_block_unblock_data *)skb->data;
rsi_set_len_qno(&mgmt_frame->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q);
mgmt_frame->desc_dword0.frame_type = BLOCK_HW_QUEUE;
mgmt_frame->host_quiet_info = QUIET_INFO_VALID;
/* Bits 0-3 and 4-7 select the queue groups; 0xff covers all of them. */
if (block_event) {
rsi_dbg(INFO_ZONE, "blocking the data qs\n");
mgmt_frame->block_q_bitmap = cpu_to_le16(0xf);
mgmt_frame->block_q_bitmap |= cpu_to_le16(0xf << 4);
} else {
rsi_dbg(INFO_ZONE, "unblocking the data qs\n");
mgmt_frame->unblock_q_bitmap = cpu_to_le16(0xf);
mgmt_frame->unblock_q_bitmap |= cpu_to_le16(0xf << 4);
}
skb_put(skb, FRAME_DESC_SZ);
return rsi_send_internal_mgmt_frame(common, skb);
}
| 0
|
508,898
|
/*
  Report whether the current SQL command needs only the view's
  definition (metadata), not the data of its underlying tables.
*/
bool LEX::only_view_structure()
{
  switch (sql_command) {
  case SQLCOM_SHOW_CREATE:
  case SQLCOM_SHOW_TABLES:
  case SQLCOM_SHOW_FIELDS:
  case SQLCOM_REVOKE_ALL:
  case SQLCOM_REVOKE:
  case SQLCOM_GRANT:
  case SQLCOM_CREATE_VIEW:
    return TRUE;
  default:
    break;
  }
  return FALSE;
}
| 0
|
259,242
|
/*
 * mov_read_dac3() - parse the 'dac3' (AC-3 specific) atom.
 *
 * Reads the 24-bit ac3info field, extracts bsmod/acmod/lfeon, derives
 * the channel layout for the last opened stream, and stores the bit
 * stream mode as AV_PKT_DATA_AUDIO_SERVICE_TYPE side data.
 *
 * Returns 0 (also when no stream exists yet) or AVERROR(ENOMEM).
 */
static int mov_read_dac3(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
enum AVAudioServiceType *ast;
int ac3info, acmod, lfeon, bsmod;
uint64_t mask;
if (c->fc->nb_streams < 1)
return 0;
/* The atom always applies to the most recently created stream. */
st = c->fc->streams[c->fc->nb_streams-1];
ast = (enum AVAudioServiceType*)av_stream_new_side_data(st, AV_PKT_DATA_AUDIO_SERVICE_TYPE,
sizeof(*ast));
if (!ast)
return AVERROR(ENOMEM);
/* ac3info bit layout: [23:16] fscod/bsid, [15:13]? — the fields used
 * here are bsmod (bits 16..14), acmod (13..11) and lfeon (10). */
ac3info = avio_rb24(pb);
bsmod = (ac3info >> 14) & 0x7;
acmod = (ac3info >> 11) & 0x7;
lfeon = (ac3info >> 10) & 0x1;
mask = ff_ac3_channel_layout_tab[acmod];
if (lfeon)
mask |= AV_CH_LOW_FREQUENCY;
av_channel_layout_uninit(&st->codecpar->ch_layout);
av_channel_layout_from_mask(&st->codecpar->ch_layout, mask);
*ast = bsmod;
/* bsmod 7 with more than one channel means karaoke service. */
if (st->codecpar->ch_layout.nb_channels > 1 && bsmod == 0x7)
*ast = AV_AUDIO_SERVICE_TYPE_KARAOKE;
return 0;
}
| 0
|
441,816
|
/*
 * SProcXkbUseExtension() - byte-swapped entry point for XkbUseExtension.
 * Swaps the request fields to host order and forwards to the native
 * handler.  The length is swapped *before* REQUEST_SIZE_MATCH so the
 * size check operates on a host-order value.
 */
SProcXkbUseExtension(ClientPtr client)
{
REQUEST(xkbUseExtensionReq);
swaps(&stuff->length);
REQUEST_SIZE_MATCH(xkbUseExtensionReq);
swaps(&stuff->wantedMajor);
swaps(&stuff->wantedMinor);
return ProcXkbUseExtension(client);
}
| 0
|
225,600
|
/*
 * subs_box_size() - compute the serialized size of a SubSampleInformation
 * ('subs') box.  Each entry costs 4 (sample_delta) + 2 (subsample_count)
 * bytes plus per-subsample records whose size depends on the box version
 * (version 1 uses 32-bit subsample sizes, version 0 uses 16-bit).
 */
GF_Err subs_box_size(GF_Box *s)
{
GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s;
u32 entry_count, i;
u16 subsample_count;
// add 4 byte for entry_count
ptr->size += 4;
entry_count = gf_list_count(ptr->Samples);
for (i=0; i<entry_count; i++) {
GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i);
subsample_count = gf_list_count(pSamp->SubSamples);
// 4 byte for sample_delta, 2 byte for subsample_count
// and 6 + (4 or 2) bytes for each subsample
ptr->size += 4 + 2 + subsample_count * (6 + (ptr->version==1 ? 4 : 2));
}
return GF_OK;
/* NOTE(review): the closing brace of this function appears truncated
 * in this chunk of the file. */
| 0
|
317,070
|
/*
 * smack_task_setscheduler() - Smack LSM hook for sched_setscheduler().
 * Changing another task's scheduling policy requires write access to
 * that task's label.
 */
static int smack_task_setscheduler(struct task_struct *p)
{
return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
| 0
|
210,904
|
/*
 * warnf() - print a formatted warning unless output is muted.
 *
 * Formats the message into a fixed 256-byte buffer and word-wraps it at
 * WARN_TEXTWIDTH columns, prefixing each emitted line with WARN_PREFIX.
 * Messages longer than the buffer are truncated.
 *
 * Fixes vs. the previous version:
 *  - va_start() was called twice in a row without an intervening
 *    va_end(), which is undefined behavior (C11 7.16.1.4).
 *  - vsnprintf() returns the length the *untruncated* string would
 *    have had; that value is now clamped to what is actually stored,
 *    so the wrap loop cannot walk past the formatted text.
 */
static void warnf(struct Configurable *config, const char *fmt, ...)
{
  if(!(config->conf & CONF_MUTE)) {
    va_list ap;
    int len;
    char *ptr;
    char print_buffer[256];

    va_start(ap, fmt);
    len = vsnprintf(print_buffer, sizeof(print_buffer), fmt, ap);
    va_end(ap);

    /* clamp to the number of characters actually in the buffer */
    if(len < 0)
      len = 0; /* encoding error: nothing usable was produced */
    else if(len >= (int)sizeof(print_buffer))
      len = (int)sizeof(print_buffer) - 1;

    ptr = print_buffer;
    while(len > 0) {
      fputs(WARN_PREFIX, config->errors);
      if(len > (int)WARN_TEXTWIDTH) {
        int cut = WARN_TEXTWIDTH-1;
        /* break the line at the last whitespace that fits */
        while(!ISSPACE(ptr[cut]) && cut) {
          cut--;
        }
        fwrite(ptr, cut + 1, 1, config->errors);
        fputs("\n", config->errors);
        ptr += cut+1; /* skip the space too */
        len -= cut;
      }
      else {
        fputs(ptr, config->errors);
        len = 0;
      }
    }
  }
}
| 1
|
294,718
|
c_gregorian_leap_p(int y)
{
    /* Gregorian leap year: divisible by 4 but not by 100, or by 400.
       (For the "not by 100" test, % and floored MOD agree on zero-ness.) */
    if (MOD(y, 400) == 0)
	return 1;
    return MOD(y, 4) == 0 && y % 100 != 0;
}
| 0
|
512,645
|
/*
  IFNULL(expr1, expr2), string flavor: return expr1 if it is not NULL,
  otherwise expr2.  Sets null_value when the chosen argument is NULL and
  forces the result into this item's resolved collation.
*/
Item_func_ifnull::str_op(String *str)
{
DBUG_ASSERT(fixed == 1);
String *res =args[0]->val_str(str);
if (!args[0]->null_value)
{
null_value=0;
res->set_charset(collation.collation);
return res;
}
/* First argument was NULL: fall back to the second. */
res=args[1]->val_str(str);
if ((null_value=args[1]->null_value))
return 0;
res->set_charset(collation.collation);
return res;
}
| 0
|
294,722
|
/*
 * local_df() - return the day fraction of a complex DateData converted
 * from UTC to its own local offset.  Callers must guarantee the value
 * is complex and carries a day fraction (asserted below).
 */
local_df(union DateData *x)
{
assert(complex_dat_p(x));
assert(have_df_p(x));
return df_utc_to_local(x->c.df, x->c.of);
}
| 0
|
313,749
|
/*
 * nv_gv_cmd() - implement "gv": reselect the previous Visual area.
 *
 * If Visual mode is already active the current and saved Visual areas
 * (mode, curswant, start/end) are swapped so repeating "gv" toggles
 * between them; otherwise the saved area is simply restored.  cap->arg
 * non-zero forces Select mode (used by K_SELECT).
 */
nv_gv_cmd(cmdarg_T *cap)
{
pos_T tpos;
int i;
if (checkclearop(cap->oap))
return;
/* Refuse when no previous Visual area exists or it is out of date. */
if (curbuf->b_visual.vi_start.lnum == 0
|| curbuf->b_visual.vi_start.lnum > curbuf->b_ml.ml_line_count
|| curbuf->b_visual.vi_end.lnum == 0)
{
beep_flush();
return;
}
// set w_cursor to the start of the Visual area, tpos to the end
if (VIsual_active)
{
/* Swap current and saved Visual state field by field. */
i = VIsual_mode;
VIsual_mode = curbuf->b_visual.vi_mode;
curbuf->b_visual.vi_mode = i;
# ifdef FEAT_EVAL
curbuf->b_visual_mode_eval = i;
# endif
i = curwin->w_curswant;
curwin->w_curswant = curbuf->b_visual.vi_curswant;
curbuf->b_visual.vi_curswant = i;
tpos = curbuf->b_visual.vi_end;
curbuf->b_visual.vi_end = curwin->w_cursor;
curwin->w_cursor = curbuf->b_visual.vi_start;
curbuf->b_visual.vi_start = VIsual;
}
else
{
/* Not in Visual mode: just restore the saved area. */
VIsual_mode = curbuf->b_visual.vi_mode;
curwin->w_curswant = curbuf->b_visual.vi_curswant;
tpos = curbuf->b_visual.vi_end;
curwin->w_cursor = curbuf->b_visual.vi_start;
}
VIsual_active = TRUE;
VIsual_reselect = TRUE;
// Set Visual to the start and w_cursor to the end of the Visual
// area.  Make sure they are on an existing character.
check_cursor();
VIsual = curwin->w_cursor;
curwin->w_cursor = tpos;
check_cursor();
update_topline();
// When called from normal "g" command: start Select mode when
// 'selectmode' contains "cmd".  When called for K_SELECT, always
// start Select mode.
if (cap->arg)
{
VIsual_select = TRUE;
VIsual_select_reg = 0;
}
else
may_start_select('c');
setmouse();
#ifdef FEAT_CLIPBOARD
// Make sure the clipboard gets updated.  Needed because start and
// end are still the same, and the selection needs to be owned
clip_star.vmode = NUL;
#endif
redraw_curbuf_later(INVERTED);
showmode();
}
| 0
|
317,316
|
/*
 * smack_msg_queue_msgctl() - Smack access check for msgctl() commands.
 * Stat-style commands need read access, modification commands need
 * read/write access, system-wide info commands need no check at all.
 */
static int smack_msg_queue_msgctl(struct kern_ipc_perm *isp, int cmd)
{
	/* System-level information: no object is involved. */
	if (cmd == IPC_INFO || cmd == MSG_INFO)
		return 0;

	if (cmd == IPC_STAT || cmd == MSG_STAT || cmd == MSG_STAT_ANY)
		return smk_curacc_msq(isp, MAY_READ);

	if (cmd == IPC_SET || cmd == IPC_RMID)
		return smk_curacc_msq(isp, MAY_READWRITE);

	return -EINVAL;
}
| 0
|
484,062
|
/*
 * Check-framework test: in SignAndEncrypt mode, sending a symmetric
 * message must succeed and must invoke both the signing and the
 * encryption callbacks (observed via the fCalled spy flags).
 */
START_TEST(SecureChannel_sendSymmetricMessage_modeSignAndEncrypt)
{
// initialize dummy message
UA_ReadRequest dummyMessage;
UA_ReadRequest_init(&dummyMessage);
UA_DataType dummyType = UA_TYPES[UA_TYPES_READREQUEST];
testChannel.securityMode = UA_MESSAGESECURITYMODE_SIGNANDENCRYPT;
UA_StatusCode retval = UA_SecureChannel_sendSymmetricMessage(&testChannel, 42, UA_MESSAGETYPE_MSG,
&dummyMessage, &dummyType);
ck_assert_msg(retval == UA_STATUSCODE_GOOD, "Expected success");
ck_assert_msg(fCalled.sym_sign, "Expected message to have been signed");
ck_assert_msg(fCalled.sym_enc, "Expected message to have been encrypted");
} END_TEST
| 0
|
359,213
|
/*
 * ringbuf_map_get_next_key() - key iteration is meaningless for a BPF
 * ring buffer map, so the operation is simply unsupported.
 */
static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
void *next_key)
{
return -ENOTSUPP;
}
| 0
|
310,191
|
/*
 * _nc_real_mvcur() - low-level physical cursor movement.
 *
 * Normalizes the old/new coordinates against the terminal boundaries
 * (wraparound, off-screen rows), temporarily drops attributes that
 * interfere with the CR/LF motions used locally, delegates the actual
 * optimized movement to onscreen_mvcur(), then restores attributes.
 * Returns OK/ERR.
 */
_nc_real_mvcur(NCURSES_SP_DCLx
int yold, int xold,
int ynew, int xnew,
NCURSES_SP_OUTC myOutCh,
int ovw)
{
NCURSES_CH_T oldattr;
int code;
TR(TRACE_CALLS | TRACE_MOVE, (T_CALLED("_nc_tinfo_mvcur(%p,%d,%d,%d,%d)"),
(void *) SP_PARM, yold, xold, ynew, xnew));
if (SP_PARM == 0) {
code = ERR;
} else if (yold == ynew && xold == xnew) {
/* already there: nothing to emit */
code = OK;
} else {
/*
* Most work here is rounding for terminal boundaries getting the
* column position implied by wraparound or the lack thereof and
* rolling up the screen to get ynew on the screen.
*/
if (xnew >= screen_columns(SP_PARM)) {
ynew += xnew / screen_columns(SP_PARM);
xnew %= screen_columns(SP_PARM);
}
/*
* Force restore even if msgr is on when we're in an alternate
* character set -- these have a strong tendency to screw up the CR &
* LF used for local character motions!
*/
oldattr = SCREEN_ATTRS(SP_PARM);
if ((AttrOf(oldattr) & A_ALTCHARSET)
|| (AttrOf(oldattr) && !move_standout_mode)) {
TR(TRACE_CHARPUT, ("turning off (%#lx) %s before move",
(unsigned long) AttrOf(oldattr),
_traceattr(AttrOf(oldattr))));
VIDPUTS(SP_PARM, A_NORMAL, 0);
}
if (xold >= screen_columns(SP_PARM)) {
if (SP_PARM->_nl) {
/* resolve the implied wrap with explicit CR + newlines */
int l = (xold + 1) / screen_columns(SP_PARM);
yold += l;
if (yold >= screen_lines(SP_PARM))
l -= (yold - screen_lines(SP_PARM) - 1);
if (l > 0) {
if (carriage_return) {
NCURSES_PUTP2("carriage_return", carriage_return);
} else {
myOutCh(NCURSES_SP_ARGx '\r');
}
xold = 0;
while (l > 0) {
if (newline) {
NCURSES_PUTP2("newline", newline);
} else {
myOutCh(NCURSES_SP_ARGx '\n');
}
l--;
}
}
} else {
/*
* If caller set nonl(), we cannot really use newlines to
* position to the next row.
*/
xold = -1;
yold = -1;
}
}
/* clip rows to the physical screen */
if (yold > screen_lines(SP_PARM) - 1)
yold = screen_lines(SP_PARM) - 1;
if (ynew > screen_lines(SP_PARM) - 1)
ynew = screen_lines(SP_PARM) - 1;
/* destination location is on screen now */
code = onscreen_mvcur(NCURSES_SP_ARGx yold, xold, ynew, xnew, ovw, myOutCh);
/*
* Restore attributes if we disabled them before moving.
*/
if (!SameAttrOf(oldattr, SCREEN_ATTRS(SP_PARM))) {
TR(TRACE_CHARPUT, ("turning on (%#lx) %s after move",
(unsigned long) AttrOf(oldattr),
_traceattr(AttrOf(oldattr))));
VIDPUTS(SP_PARM, AttrOf(oldattr), GetPair(oldattr));
}
}
returnCode(code);
}
| 0
|
318,104
|
/*
 * rsi_usb_read_register_multiple() - read @count bytes of device
 * registers starting at @addr into @data, in RSI_USB_BUF_SIZE chunks
 * through a bounce buffer (required for USB control transfers).
 *
 * Returns 0 on success, -EINVAL for a zero address, -ENOMEM when the
 * bounce buffer cannot be allocated, or the usb_control_msg() error.
 */
static int rsi_usb_read_register_multiple(struct rsi_hw *adapter, u32 addr,
u8 *data, u16 count)
{
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
u8 *buf;
u16 transfer;
int status;
if (!addr)
return -EINVAL;
buf = kzalloc(RSI_USB_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
while (count) {
transfer = min_t(u16, count, RSI_USB_BUF_SIZE);
/* The 32-bit register address is split across wValue/wIndex. */
status = usb_control_msg(dev->usbdev,
usb_rcvctrlpipe(dev->usbdev, 0),
USB_VENDOR_REGISTER_READ,
RSI_USB_REQ_IN,
((addr & 0xffff0000) >> 16),
(addr & 0xffff), (void *)buf,
transfer, USB_CTRL_GET_TIMEOUT);
if (status < 0) {
rsi_dbg(ERR_ZONE,
"Reg read failed with error code :%d\n",
status);
kfree(buf);
return status;
}
memcpy(data, buf, transfer);
count -= transfer;
data += transfer;
addr += transfer;
}
kfree(buf);
return 0;
}
| 0
|
486,813
|
/*
 * gem_receive_updatestats() - update the GEM RX statistics registers
 * for one successfully received frame of @bytes octets.
 */
static void gem_receive_updatestats(CadenceGEMState *s, const uint8_t *packet,
                                    unsigned bytes)
{
    uint64_t total_octets;

    /* Total octets (bytes) received: a 64-bit counter split across
     * the LO (upper word) and HI (lower word) register pair. */
    total_octets = ((uint64_t)(s->regs[GEM_OCTRXLO]) << 32) |
                   s->regs[GEM_OCTRXHI];
    total_octets += bytes;
    s->regs[GEM_OCTRXLO] = total_octets >> 32;
    s->regs[GEM_OCTRXHI] = total_octets;

    /* Error-free frames received. */
    s->regs[GEM_RXCNT]++;

    /* Error-free broadcast frames. */
    if (!memcmp(packet, broadcast_addr, 6)) {
        s->regs[GEM_RXBROADCNT]++;
    }
    /* Error-free multicast frames (I/G bit of the first address byte). */
    if (packet[0] == 0x01) {
        s->regs[GEM_RXMULTICNT]++;
    }

    /* Frame-size histogram, checked from the largest bucket down. */
    if (bytes > 1518) {
        s->regs[GEM_RX1519CNT]++;
    } else if (bytes > 1023) {
        s->regs[GEM_RX1024CNT]++;
    } else if (bytes > 511) {
        s->regs[GEM_RX512CNT]++;
    } else if (bytes > 255) {
        s->regs[GEM_RX256CNT]++;
    } else if (bytes > 127) {
        s->regs[GEM_RX128CNT]++;
    } else if (bytes > 64) {
        s->regs[GEM_RX65CNT]++;
    } else {
        s->regs[GEM_RX64CNT]++;
    }
}
| 0
|
446,056
|
/*
 * codeLoop() - report a corrupt LZW stream whose code table contains a
 * cycle (which would otherwise make decoding loop forever).
 */
codeLoop(TIFF* tif, const char* module)
{
TIFFErrorExt(tif->tif_clientdata, module,
"Bogus encoding, loop in the code table; scanline %d",
tif->tif_row);
}
| 0
|
222,555
|
// Lexicographic ordering: key name, then key suffix, then value.
bool operator<(const AttrKeyAndValue& b) const {
  if (key_name_ != b.key_name_) return key_name_ < b.key_name_;
  if (key_suffix_ != b.key_suffix_) return key_suffix_ < b.key_suffix_;
  return value_ < b.value_;
}
| 0
|
317,101
|
/*
 * smk_bu_note() - "bring-up" mode logging for a subject/object access
 * check.  Only logs denials (rc > 0 after the check); always returns 0
 * so bring-up mode never actually denies access.
 */
static int smk_bu_note(char *note, struct smack_known *sskp,
struct smack_known *oskp, int mode, int rc)
{
char acc[SMK_NUM_ACCESS_TYPE + 1];
if (rc <= 0)
return rc;
/* values above SMACK_UNCONFINED_OBJECT index the generic message */
if (rc > SMACK_UNCONFINED_OBJECT)
rc = 0;
smk_bu_mode(mode, acc);
pr_info("Smack %s: (%s %s %s) %s\n", smk_bu_mess[rc],
sskp->smk_known, oskp->smk_known, acc, note);
return 0;
}
|
486,827
|
/*
 * gem_phy_read() - return the value of the emulated PHY register
 * @reg_num, with debug tracing.
 */
static uint16_t gem_phy_read(CadenceGEMState *s, unsigned reg_num)
{
DB_PRINT("reg: %d value: 0x%04x\n", reg_num, s->phy_regs[reg_num]);
return s->phy_regs[reg_num];
}
| 0
|
225,838
|
/*
 * co64_box_new() - allocate a 64-bit chunk-offset ('co64') box via the
 * standard box-allocation macro (which declares and initializes 'tmp').
 */
GF_Box *co64_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_ChunkLargeOffsetBox, GF_ISOM_BOX_TYPE_CO64);
return (GF_Box *)tmp;
}
| 0
|
246,654
|
/*
 * naldmx_bs_log() - bitstream-parser logging callback: emit
 * ` name[_i[_j[_k]]]="value"` (plus "(bits)" at debug level 2) for one
 * parsed field.  Negative indices mark unused dimensions.
 */
static void naldmx_bs_log(void *udta, const char *field_name, u32 nb_bits, u64 field_val, s32 idx1, s32 idx2, s32 idx3)
{
	GF_NALUDmxCtx *ctx = (GF_NALUDmxCtx *) udta;
	s32 indices[3];
	u32 i;

	indices[0] = idx1;
	indices[1] = idx2;
	indices[2] = idx3;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, (" %s", field_name));
	/* Append "_N" for each used index, stopping at the first negative. */
	for (i = 0; i < 3; i++) {
		if (indices[i] < 0)
			break;
		GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("_%d", indices[i]));
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("=\""LLD, field_val));
	if ((ctx->bsdbg == 2) && ((s32) nb_bits > 1))
		GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("(%u)", nb_bits));
	GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("\" "));
}
| 0
|
328,991
|
/*
 * Parse a Synthetic attribute.  It carries no payload beyond the common
 * 6-byte attribute header, so only the default attribute is built and
 * tagged.  Returns NULL when the buffer is too small or allocation fails.
 */
R_API RBinJavaAttrInfo *r_bin_java_synthetic_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	RBinJavaAttrInfo *attr;

	if (sz < 8) {
		return NULL;
	}
	attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	if (attr) {
		attr->type = R_BIN_JAVA_ATTR_TYPE_SYNTHETIC_ATTR;
		attr->size = 6;
	}
	return attr;
}
| 0
|
246,718
|
/*
 * strstr_nocase() - case-insensitive substring search.
 * Assumes @subtext is already lower-case (the first-character probe
 * compares tolower(text char) against the raw subtext char).
 * Returns GF_TRUE if @subtext occurs anywhere in @text.
 */
static Bool strstr_nocase(const char *text, const char *subtext, u32 subtext_len)
{
	const char *p;

	if (!*text || !subtext || !subtext_len)
		return GF_FALSE;

	for (p = text; *p; p++) {
		/* cheap single-character probe before the full comparison */
		if (tolower(*p) != *subtext)
			continue;
		if (!strnicmp(p, subtext, subtext_len))
			return GF_TRUE;
	}
	return GF_FALSE;
}
| 0
|
252,429
|
/*
 * tinfl_decompress_mem_to_heap() - inflate an in-memory buffer into a
 * heap buffer that grows geometrically (doubling, minimum 128 bytes).
 *
 * On success returns the malloc'd output (caller frees) and stores its
 * length in *pOut_len.  On any decompression error, or if the stream
 * claims to need more input than was supplied, returns NULL with
 * *pOut_len == 0.
 */
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    /* HAS_MORE_INPUT is cleared: the whole source is already present. */
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      /* corrupt or truncated stream: give everything back */
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE) break;
    /* output was full: double the buffer and continue */
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}
| 0
|
512,749
|
/*
  Fetch a date/time value from @item into @ltime using the given
  fuzzy-date flags.  Updates this object's null_value from both the
  call result and the item's own null flag.  Returns true on failure.
*/
bool get_date_from_item(THD *thd, Item *item,
MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
bool rc= item->get_date(thd, ltime, fuzzydate);
null_value= MY_TEST(rc || item->null_value);
return rc;
}
| 0
|
276,916
|
/*
 * decode_bits() - print the label for each bit set in @b, scanning from
 * the most significant bit down; str[] supplies one label per bit
 * position.  When @do_once is non-zero, stop after the first match.
 */
static void decode_bits (u_char const b, char const *str[], int const do_once)
{
    u_char mask = 0x80;

    while (mask != 0x00) {
	if ((b & mask) != 0) {
	    puts (*str);
	    if (do_once)
		return;
	}
	mask >>= 1;
	++str;
    }
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.