idx
int64 | func
string | target
int64 |
|---|---|---|
55,347
|
/*
 * expire_timers - run every timer queued on @head.
 *
 * Called with base->lock held (it is unlocked, not locked, first below).
 * The lock is dropped around each callback so the timer function may
 * itself operate on the base; TIMER_IRQSAFE callbacks run via the
 * non-irq unlock/lock pair, all others via the _irq variants.
 */
static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
	/*
	 * This value is required only for tracing. base->clk was
	 * incremented directly before expire_timers was called. But expiry
	 * is related to the old base->clk value.
	 */
	unsigned long baseclk = base->clk - 1;

	while (!hlist_empty(head)) {
		struct timer_list *timer;
		void (*fn)(struct timer_list *);

		/* Pop the first pending timer and mark it as running. */
		timer = hlist_entry(head->first, struct timer_list, entry);
		base->running_timer = timer;
		detach_timer(timer, true);

		fn = timer->function;

		if (timer->flags & TIMER_IRQSAFE) {
			raw_spin_unlock(&base->lock);
			call_timer_fn(timer, fn, baseclk);
			base->running_timer = NULL;
			raw_spin_lock(&base->lock);
		} else {
			raw_spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, baseclk);
			base->running_timer = NULL;
			/* Wake anyone waiting for the callback to finish. */
			timer_sync_wait_running(base);
			raw_spin_lock_irq(&base->lock);
		}
	}
}
| 0
|
405,033
|
/*
 * Send an L2CAP Move Channel Confirmation for @chan carrying @result,
 * then (re)arm the channel timer with the move timeout.
 */
static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	/* Allocate a fresh command identifier for this signalling exchange. */
	chan->ident = l2cap_get_ident(chan->conn);

	/* Wire format is little-endian. */
	cfm.icid = cpu_to_le16(chan->scid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
| 0
|
326,196
|
/*
 * Resize @ring to hold @size descriptors.
 *
 * @size must be a power of two within [2, 64K]; otherwise the ring is
 * left untouched and false is returned.  Any buffers attached to the
 * current descriptors are released, the descriptor array is grown or
 * shrunk, zero-initialized, and each entry's back-pointer is reset.
 *
 * Returns true on success.
 */
bool desc_ring_set_size(DescRing *ring, uint32_t size)
{
    int i;

    if (size < 2 || size > 0x10000 || (size & (size - 1))) {
        /* size is uint32_t, so print it with %u (was %d: format mismatch). */
        DPRINTF("ERROR: ring[%d] size (%u) not a power of 2 "
                "or in range [2, 64K]\n", ring->index, size);
        return false;
    }

    /* Release buffers still attached to the old descriptors.
       g_free(NULL) is a no-op, so no guard is needed. */
    for (i = 0; i < ring->size; i++) {
        g_free(ring->info[i].buf);
    }

    ring->size = size;
    ring->head = ring->tail = 0;

    ring->info = g_realloc(ring->info, size * sizeof(DescInfo));
    if (!ring->info) {
        /* Defensive only: g_realloc aborts on OOM rather than
           returning NULL for a non-zero size. */
        return false;
    }

    memset(ring->info, 0, size * sizeof(DescInfo));
    for (i = 0; i < size; i++) {
        ring->info[i].ring = ring;
    }

    return true;
}
| 0
|
206,431
|
static void cirrus_mmio_write(void *opaque, hwaddr addr,
                              uint64_t val, unsigned size)
{
    CirrusVGAState *s = opaque;

    /* Offsets below 0x100 alias the VGA I/O ports (shifted by 0x10);
       everything at or above 0x100 belongs to the blitter engine. */
    if (addr < 0x100) {
        cirrus_vga_ioport_write(s, addr + 0x10, val, size);
    } else {
        cirrus_mmio_blt_write(s, addr - 0x100, val);
    }
}
| 0
|
402,074
|
int
e1000e_core_post_load(E1000ECore *core)
{
NetClientState *nc = qemu_get_queue(core->owner_nic);
/* nc.link_down can't be migrated, so infer link_down according
* to link status bit in core.mac[STATUS].
*/
nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;
return 0;
| 0
|
19,302
|
/* Dissect a SEQUENCE OF MediaDistributionCapability by delegating to
   the generic PER sequence-of dissector; returns the updated offset. */
static int dissect_h245_SEQUENCE_OF_MediaDistributionCapability(tvbuff_t *tvb _U_, int offset _U_,
                                                                asn1_ctx_t *actx _U_,
                                                                proto_tree *tree _U_, int hf_index _U_)
{
    return dissect_per_sequence_of(tvb, offset, actx, tree, hf_index,
                                   ett_h245_SEQUENCE_OF_MediaDistributionCapability,
                                   SEQUENCE_OF_MediaDistributionCapability_sequence_of);
}
| 0
|
363,225
|
release_buffer (hb_buffer_t *buffer, gboolean free_buffer)
{
  /* Either destroy the buffer outright, or put the cached one back:
     reset its contents and drop the lock taken when it was acquired. */
  if (free_buffer)
    hb_buffer_free (buffer);
  else
    {
      hb_buffer_clear (buffer);
      G_UNLOCK (cached_buffer);
    }
}
| 0
|
217,872
|
/*
 * Render an audio-effect flags word as a human-readable String8.
 *
 * Each flag field (connection mode, insert preference, volume
 * management, device/mode/source indications, input/output mode,
 * hardware acceleration, offload) is decoded and appended as
 * "label: value, "; the trailing ", " is stripped before returning.
 */
String8 effectFlagsToString(uint32_t flags) {
    String8 s;

    s.append("conn. mode: ");
    switch (flags & EFFECT_FLAG_TYPE_MASK) {
    case EFFECT_FLAG_TYPE_INSERT: s.append("insert"); break;
    case EFFECT_FLAG_TYPE_AUXILIARY: s.append("auxiliary"); break;
    case EFFECT_FLAG_TYPE_REPLACE: s.append("replace"); break;
    case EFFECT_FLAG_TYPE_PRE_PROC: s.append("preproc"); break;
    case EFFECT_FLAG_TYPE_POST_PROC: s.append("postproc"); break;
    default: s.append("unknown/reserved"); break;
    }
    s.append(", ");

    s.append("insert pref: ");
    switch (flags & EFFECT_FLAG_INSERT_MASK) {
    case EFFECT_FLAG_INSERT_ANY: s.append("any"); break;
    case EFFECT_FLAG_INSERT_FIRST: s.append("first"); break;
    case EFFECT_FLAG_INSERT_LAST: s.append("last"); break;
    case EFFECT_FLAG_INSERT_EXCLUSIVE: s.append("exclusive"); break;
    default: s.append("unknown/reserved"); break;
    }
    s.append(", ");

    s.append("volume mgmt: ");
    switch (flags & EFFECT_FLAG_VOLUME_MASK) {
    case EFFECT_FLAG_VOLUME_NONE: s.append("none"); break;
    case EFFECT_FLAG_VOLUME_CTRL: s.append("implements control"); break;
    case EFFECT_FLAG_VOLUME_IND: s.append("requires indication"); break;
    default: s.append("unknown/reserved"); break;
    }
    s.append(", ");

    // Device indication is only reported when some bit is set.
    uint32_t devind = flags & EFFECT_FLAG_DEVICE_MASK;
    if (devind) {
        s.append("device indication: ");
        switch (devind) {
        case EFFECT_FLAG_DEVICE_IND: s.append("requires updates"); break;
        default: s.append("unknown/reserved"); break;
        }
        s.append(", ");
    }

    s.append("input mode: ");
    switch (flags & EFFECT_FLAG_INPUT_MASK) {
    case EFFECT_FLAG_INPUT_DIRECT: s.append("direct"); break;
    case EFFECT_FLAG_INPUT_PROVIDER: s.append("provider"); break;
    case EFFECT_FLAG_INPUT_BOTH: s.append("direct+provider"); break;
    default: s.append("not set"); break;
    }
    s.append(", ");

    s.append("output mode: ");
    switch (flags & EFFECT_FLAG_OUTPUT_MASK) {
    case EFFECT_FLAG_OUTPUT_DIRECT: s.append("direct"); break;
    case EFFECT_FLAG_OUTPUT_PROVIDER: s.append("provider"); break;
    case EFFECT_FLAG_OUTPUT_BOTH: s.append("direct+provider"); break;
    default: s.append("not set"); break;
    }
    s.append(", ");

    // Hardware acceleration is only reported when some bit is set.
    uint32_t accel = flags & EFFECT_FLAG_HW_ACC_MASK;
    if (accel) {
        s.append("hardware acceleration: ");
        switch (accel) {
        case EFFECT_FLAG_HW_ACC_SIMPLE: s.append("non-tunneled"); break;
        case EFFECT_FLAG_HW_ACC_TUNNEL: s.append("tunneled"); break;
        default: s.append("unknown/reserved"); break;
        }
        s.append(", ");
    }

    uint32_t modeind = flags & EFFECT_FLAG_AUDIO_MODE_MASK;
    if (modeind) {
        s.append("mode indication: ");
        switch (modeind) {
        case EFFECT_FLAG_AUDIO_MODE_IND: s.append("required"); break;
        default: s.append("unknown/reserved"); break;
        }
        s.append(", ");
    }

    uint32_t srcind = flags & EFFECT_FLAG_AUDIO_SOURCE_MASK;
    if (srcind) {
        s.append("source indication: ");
        switch (srcind) {
        case EFFECT_FLAG_AUDIO_SOURCE_IND: s.append("required"); break;
        default: s.append("unknown/reserved"); break;
        }
        s.append(", ");
    }

    if (flags & EFFECT_FLAG_OFFLOAD_MASK) {
        s.append("offloadable, ");
    }

    // Strip the trailing ", " left by the last section.
    // Fix: drop the unused 'str' local (lockBuffer's return value was
    // never used) and reuse the cached length instead of re-querying it.
    int len = s.length();
    if (len > 2) {
        s.lockBuffer(len);
        s.unlockBuffer(len - 2);
    }

    return s;
}
| 0
|
474,327
|
/*
 * Load dm-integrity superblock parameters into @cd, optionally
 * overriding selected fields from caller-supplied @params.
 *
 * Returns 0 on success or a negative errno-style code.
 * NOTE(review): on the -ENOMEM paths below, strings strdup'ed into
 * cd->u.integrity.params earlier in this function are not freed here —
 * presumably released with the crypt_device; confirm ownership.
 */
static int _crypt_load_integrity(struct crypt_device *cd,
	struct crypt_params_integrity *params)
{
	int r;

	r = init_crypto(cd);
	if (r < 0)
		return r;

	/* Read the on-disk integrity superblock into cd's params. */
	r = INTEGRITY_read_sb(cd, &cd->u.integrity.params, &cd->u.integrity.sb_flags);
	if (r < 0)
		return r;

	// FIXME: add checks for fields in integrity sb vs params
	if (params) {
		/* Caller-supplied values override what the superblock said. */
		cd->u.integrity.params.journal_watermark = params->journal_watermark;
		cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
		cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
		// FIXME: check ENOMEM
		if (params->integrity)
			cd->u.integrity.params.integrity = strdup(params->integrity);
		cd->u.integrity.params.integrity_key_size = params->integrity_key_size;
		if (params->journal_integrity)
			cd->u.integrity.params.journal_integrity = strdup(params->journal_integrity);
		if (params->journal_crypt)
			cd->u.integrity.params.journal_crypt = strdup(params->journal_crypt);

		/* Copy optional journal keys into volume-key objects. */
		if (params->journal_crypt_key) {
			cd->u.integrity.journal_crypt_key =
				crypt_alloc_volume_key(params->journal_crypt_key_size,
						       params->journal_crypt_key);
			if (!cd->u.integrity.journal_crypt_key)
				return -ENOMEM;
		}
		if (params->journal_integrity_key) {
			cd->u.integrity.journal_mac_key =
				crypt_alloc_volume_key(params->journal_integrity_key_size,
						       params->journal_integrity_key);
			if (!cd->u.integrity.journal_mac_key)
				return -ENOMEM;
		}
	}

	/* Tag the device as CRYPT_INTEGRITY if no type is set yet; undo the
	 * integrity-string allocation if that fails. */
	if (!cd->type && !(cd->type = strdup(CRYPT_INTEGRITY))) {
		free(CONST_CAST(void*)cd->u.integrity.params.integrity);
		return -ENOMEM;
	}

	return 0;
}
| 0
|
300,606
|
/*
 * Iterate every rtnetlink protocol family and invoke its registered
 * dumpit callback for the requested message type.  cb->family acts as
 * a cursor so a multi-part dump resumes at the family where the
 * previous pass stopped filling the skb.
 */
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	/* Family recorded by the previous invocation (0 on first call). */
	int s_idx = cb->family;

	if (s_idx == 0)
		s_idx = 1;

	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
		int type = cb->nlh->nlmsg_type-RTM_BASE;

		/* Skip families already dumped; PF_PACKET is excluded. */
		if (idx < s_idx || idx == PF_PACKET)
			continue;

		if (rtnl_msg_handlers[idx] == NULL ||
		    rtnl_msg_handlers[idx][type].dumpit == NULL)
			continue;

		if (idx > s_idx) {
			/* Entering a new family: reset per-dump state. */
			memset(&cb->args[0], 0, sizeof(cb->args));
			cb->prev_seq = 0;
			cb->seq = 0;
		}

		/* Non-zero return: skb full, stop and resume here next time. */
		if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
			break;
	}
	/* Remember where to resume on the next invocation. */
	cb->family = idx;

	return skb->len;
}
| 0
|
156,355
|
// Ensure the accepted connection carries a Session as its private data,
// creating and attaching one on first sight of this connection.
void ms_handle_fast_accept(Connection *con) override {
    Session *s = static_cast<Session*>(con->get_priv());
    if (!s) {
        s = new Session(con);
        // set_priv() stores a reference obtained via get().
        con->set_priv(s->get());
    }
    // Drop the reference this function holds (from get_priv() or new).
    s->put();
}
| 0
|
331,637
|
/*
 * Parse a subtitle timestamp of the form "HH:MM:SS.mmm" or "MM:SS.mmm"
 * and return it in milliseconds, or AV_NOPTS_VALUE if neither pattern
 * matches.
 *
 * Fixes vs. the original: the %u conversions previously stored into
 * plain int objects (undefined behavior per C11 fscanf: %u requires a
 * pointer to unsigned), and the millisecond arithmetic was done in int,
 * which can overflow for large field values before widening to int64_t.
 */
static int64_t read_ts(const char *s)
{
    unsigned hh, mm, ss, ms;

    if (sscanf(s, "%u:%u:%u.%u", &hh, &mm, &ss, &ms) == 4)
        return ((int64_t)hh * 3600 + mm * 60 + ss) * 1000 + ms;
    if (sscanf(s, "%u:%u.%u", &mm, &ss, &ms) == 3)
        return ((int64_t)mm * 60 + ss) * 1000 + ms;
    return AV_NOPTS_VALUE;
}
| 1
|
396,627
|
/* Return a random address lying inside some marked heap object:
 * keep drawing random heap addresses until GC_base() finds a
 * containing object and that object is marked. */
GC_API void * GC_CALL GC_generate_random_valid_address(void)
{
    ptr_t result;
    ptr_t base;

    do {
        result = GC_generate_random_heap_address();
        base = GC_base(result);
        /* base == 0 means the address is not inside any object: retry. */
    } while (base == 0 || !GC_is_marked(base));
    return result;
}
| 0
|
48,765
|
dwarf_uncompress_integer_block_a(Dwarf_Debug dbg,
    Dwarf_Unsigned input_length_in_bytes,
    void *input_block,
    Dwarf_Unsigned *value_count,
    Dwarf_Signed **value_array,
    Dwarf_Error *error)
{
    /*
     * Decode a block of signed-LEB128 values in two passes:
     * pass 1 counts the values (validating the encoding), pass 2
     * allocates an output array and decodes into it.  On success
     * *value_count and *value_array are set and DW_DLV_OK returned;
     * otherwise an error is recorded and DW_DLV_ERROR returned.
     */
    Dwarf_Unsigned output_length_in_units = 0;
    Dwarf_Signed *output_block = 0;
    unsigned i = 0;
    char *ptr = 0;
    int remain = 0;
    Dwarf_Signed *array = 0;
    /* One past the last input byte; bounds-checks each LEB decode. */
    Dwarf_Byte_Ptr endptr = (Dwarf_Byte_Ptr)input_block+
        input_length_in_bytes;

    /* Pass 1: walk the block counting LEB128 values. */
    output_length_in_units = 0;
    remain = input_length_in_bytes;
    ptr = input_block;
    while (remain > 0) {
        Dwarf_Unsigned len = 0;
        Dwarf_Signed value = 0;
        int rres = 0;

        rres = dwarf_decode_signed_leb128((char *)ptr,
            &len, &value,(char *)endptr);
        if (rres != DW_DLV_OK) {
            _dwarf_error(NULL, error, DW_DLE_LEB_IMPROPER);
            return DW_DLV_ERROR;
        }
        ptr += len;
        remain -= len;
        output_length_in_units++;
    }
    /* remain < 0 means the last value overran the block. */
    if (remain != 0) {
        _dwarf_error(NULL, error, DW_DLE_ALLOC_FAIL);
        return DW_DLV_ERROR;
    }

    output_block = (Dwarf_Signed*)
        _dwarf_get_alloc(dbg,
            DW_DLA_STRING,
            output_length_in_units * sizeof(Dwarf_Signed));
    if (!output_block) {
        _dwarf_error(dbg, error, DW_DLE_ALLOC_FAIL);
        return DW_DLV_ERROR;
    }

    /* Pass 2: decode for real into the freshly allocated array. */
    array = output_block;
    remain = input_length_in_bytes;
    ptr = input_block;
    for (i=0; i<output_length_in_units && remain>0; i++) {
        Dwarf_Signed num;
        Dwarf_Unsigned len;
        int sres = 0;

        sres = dwarf_decode_signed_leb128((char *)ptr,
            &len, &num,(char *)endptr);
        if (sres != DW_DLV_OK) {
            dwarf_dealloc(dbg,output_block,DW_DLA_STRING);
            _dwarf_error(NULL, error, DW_DLE_LEB_IMPROPER);
            return DW_DLV_ERROR;
        }
        ptr += len;
        remain -= len;
        array[i] = num;
    }
    if (remain != 0) {
        dwarf_dealloc(dbg, (unsigned char *)output_block,
            DW_DLA_STRING);
        _dwarf_error(dbg, error, DW_DLE_ALLOC_FAIL);
        return DW_DLV_ERROR;
    }

    *value_count = output_length_in_units;
    *value_array = output_block;
    return DW_DLV_OK;
}
| 0
|
238,898
|
/* Convert a libcurl certinfo structure into PHP arrays appended to
 * @listcode: one hash per certificate, mapping the text before the
 * first ':' of each slist line to the remainder of that line. */
static void create_certinfo(struct curl_certinfo *ci, zval *listcode)
{
	int i;

	if (ci) {
		zval certhash;

		for (i=0; i<ci->num_of_certs; i++) {
			struct curl_slist *slist;

			array_init(&certhash);
			for (slist = ci->certinfo[i]; slist; slist = slist->next) {
				int len;
				char s[64];
				char *tmp;

				/* NOTE(review): strncpy does not NUL-terminate when
				 * slist->data is >= 64 bytes, but memchr below is
				 * bounded to 64 and strlen only runs after the ':'
				 * is overwritten with '\0', so no overread occurs. */
				strncpy(s, slist->data, 64);
				tmp = memchr(s, ':', 64);
				if(tmp) {
					*tmp = '\0';
					len = strlen(s);
					/* Value starts after the ':' in the original
					 * (untruncated) slist string. */
					add_assoc_string(&certhash, s, &slist->data[len+1]);
				} else {
					php_error_docref(NULL, E_WARNING, "Could not extract hash key from certificate info");
				}
			}
			add_next_index_zval(listcode, &certhash);
		}
	}
}
| 0
|
409,489
|
/* PCI removal callback: tear the device down unless probe never
 * attached a net_device to this pdev. */
static void bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}

	__bnx2x_remove(pdev, dev, netdev_priv(dev), true);
}
| 0
|
453,276
|
/*
 * Insert @event into @ctx's event list, then attach it to its group
 * (perf_group_attach links it with its group leader's siblings).
 */
static void add_event_to_ctx(struct perf_event *event,
			     struct perf_event_context *ctx)
{
	list_add_event(event, ctx);
	perf_group_attach(event);
}
| 0
|
369,771
|
/*
 * Report a guest-triggered protocol violation: raise the QXL error
 * interrupt (when the SPICE interface supports it) and, if guest
 * debugging is enabled, print the printf-style message to stderr.
 */
void qxl_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
{
#if SPICE_INTERFACE_QXL_MINOR >= 1
    qxl_send_events(qxl, QXL_INTERRUPT_ERROR);
#endif
    if (qxl->guestdebug) {
        va_list ap;
        va_start(ap, msg);
        fprintf(stderr, "qxl-%d: guest bug: ", qxl->id);
        vfprintf(stderr, msg, ap);
        fprintf(stderr, "\n");
        va_end(ap);
    }
}
| 0
|
321,374
|
/*
 * Decode the ModRM byte (plus any SIB byte and displacement) of the
 * current instruction and return the parsed effective-address parts:
 * default segment, base register, index register, scale and
 * displacement.  base/index of -1 mean "none"; base == -2 marks the
 * 64-bit RIP-relative case.  Advances s->pc past the bytes consumed.
 */
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        /* rm == 4 selects a SIB byte with scale/index/base fields. */
        if (rm == 4) {
            int code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1; /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                /* mod=0, base=5: 32-bit displacement, no base register. */
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    /* 64-bit mode without SIB: RIP-relative addressing. */
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            /* mod=1: sign-extended 8-bit displacement. */
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            /* mod=2: 32-bit displacement. */
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        /* For correct popl handling with esp. */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }

        /* EBP/ESP-based addressing defaults to the stack segment. */
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        /* 16-bit addressing: fixed base/index pairs selected by rm. */
        if (mod == 0) {
            if (rm == 6) {
                /* mod=0, rm=6: 16-bit displacement only. */
                base = -1;
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
        } else {
            disp = (int16_t)cpu_lduw_code(env, s->pc);
            s->pc += 2;
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        tcg_abort();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
| 0
|
417,322
|
/*
 * Decode a CIFF component into an Exif metadatum on @image using the
 * tag/group information from @pCrwMapping.  For non-directory
 * components a Value of the component's type is created and read from
 * the raw data; the value size comes from the mapping table if set,
 * else from the NUL terminator for ASCII strings, else from the
 * directory entry.
 */
void CrwMap::decodeBasic(const CiffComponent& ciffComponent,
                         const CrwMapping* pCrwMapping,
                         Image& image,
                         ByteOrder byteOrder)
{
    assert(pCrwMapping != 0);
    // create a key and value pair
    ExifKey key(pCrwMapping->tag_, Internal::groupName(pCrwMapping->ifdId_));
    Value::AutoPtr value;
    if (ciffComponent.typeId() != directory) {
        value = Value::create(ciffComponent.typeId());
        uint32_t size = 0;
        if (pCrwMapping->size_ != 0) {
            // size in the mapping table overrides all
            size = pCrwMapping->size_;
        }
        else if (ciffComponent.typeId() == asciiString) {
            // determine size from the data, by looking for the first 0
            uint32_t i = 0;
            for (; i < ciffComponent.size()
                   && ciffComponent.pData()[i] != '\0'; ++i) {
                // empty
            }
            size = ++i; // include the terminating '\0'
        }
        else {
            // by default, use the size from the directory entry
            size = ciffComponent.size();
        }
        value->read(ciffComponent.pData(), size, byteOrder);
    }
    // Add metadatum to exif data (value may be null for directories)
    image.exifData().add(key, value.get());
} // CrwMap::decodeBasic
| 0
|
459,762
|
dissect_kafka_string(proto_tree *tree, int hf_item, tvbuff_t *tvb, packet_info *pinfo, int offset, int flexible,
                     int *p_offset, int *p_length)
{
    /* Flexible protocol versions carry strings in the compact form;
       older versions use the regular length-prefixed form. */
    return flexible
        ? dissect_kafka_compact_string(tree, hf_item, tvb, pinfo, offset, p_offset, p_length)
        : dissect_kafka_regular_string(tree, hf_item, tvb, pinfo, offset, p_offset, p_length);
}
| 0
|
435,935
|
#endif
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
/* Unbind any subordinate channels */
while (txq-- != &dev->_tx[0]) {
if (txq->sb_dev)
netdev_unbind_sb_channel(dev, txq->sb_dev);
}
| 0
|
335,239
|
/* Parser init callback: a single slice context suffices for parsing. */
static int vc1_parse_init(AVCodecParserContext *s)
{
    VC1ParseContext *const ctx = s->priv_data;

    ctx->v.s.slice_context_count = 1;
    return 0;
}
| 0
|
78,825
|
/*
 * Initialize NUMA-balancing state for new task @p.
 *
 * Resets per-task scan bookkeeping; for the first user of an mm also
 * resets the mm-wide scan clock.  New address spaces (!CLONE_VM) start
 * with no preferred node; new threads keep the inherited preference
 * but get a staggered scan start so threads don't all scan at once.
 */
void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
	int mm_users = 0;
	struct mm_struct *mm = p->mm;

	if (mm) {
		mm_users = atomic_read(&mm->mm_users);
		if (mm_users == 1) {
			/* First user of this mm: restart the scan clock. */
			mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
			mm->numa_scan_seq = 0;
		}
	}
	p->node_stamp = 0;
	p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	/* Protect against double add, see task_tick_numa and task_numa_work */
	p->numa_work.next = &p->numa_work;
	p->numa_faults = NULL;
	RCU_INIT_POINTER(p->numa_group, NULL);
	p->last_task_numa_placement = 0;
	p->last_sum_exec_runtime = 0;

	init_task_work(&p->numa_work, task_numa_work);

	/* New address space, reset the preferred nid */
	if (!(clone_flags & CLONE_VM)) {
		p->numa_preferred_nid = NUMA_NO_NODE;
		return;
	}

	/*
	 * New thread, keep existing numa_preferred_nid which should be copied
	 * already by arch_dup_task_struct but stagger when scans start.
	 */
	if (mm) {
		unsigned int delay;

		delay = min_t(unsigned int, task_scan_max(current),
			current->numa_scan_period * mm_users * NSEC_PER_MSEC);
		delay += 2 * TICK_NSEC;
		p->node_stamp = delay;
	}
}
| 0
|
293,083
|
/*
 * ESIL emitter for the AVR ST/STD store family: store register Rr
 * through index register Y or Z, with the addressing variants
 * enumerated in the opcode comments below (plain, post-increment,
 * pre-decrement, or fixed displacement q).
 */
INST_HANDLER (std) {	// ST Y, Rr	ST Z, Rr
			// ST Y+, Rr	ST Z+, Rr
			// ST -Y, Rr	ST -Z, Rr
			// ST Y+q, Rr	ST Z+q, Rr
	// load register (source register number assembled from opcode bits)
	ESIL_A ("r%d,", ((buf[1] & 1) << 4) | ((buf[0] >> 4) & 0xf));
	// write in memory
	__generic_ld_st (
		op, "ram",
		buf[0] & 0x8 ? 'y' : 'z',	// index register Y/Z
		0,				// no use RAMP* registers
		!(buf[1] & 0x10)
			? 0			// no increment
			: buf[0] & 0x1
				? 1		// post incremented
				: -1,		// pre decremented
		!(buf[1] & 0x10)
			? (buf[1] & 0x20)	// offset (displacement q bits)
			| ((buf[1] & 0xc) << 1)
			| (buf[0] & 0x7)
			: 0,			// no offset
		1);				// load operation (!st)
//	// cycles
//	op->cycles =
//		buf[1] & 0x1 == 0
//			? !(offset ? 1 : 3)		// LDD
//			: buf[0] & 0x3 == 0
//				? 1			// LD Rd, X
//				: buf[0] & 0x3 == 1
//					? 2		// LD Rd, X+
//					: 3;		// LD Rd, -X
//	if (!STR_BEGINS (cpu->model, "ATxmega") && op->cycles > 1) {
//		// AT*mega optimizes 1 cycle!
//		op->cycles--;
//	}
}
| 0
|
333,488
|
/* Read @nb_sectors 512-byte sectors starting at @sector_num into @buf,
 * decompressing each containing block on demand.  Returns 0 on success
 * or -1 if a block fails to load. */
static int cloop_read(BlockDriverState *bs, int64_t sector_num,
                      uint8_t *buf, int nb_sectors)
{
    BDRVCloopState *s = bs->opaque;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        int64_t cur_sector = sector_num + i;
        uint32_t block_num = cur_sector / s->sectors_per_block;
        uint32_t offset_in_block = cur_sector % s->sectors_per_block;

        /* Ensure the block holding this sector is decompressed. */
        if (cloop_read_block(bs, block_num) != 0) {
            return -1;
        }
        memcpy(buf + i * 512,
               s->uncompressed_block + offset_in_block * 512, 512);
    }
    return 0;
}
| 0
|
39,674
|
// Shrink the long jump whose operand begins at @loc into a short jump:
// remove the 3 surplus operand bytes from the opcode stream and rewrite
// the opcode (at loc - 1) to its short form.
void BytecodeFunctionGenerator::shrinkJump(offset_t loc) {
  // We are shrinking a long jump into a short jump.
  // The size of operand reduces from 4 bytes to 1 byte, a delta of 3.
  const static int ShrinkOffset = 3;

  // Rotate the 3 surplus bytes to the end, then drop them.
  std::rotate(
      opcodes_.begin() + loc,
      opcodes_.begin() + loc + ShrinkOffset,
      opcodes_.end());
  opcodes_.resize(opcodes_.size() - ShrinkOffset);

  // Change this instruction from long jump to short jump.
  longToShortJump(loc - 1);
}
| 0
|
450,177
|
/*
 * Flush conntrack entries in @net, optionally restricted by a filter
 * built from the netlink attributes (family/mark).  An explicit
 * CTA_FILTER attribute is not supported for flush and is rejected
 * with -EOPNOTSUPP.
 */
static int ctnetlink_flush_conntrack(struct net *net,
				     const struct nlattr * const cda[],
				     u32 portid, int report, u8 family)
{
	struct ctnetlink_filter *filter = NULL;

	if (ctnetlink_needs_filter(family, cda)) {
		if (cda[CTA_FILTER])
			return -EOPNOTSUPP;

		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);
	}

	/* Iterate and delete matching entries; filter may be NULL (all). */
	nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
				  portid, report);
	kfree(filter);

	return 0;
}
| 0
|
253,428
|
// Completion callback for one origin-data deleter: count failures and
// fire CallCompleted() once the last outstanding deleter reports in.
void DidDeleteOriginData(QuotaStatusCode status) {
  DCHECK_GT(remaining_deleters_, 0);
  if (status != kQuotaStatusOk)
    ++error_count_;
  if (--remaining_deleters_ == 0)
    CallCompleted();
}
| 0
|
477,709
|
CImg<T> get_resize(const int size_x, const int size_y = -100,
const int size_z = -100, const int size_c = -100,
const int interpolation_type=1, const unsigned int boundary_conditions=0,
const float centering_x = 0, const float centering_y = 0,
const float centering_z = 0, const float centering_c = 0) const {
if (centering_x<0 || centering_x>1 || centering_y<0 || centering_y>1 ||
centering_z<0 || centering_z>1 || centering_c<0 || centering_c>1)
throw CImgArgumentException(_cimg_instance
"resize(): Specified centering arguments (%g,%g,%g,%g) are outside range [0,1].",
cimg_instance,
centering_x,centering_y,centering_z,centering_c);
if (!size_x || !size_y || !size_z || !size_c) return CImg<T>();
const unsigned int
sx = std::max(1U,(unsigned int)(size_x>=0?size_x:-size_x*width()/100)),
sy = std::max(1U,(unsigned int)(size_y>=0?size_y:-size_y*height()/100)),
sz = std::max(1U,(unsigned int)(size_z>=0?size_z:-size_z*depth()/100)),
sc = std::max(1U,(unsigned int)(size_c>=0?size_c:-size_c*spectrum()/100));
if (sx==_width && sy==_height && sz==_depth && sc==_spectrum) return +*this;
if (is_empty()) return CImg<T>(sx,sy,sz,sc,(T)0);
CImg<T> res;
switch (interpolation_type) {
// Raw resizing.
//
case -1 :
std::memcpy(res.assign(sx,sy,sz,sc,(T)0)._data,_data,sizeof(T)*std::min(size(),(ulongT)sx*sy*sz*sc));
break;
// No interpolation.
//
case 0 : {
const int
xc = (int)(centering_x*((int)sx - width())),
yc = (int)(centering_y*((int)sy - height())),
zc = (int)(centering_z*((int)sz - depth())),
cc = (int)(centering_c*((int)sc - spectrum()));
switch (boundary_conditions) {
case 3 : { // Mirror
res.assign(sx,sy,sz,sc);
const int w2 = 2*width(), h2 = 2*height(), d2 = 2*depth(), s2 = 2*spectrum();
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3) cimg_openmp_if_size(res.size(),1024*1024))
cimg_forXYZC(res,x,y,z,c) {
const int
mx = cimg::mod(x - xc,w2), my = cimg::mod(y - yc,h2),
mz = cimg::mod(z - zc,d2), mc = cimg::mod(c - cc,s2);
res(x,y,z,c) = (*this)(mx<width()?mx:w2 - mx - 1,
my<height()?my:h2 - my - 1,
mz<depth()?mz:d2 - mz - 1,
mc<spectrum()?mc:s2 - mc - 1);
}
} break;
case 2 : { // Periodic
res.assign(sx,sy,sz,sc);
const int
x0 = ((int)xc%width()) - width(),
y0 = ((int)yc%height()) - height(),
z0 = ((int)zc%depth()) - depth(),
c0 = ((int)cc%spectrum()) - spectrum(),
dx = width(), dy = height(), dz = depth(), dc = spectrum();
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3) cimg_openmp_if_size(res.size(),1024*1024))
for (int c = c0; c<(int)sc; c+=dc)
for (int z = z0; z<(int)sz; z+=dz)
for (int y = y0; y<(int)sy; y+=dy)
for (int x = x0; x<(int)sx; x+=dx)
res.draw_image(x,y,z,c,*this);
} break;
case 1 : { // Neumann
res.assign(sx,sy,sz,sc).draw_image(xc,yc,zc,cc,*this);
CImg<T> sprite;
if (xc>0) { // X-backward
res.get_crop(xc,yc,zc,cc,xc,yc + height() - 1,zc + depth() - 1,cc + spectrum() - 1).move_to(sprite);
for (int x = xc - 1; x>=0; --x) res.draw_image(x,yc,zc,cc,sprite);
}
if (xc + width()<(int)sx) { // X-forward
res.get_crop(xc + width() - 1,yc,zc,cc,xc + width() - 1,yc + height() - 1,
zc + depth() - 1,cc + spectrum() - 1).move_to(sprite);
for (int x = xc + width(); x<(int)sx; ++x) res.draw_image(x,yc,zc,cc,sprite);
}
if (yc>0) { // Y-backward
res.get_crop(0,yc,zc,cc,sx - 1,yc,zc + depth() - 1,cc + spectrum() - 1).move_to(sprite);
for (int y = yc - 1; y>=0; --y) res.draw_image(0,y,zc,cc,sprite);
}
if (yc + height()<(int)sy) { // Y-forward
res.get_crop(0,yc + height() - 1,zc,cc,sx - 1,yc + height() - 1,
zc + depth() - 1,cc + spectrum() - 1).move_to(sprite);
for (int y = yc + height(); y<(int)sy; ++y) res.draw_image(0,y,zc,cc,sprite);
}
if (zc>0) { // Z-backward
res.get_crop(0,0,zc,cc,sx - 1,sy - 1,zc,cc + spectrum() - 1).move_to(sprite);
for (int z = zc - 1; z>=0; --z) res.draw_image(0,0,z,cc,sprite);
}
if (zc + depth()<(int)sz) { // Z-forward
res.get_crop(0,0,zc +depth() - 1,cc,sx - 1,sy - 1,zc + depth() - 1,cc + spectrum() - 1).move_to(sprite);
for (int z = zc + depth(); z<(int)sz; ++z) res.draw_image(0,0,z,cc,sprite);
}
if (cc>0) { // C-backward
res.get_crop(0,0,0,cc,sx - 1,sy - 1,sz - 1,cc).move_to(sprite);
for (int c = cc - 1; c>=0; --c) res.draw_image(0,0,0,c,sprite);
}
if (cc + spectrum()<(int)sc) { // C-forward
res.get_crop(0,0,0,cc + spectrum() - 1,sx - 1,sy - 1,sz - 1,cc + spectrum() - 1).move_to(sprite);
for (int c = cc + spectrum(); c<(int)sc; ++c) res.draw_image(0,0,0,c,sprite);
}
} break;
default : // Dirichlet
res.assign(sx,sy,sz,sc,(T)0).draw_image(xc,yc,zc,cc,*this);
}
break;
} break;
// Nearest neighbor interpolation.
//
case 1 : {
res.assign(sx,sy,sz,sc);
CImg<ulongT> off_x(sx), off_y(sy + 1), off_z(sz + 1), off_c(sc + 1);
const ulongT
wh = (ulongT)_width*_height,
whd = (ulongT)_width*_height*_depth,
sxy = (ulongT)sx*sy,
sxyz = (ulongT)sx*sy*sz,
one = (ulongT)1;
if (sx==_width) off_x.fill(1);
else {
ulongT *poff_x = off_x._data, curr = 0;
cimg_forX(res,x) {
const ulongT old = curr;
curr = (x + one)*_width/sx;
*(poff_x++) = curr - old;
}
}
if (sy==_height) off_y.fill(_width);
else {
ulongT *poff_y = off_y._data, curr = 0;
cimg_forY(res,y) {
const ulongT old = curr;
curr = (y + one)*_height/sy;
*(poff_y++) = _width*(curr - old);
}
*poff_y = 0;
}
if (sz==_depth) off_z.fill(wh);
else {
ulongT *poff_z = off_z._data, curr = 0;
cimg_forZ(res,z) {
const ulongT old = curr;
curr = (z + one)*_depth/sz;
*(poff_z++) = wh*(curr - old);
}
*poff_z = 0;
}
if (sc==_spectrum) off_c.fill(whd);
else {
ulongT *poff_c = off_c._data, curr = 0;
cimg_forC(res,c) {
const ulongT old = curr;
curr = (c + one)*_spectrum/sc;
*(poff_c++) = whd*(curr - old);
}
*poff_c = 0;
}
T *ptrd = res._data;
const T* ptrc = _data;
const ulongT *poff_c = off_c._data;
for (unsigned int c = 0; c<sc; ) {
const T *ptrz = ptrc;
const ulongT *poff_z = off_z._data;
for (unsigned int z = 0; z<sz; ) {
const T *ptry = ptrz;
const ulongT *poff_y = off_y._data;
for (unsigned int y = 0; y<sy; ) {
const T *ptrx = ptry;
const ulongT *poff_x = off_x._data;
cimg_forX(res,x) { *(ptrd++) = *ptrx; ptrx+=*(poff_x++); }
++y;
ulongT dy = *(poff_y++);
for ( ; !dy && y<dy; std::memcpy(ptrd,ptrd - sx,sizeof(T)*sx), ++y, ptrd+=sx, dy = *(poff_y++)) {}
ptry+=dy;
}
++z;
ulongT dz = *(poff_z++);
for ( ; !dz && z<dz; std::memcpy(ptrd,ptrd - sxy,sizeof(T)*sxy), ++z, ptrd+=sxy, dz = *(poff_z++)) {}
ptrz+=dz;
}
++c;
ulongT dc = *(poff_c++);
for ( ; !dc && c<dc; std::memcpy(ptrd,ptrd - sxyz,sizeof(T)*sxyz), ++c, ptrd+=sxyz, dc = *(poff_c++)) {}
ptrc+=dc;
}
} break;
// Moving average.
//
case 2 : {
bool instance_first = true;
if (sx!=_width) {
if (sx>_width) get_resize(sx,_height,_depth,_spectrum,1).move_to(res);
else {
CImg<Tfloat> tmp(sx,_height,_depth,_spectrum,0);
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(sx>=256 && _height*_depth*_spectrum>=256))
cimg_forYZC(tmp,y,z,v) {
for (unsigned int a = _width*sx, b = _width, c = sx, s = 0, t = 0; a; ) {
const unsigned int d = std::min(b,c);
a-=d; b-=d; c-=d;
tmp(t,y,z,v)+=(Tfloat)(*this)(s,y,z,v)*d;
if (!b) { tmp(t++,y,z,v)/=_width; b = _width; }
if (!c) { ++s; c = sx; }
}
}
tmp.move_to(res);
}
instance_first = false;
}
if (sy!=_height) {
if (sy>_height) get_resize(sx,sy,_depth,_spectrum,1).move_to(res);
else {
CImg<Tfloat> tmp(sx,sy,_depth,_spectrum,0);
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(sy>=256 && _width*_depth*_spectrum>=256))
cimg_forXZC(tmp,x,z,v) {
for (unsigned int a = _height*sy, b = _height, c = sy, s = 0, t = 0; a; ) {
const unsigned int d = std::min(b,c);
a-=d; b-=d; c-=d;
if (instance_first) tmp(x,t,z,v)+=(Tfloat)(*this)(x,s,z,v)*d;
else tmp(x,t,z,v)+=(Tfloat)res(x,s,z,v)*d;
if (!b) { tmp(x,t++,z,v)/=_height; b = _height; }
if (!c) { ++s; c = sy; }
}
}
tmp.move_to(res);
}
instance_first = false;
}
if (sz!=_depth) {
if (sz>_depth) get_resize(sx,sy,sz,_spectrum,1).move_to(res);
else {
CImg<Tfloat> tmp(sx,sy,sz,_spectrum,0);
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(sz>=256 && _width*_height*_spectrum>=256))
cimg_forXYC(tmp,x,y,v) {
for (unsigned int a = _depth*sz, b = _depth, c = sz, s = 0, t = 0; a; ) {
const unsigned int d = std::min(b,c);
a-=d; b-=d; c-=d;
if (instance_first) tmp(x,y,t,v)+=(Tfloat)(*this)(x,y,s,v)*d;
else tmp(x,y,t,v)+=(Tfloat)res(x,y,s,v)*d;
if (!b) { tmp(x,y,t++,v)/=_depth; b = _depth; }
if (!c) { ++s; c = sz; }
}
}
tmp.move_to(res);
}
instance_first = false;
}
if (sc!=_spectrum) {
if (sc>_spectrum) get_resize(sx,sy,sz,sc,1).move_to(res);
else {
CImg<Tfloat> tmp(sx,sy,sz,sc,0);
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(sc>=256 && _width*_height*_depth>=256))
cimg_forXYZ(tmp,x,y,z) {
for (unsigned int a = _spectrum*sc, b = _spectrum, c = sc, s = 0, t = 0; a; ) {
const unsigned int d = std::min(b,c);
a-=d; b-=d; c-=d;
if (instance_first) tmp(x,y,z,t)+=(Tfloat)(*this)(x,y,z,s)*d;
else tmp(x,y,z,t)+=(Tfloat)res(x,y,z,s)*d;
if (!b) { tmp(x,y,z,t++)/=_spectrum; b = _spectrum; }
if (!c) { ++s; c = sc; }
}
}
tmp.move_to(res);
}
instance_first = false;
}
} break;
// Linear interpolation.
//
case 3 : {
CImg<uintT> off(cimg::max(sx,sy,sz,sc));
CImg<doubleT> foff(off._width);
CImg<T> resx, resy, resz, resc;
double curr, old;
if (sx!=_width) {
if (_width==1) get_resize(sx,_height,_depth,_spectrum,1).move_to(resx);
else if (_width>sx) get_resize(sx,_height,_depth,_spectrum,2).move_to(resx);
else {
const double fx = (!boundary_conditions && sx>_width)?(sx>1?(_width - 1.)/(sx - 1):0):
(double)_width/sx;
resx.assign(sx,_height,_depth,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forX(resx,x) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(width() - 1.,curr + fx);
*(poff++) = (unsigned int)curr - (unsigned int)old;
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resx._width>=256 && resx._height*resx._depth*resx._spectrum>=256))
cimg_forYZC(resx,y,z,c) {
const T *ptrs = data(0,y,z,c), *const ptrsmax = ptrs + _width - 1;
T *ptrd = resx.data(0,y,z,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forX(resx,x) {
const double alpha = *(pfoff++);
const T val1 = *ptrs, val2 = ptrs<ptrsmax?*(ptrs + 1):val1;
*(ptrd++) = (T)((1 - alpha)*val1 + alpha*val2);
ptrs+=*(poff++);
}
}
}
} else resx.assign(*this,true);
if (sy!=_height) {
if (_height==1) resx.get_resize(sx,sy,_depth,_spectrum,1).move_to(resy);
else {
if (_height>sy) resx.get_resize(sx,sy,_depth,_spectrum,2).move_to(resy);
else {
const double fy = (!boundary_conditions && sy>_height)?(sy>1?(_height - 1.)/(sy - 1):0):
(double)_height/sy;
resy.assign(sx,sy,_depth,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forY(resy,y) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(height() - 1.,curr + fy);
*(poff++) = sx*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resy._height>=256 && resy._width*resy._depth*resy._spectrum>=256))
cimg_forXZC(resy,x,z,c) {
const T *ptrs = resx.data(x,0,z,c), *const ptrsmax = ptrs + (_height - 1)*sx;
T *ptrd = resy.data(x,0,z,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forY(resy,y) {
const double alpha = *(pfoff++);
const T val1 = *ptrs, val2 = ptrs<ptrsmax?*(ptrs + sx):val1;
*ptrd = (T)((1 - alpha)*val1 + alpha*val2);
ptrd+=sx;
ptrs+=*(poff++);
}
}
}
}
resx.assign();
} else resy.assign(resx,true);
if (sz!=_depth) {
if (_depth==1) resy.get_resize(sx,sy,sz,_spectrum,1).move_to(resz);
else {
if (_depth>sz) resy.get_resize(sx,sy,sz,_spectrum,2).move_to(resz);
else {
const double fz = (!boundary_conditions && sz>_depth)?(sz>1?(_depth - 1.)/(sz - 1):0):
(double)_depth/sz;
const unsigned int sxy = sx*sy;
resz.assign(sx,sy,sz,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forZ(resz,z) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(depth() - 1.,curr + fz);
*(poff++) = sxy*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resz._depth>=256 && resz._width*resz._height*resz._spectrum>=256))
cimg_forXYC(resz,x,y,c) {
const T *ptrs = resy.data(x,y,0,c), *const ptrsmax = ptrs + (_depth - 1)*sxy;
T *ptrd = resz.data(x,y,0,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forZ(resz,z) {
const double alpha = *(pfoff++);
const T val1 = *ptrs, val2 = ptrs<ptrsmax?*(ptrs + sxy):val1;
*ptrd = (T)((1 - alpha)*val1 + alpha*val2);
ptrd+=sxy;
ptrs+=*(poff++);
}
}
}
}
resy.assign();
} else resz.assign(resy,true);
if (sc!=_spectrum) {
if (_spectrum==1) resz.get_resize(sx,sy,sz,sc,1).move_to(resc);
else {
if (_spectrum>sc) resz.get_resize(sx,sy,sz,sc,2).move_to(resc);
else {
const double fc = (!boundary_conditions && sc>_spectrum)?(sc>1?(_spectrum - 1.)/(sc - 1):0):
(double)_spectrum/sc;
const unsigned int sxyz = sx*sy*sz;
resc.assign(sx,sy,sz,sc);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forC(resc,c) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(spectrum() - 1.,curr + fc);
*(poff++) = sxyz*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resc._spectrum>=256 && resc._width*resc._height*resc._depth>=256))
cimg_forXYZ(resc,x,y,z) {
const T *ptrs = resz.data(x,y,z,0), *const ptrsmax = ptrs + (_spectrum - 1)*sxyz;
T *ptrd = resc.data(x,y,z,0);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forC(resc,c) {
const double alpha = *(pfoff++);
const T val1 = *ptrs, val2 = ptrs<ptrsmax?*(ptrs + sxyz):val1;
*ptrd = (T)((1 - alpha)*val1 + alpha*val2);
ptrd+=sxyz;
ptrs+=*(poff++);
}
}
}
}
resz.assign();
} else resc.assign(resz,true);
return resc._is_shared?(resz._is_shared?(resy._is_shared?(resx._is_shared?(+(*this)):resx):resy):resz):resc;
} break;
// Grid interpolation.
//
case 4 : {
CImg<T> resx, resy, resz, resc;
if (sx!=_width) {
if (sx<_width) get_resize(sx,_height,_depth,_spectrum,1).move_to(resx);
else {
resx.assign(sx,_height,_depth,_spectrum,(T)0);
const int dx = (int)(2*sx), dy = 2*width();
int err = (int)(dy + centering_x*(sx*dy/width() - dy)), xs = 0;
cimg_forX(resx,x) if ((err-=dy)<=0) {
cimg_forYZC(resx,y,z,c) resx(x,y,z,c) = (*this)(xs,y,z,c);
++xs;
err+=dx;
}
}
} else resx.assign(*this,true);
if (sy!=_height) {
if (sy<_height) resx.get_resize(sx,sy,_depth,_spectrum,1).move_to(resy);
else {
resy.assign(sx,sy,_depth,_spectrum,(T)0);
const int dx = (int)(2*sy), dy = 2*height();
int err = (int)(dy + centering_y*(sy*dy/height() - dy)), ys = 0;
cimg_forY(resy,y) if ((err-=dy)<=0) {
cimg_forXZC(resy,x,z,c) resy(x,y,z,c) = resx(x,ys,z,c);
++ys;
err+=dx;
}
}
resx.assign();
} else resy.assign(resx,true);
if (sz!=_depth) {
if (sz<_depth) resy.get_resize(sx,sy,sz,_spectrum,1).move_to(resz);
else {
resz.assign(sx,sy,sz,_spectrum,(T)0);
const int dx = (int)(2*sz), dy = 2*depth();
int err = (int)(dy + centering_z*(sz*dy/depth() - dy)), zs = 0;
cimg_forZ(resz,z) if ((err-=dy)<=0) {
cimg_forXYC(resz,x,y,c) resz(x,y,z,c) = resy(x,y,zs,c);
++zs;
err+=dx;
}
}
resy.assign();
} else resz.assign(resy,true);
if (sc!=_spectrum) {
if (sc<_spectrum) resz.get_resize(sx,sy,sz,sc,1).move_to(resc);
else {
resc.assign(sx,sy,sz,sc,(T)0);
const int dx = (int)(2*sc), dy = 2*spectrum();
int err = (int)(dy + centering_c*(sc*dy/spectrum() - dy)), cs = 0;
cimg_forC(resc,c) if ((err-=dy)<=0) {
cimg_forXYZ(resc,x,y,z) resc(x,y,z,c) = resz(x,y,z,cs);
++cs;
err+=dx;
}
}
resz.assign();
} else resc.assign(resz,true);
return resc._is_shared?(resz._is_shared?(resy._is_shared?(resx._is_shared?(+(*this)):resx):resy):resz):resc;
} break;
// Cubic interpolation.
//
case 5 : {
const Tfloat vmin = (Tfloat)cimg::type<T>::min(), vmax = (Tfloat)cimg::type<T>::max();
CImg<uintT> off(cimg::max(sx,sy,sz,sc));
CImg<doubleT> foff(off._width);
CImg<T> resx, resy, resz, resc;
double curr, old;
if (sx!=_width) {
if (_width==1) get_resize(sx,_height,_depth,_spectrum,1).move_to(resx);
else {
if (_width>sx) get_resize(sx,_height,_depth,_spectrum,2).move_to(resx);
else {
const double fx = (!boundary_conditions && sx>_width)?(sx>1?(_width - 1.)/(sx - 1):0):
(double)_width/sx;
resx.assign(sx,_height,_depth,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forX(resx,x) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(width() - 1.,curr + fx);
*(poff++) = (unsigned int)curr - (unsigned int)old;
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resx._width>=256 && resx._height*resx._depth*resx._spectrum>=256))
cimg_forYZC(resx,y,z,c) {
const T *const ptrs0 = data(0,y,z,c), *ptrs = ptrs0, *const ptrsmax = ptrs + (_width - 2);
T *ptrd = resx.data(0,y,z,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forX(resx,x) {
const double
t = *(pfoff++),
val1 = (double)*ptrs,
val0 = ptrs>ptrs0?(double)*(ptrs - 1):val1,
val2 = ptrs<=ptrsmax?(double)*(ptrs + 1):val1,
val3 = ptrs<ptrsmax?(double)*(ptrs + 2):val2,
val = val1 + 0.5f*(t*(-val0 + val2) + t*t*(2*val0 - 5*val1 + 4*val2 - val3) +
t*t*t*(-val0 + 3*val1 - 3*val2 + val3));
*(ptrd++) = (T)(val<vmin?vmin:val>vmax?vmax:val);
ptrs+=*(poff++);
}
}
}
}
} else resx.assign(*this,true);
if (sy!=_height) {
if (_height==1) resx.get_resize(sx,sy,_depth,_spectrum,1).move_to(resy);
else {
if (_height>sy) resx.get_resize(sx,sy,_depth,_spectrum,2).move_to(resy);
else {
const double fy = (!boundary_conditions && sy>_height)?(sy>1?(_height - 1.)/(sy - 1):0):
(double)_height/sy;
resy.assign(sx,sy,_depth,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forY(resy,y) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(height() - 1.,curr + fy);
*(poff++) = sx*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resy._height>=256 && resy._width*resy._depth*resy._spectrum>=256))
cimg_forXZC(resy,x,z,c) {
const T *const ptrs0 = resx.data(x,0,z,c), *ptrs = ptrs0, *const ptrsmax = ptrs + (_height - 2)*sx;
T *ptrd = resy.data(x,0,z,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forY(resy,y) {
const double
t = *(pfoff++),
val1 = (double)*ptrs,
val0 = ptrs>ptrs0?(double)*(ptrs - sx):val1,
val2 = ptrs<=ptrsmax?(double)*(ptrs + sx):val1,
val3 = ptrs<ptrsmax?(double)*(ptrs + 2*sx):val2,
val = val1 + 0.5f*(t*(-val0 + val2) + t*t*(2*val0 - 5*val1 + 4*val2 - val3) +
t*t*t*(-val0 + 3*val1 - 3*val2 + val3));
*ptrd = (T)(val<vmin?vmin:val>vmax?vmax:val);
ptrd+=sx;
ptrs+=*(poff++);
}
}
}
}
resx.assign();
} else resy.assign(resx,true);
if (sz!=_depth) {
if (_depth==1) resy.get_resize(sx,sy,sz,_spectrum,1).move_to(resz);
else {
if (_depth>sz) resy.get_resize(sx,sy,sz,_spectrum,2).move_to(resz);
else {
const double fz = (!boundary_conditions && sz>_depth)?(sz>1?(_depth - 1.)/(sz - 1):0):
(double)_depth/sz;
const unsigned int sxy = sx*sy;
resz.assign(sx,sy,sz,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forZ(resz,z) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(depth() - 1.,curr + fz);
*(poff++) = sxy*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resz._depth>=256 && resz._width*resz._height*resz._spectrum>=256))
cimg_forXYC(resz,x,y,c) {
const T *const ptrs0 = resy.data(x,y,0,c), *ptrs = ptrs0, *const ptrsmax = ptrs + (_depth - 2)*sxy;
T *ptrd = resz.data(x,y,0,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forZ(resz,z) {
const double
t = *(pfoff++),
val1 = (double)*ptrs,
val0 = ptrs>ptrs0?(double)*(ptrs - sxy):val1,
val2 = ptrs<=ptrsmax?(double)*(ptrs + sxy):val1,
val3 = ptrs<ptrsmax?(double)*(ptrs + 2*sxy):val2,
val = val1 + 0.5f*(t*(-val0 + val2) + t*t*(2*val0 - 5*val1 + 4*val2 - val3) +
t*t*t*(-val0 + 3*val1 - 3*val2 + val3));
*ptrd = (T)(val<vmin?vmin:val>vmax?vmax:val);
ptrd+=sxy;
ptrs+=*(poff++);
}
}
}
}
resy.assign();
} else resz.assign(resy,true);
if (sc!=_spectrum) {
if (_spectrum==1) resz.get_resize(sx,sy,sz,sc,1).move_to(resc);
else {
if (_spectrum>sc) resz.get_resize(sx,sy,sz,sc,2).move_to(resc);
else {
const double fc = (!boundary_conditions && sc>_spectrum)?(sc>1?(_spectrum - 1.)/(sc - 1):0):
(double)_spectrum/sc;
const unsigned int sxyz = sx*sy*sz;
resc.assign(sx,sy,sz,sc);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forC(resc,c) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(spectrum() - 1.,curr + fc);
*(poff++) = sxyz*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resc._spectrum>=256 && resc._width*resc._height*resc._depth>=256))
cimg_forXYZ(resc,x,y,z) {
const T *const ptrs0 = resz.data(x,y,z,0), *ptrs = ptrs0, *const ptrsmax = ptrs + (_spectrum - 2)*sxyz;
T *ptrd = resc.data(x,y,z,0);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forC(resc,c) {
const double
t = *(pfoff++),
val1 = (double)*ptrs,
val0 = ptrs>ptrs0?(double)*(ptrs - sxyz):val1,
val2 = ptrs<=ptrsmax?(double)*(ptrs + sxyz):val1,
val3 = ptrs<ptrsmax?(double)*(ptrs + 2*sxyz):val2,
val = val1 + 0.5f*(t*(-val0 + val2) + t*t*(2*val0 - 5*val1 + 4*val2 - val3) +
t*t*t*(-val0 + 3*val1 - 3*val2 + val3));
*ptrd = (T)(val<vmin?vmin:val>vmax?vmax:val);
ptrd+=sxyz;
ptrs+=*(poff++);
}
}
}
}
resz.assign();
} else resc.assign(resz,true);
return resc._is_shared?(resz._is_shared?(resy._is_shared?(resx._is_shared?(+(*this)):resx):resy):resz):resc;
} break;
// Lanczos interpolation.
//
case 6 : {
const double vmin = (double)cimg::type<T>::min(), vmax = (double)cimg::type<T>::max();
CImg<uintT> off(cimg::max(sx,sy,sz,sc));
CImg<doubleT> foff(off._width);
CImg<T> resx, resy, resz, resc;
double curr, old;
if (sx!=_width) {
if (_width==1) get_resize(sx,_height,_depth,_spectrum,1).move_to(resx);
else {
if (_width>sx) get_resize(sx,_height,_depth,_spectrum,2).move_to(resx);
else {
const double fx = (!boundary_conditions && sx>_width)?(sx>1?(_width - 1.)/(sx - 1):0):
(double)_width/sx;
resx.assign(sx,_height,_depth,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forX(resx,x) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(width() - 1.,curr + fx);
*(poff++) = (unsigned int)curr - (unsigned int)old;
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resx._width>=256 && resx._height*resx._depth*resx._spectrum>=256))
cimg_forYZC(resx,y,z,c) {
const T *const ptrs0 = data(0,y,z,c), *ptrs = ptrs0, *const ptrsmin = ptrs0 + 1,
*const ptrsmax = ptrs0 + (_width - 2);
T *ptrd = resx.data(0,y,z,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forX(resx,x) {
const double
t = *(pfoff++),
w0 = _cimg_lanczos(t + 2),
w1 = _cimg_lanczos(t + 1),
w2 = _cimg_lanczos(t),
w3 = _cimg_lanczos(t - 1),
w4 = _cimg_lanczos(t - 2),
val2 = (double)*ptrs,
val1 = ptrs>=ptrsmin?(double)*(ptrs - 1):val2,
val0 = ptrs>ptrsmin?(double)*(ptrs - 2):val1,
val3 = ptrs<=ptrsmax?(double)*(ptrs + 1):val2,
val4 = ptrs<ptrsmax?(double)*(ptrs + 2):val3,
val = (val0*w0 + val1*w1 + val2*w2 + val3*w3 + val4*w4)/(w1 + w2 + w3 + w4);
*(ptrd++) = (T)(val<vmin?vmin:val>vmax?vmax:val);
ptrs+=*(poff++);
}
}
}
}
} else resx.assign(*this,true);
if (sy!=_height) {
if (_height==1) resx.get_resize(sx,sy,_depth,_spectrum,1).move_to(resy);
else {
if (_height>sy) resx.get_resize(sx,sy,_depth,_spectrum,2).move_to(resy);
else {
const double fy = (!boundary_conditions && sy>_height)?(sy>1?(_height - 1.)/(sy - 1):0):
(double)_height/sy;
resy.assign(sx,sy,_depth,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forY(resy,y) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(height() - 1.,curr + fy);
*(poff++) = sx*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resy._height>=256 && resy._width*resy._depth*resy._spectrum>=256))
cimg_forXZC(resy,x,z,c) {
const T *const ptrs0 = resx.data(x,0,z,c), *ptrs = ptrs0, *const ptrsmin = ptrs0 + sx,
*const ptrsmax = ptrs0 + (_height - 2)*sx;
T *ptrd = resy.data(x,0,z,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forY(resy,y) {
const double
t = *(pfoff++),
w0 = _cimg_lanczos(t + 2),
w1 = _cimg_lanczos(t + 1),
w2 = _cimg_lanczos(t),
w3 = _cimg_lanczos(t - 1),
w4 = _cimg_lanczos(t - 2),
val2 = (double)*ptrs,
val1 = ptrs>=ptrsmin?(double)*(ptrs - sx):val2,
val0 = ptrs>ptrsmin?(double)*(ptrs - 2*sx):val1,
val3 = ptrs<=ptrsmax?(double)*(ptrs + sx):val2,
val4 = ptrs<ptrsmax?(double)*(ptrs + 2*sx):val3,
val = (val0*w0 + val1*w1 + val2*w2 + val3*w3 + val4*w4)/(w1 + w2 + w3 + w4);
*ptrd = (T)(val<vmin?vmin:val>vmax?vmax:val);
ptrd+=sx;
ptrs+=*(poff++);
}
}
}
}
resx.assign();
} else resy.assign(resx,true);
if (sz!=_depth) {
if (_depth==1) resy.get_resize(sx,sy,sz,_spectrum,1).move_to(resz);
else {
if (_depth>sz) resy.get_resize(sx,sy,sz,_spectrum,2).move_to(resz);
else {
const double fz = (!boundary_conditions && sz>_depth)?(sz>1?(_depth - 1.)/(sz - 1):0):
(double)_depth/sz;
const unsigned int sxy = sx*sy;
resz.assign(sx,sy,sz,_spectrum);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forZ(resz,z) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(depth() - 1.,curr + fz);
*(poff++) = sxy*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resz._depth>=256 && resz._width*resz._height*resz._spectrum>=256))
cimg_forXYC(resz,x,y,c) {
const T *const ptrs0 = resy.data(x,y,0,c), *ptrs = ptrs0, *const ptrsmin = ptrs0 + sxy,
*const ptrsmax = ptrs0 + (_depth - 2)*sxy;
T *ptrd = resz.data(x,y,0,c);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forZ(resz,z) {
const double
t = *(pfoff++),
w0 = _cimg_lanczos(t + 2),
w1 = _cimg_lanczos(t + 1),
w2 = _cimg_lanczos(t),
w3 = _cimg_lanczos(t - 1),
w4 = _cimg_lanczos(t - 2),
val2 = (double)*ptrs,
val1 = ptrs>=ptrsmin?(double)*(ptrs - sxy):val2,
val0 = ptrs>ptrsmin?(double)*(ptrs - 2*sxy):val1,
val3 = ptrs<=ptrsmax?(double)*(ptrs + sxy):val2,
val4 = ptrs<ptrsmax?(double)*(ptrs + 2*sxy):val3,
val = (val0*w0 + val1*w1 + val2*w2 + val3*w3 + val4*w4)/(w1 + w2 + w3 + w4);
*ptrd = (T)(val<vmin?vmin:val>vmax?vmax:val);
ptrd+=sxy;
ptrs+=*(poff++);
}
}
}
}
resy.assign();
} else resz.assign(resy,true);
if (sc!=_spectrum) {
if (_spectrum==1) resz.get_resize(sx,sy,sz,sc,1).move_to(resc);
else {
if (_spectrum>sc) resz.get_resize(sx,sy,sz,sc,2).move_to(resc);
else {
const double fc = (!boundary_conditions && sc>_spectrum)?(sc>1?(_spectrum - 1.)/(sc - 1):0):
(double)_spectrum/sc;
const unsigned int sxyz = sx*sy*sz;
resc.assign(sx,sy,sz,sc);
curr = old = 0;
{
unsigned int *poff = off._data;
double *pfoff = foff._data;
cimg_forC(resc,c) {
*(pfoff++) = curr - (unsigned int)curr;
old = curr;
curr = std::min(spectrum() - 1.,curr + fc);
*(poff++) = sxyz*((unsigned int)curr - (unsigned int)old);
}
}
cimg_pragma_openmp(parallel for cimg_openmp_collapse(3)
cimg_openmp_if(resc._spectrum>=256 && resc._width*resc._height*resc._depth>=256))
cimg_forXYZ(resc,x,y,z) {
const T *const ptrs0 = resz.data(x,y,z,0), *ptrs = ptrs0, *const ptrsmin = ptrs0 + sxyz,
*const ptrsmax = ptrs + (_spectrum - 2)*sxyz;
T *ptrd = resc.data(x,y,z,0);
const unsigned int *poff = off._data;
const double *pfoff = foff._data;
cimg_forC(resc,c) {
const double
t = *(pfoff++),
w0 = _cimg_lanczos(t + 2),
w1 = _cimg_lanczos(t + 1),
w2 = _cimg_lanczos(t),
w3 = _cimg_lanczos(t - 1),
w4 = _cimg_lanczos(t - 2),
val2 = (double)*ptrs,
val1 = ptrs>=ptrsmin?(double)*(ptrs - sxyz):val2,
val0 = ptrs>ptrsmin?(double)*(ptrs - 2*sxyz):val1,
val3 = ptrs<=ptrsmax?(double)*(ptrs + sxyz):val2,
val4 = ptrs<ptrsmax?(double)*(ptrs + 2*sxyz):val3,
val = (val0*w0 + val1*w1 + val2*w2 + val3*w3 + val4*w4)/(w1 + w2 + w3 + w4);
*ptrd = (T)(val<vmin?vmin:val>vmax?vmax:val);
ptrd+=sxyz;
ptrs+=*(poff++);
}
}
}
}
resz.assign();
} else resc.assign(resz,true);
return resc._is_shared?(resz._is_shared?(resy._is_shared?(resx._is_shared?(+(*this)):resx):resy):resz):resc;
} break;
// Unknown interpolation.
//
default :
throw CImgArgumentException(_cimg_instance
"resize(): Invalid specified interpolation %d "
"(should be { -1=raw | 0=none | 1=nearest | 2=average | 3=linear | 4=grid | "
"5=cubic | 6=lanczos }).",
cimg_instance,
interpolation_type);
}
return res;
}
| 0
|
25,668
|
/* Region-of-interest maps are not supported by this codec: reject the
 * control request unconditionally. */
static vpx_codec_err_t ctrl_set_roi_map(vpx_codec_alg_priv_t *ctx,
                                        va_list args) {
  (void)ctx;   /* unused */
  (void)args;  /* unused */
  return VPX_CODEC_INVALID_PARAM;
}
| 0
|
360,295
|
/* Build the emblem-comparison cache for @file if it does not exist yet:
 * all keyword strings are concatenated (each NUL-terminated) into one
 * buffer, with an extra trailing NUL marking the end of the list. */
fill_emblem_cache_if_needed (NautilusFile *file)
{
	GList *node, *keywords;
	char *scanner;
	size_t length;
	if (file->details->compare_by_emblem_cache != NULL) {
		/* Got a cache already. */
		return;
	}
	keywords = nautilus_file_get_keywords (file);
	/* Add up the keyword string lengths */
	length = 1;
	for (node = keywords; node != NULL; node = node->next) {
		length += strlen ((const char *) node->data) + 1;
	}
	/* Now that we know how large the cache struct needs to be, allocate it. */
	file->details->compare_by_emblem_cache = g_malloc (sizeof(NautilusFileSortByEmblemCache) + length);
	/* Copy them into the cache. */
	scanner = file->details->compare_by_emblem_cache->emblem_keywords;
	for (node = keywords; node != NULL; node = node->next) {
		/* length is reused here as the size of one keyword incl. its NUL */
		length = strlen ((const char *) node->data) + 1;
		memcpy (scanner, (const char *) node->data, length);
		scanner += length;
	}
	/* Zero-terminate so we can tell where the list ends. */
	*scanner = 0;
	eel_g_list_free_deep (keywords);
}
| 0
|
147,211
|
/* Recursively set the expression context (Load/Store/Del/NamedStore) on
 * expression 'e'.  Expression kinds that cannot be assignment/deletion
 * targets set 'expr_name' and a uniform error is raised below; List and
 * Tuple targets propagate the context to all their elements.  Returns 1
 * on success, 0 after reporting an error. */
set_context(struct compiling *c, expr_ty e, expr_context_ty ctx, const node *n)
{
    asdl_seq *s = NULL;
    /* If a particular expression type can't be used for assign / delete,
       set expr_name to its name and an error message will be generated.
    */
    const char* expr_name = NULL;
    /* The ast defines augmented store and load contexts, but the
       implementation here doesn't actually use them.  The code may be
       a little more complex than necessary as a result.  It also means
       that expressions in an augmented assignment have a Store context.
       Consider restructuring so that augmented assignment uses
       set_context(), too.
    */
    assert(ctx != AugStore && ctx != AugLoad);
    switch (e->kind) {
        case Attribute_kind:
            if (ctx == NamedStore) {
                expr_name = "attribute";
                break;
            }
            e->v.Attribute.ctx = ctx;
            /* Storing to a forbidden name (e.g. __debug__) is an error. */
            if (ctx == Store && forbidden_name(c, e->v.Attribute.attr, n, 1))
                return 0;
            break;
        case Subscript_kind:
            if (ctx == NamedStore) {
                expr_name = "subscript";
                break;
            }
            e->v.Subscript.ctx = ctx;
            break;
        case Starred_kind:
            if (ctx == NamedStore) {
                expr_name = "starred";
                break;
            }
            e->v.Starred.ctx = ctx;
            /* The starred target's inner expression gets the same context. */
            if (!set_context(c, e->v.Starred.value, ctx, n))
                return 0;
            break;
        case Name_kind:
            if (ctx == Store) {
                if (forbidden_name(c, e->v.Name.id, n, 0))
                    return 0; /* forbidden_name() calls ast_error() */
            }
            e->v.Name.ctx = ctx;
            break;
        case List_kind:
            if (ctx == NamedStore) {
                expr_name = "list";
                break;
            }
            e->v.List.ctx = ctx;
            s = e->v.List.elts;
            break;
        case Tuple_kind:
            if (ctx == NamedStore) {
                expr_name = "tuple";
                break;
            }
            e->v.Tuple.ctx = ctx;
            s = e->v.Tuple.elts;
            break;
        case Lambda_kind:
            expr_name = "lambda";
            break;
        case Call_kind:
            expr_name = "function call";
            break;
        case BoolOp_kind:
        case BinOp_kind:
        case UnaryOp_kind:
            expr_name = "operator";
            break;
        case GeneratorExp_kind:
            expr_name = "generator expression";
            break;
        case Yield_kind:
        case YieldFrom_kind:
            expr_name = "yield expression";
            break;
        case Await_kind:
            expr_name = "await expression";
            break;
        case ListComp_kind:
            expr_name = "list comprehension";
            break;
        case SetComp_kind:
            expr_name = "set comprehension";
            break;
        case DictComp_kind:
            expr_name = "dict comprehension";
            break;
        case Dict_kind:
            expr_name = "dict display";
            break;
        case Set_kind:
            expr_name = "set display";
            break;
        case JoinedStr_kind:
        case FormattedValue_kind:
            expr_name = "f-string expression";
            break;
        case Constant_kind: {
            /* Singleton constants get a dedicated message showing the value. */
            PyObject *value = e->v.Constant.value;
            if (value == Py_None || value == Py_False || value == Py_True
                    || value == Py_Ellipsis)
            {
                return ast_error(c, n, "cannot %s %R",
                                 ctx == Store ? "assign to" : "delete",
                                 value);
            }
            expr_name = "literal";
            break;
        }
        case Compare_kind:
            expr_name = "comparison";
            break;
        case IfExp_kind:
            expr_name = "conditional expression";
            break;
        case NamedExpr_kind:
            expr_name = "named expression";
            break;
        default:
            PyErr_Format(PyExc_SystemError,
                         "unexpected expression in %sassignment %d (line %d)",
                         ctx == NamedStore ? "named ": "",
                         e->kind, e->lineno);
            return 0;
    }
    /* Check for error string set by switch */
    if (expr_name) {
        if (ctx == NamedStore) {
            return ast_error(c, n, "cannot use named assignment with %s",
                             expr_name);
        }
        else {
            return ast_error(c, n, "cannot %s %s",
                             ctx == Store ? "assign to" : "delete",
                             expr_name);
        }
    }
    /* If the LHS is a list or tuple, we need to set the assignment
       context for all the contained elements.
    */
    if (s) {
        Py_ssize_t i;
        for (i = 0; i < asdl_seq_LEN(s); i++) {
            if (!set_context(c, (expr_ty)asdl_seq_GET(s, i), ctx, n))
                return 0;
        }
    }
    return 1;
}
| 0
|
413,014
|
/* Precompute the directory nlink counts for per-task (tid) and per-process
 * (tgid) /proc entries from their respective base entry tables. */
void __init set_proc_pid_nlink(void)
{
	nlink_tid = pid_entry_nlink(tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
	nlink_tgid = pid_entry_nlink(tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}
| 0
|
520,618
|
/* This item is never a compile-time constant. */
bool const_item() const { return false; }
| 0
|
108,344
|
/* Record that a circuit through the guard in *guard_state_p completed.
 * Updates the guard-state machine and timestamp, returning
 * GUARD_USABLE_NOW when the new state is COMPLETE, GUARD_USABLE_NEVER on
 * internal errors, and GUARD_MAYBE_USABLE_LATER otherwise. */
entry_guard_succeeded(circuit_guard_state_t **guard_state_p)
{
  if (BUG(*guard_state_p == NULL))
    return GUARD_USABLE_NEVER;
  entry_guard_t *guard = entry_guard_handle_get((*guard_state_p)->guard);
  if (! guard || BUG(guard->in_selection == NULL))
    return GUARD_USABLE_NEVER;
  unsigned newstate =
    entry_guards_note_guard_success(guard->in_selection, guard,
                                    (*guard_state_p)->state);
  (*guard_state_p)->state = newstate;
  (*guard_state_p)->state_set_at = approx_time();
  if (newstate == GUARD_CIRC_STATE_COMPLETE) {
    return GUARD_USABLE_NOW;
  } else {
    return GUARD_MAYBE_USABLE_LATER;
  }
}
| 0
|
15,317
|
/* Mark an ICC-based colorspace as using BGR component order.
 * Silently does nothing when cs is NULL or not ICC-based. */
void fz_set_icc_bgr(fz_context *ctx, fz_colorspace *cs)
{
	fz_iccprofile *profile;

	if (cs == NULL || !fz_colorspace_is_icc(ctx, cs))
		return;
	profile = cs->data;
	profile->bgr = 1;
}
| 0
|
105,361
|
/* seq_file .next operation: advance the position and stop iteration once
 * all NR_LDISCS line disciplines have been visited. */
static void *tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos < NR_LDISCS) ? pos : NULL;
}
| 0
|
203,000
|
// Schedules the current back buffer as an overlay plane and asynchronously
// swaps; reports SWAP_FAILED through the callback if scheduling fails.
void GLSurfaceOzoneSurfacelessSurfaceImpl::SwapBuffersAsync(
    const SwapCompletionCallback& callback) {
  if (!images_[current_surface_]->ScheduleOverlayPlane(
          widget_, 0, OverlayTransform::OVERLAY_TRANSFORM_NONE,
          gfx::Rect(GetSize()), gfx::RectF(1, 1))) {
    callback.Run(gfx::SwapResult::SWAP_FAILED);
    return;
  }
  GLSurfaceOzoneSurfaceless::SwapBuffersAsync(callback);
  // Toggle between the two buffers (0 <-> 1) and rebind the new target.
  current_surface_ ^= 1;
  BindFramebuffer();
}
| 0
|
448,181
|
/* Switch into the network namespace named in argv[0] and exec the command
 * in argv[1..]; with do_all set, run the command in every named netns
 * instead.  Returns the negated child exit status (see comment below). */
static int netns_exec(int argc, char **argv)
{
	/* Setup the proper environment for apps that are not netns
	 * aware, and execute a program in that environment.
	 */
	const char *cmd;
	if (argc < 1 && !do_all) {
		fprintf(stderr, "No netns name specified\n");
		return -1;
	}
	if ((argc < 2 && !do_all) || (argc < 1 && do_all)) {
		fprintf(stderr, "No command specified\n");
		return -1;
	}
	if (do_all)
		return do_each_netns(on_netns_exec, --argv, 1);
	if (netns_switch(argv[0]))
		return -1;
	/* we just changed namespaces. clear any vrf association
	 * with prior namespace before exec'ing command
	 */
	vrf_reset();
	/* ip must return the status of the child,
	 * but do_cmd() will add a minus to this,
	 * so let's add another one here to cancel it.
	 */
	cmd = argv[1];
	return -cmd_exec(cmd, argv + 1, !!batch_mode);
}
| 0
|
96,202
|
/* Return the script context in which option "name" was last set, or NULL
 * (after emitting an internal error) if no such option exists. */
get_option_sctx(char *name)
{
    int idx = findoption((char_u *)name);
    if (idx >= 0)
	return &options[idx].script_ctx;
    siemsg("no such option: %s", name);
    return NULL;
}
| 0
|
159,841
|
/* Widen the 32-bit value in 'src' into the 64-bit 'dest'.  'size' selects
 * the source element width (0 = 8-bit, 1 = 16-bit, 2 = 32-bit) and 'u'
 * selects unsigned (non-zero) versus signed extension.  'src' is released
 * via dead_tmp() — callers must not use it afterwards.
 * NOTE(review): this snippet is flagged in the upstream dataset; verify the
 * temp lifetime handling against the current QEMU translate.c. */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    dead_tmp(src);
}
| 1
|
138,954
|
/* Tear down the device-replace target device: unlink it from sysfs and the
 * fs_devices lists (under device_list_mutex), scratch its superblocks,
 * close the bdev, and free it after an RCU grace period. */
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
	WARN_ON(!tgtdev);
	mutex_lock(&fs_devices->device_list_mutex);
	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);
	if (tgtdev->bdev)
		fs_devices->open_devices--;
	fs_devices->num_devices--;
	btrfs_assign_next_active_device(tgtdev, NULL);
	list_del_rcu(&tgtdev->dev_list);
	mutex_unlock(&fs_devices->device_list_mutex);
	/*
	 * The update_dev_time() with in btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
	btrfs_close_bdev(tgtdev);
	call_rcu(&tgtdev->rcu, free_device_rcu);
}
| 0
|
248,724
|
// Ends an in-page find session.  Depending on |action| this clears the
// selection, keeps it, or (when focused) simulates a click on the focused
// element to activate the found match.
void WebLocalFrameImpl::StopFinding(StopFindAction action) {
  bool clear_selection = action == kStopFindActionClearSelection;
  if (clear_selection)
    ExecuteCommand(WebString::FromUTF8("Unselect"));
  if (text_finder_) {
    if (!clear_selection)
      text_finder_->SetFindEndstateFocusAndSelection();
    text_finder_->StopFindingAndClearSelection();
  }
  if (action == kStopFindActionActivateSelection && IsFocused()) {
    WebDocument doc = GetDocument();
    if (!doc.IsNull()) {
      WebElement element = doc.FocusedElement();
      if (!element.IsNull())
        element.SimulateClick();
    }
  }
}
| 0
|
438,392
|
// Setter: records the colour-primaries value in the member field.
void set_primaries(uint64_t primaries) {
  primaries_ = primaries;
}
| 0
|
155,193
|
/* Log the identity of a newly enumerated USB device: vendor/product IDs,
 * the descriptor string indices, and the decoded strings when present. */
static void announce_device(struct usb_device *udev)
{
	dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x\n",
		le16_to_cpu(udev->descriptor.idVendor),
		le16_to_cpu(udev->descriptor.idProduct));
	dev_info(&udev->dev,
		"New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n",
		udev->descriptor.iManufacturer,
		udev->descriptor.iProduct,
		udev->descriptor.iSerialNumber);
	show_string(udev, "Product", udev->product);
	show_string(udev, "Manufacturer", udev->manufacturer);
	show_string(udev, "SerialNumber", udev->serial);
}
| 0
|
259,999
|
/**
 * ecryptfs_decrypt_page
 * @page: page cache page to fill with decrypted data
 *
 * Reads the corresponding encrypted data from the lower file into @page,
 * then decrypts each extent of the page in place.
 *
 * Returns zero on success; negative errno on failure.
 */
int ecryptfs_decrypt_page(struct page *page)
{
	struct inode *ecryptfs_inode;
	struct ecryptfs_crypt_stat *crypt_stat;
	char *page_virt;
	unsigned long extent_offset;
	loff_t lower_offset;
	int rc = 0;
	ecryptfs_inode = page->mapping->host;
	crypt_stat =
		&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
	BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
	lower_offset = lower_offset_for_page(crypt_stat, page);
	page_virt = kmap(page);
	rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE,
				 ecryptfs_inode);
	kunmap(page);
	if (rc < 0) {
		ecryptfs_printk(KERN_ERR,
				"Error attempting to read lower page; rc = [%d]\n",
				rc);
		goto out;
	}
	/* Decrypt each extent of the page in place. */
	for (extent_offset = 0;
	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
	     extent_offset++) {
		rc = crypt_extent(crypt_stat, page, page,
				  extent_offset, DECRYPT);
		if (rc) {
			printk(KERN_ERR "%s: Error encrypting extent; "
			       "rc = [%d]\n", __func__, rc);
			goto out;
		}
	}
out:
	return rc;
}
| 0
|
211,832
|
// Generated V8 binding trampoline: wraps the attribute setter with tracing
// sampling state and a custom-element callback delivery scope.
static void reflectedTreatNullAsNullStringCustomURLAttrAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
    TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
    CustomElementCallbackDispatcher::CallbackDeliveryScope deliveryScope;
    TestObjectV8Internal::reflectedTreatNullAsNullStringCustomURLAttrAttributeSetter(jsValue, info);
    TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 0
|
414,189
|
/* Compare the first four e-mail slots of two contacts.  Two slots match
 * when both are set and equal, or both are unset.  Returns TRUE when every
 * slot matches, FALSE at the first mismatch. */
email_compare (EContact *contact1,
               EContact *contact2)
{
	const gchar *email1, *email2;
	gint i;
	/*
	if (e_contact_get (contact1, E_CONTACT_IS_LIST))
		return TRUE;
	*/
	for (i = 0; i < 4; i++) {
		gboolean equal;
		email1 = e_contact_get_const (contact1, email_ids[i]);
		email2 = e_contact_get_const (contact2, email_ids[i]);
		if (email1 && email2)
			equal = !strcmp (email1, email2);
		else
			/* both NULL counts as equal; one NULL does not */
			equal = (!!email1 == !!email2);
		if (!equal)
			return equal;
	}
	return TRUE;
}
| 0
|
517,131
|
/* Constructor: delegates entirely to Item_default_value. */
Item_ignore_value(THD *thd, Name_resolution_context *context_arg)
    :Item_default_value(thd, context_arg)
{};
| 0
|
368,389
|
/* Release an extrainfo_t and its owned buffers; NULL is a no-op.  The
 * struct is poisoned with 0x58 bytes before freeing to catch stale use. */
extrainfo_free(extrainfo_t *extrainfo)
{
  if (!extrainfo)
    return;
  tor_free(extrainfo->cache_info.signed_descriptor_body);
  tor_free(extrainfo->pending_sig);
  /* XXXX remove this if it turns out to slow us down. */
  memset(extrainfo, 88, sizeof(extrainfo_t)); /* debug bad memory usage */
  tor_free(extrainfo);
}
| 0
|
168,049
|
/* pg_port(): return the port of the PostgreSQL connection; thin wrapper
 * around the shared link-info helper. */
PHP_FUNCTION(pg_port)
{
	php_pgsql_get_link_info(INTERNAL_FUNCTION_PARAM_PASSTHRU,PHP_PG_PORT);
}
| 0
|
231,575
|
// Dispatches print-related IPC messages; unhandled messages fall through to
// the base PrintManager.  Returns true when some handler consumed |message|.
bool PrintViewManagerBase::OnMessageReceived(
    const IPC::Message& message,
    content::RenderFrameHost* render_frame_host) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(PrintViewManagerBase, message)
    IPC_MESSAGE_HANDLER(PrintHostMsg_DidPrintPage, OnDidPrintPage)
    IPC_MESSAGE_HANDLER(PrintHostMsg_ShowInvalidPrinterSettingsError,
                        OnShowInvalidPrinterSettingsError)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled || PrintManager::OnMessageReceived(message, render_frame_host);
}
| 0
|
107,936
|
/* Parsed in-memory representation of a Mach-O binary (radare2).  Holds the
 * header, load-command tables (segments, sections, symbols, imports,
 * fixups), per-arch thread state, and parsing/analysis bookkeeping. */
struct MACH0_(obj_t) {
	struct MACH0_(mach_header) hdr;
	struct MACH0_(segment_command) *segs;
	char *intrp;
	char *compiler;
	int nsegs;
	int segs_count;
	struct r_dyld_chained_starts_in_segment **chained_starts;
	struct dyld_chained_fixups_header fixups_header;
	ut64 fixups_offset;
	ut64 fixups_size;
	struct MACH0_(section) *sects;
	int nsects;
	struct MACH0_(nlist) *symtab;
	ut8 *symstr;
	ut8 *func_start; //buffer that hold the data from LC_FUNCTION_STARTS
	int symstrlen;
	int nsymtab;
	ut32 *indirectsyms;
	int nindirectsyms;
	RBinImport **imports_by_ord;
	size_t imports_by_ord_size;
	HtPP *imports_by_name;
	struct dysymtab_command dysymtab;
	struct load_command main_cmd;
	struct dyld_info_command *dyld_info;
	struct dylib_table_of_contents *toc;
	int ntoc;
	struct MACH0_(dylib_module) *modtab;
	int nmodtab;
	struct thread_command thread;
	ut8 *signature;
	/* Architecture-specific initial thread state from LC_THREAD/LC_UNIXTHREAD. */
	union {
		struct x86_thread_state32 x86_32;
		struct x86_thread_state64 x86_64;
		struct ppc_thread_state32 ppc_32;
		struct ppc_thread_state64 ppc_64;
		struct arm_thread_state32 arm_32;
		struct arm_thread_state64 arm_64;
	} thread_state;
	char (*libs)[R_BIN_MACH0_STRING_LENGTH];
	int nlibs;
	int size;
	ut64 baddr;
	ut64 entry;
	bool big_endian;
	const char *file;
	RBuffer *b;
	int os;
	Sdb *kv;
	int has_crypto;
	int has_canary;
	int has_retguard;
	int has_sanitizers;
	int has_blocks_ext;
	int dbg_info;
	const char *lang;
	int uuidn;
	int func_size;
	bool verbose;
	ut64 header_at;
	ut64 symbols_off;
	void *user;
	/* Translate a virtual address to a physical offset within the buffer. */
	ut64 (*va2pa)(ut64 p, ut32 *offset, ut32 *left, RBinFile *bf);
	struct symbol_t *symbols;
	ut64 main_addr;
	int (*original_io_read)(RIO *io, RIODesc *fd, ut8 *buf, int count);
	bool rebasing_buffer;
};
| 0
|
29,927
|
/*
 * Decode DER-encoded EC parameters into an EC_KEY.
 *
 * If a is non-NULL and *a is non-NULL, that key is reused; otherwise a
 * fresh EC_KEY is allocated.  On success the key is returned and, when a
 * is non-NULL, also stored through *a.  On failure NULL is returned; a key
 * allocated by this call is freed and *a is left untouched, so the caller
 * never observes a dangling or leaked pointer.
 *
 * Fix vs. original: the original set *a to the freshly allocated key
 * before parsing and, on a d2i_ECPKParameters() failure, returned NULL
 * without freeing it — leaking the key and leaving *a pointing at an
 * object the caller has no reason to free.  We now free the key we
 * allocated on the error path and publish *a only after success.
 */
EC_KEY *d2i_ECParameters(EC_KEY **a, const unsigned char **in, long len)
{
    EC_KEY *ret;

    if (in == NULL || *in == NULL) {
        ECerr(EC_F_D2I_ECPARAMETERS, ERR_R_PASSED_NULL_PARAMETER);
        return NULL;
    }

    if (a == NULL || *a == NULL) {
        if ((ret = EC_KEY_new()) == NULL) {
            ECerr(EC_F_D2I_ECPARAMETERS, ERR_R_MALLOC_FAILURE);
            return NULL;
        }
    } else
        ret = *a;

    if (!d2i_ECPKParameters(&ret->group, in, len)) {
        ECerr(EC_F_D2I_ECPARAMETERS, ERR_R_EC_LIB);
        /* Only free a key we allocated here; a caller-supplied key stays
         * owned by the caller. */
        if (a == NULL || *a == NULL)
            EC_KEY_free(ret);
        return NULL;
    }

    /* Publish the result only once parsing has succeeded. */
    if (a)
        *a = ret;

    return ret;
}
| 0
|
137,982
|
/* Handle the v2 snapshot/subvolume-create ioctl: copy and validate the
 * user arguments (flags, optional qgroup inheritance blob), create the
 * snapshot or subvolume, and for async creation copy the transid back to
 * userspace.  Returns 0 or a negative errno. */
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	u64 transid = 0;
	u64 *ptr = NULL;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	/* Force NUL termination of the user-supplied name. */
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
	if (vol_args->flags &
	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
	      BTRFS_SUBVOL_QGROUP_INHERIT)) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}
	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
		ptr = &transid;
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		/* Bound the inherit blob before copying it from userspace. */
		if (vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}
	}
	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol, ptr,
					      readonly, inherit);
	if (ret)
		goto free_inherit;
	/* Async creation: hand the transaction id back to userspace. */
	if (ptr && copy_to_user(arg +
				offsetof(struct btrfs_ioctl_vol_args_v2,
					transid),
				ptr, sizeof(*ptr)))
		ret = -EFAULT;
free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}
| 0
|
143,101
|
static int fdt_include_supernodes(struct fdt_region_state *info, int depth)
{
int base = fdt_off_dt_struct(info->fdt);
int start, stop_at;
int i;
/*
* Work down the stack looking for supernodes that we didn't include.
* The algortihm here is actually pretty simple, since we know that
* no previous subnode had to include these nodes, or if it did, we
* marked them as included (on the stack) already.
*/
for (i = 0; i <= depth; i++) {
if (!info->stack[i].included) {
start = info->stack[i].offset;
/* Add the FDT_BEGIN_NODE tag of this supernode */
fdt_next_tag(info->fdt, start, &stop_at);
if (fdt_add_region(info, base + start, stop_at - start))
return -1;
/* Remember that this supernode is now included */
info->stack[i].included = 1;
info->can_merge = 1;
}
/* Force (later) generation of the FDT_END_NODE tag */
if (!info->stack[i].want)
info->stack[i].want = WANT_NODES_ONLY;
}
return 0;
}
| 0
|
165,035
|
PassRefPtrWillBeRawPtr<Node> Document::adoptNode(PassRefPtrWillBeRawPtr<Node> source, ExceptionState& exceptionState)
{
EventQueueScope scope;
switch (source->nodeType()) {
case DOCUMENT_NODE:
exceptionState.throwDOMException(NotSupportedError, "The node provided is of type '" + source->nodeName() + "', which may not be adopted.");
return nullptr;
case ATTRIBUTE_NODE: {
Attr* attr = toAttr(source.get());
if (RefPtrWillBeRawPtr<Element> ownerElement = attr->ownerElement())
ownerElement->removeAttributeNode(attr, exceptionState);
break;
}
default:
if (source->isShadowRoot()) {
exceptionState.throwDOMException(HierarchyRequestError, "The node provided is a shadow root, which may not be adopted.");
return nullptr;
}
if (source->isFrameOwnerElement()) {
HTMLFrameOwnerElement* frameOwnerElement = toHTMLFrameOwnerElement(source.get());
if (frame() && frame()->tree().isDescendantOf(frameOwnerElement->contentFrame())) {
exceptionState.throwDOMException(HierarchyRequestError, "The node provided is a frame which contains this document.");
return nullptr;
}
}
if (source->parentNode()) {
source->parentNode()->removeChild(source.get(), exceptionState);
if (exceptionState.hadException())
return nullptr;
RELEASE_ASSERT(!source->parentNode());
}
}
this->adoptIfNeeded(*source);
return source;
}
| 0
|
69,583
|
static int pop_check_mailbox (CONTEXT *ctx, int *index_hint)
{
int ret;
POP_DATA *pop_data = (POP_DATA *)ctx->data;
if ((pop_data->check_time + PopCheckTimeout) > time (NULL))
return 0;
pop_logout (ctx);
mutt_socket_close (pop_data->conn);
if (pop_open_connection (pop_data) < 0)
return -1;
ctx->size = pop_data->size;
mutt_message _("Checking for new messages...");
ret = pop_fetch_headers (ctx);
pop_clear_cache (pop_data);
if (ret < 0)
return -1;
if (ret > 0)
return MUTT_NEW_MAIL;
return 0;
}
| 0
|
416,067
|
zswapcolors(i_ctx_t * i_ctx_p)
{
ref_colorspace tmp_cs;
ref tmp_pat;
tmp_cs = istate->colorspace[0];
istate->colorspace[0] = istate->colorspace[1];
istate->colorspace[1] = tmp_cs;
tmp_pat = istate->pattern[0];
istate->pattern[0] = istate->pattern[1];
istate->pattern[1] = tmp_pat;
return gs_swapcolors(igs);
}
| 0
|
269,627
|
test_bson_append_deep (void)
{
bson_t *a;
bson_t *tmp;
int i;
a = bson_new ();
for (i = 0; i < 100; i++) {
tmp = a;
a = bson_new ();
BSON_ASSERT (bson_append_document (a, "a", -1, tmp));
bson_destroy (tmp);
}
BSON_ASSERT_BSON_EQUAL_FILE (a, "test38.bson");
bson_destroy (a);
}
| 0
|
233,823
|
GahpClient::gt4_gram_client_job_start(const char * job_contact)
{
static const char* command = "GT4_GRAM_JOB_START";
if (server->m_commands_supported->contains_anycase(command)==FALSE) {
return GAHPCLIENT_COMMAND_NOT_SUPPORTED;
}
if (!job_contact) job_contact=NULLSTRING;
std::string reqline;
int x = sprintf(reqline,"%s",escapeGahpString(job_contact));
ASSERT( x > 0 );
const char *buf = reqline.c_str();
if ( !is_pending(command,buf) ) {
if ( m_mode == results_only ) {
return GAHPCLIENT_COMMAND_NOT_SUBMITTED;
}
now_pending(command,buf,normal_proxy);
}
Gahp_Args* result = get_pending_result(command,buf);
if ( result ) {
if (result->argc != 3) {
EXCEPT("Bad %s Result",command);
}
int rc = atoi(result->argv[1]);
if ( strcasecmp(result->argv[2], NULLSTRING) ) {
error_string = result->argv[2];
} else {
error_string = "";
}
delete result;
return rc;
}
if ( check_pending_timeout(command,buf) ) {
sprintf( error_string, "%s timed out", command );
return GAHPCLIENT_COMMAND_TIMED_OUT;
}
return GAHPCLIENT_COMMAND_PENDING;
}
| 0
|
30,555
|
int kvm_device_intx_assign ( KVMState * s , uint32_t dev_id , bool use_host_msi , uint32_t guest_irq ) {
uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX | ( use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX ) ;
return kvm_assign_irq_internal ( s , dev_id , irq_type , guest_irq ) ;
}
| 0
|
161,995
|
MonoReflectionEvent *
mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb)
{
g_assert_not_reached ();
return NULL;
| 0
|
224,689
|
void OnJsExecutionDone(base::Closure callback, const base::Value* value) {
js_result_.reset(value->DeepCopy());
callback.Run();
}
| 0
|
60,920
|
void pjsip_dlg_on_rx_request( pjsip_dialog *dlg, pjsip_rx_data *rdata )
{
pj_status_t status;
pjsip_transaction *tsx = NULL;
pj_bool_t processed = PJ_FALSE;
unsigned i;
PJ_LOG(5,(dlg->obj_name, "Received %s",
pjsip_rx_data_get_info(rdata)));
pj_log_push_indent();
/* Lock dialog and increment session. */
pjsip_dlg_inc_lock(dlg);
/* Check CSeq */
if (rdata->msg_info.cseq->cseq <= dlg->remote.cseq &&
rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD &&
rdata->msg_info.msg->line.req.method.id != PJSIP_CANCEL_METHOD)
{
/* Invalid CSeq.
* Respond statelessly with 500 (Internal Server Error)
*/
pj_str_t warn_text;
/* Unlock dialog and dec session, may destroy dialog. */
pjsip_dlg_dec_lock(dlg);
pj_assert(pjsip_rdata_get_tsx(rdata) == NULL);
warn_text = pj_str("Invalid CSeq");
pjsip_endpt_respond_stateless(dlg->endpt,
rdata, 500, &warn_text, NULL, NULL);
pj_log_pop_indent();
return;
}
/* Update CSeq. */
dlg->remote.cseq = rdata->msg_info.cseq->cseq;
/* Update To tag if necessary.
* This only happens if UAS sends a new request before answering
* our request (e.g. UAS sends NOTIFY before answering our
* SUBSCRIBE request).
*/
if (dlg->remote.info->tag.slen == 0) {
pj_strdup(dlg->pool, &dlg->remote.info->tag,
&rdata->msg_info.from->tag);
}
/* Create UAS transaction for this request. */
if (pjsip_rdata_get_tsx(rdata) == NULL &&
rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD)
{
status = pjsip_tsx_create_uas(dlg->ua, rdata, &tsx);
if (status != PJ_SUCCESS) {
/* Once case for this is when re-INVITE contains same
* Via branch value as previous INVITE (ticket #965).
*/
char errmsg[PJ_ERR_MSG_SIZE];
pj_str_t reason;
reason = pj_strerror(status, errmsg, sizeof(errmsg));
pjsip_endpt_respond_stateless(dlg->endpt, rdata, 500, &reason,
NULL, NULL);
goto on_return;
}
/* Put this dialog in the transaction data. */
tsx->mod_data[dlg->ua->id] = dlg;
/* Add transaction count. */
++dlg->tsx_count;
}
/* Update the target URI if this is a target refresh request.
* We have passed the basic checking for the request, I think we
* should update the target URI regardless of whether the request
* is accepted or not (e.g. when re-INVITE is answered with 488,
* we would still need to update the target URI, otherwise our
* target URI would be wrong, wouldn't it).
*/
if (pjsip_method_creates_dialog(&rdata->msg_info.cseq->method)) {
pjsip_contact_hdr *contact;
contact = (pjsip_contact_hdr*)
pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_CONTACT,
NULL);
if (contact && contact->uri &&
(dlg->remote.contact==NULL ||
pjsip_uri_cmp(PJSIP_URI_IN_REQ_URI,
dlg->remote.contact->uri,
contact->uri)))
{
dlg->remote.contact = (pjsip_contact_hdr*)
pjsip_hdr_clone(dlg->pool, contact);
dlg->target = dlg->remote.contact->uri;
}
}
/* Report the request to dialog usages. */
for (i=0; i<dlg->usage_cnt; ++i) {
if (!dlg->usage[i]->on_rx_request)
continue;
processed = (*dlg->usage[i]->on_rx_request)(rdata);
if (processed)
break;
}
/* Feed the first request to the transaction. */
if (tsx)
pjsip_tsx_recv_msg(tsx, rdata);
/* If no dialog usages has claimed the processing of the transaction,
* and if transaction has not sent final response, respond with
* 500/Internal Server Error.
*/
if (!processed && tsx && tsx->status_code < 200) {
pjsip_tx_data *tdata;
const pj_str_t reason = { "Unhandled by dialog usages", 26};
PJ_LOG(4,(tsx->obj_name, "%s was unhandled by "
"dialog usages, sending 500 response",
pjsip_rx_data_get_info(rdata)));
status = pjsip_dlg_create_response(dlg, rdata, 500, &reason, &tdata);
if (status == PJ_SUCCESS) {
status = pjsip_dlg_send_response(dlg, tsx, tdata);
}
}
on_return:
/* Unlock dialog and dec session, may destroy dialog. */
pjsip_dlg_dec_lock(dlg);
pj_log_pop_indent();
}
| 0
|
17,930
|
static unsigned int hc_entries ( unsigned int cnt ) {
cnt = cnt & 7 ? ( cnt / 8 ) + 1 : cnt / 8 ;
return cnt < avail_tree_table_sz ? cnt : avail_tree_table_sz - 1 ;
}
| 0
|
485,803
|
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
struct io_cancel_data cd = {
.ctx = req->ctx,
.data = cancel->addr,
.flags = cancel->flags,
.seq = atomic_inc_return(&req->ctx->cancel_seq),
};
struct io_uring_task *tctx = req->task->io_uring;
int ret;
if (cd.flags & IORING_ASYNC_CANCEL_FD) {
if (req->flags & REQ_F_FIXED_FILE ||
cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
req->flags |= REQ_F_FIXED_FILE;
req->file = io_file_get_fixed(req, cancel->fd,
issue_flags);
} else {
req->file = io_file_get_normal(req, cancel->fd);
}
if (!req->file) {
ret = -EBADF;
goto done;
}
cd.file = req->file;
}
ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
return IOU_OK;
}
| 0
|
384,598
|
xmlTextReaderReadState(xmlTextReaderPtr reader) {
if (reader == NULL)
return(-1);
return(reader->mode);
}
| 0
|
495,556
|
u32 gf_isom_get_pssh_count(GF_ISOFile *file)
{
u32 count=0;
u32 i=0;
GF_Box *a_box;
if (file->moov) {
while ((a_box = (GF_Box*)gf_list_enum(file->moov->child_boxes, &i))) {
if (a_box->type != GF_ISOM_BOX_TYPE_PSSH) continue;
count++;
}
}
if (file->meta) {
while ((a_box = (GF_Box*)gf_list_enum(file->meta->child_boxes, &i))) {
if (a_box->type != GF_ISOM_BOX_TYPE_PSSH) continue;
count++;
}
}
return count;
}
| 0
|
340,526
|
static inline int writer_print_string(WriterContext *wctx,
const char *key, const char *val, int opt)
{
const struct section *section = wctx->section[wctx->level];
int ret = 0;
if (opt && !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
return 0;
if (section->show_all_entries || av_dict_get(section->entries_to_show, key, NULL, 0)) {
wctx->writer->print_string(wctx, key, val);
wctx->nb_item[wctx->level]++;
}
return ret;
}
| 0
|
66,510
|
inline unsigned int& exception_mode() {
return exception_mode(0,false);
}
| 0
|
171,557
|
static int auth_session_timeout_cb(CALLBACK_FRAME) {
pr_event_generate("core.timeout-session", NULL);
pr_response_send_async(R_421,
_("Session Timeout (%d seconds): closing control connection"),
TimeoutSession);
pr_log_pri(PR_LOG_INFO, "%s", "FTP session timed out, disconnected");
pr_session_disconnect(&auth_module, PR_SESS_DISCONNECT_TIMEOUT,
"TimeoutSession");
/* no need to restart the timer -- session's over */
return 0;
}
| 0
|
178,039
|
bool HasPermissionsForFileSystem(const std::string& filesystem_id,
int permissions) {
FileSystemMap::const_iterator it =
filesystem_permissions_.find(filesystem_id);
if (it == filesystem_permissions_.end())
return false;
return (it->second & permissions) == permissions;
}
| 0
|
69,473
|
bool CModules::OnUserCTCPReplyMessage(CCTCPMessage& Message) {
MODHALTCHK(OnUserCTCPReplyMessage(Message));
}
| 0
|
17,602
|
static void g2rgb ( fz_context * ctx , fz_color_converter * cc , float * dv , const float * sv ) {
dv [ 0 ] = sv [ 0 ] ;
dv [ 1 ] = sv [ 0 ] ;
dv [ 2 ] = sv [ 0 ] ;
}
| 0
|
472,028
|
STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
{
stbi__context s;
stbi__start_file(&s,f);
return stbi__loadf_main(&s,x,y,comp,req_comp);
}
| 0
|
274,732
|
static struct reg_code_blocks *
S_alloc_code_blocks(pTHX_ int ncode)
{
struct reg_code_blocks *cbs;
Newx(cbs, 1, struct reg_code_blocks);
cbs->count = ncode;
cbs->refcnt = 1;
SAVEDESTRUCTOR_X(S_free_codeblocks, cbs);
if (ncode)
Newx(cbs->cb, ncode, struct reg_code_block);
else
cbs->cb = NULL;
return cbs;
| 0
|
449,968
|
static void hdr_dump_tokens(struct crypt_device *cd, json_object *hdr_jobj)
{
char token[16];
json_object *tokens_jobj, *jobj2, *jobj3, *val;
const char *tmps;
int i, j;
log_std(cd, "Tokens:\n");
json_object_object_get_ex(hdr_jobj, "tokens", &tokens_jobj);
for (j = 0; j < LUKS2_TOKENS_MAX; j++) {
(void) snprintf(token, sizeof(token), "%i", j);
json_object_object_get_ex(tokens_jobj, token, &val);
if (!val)
continue;
json_object_object_get_ex(val, "type", &jobj2);
tmps = json_object_get_string(jobj2);
log_std(cd, " %s: %s\n", token, tmps);
LUKS2_token_dump(cd, j);
json_object_object_get_ex(val, "keyslots", &jobj2);
for (i = 0; i < (int) json_object_array_length(jobj2); i++) {
jobj3 = json_object_array_get_idx(jobj2, i);
log_std(cd, "\tKeyslot: %s\n", json_object_get_string(jobj3));
}
}
}
| 0
|
260,776
|
static int pad_pkcs2(bn_t m, int *p_len, int m_len, int k_len, int operation) {
uint8_t pad, h1[RLC_MD_LEN], h2[RLC_MD_LEN];
/* MSVC does not allow dynamic stack arrays */
uint8_t *mask = RLC_ALLOCA(uint8_t, k_len);
int result = RLC_ERR;
bn_t t;
bn_null(t);
RLC_TRY {
bn_new(t);
switch (operation) {
case RSA_ENC:
/* DB = lHash | PS | 01 | D. */
md_map(h1, NULL, 0);
bn_read_bin(m, h1, RLC_MD_LEN);
*p_len = k_len - 2 * RLC_MD_LEN - 2 - m_len;
bn_lsh(m, m, *p_len * 8);
bn_lsh(m, m, 8);
bn_add_dig(m, m, 0x01);
/* Make room for the real message. */
bn_lsh(m, m, m_len * 8);
result = RLC_OK;
break;
case RSA_ENC_FIN:
/* EB = 00 | maskedSeed | maskedDB. */
rand_bytes(h1, RLC_MD_LEN);
md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
for (int i = 0; i < t->used; i++) {
m->dp[i] ^= t->dp[i];
}
bn_write_bin(mask, k_len - RLC_MD_LEN - 1, m);
md_mgf(h2, RLC_MD_LEN, mask, k_len - RLC_MD_LEN - 1);
for (int i = 0; i < RLC_MD_LEN; i++) {
h1[i] ^= h2[i];
}
bn_read_bin(t, h1, RLC_MD_LEN);
bn_lsh(t, t, 8 * (k_len - RLC_MD_LEN - 1));
bn_add(t, t, m);
bn_copy(m, t);
result = RLC_OK;
break;
case RSA_DEC:
m_len = k_len - 1;
bn_rsh(t, m, 8 * m_len);
if (bn_is_zero(t)) {
m_len -= RLC_MD_LEN;
bn_rsh(t, m, 8 * m_len);
bn_write_bin(h1, RLC_MD_LEN, t);
bn_mod_2b(m, m, 8 * m_len);
bn_write_bin(mask, m_len, m);
md_mgf(h2, RLC_MD_LEN, mask, m_len);
for (int i = 0; i < RLC_MD_LEN; i++) {
h1[i] ^= h2[i];
}
md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
for (int i = 0; i < t->used; i++) {
m->dp[i] ^= t->dp[i];
}
m_len -= RLC_MD_LEN;
bn_rsh(t, m, 8 * m_len);
bn_write_bin(h2, RLC_MD_LEN, t);
md_map(h1, NULL, 0);
pad = 0;
for (int i = 0; i < RLC_MD_LEN; i++) {
pad |= h1[i] ^ h2[i];
}
bn_mod_2b(m, m, 8 * m_len);
*p_len = bn_size_bin(m);
(*p_len)--;
bn_rsh(t, m, *p_len * 8);
if (pad == 0 && bn_cmp_dig(t, 1) == RLC_EQ) {
result = RLC_OK;
}
bn_mod_2b(m, m, *p_len * 8);
*p_len = k_len - *p_len;
}
break;
case RSA_SIG:
case RSA_SIG_HASH:
/* M' = 00 00 00 00 00 00 00 00 | H(M). */
bn_zero(m);
bn_lsh(m, m, 64);
/* Make room for the real message. */
bn_lsh(m, m, RLC_MD_LEN * 8);
result = RLC_OK;
break;
case RSA_SIG_FIN:
memset(mask, 0, 8);
bn_write_bin(mask + 8, RLC_MD_LEN, m);
md_map(h1, mask, RLC_MD_LEN + 8);
bn_read_bin(m, h1, RLC_MD_LEN);
md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
t->dp[0] ^= 0x01;
/* m_len is now the size in bits of the modulus. */
bn_lsh(t, t, 8 * RLC_MD_LEN);
bn_add(m, t, m);
bn_lsh(m, m, 8);
bn_add_dig(m, m, RSA_PSS);
for (int i = m_len - 1; i < 8 * k_len; i++) {
bn_set_bit(m, i, 0);
}
result = RLC_OK;
break;
case RSA_VER:
case RSA_VER_HASH:
bn_mod_2b(t, m, 8);
pad = (uint8_t)t->dp[0];
if (pad == RSA_PSS) {
int r = 1;
for (int i = m_len; i < 8 * k_len; i++) {
if (bn_get_bit(m, i) != 0) {
r = 0;
}
}
bn_rsh(m, m, 8);
bn_mod_2b(t, m, 8 * RLC_MD_LEN);
bn_write_bin(h2, RLC_MD_LEN, t);
bn_rsh(m, m, 8 * RLC_MD_LEN);
bn_write_bin(h1, RLC_MD_LEN, t);
md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
for (int i = 0; i < t->used; i++) {
m->dp[i] ^= t->dp[i];
}
m->dp[0] ^= 0x01;
for (int i = m_len - 1; i < 8 * k_len; i++) {
bn_set_bit(m, i - ((RLC_MD_LEN + 1) * 8), 0);
}
if (r == 1 && bn_is_zero(m)) {
result = RLC_OK;
}
bn_read_bin(m, h2, RLC_MD_LEN);
*p_len = k_len - RLC_MD_LEN;
}
break;
}
}
RLC_CATCH_ANY {
result = RLC_ERR;
}
RLC_FINALLY {
bn_free(t);
}
RLC_FREE(mask);
return result;
}
| 0
|
102,828
|
int lzxd_set_reference_data(struct lzxd_stream *lzx,
struct mspack_system *system,
struct mspack_file *input,
unsigned int length)
{
if (!lzx) return MSPACK_ERR_ARGS;
if (!lzx->is_delta) {
D(("only LZX DELTA streams support reference data"))
return MSPACK_ERR_ARGS;
}
if (lzx->offset) {
D(("too late to set reference data after decoding starts"))
return MSPACK_ERR_ARGS;
}
if (length > lzx->window_size) {
D(("reference length (%u) is longer than the window", length))
return MSPACK_ERR_ARGS;
}
if (length > 0 && (!system || !input)) {
D(("length > 0 but no system or input"))
return MSPACK_ERR_ARGS;
}
lzx->ref_data_size = length;
if (length > 0) {
/* copy reference data */
unsigned char *pos = &lzx->window[lzx->window_size - length];
int bytes = system->read(input, pos, length);
/* length can't be more than 2^25, so no signedness problem */
if (bytes < (int)length) return MSPACK_ERR_READ;
}
lzx->ref_data_size = length;
return MSPACK_ERR_OK;
}
| 0
|
262,064
|
static CPU_CONST *const_by_value(CPU_MODEL *cpu, int type, ut32 v) {
CPU_CONST **clist, *citem;
for (clist = cpu->consts; *clist; clist++) {
for (citem = *clist; citem && citem->key; citem++) {
if (citem->value == (MASK (citem->size * 8) & v)
&& (type == CPU_CONST_NONE || type == citem->type)) {
return citem;
}
}
}
if (cpu->inherit_cpu_p)
return const_by_value (cpu->inherit_cpu_p, type, v);
return NULL;
}
| 0
|
13,031
|
DisplayItemListTest()
: m_displayItemList(DisplayItemList::create())
, m_originalSlimmingPaintSubsequenceCachingEnabled(RuntimeEnabledFeatures::slimmingPaintSubsequenceCachingEnabled()) { }
| 1
|
9,763
|
void AutofillPopupBaseView::AddExtraInitParams(
views::Widget::InitParams* params) {
params->opacity = views::Widget::InitParams::TRANSLUCENT_WINDOW;
params->shadow_type = views::Widget::InitParams::SHADOW_TYPE_NONE;
}
| 1
|
365,453
|
xmlIsNameChar(xmlParserCtxtPtr ctxt, int c) {
if ((ctxt->options & XML_PARSE_OLD10) == 0) {
/*
* Use the new checks of production [4] [4a] amd [5] of the
* Update 5 of XML-1.0
*/
if ((c != ' ') && (c != '>') && (c != '/') && /* accelerators */
(((c >= 'a') && (c <= 'z')) ||
((c >= 'A') && (c <= 'Z')) ||
((c >= '0') && (c <= '9')) || /* !start */
(c == '_') || (c == ':') ||
(c == '-') || (c == '.') || (c == 0xB7) || /* !start */
((c >= 0xC0) && (c <= 0xD6)) ||
((c >= 0xD8) && (c <= 0xF6)) ||
((c >= 0xF8) && (c <= 0x2FF)) ||
((c >= 0x300) && (c <= 0x36F)) || /* !start */
((c >= 0x370) && (c <= 0x37D)) ||
((c >= 0x37F) && (c <= 0x1FFF)) ||
((c >= 0x200C) && (c <= 0x200D)) ||
((c >= 0x203F) && (c <= 0x2040)) || /* !start */
((c >= 0x2070) && (c <= 0x218F)) ||
((c >= 0x2C00) && (c <= 0x2FEF)) ||
((c >= 0x3001) && (c <= 0xD7FF)) ||
((c >= 0xF900) && (c <= 0xFDCF)) ||
((c >= 0xFDF0) && (c <= 0xFFFD)) ||
((c >= 0x10000) && (c <= 0xEFFFF))))
return(1);
} else {
if ((IS_LETTER(c)) || (IS_DIGIT(c)) ||
(c == '.') || (c == '-') ||
(c == '_') || (c == ':') ||
(IS_COMBINING(c)) ||
(IS_EXTENDER(c)))
return(1);
}
return(0);
}
| 0
|
330,915
|
static int inject_error(BlockDriverState *bs, BlkdebugRule *rule)
{
BDRVBlkdebugState *s = bs->opaque;
int error = rule->options.inject.error;
bool immediately = rule->options.inject.immediately;
if (rule->options.inject.once) {
QSIMPLEQ_REMOVE(&s->active_rules, rule, BlkdebugRule, active_next);
remove_rule(rule);
}
if (!immediately) {
aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), error_callback_bh,
qemu_coroutine_self());
qemu_coroutine_yield();
}
return -error;
}
| 1
|
320,765
|
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
ptrdiff_t linesize_arg,
int block_w, int block_h,
int src_x, int src_y, int w, int h)
{
int x, y;
int start_y, start_x, end_y, end_x;
emuedge_linesize_type linesize = linesize_arg;
if (!w || !h)
return;
if (src_y >= h) {
src -= src_y * linesize;
src += (h - 1) * linesize;
src_y = h - 1;
} else if (src_y <= -block_h) {
src -= src_y * linesize;
src += (1 - block_h) * linesize;
src_y = 1 - block_h;
}
if (src_x >= w) {
src += (w - 1 - src_x) * sizeof(pixel);
src_x = w - 1;
} else if (src_x <= -block_w) {
src += (1 - block_w - src_x) * sizeof(pixel);
src_x = 1 - block_w;
}
start_y = FFMAX(0, -src_y);
start_x = FFMAX(0, -src_x);
end_y = FFMIN(block_h, h-src_y);
end_x = FFMIN(block_w, w-src_x);
av_assert2(start_y < end_y && block_h);
av_assert2(start_x < end_x && block_w);
w = end_x - start_x;
src += start_y * linesize + start_x * sizeof(pixel);
buf += start_x * sizeof(pixel);
// top
for (y = 0; y < start_y; y++) {
memcpy(buf, src, w * sizeof(pixel));
buf += linesize;
}
// copy existing part
for (; y < end_y; y++) {
memcpy(buf, src, w * sizeof(pixel));
src += linesize;
buf += linesize;
}
// bottom
src -= linesize;
for (; y < block_h; y++) {
memcpy(buf, src, w * sizeof(pixel));
buf += linesize;
}
buf -= block_h * linesize + start_x * sizeof(pixel);
while (block_h--) {
pixel *bufp = (pixel *) buf;
// left
for(x = 0; x < start_x; x++) {
bufp[x] = bufp[start_x];
}
// right
for (x = end_x; x < block_w; x++) {
bufp[x] = bufp[end_x - 1];
}
buf += linesize;
}
}
| 1
|
284,678
|
bool Document::SetFocusedElement(Element* new_focused_element,
const FocusParams& params) {
DCHECK(!lifecycle_.InDetach());
clear_focused_element_timer_.Stop();
if (new_focused_element && (new_focused_element->GetDocument() != this))
return true;
if (NodeChildRemovalTracker::IsBeingRemoved(new_focused_element))
return true;
if (focused_element_ == new_focused_element)
return true;
bool focus_change_blocked = false;
Element* old_focused_element = focused_element_;
focused_element_ = nullptr;
UpdateDistributionForFlatTreeTraversal();
Node* ancestor = (old_focused_element && old_focused_element->isConnected() &&
new_focused_element)
? FlatTreeTraversal::CommonAncestor(*old_focused_element,
*new_focused_element)
: nullptr;
if (old_focused_element) {
old_focused_element->SetFocused(false, params.type);
old_focused_element->SetHasFocusWithinUpToAncestor(false, ancestor);
if (GetPage() && (GetPage()->GetFocusController().IsFocused())) {
old_focused_element->DispatchBlurEvent(new_focused_element, params.type,
params.source_capabilities);
if (focused_element_) {
focus_change_blocked = true;
new_focused_element = nullptr;
}
old_focused_element->DispatchFocusOutEvent(EventTypeNames::focusout,
new_focused_element,
params.source_capabilities);
old_focused_element->DispatchFocusOutEvent(EventTypeNames::DOMFocusOut,
new_focused_element,
params.source_capabilities);
if (focused_element_) {
focus_change_blocked = true;
new_focused_element = nullptr;
}
}
}
if (new_focused_element)
UpdateStyleAndLayoutTreeForNode(new_focused_element);
if (new_focused_element && new_focused_element->IsFocusable()) {
if (IsRootEditableElement(*new_focused_element) &&
!AcceptsEditingFocus(*new_focused_element)) {
focus_change_blocked = true;
goto SetFocusedElementDone;
}
focused_element_ = new_focused_element;
SetSequentialFocusNavigationStartingPoint(focused_element_.Get());
if (params.type != kWebFocusTypeNone)
last_focus_type_ = params.type;
focused_element_->SetFocused(true, params.type);
focused_element_->SetHasFocusWithinUpToAncestor(true, ancestor);
if (focused_element_ != new_focused_element) {
focus_change_blocked = true;
goto SetFocusedElementDone;
}
CancelFocusAppearanceUpdate();
EnsurePaintLocationDataValidForNode(focused_element_);
if (focused_element_ != new_focused_element) {
focus_change_blocked = true;
goto SetFocusedElementDone;
}
focused_element_->UpdateFocusAppearanceWithOptions(
params.selection_behavior, params.options);
if (GetPage() && (GetPage()->GetFocusController().IsFocused())) {
focused_element_->DispatchFocusEvent(old_focused_element, params.type,
params.source_capabilities);
if (focused_element_ != new_focused_element) {
focus_change_blocked = true;
goto SetFocusedElementDone;
}
focused_element_->DispatchFocusInEvent(EventTypeNames::focusin,
old_focused_element, params.type,
params.source_capabilities);
if (focused_element_ != new_focused_element) {
focus_change_blocked = true;
goto SetFocusedElementDone;
}
focused_element_->DispatchFocusInEvent(EventTypeNames::DOMFocusIn,
old_focused_element, params.type,
params.source_capabilities);
if (focused_element_ != new_focused_element) {
focus_change_blocked = true;
goto SetFocusedElementDone;
}
}
}
if (!focus_change_blocked && focused_element_) {
if (AXObjectCache* cache = ExistingAXObjectCache()) {
cache->HandleFocusedUIElementChanged(old_focused_element,
new_focused_element);
}
}
if (!focus_change_blocked && GetPage()) {
GetPage()->GetChromeClient().FocusedNodeChanged(old_focused_element,
focused_element_.Get());
}
SetFocusedElementDone:
UpdateStyleAndLayoutTree();
if (LocalFrame* frame = GetFrame())
frame->Selection().DidChangeFocus();
return !focus_change_blocked;
}
| 0
|
143,966
|
static void SendATCommand(struct mp_port *mtpt)
{
// a t cr lf
unsigned char ch[] = {0x61,0x74,0x0d,0x0a,0x0};
unsigned char lineControl;
unsigned char i=0;
unsigned char Divisor = 0xc;
lineControl = serial_inp(mtpt,UART_LCR);
serial_outp(mtpt,UART_LCR,(lineControl | UART_LCR_DLAB));
serial_outp(mtpt,UART_DLL,(Divisor & 0xff));
serial_outp(mtpt,UART_DLM,(Divisor & 0xff00)>>8); //baudrate is 4800
serial_outp(mtpt,UART_LCR,lineControl);
serial_outp(mtpt,UART_LCR,0x03); // N-8-1
serial_outp(mtpt,UART_FCR,7);
serial_outp(mtpt,UART_MCR,0x3);
while(ch[i]){
while((serial_inp(mtpt,UART_LSR) & 0x60) !=0x60){
;
}
serial_outp(mtpt,0,ch[i++]);
}
}// end of SendATCommand()
| 0
|
88,132
|
static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
CPU_BASED_VIRTUAL_INTR_PENDING);
kvm_make_request(KVM_REQ_EVENT, vcpu);
++vcpu->stat.irq_window_exits;
return 1;
}
| 0
|
176,046
|
void ShellWindowFrameView::Init(views::Widget* frame) {
frame_ = frame;
if (!is_frameless_) {
ui::ResourceBundle& rb = ui::ResourceBundle::GetSharedInstance();
close_button_ = new views::ImageButton(this);
close_button_->SetImage(views::CustomButton::BS_NORMAL,
rb.GetNativeImageNamed(IDR_CLOSE_BAR).ToImageSkia());
close_button_->SetImage(views::CustomButton::BS_HOT,
rb.GetNativeImageNamed(IDR_CLOSE_BAR_H).ToImageSkia());
close_button_->SetImage(views::CustomButton::BS_PUSHED,
rb.GetNativeImageNamed(IDR_CLOSE_BAR_P).ToImageSkia());
close_button_->SetAccessibleName(
l10n_util::GetStringUTF16(IDS_APP_ACCNAME_CLOSE));
AddChildView(close_button_);
}
#if defined(USE_ASH)
aura::Window* window = frame->GetNativeWindow();
int outside_bounds = ui::GetDisplayLayout() == ui::LAYOUT_TOUCH ?
kResizeOutsideBoundsSizeTouch :
kResizeOutsideBoundsSize;
window->set_hit_test_bounds_override_outer(
gfx::Insets(-outside_bounds, -outside_bounds,
-outside_bounds, -outside_bounds));
window->set_hit_test_bounds_override_inner(
gfx::Insets(kResizeInsideBoundsSize, kResizeInsideBoundsSize,
kResizeInsideBoundsSize, kResizeInsideBoundsSize));
#endif
}
| 0
|
252,865
|
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
int handled = 0;
int n;
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
addr += n;
len -= n;
v += n;
} while (len);
return handled;
}
| 0
|
183,767
|
void LocalFrameClientImpl::DidFinishSameDocumentNavigation(
HistoryItem* item,
WebHistoryCommitType commit_type,
bool content_initiated) {
bool should_create_history_entry = commit_type == kWebStandardCommit;
web_frame_->ViewImpl()->DidCommitLoad(should_create_history_entry, true);
if (web_frame_->Client()) {
web_frame_->Client()->DidFinishSameDocumentNavigation(
WebHistoryItem(item), commit_type, content_initiated);
}
virtual_time_pauser_.UnpauseVirtualTime();
}
| 0
|
182,905
|
void free_xbzrle_decoded_buf(void)
{
g_free(xbzrle_decoded_buf);
xbzrle_decoded_buf = NULL;
}
| 0
|
466,283
|
TEST_F(HttpConnectionManagerImplTest, Http10Rejected) {
setup(false, "");
EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10));
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {
decoder_ = &conn_manager_->newStream(response_encoder_);
RequestHeaderMapPtr headers{
new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/"}}};
decoder_->decodeHeaders(std::move(headers), true);
data.drain(4);
return Http::okStatus();
}));
EXPECT_CALL(response_encoder_, encodeHeaders(_, true))
.WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {
EXPECT_EQ("426", headers.getStatusValue());
EXPECT_EQ("close", headers.getConnectionValue());
}));
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false);
}
| 0
|
465,464
|
static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int idx;
idx = srcu_read_lock(&kvm->srcu);
kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
srcu_read_unlock(&kvm->srcu, idx);
}
| 0
|
58,396
|
cmsBool Type_UcrBg_Write(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, void* Ptr, cmsUInt32Number nItems)
{
cmsUcrBg* Value = (cmsUcrBg*) Ptr;
cmsUInt32Number TextSize;
char* Text;
// First curve is Under color removal
if (!_cmsWriteUInt32Number(io, Value ->Ucr ->nEntries)) return FALSE;
if (!_cmsWriteUInt16Array(io, Value ->Ucr ->nEntries, Value ->Ucr ->Table16)) return FALSE;
// Then black generation
if (!_cmsWriteUInt32Number(io, Value ->Bg ->nEntries)) return FALSE;
if (!_cmsWriteUInt16Array(io, Value ->Bg ->nEntries, Value ->Bg ->Table16)) return FALSE;
// Now comes the text. The length is specified by the tag size
TextSize = cmsMLUgetASCII(Value ->Desc, cmsNoLanguage, cmsNoCountry, NULL, 0);
Text = (char*) _cmsMalloc(self ->ContextID, TextSize);
if (cmsMLUgetASCII(Value ->Desc, cmsNoLanguage, cmsNoCountry, Text, TextSize) != TextSize) return FALSE;
if (!io ->Write(io, TextSize, Text)) return FALSE;
_cmsFree(self ->ContextID, Text);
return TRUE;
cmsUNUSED_PARAMETER(nItems);
}
| 0
|
373,378
|
static const char *plug_to_string(test_plug_t plug)
{
switch (plug) {
case PLUG_NONE:
return "open, ";
case PLUG_RESET:
return "closed, ";
case PLUG_TIMEOUT:
return "timeout,";
default:
return "unknown,";
}
}
| 0
|
60,043
|
skip_for_lines(void *fi_void, evalarg_T *evalarg)
{
forinfo_T *fi = (forinfo_T *)fi_void;
int i;
for (i = 0; i < fi->fi_break_count; ++i)
eval_next_line(evalarg);
}
| 0
|
485,307
|
bgp_write (struct thread *thread)
{
struct peer *peer;
u_char type;
struct stream *s;
int num;
unsigned int count = 0;
int write_errno;
/* Yes first of all get peer pointer. */
peer = THREAD_ARG (thread);
peer->t_write = NULL;
/* For non-blocking IO check. */
if (peer->status == Connect)
{
bgp_connect_check (peer);
return 0;
}
/* Nonblocking write until TCP output buffer is full. */
while (1)
{
int writenum;
int val;
s = bgp_write_packet (peer);
if (! s)
return 0;
/* XXX: FIXME, the socket should be NONBLOCK from the start
* status shouldnt need to be toggled on each write
*/
val = fcntl (peer->fd, F_GETFL, 0);
fcntl (peer->fd, F_SETFL, val|O_NONBLOCK);
/* Number of bytes to be sent. */
writenum = stream_get_endp (s) - stream_get_getp (s);
/* Call write() system call. */
num = write (peer->fd, STREAM_PNT (s), writenum);
write_errno = errno;
fcntl (peer->fd, F_SETFL, val);
if (num <= 0)
{
/* Partial write. */
if (write_errno == EWOULDBLOCK || write_errno == EAGAIN)
break;
BGP_EVENT_ADD (peer, TCP_fatal_error);
return 0;
}
if (num != writenum)
{
stream_forward_getp (s, num);
if (write_errno == EAGAIN)
break;
continue;
}
/* Retrieve BGP packet type. */
stream_set_getp (s, BGP_MARKER_SIZE + 2);
type = stream_getc (s);
switch (type)
{
case BGP_MSG_OPEN:
peer->open_out++;
break;
case BGP_MSG_UPDATE:
peer->update_out++;
break;
case BGP_MSG_NOTIFY:
peer->notify_out++;
/* Double start timer. */
peer->v_start *= 2;
/* Overflow check. */
if (peer->v_start >= (60 * 2))
peer->v_start = (60 * 2);
/* Flush any existing events */
BGP_EVENT_ADD (peer, BGP_Stop);
return 0;
case BGP_MSG_KEEPALIVE:
peer->keepalive_out++;
break;
case BGP_MSG_ROUTE_REFRESH_NEW:
case BGP_MSG_ROUTE_REFRESH_OLD:
peer->refresh_out++;
break;
case BGP_MSG_CAPABILITY:
peer->dynamic_cap_out++;
break;
}
/* OK we send packet so delete it. */
bgp_packet_delete (peer);
if (++count >= BGP_WRITE_PACKET_MAX)
break;
}
if (bgp_write_proceed (peer))
BGP_WRITE_ON (peer->t_write, bgp_write, peer->fd);
return 0;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.