idx
int64 | func
string | target
int64 |
|---|---|---|
246,746
|
static u32 do_write_udp()
{
GF_Err e;
GF_Socket *sock = gf_sk_new(GF_SOCK_TYPE_UDP);
u16 port = 2345;
char *sep = strrchr(udp_dest, ':');
if (sep) {
sep[0] = 0;
port = atoi(sep+1);
}
e = gf_sk_bind( sock, "127.0.0.1", 0, udp_dest, port, 0);
if (sep) sep[0] = ':';
if (e) {
M4_LOG(GF_LOG_ERROR, ("Failed to bind socket to %s: %s\n", udp_dest, gf_error_to_string(e) ));
} else {
e = gf_sk_send(sock, (u8 *) inName, (u32)strlen(inName));
if (e)
M4_LOG(GF_LOG_ERROR, ("Failed to send datagram: %s\n", gf_error_to_string(e) ));
}
gf_sk_del(sock);
return 0;
}
| 0
|
353,008
|
hashIter(
HASH_CONTEXT *HASHcontext,
unsigned char *HASHdigest,
unsigned char *value,
int len)
{
HASH_CONTEXT ctx = *HASHcontext;
HASH_Update( &ctx, value, len );
HASH_Final( HASHdigest, &ctx );
}
| 0
|
317,192
|
static int selinux_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
const struct cred *cred = current_cred();
int error = 0;
switch (cmd) {
case FIONREAD:
case FIBMAP:
case FIGETBSZ:
case FS_IOC_GETFLAGS:
case FS_IOC_GETVERSION:
error = file_has_perm(cred, file, FILE__GETATTR);
break;
case FS_IOC_SETFLAGS:
case FS_IOC_SETVERSION:
error = file_has_perm(cred, file, FILE__SETATTR);
break;
/* sys_ioctl() checks */
case FIONBIO:
case FIOASYNC:
error = file_has_perm(cred, file, 0);
break;
case KDSKBENT:
case KDSKBSENT:
error = cred_has_capability(cred, CAP_SYS_TTY_CONFIG,
CAP_OPT_NONE, true);
break;
/* default case assumes that the command will go
* to the file's ioctl() function.
*/
default:
error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd);
}
return error;
}
| 0
|
359,311
|
DEFUN (no_bgp_redistribute_ipv4_metric,
no_bgp_redistribute_ipv4_metric_cmd,
"no redistribute (connected|kernel|ospf|rip|static) metric <0-4294967295>",
NO_STR
"Redistribute information from another routing protocol\n"
"Connected\n"
"Kernel routes\n"
"Open Shurtest Path First (OSPF)\n"
"Routing Information Protocol (RIP)\n"
"Static routes\n"
"Metric for redistributed routes\n"
"Default metric\n")
{
int type;
type = bgp_str2route_type (AFI_IP, argv[0]);
if (! type)
{
vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE);
return CMD_WARNING;
}
bgp_redistribute_metric_unset (vty->index, AFI_IP, type);
return CMD_SUCCESS;
}
| 0
|
197,057
|
int HttpFileImpl::save(const std::string &path) const
{
assert(!path.empty());
if (fileName_.empty())
return -1;
filesystem::path fsPath(utils::toNativePath(path));
if (!fsPath.is_absolute() &&
(!fsPath.has_parent_path() ||
(fsPath.begin()->string() != "." && fsPath.begin()->string() != "..")))
{
filesystem::path fsUploadPath(utils::toNativePath(
HttpAppFrameworkImpl::instance().getUploadPath()));
fsPath = fsUploadPath / fsPath;
}
filesystem::path fsFileName(utils::toNativePath(fileName_));
if (!filesystem::exists(fsPath))
{
LOG_TRACE << "create path:" << fsPath;
drogon::error_code err;
filesystem::create_directories(fsPath, err);
if (err)
{
LOG_SYSERR;
return -1;
}
}
return saveTo(fsPath / fsFileName);
}
| 1
|
463,080
|
static void sungem_mmio_greg_write(void *opaque, hwaddr addr, uint64_t val,
unsigned size)
{
SunGEMState *s = opaque;
if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
qemu_log_mask(LOG_GUEST_ERROR,
"Write to unknown GREG register 0x%"HWADDR_PRIx"\n",
addr);
return;
}
trace_sungem_mmio_greg_write(addr, val);
/* Pre-write filter */
switch (addr) {
/* Read only registers */
case GREG_SEBSTATE:
case GREG_STAT:
case GREG_STAT2:
case GREG_PCIESTAT:
return; /* No actual write */
case GREG_IACK:
val &= GREG_STAT_LATCH;
s->gregs[GREG_STAT >> 2] &= ~val;
sungem_eval_irq(s);
return; /* No actual write */
case GREG_PCIEMASK:
val &= 0x7;
break;
}
s->gregs[addr >> 2] = val;
/* Post write action */
switch (addr) {
case GREG_IMASK:
/* Re-evaluate interrupt */
sungem_eval_irq(s);
break;
case GREG_SWRST:
switch (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)) {
case GREG_SWRST_RXRST:
sungem_reset_rx(s);
break;
case GREG_SWRST_TXRST:
sungem_reset_tx(s);
break;
case GREG_SWRST_RXRST | GREG_SWRST_TXRST:
sungem_reset_all(s, false);
}
break;
}
}
| 0
|
444,909
|
mtab_unusable(void)
{
struct stat mstat;
if(lstat(_PATH_MOUNTED, &mstat))
return errno;
else if (S_ISLNK(mstat.st_mode))
return EMLINK;
return 0;
}
| 0
|
180,233
|
v8::Handle<v8::Value> V8ThrowException::createReferenceError(v8::Isolate* isolate, const String& message)
{
return v8::Exception::ReferenceError(v8String(isolate, message.isNull() ? "Reference error" : message));
}
| 0
|
430,416
|
int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
{
return ovs_nla_put_key(&flow->key, &flow->mask->key,
OVS_FLOW_ATTR_MASK, true, skb);
}
| 0
|
234,868
|
static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
struct btrfs_super_block *disk_super)
{
struct btrfs_fs_devices *fs_devices;
/*
* Handle scanned device having completed its fsid change but
* belonging to a fs_devices that was created by first scanning
* a device which didn't have its fsid/metadata_uuid changed
* at all and the CHANGING_FSID_V2 flag set.
*/
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
if (fs_devices->fsid_change &&
memcmp(disk_super->metadata_uuid, fs_devices->fsid,
BTRFS_FSID_SIZE) == 0 &&
memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
BTRFS_FSID_SIZE) == 0) {
return fs_devices;
}
}
/*
* Handle scanned device having completed its fsid change but
* belonging to a fs_devices that was created by a device that
* has an outdated pair of fsid/metadata_uuid and
* CHANGING_FSID_V2 flag set.
*/
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
if (fs_devices->fsid_change &&
memcmp(fs_devices->metadata_uuid,
fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
BTRFS_FSID_SIZE) == 0) {
return fs_devices;
}
}
return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}
| 0
|
261,427
|
int check_CTB_available(const de265_image* img,
int xC,int yC, int xN,int yN)
{
// check whether neighbor is outside of frame
if (xN < 0 || yN < 0) { return 0; }
if (xN >= img->get_sps().pic_width_in_luma_samples) { return 0; }
if (yN >= img->get_sps().pic_height_in_luma_samples) { return 0; }
int current_ctbAddrRS = luma_pos_to_ctbAddrRS(&img->get_sps(), xC,yC);
int neighbor_ctbAddrRS = luma_pos_to_ctbAddrRS(&img->get_sps(), xN,yN);
// TODO: check if this is correct (6.4.1)
if (img->get_SliceAddrRS_atCtbRS(current_ctbAddrRS) !=
img->get_SliceAddrRS_atCtbRS(neighbor_ctbAddrRS)) {
return 0;
}
// check if both CTBs are in the same tile.
if (img->get_pps().TileIdRS[current_ctbAddrRS] !=
img->get_pps().TileIdRS[neighbor_ctbAddrRS]) {
return 0;
}
return 1;
}
| 0
|
359,318
|
DEFUN (neighbor_timers,
neighbor_timers_cmd,
NEIGHBOR_CMD2 "timers <0-65535> <0-65535>",
NEIGHBOR_STR
NEIGHBOR_ADDR_STR2
"BGP per neighbor timers\n"
"Keepalive interval\n"
"Holdtime\n")
{
return peer_timers_set_vty (vty, argv[0], argv[1], argv[2]);
}
| 0
|
224,542
|
Status DatasetIteratorShape(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
std::vector<PartialTensorShape> output_shapes;
TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
const int output_shapes_size = output_shapes.size();
if (output_shapes_size != c->num_outputs()) {
return errors::InvalidArgument(
"`output_shapes` must be the same length as `output_types` (",
output_shapes.size(), " vs. ", c->num_outputs());
}
for (size_t i = 0; i < output_shapes.size(); ++i) {
shape_inference::ShapeHandle output_shape_handle;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(
output_shapes[i], &output_shape_handle));
c->set_output(static_cast<int>(i), output_shape_handle);
}
return Status::OK();
}
| 0
|
468,354
|
g_socket_client_set_tls_validation_flags (GSocketClient *client,
GTlsCertificateFlags flags)
{
if (client->priv->tls_validation_flags != flags)
{
client->priv->tls_validation_flags = flags;
g_object_notify (G_OBJECT (client), "tls-validation-flags");
}
}
| 0
|
318,097
|
static int usb_ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data,
u16 len_in_bits)
{
int ret;
ret = rsi_usb_master_reg_write
(adapter, RSI_GSPI_DATA_REG1,
((addr << 6) | ((data >> 16) & 0xffff)), 2);
if (ret < 0)
return ret;
ret = rsi_usb_master_reg_write(adapter, RSI_GSPI_DATA_REG0,
(data & 0xffff), 2);
if (ret < 0)
return ret;
/* Initializing GSPI for ULP read/writes */
rsi_usb_master_reg_write(adapter, RSI_GSPI_CTRL_REG0,
RSI_GSPI_CTRL_REG0_VALUE, 2);
ret = rsi_usb_master_reg_write(adapter, RSI_GSPI_CTRL_REG1,
((len_in_bits - 1) | RSI_GSPI_TRIG), 2);
if (ret < 0)
return ret;
msleep(20);
return 0;
}
| 0
|
369,284
|
static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
return -EINVAL;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
req->madvise.addr = READ_ONCE(sqe->addr);
req->madvise.len = READ_ONCE(sqe->len);
req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
| 0
|
256,402
|
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
gfp_t gfp_mask)
{
unsigned int max_sectors = queue_max_hw_sectors(rq->q);
struct bio *bio;
int ret;
int j;
if (!iov_iter_count(iter))
return -EINVAL;
bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
if (!bio)
return -ENOMEM;
bio->bi_opf |= req_op(rq);
while (iov_iter_count(iter)) {
struct page **pages;
ssize_t bytes;
size_t offs, added = 0;
int npages;
bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
if (unlikely(bytes <= 0)) {
ret = bytes ? bytes : -EFAULT;
goto out_unmap;
}
npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
if (unlikely(offs & queue_dma_alignment(rq->q))) {
ret = -EINVAL;
j = 0;
} else {
for (j = 0; j < npages; j++) {
struct page *page = pages[j];
unsigned int n = PAGE_SIZE - offs;
bool same_page = false;
if (n > bytes)
n = bytes;
if (!bio_add_hw_page(rq->q, bio, page, n, offs,
max_sectors, &same_page)) {
if (same_page)
put_page(page);
break;
}
added += n;
bytes -= n;
offs = 0;
}
iov_iter_advance(iter, added);
}
/*
* release the pages we didn't map into the bio, if any
*/
while (j < npages)
put_page(pages[j++]);
kvfree(pages);
/* couldn't stuff something into bio? */
if (bytes)
break;
}
ret = blk_rq_append_bio(rq, bio);
if (ret)
goto out_unmap;
return 0;
out_unmap:
bio_release_pages(bio, false);
bio_put(bio);
return ret;
}
| 0
|
366,219
|
static void *copy_mount_options(const void __user * data)
{
char *copy;
unsigned left, offset;
if (!data)
return NULL;
copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!copy)
return ERR_PTR(-ENOMEM);
left = copy_from_user(copy, data, PAGE_SIZE);
/*
* Not all architectures have an exact copy_from_user(). Resort to
* byte at a time.
*/
offset = PAGE_SIZE - left;
while (left) {
char c;
if (get_user(c, (const char __user *)data + offset))
break;
copy[offset] = c;
left--;
offset++;
}
if (left == PAGE_SIZE) {
kfree(copy);
return ERR_PTR(-EFAULT);
}
return copy;
}
| 0
|
425,259
|
static Image *ReadMETAImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
Image
*buff,
*image;
MagickBooleanType
status;
StringInfo
*profile;
size_t
length;
void
*blob;
/*
Open file containing binary metadata
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image->columns=1;
image->rows=1;
if (SetImageBackgroundColor(image) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=1;
if (LocaleNCompare(image_info->magick,"8BIM",4) == 0)
{
/*
Read 8BIM binary metadata.
*/
buff=AcquireImage((ImageInfo *) NULL);
if (buff == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
blob=(unsigned char *) AcquireQuantumMemory(length,sizeof(unsigned char));
if (blob == (unsigned char *) NULL)
{
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
AttachBlob(buff->blob,blob,length);
if (LocaleCompare(image_info->magick,"8BIMTEXT") == 0)
{
length=(size_t) parse8BIM(image, buff);
if (length & 1)
(void) WriteBlobByte(buff,0x0);
}
else if (LocaleCompare(image_info->magick,"8BIMWTEXT") == 0)
{
length=(size_t) parse8BIMW(image, buff);
if (length & 1)
(void) WriteBlobByte(buff,0x0);
}
else
CopyBlob(image,buff);
profile=BlobToStringInfo(GetBlobStreamData(buff),(size_t)
GetBlobSize(buff));
if (profile == (StringInfo *) NULL)
{
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
status=SetImageProfile(image,"8bim",profile);
profile=DestroyStringInfo(profile);
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
if (LocaleNCompare(image_info->magick,"APP1",4) == 0)
{
char
name[MaxTextExtent];
(void) FormatLocaleString(name,MaxTextExtent,"APP%d",1);
buff=AcquireImage((ImageInfo *) NULL);
if (buff == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
blob=(unsigned char *) AcquireQuantumMemory(length,sizeof(unsigned char));
if (blob == (unsigned char *) NULL)
{
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
AttachBlob(buff->blob,blob,length);
if (LocaleCompare(image_info->magick,"APP1JPEG") == 0)
{
Image
*iptc;
int
result;
if (image_info->profile == (void *) NULL)
{
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
ThrowReaderException(CoderError,"NoIPTCProfileAvailable");
}
profile=CloneStringInfo((StringInfo *) image_info->profile);
iptc=AcquireImage((ImageInfo *) NULL);
if (iptc == (Image *) NULL)
{
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
AttachBlob(iptc->blob,GetStringInfoDatum(profile),
GetStringInfoLength(profile));
result=jpeg_embed(image,buff,iptc);
blob=DetachBlob(iptc->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
iptc=DestroyImage(iptc);
if (result == 0)
ThrowReaderException(CoderError,"JPEGEmbeddingFailed");
}
else
CopyBlob(image,buff);
profile=BlobToStringInfo(GetBlobStreamData(buff),(size_t)
GetBlobSize(buff));
if (profile == (StringInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
status=SetImageProfile(image,name,profile);
profile=DestroyStringInfo(profile);
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
if (status == MagickFalse)
{
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
}
if ((LocaleCompare(image_info->magick,"ICC") == 0) ||
(LocaleCompare(image_info->magick,"ICM") == 0))
{
buff=AcquireImage((ImageInfo *) NULL);
if (buff == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
blob=(unsigned char *) AcquireQuantumMemory(length,sizeof(unsigned char));
if (blob == (unsigned char *) NULL)
{
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
AttachBlob(buff->blob,blob,length);
CopyBlob(image,buff);
profile=BlobToStringInfo(GetBlobStreamData(buff),(size_t)
GetBlobSize(buff));
if (profile == (StringInfo *) NULL)
{
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) SetImageProfile(image,"icc",profile);
profile=DestroyStringInfo(profile);
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
}
if (LocaleCompare(image_info->magick,"IPTC") == 0)
{
buff=AcquireImage((ImageInfo *) NULL);
if (buff == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
blob=(unsigned char *) AcquireQuantumMemory(length,sizeof(unsigned char));
if (blob == (unsigned char *) NULL)
{
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
AttachBlob(buff->blob,blob,length);
CopyBlob(image,buff);
profile=BlobToStringInfo(GetBlobStreamData(buff),(size_t)
GetBlobSize(buff));
if (profile == (StringInfo *) NULL)
{
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) SetImageProfile(image,"iptc",profile);
profile=DestroyStringInfo(profile);
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
}
if (LocaleCompare(image_info->magick,"XMP") == 0)
{
buff=AcquireImage((ImageInfo *) NULL);
if (buff == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
blob=(unsigned char *) AcquireQuantumMemory(length,sizeof(unsigned char));
if (blob == (unsigned char *) NULL)
{
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
AttachBlob(buff->blob,blob,length);
CopyBlob(image,buff);
profile=BlobToStringInfo(GetBlobStreamData(buff),(size_t)
GetBlobSize(buff));
if (profile == (StringInfo *) NULL)
{
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) SetImageProfile(image,"xmp",profile);
profile=DestroyStringInfo(profile);
blob=DetachBlob(buff->blob);
blob=(unsigned char *) RelinquishMagickMemory(blob);
buff=DestroyImage(buff);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 0
|
508,855
|
void LEX::restore_backup_query_tables_list(Query_tables_list *backup)
{
this->destroy_query_tables_list();
this->set_query_tables_list(backup);
}
| 0
|
263,301
|
void test_urldecode(const char *v1, const char *v2)
{
char *v = strdup(v1);
_q_urldecode(v);
ASSERT_EQUAL_STR(v, v2);
free(v);
}
| 0
|
313,859
|
v_swap_corners(int cmdchar)
{
pos_T old_cursor;
colnr_T left, right;
if (cmdchar == 'O' && VIsual_mode == Ctrl_V)
{
old_cursor = curwin->w_cursor;
getvcols(curwin, &old_cursor, &VIsual, &left, &right);
curwin->w_cursor.lnum = VIsual.lnum;
coladvance(left);
VIsual = curwin->w_cursor;
curwin->w_cursor.lnum = old_cursor.lnum;
curwin->w_curswant = right;
// 'selection "exclusive" and cursor at right-bottom corner: move it
// right one column
if (old_cursor.lnum >= VIsual.lnum && *p_sel == 'e')
++curwin->w_curswant;
coladvance(curwin->w_curswant);
if (curwin->w_cursor.col == old_cursor.col
&& (!virtual_active()
|| curwin->w_cursor.coladd == old_cursor.coladd))
{
curwin->w_cursor.lnum = VIsual.lnum;
if (old_cursor.lnum <= VIsual.lnum && *p_sel == 'e')
++right;
coladvance(right);
VIsual = curwin->w_cursor;
curwin->w_cursor.lnum = old_cursor.lnum;
coladvance(left);
curwin->w_curswant = left;
}
}
else
{
old_cursor = curwin->w_cursor;
curwin->w_cursor = VIsual;
VIsual = old_cursor;
curwin->w_set_curswant = TRUE;
}
}
| 0
|
413,692
|
R_API RAnalOp* r_core_anal_op(RCore *core, ut64 addr, int mask) {
int len;
ut8 buf[32];
ut8 *ptr;
r_return_val_if_fail (core, NULL);
if (addr == UT64_MAX) {
return NULL;
}
RAnalOp *op = R_NEW0 (RAnalOp);
if (!op) {
return NULL;
}
int delta = (addr - core->offset);
int minopsz = 8;
if (delta > 0 && delta + minopsz < core->blocksize && addr >= core->offset && addr + 16 < core->offset + core->blocksize) {
ptr = core->block + delta;
len = core->blocksize - delta;
if (len < 1) {
goto err_op;
}
} else {
if (!r_io_read_at (core->io, addr, buf, sizeof (buf))) {
goto err_op;
}
ptr = buf;
len = sizeof (buf);
}
if (r_anal_op (core->anal, op, addr, ptr, len, mask) < 1) {
goto err_op;
}
// TODO This code block must be deleted when all the anal plugs support disasm
if (!op->mnemonic && mask & R_ANAL_OP_MASK_DISASM) {
RAsmOp asmop;
if (core->anal->verbose) {
eprintf ("Warning: Implement RAnalOp.MASK_DISASM for current anal.arch. Using the sluggish RAsmOp fallback for now.\n");
}
r_asm_set_pc (core->rasm, addr);
r_asm_op_init (&asmop);
if (r_asm_disassemble (core->rasm, &asmop, ptr, len) > 0) {
op->mnemonic = strdup (r_strbuf_get (&asmop.buf_asm));
}
r_asm_op_fini (&asmop);
}
return op;
err_op:
free (op);
return NULL;
}
| 0
|
462,099
|
static MagickBooleanType WriteGIFImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
int
c;
ImageInfo
*write_info;
MagickBooleanType
status;
MagickOffsetType
scene;
RectangleInfo
page;
register ssize_t
i;
register unsigned char
*q;
size_t
bits_per_pixel,
delay,
imageListLength,
length,
one;
ssize_t
j,
opacity;
unsigned char
*colormap,
*global_colormap;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
Allocate colormap.
*/
global_colormap=(unsigned char *) AcquireQuantumMemory(768UL,
sizeof(*global_colormap));
colormap=(unsigned char *) AcquireQuantumMemory(768UL,sizeof(*colormap));
if ((global_colormap == (unsigned char *) NULL) ||
(colormap == (unsigned char *) NULL))
{
if (global_colormap != (unsigned char *) NULL)
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
if (colormap != (unsigned char *) NULL)
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < 768; i++)
colormap[i]=(unsigned char) 0;
/*
Write GIF header.
*/
write_info=CloneImageInfo(image_info);
if (LocaleCompare(write_info->magick,"GIF87") != 0)
(void) WriteBlob(image,6,(unsigned char *) "GIF89a");
else
{
(void) WriteBlob(image,6,(unsigned char *) "GIF87a");
write_info->adjoin=MagickFalse;
}
/*
Determine image bounding box.
*/
page.width=image->columns;
if (image->page.width > page.width)
page.width=image->page.width;
page.height=image->rows;
if (image->page.height > page.height)
page.height=image->page.height;
page.x=image->page.x;
page.y=image->page.y;
(void) WriteBlobLSBShort(image,(unsigned short) page.width);
(void) WriteBlobLSBShort(image,(unsigned short) page.height);
/*
Write images to file.
*/
if ((write_info->adjoin != MagickFalse) &&
(GetNextImageInList(image) != (Image *) NULL))
write_info->interlace=NoInterlace;
scene=0;
one=1;
imageListLength=GetImageListLength(image);
do
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
opacity=(-1);
if (IsImageOpaque(image,exception) != MagickFalse)
{
if ((image->storage_class == DirectClass) || (image->colors > 256))
(void) SetImageType(image,PaletteType,exception);
}
else
{
double
alpha,
beta;
/*
Identify transparent colormap index.
*/
if ((image->storage_class == DirectClass) || (image->colors > 256))
(void) SetImageType(image,PaletteBilevelAlphaType,exception);
for (i=0; i < (ssize_t) image->colors; i++)
if (image->colormap[i].alpha != OpaqueAlpha)
{
if (opacity < 0)
{
opacity=i;
continue;
}
alpha=fabs(image->colormap[i].alpha-TransparentAlpha);
beta=fabs(image->colormap[opacity].alpha-TransparentAlpha);
if (alpha < beta)
opacity=i;
}
if (opacity == -1)
{
(void) SetImageType(image,PaletteBilevelAlphaType,exception);
for (i=0; i < (ssize_t) image->colors; i++)
if (image->colormap[i].alpha != OpaqueAlpha)
{
if (opacity < 0)
{
opacity=i;
continue;
}
alpha=fabs(image->colormap[i].alpha-TransparentAlpha);
beta=fabs(image->colormap[opacity].alpha-TransparentAlpha);
if (alpha < beta)
opacity=i;
}
}
if (opacity >= 0)
{
image->colormap[opacity].red=image->transparent_color.red;
image->colormap[opacity].green=image->transparent_color.green;
image->colormap[opacity].blue=image->transparent_color.blue;
}
}
if ((image->storage_class == DirectClass) || (image->colors > 256))
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
for (bits_per_pixel=1; bits_per_pixel < 8; bits_per_pixel++)
if ((one << bits_per_pixel) >= image->colors)
break;
q=colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].red));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].green));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].blue));
}
for ( ; i < (ssize_t) (one << bits_per_pixel); i++)
{
*q++=(unsigned char) 0x0;
*q++=(unsigned char) 0x0;
*q++=(unsigned char) 0x0;
}
if ((GetPreviousImageInList(image) == (Image *) NULL) ||
(write_info->adjoin == MagickFalse))
{
/*
Write global colormap.
*/
c=0x80;
c|=(8-1) << 4; /* color resolution */
c|=(bits_per_pixel-1); /* size of global colormap */
(void) WriteBlobByte(image,(unsigned char) c);
for (j=0; j < (ssize_t) image->colors; j++)
if (IsPixelInfoEquivalent(&image->background_color,image->colormap+j))
break;
(void) WriteBlobByte(image,(unsigned char)
(j == (ssize_t) image->colors ? 0 : j)); /* background color */
(void) WriteBlobByte(image,(unsigned char) 0x00); /* reserved */
length=(size_t) (3*(one << bits_per_pixel));
(void) WriteBlob(image,length,colormap);
for (j=0; j < 768; j++)
global_colormap[j]=colormap[j];
}
if (LocaleCompare(write_info->magick,"GIF87") != 0)
{
const char
*value;
/*
Write graphics control extension.
*/
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xf9);
(void) WriteBlobByte(image,(unsigned char) 0x04);
c=image->dispose << 2;
if (opacity >= 0)
c|=0x01;
(void) WriteBlobByte(image,(unsigned char) c);
delay=(size_t) (100*image->delay/MagickMax((size_t)
image->ticks_per_second,1));
(void) WriteBlobLSBShort(image,(unsigned short) delay);
(void) WriteBlobByte(image,(unsigned char) (opacity >= 0 ? opacity :
0));
(void) WriteBlobByte(image,(unsigned char) 0x00);
value=GetImageProperty(image,"comment",exception);
if (value != (const char *) NULL)
{
register const char
*p;
size_t
count;
/*
Write comment extension.
*/
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xfe);
for (p=value; *p != '\0'; )
{
count=MagickMin(strlen(p),255);
(void) WriteBlobByte(image,(unsigned char) count);
for (i=0; i < (ssize_t) count; i++)
(void) WriteBlobByte(image,(unsigned char) *p++);
}
(void) WriteBlobByte(image,(unsigned char) 0x00);
}
if ((GetPreviousImageInList(image) == (Image *) NULL) &&
(GetNextImageInList(image) != (Image *) NULL) &&
(image->iterations != 1))
{
/*
Write Netscape Loop extension.
*/
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","NETSCAPE2.0");
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xff);
(void) WriteBlobByte(image,(unsigned char) 0x0b);
(void) WriteBlob(image,11,(unsigned char *) "NETSCAPE2.0");
(void) WriteBlobByte(image,(unsigned char) 0x03);
(void) WriteBlobByte(image,(unsigned char) 0x01);
(void) WriteBlobLSBShort(image,(unsigned short) (image->iterations ?
image->iterations-1 : 0));
(void) WriteBlobByte(image,(unsigned char) 0x00);
}
if ((image->gamma != 1.0f/2.2f))
{
char
attributes[MagickPathExtent];
ssize_t
count;
/*
Write ImageMagick extension.
*/
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","ImageMagick");
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xff);
(void) WriteBlobByte(image,(unsigned char) 0x0b);
(void) WriteBlob(image,11,(unsigned char *) "ImageMagick");
count=FormatLocaleString(attributes,MagickPathExtent,"gamma=%g",
image->gamma);
(void) WriteBlobByte(image,(unsigned char) count);
(void) WriteBlob(image,(size_t) count,(unsigned char *) attributes);
(void) WriteBlobByte(image,(unsigned char) 0x00);
}
ResetImageProfileIterator(image);
for ( ; ; )
{
char
*name;
const StringInfo
*profile;
name=GetNextImageProfile(image);
if (name == (const char *) NULL)
break;
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
if ((LocaleCompare(name,"ICC") == 0) ||
(LocaleCompare(name,"ICM") == 0) ||
(LocaleCompare(name,"IPTC") == 0) ||
(LocaleCompare(name,"8BIM") == 0) ||
(LocaleNCompare(name,"gif:",4) == 0))
{
ssize_t
offset;
unsigned char
*datum;
datum=GetStringInfoDatum(profile);
length=GetStringInfoLength(profile);
(void) WriteBlobByte(image,(unsigned char) 0x21);
(void) WriteBlobByte(image,(unsigned char) 0xff);
(void) WriteBlobByte(image,(unsigned char) 0x0b);
if ((LocaleCompare(name,"ICC") == 0) ||
(LocaleCompare(name,"ICM") == 0))
{
/*
Write ICC extension.
*/
(void) WriteBlob(image,11,(unsigned char *) "ICCRGBG1012");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","ICCRGBG1012");
}
else
if ((LocaleCompare(name,"IPTC") == 0))
{
/*
Write IPTC extension.
*/
(void) WriteBlob(image,11,(unsigned char *) "MGKIPTC0000");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","MGKIPTC0000");
}
else
if ((LocaleCompare(name,"8BIM") == 0))
{
/*
Write 8BIM extension.
*/
(void) WriteBlob(image,11,(unsigned char *)
"MGK8BIM0000");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s","MGK8BIM0000");
}
else
{
char
extension[MagickPathExtent];
/*
Write generic extension.
*/
(void) CopyMagickString(extension,name+4,
sizeof(extension));
(void) WriteBlob(image,11,(unsigned char *) extension);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GIF Extension %s",name);
}
offset=0;
while ((ssize_t) length > offset)
{
size_t
block_length;
if ((length-offset) < 255)
block_length=length-offset;
else
block_length=255;
(void) WriteBlobByte(image,(unsigned char) block_length);
(void) WriteBlob(image,(size_t) block_length,datum+offset);
offset+=(ssize_t) block_length;
}
(void) WriteBlobByte(image,(unsigned char) 0x00);
}
}
}
}
(void) WriteBlobByte(image,','); /* image separator */
/*
Write the image header.
*/
page.x=image->page.x;
page.y=image->page.y;
if ((image->page.width != 0) && (image->page.height != 0))
page=image->page;
(void) WriteBlobLSBShort(image,(unsigned short) (page.x < 0 ? 0 : page.x));
(void) WriteBlobLSBShort(image,(unsigned short) (page.y < 0 ? 0 : page.y));
(void) WriteBlobLSBShort(image,(unsigned short) image->columns);
(void) WriteBlobLSBShort(image,(unsigned short) image->rows);
c=0x00;
if (write_info->interlace != NoInterlace)
c|=0x40; /* pixel data is interlaced */
for (j=0; j < (ssize_t) (3*image->colors); j++)
if (colormap[j] != global_colormap[j])
break;
if (j == (ssize_t) (3*image->colors))
(void) WriteBlobByte(image,(unsigned char) c);
else
{
c|=0x80;
c|=(bits_per_pixel-1); /* size of local colormap */
(void) WriteBlobByte(image,(unsigned char) c);
length=(size_t) (3*(one << bits_per_pixel));
(void) WriteBlob(image,length,colormap);
}
/*
Write the image data.
*/
c=(int) MagickMax(bits_per_pixel,2);
(void) WriteBlobByte(image,(unsigned char) c);
status=EncodeImage(write_info,image,(size_t) MagickMax(bits_per_pixel,2)+1,
exception);
if (status == MagickFalse)
{
global_colormap=(unsigned char *) RelinquishMagickMemory(
global_colormap);
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
write_info=DestroyImageInfo(write_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) WriteBlobByte(image,(unsigned char) 0x00);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
scene++;
status=SetImageProgress(image,SaveImagesTag,scene,imageListLength);
if (status == MagickFalse)
break;
} while (write_info->adjoin != MagickFalse);
(void) WriteBlobByte(image,';'); /* terminator */
global_colormap=(unsigned char *) RelinquishMagickMemory(global_colormap);
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
write_info=DestroyImageInfo(write_info);
(void) CloseBlob(image);
return(MagickTrue);
}
| 0
|
411,907
|
router_add_exit_policy(routerinfo_t *router, directory_token_t *tok)
{
addr_policy_t *newe;
newe = router_parse_addr_policy(tok);
if (!newe)
return -1;
if (! router->exit_policy)
router->exit_policy = smartlist_create();
if (((tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) &&
tor_addr_family(&newe->addr) == AF_INET)
||
((tok->tp == K_ACCEPT || tok->tp == K_REJECT) &&
tor_addr_family(&newe->addr) == AF_INET6)) {
log_warn(LD_DIR, "Mismatch between field type and address type in exit "
"policy");
addr_policy_free(newe);
return -1;
}
smartlist_add(router->exit_policy, newe);
return 0;
}
| 0
|
246,748
|
static u32 parse_meta_args(char *opts, MetaActionType act_type)
{
MetaAction *meta;
metas = gf_realloc(metas, sizeof(MetaAction) * (nb_meta_act + 1));
if (!metas) return 2;
meta = &metas[nb_meta_act];
nb_meta_act ++;
memset(meta, 0, sizeof(MetaAction));
meta->act_type = act_type;
meta->trackID = 0;
meta->root_meta = 1;
open_edit = GF_TRUE;
if (!opts) return 2;
if (act_type == META_ACTION_ADD_IMAGE_ITEM)
has_add_image = GF_TRUE;
while (1) {
char *next;
char *szSlot;
if (!opts || !opts[0]) return 0;
if (opts[0]==':') opts += 1;
szSlot = opts;
next = gf_url_colon_suffix(opts);
if (next) next[0] = 0;
if (!strnicmp(szSlot, "tk=", 3)) {
sscanf(szSlot, "tk=%u", &meta->trackID);
meta->root_meta = 0;
}
else if (!strnicmp(szSlot, "id=", 3)) {
meta->item_id = atoi(szSlot+3);
}
else if (!strnicmp(szSlot, "type=", 5)) {
meta->item_type = GF_4CC(szSlot[5], szSlot[6], szSlot[7], szSlot[8]);
}
//"ref" (without '=') is for data reference, "ref=" is for item references
else if (!strnicmp(szSlot, "ref=", 4)) {
char type[5];
MetaRef *ref;
if (!meta->item_refs) {
meta->item_refs = gf_list_new();
if (!meta->item_refs) return 2;
}
GF_SAFEALLOC(ref, MetaRef);
if (!ref) return 2;
sscanf(szSlot, "ref=%4s,%u", type, &(ref->ref_item_id));
ref->ref_type = GF_4CC(type[0], type[1], type[2], type[3]);
gf_list_add(meta->item_refs, ref);
}
else if (!strnicmp(szSlot, "name=", 5)) {
meta->szName = gf_strdup(szSlot+5);
}
else if (!strnicmp(szSlot, "path=", 5)) {
meta->szPath = gf_strdup(szSlot+5);
}
else if (!strnicmp(szSlot, "mime=", 5)) {
meta->item_type = GF_META_ITEM_TYPE_MIME;
meta->mime_type = gf_strdup(szSlot+5);
}
else if (!strnicmp(szSlot, "encoding=", 9)) {
meta->enc_type = gf_strdup(szSlot+9);
}
else if (!strnicmp(szSlot, "image-size=", 11)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
sscanf(szSlot+11, "%dx%d", &meta->image_props->width, &meta->image_props->height);
}
else if (!strnicmp(szSlot, "image-grid-size=", 16)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
}
sscanf(szSlot+16, "%dx%d", &meta->image_props->num_grid_rows, &meta->image_props->num_grid_columns);
}
else if (!strnicmp(szSlot, "image-pasp=", 11)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
sscanf(szSlot+11, "%dx%d", &meta->image_props->hSpacing, &meta->image_props->vSpacing);
}
else if (!strnicmp(szSlot, "image-rloc=", 11)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
sscanf(szSlot+11, "%dx%d", &meta->image_props->hOffset, &meta->image_props->vOffset);
}
else if (!strnicmp(szSlot, "rotation=", 9)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
meta->image_props->angle = atoi(szSlot+9);
}
else if (!strnicmp(szSlot, "mirror-axis=", 12)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
meta->image_props->mirror = (!strnicmp(szSlot+12, "vertical", 8) ? 1 : 2);
}
else if (!strnicmp(szSlot, "clap=", 5)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
sscanf(szSlot + 5, "%d,%d,%d,%d,%d,%d,%d,%d", &meta->image_props->clap_wnum, &meta->image_props->clap_wden,
&meta->image_props->clap_hnum, &meta->image_props->clap_hden,
&meta->image_props->clap_honum, &meta->image_props->clap_hoden,
&meta->image_props->clap_vonum, &meta->image_props->clap_voden);
}
else if (!stricmp(szSlot, "hidden")) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
meta->image_props->hidden = GF_TRUE;
}
else if (!stricmp(szSlot, "alpha")) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
meta->image_props->alpha = GF_TRUE;
}
//"ref" (without '=') is for data reference, "ref=" is for item references
else if (!stricmp(szSlot, "ref")) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
meta->image_props->use_reference = GF_TRUE;
}
else if (!strnicmp(szSlot, "time=", 5)) {
Float s=0, e=0, step=0;
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
if (sscanf(szSlot+5, "%f-%f/%f", &s, &e, &step)==3) {
meta->image_props->time = s;
meta->image_props->end_time = e;
meta->image_props->step_time = step;
} else if (sscanf(szSlot+5, "%f-%f", &s, &e)==2) {
meta->image_props->time = s;
meta->image_props->end_time = e;
} else if (sscanf(szSlot+5, "%f/%f", &s, &step)==2) {
meta->image_props->time = s;
meta->image_props->step_time = step;
} else if (sscanf(szSlot+5, "%f", &s)==1) {
meta->image_props->time = s;
}
}
else if (!strnicmp(szSlot, "samp=", 5)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
meta->image_props->sample_num = atoi(szSlot+5);
meta->root_meta = 1;
}
else if (!strnicmp(szSlot, "group=", 6)) {
char type[5];
sscanf(szSlot, "group=%4s,%u", type, &meta->group_id);
meta->group_type = GF_4CC(type[0], type[1], type[2], type[3]);
}
else if (!stricmp(szSlot, "split_tiles")) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
meta->image_props->tile_mode = TILE_ITEM_ALL_BASE;
}
else if (!stricmp(szSlot, "dref")) {
meta->use_dref = 1;
}
else if (!stricmp(szSlot, "primary")) {
meta->primary = 1;
}
else if (!stricmp(szSlot, "binary")) {
if (meta->act_type==META_ACTION_SET_XML) meta->act_type=META_ACTION_SET_BINARY_XML;
}
else if (!strnicmp(szSlot, "icc_path=", 9)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
strcpy(meta->image_props->iccPath, szSlot+9);
}
else if (!stricmp(szSlot, "agrid") || !strnicmp(szSlot, "agrid=", 6)) {
if (!meta->image_props) {
GF_SAFEALLOC(meta->image_props, GF_ImageItemProperties);
if (!meta->image_props) return 2;
}
meta->image_props->auto_grid = GF_TRUE;
if (!strnicmp(szSlot, "agrid=", 6))
meta->image_props->auto_grid_ratio = atof(szSlot+6);
}
else if (!strchr(szSlot, '=')) {
switch (meta->act_type) {
case META_ACTION_SET_TYPE:
if (!stricmp(szSlot, "null") || !stricmp(szSlot, "0")) meta->meta_4cc = 0;
else meta->meta_4cc = GF_4CC(szSlot[0], szSlot[1], szSlot[2], szSlot[3]);
break;
case META_ACTION_ADD_ITEM:
case META_ACTION_ADD_IMAGE_ITEM:
case META_ACTION_SET_XML:
case META_ACTION_DUMP_XML:
if (!strncmp(szSlot, "dopt", 4) || !strncmp(szSlot, "sopt", 4) || !strncmp(szSlot, "@", 1)) {
if (next) next[0]=':';
next=NULL;
}
//cat as -add arg
gf_dynstrcat(&meta->szPath, szSlot, ":");
if (!meta->szPath) return 2;
break;
case META_ACTION_REM_ITEM:
case META_ACTION_SET_PRIMARY_ITEM:
case META_ACTION_DUMP_ITEM:
meta->item_id = atoi(szSlot);
break;
default:
break;
}
}
if (!next) break;
opts += strlen(szSlot);
next[0] = ':';
}
return 0;
}
| 0
|
462,570
|
void controller::write_item(std::shared_ptr<rss_item> item, std::ostream& ostr) {
std::vector<std::pair<LineType, std::string>> lines;
std::vector<linkpair> links; // not used
std::string title(_("Title: "));
title.append(item->title());
lines.push_back(std::make_pair(LineType::wrappable, title));
std::string author(_("Author: "));
author.append(item->author());
lines.push_back(std::make_pair(LineType::wrappable, author));
std::string date(_("Date: "));
date.append(item->pubDate());
lines.push_back(std::make_pair(LineType::wrappable, date));
std::string link(_("Link: "));
link.append(item->link());
lines.push_back(std::make_pair(LineType::softwrappable, link));
if (item->enclosure_url() != "") {
std::string dlurl(_("Podcast Download URL: "));
dlurl.append(item->enclosure_url());
lines.push_back(std::make_pair(LineType::softwrappable, dlurl));
}
lines.push_back(std::make_pair(LineType::wrappable, std::string("")));
htmlrenderer rnd(true);
rnd.render(item->description(), lines, links, item->feedurl());
textformatter txtfmt;
txtfmt.add_lines(lines);
unsigned int width = cfg.get_configvalue_as_int("text-width");
if (width == 0)
width = 80;
ostr << txtfmt.format_text_plain(width) << std::endl;
}
| 0
|
281,122
|
static int flow_to_policy_dir(int dir)
{
if (XFRM_POLICY_IN == FLOW_DIR_IN &&
XFRM_POLICY_OUT == FLOW_DIR_OUT &&
XFRM_POLICY_FWD == FLOW_DIR_FWD)
return dir;
switch (dir) {
default:
case FLOW_DIR_IN:
return XFRM_POLICY_IN;
case FLOW_DIR_OUT:
return XFRM_POLICY_OUT;
case FLOW_DIR_FWD:
return XFRM_POLICY_FWD;
}
}
| 0
|
310,308
|
new_cached_dir(char *s, time_t published)
{
cached_dir_t *d = tor_malloc_zero(sizeof(cached_dir_t));
d->refcnt = 1;
d->dir = s;
d->dir_len = strlen(s);
d->published = published;
if (tor_gzip_compress(&(d->dir_z), &(d->dir_z_len), d->dir, d->dir_len,
ZLIB_METHOD)) {
log_warn(LD_BUG, "Error compressing directory");
}
return d;
}
| 0
|
484,799
|
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}
| 0
|
208,680
|
R_API void r_core_anal_esil(RCore *core, const char *str, const char *target) {
bool cfg_anal_strings = r_config_get_i (core->config, "anal.strings");
bool emu_lazy = r_config_get_i (core->config, "emu.lazy");
bool gp_fixed = r_config_get_i (core->config, "anal.gpfixed");
RAnalEsil *ESIL = core->anal->esil;
ut64 refptr = 0LL;
const char *pcname;
RAnalOp op = R_EMPTY;
ut8 *buf = NULL;
bool end_address_set = false;
int iend;
int minopsize = 4; // XXX this depends on asm->mininstrsize
bool archIsArm = false;
ut64 addr = core->offset;
ut64 start = addr;
ut64 end = 0LL;
ut64 cur;
if (esil_anal_stop || r_cons_is_breaked ()) {
// faster ^C
return;
}
mycore = core;
if (!strcmp (str, "?")) {
eprintf ("Usage: aae[f] [len] [addr] - analyze refs in function, section or len bytes with esil\n");
eprintf (" aae $SS @ $S - analyze the whole section\n");
eprintf (" aae $SS str.Hello @ $S - find references for str.Hellow\n");
eprintf (" aaef - analyze functions discovered with esil\n");
return;
}
#define CHECKREF(x) ((refptr && (x) == refptr) || !refptr)
if (target) {
const char *expr = r_str_trim_head_ro (target);
if (*expr) {
refptr = ntarget = r_num_math (core->num, expr);
if (!refptr) {
ntarget = refptr = addr;
}
} else {
ntarget = UT64_MAX;
refptr = 0LL;
}
} else {
ntarget = UT64_MAX;
refptr = 0LL;
}
RAnalFunction *fcn = NULL;
if (!strcmp (str, "f")) {
fcn = r_anal_get_fcn_in (core->anal, core->offset, 0);
if (fcn) {
start = r_anal_function_min_addr (fcn);
addr = fcn->addr;
end = r_anal_function_max_addr (fcn);
end_address_set = true;
}
}
if (!end_address_set) {
if (str[0] == ' ') {
end = addr + r_num_math (core->num, str + 1);
} else {
RIOMap *map = r_io_map_get_at (core->io, addr);
if (map) {
end = r_io_map_end (map);
} else {
end = addr + core->blocksize;
}
}
}
iend = end - start;
if (iend < 0) {
return;
}
if (iend > MAX_SCAN_SIZE) {
eprintf ("Warning: Not going to analyze 0x%08"PFMT64x" bytes.\n", (ut64)iend);
return;
}
buf = malloc ((size_t)iend + 2);
if (!buf) {
perror ("malloc");
return;
}
esilbreak_last_read = UT64_MAX;
r_io_read_at (core->io, start, buf, iend + 1);
if (!ESIL) {
r_core_cmd0 (core, "aei");
ESIL = core->anal->esil;
if (!ESIL) {
eprintf ("ESIL not initialized\n");
return;
}
r_core_cmd0 (core, "aeim");
ESIL = core->anal->esil;
}
const char *spname = r_reg_get_name (core->anal->reg, R_REG_NAME_SP);
if (!spname) {
eprintf ("Error: No =SP defined in the reg profile.\n");
return;
}
EsilBreakCtx ctx = {
&op,
fcn,
spname,
r_reg_getv (core->anal->reg, spname)
};
ESIL->cb.hook_reg_write = &esilbreak_reg_write;
//this is necessary for the hook to read the id of analop
ESIL->user = &ctx;
ESIL->cb.hook_mem_read = &esilbreak_mem_read;
ESIL->cb.hook_mem_write = &esilbreak_mem_write;
if (fcn && fcn->reg_save_area) {
r_reg_setv (core->anal->reg, ctx.spname, ctx.initial_sp - fcn->reg_save_area);
}
//eprintf ("Analyzing ESIL refs from 0x%"PFMT64x" - 0x%"PFMT64x"\n", addr, end);
// TODO: backup/restore register state before/after analysis
pcname = r_reg_get_name (core->anal->reg, R_REG_NAME_PC);
if (!pcname || !*pcname) {
eprintf ("Cannot find program counter register in the current profile.\n");
return;
}
esil_anal_stop = false;
r_cons_break_push (cccb, core);
int arch = -1;
if (!strcmp (core->anal->cur->arch, "arm")) {
switch (core->anal->cur->bits) {
case 64: arch = R2_ARCH_ARM64; break;
case 32: arch = R2_ARCH_ARM32; break;
case 16: arch = R2_ARCH_THUMB; break;
}
archIsArm = true;
}
ut64 gp = r_config_get_i (core->config, "anal.gp");
const char *gp_reg = NULL;
if (!strcmp (core->anal->cur->arch, "mips")) {
gp_reg = "gp";
arch = R2_ARCH_MIPS;
}
const char *sn = r_reg_get_name (core->anal->reg, R_REG_NAME_SN);
if (!sn) {
eprintf ("Warning: No SN reg alias for current architecture.\n");
}
r_reg_arena_push (core->anal->reg);
IterCtx ictx = { start, end, fcn, NULL };
size_t i = addr - start;
size_t i_old = 0;
do {
if (esil_anal_stop || r_cons_is_breaked ()) {
break;
}
cur = start + i;
if (!r_io_is_valid_offset (core->io, cur, 0)) {
break;
}
#if 0
// disabled because it causes some tests to fail
{
RPVector *list = r_meta_get_all_in (core->anal, cur, R_META_TYPE_ANY);
void **it;
r_pvector_foreach (list, it) {
RIntervalNode *node = *it;
RAnalMetaItem *meta = node->data;
switch (meta->type) {
case R_META_TYPE_DATA:
case R_META_TYPE_STRING:
case R_META_TYPE_FORMAT:
#if 0
{
int msz = r_meta_get_size (core->anal, meta->type);
i += (msz > 0)? msz: minopsize;
}
r_pvector_free (list);
goto loopback;
#elif 0
{
int msz = r_meta_get_size (core->anal, meta->type);
i += (msz > 0)? msz: minopsize;
i--;
}
#else
i += 4;
goto repeat;
#endif
default:
break;
}
}
r_pvector_free (list);
}
#endif
/* realign address if needed */
r_core_seek_arch_bits (core, cur);
int opalign = core->anal->pcalign;
if (opalign > 0) {
cur -= (cur % opalign);
}
r_anal_op_fini (&op);
r_asm_set_pc (core->rasm, cur);
i_old = i;
#if 1
if (i > iend) {
goto repeat;
}
#endif
if (!r_anal_op (core->anal, &op, cur, buf + i, iend - i, R_ANAL_OP_MASK_ESIL | R_ANAL_OP_MASK_VAL | R_ANAL_OP_MASK_HINT)) {
i += minopsize - 1; // XXX dupe in op.size below
}
if (op.type == R_ANAL_OP_TYPE_ILL || op.type == R_ANAL_OP_TYPE_UNK) {
// i += 2
r_anal_op_fini (&op);
goto repeat;
}
//we need to check again i because buf+i may goes beyond its boundaries
//because of i+= minopsize - 1
if (op.size < 1) {
i += minopsize - 1;
goto repeat;
}
if (emu_lazy) {
if (op.type & R_ANAL_OP_TYPE_REP) {
i += op.size - 1;
goto repeat;
}
switch (op.type & R_ANAL_OP_TYPE_MASK) {
case R_ANAL_OP_TYPE_JMP:
case R_ANAL_OP_TYPE_CJMP:
case R_ANAL_OP_TYPE_CALL:
case R_ANAL_OP_TYPE_RET:
case R_ANAL_OP_TYPE_ILL:
case R_ANAL_OP_TYPE_NOP:
case R_ANAL_OP_TYPE_UJMP:
case R_ANAL_OP_TYPE_IO:
case R_ANAL_OP_TYPE_LEAVE:
case R_ANAL_OP_TYPE_CRYPTO:
case R_ANAL_OP_TYPE_CPL:
case R_ANAL_OP_TYPE_SYNC:
case R_ANAL_OP_TYPE_SWI:
case R_ANAL_OP_TYPE_CMP:
case R_ANAL_OP_TYPE_ACMP:
case R_ANAL_OP_TYPE_NULL:
case R_ANAL_OP_TYPE_CSWI:
case R_ANAL_OP_TYPE_TRAP:
i += op.size - 1;
goto repeat;
// those require write support
case R_ANAL_OP_TYPE_PUSH:
case R_ANAL_OP_TYPE_POP:
i += op.size - 1;
goto repeat;
}
}
if (sn && op.type == R_ANAL_OP_TYPE_SWI) {
r_strf_buffer (64);
r_flag_space_set (core->flags, R_FLAGS_FS_SYSCALLS);
int snv = (arch == R2_ARCH_THUMB)? op.val: (int)r_reg_getv (core->anal->reg, sn);
RSyscallItem *si = r_syscall_get (core->anal->syscall, snv, -1);
if (si) {
// eprintf ("0x%08"PFMT64x" SYSCALL %-4d %s\n", cur, snv, si->name);
r_flag_set_next (core->flags, r_strf ("syscall.%s", si->name), cur, 1);
} else {
//todo were doing less filtering up top because we can't match against 80 on all platforms
// might get too many of this path now..
// eprintf ("0x%08"PFMT64x" SYSCALL %d\n", cur, snv);
r_flag_set_next (core->flags, r_strf ("syscall.%d", snv), cur, 1);
}
r_flag_space_set (core->flags, NULL);
r_syscall_item_free (si);
}
const char *esilstr = R_STRBUF_SAFEGET (&op.esil);
i += op.size - 1;
if (R_STR_ISEMPTY (esilstr)) {
goto repeat;
}
r_anal_esil_set_pc (ESIL, cur);
r_reg_setv (core->anal->reg, pcname, cur + op.size);
if (gp_fixed && gp_reg) {
r_reg_setv (core->anal->reg, gp_reg, gp);
}
(void)r_anal_esil_parse (ESIL, esilstr);
// looks like ^C is handled by esil_parse !!!!
//r_anal_esil_dumpstack (ESIL);
//r_anal_esil_stack_free (ESIL);
switch (op.type) {
case R_ANAL_OP_TYPE_LEA:
// arm64
if (core->anal->cur && arch == R2_ARCH_ARM64) {
if (CHECKREF (ESIL->cur)) {
r_anal_xrefs_set (core->anal, cur, ESIL->cur, R_ANAL_REF_TYPE_STRING);
}
} else if ((target && op.ptr == ntarget) || !target) {
if (CHECKREF (ESIL->cur)) {
if (op.ptr && r_io_is_valid_offset (core->io, op.ptr, !core->anal->opt.noncode)) {
r_anal_xrefs_set (core->anal, cur, op.ptr, R_ANAL_REF_TYPE_STRING);
} else {
r_anal_xrefs_set (core->anal, cur, ESIL->cur, R_ANAL_REF_TYPE_STRING);
}
}
}
if (cfg_anal_strings) {
add_string_ref (core, op.addr, op.ptr);
}
break;
case R_ANAL_OP_TYPE_ADD:
/* TODO: test if this is valid for other archs too */
if (core->anal->cur && archIsArm) {
/* This code is known to work on Thumb, ARM and ARM64 */
ut64 dst = ESIL->cur;
if ((target && dst == ntarget) || !target) {
if (CHECKREF (dst)) {
int type = core_type_by_addr (core, dst); // R_ANAL_REF_TYPE_DATA;
r_anal_xrefs_set (core->anal, cur, dst, type);
}
}
if (cfg_anal_strings) {
add_string_ref (core, op.addr, dst);
}
} else if ((core->anal->bits == 32 && core->anal->cur && arch == R2_ARCH_MIPS)) {
ut64 dst = ESIL->cur;
if (!op.src[0] || !op.src[0]->reg || !op.src[0]->reg->name) {
break;
}
if (!strcmp (op.src[0]->reg->name, "sp")) {
break;
}
if (!strcmp (op.src[0]->reg->name, "zero")) {
break;
}
if ((target && dst == ntarget) || !target) {
if (dst > 0xffff && op.src[1] && (dst & 0xffff) == (op.src[1]->imm & 0xffff) && myvalid (mycore->io, dst)) {
RFlagItem *f;
char *str;
if (CHECKREF (dst) || CHECKREF (cur)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_DATA);
if (cfg_anal_strings) {
add_string_ref (core, op.addr, dst);
}
if ((f = r_core_flag_get_by_spaces (core->flags, dst))) {
r_meta_set_string (core->anal, R_META_TYPE_COMMENT, cur, f->name);
} else if ((str = is_string_at (mycore, dst, NULL))) {
char *str2 = r_str_newf ("esilref: '%s'", str);
// HACK avoid format string inside string used later as format
// string crashes disasm inside agf under some conditions.
// https://github.com/radareorg/radare2/issues/6937
r_str_replace_char (str2, '%', '&');
r_meta_set_string (core->anal, R_META_TYPE_COMMENT, cur, str2);
free (str2);
free (str);
}
}
}
}
}
break;
case R_ANAL_OP_TYPE_LOAD:
{
ut64 dst = esilbreak_last_read;
if (dst != UT64_MAX && CHECKREF (dst)) {
if (myvalid (mycore->io, dst)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_DATA);
if (cfg_anal_strings) {
add_string_ref (core, op.addr, dst);
}
}
}
dst = esilbreak_last_data;
if (dst != UT64_MAX && CHECKREF (dst)) {
if (myvalid (mycore->io, dst)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_DATA);
if (cfg_anal_strings) {
add_string_ref (core, op.addr, dst);
}
}
}
}
break;
case R_ANAL_OP_TYPE_JMP:
{
ut64 dst = op.jump;
if (CHECKREF (dst)) {
if (myvalid (core->io, dst)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_CODE);
}
}
}
break;
case R_ANAL_OP_TYPE_CALL:
{
ut64 dst = op.jump;
if (CHECKREF (dst)) {
if (myvalid (core->io, dst)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_CALL);
}
ESIL->old = cur + op.size;
getpcfromstack (core, ESIL);
}
}
break;
case R_ANAL_OP_TYPE_UJMP:
case R_ANAL_OP_TYPE_UCALL:
case R_ANAL_OP_TYPE_ICALL:
case R_ANAL_OP_TYPE_RCALL:
case R_ANAL_OP_TYPE_IRCALL:
case R_ANAL_OP_TYPE_MJMP:
{
ut64 dst = core->anal->esil->jump_target;
if (dst == 0 || dst == UT64_MAX) {
dst = r_reg_getv (core->anal->reg, pcname);
}
if (CHECKREF (dst)) {
if (myvalid (core->io, dst)) {
RAnalRefType ref =
(op.type & R_ANAL_OP_TYPE_MASK) == R_ANAL_OP_TYPE_UCALL
? R_ANAL_REF_TYPE_CALL
: R_ANAL_REF_TYPE_CODE;
r_anal_xrefs_set (core->anal, cur, dst, ref);
r_core_anal_fcn (core, dst, UT64_MAX, R_ANAL_REF_TYPE_NULL, 1);
// analyze function here
#if 0
if (op.type == R_ANAL_OP_TYPE_UCALL || op.type == R_ANAL_OP_TYPE_RCALL) {
eprintf ("0x%08"PFMT64x" RCALL TO %llx\n", cur, dst);
}
#endif
}
}
}
break;
default:
break;
}
r_anal_esil_stack_free (ESIL);
repeat:
if (!r_anal_get_block_at (core->anal, cur)) {
size_t fcn_i;
for (fcn_i = i_old + 1; fcn_i <= i; fcn_i++) {
if (r_anal_get_function_at (core->anal, start + fcn_i)) {
i = fcn_i - 1;
break;
}
}
}
if (i >= iend) {
break;
}
} while (get_next_i (&ictx, &i));
r_list_free (ictx.bbl);
r_list_free (ictx.path);
r_list_free (ictx.switch_path);
free (buf);
ESIL->cb.hook_mem_read = NULL;
ESIL->cb.hook_mem_write = NULL;
ESIL->cb.hook_reg_write = NULL;
ESIL->user = NULL;
r_anal_op_fini (&op);
r_cons_break_pop ();
// restore register
r_reg_arena_pop (core->anal->reg);
}
| 1
|
459,522
|
BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
struct bpf_map *, map, u64, flags)
{
struct perf_event *event = ctx->event;
struct perf_callchain_entry *trace;
bool kernel, user;
__u64 nr_kernel;
int ret;
/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
return bpf_get_stackid((unsigned long)(ctx->regs),
(unsigned long) map, flags, 0, 0);
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
return -EINVAL;
user = flags & BPF_F_USER_STACK;
kernel = !user;
trace = ctx->data->callchain;
if (unlikely(!trace))
return -EFAULT;
nr_kernel = count_kernel_ip(trace);
if (kernel) {
__u64 nr = trace->nr;
trace->nr = nr_kernel;
ret = __bpf_get_stackid(map, trace, flags);
/* restore nr */
trace->nr = nr;
} else { /* user */
u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
skip += nr_kernel;
if (skip > BPF_F_SKIP_FIELD_MASK)
return -EFAULT;
flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
ret = __bpf_get_stackid(map, trace, flags);
}
return ret;
}
| 0
|
336,126
|
static int __init ip6gre_init(void)
{
int err;
pr_info("GRE over IPv6 tunneling driver\n");
err = register_pernet_device(&ip6gre_net_ops);
if (err < 0)
return err;
err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
if (err < 0) {
pr_info("%s: can't add protocol\n", __func__);
goto add_proto_failed;
}
err = rtnl_link_register(&ip6gre_link_ops);
if (err < 0)
goto rtnl_link_failed;
err = rtnl_link_register(&ip6gre_tap_ops);
if (err < 0)
goto tap_ops_failed;
out:
return err;
tap_ops_failed:
rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
unregister_pernet_device(&ip6gre_net_ops);
goto out;
}
| 0
|
508,774
|
bool init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bool print_error, uint idx, bool reverse)
{
int error= 0;
DBUG_ENTER("init_read_record_idx");
empty_record(table);
bzero((char*) info,sizeof(*info));
info->thd= thd;
info->table= table;
info->record= table->record[0];
info->print_error= print_error;
info->unlock_row= rr_unlock_row;
table->status=0; /* And it's always found */
if (!table->file->inited &&
(error= table->file->ha_index_init(idx, 1)))
{
if (print_error)
table->file->print_error(error, MYF(0));
}
/* read_record will be changed to rr_index in rr_index_first */
info->read_record= reverse ? rr_index_last : rr_index_first;
DBUG_RETURN(error != 0);
}
| 0
|
195,334
|
GF_Err iloc_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 item_count, extent_count, i, j;
GF_ItemLocationBox *ptr = (GF_ItemLocationBox *)s;
ISOM_DECREASE_SIZE(ptr, 2)
ptr->offset_size = gf_bs_read_int(bs, 4);
ptr->length_size = gf_bs_read_int(bs, 4);
ptr->base_offset_size = gf_bs_read_int(bs, 4);
if (ptr->version == 1 || ptr->version == 2) {
ptr->index_size = gf_bs_read_int(bs, 4);
} else {
gf_bs_read_int(bs, 4);
}
if (ptr->version < 2) {
ISOM_DECREASE_SIZE(ptr, 2)
item_count = gf_bs_read_u16(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
item_count = gf_bs_read_u32(bs);
}
for (i = 0; i < item_count; i++) {
GF_ItemLocationEntry *location_entry = (GF_ItemLocationEntry *)gf_malloc(sizeof(GF_ItemLocationEntry));
if (!location_entry) return GF_OUT_OF_MEM;
gf_list_add(ptr->location_entries, location_entry);
if (ptr->version < 2) {
ISOM_DECREASE_SIZE(ptr, 2)
location_entry->item_ID = gf_bs_read_u16(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 4)
location_entry->item_ID = gf_bs_read_u32(bs);
}
if (ptr->version == 1 || ptr->version == 2) {
ISOM_DECREASE_SIZE(ptr, 2)
location_entry->construction_method = gf_bs_read_u16(bs);
}
else {
location_entry->construction_method = 0;
}
ISOM_DECREASE_SIZE(ptr, (2 + ptr->base_offset_size) )
location_entry->data_reference_index = gf_bs_read_u16(bs);
location_entry->base_offset = gf_bs_read_int(bs, 8*ptr->base_offset_size);
#ifndef GPAC_DISABLE_ISOM_WRITE
location_entry->original_base_offset = location_entry->base_offset;
#endif
ISOM_DECREASE_SIZE(ptr, 2)
extent_count = gf_bs_read_u16(bs);
location_entry->extent_entries = gf_list_new();
for (j = 0; j < extent_count; j++) {
GF_ItemExtentEntry *extent_entry = (GF_ItemExtentEntry *)gf_malloc(sizeof(GF_ItemExtentEntry));
if (!extent_entry) return GF_OUT_OF_MEM;
gf_list_add(location_entry->extent_entries, extent_entry);
if ((ptr->version == 1 || ptr->version == 2) && ptr->index_size > 0) {
ISOM_DECREASE_SIZE(ptr, ptr->index_size)
extent_entry->extent_index = gf_bs_read_int(bs, 8 * ptr->index_size);
}
else {
extent_entry->extent_index = 0;
}
ISOM_DECREASE_SIZE(ptr, (ptr->offset_size+ptr->length_size) )
extent_entry->extent_offset = gf_bs_read_int(bs, 8*ptr->offset_size);
extent_entry->extent_length = gf_bs_read_int(bs, 8*ptr->length_size);
#ifndef GPAC_DISABLE_ISOM_WRITE
extent_entry->original_extent_offset = extent_entry->extent_offset;
#endif
}
}
return GF_OK;
}
| 1
|
221,075
|
OpTypeConstructor UnaryGeneric(FullTypeId t) {
return [t](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_ANY);
return Status::OK();
};
}
| 0
|
206,025
|
gpg_ctx_add_recipient (struct _GpgCtx *gpg,
const gchar *keyid)
{
if (gpg->mode != GPG_CTX_MODE_ENCRYPT && gpg->mode != GPG_CTX_MODE_EXPORT)
return;
if (!gpg->recipients)
gpg->recipients = g_ptr_array_new ();
g_ptr_array_add (gpg->recipients, g_strdup (keyid));
}
| 1
|
337,814
|
int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
const struct sctp_association *asoc, enum sctp_cid cid,
struct sctp_init_chunk *peer_init,
struct sctp_chunk *chunk, struct sctp_chunk **errp)
{
union sctp_params param;
bool has_cookie = false;
int result;
/* Check for missing mandatory parameters. Note: Initial TSN is
* also mandatory, but is not checked here since the valid range
* is 0..2**32-1. RFC4960, section 3.3.3.
*/
if (peer_init->init_hdr.num_outbound_streams == 0 ||
peer_init->init_hdr.num_inbound_streams == 0 ||
peer_init->init_hdr.init_tag == 0 ||
ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW)
return sctp_process_inv_mandatory(asoc, chunk, errp);
sctp_walk_params(param, peer_init, init_hdr.params) {
if (param.p->type == SCTP_PARAM_STATE_COOKIE)
has_cookie = true;
}
/* There is a possibility that a parameter length was bad and
* in that case we would have stoped walking the parameters.
* The current param.p would point at the bad one.
* Current consensus on the mailing list is to generate a PROTOCOL
* VIOLATION error. We build the ERROR chunk here and let the normal
* error handling code build and send the packet.
*/
if (param.v != (void *)chunk->chunk_end)
return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
/* The only missing mandatory param possible today is
* the state cookie for an INIT-ACK chunk.
*/
if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
chunk, errp);
/* Verify all the variable length parameters */
sctp_walk_params(param, peer_init, init_hdr.params) {
result = sctp_verify_param(net, ep, asoc, param, cid,
chunk, errp);
switch (result) {
case SCTP_IERROR_ABORT:
case SCTP_IERROR_NOMEM:
return 0;
case SCTP_IERROR_ERROR:
return 1;
case SCTP_IERROR_NO_ERROR:
default:
break;
}
} /* for (loop through all parameters) */
return 1;
}
| 0
|
244,253
|
GF_Err lsr1_box_size(GF_Box *s)
{
u32 pos=0;
GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s;
s->size += 8;
gf_isom_check_position(s, (GF_Box *)ptr->lsr_config, &pos);
return GF_OK;
}
| 0
|
90,856
|
void GetLRUOrigin(StorageType type) {
lru_origin_ = GURL();
quota_manager_->GetLRUOrigin(type,
callback_factory_.NewCallback(&QuotaManagerTest::DidGetLRUOrigin));
}
| 0
|
175,784
|
const QuotaTableEntries& quota_entries() const { return quota_entries_; }
| 0
|
313,780
|
normal_search(
cmdarg_T *cap,
int dir,
char_u *pat,
int opt, // extra flags for do_search()
int *wrapped)
{
int i;
searchit_arg_T sia;
#ifdef FEAT_SEARCH_EXTRA
pos_T prev_cursor = curwin->w_cursor;
#endif
cap->oap->motion_type = MCHAR;
cap->oap->inclusive = FALSE;
cap->oap->use_reg_one = TRUE;
curwin->w_set_curswant = TRUE;
CLEAR_FIELD(sia);
i = do_search(cap->oap, dir, dir, pat, cap->count1,
opt | SEARCH_OPT | SEARCH_ECHO | SEARCH_MSG, &sia);
if (wrapped != NULL)
*wrapped = sia.sa_wrapped;
if (i == 0)
clearop(cap->oap);
else
{
if (i == 2)
cap->oap->motion_type = MLINE;
curwin->w_cursor.coladd = 0;
#ifdef FEAT_FOLDING
if (cap->oap->op_type == OP_NOP && (fdo_flags & FDO_SEARCH) && KeyTyped)
foldOpenCursor();
#endif
}
#ifdef FEAT_SEARCH_EXTRA
// Redraw the window to refresh the highlighted matches.
if (!EQUAL_POS(curwin->w_cursor, prev_cursor) && p_hls && !no_hlsearch)
redraw_later(SOME_VALID);
#endif
// "/$" will put the cursor after the end of the line, may need to
// correct that here
check_cursor();
return i;
}
| 0
|
359,269
|
DEFUN (no_router_bgp,
no_router_bgp_cmd,
"no router bgp <1-65535>",
NO_STR
ROUTER_STR
BGP_STR
AS_STR)
{
as_t as;
struct bgp *bgp;
const char *name = NULL;
VTY_GET_INTEGER_RANGE ("AS", as, argv[0], 1, 65535);
if (argc == 2)
name = argv[1];
/* Lookup bgp structure. */
bgp = bgp_lookup (as, name);
if (! bgp)
{
vty_out (vty, "%% Can't find BGP instance%s", VTY_NEWLINE);
return CMD_WARNING;
}
bgp_delete (bgp);
return CMD_SUCCESS;
}
| 0
|
248,314
|
DLLIMPORT const char *cfg_opt_getstr(cfg_opt_t *opt)
{
return cfg_opt_getnstr(opt, 0);
}
| 0
|
349,278
|
static void squashfs_stat(char *source)
{
time_t mkfs_time = (time_t) sBlk.s.mkfs_time;
struct tm *t = use_localtime ? localtime(&mkfs_time) :
gmtime(&mkfs_time);
char *mkfs_str = asctime(t);
long long xattr_ids = read_xattr_ids();
if(xattr_ids == -1)
EXIT_UNSQUASH("File system corruption detected\n");
printf("Found a valid SQUASHFS 4:0 superblock on %s.\n", source);
printf("Creation or last append time %s", mkfs_str ? mkfs_str :
"failed to get time\n");
printf("Filesystem size %llu bytes (%.2f Kbytes / %.2f Mbytes)\n",
sBlk.s.bytes_used, sBlk.s.bytes_used / 1024.0,
sBlk.s.bytes_used / (1024.0 * 1024.0));
printf("Compression %s\n", comp->name);
if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
int bytes;
if(!comp->supported)
printf("\tCould not display compressor options, because"
" %s compression is not supported\n",
comp->name);
else {
bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
if(bytes == 0) {
ERROR("Failed to read compressor options\n");
return;
}
compressor_display_options(comp, buffer, bytes);
}
}
printf("Block size %d\n", sBlk.s.block_size);
printf("Filesystem is %sexportable via NFS\n",
SQUASHFS_EXPORTABLE(sBlk.s.flags) ? "" : "not ");
printf("Inodes are %scompressed\n",
SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ? "un" : "");
printf("Data is %scompressed\n",
SQUASHFS_UNCOMPRESSED_DATA(sBlk.s.flags) ? "un" : "");
printf("Uids/Gids (Id table) are %scompressed\n",
SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ||
SQUASHFS_UNCOMPRESSED_IDS(sBlk.s.flags) ? "un" : "");
if(SQUASHFS_NO_FRAGMENTS(sBlk.s.flags))
printf("Fragments are not stored\n");
else {
printf("Fragments are %scompressed\n",
SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.s.flags) ?
"un" : "");
printf("Always-use-fragments option is %sspecified\n",
SQUASHFS_ALWAYS_FRAGMENTS(sBlk.s.flags) ? "" : "not ");
}
if(SQUASHFS_NO_XATTRS(sBlk.s.flags))
printf("Xattrs are not stored\n");
else
printf("Xattrs are %scompressed\n",
SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.s.flags) ? "un" : "");
printf("Duplicates are %sremoved\n", SQUASHFS_DUPLICATES(sBlk.s.flags)
? "" : "not ");
printf("Number of fragments %u\n", sBlk.s.fragments);
printf("Number of inodes %u\n", sBlk.s.inodes);
printf("Number of ids %d\n", sBlk.s.no_ids);
if(!SQUASHFS_NO_XATTRS(sBlk.s.flags))
printf("Number of xattr ids %lld\n", xattr_ids);
TRACE("sBlk.s.inode_table_start 0x%llx\n", sBlk.s.inode_table_start);
TRACE("sBlk.s.directory_table_start 0x%llx\n", sBlk.s.directory_table_start);
TRACE("sBlk.s.fragment_table_start 0x%llx\n", sBlk.s.fragment_table_start);
TRACE("sBlk.s.lookup_table_start 0x%llx\n", sBlk.s.lookup_table_start);
TRACE("sBlk.s.id_table_start 0x%llx\n", sBlk.s.id_table_start);
TRACE("sBlk.s.xattr_id_table_start 0x%llx\n", sBlk.s.xattr_id_table_start);
}
| 0
|
225,087
|
Status CheckOpDeprecation(const OpDef& op_def, int graph_def_version) {
if (op_def.has_deprecation()) {
const OpDeprecation& dep = op_def.deprecation();
if (graph_def_version >= dep.version()) {
return errors::Unimplemented(
"Op ", op_def.name(), " is not available in GraphDef version ",
graph_def_version, ". It has been removed in version ", dep.version(),
". ", dep.explanation(), ".");
} else {
// Warn only once for each op name, and do it in a threadsafe manner.
static mutex mu(LINKER_INITIALIZED);
static std::unordered_set<string> warned;
bool warn;
{
mutex_lock lock(mu);
warn = warned.insert(op_def.name()).second;
}
if (warn) {
LOG(WARNING) << "Op " << op_def.name() << " is deprecated."
<< " It will cease to work in GraphDef version "
<< dep.version() << ". " << dep.explanation() << ".";
}
}
}
return Status::OK();
}
| 0
|
289,266
|
static int _snd_pcm_hw_param_setinteger(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
{
int changed;
changed = snd_interval_setinteger(hw_param_interval(params, var));
if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
return changed;
}
| 0
|
294,655
|
dt_lite_jisx0301(int argc, VALUE *argv, VALUE self)
{
long n = 0;
rb_check_arity(argc, 0, 1);
if (argc >= 1)
n = NUM2LONG(argv[0]);
return rb_str_append(d_lite_jisx0301(self),
iso8601_timediv(self, n));
}
| 0
|
232,319
|
static GF_Err gf_isom_full_box_read(GF_Box *ptr, GF_BitStream *bs)
{
if (ptr->registry->max_version_plus_one) {
GF_FullBox *self = (GF_FullBox *) ptr;
ISOM_DECREASE_SIZE(ptr, 4)
self->version = gf_bs_read_u8(bs);
self->flags = gf_bs_read_u24(bs);
}
return GF_OK;
| 0
|
369,109
|
static int io_async_cancel_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
sqe->splice_fd_in)
return -EINVAL;
req->cancel.addr = READ_ONCE(sqe->addr);
return 0;
| 0
|
216,126
|
kssl_keytab_is_available(KSSL_CTX *kssl_ctx)
{
krb5_context krb5context = NULL;
krb5_keytab krb5keytab = NULL;
krb5_keytab_entry entry;
krb5_principal princ = NULL;
krb5_error_code krb5rc = KRB5KRB_ERR_GENERIC;
int rc = 0;
if ((krb5rc = krb5_init_context(&krb5context)))
return(0);
/* kssl_ctx->keytab_file == NULL ==> use Kerberos default
*/
if (kssl_ctx->keytab_file)
{
krb5rc = krb5_kt_resolve(krb5context, kssl_ctx->keytab_file,
&krb5keytab);
if (krb5rc)
goto exit;
}
else
{
krb5rc = krb5_kt_default(krb5context,&krb5keytab);
if (krb5rc)
goto exit;
}
/* the host key we are looking for */
krb5rc = krb5_sname_to_principal(krb5context, NULL,
kssl_ctx->service_name ? kssl_ctx->service_name: KRB5SVC,
KRB5_NT_SRV_HST, &princ);
krb5rc = krb5_kt_get_entry(krb5context, krb5keytab,
princ,
0 /* IGNORE_VNO */,
0 /* IGNORE_ENCTYPE */,
&entry);
if ( krb5rc == KRB5_KT_NOTFOUND ) {
rc = 1;
goto exit;
} else if ( krb5rc )
goto exit;
krb5_kt_free_entry(krb5context, &entry);
rc = 1;
exit:
if (krb5keytab) krb5_kt_close(krb5context, krb5keytab);
if (princ) krb5_free_principal(krb5context, princ);
if (krb5context) krb5_free_context(krb5context);
return(rc);
}
| 1
|
293,550
|
PJ_DEF(void) pj_cis_add_alpha(pj_cis_t *cis)
{
pj_cis_add_range( cis, 'a', 'z'+1);
pj_cis_add_range( cis, 'A', 'Z'+1);
}
| 0
|
309,916
|
lookup_user_capability(const char *name)
{
struct user_table_entry const *result = 0;
if (*name != 'k') {
result = _nc_find_user_entry(name);
}
return result;
}
| 0
|
231,015
|
BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
QueueSetHandle_t xQueueSet )
{
BaseType_t xReturn;
Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
{
/* The queue was not a member of the set. */
xReturn = pdFAIL;
}
else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
{
/* It is dangerous to remove a queue from a set when the queue is
* not empty because the queue set will still hold pending events for
* the queue. */
xReturn = pdFAIL;
}
else
{
taskENTER_CRITICAL();
{
/* The queue is no longer contained in the set. */
pxQueueOrSemaphore->pxQueueSetContainer = NULL;
}
taskEXIT_CRITICAL();
xReturn = pdPASS;
}
return xReturn;
} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
| 0
|
234,199
|
init_dwarf_regnames_riscv (void)
{
dwarf_regnames = NULL;
dwarf_regnames_count = 8192;
dwarf_regnames_lookup_func = regname_internal_riscv;
}
| 0
|
225,768
|
GF_Err mhap_box_write(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_Err e;
GF_MHACompatibleProfilesBox *ptr = (GF_MHACompatibleProfilesBox *) s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u8(bs, ptr->num_profiles);
for (i=0; i<ptr->num_profiles; i++) {
gf_bs_write_u8(bs, ptr->compat_profiles[i]);
}
return GF_OK;
| 0
|
259,715
|
static json_t * check_attestation_android_safetynet(json_t * j_params, cbor_item_t * auth_data, cbor_item_t * att_stmt, const unsigned char * client_data) {
json_t * j_error = json_array(), * j_return;
unsigned char pubkey_export[1024] = {0}, cert_export[32] = {0}, cert_export_b64[64], client_data_hash[32], * nonce_base = NULL, nonce_base_hash[32], * nonce_base_hash_b64 = NULL, * header_cert_decoded = NULL;
char * message = NULL, * response_token = NULL, issued_to[128] = {0}, * jwt_header = NULL;
size_t pubkey_export_len = 1024, cert_export_len = 32, cert_export_b64_len, issued_to_len = 128, client_data_hash_len = 32, nonce_base_hash_len = 32, nonce_base_hash_b64_len = 0, header_cert_decoded_len = 0;
gnutls_pubkey_t pubkey = NULL;
gnutls_x509_crt_t cert = NULL;
cbor_item_t * key, * response = NULL;
int i, ret;
jwt_t * j_response = NULL;
json_t * j_header_x5c = NULL, * j_cert = NULL, * j_header = NULL, * j_value = NULL;
gnutls_datum_t cert_dat;
int has_ver = 0;
if (j_error != NULL) {
do {
// Step 1
if (!cbor_isa_map(att_stmt) || cbor_map_size(att_stmt) != 2) {
json_array_append_new(j_error, json_string("CBOR map value 'attStmt' invalid format"));
break;
}
for (i=0; i<2; i++) {
key = cbor_map_handle(att_stmt)[i].key;
if (cbor_isa_string(key)) {
if (0 == o_strncmp((const char *)cbor_string_handle(key), "ver", MIN(o_strlen("ver"), cbor_string_length(key))) && cbor_isa_string(cbor_map_handle(att_stmt)[i].value)) {
has_ver = 1;
} else if (0 == o_strncmp((const char *)cbor_string_handle(key), "response", MIN(o_strlen("response"), cbor_string_length(key))) && cbor_isa_bytestring(cbor_map_handle(att_stmt)[i].value)) {
response = cbor_map_handle(att_stmt)[i].value;
} else {
message = msprintf("attStmt map element %d key is not valid: '%.*s'", i, cbor_string_length(key), cbor_string_handle(key));
json_array_append_new(j_error, json_string(message));
o_free(message);
break;
}
} else {
message = msprintf("attStmt map element %d key is not a string", i);
json_array_append_new(j_error, json_string(message));
o_free(message);
break;
}
}
if (!has_ver) {
json_array_append_new(j_error, json_string("version invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error ver missing");
break;
}
if (!generate_digest_raw(digest_SHA256, client_data, o_strlen((char *)client_data), client_data_hash, &client_data_hash_len)) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error generate_digest_raw client_data");
break;
}
if ((nonce_base = o_malloc(32 + cbor_bytestring_length(auth_data))) == NULL) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error allocating resources for nonce_base");
break;
}
memcpy(nonce_base, cbor_bytestring_handle(auth_data), cbor_bytestring_length(auth_data));
memcpy(nonce_base+cbor_bytestring_length(auth_data), client_data_hash, client_data_hash_len);
if (!generate_digest_raw(digest_SHA256, nonce_base, 32 + cbor_bytestring_length(auth_data), nonce_base_hash, &nonce_base_hash_len)) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error generate_digest_raw nonce_base");
break;
}
if ((nonce_base_hash_b64 = o_malloc(64)) == NULL) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error allocating resources for nonce_base_hash_b64");
break;
}
if (!o_base64_encode(nonce_base_hash, 32, nonce_base_hash_b64, &nonce_base_hash_b64_len)) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error o_base64_encode for nonce_base_hash_b64");
break;
}
if (response == NULL) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error response missing");
break;
}
if ((response_token = o_strndup((const char *)cbor_bytestring_handle(response), cbor_bytestring_length(response))) == NULL) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error o_strndup for response_token");
break;
}
if (r_jwt_init(&j_response) != RHN_OK) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error r_jwt_init");
break;
}
if (r_jwt_parse(j_response, response_token, 0) != RHN_OK) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error r_jwt_parse");
break;
}
if (o_strcmp(r_jwt_get_claim_str_value(j_response, "nonce"), (const char *)nonce_base_hash_b64)) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error nonce invalid");
break;
}
if (json_integer_value(json_object_get(j_params, "ctsProfileMatch")) != -1 && json_integer_value(json_object_get(j_params, "ctsProfileMatch")) != ((j_value = r_jwt_get_claim_json_t_value(j_response, "ctsProfileMatch"))==json_true()?1:0)) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error ctsProfileMatch invalid");
json_decref(j_value);
j_value = NULL;
break;
}
json_decref(j_value);
j_value = NULL;
if (json_integer_value(json_object_get(j_params, "basicIntegrity")) != -1 && json_integer_value(json_object_get(j_params, "basicIntegrity")) != ((j_value = r_jwt_get_claim_json_t_value(j_response, "basicIntegrity"))==json_true()?1:0)) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error basicIntegrity invalid");
j_value = NULL;
break;
}
json_decref(j_value);
j_value = NULL;
if (r_jwt_verify_signature(j_response, NULL, 0) != RHN_OK) {
json_array_append_new(j_error, json_string("Invalid signature"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error r_jwt_verify_signature");
break;
}
if ((j_header_x5c = r_jwt_get_header_json_t_value(j_response, "x5c")) == NULL) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error parsing x5c JSON");
break;
}
if (!json_is_string((j_cert = json_array_get(j_header_x5c, 0)))) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error x5c leaf not a string");
break;
}
if ((header_cert_decoded = o_malloc(json_string_length(j_cert))) == NULL) {
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error allocating resources for header_cert_decoded");
break;
}
if (!o_base64_decode((const unsigned char *)json_string_value(j_cert), json_string_length(j_cert), header_cert_decoded, &header_cert_decoded_len)) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error o_base64_decode x5c leaf");
break;
}
if (gnutls_x509_crt_init(&cert)) {
json_array_append_new(j_error, json_string("internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error gnutls_x509_crt_init");
break;
}
if (gnutls_pubkey_init(&pubkey)) {
json_array_append_new(j_error, json_string("internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error gnutls_pubkey_init");
break;
}
cert_dat.data = header_cert_decoded;
cert_dat.size = header_cert_decoded_len;
if ((ret = gnutls_x509_crt_import(cert, &cert_dat, GNUTLS_X509_FMT_DER)) < 0) {
json_array_append_new(j_error, json_string("Error importing x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error gnutls_pcert_import_x509_raw: %d", ret);
break;
}
if ((ret = gnutls_pubkey_import_x509(pubkey, cert, 0)) < 0) {
json_array_append_new(j_error, json_string("Error importing x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error gnutls_pubkey_import_x509: %d", ret);
break;
}
if ((ret = gnutls_x509_crt_get_key_id(cert, GNUTLS_KEYID_USE_SHA256, cert_export, &cert_export_len)) < 0) {
json_array_append_new(j_error, json_string("Error exporting x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error gnutls_x509_crt_get_key_id: %d", ret);
break;
}
if ((ret = gnutls_x509_crt_get_dn(cert, issued_to, &issued_to_len)) < 0) {
json_array_append_new(j_error, json_string("Error x509 dn"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error gnutls_x509_crt_get_dn: %d", ret);
break;
}
if (o_strnstr(issued_to, SAFETYNET_ISSUED_TO, issued_to_len) == NULL) {
json_array_append_new(j_error, json_string("Error x509 dn"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - safetynet certificate issued for %.*s", issued_to_len, issued_to);
break;
}
if (json_object_get(j_params, "google-root-ca-r2") != json_null()) {
if ((ret = validate_safetynet_ca_root(j_params, cert, j_header_x5c)) == G_ERROR_UNAUTHORIZED) {
json_array_append_new(j_error, json_string("Error x509 certificate chain validation"));
break;
} else if (ret != G_OK) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - safetynet certificate chain certificate validation error");
break;
}
}
if (!o_base64_encode(cert_export, cert_export_len, cert_export_b64, &cert_export_b64_len)) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error o_base64_encode cert_export");
break;
}
if ((ret = gnutls_pubkey_export(pubkey, GNUTLS_X509_FMT_PEM, pubkey_export, &pubkey_export_len)) < 0) {
json_array_append_new(j_error, json_string("response invalid"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_android_safetynet - Error gnutls_pubkey_export: %d", ret);
break;
}
} while (0);
if (json_array_size(j_error)) {
j_return = json_pack("{sisO}", "result", G_ERROR_PARAM, "error", j_error);
} else {
j_return = json_pack("{sis{ss%}}", "result", G_OK, "data", "certificate", cert_export_b64, cert_export_b64_len);
}
json_decref(j_error);
json_decref(j_header);
json_decref(j_header_x5c);
gnutls_pubkey_deinit(pubkey);
gnutls_x509_crt_deinit(cert);
r_jwt_free(j_response);
o_free(nonce_base);
o_free(nonce_base_hash_b64);
o_free(response_token);
o_free(header_cert_decoded);
o_free(jwt_header);
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_android_safetynet - Error allocating resources for j_error");
j_return = json_pack("{si}", "result", G_ERROR);
}
return j_return;
}
| 0
|
318,973
|
test_gui_tabmenu_event(dict_T *args UNUSED)
{
# ifdef FEAT_GUI_TABLINE
int tabnr;
int item;
if (dict_find(args, (char_u *)"tabnr", -1) == NULL
|| dict_find(args, (char_u *)"item", -1) == NULL)
return FALSE;
tabnr = (int)dict_get_number(args, (char_u *)"tabnr");
item = (int)dict_get_number(args, (char_u *)"item");
send_tabline_menu_event(tabnr, item);
# endif
return TRUE;
}
| 0
|
139,231
|
bool OverlayWindowViews::IsActive() const {
return views::Widget::IsActive();
}
| 0
|
234,153
|
process_cu_tu_index (struct dwarf_section *section, int do_display)
{
unsigned char *phdr = section->start;
unsigned char *limit = phdr + section->size;
unsigned char *phash;
unsigned char *pindex;
unsigned char *ppool;
unsigned int version;
unsigned int ncols = 0;
unsigned int nused;
unsigned int nslots;
unsigned int i;
unsigned int j;
dwarf_vma signature;
size_t total;
/* PR 17512: file: 002-168123-0.004. */
if (phdr == NULL)
{
warn (_("Section %s is empty\n"), section->name);
return 0;
}
/* PR 17512: file: 002-376-0.004. */
if (section->size < 24)
{
warn (_("Section %s is too small to contain a CU/TU header\n"),
section->name);
return 0;
}
phash = phdr;
SAFE_BYTE_GET_AND_INC (version, phash, 4, limit);
if (version >= 2)
SAFE_BYTE_GET_AND_INC (ncols, phash, 4, limit);
SAFE_BYTE_GET_AND_INC (nused, phash, 4, limit);
SAFE_BYTE_GET_AND_INC (nslots, phash, 4, limit);
pindex = phash + (size_t) nslots * 8;
ppool = pindex + (size_t) nslots * 4;
if (do_display)
{
introduce (section, false);
printf (_(" Version: %u\n"), version);
if (version >= 2)
printf (_(" Number of columns: %u\n"), ncols);
printf (_(" Number of used entries: %u\n"), nused);
printf (_(" Number of slots: %u\n\n"), nslots);
}
/* PR 17531: file: 45d69832. */
if (_mul_overflow ((size_t) nslots, 12, &total)
|| total > (size_t) (limit - phash))
{
warn (ngettext ("Section %s is too small for %u slot\n",
"Section %s is too small for %u slots\n",
nslots),
section->name, nslots);
return 0;
}
if (version == 1)
{
if (!do_display)
prealloc_cu_tu_list ((limit - ppool) / 4);
for (i = 0; i < nslots; i++)
{
unsigned char *shndx_list;
unsigned int shndx;
SAFE_BYTE_GET (signature, phash, 8, limit);
if (signature != 0)
{
SAFE_BYTE_GET (j, pindex, 4, limit);
shndx_list = ppool + j * 4;
/* PR 17531: file: 705e010d. */
if (shndx_list < ppool)
{
warn (_("Section index pool located before start of section\n"));
return 0;
}
if (do_display)
printf (_(" [%3d] Signature: 0x%s Sections: "),
i, dwarf_vmatoa ("x", signature));
for (;;)
{
if (shndx_list >= limit)
{
warn (_("Section %s too small for shndx pool\n"),
section->name);
return 0;
}
SAFE_BYTE_GET (shndx, shndx_list, 4, limit);
if (shndx == 0)
break;
if (do_display)
printf (" %d", shndx);
else
add_shndx_to_cu_tu_entry (shndx);
shndx_list += 4;
}
if (do_display)
printf ("\n");
else
end_cu_tu_entry ();
}
phash += 8;
pindex += 4;
}
}
else if (version == 2)
{
unsigned int val;
unsigned int dw_sect;
unsigned char *ph = phash;
unsigned char *pi = pindex;
unsigned char *poffsets = ppool + (size_t) ncols * 4;
unsigned char *psizes = poffsets + (size_t) nused * ncols * 4;
bool is_tu_index;
struct cu_tu_set *this_set = NULL;
unsigned int row;
unsigned char *prow;
size_t temp;
is_tu_index = strcmp (section->name, ".debug_tu_index") == 0;
/* PR 17531: file: 0dd159bf.
Check for integer overflow (can occur when size_t is 32-bit)
with overlarge ncols or nused values. */
if (nused == -1u
|| _mul_overflow ((size_t) ncols, 4, &temp)
|| _mul_overflow ((size_t) nused + 1, temp, &total)
|| total > (size_t) (limit - ppool))
{
warn (_("Section %s too small for offset and size tables\n"),
section->name);
return 0;
}
if (do_display)
{
printf (_(" Offset table\n"));
printf (" slot %-16s ",
is_tu_index ? _("signature") : _("dwo_id"));
}
else
{
if (is_tu_index)
{
tu_count = nused;
tu_sets = xcalloc2 (nused, sizeof (struct cu_tu_set));
this_set = tu_sets;
}
else
{
cu_count = nused;
cu_sets = xcalloc2 (nused, sizeof (struct cu_tu_set));
this_set = cu_sets;
}
}
if (do_display)
{
for (j = 0; j < ncols; j++)
{
unsigned char *p = ppool + j * 4;
SAFE_BYTE_GET (dw_sect, p, 4, limit);
printf (" %8s", get_DW_SECT_short_name (dw_sect));
}
printf ("\n");
}
for (i = 0; i < nslots; i++)
{
SAFE_BYTE_GET (signature, ph, 8, limit);
SAFE_BYTE_GET (row, pi, 4, limit);
if (row != 0)
{
/* PR 17531: file: a05f6ab3. */
if (row > nused)
{
warn (_("Row index (%u) is larger than number of used entries (%u)\n"),
row, nused);
return 0;
}
if (!do_display)
{
size_t num_copy = sizeof (uint64_t);
memcpy (&this_set[row - 1].signature, ph, num_copy);
}
prow = poffsets + (row - 1) * ncols * 4;
if (do_display)
printf (_(" [%3d] 0x%s"),
i, dwarf_vmatoa ("x", signature));
for (j = 0; j < ncols; j++)
{
unsigned char *p = prow + j * 4;
SAFE_BYTE_GET (val, p, 4, limit);
if (do_display)
printf (" %8d", val);
else
{
p = ppool + j * 4;
SAFE_BYTE_GET (dw_sect, p, 4, limit);
/* PR 17531: file: 10796eb3. */
if (dw_sect >= DW_SECT_MAX)
warn (_("Overlarge Dwarf section index detected: %u\n"), dw_sect);
else
this_set [row - 1].section_offsets [dw_sect] = val;
}
}
if (do_display)
printf ("\n");
}
ph += 8;
pi += 4;
}
ph = phash;
pi = pindex;
if (do_display)
{
printf ("\n");
printf (_(" Size table\n"));
printf (" slot %-16s ",
is_tu_index ? _("signature") : _("dwo_id"));
}
for (j = 0; j < ncols; j++)
{
unsigned char *p = ppool + j * 4;
SAFE_BYTE_GET (val, p, 4, limit);
if (do_display)
printf (" %8s", get_DW_SECT_short_name (val));
}
if (do_display)
printf ("\n");
for (i = 0; i < nslots; i++)
{
SAFE_BYTE_GET (signature, ph, 8, limit);
SAFE_BYTE_GET (row, pi, 4, limit);
if (row != 0)
{
prow = psizes + (row - 1) * ncols * 4;
if (do_display)
printf (_(" [%3d] 0x%s"),
i, dwarf_vmatoa ("x", signature));
for (j = 0; j < ncols; j++)
{
unsigned char *p = prow + j * 4;
/* PR 28645: Check for overflow. Since we do not know how
many populated rows there will be, we cannot just
perform a single check at the start of this function. */
if (p > (limit - 4))
{
if (do_display)
printf ("\n");
warn (_("Too many rows/columns in DWARF index section %s\n"),
section->name);
return 0;
}
SAFE_BYTE_GET (val, p, 4, limit);
if (do_display)
printf (" %8d", val);
else
{
p = ppool + j * 4;
SAFE_BYTE_GET (dw_sect, p, 4, limit);
if (dw_sect >= DW_SECT_MAX)
warn (_("Overlarge Dwarf section index detected: %u\n"), dw_sect);
else
this_set [row - 1].section_sizes [dw_sect] = val;
}
}
if (do_display)
printf ("\n");
}
ph += 8;
pi += 4;
}
}
else if (do_display)
printf (_(" Unsupported version (%d)\n"), version);
if (do_display)
printf ("\n");
return 1;
}
| 0
|
508,803
|
void LEX::free_arena_for_set_stmt()
{
DBUG_ENTER("LEX::free_arena_for_set_stmt");
if (!arena_for_set_stmt)
return;
DBUG_PRINT("info", ("mem_root: %p arena: %p",
arena_for_set_stmt->mem_root,
arena_for_set_stmt));
arena_for_set_stmt->free_items();
delete(arena_for_set_stmt);
free_root(mem_root_for_set_stmt, MYF(MY_KEEP_PREALLOC));
arena_for_set_stmt= 0;
DBUG_VOID_RETURN;
}
| 0
|
220,857
|
inline int SubscriptToIndex(const NdArrayDesc<8>& desc, int indexes[8]) {
return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] +
indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] +
indexes[4] * desc.strides[4] + indexes[5] * desc.strides[5] +
indexes[6] * desc.strides[6] + indexes[7] * desc.strides[7];
}
| 0
|
430,394
|
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key,
bool reset_key,
struct sw_flow_mask *mask)
{
memset(match, 0, sizeof(*match));
match->key = key;
match->mask = mask;
if (reset_key)
memset(key, 0, sizeof(*key));
if (mask) {
memset(&mask->key, 0, sizeof(mask->key));
mask->range.start = mask->range.end = 0;
}
}
| 0
|
512,242
|
Item_cache_str(THD *thd, const Item *item):
Item_cache(thd, item->type_handler()), value(0),
is_varbinary(item->type() == FIELD_ITEM &&
Item_cache_str::field_type() == MYSQL_TYPE_VARCHAR &&
!((const Item_field *) item)->field->has_charset())
{
collation.set(const_cast<DTCollation&>(item->collation));
}
| 0
|
278,266
|
tabstop_set(char_u *var, int **array)
{
int valcount = 1;
int t;
char_u *cp;
if (var[0] == NUL || (var[0] == '0' && var[1] == NUL))
{
*array = NULL;
return OK;
}
for (cp = var; *cp != NUL; ++cp)
{
if (cp == var || cp[-1] == ',')
{
char_u *end;
if (strtol((char *)cp, (char **)&end, 10) <= 0)
{
if (cp != end)
emsg(_(e_argument_must_be_positive));
else
semsg(_(e_invalid_argument_str), cp);
return FAIL;
}
}
if (VIM_ISDIGIT(*cp))
continue;
if (cp[0] == ',' && cp > var && cp[-1] != ',' && cp[1] != NUL)
{
++valcount;
continue;
}
semsg(_(e_invalid_argument_str), var);
return FAIL;
}
*array = ALLOC_MULT(int, valcount + 1);
if (*array == NULL)
return FAIL;
(*array)[0] = valcount;
t = 1;
for (cp = var; *cp != NUL;)
{
int n = atoi((char *)cp);
// Catch negative values, overflow and ridiculous big values.
if (n <= 0 || n > TABSTOP_MAX)
{
semsg(_(e_invalid_argument_str), cp);
vim_free(*array);
*array = NULL;
return FAIL;
}
(*array)[t++] = n;
while (*cp != NUL && *cp != ',')
++cp;
if (*cp != NUL)
++cp;
}
return OK;
}
| 0
|
282,871
|
int rsi_send_bgscan_probe_req(struct rsi_common *common,
struct ieee80211_vif *vif)
{
struct cfg80211_scan_request *scan_req = common->hwscan;
struct rsi_bgscan_probe *bgscan;
struct sk_buff *skb;
struct sk_buff *probereq_skb;
u16 frame_len = sizeof(*bgscan);
size_t ssid_len = 0;
u8 *ssid = NULL;
rsi_dbg(MGMT_TX_ZONE,
"%s: Sending bgscan probe req frame\n", __func__);
if (common->priv->sc_nvifs <= 0)
return -ENODEV;
if (scan_req->n_ssids) {
ssid = scan_req->ssids[0].ssid;
ssid_len = scan_req->ssids[0].ssid_len;
}
skb = dev_alloc_skb(frame_len + MAX_BGSCAN_PROBE_REQ_LEN);
if (!skb)
return -ENOMEM;
memset(skb->data, 0, frame_len + MAX_BGSCAN_PROBE_REQ_LEN);
bgscan = (struct rsi_bgscan_probe *)skb->data;
bgscan->desc_dword0.frame_type = BG_SCAN_PROBE_REQ;
bgscan->flags = cpu_to_le16(HOST_BG_SCAN_TRIG);
if (common->band == NL80211_BAND_5GHZ) {
bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_6);
bgscan->def_chan = cpu_to_le16(40);
} else {
bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_1);
bgscan->def_chan = cpu_to_le16(11);
}
bgscan->channel_scan_time = cpu_to_le16(RSI_CHANNEL_SCAN_TIME);
probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr, ssid,
ssid_len, scan_req->ie_len);
if (!probereq_skb) {
dev_kfree_skb(skb);
return -ENOMEM;
}
memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len);
bgscan->probe_req_length = cpu_to_le16(probereq_skb->len);
rsi_set_len_qno(&bgscan->desc_dword0.len_qno,
(frame_len - FRAME_DESC_SZ + probereq_skb->len),
RSI_WIFI_MGMT_Q);
skb_put(skb, frame_len + probereq_skb->len);
dev_kfree_skb(probereq_skb);
return rsi_send_internal_mgmt_frame(common, skb);
}
| 0
|
409,424
|
set_color_count(int nr)
{
char_u nr_colors[20]; // string for number of colors
t_colors = nr;
if (t_colors > 1)
sprintf((char *)nr_colors, "%d", t_colors);
else
*nr_colors = NUL;
set_string_option_direct((char_u *)"t_Co", -1, nr_colors, OPT_FREE, 0);
}
| 0
|
474,452
|
ObjectGetNameAlg(
OBJECT *object // IN: handle of the object
)
{
return object->publicArea.nameAlg;
}
| 0
|
231,533
|
send_fd (const int sock, const int fd)
{
struct msghdr msg = {0};
union
{
struct cmsghdr hdr;
char buf[CMSG_SPACE (sizeof (int))];
} cmsgbuf = {0};
struct cmsghdr *cmsg;
struct iovec vec;
char ch = 'A';
ssize_t n;
msg.msg_control = &cmsgbuf.buf;
msg.msg_controllen = sizeof (cmsgbuf.buf);
cmsg = CMSG_FIRSTHDR (&msg);
cmsg->cmsg_len = CMSG_LEN (sizeof (int));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
memcpy (CMSG_DATA (cmsg), &fd, sizeof (fd));
vec.iov_base = &ch;
vec.iov_len = 1;
msg.msg_iov = &vec;
msg.msg_iovlen = 1;
while ((n = sendmsg (sock, &msg, 0)) == -1 && errno == EINTR);
TEST_VERIFY_EXIT (n == 1);
}
| 0
|
275,972
|
int uECC_sign(const uint8_t *private_key,
const uint8_t *message_hash,
unsigned hash_size,
uint8_t *signature,
uECC_Curve curve) {
uECC_word_t k[uECC_MAX_WORDS];
uECC_word_t tries;
for (tries = 0; tries < uECC_RNG_MAX_TRIES; ++tries) {
if (!uECC_generate_random_int(k, curve->n, BITS_TO_WORDS(curve->num_n_bits))) {
return 0;
}
if (uECC_sign_with_k(private_key, message_hash, hash_size, k, signature, curve)) {
return 1;
}
}
return 0;
}
| 0
|
273,874
|
static void handle_NLST(ctrl_t *ctrl, char *arg)
{
list(ctrl, arg, 1);
}
| 0
|
512,788
|
longlong val_int()
{
return longlong_from_hex_hybrid(str_value.ptr(), str_value.length());
}
| 0
|
349,901
|
static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
{
u32 gsr, val, rbl_status;
int k;
aq_hw_write_reg(self, 0x404, 0x40e1);
aq_hw_write_reg(self, 0x3a0, 0x1);
aq_hw_write_reg(self, 0x32a8, 0x0);
/* Alter RBL status */
aq_hw_write_reg(self, 0x388, 0xDEAD);
/* Cleanup SPI */
val = aq_hw_read_reg(self, 0x53C);
aq_hw_write_reg(self, 0x53C, val | 0x10);
/* Global software reset*/
hw_atl_rx_rx_reg_res_dis_set(self, 0U);
hw_atl_tx_tx_reg_res_dis_set(self, 0U);
aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
(gsr & 0xFFFFBFFF) | 0x8000);
if (FORCE_FLASHLESS)
aq_hw_write_reg(self, 0x534, 0x0);
aq_hw_write_reg(self, 0x404, 0x40e0);
/* Wait for RBL boot */
for (k = 0; k < 1000; k++) {
rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
if (rbl_status && rbl_status != 0xDEAD)
break;
AQ_HW_SLEEP(10);
}
if (!rbl_status || rbl_status == 0xDEAD) {
aq_pr_err("RBL Restart failed");
return -EIO;
}
/* Restore NVR */
if (FORCE_FLASHLESS)
aq_hw_write_reg(self, 0x534, 0xA0);
if (rbl_status == 0xF1A7) {
aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
return -EOPNOTSUPP;
}
for (k = 0; k < 1000; k++) {
u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
if (fw_state)
break;
AQ_HW_SLEEP(10);
}
if (k == 1000) {
aq_pr_err("FW kickstart failed\n");
return -EIO;
}
/* Old FW requires fixed delay after init */
AQ_HW_SLEEP(15);
return 0;
}
| 0
|
312,540
|
get_mef_name(void)
{
char_u *p;
char_u *name;
static int start = -1;
static int off = 0;
#ifdef HAVE_LSTAT
stat_T sb;
#endif
if (*p_mef == NUL)
{
name = vim_tempname('e', FALSE);
if (name == NULL)
emsg(_(e_cant_get_temp_file_name));
return name;
}
for (p = p_mef; *p; ++p)
if (p[0] == '#' && p[1] == '#')
break;
if (*p == NUL)
return vim_strsave(p_mef);
// Keep trying until the name doesn't exist yet.
for (;;)
{
if (start == -1)
start = mch_get_pid();
else
off += 19;
name = alloc_id(STRLEN(p_mef) + 30, aid_qf_mef_name);
if (name == NULL)
break;
STRCPY(name, p_mef);
sprintf((char *)name + (p - p_mef), "%d%d", start, off);
STRCAT(name, p + 2);
if (mch_getperm(name) < 0
#ifdef HAVE_LSTAT
// Don't accept a symbolic link, it's a security risk.
&& mch_lstat((char *)name, &sb) < 0
#endif
)
break;
vim_free(name);
}
return name;
}
| 0
|
238,531
|
static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
s32 smin_val = src_reg->s32_min_value;
s32 smax_val = src_reg->s32_max_value;
u32 umin_val = src_reg->u32_min_value;
u32 umax_val = src_reg->u32_max_value;
if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->s32_min_value = S32_MIN;
dst_reg->s32_max_value = S32_MAX;
} else {
dst_reg->s32_min_value -= smax_val;
dst_reg->s32_max_value -= smin_val;
}
if (dst_reg->u32_min_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->u32_min_value = 0;
dst_reg->u32_max_value = U32_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->u32_min_value -= umax_val;
dst_reg->u32_max_value -= umin_val;
}
}
| 0
|
261,958
|
njs_string_base64(njs_vm_t *vm, njs_value_t *value, const njs_str_t *src)
{
size_t length;
njs_str_t dst;
length = njs_encode_base64_length(src, &dst.length);
if (njs_slow_path(dst.length == 0)) {
vm->retval = njs_string_empty;
return NJS_OK;
}
dst.start = njs_string_alloc(vm, value, dst.length, length);
if (njs_slow_path(dst.start == NULL)) {
return NJS_ERROR;
}
njs_encode_base64(&dst, src);
return NJS_OK;
}
| 0
|
393,479
|
static SQInteger base_setroottable(HSQUIRRELVM v)
{
SQObjectPtr o = v->_roottable;
if(SQ_FAILED(sq_setroottable(v))) return SQ_ERROR;
v->Push(o);
return 1;
}
| 0
|
294,665
|
date_s_test_all(VALUE klass)
{
if (date_s_test_civil(klass) == Qfalse)
return Qfalse;
if (date_s_test_ordinal(klass) == Qfalse)
return Qfalse;
if (date_s_test_commercial(klass) == Qfalse)
return Qfalse;
if (date_s_test_weeknum(klass) == Qfalse)
return Qfalse;
if (date_s_test_nth_kday(klass) == Qfalse)
return Qfalse;
if (date_s_test_unit_conv(klass) == Qfalse)
return Qfalse;
return Qtrue;
}
| 0
|
418,794
|
ins_mousescroll(int dir)
{
pos_T tpos;
win_T *old_curwin = curwin, *wp;
int did_scroll = FALSE;
tpos = curwin->w_cursor;
if (mouse_row >= 0 && mouse_col >= 0)
{
int row, col;
row = mouse_row;
col = mouse_col;
// find the window at the pointer coordinates
wp = mouse_find_win(&row, &col, FIND_POPUP);
if (wp == NULL)
return;
curwin = wp;
curbuf = curwin->w_buffer;
}
if (curwin == old_curwin)
undisplay_dollar();
// Don't scroll the window in which completion is being done.
if (!pum_visible() || curwin != old_curwin)
{
long step;
if (dir == MSCR_DOWN || dir == MSCR_UP)
{
if (mouse_vert_step < 0
|| mod_mask & (MOD_MASK_SHIFT | MOD_MASK_CTRL))
step = (long)(curwin->w_botline - curwin->w_topline);
else
step = mouse_vert_step;
scroll_redraw(dir, step);
# ifdef FEAT_PROP_POPUP
if (WIN_IS_POPUP(curwin))
popup_set_firstline(curwin);
# endif
}
#ifdef FEAT_GUI
else
{
int val;
if (mouse_hor_step < 0
|| mod_mask & (MOD_MASK_SHIFT | MOD_MASK_CTRL))
step = curwin->w_width;
else
step = mouse_hor_step;
val = curwin->w_leftcol + (dir == MSCR_RIGHT ? -step : step);
if (val < 0)
val = 0;
gui_do_horiz_scroll(val, TRUE);
}
#endif
did_scroll = TRUE;
may_trigger_winscrolled();
}
curwin->w_redr_status = TRUE;
curwin = old_curwin;
curbuf = curwin->w_buffer;
// The popup menu may overlay the window, need to redraw it.
// TODO: Would be more efficient to only redraw the windows that are
// overlapped by the popup menu.
if (pum_visible() && did_scroll)
{
redraw_all_later(UPD_NOT_VALID);
ins_compl_show_pum();
}
if (!EQUAL_POS(curwin->w_cursor, tpos))
{
start_arrow(&tpos);
set_can_cindent(TRUE);
}
}
| 0
|
273,411
|
Tensor UnalignedSlice(const Tensor& t, int pos) const {
Tensor res;
// CHECK should never fail here, since the number of elements must match
CHECK(res.CopyFrom(t.Slice(pos, pos + 1), {t.dim_size(1), t.dim_size(2)}));
return res;
}
| 0
|
359,287
|
zebra_route_char(u_int zroute)
{
return zroute_lookup(zroute)->chr;
}
| 0
|
291,770
|
static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 imm_type, imm_payload;
bool w_inval = false;
int err;
if (wc->status != IB_WC_SUCCESS) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
rtrs_err(clt_path->clt, "RDMA failed: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
return;
}
rtrs_clt_update_wc_stats(con);
switch (wc->opcode) {
case IB_WC_RECV_RDMA_WITH_IMM:
/*
* post_recv() RDMA write completions of IO reqs (read/write)
* and hb
*/
if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
return;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
&imm_type, &imm_payload);
if (imm_type == RTRS_IO_RSP_IMM ||
imm_type == RTRS_IO_RSP_W_INV_IMM) {
u32 msg_id;
w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
process_io_rsp(clt_path, msg_id, err, w_inval);
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
rtrs_send_hb_ack(&clt_path->s);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
clt_path->s.hb_missed_cnt = 0;
clt_path->s.hb_cur_latency =
ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
imm_type);
}
if (w_inval)
/*
* Post x2 empty WRs: first is for this RDMA with IMM,
* second is for RECV with INV, which happened earlier.
*/
err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
else
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
err);
rtrs_rdma_error_recovery(con);
}
break;
case IB_WC_RECV:
/*
* Key invalidations from server side
*/
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
return rtrs_clt_recv_done(con, wc);
return rtrs_clt_rkey_rsp_done(con, wc);
}
break;
case IB_WC_RDMA_WRITE:
/*
* post_send() RDMA write completions of IO reqs (read/write)
* and hb.
*/
break;
default:
rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
return;
}
}
| 0
|
383,375
|
gdImageColorExactAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int i;
if (im->trueColor)
{
return gdTrueColorAlpha (r, g, b, a);
}
for (i = 0; (i < (im->colorsTotal)); i++)
{
if (im->open[i])
{
continue;
}
if ((im->red[i] == r) &&
(im->green[i] == g) &&
(im->blue[i] == b) &&
(im->alpha[i] == a))
{
return i;
}
}
return -1;
}
| 0
|
221,481
|
add_exports (GPtrArray *env_array,
const ExportData *exports,
gsize n_exports)
{
int i;
for (i = 0; i < n_exports; i++)
{
if (exports[i].val)
g_ptr_array_add (env_array, g_strdup_printf ("%s=%s", exports[i].env, exports[i].val));
}
}
| 0
|
294,494
|
valid_civil_p(VALUE y, int m, int d, double sg,
VALUE *nth, int *ry,
int *rm, int *rd, int *rjd,
int *ns)
{
double style = guess_style(y, sg);
int r;
if (style == 0) {
int jd;
r = c_valid_civil_p(FIX2INT(y), m, d, sg, rm, rd, &jd, ns);
if (!r)
return 0;
decode_jd(INT2FIX(jd), nth, rjd);
if (f_zero_p(*nth))
*ry = FIX2INT(y);
else {
VALUE nth2;
decode_year(y, *ns ? -1 : +1, &nth2, ry);
}
}
else {
decode_year(y, style, nth, ry);
if (style < 0)
r = c_valid_gregorian_p(*ry, m, d, rm, rd);
else
r = c_valid_julian_p(*ry, m, d, rm, rd);
if (!r)
return 0;
c_civil_to_jd(*ry, *rm, *rd, style, rjd, ns);
}
return r;
}
| 0
|
301,431
|
static ssize_t vfswrap_fgetxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, const char *name, void *value, size_t size)
{
return fgetxattr(fsp->fh->fd, name, value, size);
}
| 0
|
486,832
|
static void gem_reset(DeviceState *d)
{
int i;
CadenceGEMState *s = CADENCE_GEM(d);
const uint8_t *a;
uint32_t queues_mask = 0;
DB_PRINT("\n");
/* Set post reset register values */
memset(&s->regs[0], 0, sizeof(s->regs));
s->regs[GEM_NWCFG] = 0x00080000;
s->regs[GEM_NWSTATUS] = 0x00000006;
s->regs[GEM_DMACFG] = 0x00020784;
s->regs[GEM_IMR] = 0x07ffffff;
s->regs[GEM_TXPAUSE] = 0x0000ffff;
s->regs[GEM_TXPARTIALSF] = 0x000003ff;
s->regs[GEM_RXPARTIALSF] = 0x000003ff;
s->regs[GEM_MODID] = s->revision;
s->regs[GEM_DESCONF] = 0x02D00111;
s->regs[GEM_DESCONF2] = 0x2ab10000 | s->jumbo_max_len;
s->regs[GEM_DESCONF5] = 0x002f2045;
s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK;
s->regs[GEM_INT_Q1_MASK] = 0x00000CE6;
s->regs[GEM_JUMBO_MAX_LEN] = s->jumbo_max_len;
if (s->num_priority_queues > 1) {
queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
s->regs[GEM_DESCONF6] |= queues_mask;
}
/* Set MAC address */
a = &s->conf.macaddr.a[0];
s->regs[GEM_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);
s->regs[GEM_SPADDR1HI] = a[4] | (a[5] << 8);
for (i = 0; i < 4; i++) {
s->sar_active[i] = false;
}
gem_phy_reset(s);
gem_update_int_status(s);
}
| 0
|
221,654
|
bool Socket::readyForOutput()
{
//if (!isssl) {
return BaseSocket::readyForOutput();
//}
//cant do this on a blocking ssl socket as far as i can work out
//return true;
}
| 0
|
317,225
|
static int smack_watch_key(struct key *key)
{
struct smk_audit_info ad;
struct smack_known *tkp = smk_of_current();
int rc;
if (key == NULL)
return -EINVAL;
/*
* If the key hasn't been initialized give it access so that
* it may do so.
*/
if (key->security == NULL)
return 0;
/*
* This should not occur
*/
if (tkp == NULL)
return -EACCES;
if (smack_privileged_cred(CAP_MAC_OVERRIDE, current_cred()))
return 0;
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
ad.a.u.key_struct.key = key->serial;
ad.a.u.key_struct.key_desc = key->description;
#endif
rc = smk_access(tkp, key->security, MAY_READ, &ad);
rc = smk_bu_note("key watch", tkp, key->security, MAY_READ, rc);
return rc;
}
| 0
|
197,262
|
void Compute(OpKernelContext* ctx) override {
const Tensor& a = ctx->input(0);
const Tensor& b = ctx->input(1);
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a.shape()),
errors::InvalidArgument("a is not a matrix"));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b.shape()),
errors::InvalidArgument("b is not a matrix"));
const int m = transpose_a_ ? a.dim_size(1) : a.dim_size(0);
const int k = transpose_a_ ? a.dim_size(0) : a.dim_size(1);
const int n = transpose_b_ ? b.dim_size(0) : b.dim_size(1);
const int k2 = transpose_b_ ? b.dim_size(1) : b.dim_size(0);
OP_REQUIRES(ctx, k == k2,
errors::InvalidArgument(
"Matrix size incompatible: a: ", a.shape().DebugString(),
", b: ", b.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({m, n}), &output));
if (k == 0) {
// If the inner dimension k in the matrix multiplication is zero, we fill
// the output with zeros.
functor::SetZeroFunctor<CPUDevice, float> f;
f(ctx->eigen_device<CPUDevice>(), output->flat<float>());
return;
}
auto out = output->matrix<float>();
std::unique_ptr<Tensor> a_float;
std::unique_ptr<Tensor> b_float;
if (!a_is_sparse_ && !b_is_sparse_) {
auto left = &a;
auto right = &b;
// TODO(agarwal): multi-thread the conversions from bfloat16 to float.
if (std::is_same<TL, bfloat16>::value) {
a_float.reset(new Tensor(DT_FLOAT, a.shape()));
BFloat16ToFloat(a.flat<bfloat16>().data(),
a_float->flat<float>().data(), a.NumElements());
left = a_float.get();
}
if (std::is_same<TR, bfloat16>::value) {
b_float.reset(new Tensor(DT_FLOAT, b.shape()));
BFloat16ToFloat(b.flat<bfloat16>().data(),
b_float->flat<float>().data(), b.NumElements());
right = b_float.get();
}
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
dim_pair[0].first = transpose_a_ ? 0 : 1;
dim_pair[0].second = transpose_b_ ? 1 : 0;
out.device(ctx->template eigen_device<CPUDevice>()) =
left->matrix<float>().contract(right->matrix<float>(), dim_pair);
return;
}
auto left = &a;
auto right = &b;
bool transpose_output = false;
bool transpose_a = transpose_a_;
bool transpose_b = transpose_b_;
if (!a_is_sparse_) {
// Swap the order of multiplications using the identity:
// A * B = (B' * A')'.
std::swap(left, right);
std::swap(transpose_a, transpose_b);
transpose_a = !transpose_a;
transpose_b = !transpose_b;
transpose_output = !transpose_output;
}
std::unique_ptr<Tensor> right_tr;
if (transpose_b) {
// TODO(agarwal): avoid transposing the matrix here and directly handle
// transpose in CreateDenseSlices.
OP_REQUIRES(ctx, right->dim_size(0) != 0,
errors::InvalidArgument("b has an entry 0 in it's shape."));
OP_REQUIRES(ctx, right->dim_size(1) != 0,
errors::InvalidArgument("b has an entry 0 in it's shape."));
right_tr.reset(
new Tensor(right->dtype(),
TensorShape({right->dim_size(1), right->dim_size(0)})));
const auto perm = dsizes_10();
if (transpose_output) {
right_tr->matrix<TL>().device(ctx->template eigen_device<CPUDevice>()) =
right->matrix<TL>().shuffle(perm);
} else {
right_tr->matrix<TR>().device(ctx->template eigen_device<CPUDevice>()) =
right->matrix<TR>().shuffle(perm);
}
right = right_tr.get();
}
if (transpose_output) {
DoMatMul<TR, TL>::Compute(&this->cache_tr_, left->matrix<TR>(),
right->matrix<TL>(), transpose_a,
ctx->device()->tensorflow_cpu_worker_threads(),
transpose_output, &out);
} else {
DoMatMul<TL, TR>::Compute(&this->cache_nt_, left->matrix<TL>(),
right->matrix<TR>(), transpose_a,
ctx->device()->tensorflow_cpu_worker_threads(),
transpose_output, &out);
}
}
| 1
|
369,240
|
static void io_req_complete_failed(struct io_kiocb *req, s32 res)
{
req_set_fail(req);
io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
}
| 0
|
344,803
|
strcmp_maybe_null(const char *a, const char *b)
{
if ((a == NULL && b != NULL) || (a != NULL && b == NULL))
return 0;
if (a != NULL && strcmp(a, b) != 0)
return 0;
return 1;
}
| 0
|
345,215
|
int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
int err = 0, err1, i;
struct uni_pagedir *p, *q;
struct unipair *unilist, *plist;
if (!ct)
return 0;
unilist = vmemdup_user(list, ct * sizeof(struct unipair));
if (IS_ERR(unilist))
return PTR_ERR(unilist);
console_lock();
/* Save original vc_unipagdir_loc in case we allocate a new one */
p = *vc->vc_uni_pagedir_loc;
if (!p) {
err = -EINVAL;
goto out_unlock;
}
if (p->refcount > 1) {
int j, k;
u16 **p1, *p2, l;
err1 = con_do_clear_unimap(vc);
if (err1) {
err = err1;
goto out_unlock;
}
/*
* Since refcount was > 1, con_clear_unimap() allocated a
* a new uni_pagedir for this vc. Re: p != q
*/
q = *vc->vc_uni_pagedir_loc;
/*
* uni_pgdir is a 32*32*64 table with rows allocated
* when its first entry is added. The unicode value must
* still be incremented for empty rows. We are copying
* entries from "p" (old) to "q" (new).
*/
l = 0; /* unicode value */
for (i = 0; i < 32; i++) {
p1 = p->uni_pgdir[i];
if (p1)
for (j = 0; j < 32; j++) {
p2 = p1[j];
if (p2) {
for (k = 0; k < 64; k++, l++)
if (p2[k] != 0xffff) {
/*
* Found one, copy entry for unicode
* l with fontpos value p2[k].
*/
err1 = con_insert_unipair(q, l, p2[k]);
if (err1) {
p->refcount++;
*vc->vc_uni_pagedir_loc = p;
con_release_unimap(q);
kfree(q);
err = err1;
goto out_unlock;
}
}
} else {
/* Account for row of 64 empty entries */
l += 64;
}
}
else
/* Account for empty table */
l += 32 * 64;
}
/*
* Finished copying font table, set vc_uni_pagedir to new table
*/
p = q;
} else if (p == dflt) {
dflt = NULL;
}
/*
* Insert user specified unicode pairs into new table.
*/
for (plist = unilist; ct; ct--, plist++) {
err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
if (err1)
err = err1;
}
/*
* Merge with fontmaps of any other virtual consoles.
*/
if (con_unify_unimap(vc, p))
goto out_unlock;
for (i = 0; i <= 3; i++)
set_inverse_transl(vc, p, i); /* Update inverse translations */
set_inverse_trans_unicode(vc, p);
out_unlock:
console_unlock();
kvfree(unilist);
return err;
}
| 0
|
448,912
|
int ZEXPORT inflate(strm, flush)
z_streamp strm;
int flush;
{
struct inflate_state FAR *state;
z_const unsigned char FAR *next; /* next input */
unsigned char FAR *put; /* next output */
unsigned have, left; /* available input and output */
unsigned long hold; /* bit buffer */
unsigned bits; /* bits in bit buffer */
unsigned in, out; /* save starting available input and output */
unsigned copy; /* number of stored or match bytes to copy */
unsigned char FAR *from; /* where to copy match bytes from */
code here; /* current decoding table entry */
code last; /* parent table entry */
unsigned len; /* length to copy for repeats, bits to drop */
int ret; /* return code */
#ifdef GUNZIP
unsigned char hbuf[4]; /* buffer for gzip header crc calculation */
#endif
static const unsigned short order[19] = /* permutation of code lengths */
{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
if (inflateStateCheck(strm) || strm->next_out == Z_NULL ||
(strm->next_in == Z_NULL && strm->avail_in != 0))
return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
LOAD();
in = have;
out = left;
ret = Z_OK;
for (;;)
switch (state->mode) {
case HEAD:
if (state->wrap == 0) {
state->mode = TYPEDO;
break;
}
NEEDBITS(16);
#ifdef GUNZIP
if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */
if (state->wbits == 0)
state->wbits = 15;
state->check = crc32(0L, Z_NULL, 0);
CRC2(state->check, hold);
INITBITS();
state->mode = FLAGS;
break;
}
if (state->head != Z_NULL)
state->head->done = -1;
if (!(state->wrap & 1) || /* check if zlib header allowed */
#else
if (
#endif
((BITS(8) << 8) + (hold >> 8)) % 31) {
strm->msg = (char *)"incorrect header check";
state->mode = BAD;
break;
}
if (BITS(4) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
DROPBITS(4);
len = BITS(4) + 8;
if (state->wbits == 0)
state->wbits = len;
if (len > 15 || len > state->wbits) {
strm->msg = (char *)"invalid window size";
state->mode = BAD;
break;
}
state->dmax = 1U << len;
state->flags = 0; /* indicate zlib header */
Tracev((stderr, "inflate: zlib header ok\n"));
strm->adler = state->check = adler32(0L, Z_NULL, 0);
state->mode = hold & 0x200 ? DICTID : TYPE;
INITBITS();
break;
#ifdef GUNZIP
case FLAGS:
NEEDBITS(16);
state->flags = (int)(hold);
if ((state->flags & 0xff) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
if (state->flags & 0xe000) {
strm->msg = (char *)"unknown header flags set";
state->mode = BAD;
break;
}
if (state->head != Z_NULL)
state->head->text = (int)((hold >> 8) & 1);
if ((state->flags & 0x0200) && (state->wrap & 4))
CRC2(state->check, hold);
INITBITS();
state->mode = TIME;
/* fallthrough */
case TIME:
NEEDBITS(32);
if (state->head != Z_NULL)
state->head->time = hold;
if ((state->flags & 0x0200) && (state->wrap & 4))
CRC4(state->check, hold);
INITBITS();
state->mode = OS;
/* fallthrough */
case OS:
NEEDBITS(16);
if (state->head != Z_NULL) {
state->head->xflags = (int)(hold & 0xff);
state->head->os = (int)(hold >> 8);
}
if ((state->flags & 0x0200) && (state->wrap & 4))
CRC2(state->check, hold);
INITBITS();
state->mode = EXLEN;
/* fallthrough */
case EXLEN:
if (state->flags & 0x0400) {
NEEDBITS(16);
state->length = (unsigned)(hold);
if (state->head != Z_NULL)
state->head->extra_len = (unsigned)hold;
if ((state->flags & 0x0200) && (state->wrap & 4))
CRC2(state->check, hold);
INITBITS();
}
else if (state->head != Z_NULL)
state->head->extra = Z_NULL;
state->mode = EXTRA;
/* fallthrough */
case EXTRA:
if (state->flags & 0x0400) {
copy = state->length;
if (copy > have) copy = have;
if (copy) {
len = state->head->extra_len - state->length;
if (state->head != Z_NULL &&
state->head->extra != Z_NULL &&
len < state->head->extra_max) {
zmemcpy(state->head->extra + len, next,
len + copy > state->head->extra_max ?
state->head->extra_max - len : copy);
}
if ((state->flags & 0x0200) && (state->wrap & 4))
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
state->length -= copy;
}
if (state->length) goto inf_leave;
}
state->length = 0;
state->mode = NAME;
/* fallthrough */
case NAME:
if (state->flags & 0x0800) {
if (have == 0) goto inf_leave;
copy = 0;
do {
len = (unsigned)(next[copy++]);
if (state->head != Z_NULL &&
state->head->name != Z_NULL &&
state->length < state->head->name_max)
state->head->name[state->length++] = (Bytef)len;
} while (len && copy < have);
if ((state->flags & 0x0200) && (state->wrap & 4))
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
if (len) goto inf_leave;
}
else if (state->head != Z_NULL)
state->head->name = Z_NULL;
state->length = 0;
state->mode = COMMENT;
/* fallthrough */
case COMMENT:
if (state->flags & 0x1000) {
if (have == 0) goto inf_leave;
copy = 0;
do {
len = (unsigned)(next[copy++]);
if (state->head != Z_NULL &&
state->head->comment != Z_NULL &&
state->length < state->head->comm_max)
state->head->comment[state->length++] = (Bytef)len;
} while (len && copy < have);
if ((state->flags & 0x0200) && (state->wrap & 4))
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
if (len) goto inf_leave;
}
else if (state->head != Z_NULL)
state->head->comment = Z_NULL;
state->mode = HCRC;
/* fallthrough */
case HCRC:
if (state->flags & 0x0200) {
NEEDBITS(16);
if ((state->wrap & 4) && hold != (state->check & 0xffff)) {
strm->msg = (char *)"header crc mismatch";
state->mode = BAD;
break;
}
INITBITS();
}
if (state->head != Z_NULL) {
state->head->hcrc = (int)((state->flags >> 9) & 1);
state->head->done = 1;
}
strm->adler = state->check = crc32(0L, Z_NULL, 0);
state->mode = TYPE;
break;
#endif
case DICTID:
NEEDBITS(32);
strm->adler = state->check = ZSWAP32(hold);
INITBITS();
state->mode = DICT;
/* fallthrough */
case DICT:
if (state->havedict == 0) {
RESTORE();
return Z_NEED_DICT;
}
strm->adler = state->check = adler32(0L, Z_NULL, 0);
state->mode = TYPE;
/* fallthrough */
case TYPE:
if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave;
/* fallthrough */
case TYPEDO:
if (state->last) {
BYTEBITS();
state->mode = CHECK;
break;
}
NEEDBITS(3);
state->last = BITS(1);
DROPBITS(1);
switch (BITS(2)) {
case 0: /* stored block */
Tracev((stderr, "inflate: stored block%s\n",
state->last ? " (last)" : ""));
state->mode = STORED;
break;
case 1: /* fixed block */
fixedtables(state);
Tracev((stderr, "inflate: fixed codes block%s\n",
state->last ? " (last)" : ""));
state->mode = LEN_; /* decode codes */
if (flush == Z_TREES) {
DROPBITS(2);
goto inf_leave;
}
break;
case 2: /* dynamic block */
Tracev((stderr, "inflate: dynamic codes block%s\n",
state->last ? " (last)" : ""));
state->mode = TABLE;
break;
case 3:
strm->msg = (char *)"invalid block type";
state->mode = BAD;
}
DROPBITS(2);
break;
case STORED:
BYTEBITS(); /* go to byte boundary */
NEEDBITS(32);
if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
strm->msg = (char *)"invalid stored block lengths";
state->mode = BAD;
break;
}
state->length = (unsigned)hold & 0xffff;
Tracev((stderr, "inflate: stored length %u\n",
state->length));
INITBITS();
state->mode = COPY_;
if (flush == Z_TREES) goto inf_leave;
/* fallthrough */
case COPY_:
state->mode = COPY;
/* fallthrough */
case COPY:
copy = state->length;
if (copy) {
if (copy > have) copy = have;
if (copy > left) copy = left;
if (copy == 0) goto inf_leave;
zmemcpy(put, next, copy);
have -= copy;
next += copy;
left -= copy;
put += copy;
state->length -= copy;
break;
}
Tracev((stderr, "inflate: stored end\n"));
state->mode = TYPE;
break;
case TABLE:
NEEDBITS(14);
state->nlen = BITS(5) + 257;
DROPBITS(5);
state->ndist = BITS(5) + 1;
DROPBITS(5);
state->ncode = BITS(4) + 4;
DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
if (state->nlen > 286 || state->ndist > 30) {
strm->msg = (char *)"too many length or distance symbols";
state->mode = BAD;
break;
}
#endif
Tracev((stderr, "inflate: table sizes ok\n"));
state->have = 0;
state->mode = LENLENS;
/* fallthrough */
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
state->lens[order[state->have++]] = (unsigned short)BITS(3);
DROPBITS(3);
}
while (state->have < 19)
state->lens[order[state->have++]] = 0;
state->next = state->codes;
state->lencode = (const code FAR *)(state->next);
state->lenbits = 7;
ret = inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid code lengths set";
state->mode = BAD;
break;
}
Tracev((stderr, "inflate: code lengths ok\n"));
state->have = 0;
state->mode = CODELENS;
/* fallthrough */
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
here = state->lencode[BITS(state->lenbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if (here.val < 16) {
DROPBITS(here.bits);
state->lens[state->have++] = here.val;
}
else {
if (here.val == 16) {
NEEDBITS(here.bits + 2);
DROPBITS(here.bits);
if (state->have == 0) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
len = state->lens[state->have - 1];
copy = 3 + BITS(2);
DROPBITS(2);
}
else if (here.val == 17) {
NEEDBITS(here.bits + 3);
DROPBITS(here.bits);
len = 0;
copy = 3 + BITS(3);
DROPBITS(3);
}
else {
NEEDBITS(here.bits + 7);
DROPBITS(here.bits);
len = 0;
copy = 11 + BITS(7);
DROPBITS(7);
}
if (state->have + copy > state->nlen + state->ndist) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
while (copy--)
state->lens[state->have++] = (unsigned short)len;
}
}
/* handle error breaks in while */
if (state->mode == BAD) break;
/* check for end-of-block code (better have one) */
if (state->lens[256] == 0) {
strm->msg = (char *)"invalid code -- missing end-of-block";
state->mode = BAD;
break;
}
/* build code tables -- note: do not change the lenbits or distbits
values here (9 and 6) without reading the comments in inftrees.h
concerning the ENOUGH constants, which depend on those values */
state->next = state->codes;
state->lencode = (const code FAR *)(state->next);
state->lenbits = 9;
ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid literal/lengths set";
state->mode = BAD;
break;
}
state->distcode = (const code FAR *)(state->next);
state->distbits = 6;
ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
if (ret) {
strm->msg = (char *)"invalid distances set";
state->mode = BAD;
break;
}
Tracev((stderr, "inflate: codes ok\n"));
state->mode = LEN_;
if (flush == Z_TREES) goto inf_leave;
/* fallthrough */
case LEN_:
state->mode = LEN;
/* fallthrough */
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
inflate_fast(strm, out);
LOAD();
if (state->mode == TYPE)
state->back = -1;
break;
}
state->back = 0;
for (;;) {
here = state->lencode[BITS(state->lenbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if (here.op && (here.op & 0xf0) == 0) {
last = here;
for (;;) {
here = state->lencode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + here.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
state->back += last.bits;
}
DROPBITS(here.bits);
state->back += here.bits;
state->length = (unsigned)here.val;
if ((int)(here.op) == 0) {
Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
"inflate: literal '%c'\n" :
"inflate: literal 0x%02x\n", here.val));
state->mode = LIT;
break;
}
if (here.op & 32) {
Tracevv((stderr, "inflate: end of block\n"));
state->back = -1;
state->mode = TYPE;
break;
}
if (here.op & 64) {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
state->extra = (unsigned)(here.op) & 15;
state->mode = LENEXT;
/* fallthrough */
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->length += BITS(state->extra);
DROPBITS(state->extra);
state->back += state->extra;
}
Tracevv((stderr, "inflate: length %u\n", state->length));
state->was = state->length;
state->mode = DIST;
/* fallthrough */
case DIST:
for (;;) {
here = state->distcode[BITS(state->distbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if ((here.op & 0xf0) == 0) {
last = here;
for (;;) {
here = state->distcode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + here.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
state->back += last.bits;
}
DROPBITS(here.bits);
state->back += here.bits;
if (here.op & 64) {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
state->offset = (unsigned)here.val;
state->extra = (unsigned)(here.op) & 15;
state->mode = DISTEXT;
/* fallthrough */
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->offset += BITS(state->extra);
DROPBITS(state->extra);
state->back += state->extra;
}
#ifdef INFLATE_STRICT
if (state->offset > state->dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
Tracevv((stderr, "inflate: distance %u\n", state->offset));
state->mode = MATCH;
/* fallthrough */
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
if (state->offset > copy) { /* copy from window */
copy = state->offset - copy;
if (copy > state->whave) {
if (state->sane) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
Trace((stderr, "inflate.c too far\n"));
copy -= state->whave;
if (copy > state->length) copy = state->length;
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = 0;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
#endif
}
if (copy > state->wnext) {
copy -= state->wnext;
from = state->window + (state->wsize - copy);
}
else
from = state->window + (state->wnext - copy);
if (copy > state->length) copy = state->length;
}
else { /* copy from output */
from = put - state->offset;
copy = state->length;
}
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = *from++;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
case LIT:
if (left == 0) goto inf_leave;
*put++ = (unsigned char)(state->length);
left--;
state->mode = LEN;
break;
case CHECK:
if (state->wrap) {
NEEDBITS(32);
out -= left;
strm->total_out += out;
state->total += out;
if ((state->wrap & 4) && out)
strm->adler = state->check =
UPDATE_CHECK(state->check, put - out, out);
out = left;
if ((state->wrap & 4) && (
#ifdef GUNZIP
state->flags ? hold :
#endif
ZSWAP32(hold)) != state->check) {
strm->msg = (char *)"incorrect data check";
state->mode = BAD;
break;
}
INITBITS();
Tracev((stderr, "inflate: check matches trailer\n"));
}
#ifdef GUNZIP
state->mode = LENGTH;
/* fallthrough */
case LENGTH:
if (state->wrap && state->flags) {
NEEDBITS(32);
if ((state->wrap & 4) && hold != (state->total & 0xffffffff)) {
strm->msg = (char *)"incorrect length check";
state->mode = BAD;
break;
}
INITBITS();
Tracev((stderr, "inflate: length matches trailer\n"));
}
#endif
state->mode = DONE;
/* fallthrough */
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
case BAD:
ret = Z_DATA_ERROR;
goto inf_leave;
case MEM:
return Z_MEM_ERROR;
case SYNC:
/* fallthrough */
default:
return Z_STREAM_ERROR;
}
/*
Return from inflate(), updating the total counts and the check value.
If there was no progress during the inflate() call, return a buffer
error. Call updatewindow() to create and/or update the window state.
Note: a memory error from inflate() is non-recoverable.
*/
inf_leave:
RESTORE();
if (state->wsize || (out != strm->avail_out && state->mode < BAD &&
(state->mode < CHECK || flush != Z_FINISH)))
if (updatewindow(strm, strm->next_out, out - strm->avail_out)) {
state->mode = MEM;
return Z_MEM_ERROR;
}
in -= strm->avail_in;
out -= strm->avail_out;
strm->total_in += in;
strm->total_out += out;
state->total += out;
if ((state->wrap & 4) && out)
strm->adler = state->check =
UPDATE_CHECK(state->check, strm->next_out - out, out);
strm->data_type = (int)state->bits + (state->last ? 64 : 0) +
(state->mode == TYPE ? 128 : 0) +
(state->mode == LEN_ || state->mode == COPY_ ? 256 : 0);
if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
ret = Z_BUF_ERROR;
return ret;
}
| 0
|
226,040
|
GF_Err fpar_box_size(GF_Box *s)
{
FilePartitionBox *ptr = (FilePartitionBox *)s;
ptr->size += 13 + (ptr->version ? 8 : 4);
if (ptr->scheme_specific_info)
ptr->size += strlen(ptr->scheme_specific_info);
ptr->size+= ptr->nb_entries * 6;
return GF_OK;
| 0
|
458,920
|
find_start_comment(int ind_maxcomment) // XXX
{
pos_T *pos;
char_u *line;
char_u *p;
int cur_maxcomment = ind_maxcomment;
for (;;)
{
pos = findmatchlimit(NULL, '*', FM_BACKWARD, cur_maxcomment);
if (pos == NULL)
break;
// Check if the comment start we found is inside a string.
// If it is then restrict the search to below this line and try again.
line = ml_get(pos->lnum);
for (p = line; *p && (colnr_T)(p - line) < pos->col; ++p)
p = skip_string(p);
if ((colnr_T)(p - line) <= pos->col)
break;
cur_maxcomment = curwin->w_cursor.lnum - pos->lnum - 1;
if (cur_maxcomment <= 0)
{
pos = NULL;
break;
}
}
return pos;
}
| 0
|
432,152
|
createRandomCursorExecutor(const CollectionPtr& coll,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
long long sampleSize,
long long numRecords,
boost::optional<BucketUnpacker> bucketUnpacker) {
OperationContext* opCtx = expCtx->opCtx;
// Verify that we are already under a collection lock. We avoid taking locks ourselves in this
// function because double-locking forces any PlanExecutor we create to adopt a NO_YIELD policy.
invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IS));
static const double kMaxSampleRatioForRandCursor = 0.05;
if (!expCtx->ns.isTimeseriesBucketsCollection()) {
if (sampleSize > numRecords * kMaxSampleRatioForRandCursor || numRecords <= 100) {
return std::pair{nullptr, false};
}
} else {
// Suppose that a time-series bucket collection is observed to contain 200 buckets, and the
// 'gTimeseriesBucketMaxCount' parameter is set to 1000. If all buckets are full, then the
// maximum possible measurment count would be 200 * 1000 = 200,000. While the
// 'SampleFromTimeseriesBucket' plan is more efficient when the sample size is small
// relative to the total number of measurements in the time-series collection, for larger
// sample sizes the top-k sort based sample is faster. Experiments have approximated that
// the tipping point is roughly when the requested sample size is greater than 1% of the
// maximum possible number of measurements in the collection (i.e. numBuckets *
// maxMeasurementsPerBucket).
static const double kCoefficient = 0.01;
if (sampleSize > kCoefficient * numRecords * gTimeseriesBucketMaxCount) {
return std::pair{nullptr, false};
}
}
// Attempt to get a random cursor from the RecordStore.
auto rsRandCursor = coll->getRecordStore()->getRandomCursor(opCtx);
if (!rsRandCursor) {
// The storage engine has no random cursor support.
return std::pair{nullptr, false};
}
// Build a MultiIteratorStage and pass it the random-sampling RecordCursor.
auto ws = std::make_unique<WorkingSet>();
std::unique_ptr<PlanStage> root =
std::make_unique<MultiIteratorStage>(expCtx.get(), ws.get(), coll);
static_cast<MultiIteratorStage*>(root.get())->addIterator(std::move(rsRandCursor));
TrialStage* trialStage = nullptr;
// Because 'numRecords' includes orphan documents, our initial decision to optimize the $sample
// cursor may have been mistaken. For sharded collections, build a TRIAL plan that will switch
// to a collection scan if the ratio of orphaned to owned documents encountered over the first
// 100 works() is such that we would have chosen not to optimize.
static const size_t kMaxPresampleSize = 100;
if (auto css = CollectionShardingState::get(opCtx, coll->ns());
css->getCollectionDescription(opCtx).isSharded() &&
!expCtx->ns.isTimeseriesBucketsCollection()) {
// The ratio of owned to orphaned documents must be at least equal to the ratio between the
// requested sampleSize and the maximum permitted sampleSize for the original constraints to
// be satisfied. For instance, if there are 200 documents and the sampleSize is 5, then at
// least (5 / (200*0.05)) = (5/10) = 50% of those documents must be owned. If less than 5%
// of the documents in the collection are owned, we default to the backup plan.
const auto minAdvancedToWorkRatio = std::max(
sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor);
// Since the incoming operation is sharded, use the CSS to infer the filtering metadata for
// the collection. We get the shard ownership filter after checking to see if the collection
// is sharded to avoid an invariant from being fired in this call.
auto collectionFilter = css->getOwnershipFilter(
opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
// The trial plan is SHARDING_FILTER-MULTI_ITERATOR.
auto randomCursorPlan = std::make_unique<ShardFilterStage>(
expCtx.get(), collectionFilter, ws.get(), std::move(root));
// The backup plan is SHARDING_FILTER-COLLSCAN.
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
collScanPlan = std::make_unique<ShardFilterStage>(
expCtx.get(), collectionFilter, ws.get(), std::move(collScanPlan));
// Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans.
root = std::make_unique<TrialStage>(expCtx.get(),
ws.get(),
std::move(randomCursorPlan),
std::move(collScanPlan),
kMaxPresampleSize,
minAdvancedToWorkRatio);
trialStage = static_cast<TrialStage*>(root.get());
} else if (expCtx->ns.isTimeseriesBucketsCollection()) {
// We can't take ARHASH optimization path for a direct $sample on the system.buckets
// collection because data is in compressed form. If we did have a direct $sample on the
// system.buckets collection, then the 'bucketUnpacker' would not be set up properly. We
// also should bail out early if a $sample is made against a time series collection that is
// empty. If we don't the 'minAdvancedToWorkRatio' can be nan/-nan depending on the
// architecture.
if (!(bucketUnpacker && numRecords)) {
return std::pair{nullptr, false};
}
// Use a 'TrialStage' to run a trial between 'SampleFromTimeseriesBucket' and
// 'UnpackTimeseriesBucket' with $sample left in the pipeline in-place. If the buckets are
// not sufficiently full, or the 'SampleFromTimeseriesBucket' plan draws too many
// duplicates, then we will fall back to the 'TrialStage' backup plan. This backup plan uses
// the top-k sort sampling approach.
//
// Suppose the 'gTimeseriesBucketMaxCount' is 1000, but each bucket only contains 500
// documents on average. The observed trial advanced/work ratio approximates the average
// bucket fullness, noted here as "abf". In this example, abf = 500 / 1000 = 0.5.
// Experiments have shown that the optimized 'SampleFromTimeseriesBucket' algorithm performs
// better than backup plan when
//
// sampleSize < 0.02 * abf * numRecords * gTimeseriesBucketMaxCount
//
// This inequality can be rewritten as
//
// abf > sampleSize / (0.02 * numRecords * gTimeseriesBucketMaxCount)
//
// Therefore, if the advanced/work ratio exceeds this threshold, we will use the
// 'SampleFromTimeseriesBucket' plan. Note that as the sample size requested by the user
// becomes larger with respect to the number of buckets, we require a higher advanced/work
// ratio in order to justify using 'SampleFromTimeseriesBucket'.
//
// Additionally, we require the 'TrialStage' to approximate the abf as at least 0.25. When
// buckets are mostly empty, the 'SampleFromTimeseriesBucket' will be inefficient due to a
// lot of sampling "misses".
static const auto kCoefficient = 0.02;
static const auto kMinBucketFullness = 0.25;
const auto minAdvancedToWorkRatio = std::max(
std::min(sampleSize / (kCoefficient * numRecords * gTimeseriesBucketMaxCount), 1.0),
kMinBucketFullness);
auto arhashPlan = std::make_unique<SampleFromTimeseriesBucket>(
expCtx.get(),
ws.get(),
std::move(root),
*bucketUnpacker,
// By using a quantity slightly higher than 'kMaxPresampleSize', we ensure that the
// 'SampleFromTimeseriesBucket' stage won't fail due to too many consecutive sampling
// attempts during the 'TrialStage's trial period.
kMaxPresampleSize + 5,
sampleSize,
gTimeseriesBucketMaxCount);
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
auto topkSortPlan = std::make_unique<UnpackTimeseriesBucket>(
expCtx.get(), ws.get(), std::move(collScanPlan), *bucketUnpacker);
root = std::make_unique<TrialStage>(expCtx.get(),
ws.get(),
std::move(arhashPlan),
std::move(topkSortPlan),
kMaxPresampleSize,
minAdvancedToWorkRatio);
trialStage = static_cast<TrialStage*>(root.get());
}
auto execStatus = plan_executor_factory::make(expCtx,
std::move(ws),
std::move(root),
&coll,
opCtx->inMultiDocumentTransaction()
? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY
: PlanYieldPolicy::YieldPolicy::YIELD_AUTO,
QueryPlannerParams::RETURN_OWNED_DATA);
if (!execStatus.isOK()) {
return execStatus.getStatus();
}
// For sharded collections, the root of the plan tree is a TrialStage that may have chosen
// either a random-sampling cursor trial plan or a COLLSCAN backup plan. We can only optimize
// the $sample aggregation stage if the trial plan was chosen.
return std::pair{std::move(execStatus.getValue()),
!trialStage || !trialStage->pickedBackupPlan()};
}
| 0
|
486,806
|
static inline uint64_t tx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
{
uint64_t ret = desc[0];
if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
ret |= (uint64_t)desc[2] << 32;
}
return ret;
}
| 0
|
401,507
|
static inline void __run_timers(struct timer_base *base)
{
struct hlist_head heads[LVL_DEPTH];
int levels;
if (!time_after_eq(jiffies, base->clk))
return;
timer_base_lock_expiry(base);
raw_spin_lock_irq(&base->lock);
/*
* timer_base::must_forward_clk must be cleared before running
* timers so that any timer functions that call mod_timer() will
* not try to forward the base. Idle tracking / clock forwarding
* logic is only used with BASE_STD timers.
*
* The must_forward_clk flag is cleared unconditionally also for
* the deferrable base. The deferrable base is not affected by idle
* tracking and never forwarded, so clearing the flag is a NOOP.
*
* The fact that the deferrable base is never forwarded can cause
* large variations in granularity for deferrable timers, but they
* can be deferred for long periods due to idle anyway.
*/
base->must_forward_clk = false;
while (time_after_eq(jiffies, base->clk)) {
levels = collect_expired_timers(base, heads);
base->clk++;
while (levels--)
expire_timers(base, heads + levels);
}
raw_spin_unlock_irq(&base->lock);
timer_base_unlock_expiry(base);
}
| 0
|
317,356
|
static int selinux_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address,
int addrlen)
{
int len, err = 0, walk_size = 0;
void *addr_buf;
struct sockaddr *addr;
struct socket *sock;
if (!selinux_policycap_extsockclass())
return 0;
/* Process one or more addresses that may be IPv4 or IPv6 */
sock = sk->sk_socket;
addr_buf = address;
while (walk_size < addrlen) {
if (walk_size + sizeof(sa_family_t) > addrlen)
return -EINVAL;
addr = addr_buf;
switch (addr->sa_family) {
case AF_UNSPEC:
case AF_INET:
len = sizeof(struct sockaddr_in);
break;
case AF_INET6:
len = sizeof(struct sockaddr_in6);
break;
default:
return -EINVAL;
}
if (walk_size + len > addrlen)
return -EINVAL;
err = -EINVAL;
switch (optname) {
/* Bind checks */
case SCTP_PRIMARY_ADDR:
case SCTP_SET_PEER_PRIMARY_ADDR:
case SCTP_SOCKOPT_BINDX_ADD:
err = selinux_socket_bind(sock, addr, len);
break;
/* Connect checks */
case SCTP_SOCKOPT_CONNECTX:
case SCTP_PARAM_SET_PRIMARY:
case SCTP_PARAM_ADD_IP:
case SCTP_SENDMSG_CONNECT:
err = selinux_socket_connect_helper(sock, addr, len);
if (err)
return err;
/* As selinux_sctp_bind_connect() is called by the
* SCTP protocol layer, the socket is already locked,
* therefore selinux_netlbl_socket_connect_locked()
* is called here. The situations handled are:
* sctp_connectx(3), sctp_sendmsg(3), sendmsg(2),
* whenever a new IP address is added or when a new
* primary address is selected.
* Note that an SCTP connect(2) call happens before
* the SCTP protocol layer and is handled via
* selinux_socket_connect().
*/
err = selinux_netlbl_socket_connect_locked(sk, addr);
break;
}
if (err)
return err;
addr_buf += len;
walk_size += len;
}
return 0;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.