idx
int64 | func
string | target
int64 |
|---|---|---|
344,792
|
/*
 * Decode a 32-bit unsigned integer stored in little-endian byte order
 * from an arbitrarily aligned buffer.
 *
 * vp: pointer to at least 4 readable bytes.
 * Returns the value in host byte order.
 *
 * NOTE(review): the return-type line of this definition lies outside
 * this excerpt; the assembly below implies a u_int32_t result.
 */
get_u32_le(const void *vp)
{
/* Byte-wise loads avoid unaligned access and are endian-portable. */
const u_char *p = (const u_char *)vp;
u_int32_t v;
v = (u_int32_t)p[0];
v |= (u_int32_t)p[1] << 8;
v |= (u_int32_t)p[2] << 16;
v |= (u_int32_t)p[3] << 24;
return (v);
}
| 0
|
252,448
|
/* Ensure pArray has capacity for n elements beyond its current m_size.
 * Delegates to mz_zip_array_reserve; the MZ_TRUE argument lets the
 * reservation grow the capacity rather than requiring an exact size.
 * Returns MZ_TRUE on success, MZ_FALSE on allocation failure. */
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
| 0
|
225,006
|
/*
 * Default notice processor: forward the notice text verbatim to stderr.
 * The supplied string is expected to be newline-terminated already, so
 * nothing is appended here.
 */
defaultNoticeProcessor(void *arg, const char *message)
{
	(void) arg;			/* unused callback payload */
	fputs(message, stderr);
}
| 0
|
445,875
|
/*
 * GObject instance initializer for FrWindow: allocate the private
 * struct (zero-filled by g_new0) and set the non-zero defaults, then
 * register the window in its own window group.
 */
fr_window_init (FrWindow *window)
{
window->priv = g_new0 (FrWindowPrivate, 1);
window->priv->update_dropped_files = FALSE;
window->priv->filter_mode = FALSE;
window->priv->use_progress_dialog = TRUE;
window->priv->batch_title = NULL;
window->priv->cancellable = g_cancellable_new ();
window->priv->compression = FR_COMPRESSION_NORMAL;
window->priv->window_group = gtk_window_group_new ();
window->priv->populating_file_list = FALSE;
/* Keys are static strings; no key/value destructors are installed. */
window->priv->named_dialogs = g_hash_table_new (g_str_hash, g_str_equal);
gtk_window_group_add_window (window->priv->window_group, GTK_WINDOW (window));
window->archive = NULL;
}
| 0
|
210,283
|
/*
 * Handle VHOST_USER_SET_INFLIGHT_FD: map the inflight shared-memory
 * region supplied by the front-end (fd + offset/size in the payload)
 * and point each virtqueue's inflight tracking structure into it.
 *
 * Fix: the descriptor received in ctx->fds[0] was leaked on every
 * error path; it is now closed before returning
 * RTE_VHOST_MSG_RESULT_ERR.
 */
vhost_user_set_inflight_fd(struct virtio_net **pdev,
			struct vhu_msg_context *ctx,
			int main_fd __rte_unused)
{
	uint64_t mmap_size, mmap_offset;
	uint16_t num_queues, queue_size;
	struct virtio_net *dev = *pdev;
	uint32_t pervq_inflight_size;
	struct vhost_virtqueue *vq;
	void *addr;
	int fd, i;
	int numa_node = SOCKET_ID_ANY;

	fd = ctx->fds[0];
	if (ctx->msg.size != sizeof(ctx->msg.payload.inflight) || fd < 0) {
		VHOST_LOG_CONFIG(ERR, "(%s) invalid set_inflight_fd message size is %d,fd is %d\n",
			dev->ifname, ctx->msg.size, fd);
		/* fd may still be valid even when the size check failed. */
		if (fd >= 0)
			close(fd);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	mmap_size = ctx->msg.payload.inflight.mmap_size;
	mmap_offset = ctx->msg.payload.inflight.mmap_offset;
	num_queues = ctx->msg.payload.inflight.num_queues;
	queue_size = ctx->msg.payload.inflight.queue_size;

	if (vq_is_packed(dev))
		pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
	else
		pervq_inflight_size = get_pervq_shm_size_split(queue_size);

	VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd mmap_size: %"PRIu64"\n",
			dev->ifname, mmap_size);
	VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd mmap_offset: %"PRIu64"\n",
			dev->ifname, mmap_offset);
	VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd num_queues: %u\n", dev->ifname, num_queues);
	VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd queue_size: %u\n", dev->ifname, queue_size);
	VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd fd: %d\n", dev->ifname, fd);
	VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd pervq_inflight_size: %d\n",
			dev->ifname, pervq_inflight_size);

	/*
	 * If VQ 0 has already been allocated, try to allocate on the same
	 * NUMA node. It can be reallocated later in numa_realloc().
	 */
	if (dev->nr_vring > 0)
		numa_node = dev->virtqueue[0]->numa_node;

	if (!dev->inflight_info) {
		dev->inflight_info = rte_zmalloc_socket("inflight_info",
				sizeof(struct inflight_mem_info), 0, numa_node);
		if (dev->inflight_info == NULL) {
			VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n",
					dev->ifname);
			close(fd);	/* do not leak the message fd */
			return RTE_VHOST_MSG_RESULT_ERR;
		}
		dev->inflight_info->fd = -1;
	}

	/* Drop any previously mapped inflight region before remapping. */
	if (dev->inflight_info->addr) {
		munmap(dev->inflight_info->addr, dev->inflight_info->size);
		dev->inflight_info->addr = NULL;
	}

	addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			fd, mmap_offset);
	if (addr == MAP_FAILED) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap share memory.\n", dev->ifname);
		close(fd);	/* do not leak the message fd */
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	/* Replace the previously stored fd with the new one. */
	if (dev->inflight_info->fd >= 0) {
		close(dev->inflight_info->fd);
		dev->inflight_info->fd = -1;
	}

	dev->inflight_info->fd = fd;
	dev->inflight_info->addr = addr;
	dev->inflight_info->size = mmap_size;

	/* Carve the mapping into one per-virtqueue inflight region. */
	for (i = 0; i < num_queues; i++) {
		vq = dev->virtqueue[i];
		if (!vq)
			continue;
		if (vq_is_packed(dev)) {
			vq->inflight_packed = addr;
			vq->inflight_packed->desc_num = queue_size;
		} else {
			vq->inflight_split = addr;
			vq->inflight_split->desc_num = queue_size;
		}
		addr = (void *)((char *)addr + pervq_inflight_size);
	}

	return RTE_VHOST_MSG_RESULT_OK;
}
| 1
|
222,843
|
// Infer tensor shapes dynamically: execute the graph once on `cluster`
// and read the observed shapes back from the produced cost graph.
// Initialization and run failures are propagated via TF_RETURN_IF_ERROR.
Status GraphProperties::InferDynamically(Cluster* cluster) {
TF_RETURN_IF_ERROR(cluster->Initialize(item_));
// Runs the model once to collect the shapes in the cost model.
RunMetadata metadata;
TF_RETURN_IF_ERROR(
cluster->Run(item_.graph, item_.feed, item_.fetch, &metadata));
return InferFromCostGraph(metadata.cost_graph());
}
| 0
|
261,750
|
// Destructor: release any in-progress protocol state via reset().
RtmpProtocol::~RtmpProtocol() {
reset();
}
| 0
|
310,156
|
/*
 * Screen-pointer entry point for _nc_mvcur(): move the cursor from
 * (yold, xold) to (ynew, xnew) using the standard output routine,
 * flushing afterwards when the screen has not been initialized so
 * low-level callers still see the movement immediately.
 * Returns the result code from _nc_real_mvcur().
 */
NCURSES_SP_NAME(_nc_mvcur) (NCURSES_SP_DCLx
int yold, int xold,
int ynew, int xnew)
{
int rc;
rc = _nc_real_mvcur(NCURSES_SP_ARGx yold, xold, ynew, xnew,
NCURSES_SP_NAME(_nc_outch),
TRUE);
/*
* With the terminal-driver, we cannot distinguish between internal and
* external calls. Flush the output if the screen has not been
* initialized, e.g., when used from low-level terminfo programs.
*/
if ((SP_PARM != 0) && (SP_PARM->_endwin == ewInitial))
NCURSES_SP_NAME(_nc_flush) (NCURSES_SP_ARG);
return rc;
}
| 0
|
387,860
|
// Returns true only when the direct superclass and every directly
// implemented local interface have passed their fingerprint checks.
// Logs (trace level, class+fingerprint tags) the first offender found
// and returns false immediately.
bool InstanceKlass::supers_have_passed_fingerprint_checks() {
if (java_super() != NULL && !java_super()->has_passed_fingerprint_check()) {
// ResourceMark scopes the external_name() allocations below.
ResourceMark rm;
log_trace(class, fingerprint)("%s : super %s not fingerprinted", external_name(), java_super()->external_name());
return false;
}
Array<Klass*>* local_interfaces = this->local_interfaces();
if (local_interfaces != NULL) {
int length = local_interfaces->length();
for (int i = 0; i < length; i++) {
InstanceKlass* intf = InstanceKlass::cast(local_interfaces->at(i));
if (!intf->has_passed_fingerprint_check()) {
ResourceMark rm;
log_trace(class, fingerprint)("%s : interface %s not fingerprinted", external_name(), intf->external_name());
return false;
}
}
}
return true;
}
| 0
|
431,631
|
/*
 * WriteCINImage(): encode `image` as a Kodak Cineon (CIN) file onto the
 * blob opened from image_info.
 *
 * Layout written: file header, image header (8 channel descriptors),
 * data-format header, origination header, film header, optional
 * "dpx:user.data" profile, zero padding up to image_offset (0x800),
 * then raw RGB pixel rows.  The image is first converted to
 * LogColorspace when needed.  Returns MagickTrue on success.
 */
static MagickBooleanType WriteCINImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
char
timestamp[MagickPathExtent];
const char
*value;
CINInfo
cin;
const StringInfo
*profile;
MagickBooleanType
status;
MagickOffsetType
offset;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
const Quantum
*p;
ssize_t
i;
size_t
length;
ssize_t
count,
y;
struct tm
utc_time;
time_t
seconds;
unsigned char
*pixels;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
if (image->colorspace != LogColorspace)
(void) TransformImageColorspace(image,LogColorspace,exception);
/*
Write image information.
*/
(void) memset(&cin,0,sizeof(cin));
offset=0;
cin.file.magic=0x802A5FD7UL;
offset+=WriteBlobLong(image,(unsigned int) cin.file.magic);
cin.file.image_offset=0x800;
offset+=WriteBlobLong(image,(unsigned int) cin.file.image_offset);
cin.file.generic_length=0x400;
offset+=WriteBlobLong(image,(unsigned int) cin.file.generic_length);
cin.file.industry_length=0x400;
offset+=WriteBlobLong(image,(unsigned int) cin.file.industry_length);
cin.file.user_length=0x00;
profile=GetImageProfile(image,"dpx:user.data");
if (profile != (StringInfo *) NULL)
{
/* User data is rounded up to a whole number of 0x2000-byte units. */
cin.file.user_length+=(size_t) GetStringInfoLength(profile);
cin.file.user_length=(((cin.file.user_length+0x2000-1)/0x2000)*0x2000);
}
offset+=WriteBlobLong(image,(unsigned int) cin.file.user_length);
cin.file.file_size=4*image->columns*image->rows+0x2000;
offset+=WriteBlobLong(image,(unsigned int) cin.file.file_size);
(void) CopyMagickString(cin.file.version,"V4.5",sizeof(cin.file.version));
offset+=WriteBlob(image,sizeof(cin.file.version),(unsigned char *)
cin.file.version);
value=GetCINProperty(image_info,image,"dpx:file.filename",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.file.filename,value,sizeof(cin.file.filename));
else
(void) CopyMagickString(cin.file.filename,image->filename,
sizeof(cin.file.filename));
offset+=WriteBlob(image,sizeof(cin.file.filename),(unsigned char *)
cin.file.filename);
/* Timestamp is split: bytes [0,10] = date, [11,...] = time. */
seconds=GetMagickTime();
GetMagickUTCtime(&seconds,&utc_time);
(void) memset(timestamp,0,sizeof(timestamp));
(void) strftime(timestamp,MagickPathExtent,"%Y:%m:%d:%H:%M:%SUTC",&utc_time);
(void) memset(cin.file.create_date,0,sizeof(cin.file.create_date));
(void) CopyMagickString(cin.file.create_date,timestamp,11);
offset+=WriteBlob(image,sizeof(cin.file.create_date),(unsigned char *)
cin.file.create_date);
(void) memset(cin.file.create_time,0,sizeof(cin.file.create_time));
(void) CopyMagickString(cin.file.create_time,timestamp+11,11);
offset+=WriteBlob(image,sizeof(cin.file.create_time),(unsigned char *)
cin.file.create_time);
offset+=WriteBlob(image,sizeof(cin.file.reserve),(unsigned char *)
cin.file.reserve);
cin.image.orientation=0x00;
offset+=WriteBlobByte(image,cin.image.orientation);
cin.image.number_channels=3;
offset+=WriteBlobByte(image,cin.image.number_channels);
offset+=WriteBlob(image,sizeof(cin.image.reserve1),(unsigned char *)
cin.image.reserve1);
/*
NOTE(review): inside this loop the assignments target channel[i] but
the WriteBlob* calls read channel[0] (and channel[1].designator[0]
where channel[i].designator[1] was just set).  All channels carry
identical values except designator[1], so most writes emit the same
bytes regardless, but the designator[1] line looks suspicious --
confirm against the Cineon spec before relying on the channel-color
byte.
*/
for (i=0; i < 8; i++)
{
cin.image.channel[i].designator[0]=0; /* universal metric */
offset+=WriteBlobByte(image,cin.image.channel[0].designator[0]);
cin.image.channel[i].designator[1]=(unsigned char) (i > 3 ? 0 : i+1); /* channel color */;
offset+=WriteBlobByte(image,cin.image.channel[1].designator[0]);
cin.image.channel[i].bits_per_pixel=(unsigned char) image->depth;
offset+=WriteBlobByte(image,cin.image.channel[0].bits_per_pixel);
offset+=WriteBlobByte(image,cin.image.channel[0].reserve);
cin.image.channel[i].pixels_per_line=image->columns;
offset+=WriteBlobLong(image,(unsigned int)
cin.image.channel[0].pixels_per_line);
cin.image.channel[i].lines_per_image=image->rows;
offset+=WriteBlobLong(image,(unsigned int)
cin.image.channel[0].lines_per_image);
cin.image.channel[i].min_data=0;
offset+=WriteBlobFloat(image,cin.image.channel[0].min_data);
cin.image.channel[i].min_quantity=0.0;
offset+=WriteBlobFloat(image,cin.image.channel[0].min_quantity);
cin.image.channel[i].max_data=(float) ((MagickOffsetType)
GetQuantumRange(image->depth));
offset+=WriteBlobFloat(image,cin.image.channel[0].max_data);
cin.image.channel[i].max_quantity=2.048f;
offset+=WriteBlobFloat(image,cin.image.channel[0].max_quantity);
}
/* Chromaticity block: white point then R/G/B primaries. */
offset+=WriteBlobFloat(image,image->chromaticity.white_point.x);
offset+=WriteBlobFloat(image,image->chromaticity.white_point.y);
offset+=WriteBlobFloat(image,image->chromaticity.red_primary.x);
offset+=WriteBlobFloat(image,image->chromaticity.red_primary.y);
offset+=WriteBlobFloat(image,image->chromaticity.green_primary.x);
offset+=WriteBlobFloat(image,image->chromaticity.green_primary.y);
offset+=WriteBlobFloat(image,image->chromaticity.blue_primary.x);
offset+=WriteBlobFloat(image,image->chromaticity.blue_primary.y);
value=GetCINProperty(image_info,image,"dpx:image.label",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.image.label,value,sizeof(cin.image.label));
offset+=WriteBlob(image,sizeof(cin.image.label),(unsigned char *)
cin.image.label);
offset+=WriteBlob(image,sizeof(cin.image.reserve),(unsigned char *)
cin.image.reserve);
/*
Write data format information.
*/
cin.data_format.interleave=0; /* pixel interleave (rgbrgbr...) */
offset+=WriteBlobByte(image,cin.data_format.interleave);
cin.data_format.packing=5; /* packing ssize_tword (32bit) boundaries */
offset+=WriteBlobByte(image,cin.data_format.packing);
cin.data_format.sign=0; /* unsigned data */
offset+=WriteBlobByte(image,cin.data_format.sign);
cin.data_format.sense=0; /* image sense: positive image */
offset+=WriteBlobByte(image,cin.data_format.sense);
cin.data_format.line_pad=0;
offset+=WriteBlobLong(image,(unsigned int) cin.data_format.line_pad);
cin.data_format.channel_pad=0;
offset+=WriteBlobLong(image,(unsigned int) cin.data_format.channel_pad);
offset+=WriteBlob(image,sizeof(cin.data_format.reserve),(unsigned char *)
cin.data_format.reserve);
/*
Write origination information.
*/
cin.origination.x_offset=0UL;
value=GetCINProperty(image_info,image,"dpx:origination.x_offset",exception);
if (value != (const char *) NULL)
cin.origination.x_offset=(ssize_t) StringToLong(value);
offset+=WriteBlobLong(image,(unsigned int) cin.origination.x_offset);
cin.origination.y_offset=0UL;
value=GetCINProperty(image_info,image,"dpx:origination.y_offset",exception);
if (value != (const char *) NULL)
cin.origination.y_offset=(ssize_t) StringToLong(value);
offset+=WriteBlobLong(image,(unsigned int) cin.origination.y_offset);
value=GetCINProperty(image_info,image,"dpx:origination.filename",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.origination.filename,value,
sizeof(cin.origination.filename));
else
(void) CopyMagickString(cin.origination.filename,image->filename,
sizeof(cin.origination.filename));
offset+=WriteBlob(image,sizeof(cin.origination.filename),(unsigned char *)
cin.origination.filename);
(void) memset(timestamp,0,sizeof(timestamp));
(void) strftime(timestamp,MagickPathExtent,"%Y:%m:%d:%H:%M:%SUTC",&utc_time);
(void) memset(cin.origination.create_date,0,
sizeof(cin.origination.create_date));
(void) CopyMagickString(cin.origination.create_date,timestamp,11);
offset+=WriteBlob(image,sizeof(cin.origination.create_date),(unsigned char *)
cin.origination.create_date);
(void) memset(cin.origination.create_time,0,
sizeof(cin.origination.create_time));
(void) CopyMagickString(cin.origination.create_time,timestamp+11,15);
offset+=WriteBlob(image,sizeof(cin.origination.create_time),(unsigned char *)
cin.origination.create_time);
value=GetCINProperty(image_info,image,"dpx:origination.device",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.origination.device,value,
sizeof(cin.origination.device));
offset+=WriteBlob(image,sizeof(cin.origination.device),(unsigned char *)
cin.origination.device);
value=GetCINProperty(image_info,image,"dpx:origination.model",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.origination.model,value,
sizeof(cin.origination.model));
offset+=WriteBlob(image,sizeof(cin.origination.model),(unsigned char *)
cin.origination.model);
value=GetCINProperty(image_info,image,"dpx:origination.serial",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.origination.serial,value,
sizeof(cin.origination.serial));
offset+=WriteBlob(image,sizeof(cin.origination.serial),(unsigned char *)
cin.origination.serial);
cin.origination.x_pitch=0.0f;
value=GetCINProperty(image_info,image,"dpx:origination.x_pitch",exception);
if (value != (const char *) NULL)
cin.origination.x_pitch=StringToDouble(value,(char **) NULL);
offset+=WriteBlobFloat(image,cin.origination.x_pitch);
cin.origination.y_pitch=0.0f;
value=GetCINProperty(image_info,image,"dpx:origination.y_pitch",exception);
if (value != (const char *) NULL)
cin.origination.y_pitch=StringToDouble(value,(char **) NULL);
offset+=WriteBlobFloat(image,cin.origination.y_pitch);
cin.origination.gamma=image->gamma;
offset+=WriteBlobFloat(image,cin.origination.gamma);
offset+=WriteBlob(image,sizeof(cin.origination.reserve),(unsigned char *)
cin.origination.reserve);
/*
Image film information.
*/
cin.film.id=0;
value=GetCINProperty(image_info,image,"dpx:film.id",exception);
if (value != (const char *) NULL)
cin.film.id=(char) StringToLong(value);
offset+=WriteBlobByte(image,(unsigned char) cin.film.id);
cin.film.type=0;
value=GetCINProperty(image_info,image,"dpx:film.type",exception);
if (value != (const char *) NULL)
cin.film.type=(char) StringToLong(value);
offset+=WriteBlobByte(image,(unsigned char) cin.film.type);
cin.film.offset=0;
value=GetCINProperty(image_info,image,"dpx:film.offset",exception);
if (value != (const char *) NULL)
cin.film.offset=(char) StringToLong(value);
offset+=WriteBlobByte(image,(unsigned char) cin.film.offset);
offset+=WriteBlobByte(image,(unsigned char) cin.film.reserve1);
cin.film.prefix=0UL;
value=GetCINProperty(image_info,image,"dpx:film.prefix",exception);
if (value != (const char *) NULL)
cin.film.prefix=StringToUnsignedLong(value);
offset+=WriteBlobLong(image,(unsigned int) cin.film.prefix);
cin.film.count=0UL;
value=GetCINProperty(image_info,image,"dpx:film.count",exception);
if (value != (const char *) NULL)
cin.film.count=StringToUnsignedLong(value);
offset+=WriteBlobLong(image,(unsigned int) cin.film.count);
value=GetCINProperty(image_info,image,"dpx:film.format",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.film.format,value,sizeof(cin.film.format));
offset+=WriteBlob(image,sizeof(cin.film.format),(unsigned char *)
cin.film.format);
cin.film.frame_position=0UL;
value=GetCINProperty(image_info,image,"dpx:film.frame_position",exception);
if (value != (const char *) NULL)
cin.film.frame_position=StringToUnsignedLong(value);
offset+=WriteBlobLong(image,(unsigned int) cin.film.frame_position);
cin.film.frame_rate=0.0f;
value=GetCINProperty(image_info,image,"dpx:film.frame_rate",exception);
if (value != (const char *) NULL)
cin.film.frame_rate=StringToDouble(value,(char **) NULL);
offset+=WriteBlobFloat(image,cin.film.frame_rate);
value=GetCINProperty(image_info,image,"dpx:film.frame_id",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.film.frame_id,value,sizeof(cin.film.frame_id));
offset+=WriteBlob(image,sizeof(cin.film.frame_id),(unsigned char *)
cin.film.frame_id);
value=GetCINProperty(image_info,image,"dpx:film.slate_info",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(cin.film.slate_info,value,
sizeof(cin.film.slate_info));
offset+=WriteBlob(image,sizeof(cin.film.slate_info),(unsigned char *)
cin.film.slate_info);
offset+=WriteBlob(image,sizeof(cin.film.reserve),(unsigned char *)
cin.film.reserve);
if (profile != (StringInfo *) NULL)
offset+=WriteBlob(image,GetStringInfoLength(profile),
GetStringInfoDatum(profile));
/* Zero-pad the header area up to the fixed pixel-data offset. */
while (offset < (MagickOffsetType) cin.file.image_offset)
offset+=WriteBlobByte(image,0x00);
/*
Convert pixel packets to CIN raster image.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
quantum_info->quantum=32;
quantum_info->pack=MagickFalse;
quantum_type=RGBQuantum;
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
length=GetBytesPerRow(image->columns,3,image->depth,MagickTrue);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
(void) ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
count=WriteBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
(void) CloseBlob(image);
return(status);
}
| 0
|
220,196
|
// Return an edge to the graph's free list so it can be reused by a
// later AddEdge; const is cast away because the free list stores
// mutable pointers the graph owns.
void Graph::RecycleEdge(const Edge* e) {
free_edges_.push_back(const_cast<Edge*>(e));
}
| 0
|
261,738
|
// Parse as many complete RTMP chunks as `data`/`len` contains, feeding
// each finished message to handle_chunk().  Returns a pointer to the
// first unconsumed byte; when more input is required the current
// position is returned unchanged so the caller can retry with more data.
const char* RtmpProtocol::handle_rtmp(const char *data, size_t len) {
auto ptr = data;
while (len) {
size_t offset = 0;
auto header = (RtmpHeader *) ptr;
auto header_len = HEADER_LENGTH[header->fmt];
_now_chunk_id = header->chunk_id;
switch (_now_chunk_id) {
case 0: {
//A value of 0 selects the two-byte form: chunk IDs 64 - 319
//(second byte + 64).
if (len < 2) {
//need more data
return ptr;
}
_now_chunk_id = 64 + (uint8_t) (ptr[1]);
offset = 1;
break;
}
case 1: {
//A value of 1 selects the three-byte form: chunk IDs 64 - 65599
//((third byte) * 256 + second byte + 64).
if (len < 3) {
//need more data
return ptr;
}
_now_chunk_id = 64 + ((uint8_t) (ptr[2]) << 8) + (uint8_t) (ptr[1]);
offset = 2;
break;
}
//Chunk stream ID 2 is reserved for low-level protocol control
//messages and commands.
default : break;
}
if (len < header_len + offset) {
//need more data
return ptr;
}
header = (RtmpHeader *) (ptr + offset);
auto &pr = _map_chunk_data[_now_chunk_id];
auto &now_packet = pr.first;
auto &last_packet = pr.second;
if (!now_packet) {
now_packet = RtmpPacket::create();
if (last_packet) {
//Restore the per-chunk-stream context from the previous packet.
*now_packet = *last_packet;
}
//Reset the absolute-timestamp flag for the new packet.
now_packet->is_abs_stamp = false;
}
auto &chunk_data = *now_packet;
chunk_data.chunk_id = _now_chunk_id;
//Intentional fallthrough: longer headers include all fields of the
//shorter ones (12 -> 8 -> 4 bytes).
switch (header_len) {
case 12:
chunk_data.is_abs_stamp = true;
chunk_data.stream_index = load_le32(header->stream_index);
case 8:
chunk_data.body_size = load_be24(header->body_size);
chunk_data.type_id = header->type_id;
case 4:
chunk_data.ts_field = load_be24(header->time_stamp);
}
auto time_stamp = chunk_data.ts_field;
//0xFFFFFF in the 24-bit field signals a 32-bit extended timestamp.
if (chunk_data.ts_field == 0xFFFFFF) {
if (len < header_len + offset + 4) {
//need more data
return ptr;
}
time_stamp = load_be32(ptr + offset + header_len);
offset += 4;
}
if (chunk_data.body_size < chunk_data.buffer.size()) {
throw std::runtime_error("非法的bodySize");
}
//Consume at most one chunk's worth of the remaining body.
auto more = min(_chunk_size_in, (size_t) (chunk_data.body_size - chunk_data.buffer.size()));
if (len < header_len + offset + more) {
//need more data
return ptr;
}
if (more) {
chunk_data.buffer.append(ptr + header_len + offset, more);
}
ptr += header_len + offset + more;
len -= header_len + offset + more;
if (chunk_data.buffer.size() == chunk_data.body_size) {
//frame is ready
_now_stream_index = chunk_data.stream_index;
chunk_data.time_stamp = time_stamp + (chunk_data.is_abs_stamp ? 0 : chunk_data.time_stamp);
//Save the chunk context for the next packet on this stream.
last_packet = now_packet;
if (chunk_data.body_size) {
handle_chunk(std::move(now_packet));
} else {
now_packet = nullptr;
}
}
}
return ptr;
}
| 0
|
90,902
|
// Deliberately empty callback: used where a host-usage callback is
// required but the result is not needed.
void ClientUsageTracker::NoopHostUsageCallback(
const std::string& host, StorageType type, int64 usage) {
}
| 0
|
231,041
|
/*
 * Co-routine-safe queue receive.  Returns pdPASS when an item was
 * copied into pvBuffer, errQUEUE_BLOCKED when the caller should block
 * (queue empty, non-zero wait), errQUEUE_FULL when empty and no wait
 * was requested, errQUEUE_YIELD when a co-routine waiting to send was
 * readied, or pdFAIL when the item vanished between the two critical
 * sections below.
 */
BaseType_t xQueueCRReceive( QueueHandle_t xQueue,
void * pvBuffer,
TickType_t xTicksToWait )
{
BaseType_t xReturn;
Queue_t * const pxQueue = xQueue;
/* If the queue is already empty we may have to block. A critical section
* is required to prevent an interrupt adding something to the queue
* between the check to see if the queue is empty and blocking on the queue. */
portDISABLE_INTERRUPTS();
{
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
/* There are no messages in the queue, do we want to block or just
* leave with nothing? */
if( xTicksToWait > ( TickType_t ) 0 )
{
/* As this is a co-routine we cannot block directly, but return
* indicating that we need to block. */
vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
portENABLE_INTERRUPTS();
return errQUEUE_BLOCKED;
}
else
{
portENABLE_INTERRUPTS();
return errQUEUE_FULL;
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/* Interrupts are briefly re-enabled between the emptiness check and
* the actual copy; the copy path below therefore re-checks
* uxMessagesWaiting under its own critical section. */
portENABLE_INTERRUPTS();
portDISABLE_INTERRUPTS();
{
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* Data is available from the queue. */
pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
{
pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--( pxQueue->uxMessagesWaiting );
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
xReturn = pdPASS;
/* Were any co-routines waiting for space to become available? */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
/* In this instance the co-routine could be placed directly
* into the ready list as we are within a critical section.
* Instead the same pending ready list mechanism is used as if
* the event were caused from within an interrupt. */
if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
xReturn = errQUEUE_YIELD;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
xReturn = pdFAIL;
}
}
portENABLE_INTERRUPTS();
return xReturn;
}
| 0
|
195,398
|
/*
 * VIDIOC_QUERYCAP handler: report driver name, card label, bus info and
 * the capability flags appropriate to the device's current state.
 *
 * Fix: dev->card_label (user-configurable) was passed to snprintf() as
 * the FORMAT argument, so a label containing '%' conversions caused a
 * format-string vulnerability (stack reads / potential crash).  It is
 * now printed through a constant "%s" format.
 */
static int vidioc_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file);
	int labellen = (sizeof(cap->card) < sizeof(dev->card_label)) ?
			       sizeof(cap->card) :
			       sizeof(dev->card_label);
	int device_nr =
		((struct v4l2loopback_private *)video_get_drvdata(dev->vdev))
			->device_nr;
	__u32 capabilities = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;

	strlcpy(cap->driver, "v4l2 loopback", sizeof(cap->driver));
	/* Never use the user-supplied label as a format string. */
	snprintf(cap->card, labellen, "%s", dev->card_label);
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:v4l2loopback-%03d", device_nr);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
	/* since 3.1.0, the v4l2-core system is supposed to set the version */
	cap->version = V4L2LOOPBACK_VERSION_CODE;
#endif

#ifdef V4L2_CAP_VIDEO_M2M
	capabilities |= V4L2_CAP_VIDEO_M2M;
#endif /* V4L2_CAP_VIDEO_M2M */

	if (dev->announce_all_caps) {
		capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
	} else {
		if (dev->ready_for_capture) {
			capabilities |= V4L2_CAP_VIDEO_CAPTURE;
		}
		if (dev->ready_for_output) {
			capabilities |= V4L2_CAP_VIDEO_OUTPUT;
		}
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
	dev->vdev->device_caps =
#endif /* >=linux-4.7.0 */
		cap->device_caps = cap->capabilities = capabilities;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
	cap->capabilities |= V4L2_CAP_DEVICE_CAPS;
#endif

	memset(cap->reserved, 0, sizeof(cap->reserved));
	return 0;
}
| 1
|
222,507
|
// Append this key/value entry to *s, preceded by "," unless it is the
// first entry.  Values needing C-escaping are appended AFTER the "key="
// prefix (v is left empty in that branch), so the final text is
// "key=<escaped>" either way.
void AppendTo(bool first, string* s) const {
absl::string_view v;
bool add_escaped = false;
if ((value_op_ == kCEscape) && NeedsEscaping(value_)) {
// Use CEscape call below
add_escaped = true;
} else {
// Add raw value contents directly
v = value_;
}
// A non-negative key_suffix_ is rendered as part of the key name.
if (key_suffix_ >= 0) {
strings::StrAppend(s, first ? "" : ",", key_name_, key_suffix_, "=", v);
} else {
strings::StrAppend(s, first ? "" : ",", key_name_, "=", v);
}
if (add_escaped) {
strings::StrAppend(s, absl::CEscape(value_));
}
}
| 0
|
90,911
|
// Virtual destructor; no cleanup beyond what members release themselves.
virtual ~GatherGlobalUsageTask() {}
| 0
|
235,256
|
/*
 * Fill buf with len pseudo-random bytes from the libc random()
 * generator.  Seeding with `seed` first makes the contents fully
 * deterministic: the same (seed, len) pair always produces the same
 * bytes.
 */
static void setup_buffer(uint8_t *buf, unsigned int seed, int len)
{
	int pos;

	srandom(seed);
	for (pos = 0; pos < len; pos++) {
		buf[pos] = random();
	}
}
| 0
|
432,149
|
// Build the PlanExecutor that feeds `pipeline` documents.
// Dispatch order: (1) nothing if the first stage needs no input source,
// (2) a random-cursor-backed executor when a leading $sample (possibly
// under an unpack-bucket stage) can use one, (3) a $geoNear-specific
// executor when the first stage is $geoNear, (4) a generic cursor
// executor otherwise.
//
// NOTE(review): the return-type line of this method lies outside this
// excerpt; the bodies below return a (callback, executor) pair.
PipelineD::buildInnerQueryExecutor(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregateCommandRequest* aggRequest,
Pipeline* pipeline) {
auto expCtx = pipeline->getContext();
// We will be modifying the source vector as we go.
Pipeline::SourceContainer& sources = pipeline->_sources;
if (!sources.empty() && !sources.front()->constraints().requiresInputDocSource) {
return {};
}
if (!sources.empty()) {
// Try to inspect if the DocumentSourceSample or a DocumentSourceInternalUnpackBucket stage
// can be optimized for sampling backed by a storage engine supplied random cursor.
auto&& [sampleStage, unpackBucketStage] = extractSampleUnpackBucket(sources);
// Optimize an initial $sample stage if possible.
if (collection && sampleStage) {
auto [attachExecutorCallback, exec] =
buildInnerQueryExecutorSample(sampleStage, unpackBucketStage, collection, pipeline);
if (exec) {
return std::make_pair(std::move(attachExecutorCallback), std::move(exec));
}
}
}
// If the first stage is $geoNear, prepare a special DocumentSourceGeoNearCursor stage;
// otherwise, create a generic DocumentSourceCursor.
const auto geoNearStage =
sources.empty() ? nullptr : dynamic_cast<DocumentSourceGeoNear*>(sources.front().get());
if (geoNearStage) {
return buildInnerQueryExecutorGeoNear(collection, nss, aggRequest, pipeline);
} else {
return buildInnerQueryExecutorGeneric(collection, nss, aggRequest, pipeline);
}
}
| 0
|
459,087
|
/*
 * Emit the qevent's bound block index as a u32 netlink attribute.
 * A zero block index means no block is bound, which is not an error:
 * nothing is emitted and 0 is returned.  Otherwise returns the
 * nla_put_u32() result.
 */
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	const u32 block_index = qe->info.block_index;

	if (block_index == 0)
		return 0;

	return nla_put_u32(skb, attr_name, block_index);
}
| 0
|
90,751
|
// Delete all stored data for `origin` of the given StorageType across
// the registered quota clients.  Takes ownership of `callback`: with an
// empty origin or no clients it is run with kQuotaStatusOk and deleted
// immediately; otherwise it is handed to an OriginDataDeleter.
void QuotaManager::DeleteOriginData(
const GURL& origin, StorageType type, StatusCallback* callback) {
LazyInitialize();
if (origin.is_empty() || clients_.empty()) {
callback->Run(kQuotaStatusOk);
delete callback;
return;
}
// NOTE(review): deleter is not freed here -- presumably it deletes
// itself (and runs `callback`) when the deletion completes; confirm in
// OriginDataDeleter.
OriginDataDeleter* deleter =
new OriginDataDeleter(this, origin, type, callback);
deleter->Start();
}
| 0
|
281,051
|
/*
 * Decide whether the by-destination policy hash table for direction
 * `dir` should grow: true when the bucket mask can still be enlarged
 * below xfrm_policy_hashmax and the entry count already exceeds it.
 * When `total` is non-NULL the direction's policy count is also
 * accumulated into it.
 */
static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int count = net->xfrm.policy_count[dir];
	unsigned int mask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += count;

	return (mask + 1) < xfrm_policy_hashmax && count > mask;
}
| 0
|
275,515
|
/*
 * Load all registered modules, then run the VM's start bytecode.
 * Module-load failures are returned verbatim; interpreter results are
 * collapsed to NJS_OK unless they are NJS_ERROR.
 */
njs_vm_start(njs_vm_t *vm)
{
njs_int_t ret;
ret = njs_module_load(vm);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_vmcode_interpreter(vm, vm->start, NULL, NULL);
return (ret == NJS_ERROR) ? NJS_ERROR : NJS_OK;
}
| 0
|
247,149
|
/*
 * Log an error for every option set on the session but never consumed
 * by any filter, skipping names listed in the comma-separated
 * ignore_args string (NULL to skip nothing).
 *
 * Fix: the previous matching accepted any substring hit, so an
 * argument "bar" was wrongly treated as ignored when ignore_args
 * contained "foobar".  A hit now only counts when it starts at a token
 * boundary (list head or right after a comma) AND ends at one, and
 * scanning resumes just past a rejected hit instead of jumping to the
 * next comma of the not-yet-scanned region.
 */
void gf_fs_print_unused_args(GF_FilterSession *fsess, const char *ignore_args)
{
	u32 idx = 0;
	char *argname;
	u32 argtype;

	while (1) {
		Bool found = GF_FALSE;
		const char *loc_arg;
		if (gf_fs_enum_unmapped_options(fsess, &idx, &argname, &argtype)==GF_FALSE)
			break;

		loc_arg = ignore_args;
		while (loc_arg) {
			u32 len;
			char *match = strstr(loc_arg, argname);
			if (!match) break;
			len = (u32) strlen(argname);
			/* accept only whole comma-delimited tokens */
			if ((match == ignore_args || match[-1] == ',')
				&& (!match[len] || (match[len] == ','))) {
				found = GF_TRUE;
				break;
			}
			/* partial hit: keep scanning right after it */
			loc_arg = match + 1;
		}
		if (found) continue;

		GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Arg %s set but not used\n", argname));
	}
}
| 0
|
229,327
|
// Create the output TensorHandles for `op` and either enqueue the
// kernel on the op's executor (async mode) or run it synchronously.
// `retvals` must have room for kernel->num_outputs() entries; each is
// populated with a new handle.  Cross-process functions get an eager op
// id assigned here when none exists (unsupported on mobile).
Status AddOrExecuteNode(core::RefCountPtr<KernelAndDevice> kernel,
EagerOperation* op, TensorHandle** retvals) {
EagerExecutor& executor = op->Executor();
EagerContext& ctx = op->EagerContext();
GraphCollector* graph_collector = nullptr;
if (ctx.ShouldStoreGraphs()) {
graph_collector = ctx.GetGraphCollector();
}
const int num_outputs = kernel->num_outputs();
absl::optional<EagerFunctionParams> eager_func_params =
op->eager_func_params();
if (kernel->IsCrossProcess() && !eager_func_params.has_value()) {
// Create an eager op id for a cross-process function if not exist.
#if defined(IS_MOBILE_PLATFORM)
return errors::Unimplemented(
"Cross-process functions are not supported on mobile devices.");
#else // !IS_MOBILE_PLATFORM
const int64_t op_id = ctx.RemoteMgr()->NextOpId();
eager_func_params = EagerFunctionParams{op_id, /*step_id=*/absl::nullopt};
#endif // !IS_MOBILE_PLATFORM
}
if (executor.Async()) {
// Async path: allocate empty/unshaped output handles up front, then
// hand everything to an AsyncExecuteNode.
const DataTypeVector& output_dtypes = kernel->output_dtypes();
for (int i = 0, end = num_outputs; i < end; ++i) {
Device* output_device = ctx.CanonicalDevice(kernel->OutputDevice(i));
if (output_device == nullptr || output_device->IsLocal()) {
retvals[i] = TensorHandle::CreateEmptyLocalHandle(
/* d= */ output_device, /* op_device= */ kernel->device(),
/* resource_device= */ kernel->OutputResourceDevice(i),
output_dtypes[i], &ctx);
} else {
TF_RETURN_IF_ERROR(
CreateUnshapedOutput(*kernel, i, output_device, output_dtypes[i],
eager_func_params, &ctx, &retvals[i]));
}
}
const absl::InlinedVector<TensorHandle*, 4>* inputs;
TF_RETURN_IF_ERROR(op->TensorHandleInputs(&inputs));
auto node = absl::make_unique<AsyncExecuteNode>(
&ctx, *inputs, eager_func_params, std::move(kernel), graph_collector,
op->GetCancellationManager(),
absl::Span<TensorHandle*>(retvals, num_outputs), op->GetStackTrace());
// Release the inputs from the eager operation since the AsyncExecuteNode
// would have taken ownership. This allows the inputs to be forwarded if
// possible.
op->Clear();
// For async mode, execution order will make sure that all
// input handles are ready before executing them.
// TODO(b/137118203): Consider executing "cheap" kernels inline for
// performance.
return executor.AddOrExecute(std::move(node));
} else {
// Sync path: outputs are filled in by ExecuteNode itself.
for (int i = 0, end = num_outputs; i < end; ++i) {
retvals[i] = nullptr;
}
const absl::InlinedVector<TensorHandle*, 4>* inputs;
TF_RETURN_IF_ERROR(op->TensorHandleInputs(&inputs));
ExecuteNode node(&ctx, *inputs, eager_func_params, kernel, graph_collector,
op->GetCancellationManager(),
{retvals, static_cast<size_t>(num_outputs)},
op->GetStackTrace());
Status s = executor.SyncExecute(&node);
// We release the inputs AFTER executing the operation in sync mode since
// ExecuteNode does not increment the reference count and thus does not have
// ownership of the inputs while executing.
op->Clear();
return s;
}
}
| 0
|
256,423
|
static void pjmedia_rtcp_fb_cap_dup(pj_pool_t *pool,
				    pjmedia_rtcp_fb_cap *dst,
				    const pjmedia_rtcp_fb_cap *src)
{
    /* Deep-copy an RTCP feedback capability: the plain enum field is
     * assigned directly, every string field is duplicated into `pool`
     * so `dst` does not alias `src`'s storage.
     */
    dst->type = src->type;
    pj_strdup(pool, &dst->codec_id, &src->codec_id);
    pj_strdup(pool, &dst->type_name, &src->type_name);
    pj_strdup(pool, &dst->param, &src->param);
}
| 0
|
225,694
|
GF_Box *lsrc_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_LASERConfigurationBox, GF_ISOM_BOX_TYPE_LSRC);
return (GF_Box *)tmp;
| 0
|
346,453
|
estack_top_is_ufunc(ufunc_T *ufunc, long lnum)
{
    estack_T	*top;
    char_u	*name;

    // Report whether the top of the execution stack is the given user
    // function at the given line number.
    if (exestack.ga_len == 0)
	return FALSE;
    top = ((estack_T *)exestack.ga_data) + exestack.ga_len - 1;
    if (top->es_type != ETYPE_UFUNC || top->es_lnum != lnum)
	return FALSE;
    // Prefer the expanded name (for <SNR>-prefixed functions) when present.
    name = ufunc->uf_name_exp != NULL ? ufunc->uf_name_exp : ufunc->uf_name;
    return STRCMP(top->es_name, name) == 0;
}
| 0
|
424,976
|
/*
 * Program the MSI-X IVAR table so each RX queue's interrupt cause is routed
 * to the correct interrupt vector.
 */
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	/* When the first vector is shared with RSS, the per-queue vectors
	 * start one position later, hence the offset of 1. */
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;
	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	/* Unmask (0 = enabled) every cause accumulated above. */
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
	val = MSIX_FH_INT_CAUSES_Q(0);
	/* If the first vector also serves non-RX causes, it must not be
	 * auto-cleared by hardware. */
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
	/* Queue 1 shares the first vector in the FIRST_RSS configuration. */
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
| 0
|
226,994
|
/*
 * Callback for IRC numeric 733 (end of MONITOR list — presumably
 * RPL_ENDOFMONLIST; confirm against the MONITOR spec).
 *
 * Prints the trailing text (if any) in the buffer chosen for "monitor"
 * messages.
 */
IRC_PROTOCOL_CALLBACK(733)
{
    char *pos_args;
    IRC_PROTOCOL_MIN_ARGS(3);
    /* Trailing parameter, with the leading ':' stripped when present. */
    pos_args = (argc > 3) ?
        ((argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3]) : NULL;
    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "monitor", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s",
        weechat_prefix ("network"),
        (pos_args && pos_args[0]) ? pos_args : "");
    return WEECHAT_RC_OK;
}
| 0
|
424,954
|
/*
 * Allocate the default firmware monitor buffer.
 *
 * @max_power: requested buffer size as a power of two, as read from the FW
 *             TLV; 0 means "use the maximum" (2^26). Non-zero values are
 *             offset by 11 (so the minimum allocation is 2^11 bytes).
 */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}
	/* Reject FW TLV values that would exceed the 2^26 hardware limit. */
	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;
	/*
	 * This function allocates the default fw monitor.
	 * The optional additional ones will be allocated in runtime
	 */
	if (trans->dbg.num_blocks)
		return;
	/* Try sizes from 2^max_power down to the 2^11 minimum. */
	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}
| 0
|
225,037
|
/*
 * Set the client encoding for an open connection by issuing
 * "set client_encoding to '...'".
 *
 * Returns 0 on success, -1 on failure (bad connection, NULL encoding,
 * over-long encoding name, or command failure). The connection's cached
 * encoding state is updated later, when the backend reports the new
 * parameter value.
 */
PQsetClientEncoding(PGconn *conn, const char *encoding)
{
	char		qbuf[128];
	static const char query[] = "set client_encoding to '%s'";
	PGresult   *res;
	int			status;
	if (!conn || conn->status != CONNECTION_OK)
		return -1;
	if (!encoding)
		return -1;
	/* Resolve special "auto" value from the locale */
	if (strcmp(encoding, "auto") == 0)
		encoding = pg_encoding_to_char(pg_get_encoding_from_locale(NULL, true));
	/* check query buffer overflow */
	if (sizeof(qbuf) < (sizeof(query) + strlen(encoding)))
		return -1;
	/* ok, now send a query */
	sprintf(qbuf, query, encoding);
	res = PQexec(conn, qbuf);
	if (res == NULL)
		return -1;
	if (res->resultStatus != PGRES_COMMAND_OK)
		status = -1;
	else
	{
		/*
		 * We rely on the backend to report the parameter value, and we'll
		 * change state at that time.
		 */
		status = 0;				/* everything is ok */
	}
	PQclear(res);
	return status;
}
| 0
|
253,567
|
smb21_is_read_op(__u32 oplock)
{
	/* A lease is read-only when it grants read caching but not
	 * write caching. */
	bool has_read = (oplock & SMB2_LEASE_READ_CACHING_HE) != 0;
	bool has_write = (oplock & SMB2_LEASE_WRITE_CACHING_HE) != 0;

	return has_read && !has_write;
}
| 0
|
256,952
|
// Validates the ranks of all einsum inputs against their label strings,
// records every (label -> dimension size) pairing, and expands each
// ellipsis ("...") into explicit broadcasting labels in both the input and
// output label vectors.
//
// On success: label_to_dim_sizes maps each named label to its dimension
// size(s), broadcasting labels are appended to input_labels /
// output_labels / label_types, and input/output label counts are updated.
// Returns InvalidArgument on rank mismatches or a missing output ellipsis
// when broadcasting dimensions exist.
static Status ProcessDimensions(
    const OpInputList& inputs,
    const gtl::InlinedVector<bool, 2>& input_has_ellipsis,
    const bool output_has_ellipsis, OperandLabels* input_labels,
    Labels* output_labels, std::vector<DimensionType>* label_types,
    OperandLabelCounts* input_label_counts, LabelCounts* output_label_counts,
    LabelToDimSizes* label_to_dim_sizes) {
  if (inputs.size() != input_labels->size()) {
    return errors::InvalidArgument("Expected ", input_labels->size(),
                                   " inputs but got: ", inputs.size());
  }
  const int num_inputs = inputs.size();
  // We infer the number of broadcasting dimensions by taking the maximum rank
  // among the broadcasting subshapes of the input.
  int max_bcast_dims = 0;
  const int num_named_labels = label_types->size();
  label_to_dim_sizes->resize(num_named_labels);
  for (int i = 0; i < num_inputs; ++i) {
    Labels* labels = &(*input_labels)[i];
    if (!input_has_ellipsis[i]) {
      // No ellipsis: rank must match the label count exactly.
      if (inputs[i].dims() != labels->size()) {
        return errors::InvalidArgument("Expected input ", i, " to have rank ",
                                       labels->size(),
                                       " but got: ", inputs[i].dims());
      }
      for (int label_idx = 0; label_idx < labels->size(); ++label_idx) {
        const int label = (*labels)[label_idx];
        TF_RETURN_IF_ERROR(RecordLabelToDimension(label, label_idx, inputs[i],
                                                  label_to_dim_sizes));
      }
      continue;
    }
    // Input has an ellipsis.
    // The ellipsis may cover zero or more dimensions, so the rank must be at
    // least the number of named labels (labels->size() - 1).
    if (inputs[i].dims() + 1 < labels->size()) {
      return errors::InvalidArgument(
          "Expected input ", i, " to have rank at least ", labels->size() - 1,
          " but got: ", inputs[i].dims());
    }
    int ellipsis_axis = -1;
    const int num_bcast_dims = inputs[i].dims() - labels->size() + 1;
    for (int label_idx = 0; label_idx < labels->size(); ++label_idx) {
      const int label = (*labels)[label_idx];
      if (label == kEllipsisLabel) {
        ellipsis_axis = label_idx;
        continue;
      }
      // Current label is not an ellipsis.
      // Labels after the ellipsis are shifted right by the broadcast span.
      const int axis =
          label_idx + (ellipsis_axis == -1 ? 0 : num_bcast_dims - 1);
      TF_RETURN_IF_ERROR(
          RecordLabelToDimension(label, axis, inputs[i], label_to_dim_sizes));
    }
    // Found an ellipsis. Replace 'kEllipsisLabel' with broadcasting
    // dimensions.
    if (ellipsis_axis != -1) {
      InsertBroadcastLabels(num_bcast_dims, num_named_labels, ellipsis_axis,
                            labels, &input_label_counts->at(i));
      max_bcast_dims = std::max(max_bcast_dims, num_bcast_dims);
    }
  }
  // Fast path: no ellipsis anywhere, nothing left to expand.
  if (!absl::c_linear_search(input_has_ellipsis, true) &&
      !output_has_ellipsis) {
    return Status::OK();
  }
  // Insert broadcasting dimensions in the output labels.
  auto it =
      std::find(output_labels->begin(), output_labels->end(), kEllipsisLabel);
  if (it != output_labels->end()) {
    const int ellipsis_axis = it - output_labels->begin();
    InsertBroadcastLabels(max_bcast_dims, num_named_labels, ellipsis_axis,
                          output_labels, output_label_counts);
  } else if (max_bcast_dims > 0) {
    return errors::InvalidArgument(
        "Output contains ", max_bcast_dims,
        " broadcasting dimension(s) but no ellipsis "
        "(...) was found in the output subscripts.");
  }
  // Populate DimensionType for the new broadcasting labels.
  label_types->resize(num_named_labels + max_bcast_dims, kBroadcasting);
  return Status::OK();
}
| 0
|
513,204
|
/*
  Copy newly-registered dynamic (plugin) system variables from the global
  defaults into a session's variable block.

  The session buffer is grown to the current global size, the tail that the
  session has not yet seen is memcpy'd from the global defaults, and any
  MEMALLOC string variables in that tail are strdup'd so the session owns
  its own copies.

  @param thd          session to update
  @param global_lock  when true, take LOCK_global_system_variables here;
                      otherwise the caller must already hold it
*/
void sync_dynamic_session_variables(THD* thd, bool global_lock)
{
  uint idx;
  thd->variables.dynamic_variables_ptr= (char*)
    my_realloc(thd->variables.dynamic_variables_ptr,
               global_variables_dynamic_size,
               MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR));
  if (global_lock)
    mysql_mutex_lock(&LOCK_global_system_variables);
  mysql_mutex_assert_owner(&LOCK_global_system_variables);
  /* Copy only the portion the session has not initialized yet. */
  memcpy(thd->variables.dynamic_variables_ptr +
           thd->variables.dynamic_variables_size,
         global_system_variables.dynamic_variables_ptr +
           thd->variables.dynamic_variables_size,
         global_system_variables.dynamic_variables_size -
           thd->variables.dynamic_variables_size);
  /*
    now we need to iterate through any newly copied 'defaults'
    and if it is a string type with MEMALLOC flag, we need to strdup
  */
  for (idx= 0; idx < bookmark_hash.records; idx++)
  {
    st_bookmark *v= (st_bookmark*) my_hash_element(&bookmark_hash,idx);
    if (v->version <= thd->variables.dynamic_variables_version)
      continue; /* already in thd->variables */
    /* Here we do anything special that may be required of the data types */
    if ((v->key[0] & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR &&
         v->key[0] & BOOKMARK_MEMALLOC)
    {
      char **pp= (char**) (thd->variables.dynamic_variables_ptr + v->offset);
      if (*pp)
        *pp= my_strdup(*pp, MYF(MY_WME|MY_FAE));
    }
  }
  if (global_lock)
    mysql_mutex_unlock(&LOCK_global_system_variables);
  /* Record that the session is now up to date with the global state. */
  thd->variables.dynamic_variables_version=
         global_system_variables.dynamic_variables_version;
  thd->variables.dynamic_variables_head=
         global_system_variables.dynamic_variables_head;
  thd->variables.dynamic_variables_size=
         global_system_variables.dynamic_variables_size;
}
| 0
|
412,104
|
dnsc_key_to_fingerprint(char fingerprint[80U], const uint8_t * const key)
{
    /* Render the public key as colon-separated groups of two hex bytes,
     * e.g. "AABB:CCDD:...". Each group consumes 4 output chars plus a
     * separator, so 5 slots per group except the last.
     */
    const size_t capacity = 80U;
    size_t out = (size_t) 0U;
    size_t in = (size_t) 0U;

    for (;;) {
        assert(capacity > out);
        snprintf(&fingerprint[out], capacity - out, "%02X%02X",
                 key[in], key[in + 1U]);
        in += 2U;
        if (in >= crypto_box_PUBLICKEYBYTES) {
            break;
        }
        /* Overwrite the NUL snprintf left and append the separator. */
        fingerprint[out + 4U] = ':';
        out += 5U;
    }
}
| 0
|
197,826
|
bool IsConstantFoldable(
const Node* n,
const std::unordered_map<string, std::vector<PartialTensorShape>>*
shape_map,
const std::function<bool(const Node*)>& consider,
int64_t max_constant_size_in_bytes,
std::unordered_map<const Node*, std::vector<Tensor>>*
shape_replacement_map) {
if (n->IsConstant()) {
return true;
}
if (MaybeReplaceShapeOp(n, shape_map, shape_replacement_map)) {
return true;
}
if (n->op_def().is_stateful()) {
return false;
}
if (consider && !consider(n)) {
return false;
}
if (shape_map != nullptr) {
// We can skip the node if an output is known to be oversized.
auto shape_it = shape_map->find(n->name());
if (shape_it != shape_map->end()) {
for (int64_t i = 0; i < shape_it->second.size(); ++i) {
const auto& out_shape = shape_it->second[i];
if (out_shape.IsFullyDefined() &&
out_shape.num_elements() * DataTypeSize(n->output_type(i)) >
max_constant_size_in_bytes) {
return false;
}
}
}
}
if (n->IsControlFlow() || n->IsSend() || n->IsRecv()) {
return false;
}
// TODO(yuanbyu): For now disable these session handle operations.
if (n->IsGetSessionHandle() || n->IsGetSessionTensor() ||
n->IsDeleteSessionTensor()) {
return false;
}
if (n->IsSource()) {
return false;
}
if (n->IsSink()) {
return false;
}
if (n->IsFakeParam()) {
return false;
}
// Since constant-folding runs on the CPU, do not attempt to constant-fold
// operators that have no CPU kernel. Also implies that we will not
// constant-fold functions.
// TODO(phawkins): allow constant-folding for functions; functions may
// be arbitrarily expensive to execute.
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), n->def())) {
return false;
}
// Do not constant fold nodes which will be allocated by ScopedAllocator.
// This is because the constant-folding graph will not contain the
// `_ScopedAllocator` node, and that is necessary to be able to run a node
// that will use this allocator.
if (n->attrs().Find(kScopedAllocatorAttrName) != nullptr) {
VLOG(2) << "Skip node [" << n->DebugString()
<< "] for constant folding due to scoped allocator";
return false;
}
return true;
}
| 1
|
369,913
|
/*
 * Instantiate a /proc/<pid>/map_files/<addr> dentry as a symlink whose
 * permission bits mirror the mapped file's open mode (read -> S_IRUSR,
 * write -> S_IWUSR). `ptr` is the struct file of the mapping.
 *
 * Returns NULL on success (dentry added), or an ERR_PTR on failure.
 */
proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
			   struct task_struct *task, const void *ptr)
{
	const struct file *file = ptr;
	struct proc_inode *ei;
	struct inode *inode;
	if (!file)
		return ERR_PTR(-ENOENT);
	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		return ERR_PTR(-ENOENT);
	ei = PROC_I(inode);
	/* Link resolution is handled by proc_map_files_get_link(). */
	ei->op.proc_get_link = proc_map_files_get_link;
	inode->i_op = &proc_pid_link_inode_operations;
	inode->i_size = 64;
	inode->i_mode = S_IFLNK;
	if (file->f_mode & FMODE_READ)
		inode->i_mode |= S_IRUSR;
	if (file->f_mode & FMODE_WRITE)
		inode->i_mode |= S_IWUSR;
	d_set_d_op(dentry, &tid_map_files_dentry_operations);
	d_add(dentry, inode);
	return NULL;
}
| 0
|
281,147
|
static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	/* Use the explicitly-set MTU metric when present, otherwise fall
	 * back to the MTU of the underlying path. */
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (!mtu)
		mtu = dst_mtu(dst->path);
	return mtu;
}
| 0
|
513,198
|
/*
  Re-run plugin session-variable initialization for wsrep applier threads
  after plugins have been loaded, so appliers pick up plugin-defined
  session variables that did not exist when the threads were created.
*/
void wsrep_plugins_post_init()
{
  THD *thd;
  I_List_iterator<THD> it(threads);

  while ((thd= it++))
  {
    /* Only applier threads need this; in non-wsrep builds the IF_WSREP
       macro makes the condition always true. */
    if (IF_WSREP(thd->wsrep_applier,1))
    {
      // Save options_bits as it will get overwritten in plugin_thdvar_init()
      ulonglong option_bits_saved= thd->variables.option_bits;

      plugin_thdvar_init(thd);

      // Restore option_bits
      thd->variables.option_bits= option_bits_saved;
    }
  }

  return;
}
| 0
|
487,657
|
/*
 * setuid() system call: set the effective (and, with CAP_SETUID, also the
 * real and saved) user id of the calling process.
 *
 * With CAP_SETUID all three ids become `uid`; without it, `uid` must equal
 * the current real or saved uid, and only euid/fsuid change.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		/* Privileged: change the real uid too (may fail on rlimit). */
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		/* Changing euid: restrict core dumps per suid_dumpable policy.
		 * The barrier orders the dumpable update before the id writes. */
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
| 0
|
309,818
|
__nc_putp_flush(SCREEN *sp, const char *name, const char *value)
{
    /* Emit the capability, then flush output — but only when the write
     * itself succeeded. Returns the result of __nc_putp(). */
    int result = __nc_putp(sp, name, value);

    if (result == ERR)
	return result;
    NCURSES_SP_NAME(_nc_flush) (sp);
    return result;
}
| 0
|
333,097
|
/*
 * Return a short description of a postponed invisible match (PIM) for
 * debug logging. Returns the empty string when there is no usable PIM.
 * NOTE: returns a pointer to a static buffer — not reentrant; each call
 * overwrites the previous result.
 */
    pim_info(nfa_pim_T *pim)
{
    static char buf[30];

    if (pim == NULL || pim->result == NFA_PIM_UNUSED)
	buf[0] = NUL;
    else
    {
	/* Column of the match end: multi-line matches store a position,
	 * single-line matches store a pointer into the input line. */
	sprintf(buf, " PIM col %d", REG_MULTI ? (int)pim->end.pos.col
		: (int)(pim->end.ptr - rex.input));
    }
    return buf;
}
| 0
|
230,135
|
/**
 * Create a new WebAuthn registration credential for `username`.
 *
 * Generates a random challenge and a random session token, stores their
 * hashes in a fresh credential row (status 0 = new) after disabling any
 * previous status-0 rows for the same user, and returns
 * {"result": G_OK, "credential": {"session": ..., "challenge": <base64>}}
 * on success, or {"result": G_ERROR/G_ERROR_DB} on failure.
 *
 * Note: only the hashes are persisted; the plaintext session and the
 * base64 challenge are returned to the caller for the client round-trip.
 */
static json_t * generate_new_credential(struct config_module * config, json_t * j_params, const char * username) {
  json_t * j_query, * j_return;
  char * username_escaped, * mod_name_escaped, * username_clause, * challenge_hash;
  int res;
  /* challenge length is configurable; VLA buffers sized from it below */
  size_t challenge_b64_len, challenge_len = (size_t)json_integer_value(json_object_get(j_params, "challenge-length"));
  unsigned char challenge_b64[challenge_len*2], challenge[challenge_len+1];
  char session[SESSION_LENGTH+1] = {0}, * session_hash;
  
  gnutls_rnd(GNUTLS_RND_NONCE, challenge, challenge_len);
  if (o_base64_encode(challenge, challenge_len, challenge_b64, &challenge_b64_len)) {
    challenge_b64[challenge_b64_len] = '\0';
    if ((challenge_hash = generate_hash(config->hash_algorithm, (const char *)challenge_b64)) != NULL) {
      rand_string(session, SESSION_LENGTH);
      if ((session_hash = generate_hash(config->hash_algorithm, session)) != NULL) {
        username_escaped = h_escape_string_with_quotes(config->conn, username);
        mod_name_escaped = h_escape_string_with_quotes(config->conn, json_string_value(json_object_get(j_params, "mod_name")));
        /* subquery resolving the user's id, reused by both statements */
        username_clause = msprintf(" (SELECT gswu_id FROM "G_TABLE_WEBAUTHN_USER" WHERE UPPER(gswu_username) = UPPER(%s) AND gswu_mod_name = %s)", username_escaped, mod_name_escaped);
        // Disable all credential with status 0 (new) of the same user
        j_query = json_pack("{sss{si}s{s{ssss+}si}}",
                            "table",
                            G_TABLE_WEBAUTHN_CREDENTIAL,
                            "set",
                              "gswc_status",
                              2,
                            "where",
                              "gswu_id",
                                "operator",
                                "raw",
                                "value",
                                " =",
                                username_clause,
                              "gswc_status",
                              0);
        res = h_update(config->conn, j_query, NULL);
        json_decref(j_query);
        if (res == H_OK) {
          // Insert new credential
          j_query = json_pack("{sss{s{ss}sssssi}}",
                              "table",
                              G_TABLE_WEBAUTHN_CREDENTIAL,
                              "values",
                                "gswu_id",
                                  "raw",
                                  username_clause,
                                "gswc_session_hash",
                                session_hash,
                                "gswc_challenge_hash",
                                challenge_hash,
                                "gswc_status",
                                0);
          res = h_insert(config->conn, j_query, NULL);
          json_decref(j_query);
          if (res == H_OK) {
            j_return = json_pack("{sis{ssss}}", "result", G_OK, "credential", "session", session, "challenge", challenge_b64);
          } else {
            y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error executing j_query insert");
            config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
            j_return = json_pack("{si}", "result", G_ERROR_DB);
          }
        } else {
          y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error executing j_query update");
          config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
          j_return = json_pack("{si}", "result", G_ERROR_DB);
        }
        o_free(username_clause);
        o_free(username_escaped);
        o_free(mod_name_escaped);
      } else {
        y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error generate_hash session");
        j_return = json_pack("{si}", "result", G_ERROR);
      }
      o_free(session_hash);
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error generate_hash challenge");
      j_return = json_pack("{si}", "result", G_ERROR);
    }
    o_free(challenge_hash);
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "generate_new_credential - Error o_base64_encode challenge");
    j_return = json_pack("{si}", "result", G_ERROR);
  }
  return j_return;
}
| 0
|
238,616
|
/* Sort the program's kfunc descriptor table by BPF immediate so later
 * lookups can binary-search it. No-op when no table was allocated.
 */
static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
{
	struct bpf_kfunc_desc_tab *tab = prog->aux->kfunc_tab;

	if (tab)
		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
		     kfunc_desc_cmp_by_imm, NULL);
}
| 0
|
317,148
|
/*
 * Verify that a superblock's root inode supports the security.selinux
 * xattr. On missing xattr support, fall back to genfs-based labeling when
 * a genfs context exists; otherwise return -EOPNOTSUPP. Returns 0 when
 * xattr labeling (or the genfs fallback) can be used.
 */
static int sb_check_xattr_support(struct super_block *sb)
{
	struct superblock_security_struct *sbsec = sb->s_security;
	struct dentry *root = sb->s_root;
	struct inode *root_inode = d_backing_inode(root);
	u32 sid;
	int rc;

	/*
	 * Make sure that the xattr handler exists and that no
	 * error other than -ENODATA is returned by getxattr on
	 * the root directory.  -ENODATA is ok, as this may be
	 * the first boot of the SELinux kernel before we have
	 * assigned xattr values to the filesystem.
	 */
	if (!(root_inode->i_opflags & IOP_XATTR)) {
		pr_warn("SELinux: (dev %s, type %s) has no xattr support\n",
			sb->s_id, sb->s_type->name);
		goto fallback;
	}

	rc = __vfs_getxattr(root, root_inode, XATTR_NAME_SELINUX, NULL, 0);
	if (rc < 0 && rc != -ENODATA) {
		if (rc == -EOPNOTSUPP) {
			pr_warn("SELinux: (dev %s, type %s) has no security xattr handler\n",
				sb->s_id, sb->s_type->name);
			goto fallback;
		} else {
			/* Real I/O or permission failure: propagate it. */
			pr_warn("SELinux: (dev %s, type %s) getxattr errno %d\n",
				sb->s_id, sb->s_type->name, -rc);
			return rc;
		}
	}
	return 0;

fallback:
	/* No xattr support - try to fallback to genfs if possible. */
	rc = security_genfs_sid(&selinux_state, sb->s_type->name, "/",
				SECCLASS_DIR, &sid);
	if (rc)
		return -EOPNOTSUPP;

	pr_warn("SELinux: (dev %s, type %s) falling back to genfs\n",
		sb->s_id, sb->s_type->name);
	sbsec->behavior = SECURITY_FS_USE_GENFS;
	sbsec->sid = sid;
	return 0;
}
| 0
|
244,343
|
GF_Err ainf_box_size(GF_Box *s)
{
	/* Payload: 4-byte profile_version + APID string + NUL terminator. */
	GF_AssetInformationBox *ainf = (GF_AssetInformationBox *) s;
	size_t apid_len = ainf->APID ? strlen(ainf->APID) : 0;

	s->size += 4 + apid_len + 1;
	return GF_OK;
}
| 0
|
376,350
|
gpg_ctx_add_recipient (struct _GpgCtx *gpg,
                       const gchar *keyid)
{
	gchar *safe_keyid;

	/* Recipients only make sense when encrypting or exporting. */
	if (gpg->mode != GPG_CTX_MODE_ENCRYPT && gpg->mode != GPG_CTX_MODE_EXPORT)
		return;

	if (gpg->recipients == NULL)
		gpg->recipients = g_ptr_array_new ();

	g_return_if_fail (keyid != NULL);

	/* If the recipient looks like an email address,
	 * enclose it in brackets to ensure an exact match. */
	safe_keyid = (strchr (keyid, '@') != NULL)
		? g_strdup_printf ("<%s>", keyid)
		: g_strdup (keyid);

	g_ptr_array_add (gpg->recipients, safe_keyid);
}
| 0
|
242,119
|
// to_table(self) -> table: push the settings object's contents as a Lua
// table.
int LuaSettings::l_to_table(lua_State* L)
{
	NO_MAP_LOCK_REQUIRED;
	LuaSettings* o = checkobject(L, 1);

	// Bug fix: the previous `MutexAutoLock(o->m_settings->m_mutex);`
	// constructed an unnamed temporary that was destroyed immediately, so
	// the mutex was never held while reading the settings. The guard must
	// be a named local so it lives until the end of the function.
	MutexAutoLock lock(o->m_settings->m_mutex);
	push_settings_table(L, o->m_settings);
	return 1;
}
| 0
|
234,244
|
is_max_address (dwarf_vma addr, unsigned int pointer_size)
{
  /* True when ADDR is the all-ones value for a POINTER_SIZE-byte address,
     i.e. the "maximum address" marker used in DWARF location lists.  */
  dwarf_vma mask;

  mask = ~(~(dwarf_vma) 1 << (pointer_size * 8 - 1));
  return (addr & mask) == mask;
}
| 0
|
417,132
|
mp_sint32 PlayerGeneric::getCurrentBeatIndex()
{
	// Without an active player there is no playback position to map.
	if (!player)
		return 0;
	return player->getBeatIndexFromSamplePos(getCurrentSamplePosition());
}
| 0
|
234,719
|
/*
 * Decide whether a chunk participates in the current balance operation by
 * running it through the user-supplied balance filters (type, profile,
 * usage, devid, drange, vrange, stripes, soft-convert, limit).
 *
 * Returns 1 if the chunk should be balanced, 0 if any filter excludes it.
 * Note: the limit filters mutate bctl (decrement remaining counts), so
 * they must stay last.
 */
static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	/* Select the per-type argument set (data/system/metadata). */
	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}
| 0
|
359,556
|
/* vty command "no redistribute <proto>": stop redistributing routes of the
 * given protocol into BGP for the IPv4 address family.
 * (NOTE(review): the help string misspells "Shortest" as "Shurtest" —
 * runtime string, left unchanged here.) */
DEFUN (no_bgp_redistribute_ipv4,
       no_bgp_redistribute_ipv4_cmd,
       "no redistribute (connected|kernel|ospf|rip|static)",
       NO_STR
       "Redistribute information from another routing protocol\n"
       "Connected\n"
       "Kernel routes\n"
       "Open Shurtest Path First (OSPF)\n"
       "Routing Information Protocol (RIP)\n"
       "Static routes\n")
{
  int type;

  /* Map the protocol keyword (argv[0]) to a route type number. */
  type = bgp_str2route_type (AFI_IP, argv[0]);
  if (! type)
    {
      vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE);
      return CMD_WARNING;
    }

  return bgp_redistribute_unset (vty->index, AFI_IP, type);
}
| 0
|
244,105
|
GF_Err stvi_box_size(GF_Box *s)
{
	/* Fixed payload is 12 bytes; stereo_indication_type data follows. */
	GF_StereoVideoBox *stvi = (GF_StereoVideoBox *)s;

	stvi->size += 12 + stvi->sit_len;
	return GF_OK;
}
| 0
|
253,730
|
/* Release the DMA mapping held by a scatter-gather workarea, if any,
 * and reset its mapped-entry count.
 */
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count != 0)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}
| 0
|
233,885
|
*/
static int wddx_stack_init(wddx_stack *stack)
{
stack->top = 0;
stack->elements = (void **) safe_emalloc(sizeof(void **), STACK_BLOCK_SIZE, 0);
stack->max = STACK_BLOCK_SIZE;
stack->varname = NULL;
stack->done = 0;
return SUCCESS;
| 0
|
359,257
|
/* Thread callback: drain a peer's output queue, writing packets until the
 * TCP buffer fills, up to BGP_WRITE_PACKET_MAX packets. Updates per-type
 * counters and handles the special NOTIFY shutdown path. Re-arms itself
 * when more data remains. Always returns 0 (thread convention). */
bgp_write (struct thread *thread)
{
  struct peer *peer;
  u_char type;
  struct stream *s; 
  int num;
  unsigned int count = 0;
  int write_errno;

  /* Yes first of all get peer pointer. */
  peer = THREAD_ARG (thread);
  peer->t_write = NULL;

  /* For non-blocking IO check. */
  if (peer->status == Connect)
    {
      bgp_connect_check (peer);
      return 0;
    }

  /* Nonblocking write until TCP output buffer is full.  */
  while (1)
    {
      int writenum;
      int val;

      s = bgp_write_packet (peer);
      if (! s)
	return 0;
      
      /* XXX: FIXME, the socket should be NONBLOCK from the start
       * status shouldnt need to be toggled on each write
       */
      val = fcntl (peer->fd, F_GETFL, 0);
      fcntl (peer->fd, F_SETFL, val|O_NONBLOCK);

      /* Number of bytes to be sent.  */
      writenum = stream_get_endp (s) - stream_get_getp (s);

      /* Call write() system call.  */
      num = write (peer->fd, STREAM_PNT (s), writenum);
      /* Save errno before fcntl() below can clobber it. */
      write_errno = errno;
      fcntl (peer->fd, F_SETFL, val);
      if (num <= 0)
	{
	  /* Partial write. */
	  if (write_errno == EWOULDBLOCK || write_errno == EAGAIN)
	    break;

	  BGP_EVENT_ADD (peer, TCP_fatal_error);
	  return 0;
	}
      if (num != writenum)
	{
	  /* Short write: remember how far we got and retry.
	   * NOTE(review): write_errno here is from a *successful* write;
	   * the EAGAIN check looks ineffective — confirm upstream intent. */
	  stream_forward_getp (s, num);

	  if (write_errno == EAGAIN)
	    break;

	  continue;
	}

      /* Retrieve BGP packet type. */
      stream_set_getp (s, BGP_MARKER_SIZE + 2);
      type = stream_getc (s);

      switch (type)
	{
	case BGP_MSG_OPEN:
	  peer->open_out++;
	  break;
	case BGP_MSG_UPDATE:
	  peer->update_out++;
	  break;
	case BGP_MSG_NOTIFY:
	  peer->notify_out++;
	  /* Double start timer. */
	  peer->v_start *= 2;

	  /* Overflow check. */
	  if (peer->v_start >= (60 * 2))
	    peer->v_start = (60 * 2);

	  /* Flush any existing events */
	  BGP_EVENT_ADD (peer, BGP_Stop);
	  return 0;
	case BGP_MSG_KEEPALIVE:
	  peer->keepalive_out++;
	  break;
	case BGP_MSG_ROUTE_REFRESH_NEW:
	case BGP_MSG_ROUTE_REFRESH_OLD:
	  peer->refresh_out++;
	  break;
	case BGP_MSG_CAPABILITY:
	  peer->dynamic_cap_out++;
	  break;
	}

      /* OK we send packet so delete it. */
      bgp_packet_delete (peer);

      if (++count >= BGP_WRITE_PACKET_MAX)
	break;
    }

  /* More packets queued: schedule another write pass. */
  if (bgp_write_proceed (peer))
    BGP_WRITE_ON (peer->t_write, bgp_write, peer->fd);

  return 0;
}
| 0
|
90,187
|
bool CellularNetwork::StartActivation() const {
  // The cros library must be loaded before the activation call can be made.
  return EnsureCrosLoaded() &&
         ActivateCellularModem(service_path_.c_str(), NULL);
}
| 0
|
247,140
|
const GF_FilterRegister * gf_fs_get_filter_register(GF_FilterSession *fsess, u32 idx)
{
	/* Thin accessor over the session's registry list; NULL when idx is
	 * out of range (gf_list_get semantics). */
	return (const GF_FilterRegister *) gf_list_get(fsess->registry, idx);
}
| 0
|
226,104
|
/* Allocate and initialize a Movie Extends Header ('mehd') box.
 * ISOM_DECL_BOX_ALLOC declares and zero-initializes `tmp` with the given
 * type tag. */
GF_Box *mehd_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_MovieExtendsHeaderBox, GF_ISOM_BOX_TYPE_MEHD);
	return (GF_Box *)tmp;
}
| 0
|
387,725
|
// Apply `cl` to every non-static field of this class, superclass fields
// first, with each class's own fields visited in ascending field offset.
void InstanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  InstanceKlass* super = superklass();
  if (super != NULL) {
    // Recurse so inherited fields are visited before our own.
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = java_fields_count();
  // In DebugInfo nonstatic fields are sorted by offset.
  // Scratch array of (offset, field-index) pairs, two ints per field.
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
  int j = 0;
  for (int i = 0; i < length; i += 1) {
    fd.reinitialize(this, i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;
    // _sort_Fn is defined in growableArray.hpp.
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.reinitialize(this, fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted);
}
| 0
|
338,187
|
// Parse an `if` expression from the binary: result type, condition (already
// on the expression stack), the true arm, and — when an Else separator was
// seen — the false arm. Throws if the construct is not terminated by End.
void WasmBinaryBuilder::visitIf(If* curr) {
  BYN_TRACE("zz node: If\n");
  startControlFlow(curr);
  curr->type = getType();
  curr->condition = popNonVoidExpression();
  curr->ifTrue = getBlockOrSingleton(curr->type);
  if (lastSeparator == BinaryConsts::Else) {
    curr->ifFalse = getBlockOrSingleton(curr->type);
  }
  curr->finalize(curr->type);
  if (lastSeparator != BinaryConsts::End) {
    throwError("if should end with End");
  }
}
| 0
|
442,565
|
/* Regression test: a cursor whose data is a *circular* chain of small
 * chunks must be rejected by the chunk walker (DoS guard) instead of
 * looping forever or over-allocating. */
static void test_circular_small_chunks(void)
{
    RedMemSlotInfo mem_info;
    RedCursorCmd *red_cursor_cmd;
    QXLCursorCmd cursor_cmd;
    QXLCursor *cursor;
    QXLDataChunk *chunks[2];

    init_meminfo(&mem_info);
    /* The parser is expected to log this warning when it detects the
     * excessive chunk count. */
    g_test_expect_message(G_LOG_DOMAIN, G_LOG_LEVEL_WARNING,
                          "*red_get_data_chunks_ptr: data split in too many chunks, avoiding DoS*");

    /* a circular list of small chunks should not be a problems */
    memset(&cursor_cmd, 0, sizeof(cursor_cmd));
    cursor_cmd.type = QXL_CURSOR_SET;

    cursor = create_chunk(SPICE_OFFSETOF(QXLCursor, chunk), 1, NULL, 0xaa);
    cursor->header.unique = 1;
    cursor->header.width = 128;
    cursor->header.height = 128;
    cursor->data_size = 128 * 128 * 4;

    /* Build the cycle: cursor.chunk -> chunks[0] -> cursor.chunk. */
    chunks[0] = create_chunk(0, 1, &cursor->chunk, 0xaa);
    chunks[0]->next_chunk = to_physical(&cursor->chunk);

    cursor_cmd.u.set.shape = to_physical(cursor);

    red_cursor_cmd = red_cursor_cmd_new(NULL, &mem_info, 0, to_physical(&cursor_cmd));
    if (red_cursor_cmd != NULL) {
        /* function does not return errors so there should be no data */
        g_assert_cmpuint(red_cursor_cmd->type, ==, QXL_CURSOR_SET);
        g_assert_cmpuint(red_cursor_cmd->u.set.position.x, ==, 0);
        g_assert_cmpuint(red_cursor_cmd->u.set.position.y, ==, 0);
        g_assert_cmpuint(red_cursor_cmd->u.set.shape.data_size, ==, 0);
        red_cursor_cmd_unref(red_cursor_cmd);
    }
    g_test_assert_expected_messages();

    g_free(cursor);
    g_free(chunks[0]);
    memslot_info_destroy(&mem_info);
}
| 0
|
418,795
|
do_mouse(
oparg_T *oap, // operator argument, can be NULL
int c, // K_LEFTMOUSE, etc
int dir, // Direction to 'put' if necessary
long count,
int fixindent) // PUT_FIXINDENT if fixing indent necessary
{
static int do_always = FALSE; // ignore 'mouse' setting next time
static int got_click = FALSE; // got a click some time back
int which_button; // MOUSE_LEFT, _MIDDLE or _RIGHT
int is_click = FALSE; // If FALSE it's a drag or release event
int is_drag = FALSE; // If TRUE it's a drag event
int jump_flags = 0; // flags for jump_to_mouse()
pos_T start_visual;
int moved; // Has cursor moved?
int in_status_line; // mouse in status line
static int in_tab_line = FALSE; // mouse clicked in tab line
int in_sep_line; // mouse in vertical separator line
int c1, c2;
#if defined(FEAT_FOLDING)
pos_T save_cursor;
#endif
win_T *old_curwin = curwin;
static pos_T orig_cursor;
colnr_T leftcol, rightcol;
pos_T end_visual;
int diff;
int old_active = VIsual_active;
int old_mode = VIsual_mode;
int regname;
#if defined(FEAT_FOLDING)
save_cursor = curwin->w_cursor;
#endif
// When GUI is active, always recognize mouse events, otherwise:
// - Ignore mouse event in normal mode if 'mouse' doesn't include 'n'.
// - Ignore mouse event in visual mode if 'mouse' doesn't include 'v'.
// - For command line and insert mode 'mouse' is checked before calling
// do_mouse().
if (do_always)
do_always = FALSE;
else
#ifdef FEAT_GUI
if (!gui.in_use)
#endif
{
if (VIsual_active)
{
if (!mouse_has(MOUSE_VISUAL))
return FALSE;
}
else if (State == MODE_NORMAL && !mouse_has(MOUSE_NORMAL))
return FALSE;
}
for (;;)
{
which_button = get_mouse_button(KEY2TERMCAP1(c), &is_click, &is_drag);
if (is_drag)
{
// If the next character is the same mouse event then use that
// one. Speeds up dragging the status line.
// Note: Since characters added to the stuff buffer in the code
// below need to come before the next character, do not do this
// when the current character was stuffed.
if (!KeyStuffed && vpeekc() != NUL)
{
int nc;
int save_mouse_row = mouse_row;
int save_mouse_col = mouse_col;
// Need to get the character, peeking doesn't get the actual
// one.
nc = safe_vgetc();
if (c == nc)
continue;
vungetc(nc);
mouse_row = save_mouse_row;
mouse_col = save_mouse_col;
}
}
break;
}
if (c == K_MOUSEMOVE)
{
// Mouse moved without a button pressed.
#ifdef FEAT_BEVAL_TERM
ui_may_remove_balloon();
if (p_bevalterm)
{
profile_setlimit(p_bdlay, &bevalexpr_due);
bevalexpr_due_set = TRUE;
}
#endif
#ifdef FEAT_PROP_POPUP
popup_handle_mouse_moved();
#endif
return FALSE;
}
#ifdef FEAT_MOUSESHAPE
// May have stopped dragging the status or separator line. The pointer is
// most likely still on the status or separator line.
if (!is_drag && drag_status_line)
{
drag_status_line = FALSE;
update_mouseshape(SHAPE_IDX_STATUS);
}
if (!is_drag && drag_sep_line)
{
drag_sep_line = FALSE;
update_mouseshape(SHAPE_IDX_VSEP);
}
#endif
// Ignore drag and release events if we didn't get a click.
if (is_click)
got_click = TRUE;
else
{
if (!got_click) // didn't get click, ignore
return FALSE;
if (!is_drag) // release, reset got_click
{
got_click = FALSE;
if (in_tab_line)
{
in_tab_line = FALSE;
return FALSE;
}
}
}
// CTRL right mouse button does CTRL-T
if (is_click && (mod_mask & MOD_MASK_CTRL) && which_button == MOUSE_RIGHT)
{
if (State & MODE_INSERT)
stuffcharReadbuff(Ctrl_O);
if (count > 1)
stuffnumReadbuff(count);
stuffcharReadbuff(Ctrl_T);
got_click = FALSE; // ignore drag&release now
return FALSE;
}
// CTRL only works with left mouse button
if ((mod_mask & MOD_MASK_CTRL) && which_button != MOUSE_LEFT)
return FALSE;
// When a modifier is down, ignore drag and release events, as well as
// multiple clicks and the middle mouse button.
// Accept shift-leftmouse drags when 'mousemodel' is "popup.*".
if ((mod_mask & (MOD_MASK_SHIFT | MOD_MASK_CTRL | MOD_MASK_ALT
| MOD_MASK_META))
&& (!is_click
|| (mod_mask & MOD_MASK_MULTI_CLICK)
|| which_button == MOUSE_MIDDLE)
&& !((mod_mask & (MOD_MASK_SHIFT|MOD_MASK_ALT))
&& mouse_model_popup()
&& which_button == MOUSE_LEFT)
&& !((mod_mask & MOD_MASK_ALT)
&& !mouse_model_popup()
&& which_button == MOUSE_RIGHT)
)
return FALSE;
// If the button press was used as the movement command for an operator
// (eg "d<MOUSE>"), or it is the middle button that is held down, ignore
// drag/release events.
if (!is_click && which_button == MOUSE_MIDDLE)
return FALSE;
if (oap != NULL)
regname = oap->regname;
else
regname = 0;
// Middle mouse button does a 'put' of the selected text
if (which_button == MOUSE_MIDDLE)
{
if (State == MODE_NORMAL)
{
// If an operator was pending, we don't know what the user wanted
// to do. Go back to normal mode: Clear the operator and beep().
if (oap != NULL && oap->op_type != OP_NOP)
{
clearopbeep(oap);
return FALSE;
}
// If visual was active, yank the highlighted text and put it
// before the mouse pointer position.
// In Select mode replace the highlighted text with the clipboard.
if (VIsual_active)
{
if (VIsual_select)
{
stuffcharReadbuff(Ctrl_G);
stuffReadbuff((char_u *)"\"+p");
}
else
{
stuffcharReadbuff('y');
stuffcharReadbuff(K_MIDDLEMOUSE);
}
do_always = TRUE; // ignore 'mouse' setting next time
return FALSE;
}
// The rest is below jump_to_mouse()
}
else if ((State & MODE_INSERT) == 0)
return FALSE;
// Middle click in insert mode doesn't move the mouse, just insert the
// contents of a register. '.' register is special, can't insert that
// with do_put().
// Also paste at the cursor if the current mode isn't in 'mouse' (only
// happens for the GUI).
if ((State & MODE_INSERT) || !mouse_has(MOUSE_NORMAL))
{
if (regname == '.')
insert_reg(regname, TRUE);
else
{
#ifdef FEAT_CLIPBOARD
if (clip_star.available && regname == 0)
regname = '*';
#endif
if ((State & REPLACE_FLAG) && !yank_register_mline(regname))
insert_reg(regname, TRUE);
else
{
do_put(regname, NULL, BACKWARD, 1L,
fixindent | PUT_CURSEND);
// Repeat it with CTRL-R CTRL-O r or CTRL-R CTRL-P r
AppendCharToRedobuff(Ctrl_R);
AppendCharToRedobuff(fixindent ? Ctrl_P : Ctrl_O);
AppendCharToRedobuff(regname == 0 ? '"' : regname);
}
}
return FALSE;
}
}
// When dragging or button-up stay in the same window.
if (!is_click)
jump_flags |= MOUSE_FOCUS | MOUSE_DID_MOVE;
start_visual.lnum = 0;
if (TabPageIdxs != NULL) // only when initialized
{
// Check for clicking in the tab page line.
if (mouse_row == 0 && firstwin->w_winrow > 0)
{
if (is_drag)
{
if (in_tab_line)
{
c1 = TabPageIdxs[mouse_col];
tabpage_move(c1 <= 0 ? 9999 : c1 < tabpage_index(curtab)
? c1 - 1 : c1);
}
return FALSE;
}
// click in a tab selects that tab page
if (is_click
# ifdef FEAT_CMDWIN
&& cmdwin_type == 0
# endif
&& mouse_col < Columns)
{
in_tab_line = TRUE;
c1 = TabPageIdxs[mouse_col];
if (c1 >= 0)
{
if ((mod_mask & MOD_MASK_MULTI_CLICK) == MOD_MASK_2CLICK)
{
// double click opens new page
end_visual_mode_keep_button();
tabpage_new();
tabpage_move(c1 == 0 ? 9999 : c1 - 1);
}
else
{
// Go to specified tab page, or next one if not clicking
// on a label.
goto_tabpage(c1);
// It's like clicking on the status line of a window.
if (curwin != old_curwin)
end_visual_mode_keep_button();
}
}
else
{
tabpage_T *tp;
// Close the current or specified tab page.
if (c1 == -999)
tp = curtab;
else
tp = find_tabpage(-c1);
if (tp == curtab)
{
if (first_tabpage->tp_next != NULL)
tabpage_close(FALSE);
}
else if (tp != NULL)
tabpage_close_other(tp, FALSE);
}
}
return TRUE;
}
else if (is_drag && in_tab_line)
{
c1 = TabPageIdxs[mouse_col];
tabpage_move(c1 <= 0 ? 9999 : c1 - 1);
return FALSE;
}
}
// When 'mousemodel' is "popup" or "popup_setpos", translate mouse events:
// right button up -> pop-up menu
// shift-left button -> right button
// alt-left button -> alt-right button
if (mouse_model_popup())
{
if (which_button == MOUSE_RIGHT
&& !(mod_mask & (MOD_MASK_SHIFT | MOD_MASK_CTRL)))
{
#ifdef USE_POPUP_SETPOS
# ifdef FEAT_GUI
if (gui.in_use)
{
# if defined(FEAT_GUI_MOTIF) || defined(FEAT_GUI_GTK) \
|| defined(FEAT_GUI_PHOTON)
if (!is_click)
// Ignore right button release events, only shows the popup
// menu on the button down event.
return FALSE;
# endif
# if defined(FEAT_GUI_MSWIN) || defined(FEAT_GUI_HAIKU)
if (is_click || is_drag)
// Ignore right button down and drag mouse events. Windows
// only shows the popup menu on the button up event.
return FALSE;
# endif
}
# endif
# if defined(FEAT_GUI) && defined(FEAT_TERM_POPUP_MENU)
else
# endif
# if defined(FEAT_TERM_POPUP_MENU)
if (!is_click)
// Ignore right button release events, only shows the popup
// menu on the button down event.
return FALSE;
#endif
jump_flags = 0;
if (STRCMP(p_mousem, "popup_setpos") == 0)
{
// First set the cursor position before showing the popup
// menu.
if (VIsual_active)
{
pos_T m_pos;
// set MOUSE_MAY_STOP_VIS if we are outside the
// selection or the current window (might have false
// negative here)
if (mouse_row < curwin->w_winrow
|| mouse_row
> (curwin->w_winrow + curwin->w_height))
jump_flags = MOUSE_MAY_STOP_VIS;
else if (get_fpos_of_mouse(&m_pos) != IN_BUFFER)
jump_flags = MOUSE_MAY_STOP_VIS;
else
{
if ((LT_POS(curwin->w_cursor, VIsual)
&& (LT_POS(m_pos, curwin->w_cursor)
|| LT_POS(VIsual, m_pos)))
|| (LT_POS(VIsual, curwin->w_cursor)
&& (LT_POS(m_pos, VIsual)
|| LT_POS(curwin->w_cursor, m_pos))))
{
jump_flags = MOUSE_MAY_STOP_VIS;
}
else if (VIsual_mode == Ctrl_V)
{
getvcols(curwin, &curwin->w_cursor, &VIsual,
&leftcol, &rightcol);
getvcol(curwin, &m_pos, NULL, &m_pos.col, NULL);
if (m_pos.col < leftcol || m_pos.col > rightcol)
jump_flags = MOUSE_MAY_STOP_VIS;
}
}
}
else
jump_flags = MOUSE_MAY_STOP_VIS;
}
if (jump_flags)
{
jump_flags = jump_to_mouse(jump_flags, NULL, which_button);
update_curbuf(VIsual_active ? UPD_INVERTED : UPD_VALID);
setcursor();
out_flush(); // Update before showing popup menu
}
# ifdef FEAT_MENU
show_popupmenu();
got_click = FALSE; // ignore release events
# endif
return (jump_flags & CURSOR_MOVED) != 0;
#else
return FALSE;
#endif
}
if (which_button == MOUSE_LEFT
&& (mod_mask & (MOD_MASK_SHIFT|MOD_MASK_ALT)))
{
which_button = MOUSE_RIGHT;
mod_mask &= ~MOD_MASK_SHIFT;
}
}
if ((State & (MODE_NORMAL | MODE_INSERT))
&& !(mod_mask & (MOD_MASK_SHIFT | MOD_MASK_CTRL)))
{
if (which_button == MOUSE_LEFT)
{
if (is_click)
{
// stop Visual mode for a left click in a window, but not when
// on a status line
if (VIsual_active)
jump_flags |= MOUSE_MAY_STOP_VIS;
}
else if (mouse_has(MOUSE_VISUAL))
jump_flags |= MOUSE_MAY_VIS;
}
else if (which_button == MOUSE_RIGHT)
{
if (is_click && VIsual_active)
{
// Remember the start and end of visual before moving the
// cursor.
if (LT_POS(curwin->w_cursor, VIsual))
{
start_visual = curwin->w_cursor;
end_visual = VIsual;
}
else
{
start_visual = VIsual;
end_visual = curwin->w_cursor;
}
}
jump_flags |= MOUSE_FOCUS;
if (mouse_has(MOUSE_VISUAL))
jump_flags |= MOUSE_MAY_VIS;
}
}
// If an operator is pending, ignore all drags and releases until the
// next mouse click.
if (!is_drag && oap != NULL && oap->op_type != OP_NOP)
{
got_click = FALSE;
oap->motion_type = MCHAR;
}
// When releasing the button let jump_to_mouse() know.
if (!is_click && !is_drag)
jump_flags |= MOUSE_RELEASED;
// JUMP!
jump_flags = jump_to_mouse(jump_flags,
oap == NULL ? NULL : &(oap->inclusive), which_button);
#ifdef FEAT_MENU
// A click in the window toolbar has no side effects.
if (jump_flags & MOUSE_WINBAR)
return FALSE;
#endif
moved = (jump_flags & CURSOR_MOVED);
in_status_line = (jump_flags & IN_STATUS_LINE);
in_sep_line = (jump_flags & IN_SEP_LINE);
#ifdef FEAT_NETBEANS_INTG
if (isNetbeansBuffer(curbuf)
&& !(jump_flags & (IN_STATUS_LINE | IN_SEP_LINE)))
{
int key = KEY2TERMCAP1(c);
if (key == (int)KE_LEFTRELEASE || key == (int)KE_MIDDLERELEASE
|| key == (int)KE_RIGHTRELEASE)
netbeans_button_release(which_button);
}
#endif
// When jumping to another window, clear a pending operator. That's a bit
// friendlier than beeping and not jumping to that window.
if (curwin != old_curwin && oap != NULL && oap->op_type != OP_NOP)
clearop(oap);
#ifdef FEAT_FOLDING
if (mod_mask == 0
&& !is_drag
&& (jump_flags & (MOUSE_FOLD_CLOSE | MOUSE_FOLD_OPEN))
&& which_button == MOUSE_LEFT)
{
// open or close a fold at this line
if (jump_flags & MOUSE_FOLD_OPEN)
openFold(curwin->w_cursor.lnum, 1L);
else
closeFold(curwin->w_cursor.lnum, 1L);
// don't move the cursor if still in the same window
if (curwin == old_curwin)
curwin->w_cursor = save_cursor;
}
#endif
#if defined(FEAT_CLIPBOARD) && defined(FEAT_CMDWIN)
if ((jump_flags & IN_OTHER_WIN) && !VIsual_active && clip_star.available)
{
clip_modeless(which_button, is_click, is_drag);
return FALSE;
}
#endif
// Set global flag that we are extending the Visual area with mouse
// dragging; temporarily minimize 'scrolloff'.
if (VIsual_active && is_drag && get_scrolloff_value())
{
// In the very first line, allow scrolling one line
if (mouse_row == 0)
mouse_dragging = 2;
else
mouse_dragging = 1;
}
// When dragging the mouse above the window, scroll down.
if (is_drag && mouse_row < 0 && !in_status_line)
{
scroll_redraw(FALSE, 1L);
mouse_row = 0;
}
if (start_visual.lnum) // right click in visual mode
{
// When ALT is pressed make Visual mode blockwise.
if (mod_mask & MOD_MASK_ALT)
VIsual_mode = Ctrl_V;
// In Visual-block mode, divide the area in four, pick up the corner
// that is in the quarter that the cursor is in.
if (VIsual_mode == Ctrl_V)
{
getvcols(curwin, &start_visual, &end_visual, &leftcol, &rightcol);
if (curwin->w_curswant > (leftcol + rightcol) / 2)
end_visual.col = leftcol;
else
end_visual.col = rightcol;
if (curwin->w_cursor.lnum >=
(start_visual.lnum + end_visual.lnum) / 2)
end_visual.lnum = start_visual.lnum;
// move VIsual to the right column
start_visual = curwin->w_cursor; // save the cursor pos
curwin->w_cursor = end_visual;
coladvance(end_visual.col);
VIsual = curwin->w_cursor;
curwin->w_cursor = start_visual; // restore the cursor
}
else
{
// If the click is before the start of visual, change the start.
// If the click is after the end of visual, change the end. If
// the click is inside the visual, change the closest side.
if (LT_POS(curwin->w_cursor, start_visual))
VIsual = end_visual;
else if (LT_POS(end_visual, curwin->w_cursor))
VIsual = start_visual;
else
{
// In the same line, compare column number
if (end_visual.lnum == start_visual.lnum)
{
if (curwin->w_cursor.col - start_visual.col >
end_visual.col - curwin->w_cursor.col)
VIsual = start_visual;
else
VIsual = end_visual;
}
// In different lines, compare line number
else
{
diff = (curwin->w_cursor.lnum - start_visual.lnum) -
(end_visual.lnum - curwin->w_cursor.lnum);
if (diff > 0) // closest to end
VIsual = start_visual;
else if (diff < 0) // closest to start
VIsual = end_visual;
else // in the middle line
{
if (curwin->w_cursor.col <
(start_visual.col + end_visual.col) / 2)
VIsual = end_visual;
else
VIsual = start_visual;
}
}
}
}
}
// If Visual mode started in insert mode, execute "CTRL-O"
else if ((State & MODE_INSERT) && VIsual_active)
stuffcharReadbuff(Ctrl_O);
// Middle mouse click: Put text before cursor.
if (which_button == MOUSE_MIDDLE)
{
#ifdef FEAT_CLIPBOARD
if (clip_star.available && regname == 0)
regname = '*';
#endif
if (yank_register_mline(regname))
{
if (mouse_past_bottom)
dir = FORWARD;
}
else if (mouse_past_eol)
dir = FORWARD;
if (fixindent)
{
c1 = (dir == BACKWARD) ? '[' : ']';
c2 = 'p';
}
else
{
c1 = (dir == FORWARD) ? 'p' : 'P';
c2 = NUL;
}
prep_redo(regname, count, NUL, c1, NUL, c2, NUL);
// Remember where the paste started, so in edit() Insstart can be set
// to this position
if (restart_edit != 0)
where_paste_started = curwin->w_cursor;
do_put(regname, NULL, dir, count, fixindent | PUT_CURSEND);
}
#if defined(FEAT_QUICKFIX)
// Ctrl-Mouse click or double click in a quickfix window jumps to the
// error under the mouse pointer.
else if (((mod_mask & MOD_MASK_CTRL)
|| (mod_mask & MOD_MASK_MULTI_CLICK) == MOD_MASK_2CLICK)
&& bt_quickfix(curbuf))
{
if (curwin->w_llist_ref == NULL) // quickfix window
do_cmdline_cmd((char_u *)".cc");
else // location list window
do_cmdline_cmd((char_u *)".ll");
got_click = FALSE; // ignore drag&release now
}
#endif
// Ctrl-Mouse click (or double click in a help window) jumps to the tag
// under the mouse pointer.
else if ((mod_mask & MOD_MASK_CTRL) || (curbuf->b_help
&& (mod_mask & MOD_MASK_MULTI_CLICK) == MOD_MASK_2CLICK))
{
if (State & MODE_INSERT)
stuffcharReadbuff(Ctrl_O);
stuffcharReadbuff(Ctrl_RSB);
got_click = FALSE; // ignore drag&release now
}
// Shift-Mouse click searches for the next occurrence of the word under
// the mouse pointer
else if ((mod_mask & MOD_MASK_SHIFT))
{
if ((State & MODE_INSERT) || (VIsual_active && VIsual_select))
stuffcharReadbuff(Ctrl_O);
if (which_button == MOUSE_LEFT)
stuffcharReadbuff('*');
else // MOUSE_RIGHT
stuffcharReadbuff('#');
}
// Handle double clicks, unless on status line
else if (in_status_line)
{
#ifdef FEAT_MOUSESHAPE
if ((is_drag || is_click) && !drag_status_line)
{
drag_status_line = TRUE;
update_mouseshape(-1);
}
#endif
}
else if (in_sep_line)
{
#ifdef FEAT_MOUSESHAPE
if ((is_drag || is_click) && !drag_sep_line)
{
drag_sep_line = TRUE;
update_mouseshape(-1);
}
#endif
}
else if ((mod_mask & MOD_MASK_MULTI_CLICK)
&& (State & (MODE_NORMAL | MODE_INSERT))
&& mouse_has(MOUSE_VISUAL))
{
if (is_click || !VIsual_active)
{
if (VIsual_active)
orig_cursor = VIsual;
else
{
check_visual_highlight();
VIsual = curwin->w_cursor;
orig_cursor = VIsual;
VIsual_active = TRUE;
VIsual_reselect = TRUE;
// start Select mode if 'selectmode' contains "mouse"
may_start_select('o');
setmouse();
}
if ((mod_mask & MOD_MASK_MULTI_CLICK) == MOD_MASK_2CLICK)
{
// Double click with ALT pressed makes it blockwise.
if (mod_mask & MOD_MASK_ALT)
VIsual_mode = Ctrl_V;
else
VIsual_mode = 'v';
}
else if ((mod_mask & MOD_MASK_MULTI_CLICK) == MOD_MASK_3CLICK)
VIsual_mode = 'V';
else if ((mod_mask & MOD_MASK_MULTI_CLICK) == MOD_MASK_4CLICK)
VIsual_mode = Ctrl_V;
#ifdef FEAT_CLIPBOARD
// Make sure the clipboard gets updated. Needed because start and
// end may still be the same, and the selection needs to be owned
clip_star.vmode = NUL;
#endif
}
// A double click selects a word or a block.
if ((mod_mask & MOD_MASK_MULTI_CLICK) == MOD_MASK_2CLICK)
{
pos_T *pos = NULL;
int gc;
if (is_click)
{
// If the character under the cursor (skipping white space) is
// not a word character, try finding a match and select a (),
// {}, [], #if/#endif, etc. block.
end_visual = curwin->w_cursor;
while (gc = gchar_pos(&end_visual), VIM_ISWHITE(gc))
inc(&end_visual);
if (oap != NULL)
oap->motion_type = MCHAR;
if (oap != NULL
&& VIsual_mode == 'v'
&& !vim_iswordc(gchar_pos(&end_visual))
&& EQUAL_POS(curwin->w_cursor, VIsual)
&& (pos = findmatch(oap, NUL)) != NULL)
{
curwin->w_cursor = *pos;
if (oap->motion_type == MLINE)
VIsual_mode = 'V';
else if (*p_sel == 'e')
{
if (LT_POS(curwin->w_cursor, VIsual))
++VIsual.col;
else
++curwin->w_cursor.col;
}
}
}
if (pos == NULL && (is_click || is_drag))
{
// When not found a match or when dragging: extend to include
// a word.
if (LT_POS(curwin->w_cursor, orig_cursor))
{
find_start_of_word(&curwin->w_cursor);
find_end_of_word(&VIsual);
}
else
{
find_start_of_word(&VIsual);
if (*p_sel == 'e' && *ml_get_cursor() != NUL)
curwin->w_cursor.col +=
(*mb_ptr2len)(ml_get_cursor());
find_end_of_word(&curwin->w_cursor);
}
}
curwin->w_set_curswant = TRUE;
}
if (is_click)
redraw_curbuf_later(UPD_INVERTED); // update the inversion
}
else if (VIsual_active && !old_active)
{
if (mod_mask & MOD_MASK_ALT)
VIsual_mode = Ctrl_V;
else
VIsual_mode = 'v';
}
// If Visual mode changed show it later.
if ((!VIsual_active && old_active && mode_displayed)
|| (VIsual_active && p_smd && msg_silent == 0
&& (!old_active || VIsual_mode != old_mode)))
redraw_cmdline = TRUE;
return moved;
}
| 0
|
310,332
|
list_server_status_v1(smartlist_t *routers, char **router_status_out,
                      int for_controller)
{
  /* List of entries in a router-status style: An optional !, then an optional
   * equals-suffixed nickname, then a dollar-prefixed hexdigest. */
  smartlist_t *rs_entries;
  time_t now = time(NULL);
  /* Routers published before this cutoff are considered too old to list
   * (except when answering a controller, which sees everything). */
  time_t cutoff = now - ROUTER_MAX_AGE_TO_PUBLISH;
  or_options_t *options = get_options();
  /* We include v2 dir auths here too, because they need to answer
   * controllers. Eventually we'll deprecate this whole function;
   * see also networkstatus_getinfo_by_purpose(). */
  int authdir = authdir_mode_publishes_statuses(options);
  tor_assert(router_status_out);

  rs_entries = smartlist_create();

  SMARTLIST_FOREACH_BEGIN(routers, routerinfo_t *, ri) {
    if (authdir) {
      /* Update router status in routerinfo_t. */
      dirserv_set_router_is_running(ri, now);
    }
    if (for_controller) {
      /* Controller format: "!" prefix for non-running routers, then the
       * verbose nickname ($digest~nickname or $digest=nickname). */
      char name_buf[MAX_VERBOSE_NICKNAME_LEN+2];
      char *cp = name_buf;
      if (!ri->is_running)
        *cp++ = '!';
      router_get_verbose_nickname(cp, ri);
      smartlist_add(rs_entries, tor_strdup(name_buf));
    } else if (ri->cache_info.published_on >= cutoff) {
      smartlist_add(rs_entries, list_single_server_status(ri, ri->is_running));
    }
  } SMARTLIST_FOREACH_END(ri);

  /* Join all entries with single spaces into the output string; the
   * caller owns (and must free) *router_status_out. */
  *router_status_out = smartlist_join_strings(rs_entries, " ", 0, NULL);

  SMARTLIST_FOREACH(rs_entries, char *, cp, tor_free(cp));
  smartlist_free(rs_entries);
  /* Always succeeds. */
  return 0;
}
| 0
|
317,133
|
/*
 * Look up the option token that matches the string s of length l.
 * For value-taking tokens ("name=value") *arg is pointed at the text
 * after the '='; flag tokens must match the whole string exactly.
 * Returns the token's opt code, or Opt_error when nothing matches.
 */
static int match_opt_prefix(char *s, int l, char **arg)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(tokens); idx++) {
		size_t name_len = tokens[idx].len;

		/* The token name must be a prefix of s. */
		if (name_len > l)
			continue;
		if (memcmp(s, tokens[idx].name, name_len) != 0)
			continue;
		if (tokens[idx].has_arg) {
			/* Require "name=value": a '=' right after the name. */
			if (name_len == l || s[name_len] != '=')
				continue;
			*arg = s + name_len + 1;
		} else if (name_len != l) {
			/* A flag token must consume the entire string. */
			continue;
		}
		return tokens[idx].opt;
	}
	return Opt_error;
}
| 0
|
256,162
|
// Returns true iff v compares equal to zero (its boolean conversion is false).
ALWAYS_INLINE bool IsZero(bfloat16 v) {
  return v ? false : true;
}
| 0
|
312,444
|
qf_history(exarg_T *eap)
{
    // Handle ":chistory" / ":lhistory": list the quickfix/location list
    // stack, or with a count jump to the list with that number.
    qf_info_T	*qi = qf_cmd_get_stack(eap, FALSE);
    int		i;

    if (eap->addr_count > 0)
    {
	// A count was given: switch to that list in the stack.
	if (qi == NULL)
	{
	    emsg(_(e_no_location_list));
	    return;
	}

	// Jump to the specified quickfix list
	if (eap->line2 > 0 && eap->line2 <= qi->qf_listcount)
	{
	    // Lists are 1-based for the user, 0-based internally.
	    qi->qf_curlist = eap->line2 - 1;
	    qf_msg(qi, qi->qf_curlist, "");
	    qf_update_buffer(qi, NULL);
	}
	else
	    emsg(_(e_invalid_range));

	return;
    }

    // No count: print every list, marking the current one with "> ".
    if (qf_stack_empty(qi))
	msg(_("No entries"));
    else
	for (i = 0; i < qi->qf_listcount; ++i)
	    qf_msg(qi, i, i == qi->qf_curlist ? "> " : "  ");
}
| 0
|
373,641
|
find_script_callback(char_u *fname, void *cookie)
{
    // Callback used while locating a script by name: returns (through
    // "cookie", an int* receiving the script ID) the SID of "fname",
    // creating a new not-yet-loaded scriptitem when it doesn't exist.
    int		sid;
    int		error = OK;
    int		*ret_sid = cookie;

    sid = find_script_by_name(fname);
    if (sid < 0)
    {
	// script does not exist yet, create a new scriptitem
	sid = get_new_scriptitem(&error);
	if (error == OK)
	{
	    scriptitem_T *si = SCRIPT_ITEM(sid);

	    si->sn_name = vim_strsave(fname);
	    // Mark as allocated but not sourced yet.
	    si->sn_state = SN_STATE_NOT_LOADED;
	}
    }
    *ret_sid = sid;
}
| 0
|
229,273
|
cql_server::connection::read_frame() {
    // Read one CQL binary frame header from the connection.
    // Returns an empty optional on clean EOF.
    using ret_type = std::optional<cql_binary_frame_v3>;
    if (!_version) {
        // We don't know the frame size before reading the first frame,
        // so read just one byte, and then read the rest of the frame.
        return _read_buf.read_exactly(1).then([this] (temporary_buffer<char> buf) {
            if (buf.empty()) {
                // Connection closed before any data: no frame.
                return make_ready_future<ret_type>();
            }
            _version = buf[0];
            init_cql_serialization_format();
            if (_version < 1 || _version > current_version) {
                // Report the bogus version but fall back to the current one
                // so the error response itself can be framed correctly.
                auto client_version = _version;
                _version = current_version;
                throw exceptions::protocol_exception(format("Invalid or unsupported protocol version: {:d}", client_version));
            }
            // Read the remaining header bytes and re-assemble the full
            // header (version byte + tail) before parsing.
            return _read_buf.read_exactly(frame_size() - 1).then([this] (temporary_buffer<char> tail) {
                temporary_buffer<char> full(frame_size());
                full.get_write()[0] = _version;
                std::copy(tail.get(), tail.get() + tail.size(), full.get_write() + 1);
                auto frame = parse_frame(std::move(full));
                // This is the very first frame, so reject obviously incorrect frames, to
                // avoid allocating large amounts of memory for the message body
                if (frame.length > 100'000) {
                    // The STARTUP message body is a [string map] containing just a few options,
                    // so it should be smaller that 100kB. See #4366.
                    throw exceptions::protocol_exception(format("Initial message size too large ({:d}), rejecting as invalid", frame.length));
                }
                return make_ready_future<ret_type>(frame);
            });
        });
    } else {
        // Not the first frame, so we know the size.
        return _read_buf.read_exactly(frame_size()).then([this] (temporary_buffer<char> buf) {
            if (buf.empty()) {
                return make_ready_future<ret_type>();
            }
            return make_ready_future<ret_type>(parse_frame(std::move(buf)));
        });
    }
}
| 0
|
225,864
|
// Allocate a LASeR sample entry box ('lsr1') and initialize the common
// sample-entry fields. Returns NULL on allocation failure (via the macro).
GF_Box *lsr1_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_LASeRSampleEntryBox, GF_ISOM_BOX_TYPE_LSR1);
	gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp);
	return (GF_Box *)tmp;
| 0
|
198,523
|
void Compute(OpKernelContext* context) override {
// Get the stamp token.
const Tensor* stamp_token_t;
OP_REQUIRES_OK(context, context->input("stamp_token", &stamp_token_t));
int64_t stamp_token = stamp_token_t->scalar<int64>()();
// Get the tree ensemble proto.
const Tensor* tree_ensemble_serialized_t;
OP_REQUIRES_OK(context, context->input("tree_ensemble_serialized",
&tree_ensemble_serialized_t));
std::unique_ptr<BoostedTreesEnsembleResource> result(
new BoostedTreesEnsembleResource());
if (!result->InitFromSerialized(
tree_ensemble_serialized_t->scalar<tstring>()(), stamp_token)) {
result->Unref();
OP_REQUIRES(
context, false,
errors::InvalidArgument("Unable to parse tree ensemble proto."));
}
// Only create one, if one does not exist already. Report status for all
// other exceptions.
auto status =
CreateResource(context, HandleFromInput(context, 0), result.release());
if (status.code() != tensorflow::error::ALREADY_EXISTS) {
OP_REQUIRES_OK(context, status);
}
}
| 1
|
238,486
|
/* Report why speculative-execution sanitation of a pointer ALU insn failed
 * and reject the program. Always returns -EACCES.
 */
static int sanitize_err(struct bpf_verifier_env *env,
			const struct bpf_insn *insn, int reason,
			const struct bpf_reg_state *off_reg,
			const struct bpf_reg_state *dst_reg)
{
	static const char *err = "pointer arithmetic with it prohibited for !root";
	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
	u32 dst = insn->dst_reg, src = insn->src_reg;

	switch (reason) {
	case REASON_BOUNDS:
		/* Name the scalar operand: it is dst when off_reg is the
		 * destination register, otherwise src.
		 */
		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
			off_reg == dst_reg ? dst : src, err);
		break;
	case REASON_TYPE:
		/* Inverse of the above: name the pointer operand. */
		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
			off_reg == dst_reg ? src : dst, err);
		break;
	case REASON_PATHS:
		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
			dst, op, err);
		break;
	case REASON_LIMIT:
		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
			dst, op, err);
		break;
	case REASON_STACK:
		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
			dst, err);
		break;
	default:
		/* Unknown reason code indicates a verifier bug, not a
		 * problem with the program being verified.
		 */
		verbose(env, "verifier internal error: unknown reason (%d)\n",
			reason);
		break;
	}

	return -EACCES;
}
| 0
|
261,211
|
/* Read the remaining payload of an incoming PUBLISH in rx-buffer sized
 * chunks, invoking the client's message callback for each chunk.
 * Resumable: on MQTT_CODE_CONTINUE (non-blocking mode) the function is
 * re-entered and continues from publish->buffer_pos.
 */
static int MqttClient_Publish_ReadPayload(MqttClient* client,
    MqttPublish* publish, int timeout_ms)
{
    int rc = MQTT_CODE_SUCCESS;
    byte msg_done;

    /* Handle packet callback and read remaining payload */
    do {
        /* Determine if message is done */
        msg_done = ((publish->buffer_pos + publish->buffer_len) >=
            publish->total_len) ? 1 : 0;

        if (publish->buffer_new) {
            /* Issue callback for new message (first time only) */
            if (client->msg_cb) {
                /* if using the temp publish message buffer,
                   then populate message context with client context */
                if (publish->ctx == NULL && &client->msg.publish == publish) {
                    publish->ctx = client->ctx;
                }
                rc = client->msg_cb(client, publish, publish->buffer_new,
                        msg_done);
                if (rc != MQTT_CODE_SUCCESS) {
                    return rc;
                };
            }

            /* Reset topic name since valid on new message only */
            publish->topic_name = NULL;
            publish->topic_name_len = 0;
            publish->buffer_new = 0;
        }

        /* Read payload */
        if (!msg_done) {
            int msg_len;

            /* add last length to position and reset len */
            publish->buffer_pos += publish->buffer_len;
            publish->buffer_len = 0;

            /* set state to reading payload */
            publish->stat = MQTT_MSG_READ_PAYLOAD;

            /* Clamp the next read to what fits in the rx buffer. */
            msg_len = (publish->total_len - publish->buffer_pos);
            if (msg_len > client->rx_buf_len) {
                msg_len = client->rx_buf_len;
            }

            /* make sure there is something to read */
            if (msg_len > 0) {
            #ifdef WOLFMQTT_TEST_NONBLOCK
                /* Test hook: force a CONTINUE on every other read to
                   exercise the resume path. */
                if (!testNbAlt) {
                    testNbAlt = 1;
                    return MQTT_CODE_CONTINUE;
                }
                testNbAlt = 0;
            #endif

                rc = MqttSocket_Read(client, client->rx_buf, msg_len,
                        timeout_ms);
                if (rc < 0) {
                    break;
                }

                /* Update message */
                publish->buffer = client->rx_buf;
                /* MqttSocket_Read returns the byte count on success. */
                publish->buffer_len = rc;
                rc = MQTT_CODE_SUCCESS; /* mark success */

                msg_done = ((publish->buffer_pos + publish->buffer_len) >=
                    publish->total_len) ? 1 : 0;

                /* Issue callback for additional publish payload */
                if (client->msg_cb) {
                    rc = client->msg_cb(client, publish, publish->buffer_new,
                            msg_done);
                    if (rc != MQTT_CODE_SUCCESS) {
                        return rc;
                    };
                }
            }
        }
    } while (!msg_done);

    return rc;
}
| 0
|
353,154
|
// End the transparency group started for an image-mask soft mask: copy the
// stored soft-mask data into the bitmap's alpha channel, close the group,
// restore the base matrix translation, and composite the group's result.
void SplashOutputDev::unsetSoftMaskFromImageMask(GfxState *state, double *baseMatrix) {
  double bbox[4] = {0,0,1,1}; // dummy

  /* transfer mask to alpha channel! */
  // memcpy(maskBitmap->getAlphaPtr(), maskBitmap->getDataPtr(), bitmap->getRowSize() * bitmap->getHeight());
  // memset(maskBitmap->getDataPtr(), 0, bitmap->getRowSize() * bitmap->getHeight());
  if (transpGroupStack->softmask != nullptr) {
    unsigned char *dest = bitmap->getAlphaPtr();
    unsigned char *src = transpGroupStack->softmask->getDataPtr();
    // Byte-wise copy of the mask bitmap into the alpha plane.
    // NOTE(review): assumes the softmask row size/height match the
    // destination alpha buffer — confirm against the group setup code.
    for (int c= 0; c < transpGroupStack->softmask->getRowSize() * transpGroupStack->softmask->getHeight(); c++) {
      dest[c] = src[c];
    }
    delete transpGroupStack->softmask;
    transpGroupStack->softmask = nullptr;
  }
  endTransparencyGroup(state);
  // Undo the translation applied when the group was begun.
  baseMatrix[4] += transpGroupStack->tx;
  baseMatrix[5] += transpGroupStack->ty;
  paintTransparencyGroup(state, bbox);
}
| 0
|
238,800
|
set_search_direction(int cdir)
{
    // Store the direction of the last search ('/' forward or '?' backward)
    // in the first search-pattern entry's offset info.
    spats[0].off.dir = cdir;
}
| 0
|
415,213
|
/* Assuan command handler for PKAUTH: authenticate with the card using the
   key named on LINE, signing the data previously set via SETDATA, and send
   the resulting signature back to the client. */
cmd_pkauth (assuan_context_t ctx, char *line)
{
  ctrl_t ctrl = assuan_get_pointer (ctx);
  int rc;
  unsigned char *outdata;
  size_t outdatalen;
  char *keyidstr;

  if ( IS_LOCKED (ctrl) )
    return gpg_error (GPG_ERR_LOCKED);

  if ((rc = open_card (ctrl, NULL)))
    return rc;

  if (!ctrl->app_ctx)
    return gpg_error (GPG_ERR_UNSUPPORTED_OPERATION);

 /* We have to use a copy of the key ID because the function may use
    the pin_cb which in turn uses the assuan line buffer and thus
    overwriting the original line with the keyid */
  keyidstr = xtrystrdup (line);
  if (!keyidstr)
    return out_of_core ();

  rc = app_auth (ctrl->app_ctx,
                 keyidstr,
                 pin_cb, ctx,
                 ctrl->in_data.value, ctrl->in_data.valuelen,
                 &outdata, &outdatalen);
  xfree (keyidstr);
  if (rc)
    {
      log_error ("app_auth failed: %s\n", gpg_strerror (rc));
    }
  else
    {
      /* Stream the signature to the client; outdata is ours to free. */
      rc = assuan_send_data (ctx, outdata, outdatalen);
      xfree (outdata);
      if (rc)
        return rc; /* that is already an assuan error code */
    }

  /* May rewrite RC if the card was removed meanwhile. */
  TEST_CARD_REMOVAL (ctrl, rc);
  return rc;
}
| 0
|
259,219
|
/* Parse a 'senc' (sample encryption) box: per-sample IVs and optional
 * subsample layouts for the current track's encryption index.
 * Returns 0 on success or a negative AVERROR code. */
static int mov_read_senc(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVEncryptionInfo **encrypted_samples;
    MOVEncryptionIndex *encryption_index;
    MOVStreamContext *sc;
    int use_subsamples, ret;
    unsigned int sample_count, i, alloc_size = 0;

    ret = get_current_encryption_info(c, &encryption_index, &sc);
    if (ret != 1)
        return ret;

    if (encryption_index->nb_encrypted_samples) {
        // This can happen if we have both saio/saiz and senc atoms.
        av_log(c->fc, AV_LOG_DEBUG, "Ignoring duplicate encryption info in senc\n");
        return 0;
    }

    avio_r8(pb); /* version */
    use_subsamples = avio_rb24(pb) & 0x02; /* flags */
    sample_count = avio_rb32(pb);
    /* Guard the later multiplication in av_fast_realloc against overflow. */
    if (sample_count >= INT_MAX / sizeof(*encrypted_samples))
        return AVERROR(ENOMEM);

    for (i = 0; i < sample_count; i++) {
        /* Grow incrementally (capped batches) instead of trusting
         * sample_count up front, so a bogus count can't allocate
         * huge amounts of memory before the stream runs out. */
        unsigned int min_samples = FFMIN(FFMAX(i + 1, 1024 * 1024), sample_count);
        encrypted_samples = av_fast_realloc(encryption_index->encrypted_samples, &alloc_size,
                                            min_samples * sizeof(*encrypted_samples));
        if (encrypted_samples) {
            encryption_index->encrypted_samples = encrypted_samples;

            ret = mov_read_sample_encryption_info(
                c, pb, sc, &encryption_index->encrypted_samples[i], use_subsamples);
        } else {
            ret = AVERROR(ENOMEM);
        }
        if (pb->eof_reached) {
            av_log(c->fc, AV_LOG_ERROR, "Hit EOF while reading senc\n");
            /* The entry just parsed is incomplete; free it before the
             * common cleanup below frees entries [0, i). */
            if (ret >= 0)
                av_encryption_info_free(encryption_index->encrypted_samples[i]);
            ret = AVERROR_INVALIDDATA;
        }

        if (ret < 0) {
            for (; i > 0; i--)
                av_encryption_info_free(encryption_index->encrypted_samples[i - 1]);
            av_freep(&encryption_index->encrypted_samples);
            return ret;
        }
    }
    encryption_index->nb_encrypted_samples = sample_count;

    return 0;
}
| 0
|
508,398
|
check_and_update_table_version(THD *thd,
                               TABLE_LIST *tables, TABLE_SHARE *table_share)
{
  /*
    Compare the table reference id recorded in the prepared statement with
    the current table share. Returns TRUE (with an error reported) when the
    statement must be re-prepared, FALSE otherwise.
  */
  if (! tables->is_table_ref_id_equal(table_share))
  {
    if (thd->m_reprepare_observer &&
        thd->m_reprepare_observer->report_error(thd))
    {
      /*
        Version of the table share is different from the
        previous execution of the prepared statement, and it is
        unacceptable for this SQLCOM. Error has been reported.
      */
      DBUG_ASSERT(thd->is_error());
      return TRUE;
    }
    /* Always maintain the latest version and type */
    tables->set_table_ref_id(table_share);
  }

  /* Debug hook: force a reprepare on every statement execution. */
  DBUG_EXECUTE_IF("reprepare_each_statement", return inject_reprepare(thd););
  return FALSE;
}
| 0
|
513,179
|
/*
  Validate and clamp a long long plugin system-variable value against the
  variable's declared limits. Stores the (possibly clamped) value in *save
  and emits a truncation warning when the input was adjusted.
*/
static int check_func_longlong(THD *thd, struct st_mysql_sys_var *var,
                               void *save, st_mysql_value *value)
{
  my_bool fixed1, fixed2;
  long long orig, val;
  struct my_option options;
  value->val_int(value, &orig);
  val= orig;
  /* Fill `options` with the min/max/block-size declared for this var. */
  plugin_opt_set_limits(&options, var);

  if (var->flags & PLUGIN_VAR_UNSIGNED)
  {
    /* A signed negative input for an unsigned variable clamps to 0. */
    if ((fixed1= (!value->is_unsigned(value) && val < 0)))
      val=0;
    *(ulonglong *)save= getopt_ull_limit_value((ulonglong) val, &options,
                                               &fixed2);
  }
  else
  {
    /* An unsigned input too large for a signed variable clamps to max. */
    if ((fixed1= (value->is_unsigned(value) && val < 0)))
      val=LONGLONG_MAX;
    *(longlong *)save= getopt_ll_limit_value(val, &options, &fixed2);
  }

  /* Warn (or error in strict mode) if any adjustment was made. */
  return throw_bounds_warning(thd, var->name, fixed1 || fixed2,
                              value->is_unsigned(value), (longlong) orig);
}
| 0
|
359,493
|
/* "no neighbor X local-as": remove the local-as override configured for a
   peer or peer-group. */
DEFUN (no_neighbor_local_as,
       no_neighbor_local_as_cmd,
       NO_NEIGHBOR_CMD2 "local-as",
       NO_STR
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR2
       "Specify a local-as number\n")
{
  struct peer *peer;
  int ret;

  /* argv[0] is the neighbor address or peer-group name. */
  peer = peer_and_group_lookup_vty (vty, argv[0]);
  if (! peer)
    return CMD_WARNING;

  ret = peer_local_as_unset (peer);
  return bgp_vty_return (vty, ret);
}
| 0
|
482,530
|
hexValue(const FileInfo *file, const widechar *digits, int length) {
int k;
unsigned int binaryValue = 0;
for (k = 0; k < length; k++) {
unsigned int hexDigit = 0;
if (digits[k] >= '0' && digits[k] <= '9')
hexDigit = digits[k] - '0';
else if (digits[k] >= 'a' && digits[k] <= 'f')
hexDigit = digits[k] - 'a' + 10;
else if (digits[k] >= 'A' && digits[k] <= 'F')
hexDigit = digits[k] - 'A' + 10;
else {
compileError(file, "invalid %d-digit hexadecimal number", length);
return (widechar)0xffffffff;
}
binaryValue |= hexDigit << (4 * (length - 1 - k));
}
return (widechar)binaryValue;
}
| 0
|
247,595
|
// The server requires a client certificate matching a pinned SHA-256 hash;
// the client presents no certificate at all. The handshake must fail and be
// counted under the server's ssl.fail_verify_no_cert stat.
TEST_P(SslSocketTest, FailedClientCertificateHashVerificationNoClientCertificate) {
  // Empty client TLS context: no client certificate configured.
  const std::string client_ctx_yaml = R"EOF(
    common_tls_context:
  )EOF";

  const std::string server_ctx_yaml = absl::StrCat(R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem"
    validation_context:
      trusted_ca:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
      verify_certificate_hash: ")EOF",
                                                   TEST_SAN_URI_CERT_256_HASH, "\"");

  // `false` = expect the connection to fail; GetParam() supplies TLS version.
  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());
  testUtil(test_options.setExpectedServerStats("ssl.fail_verify_no_cert"));
}
| 0
|
226,107
|
// Allocate a track group box ('trgr') with an empty group list.
// Returns NULL if either the box or its list cannot be allocated.
GF_Box *trgr_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_TrackGroupBox, GF_ISOM_BOX_TYPE_TRGR);
	tmp->groups = gf_list_new();
	if (!tmp->groups) {
		// List allocation failed: release the box to avoid a leak.
		gf_free(tmp);
		return NULL;
	}
	return (GF_Box *)tmp;
| 0
|
96,959
|
// Serialize a SecCertificateRef for IPC by encoding its DER representation.
// SecCertificateCopyData returns an owning CFDataRef; RetainPtr(AdoptCF)
// takes over that reference so it is released when `data` goes out of scope.
void encode(ArgumentEncoder* encoder, SecCertificateRef certificate)
{
    RetainPtr<CFDataRef> data(AdoptCF, SecCertificateCopyData(certificate));
    encode(encoder, data.get());
}
| 0
|
409,436
|
cursor_is_sleeping(void)
{
    // Return whether the cursor blink/animation is currently suspended.
    return cursor_is_asleep;
}
| 0
|
386,554
|
// Writes an XRECORD object carrying a single real (double) value to the DXF
// output. The emission order of group codes is fixed by the DXF format, so
// the call sequence below must not be reordered.
void DL_Dxf::writeXRecord(DL_WriterA& dw, int handle, double value) {
    dw.dxfString( 0, "XRECORD");
    dw.dxfHex(5, handle);                 // object handle
    dw.dxfHex(330, appDictionaryHandle);  // owner: the application dictionary (member)
    dw.dxfString(100, "AcDbXrecord");     // subclass marker
    dw.dxfInt(280, 1);                    // NOTE(review): presumably the duplicate-record
                                          // cloning flag — confirm against the DXF spec
    dw.dxfReal(40, value);                // the stored value
}
| 0
|
380,951
|
/*
 * Handle a GUI scroll event while in Insert mode: remove the '$' shown for
 * "cw"-style operations, then scroll; if the view actually moved, record the
 * cursor movement so undo/redraw treat it like an arrow-key motion and allow
 * C-indenting to run again.
 */
    ins_scroll(void)
{
    pos_T	tpos;

    undisplay_dollar();
    tpos = curwin->w_cursor;
    if (gui_do_scroll())
    {
	start_arrow(&tpos);	/* treat the jump like an arrow-key move */
	can_cindent = TRUE;
    }
}
| 0
|
509,490
|
/**
  Decide whether an ALTER TABLE can reuse the existing Maria data file.

  Compares the requested create options against the options the table was
  actually created with; any mismatch that would change the on-disk format
  forces a full table rebuild.

  @param create_info   options requested by the ALTER statement
  @param table_changes IS_EQUAL_* flags describing column-level changes

  @return COMPATIBLE_DATA_YES if the data file can be kept as-is,
          COMPATIBLE_DATA_NO  if the table must be rebuilt
*/
bool ha_maria::check_if_incompatible_data(HA_CREATE_INFO *create_info,
                                          uint table_changes)
{
  DBUG_ENTER("check_if_incompatible_data");
  uint options= table->s->db_options_in_use;
  enum ha_choice page_checksum= table->s->page_checksum;

  /* Resolve an undecided page-checksum choice from the storage layer's
     actual setting, so the comparison below is against a concrete value. */
  if (page_checksum == HA_CHOICE_UNDEF)
    page_checksum= file->s->options & HA_OPTION_PAGE_CHECKSUM ? HA_CHOICE_YES
                                                              : HA_CHOICE_NO;

  /* Any of these differences changes the physical row/index format. */
  if (create_info->auto_increment_value != stats.auto_increment_value ||
      create_info->data_file_name != data_file_name ||
      create_info->index_file_name != index_file_name ||
      create_info->page_checksum != page_checksum ||
      create_info->transactional != table->s->transactional ||
      (maria_row_type(create_info) != data_file_type &&
       create_info->row_type != ROW_TYPE_DEFAULT) ||
      table_changes == IS_EQUAL_NO ||
      (table_changes & IS_EQUAL_PACK_LENGTH)) // Not implemented yet
    DBUG_RETURN(COMPATIBLE_DATA_NO);

  /* Checksum / delay-key-write flags must match between old and new. */
  if ((options & (HA_OPTION_CHECKSUM |
                  HA_OPTION_DELAY_KEY_WRITE)) !=
      (create_info->table_options & (HA_OPTION_CHECKSUM |
                                     HA_OPTION_DELAY_KEY_WRITE)))
    DBUG_RETURN(COMPATIBLE_DATA_NO);
  DBUG_RETURN(COMPATIBLE_DATA_YES);
}
| 0
|
359,552
|
/* CLI: "redistribute <protocol>" under an IPv6 address family.
 * Parses the protocol keyword and enables redistribution of that protocol's
 * routes into BGP for AFI_IP6.
 * Fix: corrected the user-visible help-string typo "Shurtest" -> "Shortest". */
DEFUN (bgp_redistribute_ipv6,
       bgp_redistribute_ipv6_cmd,
       "redistribute (connected|kernel|ospf6|ripng|static)",
       "Redistribute information from another routing protocol\n"
       "Connected\n"
       "Kernel routes\n"
       "Open Shortest Path First (OSPFv3)\n"
       "Routing Information Protocol (RIPng)\n"
       "Static routes\n")
{
  int type;

  /* Map the keyword (argv[0]) to a zebra route type; 0 means unknown. */
  type = bgp_str2route_type (AFI_IP6, argv[0]);
  if (! type)
    {
      vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE);
      return CMD_WARNING;
    }

  return bgp_redistribute_set (vty->index, AFI_IP6, type);
}
| 0
|
401,499
|
/*
 * Sleep until the timeout (in jiffies) elapses or the task is woken early.
 * MAX_SCHEDULE_TIMEOUT means "sleep with no timer at all"; a negative value
 * is treated as a caller bug (logged, no sleep). Returns the number of
 * jiffies remaining (0 if the timeout fully expired).
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct process_timer timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
		 * but I' d like to return a valid offset (>=0) to allow
		 * the caller to do everything it want with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happens anyway). You just have the printk()
		 * that will tell you if something is gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	/* Arm an on-stack timer that will wake this task at `expire`. */
	timer.task = current;
	timer_setup_on_stack(&timer.timer, process_timeout, 0);
	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
	schedule();
	del_singleshot_timer_sync(&timer.timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer.timer);

	/* May be negative if we woke after `expire`; clamped to 0 below. */
	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
| 0
|
244,234
|
/* Serializes an "unknown" box: writes the header using the box's original
 * four-character code, then the raw payload bytes. If the box carries sample
 * auxiliary info (sai_type), either back-patches the matching saio box's
 * first-offset field with the current stream position, or records that
 * position in sai_offset for later use. */
GF_Err unkn_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 type;
	GF_UnknownBox *ptr = (GF_UnknownBox *)s;
	if (!s) return GF_BAD_PARAM;

	/* Temporarily swap in the original 4CC so the header is written with
	 * the real on-disk type, then restore the internal type. */
	type = s->type;
	ptr->type = ptr->original_4cc;
	e = gf_isom_box_write_header(s, bs);
	ptr->type = type;
	if (e) return e;

	if (ptr->sai_type) {
		if (ptr->saio_box) {
			/* Seek back and patch the saio first-offset field (32 or
			 * 64 bit depending on the saio version), then return to
			 * the current write position. */
			u64 pos = gf_bs_get_position(bs);
			gf_bs_seek(bs, ptr->saio_box->offset_first_offset_field);
			if (ptr->saio_box->version)
				gf_bs_write_u64(bs, pos);
			else
				gf_bs_write_u32(bs, (u32) pos);
			gf_bs_seek(bs, pos);
		} else {
			ptr->sai_offset = gf_bs_get_position(bs);
		}
	}
	if (ptr->dataSize && ptr->data) {
		gf_bs_write_data(bs, ptr->data, ptr->dataSize);
	}
	return GF_OK;
}
| 0
|
512,587
|
// Returns a pointer to the member str_value; the supplied scratch buffer is unused.
String *val_str(String*) { return &str_value; }
| 0
|
212,436
|
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
int err;
smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
smap->map.numa_node);
if (!smap->elems)
return -ENOMEM;
err = pcpu_freelist_init(&smap->freelist);
if (err)
goto free_elems;
pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
smap->map.max_entries);
return 0;
free_elems:
bpf_map_area_free(smap->elems);
return err;
}
| 1
|
222,553
|
const FunctionDef* FunctionLibraryDefinition::Find(const string& func) const {
  // Look up the function under a shared (reader) lock; return its definition
  // when registered, nullptr otherwise.
  tf_shared_lock l(mu_);
  const auto record = FindHelper(func);
  return record ? &record->fdef : nullptr;
}
| 0
|
253,635
|
/* Closes the SMB2 handle and, using the attributes the server returns in the
 * close response, refreshes the cached inode timestamps and block count under
 * i_lock. If the close itself fails, the inode is left untouched. */
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
			  cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return;

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	CIFS_I(inode)->time = jiffies;

	/* Creation time should not need to be updated on close */
	if (file_inf.LastWriteTime)
		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
	if (file_inf.ChangeTime)
		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
	if (file_inf.LastAccessTime)
		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);

	/*
	 * i_blocks is not related to (i_size / i_blksize),
	 * but instead 512 byte (2**9) size is required for
	 * calculating num blocks.
	 */
	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
		inode->i_blocks =
			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
}
| 0
|
366,317
|
/*
 * Handle MS_REMOUNT: change mount flags of an existing mount and/or
 * reconfigure its superblock via a reconfiguration fs_context.
 *
 * Validations first: the mount must belong to our namespace, the path must
 * be the mount root (not a subdirectory), and MNT_LOCKED flag restrictions
 * must allow the requested flag changes. Superblock reconfiguration runs
 * under s_umount and requires CAP_SYS_ADMIN in the sb's user namespace; the
 * per-mount flags are only updated (under the mount hash lock) if
 * reconfigure_super() succeeded.
 */
static int do_remount(struct path *path, int ms_flags, int sb_flags,
		      int mnt_flags, void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);
	struct fs_context *fc;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	if (!can_change_locked_flags(mnt, mnt_flags))
		return -EPERM;

	fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	/* Mark this as a legacy mount(2)-style request, not fsconfig(2). */
	fc->oldapi = true;
	err = parse_monolithic_mount_data(fc, data);
	if (!err) {
		down_write(&sb->s_umount);
		err = -EPERM;
		if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
			err = reconfigure_super(fc);
			if (!err) {
				lock_mount_hash();
				set_mount_attributes(mnt, mnt_flags);
				unlock_mount_hash();
			}
		}
		up_write(&sb->s_umount);
	}

	mnt_warn_timestamp_expiry(path, &mnt->mnt);

	put_fs_context(fc);
	return err;
}
| 0
|
268,104
|
// Loads a rank-2 float tensor from a checkpoint and remaps its rows (and
// optionally columns) into a freshly allocated [num_rows_, num_cols_] output,
// filling any cells not covered by the remapping from `initializing_values`.
// The old tensor is read in row slices of at most max_rows_in_memory_ rows to
// bound memory usage.
void Compute(OpKernelContext* context) override {
  // Checks what we're remapping and inverts the relevant remapping Tensors to
  // be maps with key = old ID, value = new ID.
  std::unordered_map<int64_t, int64_t> old_row_to_new_row_map;
  std::vector<bool> row_id_present;
  const Tensor* row_remapping_t;
  OP_REQUIRES_OK(context, context->input("row_remapping", &row_remapping_t));
  OP_REQUIRES(
      context, row_remapping_t->dims() == 1,
      errors::InvalidArgument("The `row_remapping` tensor must be 1-D, got "
                              "a tensor of shape ",
                              row_remapping_t->shape().DebugString()));
  const auto row_remapping = row_remapping_t->vec<int64_t>();
  OP_REQUIRES(context, row_remapping.size() == num_rows_,
              errors::InvalidArgument(strings::StrCat(
                  "Size of row_remapping is ", row_remapping.size(),
                  " instead of being equal to num_rows=", num_rows_)));
  OP_REQUIRES_OK(context, RemapVectorToMap(row_remapping, &row_id_present,
                                           &old_row_to_new_row_map));

  // Calculates the min/max old row ID that we need to read, to save us from
  // reading some unnecessary slices of the old tensor.
  int64_t min_old_row = -1;
  int64_t max_old_row = -1;
  for (int i = 0; i < row_remapping.size(); ++i) {
    if (min_old_row < 0 ||
        (row_remapping(i) >= 0 && row_remapping(i) < min_old_row)) {
      min_old_row = row_remapping(i);
    }
    if (max_old_row < 0 ||
        (row_remapping(i) >= 0 && row_remapping(i) > max_old_row)) {
      max_old_row = row_remapping(i);
    }
  }

  // Processes the remapping for columns.
  std::unordered_map<int64_t, int64_t> old_col_to_new_col_map;
  std::vector<bool> col_id_present;
  const Tensor* col_remapping_t;
  OP_REQUIRES_OK(context, context->input("col_remapping", &col_remapping_t));
  const auto col_remapping = col_remapping_t->vec<int64_t>();
  // Note that we always "remap rows", even when the row vocabulary does
  // not change, because partitioning requires a mapping from partitioned
  // Variables to the full checkpoints we load.
  const bool remap_cols = col_remapping.size() > 0;
  if (remap_cols) {
    OP_REQUIRES(
        context, col_remapping.size() == num_cols_,
        errors::InvalidArgument(strings::StrCat(
            "Provided col_remapping, but its size is ", col_remapping.size(),
            " instead of being equal to num_cols=", num_cols_)));
    OP_REQUIRES_OK(context, RemapVectorToMap(col_remapping, &col_id_present,
                                             &old_col_to_new_col_map));
  } else {
    // No column remapping: every column of the output is "present".
    col_id_present.clear();
    col_id_present.resize(num_cols_, true);
  }

  // Processes the checkpoint source and the provided Tensor name.
  const Tensor* ckpt_path_t;
  OP_REQUIRES_OK(context, context->input("ckpt_path", &ckpt_path_t));
  OP_REQUIRES(
      context, ckpt_path_t->NumElements() == 1,
      errors::InvalidArgument("The `ckpt_path` tensor must have exactly one "
                              "element, got tensor of shape ",
                              ckpt_path_t->shape().DebugString()));
  const string& ckpt_path = ckpt_path_t->scalar<tstring>()();
  const Tensor* old_tensor_name_t;
  OP_REQUIRES_OK(context,
                 context->input("old_tensor_name", &old_tensor_name_t));
  const string& old_tensor_name = old_tensor_name_t->scalar<tstring>()();

  LOG(INFO) << "Processing checkpoint : " << ckpt_path;
  BundleReader reader(context->env(), ckpt_path);
  OP_REQUIRES_OK(context, reader.status());

  // Validate the checkpoint tensor: must be a float matrix (rank 2).
  DataType tensor_type;
  TensorShape tensor_shape;
  OP_REQUIRES_OK(context, reader.LookupDtypeAndShape(
                              old_tensor_name, &tensor_type, &tensor_shape));
  OP_REQUIRES(context, tensor_type == DT_FLOAT,
              errors::InvalidArgument(strings::StrCat(
                  "Tensor ", old_tensor_name, " has invalid type ",
                  DataTypeString(tensor_type), " instead of expected type ",
                  DataTypeString(DT_FLOAT))));
  // This op is limited to loading Tensors of rank 2 (matrices).
  OP_REQUIRES(
      context, tensor_shape.dims() == 2,
      errors::InvalidArgument(strings::StrCat(
          "Tensor ", old_tensor_name, " has shape ",
          tensor_shape.DebugString(), " of invalid rank ",
          tensor_shape.dims(), " instead of expected shape of rank 2.")));

  if (!remap_cols) {
    // TODO(weiho): Consider relaxing this restriction to allow partial column
    // loading (even when no column remapping is specified) if there turns out
    // to be a use case for it.
    OP_REQUIRES(context, num_cols_ == tensor_shape.dim_size(1),
                errors::InvalidArgument(strings::StrCat(
                    "Tensor ", old_tensor_name, " has shape ",
                    tensor_shape.DebugString(),
                    ", where the size of its 2nd dimension is ",
                    tensor_shape.dim_size(1),
                    " instead of being equal to num_cols=", num_cols_)));
  }

  // Uses TensorSlice to potentially load the old tensor in chunks in case
  // memory usage is a concern.
  std::vector<TensorSlice> tensor_slices;
  TensorSlice slice(tensor_shape.dims());
  if (min_old_row >= 0 && max_old_row >= 0) {
    int64_t row_start = min_old_row;
    // TODO(weiho): Given the list of old row IDs of interest (the keys of
    // old_row_to_new_row_map), we could also try something smarter to
    // find some minimal set of covering ranges for the list of old row IDs
    // such that the size of each range is less than max_rows_in_memory_.
    while (row_start <= max_old_row) {
      const int64_t slice_length =
          max_rows_in_memory_ <= 0
              // If max_rows_in_memory_ <= 0, we just load the entire chunk.
              ? max_old_row - row_start + 1
              : std::min(max_rows_in_memory_, max_old_row - row_start + 1);
      slice.set_start(0, row_start);
      slice.set_length(0, slice_length);
      tensor_slices.push_back(slice);
      row_start += slice_length;
    }
  }

  // Allocates the output matrix.
  Tensor* output_matrix_t = nullptr;
  OP_REQUIRES_OK(context,
                 context->allocate_output("output_matrix",
                                          TensorShape({num_rows_, num_cols_}),
                                          &output_matrix_t));
  auto output_matrix = output_matrix_t->matrix<float>();

  // Iterates through tensor slices and copies over values from the old tensor
  // to the output matrix.
  int64_t row_index = min_old_row;
  int64_t rows_copied = 0;
  Tensor loaded_tensor_t;
  for (const TensorSlice& tensor_slice : tensor_slices) {
    LOG(INFO) << "Loading slice " << tensor_slice.DebugString();
    TensorShape slice_shape;
    OP_REQUIRES_OK(context,
                   tensor_slice.SliceTensorShape(tensor_shape, &slice_shape));
    // Potentially re-allocates the tensor buffer since the last slice may
    // have fewer rows than the other slices.
    if (loaded_tensor_t.shape() != slice_shape) {
      loaded_tensor_t = Tensor(DT_FLOAT, slice_shape);
    }
    OP_REQUIRES_OK(context, reader.LookupSlice(old_tensor_name, tensor_slice,
                                               &loaded_tensor_t));

    // Iterates through the old loaded tensor slice row-by-row.
    for (int row = 0; row < loaded_tensor_t.dim_size(0); ++row, ++row_index) {
      if (row_index % 500000 == min_old_row) {
        LOG(INFO) << "Processing old row " << row_index;
      }

      // If the old row ID is not found in old_row_to_new_row_map, continue
      // to the next row; otherwise, copy it to the output matrix.
      const int64_t* new_row_ptr =
          gtl::FindOrNull(old_row_to_new_row_map, row_index);
      if (new_row_ptr == nullptr) {
        continue;
      }
      ++rows_copied;
      const int64_t new_row = *new_row_ptr;

      // Copies over the row element-by-element, in case remapping is needed
      // along the column axis.
      const auto& loaded_tensor = loaded_tensor_t.matrix<float>();
      for (int old_col = 0; old_col < loaded_tensor_t.dim_size(1);
           ++old_col) {
        int64_t new_col = old_col;
        if (remap_cols) {
          const int64_t* new_col_ptr =
              gtl::FindOrNull(old_col_to_new_col_map, old_col);
          if (new_col_ptr == nullptr) {
            // Column remapping is specified, but this column is not found in
            // old_col_to_new_col_map, so we leave it uninitialized, to be
            // filled in with initializing_values later.
            continue;
          }
          new_col = *new_col_ptr;
        }
        OP_REQUIRES(context,
                    new_row < num_rows_ && new_col < num_cols_ &&
                        new_row >= 0 && new_col >= 0,
                    errors::Internal(strings::StrCat(
                        "new_row=", new_row, " and new_col=", new_col,
                        " should have been less than num_rows_=", num_rows_,
                        " and num_cols_=", num_cols_,
                        " and non-negative. This should never have happened "
                        "if the code were correct. Please file a bug.")));
        output_matrix(new_row, new_col) = loaded_tensor(row, old_col);
      }
    }
  }
  LOG(INFO) << "Copied " << rows_copied << " rows from old matrix (with "
            << tensor_shape.dim_size(0) << " rows) to new matrix (with "
            << num_rows_ << " rows).";

  // At this point, there are potentially whole rows/columns uninitialized
  // (corresponding to the indices where row_id_present/col_id_present are
  // false). We fill this in cell-by-cell using row_id_present and
  // col_id_present while dequeuing from the initializing_values vector.
  const Tensor* initializing_values_t;
  OP_REQUIRES_OK(
      context, context->input("initializing_values", &initializing_values_t));
  const auto initializing_values = initializing_values_t->flat<float>();
  int64_t initializing_values_index = 0;
  for (int i = 0; i < num_rows_; ++i) {
    for (int j = 0; j < num_cols_; ++j) {
      if (row_id_present[i] && col_id_present[j]) continue;
      OP_REQUIRES(
          context, initializing_values_index < initializing_values.size(),
          errors::InvalidArgument(
              "initializing_values contained ", initializing_values.size(),
              " elements, but more missing values remain."));
      output_matrix(i, j) = initializing_values(initializing_values_index);
      ++initializing_values_index;
    }
  }

  // Checks that we used all the given initializing values.
  OP_REQUIRES(
      context, initializing_values_index == initializing_values.size(),
      errors::InvalidArgument(
          "initializing_values contained ", initializing_values.size(),
          " elements, but only ", initializing_values_index,
          " elements were used to fill in missing values."));
}
| 0
|
232,948
|
static void identity_close_writer(struct Curl_easy *data,
struct contenc_writer *writer)
{
(void) data;
(void) writer;
}
| 0
|
514,314
|
/*
  Recompute the used-tables information for every value expression of this
  multi-table UPDATE by delegating to each Item's update_used_tables().
*/
void multi_update::update_used_tables()
{
  Item *item;
  List_iterator_fast<Item> it(*values);
  while ((item= it++))
  {
    item->update_used_tables();
  }
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.