idx
int64 | func
string | target
int64 |
|---|---|---|
439,495
|
/*
 * Open a squashfs v2 directory for reading.
 *
 * block_start/offset locate the directory's inode in the inode table.
 * The directory inode is read into *i, then the directory table is
 * walked header-by-header, decoding each entry into an in-memory
 * struct dir.
 *
 * Returns the populated struct dir on success (an empty dir for an
 * empty directory), or NULL if corruption is detected; fatal errors
 * (allocation failure, missing directory block) exit via
 * EXIT_UNSQUASH.
 */
static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offset,
	struct inode **i)
{
	squashfs_dir_header_2 dirh;
	/* Scratch area large enough for one entry plus the longest
	 * possible name and a terminating NUL. */
	char buffer[sizeof(squashfs_dir_entry_2) + SQUASHFS_NAME_LEN + 1]
		__attribute__((aligned));
	squashfs_dir_entry_2 *dire = (squashfs_dir_entry_2 *) buffer;
	long long start;
	int bytes;
	int dir_count, size;
	struct dir_ent *new_dir;
	struct dir *dir;
	TRACE("squashfs_opendir: inode start block %d, offset %d\n",
		block_start, offset);
	*i = read_inode(block_start, offset);
	dir = malloc(sizeof(struct dir));
	if(dir == NULL)
		EXIT_UNSQUASH("squashfs_opendir: malloc failed!\n");
	/* Copy directory metadata out of the inode. */
	dir->dir_count = 0;
	dir->cur_entry = 0;
	dir->mode = (*i)->mode;
	dir->uid = (*i)->uid;
	dir->guid = (*i)->gid;
	dir->mtime = (*i)->time;
	dir->xattr = (*i)->xattr;
	dir->dirs = NULL;
	if ((*i)->data == 0)
		/*
		 * if the directory is empty, skip the unnecessary
		 * lookup_entry, this fixes the corner case with
		 * completely empty filesystems where lookup_entry correctly
		 * returning -1 is incorrectly treated as an error
		 */
		return dir;
	/* Locate the decompressed directory data for this inode. */
	start = sBlk.s.directory_table_start + (*i)->start;
	bytes = lookup_entry(directory_table_hash, start);
	if(bytes == -1)
		EXIT_UNSQUASH("squashfs_opendir: directory block %d not "
			"found!\n", block_start);
	bytes += (*i)->offset;
	/* (*i)->data is the directory's byte length; size is the end offset. */
	size = (*i)->data + bytes;
	while(bytes < size) {
		if(swap) {
			/* Foreign-endian image: byte-swap the header. */
			squashfs_dir_header_2 sdirh;
			memcpy(&sdirh, directory_table + bytes, sizeof(sdirh));
			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
		} else
			memcpy(&dirh, directory_table + bytes, sizeof(dirh));
		/* On-disk count is stored as (entries - 1). */
		dir_count = dirh.count + 1;
		TRACE("squashfs_opendir: Read directory header @ byte position "
			"%d, %d directory entries\n", bytes, dir_count);
		bytes += sizeof(dirh);
		/* dir_count should never be larger than SQUASHFS_DIR_COUNT */
		if(dir_count > SQUASHFS_DIR_COUNT) {
			ERROR("File system corrupted: too many entries in directory\n");
			goto corrupted;
		}
		while(dir_count--) {
			if(swap) {
				squashfs_dir_entry_2 sdire;
				memcpy(&sdire, directory_table + bytes,
					sizeof(sdire));
				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
			} else
				memcpy(dire, directory_table + bytes,
					sizeof(*dire));
			bytes += sizeof(*dire);
			/* size should never be SQUASHFS_NAME_LEN or larger */
			if(dire->size >= SQUASHFS_NAME_LEN) {
				ERROR("File system corrupted: filename too long\n");
				goto corrupted;
			}
			/* On-disk size appears to be (name length - 1),
			 * hence the +1 when copying — consistent with the
			 * bound check above. */
			memcpy(dire->name, directory_table + bytes,
				dire->size + 1);
			dire->name[dire->size + 1] = '\0';
			/* check name for invalid characters (i.e /, ., ..) */
			if(check_name(dire->name, dire->size + 1) == FALSE) {
				ERROR("File system corrupted: invalid characters in name\n");
				goto corrupted;
			}
			TRACE("squashfs_opendir: directory entry %s, inode "
				"%d:%d, type %d\n", dire->name,
				dirh.start_block, dire->offset, dire->type);
			/* Grow the entry array in DIR_ENT_SIZE chunks. */
			if((dir->dir_count % DIR_ENT_SIZE) == 0) {
				new_dir = realloc(dir->dirs, (dir->dir_count +
					DIR_ENT_SIZE) * sizeof(struct dir_ent));
				if(new_dir == NULL)
					EXIT_UNSQUASH("squashfs_opendir: "
						"realloc failed!\n");
				dir->dirs = new_dir;
			}
			strcpy(dir->dirs[dir->dir_count].name, dire->name);
			dir->dirs[dir->dir_count].start_block =
				dirh.start_block;
			dir->dirs[dir->dir_count].offset = dire->offset;
			dir->dirs[dir->dir_count].type = dire->type;
			dir->dir_count ++;
			bytes += dire->size + 1;
		}
	}
	return dir;
corrupted:
	/* Corruption detected: release the partially built directory. */
	free(dir->dirs);
	free(dir);
	return NULL;
}
| 0
|
328,907
|
R_API ut64 r_bin_java_rti_annotations_attr_calc_size(RBinJavaAttrInfo *attr) {
	/* A missing attribute contributes no bytes. */
	if (!attr) {
		// TODO eprintf allocation fail
		return 0;
	}
	/* 6 bytes of attribute header plus the encoded annotation array. */
	return 6 + r_bin_java_annotation_array_calc_size (&(attr->info.annotation_array));
}
| 0
|
95,908
|
// Appends the post-install work items to |post_install_task_list|:
//  * a conditional list run only while new_chrome_exe exists (an in-use
//    update): records the old version and a rename command in the registry;
//  * a conditional list run otherwise (regular update): deletes those
//    same values;
//  * COM DLL registration and, for MSI installs, MSI markers and
//    uninstall-shortcut cleanup.
// Always returns true.
bool AppendPostInstallTasks(const InstallerState& installer_state,
                            const FilePath& setup_path,
                            const FilePath& new_chrome_exe,
                            const Version* current_version,
                            const Version& new_version,
                            const FilePath& temp_path,
                            WorkItemList* post_install_task_list) {
  DCHECK(post_install_task_list);
  HKEY root = installer_state.root_key();
  const Products& products = installer_state.products();
  {
    // Work items executed only if new_chrome_exe exists, i.e. the browser
    // was in use and the rename has been deferred.
    scoped_ptr<WorkItemList> in_use_update_work_items(
        WorkItem::CreateConditionalWorkItemList(
            new ConditionRunIfFileExists(new_chrome_exe)));
    in_use_update_work_items->set_log_message("InUseUpdateWorkItemList");
    // Build the command that performs the deferred rename using the
    // freshly installed setup.exe.
    FilePath installer_path(installer_state.GetInstallerDirectory(new_version)
        .Append(setup_path.BaseName()));
    CommandLine rename(installer_path);
    rename.AppendSwitch(switches::kRenameChromeExe);
    if (installer_state.system_install())
      rename.AppendSwitch(switches::kSystemLevel);
    if (installer_state.verbose_logging())
      rename.AppendSwitch(switches::kVerboseLogging);
    std::wstring version_key;
    for (size_t i = 0; i < products.size(); ++i) {
      BrowserDistribution* dist = products[i]->distribution();
      version_key = dist->GetVersionKey();
      // Remember the version being replaced so the updater can report it.
      if (current_version != NULL) {
        in_use_update_work_items->AddSetRegValueWorkItem(root, version_key,
            google_update::kRegOldVersionField,
            UTF8ToWide(current_version->GetString()), true);
      }
      // Record the per-product rename command for the updater to run.
      CommandLine product_rename_cmd(rename);
      products[i]->AppendRenameFlags(&product_rename_cmd);
      in_use_update_work_items->AddSetRegValueWorkItem(
          root,
          version_key,
          google_update::kRegRenameCmdField,
          product_rename_cmd.command_line_string(),
          true);
    }
    // Multi-install: also record the old version for the shared binaries.
    if (current_version != NULL && installer_state.is_multi_install()) {
      BrowserDistribution* dist =
          installer_state.multi_package_binaries_distribution();
      in_use_update_work_items->AddSetRegValueWorkItem(
          root,
          dist->GetVersionKey(),
          google_update::kRegOldVersionField,
          UTF8ToWide(current_version->GetString()),
          true);
    }
    post_install_task_list->AddWorkItem(in_use_update_work_items.release());
  }
  {
    // Work items executed only if new_chrome_exe does NOT exist, i.e. the
    // rename already happened: clear any stale in-use-update values.
    scoped_ptr<WorkItemList> regular_update_work_items(
        WorkItem::CreateConditionalWorkItemList(
            new Not(new ConditionRunIfFileExists(new_chrome_exe))));
    regular_update_work_items->set_log_message("RegularUpdateWorkItemList");
    for (size_t i = 0; i < products.size(); ++i) {
      BrowserDistribution* dist = products[i]->distribution();
      std::wstring version_key(dist->GetVersionKey());
      regular_update_work_items->AddDeleteRegValueWorkItem(root, version_key,
          google_update::kRegOldVersionField);
      regular_update_work_items->AddDeleteRegValueWorkItem(root, version_key,
          google_update::kRegRenameCmdField);
    }
    post_install_task_list->AddWorkItem(regular_update_work_items.release());
  }
  AddRegisterComDllWorkItemsForPackage(installer_state, current_version,
                                       new_version, post_install_task_list);
  // MSI-managed installs additionally mark products as MSI and remove the
  // uninstall shortcuts MSI would otherwise leave behind.
  if (installer_state.is_msi()) {
    for (size_t i = 0; i < products.size(); ++i) {
      const Product* product = products[i];
      AddSetMsiMarkerWorkItem(installer_state, product->distribution(), true,
                              post_install_task_list);
      AddDeleteUninstallShortcutsForMSIWorkItems(installer_state, *product,
                                                 temp_path,
                                                 post_install_task_list);
    }
    if (installer_state.is_multi_install()) {
      AddSetMsiMarkerWorkItem(installer_state,
          installer_state.multi_package_binaries_distribution(), true,
          post_install_task_list);
    }
  }
  return true;
}
| 0
|
427,176
|
/*
** Parse a block: grammar rule `block -> statlist'.
** A plain block opens a non-loop scope, parses the statement list,
** then closes the scope again.
*/
static void block (LexState *ls) {
  FuncState *func = ls->fs;
  BlockCnt blk;
  enterblock(func, &blk, 0);  /* 0: not a loop block */
  statlist(ls);
  leaveblock(func);
}
| 0
|
328,810
|
/*
 * Parse one bootstrap-method argument (a 2-byte constant-pool index)
 * from `buffer` and return a freshly allocated descriptor, or NULL on
 * allocation failure or if the buffer is too small.
 *
 * buf_offset is the argument's absolute position in the file, recorded
 * for later reporting. The caller owns the returned object.
 */
R_API RBinJavaBootStrapArgument *r_bin_java_bootstrap_method_argument_new(ut8 *buffer, ut64 sz, ut64 buf_offset) {
	ut64 offset = 0;
	/* Fix: `sz` was previously ignored, allowing a 2-byte read past the
	 * end of a short buffer. */
	if (!buffer || sz < 2) {
		return NULL;
	}
	/* calloc zero-initializes, replacing the malloc + memset pair. */
	RBinJavaBootStrapArgument *bsm_arg = calloc (1, sizeof (RBinJavaBootStrapArgument));
	if (!bsm_arg) {
		// TODO eprintf failed to allocate bytes for bootstrap_method.
		return NULL;
	}
	bsm_arg->file_offset = buf_offset;
	/* 16-bit constant-pool index of the argument. */
	bsm_arg->argument_info_idx = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	bsm_arg->argument_info_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, bsm_arg->argument_info_idx);
	/* Bytes consumed from the buffer. */
	bsm_arg->size = offset;
	return bsm_arg;
}
| 0
|
369,281
|
/*
 * Register a buffer list under its buffer-group id: initialise the list,
 * stamp the id, and insert it into the ctx hash bucket for that id.
 */
static void io_buffer_add_list(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned int bgid)
{
	struct list_head *bucket = &ctx->io_buffers[hash_32(bgid, IO_BUFFERS_HASH_BITS)];

	INIT_LIST_HEAD(&bl->buf_list);
	bl->bgid = bgid;
	list_add(&bl->list, bucket);
}
| 0
|
509,577
|
/*
  Read a row via the active index using a key prefix.
  Requires an index scan to have been initialised; delegates the
  actual lookup to maria_rkey() and returns its error code.
*/
int ha_maria::index_read_map(uchar * buf, const uchar * key,
                             key_part_map keypart_map,
                             enum ha_rkey_function find_flag)
{
  DBUG_ASSERT(inited == INDEX);
  register_handler(file);
  return maria_rkey(file, buf, active_index, key, keypart_map, find_flag);
}
| 0
|
261,896
|
/*
 * Return the length of `src` decoded as UTF-8, writing the byte size
 * to *out_size. Delegates to njs_utf8_stream_length() with a freshly
 * initialised decode context (last-chunk flag set, no fault counting).
 */
njs_decode_utf8_length(const njs_str_t *src, size_t *out_size)
{
    njs_unicode_decode_t ctx;
    njs_utf8_decode_init(&ctx);
    return njs_utf8_stream_length(&ctx, src->start, src->length, 1, 0,
                                  out_size);
}
| 0
|
274,875
|
// Element-wise EQUAL on uint8-quantized inputs: only positions where the
// dequantized values match should yield true.
TEST(QuantizedComparisonsTest, EqualUInt8Quantized) {
  // Quantization range shared by both input tensors.
  const float kQuantMin = -1.f;
  const float kQuantMax = 128.f;
  ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kQuantMin, kQuantMax},
                          {TensorType_UINT8, {1, 2, 2, 1}, kQuantMin, kQuantMax},
                          TensorType_UINT8, BuiltinOperator_EQUAL);
  model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
  model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 7, 5});
  model.Invoke();
  // Matches at indices 0 and 2 only.
  EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, false));
}
| 0
|
211,563
|
/*
 * Start Visual mode "c" (v, V or CTRL-V) at the cursor position.
 * Sets the Visual state flags, anchors VIsual, and triggers the
 * redraws/side effects the mode change requires.
 */
    n_start_visual_mode(int c)
{
#ifdef FEAT_CONCEAL
    // Remember whether the cursor line was concealed before the mode
    // change, so we can redraw it if that changes.
    int cursor_line_was_concealed = curwin->w_p_cole > 0
	&& conceal_cursor_line(curwin);
#endif
    VIsual_mode = c;
    VIsual_active = TRUE;
    VIsual_reselect = TRUE;
    trigger_modechanged();
    // Corner case: the 0 position in a tab may change when going into
    // virtualedit. Recalculate curwin->w_cursor to avoid bad highlighting.
    if (c == Ctrl_V && (get_ve_flags() & VE_BLOCK) && gchar_cursor() == TAB)
    {
	validate_virtcol();
	coladvance(curwin->w_virtcol);
    }
    // Anchor the Visual area at the (possibly adjusted) cursor.
    VIsual = curwin->w_cursor;
#ifdef FEAT_FOLDING
    foldAdjustVisual();
#endif
    setmouse();
#ifdef FEAT_CONCEAL
    // Check if redraw is needed after changing the state.
    conceal_check_cursor_line(cursor_line_was_concealed);
#endif
    if (p_smd && msg_silent == 0)
	redraw_cmdline = TRUE;	// show visual mode later
#ifdef FEAT_CLIPBOARD
    // Make sure the clipboard gets updated. Needed because start and
    // end may still be the same, and the selection needs to be owned
    clip_star.vmode = NUL;
#endif
    // Only need to redraw this line, unless still need to redraw an old
    // Visual area (when 'lazyredraw' is set).
    if (curwin->w_redr_type < INVERTED)
    {
	curwin->w_old_cursor_lnum = curwin->w_cursor.lnum;
	curwin->w_old_visual_lnum = curwin->w_cursor.lnum;
    }
}
| 1
|
450,409
|
/*
 * Send pending framebuffer updates for one client.
 *
 * Accumulates has_dirty into the client's dirty count, then walks the
 * per-client dirty bitmap, coalescing runs of dirty bits into
 * rectangles that are queued on a VncJob. Returns the number of
 * rectangles queued (0 if nothing was sent or the client is being
 * disconnected).
 */
static int vnc_update_client(VncState *vs, int has_dirty)
{
    VncDisplay *vd = vs->vd;
    VncJob *job;
    int y;
    int height, width;
    int n = 0;
    if (vs->disconnecting) {
        vnc_disconnect_finish(vs);
        return 0;
    }
    vs->has_dirty += has_dirty;
    if (!vnc_should_update(vs)) {
        return 0;
    }
    /* Nothing dirty and no forced full update requested: done. */
    if (!vs->has_dirty && vs->update != VNC_STATE_UPDATE_FORCE) {
        return 0;
    }
    /*
     * Send screen updates to the vnc client using the server
     * surface and server dirty map. guest surface updates
     * happening in parallel don't disturb us, the next pass will
     * send them to the client.
     */
    job = vnc_job_new(vs);
    height = pixman_image_get_height(vd->server);
    width = pixman_image_get_width(vd->server);
    y = 0;
    for (;;) {
        int x, h;
        unsigned long x2;
        /* Find the next dirty bit at or after row y; the bitmap has
         * VNC_DIRTY_BPL(vs) bits per scanline. */
        unsigned long offset = find_next_bit((unsigned long *) &vs->dirty,
                                             height * VNC_DIRTY_BPL(vs),
                                             y * VNC_DIRTY_BPL(vs));
        if (offset == height * VNC_DIRTY_BPL(vs)) {
            /* no more dirty bits */
            break;
        }
        y = offset / VNC_DIRTY_BPL(vs);
        x = offset % VNC_DIRTY_BPL(vs);
        /* Extend the run to the first clean bit on this row, clear it,
         * and grow the rectangle downwards over matching dirty rows. */
        x2 = find_next_zero_bit((unsigned long *) &vs->dirty[y],
                                VNC_DIRTY_BPL(vs), x);
        bitmap_clear(vs->dirty[y], x, x2 - x);
        h = find_and_clear_dirty_height(vs, y, x, x2, height);
        /* Clamp to the actual surface width (each bit covers
         * VNC_DIRTY_PIXELS_PER_BIT pixels). */
        x2 = MIN(x2, width / VNC_DIRTY_PIXELS_PER_BIT);
        if (x2 > x) {
            n += vnc_job_add_rect(job, x * VNC_DIRTY_PIXELS_PER_BIT, y,
                                  (x2 - x) * VNC_DIRTY_PIXELS_PER_BIT, h);
        }
        /* Full-width rectangle: skip the rows it already covered. */
        if (!x && x2 == width / VNC_DIRTY_PIXELS_PER_BIT) {
            y += h;
            if (y == height) {
                break;
            }
        }
    }
    /* Queue the job and reset the update state for the next pass. */
    vs->job_update = vs->update;
    vs->update = VNC_STATE_UPDATE_NONE;
    vnc_job_push(job);
    vs->has_dirty = 0;
    return n;
}
| 0
|
242,131
|
// Lua constructor: Settings(filename) -> userdata wrapping a LuaSettings.
// The security check may clear write_allowed instead of raising, producing
// a read-only settings object. Pushes the userdata and returns 1.
int LuaSettings::create_object(lua_State* L)
{
	NO_MAP_LOCK_REQUIRED;
	bool write_allowed = true;
	const char* path = luaL_checkstring(L, 1);
	CHECK_SECURE_PATH_POSSIBLE_WRITE(L, path, &write_allowed);
	LuaSettings* settings = new LuaSettings(path, write_allowed);
	void **userdata = (void **) lua_newuserdata(L, sizeof(void *));
	*userdata = settings;
	luaL_getmetatable(L, className);
	lua_setmetatable(L, -2);
	return 1;
}
| 0
|
328,913
|
/*
 * Deep-copy every verification-type entry from type_list into sf_list.
 * Entries that cannot be duplicated (allocation failure or a NULL
 * source entry) are skipped; a copy that fails to append is released.
 */
R_API void U(copy_type_info_to_stack_frame_list)(RList * type_list, RList * sf_list) {
	RListIter *iter, *iter_tmp;
	RBinJavaVerificationObj *ver_obj, *new_ver_obj;
	if (!type_list || !sf_list) {
		return;
	}
	r_list_foreach_safe (type_list, iter, iter_tmp, ver_obj) {
		new_ver_obj = (RBinJavaVerificationObj *) malloc (sizeof (RBinJavaVerificationObj));
		// FIXME: how to handle failed memory allocation?
		if (!new_ver_obj || !ver_obj) {
			R_FREE (new_ver_obj);
			continue;
		}
		memcpy (new_ver_obj, ver_obj, sizeof (RBinJavaVerificationObj));
		if (!r_list_append (sf_list, (void *) new_ver_obj)) {
			R_FREE (new_ver_obj);
		}
	}
}
| 0
|
445,892
|
/*
 * GtkClipboard "get" callback: serialise the window's pending copy data
 * for a FR_SPECIAL_URI_LIST request. Requests for any other target are
 * ignored. The serialised buffer is copied into selection_data and
 * then freed.
 */
fr_clipboard_get (GtkClipboard     *clipboard,
		  GtkSelectionData *selection_data,
		  guint             info,
		  gpointer          user_data_or_owner)
{
	FrWindow *window = user_data_or_owner;
	char     *data;
	if (gtk_selection_data_get_target (selection_data) != FR_SPECIAL_URI_LIST)
		return;
	data = get_selection_data_from_clipboard_data (window, window->priv->copy_data);
	if (data != NULL) {
		/* 8 = bits per data unit (raw bytes). */
		gtk_selection_data_set (selection_data,
					gtk_selection_data_get_target (selection_data),
					8,
					(guchar *) data,
					strlen (data));
		g_free (data);
	}
}
| 0
|
225,009
|
/*
 * Install a new thread-locking callback, falling back to the built-in
 * default when NULL is passed. Returns the previously installed handler
 * so callers can restore it.
 */
PQregisterThreadLock(pgthreadlock_t newhandler)
{
	pgthreadlock_t prev = pg_g_threadlock;

	pg_g_threadlock = newhandler ? newhandler : default_threadlock;
	return prev;
}
| 0
|
225,022
|
/*
 * Return the host of the connection's currently active host entry:
 * the user-supplied host name if set, otherwise the hostaddr.
 * Returns NULL for a NULL conn and "" when no usable value exists.
 */
PQhost(const PGconn *conn)
{
	if (!conn)
		return NULL;
	if (conn->connhost != NULL)
	{
		/*
		 * Return the verbatim host value provided by user, or hostaddr in its
		 * lack.
		 */
		if (conn->connhost[conn->whichhost].host != NULL &&
			conn->connhost[conn->whichhost].host[0] != '\0')
			return conn->connhost[conn->whichhost].host;
		else if (conn->connhost[conn->whichhost].hostaddr != NULL &&
				 conn->connhost[conn->whichhost].hostaddr[0] != '\0')
			return conn->connhost[conn->whichhost].hostaddr;
	}
	return "";
}
| 0
|
366,283
|
/*
 * Drop namespace_sem and finish deferred cleanup: release expired
 * mountpoint dentries, then — after an RCU grace period so lockless
 * walkers are done with them — drop the final references on all
 * unmounted mounts collected while the lock was held.
 */
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);
	/* Detach the pending work before releasing the semaphore. */
	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);
	up_write(&namespace_sem);
	shrink_dentry_list(&list);
	if (likely(hlist_empty(&head)))
		return;
	/* Wait for RCU readers before the mounts are torn down. */
	synchronize_rcu_expedited();
	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}
| 0
|
294,396
|
/* Sub-second fraction of x expressed as a fraction of a day
 * (nanoseconds converted via ns_to_day). */
m_sf_in_day(union DateData *x)
{
    return ns_to_day(m_sf(x));
}
| 0
|
379,660
|
/*
 * Free every variable owned by the function and empty its vars vector.
 * (Iterating an empty pvector is a no-op, so no length guard is needed.)
 */
R_API void r_anal_function_delete_all_vars(RAnalFunction *fcn) {
	void **it;
	r_return_if_fail (fcn);
	r_pvector_foreach (&fcn->vars, it) {
		var_free (*it);
	}
	r_pvector_clear (&fcn->vars);
}
| 0
|
336,627
|
/* Accessor: the configured video-streaming mode for this server. */
uint32_t reds_get_streaming_video(const RedsState *reds)
{
    return reds->config->streaming_video;
}
| 0
|
508,376
|
/**
  Remember a back-off action to be taken after closing tables and
  releasing metadata locks, optionally recording the table the action
  applies to.

  @param action_arg  the action to perform (discover/repair/reopen/retry)
  @param table       table the action applies to, for OT_DISCOVER/OT_REPAIR

  @retval TRUE   back-off is not possible (deadlock reported, or OOM)
  @retval FALSE  action recorded; caller should back off and retry
*/
Open_table_context::
request_backoff_action(enum_open_table_action action_arg,
                       TABLE_LIST *table)
{
  /*
    A back off action may be one of three kinds:
    * We met a broken table that needs repair, or a table that
      is not present on this MySQL server and needs re-discovery.
      To perform the action, we need an exclusive metadata lock on
      the table. Acquiring X lock while holding other shared
      locks can easily lead to deadlocks. We rely on MDL deadlock
      detector to discover them. If this is a multi-statement
      transaction that holds metadata locks for completed statements,
      we should keep these locks after discovery/repair.
      The action type in this case is OT_DISCOVER or OT_REPAIR.
    * Our attempt to acquire an MDL lock lead to a deadlock,
      detected by the MDL deadlock detector. The current
      session was chosen a victim. If this is a multi-statement
      transaction that holds metadata locks taken by completed
      statements, restarting locking for the current statement
      may lead to a livelock. Releasing locks of completed
      statements can not be done as will lead to violation
      of ACID. Thus, again, if m_has_locks is set,
      we report an error. Otherwise, when there are no metadata
      locks other than which belong to this statement, we can
      try to recover from error by releasing all locks and
      restarting the pre-locking.
      Similarly, a deadlock error can occur when the
      pre-locking process met a TABLE_SHARE that is being
      flushed, and unsuccessfully waited for the flush to
      complete. A deadlock in this case can happen, e.g.,
      when our session is holding a metadata lock that
      is being waited on by a session which is using
      the table which is being flushed. The only way
      to recover from this error is, again, to close all
      open tables, release all locks, and retry pre-locking.
      Action type name is OT_REOPEN_TABLES. Re-trying
      while holding some locks may lead to a livelock,
      and thus we don't do it.
    * Finally, this session has open TABLEs from different
      "generations" of the table cache. This can happen, e.g.,
      when, after this session has successfully opened one
      table used for a statement, FLUSH TABLES interfered and
      expelled another table used in it. FLUSH TABLES then
      blocks and waits on the table already opened by this
      statement.
      We detect this situation by ensuring that table cache
      version of all tables used in a statement is the same.
      If it isn't, all tables needs to be reopened.
      Note, that we can always perform a reopen in this case,
      even if we already have metadata locks, since we don't
      keep tables open between statements and a livelock
      is not possible.
  */
  /* Retrying while holding locks would risk a livelock: report deadlock. */
  if (action_arg == OT_BACKOFF_AND_RETRY && m_has_locks)
  {
    my_error(ER_LOCK_DEADLOCK, MYF(0));
    m_thd->mark_transaction_to_rollback(true);
    return TRUE;
  }
  /*
    If auto-repair or discovery are requested, a pointer to table
    list element must be provided.
  */
  if (table)
  {
    DBUG_ASSERT(action_arg == OT_DISCOVER || action_arg == OT_REPAIR);
    /* Copy the table reference into statement memory: the original
       TABLE_LIST may not outlive the back-off. */
    m_failed_table= (TABLE_LIST*) m_thd->alloc(sizeof(TABLE_LIST));
    if (m_failed_table == NULL)
      return TRUE;
    m_failed_table->init_one_table(table->db, table->db_length,
                                   table->table_name,
                                   table->table_name_length,
                                   table->alias, TL_WRITE);
    m_failed_table->open_strategy= table->open_strategy;
    m_failed_table->mdl_request.set_type(MDL_EXCLUSIVE);
  }
  m_action= action_arg;
  return FALSE;
}
| 0
|
459,157
|
/*
 * Copy the action's cookie (if any) into the flow-action entry.
 * The cookie is read under RCU and duplicated with GFP_ATOMIC since
 * we cannot sleep inside the read-side critical section.
 * Returns 0 on success (including no cookie) or -ENOMEM.
 */
static int tcf_act_get_cookie(struct flow_action_entry *entry,
			      const struct tc_action *act)
{
	struct tc_cookie *cookie;
	int err = 0;
	rcu_read_lock();
	cookie = rcu_dereference(act->act_cookie);
	if (cookie) {
		entry->cookie = flow_action_cookie_create(cookie->data,
							  cookie->len,
							  GFP_ATOMIC);
		if (!entry->cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}
| 0
|
328,848
|
/*
 * Parse an InnerClasses attribute from `buffer` (length sz, at file
 * offset buf_offset). Each class entry is 4 ushorts (8 bytes); its name
 * is resolved from the constant pool, falling back to the inner-class
 * info entry. Returns the attribute (possibly with a truncated class
 * list on malformed input) or NULL on allocation failure / short buffer.
 *
 * Fixes vs. previous version: the unresolved-name error path leaked
 * icattr->flags_str and a freshly duplicated "NULL" string; the
 * constant-pool diagnostics were garbled ("BINCPLIS IS HULL").
 */
R_API RBinJavaAttrInfo *r_bin_java_inner_classes_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	RBinJavaClassesAttribute *icattr;
	RBinJavaCPTypeObj *obj;
	ut32 i = 0;
	ut64 offset = 0, curpos;
	if (sz < 8) {
		return NULL;
	}
	RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	if (!attr) {
		return NULL;
	}
	offset += 6; /* skip the generic attribute header */
	attr->type = R_BIN_JAVA_ATTR_TYPE_INNER_CLASSES_ATTR;
	attr->info.inner_classes_attr.number_of_classes = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	attr->info.inner_classes_attr.classes = r_list_newf (r_bin_java_inner_classes_attr_entry_free);
	for (i = 0; i < attr->info.inner_classes_attr.number_of_classes; i++) {
		curpos = buf_offset + offset;
		/* Bounds check: each entry needs 8 more bytes. */
		if (offset + 8 > sz) {
			eprintf ("Invalid amount of inner classes\n");
			break;
		}
		icattr = R_NEW0 (RBinJavaClassesAttribute);
		if (!icattr) {
			break;
		}
		icattr->inner_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		icattr->outer_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		icattr->inner_name_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		icattr->inner_class_access_flags = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		icattr->flags_str = retrieve_class_method_access_string (icattr->inner_class_access_flags);
		icattr->file_offset = curpos;
		icattr->size = 8;
		/* Resolve the inner-class name; fall back to the class-info entry. */
		obj = r_bin_java_get_item_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, icattr->inner_name_idx);
		if (!obj) {
			eprintf ("BINCPLIST IS NULL %d\n", icattr->inner_name_idx);
		}
		icattr->name = r_bin_java_get_item_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, obj);
		if (!icattr->name) {
			obj = r_bin_java_get_item_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, icattr->inner_class_info_idx);
			if (!obj) {
				eprintf ("BINCPLIST IS NULL %d\n", icattr->inner_class_info_idx);
			}
			icattr->name = r_bin_java_get_item_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, obj);
			if (!icattr->name) {
				eprintf ("r_bin_java_inner_classes_attr: Unable to find the name for %d index.\n", icattr->inner_name_idx);
				/* Fix: release everything the entry owns before
				 * abandoning it (flags_str was leaked before). */
				free (icattr->flags_str);
				free (icattr);
				break;
			}
		}
		IFDBG eprintf ("r_bin_java_inner_classes_attr: Inner class name %d is %s.\n", icattr->inner_name_idx, icattr->name);
		r_list_append (attr->info.inner_classes_attr.classes, (void *) icattr);
	}
	attr->size = offset;
	// IFDBG r_bin_java_print_inner_classes_attr_summary(attr);
	return attr;
}
| 0
|
427,812
|
/*
 * Prepare per-CPU host state before entering an SEV-ES guest: VMSAVE
 * the host save area and stash the host register values (XCR0, PKRU,
 * XSS) that hardware will restore on VMEXIT.
 */
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	struct vmcb_save_area *hostsa;
	/*
	 * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
	 * of which one step is to perform a VMLOAD. Since hardware does not
	 * perform a VMSAVE on VMRUN, the host savearea must be updated.
	 */
	vmsave(__sme_page_pa(sd->save_area));
	/* XCR0 is restored on VMEXIT, save the current host value */
	/* 0x400: offset of the save area within the page — presumably the
	 * architectural host-state offset; TODO confirm against the APM. */
	hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	/* PKRU is restored on VMEXIT, save the current host value */
	hostsa->pkru = read_pkru();
	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
	hostsa->xss = host_xss;
}
| 0
|
264,250
|
/*
 * Handle the RFB ClientInit message (the shared-access flag byte),
 * apply the display's share policy, then send the ServerInit reply
 * (framebuffer size, pixel format, desktop name). Always returns 0;
 * clients rejected by policy are queued for disconnect first.
 */
static int protocol_client_init(VncState *vs, uint8_t *data, size_t len)
{
    char buf[1024];
    VncShareMode mode;
    int size;
    /* data[0] != 0 means the client requested shared access. */
    mode = data[0] ? VNC_SHARE_MODE_SHARED : VNC_SHARE_MODE_EXCLUSIVE;
    switch (vs->vd->share_policy) {
    case VNC_SHARE_POLICY_IGNORE:
        /*
         * Ignore the shared flag.  Nothing to do here.
         *
         * Doesn't conform to the rfb spec but is traditional qemu
         * behavior, thus left here as option for compatibility
         * reasons.
         */
        break;
    case VNC_SHARE_POLICY_ALLOW_EXCLUSIVE:
        /*
         * Policy: Allow clients ask for exclusive access.
         *
         * Implementation: When a client asks for exclusive access,
         * disconnect all others. Shared connects are allowed as long
         * as no exclusive connection exists.
         *
         * This is how the rfb spec suggests to handle the shared flag.
         */
        if (mode == VNC_SHARE_MODE_EXCLUSIVE) {
            VncState *client;
            QTAILQ_FOREACH(client, &vs->vd->clients, next) {
                if (vs == client) {
                    continue;
                }
                /* Skip clients not yet in an established share mode. */
                if (client->share_mode != VNC_SHARE_MODE_EXCLUSIVE &&
                    client->share_mode != VNC_SHARE_MODE_SHARED) {
                    continue;
                }
                vnc_disconnect_start(client);
            }
        }
        if (mode == VNC_SHARE_MODE_SHARED) {
            if (vs->vd->num_exclusive > 0) {
                vnc_disconnect_start(vs);
                return 0;
            }
        }
        break;
    case VNC_SHARE_POLICY_FORCE_SHARED:
        /*
         * Policy: Shared connects only.
         * Implementation: Disallow clients asking for exclusive access.
         *
         * Useful for shared desktop sessions where you don't want
         * someone forgetting to say -shared when running the vnc
         * client disconnect everybody else.
         */
        if (mode == VNC_SHARE_MODE_EXCLUSIVE) {
            vnc_disconnect_start(vs);
            return 0;
        }
        break;
    }
    vnc_set_share_mode(vs, mode);
    /* ServerInit: framebuffer geometry, pixel format, desktop name. */
    vs->client_width = surface_width(vs->vd->ds);
    vs->client_height = surface_height(vs->vd->ds);
    vnc_write_u16(vs, vs->client_width);
    vnc_write_u16(vs, vs->client_height);
    pixel_format_message(vs);
    if (qemu_name)
        size = snprintf(buf, sizeof(buf), "QEMU (%s)", qemu_name);
    else
        size = snprintf(buf, sizeof(buf), "QEMU");
    vnc_write_u32(vs, size);
    vnc_write(vs, buf, size);
    vnc_flush(vs);
    vnc_client_cache_auth(vs);
    vnc_qmp_event(vs, QAPI_EVENT_VNC_INITIALIZED);
    /* Switch to normal client-message processing. */
    vnc_read_when(vs, protocol_client_msg, 1);
    return 0;
}
| 0
|
317,194
|
/*
 * LSM hook: removing an xattr. Removing the SELinux label itself is
 * forbidden outright (data must always stay labeled) once SELinux is
 * initialised; any other attribute is treated as an ordinary setattr
 * after the capability check.
 */
static int selinux_inode_removexattr(struct user_namespace *mnt_userns,
				     struct dentry *dentry, const char *name)
{
	int rc;

	if (strcmp(name, XATTR_NAME_SELINUX) != 0) {
		rc = cap_inode_removexattr(mnt_userns, dentry, name);
		if (rc)
			return rc;
		/* Not an attribute we recognize, so just check the
		   ordinary setattr permission. */
		return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
	}
	if (!selinux_initialized(&selinux_state))
		return 0;
	/* No one is allowed to remove a SELinux security label.
	   You can change the label, but all data must be labeled. */
	return -EACCES;
}
| 0
|
344,244
|
/*
** Create a new Lua closure for prototype 'p', anchor it at stack slot
** 'ra', and fill in its upvalues: locals still on the stack become
** open upvalues via luaF_findupval, others are taken from the
** enclosing closure's upvalue array 'encup'.
*/
static void pushclosure (lua_State *L, Proto *p, UpVal **encup, StkId base,
                         StkId ra) {
  int nup = p->sizeupvalues;
  Upvaldesc *uv = p->upvalues;
  int i;
  LClosure *ncl = luaF_newLclosure(L, nup);
  ncl->p = p;
  setclLvalue2s(L, ra, ncl);  /* anchor new closure in stack */
  for (i = 0; i < nup; i++) {  /* fill in its upvalues */
    if (uv[i].instack)  /* upvalue refers to local variable? */
      ncl->upvals[i] = luaF_findupval(L, base + uv[i].idx);
    else  /* get upvalue from enclosing function */
      ncl->upvals[i] = encup[uv[i].idx];
    /* GC barrier: the closure may now reference a younger object. */
    luaC_objbarrier(L, ncl, ncl->upvals[i]);
  }
}
| 0
|
459,166
|
/* Notify one registered listener that the chain's head tcf_proto changed;
 * items without a callback are silently skipped. */
static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (!item->chain_head_change)
		return;
	item->chain_head_change(tp_head, item->chain_head_change_priv);
}
| 0
|
415,211
|
/* Assuan command handler: GENKEY [--force] [--timestamp=ISOTIME] KEYNO
 *
 * Generates a key on the card in slot KEYNO.  --force allows
 * overwriting an existing key; --timestamp fixes the creation time.
 * Note: `line` is parsed destructively (options skipped, keyno
 * NUL-terminated in place) before being duplicated.
 */
cmd_genkey (assuan_context_t ctx, char *line)
{
  ctrl_t ctrl = assuan_get_pointer (ctx);
  int rc;
  char *keyno;
  int force;
  const char *s;
  time_t timestamp;
  if ( IS_LOCKED (ctrl) )
    return gpg_error (GPG_ERR_LOCKED);
  force = has_option (line, "--force");
  if ((s=has_option_name (line, "--timestamp")))
    {
      if (*s != '=')
        return set_error (GPG_ERR_ASS_PARAMETER, "missing value for option");
      /* isotime2epoch presumably returns a value < 1 on parse
       * failure — hence the `< 1` validity check below. */
      timestamp = isotime2epoch (s+1);
      if (timestamp < 1)
        return set_error (GPG_ERR_ASS_PARAMETER, "invalid time value");
    }
  else
    timestamp = 0;
  line = skip_options (line);
  if (!*line)
    return set_error (GPG_ERR_ASS_PARAMETER, "no key number given");
  /* Isolate the key number token in place. */
  keyno = line;
  while (*line && !spacep (line))
    line++;
  *line = 0;
  if ((rc = open_card (ctrl, NULL)))
    return rc;
  if (!ctrl->app_ctx)
    return gpg_error (GPG_ERR_UNSUPPORTED_OPERATION);
  /* Duplicate keyno: the app layer may outlive the Assuan line buffer. */
  keyno = xtrystrdup (keyno);
  if (!keyno)
    return out_of_core ();
  rc = app_genkey (ctrl->app_ctx, ctrl, keyno, force? 1:0,
                   timestamp, pin_cb, ctx);
  xfree (keyno);
  TEST_CARD_REMOVAL (ctrl, rc);
  return rc;
}
| 0
|
355,623
|
/*
 * Garbage collect lists and dictionaries that have circular references
 * and are otherwise unreachable.  Marks everything reachable from all
 * known roots with a new copyID, then frees what was not marked.
 * "testing" is non-zero when invoked from test code (keeps the
 * want/may flags untouched and includes function-call arguments).
 * Returns TRUE if something was freed, FALSE otherwise (FAIL on an
 * out-of-memory condition while shrinking the execution stack).
 */
    garbage_collect(int testing)
{
    int		copyID;
    int		abort = FALSE;
    buf_T	*buf;
    win_T	*wp;
    int		did_free = FALSE;
    tabpage_T	*tp;
    if (!testing)
    {
	// Only do this once.
	want_garbage_collect = FALSE;
	may_garbage_collect = FALSE;
	garbage_collect_at_exit = FALSE;
    }
    // The execution stack can grow big, limit the size.
    if (exestack.ga_maxlen - exestack.ga_len > 500)
    {
	size_t	new_len;
	char_u	*pp;
	int	n;
	// Keep 150% of the current size, with a minimum of the growth size.
	n = exestack.ga_len / 2;
	if (n < exestack.ga_growsize)
	    n = exestack.ga_growsize;
	// Don't make it bigger though.
	if (exestack.ga_len + n < exestack.ga_maxlen)
	{
	    new_len = exestack.ga_itemsize * (exestack.ga_len + n);
	    pp = vim_realloc(exestack.ga_data, new_len);
	    if (pp == NULL)
		return FAIL;
	    exestack.ga_maxlen = exestack.ga_len + n;
	    exestack.ga_data = pp;
	}
    }
    // We advance by two because we add one for items referenced through
    // previous_funccal.
    copyID = get_copyID();
    /*
     * 1. Go through all accessible variables and mark all lists and dicts
     *    with copyID.
     */
    // Don't free variables in the previous_funccal list unless they are only
    // referenced through previous_funccal.  This must be first, because if
    // the item is referenced elsewhere the funccal must not be freed.
    abort = abort || set_ref_in_previous_funccal(copyID);
    // script-local variables
    abort = abort || garbage_collect_scriptvars(copyID);
    // buffer-local variables
    FOR_ALL_BUFFERS(buf)
	abort = abort || set_ref_in_item(&buf->b_bufvar.di_tv, copyID,
								  NULL, NULL);
    // window-local variables
    FOR_ALL_TAB_WINDOWS(tp, wp)
	abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID,
								  NULL, NULL);
    // the special autocommand window is not in the window list
    if (aucmd_win != NULL)
	abort = abort || set_ref_in_item(&aucmd_win->w_winvar.di_tv, copyID,
								  NULL, NULL);
#ifdef FEAT_PROP_POPUP
    FOR_ALL_POPUPWINS(wp)
	abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID,
								  NULL, NULL);
    FOR_ALL_TABPAGES(tp)
	FOR_ALL_POPUPWINS_IN_TAB(tp, wp)
	    abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID,
								  NULL, NULL);
#endif
    // tabpage-local variables
    FOR_ALL_TABPAGES(tp)
	abort = abort || set_ref_in_item(&tp->tp_winvar.di_tv, copyID,
								  NULL, NULL);
    // global variables
    abort = abort || garbage_collect_globvars(copyID);
    // function-local variables
    abort = abort || set_ref_in_call_stack(copyID);
    // named functions (matters for closures)
    abort = abort || set_ref_in_functions(copyID);
    // function call arguments, if v:testing is set.
    abort = abort || set_ref_in_func_args(copyID);
    // funcstacks keep variables for closures
    abort = abort || set_ref_in_funcstacks(copyID);
    // v: vars
    abort = abort || garbage_collect_vimvars(copyID);
    // callbacks in buffers
    abort = abort || set_ref_in_buffers(copyID);
    // 'completefunc', 'omnifunc' and 'thesaurusfunc' callbacks
    abort = abort || set_ref_in_insexpand_funcs(copyID);
    // 'operatorfunc' callback
    abort = abort || set_ref_in_opfunc(copyID);
    // 'tagfunc' callback
    abort = abort || set_ref_in_tagfunc(copyID);
    // 'imactivatefunc' and 'imstatusfunc' callbacks
    abort = abort || set_ref_in_im_funcs(copyID);
#ifdef FEAT_LUA
    abort = abort || set_ref_in_lua(copyID);
#endif
#ifdef FEAT_PYTHON
    abort = abort || set_ref_in_python(copyID);
#endif
#ifdef FEAT_PYTHON3
    abort = abort || set_ref_in_python3(copyID);
#endif
#ifdef FEAT_JOB_CHANNEL
    abort = abort || set_ref_in_channel(copyID);
    abort = abort || set_ref_in_job(copyID);
#endif
#ifdef FEAT_NETBEANS_INTG
    abort = abort || set_ref_in_nb_channel(copyID);
#endif
#ifdef FEAT_TIMERS
    abort = abort || set_ref_in_timer(copyID);
#endif
#ifdef FEAT_QUICKFIX
    abort = abort || set_ref_in_quickfix(copyID);
#endif
#ifdef FEAT_TERMINAL
    abort = abort || set_ref_in_term(copyID);
#endif
#ifdef FEAT_PROP_POPUP
    abort = abort || set_ref_in_popups(copyID);
#endif
    if (!abort)
    {
	/*
	 * 2. Free lists and dictionaries that are not referenced.
	 */
	did_free = free_unref_items(copyID);
	/*
	 * 3. Check if any funccal can be freed now.
	 *    This may call us back recursively.
	 */
	free_unref_funccal(copyID, testing);
    }
    else if (p_verbose > 0)
    {
	verb_msg(_("Not enough memory to set references, garbage collection aborted!"));
    }
    return did_free;
}
| 0
|
206,781
|
/*
 * Convert a file whose data is stored inside the inode (ICB) to a
 * normal file using allocation descriptors, so it can grow beyond the
 * in-ICB limit.  Called with i_data_sem held for writing; the lock is
 * dropped and retaken because page lock ranks above it.  On write-out
 * failure the in-ICB state is restored.  Returns 0 or a -errno.
 */
int udf_expand_file_adinicb(struct inode *inode)
{
	struct page *page;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;
	WARN_ON_ONCE(!inode_is_locked(inode));
	if (!iinfo->i_lenAlloc) {
		/* Empty file: just switch allocation type, nothing to copy. */
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		/* from now on we have normal address_space methods */
		inode->i_data.a_ops = &udf_aops;
		up_write(&iinfo->i_data_sem);
		mark_inode_dirty(inode);
		return 0;
	}
	/*
	 * Release i_data_sem so that we can lock a page - page lock ranks
	 * above i_data_sem. i_mutex still protects us against file changes.
	 */
	up_write(&iinfo->i_data_sem);
	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	/* Copy the in-ICB data into page 0 and zero-fill the remainder. */
	if (!PageUptodate(page)) {
		kaddr = kmap_atomic(page);
		memset(kaddr + iinfo->i_lenAlloc, 0x00,
		       PAGE_SIZE - iinfo->i_lenAlloc);
		memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr,
		       iinfo->i_lenAlloc);
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap_atomic(kaddr);
	}
	down_write(&iinfo->i_data_sem);
	/* Clear the in-ICB copy and flip to descriptor-based allocation. */
	memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;
	set_page_dirty(page);
	unlock_page(page);
	up_write(&iinfo->i_data_sem);
	err = filemap_fdatawrite(inode->i_mapping);
	if (err) {
		/* Restore everything back so that we don't lose data... */
		lock_page(page);
		down_write(&iinfo->i_data_sem);
		kaddr = kmap_atomic(page);
		memcpy(iinfo->i_data + iinfo->i_lenEAttr, kaddr, inode->i_size);
		kunmap_atomic(kaddr);
		unlock_page(page);
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		inode->i_data.a_ops = &udf_adinicb_aops;
		up_write(&iinfo->i_data_sem);
	}
	put_page(page);
	mark_inode_dirty(inode);
	return err;
}
| 1
|
221,675
|
/*
 * Verify that the peer's TLS certificate matches the expected hostname.
 *
 * Checks dNSName subjectAltName entries first (including "*" wildcard
 * labels, where the wildcard must not span a '.'); if the certificate
 * carries DNS altnames, the common name is NOT consulted (per RFC 6125
 * style precedence as implemented here).  Otherwise falls back to the
 * commonName entries, honouring "*." wildcard prefixes.
 *
 * Returns 0 on a match, -1 otherwise.
 *
 * Fix: the X509 returned by SSL_get_peer_certificate() is a counted
 * reference and must always be released; the original code leaked it on
 * the "no common name matched" fall-through path.
 */
int Socket::checkCertHostname(const std::string &_hostname)
{
#if OPENSSL_VERSION_NUMBER < 0x10100000L
    String hostname = _hostname;
    X509 *peercertificate = SSL_get_peer_certificate(ssl);
    if (peercertificate == NULL) {
#ifdef NETDEBUG
        std::cout << thread_id << "unable to get certificate for " << hostname << std::endl;
#endif
        return -1;
    }
    //force to lower case as domain names are not case sensetive
    hostname.toLower();
#ifdef NETDEBUG
    std::cout << thread_id << "checking certificate" << hostname << std::endl;
    std::cout << thread_id << "Checking hostname against subjectAltNames" << std::endl;
#endif
    bool matched = false;
    bool hasaltname = false;
    //check the altname extension for additional valid names
    STACK_OF(GENERAL_NAME) *gens = NULL;
    gens = (STACK_OF(GENERAL_NAME) *)X509_get_ext_d2i(peercertificate, NID_subject_alt_name, 0, 0);
    int r = sk_GENERAL_NAME_num(gens);
    for (int i = 0; i < r; ++i) {
        const GENERAL_NAME *gn = sk_GENERAL_NAME_value(gens, i);
        //if its not a dns entry we really dont care about it
        if (gn->type != GEN_DNS) {
            continue;
        }
        //only mark hasaltname as true if it has a DNS altname
        hasaltname = true;
        //an ASN1_IA5STRING is a define of an ASN1_STRING so we can do it this way
        unsigned char *nameutf8;
        int len = ASN1_STRING_to_UTF8(&nameutf8, gn->d.ia5);
        if (len < 0) {
            break;
        }
        String altname = std::string((char *)nameutf8, len);
        OPENSSL_free(nameutf8);
        //force to lower case as domain names are not case sensetive
        altname.toLower();
#ifdef NETDEBUG
        std::cout << thread_id << "checking against alt name " << altname << std::endl;
#endif
        if (hostname.compare(altname) == 0) {
            matched = true;
            break;
        } else if (altname.contains("*")) {
#ifdef NETDEBUG
            std::cout << thread_id << "Wildcard certificate is in use" << std::endl;
#endif
            String anend;
            anend = altname.after("*"); // need to keep the "."
            if (hostname.endsWith(anend)) {
                bool part_match = true;
                String anstart = altname.before("*");
                String t = hostname.before(anend.c_str());
                if (anstart.length() > 0) { // if something before * we must also match this
                    if (hostname.startsWith(anstart)) {
                        t = t.after(anstart.c_str());
                    } else {
                        part_match = false; // even though after * matches, no match on before * - so cannot match
                    }
                }
                // t now contains what is matched by the '*" - this must not contain a '.'
                if (part_match && !t.contains(".")) {
                    matched = true;
                    break;
                }
            }
        }
    }
    sk_GENERAL_NAME_pop_free(gens, GENERAL_NAME_free);
    if (matched) {
        X509_free(peercertificate);
        return 0;
    } else if (hasaltname) {
        // DNS altnames were present but none matched: do not fall back to CN
        X509_free(peercertificate);
        return -1;
    }
#ifdef NETDEBUG
    std::cout << thread_id << "checking hostname against the following common names" << std::endl;
#endif
    X509_NAME *name = X509_get_subject_name(peercertificate);
    int current_entry = -1;
    while (1) {
        //get the common name from the certificate
        current_entry = X509_NAME_get_index_by_NID(name, NID_commonName, current_entry);
        if (current_entry == -1) {
            //if we've run out of common names then move on to altnames
            break;
        }
        //X509_NAME_get_entry result must not be freed
        X509_NAME_ENTRY *entry = X509_NAME_get_entry(name, current_entry);
        ASN1_STRING *asn1name = X509_NAME_ENTRY_get_data(entry);
        unsigned char *nameutf8;
        int len = ASN1_STRING_to_UTF8(&nameutf8, asn1name);
        if (len < 0) {
            break;
        }
        String commonname = std::string((char *)nameutf8, len);
        OPENSSL_free(nameutf8);
        //force to lower case as domain names are not case sensetive
        commonname.toLower();
#ifdef NETDEBUG
        std::cout << thread_id << "checking against common name " << commonname << std::endl;
#endif
        //compare the hostname to the common name
        if (hostname.compare(commonname) == 0) {
            matched = true;
            break;
        }
        //see if its a wildcard certificate
        else if (commonname.startsWith("*.")) {
#ifdef NETDEBUG
            std::cout << thread_id << "Wildcard certificate is in use" << std::endl;
#endif
            commonname = commonname.after("*"); // need to keep the "."
            if (hostname.endsWith(commonname)) {
                matched = true;
                break;
            }
        }
    }
    if (matched) {
        X509_free(peercertificate);
        return 0;
    }
    // No CN matched either: release the certificate reference before
    // failing (the original code leaked peercertificate here).
    X509_free(peercertificate);
#else // is openssl v1.1 or above
    return 0; //TODO
#endif
    return -1;
}
| 0
|
343,160
|
/*
 * Initialise the AEAD transform for a legacy (non-AEAD) ESP state by
 * composing an authenc(auth,enc) or authencesn(...) template, allocating
 * the aead tfm and programming the combined key blob:
 *
 *   [rtattr: enckeylen param][auth key][enc key]
 *
 * Returns 0 on success or a negative errno; the allocated aead is left
 * in x->data for the caller's teardown path on failure.
 */
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;
	err = -ENAMETOOLONG;
	/* Build "geniv(authenc[esn](auth,enc))"; auth falls back to digest_null. */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}
	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;
	x->data = aead;
	/* Key blob = rtattr header + auth key (if any) + enc key. */
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));
	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;
		/* Sanity-check the digest size against the algorithm table. */
		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);
		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %u\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}
		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}
	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
	err = crypto_aead_setkey(aead, key, keylen);
free_key:
	kfree(key);
error:
	return err;
}
| 0
|
484,792
|
/*
 * Refill the RX ring of a netfront queue with freshly allocated skbs,
 * granting the backend foreign access to each page.  If allocation
 * fails or too few slots are outstanding, a refill timer is armed to
 * retry later instead of notifying the backend.
 */
static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;
	int err = 0;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;
	/* Fill every free slot between the producer and consumer. */
	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb) {
			err = -ENOMEM;
			break;
		}
		id = xennet_rxidx(req_prod);
		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;
		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
		queue->grant_rx_ref[id] = ref;
		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page,
							 0);
		req->id = id;
		req->gref = ref;
	}
	queue->rx.req_prod_pvt = req_prod;
	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * Enough requests is quantified as the sum of newly created slots and
	 * the unconsumed slots at the backend.
	 */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
	    unlikely(err)) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}
| 0
|
225,966
|
/*
 * Allocate a new, empty sample-size ('stsz'/'stz2') box.
 * The box type is left at 0 here since the same structure backs both
 * the regular and the compact table variants.
 */
GF_Box *stsz_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_SampleSizeBox, 0);
	//type is unknown here, can be regular or compact table
	return (GF_Box *)tmp;
}
| 0
|
343,153
|
/*
 * Async completion callback for ESP output crypto.  Releases the
 * temporary scatterlist buffer and then resumes transmission: via the
 * offload path (XFRM_DEV_RESUME), via ESP-in-TCP encapsulation, or via
 * the normal xfrm output path.
 */
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	/* Recover the state: from the sec_path for offloaded skbs,
	 * otherwise from the dst entry. */
	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}
	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}
		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}
| 0
|
317,254
|
/*
 * Initialise the SELinux security xattr of a new kernfs node.  The new
 * SID is either the task's create SID or a type transition computed
 * from the parent directory's context; nodes whose parent carries no
 * SELinux xattr are left untouched.  Returns 0 or a negative errno.
 */
static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
					struct kernfs_node *kn)
{
	const struct task_security_struct *tsec = selinux_cred(current_cred());
	u32 parent_sid, newsid, clen;
	int rc;
	char *context;

	/* First call probes the length of the parent's xattr value. */
	rc = kernfs_xattr_get(kn_dir, XATTR_NAME_SELINUX, NULL, 0);
	if (rc == -ENODATA)
		return 0;
	else if (rc < 0)
		return rc;

	clen = (u32)rc;
	context = kmalloc(clen, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	rc = kernfs_xattr_get(kn_dir, XATTR_NAME_SELINUX, context, clen);
	if (rc < 0) {
		kfree(context);
		return rc;
	}

	rc = security_context_to_sid(&selinux_state, context, clen, &parent_sid,
				     GFP_KERNEL);
	kfree(context);
	if (rc)
		return rc;

	if (tsec->create_sid) {
		newsid = tsec->create_sid;
	} else {
		/* Compute a transition SID based on class and name. */
		u16 secclass = inode_mode_to_security_class(kn->mode);
		struct qstr q;

		q.name = kn->name;
		q.hash_len = hashlen_string(kn_dir, kn->name);

		rc = security_transition_sid(&selinux_state, tsec->sid,
					     parent_sid, secclass, &q,
					     &newsid);
		if (rc)
			return rc;
	}

	rc = security_sid_to_context_force(&selinux_state, newsid,
					   &context, &clen);
	if (rc)
		return rc;

	rc = kernfs_xattr_set(kn, XATTR_NAME_SELINUX, context, clen,
			      XATTR_CREATE);
	kfree(context);
	return rc;
}
| 0
|
202,677
|
/*
 * Open a Parallels disk image (read-only).  Validates the header magic
 * and version, bounds-checks the catalog size against integer overflow
 * before allocating, then reads and byte-swaps the catalog bitmap.
 * Returns 0 on success or a negative errno (setting *errp).
 */
static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    BDRVParallelsState *s = bs->opaque;
    int i;
    struct parallels_header ph;
    int ret;

    bs->read_only = 1; // no write support yet

    ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
    if (ret < 0) {
        goto fail;
    }

    if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
        (le32_to_cpu(ph.version) != HEADER_VERSION)) {
        error_setg(errp, "Image not in Parallels format");
        ret = -EINVAL;
        goto fail;
    }

    bs->total_sectors = le32_to_cpu(ph.nb_sectors);

    s->tracks = le32_to_cpu(ph.tracks);

    s->catalog_size = le32_to_cpu(ph.catalog_entries);
    /* Guard the catalog_size * 4 allocation below against overflow. */
    if (s->catalog_size > INT_MAX / 4) {
        error_setg(errp, "Catalog too large");
        ret = -EFBIG;
        goto fail;
    }
    s->catalog_bitmap = g_malloc(s->catalog_size * 4);

    ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
    if (ret < 0) {
        goto fail;
    }

    /* Convert each catalog entry from little-endian to host order. */
    for (i = 0; i < s->catalog_size; i++)
        le32_to_cpus(&s->catalog_bitmap[i]);

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->catalog_bitmap);
    return ret;
}
| 1
|
432,328
|
/*
 * Translate a host pointer back to the RAMBlock that contains it.
 * Checks the most-recently-used block first, then scans the list.
 * On success stores the offset within the block into *offset (rounded
 * down to a page boundary when round_offset is set) and returns the
 * block; returns NULL when the pointer lies in no mapped block.
 */
RAMBlock *qemu_ram_block_from_host(struct uc_struct *uc, void *ptr,
                                   bool round_offset, ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = uc->ram_list.mru_block;
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    RAMBLOCK_FOREACH(block) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    return block;
}
| 0
|
453,020
|
/*
 * nftables "dup" expression for the netdev family: duplicate the
 * current packet out of the interface whose index is stored in the
 * source register configured for this expression.
 */
static void nft_dup_netdev_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	struct nft_dup_netdev *priv = nft_expr_priv(expr);
	int oif = regs->data[priv->sreg_dev];

	nf_dup_netdev_egress(pkt, oif);
}
| 0
|
234,854
|
/*
 * Late mount-time initialisation: point every device (including those
 * of all seed filesystems) and each seed fs_devices at the fs_info,
 * under the device_list_mutex.
 */
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}
| 0
|
195,233
|
// Marks the given output port of `node` as having an unknown shape in
// its inference context.  Returns InvalidArgument if the node has no
// registered context.
Status SetUnknownShape(const NodeDef* node, int output_port) {
  shape_inference::ShapeHandle shape =
      GetUnknownOutputShape(node, output_port);
  InferenceContext* ctx = GetContext(node);
  if (ctx == nullptr) {
    return errors::InvalidArgument("Missing context");
  }
  ctx->set_output(output_port, shape);
  return Status::OK();
}
| 1
|
301,348
|
/*
 * VFS wrapper around the Linux setlease fcntl.  Returns the result of
 * linux_setlease(), or -1 with errno = ENOSYS on platforms built
 * without kernel oplock support.
 */
static int vfswrap_linux_setlease(vfs_handle_struct *handle, files_struct *fsp,
				  int leasetype)
{
	int result = -1;

	START_PROFILE(syscall_linux_setlease);

#ifdef HAVE_KERNEL_OPLOCKS_LINUX
	result = linux_setlease(fsp->fh->fd, leasetype);
#else
	errno = ENOSYS;
#endif
	END_PROFILE(syscall_linux_setlease);
	return result;
}
| 0
|
387,837
|
// Thin free-function wrapper: remove every breakpoint set on `m`.
static void clear_all_breakpoints(Method* m) {
  m->clear_all_breakpoints();
}
| 0
|
273,908
|
/*
 * Handle the FTP MDTM command: report a file's modification time in
 * "213 YYYYMMDDHHMMSS" form, or - as an extension ncftp uses - set it
 * first when the argument is "<time> <file>".  Replies 550 on any
 * failure.
 */
static void handle_MDTM(ctrl_t *ctrl, char *file)
{
	struct stat st;
	struct tm *tm;
	char *path, *ptr;
	char *mtime = NULL;
	char buf[80];

	/* Request to set mtime, ncftp does this */
	ptr = strchr(file, ' ');
	if (ptr) {
		*ptr++ = 0;
		mtime = file;
		file = ptr;
	}

	path = compose_abspath(ctrl, file);
	if (!path || stat(path, &st) || !S_ISREG(st.st_mode)) {
		send_msg(ctrl->sd, "550 Not a regular file.\r\n");
		return;
	}

	if (mtime) {
		struct timespec times[2] = {
			{ 0, UTIME_OMIT },	/* leave atime untouched */
			{ 0, 0 }
		};
		/*
		 * Zero-initialize: strptime() only fills in the fields
		 * named in the format string; without this, mktime()
		 * would read indeterminate values (e.g. tm_isdst).
		 */
		struct tm tm = { 0 };
		int rc;

		if (!strptime(mtime, "%Y%m%d%H%M%S", &tm)) {
		fail:
			send_msg(ctrl->sd, "550 Invalid time format\r\n");
			return;
		}
		tm.tm_isdst = -1;	/* let mktime() determine DST */
		times[1].tv_sec = mktime(&tm);

		/*
		 * Use AT_FDCWD rather than fd 0; the dirfd is ignored
		 * for the absolute path, but 0 only worked by accident.
		 */
		rc = utimensat(AT_FDCWD, path, times, 0);
		if (rc) {
			ERR(errno, "Failed setting MTIME %s of %s", mtime, file);
			goto fail;
		}

		(void)stat(path, &st);
	}

	tm = gmtime(&st.st_mtime);
	strftime(buf, sizeof(buf), "213 %Y%m%d%H%M%S\r\n", tm);
	send_msg(ctrl->sd, buf);
}
| 0
|
195,242
|
// Computes an elementwise binary op between a SparseTensor (indices,
// values, shape) and a dense Tensor, broadcasting the dense side only.
// Validates the sparse inputs, gathers the dense entries addressed by
// the sparse indices (bounds-checked), then applies Functor::func().
void Compute(OpKernelContext *ctx) override {
  const Tensor *indices_t, *values_t, *shape_t, *dense_t;
  OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t));
  OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t));
  OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t));
  OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t));
  // Validations.
  OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()),
              errors::InvalidArgument(
                  "Input sp_indices should be a matrix but received shape: ",
                  indices_t->shape().DebugString()));
  OP_REQUIRES(ctx,
              TensorShapeUtils::IsVector(values_t->shape()) &&
                  TensorShapeUtils::IsVector(shape_t->shape()),
              errors::InvalidArgument(
                  "Inputs sp_values and sp_shape should be vectors "
                  "but received shapes: ",
                  values_t->shape().DebugString(), " and ",
                  shape_t->shape().DebugString()));
  OP_REQUIRES(
      ctx, values_t->dim_size(0) == indices_t->dim_size(0),
      errors::InvalidArgument(
          "The first dimension of values and indices should match. (",
          values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")"));
  const auto indices_mat = indices_t->matrix<int64_t>();
  const auto shape_vec = shape_t->vec<int64_t>();
  const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec));
  const auto rhs_dims = BCast::FromShape(dense_t->shape());
  BCast b(lhs_dims, rhs_dims, false);  // false for keeping the same num dims.
  // True iff (size(lhs) >= size(rhs)) and all dims in lhs is greater or equal
  // to dims in rhs (from right to left).
  auto VecGreaterEq = [](ArraySlice<int64_t> lhs, ArraySlice<int64_t> rhs) {
    if (lhs.size() < rhs.size()) return false;
    for (size_t i = 0; i < rhs.size(); ++i) {
      if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false;
    }
    return true;
  };
  OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(),
              errors::InvalidArgument(
                  "SparseDenseBinaryOpShared broadcasts dense to sparse "
                  "only; got incompatible shapes: [",
                  absl::StrJoin(lhs_dims, ","), "] vs. [",
                  absl::StrJoin(rhs_dims, ","), "]"));
  Tensor *output_values = nullptr;
  Tensor dense_gathered;
  const int64_t nnz = indices_t->dim_size(0);
  OP_REQUIRES_OK(ctx,
                 ctx->allocate_output(0, TensorShape({nnz}), &output_values));
  OP_REQUIRES_OK(
      ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),
                              &dense_gathered));
  // Div needs a divide-by-zero check on the gathered dense entries below.
  bool op_is_div = false;
  if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) {
    op_is_div = true;
  }
  // Pulls relevant entries from the dense side, with reshape and broadcasting
  // *of the dense side* taken into account. Use a TensorRef to avoid blowing
  // up memory.
  //
  // We can directly use the sparse indices to look up dense side, because
  // "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims".
  auto dense_gathered_flat = dense_gathered.flat<T>();
  const int ndims = lhs_dims.size();
  switch (ndims) {
#define CASE(NDIM) \
  case NDIM: { \
    TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \
        dense_t->shaped<T, NDIM>(b.y_reshape()) \
            .broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \
    Eigen::array<Eigen::DenseIndex, NDIM> idx; \
    bool indices_valid = true; \
    for (int i = 0; i < nnz; ++i) { \
      for (int d = 0; d < NDIM; ++d) { \
        idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \
        if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \
          indices_valid = false; \
        } \
      } \
      OP_REQUIRES( \
          ctx, indices_valid, \
          errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \
                                  "dense side with broadcasted shape")); \
      dense_gathered_flat(i) = rhs_ref.coeff(idx); \
      if (op_is_div) { \
        OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \
                    errors::InvalidArgument( \
                        "SparseDenseCwiseDiv cannot divide by zero," \
                        "but input dense tensor contains zero ")); \
      } \
    } \
    break; \
  }

    CASE(1);
    CASE(2);
    CASE(3);
    CASE(4);
    CASE(5);
    default:
      OP_REQUIRES(
          ctx, false,
          errors::InvalidArgument("Only tensors with ranks between 1 and 5 "
                                  "are currently supported. Tensor rank: ",
                                  ndims));
#undef CASE
  }
  // Apply the binary functor to (sparse values, gathered dense values).
  output_values->flat<T>().device(ctx->eigen_device<Device>()) =
      values_t->flat<T>().binaryExpr(dense_gathered_flat,
                                     typename Functor::func());
}
| 1
|
312,396
|
/*
 * Initialise the quickfix parsing state: set up optional encoding
 * conversion, open the error file (if any), and record the input
 * source (string/list typval, buffer range).  Returns OK or FAIL
 * (error file could not be opened).
 */
    static int
qf_setup_state(
	qfstate_T	*pstate,
	char_u		*enc,
	char_u		*efile,
	typval_T	*tv,
	buf_T		*buf,
	linenr_T	lnumfirst,
	linenr_T	lnumlast)
{
    pstate->vc.vc_type = CONV_NONE;
    if (enc != NULL && *enc != NUL)
	convert_setup(&pstate->vc, enc, p_enc);

    if (efile != NULL && (pstate->fd = mch_fopen((char *)efile, "r")) == NULL)
    {
	semsg(_(e_cant_open_errorfile_str), efile);
	return FAIL;
    }

    if (tv != NULL)
    {
	if (tv->v_type == VAR_STRING)
	    pstate->p_str = tv->vval.v_string;
	else if (tv->v_type == VAR_LIST)
	    pstate->p_li = tv->vval.v_list->lv_first;
	pstate->tv = tv;
    }
    pstate->buf = buf;
    pstate->buflnum = lnumfirst;
    pstate->lnumlast = lnumlast;

    return OK;
}
| 0
|
225,486
|
// Removes every regular (non-control) fanin of `node` that matches
// `fanin`, compacting the remaining inputs in place and keeping the
// fanout sets and max-regular-input-port bookkeeping consistent.
// Returns true if any input was removed.
bool MutableGraphView::RemoveRegularFaninInternal(NodeDef* node,
                                                  const OutputPort& fanin) {
  // Erase `node_input_port` of `node` from the fanout set of `fanin_port`,
  // optionally updating the producer's max regular output port.
  auto remove_input = [this, node](const OutputPort& fanin_port,
                                   int node_input_port, bool update_max_port) {
    InputPort input(node, node_input_port);

    absl::flat_hash_set<InputPort>* fanouts_set = &fanouts()[fanin_port];
    fanouts_set->erase(input);
    if (update_max_port) {
      UpdateMaxRegularOutputPortForRemovedFanin(fanin_port, *fanouts_set);
    }
    return fanouts_set;
  };

  auto mutable_inputs = node->mutable_input();
  bool modified = false;
  const int num_regular_fanins =
      NumFanins(*node, /*include_controlling_nodes=*/false);
  int i;
  int curr_pos = 0;
  for (i = 0; i < num_regular_fanins; ++i) {
    TensorId tensor_id = ParseTensorName(node->input(i));
    if (tensor_id.node() == fanin.node->name() &&
        tensor_id.index() == fanin.port_id) {
      remove_input(fanin, i, /*update_max_port=*/true);
      modified = true;
    } else if (modified) {
      // Regular inputs will need to have their ports updated.
      OutputPort fanin_port(nodes()[tensor_id.node()], tensor_id.index());
      auto fanouts_set = remove_input(fanin_port, i, /*update_max_port=*/false);
      fanouts_set->insert({node, curr_pos});
      // Shift inputs to be retained.
      mutable_inputs->SwapElements(i, curr_pos);
      ++curr_pos;
    } else {
      // Skip inputs to be retained until first modification.
      ++curr_pos;
    }
  }

  if (modified) {
    const int last_regular_input_port = curr_pos - 1;
    if (last_regular_input_port < 0) {
      max_regular_input_port().erase(node);
    } else {
      max_regular_input_port()[node] = last_regular_input_port;
    }
    if (curr_pos < i) {
      // Remove fanins from node inputs.
      mutable_inputs->DeleteSubrange(curr_pos, i - curr_pos);
    }
  }

  return modified;
}
| 0
|
317,031
|
/* LSM hook: release the SELinux security blob attached to an inode. */
static void selinux_inode_free_security(struct inode *inode)
{
	inode_free_security(inode);
}
| 0
|
463,077
|
/*
 * MMIO read handler for the SunGEM TXDMA register bank.  Valid
 * registers live in [0, 0x38) and [0x100, 0x118]; reads outside those
 * ranges are logged as guest errors and return 0.
 */
static uint64_t sungem_mmio_txdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;

    bool known_reg = (addr < 0x38) || (addr >= 0x100 && addr <= 0x118);
    if (!known_reg) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    uint32_t value = s->txdmaregs[addr >> 2];
    trace_sungem_mmio_txdma_read(addr, value);
    return value;
}
| 0
|
318,108
|
/*
 * Allocate the RX URBs (one per control block; two when coex mode is
 * active) and start the RX thread for an RSI USB adapter.  On failure
 * any allocated URBs are released (usb_free_urb(NULL) is a no-op).
 * Returns 0 on success, -1 on error.
 */
static int rsi_usb_init_rx(struct rsi_hw *adapter)
{
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
	struct rx_usb_ctrl_block *rx_cb;
	u8 idx, num_rx_cb;

	num_rx_cb = (adapter->priv->coex_mode > 1 ? 2 : 1);

	for (idx = 0; idx < num_rx_cb; idx++) {
		rx_cb = &dev->rx_cb[idx];

		rx_cb->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rx_cb->rx_urb) {
			rsi_dbg(ERR_ZONE, "Failed alloc rx urb[%d]\n", idx);
			goto err;
		}
		/* Endpoints are numbered from 1. */
		rx_cb->ep_num = idx + 1;
		rx_cb->data = (void *)dev;
	}
	skb_queue_head_init(&dev->rx_q);

	rsi_init_event(&dev->rx_thread.event);
	if (rsi_create_kthread(adapter->priv, &dev->rx_thread,
			       rsi_usb_rx_thread, "RX-Thread")) {
		rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__);
		goto err;
	}

	return 0;

err:
	usb_free_urb(dev->rx_cb[0].rx_urb);
	if (adapter->priv->coex_mode > 1)
		usb_free_urb(dev->rx_cb[1].rx_urb);

	return -1;
}
| 0
|
369,365
|
/*
 * Prepare an io_uring request for async (io-wq) execution: pin the
 * submitter's credentials if not already done, reset the work item and
 * set its flags (concurrency, hashing on the inode for hashed regular
 * files, unbound work for non-regular files).
 */
static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	req->work.flags = 0;
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->flags & REQ_F_ISREG) {
		/* Serialise hashed writes to the same regular file. */
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}
}
| 0
|
218,807
|
/*
 * Write `value` to `image` as a Pascal string: a one-byte length
 * (capped at 255) followed by the bytes, then zero-padded so the total
 * written size (including the length byte) is a multiple of `padding`.
 * Returns the number of bytes written.
 */
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL ) ? 255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  /* Account for the length byte before computing the padding. */
  length++;
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
| 0
|
244,050
|
/* Destructor for a Multiview Group ('mvcg') box: free the entry array
 * (if any) and the box itself. */
void mvcg_box_del(GF_Box *s)
{
	GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s;
	if (ptr->entries) gf_free(ptr->entries);
	gf_free(ptr);
}
| 0
|
376,340
|
/*
 * Scan the next space-delimited token starting at `in`.
 *
 * Leading spaces are skipped.  If the end of the string or a newline is
 * reached first, *token is set to NULL; otherwise *token receives a
 * newly allocated copy of the token (caller frees).  `token` may be
 * NULL if the caller only wants the advanced position, which is always
 * returned.
 */
next_token (const gchar *in,
            gchar **token)
{
	const gchar *word, *p = in;

	/* Skip leading spaces. */
	while (*p == ' ')
		p++;

	/* End of input or end of line: no token available. */
	if (*p == '\0' || *p == '\n') {
		if (token != NULL)
			*token = NULL;
		return p;
	}

	word = p;
	while (*p != '\0' && *p != ' ' && *p != '\n')
		p++;

	if (token != NULL)
		*token = g_strndup (word, p - word);

	return p;
}
| 0
|
380,945
|
/*
 * Insert-mode <Down>: move the cursor one line down, optionally back to
 * the column where insert started, redraw if the view scrolled, and
 * start a new undo-able insert chunk.  Beeps if the cursor cannot move.
 */
    static void
ins_down(
    int		startcol)	// when TRUE move to Insstart.col
{
    pos_T	tpos;
    linenr_T	old_topline = curwin->w_topline;
#ifdef FEAT_DIFF
    int		old_topfill = curwin->w_topfill;
#endif

    undisplay_dollar();
    tpos = curwin->w_cursor;
    if (cursor_down(1L, TRUE) == OK)
    {
	if (startcol)
	    coladvance(getvcol_nolist(&Insstart));
	if (old_topline != curwin->w_topline
#ifdef FEAT_DIFF
		|| old_topfill != curwin->w_topfill
#endif
		)
	    redraw_later(VALID);
	start_arrow(&tpos);
	can_cindent = TRUE;
    }
    else
	vim_beep(BO_CRSR);
}
| 0
|
234,232
|
/* Select the AArch64 DWARF register-name table for subsequent
 * register-number-to-name lookups. */
init_dwarf_regnames_aarch64 (void)
{
  dwarf_regnames = dwarf_regnames_aarch64;
  dwarf_regnames_count = ARRAY_SIZE (dwarf_regnames_aarch64);
  dwarf_regnames_lookup_func = regname_internal_by_table_only;
}
| 0
|
442,586
|
/* Test helper: initialise a memslot table with a single group holding a
 * single slot covering the whole address range. */
static void init_meminfo(RedMemSlotInfo *mem_info)
{
    memslot_info_init(mem_info, 1 /* groups */, 1 /* slots */, 1, 1, 0);
    memslot_info_add_slot(mem_info, 0, 0, 0 /* delta */, 0 /* start */, ~0ul /* end */, 0 /* generation */);
}
| 0
|
261,251
|
/* Public wrapper: release a linked list of MQTT properties starting at
 * `head`.  Returns the result of MqttProps_Free(). */
int MqttClient_PropsFree(MqttProp *head)
{
    return MqttProps_Free(head);
}
| 0
|
230,383
|
/*
 * Depth-first recursive search for the first descendant node of
 * `parent` whose name matches `name` (case-insensitive).  Returns the
 * node or NULL if no descendant matches.
 */
PJ_DEF(pj_xml_node*) pj_xml_find_node_rec(const pj_xml_node *parent,
					  const pj_str_t *name)
{
    const pj_xml_node *node = parent->node_head.next;

    PJ_CHECK_STACK();

    /* The child list is circular; the head sentinel marks the end. */
    while (node != (void*)&parent->node_head) {
	pj_xml_node *found;

	if (pj_stricmp(&node->name, name) == 0)
	    return (pj_xml_node*)node;

	found = pj_xml_find_node_rec(node, name);
	if (found)
	    return (pj_xml_node*)found;

	node = node->next;
    }
    return NULL;
}
| 0
|
369,201
|
/*
 * Attach the current rsrc node's refcount to the request if it doesn't
 * already hold one.  With the uring lock held a cached reference is
 * consumed (refilled when exhausted); otherwise a percpu ref is taken.
 */
static inline void io_req_set_rsrc_node(struct io_kiocb *req,
					struct io_ring_ctx *ctx,
					unsigned int issue_flags)
{
	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;

		if (!(issue_flags & IO_URING_F_UNLOCKED)) {
			lockdep_assert_held(&ctx->uring_lock);

			ctx->rsrc_cached_refs--;
			if (unlikely(ctx->rsrc_cached_refs < 0))
				io_rsrc_refs_refill(ctx);
		} else {
			percpu_ref_get(req->fixed_rsrc_refs);
		}
	}
}
| 0
|
275,522
|
/*
 * Drain the VM's event queues: run all pending promise (microtask)
 * events, fail on an unhandled promise rejection, then run posted
 * events; repeat while promise events keep being generated.
 *
 * Returns NJS_ERROR on a callback error or unhandled rejection,
 * NJS_AGAIN if events remain pending, NJS_OK otherwise.
 */
njs_vm_handle_events(njs_vm_t *vm)
{
    njs_int_t         ret;
    njs_str_t         str;
    njs_value_t       string;
    njs_event_t       *ev;
    njs_queue_t       *promise_events, *posted_events;
    njs_queue_link_t  *link;

    promise_events = &vm->promise_events;
    posted_events = &vm->posted_events;

    do {
        /* First drain all promise (microtask) events. */
        for ( ;; ) {
            link = njs_queue_first(promise_events);

            if (link == njs_queue_tail(promise_events)) {
                break;
            }

            ev = njs_queue_link_data(link, njs_event_t, link);

            njs_queue_remove(&ev->link);

            ret = njs_vm_call(vm, ev->function, ev->args, ev->nargs);
            if (njs_slow_path(ret == NJS_ERROR)) {
                return ret;
            }
        }

        if (njs_vm_unhandled_rejection(vm)) {
            /* Surface the rejection reason as a VM error. */
            ret = njs_value_to_string(vm, &string,
                                      &vm->promise_reason->start[0]);
            if (njs_slow_path(ret != NJS_OK)) {
                return ret;
            }

            njs_string_get(&string, &str);
            njs_vm_error(vm, "unhandled promise rejection: %V", &str);

            njs_mp_free(vm->mem_pool, vm->promise_reason);
            vm->promise_reason = NULL;

            return NJS_ERROR;
        }

        /* Then run posted (macrotask) events. */
        for ( ;; ) {
            link = njs_queue_first(posted_events);

            if (link == njs_queue_tail(posted_events)) {
                break;
            }

            ev = njs_queue_link_data(link, njs_event_t, link);

            if (ev->once) {
                njs_del_event(vm, ev, NJS_EVENT_RELEASE | NJS_EVENT_DELETE);

            } else {
                ev->posted = 0;
                njs_queue_remove(&ev->link);
            }

            ret = njs_vm_call(vm, ev->function, ev->args, ev->nargs);
            if (ret == NJS_ERROR) {
                return ret;
            }
        }

    } while (!njs_queue_is_empty(promise_events));

    return njs_vm_pending(vm) ? NJS_AGAIN : NJS_OK;
}
| 0
|
338,132
|
// Parse a local.get instruction: read the LEB-encoded local index,
// validate it against the current function's local count, and set the
// expression's type from the local's declared type.
void WasmBinaryBuilder::visitLocalGet(LocalGet* curr) {
  BYN_TRACE("zz node: LocalGet " << pos << std::endl);
  requireFunctionContext("local.get");
  curr->index = getAbsoluteLocalIndex(getU32LEB());
  if (curr->index >= currFunction->getNumLocals()) {
    throwError("bad local.get index");
  }
  curr->type = currFunction->getLocalType(curr->index);
  curr->finalize();
}
| 0
|
196,705
|
// Validates a sparse operand (indices matrix, values vector, shape
// vector) against a dense operand `b`: ranks must match exactly and
// every dimension must be equal (no broadcasting).  Returns OK or an
// InvalidArgument status describing the first mismatch.
Status ValidateInputs(const Tensor *a_indices, const Tensor *a_values,
                      const Tensor *a_shape, const Tensor *b) {
  if (!TensorShapeUtils::IsMatrix(a_indices->shape())) {
    return errors::InvalidArgument(
        "Input a_indices should be a matrix but received shape: ",
        a_indices->shape().DebugString());
  }
  if (!TensorShapeUtils::IsVector(a_values->shape()) ||
      !TensorShapeUtils::IsVector(a_shape->shape())) {
    return errors::InvalidArgument(
        "Inputs a_values and a_shape should be vectors "
        "but received shapes: ",
        a_values->shape().DebugString(), " and ",
        a_shape->shape().DebugString());
  }
  if (a_shape->NumElements() != b->dims()) {
    return errors::InvalidArgument(
        "Two operands have different ranks; received: ", a_shape->NumElements(),
        " and ", b->dims());
  }
  const auto a_shape_flat = a_shape->flat<Index>();
  for (int i = 0; i < b->dims(); ++i) {
    if (a_shape_flat(i) != b->dim_size(i)) {
      return errors::InvalidArgument(
          "Dimension ", i,
          " does not equal (no broadcasting is supported): sparse side ",
          a_shape_flat(i), " vs dense side ", b->dim_size(i));
    }
  }
  return Status::OK();
}
| 1
|
293,777
|
/*
 * Build a flat index (array of pointers) over a list of kexts for fast
 * positional access.  Returns NULL for a NULL/empty list or on
 * allocation failure; the entries borrow the list's kext pointers.
 */
static RKextIndex *r_kext_index_new(RList *kexts) {
	if (!kexts) {
		return NULL;
	}
	int length = r_list_length (kexts);
	if (!length) {
		return NULL;
	}
	RKextIndex *index = R_NEW0 (RKextIndex);
	if (!index) {
		return NULL;
	}
	index->entries = malloc (length *sizeof(RKext*));
	if (!index->entries) {
		R_FREE (index);
		return NULL;
	}
	RListIter *iter;
	RKext *kext;
	int i = 0;
	r_list_foreach (kexts, iter, kext) {
		index->entries[i++] = kext;
	}
	index->length = i;
	return index;
}
| 0
|
437,326
|
/*
 * When named groups are in use, turn all unnamed groups into
 * non-capturing groups and renumber the remaining (named) captures
 * densely: build an old->new group-number map, rewrite the tree,
 * compact the per-group scan environment, remap the capture-history
 * bits and finally renumber the name table.  Returns 0 or an error.
 */
disable_noname_group_capture(Node** root, regex_t* reg, ScanEnv* env)
{
  int r, i, pos, counter;
  MemStatusType loc;
  GroupNumRemap* map;

  map = (GroupNumRemap* )xalloca(sizeof(GroupNumRemap) * (env->num_mem + 1));
  CHECK_NULL_RETURN_MEMERR(map);
  for (i = 1; i <= env->num_mem; i++) {
    map[i].new_val = 0;
  }
  counter = 0;
  r = noname_disable_map(root, map, &counter);
  if (r != 0) return r;

  r = renumber_by_map(*root, map);
  if (r != 0) return r;

  /* Compact the memory-env entries of the surviving groups. */
  for (i = 1, pos = 1; i <= env->num_mem; i++) {
    if (map[i].new_val > 0) {
      SCANENV_MEMENV(env)[pos] = SCANENV_MEMENV(env)[i];
      pos++;
    }
  }

  /* Remap the capture-history bitset to the new numbering. */
  loc = env->capture_history;
  MEM_STATUS_CLEAR(env->capture_history);
  for (i = 1; i <= ONIG_MAX_CAPTURE_HISTORY_GROUP; i++) {
    if (MEM_STATUS_AT(loc, i)) {
      MEM_STATUS_ON_SIMPLE(env->capture_history, map[i].new_val);
    }
  }

  env->num_mem = env->num_named;
  reg->num_mem = env->num_named;

  return onig_renumber_name_table(reg, map);
}
| 0
|
270,380
|
/*
 * Parse and validate the 2-byte zlib stream header (RFC 1950): the
 * 16-bit value must be a multiple of 31, the method must be 8
 * (deflate), the window size exponent at most 7, and no preset
 * dictionary may be requested.  Returns false (with an error set, or
 * pending input) on failure; on success advances to block decoding.
 */
static bool ok_inflater_zlib_header(ok_inflater *inflater) {
    if (!ok_inflater_load_bits(inflater, 16)) {
        return false;
    } else {
        uint32_t compression_method = ok_inflater_read_bits(inflater, 4);
        uint32_t compression_info = ok_inflater_read_bits(inflater, 4);
        uint32_t flag_check = ok_inflater_read_bits(inflater, 5);
        uint32_t flag_dict = ok_inflater_read_bits(inflater, 1);
        uint32_t flag_compression_level = ok_inflater_read_bits(inflater, 2);
        /* Reassemble CMF/FLG in big-endian order for the mod-31 check. */
        uint32_t bits = ((compression_info << 12) | (compression_method << 8) |
                         (flag_compression_level << 6) | (flag_dict << 5) | flag_check);
        if (bits % 31 != 0) {
            ok_inflater_error(inflater, "Invalid zlib header");
            return false;
        }
        if (compression_method != 8) {
            ok_inflater_error(inflater, "Invalid inflater compression method");
            return false;
        }
        if (compression_info > 7) {
            ok_inflater_error(inflater, "Invalid window size");
            return false;
        }
        if (flag_dict) {
            ok_inflater_error(inflater, "Needs external dictionary");
            return false;
        }
        inflater->state = OK_INFLATER_STATE_READY_FOR_NEXT_BLOCK;
        return true;
    }
}
| 0
|
223,386
|
/*
 * PCRE2 JIT helper: emit the common epilogue for a "match once"
 * (atomic/once) bracket.  Depending on whether the bracket allocated a
 * stack frame (framesize >= 0) or not, restores STACK_TOP, frees any
 * extra stack slots, reloads the control head, and loads TMP2 for the
 * OP_KETRMAX path / stores STR_PTR for OP_KETRMIN.
 * NOTE(review): instruction order here mirrors the stack layout set up
 * by the matching prologue - do not reorder.
 */
static SLJIT_INLINE void match_once_common(compiler_common *common, PCRE2_UCHAR ket, int framesize, int private_data_ptr, BOOL has_alternatives, BOOL needs_control_head)
{
DEFINE_COMPILER;
int stacksize;

if (framesize < 0)
  {
  if (framesize == no_frame)
    OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
  else
    {
    stacksize = needs_control_head ? 1 : 0;
    if (ket != OP_KET || has_alternatives)
      stacksize++;

    if (stacksize > 0)
      free_stack(common, stacksize);
    }

  if (needs_control_head)
    OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), (ket != OP_KET || has_alternatives) ? STACK(-2) : STACK(-1));

  /* TMP2 which is set here used by OP_KETRMAX below. */
  if (ket == OP_KETRMAX)
    OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(-1));
  else if (ket == OP_KETRMIN)
    {
    /* Move the STR_PTR to the private_data_ptr. */
    OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-1));
    }
  }
else
  {
  stacksize = (ket != OP_KET || has_alternatives) ? 2 : 1;
  OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + stacksize) * sizeof(sljit_sw));
  if (needs_control_head)
    OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(-1));

  if (ket == OP_KETRMAX)
    {
    /* TMP2 which is set here used by OP_KETRMAX below. */
    OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(0));
    }
  }

if (needs_control_head)
  OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, TMP1, 0);
}
| 0
|
409,472
|
/*
 * Set the terminal cursor color by emitting the T_CSC/T_CEC escape
 * sequences around the color name.  No-op when the terminal has no
 * cursor-color capability (T_CSC empty).
 */
    void
term_cursor_color(char_u *color)
{
    if (*T_CSC != NUL)
    {
	out_str(T_CSC);		// set cursor color start
	out_str_nf(color);
	out_str(T_CEC);		// set cursor color end
	out_flush();
    }
}
| 0
|
380,953
|
/*
 * Insert-mode <Up>: move the cursor one line up, optionally back to the
 * column where insert started, redraw if the view scrolled, and start
 * a new undo-able insert chunk.  Beeps if the cursor cannot move.
 */
    static void
ins_up(
    int		startcol)	// when TRUE move to Insstart.col
{
    pos_T	tpos;
    linenr_T	old_topline = curwin->w_topline;
#ifdef FEAT_DIFF
    int		old_topfill = curwin->w_topfill;
#endif

    undisplay_dollar();
    tpos = curwin->w_cursor;
    if (cursor_up(1L, TRUE) == OK)
    {
	if (startcol)
	    coladvance(getvcol_nolist(&Insstart));
	if (old_topline != curwin->w_topline
#ifdef FEAT_DIFF
		|| old_topfill != curwin->w_topfill
#endif
		)
	    redraw_later(VALID);
	start_arrow(&tpos);
	can_cindent = TRUE;
    }
    else
	vim_beep(BO_CRSR);
}
| 0
|
349,530
|
static int virtbt_setup_zephyr(struct hci_dev *hdev)
{
struct sk_buff *skb;
/* Read Build Information */
skb = __hci_cmd_sync(hdev, 0xfc08, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
hci_set_fw_info(hdev, "%s", skb->data + 1);
kfree_skb(skb);
return 0;
}
| 0
|
247,588
|
// Verifies that a TLS session ticket minted by one server cannot be used to
// resume a session against a second server that presents a *different*
// certificate chain (here one signed by an intermediate CA), even though both
// servers share the same session ticket encryption key (ticket_key_a).  The
// "false" argument to testTicketSessionResumption() asserts that resumption
// does NOT occur.
TEST_P(SslSocketTest, TicketSessionResumptionDifferentServerCertIntermediateCA) {
const std::string server_ctx_yaml1 = R"EOF(
common_tls_context:
tls_certificates:
certificate_chain:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"
private_key:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem"
session_ticket_keys:
keys:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a"
)EOF";
const std::string server_ctx_yaml2 = R"EOF(
common_tls_context:
tls_certificates:
certificate_chain:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem"
private_key:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns3_key.pem"
session_ticket_keys:
keys:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a"
)EOF";
const std::string client_ctx_yaml = R"EOF(
common_tls_context:
)EOF";
testTicketSessionResumption(server_ctx_yaml1, {}, server_ctx_yaml2, {}, client_ctx_yaml, false,
GetParam());
}
| 0
|
369,239
|
static void io_wait_rsrc_data(struct io_rsrc_data *data)
{
if (data && !atomic_dec_and_test(&data->refs))
wait_for_completion(&data->done);
| 0
|
455,430
|
/*
 * Per-inode callback for the EOF-blocks scanner: free speculative
 * preallocation beyond EOF for @ip when it matches the optional filter in
 * @args (a struct xfs_eofblocks).
 *
 * Returns 0 when the inode is skipped or trimmed successfully, -EAGAIN when
 * a SYNC_WAIT caller should revisit the inode on a later pass, or the error
 * from xfs_free_eofblocks().
 */
xfs_inode_free_eofblocks(
struct xfs_inode	*ip,
int			flags,
void			*args)
{
int ret = 0;
struct xfs_eofblocks *eofb = args;
int match;
if (!xfs_can_free_eofblocks(ip, false)) {
/* inode could be preallocated or append-only */
trace_xfs_inode_free_eofblocks_invalid(ip);
xfs_inode_clear_eofblocks_tag(ip);
return 0;
}
/*
 * If the mapping is dirty the operation can block and wait for some
 * time. Unless we are waiting, skip it.
 */
if (!(flags & SYNC_WAIT) &&
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
return 0;
if (eofb) {
/* Apply the caller-supplied id (uid/gid/prid) filter. */
if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
match = xfs_inode_match_id_union(ip, eofb);
else
match = xfs_inode_match_id(ip, eofb);
if (!match)
return 0;
/* skip the inode if the file size is too small */
if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
XFS_ISIZE(ip) < eofb->eof_min_file_size)
return 0;
}
/*
 * If the caller is waiting, return -EAGAIN to keep the background
 * scanner moving and revisit the inode in a subsequent pass.
 */
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
if (flags & SYNC_WAIT)
ret = -EAGAIN;
return ret;
}
ret = xfs_free_eofblocks(ip);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return ret;
}
| 0
|
336,151
|
/*
 * ip6gre_tap_init() - ndo_init hook for an ip6gretap device.
 *
 * Performs the shared ip6gre tunnel initialisation, marks the device as
 * supporting live MAC address changes and applies the tunnel link
 * configuration.
 *
 * Return: 0 on success or the error from ip6gre_tunnel_init_common().
 */
static int ip6gre_tap_init(struct net_device *dev)
{
	int err = ip6gre_tunnel_init_common(dev);

	if (err)
		return err;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip6gre_tnl_link_config(netdev_priv(dev), 1);

	return 0;
}
| 0
|
339,728
|
/*
 * Arbitrary-precision subtraction for dtoa's Bigint: return a freshly
 * allocated Bigint holding |a - b|, with c->sign set to 1 when a < b
 * (the true result is negative) and 0 otherwise.  When a == b a
 * single-word zero is returned.
 */
static Bigint * diff(Bigint *a, Bigint *b)
{
Bigint *c;
int i, wa, wb;
Long borrow, y; /* We need signed shifts here. */
ULong *xa, *xae, *xb, *xbe, *xc;
#ifdef Pack_32
Long z;
#endif
i = cmp(a,b);
if (!i) {
/* Equal operands: the difference is exactly zero. */
c = Balloc(0);
c->wds = 1;
c->x[0] = 0;
return c;
}
if (i < 0) {
/* Ensure a >= b by swapping; remember the sign in i. */
c = a;
a = b;
b = c;
i = 1;
} else {
i = 0;
}
c = Balloc(a->k);
c->sign = i;
wa = a->wds;
xa = a->x;
xae = xa + wa;
wb = b->wds;
xb = b->x;
xbe = xb + wb;
xc = c->x;
borrow = 0;
#ifdef Pack_32
/* Subtract in 16-bit halves so the borrow fits in a signed shift. */
do {
y = (*xa & 0xffff) - (*xb & 0xffff) + borrow;
borrow = y >> 16;
Sign_Extend(borrow, y);
z = (*xa++ >> 16) - (*xb++ >> 16) + borrow;
borrow = z >> 16;
Sign_Extend(borrow, z);
Storeinc(xc, z, y);
} while(xb < xbe);
/* Propagate the borrow through the remaining high words of a. */
while(xa < xae) {
y = (*xa & 0xffff) + borrow;
borrow = y >> 16;
Sign_Extend(borrow, y);
z = (*xa++ >> 16) + borrow;
borrow = z >> 16;
Sign_Extend(borrow, z);
Storeinc(xc, z, y);
}
#else
do {
y = *xa++ - *xb++ + borrow;
borrow = y >> 16;
Sign_Extend(borrow, y);
*xc++ = y & 0xffff;
} while(xb < xbe);
while(xa < xae) {
y = *xa++ + borrow;
borrow = y >> 16;
Sign_Extend(borrow, y);
*xc++ = y & 0xffff;
}
#endif
/* Strip leading zero words; a nonzero word must exist since a != b. */
while(!*--xc) {
wa--;
}
c->wds = wa;
return c;
}
| 0
|
253,641
|
/*
 * Walk an SMB2 FILE_FULL_EA_INFORMATION list in @src (@src_size bytes) and
 * convert it for the cifs xattr interface.
 *
 * Two modes:
 *  - @ea_name != NULL: look up that single attribute.  Returns its value
 *    length (copying the value into @dst unless @dst_size is 0), -ERANGE if
 *    @dst is too small, or -ENODATA when the name is not present.
 *  - @ea_name == NULL: emit a "user.<name>\0" list of all attribute names
 *    into @dst (or just compute the required size when @buf_size is 0).
 *    Returns the total list length or -ERANGE on overflow.
 *
 * Returns -EIO when an entry claims to extend past the end of the buffer.
 */
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
struct smb2_file_full_ea_info *src, size_t src_size,
const unsigned char *ea_name)
{
int rc = 0;
unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
char *name, *value;
size_t buf_size = dst_size;
size_t name_len, value_len, user_name_len;
while (src_size > 0) {
name = &src->ea_data[0];
name_len = (size_t)src->ea_name_length;
/* Value follows the name and its NUL terminator. */
value = &src->ea_data[src->ea_name_length + 1];
value_len = (size_t)le16_to_cpu(src->ea_value_length);
if (name_len == 0)
break;
/* 8 = fixed header size of one entry; reject truncated entries. */
if (src_size < 8 + name_len + 1 + value_len) {
cifs_dbg(FYI, "EA entry goes beyond length of list\n");
rc = -EIO;
goto out;
}
if (ea_name) {
if (ea_name_len == name_len &&
memcmp(ea_name, name, name_len) == 0) {
rc = value_len;
if (dst_size == 0)
goto out;
if (dst_size < value_len) {
rc = -ERANGE;
goto out;
}
memcpy(dst, value, value_len);
goto out;
}
} else {
/* 'user.' plus a terminating null */
user_name_len = 5 + 1 + name_len;
if (buf_size == 0) {
/* skip copy - calc size only */
rc += user_name_len;
} else if (dst_size >= user_name_len) {
dst_size -= user_name_len;
memcpy(dst, "user.", 5);
dst += 5;
memcpy(dst, src->ea_data, name_len);
dst += name_len;
*dst = 0;
++dst;
rc += user_name_len;
} else {
/* stop before overrun buffer */
rc = -ERANGE;
break;
}
}
if (!src->next_entry_offset)
break;
if (src_size < le32_to_cpu(src->next_entry_offset)) {
/* stop before overrun buffer */
rc = -ERANGE;
break;
}
src_size -= le32_to_cpu(src->next_entry_offset);
src = (void *)((char *)src +
le32_to_cpu(src->next_entry_offset));
}
/* didn't find the named attribute */
if (ea_name)
rc = -ENODATA;
out:
return (ssize_t)rc;
}
| 0
|
409,456
|
/*
 * Initialize the term_props[] table that tracks which terminal properties
 * have been learned from terminal responses.  When "all" is TRUE every
 * property status is reset to TPR_UNKNOWN; otherwise only those that are
 * (re)detected through a termresponse are reset.
 */
init_term_props(int all)
{
int i;
term_props[TPR_CURSOR_STYLE].tpr_name = "cursor_style";
term_props[TPR_CURSOR_STYLE].tpr_set_by_termresponse = FALSE;
term_props[TPR_CURSOR_BLINK].tpr_name = "cursor_blink_mode";
term_props[TPR_CURSOR_BLINK].tpr_set_by_termresponse = FALSE;
term_props[TPR_UNDERLINE_RGB].tpr_name = "underline_rgb";
term_props[TPR_UNDERLINE_RGB].tpr_set_by_termresponse = TRUE;
term_props[TPR_MOUSE].tpr_name = "mouse";
term_props[TPR_MOUSE].tpr_set_by_termresponse = TRUE;
for (i = 0; i < TPR_COUNT; ++i)
if (all || term_props[i].tpr_set_by_termresponse)
term_props[i].tpr_status = TPR_UNKNOWN;
}
| 0
|
369,264
|
/*
 * Issue a read request for io_uring.  Imports (or re-imports) the iovec,
 * performs the read, and when a nonblocking attempt cannot complete sets up
 * async state so the request can be retried (returning -EAGAIN) or armed
 * with IOCB_WAITQ page-unlock callbacks for incremental completion.
 *
 * Returns 0 when the request was completed or queued, -EAGAIN to punt to
 * async context, or a negative error.
 */
static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rw_state __s, *s = &__s;
struct iovec *iovec;
struct kiocb *kiocb = &req->rw.kiocb;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
struct io_async_rw *rw;
ssize_t ret, ret2;
loff_t *ppos;
if (!req_has_async_data(req)) {
/* First attempt: import the user iovec into the on-stack state. */
ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
if (unlikely(ret < 0))
return ret;
} else {
/*
 * Safe and required to re-import if we're using provided
 * buffers, as we dropped the selected one before retry.
 */
if (req->flags & REQ_F_BUFFER_SELECT) {
ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
if (unlikely(ret < 0))
return ret;
}
rw = req->async_data;
s = &rw->s;
/*
 * We come here from an earlier attempt, restore our state to
 * match in case it doesn't. It's cheap enough that we don't
 * need to make this conditional.
 */
iov_iter_restore(&s->iter, &s->iter_state);
iovec = NULL;
}
ret = io_rw_init_file(req, FMODE_READ);
if (unlikely(ret)) {
kfree(iovec);
return ret;
}
req->result = iov_iter_count(&s->iter);
if (force_nonblock) {
/* If the file doesn't support async, just async punt */
if (unlikely(!io_file_supports_nowait(req))) {
ret = io_setup_async_rw(req, iovec, s, true);
return ret ?: -EAGAIN;
}
kiocb->ki_flags |= IOCB_NOWAIT;
} else {
/* Ensure we clear previously set non-block flag */
kiocb->ki_flags &= ~IOCB_NOWAIT;
}
ppos = io_kiocb_update_pos(req);
ret = rw_verify_area(READ, req->file, ppos, req->result);
if (unlikely(ret)) {
kfree(iovec);
return ret;
}
ret = io_iter_do_read(req, &s->iter);
if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
req->flags &= ~REQ_F_REISSUE;
/* if we can poll, just do that */
if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
return -EAGAIN;
/* IOPOLL retry should happen for io-wq threads */
if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
goto done;
/* no retry on NONBLOCK nor RWF_NOWAIT */
if (req->flags & REQ_F_NOWAIT)
goto done;
/* Fall through to the retry setup below with 0 bytes done. */
ret = 0;
} else if (ret == -EIOCBQUEUED) {
goto out_free;
} else if (ret == req->result || ret <= 0 || !force_nonblock ||
(req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
/* read all, failed, already did sync or don't want to retry */
goto done;
}
/*
 * Don't depend on the iter state matching what was consumed, or being
 * untouched in case of error. Restore it and we'll advance it
 * manually if we need to.
 */
iov_iter_restore(&s->iter, &s->iter_state);
ret2 = io_setup_async_rw(req, iovec, s, true);
if (ret2)
return ret2;
iovec = NULL;
rw = req->async_data;
s = &rw->s;
/*
 * Now use our persistent iterator and state, if we aren't already.
 * We've restored and mapped the iter to match.
 */
do {
/*
 * We end up here because of a partial read, either from
 * above or inside this loop. Advance the iter by the bytes
 * that were consumed.
 */
iov_iter_advance(&s->iter, ret);
if (!iov_iter_count(&s->iter))
break;
rw->bytes_done += ret;
iov_iter_save_state(&s->iter, &s->iter_state);
/* if we can retry, do so with the callbacks armed */
if (!io_rw_should_retry(req)) {
kiocb->ki_flags &= ~IOCB_WAITQ;
return -EAGAIN;
}
/*
 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 * we get -EIOCBQUEUED, then we'll get a notification when the
 * desired page gets unlocked. We can also get a partial read
 * here, and if we do, then just retry at the new offset.
 */
ret = io_iter_do_read(req, &s->iter);
if (ret == -EIOCBQUEUED)
return 0;
/* we got some bytes, but not all. retry. */
kiocb->ki_flags &= ~IOCB_WAITQ;
iov_iter_restore(&s->iter, &s->iter_state);
} while (ret > 0);
done:
kiocb_done(req, ret, issue_flags);
out_free:
/* it's faster to check here then delegate to kfree */
if (iovec)
kfree(iovec);
return 0;
}
| 0
|
217,557
|
/*
 * Extract a resource from the image's "8bim" (Photoshop) profile and attach
 * it to the image as the property named by @key.
 *
 * The key has the form "8BIM:start,stop[:name[\nformat]]": resources whose
 * id lies in [start, stop] are scanned; an optional resource name (or
 * "#<n>" to select the n-th match) narrows the search.  Clipping-path
 * resources (ids 2000..2998) are converted to SVG or PostScript path text
 * depending on "format" (default "SVG"); everything else is stored verbatim.
 *
 * Returns MagickTrue when a matching resource was found and set.
 */
static MagickBooleanType Get8BIMProperty(const Image *image,const char *key)
{
char
*attribute,
format[MaxTextExtent],
name[MaxTextExtent],
*resource;
const StringInfo
*profile;
const unsigned char
*info;
long
start,
stop;
MagickBooleanType
status;
ssize_t
i;
size_t
length;
ssize_t
count,
id,
sub_number;
/*
There are no newlines in path names, so it's safe as terminator.
*/
profile=GetImageProfile(image,"8bim");
if (profile == (StringInfo *) NULL)
return(MagickFalse);
count=(ssize_t) sscanf(key,"8BIM:%ld,%ld:%1024[^\n]\n%1024[^\n]",&start,&stop,
name,format);
if ((count != 2) && (count != 3) && (count != 4))
return(MagickFalse);
if (count < 4)
(void) CopyMagickString(format,"SVG",MaxTextExtent);
if (count < 3)
*name='\0';
sub_number=1;
if (*name == '#')
sub_number=(ssize_t) StringToLong(&name[1]);
sub_number=MagickMax(sub_number,1L);
resource=(char *) NULL;
status=MagickFalse;
length=GetStringInfoLength(profile);
info=GetStringInfoDatum(profile);
/* Scan the profile for "8BIM" signatures, one resource at a time. */
while ((length > 0) && (status == MagickFalse))
{
if (ReadPropertyByte(&info,&length) != (unsigned char) '8')
continue;
if (ReadPropertyByte(&info,&length) != (unsigned char) 'B')
continue;
if (ReadPropertyByte(&info,&length) != (unsigned char) 'I')
continue;
if (ReadPropertyByte(&info,&length) != (unsigned char) 'M')
continue;
id=(ssize_t) ReadPropertyMSBShort(&info,&length);
if (id < (ssize_t) start)
continue;
if (id > (ssize_t) stop)
continue;
if (resource != (char *) NULL)
resource=DestroyString(resource);
/* Pascal-style resource name: one length byte then the characters. */
count=(ssize_t) ReadPropertyByte(&info,&length);
if ((count != 0) && ((size_t) count <= length))
{
resource=(char *) NULL;
if (~((size_t) count) >= (MaxTextExtent-1))
resource=(char *) AcquireQuantumMemory((size_t) count+MaxTextExtent,
sizeof(*resource));
if (resource != (char *) NULL)
{
for (i=0; i < (ssize_t) count; i++)
resource[i]=(char) ReadPropertyByte(&info,&length);
resource[count]='\0';
}
}
/* The name field is padded to an even total length. */
if ((count & 0x01) == 0)
(void) ReadPropertyByte(&info,&length);
count=(ssize_t) ReadPropertyMSBLong(&info,&length);
if ((count < 0) || ((size_t) count > length))
{
length=0;
continue;
}
if ((*name != '\0') && (*name != '#'))
if ((resource == (char *) NULL) || (LocaleCompare(name,resource) != 0))
{
/*
No name match, scroll forward and try next.
*/
info+=count;
length-=MagickMin(count,(ssize_t) length);
continue;
}
if ((*name == '#') && (sub_number != 1))
{
/*
No numbered match, scroll forward and try next.
*/
sub_number--;
info+=count;
length-=MagickMin(count,(ssize_t) length);
continue;
}
/*
We have the resource of interest.
*/
attribute=(char *) NULL;
if (~((size_t) count) >= (MaxTextExtent-1))
attribute=(char *) AcquireQuantumMemory((size_t) count+MaxTextExtent,
sizeof(*attribute));
if (attribute != (char *) NULL)
{
(void) memcpy(attribute,(char *) info,(size_t) count);
attribute[count]='\0';
info+=count;
length-=MagickMin(count,(ssize_t) length);
if ((id <= 1999) || (id >= 2999))
(void) SetImageProperty((Image *) image,key,(const char *) attribute);
else
{
char
*path;
if (LocaleCompare(format,"svg") == 0)
path=TraceSVGClippath((unsigned char *) attribute,(size_t) count,
image->columns,image->rows);
else
path=TracePSClippath((unsigned char *) attribute,(size_t) count,
image->columns,image->rows);
(void) SetImageProperty((Image *) image,key,(const char *) path);
path=DestroyString(path);
}
attribute=DestroyString(attribute);
status=MagickTrue;
}
}
if (resource != (char *) NULL)
resource=DestroyString(resource);
return(status);
}
| 0
|
247,560
|
// Fluent setter: records the SHA-256 certificate digest the test expects and
// returns *this so configuration calls can be chained.
TestUtilOptions& setExpectedSha256Digest(const std::string& expected_sha256_digest) {
  this->expected_sha256_digest_ = expected_sha256_digest;
  return *this;
}
| 0
|
395,078
|
win_update(win_T *wp)
{
buf_T *buf = wp->w_buffer;
int type;
int top_end = 0; // Below last row of the top area that needs
// updating. 0 when no top area updating.
int mid_start = 999;// first row of the mid area that needs
// updating. 999 when no mid area updating.
int mid_end = 0; // Below last row of the mid area that needs
// updating. 0 when no mid area updating.
int bot_start = 999;// first row of the bot area that needs
// updating. 999 when no bot area updating
int scrolled_down = FALSE; // TRUE when scrolled down when
// w_topline got smaller a bit
#ifdef FEAT_SEARCH_EXTRA
int top_to_mod = FALSE; // redraw above mod_top
#endif
int row; // current window row to display
linenr_T lnum; // current buffer lnum to display
int idx; // current index in w_lines[]
int srow; // starting row of the current line
int eof = FALSE; // if TRUE, we hit the end of the file
int didline = FALSE; // if TRUE, we finished the last line
int i;
long j;
static int recursive = FALSE; // being called recursively
linenr_T old_botline = wp->w_botline;
#ifdef FEAT_CONCEAL
int old_wrow = wp->w_wrow;
int old_wcol = wp->w_wcol;
#endif
#ifdef FEAT_FOLDING
long fold_count;
#endif
#ifdef FEAT_SYN_HL
// remember what happened to the previous line, to know if
// check_visual_highlight() can be used
#define DID_NONE 1 // didn't update a line
#define DID_LINE 2 // updated a normal line
#define DID_FOLD 3 // updated a folded line
int did_update = DID_NONE;
linenr_T syntax_last_parsed = 0; // last parsed text line
#endif
linenr_T mod_top = 0;
linenr_T mod_bot = 0;
#if defined(FEAT_SYN_HL) || defined(FEAT_SEARCH_EXTRA)
int save_got_int;
#endif
#ifdef SYN_TIME_LIMIT
proftime_T syntax_tm;
#endif
#if defined(FEAT_SEARCH_EXTRA) || defined(FEAT_CLIPBOARD)
// This needs to be done only for the first window when update_screen() is
// called.
if (!did_update_one_window)
{
did_update_one_window = TRUE;
# ifdef FEAT_SEARCH_EXTRA
start_search_hl();
# endif
# ifdef FEAT_CLIPBOARD
// When Visual area changed, may have to update selection.
if (clip_star.available && clip_isautosel_star())
clip_update_selection(&clip_star);
if (clip_plus.available && clip_isautosel_plus())
clip_update_selection(&clip_plus);
# endif
}
#endif
type = wp->w_redr_type;
if (type == NOT_VALID)
{
wp->w_redr_status = TRUE;
wp->w_lines_valid = 0;
}
// Window frame is zero-height: nothing to draw.
if (wp->w_height + WINBAR_HEIGHT(wp) == 0
|| (wp->w_frame->fr_height == wp->w_status_height
#if defined(FEAT_PROP_POPUP)
&& !popup_is_popup(wp)
#endif
))
{
wp->w_redr_type = 0;
return;
}
// Window is zero-width: Only need to draw the separator.
if (wp->w_width == 0)
{
// draw the vertical separator right of this window
draw_vsep_win(wp, 0);
wp->w_redr_type = 0;
return;
}
#ifdef FEAT_TERMINAL
// If this window contains a terminal, redraw works completely differently.
if (term_do_update_window(wp))
{
term_update_window(wp);
# ifdef FEAT_MENU
// Draw the window toolbar, if there is one.
if (winbar_height(wp) > 0)
redraw_win_toolbar(wp);
# endif
wp->w_redr_type = 0;
return;
}
#endif
#ifdef FEAT_SEARCH_EXTRA
init_search_hl(wp, &screen_search_hl);
#endif
#ifdef FEAT_LINEBREAK
// Force redraw when width of 'number' or 'relativenumber' column
// changes.
i = (wp->w_p_nu || wp->w_p_rnu) ? number_width(wp) : 0;
if (wp->w_nrwidth != i)
{
type = NOT_VALID;
wp->w_nrwidth = i;
}
else
#endif
if (buf->b_mod_set && buf->b_mod_xlines != 0 && wp->w_redraw_top != 0)
{
// When there are both inserted/deleted lines and specific lines to be
// redrawn, w_redraw_top and w_redraw_bot may be invalid, just redraw
// everything (only happens when redrawing is off for while).
type = NOT_VALID;
}
else
{
// Set mod_top to the first line that needs displaying because of
// changes. Set mod_bot to the first line after the changes.
mod_top = wp->w_redraw_top;
if (wp->w_redraw_bot != 0)
mod_bot = wp->w_redraw_bot + 1;
else
mod_bot = 0;
if (buf->b_mod_set)
{
if (mod_top == 0 || mod_top > buf->b_mod_top)
{
mod_top = buf->b_mod_top;
#ifdef FEAT_SYN_HL
// Need to redraw lines above the change that may be included
// in a pattern match.
if (syntax_present(wp))
{
mod_top -= buf->b_s.b_syn_sync_linebreaks;
if (mod_top < 1)
mod_top = 1;
}
#endif
}
if (mod_bot == 0 || mod_bot < buf->b_mod_bot)
mod_bot = buf->b_mod_bot;
#ifdef FEAT_SEARCH_EXTRA
// When 'hlsearch' is on and using a multi-line search pattern, a
// change in one line may make the Search highlighting in a
// previous line invalid. Simple solution: redraw all visible
// lines above the change.
// Same for a match pattern.
if (screen_search_hl.rm.regprog != NULL
&& re_multiline(screen_search_hl.rm.regprog))
top_to_mod = TRUE;
else
{
matchitem_T *cur = wp->w_match_head;
while (cur != NULL)
{
if (cur->match.regprog != NULL
&& re_multiline(cur->match.regprog))
{
top_to_mod = TRUE;
break;
}
cur = cur->next;
}
}
#endif
}
#ifdef FEAT_FOLDING
if (mod_top != 0 && hasAnyFolding(wp))
{
linenr_T lnumt, lnumb;
// A change in a line can cause lines above it to become folded or
// unfolded. Find the top most buffer line that may be affected.
// If the line was previously folded and displayed, get the first
// line of that fold. If the line is folded now, get the first
// folded line. Use the minimum of these two.
// Find last valid w_lines[] entry above mod_top. Set lnumt to
// the line below it. If there is no valid entry, use w_topline.
// Find the first valid w_lines[] entry below mod_bot. Set lnumb
// to this line. If there is no valid entry, use MAXLNUM.
lnumt = wp->w_topline;
lnumb = MAXLNUM;
for (i = 0; i < wp->w_lines_valid; ++i)
if (wp->w_lines[i].wl_valid)
{
if (wp->w_lines[i].wl_lastlnum < mod_top)
lnumt = wp->w_lines[i].wl_lastlnum + 1;
if (lnumb == MAXLNUM && wp->w_lines[i].wl_lnum >= mod_bot)
{
lnumb = wp->w_lines[i].wl_lnum;
// When there is a fold column it might need updating
// in the next line ("J" just above an open fold).
if (compute_foldcolumn(wp, 0) > 0)
++lnumb;
}
}
(void)hasFoldingWin(wp, mod_top, &mod_top, NULL, TRUE, NULL);
if (mod_top > lnumt)
mod_top = lnumt;
// Now do the same for the bottom line (one above mod_bot).
--mod_bot;
(void)hasFoldingWin(wp, mod_bot, NULL, &mod_bot, TRUE, NULL);
++mod_bot;
if (mod_bot < lnumb)
mod_bot = lnumb;
}
#endif
// When a change starts above w_topline and the end is below
// w_topline, start redrawing at w_topline.
// If the end of the change is above w_topline: do like no change was
// made, but redraw the first line to find changes in syntax.
if (mod_top != 0 && mod_top < wp->w_topline)
{
if (mod_bot > wp->w_topline)
mod_top = wp->w_topline;
#ifdef FEAT_SYN_HL
else if (syntax_present(wp))
top_end = 1;
#endif
}
// When line numbers are displayed need to redraw all lines below
// inserted/deleted lines.
if (mod_top != 0 && buf->b_mod_xlines != 0 && wp->w_p_nu)
mod_bot = MAXLNUM;
}
wp->w_redraw_top = 0; // reset for next time
wp->w_redraw_bot = 0;
// When only displaying the lines at the top, set top_end. Used when
// window has scrolled down for msg_scrolled.
if (type == REDRAW_TOP)
{
j = 0;
for (i = 0; i < wp->w_lines_valid; ++i)
{
j += wp->w_lines[i].wl_size;
if (j >= wp->w_upd_rows)
{
top_end = j;
break;
}
}
if (top_end == 0)
// not found (cannot happen?): redraw everything
type = NOT_VALID;
else
// top area defined, the rest is VALID
type = VALID;
}
// Trick: we want to avoid clearing the screen twice. screenclear() will
// set "screen_cleared" to TRUE. The special value MAYBE (which is still
// non-zero and thus not FALSE) will indicate that screenclear() was not
// called.
if (screen_cleared)
screen_cleared = MAYBE;
// If there are no changes on the screen that require a complete redraw,
// handle three cases:
// 1: we are off the top of the screen by a few lines: scroll down
// 2: wp->w_topline is below wp->w_lines[0].wl_lnum: may scroll up
// 3: wp->w_topline is wp->w_lines[0].wl_lnum: find first entry in
// w_lines[] that needs updating.
if ((type == VALID || type == SOME_VALID
|| type == INVERTED || type == INVERTED_ALL)
#ifdef FEAT_DIFF
&& !wp->w_botfill && !wp->w_old_botfill
#endif
)
{
if (mod_top != 0
&& wp->w_topline == mod_top
&& (!wp->w_lines[0].wl_valid
|| wp->w_topline <= wp->w_lines[0].wl_lnum))
{
// w_topline is the first changed line and window is not scrolled,
// the scrolling from changed lines will be done further down.
}
else if (wp->w_lines[0].wl_valid
&& (wp->w_topline < wp->w_lines[0].wl_lnum
#ifdef FEAT_DIFF
|| (wp->w_topline == wp->w_lines[0].wl_lnum
&& wp->w_topfill > wp->w_old_topfill)
#endif
))
{
// New topline is above old topline: May scroll down.
#ifdef FEAT_FOLDING
if (hasAnyFolding(wp))
{
linenr_T ln;
// count the number of lines we are off, counting a sequence
// of folded lines as one
j = 0;
for (ln = wp->w_topline; ln < wp->w_lines[0].wl_lnum; ++ln)
{
++j;
if (j >= wp->w_height - 2)
break;
(void)hasFoldingWin(wp, ln, NULL, &ln, TRUE, NULL);
}
}
else
#endif
j = wp->w_lines[0].wl_lnum - wp->w_topline;
if (j < wp->w_height - 2) // not too far off
{
i = plines_m_win(wp, wp->w_topline, wp->w_lines[0].wl_lnum - 1);
#ifdef FEAT_DIFF
// insert extra lines for previously invisible filler lines
if (wp->w_lines[0].wl_lnum != wp->w_topline)
i += diff_check_fill(wp, wp->w_lines[0].wl_lnum)
- wp->w_old_topfill;
#endif
if (i < wp->w_height - 2) // less than a screen off
{
// Try to insert the correct number of lines.
// If not the last window, delete the lines at the bottom.
// win_ins_lines may fail when the terminal can't do it.
if (i > 0)
check_for_delay(FALSE);
if (win_ins_lines(wp, 0, i, FALSE, wp == firstwin) == OK)
{
if (wp->w_lines_valid != 0)
{
// Need to update rows that are new, stop at the
// first one that scrolled down.
top_end = i;
scrolled_down = TRUE;
// Move the entries that were scrolled, disable
// the entries for the lines to be redrawn.
if ((wp->w_lines_valid += j) > wp->w_height)
wp->w_lines_valid = wp->w_height;
for (idx = wp->w_lines_valid; idx - j >= 0; idx--)
wp->w_lines[idx] = wp->w_lines[idx - j];
while (idx >= 0)
wp->w_lines[idx--].wl_valid = FALSE;
}
}
else
mid_start = 0; // redraw all lines
}
else
mid_start = 0; // redraw all lines
}
else
mid_start = 0; // redraw all lines
}
else
{
// New topline is at or below old topline: May scroll up.
// When topline didn't change, find first entry in w_lines[] that
// needs updating.
// try to find wp->w_topline in wp->w_lines[].wl_lnum
j = -1;
row = 0;
for (i = 0; i < wp->w_lines_valid; i++)
{
if (wp->w_lines[i].wl_valid
&& wp->w_lines[i].wl_lnum == wp->w_topline)
{
j = i;
break;
}
row += wp->w_lines[i].wl_size;
}
if (j == -1)
{
// if wp->w_topline is not in wp->w_lines[].wl_lnum redraw all
// lines
mid_start = 0;
}
else
{
// Try to delete the correct number of lines.
// wp->w_topline is at wp->w_lines[i].wl_lnum.
#ifdef FEAT_DIFF
// If the topline didn't change, delete old filler lines,
// otherwise delete filler lines of the new topline...
if (wp->w_lines[0].wl_lnum == wp->w_topline)
row += wp->w_old_topfill;
else
row += diff_check_fill(wp, wp->w_topline);
// ... but don't delete new filler lines.
row -= wp->w_topfill;
#endif
if (row > 0)
{
check_for_delay(FALSE);
if (win_del_lines(wp, 0, row, FALSE, wp == firstwin, 0)
== OK)
bot_start = wp->w_height - row;
else
mid_start = 0; // redraw all lines
}
if ((row == 0 || bot_start < 999) && wp->w_lines_valid != 0)
{
// Skip the lines (below the deleted lines) that are still
// valid and don't need redrawing. Copy their info
// upwards, to compensate for the deleted lines. Set
// bot_start to the first row that needs redrawing.
bot_start = 0;
idx = 0;
for (;;)
{
wp->w_lines[idx] = wp->w_lines[j];
// stop at line that didn't fit, unless it is still
// valid (no lines deleted)
if (row > 0 && bot_start + row
+ (int)wp->w_lines[j].wl_size > wp->w_height)
{
wp->w_lines_valid = idx + 1;
break;
}
bot_start += wp->w_lines[idx++].wl_size;
// stop at the last valid entry in w_lines[].wl_size
if (++j >= wp->w_lines_valid)
{
wp->w_lines_valid = idx;
break;
}
}
#ifdef FEAT_DIFF
// Correct the first entry for filler lines at the top
// when it won't get updated below.
if (wp->w_p_diff && bot_start > 0)
wp->w_lines[0].wl_size =
plines_win_nofill(wp, wp->w_topline, TRUE)
+ wp->w_topfill;
#endif
}
}
}
// When starting redraw in the first line, redraw all lines. When
// there is only one window it's probably faster to clear the screen
// first.
if (mid_start == 0)
{
mid_end = wp->w_height;
if (ONE_WINDOW && !WIN_IS_POPUP(wp))
{
// Clear the screen when it was not done by win_del_lines() or
// win_ins_lines() above, "screen_cleared" is FALSE or MAYBE
// then.
if (screen_cleared != TRUE)
screenclear();
// The screen was cleared, redraw the tab pages line.
if (redraw_tabline)
draw_tabline();
}
}
// When win_del_lines() or win_ins_lines() caused the screen to be
// cleared (only happens for the first window) or when screenclear()
// was called directly above, "must_redraw" will have been set to
// NOT_VALID, need to reset it here to avoid redrawing twice.
if (screen_cleared == TRUE)
must_redraw = 0;
}
else
{
// Not VALID or INVERTED: redraw all lines.
mid_start = 0;
mid_end = wp->w_height;
}
if (type == SOME_VALID)
{
// SOME_VALID: redraw all lines.
mid_start = 0;
mid_end = wp->w_height;
type = NOT_VALID;
}
// check if we are updating or removing the inverted part
if ((VIsual_active && buf == curwin->w_buffer)
|| (wp->w_old_cursor_lnum != 0 && type != NOT_VALID))
{
linenr_T from, to;
if (VIsual_active)
{
if (VIsual_active
&& (VIsual_mode != wp->w_old_visual_mode
|| type == INVERTED_ALL))
{
// If the type of Visual selection changed, redraw the whole
// selection. Also when the ownership of the X selection is
// gained or lost.
if (curwin->w_cursor.lnum < VIsual.lnum)
{
from = curwin->w_cursor.lnum;
to = VIsual.lnum;
}
else
{
from = VIsual.lnum;
to = curwin->w_cursor.lnum;
}
// redraw more when the cursor moved as well
if (wp->w_old_cursor_lnum < from)
from = wp->w_old_cursor_lnum;
if (wp->w_old_cursor_lnum > to)
to = wp->w_old_cursor_lnum;
if (wp->w_old_visual_lnum < from)
from = wp->w_old_visual_lnum;
if (wp->w_old_visual_lnum > to)
to = wp->w_old_visual_lnum;
}
else
{
// Find the line numbers that need to be updated: The lines
// between the old cursor position and the current cursor
// position. Also check if the Visual position changed.
if (curwin->w_cursor.lnum < wp->w_old_cursor_lnum)
{
from = curwin->w_cursor.lnum;
to = wp->w_old_cursor_lnum;
}
else
{
from = wp->w_old_cursor_lnum;
to = curwin->w_cursor.lnum;
if (from == 0) // Visual mode just started
from = to;
}
if (VIsual.lnum != wp->w_old_visual_lnum
|| VIsual.col != wp->w_old_visual_col)
{
if (wp->w_old_visual_lnum < from
&& wp->w_old_visual_lnum != 0)
from = wp->w_old_visual_lnum;
if (wp->w_old_visual_lnum > to)
to = wp->w_old_visual_lnum;
if (VIsual.lnum < from)
from = VIsual.lnum;
if (VIsual.lnum > to)
to = VIsual.lnum;
}
}
// If in block mode and changed column or curwin->w_curswant:
// update all lines.
// First compute the actual start and end column.
if (VIsual_mode == Ctrl_V)
{
colnr_T fromc, toc;
#if defined(FEAT_LINEBREAK)
int save_ve_flags = curwin->w_ve_flags;
if (curwin->w_p_lbr)
curwin->w_ve_flags = VE_ALL;
#endif
getvcols(wp, &VIsual, &curwin->w_cursor, &fromc, &toc);
++toc;
#if defined(FEAT_LINEBREAK)
curwin->w_ve_flags = save_ve_flags;
#endif
// Highlight to the end of the line, unless 'virtualedit' has
// "block".
if (curwin->w_curswant == MAXCOL)
{
if (get_ve_flags() & VE_BLOCK)
{
pos_T pos;
int cursor_above =
curwin->w_cursor.lnum < VIsual.lnum;
// Need to find the longest line.
toc = 0;
pos.coladd = 0;
for (pos.lnum = curwin->w_cursor.lnum; cursor_above
? pos.lnum <= VIsual.lnum
: pos.lnum >= VIsual.lnum;
pos.lnum += cursor_above ? 1 : -1)
{
colnr_T t;
pos.col = (int)STRLEN(ml_get_buf(wp->w_buffer,
pos.lnum, FALSE));
getvvcol(wp, &pos, NULL, NULL, &t);
if (toc < t)
toc = t;
}
++toc;
}
else
toc = MAXCOL;
}
if (fromc != wp->w_old_cursor_fcol
|| toc != wp->w_old_cursor_lcol)
{
if (from > VIsual.lnum)
from = VIsual.lnum;
if (to < VIsual.lnum)
to = VIsual.lnum;
}
wp->w_old_cursor_fcol = fromc;
wp->w_old_cursor_lcol = toc;
}
}
else
{
// Use the line numbers of the old Visual area.
if (wp->w_old_cursor_lnum < wp->w_old_visual_lnum)
{
from = wp->w_old_cursor_lnum;
to = wp->w_old_visual_lnum;
}
else
{
from = wp->w_old_visual_lnum;
to = wp->w_old_cursor_lnum;
}
}
// There is no need to update lines above the top of the window.
if (from < wp->w_topline)
from = wp->w_topline;
// If we know the value of w_botline, use it to restrict the update to
// the lines that are visible in the window.
if (wp->w_valid & VALID_BOTLINE)
{
if (from >= wp->w_botline)
from = wp->w_botline - 1;
if (to >= wp->w_botline)
to = wp->w_botline - 1;
}
// Find the minimal part to be updated.
// Watch out for scrolling that made entries in w_lines[] invalid.
// E.g., CTRL-U makes the first half of w_lines[] invalid and sets
// top_end; need to redraw from top_end to the "to" line.
// A middle mouse click with a Visual selection may change the text
// above the Visual area and reset wl_valid, do count these for
// mid_end (in srow).
if (mid_start > 0)
{
lnum = wp->w_topline;
idx = 0;
srow = 0;
if (scrolled_down)
mid_start = top_end;
else
mid_start = 0;
while (lnum < from && idx < wp->w_lines_valid) // find start
{
if (wp->w_lines[idx].wl_valid)
mid_start += wp->w_lines[idx].wl_size;
else if (!scrolled_down)
srow += wp->w_lines[idx].wl_size;
++idx;
# ifdef FEAT_FOLDING
if (idx < wp->w_lines_valid && wp->w_lines[idx].wl_valid)
lnum = wp->w_lines[idx].wl_lnum;
else
# endif
++lnum;
}
srow += mid_start;
mid_end = wp->w_height;
for ( ; idx < wp->w_lines_valid; ++idx) // find end
{
if (wp->w_lines[idx].wl_valid
&& wp->w_lines[idx].wl_lnum >= to + 1)
{
// Only update until first row of this line
mid_end = srow;
break;
}
srow += wp->w_lines[idx].wl_size;
}
}
}
if (VIsual_active && buf == curwin->w_buffer)
{
wp->w_old_visual_mode = VIsual_mode;
wp->w_old_cursor_lnum = curwin->w_cursor.lnum;
wp->w_old_visual_lnum = VIsual.lnum;
wp->w_old_visual_col = VIsual.col;
wp->w_old_curswant = curwin->w_curswant;
}
else
{
wp->w_old_visual_mode = 0;
wp->w_old_cursor_lnum = 0;
wp->w_old_visual_lnum = 0;
wp->w_old_visual_col = 0;
}
#if defined(FEAT_SYN_HL) || defined(FEAT_SEARCH_EXTRA)
// reset got_int, otherwise regexp won't work
save_got_int = got_int;
got_int = 0;
#endif
#ifdef SYN_TIME_LIMIT
// Set the time limit to 'redrawtime'.
profile_setlimit(p_rdt, &syntax_tm);
syn_set_timeout(&syntax_tm);
#endif
#ifdef FEAT_FOLDING
win_foldinfo.fi_level = 0;
#endif
#ifdef FEAT_MENU
// Draw the window toolbar, if there is one.
// TODO: only when needed.
if (winbar_height(wp) > 0)
redraw_win_toolbar(wp);
#endif
// Update all the window rows.
idx = 0; // first entry in w_lines[].wl_size
row = 0;
srow = 0;
lnum = wp->w_topline; // first line shown in window
for (;;)
{
// stop updating when reached the end of the window (check for _past_
// the end of the window is at the end of the loop)
if (row == wp->w_height)
{
didline = TRUE;
break;
}
// stop updating when hit the end of the file
if (lnum > buf->b_ml.ml_line_count)
{
eof = TRUE;
break;
}
// Remember the starting row of the line that is going to be dealt
// with. It is used further down when the line doesn't fit.
srow = row;
// Update a line when it is in an area that needs updating, when it
// has changes or w_lines[idx] is invalid.
// "bot_start" may be halfway a wrapped line after using
// win_del_lines(), check if the current line includes it.
// When syntax folding is being used, the saved syntax states will
// already have been updated, we can't see where the syntax state is
// the same again, just update until the end of the window.
if (row < top_end
|| (row >= mid_start && row < mid_end)
#ifdef FEAT_SEARCH_EXTRA
|| top_to_mod
#endif
|| idx >= wp->w_lines_valid
|| (row + wp->w_lines[idx].wl_size > bot_start)
|| (mod_top != 0
&& (lnum == mod_top
|| (lnum >= mod_top
&& (lnum < mod_bot
#ifdef FEAT_SYN_HL
|| did_update == DID_FOLD
|| (did_update == DID_LINE
&& syntax_present(wp)
&& (
# ifdef FEAT_FOLDING
(foldmethodIsSyntax(wp)
&& hasAnyFolding(wp)) ||
# endif
syntax_check_changed(lnum)))
#endif
#ifdef FEAT_SEARCH_EXTRA
// match in fixed position might need redraw
// if lines were inserted or deleted
|| (wp->w_match_head != NULL
&& buf->b_mod_xlines != 0)
#endif
))))
#ifdef FEAT_SYN_HL
|| (wp->w_p_cul && (lnum == wp->w_cursor.lnum
|| lnum == wp->w_last_cursorline))
#endif
)
{
#ifdef FEAT_SEARCH_EXTRA
if (lnum == mod_top)
top_to_mod = FALSE;
#endif
// When at start of changed lines: May scroll following lines
// up or down to minimize redrawing.
// Don't do this when the change continues until the end.
// Don't scroll when dollar_vcol >= 0, keep the "$".
// Don't scroll when redrawing the top, scrolled already above.
if (lnum == mod_top
&& mod_bot != MAXLNUM
&& !(dollar_vcol >= 0 && mod_bot == mod_top + 1)
&& row >= top_end)
{
int old_rows = 0;
int new_rows = 0;
int xtra_rows;
linenr_T l;
// Count the old number of window rows, using w_lines[], which
// should still contain the sizes for the lines as they are
// currently displayed.
for (i = idx; i < wp->w_lines_valid; ++i)
{
// Only valid lines have a meaningful wl_lnum. Invalid
// lines are part of the changed area.
if (wp->w_lines[i].wl_valid
&& wp->w_lines[i].wl_lnum == mod_bot)
break;
old_rows += wp->w_lines[i].wl_size;
#ifdef FEAT_FOLDING
if (wp->w_lines[i].wl_valid
&& wp->w_lines[i].wl_lastlnum + 1 == mod_bot)
{
// Must have found the last valid entry above mod_bot.
// Add following invalid entries.
++i;
while (i < wp->w_lines_valid
&& !wp->w_lines[i].wl_valid)
old_rows += wp->w_lines[i++].wl_size;
break;
}
#endif
}
if (i >= wp->w_lines_valid)
{
// We can't find a valid line below the changed lines,
// need to redraw until the end of the window.
// Inserting/deleting lines has no use.
bot_start = 0;
}
else
{
// Able to count old number of rows: Count new window
// rows, and may insert/delete lines
j = idx;
for (l = lnum; l < mod_bot; ++l)
{
#ifdef FEAT_FOLDING
if (hasFoldingWin(wp, l, NULL, &l, TRUE, NULL))
++new_rows;
else
#endif
#ifdef FEAT_DIFF
if (l == wp->w_topline)
new_rows += plines_win_nofill(wp, l, TRUE)
+ wp->w_topfill;
else
#endif
new_rows += plines_win(wp, l, TRUE);
++j;
if (new_rows > wp->w_height - row - 2)
{
// it's getting too much, must redraw the rest
new_rows = 9999;
break;
}
}
xtra_rows = new_rows - old_rows;
if (xtra_rows < 0)
{
// May scroll text up. If there is not enough
// remaining text or scrolling fails, must redraw the
// rest. If scrolling works, must redraw the text
// below the scrolled text.
if (row - xtra_rows >= wp->w_height - 2)
mod_bot = MAXLNUM;
else
{
check_for_delay(FALSE);
if (win_del_lines(wp, row,
-xtra_rows, FALSE, FALSE, 0) == FAIL)
mod_bot = MAXLNUM;
else
bot_start = wp->w_height + xtra_rows;
}
}
else if (xtra_rows > 0)
{
// May scroll text down. If there is not enough
// remaining text of scrolling fails, must redraw the
// rest.
if (row + xtra_rows >= wp->w_height - 2)
mod_bot = MAXLNUM;
else
{
check_for_delay(FALSE);
if (win_ins_lines(wp, row + old_rows,
xtra_rows, FALSE, FALSE) == FAIL)
mod_bot = MAXLNUM;
else if (top_end > row + old_rows)
// Scrolled the part at the top that requires
// updating down.
top_end += xtra_rows;
}
}
// When not updating the rest, may need to move w_lines[]
// entries.
if (mod_bot != MAXLNUM && i != j)
{
if (j < i)
{
int x = row + new_rows;
// move entries in w_lines[] upwards
for (;;)
{
// stop at last valid entry in w_lines[]
if (i >= wp->w_lines_valid)
{
wp->w_lines_valid = j;
break;
}
wp->w_lines[j] = wp->w_lines[i];
// stop at a line that won't fit
if (x + (int)wp->w_lines[j].wl_size
> wp->w_height)
{
wp->w_lines_valid = j + 1;
break;
}
x += wp->w_lines[j++].wl_size;
++i;
}
if (bot_start > x)
bot_start = x;
}
else // j > i
{
// move entries in w_lines[] downwards
j -= i;
wp->w_lines_valid += j;
if (wp->w_lines_valid > wp->w_height)
wp->w_lines_valid = wp->w_height;
for (i = wp->w_lines_valid; i - j >= idx; --i)
wp->w_lines[i] = wp->w_lines[i - j];
// The w_lines[] entries for inserted lines are
// now invalid, but wl_size may be used above.
// Reset to zero.
while (i >= idx)
{
wp->w_lines[i].wl_size = 0;
wp->w_lines[i--].wl_valid = FALSE;
}
}
}
}
}
#ifdef FEAT_FOLDING
// When lines are folded, display one line for all of them.
// Otherwise, display normally (can be several display lines when
// 'wrap' is on).
fold_count = foldedCount(wp, lnum, &win_foldinfo);
if (fold_count != 0)
{
fold_line(wp, fold_count, &win_foldinfo, lnum, row);
++row;
--fold_count;
wp->w_lines[idx].wl_folded = TRUE;
wp->w_lines[idx].wl_lastlnum = lnum + fold_count;
# ifdef FEAT_SYN_HL
did_update = DID_FOLD;
# endif
}
else
#endif
if (idx < wp->w_lines_valid
&& wp->w_lines[idx].wl_valid
&& wp->w_lines[idx].wl_lnum == lnum
&& lnum > wp->w_topline
&& !(dy_flags & (DY_LASTLINE | DY_TRUNCATE))
&& !WIN_IS_POPUP(wp)
&& srow + wp->w_lines[idx].wl_size > wp->w_height
#ifdef FEAT_DIFF
&& diff_check_fill(wp, lnum) == 0
#endif
)
{
// This line is not going to fit. Don't draw anything here,
// will draw "@ " lines below.
row = wp->w_height + 1;
}
else
{
#ifdef FEAT_SEARCH_EXTRA
prepare_search_hl(wp, &screen_search_hl, lnum);
#endif
#ifdef FEAT_SYN_HL
// Let the syntax stuff know we skipped a few lines.
if (syntax_last_parsed != 0 && syntax_last_parsed + 1 < lnum
&& syntax_present(wp))
syntax_end_parsing(syntax_last_parsed + 1);
#endif
// Display one line.
row = win_line(wp, lnum, srow, wp->w_height,
mod_top == 0, FALSE);
#ifdef FEAT_FOLDING
wp->w_lines[idx].wl_folded = FALSE;
wp->w_lines[idx].wl_lastlnum = lnum;
#endif
#ifdef FEAT_SYN_HL
did_update = DID_LINE;
syntax_last_parsed = lnum;
#endif
}
wp->w_lines[idx].wl_lnum = lnum;
wp->w_lines[idx].wl_valid = TRUE;
// Past end of the window or end of the screen. Note that after
// resizing wp->w_height may be end up too big. That's a problem
// elsewhere, but prevent a crash here.
if (row > wp->w_height || row + wp->w_winrow >= Rows)
{
// we may need the size of that too long line later on
if (dollar_vcol == -1)
wp->w_lines[idx].wl_size = plines_win(wp, lnum, TRUE);
++idx;
break;
}
if (dollar_vcol == -1)
wp->w_lines[idx].wl_size = row - srow;
++idx;
#ifdef FEAT_FOLDING
lnum += fold_count + 1;
#else
++lnum;
#endif
}
else
{
if (wp->w_p_rnu)
{
#ifdef FEAT_FOLDING
// 'relativenumber' set: The text doesn't need to be drawn, but
// the number column nearly always does.
fold_count = foldedCount(wp, lnum, &win_foldinfo);
if (fold_count != 0)
fold_line(wp, fold_count, &win_foldinfo, lnum, row);
else
#endif
(void)win_line(wp, lnum, srow, wp->w_height, TRUE, TRUE);
}
// This line does not need to be drawn, advance to the next one.
row += wp->w_lines[idx++].wl_size;
if (row > wp->w_height) // past end of screen
break;
#ifdef FEAT_FOLDING
lnum = wp->w_lines[idx - 1].wl_lastlnum + 1;
#else
++lnum;
#endif
#ifdef FEAT_SYN_HL
did_update = DID_NONE;
#endif
}
if (lnum > buf->b_ml.ml_line_count)
{
eof = TRUE;
break;
}
}
// End of loop over all window lines.
#ifdef FEAT_VTP
// Rewrite the character at the end of the screen line.
// See the version that was fixed.
if (use_vtp() && get_conpty_fix_type() < 1)
{
int i;
for (i = 0; i < Rows; ++i)
if (enc_utf8)
if ((*mb_off2cells)(LineOffset[i] + Columns - 2,
LineOffset[i] + screen_Columns) > 1)
screen_draw_rectangle(i, Columns - 2, 1, 2, FALSE);
else
screen_draw_rectangle(i, Columns - 1, 1, 1, FALSE);
else
screen_char(LineOffset[i] + Columns - 1, i, Columns - 1);
}
#endif
if (idx > wp->w_lines_valid)
wp->w_lines_valid = idx;
#ifdef FEAT_SYN_HL
// Let the syntax stuff know we stop parsing here.
if (syntax_last_parsed != 0 && syntax_present(wp))
syntax_end_parsing(syntax_last_parsed + 1);
#endif
// If we didn't hit the end of the file, and we didn't finish the last
// line we were working on, then the line didn't fit.
wp->w_empty_rows = 0;
#ifdef FEAT_DIFF
wp->w_filler_rows = 0;
#endif
if (!eof && !didline)
{
if (lnum == wp->w_topline)
{
// Single line that does not fit!
// Don't overwrite it, it can be edited.
wp->w_botline = lnum + 1;
}
#ifdef FEAT_DIFF
else if (diff_check_fill(wp, lnum) >= wp->w_height - srow)
{
// Window ends in filler lines.
wp->w_botline = lnum;
wp->w_filler_rows = wp->w_height - srow;
}
#endif
#ifdef FEAT_PROP_POPUP
else if (WIN_IS_POPUP(wp))
{
// popup line that doesn't fit is left as-is
wp->w_botline = lnum;
}
#endif
else if (dy_flags & DY_TRUNCATE) // 'display' has "truncate"
{
int scr_row = W_WINROW(wp) + wp->w_height - 1;
// Last line isn't finished: Display "@@@" in the last screen line.
screen_puts_len((char_u *)"@@", 2, scr_row, wp->w_wincol,
HL_ATTR(HLF_AT));
screen_fill(scr_row, scr_row + 1,
(int)wp->w_wincol + 2, (int)W_ENDCOL(wp),
'@', ' ', HL_ATTR(HLF_AT));
set_empty_rows(wp, srow);
wp->w_botline = lnum;
}
else if (dy_flags & DY_LASTLINE) // 'display' has "lastline"
{
// Last line isn't finished: Display "@@@" at the end.
screen_fill(W_WINROW(wp) + wp->w_height - 1,
W_WINROW(wp) + wp->w_height,
(int)W_ENDCOL(wp) - 3, (int)W_ENDCOL(wp),
'@', '@', HL_ATTR(HLF_AT));
set_empty_rows(wp, srow);
wp->w_botline = lnum;
}
else
{
win_draw_end(wp, '@', ' ', TRUE, srow, wp->w_height, HLF_AT);
wp->w_botline = lnum;
}
}
else
{
draw_vsep_win(wp, row);
if (eof) // we hit the end of the file
{
wp->w_botline = buf->b_ml.ml_line_count + 1;
#ifdef FEAT_DIFF
j = diff_check_fill(wp, wp->w_botline);
if (j > 0 && !wp->w_botfill)
{
// Display filler lines at the end of the file.
if (char2cells(fill_diff) > 1)
i = '-';
else
i = fill_diff;
if (row + j > wp->w_height)
j = wp->w_height - row;
win_draw_end(wp, i, i, TRUE, row, row + (int)j, HLF_DED);
row += j;
}
#endif
}
else if (dollar_vcol == -1)
wp->w_botline = lnum;
// Make sure the rest of the screen is blank
// write the 'fill_eob' character to rows that aren't part of the file
if (WIN_IS_POPUP(wp))
win_draw_end(wp, ' ', ' ', FALSE, row, wp->w_height, HLF_AT);
else
win_draw_end(wp, fill_eob, ' ', FALSE, row, wp->w_height, HLF_EOB);
}
#ifdef SYN_TIME_LIMIT
syn_set_timeout(NULL);
#endif
// Reset the type of redrawing required, the window has been updated.
wp->w_redr_type = 0;
#ifdef FEAT_DIFF
wp->w_old_topfill = wp->w_topfill;
wp->w_old_botfill = wp->w_botfill;
#endif
if (dollar_vcol == -1)
{
// There is a trick with w_botline. If we invalidate it on each
// change that might modify it, this will cause a lot of expensive
// calls to plines() in update_topline() each time. Therefore the
// value of w_botline is often approximated, and this value is used to
// compute the value of w_topline. If the value of w_botline was
// wrong, check that the value of w_topline is correct (cursor is on
// the visible part of the text). If it's not, we need to redraw
// again. Mostly this just means scrolling up a few lines, so it
// doesn't look too bad. Only do this for the current window (where
// changes are relevant).
wp->w_valid |= VALID_BOTLINE;
if (wp == curwin && wp->w_botline != old_botline && !recursive)
{
win_T *wwp;
#if defined(FEAT_CONCEAL)
linenr_T old_topline = wp->w_topline;
int new_wcol = wp->w_wcol;
#endif
recursive = TRUE;
curwin->w_valid &= ~VALID_TOPLINE;
update_topline(); // may invalidate w_botline again
#if defined(FEAT_CONCEAL)
if (old_wcol != new_wcol && (wp->w_valid & (VALID_WCOL|VALID_WROW))
!= (VALID_WCOL|VALID_WROW))
{
// A win_line() call applied a fix to screen cursor column to
// accommodate concealment of cursor line, but in this call to
// update_topline() the cursor's row or column got invalidated.
// If they are left invalid, setcursor() will recompute them
// but there won't be any further win_line() call to re-fix the
// column and the cursor will end up misplaced. So we call
// cursor validation now and reapply the fix again (or call
// win_line() to do it for us).
validate_cursor();
if (wp->w_wcol == old_wcol && wp->w_wrow == old_wrow
&& old_topline == wp->w_topline)
wp->w_wcol = new_wcol;
else
redrawWinline(wp, wp->w_cursor.lnum);
}
#endif
// New redraw either due to updated topline or due to wcol fix.
if (wp->w_redr_type != 0)
{
// Don't update for changes in buffer again.
i = curbuf->b_mod_set;
curbuf->b_mod_set = FALSE;
j = curbuf->b_mod_xlines;
curbuf->b_mod_xlines = 0;
win_update(curwin);
curbuf->b_mod_set = i;
curbuf->b_mod_xlines = j;
}
// Other windows might have w_redr_type raised in update_topline().
must_redraw = 0;
FOR_ALL_WINDOWS(wwp)
if (wwp->w_redr_type > must_redraw)
must_redraw = wwp->w_redr_type;
recursive = FALSE;
}
}
#if defined(FEAT_SYN_HL) || defined(FEAT_SEARCH_EXTRA)
// restore got_int, unless CTRL-C was hit while redrawing
if (!got_int)
got_int = save_got_int;
#endif
}
| 0
|
417,070
|
// Restart playback on the wrapped player instance.
// All arguments are forwarded unchanged; without an underlying player
// this is a no-op.
void PlayerGeneric::restart(mp_uint32 startPosition/* = 0*/, mp_uint32 startRow/* = 0*/, bool resetMixer/* = true*/, const mp_ubyte* customPanningTable/* = NULL*/, bool playOneRowOnly/* = false*/)
{
	if (player == NULL)
		return;

	player->restart(startPosition, startRow, resetMixer, customPanningTable, playOneRowOnly);
}
| 0
|
313,733
|
nv_regname(cmdarg_T *cap)
{
    // Handle the '"' command: pick the register for the following operator.
    if (checkclearop(cap->oap))
	return;
#ifdef FEAT_EVAL
    // '"=' selects the expression register: prompt for the expression now.
    if (cap->nchar == '=')
	cap->nchar = get_expr_register();
#endif
    // Reject NUL or an invalid register name with a beep.
    if (cap->nchar == NUL || !valid_yank_reg(cap->nchar, FALSE))
    {
	clearopbeep(cap->oap);
	return;
    }

    cap->oap->regname = cap->nchar;
    cap->opcount = cap->count0;		// remember count before '"'
#ifdef FEAT_EVAL
    set_reg_var(cap->oap->regname);
#endif
}
| 0
|
218,995
|
// Bitwise inequality test for two floats.
//
// Compares the raw 32-bit patterns instead of using operator!=, so that
// identical NaN payloads compare equal and +0.0f differs from -0.0f —
// matching the packed/serialized representation of the values.
//
// The previous reinterpret_cast<int32_t&> read a float object through an
// int32_t lvalue, which violates the strict aliasing rule (undefined
// behavior); std::memcpy is the well-defined way to type-pun.
bool PackedValuesNotEqual(float a, float b) {
  static_assert(sizeof(int32_t) == sizeof(float), "float must be 32 bits");
  int32_t a_bits;
  int32_t b_bits;
  std::memcpy(&a_bits, &a, sizeof(a_bits));
  std::memcpy(&b_bits, &b, sizeof(b_bits));
  return a_bits != b_bits;
}
| 0
|
231,733
|
// Verify that acking stream-data packets releases the corresponding
// retransmission buffers, that duplicate ACKs are idempotent, and that the
// stream only transitions to Closed once the FIN-carrying packet is acked.
TEST_F(QuicServerTransportTest, TestOpenAckStreamFrame) {
  StreamId streamId = server->createBidirectionalStream().value();
  auto payload = IOBuf::copyBuffer("Aloha");

  // Start from a clean slate: drop anything already queued for the wire.
  server->getNonConstConn().outstandings.packets.clear();
  server->getNonConstConn().outstandings.initialPacketsCount = 0;
  server->getNonConstConn().outstandings.handshakePacketsCount = 0;

  server->writeChain(streamId, payload->clone(), false);
  loopForWrites();
  server->writeChain(streamId, payload->clone(), false);
  server->writeChain(streamId, payload->clone(), false);
  loopForWrites();

  auto stream = server->getNonConstConn().streamManager->getStream(streamId);
  ASSERT_FALSE(server->getConn().outstandings.packets.empty());
  ASSERT_FALSE(stream->retransmissionBuffer.empty());
  // We need more than one packet for this test.
  ASSERT_FALSE(server->getConn().outstandings.packets.empty());

  PacketNum firstPacketNum =
      getFirstOutstandingPacket(
          server->getNonConstConn(), PacketNumberSpace::AppData)
          ->packet.header.getPacketSequenceNum();
  PacketNum lastPacketNum =
      getLastOutstandingPacket(
          server->getNonConstConn(), PacketNumberSpace::AppData)
          ->packet.header.getPacketSequenceNum();

  // Count how many of this stream's buffers ride in the first AppData packet,
  // checking along the way that every outstanding stream frame still has a
  // matching retransmission buffer entry.
  uint32_t framesInFirstPacket = 0;
  for (auto& outstanding : server->getNonConstConn().outstandings.packets) {
    if (outstanding.packet.header.getPacketNumberSpace() !=
        PacketNumberSpace::AppData) {
      continue;
    }
    PacketNum thisPacketNum =
        outstanding.packet.header.getPacketSequenceNum();
    ASSERT_FALSE(outstanding.packet.frames.empty());
    for (auto& quicFrame : outstanding.packet.frames) {
      auto streamFrame = quicFrame.asWriteStreamFrame();
      if (!streamFrame) {
        continue;
      }
      auto bufIt = stream->retransmissionBuffer.find(streamFrame->offset);
      ASSERT_TRUE(bufIt != stream->retransmissionBuffer.end());
      if (thisPacketNum == firstPacketNum && streamFrame->streamId == streamId) {
        framesInFirstPacket++;
      }
    }
  }

  auto retransSizeBefore = stream->retransmissionBuffer.size();

  // Ack just the first packet: its buffers should be released.
  AckBlocks firstAck = {{firstPacketNum, firstPacketNum}};
  auto ackPkt1 = createAckPacket(
      server->getNonConstConn(),
      ++clientNextAppDataPacketNum,
      firstAck,
      PacketNumberSpace::AppData);
  deliverData(packetToBuf(ackPkt1));
  EXPECT_EQ(
      stream->retransmissionBuffer.size(),
      retransSizeBefore - framesInFirstPacket);
  EXPECT_EQ(stream->sendState, StreamSendState::Open);
  EXPECT_EQ(stream->recvState, StreamRecvState::Open);

  // A duplicate ack must not change anything.
  auto ackPkt2 = createAckPacket(
      server->getNonConstConn(),
      ++clientNextAppDataPacketNum,
      firstAck,
      PacketNumberSpace::AppData);
  deliverData(packetToBuf(ackPkt2));
  EXPECT_EQ(
      stream->retransmissionBuffer.size(),
      retransSizeBefore - framesInFirstPacket);
  EXPECT_EQ(stream->sendState, StreamSendState::Open);
  EXPECT_EQ(stream->recvState, StreamRecvState::Open);

  // Ack everything outstanding: the retransmission buffer empties, but the
  // stream stays open because no FIN has been sent yet.
  AckBlocks allAcks = {{firstPacketNum, lastPacketNum}};
  auto ackPkt3 = createAckPacket(
      server->getNonConstConn(),
      ++clientNextAppDataPacketNum,
      allAcks,
      PacketNumberSpace::AppData);
  deliverData(packetToBuf(ackPkt3));
  EXPECT_EQ(stream->retransmissionBuffer.size(), 0);
  EXPECT_EQ(stream->sendState, StreamSendState::Open);
  EXPECT_EQ(stream->recvState, StreamRecvState::Open);

  // Send a FIN and ack it: only now does the send side close.
  auto emptyBuf = IOBuf::create(0);
  server->writeChain(streamId, std::move(emptyBuf), true);
  loopForWrites();
  ASSERT_FALSE(server->getConn().outstandings.packets.empty());
  PacketNum finPacketNum =
      getFirstOutstandingPacket(
          server->getNonConstConn(), PacketNumberSpace::AppData)
          ->packet.header.getPacketSequenceNum();
  AckBlocks finAck = {{lastPacketNum, finPacketNum}};
  auto ackPkt4 = createAckPacket(
      server->getNonConstConn(),
      ++clientNextAppDataPacketNum,
      finAck,
      PacketNumberSpace::AppData);
  deliverData(packetToBuf(ackPkt4));
  EXPECT_EQ(stream->sendState, StreamSendState::Closed);
  EXPECT_EQ(stream->recvState, StreamRecvState::Open);
}
| 0
|
234,811
|
static void update_balance_args(struct btrfs_balance_control *bctl)
{
/*
* Turn on soft mode for chunk types that were being converted.
*/
if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
/*
* Turn on usage filter if is not already used. The idea is
* that chunks that we have already balanced should be
* reasonably full. Don't do it for chunks that are being
* converted - that will keep us from relocating unconverted
* (albeit full) chunks.
*/
if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
!(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
bctl->data.usage = 90;
}
if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
!(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
bctl->sys.usage = 90;
}
if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
!(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
bctl->meta.usage = 90;
}
}
| 0
|
424,968
|
/*
 * debugfs write handler for the "csr" file: any write triggers a dump of
 * the device CSR registers.  The written bytes themselves are ignored,
 * and the whole count is reported as consumed.
 */
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	/* private_data was set to the iwl_trans at open time. */
	iwl_pcie_dump_csr(file->private_data);

	return count;
}
| 0
|
521,497
|
// Construct a ZipFile reading its archive data from the given stream.
//
// Ownership: when deleteStreamWhenDestroyed is true, the stream is handed
// to streamToDelete so it is destroyed together with this object;
// otherwise the caller keeps ownership and must keep the stream alive for
// the lifetime of the ZipFile.
ZipFile::ZipFile (InputStream* stream, bool deleteStreamWhenDestroyed)
    : inputStream (stream)
{
    if (deleteStreamWhenDestroyed)
        streamToDelete.reset (inputStream);
    // Parse the central directory of the archive.
    init();
}
| 0
|
275,491
|
/*
 * Copy the name of the property "prop" into the string view "dst".
 *
 * The property name must already be a string value; otherwise a
 * TypeError is raised on the VM.
 * Returns NJS_OK on success, NJS_ERROR on a non-string name.
 */
njs_vm_prop_name(njs_vm_t *vm, njs_object_prop_t *prop, njs_str_t *dst)
{
    /* njs_slow_path(): hint that a non-string name is the unlikely case. */
    if (njs_slow_path(!njs_is_string(&prop->name))) {
        njs_type_error(vm, "property name is not a string");
        return NJS_ERROR;
    }
    njs_string_get(&prop->name, dst);
    return NJS_OK;
}
| 0
|
338,051
|
// Parse the element section of a wasm binary into ElementSegment IR.
//
// Handles active (with optional table index), passive, and declarative
// segments; declarative segments are skipped entirely since Binaryen IR
// does not represent them.
//
// Fix: the skip loop for declarative segments declared `Index i`,
// shadowing the outer segment index `i`; the inner counter is renamed to
// avoid the shadowing (harmless here, but a -Wshadow hazard and easy to
// misread).
void WasmBinaryBuilder::readElementSegments() {
  BYN_TRACE("== readElementSegments\n");
  auto numSegments = getU32LEB();
  if (numSegments >= Table::kMaxSize) {
    throwError("Too many segments");
  }
  for (size_t i = 0; i < numSegments; i++) {
    auto flags = getU32LEB();
    bool isPassive = (flags & BinaryConsts::IsPassive) != 0;
    bool hasTableIdx = !isPassive && ((flags & BinaryConsts::HasIndex) != 0);
    bool isDeclarative =
      isPassive && ((flags & BinaryConsts::IsDeclarative) != 0);
    bool usesExpressions = (flags & BinaryConsts::UsesExpressions) != 0;
    if (isDeclarative) {
      // Declared segments are needed in wasm text and binary, but not in
      // Binaryen IR; skip over the segment
      auto type = getU32LEB();
      WASM_UNUSED(type);
      auto num = getU32LEB();
      for (Index j = 0; j < num; j++) {
        getU32LEB();
      }
      continue;
    }
    auto segment = std::make_unique<ElementSegment>();
    segment->setName(Name::fromInt(i), false);
    if (!isPassive) {
      // Active segment: resolve the target table (imported tables come
      // before locally-defined ones) and read the offset expression.
      Index tableIdx = 0;
      if (hasTableIdx) {
        tableIdx = getU32LEB();
      }
      Table* table = nullptr;
      auto numTableImports = tableImports.size();
      if (tableIdx < numTableImports) {
        table = tableImports[tableIdx];
      } else if (tableIdx - numTableImports < tables.size()) {
        table = tables[tableIdx - numTableImports].get();
      }
      if (!table) {
        throwError("Table index out of range.");
      }
      segment->table = table->name;
      segment->offset = readExpression();
    }
    if (isPassive || hasTableIdx) {
      // These encodings carry an explicit element type (or element kind).
      if (usesExpressions) {
        segment->type = getType();
        if (!segment->type.isFunction()) {
          throwError("Invalid type for a usesExpressions element segment");
        }
      } else {
        auto elemKind = getU32LEB();
        if (elemKind != 0x0) {
          throwError("Invalid kind (!= funcref(0)) since !usesExpressions.");
        }
      }
    }
    auto& segmentData = segment->data;
    auto size = getU32LEB();
    if (usesExpressions) {
      for (Index j = 0; j < size; j++) {
        segmentData.push_back(readExpression());
      }
    } else {
      for (Index j = 0; j < size; j++) {
        Index index = getU32LEB();
        auto sig = getTypeByFunctionIndex(index);
        // Use a placeholder name for now
        auto* refFunc = Builder(wasm).makeRefFunc(Name::fromInt(index), sig);
        functionRefs[index].push_back(refFunc);
        segmentData.push_back(refFunc);
      }
    }
    elementSegments.push_back(std::move(segment));
  }
}
| 0
|
369,423
|
static __cold void io_ring_exit_work(struct work_struct *work)
{
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
unsigned long timeout = jiffies + HZ * 60 * 5;
unsigned long interval = HZ / 20;
struct io_tctx_exit exit;
struct io_tctx_node *node;
int ret;
/*
* If we're doing polled IO and end up having requests being
* submitted async (out-of-line), then completions can come in while
* we're waiting for refs to drop. We need to reap these manually,
* as nobody else will be looking for them.
*/
do {
io_uring_try_cancel_requests(ctx, NULL, true);
if (ctx->sq_data) {
struct io_sq_data *sqd = ctx->sq_data;
struct task_struct *tsk;
io_sq_thread_park(sqd);
tsk = sqd->thread;
if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
io_wq_cancel_cb(tsk->io_uring->io_wq,
io_cancel_ctx_cb, ctx, true);
io_sq_thread_unpark(sqd);
}
io_req_caches_free(ctx);
if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
/* there is little hope left, don't run it too often */
interval = HZ * 60;
}
} while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
init_completion(&exit.completion);
init_task_work(&exit.task_work, io_tctx_exit_cb);
exit.ctx = ctx;
/*
* Some may use context even when all refs and requests have been put,
* and they are free to do so while still holding uring_lock or
* completion_lock, see io_req_task_submit(). Apart from other work,
* this lock/unlock section also waits them to finish.
*/
mutex_lock(&ctx->uring_lock);
while (!list_empty(&ctx->tctx_list)) {
WARN_ON_ONCE(time_after(jiffies, timeout));
node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
ctx_node);
/* don't spin on a single task if cancellation failed */
list_rotate_left(&ctx->tctx_list);
ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
if (WARN_ON_ONCE(ret))
continue;
mutex_unlock(&ctx->uring_lock);
wait_for_completion(&exit.completion);
mutex_lock(&ctx->uring_lock);
}
mutex_unlock(&ctx->uring_lock);
spin_lock(&ctx->completion_lock);
spin_unlock(&ctx->completion_lock);
io_ring_ctx_free(ctx);
| 0
|
230,128
|
/**
 * Validate a webauthn assertion for the given username.
 *
 * Looks up the user, fetches the assertion attached to the session given
 * in j_scheme_data, then verifies the assertion signature.
 *
 * Returns G_OK on success, G_ERROR_UNAUTHORIZED when the user, session
 * assertion or signature check rejects, G_ERROR on internal error.
 *
 * Fix: the error log on a failed get_assertion_from_session() was
 * copy-pasted from the register path ("user_auth_scheme_module_register
 * ... Error get_credential"); it now names the actual function and call.
 */
int user_auth_scheme_module_validate(struct config_module * config, const struct _u_request * http_request, const char * username, json_t * j_scheme_data, void * cls) {
  UNUSED(http_request);
  int ret, res;
  json_t * j_user_id, * j_assertion;
  j_user_id = get_user_id_from_username(config, (json_t *)cls, username, 0);
  if (check_result_value(j_user_id, G_OK)) {
    j_assertion = get_assertion_from_session(config, (json_t *)cls, username, json_string_value(json_object_get(j_scheme_data, "session")), 0);
    if (check_result_value(j_assertion, G_OK)) {
      if ((res = check_assertion(config, (json_t *)cls, username, j_scheme_data, json_object_get(j_assertion, "assertion"))) == G_OK) {
        ret = G_OK;
      } else if (res == G_ERROR_UNAUTHORIZED) {
        ret = G_ERROR_UNAUTHORIZED;
      } else {
        y_log_message(Y_LOG_LEVEL_ERROR, "user_auth_scheme_module_validate webauthn - Error check_assertion");
        ret = G_ERROR;
      }
    } else if (check_result_value(j_assertion, G_ERROR_NOT_FOUND)) {
      ret = G_ERROR_UNAUTHORIZED;
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "user_auth_scheme_module_validate webauthn - Error get_assertion_from_session");
      ret = G_ERROR;
    }
    json_decref(j_assertion);
  } else if (check_result_value(j_user_id, G_ERROR_NOT_FOUND)) {
    ret = G_ERROR_UNAUTHORIZED;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "user_auth_scheme_module_validate webauthn - Error get_user_id_from_username");
    ret = G_ERROR;
  }
  json_decref(j_user_id);
  return ret;
}
| 0
|
386,576
|
/**
 * Writes a circle entity to the DXF stream.
 *
 * @param dw     Writer used to output the entity.
 * @param data   Circle geometry (center point and radius).
 * @param attrib Common entity attributes (layer, color, line type, ...).
 */
void DL_Dxf::writeCircle(DL_WriterA& dw,
                         const DL_CircleData& data,
                         const DL_Attributes& attrib) {
    // DXF 2000 requires subclass markers around the entity attributes.
    const bool is2000 = (version == DL_VERSION_2000);

    dw.entity("CIRCLE");
    if (is2000) {
        dw.dxfString(100, "AcDbEntity");
    }
    dw.entityAttributes(attrib);
    if (is2000) {
        dw.dxfString(100, "AcDbCircle");
    }
    dw.coord(10, data.cx, data.cy, data.cz);
    dw.dxfReal(40, data.radius);
}
| 0
|
261,761
|
// Begin the RTMP handshake as a client: send C0+C1 immediately, then wait
// for the server's S0+S1+S2 reply before invoking "func".
void RtmpProtocol::startClientSession(const function<void()> &func) {
    // Send C0 (protocol version byte) followed by C1
    char handshake_head = HANDSHAKE_PLAINTEXT;
    onSendRawData(obtainBuffer(&handshake_head, 1));
    RtmpHandshake c1(0);
    c1.create_complex_c0c1();
    onSendRawData(obtainBuffer((char *) (&c1), sizeof(c1)));
    _next_step_func = [this, func](const char *data, size_t len) {
        // Wait for S0+S1+S2
        return handle_S0S1S2(data, len, func);
    };
}
| 0
|
508,848
|
/*
  Start recording the utf8-converted body of the statement.

  Allocates the utf8 body buffer on the THD memory root and marks
  begin_ptr (which must lie inside the preprocessed cpp buffer) as the
  position up to which raw text is considered already processed.
*/
void Lex_input_stream::body_utf8_start(THD *thd, const char *begin_ptr)
{
  DBUG_ASSERT(begin_ptr);
  DBUG_ASSERT(m_cpp_buf <= begin_ptr && begin_ptr <= m_cpp_buf + m_buf_length);
  uint body_utf8_length= get_body_utf8_maximum_length(thd);
  // NOTE(review): the alloc result is dereferenced without a NULL check;
  // presumably thd->alloc aborts the statement on OOM -- confirm.
  m_body_utf8= (char *) thd->alloc(body_utf8_length + 1);
  m_body_utf8_ptr= m_body_utf8;
  *m_body_utf8_ptr= 0;   // start with an empty, NUL-terminated body
  m_cpp_utf8_processed_ptr= begin_ptr;
}
| 0
|
238,780
|
/*
 * Having seen the start of a C++ raw string literal R"delim( at
 * "startpos" (with "linep" the text of that line), search up to "endpos"
 * for the matching terminator )delim".
 * Return TRUE when the terminator is found before "endpos", FALSE
 * otherwise (including when allocating the delimiter copy fails).
 */
find_rawstring_end(char_u *linep, pos_T *startpos, pos_T *endpos)
{
    char_u *p;
    char_u *delim_copy;
    size_t delim_len;
    linenr_T lnum;
    int found = FALSE;
    /* The delimiter is the text between the '"' and the '('. */
    for (p = linep + startpos->col + 1; *p && *p != '('; ++p)
	;
    delim_len = (p - linep) - startpos->col - 1;
    delim_copy = vim_strnsave(linep + startpos->col + 1, delim_len);
    if (delim_copy == NULL)
	return FALSE;
    /* Scan each line of the range for ')' + delimiter + '"'. */
    for (lnum = startpos->lnum; lnum <= endpos->lnum; ++lnum)
    {
	char_u *line = ml_get(lnum);
	/* On the first line start just after the opening quote. */
	for (p = line + (lnum == startpos->lnum
		? startpos->col + 1 : 0); *p; ++p)
	{
	    /* Do not look past "endpos" in its line. */
	    if (lnum == endpos->lnum && (colnr_T)(p - line) >= endpos->col)
		break;
	    if (*p == ')' && STRNCMP(delim_copy, p + 1, delim_len) == 0
		&& p[delim_len + 1] == '"')
	    {
		found = TRUE;
		break;
	    }
	}
	if (found)
	    break;
    }
    vim_free(delim_copy);
    return found;
}
| 0
|
326,114
|
/*
 * Pop the topmost item from the regexec state stack and resume scanning
 * at the program position it saved.
 */
regstack_pop(char_u **scan)
{
    regitem_T *top = (regitem_T *)((char *)regstack.ga_data
						  + regstack.ga_len) - 1;

    *scan = top->rs_scan;
    regstack.ga_len -= sizeof(regitem_T);
}
| 0
|
257,689
|
/*
 * Periodic trigger for all active websocket-tunnel connections:
 *  - sends a websocket CLOSE frame and resets requests whose client has
 *    been idle longer than max_read_idle, and
 *  - sends a websocket PING when ping_interval is configured and expired
 *    (hybi protocol versions only, hybivers != 0).
 */
TRIGGER_FUNC(mod_wstunnel_handle_trigger) {
    const plugin_data * const p = p_d;
    /* +1 so comparisons below err on the side of firing. */
    const unix_time64_t cur_ts = log_monotonic_secs + 1;
    gw_handle_trigger(srv, p_d);
    for (connection *con = srv->conns; con; con = con->next) {
        request_st * const r = &con->request;
        handler_ctx *hctx = r->plugin_ctx[p->id];
        /* Only consider requests currently handled by this module. */
        if (NULL == hctx || r->handler_module != p->self)
            continue;
        /* Skip connections that are not in an established tunnel state. */
        if (hctx->gw.state != GW_STATE_WRITE && hctx->gw.state != GW_STATE_READ)
            continue;
        if (cur_ts - con->read_idle_ts > r->conf.max_read_idle) {
            DEBUG_LOG_INFO("timeout client (fd=%d)", con->fd);
            mod_wstunnel_frame_send(hctx,MOD_WEBSOCKET_FRAME_TYPE_CLOSE,NULL,0);
            gw_handle_request_reset(r, p_d);
            joblist_append(con);
            /* avoid server.c closing connection with error due to max_read_idle
             * (might instead run joblist after plugins_call_handle_trigger())*/
            con->read_idle_ts = cur_ts;
            continue;
        }
        if (0 != hctx->hybivers
            && hctx->conf.ping_interval > 0
            && (int32_t)hctx->conf.ping_interval + hctx->ping_ts < cur_ts) {
            hctx->ping_ts = cur_ts;
            mod_wstunnel_frame_send(hctx, MOD_WEBSOCKET_FRAME_TYPE_PING, CONST_STR_LEN("ping"));
            joblist_append(con);
            continue;
        }
    }
    return HANDLER_GO_ON;
}
| 0
|
441,804
|
/*
 * Swapped-client entry point for XkbGetControls: byte-swap the request
 * fields to host order, then hand off to the normal handler.
 */
SProcXkbGetControls(ClientPtr client)
{
    REQUEST(xkbGetControlsReq);
    /* Swap the length first so the size check sees a host-order value. */
    swaps(&stuff->length);
    REQUEST_SIZE_MATCH(xkbGetControlsReq);
    swaps(&stuff->deviceSpec);
    return ProcXkbGetControls(client);
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.