idx
int64 | func
string | target
int64 |
|---|---|---|
512,452
|
void in_double::set(uint pos,Item *item)
{
  /* Store the item's real (double) value into slot `pos` of the array. */
  double *slots= (double*) base;
  slots[pos]= item->val_real();
}
| 0
|
221,179
|
/* Parse a LASER decoder-specific configuration from `dsi` into `cfg`.
 *
 * @param dsi descriptor carrying the raw configuration payload
 * @param cfg output configuration; zeroed on entry so callers see a clean
 *            struct on any error
 * @return GF_OK on success, GF_BAD_PARAM on missing input,
 *         GF_OUT_OF_MEM if the bitstream reader cannot be created
 */
GF_Err gf_odf_get_laser_config(GF_DefaultDescriptor *dsi, GF_LASERConfig *cfg)
{
	u32 to_skip;
	GF_BitStream *bs;

	if (!cfg) return GF_BAD_PARAM;
	/* Clear the output exactly once, up front (the previous code repeated
	 * this memset after creating the bitstream, which was redundant). */
	memset(cfg, 0, sizeof(GF_LASERConfig));
	if (!dsi || !dsi->data || !dsi->dataLength) return GF_BAD_PARAM;
	bs = gf_bs_new(dsi->data, dsi->dataLength, GF_BITSTREAM_READ);
	/* Fix: the bitstream allocation was previously used unchecked. */
	if (!bs) return GF_OUT_OF_MEM;

	cfg->tag = GF_ODF_LASER_CFG_TAG;
	cfg->profile = gf_bs_read_int(bs, 8);
	cfg->level = gf_bs_read_int(bs, 8);
	/*cfg->reserved = */gf_bs_read_int(bs, 3);
	cfg->pointsCodec = gf_bs_read_int(bs, 2);
	cfg->pathComponents = gf_bs_read_int(bs, 4);
	cfg->fullRequestHost = gf_bs_read_int(bs, 1);
	/* Optional explicit time resolution; defaults to 1000 when absent. */
	if (gf_bs_read_int(bs, 1)) cfg->time_resolution = gf_bs_read_int(bs, 16);
	else cfg->time_resolution = 1000;
	cfg->colorComponentBits = 1 + gf_bs_read_int(bs, 4);
	/* resolution is a 4-bit signed field: raw values > 7 wrap to negatives */
	cfg->resolution = gf_bs_read_int(bs, 4);
	if (cfg->resolution>7) cfg->resolution -= 16;
	cfg->coord_bits = gf_bs_read_int(bs, 5);
	cfg->scale_bits_minus_coord_bits = gf_bs_read_int(bs, 4);
	cfg->newSceneIndicator = gf_bs_read_int(bs, 1);
	/*reserved2*/ gf_bs_read_int(bs, 3);
	cfg->extensionIDBits = gf_bs_read_int(bs, 4);
	/*hasExtConfig - we just ignore it*/
	if (gf_bs_read_int(bs, 1)) {
		to_skip = gf_bs_read_vluimsbf5(bs);
		while (to_skip) {
			gf_bs_read_int(bs, 8);
			to_skip--;
		}
	}
	/*hasExtension - we just ignore it*/
	if (gf_bs_read_int(bs, 1)) {
		to_skip = gf_bs_read_vluimsbf5(bs);
		while (to_skip) {
			gf_bs_read_int(bs, 8);
			to_skip--;
		}
	}
	gf_bs_del(bs);
	return GF_OK;
}
| 0
|
273,895
|
/* Handle the FTP "OPTS" command.  "OPTS MLST fact;fact;..." selects which
 * MLST/MLSD facts to report; any other OPTS argument is acknowledged as the
 * UTF8 option.  Accepted facts are stored (one character each) in
 * ctrl->facts and echoed back to the client. */
static void handle_OPTS(ctrl_t *ctrl, char *arg)
{
/* OPTS MLST type;size;modify;perm; */
if (strstr(arg, "MLST")) {
size_t i = 0;
char *ptr;
char buf[42] = "200 MLST OPTS ";
char facts[10] = { 0 };
/* Walk the requested fact list, keeping only facts we support.
 * Note: strtok mutates `arg`; arg+4 is safe because strstr proved the
 * string contains at least the 4 bytes of "MLST". */
ptr = strtok(arg + 4, " \t;");
while (ptr && i < sizeof(facts) - 1) {
if (!strcmp(ptr, "modify") ||
!strcmp(ptr, "perm") ||
!strcmp(ptr, "size") ||
!strcmp(ptr, "type")) {
/* first letter uniquely identifies each supported fact */
facts[i++] = ptr[0];
strlcat(buf, ptr, sizeof(buf));
strlcat(buf, ";", sizeof(buf));
}
ptr = strtok(NULL, ";");
}
strlcat(buf, "\r\n", sizeof(buf));
DBG("New MLSD facts: %s", facts);
strlcpy(ctrl->facts, facts, sizeof(ctrl->facts));
send_msg(ctrl->sd, buf);
} else
send_msg(ctrl->sd, "200 UTF8 OPTS ON\r\n");
}
| 0
|
393,485
|
/* Squirrel default delegate: convert the value in stack slot 1 to a float.
 * Pushes the converted value (or null for unconvertible types) and returns
 * 1; throws a script error for a string that does not parse as a number. */
static SQInteger default_delegate_tofloat(HSQUIRRELVM v)
{
SQObjectPtr &o=stack_get(v,1);
switch(sq_type(o)){
case OT_STRING:{
SQObjectPtr res;
/* base-10 parse; on success push the parsed number as a float */
if(str2num(_stringval(o),res,10)){
v->Push(SQObjectPtr(tofloat(res)));
break;
}}
return sq_throwerror(v, _SC("cannot convert the string"));
break;
case OT_INTEGER:case OT_FLOAT:
v->Push(SQObjectPtr(tofloat(o)));
break;
case OT_BOOL:
/* booleans map to 1.0 or 0.0 */
v->Push(SQObjectPtr((SQFloat)(_integer(o)?1:0)));
break;
default:
/* any other type yields null rather than an error */
v->PushNull();
break;
}
return 1;
}
| 0
|
338,065
|
/* Parse the wasm binary "data" section: `num` segments, each consisting of
 * a flags word (0, 1 or 2), an optional memory index, an offset expression
 * (active segments only) and the raw byte payload. */
void WasmBinaryBuilder::readDataSegments() {
BYN_TRACE("== readDataSegments\n");
auto num = getU32LEB();
for (size_t i = 0; i < num; i++) {
Memory::Segment curr;
uint32_t flags = getU32LEB();
if (flags > 2) {
throwError("bad segment flags, must be 0, 1, or 2, not " +
std::to_string(flags));
}
curr.isPassive = flags & BinaryConsts::IsPassive;
if (flags & BinaryConsts::HasIndex) {
auto memIndex = getU32LEB();
// Only the single default memory (index 0) is supported.
if (memIndex != 0) {
throwError("nonzero memory index");
}
}
// Passive segments carry no offset expression.
if (!curr.isPassive) {
curr.offset = readExpression();
}
auto size = getU32LEB();
auto data = getByteView(size);
curr.data = {data.first, data.second};
wasm.memory.segments.push_back(std::move(curr));
}
}
| 0
|
387,722
|
/* Clear this klass's cached java mirror (delegating to the superclass
 * implementation) and do the same for its array classes, if any. */
void InstanceKlass::remove_java_mirror() {
Klass::remove_java_mirror();
// do array classes also.
if (array_klasses() != NULL) {
array_klasses()->remove_java_mirror();
}
}
| 0
|
445,970
|
extract_data_new (FrWindow *window,
		  GList *file_list,
		  GFile *destination,
		  const char *base_dir,
		  gboolean skip_older,
		  FrOverwrite overwrite,
		  gboolean junk_paths,
		  gboolean extract_here,
		  gboolean ask_to_open_destination)
{
	/* Allocate a zero-filled ExtractData and copy/ref each argument in.
	 * (extract_here is accepted for signature compatibility but is not
	 * stored here.) */
	ExtractData *edata = g_new0 (ExtractData, 1);

	edata->window = window;
	edata->file_list = _g_string_list_dup (file_list);
	edata->destination = _g_object_ref (destination);
	edata->skip_older = skip_older;
	edata->overwrite = overwrite;
	edata->junk_paths = junk_paths;
	edata->base_dir = (base_dir != NULL) ? g_strdup (base_dir) : NULL;
	edata->ask_to_open_destination = ask_to_open_destination;

	return edata;
}
| 0
|
227,027
|
/* Callback for IRC numeric 345.  Requires at least 5 arguments; relays
 * argv[3] (the channel) and the trailing text (argv_eol[4], with a leading
 * ':' stripped) to the "reop" target buffer. */
IRC_PROTOCOL_CALLBACK(345)
{
IRC_PROTOCOL_MIN_ARGS(5);
weechat_printf_date_tags (
irc_msgbuffer_get_target_buffer (server, NULL, command, "reop", NULL),
date,
irc_protocol_tags (command, "irc_numeric", NULL, NULL),
"%s%s%s%s: %s",
weechat_prefix ("network"),
IRC_COLOR_CHAT_CHANNEL,
argv[3],
IRC_COLOR_RESET,
(argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]);
return WEECHAT_RC_OK;
}
| 0
|
513,228
|
/*
 * Shut down the plugin subsystem.
 *
 * Repeatedly reaps plugins until no more can be reaped cleanly, marks any
 * stragglers PLUGIN_IS_DYING and force-deinitializes them, reports leaked
 * references, then frees all plugin hashes, arrays, mem-roots and the
 * LOCK_plugin mutex.  Must be called late in server shutdown, when no
 * concurrent threads remain.
 */
void plugin_shutdown(void)
{
uint i, count= plugin_array.elements;
struct st_plugin_int **plugins, *plugin;
struct st_plugin_dl **dl;
DBUG_ENTER("plugin_shutdown");
if (initialized)
{
mysql_mutex_lock(&LOCK_plugin);
reap_needed= true;
/*
We want to shut down plugins in a reasonable order, this will
become important when we have plugins which depend upon each other.
Circular references cannot be reaped so they are forced afterwards.
TODO: Have an additional step here to notify all active plugins that
shutdown is requested to allow plugins to deinitialize in parallel.
*/
while (reap_needed && (count= plugin_array.elements))
{
reap_plugins();
for (i= 0; i < count; i++)
{
plugin= *dynamic_element(&plugin_array, i, struct st_plugin_int **);
if (plugin->state == PLUGIN_IS_READY)
{
plugin->state= PLUGIN_IS_DELETED;
reap_needed= true;
}
}
if (!reap_needed)
{
/*
release any plugin references held.
*/
unlock_variables(NULL, &global_system_variables);
unlock_variables(NULL, &max_system_variables);
}
}
/* count+1 presumably keeps the allocation non-zero when count == 0 */
plugins= (struct st_plugin_int **) my_alloca(sizeof(void*) * (count+1));
/*
If we have any plugins which did not die cleanly, we force shutdown
*/
for (i= 0; i < count; i++)
{
plugins[i]= *dynamic_element(&plugin_array, i, struct st_plugin_int **);
/* change the state to ensure no reaping races */
if (plugins[i]->state == PLUGIN_IS_DELETED)
plugins[i]->state= PLUGIN_IS_DYING;
}
mysql_mutex_unlock(&LOCK_plugin);
/*
We loop through all plugins and call deinit() if they have one.
*/
for (i= 0; i < count; i++)
if (!(plugins[i]->state & (PLUGIN_IS_UNINITIALIZED | PLUGIN_IS_FREED |
PLUGIN_IS_DISABLED)))
{
/*
We are forcing deinit on plugins so we don't want to do a ref_count
check until we have processed all the plugins.
*/
plugin_deinitialize(plugins[i], false);
}
/*
It's perfectly safe not to lock LOCK_plugin, as there're no
concurrent threads anymore. But some functions called from here
use mysql_mutex_assert_owner(), so we lock the mutex to satisfy it
*/
mysql_mutex_lock(&LOCK_plugin);
/*
We defer checking ref_counts until after all plugins are deinitialized
as some may have worker threads holding on to plugin references.
*/
for (i= 0; i < count; i++)
{
if (plugins[i]->ref_count)
sql_print_error("Plugin '%s' has ref_count=%d after shutdown.",
plugins[i]->name.str, plugins[i]->ref_count);
if (plugins[i]->state & PLUGIN_IS_UNINITIALIZED ||
plugins[i]->state & PLUGIN_IS_DISABLED)
plugin_del(plugins[i]);
}
/*
Now we can deallocate all memory.
*/
cleanup_variables(&global_system_variables);
cleanup_variables(&max_system_variables);
mysql_mutex_unlock(&LOCK_plugin);
initialized= 0;
mysql_mutex_destroy(&LOCK_plugin);
my_afree(plugins);
}
/* Dispose of the memory */
for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
my_hash_free(&plugin_hash[i]);
delete_dynamic(&plugin_array);
count= plugin_dl_array.elements;
dl= (struct st_plugin_dl **)my_alloca(sizeof(void*) * count);
for (i= 0; i < count; i++)
dl[i]= *dynamic_element(&plugin_dl_array, i, struct st_plugin_dl **);
for (i= 0; i < plugin_dl_array.elements; i++)
free_plugin_mem(dl[i]);
my_afree(dl);
delete_dynamic(&plugin_dl_array);
my_hash_free(&bookmark_hash);
free_root(&plugin_mem_root, MYF(0));
free_root(&plugin_vars_mem_root, MYF(0));
global_variables_dynamic_size= 0;
DBUG_VOID_RETURN;
}
| 0
|
219,021
|
/* Remove the control-dependency edge on `old_input` from `node`'s inputs,
 * if present.  Returns true when an input edge was actually removed.  The
 * NodeMap output entry is only dropped when it is safe to do so (see the
 * inline comment about a remaining non-control input). */
bool MaybeRemoveControlInput(const string& old_input, NodeDef* node,
GraphDef* graph, NodeMap* node_map) {
bool removed_input = false;
bool update_node_map = true;
const string old_input_ctrl_dep = AsControlDependency(NodeName(old_input));
for (int i = 0; i < node->input_size(); ++i) {
const string& input = node->input(i);
if (old_input_ctrl_dep == input) {
if (IsControlInput(input)) {
// Swap-with-last then pop: O(1) removal, input order may change.
node->mutable_input()->SwapElements(i, node->input_size() - 1);
node->mutable_input()->RemoveLast();
removed_input = true;
} else {
// There is a non-control input from the same node.
// Don't remove the output from the NodeMap.
update_node_map = false;
}
}
}
if (update_node_map) {
node_map->RemoveOutput(NodeName(old_input), node->name());
}
return removed_input;
}
| 0
|
221,139
|
/* Parse a VP (vpcC) sample configuration from `bs`.
 *
 * @param bs    bitstream positioned at the start of the configuration
 * @param is_v0 legacy v0 layout: parsing stops after matrix_coefficients
 * @return newly allocated config, or NULL on allocation failure or when
 *         codec_initdata_size (which the spec mandates to be 0) is non-zero
 */
GF_VPConfig *gf_odf_vp_cfg_read_bs(GF_BitStream *bs, Bool is_v0)
{
	GF_VPConfig *cfg = gf_odf_vp_cfg_new();
	/* Fix: the allocation result was previously dereferenced unchecked. */
	if (!cfg) return NULL;

	cfg->profile = gf_bs_read_int(bs, 8);
	cfg->level = gf_bs_read_int(bs, 8);
	cfg->bit_depth = gf_bs_read_int(bs, 4);
	cfg->chroma_subsampling = gf_bs_read_int(bs, 3);
	cfg->video_fullRange_flag = gf_bs_read_int(bs, 1);
	cfg->colour_primaries = gf_bs_read_int(bs, 8);
	cfg->transfer_characteristics = gf_bs_read_int(bs, 8);
	cfg->matrix_coefficients = gf_bs_read_int(bs, 8);
	/* v0 boxes end here */
	if (is_v0)
		return cfg;
	cfg->codec_initdata_size = gf_bs_read_int(bs, 16);
	// must be 0 according to spec
	if (cfg->codec_initdata_size) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] VP Configuration Box: invalid data, codec_initdata_size must be 0, was %d\n", cfg->codec_initdata_size));
		gf_odf_vp_cfg_del(cfg);
		return NULL;
	}
	return cfg;
}
| 0
|
283,748
|
/* MMIO read handler for the Zynq SLCR register block.
 * NOTE(review): s->regs[offset] is read before the offset validity check;
 * this assumes the MMIO core has already bounded `offset` to the region
 * backing `regs` — confirm against the memory-region setup.  Invalid (but
 * in-range) offsets are logged yet still return the stored value. */
static uint64_t zynq_slcr_read(void *opaque, hwaddr offset,
unsigned size)
{
ZynqSLCRState *s = opaque;
/* convert byte offset to 32-bit word index */
offset /= 4;
uint32_t ret = s->regs[offset];
if (!zynq_slcr_check_offset(offset, true)) {
qemu_log_mask(LOG_GUEST_ERROR, "zynq_slcr: Invalid read access to "
" addr %" HWADDR_PRIx "\n", offset * 4);
}
DB_PRINT("addr: %08" HWADDR_PRIx " data: %08" PRIx32 "\n", offset * 4, ret);
return ret;
}
| 0
|
413,631
|
static int fcn_list_json(RCore *core, RList *fcns, bool quiet) {
	RAnalFunction *fcn;
	RListIter *iter;
	PJ *pj = r_core_pj_new (core);

	/* Without a JSON builder all we can emit is an empty array. */
	if (!pj) {
		r_cons_println ("[]");
		return -1;
	}

	pj_a (pj);
	r_list_foreach (fcns, iter, fcn) {
		if (quiet) {
			/* quiet mode: addresses only */
			pj_n (pj, fcn->addr);
			continue;
		}
		fcn_print_json (core, fcn, pj);
	}
	pj_end (pj);

	r_cons_println (pj_string (pj));
	pj_free (pj);
	return 0;
}
| 0
|
508,833
|
void st_select_lex_node::init_query()
{
  /* Reset this select-lex node to its default per-query state. */
  linkage= UNSPECIFIED_TYPE;
  options= 0;
  sql_cache= SQL_CACHE_UNSPECIFIED;
  uncacheable= 0;
  no_table_names_allowed= 0;
}
| 0
|
445,915
|
fr_window_close (FrWindow *window)
{
	GtkWidget *widget = GTK_WIDGET (window);

	/* Ignore the request while an operation is in progress, or if we are
	 * already closing. */
	if ((window->priv->activity_ref > 0) || window->priv->closing)
		return;
	window->priv->closing = TRUE;

	/* Persist the window geometry before destroying it. */
	if (gtk_widget_get_realized (widget)) {
		int width = gtk_widget_get_allocated_width (widget);
		int height = gtk_widget_get_allocated_height (widget);

		g_settings_set_int (window->priv->settings_ui, PREF_UI_WINDOW_WIDTH, width);
		g_settings_set_int (window->priv->settings_ui, PREF_UI_WINDOW_HEIGHT, height);

		width = gtk_paned_get_position (GTK_PANED (window->priv->paned));
		if (width > 0)
			g_settings_set_int (window->priv->settings_ui, PREF_UI_SIDEBAR_WIDTH, width);

		width = gtk_tree_view_column_get_width (window->priv->filename_column);
		if (width > 0)
			g_settings_set_int (window->priv->settings_listing, PREF_LISTING_NAME_COLUMN_WIDTH, width);
	}

	gtk_widget_destroy (widget);
}
| 0
|
229,260
|
// std::exception-style description of the error.  Returns a string
// literal, so the pointer stays valid for the program's lifetime.
const char* what() const throw () override {
return "bad cql binary frame";
}
| 0
|
308,194
|
/* ioctl handler: copy a fastrpc_invoke request (plus its argument array,
 * when present) from user space and perform the remote invocation.
 * Returns 0 or a negative errno. */
static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_invoke_args *args = NULL;
struct fastrpc_invoke inv;
u32 nscalars;
int err;
if (copy_from_user(&inv, argp, sizeof(inv)))
return -EFAULT;
/* nscalars is truncated here to max supported value */
nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
if (nscalars) {
/* kcalloc guards against nscalars * sizeof(*args) overflow */
args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
if (!args)
return -ENOMEM;
if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
nscalars * sizeof(*args))) {
kfree(args);
return -EFAULT;
}
}
err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
/* kfree(NULL) is a no-op, so this is safe when nscalars == 0 */
kfree(args);
return err;
}
| 0
|
462,317
|
/* PCL status readback: answer an ECHO request by writing "ECHO <value>"
 * into the status stream.  Always returns 0. */
pcl_echo(pcl_args_t * pargs, pcl_state_t * pcs)
{
stream st;
status_begin(&st, pcs);
stprintf(&st, "ECHO %d\r\n", int_arg(pargs));
status_end(&st, pcs);
return 0;
}
| 0
|
218,770
|
/* Write the 2-byte compression marker for the next PSD image data block
 * and, for RLE, emit one placeholder offset slot per row per channel.
 * Returns the number of bytes written. */
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
const Image *next_image,const ssize_t channels)
{
ssize_t
i,
offset,
y;
if (next_image->compression == RLECompression)
{
offset=WriteBlobMSBShort(image,RLE);
/* SetPSDOffset(...,0) writes zero placeholders — presumably patched
once the real compressed row sizes are known (not visible here). */
for (i=0; i < channels; i++)
for (y=0; y < (ssize_t) next_image->rows; y++)
offset+=SetPSDOffset(psd_info,image,0);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
offset=WriteBlobMSBShort(image,ZipWithoutPrediction);
#endif
else
offset=WriteBlobMSBShort(image,Raw);
return((size_t) offset);
}
| 0
|
286,730
|
/* Return the length, in bytes, of the symmetric key currently configured
 * for state migration. */
size_t SWTPM_NVRAM_MigrationKey_Size(void)
{
return migrationkey.symkey.userKeyLength;
}
| 0
|
369,216
|
/* Pre-allocate a backup rsrc node so a subsequent node switch cannot fail
 * at a point where failure is hard to unwind.  Returns 0 on success (or
 * when a backup node already exists), -ENOMEM otherwise.
 * Fix: the captured text was missing the function's closing brace. */
static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (ctx->rsrc_backup_node)
		return 0;
	ctx->rsrc_backup_node = io_rsrc_node_alloc();
	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
}
| 0
|
444,900
|
/*
 * Append an entry for a just-mounted share to the mtab.
 *
 * Sets the real uid to the effective uid (so unprivileged users cannot
 * signal us), blocks all signals and raises DAC capabilities around the
 * update, locks the mtab, and truncates it back to its original size if
 * the new entry cannot be written completely.
 * Returns 0 on success or EX_FILEIO on failure.
 */
add_mtab(char *devname, char *mountpoint, unsigned long flags, const char *fstype)
{
int rc = 0, tmprc, fd;
uid_t uid;
char *mount_user = NULL;
struct mntent mountent;
struct stat statbuf;
FILE *pmntfile;
sigset_t mask, oldmask;
uid = getuid();
if (uid != 0)
mount_user = getusername(uid);
/*
 * Set the real uid to the effective uid. This prevents unprivileged
 * users from sending signals to this process, though ^c on controlling
 * terminal should still work.
 */
rc = setreuid(geteuid(), -1);
if (rc != 0) {
fprintf(stderr, "Unable to set real uid to effective uid: %s\n",
strerror(errno));
return EX_FILEIO;
}
rc = sigfillset(&mask);
if (rc) {
fprintf(stderr, "Unable to set filled signal mask\n");
return EX_FILEIO;
}
rc = sigprocmask(SIG_SETMASK, &mask, &oldmask);
if (rc) {
fprintf(stderr, "Unable to make process ignore signals\n");
return EX_FILEIO;
}
rc = toggle_dac_capability(1, 1);
if (rc)
return EX_FILEIO;
atexit(unlock_mtab);
rc = lock_mtab();
if (rc) {
fprintf(stderr, "cannot lock mtab");
rc = EX_FILEIO;
goto add_mtab_exit;
}
pmntfile = setmntent(MOUNTED, "a+");
if (!pmntfile) {
fprintf(stderr, "could not update mount table\n");
unlock_mtab();
rc = EX_FILEIO;
goto add_mtab_exit;
}
fd = fileno(pmntfile);
if (fd < 0) {
fprintf(stderr, "mntent does not appear to be valid\n");
unlock_mtab();
rc = EX_FILEIO;
goto add_mtab_exit;
}
/* remember the pre-append size so a failed write can be rolled back */
rc = fstat(fd, &statbuf);
if (rc != 0) {
fprintf(stderr, "unable to fstat open mtab\n");
endmntent(pmntfile);
unlock_mtab();
rc = EX_FILEIO;
goto add_mtab_exit;
}
mountent.mnt_fsname = devname;
mountent.mnt_dir = mountpoint;
mountent.mnt_type = (char *)(void *)fstype;
mountent.mnt_opts = (char *)calloc(MTAB_OPTIONS_LEN, 1);
if (mountent.mnt_opts) {
/* translate the mount flags into a textual option list */
if (flags & MS_RDONLY)
strlcat(mountent.mnt_opts, "ro", MTAB_OPTIONS_LEN);
else
strlcat(mountent.mnt_opts, "rw", MTAB_OPTIONS_LEN);
if (flags & MS_MANDLOCK)
strlcat(mountent.mnt_opts, ",mand", MTAB_OPTIONS_LEN);
if (flags & MS_NOEXEC)
strlcat(mountent.mnt_opts, ",noexec", MTAB_OPTIONS_LEN);
if (flags & MS_NOSUID)
strlcat(mountent.mnt_opts, ",nosuid", MTAB_OPTIONS_LEN);
if (flags & MS_NODEV)
strlcat(mountent.mnt_opts, ",nodev", MTAB_OPTIONS_LEN);
if (flags & MS_SYNCHRONOUS)
strlcat(mountent.mnt_opts, ",sync", MTAB_OPTIONS_LEN);
if (mount_user) {
strlcat(mountent.mnt_opts, ",user=", MTAB_OPTIONS_LEN);
strlcat(mountent.mnt_opts, mount_user,
MTAB_OPTIONS_LEN);
}
}
mountent.mnt_freq = 0;
mountent.mnt_passno = 0;
rc = addmntent(pmntfile, &mountent);
if (rc) {
fprintf(stderr, "unable to add mount entry to mtab\n");
/* roll the file back to its original length */
ftruncate(fd, statbuf.st_size);
rc = EX_FILEIO;
}
tmprc = my_endmntent(pmntfile, statbuf.st_size);
if (tmprc) {
fprintf(stderr, "error %d detected on close of mtab\n", tmprc);
rc = EX_FILEIO;
}
unlock_mtab();
SAFE_FREE(mountent.mnt_opts);
add_mtab_exit:
toggle_dac_capability(1, 0);
sigprocmask(SIG_SETMASK, &oldmask, NULL);
return rc;
}
| 0
|
312,592
|
/*
 * ":helpgrep {pattern}" / ":lhelpgrep {pattern}": search all help files for
 * the pattern, collect matches into a new quickfix (or location) list, and
 * jump to the first match.
 *
 * Fix: the call to hgr_search_in_rtp() contained a mojibake sequence
 * ("®match") in place of "&regmatch", which does not compile.
 */
ex_helpgrep(exarg_T *eap)
{
regmatch_T regmatch;
char_u *save_cpo;
int save_cpo_allocated;
qf_info_T *qi = &ql_info;
int new_qi = FALSE;
char_u *au_name = NULL;
char_u *lang = NULL;
int updated = FALSE;
switch (eap->cmdidx)
{
case CMD_helpgrep: au_name = (char_u *)"helpgrep"; break;
case CMD_lhelpgrep: au_name = (char_u *)"lhelpgrep"; break;
default: break;
}
if (au_name != NULL && apply_autocmds(EVENT_QUICKFIXCMDPRE, au_name,
curbuf->b_fname, TRUE, curbuf))
{
#ifdef FEAT_EVAL
if (aborting())
return;
#endif
}
/* ":lhelpgrep" uses a location list instead of the global quickfix list */
if (is_loclist_cmd(eap->cmdidx))
{
qi = hgr_get_ll(&new_qi);
if (qi == NULL)
return;
}
// Make 'cpoptions' empty, the 'l' flag should not be used here.
save_cpo = p_cpo;
save_cpo_allocated = is_option_allocated("cpo");
p_cpo = empty_option;
incr_quickfix_busy();
#ifdef FEAT_MULTI_LANG
// Check for a specified language
lang = check_help_lang(eap->arg);
#endif
regmatch.regprog = vim_regcomp(eap->arg, RE_MAGIC + RE_STRING);
regmatch.rm_ic = FALSE;
if (regmatch.regprog != NULL)
{
qf_list_T *qfl;
// create a new quickfix list
qf_new_list(qi, qf_cmdtitle(*eap->cmdlinep));
qfl = qf_get_curlist(qi);
hgr_search_in_rtp(qfl, &regmatch, lang);
vim_regfree(regmatch.regprog);
qfl->qf_nonevalid = FALSE;
qfl->qf_ptr = qfl->qf_start;
qfl->qf_index = 1;
qf_list_changed(qfl);
updated = TRUE;
}
if (p_cpo == empty_option)
p_cpo = save_cpo;
else
{
// Darn, some plugin changed the value. If it's still empty it was
// changed and restored, need to restore in the complicated way.
if (*p_cpo == NUL)
set_option_value_give_err((char_u *)"cpo", 0L, save_cpo, 0);
if (save_cpo_allocated)
free_string_option(save_cpo);
}
if (updated)
// This may open a window and source scripts, do this after 'cpo' was
// restored.
qf_update_buffer(qi, NULL);
if (au_name != NULL)
{
apply_autocmds(EVENT_QUICKFIXCMDPOST, au_name,
curbuf->b_fname, TRUE, curbuf);
// When adding a location list to an existing location list stack,
// if the autocmd made the stack invalid, then just return.
if (!new_qi && IS_LL_STACK(qi) && qf_find_win_with_loclist(qi) == NULL)
{
decr_quickfix_busy();
return;
}
}
// Jump to first match.
if (!qf_list_empty(qf_get_curlist(qi)))
qf_jump(qi, 0, 0, FALSE);
else
semsg(_(e_no_match_str_2), eap->arg);
decr_quickfix_busy();
if (eap->cmdidx == CMD_lhelpgrep)
{
// If the help window is not opened or if it already points to the
// correct location list, then free the new location list.
if (!bt_help(curwin->w_buffer) || curwin->w_llist == qi)
{
if (new_qi)
ll_free_all(&qi);
}
else if (curwin->w_llist == NULL && new_qi)
// current window didn't have a location list associated with it
// before. Associate the new location list now.
curwin->w_llist = qi;
}
}
| 0
|
220,166
|
/* Construct the shared decode-image kernel, validating the op type and
 * pulling the per-op attributes (JPEG flags, dtype, channels).
 * Fix: the dtype-validation error messages said "unit8, unit16" instead of
 * "uint8, uint16". */
explicit DecodeImageV2Op(OpKernelConstruction* context) : OpKernel(context) {
// Keep track of op string information because:
// [1] Currently by the API, PNG, JPEG and GIF can decode each other and
// depending on the op type, we need to return either 3-D or 4-D shapes.
// [2] Different ops have different attributes. e.g. `DecodeImage` op has
// `expand_animations` attribute that other ops don't.
// `DecodeAndDropJpeg` also has additional attributes.
op_type_ = type_string();
// Validate op type.
OP_REQUIRES(context,
op_type_ == "DecodeJpeg" || op_type_ == "DecodeAndCropJpeg" ||
op_type_ == "DecodePng" || op_type_ == "DecodeGif" ||
op_type_ == "DecodeBmp" || op_type_ == "DecodeImage",
errors::InvalidArgument("Bad op type ", op_type_));
// Get attributes from `DecodeJpeg` and `DecodeAndCropJpeg` op
// invocations. For `DecodeImage` op, set JPEG decoding setting to TF
// default.
if (op_type_ == "DecodeJpeg" || op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES_OK(context, context->GetAttr("ratio", &flags_.ratio));
OP_REQUIRES(context,
flags_.ratio == 1 || flags_.ratio == 2 || flags_.ratio == 4 ||
flags_.ratio == 8,
errors::InvalidArgument("ratio must be 1, 2, 4, or 8, got ",
flags_.ratio));
OP_REQUIRES_OK(context, context->GetAttr("fancy_upscaling",
&flags_.fancy_upscaling));
OP_REQUIRES_OK(context,
context->GetAttr("try_recover_truncated",
&flags_.try_recover_truncated_jpeg));
OP_REQUIRES_OK(context,
context->GetAttr("acceptable_fraction",
&flags_.min_acceptable_fraction));
string dct_method;
OP_REQUIRES_OK(context, context->GetAttr("dct_method", &dct_method));
OP_REQUIRES(
context,
(dct_method.empty() || dct_method == "INTEGER_FAST" ||
dct_method == "INTEGER_ACCURATE"),
errors::InvalidArgument("dct_method must be one of "
"{'', 'INTEGER_FAST', 'INTEGER_ACCURATE'}"));
// The TensorFlow-chosen default for JPEG decoding is IFAST, sacrificing
// image quality for speed.
if (dct_method.empty() || dct_method == "INTEGER_FAST") {
flags_.dct_method = JDCT_IFAST;
} else if (dct_method == "INTEGER_ACCURATE") {
flags_.dct_method = JDCT_ISLOW;
}
} else {
flags_ = jpeg::UncompressFlags();
flags_.dct_method = JDCT_IFAST;
}
// Get `dtype` attribute from `DecodePng` or `DecodeImage` op invocations.
if (op_type_ == "DecodePng" || op_type_ == "DecodeImage") {
OP_REQUIRES_OK(context, context->GetAttr("dtype", &data_type_));
if (op_type_ == "DecodePng") {
OP_REQUIRES(
context,
data_type_ == DataType::DT_UINT8 ||
data_type_ == DataType::DT_UINT16,
errors::InvalidArgument(
"`dtype` for `DecodePng` must be uint8, uint16 but got: ",
data_type_));
} else {
OP_REQUIRES(context,
data_type_ == DataType::DT_UINT8 ||
data_type_ == DataType::DT_UINT16 ||
data_type_ == DataType::DT_FLOAT,
errors::InvalidArgument("`dtype` for `DecodeImage` must be "
"uint8, uint16, float but got: ",
data_type_));
OP_REQUIRES_OK(context, context->GetAttr("expand_animations",
&expand_animations_));
}
}
// Get `channels` attribute for all ops except `DecodeGif` op.
// `DecodeGif` doesn't have `channels` attribute but it supports 3
// channels by default.
if (op_type_ != "DecodeGif") {
OP_REQUIRES_OK(context, context->GetAttr("channels", &channels_));
OP_REQUIRES(
context,
channels_ == 0 || channels_ == 1 || channels_ == 3 || channels_ == 4,
errors::InvalidArgument("`channels` must be 0, 1, 3 or 4 but got ",
channels_));
} else {
channels_ = 3;
}
}
| 0
|
317,051
|
/* Check a task's access to an open file: first the fd "use" permission
 * when the file was opened by a different SID, then (when av != 0) the
 * requested inode permissions.  av == 0 checks descriptor access only.
 * Returns 0 on success or a negative errno. */
static int file_has_perm(const struct cred *cred,
struct file *file,
u32 av)
{
struct file_security_struct *fsec = selinux_file(file);
struct inode *inode = file_inode(file);
struct common_audit_data ad;
u32 sid = cred_sid(cred);
int rc;
ad.type = LSM_AUDIT_DATA_FILE;
ad.u.file = file;
if (sid != fsec->sid) {
rc = avc_has_perm(&selinux_state,
sid, fsec->sid,
SECCLASS_FD,
FD__USE,
&ad);
if (rc)
goto out;
}
#ifdef CONFIG_BPF_SYSCALL
/* NOTE(review): returns directly instead of `goto out`; equivalent here
 * since no cleanup is performed at `out`. */
rc = bpf_fd_pass(file, cred_sid(cred));
if (rc)
return rc;
#endif
/* av is zero if only checking access to the descriptor. */
rc = 0;
if (av)
rc = inode_has_perm(cred, inode, av, &ad);
out:
return rc;
}
| 0
|
199,841
|
/* Create an RKernelCacheObj bin object from `buf`.  On success stores the
 * object in *bin_obj, registers the file in pending_bin_files and installs
 * the swizzled IO read hook when rebasing is required.  Returns false (and
 * releases what was acquired) on failure.
 *
 * Fix: `obj` is NULL on every path that reaches `beach` — it is either
 * never allocated or already released via R_FREE (which NULLs the
 * pointer) — so the previous unconditional `obj->cache_buf = NULL;` was a
 * guaranteed NULL-pointer dereference.  The access is now guarded. */
static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) {
	RBuffer *fbuf = r_buf_ref (buf);
	struct MACH0_(opts_t) opts;
	MACH0_(opts_set_default) (&opts, bf);
	struct MACH0_(obj_t) *main_mach0 = MACH0_(new_buf) (fbuf, &opts);
	if (!main_mach0) {
		return false;
	}
	RRebaseInfo *rebase_info = r_rebase_info_new_from_mach0 (fbuf, main_mach0);
	RKernelCacheObj *obj = NULL;
	RPrelinkRange *prelink_range = get_prelink_info_range_from_mach0 (main_mach0);
	if (!prelink_range) {
		goto beach;
	}
	obj = R_NEW0 (RKernelCacheObj);
	if (!obj) {
		R_FREE (prelink_range);
		goto beach;
	}
	RCFValueDict *prelink_info = NULL;
	if (main_mach0->hdr.filetype != MH_FILESET && prelink_range->range.size) {
		prelink_info = r_cf_value_dict_parse (fbuf, prelink_range->range.offset,
				prelink_range->range.size, R_CF_OPTION_SKIP_NSDATA);
		if (!prelink_info) {
			R_FREE (prelink_range);
			R_FREE (obj);
			goto beach;
		}
	}
	if (!pending_bin_files) {
		pending_bin_files = r_list_new ();
		if (!pending_bin_files) {
			R_FREE (prelink_range);
			R_FREE (obj);
			R_FREE (prelink_info);
			goto beach;
		}
	}
	obj->mach0 = main_mach0;
	obj->rebase_info = rebase_info;
	obj->prelink_info = prelink_info;
	obj->cache_buf = fbuf;
	obj->pa2va_exec = prelink_range->pa2va_exec;
	obj->pa2va_data = prelink_range->pa2va_data;
	R_FREE (prelink_range);
	*bin_obj = obj;
	r_list_push (pending_bin_files, bf);
	if (rebase_info || main_mach0->chained_starts) {
		RIO *io = bf->rbin->iob.io;
		swizzle_io_read (obj, io);
	}
	return true;
beach:
	r_buf_free (fbuf);
	if (obj) {
		obj->cache_buf = NULL;
	}
	/* NOTE(review): rebase_info appears to leak on this error path — its
	 * free routine is not visible in this chunk; confirm upstream. */
	MACH0_(mach0_free) (main_mach0);
	return false;
}
| 1
|
273,062
|
keyval_add(struct keyval *kv, const char *name, const char *value)
{
	/* Convenience wrapper: delegate using the full length of `value`. */
	size_t value_len = strlen(value);

	return keyval_add_size(kv, name, value, value_len);
}
| 0
|
218,785
|
/* Decode one row of raw PSD channel samples into the image's pixel queue.
 * packet_size (1, 2 or 4 bytes per sample) selects 8-bit, 16-bit or float
 * input; images with depth == 1 are unpacked bit-by-bit, up to 8 pixels
 * per input byte.  Returns MagickTrue on success. */
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
const unsigned char
*p;
IndexPacket
*indexes;
PixelPacket
*q;
ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
indexes=GetAuthenticIndexQueue(image);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* read one sample, widening it to the internal Quantum range */
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
/* note: this float `nibble` shadows the outer unsigned short one */
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
q++;
}
else
{
/* 1-bit image: each input byte packs up to 8 pixels, MSB first;
a set bit maps to 0 (black), a clear bit to QuantumRange */
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,
(((unsigned char) ((ssize_t) pixel)) & (0x01 << (7-bit))) != 0 ? 0 :
QuantumRange,q++,indexes,x++);
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
| 0
|
387,787
|
void InstanceKlass::clean_method_data() {
  // Purge stale entries from every method's MethodData, taking the
  // extra-data lock unless we are already at a safepoint.
  int len = methods()->length();
  for (int i = 0; i < len; i++) {
    MethodData* mdo = methods()->at(i)->method_data();
    if (mdo == NULL) {
      continue;
    }
    MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : mdo->extra_data_lock());
    mdo->clean_method_data(/*always_clean*/false);
  }
}
| 0
|
415,184
|
scd_update_reader_status_file (void)
{
  int rc;

  /* Try to take the lock; if someone else holds it, skip this update. */
  rc = npth_mutex_lock (&status_file_update_lock);
  if (rc)
    return; /* locked - give up. */

  update_reader_status_file (1);

  rc = npth_mutex_unlock (&status_file_update_lock);
  if (rc)
    log_error ("failed to release status_file_update lock: %s\n",
               strerror (rc));
}
| 0
|
359,839
|
/* Read a TIM2 picture header from the blob, field by field, in file order.
 * Most fields are little-endian; the GS register values are read
 * big-endian.  The read order here is the on-disk layout and must not be
 * changed. */
static inline void ReadTIM2ImageHeader(Image *image,TIM2ImageHeader *header)
{
header->total_size=ReadBlobLSBLong(image);
header->clut_size=ReadBlobLSBLong(image);
header->image_size=ReadBlobLSBLong(image);
header->header_size=ReadBlobLSBShort(image);
header->clut_color_count=ReadBlobLSBShort(image);
header->img_format=(unsigned char) ReadBlobByte(image);
header->mipmap_count=(unsigned char) ReadBlobByte(image);
header->clut_type=(unsigned char) ReadBlobByte(image);
header->bpp_type=(unsigned char) ReadBlobByte(image);
header->width=ReadBlobLSBShort(image);
header->height=ReadBlobLSBShort(image);
header->GsTex0=ReadBlobMSBLongLong(image);
header->GsTex1=ReadBlobMSBLongLong(image);
header->GsRegs=ReadBlobMSBLong(image);
header->GsTexClut=ReadBlobMSBLong(image);
}
| 0
|
252,301
|
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  /* Finalize an in-memory archive and transfer ownership of its heap
   * buffer to the caller via *pBuf / *pSize. */
  if (!pZip || !pZip->m_pState || !pBuf || !pSize)
    return MZ_FALSE;
  /* Only valid for archives that were written through the heap writer. */
  if (pZip->m_pWrite != mz_zip_heap_write_func)
    return MZ_FALSE;
  if (!mz_zip_writer_finalize_archive(pZip))
    return MZ_FALSE;

  *pBuf = pZip->m_pState->m_pMem;
  *pSize = pZip->m_pState->m_mem_size;
  /* Detach the buffer so the zip state no longer owns it. */
  pZip->m_pState->m_pMem = NULL;
  pZip->m_pState->m_mem_size = 0;
  pZip->m_pState->m_mem_capacity = 0;
  return MZ_TRUE;
}
| 0
|
500,643
|
/* Release an SFTP session: signal EOF on the channel, drain the queued
 * messages, free the channel and extensions, then the session itself.
 *
 * Fix: sftp->channel can be NULL when session setup failed part-way;
 * dereferencing it unconditionally caused a NULL-pointer crash
 * (libssh CVE-2020-16135). */
void sftp_free(sftp_session sftp){
  sftp_request_queue ptr;

  if (sftp == NULL) {
    return;
  }

  if (sftp->channel != NULL) {
    ssh_channel_send_eof(sftp->channel);
  }

  /* Drain and free any queued, unclaimed SFTP messages. */
  ptr = sftp->queue;
  while(ptr) {
    sftp_request_queue old;
    sftp_message_free(ptr->message);
    old = ptr->next;
    SAFE_FREE(ptr);
    ptr = old;
  }

  if (sftp->channel != NULL) {
    ssh_channel_free(sftp->channel);
  }
  sftp_ext_free(sftp->ext);
  ZERO_STRUCTP(sftp);
  SAFE_FREE(sftp);
}
| 0
|
212,688
|
/* Decide whether the current link security is acceptable for this
 * connection.  Returns 1 when the link mode is OK, 0 otherwise. */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
BT_DBG("hcon %p", conn);
/* In Secure Connections Only mode, it is required that Secure
 * Connections is used and the link is encrypted with AES-CCM
 * using a P-256 authenticated combination key.
 */
if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
if (!hci_conn_sc_enabled(conn) ||
!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
return 0;
}
/* NOTE(review): encryption is only enforced when SSP is enabled — a
 * legacy (non-SSP) connection passes this check without the
 * HCI_CONN_ENCRYPT bit set.  Confirm this matches the intended
 * security policy for legacy pairing. */
if (hci_conn_ssp_enabled(conn) &&
!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
return 0;
return 1;
}
| 1
|
472,371
|
/* Snapshot the JVMTI capability flags into this compilation environment,
 * under JvmtiThreadState_lock so the values are mutually consistent.
 * Returns true when the compiled method has already been redefined
 * (is_old), i.e. the compilation should be abandoned by the caller. */
bool ciEnv::cache_jvmti_state() {
VM_ENTRY_MARK;
// Get Jvmti capabilities under lock to get consistent values.
MutexLocker mu(JvmtiThreadState_lock);
_jvmti_redefinition_count = JvmtiExport::redefinition_count();
_jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
_jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables();
_jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions();
_jvmti_can_pop_frame = JvmtiExport::can_pop_frame();
_jvmti_can_get_owned_monitor_info = JvmtiExport::can_get_owned_monitor_info();
_jvmti_can_walk_any_space = JvmtiExport::can_walk_any_space();
return _task != NULL && _task->method()->is_old();
}
| 0
|
243,007
|
/*
 * Post-decryption handling of an incoming record by content type:
 *  - HANDSHAKE records are prepared for the handshake parser
 *  - CCS records are validated (exactly one byte, value 0x01) and, for
 *    DTLS, remembered/dropped when they arrive out of order
 *  - ALERT records are validated and mapped to a return code: fatal
 *    alerts and close_notify become errors, selected warnings are passed
 *    through as 0, anything else is silently skipped
 *  - for DTLS, unexpected ApplicationData is dropped and handshake
 *    resources are released once the handshake is over
 * Returns 0 to continue processing or an MBEDTLS_ERR_SSL_* code.
 */
int mbedtls_ssl_handle_message_type( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
/*
 * Handle particular types of records
 */
if( ssl->in_msgtype == MBEDTLS_SSL_MSG_HANDSHAKE )
{
if( ( ret = mbedtls_ssl_prepare_handshake_record( ssl ) ) != 0 )
{
return( ret );
}
}
if( ssl->in_msgtype == MBEDTLS_SSL_MSG_CHANGE_CIPHER_SPEC )
{
if( ssl->in_msglen != 1 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "invalid CCS message, len: %" MBEDTLS_PRINTF_SIZET,
ssl->in_msglen ) );
return( MBEDTLS_ERR_SSL_INVALID_RECORD );
}
if( ssl->in_msg[0] != 1 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "invalid CCS message, content: %02x",
ssl->in_msg[0] ) );
return( MBEDTLS_ERR_SSL_INVALID_RECORD );
}
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
ssl->state != MBEDTLS_SSL_CLIENT_CHANGE_CIPHER_SPEC &&
ssl->state != MBEDTLS_SSL_SERVER_CHANGE_CIPHER_SPEC )
{
if( ssl->handshake == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "dropping ChangeCipherSpec outside handshake" ) );
return( MBEDTLS_ERR_SSL_UNEXPECTED_RECORD );
}
MBEDTLS_SSL_DEBUG_MSG( 1, ( "received out-of-order ChangeCipherSpec - remember" ) );
return( MBEDTLS_ERR_SSL_EARLY_MESSAGE );
}
#endif
}
if( ssl->in_msgtype == MBEDTLS_SSL_MSG_ALERT )
{
if( ssl->in_msglen != 2 )
{
/* Note: Standard allows for more than one 2 byte alert
to be packed in a single message, but Mbed TLS doesn't
currently support this. */
MBEDTLS_SSL_DEBUG_MSG( 1, ( "invalid alert message, len: %" MBEDTLS_PRINTF_SIZET,
ssl->in_msglen ) );
return( MBEDTLS_ERR_SSL_INVALID_RECORD );
}
MBEDTLS_SSL_DEBUG_MSG( 2, ( "got an alert message, type: [%u:%u]",
ssl->in_msg[0], ssl->in_msg[1] ) );
/*
 * Ignore non-fatal alerts, except close_notify and no_renegotiation
 */
if( ssl->in_msg[0] == MBEDTLS_SSL_ALERT_LEVEL_FATAL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "is a fatal alert message (msg %d)",
ssl->in_msg[1] ) );
return( MBEDTLS_ERR_SSL_FATAL_ALERT_MESSAGE );
}
if( ssl->in_msg[0] == MBEDTLS_SSL_ALERT_LEVEL_WARNING &&
ssl->in_msg[1] == MBEDTLS_SSL_ALERT_MSG_CLOSE_NOTIFY )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "is a close notify message" ) );
return( MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY );
}
#if defined(MBEDTLS_SSL_RENEGOTIATION_ENABLED)
if( ssl->in_msg[0] == MBEDTLS_SSL_ALERT_LEVEL_WARNING &&
ssl->in_msg[1] == MBEDTLS_SSL_ALERT_MSG_NO_RENEGOTIATION )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "is a SSLv3 no renegotiation alert" ) );
/* Will be handled when trying to parse ServerHello */
return( 0 );
}
#endif
#if defined(MBEDTLS_SSL_PROTO_SSL3) && defined(MBEDTLS_SSL_SRV_C)
if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 &&
ssl->conf->endpoint == MBEDTLS_SSL_IS_SERVER &&
ssl->in_msg[0] == MBEDTLS_SSL_ALERT_LEVEL_WARNING &&
ssl->in_msg[1] == MBEDTLS_SSL_ALERT_MSG_NO_CERT )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "is a SSLv3 no_cert" ) );
/* Will be handled in mbedtls_ssl_parse_certificate() */
return( 0 );
}
#endif /* MBEDTLS_SSL_PROTO_SSL3 && MBEDTLS_SSL_SRV_C */
/* Silently ignore: fetch new message */
return MBEDTLS_ERR_SSL_NON_FATAL;
}
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
{
/* Drop unexpected ApplicationData records,
 * except at the beginning of renegotiations */
if( ssl->in_msgtype == MBEDTLS_SSL_MSG_APPLICATION_DATA &&
ssl->state != MBEDTLS_SSL_HANDSHAKE_OVER
#if defined(MBEDTLS_SSL_RENEGOTIATION)
&& ! ( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS &&
ssl->state == MBEDTLS_SSL_SERVER_HELLO )
#endif
)
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "dropping unexpected ApplicationData" ) );
return( MBEDTLS_ERR_SSL_NON_FATAL );
}
if( ssl->handshake != NULL &&
ssl->state == MBEDTLS_SSL_HANDSHAKE_OVER )
{
mbedtls_ssl_handshake_wrapup_free_hs_transform( ssl );
}
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
return( 0 );
}
| 0
|
512,709
|
/*
  Store ITEM's value into slot POS of this in_datetime vector.

  Values are kept in MySQL's packed datetime representation;
  unsigned_flag is forced on for every entry.
*/
void in_datetime::set(uint pos,Item *item)
{
  packed_longlong *entry= ((packed_longlong*) base) + pos;
  entry->unsigned_flag= 1L;
  entry->val= item->val_datetime_packed(current_thd);
}
| 0
|
450,820
|
/* Prepend DIRNAME (plus a separator) to each of the N strings in ARRAY,
   replacing every element with a freshly malloc'd copy and freeing the
   old one.  Returns 0 on success, non-zero on allocation failure.
   NOTE(review): the return type is declared on the line above this
   chunk (glibc's glob.c declares it `static int`) -- confirm.  */
prefix_array (const char *dirname, char **array, size_t n)
{
  size_t i;
  size_t dirlen = strlen (dirname);
  char dirsep_char = '/';   /* Separator inserted after DIRNAME.  */
  if (dirlen == 1 && dirname[0] == '/')
    /* DIRNAME is just "/", so normal prepending would get us "//foo".
       We want "/foo" instead, so don't prepend any chars from DIRNAME. */
    dirlen = 0;
#if defined __MSDOS__ || defined WINDOWS32
  if (dirlen > 1)
    {
      if (dirname[dirlen - 1] == '/' && dirname[dirlen - 2] == ':')
	/* DIRNAME is "d:/".  Don't prepend the slash from DIRNAME. */
	--dirlen;
      else if (dirname[dirlen - 1] == ':')
	{
	  /* DIRNAME is "d:".  Use ':' instead of '/'. */
	  --dirlen;
	  dirsep_char = ':';
	}
    }
#endif
  for (i = 0; i < n; ++i)
    {
      size_t eltlen = strlen (array[i]) + 1;   /* Element plus its NUL.  */
      char *new = malloc (dirlen + 1 + eltlen);
      if (new == NULL)
	{
	  /* Allocation failed: release the replacement strings built so
	     far.  The freed pointers are left in ARRAY; presumably the
	     caller discards the whole array on this error path --
	     TODO confirm against the caller.  */
	  while (i > 0)
	    free (array[--i]);
	  return 1;
	}
      {
	/* mempcpy returns a pointer just past the copied bytes, so the
	   separator and the element are appended in sequence.  */
	char *endp = mempcpy (new, dirname, dirlen);
	*endp++ = dirsep_char;
	mempcpy (endp, array[i], eltlen);
      }
      free (array[i]);
      array[i] = new;
    }
  return 0;
}
| 0
|
453,027
|
/* Cleanup callback for an indirect flow block (run e.g. when the backing
 * net_device goes away): issue a FLOW_BLOCK_UNBIND for the base chain and
 * detach the block callback, all under the per-netns commit mutex. */
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	/* Move the callback onto the local offload list before unbinding so
	 * nft_flow_offload_unbind() releases it together with the block. */
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&nft_net->commit_mutex);
}
| 0
|
248,751
|
static int cookie_sort(const void *p1, const void *p2)
{
struct Cookie *c1 = *(struct Cookie **)p1;
struct Cookie *c2 = *(struct Cookie **)p2;
size_t l1, l2;
/* 1 - compare cookie path lengths */
l1 = c1->path ? strlen(c1->path) : 0;
l2 = c2->path ? strlen(c2->path) : 0;
if(l1 != l2)
return (l2 > l1) ? 1 : -1 ; /* avoid size_t <=> int conversions */
/* 2 - compare cookie domain lengths */
l1 = c1->domain ? strlen(c1->domain) : 0;
l2 = c2->domain ? strlen(c2->domain) : 0;
if(l1 != l2)
return (l2 > l1) ? 1 : -1 ; /* avoid size_t <=> int conversions */
/* 3 - compare cookie name lengths */
l1 = c1->name ? strlen(c1->name) : 0;
l2 = c2->name ? strlen(c2->name) : 0;
if(l1 != l2)
return (l2 > l1) ? 1 : -1;
/* 4 - compare cookie creation time */
return (c2->creationtime > c1->creationtime) ? 1 : -1;
}
| 0
|
274,675
|
/* Restore the analyze window's default size and on-screen position from
 * GSettings.  Each key stores a fixed array of gint32; a value is only
 * applied when exactly two entries are present (w/h resp. x/y).
 * Silently does nothing when the settings backend is unavailable. */
analyze_window_size_restore(GtkWidget *win)
{
	GVariant *var;
	const gint32 *xy;
	gsize num;

	if (!screen.settings)
		return;

	var = g_settings_get_value (screen.settings, "analyze-window-size");
	/* xy points into var's internal data -- var must stay referenced
	 * until we are done reading xy, hence the unref afterwards. */
	xy = g_variant_get_fixed_array (var, &num, sizeof (*xy));
	if (num == 2)
		gtk_window_set_default_size (GTK_WINDOW (win), xy[0], xy[1]);
	g_variant_unref (var);

	var = g_settings_get_value (screen.settings, "analyze-window-position");
	xy = g_variant_get_fixed_array (var, &num, sizeof (*xy));
	if (num == 2)
		gtk_window_move (GTK_WINDOW (win), xy[0], xy[1]);
	g_variant_unref (var);
}
| 0
|
513,174
|
/*
  Look up the bookmark of a plugin's session system variable.

  Only session (THD-local) variables are bookmarked, so any flags
  without PLUGIN_VAR_THDLOCAL return NULL immediately.

  The hash key layout is:
    varname[0]   one byte encoding the variable type/flags
                 (plugin_var_bookmark_key())
    varname[1..] "<plugin>_<name>" (or just "<name>" when plugin is
                 NULL), with '-' normalized to '_' in the qualified form.

  @return the bookmark found in bookmark_hash, or NULL.
*/
static st_bookmark *find_bookmark(const char *plugin, const char *name,
                                  int flags)
{
  st_bookmark *result= NULL;
  uint namelen, length, pluginlen= 0;
  char *varname, *p;

  if (!(flags & PLUGIN_VAR_THDLOCAL))
    return NULL;

  namelen= strlen(name);
  if (plugin)
    pluginlen= strlen(plugin) + 1;      /* +1 for the '_' separator */
  length= namelen + pluginlen + 2;      /* +key byte and trailing NUL */
  varname= (char*) my_alloca(length);

  if (plugin)
  {
    strxmov(varname + 1, plugin, "_", name, NullS);
    /* option names use '-', variable names use '_' */
    for (p= varname + 1; *p; p++)
      if (*p == '-')
        *p= '_';
  }
  else
    memcpy(varname + 1, name, namelen + 1);

  varname[0]= plugin_var_bookmark_key(flags);
  result= (st_bookmark*) my_hash_search(&bookmark_hash,
                                        (const uchar*) varname, length - 1);
  my_afree(varname);
  return result;
}
| 0
|
256,954
|
// Records the dimension size seen for `label` at `axis` of `input`.
// A label may be reused across inputs only with a consistent size;
// a conflicting size yields an InvalidArgument error.
static Status RecordLabelToDimension(const int label, const int axis,
                                     const Tensor& input,
                                     LabelToDimSizes* label_to_dim_sizes) {
  const int64_t input_dim = input.dim_size(axis);
  // label_to_dim_sizes is pre-sized to hold all named labels; 0 means
  // "not recorded yet".
  const int64_t recorded = label_to_dim_sizes->at(label);
  if (recorded != 0 && recorded != input_dim) {
    return errors::InvalidArgument(
        "Expected dimension ", recorded, " at axis ", axis,
        " of the input shaped ", input.shape().DebugString(),
        " but got dimension ", input_dim);
  }
  (*label_to_dim_sizes)[label] = input_dim;
  return Status::OK();
}
| 0
|
256,456
|
/* Remove and return the last element of the array; nil when empty. */
Janet janet_array_pop(JanetArray *array) {
    if (array->count == 0)
        return janet_wrap_nil();
    array->count -= 1;
    return array->data[array->count];
}
| 0
|
229,266
|
// Reserve sizeof(int32_t) bytes in the response body for a value to be
// patched in later; the returned placeholder wraps the reserved slot.
cql_server::response::placeholder<int32_t> cql_server::response::write_int_placeholder() {
    return placeholder<int32_t>(_body.write_place_holder(sizeof(int32_t)));
}
| 0
|
476,094
|
/*
 * Fill @buf with the complete configuration descriptor for @config at
 * @speed: the config descriptor header itself, any extra config-level
 * descriptors (e.g. OTG), then every function's descriptors.
 * @type selects USB_DT_CONFIG vs USB_DT_OTHER_SPEED_CONFIG.
 *
 * Returns the total number of bytes written (also stored as
 * wTotalLength), or a negative error from usb_descriptor_fillbuf()
 * when the descriptors do not fit in the remaining space.
 */
static int config_buf(struct usb_configuration *config,
		enum usb_device_speed speed, void *buf, u8 type)
{
	struct usb_config_descriptor *c = buf;
	void *next = buf + USB_DT_CONFIG_SIZE;
	int len;
	struct usb_function *f;
	int status;

	/* Space left after the config header itself. */
	len = USB_COMP_EP0_BUFSIZ - USB_DT_CONFIG_SIZE;

	/* write the config descriptor */
	c = buf;	/* NOTE: redundant -- c already points at buf */
	c->bLength = USB_DT_CONFIG_SIZE;
	c->bDescriptorType = type;
	/* wTotalLength is written later */
	c->bNumInterfaces = config->next_interface_id;
	c->bConfigurationValue = config->bConfigurationValue;
	c->iConfiguration = config->iConfiguration;
	c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes;
	c->bMaxPower = encode_bMaxPower(speed, config);

	/* There may be e.g. OTG descriptors */
	if (config->descriptors) {
		status = usb_descriptor_fillbuf(next, len,
				config->descriptors);
		if (status < 0)
			return status;
		len -= status;
		next += status;
	}

	/* add each function's descriptors */
	list_for_each_entry(f, &config->functions, list) {
		struct usb_descriptor_header **descriptors;

		/* functions may lack descriptors for this speed */
		descriptors = function_descriptors(f, speed);
		if (!descriptors)
			continue;
		status = usb_descriptor_fillbuf(next, len,
			(const struct usb_descriptor_header **) descriptors);
		if (status < 0)
			return status;
		len -= status;
		next += status;
	}

	/* now the total size is known: patch it into the header */
	len = next - buf;
	c->wTotalLength = cpu_to_le16(len);
	return len;
}
| 0
|
459,508
|
/* True when the stack map was created with BPF_F_STACK_BUILD_ID, i.e.
 * stack entries store build-id+offset instead of raw addresses. */
static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID) != 0;
}
| 0
|
409,500
|
/*
 * Set the terminal cursor shape and blink state.
 * "shape" is the cursor shape code, "blink" non-zero for a blinking
 * cursor.  Prefers the full t_SH escape; when t_SH is unset, falls
 * back to the blink-only t_VS / t_CVS escapes.
 */
term_cursor_shape(int shape, int blink)
{
    if (*T_CSH != NUL)
    {
	/* t_SH encodes shape and blink in one value: shape * 2 - blink
	 * (an odd value means "blinking" for that shape). */
	OUT_STR(tgoto((char *)T_CSH, 0, shape * 2 - blink));
	out_flush();
    }
    else
    {
	int do_blink = blink;

	// t_SH is empty: try setting just the blink state.
	// The blink flags are XORed together, if the initial blinking from
	// style and shape differs, we need to invert the flag here.
	if (blink_state_is_inverted())
	    do_blink = !blink;

	if (do_blink && *T_VS != NUL)
	{
	    out_str(T_VS);
	    out_flush();
	}
	else if (!do_blink && *T_CVS != NUL)
	{
	    out_str(T_CVS);
	    out_flush();
	}
    }
}
| 0
|
234,848
|
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct extent_map_tree *map_tree = &fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
u64 logical;
u64 length;
u64 devid;
u64 type;
u8 uuid[BTRFS_UUID_SIZE];
int num_stripes;
int ret;
int i;
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
type = btrfs_chunk_type(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
#if BITS_PER_LONG == 32
ret = check_32bit_meta_chunk(fs_info, logical, length, type);
if (ret < 0)
return ret;
warn_32bit_meta_chunk(fs_info, logical, length, type);
#endif
/*
* Only need to verify chunk item if we're reading from sys chunk array,
* as chunk item in tree block is already verified by tree-checker.
*/
if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
ret = btrfs_check_chunk_valid(leaf, chunk, logical);
if (ret)
return ret;
}
read_lock(&map_tree->lock);
em = lookup_extent_mapping(map_tree, logical, 1);
read_unlock(&map_tree->lock);
/* already mapped? */
if (em && em->start <= logical && em->start + em->len > logical) {
free_extent_map(em);
return 0;
} else if (em) {
free_extent_map(em);
}
em = alloc_extent_map();
if (!em)
return -ENOMEM;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
free_extent_map(em);
return -ENOMEM;
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
em->map_lookup = map;
em->start = logical;
em->len = length;
em->orig_start = 0;
em->block_start = 0;
em->block_len = em->len;
map->num_stripes = num_stripes;
map->io_width = btrfs_chunk_io_width(leaf, chunk);
map->io_align = btrfs_chunk_io_align(leaf, chunk);
map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
map->type = type;
map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
map->verified_stripes = 0;
em->orig_block_len = calc_stripe_length(type, em->len,
map->num_stripes);
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
btrfs_stripe_offset_nr(leaf, chunk, i);
devid = btrfs_stripe_devid_nr(leaf, chunk, i);
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
devid, uuid, NULL);
if (!map->stripes[i].dev &&
!btrfs_test_opt(fs_info, DEGRADED)) {
free_extent_map(em);
btrfs_report_missing_device(fs_info, devid, uuid, true);
return -ENOENT;
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
add_missing_dev(fs_info->fs_devices, devid,
uuid);
if (IS_ERR(map->stripes[i].dev)) {
free_extent_map(em);
btrfs_err(fs_info,
"failed to init missing dev %llu: %ld",
devid, PTR_ERR(map->stripes[i].dev));
return PTR_ERR(map->stripes[i].dev);
}
btrfs_report_missing_device(fs_info, devid, uuid, false);
}
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
&(map->stripes[i].dev->dev_state));
}
write_lock(&map_tree->lock);
ret = add_extent_mapping(map_tree, em, 0);
write_unlock(&map_tree->lock);
if (ret < 0) {
btrfs_err(fs_info,
"failed to add chunk map, start=%llu len=%llu: %d",
em->start, em->len, ret);
}
free_extent_map(em);
return ret;
}
| 0
|
195,037
|
// Splits an input SparseTensor along its first (minibatch) dimension and
// stores each row as a separate SparseTensor in the SparseTensorsMap,
// emitting one int64 handle per minibatch entry (empty rows get a handle
// to an empty SparseTensor).
//
// Fix: new_num_elements was declared `int` although
// MultiplyWithoutOverflow computes in int64_t and signals overflow with a
// negative value; truncating the result to `int` could wrap a large
// positive product into a small non-negative value and defeat the
// overflow check.  Keep the accumulator in int64_t.  Also compares
// visited.size() to N without a signed/unsigned mismatch and fixes the
// `overflow_ocurred` spelling.
void Compute(OpKernelContext* context) override {
  const Tensor* input_indices;
  const Tensor* input_values;
  const Tensor* input_shape;
  SparseTensorsMap* map;

  OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices));
  OP_REQUIRES_OK(context, context->input("sparse_values", &input_values));
  OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape));
  OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map));

  OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
              errors::InvalidArgument(
                  "Input indices should be a matrix but received shape ",
                  input_indices->shape().DebugString()));
  OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
              errors::InvalidArgument(
                  "Input values should be a vector but received shape ",
                  input_values->shape().DebugString()));
  OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
              errors::InvalidArgument(
                  "Input shape should be a vector but received shape ",
                  input_shape->shape().DebugString()));
  int rank = input_shape->NumElements();
  OP_REQUIRES(
      context, rank > 1,
      errors::InvalidArgument(
          "Rank of input SparseTensor should be > 1, but saw rank: ", rank));

  auto input_shape_vec = input_shape->vec<int64_t>();

  // Reject shapes whose element count overflows.  The accumulator must be
  // int64_t: MultiplyWithoutOverflow returns an int64_t that is negative
  // on overflow, and truncation to int could hide that sentinel.
  int64_t new_num_elements = 1;
  bool overflow_occurred = false;
  for (int i = 0; i < input_shape_vec.size(); i++) {
    new_num_elements =
        MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i));
    if (new_num_elements < 0) {
      overflow_occurred = true;
      break;
    }
  }

  OP_REQUIRES(
      context, !overflow_occurred,
      errors::Internal("Encountered overflow from large input shape."));

  TensorShape tensor_input_shape(input_shape_vec);
  gtl::InlinedVector<int64_t, 8> std_order(rank);
  std::iota(std_order.begin(), std_order.end(), 0);
  SparseTensor input_st;
  OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
                                               tensor_input_shape, std_order,
                                               &input_st));

  const int64_t N = input_shape_vec(0);

  Tensor sparse_handles(DT_INT64, TensorShape({N}));
  auto sparse_handles_t = sparse_handles.vec<int64_t>();

  OP_REQUIRES_OK(context, input_st.IndicesValid());

  // We can generate the output shape proto string now, for all
  // minibatch entries.
  TensorShape output_shape;
  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
                              input_shape_vec.data() + 1,
                              input_shape->NumElements() - 1, &output_shape));

  // Get groups by minibatch dimension
  std::unordered_set<int64_t> visited;
  sparse::GroupIterable minibatch = input_st.group({0});
  for (const auto& subset : minibatch) {
    const int64_t b = subset.group()[0];
    visited.insert(b);
    OP_REQUIRES(
        context, b > -1 && b < N,
        errors::InvalidArgument(
            "Received unexpected column 0 value in input SparseTensor: ", b,
            " < 0 or >= N (= ", N, ")"));

    const auto indices = subset.indices();
    const auto values = subset.values<T>();
    const int64_t num_entries = values.size();

    // Strip the leading minibatch coordinate off every index.
    Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1});
    Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries});

    auto output_indices_t = output_indices.matrix<int64_t>();
    auto output_values_t = output_values.vec<T>();

    for (int i = 0; i < num_entries; ++i) {
      for (int d = 1; d < rank; ++d) {
        output_indices_t(i, d - 1) = indices(i, d);
      }
      output_values_t(i) = values(i);
    }

    SparseTensor st_i;
    OP_REQUIRES_OK(context,
                   SparseTensor::Create(output_indices, output_values,
                                        output_shape, &st_i));
    int64_t handle;
    OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
    sparse_handles_t(b) = handle;
  }

  // Fill in any gaps; we must provide an empty ST for batch entries
  // the grouper didn't find.
  if (static_cast<int64_t>(visited.size()) < N) {
    Tensor empty_indices(DT_INT64, {0, rank - 1});
    Tensor empty_values(DataTypeToEnum<T>::value, {0});
    SparseTensor empty_st;
    OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
                                                 output_shape, &empty_st));

    for (int64_t b = 0; b < N; ++b) {
      // We skipped this batch entry.
      if (visited.find(b) == visited.end()) {
        int64_t handle;
        OP_REQUIRES_OK(context,
                       map->AddSparseTensor(context, empty_st, &handle));
        sparse_handles_t(b) = handle;
      }
    }
  }

  context->set_output(0, sparse_handles);
}
| 1
|
310,064
|
/*
 * Heuristic check: does this capability string look like an SGR
 * (select graphic rendition) sequence, i.e. a CSI prefix followed only
 * by digits and semicolons and terminated by a single final 'm'?
 * Returns FALSE for anything else (including a missing CSI prefix).
 */
is_sgr_string(char *value)
{
    bool result = FALSE;

    if (VALID_STRING(value)) {
	/* csi_length() returns 0 when the string has no CSI prefix */
	int skip = csi_length(value);

	if (skip) {
	    int ch;
	    result = TRUE;
	    value += skip;
	    while ((ch = UChar(*value++)) != '\0') {
		if (isdigit(ch) || ch == ';') {
		    ;
		} else if (ch == 'm' && *value == '\0') {
		    /* 'm' is only accepted as the final character */
		    ;
		} else {
		    result = FALSE;
		    break;
		}
	    }
	}
    }
    return result;
}
| 0
|
254,731
|
/*
 * TypedArray.prototype.buffer getter: stores the underlying ArrayBuffer
 * of a TypedArray or DataView receiver in vm->retval.  Any other
 * receiver raises a TypeError.
 */
njs_typed_array_prototype_buffer(njs_vm_t *vm, njs_value_t *args,
    njs_uint_t nargs, njs_index_t unused)
{
    njs_value_t        *this;
    njs_typed_array_t  *array;

    this = njs_argument(args, 0);

    if (!njs_is_typed_array(this) && !njs_is_data_view(this)) {
        njs_type_error(vm, "Method TypedArray.prototype.buffer called "
                       "on incompatible receiver");
        return NJS_ERROR;
    }

    array = njs_typed_array(this);
    njs_set_array_buffer(&vm->retval, njs_typed_array_buffer(array));

    return NJS_OK;
}
| 0
|
226,234
|
/* Allocate a 'tssy' (time stamp synchrony) box.
 * ISOM_DECL_BOX_ALLOC declares and initializes a local `tmp` of the
 * given struct type with the given box type code. */
GF_Box *tssy_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_TimeStampSynchronyBox, GF_ISOM_BOX_TYPE_TSSY);
	return (GF_Box *)tmp;
}
| 0
|
359,419
|
/* vty command: "clear ip bgp peer-group WORD soft out".
 * Performs a soft outbound reconfiguration (re-announce) for every
 * member of the named peer-group, IPv4 unicast AFI/SAFI. */
DEFUN (clear_ip_bgp_peer_group_soft_out,
       clear_ip_bgp_peer_group_soft_out_cmd,
       "clear ip bgp peer-group WORD soft out",
       CLEAR_STR
       IP_STR
       BGP_STR
       "Clear all members of peer-group\n"
       "BGP peer-group name\n"
       "Soft reconfig\n"
       "Soft reconfig outbound update\n")
{
  /* argv[0] is the peer-group name (WORD token) */
  return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_UNICAST, clear_group,
                        BGP_CLEAR_SOFT_OUT, argv[0]);
}
| 0
|
508,797
|
/**
  Compute which trigger events (INSERT/UPDATE/DELETE) the current
  statement may fire, based on the SQL command and its duplicate-key
  handling, and store that bitmap in trg_event_map of every table in
  the outermost SELECT_LEX that is opened for writing.  The pre-locking
  algorithm later uses this map to load the right triggers.
*/
void LEX::set_trg_event_type_for_tables()
{
  uint8 new_trg_event_map= 0;
  DBUG_ENTER("LEX::set_trg_event_type_for_tables");

  /*
    Some auxiliary operations
    (e.g. GRANT processing) create TABLE_LIST instances outside
    the parser. Additionally, some commands (e.g. OPTIMIZE) change
    the lock type for a table only after parsing is done. Luckily,
    these do not fire triggers and do not need to pre-load them.
    For these TABLE_LISTs set_trg_event_type is never called, and
    trg_event_map is always empty. That means that the pre-locking
    algorithm will ignore triggers defined on these tables, if
    any, and the execution will either fail with an assert in
    sql_trigger.cc or with an error that a used table was not
    pre-locked, in case of a production build.
    TODO: this usage pattern creates unnecessary module dependencies
    and should be rewritten to go through the parser.
    Table list instances created outside the parser in most cases
    refer to mysql.* system tables. It is not allowed to have
    a trigger on a system table, but keeping track of
    initialization provides extra safety in case this limitation
    is circumvented.
  */

  /* First: events implied by the statement type itself. */
  switch (sql_command) {
  case SQLCOM_LOCK_TABLES:
  /*
    On a LOCK TABLE, all triggers must be pre-loaded for this TABLE_LIST
    when opening an associated TABLE.
  */
    new_trg_event_map= static_cast<uint8>
                        (1 << static_cast<int>(TRG_EVENT_INSERT)) |
                      static_cast<uint8>
                        (1 << static_cast<int>(TRG_EVENT_UPDATE)) |
                      static_cast<uint8>
                        (1 << static_cast<int>(TRG_EVENT_DELETE));
    break;
  /*
    Basic INSERT. If there is an additional ON DUPLIATE KEY UPDATE
    clause, it will be handled later in this method.
  */
  case SQLCOM_INSERT:                           /* fall through */
  case SQLCOM_INSERT_SELECT:
  /*
    LOAD DATA ... INFILE is expected to fire BEFORE/AFTER INSERT
    triggers.
    If the statement also has REPLACE clause, it will be
    handled later in this method.
  */
  case SQLCOM_LOAD:                             /* fall through */
  /*
    REPLACE is semantically equivalent to INSERT. In case
    of a primary or unique key conflict, it deletes the old
    record and inserts a new one. So we also may need to
    fire ON DELETE triggers. This functionality is handled
    later in this method.
  */
  case SQLCOM_REPLACE:                          /* fall through */
  case SQLCOM_REPLACE_SELECT:
  /*
    CREATE TABLE ... SELECT defaults to INSERT if the table or
    view already exists. REPLACE option of CREATE TABLE ...
    REPLACE SELECT is handled later in this method.
  */
  case SQLCOM_CREATE_TABLE:
    new_trg_event_map|= static_cast<uint8>
                          (1 << static_cast<int>(TRG_EVENT_INSERT));
    break;
  /* Basic update and multi-update */
  case SQLCOM_UPDATE:                           /* fall through */
  case SQLCOM_UPDATE_MULTI:
    new_trg_event_map|= static_cast<uint8>
                          (1 << static_cast<int>(TRG_EVENT_UPDATE));
    break;
  /* Basic delete and multi-delete */
  case SQLCOM_DELETE:                           /* fall through */
  case SQLCOM_DELETE_MULTI:
    new_trg_event_map|= static_cast<uint8>
                          (1 << static_cast<int>(TRG_EVENT_DELETE));
    break;
  default:
    break;
  }

  /* Second: extra events implied by the duplicate-key policy. */
  switch (duplicates) {
  case DUP_UPDATE:
    new_trg_event_map|= static_cast<uint8>
                          (1 << static_cast<int>(TRG_EVENT_UPDATE));
    break;
  case DUP_REPLACE:
    new_trg_event_map|= static_cast<uint8>
                          (1 << static_cast<int>(TRG_EVENT_DELETE));
    break;
  case DUP_ERROR:
  default:
    break;
  }

  /*
    Do not iterate over sub-selects, only the tables in the outermost
    SELECT_LEX can be modified, if any.
  */
  TABLE_LIST *tables= select_lex.get_table_list();

  while (tables)
  {
    /*
      This is a fast check to filter out statements that do
      not change data, or tables on the right side, in case of
      INSERT .. SELECT, CREATE TABLE .. SELECT and so on.
      Here we also filter out OPTIMIZE statement and non-updateable
      views, for which lock_type is TL_UNLOCK or TL_READ after
      parsing.
    */
    if (static_cast<int>(tables->lock_type) >=
        static_cast<int>(TL_WRITE_ALLOW_WRITE))
      tables->trg_event_map= new_trg_event_map;
    tables= tables->next_local;
  }
  DBUG_VOID_RETURN;
}
| 0
|
359,407
|
/* vty command: "no ip extcommunity-list expanded WORD".
 * Removes every entry of the named expanded extcommunity-list. */
DEFUN (no_ip_extcommunity_list_name_expanded_all,
       no_ip_extcommunity_list_name_expanded_all_cmd,
       "no ip extcommunity-list expanded WORD",
       NO_STR
       IP_STR
       EXTCOMMUNITY_LIST_STR
       "Specify expanded extcommunity-list\n"
       "Extended Community list name\n")
{
  return extcommunity_list_unset_vty (vty, argc, argv, EXTCOMMUNITY_LIST_EXPANDED);
}
| 0
|
211,877
|
/* Bind namespace PREFIX to URI for the current element scope, pushing the
   new BINDING onto *bindingsPtr and onto the prefix's binding stack.
   Enforces the XML Namespaces reserved names: the "xml" prefix must (and
   only may) be bound to the xml namespace URI, and the xmlns prefix /
   namespace URI may not be bound at all.
   Return type (enum XML_Error) is declared above this chunk.
   NOTE(review): nothing here rejects a URI that itself contains
   parser->m_namespaceSeparator, which with namespace-triplet processing
   lets crafted URIs masquerade as other names (cf. libexpat
   CVE-2022-25236) -- confirm against the upstream fix.  */
addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
           const XML_Char *uri, BINDING **bindingsPtr) {
  static const XML_Char xmlNamespace[]
      = {ASCII_h, ASCII_t, ASCII_t, ASCII_p, ASCII_COLON,
         ASCII_SLASH, ASCII_SLASH, ASCII_w, ASCII_w, ASCII_w,
         ASCII_PERIOD, ASCII_w, ASCII_3, ASCII_PERIOD, ASCII_o,
         ASCII_r, ASCII_g, ASCII_SLASH, ASCII_X, ASCII_M,
         ASCII_L, ASCII_SLASH, ASCII_1, ASCII_9, ASCII_9,
         ASCII_8, ASCII_SLASH, ASCII_n, ASCII_a, ASCII_m,
         ASCII_e, ASCII_s, ASCII_p, ASCII_a, ASCII_c,
         ASCII_e, '\0'};
  static const int xmlLen = (int)sizeof(xmlNamespace) / sizeof(XML_Char) - 1;
  static const XML_Char xmlnsNamespace[]
      = {ASCII_h, ASCII_t, ASCII_t, ASCII_p, ASCII_COLON, ASCII_SLASH,
         ASCII_SLASH, ASCII_w, ASCII_w, ASCII_w, ASCII_PERIOD, ASCII_w,
         ASCII_3, ASCII_PERIOD, ASCII_o, ASCII_r, ASCII_g, ASCII_SLASH,
         ASCII_2, ASCII_0, ASCII_0, ASCII_0, ASCII_SLASH, ASCII_x,
         ASCII_m, ASCII_l, ASCII_n, ASCII_s, ASCII_SLASH, '\0'};
  static const int xmlnsLen
      = (int)sizeof(xmlnsNamespace) / sizeof(XML_Char) - 1;

  XML_Bool mustBeXML = XML_FALSE;
  XML_Bool isXML = XML_TRUE;
  XML_Bool isXMLNS = XML_TRUE;

  BINDING *b;
  int len;

  /* empty URI is only valid for default namespace per XML NS 1.0 (not 1.1) */
  if (*uri == XML_T('\0') && prefix->name)
    return XML_ERROR_UNDECLARING_PREFIX;

  if (prefix->name && prefix->name[0] == XML_T(ASCII_x)
      && prefix->name[1] == XML_T(ASCII_m)
      && prefix->name[2] == XML_T(ASCII_l)) {
    /* Not allowed to bind xmlns */
    if (prefix->name[3] == XML_T(ASCII_n) && prefix->name[4] == XML_T(ASCII_s)
        && prefix->name[5] == XML_T('\0'))
      return XML_ERROR_RESERVED_PREFIX_XMLNS;

    if (prefix->name[3] == XML_T('\0'))
      mustBeXML = XML_TRUE;
  }

  /* One pass computes len and whether the URI matches (a prefix of) the
     reserved xml / xmlns namespace URIs. */
  for (len = 0; uri[len]; len++) {
    if (isXML && (len > xmlLen || uri[len] != xmlNamespace[len]))
      isXML = XML_FALSE;

    if (! mustBeXML && isXMLNS
        && (len > xmlnsLen || uri[len] != xmlnsNamespace[len]))
      isXMLNS = XML_FALSE;
  }
  isXML = isXML && len == xmlLen;
  isXMLNS = isXMLNS && len == xmlnsLen;

  if (mustBeXML != isXML)
    return mustBeXML ? XML_ERROR_RESERVED_PREFIX_XML
                     : XML_ERROR_RESERVED_NAMESPACE_URI;

  if (isXMLNS)
    return XML_ERROR_RESERVED_NAMESPACE_URI;

  /* reserve room for the namespace separator appended below */
  if (parser->m_namespaceSeparator)
    len++;
  if (parser->m_freeBindingList) {
    /* reuse a binding from the free list, growing its URI buffer */
    b = parser->m_freeBindingList;
    if (len > b->uriAlloc) {
      /* Detect and prevent integer overflow */
      if (len > INT_MAX - EXPAND_SPARE) {
        return XML_ERROR_NO_MEMORY;
      }

      /* Detect and prevent integer overflow.
       * The preprocessor guard addresses the "always false" warning
       * from -Wtype-limits on platforms where
       * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
#if UINT_MAX >= SIZE_MAX
      if ((unsigned)(len + EXPAND_SPARE) > (size_t)(-1) / sizeof(XML_Char)) {
        return XML_ERROR_NO_MEMORY;
      }
#endif

      XML_Char *temp = (XML_Char *)REALLOC(
          parser, b->uri, sizeof(XML_Char) * (len + EXPAND_SPARE));
      if (temp == NULL)
        return XML_ERROR_NO_MEMORY;
      b->uri = temp;
      b->uriAlloc = len + EXPAND_SPARE;
    }
    parser->m_freeBindingList = b->nextTagBinding;
  } else {
    b = (BINDING *)MALLOC(parser, sizeof(BINDING));
    if (! b)
      return XML_ERROR_NO_MEMORY;

    /* Detect and prevent integer overflow */
    if (len > INT_MAX - EXPAND_SPARE) {
      return XML_ERROR_NO_MEMORY;
    }
    /* Detect and prevent integer overflow.
     * The preprocessor guard addresses the "always false" warning
     * from -Wtype-limits on platforms where
     * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
#if UINT_MAX >= SIZE_MAX
    if ((unsigned)(len + EXPAND_SPARE) > (size_t)(-1) / sizeof(XML_Char)) {
      return XML_ERROR_NO_MEMORY;
    }
#endif

    b->uri
        = (XML_Char *)MALLOC(parser, sizeof(XML_Char) * (len + EXPAND_SPARE));
    if (! b->uri) {
      FREE(parser, b);
      return XML_ERROR_NO_MEMORY;
    }
    b->uriAlloc = len + EXPAND_SPARE;
  }
  b->uriLen = len;
  memcpy(b->uri, uri, len * sizeof(XML_Char));
  if (parser->m_namespaceSeparator)
    b->uri[len - 1] = parser->m_namespaceSeparator;
  b->prefix = prefix;
  b->attId = attId;
  b->prevPrefixBinding = prefix->binding;
  /* NULL binding when default namespace undeclared */
  if (*uri == XML_T('\0') && prefix == &parser->m_dtd->defaultPrefix)
    prefix->binding = NULL;
  else
    prefix->binding = b;
  b->nextTagBinding = *bindingsPtr;
  *bindingsPtr = b;
  /* if attId == NULL then we are not starting a namespace scope */
  if (attId && parser->m_startNamespaceDeclHandler)
    parser->m_startNamespaceDeclHandler(parser->m_handlerArg, prefix->name,
                                        prefix->binding ? uri : 0);
  return XML_ERROR_NONE;
}
| 1
|
240,610
|
// Reads no attributes at construction; all work happens in Compute().
explicit VarIsInitializedOp(OpKernelConstruction* c) : OpKernel(c) {}
| 0
|
397,643
|
/* Decode a little-endian 16-bit signed value from a 2-byte buffer. */
static short ToS(unsigned char *puffer)
{
	int lo = puffer[0];
	int hi = puffer[1];
	return (short)(lo | (hi << 8));
}
| 0
|
225,499
|
// Const-qualified overload: adapts the read-only InputPort to the
// mutable-port overload (const_cast is safe; the node is not modified).
const MutableGraphView::OutputPort MutableGraphView::GetRegularFanin(
    const GraphView::InputPort& port) const {
  MutableGraphView::InputPort mutable_port(const_cast<NodeDef*>(port.node),
                                           port.port_id);
  return GetRegularFanin(mutable_port);
}
| 0
|
432,199
|
/* Return the region's size in bytes.  A full 2^64-byte region cannot be
 * represented in a uint64_t, so it is reported as UINT64_MAX. */
uint64_t memory_region_size(MemoryRegion *mr)
{
    return int128_eq(mr->size, int128_2_64()) ? UINT64_MAX
                                              : int128_get64(mr->size);
}
| 0
|
281,664
|
/*
   Parse a RIFF container (e.g. AVI from camera movie modes), recursing
   into RIFF/LIST chunks, extracting the capture timestamp either from a
   Nikon "nctg" tag or an "IDIT" date string.  Reads little-endian.
   Recursion is bounded by each chunk's declared size ("end").
 */
void CLASS parse_riff()
{
  unsigned i, size, end;
  char tag[4], date[64], month[64];
  static const char mon[12][4] =
  { "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
  struct tm t;

  order = 0x4949;               /* RIFF payloads are little-endian */
  fread (tag, 4, 1, ifp);
  size = get4();
#ifdef LIBRAW_LIBRARY_BUILD
  /* a size with the top bit set would wrap the "end" arithmetic below */
  if((int)size<0)
     throw LIBRAW_EXCEPTION_IO_EOF;
#endif
  end = ftell(ifp) + size;
  if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
    get4();                     /* skip the form/list type code */
    while (ftell(ifp)+7 < end)
      parse_riff();             /* recurse into sub-chunks */
  } else if (!memcmp(tag,"nctg",4)) {
    /* Nikon tag list: id/size pairs; id pair 10 holds the timestamp */
    while (ftell(ifp)+7 < end) {
      i = get2();
      size = get2();
      if ((i+1) >> 1 == 10 && size == 20)
	get_timestamp(0);
      else fseek (ifp, size, SEEK_CUR);
    }
  } else if (!memcmp(tag,"IDIT",4) && size < 64) {
    /* textual date, e.g. "Mon Jun 01 12:34:56 2009" */
    fread (date, 64, 1, ifp);
    date[size] = 0;
    memset (&t, 0, sizeof t);
    if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
	&t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
      for (i=0; i < 12 && strcasecmp(mon[i],month); i++);
      t.tm_mon = i;
      t.tm_year -= 1900;
      if (mktime(&t) > 0)
	timestamp = mktime(&t);
    }
  } else
    fseek (ifp, size, SEEK_CUR);
}
| 0
|
500,651
|
/* Allocate a zeroed request-queue node wrapping msg.
 * Returns NULL (with the session OOM error set) on allocation failure. */
static sftp_request_queue request_queue_new(sftp_message msg) {
  sftp_request_queue entry;

  entry = malloc(sizeof(struct sftp_request_queue_struct));
  if (entry == NULL) {
    ssh_set_error_oom(msg->sftp->session);
    return NULL;
  }
  ZERO_STRUCTP(entry);

  entry->message = msg;

  return entry;
}
| 0
|
195,237
|
/*
  Read a PCL document by scanning it for CropBox/MediaBox page geometry
  and a DeviceCMYK marker, then shelling out to the GhostPCL delegate to
  rasterize it, and finally reading the delegate's output back in.
  Returns the first image of the resulting list, or NULL on failure.
*/
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define CropBox  "CropBox"
#define DeviceCMYK  "DeviceCMYK"
#define MediaBox  "MediaBox"
#define RenderPCLText  "  Rendering PCL...  "

  char
    command[MagickPathExtent],
    *density,
    filename[MagickPathExtent],
    geometry[MagickPathExtent],
    *options,
    input_filename[MagickPathExtent];

  const DelegateInfo
    *delegate_info;

  Image
    *image,
    *next_image;

  ImageInfo
    *read_info;

  MagickBooleanType
    cmyk,
    status;

  PointInfo
    delta;

  RectangleInfo
    bounding_box,
    page;

  char
    *p;

  ssize_t
    c;

  SegmentInfo
    bounds;

  size_t
    height,
    width;

  ssize_t
    count;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Open image file.
  */
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /* the delegate reads the input through this private symlink */
  status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
        image_info->filename);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Set the page density.
  */
  delta.x=DefaultResolution;
  delta.y=DefaultResolution;
  if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0))
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(PSDensityGeometry,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->resolution.x=geometry_info.rho;
      image->resolution.y=image->resolution.x;
      if ((flags & SigmaValue) != 0)
        image->resolution.y=geometry_info.sigma;
    }
  /*
    Determine page geometry from the PCL media box.
  */
  cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
  count=0;
  (void) memset(&bounding_box,0,sizeof(bounding_box));
  (void) memset(&bounds,0,sizeof(bounds));
  (void) memset(&page,0,sizeof(page));
  (void) memset(command,0,sizeof(command));
  p=command;
  /* token scanner: accumulate into `command` up to '/', newline, or the
     buffer limit, then inspect the accumulated token */
  for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
  {
    if (image_info->page != (char *) NULL)
      continue;
    /*
      Note PCL elements.
    */
    *p++=(char) c;
    if ((c != (int) '/') && (c != '\n') &&
        ((size_t) (p-command) < (MagickPathExtent-1)))
      continue;
    *p='\0';
    p=command;
    /*
      Is this a CMYK document?
    */
    if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0)
      cmyk=MagickTrue;
    if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0)
      {
        /*
          Note region defined by crop box.
        */
        count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf",
          &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
        if (count != 4)
          count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf",
            &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
      }
    if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0)
      {
        /*
          Note region defined by media box.
        */
        count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf",
          &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
        if (count != 4)
          count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf",
            &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
      }
    /* NOTE(review): `count` is not reset per token, so once a box has
       parsed successfully every later token re-derives width/height from
       the stale `bounds` -- appears benign but verify. */
    if (count != 4)
      continue;
    /*
      Set PCL render geometry.
    */
    width=(size_t) floor(bounds.x2-bounds.x1+0.5);
    height=(size_t) floor(bounds.y2-bounds.y1+0.5);
    if (width > page.width)
      page.width=width;
    if (height > page.height)
      page.height=height;
  }
  (void) CloseBlob(image);
  /*
    Render PCL with the GhostPCL delegate.
  */
  if ((page.width == 0) || (page.height == 0))
    (void) ParseAbsoluteGeometry(PSPageGeometry,&page);
  if (image_info->page != (char *) NULL)
    (void) ParseAbsoluteGeometry(image_info->page,&page);
  (void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",(double)
    page.width,(double) page.height);
  if (image_info->monochrome != MagickFalse)
    delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception);
  else
     if (cmyk != MagickFalse)
       delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception);
     else
       delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception);
  if (delegate_info == (const DelegateInfo *) NULL)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  if ((page.width == 0) || (page.height == 0))
    (void) ParseAbsoluteGeometry(PSPageGeometry,&page);
  if (image_info->page != (char *) NULL)
    (void) ParseAbsoluteGeometry(image_info->page,&page);
  density=AcquireString("");
  options=AcquireString("");
  (void) FormatLocaleString(density,MagickPathExtent,"%gx%g",
    image->resolution.x,image->resolution.y);
  if (image_info->ping != MagickFalse)
    (void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0");
  page.width=(size_t) floor(page.width*image->resolution.x/delta.x+0.5);
  page.height=(size_t) floor(page.height*image->resolution.y/delta.y+0.5);
  (void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double)
    page.width,(double) page.height);
  image=DestroyImage(image);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  if (read_info->number_scenes != 0)
    {
      if (read_info->number_scenes != 1)
        (void) FormatLocaleString(options,MagickPathExtent,"-dLastPage=%.20g",
          (double) (read_info->scene+read_info->number_scenes));
      else
        (void) FormatLocaleString(options,MagickPathExtent,
          "-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1,
          (double) (read_info->scene+read_info->number_scenes));
      read_info->number_scenes=0;
      if (read_info->scenes != (char *) NULL)
        *read_info->scenes='\0';
    }
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) AcquireUniqueFilename(read_info->filename);
  (void) FormatLocaleString(command,MagickPathExtent,
    GetDelegateCommands(delegate_info),
    read_info->antialias != MagickFalse ? 4 : 1,
    read_info->antialias != MagickFalse ? 4 : 1,density,options,
    read_info->filename,input_filename);
  options=DestroyString(options);
  density=DestroyString(density);
  /* NOTE(review): the delegate's exit status is computed into `status`
     but never checked before ReadImage below -- confirm whether a failed
     delegate run should abort here. */
  status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command,
    (char *) NULL,exception) != 0 ? MagickTrue : MagickFalse;
  image=ReadImage(read_info,exception);
  (void) RelinquishUniqueFileResource(read_info->filename);
  (void) RelinquishUniqueFileResource(input_filename);
  read_info=DestroyImageInfo(read_info);
  if (image == (Image *) NULL)
    ThrowReaderException(DelegateError,"PCLDelegateFailed");
  if (LocaleCompare(image->magick,"BMP") == 0)
    {
      Image
        *cmyk_image;

      cmyk_image=ConsolidateCMYKImages(image,exception);
      if (cmyk_image != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=cmyk_image;
        }
    }
  do
  {
    (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    image->page=page;
    if (image_info->ping != MagickFalse)
      {
        /* ping mode rendered at a fixed 2 DPI; scale the reported
           dimensions back up accordingly */
        image->magick_columns*=image->resolution.x/2.0;
        image->magick_rows*=image->resolution.y/2.0;
        image->columns*=image->resolution.x/2.0;
        image->rows*=image->resolution.y/2.0;
      }
    next_image=SyncNextImageInList(image);
    if (next_image != (Image *) NULL)
      image=next_image;
  } while (next_image != (Image *) NULL);
  return(GetFirstImageInList(image));
}
| 1
|
139,244
|
// Builds the layer/view hierarchy for the Picture-in-Picture overlay window:
// a solid black background, a dark scrim over the video, the video surface,
// and the control views (play/pause, close and - on Chrome OS - a resize
// handle).  Children are added back-to-front, so add order matters.
void OverlayWindowViews::SetUpViews() {
// Background extends one pixel beyond the window on every side (negative
// inset) so no gap shows at the window edge.
gfx::Rect larger_window_bounds = GetBounds();
larger_window_bounds.Inset(-1, -1);
window_background_view_->SetSize(larger_window_bounds.size());
window_background_view_->SetPaintToLayer(ui::LAYER_SOLID_COLOR);
GetWindowBackgroundLayer()->SetColor(SK_ColorBLACK);
// Semi-transparent grey scrim shown while controls are visible.
controls_scrim_view_->SetSize(GetBounds().size());
controls_scrim_view_->SetPaintToLayer(ui::LAYER_SOLID_COLOR);
GetControlsScrimLayer()->SetColor(gfx::kGoogleGrey900);
GetControlsScrimLayer()->SetOpacity(0.43f);
// Parent view holding the playback controls; painted to its own layer so it
// can be faded independently of the video.
controls_parent_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
controls_parent_view_->SetSize(GetBounds().size());
controls_parent_view_->layer()->SetFillsBoundsOpaquely(false);
controls_parent_view_->set_owned_by_client();
close_controls_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
close_controls_view_->layer()->SetFillsBoundsOpaquely(false);
close_controls_view_->set_owned_by_client();
video_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
play_pause_controls_view_->SetImageAlignment(
views::ImageButton::ALIGN_CENTER, views::ImageButton::ALIGN_MIDDLE);
// Button toggle state mirrors the current playback state.
play_pause_controls_view_->SetToggled(controller_->IsPlayerActive());
play_pause_controls_view_->set_owned_by_client();
#if defined(OS_CHROMEOS)
resize_handle_view_->SetPaintToLayer(ui::LAYER_TEXTURED);
resize_handle_view_->layer()->SetFillsBoundsOpaquely(false);
resize_handle_view_->set_owned_by_client();
#endif
play_pause_controls_view_->SetFocusForPlatform();  // Make button focusable.
const base::string16 play_pause_accessible_button_label(
l10n_util::GetStringUTF16(
IDS_PICTURE_IN_PICTURE_PLAY_PAUSE_CONTROL_ACCESSIBLE_TEXT));
play_pause_controls_view_->SetAccessibleName(
play_pause_accessible_button_label);
// Separate tooltips for the untoggled (play) and toggled (pause) states.
const base::string16 play_button_label(
l10n_util::GetStringUTF16(IDS_PICTURE_IN_PICTURE_PLAY_CONTROL_TEXT));
play_pause_controls_view_->SetTooltipText(play_button_label);
const base::string16 pause_button_label(
l10n_util::GetStringUTF16(IDS_PICTURE_IN_PICTURE_PAUSE_CONTROL_TEXT));
play_pause_controls_view_->SetToggledTooltipText(pause_button_label);
play_pause_controls_view_->SetInstallFocusRingOnFocus(true);
// Attach children: scrim first (bottom-most of these), then controls,
// close button and resize handle on top.
controls_parent_view_->AddChildView(play_pause_controls_view_.get());
GetContentsView()->AddChildView(controls_scrim_view_.get());
GetContentsView()->AddChildView(controls_parent_view_.get());
GetContentsView()->AddChildView(close_controls_view_.get());
#if defined(OS_CHROMEOS)
GetContentsView()->AddChildView(resize_handle_view_.get());
#endif
UpdatePlayPauseControlsSize();
// Controls start hidden until the user hovers/interacts.
UpdateControlsVisibility(false);
}
| 0
|
261,441
|
/* Allocates and precomputes the HEVC significant-coefficient context-index
   lookup tables (ctxIdxLookup[log2w-2][cIdx][scanIdx][prevCsbf][pos]).

   One flat allocation is carved into sub-tables; several parameter
   combinations deliberately alias the SAME memory where the spec makes the
   context independent of that parameter (e.g. for 4x4 blocks the context
   does not depend on scanIdx/prevCsbf, so all those pointers share one
   4x4 area per cIdx).  The precompute loop then fills every table, and the
   0xFF sentinel + assert verify that aliased entries receive identical
   values.  Returns false on allocation failure. */
bool alloc_and_init_significant_coeff_ctxIdx_lookupTable()
{
// Sizes per block size: 4x4 has 2 areas (cIdx), 8x8 has 2*2*4 areas
// (cIdx * scanIdx * prevCsbf), 16x16/32x32 have 2*4 areas (cIdx * prevCsbf).
int tableSize = 4*4*(2) + 8*8*(2*2*4) + 16*16*(2*4) + 32*32*(2*4);
uint8_t* p = (uint8_t*)malloc(tableSize);
if (p==NULL) {
return false;
}
memset(p,0xFF,tableSize); // just for debugging
// --- Set pointers to memory areas. Note that some parameters share the same memory. ---
// 4x4
for (int cIdx=0;cIdx<2;cIdx++) {
for (int scanIdx=0;scanIdx<2;scanIdx++)
for (int prevCsbf=0;prevCsbf<4;prevCsbf++)
ctxIdxLookup[0][cIdx][scanIdx][prevCsbf] = p;
p += 4*4;
}
// 8x8
for (int cIdx=0;cIdx<2;cIdx++)
for (int scanIdx=0;scanIdx<2;scanIdx++)
for (int prevCsbf=0;prevCsbf<4;prevCsbf++) {
ctxIdxLookup[1][cIdx][scanIdx][prevCsbf] = p;
p += 8*8;
}
// 16x16
for (int cIdx=0;cIdx<2;cIdx++)
for (int prevCsbf=0;prevCsbf<4;prevCsbf++) {
for (int scanIdx=0;scanIdx<2;scanIdx++) {
ctxIdxLookup[2][cIdx][scanIdx][prevCsbf] = p;
}
p += 16*16;
}
// 32x32
for (int cIdx=0;cIdx<2;cIdx++)
for (int prevCsbf=0;prevCsbf<4;prevCsbf++) {
for (int scanIdx=0;scanIdx<2;scanIdx++) {
ctxIdxLookup[3][cIdx][scanIdx][prevCsbf] = p;
}
p += 32*32;
}
// --- precompute ctxIdx tables ---
for (int log2w=2; log2w<=5 ; log2w++)
for (int cIdx=0;cIdx<2;cIdx++)
for (int scanIdx=0;scanIdx<2;scanIdx++)
for (int prevCsbf=0;prevCsbf<4;prevCsbf++)
{
for (int yC=0;yC<(1<<log2w);yC++)
for (int xC=0;xC<(1<<log2w);xC++)
{
int w = 1<<log2w;
int sbWidth = w>>2;
int sigCtx;
// if log2TrafoSize==2
if (sbWidth==1) {
sigCtx = ctxIdxMap[(yC<<2) + xC];
}
else if (xC+yC==0) {
// DC coefficient always uses context 0.
sigCtx = 0;
}
else {
int xS = xC>>2;
int yS = yC>>2;
/*
int prevCsbf = 0;
if (xS < sbWidth-1) { prevCsbf += coded_sub_block_flag[xS+1 +yS*sbWidth]; }
if (yS < sbWidth-1) { prevCsbf += coded_sub_block_flag[xS+(1+yS)*sbWidth]<<1; }
*/
// Position of the coefficient within its 4x4 sub-block.
int xP = xC & 3;
int yP = yC & 3;
//logtrace(LogSlice,"posInSubset: %d,%d\n",xP,yP);
//logtrace(LogSlice,"prevCsbf: %d\n",prevCsbf);
// Base context from the coded-sub-block flags of the right/below
// neighbours (H.265 section 9.3.4.2.5).
switch (prevCsbf) {
case 0:
sigCtx = (xP+yP>=3) ? 0 : (xP+yP>0) ? 1 : 2;
break;
case 1:
sigCtx = (yP==0) ? 2 : (yP==1) ? 1 : 0;
break;
case 2:
sigCtx = (xP==0) ? 2 : (xP==1) ? 1 : 0;
break;
default:
sigCtx = 2;
break;
}
//logtrace(LogSlice,"a) sigCtx=%d\n",sigCtx);
if (cIdx==0) {
if (xS+yS > 0) sigCtx+=3;
//logtrace(LogSlice,"b) sigCtx=%d\n",sigCtx);
// if log2TrafoSize==3
if (sbWidth==2) { // 8x8 block
sigCtx += (scanIdx==0) ? 9 : 15;
} else {
sigCtx += 21;
}
//logtrace(LogSlice,"c) sigCtx=%d\n",sigCtx);
}
else {
// if log2TrafoSize==3
if (sbWidth==2) { // 8x8 block
sigCtx+=9;
}
else {
sigCtx+=12;
}
}
}
// Chroma contexts start at offset 27.
int ctxIdxInc;
if (cIdx==0) { ctxIdxInc=sigCtx; }
else { ctxIdxInc=27+sigCtx; }
// Aliased entries must agree: if this slot was already written through
// another (sharing) parameter combination, verify consistency.
if (ctxIdxLookup[log2w-2][cIdx][scanIdx][prevCsbf][xC+(yC<<log2w)] != 0xFF) {
assert(ctxIdxLookup[log2w-2][cIdx][scanIdx][prevCsbf][xC+(yC<<log2w)] == ctxIdxInc);
}
ctxIdxLookup[log2w-2][cIdx][scanIdx][prevCsbf][xC+(yC<<log2w)] = ctxIdxInc;
//NOTE: when using this option, we have to include all three scanIdx in the table
//ctxIdxLookup[log2w-2][cIdx][scanIdx][prevCsbf][s] = ctxIdxInc;
}
}
return true;
}
| 0
|
281,133
|
/* Walk an xfrm bundle chain and set each dst's MTU metric to the smaller
 * of the (state-clamped) child path MTU and the cached route MTU, caching
 * both raw values on the xfrm_dst along the way.
 */
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 path_mtu;
		u32 route_mtu;

		/* Cache the raw child MTU, then clamp it by the xfrm state. */
		path_mtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = path_mtu;
		path_mtu = xfrm_state_mtu(dst->xfrm, path_mtu);

		/* Cache the route MTU and take the minimum of the two. */
		route_mtu = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu;
		if (route_mtu < path_mtu)
			path_mtu = route_mtu;

		dst_metric_set(dst, RTAX_MTU, path_mtu);
	} while ((dst = dst->next));
}
| 0
|
336,505
|
/* Set the security options for a single channel id.  If the channel already
 * has an entry in the per-channel security list it is updated in place;
 * otherwise a new entry is prepended to the list. */
static void reds_set_one_channel_security(RedsState *reds, int id, uint32_t security)
{
    ChannelSecurityOptions *opts = reds_find_channel_security(reds, id);

    if (opts != NULL) {
        /* Existing entry: just overwrite the options. */
        opts->options = security;
        return;
    }

    /* No entry yet: allocate one and push it at the head of the list. */
    opts = g_new(ChannelSecurityOptions, 1);
    opts->channel_id = id;
    opts->options = security;
    opts->next = reds->config->channels_security;
    reds->config->channels_security = opts;
}
| 0
|
508,852
|
/*
  Release the resources held by a LEX after statement execution:
  first unlock any plugins the statement pinned, then perform the
  remaining (non-plugin) cleanup.
*/
void lex_end(LEX *lex)
{
DBUG_ENTER("lex_end");
DBUG_PRINT("enter", ("lex: %p", lex));
lex_unlock_plugins(lex);
lex_end_nops(lex);
DBUG_VOID_RETURN;
}
| 0
|
259,083
|
// Decides whether restoring this tensor is heavy enough to be scheduled on
// the thread pool: true only when its full shape can be looked up and the
// element count exceeds kLargeShapeThreshold.
bool should_run_in_pool(BundleReader* reader) const {
  TensorShape full_shape;
  // Ignore the lookup status here; a failure simply means "run inline" and
  // the real error is surfaced later when the tensor is actually read.
  const bool looked_up =
      reader->LookupTensorShape(tensor_name, &full_shape).ok();
  return looked_up && full_shape.num_elements() > kLargeShapeThreshold;
}
| 0
|
338,101
|
// Tries to decode a memory.fill instruction.  Returns false (leaving `out`
// untouched) when `code` is not the memory.fill opcode; otherwise pops the
// three operands, validates the memory index byte, and stores the finalized
// expression in `out`.
bool WasmBinaryBuilder::maybeVisitMemoryFill(Expression*& out, uint32_t code) {
  if (code != BinaryConsts::MemoryFill) {
    return false;
  }
  auto* fill = allocator.alloc<MemoryFill>();
  // Operands come off the stack in reverse order: size, value, dest.
  fill->size = popNonVoidExpression();
  fill->value = popNonVoidExpression();
  fill->dest = popNonVoidExpression();
  // Only memory index 0 is supported.
  if (getInt8() != 0) {
    throwError("Unexpected nonzero memory index");
  }
  fill->finalize();
  out = fill;
  return true;
}
| 0
|
225,496
|
// Adds `fanin` as a *control* dependency of `node_name`.  Validates both
// endpoints, resolves the concrete control node to depend on (possibly an
// identity node consuming a Switch output), and wires it in via
// Graph::kControlSlot.  Returns an error status with a uniform
// "AddControllingFanin" prefix on any validation failure.
Status MutableGraphView::AddControllingFanin(absl::string_view node_name,
const TensorId& fanin) {
// Shared error formatter so every failure carries the same parameters.
auto error_status = [node_name, fanin](absl::string_view msg) {
string params = absl::Substitute("node_name='$0', fanin='$1'", node_name,
fanin.ToString());
return MutationError("AddControllingFanin", params, msg);
};
// Validate the fanin itself and reject self-dependencies.
TF_RETURN_IF_ERROR(CheckFaninIsValid(fanin, error_status));
TF_RETURN_IF_ERROR(CheckAddingFaninToSelf(node_name, fanin, error_status));
// Both endpoints must exist in the graph.
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
NodeDef* fanin_node = GetNode(fanin.node());
TF_RETURN_IF_ERROR(CheckNodeExists(fanin.node(), fanin_node, error_status));
OutputPort fanin_port(fanin_node, fanin.index());
string error_msg = "";
// Determine which node should actually carry the control edge.
NodeDef* control_node = GetControllingFaninToAdd(
node_name, {fanin_node, fanin.index()}, &error_msg);
if (!error_msg.empty()) {
return error_status(error_msg);
}
// nullptr means the fanin is a Switch output: depend on an identity node
// that consumes that output instead (created on demand).
if (control_node == nullptr) {
control_node = GetOrCreateIdentityConsumingSwitch(fanin_port);
}
AddFaninInternal(node, {control_node, Graph::kControlSlot});
return Status::OK();
}
| 0
|
512,267
|
// Null-safe equality comparison for a (string, JSON) argument pair:
// delegates to the (JSON, string) comparator with the operands swapped.
int Arg_comparator::compare_e_str_json()
{
return compare_e_json_str_basic(*b, *a);
}
| 0
|
409,441
|
/*
 * Reset the termcode request/response ring indexes and kick off a fresh
 * round of code requests to the terminal.
 */
req_codes_from_term(void)
{
    /* Both indexes restart from the beginning (order is irrelevant). */
    xt_index_in = 0;
    xt_index_out = 0;
    req_more_codes_from_term();
}
| 0
|
255,939
|
// Constructs a ShapeRefiner for graphs at `graph_def_version`, resolving op
// definitions through `ops`.  The internal GraphRunner uses the default Env.
ShapeRefiner::ShapeRefiner(int graph_def_version,
const OpRegistryInterface* ops)
: graph_def_version_(graph_def_version),
ops_registry_(ops),
graph_runner_(Env::Default()) {}
| 0
|
317,329
|
static void smack_file_set_fowner(struct file *file)
{
struct smack_known **blob = smack_file(file);
*blob = smk_of_current();
}
| 0
|
430,437
|
/* RCU callback: recover the sw_flow_actions that embeds @head and free it. */
static void __ovs_nla_free_flow_actions(struct rcu_head *head)
{
	struct sw_flow_actions *sf_acts;

	sf_acts = container_of(head, struct sw_flow_actions, rcu);
	ovs_nla_free_flow_actions(sf_acts);
}
| 0
|
226,120
|
// Destructor for the 'extr' (extra data) box: releases the owned FEC
// information sub-box and the payload buffer, then the box itself.
// NOTE(review): the closing brace of this function lies outside the visible
// chunk; code below is unchanged.
void extr_box_del(GF_Box *s)
{
GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s;
if (ptr == NULL) return;
if (ptr->feci) gf_isom_box_del((GF_Box*)ptr->feci);
if (ptr->data) gf_free(ptr->data);
gf_free(ptr);
|
216,945
|
/**
  @brief
  Build a list of equalities, derived from this multiple equality, that can
  be pushed down (e.g. into a materialized derived table/view).

  @param thd          thread handle
  @param equalities   [out] list the produced Item_func_eq items are added to
  @param checker      optional member-function predicate selecting which
                      fields may participate; NULL means "all"
  @param arg          opaque argument forwarded to @c checker
  @param clone_const  if true the constant is cloned into each equality,
                      otherwise the original constant item is reused and
                      marked IMMUTABLE_FL so it is not substituted later

  @retval false  success
  @retval true   out-of-memory / clone failure
*/
bool Item_equal::create_pushable_equalities(THD *thd,
List<Item> *equalities,
Pushdown_checker checker,
uchar *arg,
bool clone_const)
{
Item *item;
Item *left_item= NULL;
Item *right_item = get_const();
Item_equal_fields_iterator it(*this);
/* Pick the first field that passes the checker as the common left side. */
while ((item=it++))
{
left_item= item;
if (checker && !((item->*checker) (arg)))
continue;
break;
}
if (!left_item)
return false;
/* If the multiple equality has a constant, emit left = const first. */
if (right_item)
{
Item_func_eq *eq= 0;
Item *left_item_clone= left_item->build_clone(thd);
Item *right_item_clone= !clone_const ?
right_item : right_item->build_clone(thd);
if (!left_item_clone || !right_item_clone)
return true;
eq= new (thd->mem_root) Item_func_eq(thd,
left_item_clone,
right_item_clone);
if (!eq || equalities->push_back(eq, thd->mem_root))
return true;
if (!clone_const)
/* Protect the shared constant from later substitution. */
right_item->set_extraction_flag(IMMUTABLE_FL);
}
/* Pair every remaining accepted field with the chosen left item. */
while ((item=it++))
{
if (checker && !((item->*checker) (arg)))
continue;
Item_func_eq *eq= 0;
Item *left_item_clone= left_item->build_clone(thd);
Item *right_item_clone= item->build_clone(thd);
if (!(left_item_clone && right_item_clone))
return true;
/* Detach clones from this Item_equal so they stand alone when pushed. */
left_item_clone->set_item_equal(NULL);
right_item_clone->set_item_equal(NULL);
eq= new (thd->mem_root) Item_func_eq(thd,
right_item_clone,
left_item_clone);
if (!eq || equalities->push_back(eq, thd->mem_root))
return true;
}
return false;
}
| 1
|
513,101
|
// Prints this NOT item as "!<arg>", parenthesising the argument when its
// precedence requires it.
void Item_func_not::print(String *str, enum_query_type query_type)
{
str->append('!');
args[0]->print_parenthesised(str, query_type, precedence());
}
| 0
|
387,644
|
/* No-op stub: hash-entry bookkeeping for controls is compiled out in this
 * configuration. */
static inline void add_hash_entries(struct snd_card *card,
struct snd_kcontrol *kcontrol)
{
}
| 0
|
436,152
|
/* Cancel this task's io_uring requests; when @files is NULL the whole task
 * is going away, so cancel everything (cancel_all = true).
 * NOTE(review): the closing brace of this function lies outside the visible
 * chunk; code below is unchanged. */
void __io_uring_cancel(struct files_struct *files)
{
io_uring_cancel_generic(!files, NULL);
|
139,210
|
// Computes (and stores in |window_bounds_|) the Picture-in-Picture window
// bounds for the current display work area: clamps the window size to
// [min_size_, max_size_], snaps it to the video's aspect ratio, and places a
// never-before-shown window near the bottom-right corner with a small buffer.
// Returns the updated bounds.
gfx::Rect OverlayWindowViews::CalculateAndUpdateWindowBounds() {
  gfx::Rect work_area =
      display::Screen::GetScreen()
          ->GetDisplayNearestWindow(
              controller_->GetInitiatorWebContents()->GetTopLevelNativeWindow())
          .work_area();

  // Upper bound is half the work area in each dimension; lower bound fixed.
  max_size_ = gfx::Size(work_area.width() / 2, work_area.height() / 2);
  min_size_ = kMinWindowSize;

  gfx::Size window_size = window_bounds_.size();
  if (!has_been_shown_) {
    // First show: default to 1/5 of the work area, clamped to the range.
    window_size = gfx::Size(work_area.width() / 5, work_area.height() / 5);
    window_size.set_width(std::min(
        max_size_.width(), std::max(min_size_.width(), window_size.width())));
    window_size.set_height(
        std::min(max_size_.height(),
                 std::max(min_size_.height(), window_size.height())));
  }

  if (!window_size.IsEmpty() && !natural_size_.IsEmpty()) {
    // Snap to the video's aspect ratio, resizing towards the bottom-right,
    // then letterbox the video layer inside the resulting rect.
    // (static_cast instead of a C-style cast per style guide.)
    float aspect_ratio =
        static_cast<float>(natural_size_.width()) / natural_size_.height();
    gfx::Rect window_rect(GetBounds().origin(), window_size);
    views::WindowResizeUtils::SizeRectToAspectRatio(
        views::HitTest::kBottomRight, aspect_ratio, min_size_, max_size_,
        &window_rect);
    window_size.SetSize(window_rect.width(), window_rect.height());
    UpdateLayerBoundsWithLetterboxing(window_size);
  }

  gfx::Point origin = window_bounds_.origin();
  int window_diff_width = work_area.right() - window_size.width();
  int window_diff_height = work_area.bottom() - window_size.height();
  // Keep a margin of 2% of the average slack from the work-area corner.
  int buffer = (window_diff_width + window_diff_height) / 2 * 0.02;
  gfx::Point default_origin =
      gfx::Point(window_diff_width - buffer, window_diff_height - buffer);
  if (has_been_shown_) {
    // Never let a previously-placed window drift past the default corner.
    origin.SetToMin(default_origin);
  } else {
    origin = default_origin;
  }

  window_bounds_ = gfx::Rect(origin, window_size);
  return window_bounds_;
}
| 0
|
224,173
|
// Returns the total number of bytes occupied by the tensors present in
// `tuple`.  Absent entries (empty optionals) contribute zero bytes.
std::size_t get_tuple_bytes(const OptionalTuple& tuple) {
  return std::accumulate(
      tuple.begin(), tuple.end(), static_cast<std::size_t>(0),
      [](const std::size_t& lhs, const OptionalTensor& rhs) {
        // BUG FIX: the ternary must select only the rhs contribution.
        // Previously `(lhs + rhs.has_value()) ? rhs.value().TotalBytes() : 0`
        // used the running sum as the *condition*, discarding `lhs` entirely
        // and calling value() on empty optionals once the sum was non-zero.
        return lhs + (rhs.has_value() ? rhs.value().TotalBytes() : 0);
      });
}
| 0
|
247,340
|
/**
 * Release a pgpDig container together with the signature/pubkey data it
 * holds.  NULL is tolerated.
 * @param dig  container to free (may be NULL)
 * @return     NULL always
 */
pgpDig pgpFreeDig(pgpDig dig)
{
    if (dig == NULL)
        return NULL;

    /* Dump the signature/pubkey data before releasing the container. */
    pgpCleanDig(dig);
    dig = _free(dig);
    return dig;
}
| 0
|
253,560
|
/* Negotiate the effective read size for a tcon: start from the mount
 * option (or the default), clamp to the server's advertised max read, and
 * further clamp to SMB2_MAX_BUFFER_SIZE when the server lacks large-MTU
 * support. */
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
struct TCP_Server_Info *server = tcon->ses->server;
unsigned int rsize;
/* start with specified rsize, or default */
rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
return rsize;
}
| 0
|
463,066
|
/* Re-evaluate the PCI interrupt line from the status and mask registers:
 * raise it when any unmasked status bit (other than TXNR, which never
 * triggers an interrupt) is pending, lower it otherwise. */
static void sungem_eval_irq(SunGEMState *s)
{
    uint32_t pending;

    pending = s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR;
    pending &= ~s->gregs[GREG_IMASK >> 2];

    pci_set_irq(PCI_DEVICE(s), pending ? 1 : 0);
}
| 0
|
462,222
|
/*
 * Parse a raw STUN packet (pdu/pdu_len) into a pool-allocated pj_stun_msg.
 *
 * On success *p_msg receives the parsed message and, if p_parsed_len is
 * given, the number of bytes consumed.  For request messages, when the
 * caller supplies p_response, a ready-made error response is built for most
 * parse failures (bad attribute length, unknown mandatory attribute, ...).
 * Enforces STUN attribute ordering rules: MESSAGE-INTEGRITY and FINGERPRINT
 * may each appear once, and no attribute other than FINGERPRINT may follow
 * FINGERPRINT.
 */
PJ_DEF(pj_status_t) pj_stun_msg_decode(pj_pool_t *pool,
const pj_uint8_t *pdu,
pj_size_t pdu_len,
unsigned options,
pj_stun_msg **p_msg,
pj_size_t *p_parsed_len,
pj_stun_msg **p_response)
{
pj_stun_msg *msg;
const pj_uint8_t *start_pdu = pdu;
pj_bool_t has_msg_int = PJ_FALSE;
pj_bool_t has_fingerprint = PJ_FALSE;
pj_status_t status;
PJ_UNUSED_ARG(options);
PJ_ASSERT_RETURN(pool && pdu && pdu_len && p_msg, PJ_EINVAL);
PJ_ASSERT_RETURN(sizeof(pj_stun_msg_hdr) == 20, PJ_EBUG);
if (p_parsed_len)
*p_parsed_len = 0;
if (p_response)
*p_response = NULL;
/* Check if this is a STUN message, if necessary */
if (options & PJ_STUN_CHECK_PACKET) {
status = pj_stun_msg_check(pdu, pdu_len, options);
if (status != PJ_SUCCESS)
return status;
}
/* Create the message, copy the header, and convert to host byte order */
msg = PJ_POOL_ZALLOC_T(pool, pj_stun_msg);
pj_memcpy(&msg->hdr, pdu, sizeof(pj_stun_msg_hdr));
msg->hdr.type = pj_ntohs(msg->hdr.type);
msg->hdr.length = pj_ntohs(msg->hdr.length);
msg->hdr.magic = pj_ntohl(msg->hdr.magic);
pdu += sizeof(pj_stun_msg_hdr);
/* pdu_len -= sizeof(pj_stun_msg_hdr); */
/* From here on, pdu_len is the header-declared attribute-area length. */
pdu_len = msg->hdr.length;
/* No need to create response if this is not a request */
if (!PJ_STUN_IS_REQUEST(msg->hdr.type))
p_response = NULL;
/* Parse attributes */
while (pdu_len >= 4) {
unsigned attr_type, attr_val_len;
const struct attr_desc *adesc;
/* Get attribute type and length. If length is not aligned
* to 4 bytes boundary, add padding.
*/
attr_type = GETVAL16H(pdu, 0);
attr_val_len = GETVAL16H(pdu, 2);
attr_val_len = (attr_val_len + 3) & (~3);
/* Check length */
if (pdu_len < attr_val_len) {
pj_str_t err_msg;
char err_msg_buf[80];
err_msg.ptr = err_msg_buf;
err_msg.slen = pj_ansi_snprintf(err_msg_buf, sizeof(err_msg_buf),
"Attribute %s has invalid length",
pj_stun_get_attr_name(attr_type));
PJ_LOG(4,(THIS_FILE, "Error decoding message: %.*s",
(int)err_msg.slen, err_msg.ptr));
if (p_response) {
pj_stun_msg_create_response(pool, msg,
PJ_STUN_SC_BAD_REQUEST,
&err_msg, p_response);
}
return PJNATH_ESTUNINATTRLEN;
}
/* Get the attribute descriptor */
adesc = find_attr_desc(attr_type);
if (adesc == NULL) {
/* Unrecognized attribute */
pj_stun_binary_attr *attr = NULL;
PJ_LOG(5,(THIS_FILE, "Unrecognized attribute type 0x%x",
attr_type));
/* Is this a fatal condition? */
if (attr_type <= 0x7FFF) {
/* This is a mandatory attribute, we must return error
* if we don't understand the attribute.
*/
if (p_response) {
unsigned err_code = PJ_STUN_SC_UNKNOWN_ATTRIBUTE;
status = pj_stun_msg_create_response(pool, msg,
err_code, NULL,
p_response);
if (status==PJ_SUCCESS) {
pj_uint16_t d = (pj_uint16_t)attr_type;
pj_stun_msg_add_unknown_attr(pool, *p_response, 1, &d);
}
}
return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNKNOWN_ATTRIBUTE);
}
/* Make sure we have rooms for the new attribute */
if (msg->attr_count >= PJ_STUN_MAX_ATTR) {
if (p_response) {
pj_stun_msg_create_response(pool, msg,
PJ_STUN_SC_SERVER_ERROR,
NULL, p_response);
}
return PJNATH_ESTUNTOOMANYATTR;
}
/* Create binary attribute to represent this */
status = pj_stun_binary_attr_create(pool, attr_type, pdu+4,
GETVAL16H(pdu, 2), &attr);
if (status != PJ_SUCCESS) {
if (p_response) {
pj_stun_msg_create_response(pool, msg,
PJ_STUN_SC_SERVER_ERROR,
NULL, p_response);
}
PJ_LOG(4,(THIS_FILE,
"Error parsing unknown STUN attribute type %d",
attr_type));
return status;
}
/* Add the attribute */
msg->attr[msg->attr_count++] = &attr->hdr;
} else {
void *attr;
char err_msg1[PJ_ERR_MSG_SIZE],
err_msg2[PJ_ERR_MSG_SIZE];
/* Parse the attribute */
status = (adesc->decode_attr)(pool, pdu, &msg->hdr, &attr);
if (status != PJ_SUCCESS) {
pj_strerror(status, err_msg1, sizeof(err_msg1));
if (p_response) {
pj_str_t e;
e.ptr = err_msg2;
e.slen= pj_ansi_snprintf(err_msg2, sizeof(err_msg2),
"%s in %s",
err_msg1,
pj_stun_get_attr_name(attr_type));
if (e.slen < 1 || e.slen >= (int)sizeof(err_msg2))
e.slen = sizeof(err_msg2) - 1;
pj_stun_msg_create_response(pool, msg,
PJ_STUN_SC_BAD_REQUEST,
&e, p_response);
}
PJ_LOG(4,(THIS_FILE,
"Error parsing STUN attribute %s: %s",
pj_stun_get_attr_name(attr_type),
err_msg1));
return status;
}
if (attr_type == PJ_STUN_ATTR_MESSAGE_INTEGRITY &&
!has_fingerprint)
{
if (has_msg_int) {
/* Already has MESSAGE-INTEGRITY */
if (p_response) {
pj_stun_msg_create_response(pool, msg,
PJ_STUN_SC_BAD_REQUEST,
NULL, p_response);
}
return PJNATH_ESTUNDUPATTR;
}
has_msg_int = PJ_TRUE;
} else if (attr_type == PJ_STUN_ATTR_FINGERPRINT) {
if (has_fingerprint) {
/* Already has FINGERPRINT */
if (p_response) {
pj_stun_msg_create_response(pool, msg,
PJ_STUN_SC_BAD_REQUEST,
NULL, p_response);
}
return PJNATH_ESTUNDUPATTR;
}
has_fingerprint = PJ_TRUE;
} else {
if (has_fingerprint) {
/* Another attribute is found which is not FINGERPRINT
* after FINGERPRINT. Note that non-FINGERPRINT is
* allowed to appear after M-I
*/
if (p_response) {
pj_stun_msg_create_response(pool, msg,
PJ_STUN_SC_BAD_REQUEST,
NULL, p_response);
}
return PJNATH_ESTUNFINGERPOS;
}
}
/* Make sure we have rooms for the new attribute */
if (msg->attr_count >= PJ_STUN_MAX_ATTR) {
if (p_response) {
pj_stun_msg_create_response(pool, msg,
PJ_STUN_SC_SERVER_ERROR,
NULL, p_response);
}
return PJNATH_ESTUNTOOMANYATTR;
}
/* Add the attribute */
msg->attr[msg->attr_count++] = (pj_stun_attr_hdr*)attr;
}
/* Next attribute */
if (attr_val_len + 4 >= pdu_len) {
pdu += pdu_len;
pdu_len = 0;
} else {
pdu += (attr_val_len + 4);
pdu_len -= (attr_val_len + 4);
}
}
if (pdu_len > 0) {
/* Stray trailing bytes */
PJ_LOG(4,(THIS_FILE,
"Error decoding STUN message: unparsed trailing %d bytes",
pdu_len));
return PJNATH_EINSTUNMSGLEN;
}
*p_msg = msg;
if (p_parsed_len)
*p_parsed_len = (pdu - start_pdu);
return PJ_SUCCESS;
}
| 0
|
221,414
|
/* Free the nested-virtualization state of a vCPU (MSR permission map and
 * the vmcb02 page).  Safe to call when nested state was never initialized. */
void svm_free_nested(struct vcpu_svm *svm)
{
if (!svm->nested.initialized)
return;
svm_vcpu_free_msrpm(svm->nested.msrpm);
svm->nested.msrpm = NULL;
__free_page(virt_to_page(svm->nested.vmcb02.ptr));
svm->nested.vmcb02.ptr = NULL;
/*
* When last_vmcb12_gpa matches the current vmcb12 gpa,
* some vmcb12 fields are not loaded if they are marked clean
* in the vmcb12, since in this case they are up to date already.
*
* When the vmcb02 is freed, this optimization becomes invalid.
*/
svm->nested.last_vmcb12_gpa = INVALID_GPA;
svm->nested.initialized = false;
}
| 0
|
220,447
|
/* Shallow-copies array `a` into a newly allocated array (elements are
 * shared, not deep-copied). */
ary_dup(mrb_state *mrb, struct RArray *a)
{
return ary_new_from_values(mrb, ARY_LEN(a), ARY_PTR(a));
}
| 0
|
232,941
|
/* Return a heap-allocated copy of the default content-encoding list (this
 * build supports no decoders beyond the identity default).  Caller frees. */
char *Curl_all_content_encodings(void)
{
return strdup(CONTENT_ENCODING_DEFAULT); /* Satisfy caller. */
}
| 0
|
233,935
|
// Folds the per-stage statistics of every source in the sub-pipeline into
// this stage's aggregate plan-summary statistics.
void DocumentSourceUnionWith::recordPlanSummaryStats(const Pipeline& pipeline) {
    for (auto&& stage : pipeline.getSources()) {
        auto stageStats = stage->getSpecificStats();
        // Stages without specific stats contribute nothing.
        if (!stageStats) {
            continue;
        }
        stageStats->accumulate(_stats.planSummaryStats);
    }
}
| 0
|
293,762
|
/* Convert the Mach-O sections of `mach0` into RBinSection entries appended
 * to `ret`.  `prefix` (optional) namespaces section names, `paddr` offsets
 * the physical addresses, and virtual addresses are un-tagged via K_PPTR. */
static void sections_from_mach0(RList *ret, struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, char *prefix, RKernelCacheObj *obj) {
struct section_t *sections = NULL;
if (!(sections = MACH0_(get_sections) (mach0))) {
return;
}
int i;
/* The section array is terminated by an entry with .last set. */
for (i = 0; !sections[i].last; i++) {
RBinSection *ptr;
if (!(ptr = R_NEW0 (RBinSection))) {
break;
}
if (prefix) {
ptr->name = r_str_newf ("%s.%s", prefix, (char*)sections[i].name);
} else {
ptr->name = r_str_newf ("%s", (char*)sections[i].name);
}
/* Lazy-symbol-pointer sections get a pointer-array display format. */
if (strstr (ptr->name, "la_symbol_ptr")) {
int len = sections[i].size / 8;
ptr->format = r_str_newf ("Cd %d[%d]", 8, len);
}
handle_data_sections (ptr);
ptr->size = sections[i].size;
ptr->vsize = sections[i].vsize;
ptr->paddr = sections[i].offset + bf->o->boffset + paddr;
ptr->vaddr = K_PPTR (sections[i].addr);
/* Fall back to the physical address when there is no virtual one. */
if (!ptr->vaddr) {
ptr->vaddr = ptr->paddr;
}
ptr->perm = sections[i].perm;
/* __TEXT_EXEC.__text must be executable even if perms say otherwise. */
if (!ptr->perm && strstr (sections[i].name, "__TEXT_EXEC.__text")) {
ptr->perm = 1 | 4;
}
r_list_append (ret, ptr);
}
free (sections);
}
| 0
|
240,288
|
/*
 * Return an allocated copy of the last used expression line, or NULL when
 * no expression line has been set.  Caller frees the result.
 */
get_expr_line_src(void)
{
    return expr_line == NULL ? NULL : vim_strsave(expr_line);
}
| 0
|
436,058
|
/* Issue a write request for an io_uring kiocb.  Imports (or reuses cached)
 * iovecs, honours IO_URING_F_NONBLOCK by punting to async context when the
 * file/path cannot do nowait writes, takes freeze protection for regular
 * files, and handles -EAGAIN/-EOPNOTSUPP retry semantics.  Returns 0 on
 * completion-path success or a negative error to the submission layer. */
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
ssize_t ret, ret2, io_size;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
/* Reuse the iterator prepared by a previous (punted) attempt, if any. */
if (rw) {
iter = &rw->iter;
iovec = NULL;
} else {
ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
}
io_size = iov_iter_count(iter);
req->result = io_size;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
kiocb->ki_flags &= ~IOCB_NOWAIT;
else
kiocb->ki_flags |= IOCB_NOWAIT;
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req, WRITE))
goto copy_iov;
/* file path doesn't support NOWAIT for non-direct_IO */
if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
(req->flags & REQ_F_ISREG))
goto copy_iov;
ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
if (unlikely(ret))
goto out_free;
/*
* Open-code file_start_write here to grab freeze protection,
* which will be released by another thread in
* io_complete_rw(). Fool lockdep by telling it the lock got
* released so that it doesn't complain about the held lock when
* we return to userspace.
*/
if (req->flags & REQ_F_ISREG) {
sb_start_write(file_inode(req->file)->i_sb);
__sb_writers_release(file_inode(req->file)->i_sb,
SB_FREEZE_WRITE);
}
kiocb->ki_flags |= IOCB_WRITE;
if (req->file->f_op->write_iter)
ret2 = call_write_iter(req->file, kiocb, iter);
else if (req->file->f_op->write)
ret2 = loop_rw_iter(WRITE, req, iter);
else
ret2 = -EINVAL;
/* A reissue request is treated exactly like -EAGAIN. */
if (req->flags & REQ_F_REISSUE) {
req->flags &= ~REQ_F_REISSUE;
ret2 = -EAGAIN;
}
/*
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
* retry them without IOCB_NOWAIT.
*/
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
ret2 = -EAGAIN;
/* no retry on NONBLOCK nor RWF_NOWAIT */
if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
goto done;
if (!force_nonblock || ret2 != -EAGAIN) {
/* IOPOLL retry should happen for io-wq threads */
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
goto copy_iov;
done:
kiocb_done(kiocb, ret2, issue_flags);
} else {
copy_iov:
/* some cases will consume bytes even on error returns */
iov_iter_reexpand(iter, iter->count + iter->truncated);
iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
return ret ?: -EAGAIN;
}
out_free:
/* it's reportedly faster than delegating the null check to kfree() */
if (iovec)
kfree(iovec);
return ret;
}
| 0
|
275,514
|
/* Copies *value into the VM's return-value slot. */
njs_vm_retval_set(njs_vm_t *vm, const njs_value_t *value)
{
vm->retval = *value;
}
| 0
|
246,457
|
/* Reads a length-prefixed name from the buffer and produces a sanitized
 * copy in *str_out where every byte that is not valid in a symbol name is
 * escaped as "_XX_" (two lowercase hex digits).  On success the (original)
 * name length is stored through len_out when non-NULL.  Returns false on
 * read or allocation failure; on failure nothing is returned to the caller.
 */
static bool consume_encoded_name_new(RBuffer *b, ut64 bound, ut32 *len_out, char **str_out) {
	ut32 len;
	char *orig = NULL;
	if (!consume_str_new (b, bound, &len, &orig)) {
		return false;
	}
	// room for even every character getting encoded
	size_t maxsize = (len * 4) + 2;
	char *sout = malloc (maxsize);
	if (!sout) {
		free (orig);
		return false;
	}
	size_t i, oi = 0;
	for (i = 0; i < len && oi + 6 < maxsize; i++) {
		if (WASM_IS_OK (orig, i, len)) {
			sout[oi++] = orig[i];
		} else {
			// BUG FIX: cast to unsigned char.  A plain (possibly signed)
			// char is sign-extended through default promotion, so bytes
			// >= 0x80 printed as "_ffffffXY_" instead of "_XY_",
			// corrupting the escape and the output length accounting.
			int res = snprintf (sout + oi, maxsize - oi, "_%02x_",
				(unsigned char)orig[i]);
			if (res < 0) {
				// Encoding error: stop escaping, keep what we have.
				break;
			}
			oi += res;
		}
	}
	if (oi >= maxsize) {
		sout[maxsize - 1] = '\0';
	} else {
		sout[oi++] = '\0';
	}
	free (orig);
	// Shrink the buffer to the bytes actually used.
	char *tmp = realloc (sout, oi);
	if (!tmp) {
		// realloc failure leaves sout valid; free it and bail.
		// (The previous code also called free(tmp) here, a no-op on NULL.)
		free (sout);
		return false;
	}
	*str_out = tmp;
	if (len_out) {
		*len_out = len;
	}
	return true;
}
| 0
|
222,513
|
// Computes a deterministic 64-bit hash of a FunctionDef covering its
// signature, attributes, node defs, output names and control-output names.
// Map-valued fields are copied into std::map first so iteration (and hence
// the hash) is order-independent of the proto's repeated-field order.
uint64 FunctionDefHash(const FunctionDef& fdef) {
// signature
uint64 h = OpDefHash(fdef.signature());
// attrs
std::map<string, AttrValue> attrs = GetSetAttrs(fdef);
for (const auto& p : attrs) {
h = Hash64(p.first.data(), p.first.size(), h);
h = Hash64Combine(AttrValueHash(p.second), h);
}
// node defs
h = Hash64Combine(RepeatedNodeDefHash(fdef.node_def()), h);
// output names
std::map<string, string> ret(fdef.ret().begin(), fdef.ret().end());
for (const auto& p : ret) {
h = Hash64(p.first.data(), p.first.size(), h);
h = Hash64(p.second.data(), p.second.size(), h);
}
// control output names
std::map<string, string> control_ret(fdef.control_ret().begin(),
fdef.control_ret().end());
for (const auto& p : control_ret) {
h = Hash64(p.first.data(), p.first.size(), h);
h = Hash64(p.second.data(), p.second.size(), h);
}
return h;
}
| 0
|
224,157
|
// Removes (or partially consumes) the staged tuple stored under `key`,
// copying/moving the tensors selected by `indices` into `tuple`.  Blocks on
// the not_empty_ condition variable until the key is present.  The map entry
// is erased only once every optional tensor in it has been consumed; byte
// accounting is updated and bounded-capacity waiters are notified.
Status pop(const KeyType* key, const Tensor* indices, Tuple* tuple) {
tensorflow::mutex_lock lock(mu_);
// Sanity check the indices
TF_RETURN_IF_ERROR(check_index_ordering(*indices));
typename MapType::iterator it;
// Wait until the element with the requested key is present
while ((it = map_.find(*key)) == map_.end()) {
not_empty_.wait(lock);
}
TF_RETURN_IF_ERROR(
copy_or_move_tensors(&it->second, *key, *indices, tuple));
// Remove entry if all the values have been consumed
if (!std::any_of(
it->second.begin(), it->second.end(),
[](const OptionalTensor& tensor) { return tensor.has_value(); })) {
map_.erase(it);
}
// Update bytes in the Staging Area
current_bytes_ -= get_tuple_bytes(*tuple);
notify_inserters_if_bounded();
return Status::OK();
}
| 0
|
436,150
|
/* IORING_OP_FILES_UPDATE: update registered files from the request's
 * rsrc_update payload.  Must run from blocking context (uring_lock is
 * taken), so nonblock issue attempts are punted with -EAGAIN.
 * NOTE(review): the closing brace of this function lies outside the visible
 * chunk; code below is unchanged. */
static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_rsrc_update2 up;
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
/* Widen the legacy update payload into the v2 structure. */
up.offset = req->rsrc_update.offset;
up.data = req->rsrc_update.arg;
up.nr = 0;
up.tags = 0;
up.resv = 0;
mutex_lock(&ctx->uring_lock);
ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
&up, req->rsrc_update.nr_args);
mutex_unlock(&ctx->uring_lock);
if (ret < 0)
req_set_fail(req);
__io_req_complete(req, issue_flags, ret, 0);
return 0;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.