idx
int64 | func
string | target
int64 |
|---|---|---|
106,336
|
/*
 * trio_string_equal_case:
 * Case-sensitive equality comparison of two trio dynamic strings.
 *
 * @self:  first string; must be non-NULL (enforced by assert)
 * @other: second string; must be non-NULL (enforced by assert)
 *
 * Delegates to trio_equal_case() on the underlying content buffers.
 * NOTE(review): return convention (nonzero == equal) follows
 * trio_equal_case — confirm against its documentation.
 */
TRIO_PUBLIC_STRING int trio_string_equal_case TRIO_ARGS2((self, other), trio_string_t* self,
trio_string_t* other)
{
assert(self);
assert(other);
return trio_equal_case(self->content, other->content);
}
| 0
|
448,696
|
/*
 * okiibm_print_page: emit one raster page to an OKI IBM-compatible printer.
 *
 * Copies the device's init/end escape sequences into local buffers and,
 * when the resolution exceeds 72 dpi vertical and 60 dpi horizontal,
 * appends the unidirectional-print control code to the init sequence
 * (and its bidirectional counterpart to the end sequence).  The actual
 * raster transfer is delegated to okiibm_print_page1().
 *
 * NOTE(review): init_string/end_string are fixed 16-byte buffers; the
 * appends assume sizeof(okiibm_init_string) + sizeof(okiibm_one_direct)
 * <= 16 (likewise for the end sequence) — confirm against the array
 * definitions elsewhere in this file.
 */
okiibm_print_page(gx_device_printer *pdev, gp_file *prn_stream)
{
char init_string[16], end_string[16];
int init_length, end_length;
init_length = sizeof(okiibm_init_string);
memcpy(init_string, okiibm_init_string, init_length);
end_length = sizeof(okiibm_end_string);
memcpy(end_string, okiibm_end_string, end_length);
if ( pdev->y_pixels_per_inch > 72 &&
pdev->x_pixels_per_inch > 60 )
{
/* Unidirectional printing for the higher resolutions. */
memcpy( init_string + init_length, okiibm_one_direct,
sizeof(okiibm_one_direct) );
init_length += sizeof(okiibm_one_direct);
memcpy( end_string + end_length, okiibm_two_direct,
sizeof(okiibm_two_direct) );
end_length += sizeof(okiibm_two_direct);
}
return okiibm_print_page1( pdev, prn_stream,
pdev->y_pixels_per_inch > 72 ? 1 : 0,
init_string, init_length,
end_string, end_length );
}
| 0
|
225,676
|
void BluetoothDeviceChromeOS::RequestPinCode(
| 0
|
118,060
|
/*
 * Translate the uids/gids embedded in a POSIX ACL xattr blob from the
 * caller's user namespace into the initial-namespace representation.
 * A no-op when the caller already runs in init_user_ns.
 */
void posix_acl_fix_xattr_from_user(void *value, size_t size)
{
	struct user_namespace *caller_ns = current_user_ns();

	if (caller_ns != &init_user_ns)
		posix_acl_fix_xattr_userns(&init_user_ns, caller_ns, value, size);
}
| 0
|
194,140
|
// Runs the subtest of |extension_name| rooted at |page_url| with the given
// |flags|.  Honors the global "skip extension subtests" switch, in which
// case the test trivially passes.
bool ExtensionApiTest::RunExtensionSubtest(const std::string& extension_name,
                                           const std::string& page_url,
                                           int flags) {
  DCHECK(!page_url.empty()) << "Argument page_url is required.";
  return ExtensionSubtestsAreSkipped()
             ? true
             : RunExtensionTestImpl(extension_name, page_url, NULL, flags);
}
| 0
|
371,885
|
/*
 * Open the files just extracted from the archive with the default
 * application for the first file's MIME type.
 *
 * Files whose type differs from the first file's are still batched in
 * when their own default handler is the same application.  Falls back
 * to the "open with" dialog when no default handler exists or the user
 * asked to choose one.  Returns whether the launch succeeded.
 *
 * Fix: g_app_info_get_default_for_type() may return NULL for an
 * unknown MIME type; the original passed that NULL straight to
 * g_app_info_equal() and g_object_unref(), which is invalid.  Guard
 * app2 before using it.
 */
fr_window_open_extracted_files (OpenFilesData *odata)
{
	GList *file_list = odata->cdata->file_list;
	GFile *first_file;
	const char *first_mime_type;
	GAppInfo *app;
	GList *files_to_open = NULL;
	GdkAppLaunchContext *context;
	gboolean result;
	GError *error = NULL;

	g_return_val_if_fail (file_list != NULL, FALSE);

	first_file = G_FILE (file_list->data);
	if (first_file == NULL)
		return FALSE;

	if (! odata->window->archive->read_only)
		monitor_extracted_files (odata);

	if (odata->ask_application) {
		dlg_open_with (odata->window, file_list);
		return FALSE;
	}

	first_mime_type = _g_file_get_mime_type (first_file, FALSE);
	app = g_app_info_get_default_for_type (first_mime_type, FALSE);
	if (app == NULL) {
		dlg_open_with (odata->window, file_list);
		return FALSE;
	}

	files_to_open = g_list_append (files_to_open, g_file_get_uri (first_file));

	if (g_app_info_supports_files (app)) {
		GList *scan;

		for (scan = file_list->next; scan; scan = scan->next) {
			GFile      *file = G_FILE (scan->data);
			const char *mime_type;

			mime_type = _g_file_get_mime_type (file, FALSE);
			if (mime_type == NULL)
				continue;

			if (strcmp (mime_type, first_mime_type) == 0) {
				files_to_open = g_list_append (files_to_open, g_file_get_uri (file));
			}
			else {
				GAppInfo *app2;

				app2 = g_app_info_get_default_for_type (mime_type, FALSE);
				/* app2 may be NULL for an unknown type: only batch the
				 * file in (and unref) when a handler actually exists. */
				if (app2 != NULL) {
					if (g_app_info_equal (app, app2))
						files_to_open = g_list_append (files_to_open, g_file_get_uri (file));
					g_object_unref (app2);
				}
			}
		}
	}

	context = gdk_display_get_app_launch_context (gtk_widget_get_display (GTK_WIDGET (odata->window)));
	gdk_app_launch_context_set_screen (context, gtk_widget_get_screen (GTK_WIDGET (odata->window)));
	gdk_app_launch_context_set_timestamp (context, 0);
	result = g_app_info_launch_uris (app, files_to_open, G_APP_LAUNCH_CONTEXT (context), &error);
	if (! result) {
		_gtk_error_dialog_run (GTK_WINDOW (odata->window),
				       _("Could not perform the operation"),
				       "%s",
				       error->message);
		g_clear_error (&error);
	}

	g_object_unref (context);
	g_object_unref (app);
	_g_string_list_free (files_to_open);

	return result;
}
| 0
|
512,631
|
/**
 * @brief Release an scp session and everything it owns.
 *
 * The session is closed first unless it was never started; an attached
 * channel is freed, then the owned strings and the handle itself.
 * Passing NULL is a safe no-op.
 */
void ssh_scp_free(ssh_scp scp)
{
    if (scp == NULL) {
        return;
    }

    if (scp->state != SSH_SCP_NEW) {
        ssh_scp_close(scp);
    }

    if (scp->channel != NULL) {
        ssh_channel_free(scp->channel);
    }

    SAFE_FREE(scp->location);
    SAFE_FREE(scp->request_name);
    SAFE_FREE(scp->warning);
    SAFE_FREE(scp);
}
| 0
|
109,276
|
/*
 * ext2_remount: handle "mount -o remount" for an ext2 filesystem.
 *
 * Re-parses the mount options under sbi->s_lock, then handles the two
 * interesting transitions: rw -> ro (suspend quotas, mark superblock
 * valid again) and ro -> rw (verify RO-compat features, reread the
 * valid flag, resume quotas).  On any failure the previously saved
 * options and sb flags are restored.
 *
 * Locking: sbi->s_lock is held across option parsing and superblock
 * field updates, and is dropped before the sleeping dquot_*() and
 * ext2_sync_super()/ext2_write_super() calls.
 */
static int ext2_remount (struct super_block * sb, int * flags, char * data)
{
struct ext2_sb_info * sbi = EXT2_SB(sb);
struct ext2_super_block * es;
struct ext2_mount_options old_opts;
unsigned long old_sb_flags;
int err;
sync_filesystem(sb);
spin_lock(&sbi->s_lock);
/* Store the old options */
old_sb_flags = sb->s_flags;
old_opts.s_mount_opt = sbi->s_mount_opt;
old_opts.s_resuid = sbi->s_resuid;
old_opts.s_resgid = sbi->s_resgid;
/*
 * Allow the "check" option to be passed as a remount option.
 */
if (!parse_options(data, sb)) {
err = -EINVAL;
goto restore_opts;
}
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
es = sbi->s_es;
/* DAX cannot be toggled on a live mount: revert any change. */
if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT2_MOUNT_DAX) {
ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
"dax flag with busy inodes while remounting");
sbi->s_mount_opt ^= EXT2_MOUNT_DAX;
}
/* No ro/rw transition requested: nothing more to do. */
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
spin_unlock(&sbi->s_lock);
return 0;
}
if (*flags & MS_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS)) {
spin_unlock(&sbi->s_lock);
return 0;
}
/*
 * OK, we are remounting a valid rw partition rdonly, so set
 * the rdonly flag and then mark the partition as valid again.
 */
es->s_state = cpu_to_le16(sbi->s_mount_state);
es->s_mtime = cpu_to_le32(get_seconds());
spin_unlock(&sbi->s_lock);
err = dquot_suspend(sb, -1);
if (err < 0) {
spin_lock(&sbi->s_lock);
goto restore_opts;
}
ext2_sync_super(sb, es, 1);
} else {
__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
~EXT2_FEATURE_RO_COMPAT_SUPP);
if (ret) {
ext2_msg(sb, KERN_WARNING,
"warning: couldn't remount RDWR because of "
"unsupported optional features (%x).",
le32_to_cpu(ret));
err = -EROFS;
goto restore_opts;
}
/*
 * Mounting a RDONLY partition read-write, so reread and
 * store the current valid flag. (It may have been changed
 * by e2fsck since we originally mounted the partition.)
 */
sbi->s_mount_state = le16_to_cpu(es->s_state);
if (!ext2_setup_super (sb, es, 0))
sb->s_flags &= ~MS_RDONLY;
spin_unlock(&sbi->s_lock);
ext2_write_super(sb);
dquot_resume(sb, -1);
}
return 0;
restore_opts:
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_resuid = old_opts.s_resuid;
sbi->s_resgid = old_opts.s_resgid;
sb->s_flags = old_sb_flags;
spin_unlock(&sbi->s_lock);
return err;
}
| 0
|
441,369
|
/*
 * Per-request initialization for the S3 REST dialect: validates the
 * tenant, bucket and object names, captures canned-ACL / grant headers,
 * parses the copy-source header (except for range copies and multipart
 * upload parts), records the requested storage class, then delegates
 * to the generic REST handler initialization.
 */
int RGWHandler_REST_S3::init(RGWRados *store, struct req_state *s,
                             rgw::io::BasicClient *cio)
{
  s->dialect = "s3";

  int r = rgw_validate_tenant_name(s->bucket_tenant);
  if (r)
    return r;

  const bool relaxed_names = s->cct->_conf->rgw_relaxed_s3_bucket_names;
  if (!s->bucket_name.empty()) {
    r = valid_s3_bucket_name(s->bucket_name, relaxed_names);
    if (r)
      return r;
    r = validate_object_name(s->object.name);
    if (r)
      return r;
  }

  const char *canned_acl_hdr = s->info.env->get("HTTP_X_AMZ_ACL");
  if (canned_acl_hdr)
    s->canned_acl = canned_acl_hdr;
  s->has_acl_header = s->info.env->exists_prefix("HTTP_X_AMZ_GRANT");

  const char *copy_src = s->info.env->get("HTTP_X_AMZ_COPY_SOURCE");
  if (copy_src &&
      !s->info.env->get("HTTP_X_AMZ_COPY_SOURCE_RANGE") &&
      !s->info.args.exists("uploadId")) {
    r = RGWCopyObj::parse_copy_location(copy_src,
                                        s->init_state.src_bucket,
                                        s->src_object);
    if (!r) {
      ldout(s->cct, 0) << "failed to parse copy location" << dendl;
      return -EINVAL; // XXX why not -ERR_INVALID_BUCKET_NAME or -ERR_BAD_URL?
    }
  }

  const char *storage_class_hdr = s->info.env->get("HTTP_X_AMZ_STORAGE_CLASS");
  if (storage_class_hdr) {
    s->info.storage_class = storage_class_hdr;
  }

  return RGWHandler_REST::init(store, s, cio);
}
| 0
|
502,053
|
/*
 * Add the member identified by @member_rid to the group @group_rid.
 * Thin wrapper around the shared modify-groupmember helper, using the
 * LDB "add" modification flag.
 */
static NTSTATUS pdb_samba_dsdb_add_groupmem(struct pdb_methods *m,
TALLOC_CTX *mem_ctx,
uint32_t group_rid, uint32_t member_rid)
{
return pdb_samba_dsdb_mod_groupmem(m, mem_ctx, group_rid, member_rid,
LDB_FLAG_MOD_ADD);
}
| 0
|
273,765
|
/*
 * Ask the user to confirm opening many items at once.  Counts at or
 * below SILENT_WINDOW_OPEN_LIMIT are allowed silently; above it a
 * yes/no dialog reports how many tabs or windows would be opened and
 * the user's answer decides.
 */
confirm_multiple_windows (GtkWindow *parent_window,
                          int count,
                          gboolean use_tabs)
{
    GtkDialog *dialog;
    char *primary;
    char *secondary;
    int answer;

    /* Small batches open without asking. */
    if (count <= SILENT_WINDOW_OPEN_LIMIT)
    {
        return TRUE;
    }

    primary = _("Are you sure you want to open all files?");
    secondary = use_tabs
        ? g_strdup_printf (ngettext ("This will open %d separate tab.",
                                     "This will open %d separate tabs.", count), count)
        : g_strdup_printf (ngettext ("This will open %d separate window.",
                                     "This will open %d separate windows.", count), count);

    dialog = eel_show_yes_no_dialog (primary, secondary,
                                     _("_OK"), _("_Cancel"),
                                     parent_window);
    g_free (secondary);

    answer = gtk_dialog_run (dialog);
    gtk_widget_destroy (GTK_WIDGET (dialog));

    return answer == GTK_RESPONSE_YES;
}
| 0
|
435,804
|
/*
 * Returns nonzero when configuration requests unconditional recursive
 * submodule updates (config_update_recurse_submodules set to
 * RECURSE_SUBMODULES_ON).
 */
int should_update_submodules(void)
{
return config_update_recurse_submodules == RECURSE_SUBMODULES_ON;
}
| 0
|
178,773
|
// Decides whether a renderer frame of |size_in_dip| should be dropped.
// Frames are only candidates for skipping while a resize lock is held
// and the compositor is still waiting; a candidate is skipped when its
// size differs from the size the lock expects.
bool RenderWidgetHostViewAura::ShouldSkipFrame(gfx::Size size_in_dip) const {
  const bool waiting_on_resize =
      resize_lock_.get() &&
      can_lock_compositor_ != NO_PENDING_RENDERER_FRAME &&
      can_lock_compositor_ != NO_PENDING_COMMIT;
  if (!waiting_on_resize)
    return false;
  return size_in_dip != resize_lock_->expected_size();
}
| 0
|
294,667
|
/*
 * Keyword handler for "notify_fault" inside a vrrp sync group block.
 * Attaches the configured script to the most recently declared sync
 * group; a second occurrence is rejected with a config error.
 */
vrrp_gnotify_fault_handler(vector_t *strvec)
{
	vrrp_sgroup_t *group = LIST_TAIL_DATA(vrrp_data->vrrp_sync_group);

	/* Only one notify_fault script per sync group. */
	if (group->script_fault) {
		report_config_error(CONFIG_GENERAL_ERROR, "vrrp group %s: notify_fault script already specified - ignoring %s", group->gname, FMT_STR_VSLOT(strvec,1));
		return;
	}

	group->script_fault = set_vrrp_notify_script(strvec, 0);
	group->notify_exec = true;
}
| 0
|
53,946
|
/*
 * Size in bytes of a CONSTANT_Integer constant-pool entry: a 1-byte
 * tag followed by a 4-byte big-endian value.  The entry object itself
 * is not consulted — the size is fixed by the class-file format.
 */
R_API ut64 r_bin_java_integer_cp_calc_size(RBinJavaCPTypeObj *obj) {
	return 1 /* tag */ + 4 /* 32-bit value */;
}
| 0
|
449,778
|
/*
 * Implement command-line macro pre-definition (-d name[=value]):
 * synthesize a "%define <definition>" token line and push it onto the
 * predef list so it is processed before the first source line.
 *
 * A "name=value" form is temporarily rewritten as "name value" for
 * tokenizing, then the '=' is restored (the caller keeps the string).
 *
 * Fix: tokenize() may return NULL (e.g. an empty definition string);
 * the original dereferenced space->next unconditionally.  Guard it and
 * fall into the "non ID" warning instead of crashing.
 */
static void pp_pre_define(char *definition)
{
    Token *def, *space;
    Line *l;
    char *equals;

    equals = strchr(definition, '=');

    /* Token chain: "%define" -> whitespace -> tokenized definition. */
    space = new_White(NULL);
    def = new_Token(space, TOK_PREPROC_ID, "%define", 0);
    if (equals)
        *equals = ' ';
    space->next = tokenize(definition);
    if (equals)
        *equals = '=';

    /* We can't predefine a TOK_LOCAL_MACRO for obvious reasons... */
    if (!space->next ||
        (space->next->type != TOK_PREPROC_ID &&
         space->next->type != TOK_ID))
        nasm_warn(WARN_OTHER, "pre-defining non ID `%s\'\n", definition);

    l = nasm_malloc(sizeof(Line));
    l->next = predef;
    l->first = def;
    l->finishes = NULL;
    predef = l;
}
| 0
|
308,721
|
// Replays a previously recorded XMLHttpRequest identified by |request_id|.
//
// Looks up the captured replay data, rebuilds an equivalent XHR on the
// original execution context (same method/URL/async flag, credentials,
// and request headers), evicts the URL from the memory cache so the
// request actually hits the network, and re-sends the original body.
// The new XHR is retained in |replay_xhrs_| — NOTE(review): presumably
// to keep it alive until completion; confirm where it is removed.
//
// Errors: unknown id, or the originating document already detached
// (in which case the stale replay data is cleared).
Response InspectorNetworkAgent::replayXHR(const String& request_id) {
String actual_request_id = request_id;
XHRReplayData* xhr_replay_data = resources_data_->XhrReplayData(request_id);
if (!xhr_replay_data)
return Response::Error("Given id does not correspond to XHR");
ExecutionContext* execution_context = xhr_replay_data->GetExecutionContext();
if (execution_context->IsContextDestroyed()) {
resources_data_->SetXHRReplayData(request_id, 0);
return Response::Error("Document is already detached");
}
XMLHttpRequest* xhr = XMLHttpRequest::Create(execution_context);
execution_context->RemoveURLFromMemoryCache(xhr_replay_data->Url());
xhr->open(xhr_replay_data->Method(), xhr_replay_data->Url(),
xhr_replay_data->Async(), IGNORE_EXCEPTION_FOR_TESTING);
if (xhr_replay_data->IncludeCredentials())
xhr->setWithCredentials(true, IGNORE_EXCEPTION_FOR_TESTING);
for (const auto& header : xhr_replay_data->Headers()) {
xhr->setRequestHeader(header.key, header.value,
IGNORE_EXCEPTION_FOR_TESTING);
}
xhr->SendForInspectorXHRReplay(xhr_replay_data->FormData(),
IGNORE_EXCEPTION_FOR_TESTING);
replay_xhrs_.insert(xhr);
return Response::OK();
}
| 0
|
154,623
|
/*
 * True when @byte collides with one of the protocol framing bytes
 * (STX/ETX/DLE) and must therefore be DLE-escaped on the wire.
 */
static bool ims_pcu_byte_needs_escape(u8 byte)
{
	switch (byte) {
	case IMS_PCU_PROTOCOL_STX:
	case IMS_PCU_PROTOCOL_ETX:
	case IMS_PCU_PROTOCOL_DLE:
		return true;
	default:
		return false;
	}
}
| 0
|
378,387
|
/*
 * Tevent callback: completion of the schema-specific initgroups lookup.
 *
 * Collects the result from whichever sub-request matches the configured
 * LDAP schema (RFC2307, RFC2307bis/AD with tokenGroups or matching-rule
 * variants, IPA), then resolves the user's primary GID — either by
 * ID-mapping the primary-group RID appended to the domain SID, or by
 * reading SYSDB_GIDNUM directly — and issues a follow-up group lookup
 * for it, continuing in sdap_get_initgr_pgid().  Any failure finishes
 * the request with tevent_req_error().
 */
static void sdap_get_initgr_done(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(subreq,
struct tevent_req);
struct sdap_get_initgr_state *state = tevent_req_data(req,
struct sdap_get_initgr_state);
int ret;
TALLOC_CTX *tmp_ctx;
gid_t primary_gid;
char *gid;
char *sid_str;
char *dom_sid_str;
char *group_sid_str;
struct sdap_options *opts = state->opts;
DEBUG(SSSDBG_TRACE_ALL, "Initgroups done\n");
tmp_ctx = talloc_new(NULL);
if (!tmp_ctx) {
tevent_req_error(req, ENOMEM);
return;
}
/* Receive from the schema-specific sub-request that was issued. */
switch (state->opts->schema_type) {
case SDAP_SCHEMA_RFC2307:
ret = sdap_initgr_rfc2307_recv(subreq);
break;
case SDAP_SCHEMA_RFC2307BIS:
case SDAP_SCHEMA_AD:
if (state->opts->dc_functional_level >= DS_BEHAVIOR_WIN2003
&& dp_opt_get_bool(state->opts->basic, SDAP_AD_USE_TOKENGROUPS)) {
ret = sdap_ad_tokengroups_initgroups_recv(subreq);
}
else if (state->opts->support_matching_rule
&& dp_opt_get_bool(state->opts->basic,
SDAP_AD_MATCHING_RULE_INITGROUPS)) {
ret = sdap_get_ad_match_rule_initgroups_recv(subreq);
} else {
ret = sdap_initgr_rfc2307bis_recv(subreq);
}
break;
case SDAP_SCHEMA_IPA_V1:
ret = sdap_initgr_nested_recv(subreq);
break;
default:
ret = EINVAL;
break;
}
talloc_zfree(subreq);
if (ret) {
DEBUG(SSSDBG_TRACE_ALL, "Error in initgroups: [%d][%s]\n",
ret, strerror(ret));
goto fail;
}
/* We also need to update the user's primary group, since
 * the user may not be an explicit member of that group
 */
if (state->use_id_mapping) {
DEBUG(SSSDBG_TRACE_LIBS,
"Mapping primary group to unix ID\n");
/* The primary group ID is just the RID part of the objectSID
 * of the group. Generate the GID by adding this to the domain
 * SID value.
 */
/* Get the user SID so we can extract the domain SID
 * from it.
 */
ret = sdap_attrs_get_sid_str(
tmp_ctx, opts->idmap_ctx, state->orig_user,
opts->user_map[SDAP_AT_USER_OBJECTSID].sys_name,
&sid_str);
if (ret != EOK) goto fail;
/* Get the domain SID from the user SID */
ret = sdap_idmap_get_dom_sid_from_object(tmp_ctx, sid_str,
&dom_sid_str);
if (ret != EOK) {
DEBUG(SSSDBG_MINOR_FAILURE,
"Could not parse domain SID from [%s]\n", sid_str);
goto fail;
}
ret = sysdb_attrs_get_uint32_t(
state->orig_user,
opts->user_map[SDAP_AT_USER_PRIMARY_GROUP].sys_name,
&primary_gid);
if (ret != EOK) {
DEBUG(SSSDBG_MINOR_FAILURE,
"no primary group ID provided\n");
ret = EINVAL;
goto fail;
}
/* Add the RID to the end */
group_sid_str = talloc_asprintf(tmp_ctx, "%s-%lu",
dom_sid_str,
(unsigned long)primary_gid);
if (!group_sid_str) {
ret = ENOMEM;
goto fail;
}
/* Convert the SID into a UNIX group ID */
ret = sdap_idmap_sid_to_unix(opts->idmap_ctx, group_sid_str,
&primary_gid);
if (ret != EOK) goto fail;
} else {
ret = sysdb_attrs_get_uint32_t(state->orig_user, SYSDB_GIDNUM,
&primary_gid);
if (ret != EOK) {
DEBUG(SSSDBG_TRACE_FUNC, "Could not find user's primary GID\n");
goto fail;
}
}
/* Kick off resolution of the primary group by numeric GID. */
gid = talloc_asprintf(state, "%lu", (unsigned long)primary_gid);
if (gid == NULL) {
ret = ENOMEM;
goto fail;
}
subreq = groups_get_send(req, state->ev, state->id_ctx,
state->id_ctx->opts->sdom, state->conn,
gid, BE_FILTER_IDNUM, BE_ATTR_ALL, NULL);
if (!subreq) {
ret = ENOMEM;
goto fail;
}
tevent_req_set_callback(subreq, sdap_get_initgr_pgid, req);
talloc_free(tmp_ctx);
return;
fail:
talloc_free(tmp_ctx);
tevent_req_error(req, ret);
return;
}
| 0
|
436,354
|
/*
 * Encode the code point @code into @buf using (extended, up to 6-byte)
 * UTF-8 and return the number of bytes written, or a negative error
 * when the value cannot be represented.
 *
 * The lead byte is selected by range; UTF8_TRAILS emits the
 * continuation bytes above the lowest six bits, and UTF8_TRAIL0 emits
 * the final continuation byte shared by every multi-byte branch.
 * NOTE(review): ranges above U+10FFFF (5- and 6-byte forms) are not
 * valid in modern UTF-8; this encoder intentionally supports the
 * historical extended forms and, optionally, the 0xFE/0xFF invalid-code
 * scheme.
 */
code_to_mbc(OnigCodePoint code, UChar *buf)
{
#define UTF8_TRAILS(code, shift) (UChar )((((code) >> (shift)) & 0x3f) | 0x80)
#define UTF8_TRAIL0(code) (UChar )(((code) & 0x3f) | 0x80)
if ((code & 0xffffff80) == 0) {
*buf = (UChar )code;
return 1;
}
else {
UChar *p = buf;
if ((code & 0xfffff800) == 0) {
*p++ = (UChar )(((code>>6)& 0x1f) | 0xc0);
}
else if ((code & 0xffff0000) == 0) {
*p++ = (UChar )(((code>>12) & 0x0f) | 0xe0);
*p++ = UTF8_TRAILS(code, 6);
}
else if ((code & 0xffe00000) == 0) {
*p++ = (UChar )(((code>>18) & 0x07) | 0xf0);
*p++ = UTF8_TRAILS(code, 12);
*p++ = UTF8_TRAILS(code, 6);
}
else if ((code & 0xfc000000) == 0) {
*p++ = (UChar )(((code>>24) & 0x03) | 0xf8);
*p++ = UTF8_TRAILS(code, 18);
*p++ = UTF8_TRAILS(code, 12);
*p++ = UTF8_TRAILS(code, 6);
}
else if ((code & 0x80000000) == 0) {
*p++ = (UChar )(((code>>30) & 0x01) | 0xfc);
*p++ = UTF8_TRAILS(code, 24);
*p++ = UTF8_TRAILS(code, 18);
*p++ = UTF8_TRAILS(code, 12);
*p++ = UTF8_TRAILS(code, 6);
}
#ifdef USE_INVALID_CODE_SCHEME
else if (code == INVALID_CODE_FE) {
*p = 0xfe;
return 1;
}
else if (code == INVALID_CODE_FF) {
*p = 0xff;
return 1;
}
#endif
else {
return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE;
}
*p++ = UTF8_TRAIL0(code);
return p - buf;
}
}
| 0
|
235,525
|
/*
 * Timeout callback fired when a removed md component was still busy
 * after 10 seconds: fails the pending method context, detaches the
 * device-changed signal handler, and drops this callback's reference
 * on the shared data.  Returns FALSE so the timeout source is removed.
 */
linux_md_remove_component_device_not_seen_cb (gpointer user_data)
{
RemoveComponentData *data = user_data;
throw_error (data->context,
ERROR_FAILED,
"Error removing component: timeout (10s) waiting for slave to stop being busy");
g_signal_handler_disconnect (data->slave->priv->daemon, data->device_changed_signal_handler_id);
remove_component_data_unref (data);
return FALSE;
}
| 0
|
128,551
|
/**
 * nft_unregister_expr - remove an nftables expression type from the
 * global expression list.
 *
 * Serialized against registration and netlink users via the nfnetlink
 * NFTABLES subsystem mutex; the list entry is removed with the RCU
 * variant so concurrent RCU readers see a consistent list.
 */
void nft_unregister_expr(struct nft_expr_type *type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
list_del_rcu(&type->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
| 0
|
184,806
|
// Returns the document's theme color: the first <meta name="theme-color">
// in <head> whose content parses as a CSS color.  An invalid Color() is
// returned when no such element exists.
Color Document::themeColor() const
{
    HTMLMetaElement* meta = head() ? Traversal<HTMLMetaElement>::firstChild(*head()) : 0;
    for (; meta; meta = Traversal<HTMLMetaElement>::nextSibling(*meta)) {
        if (!equalIgnoringCase(meta->name(), "theme-color"))
            continue;
        RGBA32 rgb = Color::transparent;
        if (CSSParser::parseColor(rgb, meta->content().string().stripWhiteSpace(), true))
            return Color(rgb);
    }
    return Color();
}
| 0
|
509,694
|
/*
 * Regression test for mysql_stmt_store_result(): creates a two-column
 * table with two full rows plus one row with a NULL first column, binds
 * int/string result buffers, stores the full result set client-side,
 * and verifies every fetched value, the NULL indicator, and the
 * MYSQL_NO_DATA terminator.  The execute/store/fetch cycle is then run
 * a second time on the same statement to check it is repeatable.
 */
static void test_store_result()
{
MYSQL_STMT *stmt;
int rc;
int32 nData;
char szData[100];
MYSQL_BIND my_bind[2];
ulong length, length1;
my_bool is_null[2];
myheader("test_store_result");
rc= mysql_query(mysql, "DROP TABLE IF EXISTS test_store_result");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE test_store_result(col1 int , col2 varchar(50))");
myquery(rc);
rc= mysql_query(mysql, "INSERT INTO test_store_result VALUES(10, 'venu'), (20, 'mysql')");
myquery(rc);
rc= mysql_query(mysql, "INSERT INTO test_store_result(col2) VALUES('monty')");
myquery(rc);
rc= mysql_commit(mysql);
myquery(rc);
/* fetch */
bzero((char*) my_bind, sizeof(my_bind));
my_bind[0].buffer_type= MYSQL_TYPE_LONG;
my_bind[0].buffer= (void *) &nData; /* integer data */
my_bind[0].length= &length;
my_bind[0].is_null= &is_null[0];
length= 0;
my_bind[1].buffer_type= MYSQL_TYPE_STRING;
my_bind[1].buffer= szData; /* string data */
my_bind[1].buffer_length= sizeof(szData);
my_bind[1].length= &length1;
my_bind[1].is_null= &is_null[1];
length1= 0;
stmt= mysql_simple_prepare(mysql, "SELECT * FROM test_store_result");
check_stmt(stmt);
rc= mysql_stmt_bind_result(stmt, my_bind);
check_execute(stmt, rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_store_result(stmt);
check_execute(stmt, rc);
/* First pass: validate all three rows and end-of-data. */
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n row 1: %ld, %s(%lu)", (long) nData, szData, length1);
DIE_UNLESS(nData == 10);
DIE_UNLESS(strcmp(szData, "venu") == 0);
DIE_UNLESS(length1 == 4);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n row 2: %ld, %s(%lu)", (long) nData, szData, length1);
DIE_UNLESS(nData == 20);
DIE_UNLESS(strcmp(szData, "mysql") == 0);
DIE_UNLESS(length1 == 5);
length= 99;
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent && is_null[0])
fprintf(stdout, "\n row 3: NULL, %s(%lu)", szData, length1);
DIE_UNLESS(is_null[0]);
DIE_UNLESS(strcmp(szData, "monty") == 0);
DIE_UNLESS(length1 == 5);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == MYSQL_NO_DATA);
/* Second pass: re-execute the same statement to verify the
 * execute/store/fetch cycle is repeatable. */
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_store_result(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n row 1: %ld, %s(%lu)", (long) nData, szData, length1);
DIE_UNLESS(nData == 10);
DIE_UNLESS(strcmp(szData, "venu") == 0);
DIE_UNLESS(length1 == 4);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n row 2: %ld, %s(%lu)", (long) nData, szData, length1);
DIE_UNLESS(nData == 20);
DIE_UNLESS(strcmp(szData, "mysql") == 0);
DIE_UNLESS(length1 == 5);
length= 99;
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent && is_null[0])
fprintf(stdout, "\n row 3: NULL, %s(%lu)", szData, length1);
DIE_UNLESS(is_null[0]);
DIE_UNLESS(strcmp(szData, "monty") == 0);
DIE_UNLESS(length1 == 5);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == MYSQL_NO_DATA);
mysql_stmt_close(stmt);
}
| 0
|
423,132
|
/*
 * Discover and initialize the DMA channels of an IOAT v1 device.
 *
 * Reads the channel count and transfer-capability registers (both
 * clamped to their valid low 5 bits), allocates and initializes one
 * ioat_dma_chan per channel, and records the number actually set up
 * in dma->chancnt.  Returns the number of channels initialized; an
 * allocation failure simply stops enumeration early rather than
 * failing the probe.
 *
 * xfercap_scale == 0 encodes "unlimited": the (1UL << scale) expression
 * is bypassed and xfercap becomes all-ones via the -1 assignment.
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
u8 xfercap_scale;
u32 xfercap;
int i;
struct ioat_dma_chan *ioat;
struct device *dev = &device->pdev->dev;
struct dma_device *dma = &device->common;
INIT_LIST_HEAD(&dma->channels);
dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
dma->chancnt &= 0x1f; /* bits [4:0] valid */
if (dma->chancnt > ARRAY_SIZE(device->idx)) {
dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
dma->chancnt, ARRAY_SIZE(device->idx));
dma->chancnt = ARRAY_SIZE(device->idx);
}
xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_scale &= 0x1f; /* bits [4:0] valid */
xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
/* The i7300 idle driver reserves the last channel for itself. */
if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
dma->chancnt--;
#endif
for (i = 0; i < dma->chancnt; i++) {
ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
if (!ioat)
break;
ioat_init_channel(device, &ioat->base, i);
ioat->xfercap = xfercap;
spin_lock_init(&ioat->desc_lock);
INIT_LIST_HEAD(&ioat->free_desc);
INIT_LIST_HEAD(&ioat->used_desc);
}
dma->chancnt = i;
return i;
}
| 0
|
277,023
|
// Size of the viewport used for drawing — simply the size of the
// device viewport rect.
gfx::Size LayerTreeHostImpl::DrawViewportSize() const {
return DeviceViewport().size();
}
| 0
|
196,468
|
// Stub implementation: building the tab list is not implemented in this
// build/platform.  Logs NOTIMPLEMENTED and returns NULL; callers must
// tolerate a NULL result.
ListValue* ExtensionTabUtil::CreateTabList(
const Browser* browser,
const extensions::Extension* extension) {
NOTIMPLEMENTED();
return NULL;
}
| 0
|
66,517
|
/*
 * halt: paravirtualized CPU halt.  Emits the pv_ops irq.halt call
 * slot (PVOP_VCALL0), which the active backend patches to its native
 * halt implementation.
 */
static inline void halt(void)
{
PVOP_VCALL0(irq.halt);
}
| 0
|
296,742
|
// Resizes |tensor_to_resize| to the dimensions listed in |shape_tensor|.
// Only int32 shape tensors are supported; anything else is reported via
// TF_LITE_KERNEL_LOG and rejected with kTfLiteError.  The dims array is
// copied into a freshly allocated TfLiteIntArray whose ownership passes
// to context->ResizeTensor.
TfLiteStatus ResizeTensor(TfLiteContext* context,
                          const TfLiteTensor* shape_tensor,
                          TfLiteTensor* tensor_to_resize) {
  if (shape_tensor->type != kTfLiteInt32) {
    TF_LITE_KERNEL_LOG(context, "Output shape is %s, not int32.",
                       TfLiteTypeGetName(shape_tensor->type));
    return kTfLiteError;
  }
  const int num_dims = NumElements(shape_tensor);
  TfLiteIntArray* new_shape = TfLiteIntArrayCreate(num_dims);
  const int32_t* dims = GetTensorData<int32_t>(shape_tensor);
  for (int d = 0; d < num_dims; ++d) {
    new_shape->data[d] = dims[d];
  }
  return context->ResizeTensor(context, tensor_to_resize, new_shape);
}
| 0
|
267,094
|
/*
 * Receive and dispatch one ICMPv6 Neighbor Discovery message from the
 * ndp socket.
 *
 * Allocates a message buffer, reads one packet together with its
 * source address, interface index and hop limit, then validates it —
 * hop limit must be 255 (RFC 4861), the payload must cover at least an
 * ICMPv6 header, the ND type must be known, and message/options must
 * pass validity checks — before invoking the registered handlers.
 *
 * Returns 0 on success or when the packet is silently ignored, and a
 * negative errno on allocation or receive failure.
 *
 * Fix: removed the stray empty statement (";;") after the
 * ndp_call_handlers() call.
 */
static int ndp_sock_recv(struct ndp *ndp)
{
	struct ndp_msg *msg;
	enum ndp_msg_type msg_type;
	size_t len;
	int err;

	msg = ndp_msg_alloc();
	if (!msg)
		return -ENOMEM;

	len = ndp_msg_payload_maxlen(msg);
	err = myrecvfrom6(ndp->sock, msg->buf, &len, 0,
			  &msg->addrto, &msg->ifindex, &msg->hoplimit);
	if (err) {
		err(ndp, "Failed to receive message");
		goto free_msg;
	}
	dbg(ndp, "rcvd from: %s, ifindex: %u, hoplimit: %d",
	    str_in6_addr(&msg->addrto), msg->ifindex, msg->hoplimit);

	/* RFC 4861: ND packets must arrive with hop limit 255; anything
	 * else may have been forwarded and cannot be trusted. */
	if (msg->hoplimit != 255) {
		warn(ndp, "ignoring packet with bad hop limit (%d)", msg->hoplimit);
		err = 0;
		goto free_msg;
	}
	if (len < sizeof(*msg->icmp6_hdr)) {
		warn(ndp, "rcvd icmp6 packet too short (%luB)", len);
		err = 0;
		goto free_msg;
	}
	err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type);
	if (err) {
		/* Unknown ND message type: ignore silently. */
		err = 0;
		goto free_msg;
	}
	ndp_msg_init(msg, msg_type);
	ndp_msg_payload_len_set(msg, len);
	if (!ndp_msg_check_valid(msg)) {
		warn(ndp, "rcvd invalid ND message");
		err = 0;
		goto free_msg;
	}
	dbg(ndp, "rcvd %s, len: %zuB",
	    ndp_msg_type_info(msg_type)->strabbr, len);
	if (!ndp_msg_check_opts(msg)) {
		err = 0;
		goto free_msg;
	}
	err = ndp_call_handlers(ndp, msg);
free_msg:
	ndp_msg_destroy(msg);
	return err;
}
| 0
|
119,778
|
/*
 * Decode one READDIR entry from the NFSv4 XDR stream into @entry.
 *
 * Returns 0 on success, -EAGAIN when the stream is exhausted (or more
 * entries follow a terminator), and -EBADCOOKIE at a true end-of-
 * directory (entry->eof set).  The decoded attributes provide the
 * inode number (mounted-on fileid preferred), and the d_type when the
 * server supplied a file type.
 */
int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
int plus)
{
uint32_t bitmap[3] = {0};
uint32_t len;
__be32 *p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
/* value-follows == 0: either end-of-list or end-of-directory. */
if (*p == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
if (*p == xdr_zero)
return -EAGAIN;
entry->eof = 1;
return -EBADCOOKIE;
}
/* 8-byte cookie followed by the 4-byte name length. */
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
entry->prev_cookie = entry->cookie;
p = xdr_decode_hyper(p, &entry->cookie);
entry->len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, entry->len);
if (unlikely(!p))
goto out_overflow;
entry->name = (const char *) p;
/*
 * In case the server doesn't return an inode number,
 * we fake one here. (We don't use inode number 0,
 * since glibc seems to choke on it...)
 */
entry->ino = 1;
entry->fattr->valid = 0;
if (decode_attr_bitmap(xdr, bitmap) < 0)
goto out_overflow;
if (decode_attr_length(xdr, &len, &p) < 0)
goto out_overflow;
if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
entry->server, 1) < 0)
goto out_overflow;
if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
entry->ino = entry->fattr->mounted_on_fileid;
else if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
entry->ino = entry->fattr->fileid;
entry->d_type = DT_UNKNOWN;
if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EAGAIN;
}
| 0
|
355,906
|
/*
 * __pte_alloc: allocate and install a PTE page for @pmd at @address.
 *
 * Allocation happens outside the page_table_lock; under the lock we
 * re-check pmd_present() so that a racing thread that populated the
 * pmd first wins, in which case our freshly allocated page is freed.
 * Returns 0 on success or -ENOMEM.
 */
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
pgtable_t new = pte_alloc_one(mm, address);
if (!new)
return -ENOMEM;
/*
 * Ensure all pte setup (eg. pte page lock and page clearing) are
 * visible before the pte is made visible to other CPUs by being
 * put into page tables.
 *
 * The other side of the story is the pointer chasing in the page
 * table walking code (when walking the page table without locking;
 * ie. most of the time). Fortunately, these data accesses consist
 * of a chain of data-dependent loads, meaning most CPUs (alpha
 * being the notable exception) will already guarantee loads are
 * seen in-order. See the alpha page table accessors for the
 * smp_read_barrier_depends() barriers in page table walking code.
 */
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
spin_lock(&mm->page_table_lock);
if (!pmd_present(*pmd)) { /* Has another populated it ? */
mm->nr_ptes++;
pmd_populate(mm, pmd, new);
new = NULL;
}
spin_unlock(&mm->page_table_lock);
if (new)
pte_free(mm, new);
return 0;
}
| 0
|
9,384
|
// Converts a chapter timecode into absolute time (nanoseconds) by
// scaling it with the segment's timecode scale.
//
// Returns -1 on any invalid input: null chapters/segment/info, a
// non-positive timecode scale, a negative timecode, or a product that
// would not fit in long long.
//
// Fix: the original multiplied without checking for signed overflow,
// which is undefined behavior in C++; guard before multiplying.
long long Chapters::Atom::GetTime(
    const Chapters* pChapters,
    long long timecode)
{
    if (pChapters == NULL)
        return -1;

    Segment* const pSegment = pChapters->m_pSegment;

    if (pSegment == NULL)  // weird
        return -1;

    const SegmentInfo* const pInfo = pSegment->GetInfo();

    if (pInfo == NULL)
        return -1;

    const long long timecode_scale = pInfo->GetTimeCodeScale();

    if (timecode_scale < 1)  // weird
        return -1;

    if (timecode < 0)
        return -1;

    // Reject products that would overflow a 64-bit signed long long
    // (LLONG_MAX spelled as a literal to avoid a new include).
    const long long kMaxLongLong = 0x7FFFFFFFFFFFFFFFLL;
    if (timecode > kMaxLongLong / timecode_scale)
        return -1;

    const long long result = timecode_scale * timecode;

    return result;
}
| 1
|
119,337
|
/*
 * Verifier pass: resolve user-supplied map file descriptors inside
 * BPF_LD_IMM64 pseudo instructions into kernel 'struct bpf_map'
 * pointers.
 *
 * Also rejects programs that use reserved fields in BPF_LDX/BPF_STX
 * instructions or malformed ld_imm64 pairs.  Each distinct map is
 * reference-counted once and recorded in env->used_maps (bounded by
 * MAX_USED_MAPS); references are released later by release_maps() or
 * when the loaded program is freed.
 */
static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, j, err;
err = bpf_prog_calc_tag(env->prog);
if (err)
return err;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
verbose("BPF_LDX uses reserved fields\n");
return -EINVAL;
}
if (BPF_CLASS(insn->code) == BPF_STX &&
((BPF_MODE(insn->code) != BPF_MEM &&
BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
verbose("BPF_STX uses reserved fields\n");
return -EINVAL;
}
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
/* ld_imm64 occupies two insn slots; the second must be
 * the zeroed continuation. */
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
insn[1].off != 0) {
verbose("invalid bpf_ld_imm64 insn\n");
return -EINVAL;
}
if (insn->src_reg == 0)
/* valid generic load 64-bit imm */
goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
verbose("unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
f = fdget(insn->imm);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose("fd %d is not pointing to valid bpf_map\n",
insn->imm);
return PTR_ERR(map);
}
err = check_map_prog_compatibility(map, env->prog);
if (err) {
fdput(f);
return err;
}
/* store map pointer inside BPF_LD_IMM64 instruction */
insn[0].imm = (u32) (unsigned long) map;
insn[1].imm = ((u64) (unsigned long) map) >> 32;
/* check whether we recorded this map already */
for (j = 0; j < env->used_map_cnt; j++)
if (env->used_maps[j] == map) {
fdput(f);
goto next_insn;
}
if (env->used_map_cnt >= MAX_USED_MAPS) {
fdput(f);
return -E2BIG;
}
/* hold the map. If the program is rejected by verifier,
 * the map will be released by release_maps() or it
 * will be used by the valid program until it's unloaded
 * and all maps are released in free_bpf_prog_info()
 */
map = bpf_map_inc(map, false);
if (IS_ERR(map)) {
fdput(f);
return PTR_ERR(map);
}
env->used_maps[env->used_map_cnt++] = map;
fdput(f);
next_insn:
insn++;
i++;
}
}
/* now all pseudo BPF_LD_IMM64 instructions load valid
 * 'struct bpf_map *' into a register instead of user map_fd.
 * These pointers will be used later by verifier to validate map access.
 */
return 0;
}
| 0
|
151,071
|
/*
 * Returns nonzero when some ROSE network device is up and bound to
 * @addr.  The netdev list is walked under rcu_read_lock.
 */
static int rose_dev_exists(rose_address *addr)
{
	struct net_device *dev;
	int found = 0;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!(dev->flags & IFF_UP))
			continue;
		if (dev->type != ARPHRD_ROSE)
			continue;
		if (rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
| 0
|
12,084
|
// Posts an asynchronous write of |bytes_to_write| bytes from |buffer|
// into |file| at |offset|, executed on |message_loop_proxy|.  Rejects
// non-positive sizes up front; |callback| receives the result.
bool FileUtilProxy::Write(
    scoped_refptr<MessageLoopProxy> message_loop_proxy,
    PlatformFile file,
    int64 offset,
    const char* buffer,
    int bytes_to_write,
    WriteCallback* callback) {
  if (bytes_to_write <= 0)
    return false;
  RelayWrite* relay =
      new RelayWrite(file, offset, buffer, bytes_to_write, callback);
  return Start(FROM_HERE, message_loop_proxy, relay);
}
| 1
|
370,637
|
/*
 * QMP handler: report whether guest filesystems are currently frozen,
 * based on the agent's global freeze state.
 */
GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **err)
{
    return ga_is_frozen(ga_state) ? GUEST_FSFREEZE_STATUS_FROZEN
                                  : GUEST_FSFREEZE_STATUS_THAWED;
}
| 0
|
181,017
|
// Notifies the originating render view that a Set-Cookie attempt on
// |request| was blocked, so the UI can surface the content-blocked
// indicator.  Silently ignored when the request cannot be mapped back
// to a render view.
void ResourceDispatcherHost::OnSetCookieBlocked(URLRequest* request) {
  RESOURCE_LOG("OnSetCookieBlocked: " << request->url().spec());

  int render_process_id = 0;
  int render_view_id = 0;
  if (!RenderViewForRequest(request, &render_process_id, &render_view_id))
    return;

  CallRenderViewHostResourceDelegate(
      render_process_id, render_view_id,
      &RenderViewHostDelegate::Resource::OnContentBlocked,
      CONTENT_SETTINGS_TYPE_COOKIES);
}
| 0
|
354,500
|
/*
 * Compat-ioctl translation for BLKPG: widen a 32-bit userspace
 * blkpg_ioctl_arg into a native struct allocated on the compat user
 * stack, field by field, then re-issue the ioctl.
 *
 * get_user/put_user results are OR-ed together so a single check
 * catches any faulting access.  The data pointer is converted with
 * compat_ptr() before being stored.
 */
static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct blkpg_ioctl_arg32 __user *ua32 = compat_ptr(arg);
struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
compat_caddr_t udata;
compat_int_t n;
int err;
err = get_user(n, &ua32->op);
err |= put_user(n, &a->op);
err |= get_user(n, &ua32->flags);
err |= put_user(n, &a->flags);
err |= get_user(n, &ua32->datalen);
err |= put_user(n, &a->datalen);
err |= get_user(udata, &ua32->data);
err |= put_user(compat_ptr(udata), &a->data);
if (err)
return err;
return sys_ioctl(fd, cmd, (unsigned long)a);
}
| 0
|
241,395
|
/*
 * Mount an external file or directory at @path inside @phar's virtual
 * manifest.
 *
 * Validates the internal path (rejecting anything under ".phar"),
 * resolves the external filename (phar:// sources are taken verbatim,
 * plain paths are expanded and checked against open_basedir), stats
 * the target, and registers either a mounted-directory entry or a
 * mounted-file entry in the manifest.  Returns SUCCESS/FAILURE; on any
 * failure path the temporarily allocated strings are freed.
 */
int phar_mount_entry(phar_archive_data *phar, char *filename, int filename_len, char *path, int path_len TSRMLS_DC) /* {{{ */
{
phar_entry_info entry = {0};
php_stream_statbuf ssb;
int is_phar;
const char *err;
if (phar_path_check(&path, &path_len, &err) > pcr_is_ok) {
return FAILURE;
}
if (path_len >= sizeof(".phar")-1 && !memcmp(path, ".phar", sizeof(".phar")-1)) {
/* no creating magic phar files by mounting them */
return FAILURE;
}
is_phar = (filename_len > 7 && !memcmp(filename, "phar://", 7));
entry.phar = phar;
entry.filename = estrndup(path, path_len);
#ifdef PHP_WIN32
phar_unixify_path_separators(entry.filename, path_len);
#endif
entry.filename_len = path_len;
if (is_phar) {
entry.tmp = estrndup(filename, filename_len);
} else {
entry.tmp = expand_filepath(filename, NULL TSRMLS_CC);
if (!entry.tmp) {
entry.tmp = estrndup(filename, filename_len);
}
}
#if PHP_API_VERSION < 20100412
if (PG(safe_mode) && !is_phar && (!php_checkuid(entry.tmp, NULL, CHECKUID_CHECK_FILE_AND_DIR))) {
efree(entry.tmp);
efree(entry.filename);
return FAILURE;
}
#endif
filename = entry.tmp;
/* only check openbasedir for files, not for phar streams */
if (!is_phar && php_check_open_basedir(filename TSRMLS_CC)) {
efree(entry.tmp);
efree(entry.filename);
return FAILURE;
}
entry.is_mounted = 1;
entry.is_crc_checked = 1;
entry.fp_type = PHAR_TMP;
if (SUCCESS != php_stream_stat_path(filename, &ssb)) {
efree(entry.tmp);
efree(entry.filename);
return FAILURE;
}
if (ssb.sb.st_mode & S_IFDIR) {
entry.is_dir = 1;
if (SUCCESS != zend_hash_add(&phar->mounted_dirs, entry.filename, path_len, (void *)&(entry.filename), sizeof(char *), NULL)) {
/* directory already mounted */
efree(entry.tmp);
efree(entry.filename);
return FAILURE;
}
} else {
entry.is_dir = 0;
entry.uncompressed_filesize = entry.compressed_filesize = ssb.sb.st_size;
}
entry.flags = ssb.sb.st_mode;
if (SUCCESS == zend_hash_add(&phar->manifest, entry.filename, path_len, (void*)&entry, sizeof(phar_entry_info), NULL)) {
return SUCCESS;
}
efree(entry.tmp);
efree(entry.filename);
return FAILURE;
}
/* }}} */
| 0
|
477,137
|
/* VMState pre-save hook shared by vhost-vsock devices.
 * Only sanity-checks that the vhost backend is already stopped, since a
 * running backend could still DMA into guest memory during migration. */
int vhost_vsock_common_pre_save(void *opaque)
{
VHostVSockCommon *vvc = opaque;
/*
 * At this point, backend must be stopped, otherwise
 * it might keep writing to memory.
 */
assert(!vvc->vhost_dev.started);
return 0;
}
| 0
|
431,786
|
/* Command handler for "revokePrivilegesFromRole".
 * Parses the requested role and privilege list, removes the matching
 * actions from the stored role document (dropping privileges that end up
 * with no actions), persists the updated privilege array via a $set
 * update, and invalidates the user cache regardless of outcome.
 * Built-in roles are rejected. Serialized under the global authz mutex. */
bool run(OperationContext* txn,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
RoleName roleName;
PrivilegeVector privilegesToRemove;
Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
cmdObj, "revokePrivilegesFromRole", dbname, &roleName, &privilegesToRemove);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
if (RoleGraph::isBuiltinRole(roleName)) {
return appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
<< " is a built-in role and cannot be modified."));
}
BSONObj roleDoc;
status = authzManager->getRoleDescription(
txn, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
PrivilegeVector privileges;
status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
&privileges);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
/* For each privilege to revoke, find the stored privilege with the same
 * resource pattern, strip the revoked actions, and erase the privilege
 * entirely if nothing remains. The inner loop breaks right after a
 * possible erase, so the invalidated iterator is never reused. */
for (PrivilegeVector::iterator itToRm = privilegesToRemove.begin();
itToRm != privilegesToRemove.end();
++itToRm) {
for (PrivilegeVector::iterator curIt = privileges.begin(); curIt != privileges.end();
++curIt) {
if (curIt->getResourcePattern() == itToRm->getResourcePattern()) {
curIt->removeActions(itToRm->getActions());
if (curIt->getActions().empty()) {
privileges.erase(curIt);
}
break;
}
}
}
// Build up update modifier object to $set privileges.
mutablebson::Document updateObj;
mutablebson::Element setElement = updateObj.makeElementObject("$set");
status = updateObj.root().pushBack(setElement);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
status = setElement.pushBack(privilegesElement);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
/* Audit before the write so the attempt is logged even if it fails. */
audit::logRevokePrivilegesFromRole(Client::getCurrent(), roleName, privilegesToRemove);
BSONObjBuilder updateBSONBuilder;
updateObj.writeTo(&updateBSONBuilder);
status = updateRoleDocument(txn, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
}
| 0
|
18,315
|
static ossl_inline t2 * sk_ ## t1 ## _shift ( STACK_OF ( t1 ) * sk ) {
return ( t2 * ) OPENSSL_sk_shift ( ( OPENSSL_STACK * ) sk ) ;
}
static ossl_inline void sk_ ## t1 ## _pop_free ( STACK_OF ( t1 ) * sk , sk_ ## t1 ## _freefunc freefunc ) {
OPENSSL_sk_pop_free ( ( OPENSSL_STACK * ) sk , ( OPENSSL_sk_freefunc ) freefunc ) ;
}
static ossl_inline int sk_ ## t1 ## _insert ( STACK_OF ( t1 ) * sk , t2 * ptr , int idx ) {
return OPENSSL_sk_insert ( ( OPENSSL_STACK * ) sk , ( const void * ) ptr , idx ) ;
}
static ossl_inline t2 * sk_ ## t1 ## _set ( STACK_OF ( t1 ) * sk , int idx , t2 * ptr ) {
return ( t2 * ) OPENSSL_sk_set ( ( OPENSSL_STACK * ) sk , idx , ( const void * ) ptr ) ;
}
static ossl_inline int sk_ ## t1 ## _find ( STACK_OF ( t1 ) * sk , t2 * ptr ) {
return OPENSSL_sk_find ( ( OPENSSL_STACK * ) sk , ( const void * ) ptr ) ;
}
static ossl_inline int sk_ ## t1 ## _find_ex ( STACK_OF ( t1 ) * sk , t2 * ptr ) {
return OPENSSL_sk_find_ex ( ( OPENSSL_STACK * ) sk , ( const void * ) ptr ) ;
}
static ossl_inline void sk_ ## t1 ## _sort ( STACK_OF ( t1 ) * sk ) {
OPENSSL_sk_sort ( ( OPENSSL_STACK * ) sk ) ;
}
static ossl_inline int sk_ ## t1 ## _is_sorted ( const STACK_OF ( t1 ) * sk ) {
return OPENSSL_sk_is_sorted ( ( const OPENSSL_STACK * ) sk ) ;
}
static ossl_inline STACK_OF ( t1 ) * sk_ ## t1 ## _dup ( const STACK_OF ( t1 ) * sk ) {
return ( STACK_OF ( t1 ) * ) OPENSSL_sk_dup ( ( const OPENSSL_STACK * ) sk ) ;
}
static ossl_inline STACK_OF ( t1 ) * sk_ ## t1 ## _deep_copy ( const STACK_OF ( t1 ) * sk , sk_ ## t1 ## _copyfunc copyfunc , sk_ ## t1 ## _freefunc freefunc ) {
return ( STACK_OF ( t1 ) * ) OPENSSL_sk_deep_copy ( ( const OPENSSL_STACK * ) sk , ( OPENSSL_sk_copyfunc ) copyfunc , ( OPENSSL_sk_freefunc ) freefunc ) ;
}
static ossl_inline sk_ ## t1 ## _compfunc sk_ ## t1 ## _set_cmp_func ( STACK_OF ( t1 ) * sk , sk_ ## t1 ## _compfunc compare ) {
return ( sk_ ## t1 ## _compfunc ) OPENSSL_sk_set_cmp_func ( ( OPENSSL_STACK * ) sk , ( OPENSSL_sk_compfunc ) compare ) ;
}
# define DEFINE_SPECIAL_STACK_OF ( t1 , t2 ) SKM_DEFINE_STACK_OF ( t1 , t2 , t2 ) # define DEFINE_STACK_OF ( t ) SKM_DEFINE_STACK_OF ( t , t , t ) # define DEFINE_SPECIAL_STACK_OF_CONST ( t1 , t2 ) SKM_DEFINE_STACK_OF ( t1 , const t2 , t2 ) # define DEFINE_STACK_OF_CONST ( t ) SKM_DEFINE_STACK_OF ( t , const t , t ) typedef char * OPENSSL_STRING ;
typedef const char * OPENSSL_CSTRING ;
DEFINE_SPECIAL_STACK_OF ( OPENSSL_STRING , char )
| 0
|
292,726
|
/* HHVM binding for PHP's imageloadfont(). Currently unimplemented: it
 * always raises a not-supported error. The body below `#ifdef NEVER` is
 * retained dead code sketching the eventual implementation (loading a
 * raw architecture-dependent gdFont dump) and is never compiled. */
Variant HHVM_FUNCTION(imageloadfont, const String& /*file*/) {
// TODO: ind = 5 + zend_list_insert(font, le_gd_font);
throw_not_supported(__func__, "NYI");
#ifdef NEVER
Variant stream;
zval **file;
int hdr_size = sizeof(gdFont) - sizeof(char *);
int ind, body_size, n = 0, b, i, body_size_check;
gdFontPtr font;
php_stream *stream;
stream = File::Open(file, "rb");
if (!stream) {
raise_warning("failed to open file: %s", file.c_str());
return false;
}
/* Only supports a architecture-dependent binary dump format
 * at the moment.
 * The file format is like this on machines with 32-byte integers:
 *
 * byte 0-3: (int) number of characters in the font
 * byte 4-7: (int) value of first character in the font (often 32, space)
 * byte 8-11: (int) pixel width of each character
 * byte 12-15: (int) pixel height of each character
 * bytes 16-: (char) array with character data, one byte per pixel
 * in each character, for a total of
 * (nchars*width*height) bytes.
 */
font = (gdFontPtr) IM_MALLOC(sizeof(gdFont));
CHECK_ALLOC_R(font, sizeof(gdFont), false);
b = 0;
String hdr = stream->read(hdr_size);
if (hdr.length() < hdr_size) {
IM_FREE(font);
if (stream->eof()) {
raise_warning("End of file while reading header");
} else {
raise_warning("Error while reading header");
}
stream->close();
return false;
}
memcpy((void*)font, hdr.c_str(), hdr.length());
i = int64_t(f_tell(stream));
stream->seek(0, SEEK_END);
body_size_check = int64_t(f_tell(stream)) - hdr_size;
stream->seek(i, SEEK_SET);
/* If the sizes disagree, assume the dump was written with the opposite
 * endianness and byte-swap the header fields before re-checking. */
body_size = font->w * font->h * font->nchars;
if (body_size != body_size_check) {
font->w = FLIPWORD(font->w);
font->h = FLIPWORD(font->h);
font->nchars = FLIPWORD(font->nchars);
body_size = font->w * font->h * font->nchars;
}
if (font->nchars <= 0 ||
font->h <= 0 ||
font->nchars >= INT_MAX || font->h >= INT_MAX) {
raise_warning("Error reading font, invalid font header");
IM_FREE(font);
stream->close();
return false;
}
if ((font->nchars * font->h) <= 0 ||
font->w <= 0 ||
(font->nchars * font->h) >= INT_MAX || font->w >= INT_MAX) {
raise_warning("Error reading font, invalid font header");
IM_FREE(font);
stream->close();
return false;
}
if (body_size != body_size_check) {
raise_warning("Error reading font");
IM_FREE(font);
stream->close();
return false;
}
String body = stream->read(body_size);
if (body.length() < body_size) {
IM_FREE(font);
if (stream->eof()) {
raise_warning("End of file while reading body");
} else {
raise_warning("Error while reading body");
}
stream->close();
return false;
}
font->data = IM_MALLOC(body_size);
CHECK_ALLOC_R(font->data, body_size, false);
memcpy((void*)font->data, body.c_str(), body.length());
stream->close();
/* Adding 5 to the font index so we will never have font indices
 * that overlap with the old fonts (with indices 1-5). The first
 * list index given out is always 1.
 */
// ind = 5 + zend_list_insert(font, le_gd_font);
return ind;
#endif
}
| 0
|
319,112
|
/* Translate an m68k immediate-count shift instruction
 * (asl/lsl/asr/lsr #imm,Dn) into TCG ops via the condition-code-setting
 * shift helpers.
 * NOTE(review): the helpers receive the full data register; whether
 * operand size is honored happens inside the helpers — confirm there. */
DISAS_INSN(shift_im)
{
TCGv reg;
int tmp;
TCGv shift;
set_cc_op(s, CC_OP_FLAGS);
reg = DREG(insn, 0);
/* Immediate count lives in bits 9-11; an encoded 0 means a count of 8. */
tmp = (insn >> 9) & 7;
if (tmp == 0)
tmp = 8;
shift = tcg_const_i32(tmp);
/* No need to flush flags because we know we will set C flag. */
if (insn & 0x100) {
/* Bit 8 set: shift left. */
gen_helper_shl_cc(reg, cpu_env, reg, shift);
} else {
/* Bit 3 selects logical (shr) vs arithmetic (sar) right shift. */
if (insn & 8) {
gen_helper_shr_cc(reg, cpu_env, reg, shift);
} else {
gen_helper_sar_cc(reg, cpu_env, reg, shift);
}
}
}
| 1
|
373,265
|
/* Intentionally empty. Presumably keeps this translation unit non-empty
 * when TLS certificate support is compiled out — confirm against the
 * build configuration before removing. */
void tls_cert_dummy(void) {}
| 0
|
320,382
|
/* Dispatch an s390 DIAG 500 virtio hypercall.
 * The guest selects the function via r1 and passes arguments starting at
 * r2. Returns the handler's result, or -EINVAL for an unknown function.
 *
 * Fix: r1 is fully guest-controlled, so it must be bounds-checked before
 * being used as an index into s390_diag500_table; the previous code
 * allowed an out-of-bounds read (and a jump through arbitrary memory). */
int s390_virtio_hypercall(CPUS390XState *env)
{
s390_virtio_fn fn;

if (env->regs[1] >= ARRAY_SIZE(s390_diag500_table)) {
return -EINVAL;
}
fn = s390_diag500_table[env->regs[1]];
if (!fn) {
return -EINVAL;
}
return fn(&env->regs[2]);
}
| 1
|
400,890
|
/* Write a typed array parameter: if the caller requested `pkey`, allocate
 * a ref array of `count` elements, fill each element through the
 * caller-supplied `make` callback, and hand the array to ref_param_write.
 * Returns ref_param_write's result, 0 if the key was not requested, or a
 * negative error code. */
ref_param_write_typed_array(gs_param_list * plist, gs_param_name pkey,
void *pvalue, uint count,
int (*make)(ref *, const void *, uint,
gs_ref_memory_t *))
{
iparam_list *const iplist = (iparam_list *) plist;
ref value;
uint i;
ref *pe;
int code;
/* <= 0 covers both "not requested" (0) and errors (negative); both are
 * propagated unchanged. */
if ((code = ref_array_param_requested(iplist, pkey, &value, count,
"ref_param_write_typed_array")) <= 0)
return code;
for (i = 0, pe = value.value.refs; i < count; ++i, ++pe)
if ((code = (*make) (pe, pvalue, i, iplist->ref_memory)) < 0)
return code;
return ref_param_write(iplist, pkey, &value);
}
| 0
|
7,875
|
/* Build a sanitized output path for an archive member name.
 * - fname:  member name from the archive (NUL-terminated, untrusted)
 * - dir:    optional output directory prepended as "dir/"
 * - lower:  lowercase the name if non-zero
 * - isunix: selects which character is the path separator in fname
 * - utf8:   treat fname as UTF-8 (invalid sequences become '?')
 * Returns a malloc'd string the caller must free, or NULL on OOM.
 *
 * Fixes over the previous version:
 * - Continuation bytes of 2- and 3-byte UTF-8 sequences are now validated
 *   and bounds-checked; a lead byte near the end of the string can no
 *   longer cause reads past the NUL terminator.
 * - The old top-of-loop "fname >= fe" check fired before the terminating
 *   NUL could be consumed, so every UTF-8 name failed with NULL; the loop
 *   now terminates naturally on the NUL (which is always in bounds). */
static char *create_output_name(unsigned char *fname, unsigned char *dir,
int lower, int isunix, int utf8)
{
unsigned char *p, *name, c;
unsigned int x;
unsigned char sep, slash;

sep = (isunix) ? '/' : '\\'; /* the path-separator */
slash = (isunix) ? '\\' : '/'; /* the other slash */

/* length of filename */
x = strlen((char *) fname);
/* UTF8 worst case scenario: tolower() expands all chars from 1 to 3 bytes */
if (utf8) x *= 3;
/* length of output directory */
if (dir) x += strlen((char *) dir);

if (!(name = (unsigned char *) malloc(x + 2))) {
fprintf(stderr, "out of memory!\n");
return NULL;
}

/* start with blank name */
*name = '\0';

/* add output directory if needed */
if (dir) {
strcpy((char *) name, (char *) dir);
strcat((char *) name, "/");
}

/* remove leading slashes */
while (*fname == sep) fname++;

/* copy from fname to new name, converting MS-DOS slashes to UNIX slashes
 * as we go. Also lowercases characters if needed. */
p = &name[strlen((char *)name)];

if (utf8) {
/* Decode one UTF-8 code point per iteration. The source string is
 * NUL-terminated, and every continuation-byte read is guarded by a
 * "(byte & 0xC0) == 0x80" test which the NUL fails, so the loop can
 * never read past the terminator. Malformed sequences decode as '?'. */
do {
c = *fname++;
if (c < 0x80) {
x = c; /* 1-byte char (also catches the terminating NUL) */
}
else if ((c >= 0xC0) && (c < 0xE0)) {
/* 2-byte char: need one valid continuation byte */
if ((*fname & 0xC0) == 0x80) {
x = ((c & 0x1F) << 6) | (*fname++ & 0x3F);
}
else x = '?';
}
else if ((c >= 0xE0) && (c < 0xF0)) {
/* 3-byte char: need two valid continuation bytes. && short-circuits,
 * so fname[1] is only read when fname[0] is a continuation byte
 * (hence not the NUL). */
if (((fname[0] & 0xC0) == 0x80) && ((fname[1] & 0xC0) == 0x80)) {
x = ((c & 0x0F) << 12) | ((fname[0] & 0x3F) << 6) | (fname[1] & 0x3F);
fname += 2;
}
else x = '?';
}
else x = '?'; /* 0x80-0xBF (stray continuation) and 0xF0-0xFF: invalid */

/* whatever is the path seperator -> '/'
 * whatever is the other slash -> '\\'
 * otherwise, if lower is set, the lowercase version */
if (x == sep) x = '/';
else if (x == slash) x = '\\';
else if (lower) x = (unsigned int) tolower((int) x); /* NOTE(review): locale-dependent for x > 0x7F, as before */

/* integer back to UTF8 */
if (x < 0x80) {
*p++ = (unsigned char) x;
}
else if (x < 0x800) {
*p++ = 0xC0 | (x >> 6);
*p++ = 0x80 | (x & 0x3F);
}
else {
*p++ = 0xE0 | (x >> 12);
*p++ = 0x80 | ((x >> 6) & 0x3F);
*p++ = 0x80 | (x & 0x3F);
}
} while (x);
}
else {
/* regular non-utf8 version */
do {
c = *fname++;
if (c == sep) c = '/';
else if (c == slash) c = '\\';
else if (lower) c = (unsigned char) tolower((int) c);
} while ((*p++ = c));
}
return (char *) name;
}
| 1
|
251,721
|
/* Passthrough decoder implementation of glTexStorage3D: forward the call
 * to the underlying GL, and on success record the texture's new size for
 * tracking. Always returns kNoError; GL errors are reported through the
 * error callback state instead. */
error::Error GLES2DecoderPassthroughImpl::DoTexStorage3D(GLenum target,
GLsizei levels,
GLenum internalFormat,
GLsizei width,
GLsizei height,
GLsizei depth) {
// Clear pending error state so the post-call check only sees errors
// generated by this command.
CheckErrorCallbackState();
api()->glTexStorage3DFn(target, levels, internalFormat, width, height, depth);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
UpdateTextureSizeFromTarget(target);
return error::kNoError;
}
| 0
|
411,329
|
}
//! Unserialize a CImg<unsigned char> serialized buffer into a CImgList<T> list.
template<typename t>
static CImgList<T> get_unserialize(const CImg<t>& buffer) {
#ifdef cimg_use_zlib
#define _cimgz_unserialize_case(Tss) { \
Bytef *cbuf = 0; \
if (sizeof(t)!=1 || cimg::type<t>::string()==cimg::type<bool>::string()) { \
cbuf = new Bytef[csiz]; Bytef *_cbuf = cbuf; \
for (ulongT i = 0; i<csiz; ++i) *(_cbuf++) = (Bytef)*(stream++); \
is_bytef = false; \
} else { cbuf = (Bytef*)stream; stream+=csiz; is_bytef = true; } \
raw.assign(W,H,D,C); \
uLongf destlen = raw.size()*sizeof(Tss); \
uncompress((Bytef*)raw._data,&destlen,cbuf,csiz); \
if (!is_bytef) delete[] cbuf; \
}
#else
#define _cimgz_unserialize_case(Tss) \
throw CImgArgumentException("CImgList<%s>::get_unserialize(): Unable to unserialize compressed data " \
"unless zlib is enabled.", \
pixel_type());
#endif
#define _cimg_unserialize_case(Ts,Tss) \
if (!loaded && !cimg::strcasecmp(Ts,str_pixeltype)) { \
for (unsigned int l = 0; l<N; ++l) { \
j = 0; while ((i=(int)*stream)!='\n' && stream<estream && j<255) { ++stream; tmp[j++] = (char)i; } \
++stream; tmp[j] = 0; \
W = H = D = C = 0; csiz = 0; \
if ((err = cimg_sscanf(tmp,"%u %u %u %u #" cimg_fuint64,&W,&H,&D,&C,&csiz))<4) \
throw CImgArgumentException("CImgList<%s>::unserialize(): Invalid specified size (%u,%u,%u,%u) for " \
"image #%u in serialized buffer.", \
pixel_type(),W,H,D,C,l); \
if (W*H*D*C>0) { \
CImg<Tss> raw; \
CImg<T> &img = res._data[l]; \
if (err==5) _cimgz_unserialize_case(Tss) \
else if (sizeof(Tss)==sizeof(t) && cimg::type<Tss>::is_float()==cimg::type<t>::is_float()) { \
raw.assign((Tss*)stream,W,H,D,C,true); \
stream+=raw.size(); \
} else { \
raw.assign(W,H,D,C); \
CImg<ucharT> _raw((unsigned char*)raw._data,W*sizeof(Tss),H,D,C,true); \
cimg_for(_raw,p,unsigned char) *p = (unsigned char)*(stream++); \
} \
if (endian!=cimg::endianness()) cimg::invert_endianness(raw._data,raw.size()); \
raw.move_to(img); \
} \
} \
loaded = true; \
}
if (buffer.is_empty())
throw CImgArgumentException("CImgList<%s>::get_unserialize(): Specified serialized buffer is (null).",
pixel_type());
CImgList<T> res;
const t *stream = buffer._data, *const estream = buffer._data + buffer.size();
bool loaded = false, endian = cimg::endianness(), is_bytef = false;
CImg<charT> tmp(256), str_pixeltype(256), str_endian(256);
*tmp = *str_pixeltype = *str_endian = 0;
unsigned int j, N = 0, W, H, D, C;
uint64T csiz;
int i, err;
cimg::unused(is_bytef);
do {
j = 0; while ((i=(int)*stream)!='\n' && stream<estream && j<255) { ++stream; tmp[j++] = (char)i; }
++stream; tmp[j] = 0;
} while (*tmp=='#' && stream<estream);
err = cimg_sscanf(tmp,"%u%*c%255[A-Za-z64_]%*c%255[sA-Za-z_ ]",
&N,str_pixeltype._data,str_endian._data);
if (err<2)
throw CImgArgumentException("CImgList<%s>::get_unserialize(): CImg header not found in serialized buffer.",
pixel_type());
if (!cimg::strncasecmp("little",str_endian,6)) endian = false;
else if (!cimg::strncasecmp("big",str_endian,3)) endian = true;
res.assign(N);
_cimg_unserialize_case("bool",bool);
_cimg_unserialize_case("unsigned_char",unsigned char);
_cimg_unserialize_case("uchar",unsigned char);
_cimg_unserialize_case("char",char);
_cimg_unserialize_case("unsigned_short",unsigned short);
_cimg_unserialize_case("ushort",unsigned short);
_cimg_unserialize_case("short",short);
_cimg_unserialize_case("unsigned_int",unsigned int);
_cimg_unserialize_case("uint",unsigned int);
_cimg_unserialize_case("int",int);
_cimg_unserialize_case("unsigned_int64",uint64T);
_cimg_unserialize_case("uint64",uint64T);
_cimg_unserialize_case("int64",int64T);
_cimg_unserialize_case("float",float);
_cimg_unserialize_case("double",double);
if (!loaded)
throw CImgArgumentException("CImgList<%s>::get_unserialize(): Unsupported pixel type '%s' defined "
| 0
|
500,705
|
/* Invoke the user-registered callback for option `opt`, passing the
 * values accumulated in `funcopt` as an argv-style string vector.
 * Returns the callback's result, or CFG_FAIL on bad arguments or OOM.
 * `funcopt`'s values are freed before returning. */
static int call_function(cfg_t *cfg, cfg_opt_t *opt, cfg_opt_t *funcopt)
{
int ret;
const char **argv;
unsigned int i;

if (!cfg || !opt || !funcopt) {
errno = EINVAL;
return CFG_FAIL;
}

/*
 * Create an argv string vector and call the registered function.
 * Allocate at least one slot: calloc(0, ...) may legally return NULL,
 * which would make a valid zero-argument call look like an OOM failure.
 */
argv = calloc(funcopt->nvalues ? funcopt->nvalues : 1, sizeof(char *));
if (!argv)
return CFG_FAIL;

for (i = 0; i < funcopt->nvalues; i++)
argv[i] = funcopt->values[i]->string;

ret = (*opt->func) (cfg, opt, funcopt->nvalues, argv);

cfg_free_value(funcopt);
free(argv);
return ret;
}
| 0
|
31,725
|
std::vector<Box_iref::Reference> Box_iref::get_references_from(heif_item_id itemID) const
{
// Gather every stored reference whose source item matches itemID.
std::vector<Reference> matches;

for (auto iter = m_references.begin(); iter != m_references.end(); ++iter) {
const Reference& candidate = *iter;
if (candidate.from_item_ID == itemID) {
matches.push_back(candidate);
}
}

return matches;
}
| 0
|
168,018
|
/* Issue an asynchronous "get by key" against an IndexedDB object store.
 * Takes ownership of |callbacks_ptr| (released into the pending-callbacks
 * map so the browser-side response can be routed back). If the IPC
 * reports a synchronous exception via |*ec|, the callback registration is
 * rolled back since no response will arrive. */
void IndexedDBDispatcher::RequestIDBObjectStoreGet(
const IndexedDBKey& key,
WebIDBCallbacks* callbacks_ptr,
int32 idb_object_store_id,
const WebIDBTransaction& transaction,
WebExceptionCode* ec) {
ResetCursorPrefetchCaches();
scoped_ptr<WebIDBCallbacks> callbacks(callbacks_ptr);
int32 response_id = pending_callbacks_.Add(callbacks.release());
Send(new IndexedDBHostMsg_ObjectStoreGet(
idb_object_store_id, CurrentWorkerId(), response_id,
key, TransactionId(transaction), ec));
if (*ec)
pending_callbacks_.Remove(response_id);
}
| 0
|
489,778
|
/* Dump an FDpacketBox ('fdpa') as XML to `trace`: the FD hint packet
 * flags first, then one <FDHeaderExt> element per header extension.
 * Extension types > 127 carry a fixed 3-byte content field; other types
 * carry variable-length data. */
GF_Err fdpa_box_dump(GF_Box *a, FILE * trace)
{
u32 i;
GF_FDpacketBox *ptr = (GF_FDpacketBox *) a;
if (!a) return GF_BAD_PARAM;
gf_isom_box_dump_start(a, "FDpacketBox", trace);
gf_fprintf(trace, "sender_current_time_present=\"%d\" expected_residual_time_present=\"%d\" session_close_bit=\"%d\" object_close_bit=\"%d\" transport_object_identifier=\"%d\">\n", ptr->info.sender_current_time_present, ptr->info.expected_residual_time_present, ptr->info.session_close_bit, ptr->info.object_close_bit, ptr->info.transport_object_identifier);
for (i=0; i<ptr->header_ext_count; i++) {
gf_fprintf(trace, "<FDHeaderExt type=\"%d\"", ptr->headers[i].header_extension_type);
if (ptr->headers[i].header_extension_type > 127) {
dump_data_attribute(trace, "content", (char *) ptr->headers[i].content, 3);
} else if (ptr->headers[i].data_length) {
dump_data_attribute(trace, "data", ptr->headers[i].data, ptr->headers[i].data_length);
}
gf_fprintf(trace, "/>\n");
}
/* size == 0 indicates a schema/template dump: emit a placeholder element
 * so the XML structure is documented. */
if (!ptr->size) {
gf_fprintf(trace, "<FDHeaderExt type=\"\" content=\"\" data=\"\"/>\n");
}
gf_isom_box_dump_done("FDpacketBox", a, trace);
return GF_OK;
}
| 0
|
266,102
|
/* Tear down the Zend executor at request end.
 * Runs a carefully ordered sequence — symbol tables, error/exception
 * handlers, static data, resources, object store, function/class tables,
 * constants — with each phase wrapped in zend_try so a bailout in one
 * phase cannot skip the rest. The ordering comments below are load-
 * bearing; do not reorder phases without reading them. */
void shutdown_executor(void) /* {{{ */
{
zend_function *func;
zend_class_entry *ce;
zend_try {
/* Removed because this can not be safely done, e.g. in this situation:
Object 1 creates object 2
Object 3 holds reference to object 2.
Now when 1 and 2 are destroyed, 3 can still access 2 in its destructor, with
very problematic results */
/* zend_objects_store_call_destructors(&EG(objects_store)); */
/* Moved after symbol table cleaners, because some of the cleaners can call
destructors, which would use EG(symtable_cache_ptr) and thus leave leaks */
/* while (EG(symtable_cache_ptr)>=EG(symtable_cache)) {
zend_hash_destroy(*EG(symtable_cache_ptr));
efree(*EG(symtable_cache_ptr));
EG(symtable_cache_ptr)--;
}
*/
zend_llist_apply(&zend_extensions, (llist_apply_func_t) zend_extension_deactivator);
if (CG(unclean_shutdown)) {
EG(symbol_table).pDestructor = zend_unclean_zval_ptr_dtor;
}
zend_hash_graceful_reverse_destroy(&EG(symbol_table));
} zend_end_try();
EG(valid_symbol_table) = 0;
zend_try {
zval *zeh;
/* remove error handlers before destroying classes and functions,
* so that if handler used some class, crash would not happen */
if (Z_TYPE(EG(user_error_handler)) != IS_UNDEF) {
zeh = &EG(user_error_handler);
zval_ptr_dtor(zeh);
ZVAL_UNDEF(&EG(user_error_handler));
}
if (Z_TYPE(EG(user_exception_handler)) != IS_UNDEF) {
zeh = &EG(user_exception_handler);
zval_ptr_dtor(zeh);
ZVAL_UNDEF(&EG(user_exception_handler));
}
zend_stack_clean(&EG(user_error_handlers_error_reporting), NULL, 1);
zend_stack_clean(&EG(user_error_handlers), (void (*)(void *))ZVAL_DESTRUCTOR, 1);
zend_stack_clean(&EG(user_exception_handlers), (void (*)(void *))ZVAL_DESTRUCTOR, 1);
} zend_end_try();
zend_try {
/* Cleanup static data for functions and arrays.
* We need a separate cleanup stage because of the following problem:
* Suppose we destroy class X, which destroys the class's function table,
* and in the function table we have function foo() that has static $bar.
* Now if an object of class X is assigned to $bar, its destructor will be
* called and will fail since X's function table is in mid-destruction.
* So we want first of all to clean up all data and then move to tables destruction.
* Note that only run-time accessed data need to be cleaned up, pre-defined data can
* not contain objects and thus are not probelmatic */
if (EG(full_tables_cleanup)) {
ZEND_HASH_FOREACH_PTR(EG(function_table), func) {
if (func->type == ZEND_USER_FUNCTION) {
zend_cleanup_op_array_data((zend_op_array *) func);
}
} ZEND_HASH_FOREACH_END();
ZEND_HASH_REVERSE_FOREACH_PTR(EG(class_table), ce) {
if (ce->type == ZEND_USER_CLASS) {
zend_cleanup_user_class_data(ce);
} else {
zend_cleanup_internal_class_data(ce);
}
} ZEND_HASH_FOREACH_END();
} else {
/* Fast path: user entries are appended after internal ones, so the
* reverse walk can stop at the first non-user entry. */
ZEND_HASH_REVERSE_FOREACH_PTR(EG(function_table), func) {
if (func->type != ZEND_USER_FUNCTION) {
break;
}
zend_cleanup_op_array_data((zend_op_array *) func);
} ZEND_HASH_FOREACH_END();
ZEND_HASH_REVERSE_FOREACH_PTR(EG(class_table), ce) {
if (ce->type != ZEND_USER_CLASS) {
break;
}
zend_cleanup_user_class_data(ce);
} ZEND_HASH_FOREACH_END();
zend_cleanup_internal_classes();
}
} zend_end_try();
zend_try {
zend_llist_destroy(&CG(open_files));
} zend_end_try();
zend_try {
zend_close_rsrc_list(&EG(regular_list));
} zend_end_try();
#if ZEND_DEBUG
if (GC_G(gc_enabled) && !CG(unclean_shutdown)) {
gc_collect_cycles();
}
#endif
zend_try {
zend_objects_store_free_object_storage(&EG(objects_store));
zend_vm_stack_destroy();
/* Destroy all op arrays */
if (EG(full_tables_cleanup)) {
zend_hash_reverse_apply(EG(function_table), clean_non_persistent_function_full);
zend_hash_reverse_apply(EG(class_table), clean_non_persistent_class_full);
} else {
zend_hash_reverse_apply(EG(function_table), clean_non_persistent_function);
zend_hash_reverse_apply(EG(class_table), clean_non_persistent_class);
}
while (EG(symtable_cache_ptr)>=EG(symtable_cache)) {
zend_hash_destroy(*EG(symtable_cache_ptr));
FREE_HASHTABLE(*EG(symtable_cache_ptr));
EG(symtable_cache_ptr)--;
}
} zend_end_try();
zend_try {
clean_non_persistent_constants();
} zend_end_try();
zend_try {
#if 0&&ZEND_DEBUG
signal(SIGSEGV, original_sigsegv_handler);
#endif
zend_hash_destroy(&EG(included_files));
zend_stack_destroy(&EG(user_error_handlers_error_reporting));
zend_stack_destroy(&EG(user_error_handlers));
zend_stack_destroy(&EG(user_exception_handlers));
zend_objects_store_destroy(&EG(objects_store));
if (EG(in_autoload)) {
zend_hash_destroy(EG(in_autoload));
FREE_HASHTABLE(EG(in_autoload));
}
} zend_end_try();
zend_shutdown_fpu();
EG(ht_iterators_used) = 0;
/* Free the iterator array only if it was grown past the inline slots. */
if (EG(ht_iterators) != EG(ht_iterators_slots)) {
efree(EG(ht_iterators));
}
EG(active) = 0;
}
| 0
|
168,561
|
v8::Local<v8::Context> V8Proxy::isolatedWorldContext(int worldId)
{
// Look up the isolated world registered under worldId; an empty local
// handle signals that no such world exists.
IsolatedWorldMap::iterator world = m_isolatedWorlds.find(worldId);
if (world != m_isolatedWorlds.end())
return v8::Local<v8::Context>::New(world->second->context());
return v8::Local<v8::Context>();
}
| 0
|
441,140
|
/* Emit the S3 response for a PutBucketReplication request: map any
 * operation error into the request state, then write the standard errno
 * header and start an (empty) XML body. */
void RGWPutBucketReplication_ObjStore_S3::send_response()
{
if (op_ret)
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s, this, "application/xml");
dump_start(s);
}
| 0
|
468,612
|
/* Completion callback for the URL-rewrite (redirector) helper.
 * Interprets the helper reply: on Okay it either installs an HTTP
 * redirect (status + Location) or rewrites the request URL in place; on
 * timeout it may fail the transaction depending on configuration; other
 * results are logged and the request proceeds unchanged. Always resumes
 * the callout sequence via http->doCallouts(). */
ClientRequestContext::clientRedirectDone(const Helper::Reply &reply)
{
HttpRequest *old_request = http->request;
debugs(85, 5, HERE << "'" << http->uri << "' result=" << reply);
assert(redirect_state == REDIRECT_PENDING);
redirect_state = REDIRECT_DONE;
// Put helper response Notes into the transaction state record (ALE) eventually
// do it early to ensure that no matter what the outcome the notes are present.
if (http->al)
http->al->syncNotes(old_request);
UpdateRequestNotes(http->getConn(), *old_request, reply.notes);
switch (reply.result) {
case Helper::TimedOut:
if (Config.onUrlRewriteTimeout.action != toutActBypass) {
static const auto d = MakeNamedErrorDetail("REDIRECTOR_TIMEDOUT");
http->calloutsError(ERR_GATEWAY_FAILURE, d);
debugs(85, DBG_IMPORTANT, "ERROR: URL rewrite helper: Timedout");
}
break;
case Helper::Unknown:
case Helper::TT:
// Handler in redirect.cc should have already mapped Unknown
// IF it contained valid entry for the old URL-rewrite helper protocol
debugs(85, DBG_IMPORTANT, "ERROR: URL rewrite helper returned invalid result code. Wrong helper? " << reply);
break;
case Helper::BrokenHelper:
debugs(85, DBG_IMPORTANT, "ERROR: URL rewrite helper: " << reply);
break;
case Helper::Error:
// no change to be done.
break;
case Helper::Okay: {
// #1: redirect with a specific status code OK status=NNN url="..."
// #2: redirect with a default status code OK url="..."
// #3: re-write the URL OK rewrite-url="..."
const char *statusNote = reply.notes.findFirst("status");
const char *urlNote = reply.notes.findFirst("url");
if (urlNote != NULL) {
// HTTP protocol redirect to be done.
// TODO: change default redirect status for appropriate requests
// Squid defaults to 302 status for now for better compatibility with old clients.
// HTTP/1.0 client should get 302 (Http::scFound)
// HTTP/1.1 client contacting reverse-proxy should get 307 (Http::scTemporaryRedirect)
// HTTP/1.1 client being diverted by forward-proxy should get 303 (Http::scSeeOther)
Http::StatusCode status = Http::scFound;
if (statusNote != NULL) {
const char * result = statusNote;
status = static_cast<Http::StatusCode>(atoi(result));
}
// Only genuine HTTP redirect codes are honored; anything else is
// logged and the redirect is dropped.
if (status == Http::scMovedPermanently
|| status == Http::scFound
|| status == Http::scSeeOther
|| status == Http::scPermanentRedirect
|| status == Http::scTemporaryRedirect) {
http->redirect.status = status;
http->redirect.location = xstrdup(urlNote);
// TODO: validate the URL produced here is RFC 2616 compliant absolute URI
} else {
debugs(85, DBG_CRITICAL, "ERROR: URL-rewrite produces invalid " << status << " redirect Location: " << urlNote);
}
} else {
// URL-rewrite wanted. Ew.
urlNote = reply.notes.findFirst("rewrite-url");
// prevent broken helpers causing too much damage. If old URL == new URL skip the re-write.
if (urlNote != NULL && strcmp(urlNote, http->uri)) {
AnyP::Uri tmpUrl;
if (tmpUrl.parse(old_request->method, SBuf(urlNote))) {
HttpRequest *new_request = old_request->clone();
new_request->url = tmpUrl;
debugs(61, 2, "URL-rewriter diverts URL from " << old_request->effectiveRequestUri() << " to " << new_request->effectiveRequestUri());
// update the new request to flag the re-writing was done on it
new_request->flags.redirected = true;
// unlink bodypipe from the old request. Not needed there any longer.
if (old_request->body_pipe != NULL) {
old_request->body_pipe = NULL;
debugs(61,2, HERE << "URL-rewriter diverts body_pipe " << new_request->body_pipe <<
" from request " << old_request << " to " << new_request);
}
http->resetRequest(new_request);
old_request = nullptr;
} else {
debugs(85, DBG_CRITICAL, "ERROR: URL-rewrite produces invalid request: " <<
old_request->method << " " << urlNote << " " << old_request->http_ver);
}
}
}
}
break;
}
/* XXX PIPELINE: This is inaccurate during pipelining */
if (http->getConn() != NULL && Comm::IsConnOpen(http->getConn()->clientConnection))
fd_note(http->getConn()->clientConnection->fd, http->uri);
assert(http->uri);
http->doCallouts();
}
| 0
|
215,224
|
/* Advance rq->clock_task by `delta`, after subtracting time stolen by
 * IRQ processing and/or the hypervisor so task accounting only sees time
 * the CPU actually spent on tasks.
 *
 * Fix: the paravirt static-key test contained a mis-encoded character
 * ("¶virt..." — mojibake for "&paravirt...") which would not compile;
 * restored to static_key_false((&paravirt_steal_rq_enabled)). */
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compile should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
/*
 * Since irq_time is only updated on {soft,}irq_exit, we might run into
 * this case when a previous update_rq_clock() happened inside a
 * {soft,}irq region.
 *
 * When this happens, we stop ->clock_task and only update the
 * prev_irq_time stamp to account for the part that fit, so that a next
 * update will consume the rest. This ensures ->clock_task is
 * monotonic.
 *
 * It does however cause some slight miss-attribution of {soft,}irq
 * time, a more accurate solution would be to update the irq_time using
 * the current rq->clock timestamp, except that would require using
 * atomic ops.
 */
if (irq_delta > delta)
irq_delta = delta;
rq->prev_irq_time += irq_delta;
delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
if (static_key_false((&paravirt_steal_rq_enabled))) {
steal = paravirt_steal_clock(cpu_of(rq));
steal -= rq->prev_steal_time_rq;
/* Clamp so clock_task never moves backwards. */
if (unlikely(steal > delta))
steal = delta;
rq->prev_steal_time_rq += steal;
delta -= steal;
}
#endif
rq->clock_task += delta;
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
sched_rt_avg_update(rq, irq_delta + steal);
#endif
}
| 0
|
131,958
|
/* fb_setcolreg hook for the Au1200 LCD: store one color register either
 * in the truecolor pseudo palette or in the hardware palette, converting
 * to the active visual's format. Returns 0 or -EINVAL for bad indices.
 *
 * Fix: the truecolor branch used `regno > 16`, which let regno == 16
 * write one entry past the conventional 16-entry pseudo_palette array;
 * the check is now `regno >= 16`. */
static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *fbi)
{
volatile u32 *palette = lcd->palette;
u32 value;

if (regno > (AU1200_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;

if (fbi->var.grayscale) {
/* Convert color to grayscale (ITU-R 601 luma weights in 16.16) */
red = green = blue =
(19595 * red + 38470 * green + 7471 * blue) >> 16;
}

if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
/* Place color in the pseudopalette (16 entries, indices 0-15) */
if (regno >= 16)
return -EINVAL;
palette = (u32*) fbi->pseudo_palette;
red >>= (16 - fbi->var.red.length);
green >>= (16 - fbi->var.green.length);
blue >>= (16 - fbi->var.blue.length);
value = (red << fbi->var.red.offset) |
(green << fbi->var.green.offset)|
(blue << fbi->var.blue.offset);
value &= 0xFFFF;
} else if (1 /*FIX!!! panel_is_active(fbdev->panel)*/) {
/* COLOR TFT PALLETTIZED (use RGB 565) */
value = (red & 0xF800)|((green >> 5) &
0x07E0)|((blue >> 11) & 0x001F);
value &= 0xFFFF;
} else if (0 /*panel_is_color(fbdev->panel)*/) {
/* COLOR STN MODE */
value = 0x1234;
value &= 0xFFF;
} else {
/* MONOCHROME MODE */
value = (green >> 12) & 0x000F;
value &= 0xF;
}

palette[regno] = value;
return 0;
}
| 0
|
444,925
|
/* Verifies that raising max_request_headers_kb_ to 96 KiB makes the
 * HTTP/1 codec accept a single header of ~95 KiB (just under the limit,
 * leaving room for the header name and request-line overhead). */
TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) {
max_request_headers_kb_ = 96;
std::string long_string = "big: " + std::string(95 * 1024, 'q') + "\r\n";
testRequestHeadersAccepted(long_string);
}
| 0
|
269,983
|
/*
 * Walk the raw X.509 bytes of a TLS Certificate message and extract:
 *  - RDN attributes (CN, C, L, ST, O, OU) accumulated into rdnSeqBuf,
 *  - validity dates (notBefore/notAfter, UTCTime "YYMMDDhhmmssZ"),
 *  - the subjectAltName dNSName list (comma-joined into server_names).
 * Sets TLS risk bits for expired, name-mismatched and self-signed certs.
 *
 * NOTE(review): the scan reads payload[i+1] and payload[i+2] while the loop
 * only bounds i < certificate_len — assumes at least two readable bytes past
 * certificate_len in the payload; TODO confirm against the caller.
 */
static void processCertificateElements(struct ndpi_detection_module_struct *ndpi_struct,
				       struct ndpi_flow_struct *flow,
				       u_int16_t p_offset, u_int16_t certificate_len) {
  struct ndpi_packet_struct *packet = &flow->packet;
  u_int num_found = 0, i;
  char buffer[64] = { '\0' }, rdnSeqBuf[2048] = { '\0' };
  u_int rdn_len = 0;
#ifdef DEBUG_TLS
  printf("[TLS] %s() [offset: %u][certificate_len: %u]\n", __FUNCTION__, p_offset, certificate_len);
#endif
  /* Check after handshake protocol header (5 bytes) and message header (4 bytes) */
  for(i = p_offset; i < certificate_len; i++) {
    /*
      See https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.sec.doc/q009860_.htm
      for X.509 certificate labels
    */
    /* 55 04 xx are the DER encodings of id-at (2.5.4.x) attribute OIDs. */
    if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x03)) {
      /* Common Name */
      int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "CN");
      if(rc == -1) break;
#ifdef DEBUG_TLS
      printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Common Name", buffer);
#endif
    } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x06)) {
      /* Country */
      int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "C");
      if(rc == -1) break;
#ifdef DEBUG_TLS
      printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Country", buffer);
#endif
    } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x07)) {
      /* Locality */
      int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "L");
      if(rc == -1) break;
#ifdef DEBUG_TLS
      printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Locality", buffer);
#endif
    } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x08)) {
      /* State or Province */
      int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "ST");
      if(rc == -1) break;
#ifdef DEBUG_TLS
      printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "State or Province", buffer);
#endif
    } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0a)) {
      /* Organization Name */
      int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "O");
      if(rc == -1) break;
#ifdef DEBUG_TLS
      printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Name", buffer);
#endif
    } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0b)) {
      /* Organization Unit */
      int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "OU");
      if(rc == -1) break;
#ifdef DEBUG_TLS
      printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Unit", buffer);
#endif
    } else if((packet->payload[i] == 0x30) && (packet->payload[i+1] == 0x1e) && (packet->payload[i+2] == 0x17)) {
      /* Certificate Validity: SEQUENCE of two UTCTime values.  Hitting the
	 first validity also marks the end of the issuer RDN sequence. */
      u_int8_t len = packet->payload[i+3];
      u_int offset = i+4;

      if(num_found == 0) {
	num_found++;

#ifdef DEBUG_TLS
	printf("[TLS] %s() IssuerDN [%s]\n", __FUNCTION__, rdnSeqBuf);
#endif

	if(rdn_len) flow->protos.stun_ssl.ssl.issuerDN = ndpi_strdup(rdnSeqBuf);
	rdn_len = 0; /* Reset buffer */
      }

      if((offset+len) < packet->payload_packet_len) {
	char utcDate[32];

#ifdef DEBUG_TLS
	u_int j;

	printf("[CERTIFICATE] notBefore [len: %u][", len);
	for(j=0; j<len; j++) printf("%c", packet->payload[i+4+j]);
	printf("]\n");
#endif

	if(len < (sizeof(utcDate)-1)) {
	  struct tm utc;
	  utc.tm_isdst = -1; /* Not set by strptime */

	  strncpy(utcDate, (const char*)&packet->payload[i+4], len);
	  utcDate[len] = '\0';

	  /* 141021000000Z */
	  if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) {
	    flow->protos.stun_ssl.ssl.notBefore = timegm(&utc);
#ifdef DEBUG_TLS
	    printf("[CERTIFICATE] notBefore %u [%s]\n",
		   flow->protos.stun_ssl.ssl.notBefore, utcDate);
#endif
	  }
	}

	offset += len;

	if((offset+1) < packet->payload_packet_len) {
	  len = packet->payload[offset+1];

	  offset += 2;

	  if((offset+len) < packet->payload_packet_len) {
	    u_int32_t time_sec = flow->packet.current_time_ms / 1000;
#ifdef DEBUG_TLS
	    u_int j;

	    printf("[CERTIFICATE] notAfter [len: %u][", len);
	    for(j=0; j<len; j++) printf("%c", packet->payload[offset+j]);
	    printf("]\n");
#endif

	    if(len < (sizeof(utcDate)-1)) {
	      struct tm utc;
	      utc.tm_isdst = -1; /* Not set by strptime */

	      strncpy(utcDate, (const char*)&packet->payload[offset], len);
	      utcDate[len] = '\0';

	      /* 141021000000Z */
	      if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) {
		flow->protos.stun_ssl.ssl.notAfter = timegm(&utc);
#ifdef DEBUG_TLS
		printf("[CERTIFICATE] notAfter %u [%s]\n",
		       flow->protos.stun_ssl.ssl.notAfter, utcDate);
#endif
	      }
	    }

	    /* Flag the flow if the capture time falls outside the
	       certificate's validity window. */
	    if((time_sec < flow->protos.stun_ssl.ssl.notBefore)
	       || (time_sec > flow->protos.stun_ssl.ssl.notAfter))
	      NDPI_SET_BIT(flow->risk, NDPI_TLS_CERTIFICATE_EXPIRED); /* Certificate expired */
	  }
	}
      }
    } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x1d) && (packet->payload[i+2] == 0x11)) {
      /* Organization OID: 2.5.29.17 (subjectAltName) */
      u_int8_t matched_name = 0;

#ifdef DEBUG_TLS
      printf("******* [TLS] Found subjectAltName\n");
#endif

      i += 3 /* skip the initial patten 55 1D 11 */;
      i++; /* skip the first type, 0x04 == BIT STRING, and jump to it's length */
      if(i < packet->payload_packet_len) {
	i += (packet->payload[i] & 0x80) ? (packet->payload[i] & 0x7F) : 0; /* skip BIT STRING length */
	if(i < packet->payload_packet_len) {
	  i += 2; /* skip the second type, 0x30 == SEQUENCE, and jump to it's length */
	  if(i < packet->payload_packet_len) {
	    i += (packet->payload[i] & 0x80) ? (packet->payload[i] & 0x7F) : 0; /* skip SEQUENCE length */
	    i++;

	    /* Iterate the GeneralNames; tag 0x82 is a dNSName entry. */
	    while(i < packet->payload_packet_len) {
	      if(packet->payload[i] == 0x82) {
		if((i < (packet->payload_packet_len - 1))
		   && ((i + packet->payload[i + 1] + 2) < packet->payload_packet_len)) {
		  u_int8_t len = packet->payload[i + 1];
		  char dNSName[256];

		  i += 2;

		  /* The check "len > sizeof(dNSName) - 1" will be always false. If we add it,
		     the compiler is smart enough to detect it and throws a warning */
		  if(len == 0 /* Looks something went wrong */)
		    break;

		  strncpy(dNSName, (const char*)&packet->payload[i], len);
		  dNSName[len] = '\0';

		  cleanupServerName(dNSName, len);

#if DEBUG_TLS
		  printf("[TLS] dNSName %s [%s]\n", dNSName, flow->protos.stun_ssl.ssl.client_requested_server_name);
#endif
		  /* Compare against the SNI; a leading '*' is treated as a
		     substring wildcard. */
		  if(matched_name == 0) {
		    if((dNSName[0] == '*') && strstr(flow->protos.stun_ssl.ssl.client_requested_server_name, &dNSName[1]))
		      matched_name = 1;
		    else if(strcmp(flow->protos.stun_ssl.ssl.client_requested_server_name, dNSName) == 0)
		      matched_name = 1;
		  }

		  if(flow->protos.stun_ssl.ssl.server_names == NULL)
		    flow->protos.stun_ssl.ssl.server_names = ndpi_strdup(dNSName),
		      flow->protos.stun_ssl.ssl.server_names_len = strlen(dNSName);
		  else {
		    /* Append ",dNSName" to the accumulated list. */
		    u_int16_t dNSName_len = strlen(dNSName);
		    u_int16_t newstr_len = flow->protos.stun_ssl.ssl.server_names_len + dNSName_len + 1;
		    char *newstr = (char*)ndpi_realloc(flow->protos.stun_ssl.ssl.server_names,
						       flow->protos.stun_ssl.ssl.server_names_len+1, newstr_len+1);

		    if(newstr) {
		      flow->protos.stun_ssl.ssl.server_names = newstr;
		      flow->protos.stun_ssl.ssl.server_names[flow->protos.stun_ssl.ssl.server_names_len] = ',';
		      strncpy(&flow->protos.stun_ssl.ssl.server_names[flow->protos.stun_ssl.ssl.server_names_len+1],
			      dNSName, dNSName_len+1);
		      flow->protos.stun_ssl.ssl.server_names[newstr_len] = '\0';
		      flow->protos.stun_ssl.ssl.server_names_len = newstr_len;
		    }
		  }

		  if(!flow->l4.tcp.tls.subprotocol_detected)
		    if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, dNSName, len))
		      flow->l4.tcp.tls.subprotocol_detected = 1;

		  i += len;
		} else {
#if DEBUG_TLS
		  printf("[TLS] Leftover %u bytes", packet->payload_packet_len - i);
#endif
		  break;
		}
	      } else {
		break;
	      }
	    } /* while */

	    if(!matched_name)
	      NDPI_SET_BIT(flow->risk, NDPI_TLS_CERTIFICATE_MISMATCH); /* Certificate mismatch */
	  }
	}
      }
    }
  }

  if(rdn_len) flow->protos.stun_ssl.ssl.subjectDN = ndpi_strdup(rdnSeqBuf);

  /* Identical subject and issuer DN => self-signed certificate. */
  if(flow->protos.stun_ssl.ssl.subjectDN && flow->protos.stun_ssl.ssl.issuerDN
     && (!strcmp(flow->protos.stun_ssl.ssl.subjectDN, flow->protos.stun_ssl.ssl.issuerDN)))
    NDPI_SET_BIT(flow->risk, NDPI_TLS_SELFSIGNED_CERTIFICATE);

#if DEBUG_TLS
  printf("[TLS] %s() SubjectDN [%s]\n", __FUNCTION__, rdnSeqBuf);
#endif
}
| 0
|
96,645
|
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
unsigned int remaining_bytes, int request_type)
{
int rc = -EACCES;
struct smb2_read_req *req = NULL;
rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
if (rc)
return rc;
if (io_parms->tcon->ses->server == NULL)
return -ECONNABORTED;
req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
req->PersistentFileId = io_parms->persistent_fid;
req->VolatileFileId = io_parms->volatile_fid;
req->ReadChannelInfoOffset = 0; /* reserved */
req->ReadChannelInfoLength = 0; /* reserved */
req->Channel = 0; /* reserved */
req->MinimumCount = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
if (request_type & CHAINED_REQUEST) {
if (!(request_type & END_OF_CHAIN)) {
/* 4 for rfc1002 length field */
req->hdr.NextCommand =
cpu_to_le32(get_rfc1002_length(req) + 4);
} else /* END_OF_CHAIN */
req->hdr.NextCommand = 0;
if (request_type & RELATED_REQUEST) {
req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
/*
* Related requests use info from previous read request
* in chain.
*/
req->hdr.SessionId = 0xFFFFFFFF;
req->hdr.TreeId = 0xFFFFFFFF;
req->PersistentFileId = 0xFFFFFFFF;
req->VolatileFileId = 0xFFFFFFFF;
}
}
if (remaining_bytes > io_parms->length)
req->RemainingBytes = cpu_to_le32(remaining_bytes);
else
req->RemainingBytes = 0;
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov[0].iov_len = get_rfc1002_length(req) + 4;
return rc;
}
| 0
|
25,146
|
/*
 * Read and unserialize the metadata blob for a tar-based phar entry.
 *
 * Reads entry->uncompressed_filesize bytes from @fp into a temporary
 * buffer, parses it into entry->metadata, then routes it: the special
 * ".phar/.metadata.bin" entry becomes the archive-level metadata, and a
 * ".phar/.metadata/<name>/.metadata.bin" entry is attached to the manifest
 * entry <name>.  The stream position is always restored to where it was on
 * entry.  Returns SUCCESS or FAILURE (short read / parse error).
 *
 * NOTE(review): phar_parse_metadata() takes &metadata and may move the
 * pointer while scanning — confirm that efree(metadata) below still frees
 * the original allocation on the success path.
 */
static int phar_tar_process_metadata ( phar_entry_info * entry , php_stream * fp TSRMLS_DC ) {
	char * metadata ;
	size_t save = php_stream_tell ( fp ) , read ;
	phar_entry_info * mentry ;
	metadata = ( char * ) safe_emalloc ( 1 , entry -> uncompressed_filesize , 1 ) ;
	read = php_stream_read ( fp , metadata , entry -> uncompressed_filesize ) ;
	if ( read != entry -> uncompressed_filesize ) {
		/* short read: bail out and restore the stream position */
		efree ( metadata ) ;
		php_stream_seek ( fp , save , SEEK_SET ) ;
		return FAILURE ;
	}
	if ( phar_parse_metadata ( & metadata , & entry -> metadata , entry -> uncompressed_filesize TSRMLS_CC ) == FAILURE ) {
		efree ( metadata ) ;
		php_stream_seek ( fp , save , SEEK_SET ) ;
		return FAILURE ;
	}
	if ( entry -> filename_len == sizeof ( ".phar/.metadata.bin" ) - 1 && ! memcmp ( entry -> filename , ".phar/.metadata.bin" , sizeof ( ".phar/.metadata.bin" ) - 1 ) ) {
		/* archive-level metadata: transfer ownership to the phar */
		entry -> phar -> metadata = entry -> metadata ;
		entry -> metadata = NULL ;
	}
	else if ( entry -> filename_len >= sizeof ( ".phar/.metadata/" ) + sizeof ( "/.metadata.bin" ) - 1 && SUCCESS == zend_hash_find ( & ( entry -> phar -> manifest ) , entry -> filename + sizeof ( ".phar/.metadata/" ) - 1 , entry -> filename_len - ( sizeof ( "/.metadata.bin" ) - 1 + sizeof ( ".phar/.metadata/" ) - 1 ) , ( void * ) & mentry ) ) {
		/* per-file metadata: transfer ownership to the manifest entry */
		mentry -> metadata = entry -> metadata ;
		entry -> metadata = NULL ;
	}
	efree ( metadata ) ;
	php_stream_seek ( fp , save , SEEK_SET ) ;
	return SUCCESS ;
}
| 0
|
65,369
|
//! Return reference to the first image of the list \const.
const CImg<T>& front() const {
return *_data;
| 0
|
361,927
|
/* Handle the escape sequence that sets the terminal's icon title only
 * (icon=TRUE, window=FALSE); delegates to the shared title-setting helper. */
vte_sequence_handler_set_icon_title (VteTerminal *terminal, GValueArray *params)
{
	vte_sequence_handler_set_title_internal(terminal, params, TRUE, FALSE);
}
| 0
|
324,753
|
/*
 * Board init for the PPC440EP "Bamboo" reference machine: CPU, interrupt
 * controller, SDRAM, PCI host bridge, serial ports, NICs, then kernel,
 * initrd and device tree loading.
 *
 * Fix: `entry` was used (&entry for load_uimage, entry = elf_entry) but
 * never declared in this function; declare it alongside loadaddr.
 */
static void bamboo_init(MachineState *machine)
{
    ram_addr_t ram_size = machine->ram_size;
    const char *kernel_filename = machine->kernel_filename;
    const char *kernel_cmdline = machine->kernel_cmdline;
    const char *initrd_filename = machine->initrd_filename;
    unsigned int pci_irq_nrs[4] = { 28, 27, 26, 25 };
    MemoryRegion *address_space_mem = get_system_memory();
    MemoryRegion *isa = g_new(MemoryRegion, 1);
    MemoryRegion *ram_memories
        = g_malloc(PPC440EP_SDRAM_NR_BANKS * sizeof(*ram_memories));
    hwaddr ram_bases[PPC440EP_SDRAM_NR_BANKS];
    hwaddr ram_sizes[PPC440EP_SDRAM_NR_BANKS];
    qemu_irq *pic;
    qemu_irq *irqs;
    PCIBus *pcibus;
    PowerPCCPU *cpu;
    CPUPPCState *env;
    uint64_t elf_entry;
    uint64_t elf_lowaddr;
    hwaddr entry = 0;   /* kernel entry point (was missing its declaration) */
    hwaddr loadaddr = 0;
    target_long initrd_size = 0;
    DeviceState *dev;
    int success;
    int i;

    /* Setup CPU. */
    if (machine->cpu_model == NULL) {
        machine->cpu_model = "440EP";
    }
    cpu = POWERPC_CPU(cpu_generic_init(TYPE_POWERPC_CPU, machine->cpu_model));
    if (cpu == NULL) {
        fprintf(stderr, "Unable to initialize CPU!\n");
        exit(1);
    }
    env = &cpu->env;

    if (env->mmu_model != POWERPC_MMU_BOOKE) {
        fprintf(stderr, "MMU model %i not supported by this machine.\n",
            env->mmu_model);
        exit(1);
    }

    qemu_register_reset(main_cpu_reset, cpu);
    ppc_booke_timers_init(cpu, 400000000, 0);
    ppc_dcr_init(env, NULL, NULL);

    /* interrupt controller */
    irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB);
    irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT];
    irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT];
    pic = ppcuic_init(env, irqs, 0x0C0, 0, 1);

    /* SDRAM controller */
    memset(ram_bases, 0, sizeof(ram_bases));
    memset(ram_sizes, 0, sizeof(ram_sizes));
    ram_size = ppc4xx_sdram_adjust(ram_size, PPC440EP_SDRAM_NR_BANKS,
                                   ram_memories,
                                   ram_bases, ram_sizes,
                                   ppc440ep_sdram_bank_sizes);
    /* XXX 440EP's ECC interrupts are on UIC1, but we've only created UIC0. */
    ppc4xx_sdram_init(env, pic[14], PPC440EP_SDRAM_NR_BANKS, ram_memories,
                      ram_bases, ram_sizes, 1);

    /* PCI */
    dev = sysbus_create_varargs(TYPE_PPC4xx_PCI_HOST_BRIDGE,
                                PPC440EP_PCI_CONFIG,
                                pic[pci_irq_nrs[0]], pic[pci_irq_nrs[1]],
                                pic[pci_irq_nrs[2]], pic[pci_irq_nrs[3]],
                                NULL);
    pcibus = (PCIBus *)qdev_get_child_bus(dev, "pci.0");
    if (!pcibus) {
        fprintf(stderr, "couldn't create PCI controller!\n");
        exit(1);
    }

    memory_region_init_alias(isa, NULL, "isa_mmio",
                             get_system_io(), 0, PPC440EP_PCI_IOLEN);
    memory_region_add_subregion(get_system_memory(), PPC440EP_PCI_IO, isa);

    if (serial_hds[0] != NULL) {
        serial_mm_init(address_space_mem, 0xef600300, 0, pic[0],
                       PPC_SERIAL_MM_BAUDBASE, serial_hds[0],
                       DEVICE_BIG_ENDIAN);
    }
    if (serial_hds[1] != NULL) {
        serial_mm_init(address_space_mem, 0xef600400, 0, pic[1],
                       PPC_SERIAL_MM_BAUDBASE, serial_hds[1],
                       DEVICE_BIG_ENDIAN);
    }

    if (pcibus) {
        /* Register network interfaces. */
        for (i = 0; i < nb_nics; i++) {
            /* There are no PCI NICs on the Bamboo board, but there are
             * PCI slots, so we can pick whatever default model we want. */
            pci_nic_init_nofail(&nd_table[i], pcibus, "e1000", NULL);
        }
    }

    /* Load kernel. */
    if (kernel_filename) {
        /* Try u-boot image first, then fall back to ELF. */
        success = load_uimage(kernel_filename, &entry, &loadaddr, NULL,
                              NULL, NULL);
        if (success < 0) {
            success = load_elf(kernel_filename, NULL, NULL, &elf_entry,
                               &elf_lowaddr, NULL, 1, PPC_ELF_MACHINE,
                               0, 0);
            entry = elf_entry;
            loadaddr = elf_lowaddr;
        }
        /* XXX try again as binary */
        if (success < 0) {
            fprintf(stderr, "qemu: could not load kernel '%s'\n",
                    kernel_filename);
            exit(1);
        }
    }

    /* Load initrd. */
    if (initrd_filename) {
        initrd_size = load_image_targphys(initrd_filename, RAMDISK_ADDR,
                                          ram_size - RAMDISK_ADDR);
        if (initrd_size < 0) {
            fprintf(stderr, "qemu: could not load ram disk '%s' at %x\n",
                    initrd_filename, RAMDISK_ADDR);
            exit(1);
        }
    }

    /* If we're loading a kernel directly, we must load the device tree too. */
    if (kernel_filename) {
        if (bamboo_load_device_tree(FDT_ADDR, ram_size, RAMDISK_ADDR,
                                    initrd_size, kernel_cmdline) < 0) {
            fprintf(stderr, "couldn't load device tree\n");
            exit(1);
        }
    }
}
| 1
|
510,840
|
// Drops the bookkeeping entry for the HTTP/2 stream with the given id.
// Erasing an id that is not present is a harmless no-op.
inline void Http2Session::RemoveStream(int32_t id) {
  streams_.erase(id);
}
| 0
|
164,965
|
/*
 * Reset the per-CPU ring buffers of every trace instance that has been
 * flagged for clearing, consuming the clear_trace flag as it goes.
 */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->clear_trace) {
			tr->clear_trace = false;
			tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
			/* The snapshot/max buffer is cleared as well. */
			tracing_reset_online_cpus(&tr->max_buffer);
#endif
		}
	}
}
| 0
|
322,157
|
/*
 * Decode the MicroBlaze "bit field" opcode group and emit the matching TCG
 * ops: shift-right variants (src/srl/sra), sign extension (sext8/sext16),
 * privileged cache ops (wdc/wic), count-leading-zeros (clz) and byte /
 * halfword swaps.  Cache ops raise a privilege exception in user mode.
 */
static void dec_bit(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int op;
    int mem_index = cpu_mmu_index(dc->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src: shift right through the carry flag. */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
            if (dc->rd) {
                t1 = tcg_temp_new();
                read_carry(dc, t1);
                tcg_gen_shli_tl(t1, t1, 31);

                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t1);
                tcg_temp_free(t1);
            }

            /* Update carry. */
            write_carry(dc, t0);
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl. */
            /* 0x41 is the logical shift (shri); 0x01 is the arithmetic
               shift (sari).  Both deposit the shifted-out bit in carry. */
            t0 = tcg_temp_new();
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. */
            tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
            write_carry(dc, t0);
            tcg_temp_free(t0);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc: data-cache op, privileged (no-op for TCG otherwise). */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic: instruction-cache op, privileged. */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz: only valid when the core implements pattern-compare
               instructions; otherwise raise an illegal-opcode exception. */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /*swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                     dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
| 0
|
309,280
|
/* Per-request teardown of the virtual-cwd globals: release the saved cwd
 * state first, then flush the realpath cache. */
static void cwd_globals_dtor(virtual_cwd_globals *cwd_g TSRMLS_DC) /* {{{ */
{
	CWD_STATE_FREE(&cwd_g->cwd);
	realpath_cache_clean(TSRMLS_C);
}
/* }}} */
| 0
|
417,006
|
/*
 * Permission check for an overlayfs inode.
 *
 * With "default permissions" the attributes are refreshed from the real
 * inode and the check is done on the overlay inode only.  Otherwise the
 * overlay inode is checked with the caller's creds and the underlying
 * inode with the mounter's creds.  Must be careful to stay RCU-walk safe
 * (MAY_NOT_BLOCK).
 */
int ovl_permission(struct inode *inode, int mask)
{
	struct ovl_entry *oe = inode->i_private;
	bool is_upper;
	struct dentry *realdentry = ovl_entry_real(oe, &is_upper);
	struct inode *realinode;
	const struct cred *old_cred;
	int err;

	if (ovl_is_default_permissions(inode)) {
		struct kstat stat;
		struct path realpath = { .dentry = realdentry };

		/* vfs_getattr() may block; bail out of RCU walk. */
		if (mask & MAY_NOT_BLOCK)
			return -ECHILD;

		realpath.mnt = ovl_entry_mnt_real(oe, inode, is_upper);

		err = vfs_getattr(&realpath, &stat);
		if (err)
			return err;

		/* The underlying file changed type: stale overlay inode. */
		if ((stat.mode ^ inode->i_mode) & S_IFMT)
			return -ESTALE;

		inode->i_mode = stat.mode;
		inode->i_uid = stat.uid;
		inode->i_gid = stat.gid;

		return generic_permission(inode, mask);
	}

	/* Careful in RCU walk mode */
	realinode = d_inode_rcu(realdentry);
	if (!realinode) {
		WARN_ON(!(mask & MAY_NOT_BLOCK));
		return -ENOENT;
	}

	if (mask & MAY_WRITE) {
		umode_t mode = realinode->i_mode;

		/*
		 * Writes will always be redirected to upper layer, so
		 * ignore lower layer being read-only.
		 *
		 * If the overlay itself is read-only then proceed
		 * with the permission check, don't return EROFS.
		 * This will only happen if this is the lower layer of
		 * another overlayfs.
		 *
		 * If upper fs becomes read-only after the overlay was
		 * constructed return EROFS to prevent modification of
		 * upper layer.
		 */
		if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
			return -EROFS;
	}

	/*
	 * Check overlay inode with the creds of task and underlying inode
	 * with creds of mounter
	 */
	err = generic_permission(inode, mask);
	if (err)
		return err;

	old_cred = ovl_override_creds(inode->i_sb);
	err = __inode_permission(realinode, mask);
	revert_creds(old_cred);
	return err;
}
| 0
|
135,844
|
/* API endpoint /api/v1/alarm_variables: delegates to the single-chart
 * handler with the health variables-to-JSON formatter as callback. */
inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) {
    return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json);
}
| 0
|
363,552
|
irc_server_switch_address (struct t_irc_server *server, int connection)
{
    /*
     * Rotate to the next configured address for the server.  With a single
     * address there is nothing to rotate: just (re)schedule a reconnect if
     * a connection was requested.
     */
    if (server->addresses_count <= 1)
    {
        if (connection)
            irc_server_reconnect_schedule (server);
        return;
    }

    irc_server_set_index_current_address (
        server,
        (server->index_current_address + 1) % server->addresses_count);
    weechat_printf (server->buffer,
                    _("%s: switching address to %s/%d"),
                    IRC_PLUGIN_NAME,
                    server->current_address,
                    server->current_port);

    if (connection)
    {
        /* wrapped back to the first address: wait before retrying,
           otherwise connect immediately */
        if (server->index_current_address == 0)
            irc_server_reconnect_schedule (server);
        else
            irc_server_connect (server);
    }
}
| 0
|
519,768
|
  // Length in bytes of the current query string, narrowed to uint32.
  inline uint32 query_length() const
  {
    return static_cast<uint32>(query_string.length());
  }
| 0
|
226,435
|
// Applies the enabled/disabled preference on the IO thread: records the new
// state, pushes it into the proxy config, updates the config client (fetching
// a fresh config when enabling), and clears cached bad proxies on disable.
void DataReductionProxyIOData::SetProxyPrefs(bool enabled, bool at_startup) {
  DCHECK(io_task_runner_->BelongsToCurrentThread());
  enabled_ = enabled;
  config_->SetProxyConfig(enabled, at_startup);
  if (config_client_) {
    config_client_->SetEnabled(enabled);
    if (enabled)
      config_client_->RetrieveConfig();
  }
  // Turning the proxy off invalidates any remembered bad-proxy state.
  if (!enabled && proxy_config_client_)
    proxy_config_client_->ClearBadProxiesCache();
}
| 0
|
44,047
|
/*
 * Copy a secret-value attribute from one object to another.  The raw key
 * bytes live immediately after the tee_cryp_obj_secret header, hence the
 * "+ 1" pointer arithmetic.  Fails with TEE_ERROR_BAD_STATE if the
 * destination's allocated buffer is too small for the source key.
 */
static TEE_Result op_attr_secret_value_from_obj(void *attr, void *src_attr)
{
	struct tee_cryp_obj_secret *dst = attr;
	struct tee_cryp_obj_secret *src = src_attr;

	if (src->key_size > dst->alloc_size)
		return TEE_ERROR_BAD_STATE;

	memcpy(dst + 1, src + 1, src->key_size);
	dst->key_size = src->key_size;
	return TEE_SUCCESS;
}
| 0
|
341,115
|
static void v9fs_create_post_mksock(V9fsState *s, V9fsCreateState *vs,
                                    int err)
{
    /* Continuation after mksock: on failure report -errno and finish the
     * create; on success apply the requested permission bits. */
    if (err) {
        v9fs_post_create(s, vs, -errno);
        return;
    }

    err = v9fs_do_chmod(s, &vs->fullname, vs->perm & 0777);
    v9fs_create_post_perms(s, vs, err);
}
| 0
|
460,065
|
/*
 * Add an unsigned-integer field item to the protocol tree.
 *
 * Only valid for FT_CHAR / FT_UINT8..FT_UINT32 / FT_FRAMENUM header fields;
 * any other type is a dissector bug.  May return NULL: the
 * CHECK_FOR_NULL_TREE / TRY_TO_FAKE_THIS_ITEM macros contain early returns
 * when the tree is NULL or the item can be faked.
 */
proto_tree_add_uint(proto_tree *tree, int hfindex, tvbuff_t *tvb, gint start,
		    gint length, guint32 value)
{
	proto_item	*pi = NULL;
	header_field_info *hfinfo;

	CHECK_FOR_NULL_TREE(tree);

	TRY_TO_FAKE_THIS_ITEM(tree, hfindex, hfinfo);

	switch (hfinfo->type) {
		case FT_CHAR:
		case FT_UINT8:
		case FT_UINT16:
		case FT_UINT24:
		case FT_UINT32:
		case FT_FRAMENUM:
			pi = proto_tree_add_pi(tree, hfinfo, tvb, start, &length);
			proto_tree_set_uint(PNODE_FINFO(pi), value);
			break;

		default:
			REPORT_DISSECTOR_BUG("field %s is not of type FT_CHAR, FT_UINT8, FT_UINT16, FT_UINT24, FT_UINT32, or FT_FRAMENUM",
					     hfinfo->abbrev);
	}

	return pi;
}
| 0
|
75,981
|
/**
 * @brief Send data to the HTTP client
 * @param[in] connection Structure representing an HTTP connection
 * @param[in] data Pointer to the data to be transmitted
 * @param[in] length Number of bytes to be transmitted
 * @param[in] flags Flags passed down to the transport layer
 * @return Error code
 *
 * With an RTOS, data goes straight out over TLS or the plain socket.
 * Without an RTOS, data is buffered in connection->buffer instead.
 */
error_t httpSend(HttpConnection *connection,
   const void *data, size_t length, uint_t flags)
{
#if (NET_RTOS_SUPPORT == ENABLED)
   error_t error;

#if (HTTP_SERVER_TLS_SUPPORT == ENABLED)
   //Check whether a secure connection is being used
   if(connection->tlsContext != NULL)
   {
      //Use TLS to transmit data to the client
      error = tlsWrite(connection->tlsContext, data, length, NULL, flags);
   }
   else
#endif
   {
      //Transmit data to the client
      error = socketSend(connection->socket, data, length, NULL, flags);
   }

   //Return status code
   return error;
#else
   //Prevent buffer overflow
   //NOTE(review): bufferLen + length could wrap for a huge length —
   //assumes callers pass bounded lengths; TODO confirm
   if((connection->bufferLen + length) > HTTP_SERVER_BUFFER_SIZE)
      return ERROR_BUFFER_OVERFLOW;

   //Copy user data
   osMemcpy(connection->buffer + connection->bufferLen, data, length);
   //Adjust the length of the buffer
   connection->bufferLen += length;

   //Successful processing
   return NO_ERROR;
#endif
}
| 0
|
179,474
|
// Begins a scroll gesture: hit-tests the viewport point to find the scroll
// node (reusing the current node during the inertial/fling phase), and
// reports whether the scroll can proceed on the compositor thread, must go
// to the main thread, or is of unknown disposition (unreliable hit test).
InputHandler::ScrollStatus LayerTreeHostImpl::ScrollBegin(
    ScrollState* scroll_state,
    InputHandler::ScrollInputType type) {
  ScrollStatus scroll_status;
  scroll_status.main_thread_scrolling_reasons =
      MainThreadScrollingReason::kNotScrollingOnMain;
  TRACE_EVENT0("cc", "LayerTreeHostImpl::ScrollBegin");

  ScrollNode* scrolling_node = nullptr;
  bool scroll_on_main_thread = false;

  // During a fling, keep scrolling whatever node was already scrolling.
  if (scroll_state->is_in_inertial_phase())
    scrolling_node = CurrentlyScrollingNode();

  if (!scrolling_node) {
    ClearCurrentlyScrollingNode();

    gfx::Point viewport_point(scroll_state->position_x(),
                              scroll_state->position_y());

    // Hit test in device (physical pixel) coordinates.
    gfx::PointF device_viewport_point = gfx::ScalePoint(
        gfx::PointF(viewport_point), active_tree_->device_scale_factor());
    LayerImpl* layer_impl =
        active_tree_->FindLayerThatIsHitByPoint(device_viewport_point);

    if (layer_impl) {
      // An ambiguous hit test forces the decision to the main thread.
      if (!IsInitialScrollHitTestReliable(layer_impl, device_viewport_point)) {
        scroll_status.thread = SCROLL_UNKNOWN;
        scroll_status.main_thread_scrolling_reasons =
            MainThreadScrollingReason::kFailedHitTest;
        return scroll_status;
      }
    }

    auto* scrolling_layer = FindScrollLayerForDeviceViewportPoint(
        device_viewport_point, type, layer_impl, &scroll_on_main_thread,
        &scroll_status.main_thread_scrolling_reasons);

    ScrollTree& scroll_tree = active_tree_->property_trees()->scroll_tree;
    scrolling_node =
        scrolling_layer ? scroll_tree.Node(scrolling_layer->scroll_tree_index())
                        : nullptr;
  }

  if (scroll_on_main_thread) {
    RecordCompositorSlowScrollMetric(type, MAIN_THREAD);
    scroll_status.thread = SCROLL_ON_MAIN_THREAD;
    return scroll_status;
  } else if (scrolling_node) {
    scroll_affects_scroll_handler_ = active_tree_->have_scroll_event_handlers();
  }

  return ScrollBeginImpl(scroll_state, scrolling_node, type);
}
| 0
|
323,652
|
/*
 * Deinterleave the chroma samples of a UYVY (U0 Y0 V0 Y1 ...) line into
 * separate U and V planes.  The MMX path processes 8 source bytes per
 * iteration with packed masks/shuffles; the C fallback picks bytes 0 and 2
 * of each 4-byte group.  src1 and src2 must point to the same line
 * (asserted below); src2 exists only to match the generic toUV signature.
 */
static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4           \n\t"
        "mov                    %0, %%"REG_a"       \n\t"
        "1:                                         \n\t"
        "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
        "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
        "pand                %%mm4, %%mm0           \n\t"
        "pand                %%mm4, %%mm1           \n\t"
        "packuswb            %%mm1, %%mm0           \n\t"
        "movq                %%mm0, %%mm1           \n\t"
        "psrlw                  $8, %%mm0           \n\t"
        "pand                %%mm4, %%mm1           \n\t"
        "packuswb            %%mm0, %%mm0           \n\t"
        "packuswb            %%mm1, %%mm1           \n\t"
        "movd                %%mm0, (%3, %%"REG_a") \n\t"
        "movd                %%mm1, (%2, %%"REG_a") \n\t"
        "add                    $4, %%"REG_a"       \n\t"
        " js                    1b                  \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++) {
        dstU[i]= src1[4*i + 0];
        dstV[i]= src1[4*i + 2];
    }
#endif
    /* NOTE(review): checked only after the work is done; would be clearer
       at function entry — behavior unchanged here. */
    assert(src1 == src2);
}
| 0
|
331,736
|
/*
 * Create and wire up a PCI Cirrus CLGD-5446 VGA adapter: registers the PCI
 * device, fills in its config space (vendor/device id, class codes),
 * initializes the common VGA state and Cirrus extensions, attaches a
 * graphic console, and maps BAR0 (linear framebuffer) and, for the 5446,
 * BAR1 (memory-mapped I/O).
 */
void pci_cirrus_vga_init(PCIBus *bus, DisplayState *ds, uint8_t *vga_ram_base,
                         unsigned long vga_ram_offset, int vga_ram_size)
{
    PCICirrusVGAState *d;
    uint8_t *pci_conf;
    CirrusVGAState *s;
    int device_id;

    device_id = CIRRUS_ID_CLGD5446;

    /* setup PCI configuration registers */
    d = (PCICirrusVGAState *)pci_register_device(bus, "Cirrus VGA",
                                                 sizeof(PCICirrusVGAState),
                                                 -1, NULL, NULL);
    pci_conf = d->dev.config;
    pci_conf[0x00] = (uint8_t) (PCI_VENDOR_CIRRUS & 0xff);
    pci_conf[0x01] = (uint8_t) (PCI_VENDOR_CIRRUS >> 8);
    pci_conf[0x02] = (uint8_t) (device_id & 0xff);
    pci_conf[0x03] = (uint8_t) (device_id >> 8);
    pci_conf[0x04] = PCI_COMMAND_IOACCESS | PCI_COMMAND_MEMACCESS;
    pci_conf[0x0a] = PCI_CLASS_SUB_VGA;
    pci_conf[0x0b] = PCI_CLASS_BASE_DISPLAY;
    pci_conf[0x0e] = PCI_CLASS_HEADERTYPE_00h;

    /* setup VGA */
    s = &d->cirrus_vga;
    vga_common_init((VGAState *)s,
                    ds, vga_ram_base, vga_ram_offset, vga_ram_size);
    cirrus_init_common(s, device_id, 1);

    s->console = graphic_console_init(s->ds, s->update, s->invalidate,
                                      s->screen_dump, s->text_update, s);

    s->pci_dev = (PCIDevice *)d;

    /* setup memory space */
    /* memory #0 LFB */
    /* memory #1 memory-mapped I/O */
    /* XXX: s->vram_size must be a power of two */
    pci_register_io_region((PCIDevice *)d, 0, 0x2000000,
                           PCI_ADDRESS_SPACE_MEM_PREFETCH, cirrus_pci_lfb_map);
    if (device_id == CIRRUS_ID_CLGD5446) {
        pci_register_io_region((PCIDevice *)d, 1, CIRRUS_PNPMMIO_SIZE,
                               PCI_ADDRESS_SPACE_MEM, cirrus_pci_mmio_map);
    }
    /* XXX: ROM BIOS */
}
| 0
|
5,268
|
// Reference CPU implementation of the 2-D morphological dilation input
// gradient.  For each output cell, the argmax input position within the
// (dilated) filter window receives the corresponding output gradient; ties
// are broken toward the largest `h * filter_cols + w`, matching the
// max-pooling backward routines.
//
// Fix: when a window lies entirely outside the input (possible when the
// supplied output geometry is inconsistent with the input shape),
// h_in_max/w_in_max keep their clamped-start initialization, which can
// still be >= input_rows/input_cols — the unconditional write into
// in_backprop was then out of bounds.  The accumulation is now guarded.
void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
                typename TTypes<T, 3>::ConstTensor filter,
                typename TTypes<T, 4>::ConstTensor out_backprop,
                int stride_rows, int stride_cols, int rate_rows,
                int rate_cols, int pad_top, int pad_left,
                typename TTypes<T, 4>::Tensor in_backprop) {
  const int batch = input.dimension(0);
  const int input_rows = input.dimension(1);
  const int input_cols = input.dimension(2);
  const int depth = input.dimension(3);

  const int filter_rows = filter.dimension(0);
  const int filter_cols = filter.dimension(1);

  const int output_rows = out_backprop.dimension(1);
  const int output_cols = out_backprop.dimension(2);

  // Initialize gradient with all zeros.
  in_backprop.setZero();

  // This is a reference implementation, likely to be slow.
  // TODO(gpapan): Write multi-threaded implementation.
  // In the case of multiple argmax branches, we only back-propagate along the
  // last branch, i.e., the one with largest value of `h * filter_cols + w`,
  // similarly to the max-pooling backward routines.
  for (int b = 0; b < batch; ++b) {
    for (int h_out = 0; h_out < output_rows; ++h_out) {
      int h_beg = h_out * stride_rows - pad_top;
      for (int w_out = 0; w_out < output_cols; ++w_out) {
        int w_beg = w_out * stride_cols - pad_left;
        for (int d = 0; d < depth; ++d) {
          T cur_val = Eigen::NumTraits<T>::lowest();
          int h_in_max = (h_beg < 0) ? 0 : h_beg;
          int w_in_max = (w_beg < 0) ? 0 : w_beg;
          for (int h = 0; h < filter_rows; ++h) {
            const int h_in = h_beg + h * rate_rows;
            if (h_in >= 0 && h_in < input_rows) {
              for (int w = 0; w < filter_cols; ++w) {
                const int w_in = w_beg + w * rate_cols;
                if (w_in >= 0 && w_in < input_cols) {
                  const T val = input(b, h_in, w_in, d) + filter(h, w, d);
                  if (val > cur_val) {
                    cur_val = val;
                    h_in_max = h_in;
                    w_in_max = w_in;
                  }
                }
              }
            }
          }
          // Only accumulate when the argmax position is a valid input cell;
          // otherwise the window never intersected the input and there is
          // nothing to propagate (prevents an out-of-bounds write).
          if (h_in_max < input_rows && w_in_max < input_cols) {
            in_backprop(b, h_in_max, w_in_max, d) +=
                out_backprop(b, h_out, w_out, d);
          }
        }
      }
    }
  }
}
| 1
|
443,408
|
reply_get_NS_rrset(struct reply_info* rep)
{
	/* Scan all rrsets of the reply and return the first NS-type set,
	 * or NULL when the reply carries no NS records. */
	size_t idx;
	for(idx = 0; idx < rep->rrset_count; idx++)
		if(rep->rrsets[idx]->rk.type == htons(LDNS_RR_TYPE_NS))
			return rep->rrsets[idx];
	return NULL;
}
| 0
|
333,628
|
/*
 * Convert a packed RGB24 picture to planar YUV 4:4:4 (full per-pixel
 * chroma, CCIR/ITU-R 601 coefficients).
 *
 * NOTE(review): the inner loop never advances `p` explicitly — this relies
 * on the RGB_IN macro reading and stepping the source pointer by BPP;
 * confirm against the macro definition.
 */
static void rgb24_to_yuv444p(AVPicture *dst, AVPicture *src,
                             int width, int height)
{
    int src_wrap, x, y;
    int r, g, b;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;

    lum = dst->data[0];
    cb = dst->data[1];
    cr = dst->data[2];

    /* bytes to skip at each source row end to reach the next row */
    src_wrap = src->linesize[0] - width * BPP;
    p = src->data[0];
    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            RGB_IN(r, g, b, p);
            lum[0] = RGB_TO_Y_CCIR(r, g, b);
            cb[0] = RGB_TO_U_CCIR(r, g, b, 0);
            cr[0] = RGB_TO_V_CCIR(r, g, b, 0);
            cb++;
            cr++;
            lum++;
        }
        p += src_wrap;
        /* advance each destination plane past its row padding */
        lum += dst->linesize[0] - width;
        cb += dst->linesize[1] - width;
        cr += dst->linesize[2] - width;
    }
}
| 0
|
445,882
|
/*
 * Drop one reference on a presentation context; when the count reaches
 * zero, detach its mapped geometry callbacks, release the geometry, and
 * free every owned resource (H.264 decoder, sample stream, surface,
 * pooled surface data, YUV context) before freeing the struct itself.
 */
static void PresentationContext_unref(PresentationContext* presentation)
{
	VideoClientContextPriv* priv;
	MAPPED_GEOMETRY* geometry;

	if (!presentation)
		return;

	if (InterlockedDecrement(&presentation->refCounter) != 0)
		return;

	geometry = presentation->geometry;
	if (geometry)
	{
		/* Unhook our callbacks before releasing the geometry so no
		 * update/clear can fire into freed state. */
		geometry->MappedGeometryUpdate = NULL;
		geometry->MappedGeometryClear = NULL;
		geometry->custom = NULL;
		mappedGeometryUnref(geometry);
	}

	priv = presentation->video->priv;

	h264_context_free(presentation->h264);
	Stream_Free(presentation->currentSample, TRUE);
	presentation->video->deleteSurface(presentation->video, presentation->surface);
	BufferPool_Return(priv->surfacePool, presentation->surfaceData);
	yuv_context_free(presentation->yuv);
	free(presentation);
}
| 0
|
488,986
|
/*
 * Decide whether the RR we are looking at must carry an RRSIG signature.
 * Used for positive responses.
 *
 * Returns true/false, or a negative errno-style error. The decision is
 * based on the auxiliary DNSSEC transactions (DS/SOA lookups) that were
 * started for this transaction.
 */
static int dns_transaction_requires_rrsig(DnsTransaction *t, DnsResourceRecord *rr) {
        int r;

        assert(t);
        assert(rr);

        /* Checks if the RR we are looking for must be signed with an
         * RRSIG. This is used for positive responses. */

        if (t->scope->dnssec_mode == DNSSEC_NO)
                return false;

        /* Pseudo types (OPT, ANY, ...) cannot meaningfully be signed. */
        if (dns_type_is_pseudo(rr->key->type))
                return -EINVAL;

        /* A negative trust anchor marks the name as known-unsigned. */
        r = dns_transaction_negative_trust_anchor_lookup(t, dns_resource_key_name(rr->key));
        if (r < 0)
                return r;
        if (r > 0)
                return false;

        switch (rr->key->type) {

        case DNS_TYPE_RRSIG:
                /* RRSIGs are the signatures themselves, they need no signing. */
                return false;

        case DNS_TYPE_SOA:
        case DNS_TYPE_NS: {
                DnsTransaction *dt;
                Iterator i;

                /* For SOA or NS RRs we look for a matching DS transaction */

                SET_FOREACH(dt, t->dnssec_transactions, i) {

                        if (dt->key->class != rr->key->class)
                                continue;
                        if (dt->key->type != DNS_TYPE_DS)
                                continue;

                        r = dns_name_equal(dns_resource_key_name(dt->key), dns_resource_key_name(rr->key));
                        if (r < 0)
                                return r;
                        if (r == 0)
                                continue;

                        /* We found a DS transactions for the SOA/NS
                         * RRs we are looking at. If it discovered signed DS
                         * RRs, then we need to be signed, too. */

                        if (!dt->answer_authenticated)
                                return false;

                        return dns_answer_match_key(dt->answer, dt->key, NULL);
                }

                /* We found nothing that proves it is safe to leave
                 * this unauthenticated, hence insist on
                 * authentication. */
                return true;
        }

        case DNS_TYPE_DS:
        case DNS_TYPE_CNAME:
        case DNS_TYPE_DNAME: {
                const char *parent = NULL;
                DnsTransaction *dt;
                Iterator i;

                /*
                 * CNAME/DNAME RRs cannot be located at a zone apex, hence look directly for the parent SOA.
                 *
                 * DS RRs are signed if the parent is signed, hence also look at the parent SOA
                 */

                SET_FOREACH(dt, t->dnssec_transactions, i) {

                        if (dt->key->class != rr->key->class)
                                continue;
                        if (dt->key->type != DNS_TYPE_SOA)
                                continue;

                        if (!parent) {
                                /* Compute the parent name lazily, once. */
                                parent = dns_resource_key_name(rr->key);
                                r = dns_name_parent(&parent);
                                if (r < 0)
                                        return r;
                                if (r == 0) {
                                        if (rr->key->type == DNS_TYPE_DS)
                                                return true;

                                        /* A CNAME/DNAME without a parent? That's sooo weird. */
                                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                                               "Transaction %" PRIu16 " claims CNAME/DNAME at root. Refusing.", t->id);
                                }
                        }

                        r = dns_name_equal(dns_resource_key_name(dt->key), parent);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                continue;

                        return t->answer_authenticated;
                }

                return true;
        }

        default: {
                DnsTransaction *dt;
                Iterator i;

                /* Any other kind of RR (including DNSKEY/NSEC/NSEC3). Let's see if our SOA lookup was authenticated */

                SET_FOREACH(dt, t->dnssec_transactions, i) {

                        if (dt->key->class != rr->key->class)
                                continue;
                        if (dt->key->type != DNS_TYPE_SOA)
                                continue;

                        r = dns_name_equal(dns_resource_key_name(dt->key), dns_resource_key_name(rr->key));
                        if (r < 0)
                                return r;
                        if (r == 0)
                                continue;

                        /* We found the transaction that was supposed to find
                         * the SOA RR for us. It was successful, but found no
                         * RR for us. This means we are not at a zone cut. In
                         * this case, we require authentication if the SOA
                         * lookup was authenticated too. */

                        return t->answer_authenticated;
                }

                return true;
        }}
}
| 0
|
163,275
|
// Returns the cached fullscreen state of this window.
bool DesktopWindowTreeHostX11::IsFullscreen() const {
  return is_fullscreen_;
}
| 0
|
87,134
|
/*
 * Post-process revs->commits for history simplification: iteratively map
 * each commit to its simplified replacement and then rebuild the commit
 * list keeping only commits that simplify to themselves.
 *
 * No-op unless pruning (path limiting) is in effect.
 */
static void simplify_merges(struct rev_info *revs)
{
	struct commit_list *list, *next;
	struct commit_list *yet_to_do, **tail;
	struct commit *commit;

	if (!revs->prune)
		return;

	/* feed the list reversed */
	yet_to_do = NULL;
	for (list = revs->commits; list; list = next) {
		commit = list->item;
		next = list->next;
		/*
		 * Do not free(list) here yet; the original list
		 * is used later in this function.
		 */
		commit_list_insert(commit, &yet_to_do);
	}

	/* Iterate until simplify_one() stops deferring work. */
	while (yet_to_do) {
		list = yet_to_do;
		yet_to_do = NULL;
		tail = &yet_to_do;
		while (list) {
			commit = pop_commit(&list);
			tail = simplify_one(revs, commit, tail);
		}
	}

	/* clean up the result, removing the simplified ones */
	list = revs->commits;
	revs->commits = NULL;
	tail = &revs->commits;
	while (list) {
		struct merge_simplify_state *st;

		commit = pop_commit(&list);
		st = locate_simplify_state(revs, commit);
		/* keep only commits that are their own simplification */
		if (st->simplified == commit)
			tail = &commit_list_insert(commit, tail)->next;
	}
}
| 0
|
254,013
|
// Returns the number of live WebFrame instances, as tracked by the
// g_frame_count global counter.
int WebFrame::InstanceCount() {
  return g_frame_count;
}
| 0
|
113,609
|
/*
 * Fill in an on-disk HFS+ catalog record (folder or file) for @inode,
 * identified by catalog node id @cnid.
 *
 * Returns the number of bytes of @entry that were populated
 * (sizeof the folder or file record).
 *
 * When @cnid differs from the inode number the record describes a hard
 * link, and is stamped with the hidden directory's create date and the
 * link id instead of the regular file metadata.
 */
static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
		u32 cnid, struct inode *inode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);

	if (S_ISDIR(inode->i_mode)) {
		struct hfsplus_cat_folder *folder;

		folder = &entry->folder;
		memset(folder, 0, sizeof(*folder));
		folder->type = cpu_to_be16(HFSPLUS_FOLDER);
		folder->id = cpu_to_be32(inode->i_ino);
		/* all four timestamps start out identical: "now" */
		HFSPLUS_I(inode)->create_date =
			folder->create_date =
			folder->content_mod_date =
			folder->attribute_mod_date =
			folder->access_date = hfsp_now2mt();
		hfsplus_cat_set_perms(inode, &folder->permissions);
		if (inode == sbi->hidden_dir)
			/* invisible and namelocked */
			folder->user_info.frFlags = cpu_to_be16(0x5000);
		return sizeof(*folder);
	} else {
		struct hfsplus_cat_file *file;

		file = &entry->file;
		memset(file, 0, sizeof(*file));
		file->type = cpu_to_be16(HFSPLUS_FILE);
		file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
		file->id = cpu_to_be32(cnid);
		HFSPLUS_I(inode)->create_date =
			file->create_date =
			file->content_mod_date =
			file->attribute_mod_date =
			file->access_date = hfsp_now2mt();
		if (cnid == inode->i_ino) {
			/* regular file (or symlink) record */
			hfsplus_cat_set_perms(inode, &file->permissions);
			if (S_ISLNK(inode->i_mode)) {
				file->user_info.fdType =
					cpu_to_be32(HFSP_SYMLINK_TYPE);
				file->user_info.fdCreator =
					cpu_to_be32(HFSP_SYMLINK_CREATOR);
			} else {
				file->user_info.fdType =
					cpu_to_be32(sbi->type);
				file->user_info.fdCreator =
					cpu_to_be32(sbi->creator);
			}
			if (HFSPLUS_FLG_IMMUTABLE &
					(file->permissions.rootflags |
					file->permissions.userflags))
				file->flags |=
					cpu_to_be16(HFSPLUS_FILE_LOCKED);
		} else {
			/* hard link record pointing into the hidden dir */
			file->user_info.fdType =
				cpu_to_be32(HFSP_HARDLINK_TYPE);
			file->user_info.fdCreator =
				cpu_to_be32(HFSP_HFSPLUS_CREATOR);
			file->user_info.fdFlags =
				cpu_to_be16(0x100);
			file->create_date =
				HFSPLUS_I(sbi->hidden_dir)->create_date;
			file->permissions.dev =
				cpu_to_be32(HFSPLUS_I(inode)->linkid);
		}
		return sizeof(*file);
	}
}
| 0
|
176,695
|
/*
 * Parse one intra macroblock (I_4x4 or I_16x16) from a CABAC-coded slice.
 *
 * Decodes, in bitstream order: optional transform_size_8x8_flag, luma
 * intra prediction modes, chroma prediction mode, CBP (explicit for
 * I_4x4, table-derived from mb_type for I_16x16), mb_qp_delta, the luma
 * DC block (I_16x16 only, with inverse Hadamard + scaling applied during
 * parsing to preserve precision), and finally the 4x4 residuals.
 *
 * Updates the per-MB context structures used for CABAC context modelling
 * of later macroblocks. Returns OK, or an error code on malformed input
 * or bitstream overrun.
 *
 * NOTE(review): decode order and context updates here must mirror the
 * H.264 CABAC spec exactly — do not reorder statements.
 */
WORD32 ih264d_parse_imb_cabac(dec_struct_t * ps_dec,
                              dec_mb_info_t * ps_cur_mb_info,
                              UWORD8 u1_mb_type)
{
    WORD8 i1_delta_qp;
    UWORD8 u1_cbp;
    UWORD8 u1_offset;
    /* Variables for handling Cabac contexts */
    ctxt_inc_mb_info_t *p_curr_ctxt = ps_dec->ps_curr_ctxt_mb_info;
    ctxt_inc_mb_info_t *ps_left_ctxt = ps_dec->p_left_ctxt_mb_info;
    dec_bit_stream_t * const ps_bitstrm = ps_dec->ps_bitstrm;
    bin_ctxt_model_t *p_bin_ctxt;

    UWORD8 u1_intra_chrom_pred_mode;
    UWORD8 u1_dc_block_flag = 0;
    WORD32 ret;

    ps_cur_mb_info->u1_yuv_dc_block_flag = 0;

    /* Left neighbour unavailable: treat its DC coded-block flags as set. */
    if(ps_left_ctxt == ps_dec->ps_def_ctxt_mb_info)
    {
        ps_dec->pu1_left_yuv_dc_csbp[0] = 0xf;
    }

    /* In P/B slices an intra MB contributes zero MV / ref-idx context. */
    if(ps_dec->ps_cur_slice->u1_slice_type != I_SLICE)
    {
        WORD32 *pi4_buf;
        WORD8 *pi1_buf;
        MEMSET_16BYTES(&ps_dec->pu1_left_mv_ctxt_inc[0][0], 0);
        *((UWORD32 *)ps_dec->pi1_left_ref_idx_ctxt_inc) = 0;
        MEMSET_16BYTES(p_curr_ctxt->u1_mv, 0);

        pi1_buf = p_curr_ctxt->i1_ref_idx;
        pi4_buf = (WORD32 *)pi1_buf;
        *pi4_buf = 0;
    }

    if(u1_mb_type == I_4x4_MB)
    {
        ps_cur_mb_info->ps_curmb->u1_mb_type = I_4x4_MB;
        p_curr_ctxt->u1_mb_type = CAB_I4x4;
        u1_offset = 0;

        ps_cur_mb_info->u1_tran_form8x8 = 0;
        ps_cur_mb_info->ps_curmb->u1_tran_form8x8 = 0;

        /*--------------------------------------------------------------------*/
        /* Read transform_size_8x8_flag if present                            */
        /*--------------------------------------------------------------------*/
        if(ps_dec->s_high_profile.u1_transform8x8_present)
        {
            ps_cur_mb_info->u1_tran_form8x8 = ih264d_parse_transform8x8flag_cabac(
                            ps_dec, ps_cur_mb_info);
            COPYTHECONTEXT("transform_size_8x8_flag", ps_cur_mb_info->u1_tran_form8x8);
            p_curr_ctxt->u1_transform8x8_ctxt = ps_cur_mb_info->u1_tran_form8x8;
            ps_cur_mb_info->ps_curmb->u1_tran_form8x8 = ps_cur_mb_info->u1_tran_form8x8;
        }
        else
        {
            p_curr_ctxt->u1_transform8x8_ctxt = 0;
        }

        /*--------------------------------------------------------------------*/
        /* Read the IntraPrediction modes for LUMA                            */
        /*--------------------------------------------------------------------*/
        if (!ps_cur_mb_info->u1_tran_form8x8)
        {
            /* 16 4x4 modes: 16 bytes prev-flags + 16 bytes rem-modes */
            UWORD8 *pu1_temp;
            ih264d_read_intra_pred_modes_cabac(
                            ps_dec,
                            ((UWORD8 *)ps_dec->pv_parse_tu_coeff_data),
                            ((UWORD8 *)ps_dec->pv_parse_tu_coeff_data+16),
                            ps_cur_mb_info->u1_tran_form8x8);
            pu1_temp = (UWORD8 *)ps_dec->pv_parse_tu_coeff_data;
            pu1_temp += 32;
            ps_dec->pv_parse_tu_coeff_data = (void *)pu1_temp;
        }
        else
        {
            /* 4 8x8 modes: 4 bytes prev-flags + 4 bytes rem-modes */
            UWORD8 *pu1_temp;
            ih264d_read_intra_pred_modes_cabac(
                            ps_dec,
                            ((UWORD8 *)ps_dec->pv_parse_tu_coeff_data),
                            ((UWORD8 *)ps_dec->pv_parse_tu_coeff_data+4),
                            ps_cur_mb_info->u1_tran_form8x8);
            pu1_temp = (UWORD8 *)ps_dec->pv_parse_tu_coeff_data;
            pu1_temp += 8;
            ps_dec->pv_parse_tu_coeff_data = (void *)pu1_temp;
        }

        /*--------------------------------------------------------------------*/
        /* Read the IntraPrediction mode for CHROMA                           */
        /*--------------------------------------------------------------------*/
        u1_intra_chrom_pred_mode = ih264d_parse_chroma_pred_mode_cabac(ps_dec);
        COPYTHECONTEXT("intra_chroma_pred_mode", u1_intra_chrom_pred_mode);
        p_curr_ctxt->u1_intra_chroma_pred_mode = ps_cur_mb_info->u1_chroma_pred_mode =
                        u1_intra_chrom_pred_mode;

        /*--------------------------------------------------------------------*/
        /* Read the Coded block pattern                                       */
        /*--------------------------------------------------------------------*/
        u1_cbp = ih264d_parse_ctx_cbp_cabac(ps_dec);
        COPYTHECONTEXT("coded_block_pattern", u1_cbp);
        ps_cur_mb_info->u1_cbp = u1_cbp;
        p_curr_ctxt->u1_cbp = u1_cbp;

        /*--------------------------------------------------------------------*/
        /* Read mb_qp_delta                                                   */
        /*--------------------------------------------------------------------*/
        if(ps_cur_mb_info->u1_cbp)
        {
            ret = ih264d_parse_mb_qp_delta_cabac(ps_dec, &i1_delta_qp);
            if(ret != OK)
                return ret;
            COPYTHECONTEXT("mb_qp_delta", i1_delta_qp);
            if(i1_delta_qp != 0)
            {
                ret = ih264d_update_qp(ps_dec, i1_delta_qp);
                if(ret != OK)
                    return ret;
            }
        }
        else
            ps_dec->i1_prev_mb_qp_delta = 0;

        /* I_4x4 has no luma DC block: clear its coded-block flag bit. */
        p_curr_ctxt->u1_yuv_dc_csbp &= 0xFE;
    }
    else
    {
        u1_offset = 1;
        ps_cur_mb_info->ps_curmb->u1_mb_type = I_16x16_MB;
        p_curr_ctxt->u1_mb_type = CAB_I16x16;
        ps_cur_mb_info->u1_tran_form8x8 = 0;
        p_curr_ctxt->u1_transform8x8_ctxt = 0;
        ps_cur_mb_info->ps_curmb->u1_tran_form8x8 = 0;

        /*--------------------------------------------------------------------*/
        /* Read the IntraPrediction mode for CHROMA                           */
        /*--------------------------------------------------------------------*/
        u1_intra_chrom_pred_mode = ih264d_parse_chroma_pred_mode_cabac(ps_dec);

        if(u1_intra_chrom_pred_mode > 3)
            return ERROR_CHROMA_PRED_MODE;

        COPYTHECONTEXT("Chroma intra_chroma_pred_mode pred mode", u1_intra_chrom_pred_mode);
        p_curr_ctxt->u1_intra_chroma_pred_mode = ps_cur_mb_info->u1_chroma_pred_mode =
                        u1_intra_chrom_pred_mode;

        /*--------------------------------------------------------------------*/
        /* Read the Coded block pattern                                       */
        /*--------------------------------------------------------------------*/
        /* For I_16x16 the CBP is implied by the mb_type, not coded. */
        u1_cbp = gau1_ih264d_cbp_tab[(u1_mb_type - 1) >> 2];
        ps_cur_mb_info->u1_cbp = u1_cbp;
        p_curr_ctxt->u1_cbp = u1_cbp;

        /*--------------------------------------------------------------------*/
        /* Read mb_qp_delta                                                   */
        /*--------------------------------------------------------------------*/
        ret = ih264d_parse_mb_qp_delta_cabac(ps_dec, &i1_delta_qp);
        if(ret != OK)
            return ret;
        COPYTHECONTEXT("mb_qp_delta", i1_delta_qp);
        if(i1_delta_qp != 0)
        {
            ret = ih264d_update_qp(ps_dec, i1_delta_qp);
            if(ret != OK)
                return ret;
        }

        {
            WORD16 i_scaleFactor;
            WORD16* pi2_scale_matrix_ptr;
            /*******************************************************************/
            /* for luma DC coefficients the scaling is done during the parsing */
            /* to preserve the precision                                       */
            /*******************************************************************/
            if(ps_dec->s_high_profile.u1_scaling_present)
            {
                pi2_scale_matrix_ptr =
                                ps_dec->s_high_profile.i2_scalinglist4x4[0];
            }
            else
            {
                i_scaleFactor = 16;
                pi2_scale_matrix_ptr = &i_scaleFactor;
            }

            {
                ctxt_inc_mb_info_t *ps_top_ctxt = ps_dec->p_top_ctxt_mb_info;
                UWORD8 uc_a, uc_b;
                UWORD32 u4_ctx_inc;

                INC_SYM_COUNT(&(ps_dec->s_cab_dec_env));

                /* if MbAddrN not available then CondTermN = 1 */
                uc_b = ((ps_top_ctxt->u1_yuv_dc_csbp) & 0x01);

                /* if MbAddrN not available then CondTermN = 1 */
                uc_a = ((ps_dec->pu1_left_yuv_dc_csbp[0]) & 0x01);

                u4_ctx_inc = (uc_a + (uc_b << 1));

                {
                    WORD16 pi2_dc_coef[16];
                    tu_sblk4x4_coeff_data_t *ps_tu_4x4 =
                                    (tu_sblk4x4_coeff_data_t *)ps_dec->pv_parse_tu_coeff_data;
                    WORD16 *pi2_coeff_block =
                                    (WORD16 *)ps_dec->pv_parse_tu_coeff_data;

                    p_bin_ctxt = (ps_dec->p_cbf_t[LUMA_DC_CTXCAT]) + u4_ctx_inc;

                    u1_dc_block_flag =
                                    ih264d_read_coeff4x4_cabac(ps_bitstrm,
                                                               LUMA_DC_CTXCAT,
                                                               ps_dec->p_significant_coeff_flag_t[LUMA_DC_CTXCAT],
                                                               ps_dec, p_bin_ctxt);

                    /* Store coded_block_flag */
                    p_curr_ctxt->u1_yuv_dc_csbp &= 0xFE;
                    p_curr_ctxt->u1_yuv_dc_csbp |= u1_dc_block_flag;

                    if(u1_dc_block_flag)
                    {
                        WORD32 pi4_tmp[16];
                        memset(pi2_dc_coef,0,sizeof(pi2_dc_coef));
                        ih264d_unpack_coeff4x4_dc_4x4blk(ps_tu_4x4,
                                                         pi2_dc_coef,
                                                         ps_dec->pu1_inv_scan);

                        PROFILE_DISABLE_IQ_IT_RECON()
                        ps_dec->pf_ihadamard_scaling_4x4(pi2_dc_coef,
                                                         pi2_coeff_block,
                                                         ps_dec->pu2_quant_scale_y,
                                                         (UWORD16 *)pi2_scale_matrix_ptr,
                                                         ps_dec->u1_qp_y_div6,
                                                         pi4_tmp);
                        pi2_coeff_block += 16;
                        ps_dec->pv_parse_tu_coeff_data = (void *)pi2_coeff_block;
                        SET_BIT(ps_cur_mb_info->u1_yuv_dc_block_flag,0);
                    }
                }
            }
        }
    }

    /* Propagate the luma DC coded-block flag to the "left" context for
     * the next macroblock. */
    ps_dec->pu1_left_yuv_dc_csbp[0] &= 0x6;
    ps_dec->pu1_left_yuv_dc_csbp[0] |= u1_dc_block_flag;

    ih264d_parse_residual4x4_cabac(ps_dec, ps_cur_mb_info, u1_offset);
    if(EXCEED_OFFSET(ps_bitstrm))
        return ERROR_EOB_TERMINATE_T;

    return OK;
}
| 0
|
11,224
|
// Parse one BlockGroup element whose payload starts at |pos| and spans
// |payload_size| bytes. Walks the child elements, extracting DiscardPadding
// (ID 0x35A2) and validating the contained Block (ID 0x21), then registers
// the group via CreateBlock.
//
// Returns 0 on success, E_BUFFER_NOT_FULL (with |len| set to the number of
// bytes needed) when more input is required, E_FILE_FORMAT_INVALID on
// malformed input, or a negative reader error. On E_BUFFER_NOT_FULL the
// caller is expected to retry with the same |pos|.
long Cluster::ParseBlockGroup(long long payload_size, long long& pos,
                              long& len) {
  const long long payload_start = pos;
  const long long payload_stop = pos + payload_size;

  IMkvReader* const pReader = m_pSegment->m_pReader;

  long long total, avail;

  long status = pReader->Length(&total, &avail);

  if (status < 0)  // error
    return status;

  assert((total < 0) || (avail <= total));

  if ((total >= 0) && (payload_stop > total))
    return E_FILE_FORMAT_INVALID;

  if (payload_stop > avail) {
    len = static_cast<long>(payload_size);
    return E_BUFFER_NOT_FULL;
  }

  long long discard_padding = 0;

  // Iterate over the BlockGroup's child elements.
  while (pos < payload_stop) {
    // Read the child element ID (variable-length EBML integer).
    if ((pos + 1) > avail) {
      len = 1;
      return E_BUFFER_NOT_FULL;
    }

    long long result = GetUIntLength(pReader, pos, len);

    if (result < 0)  // error
      return static_cast<long>(result);

    if (result > 0)  // weird
      return E_BUFFER_NOT_FULL;

    if ((pos + len) > payload_stop)
      return E_FILE_FORMAT_INVALID;

    if ((pos + len) > avail)
      return E_BUFFER_NOT_FULL;

    const long long id = ReadUInt(pReader, pos, len);

    if (id < 0)  // error
      return static_cast<long>(id);

    if (id == 0)  // not a value ID
      return E_FILE_FORMAT_INVALID;

    pos += len;  // consume ID field

    // Read the child element size.
    if ((pos + 1) > avail) {
      len = 1;
      return E_BUFFER_NOT_FULL;
    }

    result = GetUIntLength(pReader, pos, len);

    if (result < 0)  // error
      return static_cast<long>(result);

    if (result > 0)  // weird
      return E_BUFFER_NOT_FULL;

    if ((pos + len) > payload_stop)
      return E_FILE_FORMAT_INVALID;

    if ((pos + len) > avail)
      return E_BUFFER_NOT_FULL;

    const long long size = ReadUInt(pReader, pos, len);

    if (size < 0)  // error
      return static_cast<long>(size);

    pos += len;  // consume size field

    if (pos > payload_stop)
      return E_FILE_FORMAT_INVALID;

    if (size == 0)  // weird
      continue;

    // Unknown-size children are not allowed inside a BlockGroup.
    const long long unknown_size = (1LL << (7 * len)) - 1;

    if (size == unknown_size)
      return E_FILE_FORMAT_INVALID;

    if (id == 0x35A2) {  // DiscardPadding
      status = UnserializeInt(pReader, pos, size, discard_padding);

      if (status < 0)  // error
        return status;
    }

    if (id != 0x21) {  // sub-part of BlockGroup is not a Block
      pos += size;  // consume sub-part of block group

      if (pos > payload_stop)
        return E_FILE_FORMAT_INVALID;

      continue;
    }

    // Found the Block child: validate its header (track number,
    // timecode, flags) without consuming the frame data yet.
    const long long block_stop = pos + size;

    if (block_stop > payload_stop)
      return E_FILE_FORMAT_INVALID;

    if ((pos + 1) > avail) {
      len = 1;
      return E_BUFFER_NOT_FULL;
    }

    result = GetUIntLength(pReader, pos, len);

    if (result < 0)  // error
      return static_cast<long>(result);

    if (result > 0)  // weird
      return E_BUFFER_NOT_FULL;

    if ((pos + len) > block_stop)
      return E_FILE_FORMAT_INVALID;

    if ((pos + len) > avail)
      return E_BUFFER_NOT_FULL;

    const long long track = ReadUInt(pReader, pos, len);

    if (track < 0)  // error
      return static_cast<long>(track);

    if (track == 0)
      return E_FILE_FORMAT_INVALID;

#if 0
    const Tracks* const pTracks = m_pSegment->GetTracks();
    assert(pTracks);

    const long tn = static_cast<long>(track);

    const Track* const pTrack = pTracks->GetTrackByNumber(tn);

    if (pTrack == NULL)
      return E_FILE_FORMAT_INVALID;
#endif

    pos += len;  // consume track number

    if ((pos + 2) > block_stop)
      return E_FILE_FORMAT_INVALID;

    if ((pos + 2) > avail) {
      len = 2;
      return E_BUFFER_NOT_FULL;
    }

    pos += 2;  // consume timecode

    if ((pos + 1) > block_stop)
      return E_FILE_FORMAT_INVALID;

    if ((pos + 1) > avail) {
      len = 1;
      return E_BUFFER_NOT_FULL;
    }

    unsigned char flags;

    status = pReader->Read(pos, 1, &flags);

    if (status < 0) {  // error or underflow
      len = 1;
      return status;
    }

    ++pos;  // consume flags byte
    assert(pos <= avail);

    if (pos >= block_stop)
      return E_FILE_FORMAT_INVALID;

    // If the block is laced, the full lace description must be available.
    const int lacing = int(flags & 0x06) >> 1;

    if ((lacing != 0) && (block_stop > avail)) {
      len = static_cast<long>(block_stop - pos);
      return E_BUFFER_NOT_FULL;
    }

    pos = block_stop;  // consume block-part of block group
    assert(pos <= payload_stop);
  }

  assert(pos == payload_stop);

  status = CreateBlock(0x20,  // BlockGroup ID
                       payload_start, payload_size, discard_padding);
  if (status != 0)
    return status;

  m_pos = payload_stop;

  return 0;  // success
}
| 1
|
323,046
|
/*
 * Split the requested RAM size across up to nr_banks SDRAM banks, where
 * each bank must be one of the controller-supported sizes listed (in
 * descending preference) in the zero-terminated sdram_bank_sizes[].
 *
 * For each populated bank a memory region is allocated and its base and
 * size recorded in ram_bases[]/ram_sizes[]. Any remainder that cannot be
 * expressed with the supported bank sizes is dropped, with a warning.
 *
 * Returns the RAM size actually mapped (possibly less than requested).
 */
ram_addr_t ppc4xx_sdram_adjust(ram_addr_t ram_size, int nr_banks,
                               MemoryRegion ram_memories[],
                               hwaddr ram_bases[],
                               hwaddr ram_sizes[],
                               const unsigned int sdram_bank_sizes[])
{
    ram_addr_t size_left = ram_size;
    ram_addr_t base = 0;
    int i;
    int j;

    for (i = 0; i < nr_banks; i++) {
        /* Pick the largest supported bank size that still fits. */
        for (j = 0; sdram_bank_sizes[j] != 0; j++) {
            unsigned int bank_size = sdram_bank_sizes[j];

            if (bank_size <= size_left) {
                char name[32];
                snprintf(name, sizeof(name), "ppc4xx.sdram%d", i);
                memory_region_allocate_system_memory(&ram_memories[i], NULL,
                                                     name, bank_size);
                ram_bases[i] = base;
                ram_sizes[i] = bank_size;
                base += bank_size;
                size_left -= bank_size;
                break;
            }
        }
        if (!size_left) {
            /* No need to use the remaining banks. */
            break;
        }
    }

    ram_size -= size_left;
    if (size_left)
        printf("Truncating memory to %d MiB to fit SDRAM controller limits.\n",
               (int)(ram_size >> 20));

    return ram_size;
}
| 1
|
450,464
|
/* Decode the operation header for an OP_LOOKUPP reply from the XDR stream. */
static int decode_lookupp(struct xdr_stream *xdr)
{
	return decode_op_hdr(xdr, OP_LOOKUPP);
}
| 0
|
391,636
|
/**
 * xmlNewReconciliedNs:
 * @doc:   the document holding @tree
 * @tree:  the element where the namespace must be usable
 * @ns:    the namespace definition to reconcile
 *
 * Ensure a namespace equivalent to @ns is in scope at @tree. A
 * declaration already inherited with the same href is reused; otherwise
 * a new declaration is created on @tree, using a prefix derived from
 * @ns (truncated to 20 characters, "default" when @ns has no prefix)
 * with a numeric suffix appended until it does not clash.
 *
 * Returns the usable namespace, or NULL on failure.
 */
xmlNewReconciliedNs(xmlDocPtr doc, xmlNodePtr tree, xmlNsPtr ns) {
    xmlNsPtr found;
    xmlChar candidate[50];
    int attempt = 1;

    if ((tree == NULL) || (tree->type != XML_ELEMENT_NODE)) {
#ifdef DEBUG_TREE
        xmlGenericError(xmlGenericErrorContext,
		"xmlNewReconciliedNs : tree == NULL\n");
#endif
	return(NULL);
    }
    if ((ns == NULL) || (ns->type != XML_NAMESPACE_DECL)) {
#ifdef DEBUG_TREE
        xmlGenericError(xmlGenericErrorContext,
		"xmlNewReconciliedNs : ns == NULL\n");
#endif
	return(NULL);
    }

    /* An inherited declaration with the same href can simply be reused. */
    found = xmlSearchNsByHref(doc, tree, ns->href);
    if (found != NULL)
        return(found);

    /* Build the initial prefix candidate, capped at 20 characters. */
    if (ns->prefix == NULL)
	snprintf((char *) candidate, sizeof(candidate), "default");
    else
	snprintf((char *) candidate, sizeof(candidate), "%.20s",
	         (char *)ns->prefix);

    /* Append an increasing counter until the prefix is unused. */
    found = xmlSearchNs(doc, tree, candidate);
    while (found != NULL) {
        if (attempt > 1000)
            return(NULL);
	if (ns->prefix == NULL)
	    snprintf((char *) candidate, sizeof(candidate), "default%d",
	             attempt++);
	else
	    snprintf((char *) candidate, sizeof(candidate), "%.20s%d",
	             (char *)ns->prefix, attempt++);
	found = xmlSearchNs(doc, tree, candidate);
    }

    /* No clash: declare the namespace on @tree itself. */
    return(xmlNewNs(tree, ns->href, candidate));
}
| 0
|
286,006
|
// Upper bound for a drag image in physical pixels: a platform-specific
// base limit (larger on macOS) scaled by the device scale factor.
static const IntSize MaxDragImageSize(float device_scale_factor) {
#if defined(OS_MACOSX)
  static const IntSize kMaxDragImageSize(400, 400);
#else
  static const IntSize kMaxDragImageSize(200, 200);
#endif
  IntSize scaled_limit = kMaxDragImageSize;
  scaled_limit.Scale(device_scale_factor);
  return scaled_limit;
}
| 0
|
278,708
|
// Records that this video element is about to be painted into a canvas:
// bumps the kVideoInCanvas use counter and notifies the autoplay policy.
// Must only be called on video elements (DCHECKed).
void HTMLMediaElement::VideoWillBeDrawnToCanvas() const {
  DCHECK(IsHTMLVideoElement());
  UseCounter::Count(GetDocument(), WebFeature::kVideoInCanvas);
  autoplay_policy_->VideoWillBeDrawnToCanvas();
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.