idx
int64 | func
string | target
int64 |
|---|---|---|
224,343
|
// Recalculate style lazily, but only when |node| can actually be affected:
// a forced recalc is pending, or some ancestor of |node| (crossing shadow
// boundaries) is marked dirty.
void Document::updateStyleForNodeIfNeeded(Node* node)
{
    // Fast path: nothing anywhere in the document needs a recalc.
    if (!hasPendingForcedStyleRecalc() && !childNeedsStyleRecalc() && !needsStyleRecalc())
        return;

    bool shouldRecalc = hasPendingForcedStyleRecalc();
    if (!shouldRecalc) {
        for (Node* current = node; current; current = current->parentOrShadowHostNode()) {
            if (current->needsStyleRecalc()) {
                shouldRecalc = true;
                break;
            }
        }
    }
    if (shouldRecalc)
        updateStyleIfNeeded();
}
| 0
|
496,555
|
// Render the entry for the volume referenced by iterator |c|, honoring
// the instance's verbosity setting.
std::string index(Volume_iterator c) const
{
    return CI(c, verbose);
}
| 0
|
370,313
|
/*
 * generate-id() XSLT function implementation.
 * With 0 arguments it identifies the context node; with 1 argument it
 * identifies the first node, in document order, of the node-set
 * argument (an empty set yields the empty string). The ID is forged
 * from the node's address offset from its owning document, so it is
 * unique and stable only within a single transformation run.
 */
xsltGenerateIdFunction(xmlXPathParserContextPtr ctxt, int nargs){
xmlNodePtr cur = NULL;
xmlXPathObjectPtr obj = NULL;
long val;
xmlChar str[30];  /* "idp"/"idm" + decimal long + NUL fits in 30 */
xmlDocPtr doc;
if (nargs == 0) {
cur = ctxt->context->node;
} else if (nargs == 1) {
xmlNodeSetPtr nodelist;
int i, ret;
if ((ctxt->value == NULL) || (ctxt->value->type != XPATH_NODESET)) {
ctxt->error = XPATH_INVALID_TYPE;
xsltTransformError(xsltXPathGetTransformContext(ctxt), NULL, NULL,
"generate-id() : invalid arg expecting a node-set\n");
return;
}
obj = valuePop(ctxt);
nodelist = obj->nodesetval;
if ((nodelist == NULL) || (nodelist->nodeNr <= 0)) {
xmlXPathFreeObject(obj);
/* empty node-set: generate-id() returns "" per XSLT 1.0 */
valuePush(ctxt, xmlXPathNewCString(""));
return;
}
/* pick the first node in document order */
cur = nodelist->nodeTab[0];
for (i = 1;i < nodelist->nodeNr;i++) {
ret = xmlXPathCmpNodes(cur, nodelist->nodeTab[i]);
if (ret == -1)
cur = nodelist->nodeTab[i];
}
} else {
xsltTransformError(xsltXPathGetTransformContext(ctxt), NULL, NULL,
"generate-id() : invalid number of args %d\n", nargs);
ctxt->error = XPATH_INVALID_ARITY;
return;
}
/*
 * Okay this is ugly but should work, use the NodePtr address
 * to forge the ID
 */
if (cur->type != XML_NAMESPACE_DECL)
doc = cur->doc;
else {
/* namespace nodes are not linked to a document; fall back to the
 * namespace's context document, then the evaluation context's doc */
xmlNsPtr ns = (xmlNsPtr) cur;
if (ns->context != NULL)
doc = ns->context;
else
doc = ctxt->context->doc;
}
if (obj)
xmlXPathFreeObject(obj);
val = (long)((char *)cur - (char *)doc);
if (val >= 0) {
sprintf((char *)str, "idp%ld", val);
} else {
sprintf((char *)str, "idm%ld", -val);
}
valuePush(ctxt, xmlXPathNewString(str));
}
| 0
|
339,708
|
/*
 * Parse one "-bt hci" option string, create the HCI it describes,
 * assign it a fixed locally-administered BD address (52:54:00:12:34:xx,
 * with xx varying by table slot) and register it in hci_table.
 * Returns 0 on success, -1 when the table is full or init fails.
 */
static int bt_hci_parse(const char *str)
{
    struct HCIInfo *info;
    bdaddr_t addr;

    if (nb_hcis >= MAX_NICS) {
        fprintf(stderr, "qemu: Too many bluetooth HCIs (max %i).\n", MAX_NICS);
        return -1;
    }

    info = hci_init(str);
    if (!info)
        return -1;

    addr.b[0] = 0x52;
    addr.b[1] = 0x54;
    addr.b[2] = 0x00;
    addr.b[3] = 0x12;
    addr.b[4] = 0x34;
    addr.b[5] = 0x56 + nb_hcis;
    info->bdaddr_set(info, addr.b);

    hci_table[nb_hcis++] = info;
    return 0;
}
| 0
|
359,349
|
/*
 * Emit the actions (or, in the policer-only configuration, the policer)
 * attached to a classifier's extensions into a netlink dump message.
 * Supports both the new action attribute and the old TCA_OLD_COMPAT /
 * police layout for backward compatibility with older iproute2.
 * Returns 0 on success, -1 when the skb runs out of room (the
 * rtattr_failure label is the jump target of the RTA_PUT macro).
 */
tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
if (map->action && exts->action) {
/*
* again for backward compatible mode - we want
* to work with both old and new modes of entering
* tc data even if iproute2 was newer - jhs
*/
struct rtattr * p_rta = (struct rtattr*) skb->tail;
if (exts->action->type != TCA_OLD_COMPAT) {
RTA_PUT(skb, map->action, 0, NULL);
if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
goto rtattr_failure;
/* patch the nest length now that its payload is written */
p_rta->rta_len = skb->tail - (u8*)p_rta;
} else if (map->police) {
RTA_PUT(skb, map->police, 0, NULL);
if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
goto rtattr_failure;
p_rta->rta_len = skb->tail - (u8*)p_rta;
}
}
#elif defined CONFIG_NET_CLS_POLICE
if (map->police && exts->police) {
struct rtattr * p_rta = (struct rtattr*) skb->tail;
RTA_PUT(skb, map->police, 0, NULL);
if (tcf_police_dump(skb, exts->police) < 0)
goto rtattr_failure;
p_rta->rta_len = skb->tail - (u8*)p_rta;
}
#endif
return 0;
rtattr_failure: __attribute__ ((unused))
return -1;
}
| 0
|
474,713
|
/*
 * Reset target-mode link state after the RF field was deactivated and
 * notify userspace over generic netlink.
 * Returns the result of the netlink notification.
 */
int nfc_tm_deactivated(struct nfc_dev *dev)
{
dev->dep_link_up = false;
dev->rf_mode = NFC_RF_NONE;
return nfc_genl_tm_deactivated(dev);
}
| 0
|
142,191
|
/*
 * Allocate the shared image area backing the loopback device's buffers
 * (dev->image, dev->imagesize = buffer_size * buffers_number).
 * Re-allocation is permitted only when the size is unchanged or this
 * is the sole open handle. Returns 0, -EINVAL on bad sizes or a busy
 * re-allocation, -ENOSPC on size overflow, -ENOMEM on vmalloc failure.
 */
static int allocate_buffers(struct v4l2_loopback_device *dev)
{
int err;
MARK();
/* vfree on close file operation in case no open handles left */
if (dev->buffer_size < 1 || dev->buffers_number < 1)
return -EINVAL;
/* reject buffer_size * buffers_number overflowing a long */
if ((__LONG_MAX__ / dev->buffer_size) < dev->buffers_number)
return -ENOSPC;
if (dev->image) {
dprintk("allocating buffers again: %ld %ld\n",
dev->buffer_size * dev->buffers_number, dev->imagesize);
/* FIXME: prevent double allocation more intelligently! */
if (dev->buffer_size * dev->buffers_number == dev->imagesize)
return 0;
/* if there is only one writer, no problem should occur */
if (dev->open_count.counter == 1)
free_buffers(dev);
else
return -EINVAL;
}
dev->imagesize = (unsigned long)dev->buffer_size *
(unsigned long)dev->buffers_number;
dprintk("allocating %ld = %ldx%d\n", dev->imagesize, dev->buffer_size,
dev->buffers_number);
err = -ENOMEM;
/* a configured timeout needs its placeholder frame first */
if (dev->timeout_jiffies > 0) {
err = allocate_timeout_image(dev);
if (err < 0)
goto error;
}
dev->image = vmalloc(dev->imagesize);
if (dev->image == NULL)
goto error;
dprintk("vmallocated %ld bytes\n", dev->imagesize);
MARK();
init_buffers(dev);
return 0;
error:
free_buffers(dev);
return err;
}
| 0
|
491,949
|
/* Gateway handler-context destructor: release the buffered websocket
 * frame payload held by this connection's context. */
static void wstunnel_handler_ctx_free(void *gwhctx) {
    handler_ctx * const hctx = gwhctx; /* implicit void* conversion in C */
    chunk_buffer_release(hctx->frame.payload);
}
| 0
|
234,950
|
// Rebuild style state after the set of active stylesheets may have
// changed. When the document is detached (or nothing was ever
// calculated and sheets are still loading) the resolver is simply
// dropped and rebuilt lazily. DeferRecalcStyle schedules a forced
// recalc instead of recalculating now; otherwise, if the stylesheet
// update demands it, a Force recalc runs immediately and layout is
// invalidated.
void Document::styleResolverChanged(StyleResolverUpdateFlag updateFlag)
{
if (!attached() || (!m_didCalculateStyleResolver && !haveStylesheetsLoaded())) {
m_styleResolver.clear();
return;
}
m_didCalculateStyleResolver = true;
#ifdef INSTRUMENT_LAYOUT_SCHEDULING
if (!ownerElement())
printf("Beginning update of style selector at time %d.\n", elapsedTime());
#endif
DocumentStyleSheetCollection::UpdateFlag styleSheetUpdate = (updateFlag == RecalcStyleIfNeeded)
? DocumentStyleSheetCollection::OptimizedUpdate
: DocumentStyleSheetCollection::FullUpdate;
bool stylesheetChangeRequiresStyleRecalc = m_styleSheetCollection->updateActiveStyleSheets(styleSheetUpdate);
if (updateFlag == DeferRecalcStyle) {
scheduleForcedStyleRecalc();
return;
}
// We laid out while sheets were pending and they have now all arrived:
// repaint so the FOUC-avoidance placeholder rendering is replaced.
if (didLayoutWithPendingStylesheets() && !m_styleSheetCollection->hasPendingSheets()) {
m_pendingSheetLayout = IgnoreLayoutWithPendingSheets;
if (renderer())
renderView()->repaintViewAndCompositedLayers();
}
if (!stylesheetChangeRequiresStyleRecalc)
return;
// Scoped so animation updates are batched around the forced recalc.
{
AnimationUpdateBlock animationUpdateBlock(m_frame ? m_frame->animation() : 0);
recalcStyle(Force);
}
#ifdef INSTRUMENT_LAYOUT_SCHEDULING
if (!ownerElement())
printf("Finished update of style selector at time %d\n", elapsedTime());
#endif
if (renderer()) {
renderer()->setNeedsLayoutAndPrefWidthsRecalc();
if (view())
view()->scheduleRelayout();
}
evaluateMediaQueryList();
}
| 0
|
55,440
|
/* Read a 32-bit VMCS field: sanity-check that the encoding really
 * names a 32-bit field (compile/debug-time check), then issue the
 * underlying VMREAD. */
static __always_inline u32 vmcs_read32(unsigned long field)
{
vmcs_check32(field);
return __vmcs_readl(field);
}
| 0
|
430,749
|
/*
 * Create one "Mixer Source N Volume" kcontrol for input channel
 * |in_ch| of a UAC mixer unit: scan the mixer's bmControls matrix to
 * build the output-channel mask, query min/max, derive a name from
 * the control map / input terminal, and register the control.
 * Silently returns on ignored controls or allocation failure (the
 * mixer simply ends up without this control).
 */
static void build_mixer_unit_ctl(struct mixer_build *state,
struct uac_mixer_unit_descriptor *desc,
int in_pin, int in_ch, int num_outs,
int unitid, struct usb_audio_term *iterm)
{
struct usb_mixer_elem_info *cval;
unsigned int i, len;
struct snd_kcontrol *kctl;
const struct usbmix_name_map *map;
__u8 *c;
map = find_map(state->map, unitid, 0);
if (check_ignored_ctl(map))
return;
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return;
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = in_ch + 1; /* based on 1 */
cval->val_type = USB_MIXER_S16;
/* The bmControls bitmap depends only on the descriptor and protocol,
 * so fetch it once instead of once per output channel. */
c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
for (i = 0; i < num_outs; i++) {
if (check_matrix_bitmap(c, in_ch, i, num_outs)) {
cval->cmask |= (1 << i);
cval->channels++;
}
}
/* get min/max values */
get_min_max(cval, 0);
kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
if (!kctl) {
usb_audio_err(state->chip, "cannot malloc kcontrol\n");
kfree(cval);
return;
}
kctl->private_free = snd_usb_mixer_elem_free;
/* name priority: explicit map entry, then input terminal, then a
 * generic "Mixer Source N" fallback */
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
if (!len)
len = get_term_name(state->chip, iterm, kctl->id.name,
sizeof(kctl->id.name), 0);
if (!len)
len = sprintf(kctl->id.name, "Mixer Source %d", in_ch + 1);
append_ctl_name(kctl, " Volume");
usb_audio_dbg(state->chip, "[%d] MU [%s] ch = %d, val = %d/%d\n",
cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max);
snd_usb_mixer_add_control(&cval->head, kctl);
}
| 0
|
39,815
|
// Validate I/O counts for a depthwise-conv node, compute padding for
// the given geometry, and (for non-float inference) derive the
// per-channel quantization parameters into |data|.
// Returns kTfLiteOk, or an error raised through the TF_LITE_ENSURE
// macros / PopulateConvolutionQuantizationParams.
TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
TfLiteDepthwiseConvParams* params, int width,
int height, int filter_width, int filter_height,
const TfLiteType data_type, OpData* data) {
bool has_bias = node->inputs->size == 3;
// Check number of inputs/outputs
TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
int unused_output_height, unused_output_width;
// Dilation is fixed at 1x1 here; output dims are not needed by callers.
data->padding = ComputePaddingHeightWidth(
params->stride_height, params->stride_width, 1, 1, height, width,
filter_height, filter_width, params->padding, &unused_output_height,
&unused_output_width);
// Note that quantized inference requires that all tensors have their
// parameters set. This is usually done during quantized training.
if (data_type != kTfLiteFloat32) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
TF_LITE_ENSURE(context, filter != nullptr);
const TfLiteTensor* bias =
GetOptionalInputTensor(context, node, kBiasTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
return tflite::PopulateConvolutionQuantizationParams(
context, input, filter, bias, output, params->activation,
&data->output_multiplier, &data->output_shift,
&data->output_activation_min, &data->output_activation_max,
data->per_channel_output_multiplier,
reinterpret_cast<int*>(data->per_channel_output_shift), num_channels);
}
return kTfLiteOk;
}
| 0
|
306,403
|
/*
 * Resolve and validate a YANG import statement for |module|:
 * reject importing multiple revisions of the same module, detect
 * import cycles, load the target module (honoring an optional exact
 * revision in imp->rev) and warn when the same module is imported
 * twice under different prefixes. On success imp->module is set and 0
 * is returned; -1 on any error.
 */
lyp_check_import(struct lys_module *module, const char *value, struct lys_import *imp)
{
int i;
struct lys_module *dup = NULL;
struct ly_ctx *ctx = module->ctx;
/* check for importing a single module in multiple revisions */
for (i = 0; i < module->imp_size; i++) {
if (!module->imp[i].module) {
/* skip the not yet filled records */
continue;
}
if (ly_strequal(module->imp[i].module->name, value, 1)) {
/* check revisions, including multiple revisions of a single module is error */
if (imp->rev[0] && (!module->imp[i].module->rev_size || strcmp(module->imp[i].module->rev[0].date, imp->rev))) {
/* the already imported module has
* - no revision, but here we require some
* - different revision than the one required here */
LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, value, "import");
LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Importing multiple revisions of module \"%s\".", value);
return -1;
} else if (!imp->rev[0]) {
/* no revision, remember the duplication, but check revisions after loading the module
* because the current revision can be the same (then it is ok) or it can differ (then it
* is error */
dup = module->imp[i].module;
break;
}
/* there is duplication, but since prefixes differs (checked in caller of this function),
* it is ok */
imp->module = module->imp[i].module;
return 0;
}
}
/* circular import check */
if (lyp_check_circmod(module, value, 1)) {
return -1;
}
/* load module - in specific situations it tries to get the module from the context */
imp->module = (struct lys_module *)ly_ctx_load_sub_module(module->ctx, NULL, value, imp->rev[0] ? imp->rev : NULL,
module->ctx->models.flags & LY_CTX_ALLIMPLEMENTED ? 1 : 0,
NULL);
/* check the result */
if (!imp->module) {
LOGERR(ctx, LY_EVALID, "Importing \"%s\" module into \"%s\" failed.", value, module->name);
return -1;
}
/* an exact revision was requested: verify the loaded module matches */
if (imp->rev[0] && imp->module->rev_size && strcmp(imp->rev, imp->module->rev[0].date)) {
LOGERR(ctx, LY_EVALID, "\"%s\" import of module \"%s\" in revision \"%s\" not found.",
module->name, value, imp->rev);
return -1;
}
if (dup) {
/* check the revisions */
if ((dup != imp->module) ||
(dup->rev_size != imp->module->rev_size && (!dup->rev_size || imp->module->rev_size)) ||
(dup->rev_size && strcmp(dup->rev[0].date, imp->module->rev[0].date))) {
/* - modules are not the same
* - one of modules has no revision (except they both has no revision)
* - revisions of the modules are not the same */
LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, value, "import");
LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Importing multiple revisions of module \"%s\".", value);
return -1;
} else {
LOGWRN(ctx, "Module \"%s\" is imported by \"%s\" multiple times with different prefixes.", dup->name, module->name);
}
}
return 0;
}
| 0
|
113,213
|
/*
 * Extract the fields of the indx-th (0-based) single response inside
 * an OCSP response. Every output pointer may be NULL to skip that
 * field. The datum outputs (issuer_name_hash, issuer_key_hash,
 * serial_number) are allocated for the caller on success; on failure
 * any datums already filled by this call are freed before returning.
 * Missing nextUpdate / revocationTime are reported as (time_t)-1 and
 * a missing revocation reason as GNUTLS_X509_CRLREASON_UNSPECIFIED.
 * Returns GNUTLS_E_SUCCESS or a negative error code
 * (GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE when indx is out of range).
 */
gnutls_ocsp_resp_get_single(gnutls_ocsp_resp_t resp,
unsigned indx,
gnutls_digest_algorithm_t * digest,
gnutls_datum_t * issuer_name_hash,
gnutls_datum_t * issuer_key_hash,
gnutls_datum_t * serial_number,
unsigned int *cert_status,
time_t * this_update,
time_t * next_update,
time_t * revocation_time,
unsigned int *revocation_reason)
{
gnutls_datum_t sa;
char name[ASN1_MAX_NAME_SIZE];
int ret;
/* ASN.1 SEQUENCE OF indices are 1-based, hence indx + 1 throughout */
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.certID.hashAlgorithm.algorithm",
indx + 1);
ret = _gnutls_x509_read_value(resp->basicresp, name, &sa);
if (ret == GNUTLS_E_ASN1_ELEMENT_NOT_FOUND)
return GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE;
else if (ret < 0) {
gnutls_assert();
return ret;
}
ret = gnutls_oid_to_digest((char *) sa.data);
_gnutls_free_datum(&sa);
if (ret < 0) {
gnutls_assert();
return ret;
}
if (digest)
*digest = ret;
if (issuer_name_hash) {
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.certID.issuerNameHash",
indx + 1);
ret = _gnutls_x509_read_value(resp->basicresp, name,
issuer_name_hash);
if (ret != GNUTLS_E_SUCCESS) {
gnutls_assert();
return ret;
}
}
if (issuer_key_hash) {
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.certID.issuerKeyHash",
indx + 1);
ret = _gnutls_x509_read_value(resp->basicresp, name,
issuer_key_hash);
if (ret != GNUTLS_E_SUCCESS) {
gnutls_assert();
/* release the datum filled earlier in this call */
if (issuer_name_hash)
gnutls_free(issuer_name_hash->data);
return ret;
}
}
if (serial_number) {
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.certID.serialNumber",
indx + 1);
ret = _gnutls_x509_read_value(resp->basicresp, name,
serial_number);
if (ret != GNUTLS_E_SUCCESS) {
gnutls_assert();
if (issuer_name_hash)
gnutls_free(issuer_name_hash->data);
if (issuer_key_hash)
gnutls_free(issuer_key_hash->data);
return ret;
}
}
if (cert_status) {
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.certStatus",
indx + 1);
ret = _gnutls_x509_read_value(resp->basicresp, name, &sa);
if (ret == GNUTLS_E_ASN1_ELEMENT_NOT_FOUND)
return GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE;
else if (ret < 0) {
gnutls_assert();
return ret;
}
/* NOTE(review): sizes include the choice name's trailing NUL
 * ("good" -> 5, "revoked"/"unknown" -> 8) — presumably how
 * _gnutls_x509_read_value returns it; confirm */
if (sa.size == 5 && memcmp(sa.data, "good", sa.size) == 0)
*cert_status = GNUTLS_OCSP_CERT_GOOD;
else if (sa.size == 8
&& memcmp(sa.data, "revoked", sa.size) == 0)
*cert_status = GNUTLS_OCSP_CERT_REVOKED;
else if (sa.size == 8
&& memcmp(sa.data, "unknown", sa.size) == 0)
*cert_status = GNUTLS_OCSP_CERT_UNKNOWN;
else {
gnutls_assert();
gnutls_free(sa.data);
return GNUTLS_E_ASN1_DER_ERROR;
}
gnutls_free(sa.data);
}
if (this_update) {
char ttime[MAX_TIME];
int len;
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.thisUpdate",
indx + 1);
len = sizeof(ttime) - 1;
ret = asn1_read_value(resp->basicresp, name, ttime, &len);
if (ret != ASN1_SUCCESS) {
gnutls_assert();
return GNUTLS_E_ASN1_DER_ERROR;
} else {
*this_update =
_gnutls_x509_generalTime2gtime(ttime);
}
}
if (next_update) {
char ttime[MAX_TIME];
int len;
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.nextUpdate",
indx + 1);
len = sizeof(ttime) - 1;
ret = asn1_read_value(resp->basicresp, name, ttime, &len);
if (ret != ASN1_SUCCESS) {
gnutls_assert();
/* nextUpdate is optional: signal absence rather than fail */
*next_update = (time_t) (-1);
} else
*next_update =
_gnutls_x509_generalTime2gtime(ttime);
}
if (revocation_time) {
char ttime[MAX_TIME];
int len;
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.certStatus."
"revoked.revocationTime", indx + 1);
len = sizeof(ttime) - 1;
ret = asn1_read_value(resp->basicresp, name, ttime, &len);
if (ret != ASN1_SUCCESS) {
gnutls_assert();
/* only present for revoked certificates */
*revocation_time = (time_t) (-1);
} else
*revocation_time =
_gnutls_x509_generalTime2gtime(ttime);
}
/* revocation_reason */
if (revocation_reason) {
snprintf(name, sizeof(name),
"tbsResponseData.responses.?%u.certStatus."
"revoked.revocationReason", indx + 1);
ret = _gnutls_x509_read_uint(resp->basicresp, name,
revocation_reason);
if (ret < 0)
*revocation_reason =
GNUTLS_X509_CRLREASON_UNSPECIFIED;
}
return GNUTLS_E_SUCCESS;
}
| 0
|
503,476
|
/* Undo the r-g / b-g decorrelation of 10-bit planar RGB in place:
 * add the green plane back into red and blue, removing the 0x200 bias
 * and wrapping to 10 bits. Plane order in the frame is G, B, R. */
static void restore_rgb_planes10(AVFrame *frame, int width, int height)
{
    uint16_t *rp = (uint16_t *)frame->data[2];
    uint16_t *gp = (uint16_t *)frame->data[0];
    uint16_t *bp = (uint16_t *)frame->data[1];
    int x, y;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            int g = gp[x];

            rp[x] = (rp[x] + g - 0x200) & 0x3FF;
            bp[x] = (bp[x] + g - 0x200) & 0x3FF;
        }
        /* linesize is in bytes; planes hold 16-bit samples */
        rp += frame->linesize[2] / 2;
        gp += frame->linesize[0] / 2;
        bp += frame->linesize[1] / 2;
    }
}
| 0
|
27,103
|
/* Exercise the prepared-statement path with a parameterless query:
 * prepare "SELECT @@version", verify zero placeholders, execute and
 * consume the result set. */
static void test_select_version()
{
    MYSQL_STMT *stmt;
    int rc;

    myheader("test_select_version");

    stmt = mysql_simple_prepare(mysql, "SELECT @@version");
    check_stmt(stmt);

    verify_param_count(stmt, 0);

    rc = mysql_stmt_execute(stmt);
    check_execute(stmt, rc);

    my_process_stmt_result(stmt);
    mysql_stmt_close(stmt);
}
| 0
|
433,269
|
/*
 * Bind an IEEE 802.15.4 datagram socket to a local hardware address:
 * clear any previous binding, validate the sockaddr, resolve the
 * net_device owning the address and, if it really is an 802.15.4
 * device, record the address as the socket's source.
 * Returns 0 on success or a negative errno; runs under lock_sock().
 */
static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
{
struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
struct ieee802154_addr haddr;
struct dgram_sock *ro = dgram_sk(sk);
int err = -EINVAL;
struct net_device *dev;
lock_sock(sk);
/* any rebind attempt first drops the existing binding */
ro->bound = 0;
if (len < sizeof(*addr))
goto out;
if (addr->family != AF_IEEE802154)
goto out;
ieee802154_addr_from_sa(&haddr, &addr->addr);
dev = ieee802154_get_dev(sock_net(sk), &haddr);
if (!dev) {
err = -ENODEV;
goto out;
}
if (dev->type != ARPHRD_IEEE802154) {
err = -ENODEV;
goto out_put;
}
ro->src_addr = haddr;
ro->bound = 1;
err = 0;
out_put:
dev_put(dev);
out:
release_sock(sk);
return err;
}
| 0
|
443,071
|
/*
 * Scale the vCPU's PAUSE filter count back down toward the module
 * baseline (pause_filter_count) using the configured shrink factor.
 * If the value actually changed, mark the VMCB intercept state dirty
 * and emit the ple_window trace event.
 */
static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
int old = control->pause_filter_count;
control->pause_filter_count =
__shrink_ple_window(old,
pause_filter_count,
pause_filter_count_shrink,
pause_filter_count);
if (control->pause_filter_count != old) {
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
trace_kvm_ple_window_update(vcpu->vcpu_id,
control->pause_filter_count, old);
}
}
| 0
|
155,124
|
/* Dump an FDItemInformationBox and its optional children (partition
 * entries, session info, group-id-to-name box) as XML to |trace|. */
GF_Err fiin_dump(GF_Box *a, FILE * trace)
{
    FDItemInformationBox *box = (FDItemInformationBox *)a;

    gf_isom_box_dump_start(a, "FDItemInformationBox", trace);
    fprintf(trace, ">\n");

    if (box->partition_entries)
        gf_isom_box_array_dump(box->partition_entries, trace);
    if (box->session_info)
        gf_isom_box_dump(box->session_info, trace);
    if (box->group_id_to_name)
        gf_isom_box_dump(box->group_id_to_name, trace);

    gf_isom_box_dump_done("FDItemInformationBox", a, trace);
    return GF_OK;
}
| 0
|
343,825
|
/*
 * Application startup entry point. Either asks an already-running
 * instance to quit (kill_shell) or starts/forwards desktop management
 * and window creation through the Unique single-instance machinery:
 * a running instance receives commands over libunique, otherwise this
 * process becomes the primary and finishes its own startup.
 * NOTE(review): |geometry| and |urls| are forwarded to the running
 * instance unvalidated via _unique_message_data_set_geometry_and_uris
 * — confirm callers sanitize them.
 */
nautilus_application_startup (NautilusApplication *application,
gboolean kill_shell,
gboolean no_default_window,
gboolean no_desktop,
gboolean browser_window,
const char *geometry,
char **urls)
{
UniqueMessageData *message;
/* Check the user's ~/.nautilus directories and post warnings
* if there are problems.
*/
if (!kill_shell && !check_required_directories (application)) {
return;
}
if (kill_shell) {
if (unique_app_is_running (application->unique_app)) {
unique_app_send_message (application->unique_app,
UNIQUE_CLOSE, NULL);
}
} else {
/* If KDE desktop is running, then force no_desktop */
if (is_kdesktop_present ()) {
no_desktop = TRUE;
}
if (!no_desktop && eel_preferences_get_boolean (NAUTILUS_PREFERENCES_SHOW_DESKTOP)) {
if (unique_app_is_running (application->unique_app)) {
unique_app_send_message (application->unique_app,
COMMAND_START_DESKTOP, NULL);
} else {
nautilus_application_open_desktop (application);
}
}
/* no instance running: this process becomes the primary */
if (!unique_app_is_running (application->unique_app)) {
finish_startup (application);
g_signal_connect (application->unique_app, "message-received", G_CALLBACK (message_received_cb), application);
}
/* Monitor the preference to show or hide the desktop */
eel_preferences_add_callback_while_alive (NAUTILUS_PREFERENCES_SHOW_DESKTOP,
desktop_changed_callback,
application,
G_OBJECT (application));
/* Monitor the preference to have the desktop */
/* point to the Unix home folder */
eel_preferences_add_callback_while_alive (NAUTILUS_PREFERENCES_DESKTOP_IS_HOME_DIR,
desktop_location_changed_callback,
NULL,
G_OBJECT (application));
/* Create the other windows. */
if (urls != NULL || !no_default_window) {
if (unique_app_is_running (application->unique_app)) {
message = unique_message_data_new ();
_unique_message_data_set_geometry_and_uris (message, geometry, urls);
if (browser_window) {
unique_app_send_message (application->unique_app,
COMMAND_OPEN_BROWSER, message);
} else {
unique_app_send_message (application->unique_app,
UNIQUE_OPEN, message);
}
unique_message_data_free (message);
} else {
open_windows (application, NULL,
urls,
geometry,
browser_window);
}
}
/* Load session info if availible */
nautilus_application_load_session (application);
}
}
| 1
|
69,646
|
/*
 * Resolve a (blockinherit ...) statement: look up the named block,
 * record it on the inherit node, and register this tree node in the
 * target block's bi_nodes list for later copying.
 * Returns SEPOL_OK on success, an error code otherwise.
 */
int cil_resolve_blockinherit_link(struct cil_tree_node *current, void *extra_args)
{
    struct cil_blockinherit *inherit = current->data;
    struct cil_symtab_datum *datum = NULL;
    struct cil_tree_node *target = NULL;
    int rc;

    rc = cil_resolve_name(current, inherit->block_str, CIL_SYM_BLOCKS, extra_args, &datum);
    if (rc != SEPOL_OK) {
        return rc;
    }

    target = NODE(datum);
    if (target->flavor != CIL_BLOCK) {
        cil_log(CIL_ERR, "%s is not a block\n", cil_node_to_string(target));
        return SEPOL_ERR;
    }

    inherit->block = (struct cil_block *)datum;

    /* lazily create the inheritor list on first use */
    if (inherit->block->bi_nodes == NULL) {
        cil_list_init(&inherit->block->bi_nodes, CIL_NODE);
    }
    cil_list_append(inherit->block->bi_nodes, CIL_NODE, current);

    return SEPOL_OK;
}
| 0
|
219,089
|
// Legacy DOM accessor (document.anchors): returns the document's
// cached live collection for the DocAnchors collection type, creating
// it on first use.
HTMLCollection* Document::anchors()
{
return ensureCachedCollection<HTMLCollection>(DocAnchors);
}
| 0
|
6,755
|
/*
 * Return the next IP identification value for peer |p|, advancing the
 * peer's counter by |more| + 1 (one for this datagram plus |more|
 * additional segments).
 * NOTE(review): a sequentially incrementing per-peer counter makes IP
 * IDs predictable to off-path observers; later kernels replaced this
 * scheme — confirm whether that matters for this tree.
 */
static inline __u16 inet_getid(struct inet_peer *p, int more)
{
more++;
inet_peer_refcheck(p);
return atomic_add_return(more, &p->ip_id_count) - more;
}
| 1
|
9,620
|
/*
 * Page-table walk callback: count in mc.precharge the PTEs in
 * [addr, end) that are targets for memcg move-charge. Splits any huge
 * pmd first so the range can be scanned at PTE granularity.
 * NOTE(review): the PTE table is mapped right after the split without
 * rechecking the pmd; later kernels guard against a concurrently
 * re-established huge pmd here — confirm this tree is not exposed.
 */
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->private;
pte_t *pte;
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
if (is_target_pte_for_mc(vma, addr, *pte, NULL))
mc.precharge++; /* increment precharge temporarily */
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
}
| 1
|
240,369
|
// JNI entry point: synchronously capture the current page contents as
// an AwPicture of the requested size on the UI thread. The returned
// jlong is a raw pointer to a heap-allocated AwPicture whose ownership
// transfers to the Java caller.
jlong AwContents::CapturePicture(JNIEnv* env,
jobject obj,
int width,
int height) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
return reinterpret_cast<intptr_t>(
new AwPicture(browser_view_renderer_.CapturePicture(width, height)));
}
| 0
|
30,166
|
/* x509_t.create_name_constraint_enumerator: enumerate either the
 * permitted (perm = TRUE) or the excluded name constraints of this
 * certificate. */
METHOD(x509_t, create_name_constraint_enumerator, enumerator_t*,
    private_x509_cert_t *this, bool perm)
{
    if (!perm)
    {
        return this->excluded_names->create_enumerator(this->excluded_names);
    }
    return this->permitted_names->create_enumerator(this->permitted_names);
}
| 0
|
132,591
|
/* Return nonzero when |addr| is the IPv6 unspecified address (::),
 * i.e. the wildcard used for "any" binds. */
static int sctp_v6_is_any(const union sctp_addr *addr)
{
return ipv6_addr_any(&addr->v6.sin6_addr);
}
| 0
|
32,587
|
/*
 * Convert a PEM buffer to DER: locate the header/footer pair matching
 * |type| (retrying through the alternate private-key header variants),
 * Base64-decode the body into a freshly allocated DerBuffer (*pDer)
 * and, for private keys, optionally unwrap PKCS#8 and/or decrypt an
 * encrypted key using the password callback supplied via |info|.
 * On success returns 0, reporting the detected key algorithm through
 * |keyFormat| where applicable; otherwise a negative wolfSSL error
 * code. When |info| is given, info->consumed is set to the number of
 * input bytes consumed so callers can iterate over concatenated PEM
 * blocks. The caller owns *pDer on success.
 */
int PemToDer(const unsigned char* buff, long longSz, int type,
DerBuffer** pDer, void* heap, EncryptedInfo* info, int* keyFormat)
{
const char* header = NULL;
const char* footer = NULL;
const char* headerEnd;
const char* footerEnd;
const char* consumedEnd;
const char* bufferEnd = (const char*)(buff + longSz);
long neededSz;
int ret = 0;
int sz = (int)longSz;
int encrypted_key = 0;
DerBuffer* der;
#if defined(HAVE_PKCS8) || defined(WOLFSSL_ENCRYPTED_KEYS)
word32 algId = 0;
#if defined(WOLFSSL_ENCRYPTED_KEYS) && !defined(NO_DES3) && !defined(NO_WOLFSSL_SKIP_TRAILING_PAD)
int padVal = 0;
#endif
#endif
#ifdef OPENSSL_EXTRA
char beginBuf[PEM_LINE_LEN + 1]; /* add 1 for null terminator */
char endBuf[PEM_LINE_LEN + 1]; /* add 1 for null terminator */
#endif
WOLFSSL_ENTER("PemToDer");
/* get PEM header and footer based on type */
ret = wc_PemGetHeaderFooter(type, &header, &footer);
if (ret != 0)
return ret;
/* map header if not found for type */
/* note: headers are compared by pointer identity against the known
 * constants, cycling through the alternatives until one matches or
 * the list is exhausted */
for (;;) {
headerEnd = XSTRNSTR((char*)buff, header, sz);
if (headerEnd) {
break;
} else
if (type == PRIVATEKEY_TYPE) {
if (header == BEGIN_RSA_PRIV) {
header = BEGIN_PRIV_KEY; footer = END_PRIV_KEY;
} else
if (header == BEGIN_PRIV_KEY) {
header = BEGIN_ENC_PRIV_KEY; footer = END_ENC_PRIV_KEY;
} else
#ifdef HAVE_ECC
if (header == BEGIN_ENC_PRIV_KEY) {
header = BEGIN_EC_PRIV; footer = END_EC_PRIV;
} else
if (header == BEGIN_EC_PRIV) {
header = BEGIN_DSA_PRIV; footer = END_DSA_PRIV;
} else
#endif
#if defined(HAVE_ED25519) || defined(HAVE_ED448)
#ifdef HAVE_ECC
if (header == BEGIN_DSA_PRIV)
#else
if (header == BEGIN_ENC_PRIV_KEY)
#endif
{
header = BEGIN_EDDSA_PRIV; footer = END_EDDSA_PRIV;
} else
#endif
{
break;
}
} else
#ifdef HAVE_CRL
if ((type == CRL_TYPE) && (header != BEGIN_X509_CRL)) {
header = BEGIN_X509_CRL; footer = END_X509_CRL;
} else
#endif
{
break;
}
}
if (!headerEnd) {
#ifdef OPENSSL_EXTRA
if (type == PRIVATEKEY_TYPE) {
const char* beginEnd;
int endLen;
/* see if there is a -----BEGIN * PRIVATE KEY----- header */
headerEnd = XSTRNSTR((char*)buff, PRIV_KEY_SUFFIX, sz);
if (headerEnd) {
beginEnd = headerEnd + XSTR_SIZEOF(PRIV_KEY_SUFFIX);
if (beginEnd >= (char*)buff + sz) {
return BUFFER_E;
}
/* back up to BEGIN_PRIV_KEY_PREFIX */
while (headerEnd > (char*)buff &&
XSTRNCMP(headerEnd, BEGIN_PRIV_KEY_PREFIX,
XSTR_SIZEOF(BEGIN_PRIV_KEY_PREFIX)) != 0 &&
*headerEnd != '\n') {
headerEnd--;
}
if (headerEnd <= (char*)buff ||
XSTRNCMP(headerEnd, BEGIN_PRIV_KEY_PREFIX,
XSTR_SIZEOF(BEGIN_PRIV_KEY_PREFIX)) != 0 ||
beginEnd - headerEnd > PEM_LINE_LEN) {
WOLFSSL_MSG("Couldn't find PEM header");
WOLFSSL_ERROR(ASN_NO_PEM_HEADER);
return ASN_NO_PEM_HEADER;
}
/* headerEnd now points to beginning of header */
XMEMCPY(beginBuf, headerEnd, beginEnd - headerEnd);
beginBuf[beginEnd - headerEnd] = '\0';
/* look for matching footer */
footer = XSTRNSTR(beginEnd,
beginBuf + XSTR_SIZEOF(BEGIN_PRIV_KEY_PREFIX),
(unsigned int)((char*)buff + sz - beginEnd));
if (!footer) {
WOLFSSL_MSG("Couldn't find PEM footer");
WOLFSSL_ERROR(ASN_NO_PEM_HEADER);
return ASN_NO_PEM_HEADER;
}
footer -= XSTR_SIZEOF(END_PRIV_KEY_PREFIX);
if (footer > (char*)buff + sz - XSTR_SIZEOF(END_PRIV_KEY_PREFIX)
|| XSTRNCMP(footer, END_PRIV_KEY_PREFIX,
XSTR_SIZEOF(END_PRIV_KEY_PREFIX)) != 0) {
WOLFSSL_MSG("Unexpected footer for PEM");
return BUFFER_E;
}
endLen = (unsigned int)(beginEnd - headerEnd -
(XSTR_SIZEOF(BEGIN_PRIV_KEY_PREFIX) -
XSTR_SIZEOF(END_PRIV_KEY_PREFIX)));
XMEMCPY(endBuf, footer, endLen);
endBuf[endLen] = '\0';
header = beginBuf;
footer = endBuf;
headerEnd = beginEnd;
}
}
if (!headerEnd) {
WOLFSSL_MSG("Couldn't find PEM header");
WOLFSSL_ERROR(ASN_NO_PEM_HEADER);
return ASN_NO_PEM_HEADER;
}
#else
WOLFSSL_MSG("Couldn't find PEM header");
return ASN_NO_PEM_HEADER;
#endif
} else {
headerEnd += XSTRLEN(header);
}
/* eat end of line characters */
headerEnd = SkipEndOfLineChars(headerEnd, bufferEnd);
if (type == PRIVATEKEY_TYPE) {
/* keyFormat is Key_Sum enum */
if (keyFormat) {
#ifdef HAVE_ECC
if (header == BEGIN_EC_PRIV)
*keyFormat = ECDSAk;
#endif
#if !defined(NO_DSA)
if (header == BEGIN_DSA_PRIV)
*keyFormat = DSAk;
#endif
}
}
#ifdef WOLFSSL_ENCRYPTED_KEYS
if (info) {
/* parse Proc-Type/DEK-Info lines; advances headerEnd past them */
ret = wc_EncryptedInfoParse(info, &headerEnd, bufferEnd - headerEnd);
if (ret < 0)
return ret;
if (info->set)
encrypted_key = 1;
}
#endif /* WOLFSSL_ENCRYPTED_KEYS */
/* find footer */
footerEnd = XSTRNSTR(headerEnd, footer, (unsigned int)((char*)buff + sz - headerEnd));
if (!footerEnd) {
if (info)
info->consumed = longSz; /* No more certs if no footer */
return BUFFER_E;
}
consumedEnd = footerEnd + XSTRLEN(footer);
if (consumedEnd < bufferEnd) { /* handle no end of line on last line */
/* eat end of line characters */
consumedEnd = SkipEndOfLineChars(consumedEnd, bufferEnd);
/* skip possible null term */
if (consumedEnd < bufferEnd && consumedEnd[0] == '\0')
consumedEnd++;
}
if (info)
info->consumed = (long)(consumedEnd - (const char*)buff);
/* set up der buffer */
neededSz = (long)(footerEnd - headerEnd);
if (neededSz > sz || neededSz <= 0)
return BUFFER_E;
ret = AllocDer(pDer, (word32)neededSz, type, heap);
if (ret < 0) {
return ret;
}
der = *pDer;
if (Base64_Decode((byte*)headerEnd, (word32)neededSz,
der->buffer, &der->length) < 0)
return BUFFER_E;
if ((header == BEGIN_PRIV_KEY
#ifdef OPENSSL_EXTRA
|| header == beginBuf
#endif
#ifdef HAVE_ECC
|| header == BEGIN_EC_PRIV
#endif
) && !encrypted_key)
{
#ifdef HAVE_PKCS8
/* pkcs8 key, convert and adjust length */
if ((ret = ToTraditional_ex(der->buffer, der->length, &algId)) > 0) {
der->length = ret;
if (keyFormat) {
*keyFormat = algId;
}
}
else {
/* ignore failure here and assume key is not pkcs8 wrapped */
}
#endif
return 0;
}
#ifdef WOLFSSL_ENCRYPTED_KEYS
if (encrypted_key || header == BEGIN_ENC_PRIV_KEY) {
int passwordSz = NAME_SZ;
#ifdef WOLFSSL_SMALL_STACK
char* password = NULL;
#else
char password[NAME_SZ];
#endif
if (!info || !info->passwd_cb) {
WOLFSSL_MSG("No password callback set");
return NO_PASSWORD;
}
#ifdef WOLFSSL_SMALL_STACK
password = (char*)XMALLOC(passwordSz, heap, DYNAMIC_TYPE_STRING);
if (password == NULL)
return MEMORY_E;
#endif
/* get password */
ret = info->passwd_cb(password, passwordSz, PEM_PASS_READ,
info->passwd_userdata);
if (ret >= 0) {
passwordSz = ret;
/* convert and adjust length */
if (header == BEGIN_ENC_PRIV_KEY) {
#ifndef NO_PWDBASED
ret = ToTraditionalEnc(der->buffer, der->length,
password, passwordSz, &algId);
if (ret >= 0) {
der->length = ret;
if (keyFormat) {
*keyFormat = algId;
}
ret = 0;
}
#else
ret = NOT_COMPILED_IN;
#endif
}
/* decrypt the key */
else {
if (passwordSz == 0) {
/* The key is encrypted but does not have a password */
WOLFSSL_MSG("No password for encrypted key");
ret = NO_PASSWORD;
}
else {
ret = wc_BufferKeyDecrypt(info, der->buffer, der->length,
(byte*)password, passwordSz, WC_MD5);
#ifndef NO_WOLFSSL_SKIP_TRAILING_PAD
#ifndef NO_DES3
if (info->cipherType == WC_CIPHER_DES3) {
/* Assuming there is padding:
* (der->length > 0 && der->length > DES_BLOCK_SIZE &&
* (der->length % DES_BLOCK_SIZE) != 0)
* and assuming the last value signifies the number of
* padded bytes IE if last value is 0x08 then there are
* 8 bytes of padding:
* padVal = der->buffer[der->length-1];
* then strip this padding before proceeding:
* der->length -= padVal;
*/
if (der->length > DES_BLOCK_SIZE &&
(der->length % DES_BLOCK_SIZE) != 0) {
padVal = der->buffer[der->length-1];
if (padVal < DES_BLOCK_SIZE) {
der->length -= padVal;
}
}
}
#endif /* !NO_DES3 */
#endif /* !NO_WOLFSSL_SKIP_TRAILING_PAD */
}
}
#ifdef OPENSSL_EXTRA
if (ret) {
PEMerr(0, PEM_R_BAD_DECRYPT);
}
#endif
ForceZero(password, passwordSz);
}
#ifdef OPENSSL_EXTRA
else {
PEMerr(0, PEM_R_BAD_PASSWORD_READ);
}
#endif
#ifdef WOLFSSL_SMALL_STACK
XFREE(password, heap, DYNAMIC_TYPE_STRING);
#endif
}
#endif /* WOLFSSL_ENCRYPTED_KEYS */
return ret;
}
| 0
|
474,313
|
/* Globally enable or disable on-disk metadata locking. Once disabled,
 * re-enabling is refused (-EPERM); |cd| is unused. Returns 0 on
 * success. */
int crypt_metadata_locking(struct crypt_device *cd __attribute__((unused)), int enable)
{
    if (enable && !_metadata_locking)
        return -EPERM;

    _metadata_locking = !!enable;
    return 0;
}
| 0
|
232,743
|
// Binds this viewer request handle to |web_contents| and starts
// observing it, remembering the scheme and request path the distilled
// page load is expected to use. Also registers as an observer of
// distilled-page pref changes so theme/font updates reach the viewer.
DomDistillerViewerSource::RequestViewerHandle::RequestViewerHandle(
content::WebContents* web_contents,
const std::string& expected_scheme,
const std::string& expected_request_path,
DistilledPagePrefs* distilled_page_prefs)
: DomDistillerRequestViewBase(distilled_page_prefs),
expected_scheme_(expected_scheme),
expected_request_path_(expected_request_path),
waiting_for_page_ready_(true) {
content::WebContentsObserver::Observe(web_contents);
distilled_page_prefs_->AddObserver(this);
}
| 0
|
333,089
|
/* Install the portable C implementations of the RV30/40 inverse
 * transform and IDCT-add primitives, then allow the NEON and x86
 * initializers to override any of them with optimized versions when
 * built for those architectures. */
av_cold void ff_rv34dsp_init(RV34DSPContext *c, DSPContext* dsp) {
c->rv34_inv_transform = rv34_inv_transform_noround_c;
c->rv34_inv_transform_dc = rv34_inv_transform_dc_noround_c;
c->rv34_idct_add = rv34_idct_add_c;
c->rv34_idct_dc_add = rv34_idct_dc_add_c;
if (HAVE_NEON)
ff_rv34dsp_init_neon(c, dsp);
if (ARCH_X86)
ff_rv34dsp_init_x86(c, dsp);
}
| 0
|
306,867
|
bool ShouldQuicMigrateSessionsEarlyV2(
const VariationParameters& quic_trial_params) {
return base::LowerCaseEqualsASCII(
GetVariationParam(quic_trial_params, "migrate_sessions_early_v2"),
"true");
}
| 0
|
125,461
|
/*
 * Move the cursor, if necessary, so that 'scrolloff' lines of context are
 * kept above and below it within the current window.  The wanted context is
 * reduced near the start/end of the buffer and while dragging with the
 * mouse.  Adjusts curwin->w_cursor.lnum and invalidates the affected
 * w_valid flags; sets VALID_TOPLINE on return.
 */
cursor_correct(void)
{
    int above = 0; // screen lines above topline
    linenr_T topline;
    int below = 0; // screen lines below botline
    linenr_T botline;
    int above_wanted, below_wanted;
    linenr_T cln; // Cursor Line Number
    int max_off;
    long so = get_scrolloff_value();
    /*
     * How many lines we would like to have above/below the cursor depends on
     * whether the first/last line of the file is on screen.
     */
    above_wanted = so;
    below_wanted = so;
    if (mouse_dragging > 0)
    {
	above_wanted = mouse_dragging - 1;
	below_wanted = mouse_dragging - 1;
    }
    if (curwin->w_topline == 1)
    {
	above_wanted = 0;
	max_off = curwin->w_height / 2;
	if (below_wanted > max_off)
	    below_wanted = max_off;
    }
    validate_botline();
    if (curwin->w_botline == curbuf->b_ml.ml_line_count + 1
	    && mouse_dragging == 0)
    {
	below_wanted = 0;
	max_off = (curwin->w_height - 1) / 2;
	if (above_wanted > max_off)
	    above_wanted = max_off;
    }
    /*
     * If there are sufficient file-lines above and below the cursor, we can
     * return now.
     */
    cln = curwin->w_cursor.lnum;
    if (cln >= curwin->w_topline + above_wanted
	    && cln < curwin->w_botline - below_wanted
#ifdef FEAT_FOLDING
	    && !hasAnyFolding(curwin)
#endif
	    )
	return;
    /*
     * Narrow down the area where the cursor can be put by taking lines from
     * the top and the bottom until:
     * - the desired context lines are found
     * - the lines from the top is past the lines from the bottom
     */
    topline = curwin->w_topline;
    botline = curwin->w_botline - 1;
#ifdef FEAT_DIFF
    // count filler lines as context
    above = curwin->w_topfill;
    below = curwin->w_filler_rows;
#endif
    while ((above < above_wanted || below < below_wanted) && topline < botline)
    {
	if (below < below_wanted && (below <= above || above >= above_wanted))
	{
#ifdef FEAT_FOLDING
	    // a closed fold counts as one context line
	    if (hasFolding(botline, &botline, NULL))
		++below;
	    else
#endif
		below += plines(botline);
	    --botline;
	}
	if (above < above_wanted && (above < below || below >= below_wanted))
	{
#ifdef FEAT_FOLDING
	    if (hasFolding(topline, NULL, &topline))
		++above;
	    else
#endif
		above += PLINES_NOFILL(topline);
#ifdef FEAT_DIFF
	    // Count filler lines below this line as context.
	    if (topline < botline)
		above += diff_check_fill(curwin, topline + 1);
#endif
	    ++topline;
	}
    }
    if (topline == botline || botline == 0)
	curwin->w_cursor.lnum = topline;
    else if (topline > botline)
	curwin->w_cursor.lnum = botline;
    else
    {
	// Clamp the cursor into [topline, botline] and invalidate the
	// cached window position info when it moved.
	if (cln < topline && curwin->w_topline > 1)
	{
	    curwin->w_cursor.lnum = topline;
	    curwin->w_valid &=
			    ~(VALID_WROW|VALID_WCOL|VALID_CHEIGHT|VALID_CROW);
	}
	if (cln > botline && curwin->w_botline <= curbuf->b_ml.ml_line_count)
	{
	    curwin->w_cursor.lnum = botline;
	    curwin->w_valid &=
			    ~(VALID_WROW|VALID_WCOL|VALID_CHEIGHT|VALID_CROW);
	}
    }
    curwin->w_valid |= VALID_TOPLINE;
}
| 0
|
301,025
|
/*
 * Decide whether forced close of this open file must be disallowed.
 * Returns B_TRUE for a disk file whose node is the tree's share root
 * node, and for the SRVSVC named pipe; B_FALSE otherwise.
 */
smb_ofile_disallow_fclose(smb_ofile_t *of)
{
	ASSERT(of);
	ASSERT(of->f_magic == SMB_OFILE_MAGIC);
	ASSERT(of->f_refcnt);
	switch (of->f_ftype) {
	case SMB_FTYPE_DISK:
		ASSERT(of->f_tree);
		return (of->f_node == of->f_tree->t_snode);
	case SMB_FTYPE_MESG_PIPE:
		ASSERT(of->f_pipe);
		/* case-insensitive match on the pipe name */
		if (smb_strcasecmp(of->f_pipe->p_name, "SRVSVC", 0) == 0)
			return (B_TRUE);
		break;
	default:
		break;
	}
	return (B_FALSE);
}
| 0
|
404,147
|
/*
 * skb_split - split the data of @skb at offset @len, moving the tail
 * into @skb1.  The helper chosen depends on whether the split point
 * falls inside the linear header area or beyond it.
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len >= pos) {
		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
	} else {
		/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	}
}
| 0
|
285,662
|
// Returns the V8 wrapper object for |event|, creating and caching one on
// first use.  The concrete wrapper class is selected by probing the event's
// type-predicate methods (most specific first).  A successfully wrapped
// event is ref()'d so the wrapper keeps it alive; returns v8::Null() for a
// null event or if instantiation fails.
v8::Handle<v8::Value> V8DOMWrapper::convertEventToV8Object(Event* event)
{
    if (!event)
        return v8::Null();
    // Reuse an existing wrapper so object identity is preserved.
    v8::Handle<v8::Object> wrapper = getDOMObjectMap().get(event);
    if (!wrapper.IsEmpty())
        return wrapper;
    V8ClassIndex::V8WrapperType type = V8ClassIndex::EVENT;
    if (event->isUIEvent()) {
        if (event->isKeyboardEvent())
            type = V8ClassIndex::KEYBOARDEVENT;
        else if (event->isTextEvent())
            type = V8ClassIndex::TEXTEVENT;
        else if (event->isMouseEvent())
            type = V8ClassIndex::MOUSEEVENT;
        else if (event->isWheelEvent())
            type = V8ClassIndex::WHEELEVENT;
#if ENABLE(SVG)
        else if (event->isSVGZoomEvent())
            type = V8ClassIndex::SVGZOOMEVENT;
#endif
        else
            type = V8ClassIndex::UIEVENT;
    } else if (event->isMutationEvent())
        type = V8ClassIndex::MUTATIONEVENT;
    else if (event->isOverflowEvent())
        type = V8ClassIndex::OVERFLOWEVENT;
    else if (event->isMessageEvent())
        type = V8ClassIndex::MESSAGEEVENT;
    else if (event->isProgressEvent()) {
        if (event->isXMLHttpRequestProgressEvent())
            type = V8ClassIndex::XMLHTTPREQUESTPROGRESSEVENT;
        else
            type = V8ClassIndex::PROGRESSEVENT;
    } else if (event->isWebKitAnimationEvent())
        type = V8ClassIndex::WEBKITANIMATIONEVENT;
    else if (event->isWebKitTransitionEvent())
        type = V8ClassIndex::WEBKITTRANSITIONEVENT;
#if ENABLE(WORKERS)
    else if (event->isErrorEvent())
        type = V8ClassIndex::ERROREVENT;
#endif
    v8::Handle<v8::Object> result = instantiateV8Object(type, V8ClassIndex::EVENT, event);
    if (result.IsEmpty()) {
        // Instantiation can fail, e.g. during frame teardown.
        return v8::Null();
    }
    event->ref(); // fast ref
    setJSWrapperForDOMObject(event, v8::Persistent<v8::Object>::New(result));
    return result;
}
| 0
|
483,264
|
/**
 * css_next_child - find the next child of a given css
 * @pos: the current position (%NULL to initiate traversal)
 * @parent: css whose children to walk
 *
 * Returns the next sibling of @pos under @parent, or the first child when
 * @pos is NULL; NULL when iteration is complete.  Must be called inside an
 * RCU read-side critical section (or with cgroup_mutex held) — see the
 * cgroup_assert_mutex_or_rcu_locked() check below.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *next;
	cgroup_assert_mutex_or_rcu_locked();
	/*
	 * @pos could already have been unlinked from the sibling list.
	 * Once a cgroup is removed, its ->sibling.next is no longer
	 * updated when its next sibling changes. CSS_RELEASED is set when
	 * @pos is taken off list, at which time its next pointer is valid,
	 * and, as releases are serialized, the one pointed to by the next
	 * pointer is guaranteed to not have started release yet. This
	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
	 * critical section, the one pointed to by its next pointer is
	 * guaranteed to not have finished its RCU grace period even if we
	 * have dropped rcu_read_lock() in-between iterations.
	 *
	 * If @pos has CSS_RELEASED set, its next pointer can't be
	 * dereferenced; however, as each css is given a monotonically
	 * increasing unique serial number and always appended to the
	 * sibling list, the next one can be found by walking the parent's
	 * children until the first css with higher serial number than
	 * @pos's. While this path can be slower, it happens iff iteration
	 * races against release and the race window is very small.
	 */
	if (!pos) {
		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
	} else if (likely(!(pos->flags & CSS_RELEASED))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
	} else {
		list_for_each_entry_rcu(next, &parent->children, sibling,
					lockdep_is_held(&cgroup_mutex))
			if (next->serial_nr > pos->serial_nr)
				break;
	}
	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling.
	 */
	if (&next->sibling != &parent->children)
		return next;
	return NULL;
}
| 0
|
110,109
|
template<typename t>
CImg(const CImg<t>& img, const char *const dimensions, const T& value):
_width(0),_height(0),_depth(0),_spectrum(0),_is_shared(false),_data(0) {
assign(img,dimensions).fill(value);
| 0
|
49,908
|
/* Handle a fatal IMAP protocol error: mark the connection fatal, and if a
 * mailbox was selected with reopen allowed, fast-close it and drop the
 * socket so a later reopen can recover.  Otherwise tear the connection
 * down completely. */
static void cmd_handle_fatal (IMAP_DATA* idata)
{
  idata->status = IMAP_FATAL;
  if ((idata->state >= IMAP_SELECTED) &&
      (idata->reopen & IMAP_REOPEN_ALLOW))
  {
    mx_fastclose_mailbox (idata->ctx);
    mutt_socket_close (idata->conn);
    mutt_error (_("Mailbox %s@%s closed"),
                idata->conn->account.login, idata->conn->account.host);
    mutt_sleep (1);
    idata->state = IMAP_DISCONNECTED;
  }
  /* No mailbox selected: just close the connection object. */
  if (idata->state < IMAP_SELECTED)
    imap_close_connection (idata);
}
| 0
|
186,873
|
/* Emit bytecode that pushes the numeric literal `num`, picking the most
 * compact encoding: dedicated opcodes for 0 and 1, an instruction-word
 * immediate when the value round-trips through js_Instruction, otherwise
 * an index into the function's number table. */
static void emitnumber(JF, double num)
{
	if (num == 0) {
		emit(J, F, OP_NUMBER_0);
		if (signbit(num))
			emit(J, F, OP_NEG); /* preserve negative zero */
	} else if (num == 1) {
		emit(J, F, OP_NUMBER_1);
	} else if (num == (js_Instruction)num) {
		/* value fits exactly in one instruction word */
		emit(J, F, OP_NUMBER_POS);
		emitraw(J, F, (js_Instruction)num);
	} else if (num < 0 && -num == (js_Instruction)(-num)) {
		emit(J, F, OP_NUMBER_NEG);
		emitraw(J, F, (js_Instruction)(-num));
	} else {
		/* general case: constant-table reference */
		emit(J, F, OP_NUMBER);
		emitraw(J, F, addnumber(J, F, num));
	}
}
| 0
|
426,262
|
/*
 * binder_thread_read() - fill the read half of a BINDER_WRITE_READ ioctl.
 * @proc:          binder process of the calling thread
 * @thread:        calling binder thread
 * @binder_buffer: user-space buffer to receive BR_* return commands
 * @size:          size of the user buffer
 * @consumed:      in/out count of bytes already written into the buffer
 * @non_block:     non-zero to avoid blocking when no work is available
 *
 * Waits for work on the thread's or the process's todo list (honouring
 * looper registration state), then drains work items into the user buffer
 * as BR_* commands, stopping after delivering a transaction/reply or when
 * the buffer cannot hold another entry.  May also append BR_SPAWN_LOOPER
 * to ask userspace for another looper thread.  Returns 0 on success or a
 * negative errno (e.g. -EFAULT on copyout failure, -EAGAIN for non-block).
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;
	int ret = 0;
	int wait_for_proc_work;
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);
	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}
	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
	if (ret)
		return ret;
	/* Drain work items into the user buffer. */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);
			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}
		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);
			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;
			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;
			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);
			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;
			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}
		if (!t)
			continue;
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;
			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}
		ret = binder_apply_fd_fixups(t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, buffer);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);
			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);
			return -EFAULT;
		}
		ptr += sizeof(tr);
		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}
done:
	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
| 0
|
304,167
|
/* Close the given socket.  closesocket is a portability wrapper here —
 * presumably mapped to close() on Unix; confirm against platform headers. */
StreamClose(pgsocket sock)
{
	closesocket(sock);
}
| 0
|
13,562
|
// Continue loading a cluster whose size was coded as "unknown" in the
// stream.  The live implementation (the #else branch) delegates to the
// cluster's own Parse(); the large #if 0 block is a retired manual scan
// kept for reference.  On completion, m_pos is advanced past the cluster
// and m_pUnknownSize is cleared.  Returns 2 to continue parsing, or a
// negative status on error/underflow.
//
// NOTE(review): GetElementSize() is validated only via assert(); in a
// release build a negative or bogus size would silently corrupt m_pos.
// Consider returning E_FILE_FORMAT_INVALID instead — verify against
// upstream libwebm hardening.
long Segment::DoLoadClusterUnknownSize(
    long long& pos,
    long& len)
{
    assert(m_pos < 0);
    assert(m_pUnknownSize);
#if 0
    assert(m_pUnknownSize->GetElementSize() < 0);  //TODO: verify this
    const long long element_start = m_pUnknownSize->m_element_start;
    pos = -m_pos;
    assert(pos > element_start);
    long long total, avail;
    long status = m_pReader->Length(&total, &avail);
    if (status < 0)  //error
        return status;
    assert((total < 0) || (avail <= total));
    const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
    long long element_size = -1;
    for (;;)  //determine cluster size
    {
        if ((total >= 0) && (pos >= total))
        {
            element_size = total - element_start;
            assert(element_size > 0);
            break;
        }
        if ((segment_stop >= 0) && (pos >= segment_stop))
        {
            element_size = segment_stop - element_start;
            assert(element_size > 0);
            break;
        }
        if ((pos + 1) > avail)
        {
            len = 1;
            return E_BUFFER_NOT_FULL;
        }
        long long result = GetUIntLength(m_pReader, pos, len);
        if (result < 0)  //error
            return static_cast<long>(result);
        if (result > 0)  //weird
            return E_BUFFER_NOT_FULL;
        if ((segment_stop >= 0) && ((pos + len) > segment_stop))
            return E_FILE_FORMAT_INVALID;
        if ((pos + len) > avail)
            return E_BUFFER_NOT_FULL;
        const long long idpos = pos;
        const long long id = ReadUInt(m_pReader, idpos, len);
        if (id < 0)  //error (or underflow)
            return static_cast<long>(id);
        if ((id == 0x0F43B675) || (id == 0x0C53BB6B))  //Cluster ID or Cues ID
        {
            element_size = pos - element_start;
            assert(element_size > 0);
            break;
        }
#ifdef _DEBUG
        switch (id)
        {
            case 0x20:  //BlockGroup
            case 0x23:  //Simple Block
            case 0x67:  //TimeCode
            case 0x2B:  //PrevSize
                break;
            default:
                assert(false);
                break;
        }
#endif
        pos += len;  //consume ID (of sub-element)
        if ((pos + 1) > avail)
        {
            len = 1;
            return E_BUFFER_NOT_FULL;
        }
        result = GetUIntLength(m_pReader, pos, len);
        if (result < 0)  //error
            return static_cast<long>(result);
        if (result > 0)  //weird
            return E_BUFFER_NOT_FULL;
        if ((segment_stop >= 0) && ((pos + len) > segment_stop))
            return E_FILE_FORMAT_INVALID;
        if ((pos + len) > avail)
            return E_BUFFER_NOT_FULL;
        const long long size = ReadUInt(m_pReader, pos, len);
        if (size < 0)  //error
            return static_cast<long>(size);
        pos += len;  //consume size field of element
        if (size == 0)  //weird
            continue;
        const long long unknown_size = (1LL << (7 * len)) - 1;
        if (size == unknown_size)
            return E_FILE_FORMAT_INVALID;  //not allowed for sub-elements
        if ((segment_stop >= 0) && ((pos + size) > segment_stop))  //weird
            return E_FILE_FORMAT_INVALID;
        pos += size;  //consume payload of sub-element
        assert((segment_stop < 0) || (pos <= segment_stop));
    }  //determine cluster size
    assert(element_size >= 0);
    m_pos = element_start + element_size;
    m_pUnknownSize = 0;
    return 2;  //continue parsing
#else
    const long status = m_pUnknownSize->Parse(pos, len);
    if (status < 0)  //error or underflow
        return status;
    if (status == 0)  //parsed a block
        return 2;  //continue parsing
    assert(status > 0);  //nothing left to parse of this cluster
    const long long start = m_pUnknownSize->m_element_start;
    const long long size = m_pUnknownSize->GetElementSize();
    assert(size >= 0);
    pos = start + size;
    m_pos = pos;
    m_pUnknownSize = 0;
    return 2;  //continue parsing
#endif
}
| 1
|
456,846
|
/* Return the next-free-object pointer embedded in @object at the cache's
 * configured freelist offset, decoded via freelist_dereference(). */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return freelist_dereference(s, object + s->offset);
}
| 0
|
53,460
|
/* Register a manufacturer-specific-data callback: the callback is
 * prepended to the adapter's msd_callbacks list. */
void btd_adapter_register_msd_cb(struct btd_adapter *adapter,
							btd_msd_cb_t cb)
{
	adapter->msd_callbacks = g_slist_prepend(adapter->msd_callbacks, cb);
}
| 0
|
370,653
|
/* Register guest-agent command lifecycle hooks with @cs: fsfreeze cleanup
 * (only when built with CONFIG_FSFREEZE) and guest-file initialisation.
 * @s is currently unused here. */
void ga_command_state_init(GAState *s, GACommandState *cs)
{
#if defined(CONFIG_FSFREEZE)
    ga_command_state_add(cs, NULL, guest_fsfreeze_cleanup);
#endif
    ga_command_state_add(cs, guest_file_init, NULL);
}
| 0
|
481,175
|
/* Return the rmap list head for @gfn at page-table @level within @slot.
 * The per-level arrays are indexed from PG_LEVEL_4K. */
static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
					 const struct kvm_memory_slot *slot)
{
	unsigned long idx = gfn_to_index(gfn, slot->base_gfn, level);

	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
}
| 0
|
391,240
|
/* QOM instance finalizer: detach our handlers from the character device,
 * release the reference to it, and free the duplicated chardev name. */
static void rng_egd_finalize(Object *obj)
{
    RngEgd *s = RNG_EGD(obj);
    if (s->chr) {
        /* Clear all callbacks before dropping the chardev. */
        qemu_chr_add_handlers(s->chr, NULL, NULL, NULL, NULL);
        qemu_chr_fe_release(s->chr);
    }
    g_free(s->chr_name);
}
| 0
|
131,865
|
/* Per-netns registration of the conntrack sysctl table: duplicates the
 * template table, points each entry's ->data at this namespace's state,
 * lets protocol helpers fill in their rows, and registers the table under
 * net/netfilter.  Non-init namespaces get the global knobs read-only.
 * Returns 0 on success or -ENOMEM. */
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
	struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
	struct nf_udp_net *un = nf_udp_pernet(net);
	struct ctl_table *table;
	BUILD_BUG_ON(ARRAY_SIZE(nf_ct_sysctl_table) != NF_SYSCTL_CT_LAST_SYSCTL);
	table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
			GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
	table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
	table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
	table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
	table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
#endif
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED];
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
	nf_conntrack_standalone_init_tcp_sysctl(net, table);
	nf_conntrack_standalone_init_sctp_sysctl(net, table);
	nf_conntrack_standalone_init_dccp_sysctl(net, table);
	nf_conntrack_standalone_init_gre_sysctl(net, table);
	/* Don't allow non-init_net ns to alter global sysctls */
	if (!net_eq(&init_net, net)) {
		table[NF_SYSCTL_CT_MAX].mode = 0444;
		table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
		table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
	}
	cnet->sysctl_header = register_net_sysctl(net, "net/netfilter", table);
	if (!cnet->sysctl_header)
		goto out_unregister_netfilter;
	return 0;
out_unregister_netfilter:
	kfree(table);
	return -ENOMEM;
}
| 0
|
379,336
|
/* Generated specialised opcode handler: bitwise OR with a VAR left operand
 * and a compiled-variable (CV) right operand.  Stores the result in the
 * opline's temporary, releases the fetched VAR operand, and advances the
 * VM to the next opcode. */
static int ZEND_FASTCALL ZEND_BW_OR_SPEC_VAR_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
	zend_op *opline = EX(opline);
	zend_free_op free_op1;
	bitwise_or_function(&EX_T(opline->result.u.var).tmp_var,
		_get_zval_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC),
		_get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC) TSRMLS_CC);
	if (free_op1.var) {zval_ptr_dtor(&free_op1.var);};
	ZEND_VM_NEXT_OPCODE();
}
| 0
|
7,757
|
/* Create ("mkdir -p" style) every missing parent directory referenced by
 * the package's directory-name iterator.  Components already validated on
 * the previous path are skipped; plugin pre/prepare/post hooks run around
 * each mkdir.  Returns 0 or the first failing rc.
 *
 * NOTE(review): there is a window between fsmStat() and fsmMkdir() in
 * which a path component could be replaced (e.g. by a symlink), a classic
 * TOCTOU shape — verify this matches upstream rpm's symlink hardening.
 */
static int fsmMkdirs(rpmfiles files, rpmfs fs, rpmPlugins plugins)
{
    DNLI_t dnli = dnlInitIterator(files, fs, 0);
    struct stat sb;
    const char *dpath;
    int rc = 0;
    int i;
    size_t ldnlen = 0;
    const char * ldn = NULL;
    while ((dpath = dnlNextIterator(dnli)) != NULL) {
	size_t dnlen = strlen(dpath);
	char * te, dn[dnlen+1];
	if (dnlen <= 1)
	    continue;
	if (dnlen == ldnlen && rstreq(dpath, ldn))
	    continue;
	/* Copy as we need to modify the string */
	(void) stpcpy(dn, dpath);
	/* Assume '/' directory exists, "mkdir -p" for others if non-existent */
	for (i = 1, te = dn + 1; *te != '\0'; te++, i++) {
	    if (*te != '/')
		continue;
	    /* Already validated? */
	    if (i < ldnlen &&
		(ldn[i] == '/' || ldn[i] == '\0') && rstreqn(dn, ldn, i))
		continue;
	    /* Validate next component of path. */
	    *te = '\0';
	    rc = fsmStat(dn, 1, &sb); /* lstat */
	    *te = '/';
	    /* Directory already exists? */
	    if (rc == 0 && S_ISDIR(sb.st_mode)) {
		continue;
	    } else if (rc == RPMERR_ENOENT) {
		*te = '\0';
		mode_t mode = S_IFDIR | (_dirPerms & 07777);
		rpmFsmOp op = (FA_CREATE|FAF_UNOWNED);
		/* Run fsm file pre hook for all plugins */
		rc = rpmpluginsCallFsmFilePre(plugins, NULL, dn, mode, op);
		if (!rc)
		    rc = fsmMkdir(dn, mode);
		if (!rc) {
		    rc = rpmpluginsCallFsmFilePrepare(plugins, NULL, dn, dn,
						      mode, op);
		}
		/* Run fsm file post hook for all plugins */
		rpmpluginsCallFsmFilePost(plugins, NULL, dn, mode, op, rc);
		if (!rc) {
		    rpmlog(RPMLOG_DEBUG,
			    "%s directory created with perms %04o\n",
			    dn, (unsigned)(mode & 07777));
		}
		*te = '/';
	    }
	    if (rc)
		break;
	}
	if (rc) break;
	/* Save last validated path. */
	ldn = dpath;
	ldnlen = dnlen;
    }
    dnlFreeIterator(dnli);
    return rc;
}
| 1
|
512,905
|
// Handler for the PDF 'k' operator: switch the fill colour space to
// DeviceCMYK and set the fill colour from the four numeric operands.
// If text was being drawn with a colour-space pattern, the pending text
// object is ended (and filled if the device requires it) before the
// colour space changes, and a new text object is begun afterwards.
void Gfx::opSetFillCMYKColor(Object args[], int numArgs) {
  GfxColor color;
  int i;
  if (textHaveCSPattern && drawText) {
    GBool needFill = out->deviceHasTextClip(state);
    out->endTextObject(state);
    if (needFill) {
      doPatternFill(gTrue);
    }
    out->restoreState(state);
  }
  state->setFillPattern(NULL);
  state->setFillColorSpace(new GfxDeviceCMYKColorSpace());
  out->updateFillColorSpace(state);
  for (i = 0; i < 4; ++i) {
    color.c[i] = dblToCol(args[i].getNum());
  }
  state->setFillColor(&color);
  out->updateFillColor(state);
  if (textHaveCSPattern) {
    // Re-open the text object with the refreshed state.
    out->beginTextObject(state);
    out->updateRender(state);
    out->updateTextMat(state);
    out->updateTextPos(state);
    textHaveCSPattern = gFalse;
  }
}
| 0
|
278,941
|
/* Set the given bit(s) in the controller's interrupt status register and
 * re-evaluate the IRQ line. */
static inline void ohci_set_interrupt(OHCIState *ohci, uint32_t intr)
{
    ohci->intr_status |= intr;
    ohci_intr_update(ohci);
}
| 0
|
19,833
|
/* Read one dictionary element record (offset + size) from @io into slot
 * @i of @e, rebasing a non-zero offset by @BaseOffset.  Returns TRUE on
 * success, FALSE on any read failure. */
static cmsBool ReadOneElem(cmsIOHANDLER *io, _cmsDICelem *e,
                           cmsUInt32Number i, cmsUInt32Number BaseOffset)
{
    if (!_cmsReadUInt32Number(io, &e->Offsets[i])) return FALSE;
    if (!_cmsReadUInt32Number(io, &e->Sizes[i])) return FALSE;

    /* A zero offset marks a missing entry; only rebase real offsets. */
    if (e->Offsets[i] > 0)
        e->Offsets[i] += BaseOffset;

    return TRUE;
}
| 0
|
120,239
|
/*
 * Generate bytecode for a (named) function expression: resolve the name's
 * variable reference (emitting a ReferenceError path when unresolved),
 * compile the function's scope, then emit an NJS_VMCODE_FUNCTION
 * instruction whose retval is a freshly allocated destination index.
 * Returns NJS_OK/NJS_ERROR via the generator stack pop.
 */
njs_generate_function_expression(njs_vm_t *vm, njs_generator_t *generator,
    njs_parser_node_t *node)
{
    njs_int_t              ret;
    njs_variable_t         *var;
    njs_function_lambda_t  *lambda;
    njs_vmcode_function_t  *function;
    const njs_lexer_entry_t  *lex_entry;
    var = njs_variable_reference(vm, node->left);
    if (njs_slow_path(var == NULL)) {
        /* Unresolvable name: compile a ReferenceError instead. */
        ret = njs_generate_reference_error(vm, generator, node->left);
        if (njs_slow_path(ret != NJS_OK)) {
            return ret;
        }
        return njs_generator_stack_pop(vm, generator, NULL);
    }
    lambda = node->u.value.data.u.lambda;
    lex_entry = njs_lexer_entry(var->unique_id);
    if (njs_slow_path(lex_entry == NULL)) {
        return NJS_ERROR;
    }
    ret = njs_generate_function_scope(vm, generator, lambda, node,
                                      &lex_entry->name);
    if (njs_slow_path(ret != NJS_OK)) {
        return ret;
    }
    njs_generate_code(generator, njs_vmcode_function_t, function,
                      NJS_VMCODE_FUNCTION, 1, node);
    function->lambda = lambda;
    function->async = (node->token_type == NJS_TOKEN_ASYNC_FUNCTION_EXPRESSION);
    node->index = njs_generate_object_dest_index(vm, generator, node);
    if (njs_slow_path(node->index == NJS_INDEX_ERROR)) {
        return NJS_ERROR;
    }
    function->retval = node->index;
    return njs_generator_stack_pop(vm, generator, NULL);
}
| 0
|
329,580
|
/* Refresh the host display surface from the guest's primary surface.
 * On a resize, re-resolve the guest framebuffer pointer and replace the
 * display surface; then blit every accumulated dirty rectangle and push
 * the updated regions to the console.  Caller holds the appropriate lock
 * (per the _unlocked suffix convention — TODO confirm which lock).
 *
 * NOTE(review): the dirty-rect sanity checks below compare against the
 * surface bounds but not against zero; if the rect fields are signed,
 * negative left/top values would pass — confirm the field types and add
 * explicit < 0 checks if needed (guest-controlled data). */
static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl)
{
    VGACommonState *vga = &qxl->vga;
    DisplaySurface *surface;
    int i;
    if (qxl->guest_primary.resized) {
        qxl->guest_primary.resized = 0;
        qxl->guest_primary.data = qxl_phys2virt(qxl,
                                                qxl->guest_primary.surface.mem,
                                                MEMSLOT_GROUP_GUEST);
        if (!qxl->guest_primary.data) {
            return;
        }
        qxl_set_rect_to_surface(qxl, &qxl->dirty[0]);
        qxl->num_dirty_rects = 1;
        trace_qxl_render_guest_primary_resized(
               qxl->guest_primary.surface.width,
               qxl->guest_primary.surface.height,
               qxl->guest_primary.qxl_stride,
               qxl->guest_primary.bytes_pp,
               qxl->guest_primary.bits_pp);
        if (qxl->guest_primary.qxl_stride > 0) {
            /* Positive stride: share the guest buffer directly. */
            surface = qemu_create_displaysurface_from
                (qxl->guest_primary.surface.width,
                 qxl->guest_primary.surface.height,
                 qxl->guest_primary.bits_pp,
                 qxl->guest_primary.abs_stride,
                 qxl->guest_primary.data,
                 false);
        } else {
            surface = qemu_create_displaysurface
                (qxl->guest_primary.surface.width,
                 qxl->guest_primary.surface.height);
        }
        dpy_gfx_replace_surface(vga->con, surface);
    }
    if (!qxl->guest_primary.data) {
        return;
    }
    for (i = 0; i < qxl->num_dirty_rects; i++) {
        if (qemu_spice_rect_is_empty(qxl->dirty+i)) {
            break;
        }
        /* Skip malformed or out-of-bounds rectangles. */
        if (qxl->dirty[i].left > qxl->dirty[i].right ||
            qxl->dirty[i].top > qxl->dirty[i].bottom ||
            qxl->dirty[i].right > qxl->guest_primary.surface.width ||
            qxl->dirty[i].bottom > qxl->guest_primary.surface.height) {
            continue;
        }
        qxl_blit(qxl, qxl->dirty+i);
        dpy_gfx_update(vga->con,
                       qxl->dirty[i].left, qxl->dirty[i].top,
                       qxl->dirty[i].right - qxl->dirty[i].left,
                       qxl->dirty[i].bottom - qxl->dirty[i].top);
    }
    qxl->num_dirty_rects = 0;
}
| 1
|
168,503
|
// Returns true when |renderer| must get a compositing layer because it has
// a transform with a 3D component, and the embedder enabled the 3D
// transform trigger.
bool RenderLayerCompositor::requiresCompositingForTransform(RenderObject* renderer) const
{
    if (!(m_compositingTriggers & ChromeClient::ThreeDTransformTrigger))
        return false;

    RenderStyle* rendererStyle = renderer->style();
    return renderer->hasTransform() && rendererStyle->transform().has3DOperation();
}
| 0
|
108,815
|
// Trivial virtual destructor; no resources to release beyond the base.
~KeyedDenseTensorColumn() override {}
| 0
|
335,745
|
/* Heuristically decide whether spatial (intra) concealment is more likely
 * to succeed than temporal (inter) concealment for the damaged frame.
 * Returns 1 for intra/spatial, 0 for temporal.  Samples up to ~50
 * undamaged macroblocks; for I-frames it compares SAD against the previous
 * frame, otherwise it votes by the coded MB type. */
static int is_intra_more_likely(ERContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
    if (!s->last_pic.f || !s->last_pic.f->data[0])
        return 1; // no previous frame available -> use spatial prediction
    undamaged_count = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        const int error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            undamaged_count++;
    }
    if (s->avctx->codec_id == AV_CODEC_ID_H264 && s->ref_count <= 0)
        return 1;
    if (undamaged_count < 5)
        return 0; // almost all MBs damaged -> use temporal prediction
#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    // prevent dsp.sad() check, that requires access to the image
    if (CONFIG_MPEG_XVMC_DECODER &&
        s->avctx->xvmc_acceleration &&
        s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I)
        return 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */
    skip_amount     = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
    is_intra_likely = 0;
    j = 0;
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int error;
            const int mb_xy = mb_x + mb_y * s->mb_stride;
            error = s->error_status_table[mb_xy];
            if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
                continue; // skip damaged
            j++;
            // skip a few to speed things up
            if ((j % skip_amount) != 0)
                continue;
            if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) {
                int *linesize = s->cur_pic.f->linesize;
                uint8_t *mb_ptr      = s->cur_pic.f->data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];
                uint8_t *last_mb_ptr = s->last_pic.f->data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];
                if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                    // FIXME
                } else {
                    ff_thread_await_progress(s->last_pic.tf, mb_y, 0);
                }
                is_intra_likely += s->mecc->sad[0](NULL, last_mb_ptr, mb_ptr,
                                                   linesize[0], 16);
                // vote against intra when the previous frame is self-similar
                is_intra_likely -= s->mecc->sad[0](NULL, last_mb_ptr,
                                                   last_mb_ptr + linesize[0] * 16,
                                                   linesize[0], 16);
            } else {
                if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                   is_intra_likely++;
                else
                   is_intra_likely--;
            }
        }
    }
    return is_intra_likely > 0;
}
| 0
|
402,356
|
/* QOM class initializer: wire up the PCI and generic-device hooks
 * for the EHCI USB host controller. */
static void ehci_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pci = PCI_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* PCI-facing lifecycle and identification. */
    pci->realize      = usb_ehci_pci_realize;
    pci->exit         = usb_ehci_pci_exit;
    pci->class_id     = PCI_CLASS_SERIAL_USB;
    pci->config_write = usb_ehci_pci_write_config;

    /* Generic device plumbing: migration state, properties, reset. */
    dc->vmsd  = &vmstate_ehci_pci;
    dc->props = ehci_pci_properties;
    dc->reset = usb_ehci_pci_reset;
}
| 0
|
288,911
|
/* Dispose of a LutAToB tag payload: Ptr is the cmsPipeline built by
 * the matching Read handler. */
static void Type_LUTA2B_Free(struct _cms_typehandler_struct* self, void* Ptr)
{
    cmsUNUSED_PARAMETER(self);  /* handler context is not needed here */

    cmsPipelineFree((cmsPipeline*) Ptr);
}
| 0
|
400,324
|
/* Retire a cache entry: mark its slot free and drop the entry lock. */
void am_cache_delete(server_rec *s, am_cache_entry_t *cache)
{
    /* An empty key is the convention for "slot unused". */
    *cache->key = '\0';

    /* Release the per-entry lock now that the slot is recycled. */
    am_cache_unlock(s, cache);
}
| 0
|
244,099
|
void InputDispatcher::drainInboundQueueLocked() {
    // Pop and release every pending event until the queue is empty.
    for (;;) {
        if (mInboundQueue.isEmpty()) {
            break;
        }
        releaseInboundEventLocked(mInboundQueue.dequeueAtHead());
    }
    traceInboundQueueLengthLocked();
}
| 0
|
245,148
|
RTCPeerConnectionHandler::~RTCPeerConnectionHandler() {
  // Must run on the owning sequence.
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  // Tear the native connection down first, then deregister this handler.
  Stop();
  GetPeerConnectionHandlers()->erase(this);
  if (peer_connection_tracker_)
    peer_connection_tracker_->UnregisterPeerConnection(this);
  // Record lifetime data-channel usage for this connection.
  UMA_HISTOGRAM_COUNTS_10000(
      "WebRTC.NumDataChannelsPerPeerConnection", num_data_channels_created_);
}
| 0
|
80,545
|
/* Expat unknown-encoding callback: supply single-byte translation
 * tables for the two encodings OGR knows how to map. */
static int OGRExpatUnknownEncodingHandler(
    void * /* unused_encodingHandlerData */,
    const XML_Char *name,
    XML_Encoding *info )
{
    if( EQUAL(name, "WINDOWS-1252") )
    {
        FillWINDOWS1252(info);
    }
    else if( EQUAL(name, "ISO-8859-15") )
    {
        FillISO885915(info);
    }
    else
    {
        CPLDebug("OGR", "Unhandled encoding %s", name);
        return XML_STATUS_ERROR;
    }

    // Pure lookup tables: no converter callback, state, or cleanup needed.
    info->convert = nullptr;
    info->release = nullptr;
    info->data = nullptr;

    return XML_STATUS_OK;
}
| 0
|
353,933
|
/* Re-execute the current process inside a fresh user+mount namespace.
 * The parent returns the child's pid; the child re-execs argv (or copies
 * file_to_read to outputfd) after synchronizing over the 'ready' pipe.
 * Protocol on 'ready': parent writes '0' when uid/gid maps are set; the
 * child writes back '0' on success, '1'/'2' on failure. */
reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_read, int outputfd)
{
  int ret;
  pid_t pid;
  char b;
  pid_t ppid = getpid ();
  char **argv;
  char uid[16];
  char gid[16];
  char *listen_fds = NULL;
  char *listen_pid = NULL;
  bool do_socket_activation = false;
  char *cwd = getcwd (NULL, 0);
  sigset_t sigset, oldsigset;

  if (cwd == NULL)
    {
      fprintf (stderr, "error getting current working directory: %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }

  /* Detect systemd socket activation aimed at this exact pid. */
  listen_pid = getenv("LISTEN_PID");
  listen_fds = getenv("LISTEN_FDS");
  if (listen_pid != NULL && listen_fds != NULL)
    {
      if (strtol(listen_pid, NULL, 10) == getpid())
        do_socket_activation = true;
    }

  /* Capture the pre-namespace uid/gid for the child's environment. */
  sprintf (uid, "%d", geteuid ());
  sprintf (gid, "%d", getegid ());

  pid = syscall_clone (CLONE_NEWUSER|CLONE_NEWNS|SIGCHLD, NULL);
  if (pid < 0)
    {
      fprintf (stderr, "cannot clone: %s\n", strerror (errno));
      /* Diagnose the usual sysctl culprits before giving up. */
      check_proc_sys_userns_file (_max_user_namespaces);
      check_proc_sys_userns_file (_unprivileged_user_namespaces);
    }
  if (pid)
    {
      /* Parent path: the activation fds belong to the child now. */
      if (do_socket_activation)
        {
          long num_fds;
          num_fds = strtol (listen_fds, NULL, 10);
          if (num_fds != LONG_MIN && num_fds != LONG_MAX)
            {
              long i;
              for (i = 3; i < num_fds + 3; i++)
                if (FD_ISSET (i, &open_files_set))
                  close (i);
            }
          unsetenv ("LISTEN_PID");
          unsetenv ("LISTEN_FDS");
          unsetenv ("LISTEN_FDNAMES");
        }
      return pid;
    }

  /* Child: block everything except SIGCHLD/SIGTERM while setting up. */
  if (sigfillset (&sigset) < 0)
    {
      fprintf (stderr, "cannot fill sigset: %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }
  if (sigdelset (&sigset, SIGCHLD) < 0)
    {
      fprintf (stderr, "cannot sigdelset(SIGCHLD): %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }
  if (sigdelset (&sigset, SIGTERM) < 0)
    {
      fprintf (stderr, "cannot sigdelset(SIGTERM): %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }
  if (sigprocmask (SIG_BLOCK, &sigset, &oldsigset) < 0)
    {
      fprintf (stderr, "cannot block signals: %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }

  /* Rebuild the original command line from the parent's /proc entry. */
  argv = get_cmd_line_args (ppid);
  if (argv == NULL)
    {
      fprintf (stderr, "cannot read argv: %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }

  /* Re-target socket activation at the child's pid. */
  if (do_socket_activation)
    {
      char s[32];
      sprintf (s, "%d", getpid());
      setenv ("LISTEN_PID", s, true);
    }

  setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1);
  setenv ("_CONTAINERS_ROOTLESS_UID", uid, 1);
  setenv ("_CONTAINERS_ROOTLESS_GID", gid, 1);

  /* Wait for the parent to finish writing the uid/gid maps. */
  ret = TEMP_FAILURE_RETRY (read (ready, &b, 1));
  if (ret < 0)
    {
      fprintf (stderr, "cannot read from sync pipe: %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }
  if (b != '0')
    _exit (EXIT_FAILURE);

  /* Become root inside the new user namespace. */
  if (syscall_setresgid (0, 0, 0) < 0)
    {
      fprintf (stderr, "cannot setresgid: %s\n", strerror (errno));
      TEMP_FAILURE_RETRY (write (ready, "1", 1));
      _exit (EXIT_FAILURE);
    }
  if (syscall_setresuid (0, 0, 0) < 0)
    {
      fprintf (stderr, "cannot setresuid: %s\n", strerror (errno));
      TEMP_FAILURE_RETRY (write (ready, "1", 1));
      _exit (EXIT_FAILURE);
    }
  /* Restore the working directory inside the namespace. */
  if (chdir (cwd) < 0)
    {
      fprintf (stderr, "cannot chdir: %s\n", strerror (errno));
      TEMP_FAILURE_RETRY (write (ready, "1", 1));
      _exit (EXIT_FAILURE);
    }
  free (cwd);

  if (pause_pid_file_path && pause_pid_file_path[0] != '\0')
    {
      /* Keep the namespace alive via a long-lived pause process. */
      if (create_pause_process (pause_pid_file_path, argv) < 0)
        {
          TEMP_FAILURE_RETRY (write (ready, "2", 1));
          _exit (EXIT_FAILURE);
        }
    }

  /* Tell the parent we are fully set up. */
  ret = TEMP_FAILURE_RETRY (write (ready, "0", 1));
  if (ret < 0)
    {
      fprintf (stderr, "cannot write to ready pipe: %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }
  close (ready);

  if (sigprocmask (SIG_SETMASK, &oldsigset, NULL) < 0)
    {
      fprintf (stderr, "cannot block signals: %s\n", strerror (errno));
      _exit (EXIT_FAILURE);
    }

  if (file_to_read && file_to_read[0])
    {
      /* Alternate mode: dump a file to the caller instead of re-exec. */
      ret = copy_file_to_fd (file_to_read, outputfd);
      close (outputfd);
      _exit (ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
    }

  execvp (argv[0], argv);

  _exit (EXIT_FAILURE);
}
| 1
|
408,792
|
/* GIMP plug-in query entry point: registers the PSP load procedure
 * (and keeps the not-yet-implemented save procedure compiled out). */
query (void)
{
  static const GimpParamDef load_args[] =
  {
    { GIMP_PDB_INT32, "run-mode", "The run mode { RUN-INTERACTIVE (0), RUN-NONINTERACTIVE (1) }" },
    { GIMP_PDB_STRING, "filename", "The name of the file to load" },
    { GIMP_PDB_STRING, "raw-filename", "The name of the file to load" }
  };
  static const GimpParamDef load_return_vals[] =
  {
    { GIMP_PDB_IMAGE, "image", "Output image" }
  };

#if 0
  /* Save arguments, kept for when exporting gets implemented. */
  static const GimpParamDef save_args[] =
  {
    { GIMP_PDB_INT32, "run-mode", "The run mode { RUN-INTERACTIVE (0), RUN-NONINTERACTIVE (1) }" },
    { GIMP_PDB_IMAGE, "image", "Input image" },
    { GIMP_PDB_DRAWABLE, "drawable", "Drawable to export" },
    { GIMP_PDB_STRING, "filename", "The name of the file to export the image in" },
    { GIMP_PDB_STRING, "raw-filename", "The name of the file to export the image in" },
    { GIMP_PDB_INT32, "compression", "Specify 0 for no compression, 1 for RLE, and 2 for LZ77" }
  };
#endif

  gimp_install_procedure (LOAD_PROC,
                          "loads images from the Paint Shop Pro PSP file format",
                          "This plug-in loads and exports images in "
                          "Paint Shop Pro's native PSP format. "
                          "Vector layers aren't handled. Exporting isn't "
                          "yet implemented.",
                          "Tor Lillqvist",
                          "Tor Lillqvist",
                          "1999",
                          N_("Paint Shop Pro image"),
                          NULL,
                          GIMP_PLUGIN,
                          G_N_ELEMENTS (load_args),
                          G_N_ELEMENTS (load_return_vals),
                          load_args, load_return_vals);

  gimp_register_file_handler_mime (LOAD_PROC, "image/x-psp");
  /* Magic match: files must start with the PSP signature string. */
  gimp_register_magic_load_handler (LOAD_PROC,
                                    "psp,tub,pspimage",
                                    "",
                                    "0,string,Paint\\040Shop\\040Pro\\040Image\\040File\n\032");

  /* commented out until exporting is implemented */
#if 0
  gimp_install_procedure (SAVE_PROC,
                          "exports images in the Paint Shop Pro PSP file format",
                          "This plug-in loads and exports images in "
                          "Paint Shop Pro's native PSP format. "
                          "Vector layers aren't handled. Exporting isn't "
                          "yet implemented.",
                          "Tor Lillqvist",
                          "Tor Lillqvist",
                          "1999",
                          N_("Paint Shop Pro image"),
                          "RGB*, GRAY*, INDEXED*",
                          GIMP_PLUGIN,
                          G_N_ELEMENTS (save_args), 0,
                          save_args, NULL);

  gimp_register_save_handler (SAVE_PROC, "psp,tub", "");
#endif
}
| 0
|
393,582
|
/* Compare two atom ranges for possible overlap during automaton
 * determinization.  Returns 1 when the ranges may accept a common
 * character, 0 when they are provably disjoint, and -1 when the pair
 * cannot be compared (composite/string atoms). */
xmlFACompareRanges(xmlRegRangePtr range1, xmlRegRangePtr range2) {
    int ret = 0;

    /* Composite or string atoms cannot be compared character-wise. */
    if ((range1->type == XML_REGEXP_RANGES) ||
        (range2->type == XML_REGEXP_RANGES) ||
        (range2->type == XML_REGEXP_SUBREG) ||
        (range1->type == XML_REGEXP_SUBREG) ||
        (range1->type == XML_REGEXP_STRING) ||
        (range2->type == XML_REGEXP_STRING))
	return(-1);

    /* put them in order */
    if (range1->type > range2->type) {
	xmlRegRangePtr tmp;

	tmp = range1;
	range1 = range2;
	range2 = tmp;
    }
    if ((range1->type == XML_REGEXP_ANYCHAR) ||
	(range2->type == XML_REGEXP_ANYCHAR)) {
	ret = 1;
    } else if ((range1->type == XML_REGEXP_EPSILON) ||
	       (range2->type == XML_REGEXP_EPSILON)) {
	return(0);
    } else if (range1->type == range2->type) {
	if (range1->type != XML_REGEXP_CHARVAL)
	    ret = 1;
	else if ((range1->end < range2->start) ||
	         (range2->end < range1->start))
	    ret = 0;
	else
	    ret = 1;
    } else if (range1->type == XML_REGEXP_CHARVAL) {
        int codepoint;
	int neg = 0;

	/*
	 * just check all codepoints in the range for acceptance,
	 * this is usually way cheaper since done only once at
	 * compilation than testing over and over at runtime or
	 * pushing too many states when evaluating.
	 */
	if (((range1->neg == 0) && (range2->neg != 0)) ||
	    ((range1->neg != 0) && (range2->neg == 0)))
	    neg = 1;

	for (codepoint = range1->start;codepoint <= range1->end ;codepoint++) {
	    ret = xmlRegCheckCharacterRange(range2->type, codepoint,
					    0, range2->start, range2->end,
					    range2->blockName);
	    if (ret < 0)
	        return(-1);
	    if (((neg == 1) && (ret == 0)) ||
	        ((neg == 0) && (ret == 1)))
		return(1);
	}
	return(0);
    } else if ((range1->type == XML_REGEXP_BLOCK_NAME) ||
               (range2->type == XML_REGEXP_BLOCK_NAME)) {
	if (range1->type == range2->type) {
	    ret = xmlStrEqual(range1->blockName, range2->blockName);
	} else {
	    /*
	     * comparing a block range with anything else is way
	     * too costly, and maintaining the table is like too much
	     * memory too, so let's force the automata to save state
	     * here.
	     */
	    return(1);
	}
    } else if ((range1->type < XML_REGEXP_LETTER) ||
	       (range2->type < XML_REGEXP_LETTER)) {
	/* Only the exact complement pairs are known disjoint. */
	if ((range1->type == XML_REGEXP_ANYSPACE) &&
	    (range2->type == XML_REGEXP_NOTSPACE))
	    ret = 0;
	else if ((range1->type == XML_REGEXP_INITNAME) &&
	         (range2->type == XML_REGEXP_NOTINITNAME))
	    ret = 0;
	else if ((range1->type == XML_REGEXP_NAMECHAR) &&
	         (range2->type == XML_REGEXP_NOTNAMECHAR))
	    ret = 0;
	else if ((range1->type == XML_REGEXP_DECIMAL) &&
	         (range2->type == XML_REGEXP_NOTDECIMAL))
	    ret = 0;
	else if ((range1->type == XML_REGEXP_REALCHAR) &&
	         (range2->type == XML_REGEXP_NOTREALCHAR))
	    ret = 0;
	else {
	    /* same thing to limit complexity */
	    return(1);
	}
    } else {
	ret = 0;
        /* range1->type < range2->type here */
        /* Unicode categories: a parent category overlaps its subgroups,
         * everything else in this band is disjoint. */
        switch (range1->type) {
	    case XML_REGEXP_LETTER:
	         /* all disjoint except in the subgroups */
	         if ((range2->type == XML_REGEXP_LETTER_UPPERCASE) ||
		     (range2->type == XML_REGEXP_LETTER_LOWERCASE) ||
		     (range2->type == XML_REGEXP_LETTER_TITLECASE) ||
		     (range2->type == XML_REGEXP_LETTER_MODIFIER) ||
		     (range2->type == XML_REGEXP_LETTER_OTHERS))
		     ret = 1;
		 break;
	    case XML_REGEXP_MARK:
	         if ((range2->type == XML_REGEXP_MARK_NONSPACING) ||
		     (range2->type == XML_REGEXP_MARK_SPACECOMBINING) ||
		     (range2->type == XML_REGEXP_MARK_ENCLOSING))
		     ret = 1;
		 break;
	    case XML_REGEXP_NUMBER:
	         if ((range2->type == XML_REGEXP_NUMBER_DECIMAL) ||
		     (range2->type == XML_REGEXP_NUMBER_LETTER) ||
		     (range2->type == XML_REGEXP_NUMBER_OTHERS))
		     ret = 1;
		 break;
	    case XML_REGEXP_PUNCT:
	         if ((range2->type == XML_REGEXP_PUNCT_CONNECTOR) ||
		     (range2->type == XML_REGEXP_PUNCT_DASH) ||
		     (range2->type == XML_REGEXP_PUNCT_OPEN) ||
		     (range2->type == XML_REGEXP_PUNCT_CLOSE) ||
		     (range2->type == XML_REGEXP_PUNCT_INITQUOTE) ||
		     (range2->type == XML_REGEXP_PUNCT_FINQUOTE) ||
		     (range2->type == XML_REGEXP_PUNCT_OTHERS))
		     ret = 1;
		 break;
	    case XML_REGEXP_SEPAR:
	         if ((range2->type == XML_REGEXP_SEPAR_SPACE) ||
		     (range2->type == XML_REGEXP_SEPAR_LINE) ||
		     (range2->type == XML_REGEXP_SEPAR_PARA))
		     ret = 1;
		 break;
	    case XML_REGEXP_SYMBOL:
	         if ((range2->type == XML_REGEXP_SYMBOL_MATH) ||
		     (range2->type == XML_REGEXP_SYMBOL_CURRENCY) ||
		     (range2->type == XML_REGEXP_SYMBOL_MODIFIER) ||
		     (range2->type == XML_REGEXP_SYMBOL_OTHERS))
		     ret = 1;
		 break;
	    case XML_REGEXP_OTHER:
	         if ((range2->type == XML_REGEXP_OTHER_CONTROL) ||
		     (range2->type == XML_REGEXP_OTHER_FORMAT) ||
		     (range2->type == XML_REGEXP_OTHER_PRIVATE))
		     ret = 1;
		 break;
            default:
	         if ((range2->type >= XML_REGEXP_LETTER) &&
		     (range2->type < XML_REGEXP_BLOCK_NAME))
		     ret = 0;
		 else {
		     /* safety net ! */
		     return(1);
		 }
	}
    }
    /* Opposite negation flags invert the overlap verdict. */
    if (((range1->neg == 0) && (range2->neg != 0)) ||
	((range1->neg != 0) && (range2->neg == 0)))
	ret = !ret;
    return(ret);
}
| 0
|
461,113
|
/* Read a length-prefixed string from the WSDL cache stream; advances
 * *in past the consumed bytes.  Returns an emalloc'd NUL-terminated
 * copy, or NULL when the sentinel marker was stored. */
static char* sdl_deserialize_string(char **in)
{
    char *str;
    int len;

    WSDL_CACHE_GET_INT(len, in);
    /* Sentinel length encodes a NULL string. */
    if (len == WSDL_NO_STRING_MARKER) {
        return NULL;
    }

    /* NOTE(review): len comes straight from the cache file and is not
     * range-checked here -- presumably the cache is trusted; verify. */
    str = emalloc(len + 1);
    WSDL_CACHE_GET_N(str, len, in);
    str[len] = '\0';
    return str;
}
| 0
|
9,796
|
// Synchronous getDirectory: blocks via EntrySyncCallbackHelper until the
// backend responds, reporting failures through |exceptionState|.
DirectoryEntrySync* DirectoryEntrySync::getDirectory(const String& path, const Dictionary& options, ExceptionState& exceptionState)
{
    FileSystemFlags flags(options);
    RefPtr<EntrySyncCallbackHelper> helper = EntrySyncCallbackHelper::create();
    m_fileSystem->getDirectory(this, path, flags, helper->successCallback(), helper->errorCallback(), DOMFileSystemBase::Synchronous);
    // NOTE(review): the downcast assumes this request can only yield a
    // directory entry -- confirm against the helper's contract.
    return static_cast<DirectoryEntrySync*>(helper->getResult(exceptionState));
}
| 1
|
214,163
|
// Remembers the browser whose window this task will close when run.
explicit CloseWindowTask(Browser* browser) : browser_(browser) {}
| 0
|
360,026
|
/* Locate the monitor registered by (client, file) in the directory's
 * monitor list; returns the matching GList node or NULL. */
find_monitor (NautilusDirectory *directory,
              NautilusFile *file,
              gconstpointer client)
{
    /* Build a probe key and search with the shared comparator. */
    Monitor key;

    key.client = client;
    key.file = file;

    return g_list_find_custom (directory->details->monitor_list,
                               &key,
                               monitor_key_compare);
}
| 0
|
302,529
|
/* Open the header cache for an NNTP group, or return NULL when caching
 * does not apply to this server/group combination. */
header_cache_t *nntp_hcache_open(struct NntpData *nntp_data)
{
  struct Url url;
  char file[PATH_MAX];

  /* Caching needs a connected, cacheable server and a known group. */
  if (!nntp_data->nserv || !nntp_data->nserv->cacheable)
    return NULL;
  if (!nntp_data->nserv->conn || !nntp_data->group)
    return NULL;
  /* Skip groups tracked nowhere: no .newsrc entry, not subscribed,
   * and unsubscribed groups are not being saved. */
  if (!(nntp_data->newsrc_ent || nntp_data->subscribed || SaveUnsubscribed))
    return NULL;

  /* The cache file name is the server URL plus the group path. */
  mutt_account_tourl(&nntp_data->nserv->conn->account, &url);
  url.path = nntp_data->group;
  url_tostring(&url, file, sizeof(file), U_PATH);
  return mutt_hcache_open(NewsCacheDir, file, nntp_hcache_namer);
}
| 0
|
115,848
|
/* .write_end for data=journal mode: journal the copied buffers, update
 * i_size, and unwind partially-allocated blocks on short/failed copies.
 * Returns the number of bytes accounted, or a negative error. */
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	/* data=journal must always run under an active handle. */
	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		copied = ext4_write_inline_data_end(inode, pos, len,
						    copied, page);
	else {
		if (copied < len) {
			/* Short copy into a non-uptodate page: discard it
			 * and zero the tail so no stale data is journaled. */
			if (!PageUptodate(page))
				copied = 0;
			zero_new_buffers(page, from+copied, to);
		}

		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     to, &partial, write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	size_changed = ext4_update_inode_size(inode, pos + copied);
	/* Mark the inode so fsync knows journaled data needs committing. */
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less. We will have blocks allocated outside
		 * inode->i_size. So truncate them
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
| 0
|
355,590
|
/* Build the circular list of scheduler groups covering @span, using
 * @group_fn to map each CPU to its group.  @covered and @tmpmask are
 * caller-supplied scratch cpumasks. */
init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
			int (*group_fn)(int cpu, const cpumask_t *cpu_map,
					struct sched_group **sg,
					cpumask_t *tmpmask),
			cpumask_t *covered, cpumask_t *tmpmask)
{
	struct sched_group *first = NULL, *last = NULL;
	int i;

	cpus_clear(*covered);

	for_each_cpu_mask(i, *span) {
		struct sched_group *sg;
		int group = group_fn(i, cpu_map, &sg, tmpmask);
		int j;

		/* Each group is initialized once, on its first member. */
		if (cpu_isset(i, *covered))
			continue;

		cpus_clear(sg->cpumask);
		sg->__cpu_power = 0;

		/* Pull every CPU of this group into its mask. */
		for_each_cpu_mask(j, *span) {
			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
				continue;

			cpu_set(j, *covered);
			cpu_set(j, sg->cpumask);
		}
		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	/*
	 * Close the ring.  Guard against an empty @span, which previously
	 * dereferenced a NULL 'last'.
	 */
	if (last)
		last->next = first;
}
| 0
|
325,686
|
/* Shared init for primary/secondary QXL devices: picks PCI IDs from the
 * requested revision, allocates ROM/RAM/VRAM regions, registers BARs,
 * and hooks the device into SPICE.  Returns 0. */
static int qxl_init_common(PCIQXLDevice *qxl)
{
    uint8_t* config = qxl->pci.config;
    uint32_t pci_device_id;
    uint32_t pci_device_rev;
    uint32_t io_size;

    qxl->mode = QXL_MODE_UNDEFINED;
    qxl->generation = 1;
    qxl->num_memslots = NUM_MEMSLOTS;
    qxl->num_surfaces = NUM_SURFACES;

    /* Map the user-visible revision onto PCI identification. */
    switch (qxl->revision) {
    case 1: /* spice 0.4 -- qxl-1 */
        pci_device_id  = QXL_DEVICE_ID_STABLE;
        pci_device_rev = QXL_REVISION_STABLE_V04;
        break;
    case 2: /* spice 0.6 -- qxl-2 */
        pci_device_id  = QXL_DEVICE_ID_STABLE;
        pci_device_rev = QXL_REVISION_STABLE_V06;
        break;
    default: /* experimental */
        pci_device_id  = QXL_DEVICE_ID_DEVEL;
        pci_device_rev = 1;
        break;
    }

    pci_config_set_vendor_id(config, REDHAT_PCI_VENDOR_ID);
    pci_config_set_device_id(config, pci_device_id);
    pci_set_byte(&config[PCI_REVISION_ID], pci_device_rev);
    pci_set_byte(&config[PCI_INTERRUPT_PIN], 1);

    qxl->rom_size = qxl_rom_size();
    qxl->rom_offset = qemu_ram_alloc(&qxl->pci.qdev, "qxl.vrom", qxl->rom_size);
    init_qxl_rom(qxl);
    init_qxl_ram(qxl);

    /* Enforce a 16 MB VRAM floor, then round up to a power of two;
     * revision 1 devices get only a token 4 KB region. */
    if (qxl->vram_size < 16 * 1024 * 1024) {
        qxl->vram_size = 16 * 1024 * 1024;
    }
    if (qxl->revision == 1) {
        qxl->vram_size = 4096;
    }
    qxl->vram_size = msb_mask(qxl->vram_size * 2 - 1);
    qxl->vram_offset = qemu_ram_alloc(&qxl->pci.qdev, "qxl.vram", qxl->vram_size);

    io_size = msb_mask(QXL_IO_RANGE_SIZE * 2 - 1);
    if (qxl->revision == 1) {
        io_size = 8;
    }

    pci_register_bar(&qxl->pci, QXL_IO_RANGE_INDEX,
                     io_size, PCI_BASE_ADDRESS_SPACE_IO, qxl_map);

    pci_register_bar(&qxl->pci, QXL_ROM_RANGE_INDEX,
                     qxl->rom_size, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     qxl_map);

    pci_register_bar(&qxl->pci, QXL_RAM_RANGE_INDEX,
                     qxl->vga.vram_size, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     qxl_map);

    pci_register_bar(&qxl->pci, QXL_VRAM_RANGE_INDEX, qxl->vram_size,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, qxl_map);

    /* Expose the device to SPICE and wire up VM lifecycle hooks. */
    qxl->ssd.qxl.base.sif = &qxl_interface.base;
    qxl->ssd.qxl.id = qxl->id;
    qemu_spice_add_interface(&qxl->ssd.qxl.base);
    qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl);

    init_pipe_signaling(qxl);
    qxl_reset_state(qxl);

    return 0;
}
| 0
|
96,106
|
/* Queue param->sglen concurrent control reads on ep0 and run them for
 * param->iterations rounds, cycling through NUM_SUBCASES request
 * patterns (including ones expected to stall or short-read) to stress
 * fault recovery.  Returns 0 or the first unexpected error. */
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param)
{
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct urb		**urb;
	struct ctrl_ctx		context;
	int			i;

	/* Reject zero depth and iteration counts that would overflow. */
	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
		return -EOPNOTSUPP;

	spin_lock_init(&context.lock);
	context.dev = dev;
	init_completion(&context.complete);
	context.count = param->sglen * param->iterations;
	context.pending = 0;
	context.status = -ENOMEM;
	context.param = param;
	context.last = -1;

	/* allocate and init the urbs we'll queue.
	 * as with bulk/intr sglists, sglen is the queue depth; it also
	 * controls which subtests run (more tests than sglen) or rerun.
	 */
	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
	if (!urb)
		return -ENOMEM;
	for (i = 0; i < param->sglen; i++) {
		int			pipe = usb_rcvctrlpipe(udev, 0);
		unsigned		len;
		struct urb		*u;
		struct usb_ctrlrequest	req;
		struct subcase		*reqp;

		/* sign of this variable means:
		 *  -: tested code must return this (negative) error code
		 *  +: tested code may return this (negative too) error code
		 */
		int			expected = 0;

		/* requests here are mostly expected to succeed on any
		 * device, but some are chosen to trigger protocol stalls
		 * or short reads.
		 */
		memset(&req, 0, sizeof(req));
		req.bRequest = USB_REQ_GET_DESCRIPTOR;
		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;

		switch (i % NUM_SUBCASES) {
		case 0:		/* get device descriptor */
			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
			len = sizeof(struct usb_device_descriptor);
			break;
		case 1:		/* get first config descriptor (only) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			break;
		case 2:		/* get altsetting (OFTEN STALLS) */
			req.bRequest = USB_REQ_GET_INTERFACE;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* index = 0 means first interface */
			len = 1;
			expected = EPIPE;
			break;
		case 3:		/* get interface status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* interface 0 */
			len = 2;
			break;
		case 4:		/* get device status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
			len = 2;
			break;
		case 5:		/* get device qualifier (MAY STALL) */
			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
			len = sizeof(struct usb_qualifier_descriptor);
			if (udev->speed != USB_SPEED_HIGH)
				expected = EPIPE;
			break;
		case 6:		/* get first config descriptor, plus interface */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			len += sizeof(struct usb_interface_descriptor);
			break;
		case 7:		/* get interface descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
			/* interface == 0 */
			len = sizeof(struct usb_interface_descriptor);
			expected = -EPIPE;
			break;
		/* NOTE: two consecutive stalls in the queue here.
		 * that tests fault recovery a bit more aggressively. */
		case 8:		/* clear endpoint halt (MAY STALL) */
			req.bRequest = USB_REQ_CLEAR_FEATURE;
			req.bRequestType = USB_RECIP_ENDPOINT;
			/* wValue 0 == ep halt */
			/* wIndex 0 == ep0 (shouldn't halt!) */
			len = 0;
			pipe = usb_sndctrlpipe(udev, 0);
			expected = EPIPE;
			break;
		case 9:		/* get endpoint status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
			/* endpoint 0 */
			len = 2;
			break;
		case 10:	/* trigger short read (EREMOTEIO) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = 1024;
			expected = -EREMOTEIO;
			break;
		/* NOTE: two consecutive _different_ faults in the queue. */
		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
			/* endpoint == 0 */
			len = sizeof(struct usb_interface_descriptor);
			expected = EPIPE;
			break;
		/* NOTE: sometimes even a third fault in the queue! */
		case 12:	/* get string 0 descriptor (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
			/* string == 0, for language IDs */
			len = sizeof(struct usb_interface_descriptor);
			/* may succeed when > 4 languages */
			expected = EREMOTEIO;	/* or EPIPE, if no strings */
			break;
		case 13:	/* short read, resembling case 10 */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			/* last data packet "should" be DATA1, not DATA0 */
			if (udev->speed == USB_SPEED_SUPER)
				len = 1024 - 512;
			else
				len = 1024 - udev->descriptor.bMaxPacketSize0;
			expected = -EREMOTEIO;
			break;
		case 14:	/* short read; try to fill the last packet */
			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
			/* device descriptor size == 18 bytes */
			len = udev->descriptor.bMaxPacketSize0;
			if (udev->speed == USB_SPEED_SUPER)
				len = 512;
			switch (len) {
			case 8:
				len = 24;
				break;
			case 16:
				len = 32;
				break;
			}
			expected = -EREMOTEIO;
			break;
		case 15:
			/* BOS descriptor; stalls on pre-2.01 devices. */
			req.wValue = cpu_to_le16(USB_DT_BOS << 8);
			if (udev->bos)
				len = le16_to_cpu(udev->bos->desc->wTotalLength);
			else
				len = sizeof(struct usb_bos_descriptor);
			if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
				expected = -EPIPE;
			break;
		default:
			ERROR(dev, "bogus number of ctrl queue testcases!\n");
			context.status = -EINVAL;
			goto cleanup;
		}
		req.wLength = cpu_to_le16(len);
		urb[i] = u = simple_alloc_urb(udev, pipe, len, 0);
		if (!u)
			goto cleanup;

		reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
		if (!reqp)
			goto cleanup;
		reqp->setup = req;
		reqp->number = i % NUM_SUBCASES;
		reqp->expected = expected;
		u->setup_packet = (char *) &reqp->setup;

		u->context = &context;
		u->complete = ctrl_complete;
	}

	/* queue the urbs */
	context.urb = urb;
	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
		if (context.status != 0) {
			ERROR(dev, "can't submit urb[%d], status %d\n",
					i, context.status);
			context.count = context.pending;
			break;
		}
		context.pending++;
	}
	spin_unlock_irq(&context.lock);

	/* FIXME  set timer and time out; provide a disconnect hook */

	/* wait for the last one to complete */
	if (context.pending > 0)
		wait_for_completion(&context.complete);

cleanup:
	for (i = 0; i < param->sglen; i++) {
		if (!urb[i])
			continue;
		urb[i]->dev = udev;
		kfree(urb[i]->setup_packet);
		simple_free_urb(urb[i]);
	}
	kfree(urb);
	return context.status;
}
| 0
|
223,690
|
// Stub: this implementation does not act on wifi device toggling.
virtual void EnableWifiNetworkDevice(bool enable) {}
| 0
|
501,537
|
// Verifies that a TCP health-check response which does not match the
// configured payload counts neither as success nor as failure yet.
TEST_F(TcpHealthCheckerImplTest, WrongData) {
  InSequence s;

  setupDataDontReuseConnection();
  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {
      makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())};
  expectSessionCreate();
  expectClientCreate();
  EXPECT_CALL(*connection_, write(_, _));
  EXPECT_CALL(*timeout_timer_, enableTimer(_, _));
  health_checker_->start();
  connection_->raiseEvent(Network::ConnectionEvent::Connected);

  // Not the expected response
  Buffer::OwnedImpl response;
  addUint8(response, 3);
  read_filter_->onData(response, false);

  // These are the expected metric results after testing.
  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter("health_check.success").value());
  // TODO(lilika): The TCP health checker does generic pattern matching so we can't differentiate
  // between wrong data and not enough data. We could likely do better here and figure out cases in
  // which a match is not possible but that is not done now.
  EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter("health_check.failure").value());
}
| 0
|
295,777
|
/* sysfs driver attribute: report the NVMe-encapsulation support flag. */
support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
{
	int len;

	len = sprintf(buf, "%u\n", support_nvme_encapsulation);
	return len;
}
| 0
|
67,951
|
// NOTE(review): this overload is presumably never reached for SCP --
// the transfer path appears to be handled elsewhere; DebugFail() flags
// any unexpected call.  Confirm against the caller.
void __fastcall TSCPFileSystem::Source(
  TLocalFileHandle & /*Handle*/, const UnicodeString & /*TargetDir*/, UnicodeString & /*DestFileName*/,
  const TCopyParamType * /*CopyParam*/, int /*Params*/,
  TFileOperationProgressType * /*OperationProgress*/, unsigned int /*Flags*/,
  TUploadSessionAction & /*Action*/, bool & /*ChildError*/)
{
  DebugFail();
}
| 0
|
193,673
|
void BrowserView::CreateLauncherIcon() {
#if defined(USE_ASH)
  // Only browsers hosted in Ash get a launcher item, and only once.
  if (!chrome::IsNativeWindowInAsh(GetNativeWindow()) ||
      launcher_item_controller_.get()) {
    return;
  }
  launcher_item_controller_.reset(
      BrowserLauncherItemController::Create(browser_.get()));
#endif  // defined(USE_ASH)
}
| 0
|
506,605
|
/* Drop one reference to an SSL object and, when the count reaches zero,
 * free every owned resource: BIOs, cipher lists, session, cert, TLS
 * extension state, and finally the structure itself. */
void SSL_free(SSL *s)
	{
	int i;

	if(s == NULL)
	    return;

	i=CRYPTO_add(&s->references,-1,CRYPTO_LOCK_SSL);
#ifdef REF_PRINT
	REF_PRINT("SSL",s);
#endif
	/* Still referenced elsewhere: nothing to free yet. */
	if (i > 0) return;
#ifdef REF_CHECK
	if (i < 0)
		{
		fprintf(stderr,"SSL_free, bad reference count\n");
		abort(); /* ok */
		}
#endif

	if (s->param)
		X509_VERIFY_PARAM_free(s->param);

	CRYPTO_free_ex_data(CRYPTO_EX_INDEX_SSL, s, &s->ex_data);

	if (s->bbio != NULL)
		{
		/* If the buffering BIO is in place, pop it off */
		if (s->bbio == s->wbio)
			{
			s->wbio=BIO_pop(s->wbio);
			}
		BIO_free(s->bbio);
		s->bbio=NULL;
		}
	/* wbio may alias rbio; free it only when distinct. */
	if (s->rbio != NULL)
		BIO_free_all(s->rbio);
	if ((s->wbio != NULL) && (s->wbio != s->rbio))
		BIO_free_all(s->wbio);

	if (s->init_buf != NULL) BUF_MEM_free(s->init_buf);

	/* add extra stuff */
	if (s->cipher_list != NULL) sk_SSL_CIPHER_free(s->cipher_list);
	if (s->cipher_list_by_id != NULL) sk_SSL_CIPHER_free(s->cipher_list_by_id);

	/* Make the next call work :-) */
	if (s->session != NULL)
		{
		ssl_clear_bad_session(s);
		SSL_SESSION_free(s->session);
		}

	ssl_clear_cipher_ctx(s);
	ssl_clear_hash_ctx(&s->read_hash);
	ssl_clear_hash_ctx(&s->write_hash);

	if (s->cert != NULL) ssl_cert_free(s->cert);
	/* Free up if allocated */

#ifndef OPENSSL_NO_TLSEXT
	if (s->tlsext_hostname)
		OPENSSL_free(s->tlsext_hostname);
	if (s->initial_ctx) SSL_CTX_free(s->initial_ctx);
#ifndef OPENSSL_NO_EC
	if (s->tlsext_ecpointformatlist) OPENSSL_free(s->tlsext_ecpointformatlist);
	if (s->tlsext_ellipticcurvelist) OPENSSL_free(s->tlsext_ellipticcurvelist);
#endif /* OPENSSL_NO_EC */
	if (s->tlsext_opaque_prf_input) OPENSSL_free(s->tlsext_opaque_prf_input);
	if (s->tlsext_ocsp_exts)
		sk_X509_EXTENSION_pop_free(s->tlsext_ocsp_exts,
						X509_EXTENSION_free);
	if (s->tlsext_ocsp_ids)
		sk_OCSP_RESPID_pop_free(s->tlsext_ocsp_ids, OCSP_RESPID_free);
	if (s->tlsext_ocsp_resp)
		OPENSSL_free(s->tlsext_ocsp_resp);
#endif

	if (s->client_CA != NULL)
		sk_X509_NAME_pop_free(s->client_CA,X509_NAME_free);

	/* Protocol-specific teardown before releasing the context. */
	if (s->method != NULL) s->method->ssl_free(s);

	if (s->ctx) SSL_CTX_free(s->ctx);

#ifndef OPENSSL_NO_KRB5
	if (s->kssl_ctx != NULL)
		kssl_ctx_free(s->kssl_ctx);
#endif	/* OPENSSL_NO_KRB5 */

#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
	if (s->next_proto_negotiated)
		OPENSSL_free(s->next_proto_negotiated);
#endif

	OPENSSL_free(s);
	}
| 0
|
65,501
|
// Validates the four parallel input lists of a sparse-cross op: index
// matrices, value vectors, shape vectors, and dense matrices must have
// consistent dtypes, ranks, sizes, and batch dimensions.  The
// internal_type check is active only for SparseCrossOp (non-DT_INVALID).
Status ValidateInput(const OpInputList& indices_list_in,
                     const OpInputList& values_list_in,
                     const OpInputList& shapes_list_in,
                     const OpInputList& dense_list_in,
                     const DataType& internal_type) {
  const auto size = indices_list_in.size();
  // Only perform internal_type check for SparseCrossOp.
  // Check if the internal_type is not invalid before doing so.
  bool check_type = internal_type != DT_INVALID;
  // Validates indices_list_in OpInputList.
  for (int i = 0; i < size; i++) {
    if (check_type && indices_list_in[i].dtype() != DT_INT64) {
      return errors::InvalidArgument("Input indices should be of type ",
                                     DT_INT64, " but received ",
                                     indices_list_in[i].dtype());
    }
    // Indices must be rank-2 with [row, col] pairs per entry.
    if (!TensorShapeUtils::IsMatrix(indices_list_in[i].shape())) {
      return errors::InvalidArgument(
          "Input indices should be a matrix but received shape ",
          indices_list_in[i].shape().DebugString(), " at position ", i);
    }
    if (indices_list_in[i].shape().dim_size(1) != 2) {
      return errors::InvalidArgument("Expected D2 of index to be 2 got ",
                                     indices_list_in[i].shape().dim_size(1),
                                     " at position ", i);
    }
  }
  // Validates values_list_in OpInputList.
  if (values_list_in.size() != size) {
    return errors::InvalidArgument("Expected ", size, " input values, got ",
                                   values_list_in.size());
  }
  for (int i = 0; i < size; i++) {
    // Make sure to avoid the expected type to be string, but input values to be
    // int64.
    if (check_type && internal_type == DT_STRING &&
        values_list_in[i].dtype() == DT_INT64) {
      return errors::InvalidArgument("Input values should be of internal type ",
                                     internal_type, " but received ",
                                     values_list_in[i].dtype());
    }
    if (!TensorShapeUtils::IsVector(values_list_in[i].shape())) {
      return errors::InvalidArgument(
          "Input values should be a vector but received shape ",
          values_list_in[i].shape().DebugString(), " at position ", i);
    }
    // One value per index row.
    if (indices_list_in[i].shape().dim_size(0) !=
        values_list_in[i].shape().dim_size(0)) {
      return errors::InvalidArgument(
          "Expected size of values to be ",
          indices_list_in[i].shape().dim_size(0), " got ",
          values_list_in[i].shape().dim_size(0), " at position ", i);
    }
  }
  // Validates shapes_list_in OpInputList
  if (shapes_list_in.size() != size) {
    return errors::InvalidArgument("Expected ", size, " input shapes, got ",
                                   shapes_list_in.size());
  }
  for (int i = 0; i < size; i++) {
    if (check_type && shapes_list_in[i].dtype() != DT_INT64) {
      return errors::InvalidArgument("Input shape should be of type ", DT_INT64,
                                     " but received ",
                                     shapes_list_in[i].dtype());
    }
    if (!TensorShapeUtils::IsVector(shapes_list_in[i].shape())) {
      return errors::InvalidArgument(
          "Input shapes should be a vector but received shape ",
          shapes_list_in[i].shape().DebugString(), " at position ", i);
    }
    // Each shape vector describes a 2-D sparse tensor: [batch, dim].
    if (shapes_list_in[i].vec<int64>().size() != 2) {
      return errors::InvalidArgument("shape should imply a 2D tensor, but got ",
                                     shapes_list_in[i].shape().DebugString(),
                                     " at position ", i);
    }
  }
  // Validates dense_list_in OpInputList
  for (int i = 0; i < dense_list_in.size(); ++i) {
    // Make sure to avoid the expected type to be string, but input values to be
    // int64.
    if (check_type && internal_type == DT_STRING &&
        dense_list_in[i].dtype() == DT_INT64) {
      return errors::InvalidArgument("Dense inputs should be of internal type ",
                                     internal_type, " but received ",
                                     dense_list_in[i].dtype());
    }
    if (!TensorShapeUtils::IsMatrix(dense_list_in[i].shape())) {
      return errors::InvalidArgument(
          "Dense inputs should be a matrix but received shape ",
          dense_list_in[i].shape().DebugString(), " at position ", i);
    }
  }
  // Validates batch sizes.  (Note: we do this after validating the input
  // shapes, because CalculateBatchSize() depends on inputs having valid
  // shapes).
  const auto batch_size = CalculateBatchSize(shapes_list_in, dense_list_in);
  for (int i = 0; i < size; i++) {
    if (shapes_list_in[i].vec<int64>()(0) != batch_size) {
      return errors::InvalidArgument("Expected batch size ", batch_size,
                                     " got ", shapes_list_in[i].vec<int64>()(0),
                                     " at position ", i);
    }
  }
  for (int i = 0; i < dense_list_in.size(); ++i) {
    if (dense_list_in[i].dim_size(0) != batch_size) {
      return errors::InvalidArgument("Expected batch size ", batch_size,
                                     " got ", dense_list_in[i].dim_size(0),
                                     " at dense tensor ", i);
    }
  }
  return Status::OK();
}
| 0
|
466,216
|
// Verifies the per-stream request timeout is disarmed when a request completes
// via a final data frame: headers arrive with end_stream=false, then
// decodeData(..., true) finishes the request before the 10ms timer can fire.
TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithData) {
  request_timeout_ = std::chrono::milliseconds(10);
  setup(false, "");
  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {
    // Timer is armed when the new stream is created ...
    Event::MockTimer* request_timer = setUpTimer();
    EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _));
    decoder_ = &conn_manager_->newStream(response_encoder_);
    RequestHeaderMapPtr headers{
        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "POST"}}};
    decoder_->decodeHeaders(std::move(headers), false);
    // ... and must be disabled once the request completes.
    // NOTE(review): two disableTimer() calls are expected — presumably one on
    // request completion and one on stream teardown; confirm against
    // ConnectionManagerImpl.
    EXPECT_CALL(*request_timer, disableTimer()).Times(2);
    decoder_->decodeData(data, true);
    return Http::okStatus();
  }));
  Buffer::OwnedImpl fake_input("1234");
  conn_manager_->onData(fake_input, false);
  // The request-timeout counter must not have incremented.
  EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value());
  expectOnDestroy();
  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
}
| 0
|
402,404
|
eddsa_encodempi (gcry_mpi_t mpi, unsigned int minlen,
                 unsigned char **r_buffer, unsigned int *r_buflen)
{
  unsigned int nbytes;
  unsigned char *buf;

  /* Serialize MPI into a buffer of at least MINLEN bytes; ownership of
     the buffer transfers to the caller on success.  */
  buf = _gcry_mpi_get_buffer (mpi, minlen, &nbytes, NULL);
  if (buf == NULL)
    return gpg_err_code_from_syserror ();

  *r_buffer = buf;
  *r_buflen = nbytes;
  return 0;
}
| 0
|
94,435
|
/*
 * Legacy (IPv4-only) getsockopt handler: copy up to get->num_services
 * virtual-service entries into the userspace buffer at uptr.  Walks
 * both the <protocol,addr,port> hash table and the fwmark hash table.
 * Returns 0 on success or -EFAULT if a copy to userspace faults.
 */
__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
			    struct ip_vs_get_services __user *uptr)
{
	int idx, count=0;
	struct ip_vs_service *svc;
	struct ip_vs_service_entry entry;
	int ret = 0;
	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
		list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
			/* Only expose IPv4 entries to old interface */
			if (svc->af != AF_INET)
				continue;
			if (count >= get->num_services)
				goto out;
			/* Zero the whole entry so uninitialized stack
			 * bytes (e.g. struct padding) cannot leak to
			 * userspace via copy_to_user(). */
			memset(&entry, 0, sizeof(entry));
			ip_vs_copy_service(&entry, svc);
			if (copy_to_user(&uptr->entrytable[count],
					 &entry, sizeof(entry))) {
				ret = -EFAULT;
				goto out;
			}
			count++;
		}
	}
	/* Second pass: services keyed by firewall mark. */
	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
		list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
			/* Only expose IPv4 entries to old interface */
			if (svc->af != AF_INET)
				continue;
			if (count >= get->num_services)
				goto out;
			memset(&entry, 0, sizeof(entry));
			ip_vs_copy_service(&entry, svc);
			if (copy_to_user(&uptr->entrytable[count],
					 &entry, sizeof(entry))) {
				ret = -EFAULT;
				goto out;
			}
			count++;
		}
	}
out:
	return ret;
}
| 0
|
261,757
|
/*
 * RTAS "start-cpu" call: release a halted secondary CPU, setting its
 * entry point (NIP) and initial r3 argument.  Writes an RTAS status
 * into rets[0]: 0 on success, -1 if the CPU is already running, -3 on
 * bad arguments or unknown CPU id.
 */
static void rtas_start_cpu(sPAPREnvironment *spapr,
                           uint32_t token, uint32_t nargs,
                           target_ulong args,
                           uint32_t nret, target_ulong rets)
{
    target_ulong cpu_index, entry_point, initial_r3;
    CPUState *cs;
    PowerPCCPU *cpu;
    CPUPPCState *env;

    if (nargs != 3 || nret != 1) {
        rtas_st(rets, 0, -3);
        return;
    }

    cpu_index = rtas_ld(args, 0);
    entry_point = rtas_ld(args, 1);
    initial_r3 = rtas_ld(args, 2);

    cs = qemu_get_cpu(cpu_index);
    if (cs == NULL) {
        /* Didn't find a matching cpu */
        rtas_st(rets, 0, -3);
        return;
    }

    cpu = POWERPC_CPU(cs);
    env = &cpu->env;

    if (!cs->halted) {
        /* Refuse to restart a CPU that is already running. */
        rtas_st(rets, 0, -1);
        return;
    }

    /* This will make sure qemu state is up to date with kvm, and
     * mark it dirty so our changes get flushed back before the
     * new cpu enters */
    kvm_cpu_synchronize_state(cs);

    env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME);
    env->nip = entry_point;
    env->gpr[3] = initial_r3;
    cs->halted = 0;
    qemu_cpu_kick(cs);

    rtas_st(rets, 0, 0);
}
| 0
|
434,380
|
/*
 * Copy fmt into dest (buffer of size destlen), expanding "%s" with src
 * and "%%" with a literal '%'.  If fmt contains no "%s", a space and
 * src are appended instead.  The result is always NUL-terminated.
 */
void mutt_expand_fmt (char *dest, size_t destlen, const char *fmt, const char *src)
{
  const char *p;
  char *d;
  size_t slen;
  int found = 0;
  slen = mutt_strlen (src);
  destlen--;			/* reserve room for the trailing NUL */
  for (p = fmt, d = dest; destlen && *p; p++)
  {
    if (*p == '%')
    {
      switch (p[1])
      {
	case '%':
	  /* "%%" -> '%': copy one, p++ here plus the loop's p++ skips both. */
	  *d++ = *p++;
	  destlen--;
	  break;
	case 's':
	  found = 1;
	  strfcpy (d, src, destlen + 1);
	  /* Advance by what actually fit (at most slen bytes). */
	  d += destlen > slen ? slen : destlen;
	  destlen -= destlen > slen ? slen : destlen;
	  p++;
	  break;
	default:
	  /* Unknown % escape: emit the '%' verbatim; the escape char
	     follows on the next iteration. */
	  *d++ = *p;
	  destlen--;
	  break;
      }
    }
    else
    {
      *d++ = *p;
      destlen--;
    }
  }
  *d = '\0';
  if (!found && destlen > 0)
  {
    /* No "%s" in fmt: append " " and src, bounded by safe_strcat.
     * NOTE(review): destlen was decremented above, so this bound is one
     * less than the remaining buffer — confirm that is intended. */
    safe_strcat (dest, destlen, " ");
    safe_strcat (dest, destlen, src);
  }
}
| 0
|
307,624
|
// Resolves the element referenced by aria-activedescendant (an id in this
// element's tree scope) to its accessibility object, or null when the
// attribute is absent/empty or the id does not resolve.
AXObject* AXNodeObject::activeDescendant() {
  if (!m_node || !m_node->isElementNode())
    return nullptr;

  const AtomicString& idRef = getAttribute(aria_activedescendantAttr);
  if (idRef.isNull() || idRef.isEmpty())
    return nullptr;

  Element* self = toElement(getNode());
  Element* target = self->treeScope().getElementById(idRef);
  if (!target)
    return nullptr;

  return axObjectCache().getOrCreate(target);
}
| 0
|
257,556
|
/*
 * Set which reference frames the encoder will refresh, from a bitmask of
 * VP8_LAST_FRAME | VP8_GOLD_FRAME | VP8_ALTR_FRAME.
 * Returns 0 on success, -1 if flags outside the low three bits are set.
 */
int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
  if (ref_frame_flags > 7) return -1;

  cpi->common.refresh_last_frame = (ref_frame_flags & VP8_LAST_FRAME) ? 1 : 0;
  cpi->common.refresh_golden_frame = (ref_frame_flags & VP8_GOLD_FRAME) ? 1 : 0;
  cpi->common.refresh_alt_ref_frame = (ref_frame_flags & VP8_ALTR_FRAME) ? 1 : 0;

  return 0;
}
| 0
|
219,940
|
static void AddHistogramSample(void* hist, int sample) {
Histogram* histogram = static_cast<Histogram *>(hist);
histogram->Add(sample);
}
| 0
|
418,688
|
/*
 * Expand the %{...} substitution codes in msg from the fields of
 * message, returning a newly allocated GString (caller owns).
 * "%%" emits a literal '%'; unknown codes expand to "NONE"; a code may
 * carry an optional format prefix, e.g. "%{size:bytes}".
 * Returns NULL when msg is NULL.  When want_quoted is set, string
 * values are emitted through quote_string().
 */
fix_message_string(
    message_t *message,
    gboolean want_quoted,
    char *msg)
{
    char *m;
    char num[NUM_STR_SIZE];
    char code[100];
    char *c;
    int i;
    char *quoted;
    GString *result;

    if (!msg)
	return NULL;

    result = g_string_sized_new(strlen(msg)*2);
    for (m = msg; *m != '\0'; m++) {
	c = code;
	if (*m == '%' && *(m+1) == '%') {
	    /* "%%" -> literal '%' */
	    g_string_append_c(result, *m);
	    m++;
	} else if (*m == '%' && *(m+1) == '{') {
	    m += 2;
	    /* Copy the code name.  Bound the scan by both the string
	     * terminator and the size of code[]: previously a missing
	     * '}' (or a code name of 100+ bytes) read past the end of
	     * msg and wrote past the end of code[]. */
	    while (*m != '\0' && *m != '}') {
		if (c < code + sizeof(code) - 1)
		    *c++ = *m;
		m++;
	    }
	    *c = '\0';
	    if (*m == '\0')
		m--;	/* let the for-loop's m++ land back on the NUL */
	    if (strcmp(code, "file") == 0) {
		if (want_quoted) {
		    quoted = quote_string(message->file);
		    g_string_append(result, quoted);
		    g_free(quoted);
		} else {
		    g_string_append(result, message->file);
		}
	    } else if (strcmp(code, "line") == 0) {
		g_snprintf(num, sizeof(num),
			   "%d", message->line);
		g_string_append(result, num);
	    } else if (strcmp(code, "code") == 0) {
		g_snprintf(num, sizeof(num),
			   "%d", message->code);
		g_string_append(result, num);
	    } else if (strcmp(code, "severity") == 0) {
		g_string_append(result, severity_name(message->severity));
	    } else if (strcmp(code, "errnostr") == 0) {
		g_string_append(result, message->errnostr);
	    } else if (strcmp(code, "errnocode") == 0) {
		g_string_append(result, message->errnocode);
	    } else {
		/* Optional "format:key" syntax.  (Renamed from 'c' to
		 * avoid shadowing the outer cursor.) */
		char *colon = strchr(code, ':');
		char *format = NULL;
		char *ccode = code;
		if (colon) {
		    *colon = '\0';
		    format = code;
		    ccode = colon+1;
		}
		/* Linear lookup of the key in the argument array. */
		i = 0;
		while (message->arg_array[i].key != NULL &&
		       strcmp(message->arg_array[i].key, ccode) != 0) {
		    i++;
		}
		if (message->arg_array[i].key != NULL) {
		    if (format) {
			assert(message->arg_array[i].value.type == MESSAGE_STRING);
			if (strcmp(format,"size") == 0) {
			    /* Render a byte count in the configured display unit. */
			    long long llvalue = atoll(message->arg_array[i].value.string);
			    g_string_append_printf(result, "%lld %sB", llvalue/getconf_unit_divisor(),
						   getconf_str(CNF_DISPLAYUNIT));
			} else {
			    g_string_append(result, "BAD-FORMAT");
			}
		    } else {
			if (message->arg_array[i].value.type == MESSAGE_NULL) {
			    /* NULL-typed values expand to nothing. */
			} else if (message->arg_array[i].value.type == MESSAGE_STRING) {
			    if (message->arg_array[i].value.string == NULL) {
				g_string_append(result, "null");
			    } else if (want_quoted) {
				quoted = quote_string(message->arg_array[i].value.string);
				g_string_append(result, quoted);
				g_free(quoted);
			    } else {
				g_string_append(result, message->arg_array[i].value.string);
			    }
			}
		    }
		} else {
		    g_string_append(result, "NONE");
		}
	    }
	} else {
	    g_string_append_c(result, *m);
	}
    }
    return result;
}
| 0
|
342,231
|
/*
 * Report every incompatible qcow2 feature bit set in @mask that this
 * build does not support.  Known bits (looked up in @table, terminated
 * by an entry with an empty name) are reported by name and cleared;
 * whatever remains afterwards is reported as a raw hex mask.
 */
static void report_unsupported_feature(BlockDriverState *bs,
    Error **errp, Qcow2Feature *table, uint64_t mask)
{
    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            /* The feature mask is 64 bits wide, so the shift must be
             * done in 64-bit arithmetic: "1 << bit" is plain int, which
             * is undefined for bit >= 31 and can never test bits 32-63. */
            if (mask & (1ULL << table->bit)) {
                report_unsupported(bs, errp, "%.46s", table->name);
                mask &= ~(1ULL << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        report_unsupported(bs, errp, "Unknown incompatible feature: %" PRIx64,
                           mask);
    }
}
| 1
|
271,713
|
static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
{
return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32);
}
| 0
|
221,020
|
// Drops all security state tracked for |child_id|.  Safe to call more
// than once for the same child; later calls are no-ops.
void ChildProcessSecurityPolicyImpl::Remove(int child_id) {
  base::AutoLock lock(lock_);
  auto it = security_state_.find(child_id);
  if (it == security_state_.end())
    return;  // May be called multiple times.
  delete it->second;
  security_state_.erase(it);
  worker_map_.erase(child_id);
}
| 0
|
122,770
|
/*
 * SHOW STATUS callback: fill buff with a ':'-separated list of the
 * cipher suites available on the client's SSL connection, or an empty
 * string when the connection is not using SSL.  Always returns 0.
 */
static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff)
{
  var->type= SHOW_CHAR;
  var->value= buff;
  if (thd->vio_ok() && thd->net.vio->ssl_arg)
  {
    int i;
    const char *p;
    char *end= buff + SHOW_VAR_FUNC_BUFF_SIZE;
    /* Append each cipher name followed by ':' while space remains;
       strnmov bounds every copy to the remaining buffer. */
    for (i=0; (p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i)) &&
              buff < end; i++)
    {
      buff= strnmov(buff, p, end-buff-1);
      *buff++= ':';
    }
    /* Drop the trailing ':' if at least one cipher was written. */
    if (i)
      buff--;
  }
  *buff=0;
  return 0;
}
| 0
|
425,278
|
/*
 * iommu_ops callback: collect the reserved memory regions that apply to
 * @device — every RMRR range whose device scope includes @device, plus
 * the x86 IOAPIC MSI window.
 */
static void intel_iommu_get_resv_regions(struct device *device,
					 struct list_head *head)
{
	struct iommu_resv_region *reg;
	struct dmar_rmrr_unit *rmrr;
	struct device *i_dev;
	int i;
	/* The RMRR unit list is RCU-protected. */
	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, i_dev) {
			if (i_dev != device)
				continue;
			/* NOTE(review): rmrr->resv looks like a single
			 * pre-allocated region shared across devices;
			 * confirm its list node cannot be added to two
			 * lists concurrently. */
			list_add_tail(&rmrr->resv->list, head);
		}
	}
	rcu_read_unlock();
	/* Always reserve the IOAPIC range as an MSI region. */
	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
				      0, IOMMU_RESV_MSI);
	if (!reg)
		return;
	list_add_tail(&reg->list, head);
}
| 0
|
240,226
|
/*
 * NFSv4 COMMIT operation: flush data previously written through the
 * current filehandle (offset/count range) to stable storage.
 */
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	     struct nfsd4_commit *commit)
{
	/* Fill in the write verifier first — presumably tied to server
	 * boot time so clients can detect a reboot (and thus possibly
	 * lost unstable writes); confirm in gen_boot_verifier(). */
	gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp));
	return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
			   commit->co_count);
}
| 0
|
399,269
|
/*
 * WL#5968 regression test: the server status flags reported to the
 * client must track transaction state — SERVER_STATUS_IN_TRANS while a
 * transaction is open, and SERVER_STATUS_IN_TRANS_READONLY only for
 * READ ONLY transactions.
 */
static void test_wl5968()
{
  int rc;
  myheader("test_wl5968");
  /* Plain transaction: IN_TRANS set, READONLY clear. */
  rc= mysql_query(mysql, "START TRANSACTION");
  myquery(rc);
  DIE_UNLESS(mysql->server_status & SERVER_STATUS_IN_TRANS);
  DIE_UNLESS(!(mysql->server_status & SERVER_STATUS_IN_TRANS_READONLY));
  rc= mysql_query(mysql, "COMMIT");
  myquery(rc);
  /* READ ONLY transaction: both flags set. */
  rc= mysql_query(mysql, "START TRANSACTION READ ONLY");
  myquery(rc);
  DIE_UNLESS(mysql->server_status & SERVER_STATUS_IN_TRANS);
  DIE_UNLESS(mysql->server_status & SERVER_STATUS_IN_TRANS_READONLY);
  rc= mysql_query(mysql, "COMMIT");
  myquery(rc);
  /* After COMMIT both flags must be cleared. */
  DIE_UNLESS(!(mysql->server_status & SERVER_STATUS_IN_TRANS));
  DIE_UNLESS(!(mysql->server_status & SERVER_STATUS_IN_TRANS_READONLY));
  /* The READONLY flag must not leak into a following plain transaction. */
  rc= mysql_query(mysql, "START TRANSACTION");
  myquery(rc);
  DIE_UNLESS(mysql->server_status & SERVER_STATUS_IN_TRANS);
  DIE_UNLESS(!(mysql->server_status & SERVER_STATUS_IN_TRANS_READONLY));
  rc= mysql_query(mysql, "COMMIT");
  myquery(rc);
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.